diff options
654 files changed, 7526 insertions, 3430 deletions
diff --git a/Documentation/acpi/acpi-lid.txt b/Documentation/acpi/acpi-lid.txt index 22cb3091f297..effe7af3a5af 100644 --- a/Documentation/acpi/acpi-lid.txt +++ b/Documentation/acpi/acpi-lid.txt | |||
@@ -59,20 +59,28 @@ button driver uses the following 3 modes in order not to trigger issues. | |||
59 | If the userspace hasn't been prepared to ignore the unreliable "opened" | 59 | If the userspace hasn't been prepared to ignore the unreliable "opened" |
60 | events and the unreliable initial state notification, Linux users can use | 60 | events and the unreliable initial state notification, Linux users can use |
61 | the following kernel parameters to handle the possible issues: | 61 | the following kernel parameters to handle the possible issues: |
62 | A. button.lid_init_state=open: | 62 | A. button.lid_init_state=method: |
63 | When this option is specified, the ACPI button driver reports the | ||
64 | initial lid state using the returning value of the _LID control method | ||
65 | and whether the "opened"/"closed" events are paired fully relies on the | ||
66 | firmware implementation. | ||
67 | This option can be used to fix some platforms where the returning value | ||
68 | of the _LID control method is reliable but the initial lid state | ||
69 | notification is missing. | ||
70 | This option is the default behavior during the period the userspace | ||
71 | isn't ready to handle the buggy AML tables. | ||
72 | B. button.lid_init_state=open: | ||
63 | When this option is specified, the ACPI button driver always reports the | 73 | When this option is specified, the ACPI button driver always reports the |
64 | initial lid state as "opened" and whether the "opened"/"closed" events | 74 | initial lid state as "opened" and whether the "opened"/"closed" events |
65 | are paired fully relies on the firmware implementation. | 75 | are paired fully relies on the firmware implementation. |
66 | This may fix some platforms where the returning value of the _LID | 76 | This may fix some platforms where the returning value of the _LID |
67 | control method is not reliable and the initial lid state notification is | 77 | control method is not reliable and the initial lid state notification is |
68 | missing. | 78 | missing. |
69 | This option is the default behavior during the period the userspace | ||
70 | isn't ready to handle the buggy AML tables. | ||
71 | 79 | ||
72 | If the userspace has been prepared to ignore the unreliable "opened" events | 80 | If the userspace has been prepared to ignore the unreliable "opened" events |
73 | and the unreliable initial state notification, Linux users should always | 81 | and the unreliable initial state notification, Linux users should always |
74 | use the following kernel parameter: | 82 | use the following kernel parameter: |
75 | B. button.lid_init_state=ignore: | 83 | C. button.lid_init_state=ignore: |
76 | When this option is specified, the ACPI button driver never reports the | 84 | When this option is specified, the ACPI button driver never reports the |
77 | initial lid state and there is a compensation mechanism implemented to | 85 | initial lid state and there is a compensation mechanism implemented to |
78 | ensure that the reliable "closed" notifications can always be delivered | 86 | ensure that the reliable "closed" notifications can always be delivered |
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst index 289c80f7760e..09aa2e949787 100644 --- a/Documentation/admin-guide/pm/cpufreq.rst +++ b/Documentation/admin-guide/pm/cpufreq.rst | |||
@@ -1,4 +1,5 @@ | |||
1 | .. |struct cpufreq_policy| replace:: :c:type:`struct cpufreq_policy <cpufreq_policy>` | 1 | .. |struct cpufreq_policy| replace:: :c:type:`struct cpufreq_policy <cpufreq_policy>` |
2 | .. |intel_pstate| replace:: :doc:`intel_pstate <intel_pstate>` | ||
2 | 3 | ||
3 | ======================= | 4 | ======================= |
4 | CPU Performance Scaling | 5 | CPU Performance Scaling |
@@ -75,7 +76,7 @@ feedback registers, as that information is typically specific to the hardware | |||
75 | interface it comes from and may not be easily represented in an abstract, | 76 | interface it comes from and may not be easily represented in an abstract, |
76 | platform-independent way. For this reason, ``CPUFreq`` allows scaling drivers | 77 | platform-independent way. For this reason, ``CPUFreq`` allows scaling drivers |
77 | to bypass the governor layer and implement their own performance scaling | 78 | to bypass the governor layer and implement their own performance scaling |
78 | algorithms. That is done by the ``intel_pstate`` scaling driver. | 79 | algorithms. That is done by the |intel_pstate| scaling driver. |
79 | 80 | ||
80 | 81 | ||
81 | ``CPUFreq`` Policy Objects | 82 | ``CPUFreq`` Policy Objects |
@@ -174,13 +175,13 @@ necessary to restart the scaling governor so that it can take the new online CPU | |||
174 | into account. That is achieved by invoking the governor's ``->stop`` and | 175 | into account. That is achieved by invoking the governor's ``->stop`` and |
175 | ``->start()`` callbacks, in this order, for the entire policy. | 176 | ``->start()`` callbacks, in this order, for the entire policy. |
176 | 177 | ||
177 | As mentioned before, the ``intel_pstate`` scaling driver bypasses the scaling | 178 | As mentioned before, the |intel_pstate| scaling driver bypasses the scaling |
178 | governor layer of ``CPUFreq`` and provides its own P-state selection algorithms. | 179 | governor layer of ``CPUFreq`` and provides its own P-state selection algorithms. |
179 | Consequently, if ``intel_pstate`` is used, scaling governors are not attached to | 180 | Consequently, if |intel_pstate| is used, scaling governors are not attached to |
180 | new policy objects. Instead, the driver's ``->setpolicy()`` callback is invoked | 181 | new policy objects. Instead, the driver's ``->setpolicy()`` callback is invoked |
181 | to register per-CPU utilization update callbacks for each policy. These | 182 | to register per-CPU utilization update callbacks for each policy. These |
182 | callbacks are invoked by the CPU scheduler in the same way as for scaling | 183 | callbacks are invoked by the CPU scheduler in the same way as for scaling |
183 | governors, but in the ``intel_pstate`` case they both determine the P-state to | 184 | governors, but in the |intel_pstate| case they both determine the P-state to |
184 | use and change the hardware configuration accordingly in one go from scheduler | 185 | use and change the hardware configuration accordingly in one go from scheduler |
185 | context. | 186 | context. |
186 | 187 | ||
@@ -257,7 +258,7 @@ are the following: | |||
257 | 258 | ||
258 | ``scaling_available_governors`` | 259 | ``scaling_available_governors`` |
259 | List of ``CPUFreq`` scaling governors present in the kernel that can | 260 | List of ``CPUFreq`` scaling governors present in the kernel that can |
260 | be attached to this policy or (if the ``intel_pstate`` scaling driver is | 261 | be attached to this policy or (if the |intel_pstate| scaling driver is |
261 | in use) list of scaling algorithms provided by the driver that can be | 262 | in use) list of scaling algorithms provided by the driver that can be |
262 | applied to this policy. | 263 | applied to this policy. |
263 | 264 | ||
@@ -274,7 +275,7 @@ are the following: | |||
274 | the CPU is actually running at (due to hardware design and other | 275 | the CPU is actually running at (due to hardware design and other |
275 | limitations). | 276 | limitations). |
276 | 277 | ||
277 | Some scaling drivers (e.g. ``intel_pstate``) attempt to provide | 278 | Some scaling drivers (e.g. |intel_pstate|) attempt to provide |
278 | information more precisely reflecting the current CPU frequency through | 279 | information more precisely reflecting the current CPU frequency through |
279 | this attribute, but that still may not be the exact current CPU | 280 | this attribute, but that still may not be the exact current CPU |
280 | frequency as seen by the hardware at the moment. | 281 | frequency as seen by the hardware at the moment. |
@@ -284,13 +285,13 @@ are the following: | |||
284 | 285 | ||
285 | ``scaling_governor`` | 286 | ``scaling_governor`` |
286 | The scaling governor currently attached to this policy or (if the | 287 | The scaling governor currently attached to this policy or (if the |
287 | ``intel_pstate`` scaling driver is in use) the scaling algorithm | 288 | |intel_pstate| scaling driver is in use) the scaling algorithm |
288 | provided by the driver that is currently applied to this policy. | 289 | provided by the driver that is currently applied to this policy. |
289 | 290 | ||
290 | This attribute is read-write and writing to it will cause a new scaling | 291 | This attribute is read-write and writing to it will cause a new scaling |
291 | governor to be attached to this policy or a new scaling algorithm | 292 | governor to be attached to this policy or a new scaling algorithm |
292 | provided by the scaling driver to be applied to it (in the | 293 | provided by the scaling driver to be applied to it (in the |
293 | ``intel_pstate`` case), as indicated by the string written to this | 294 | |intel_pstate| case), as indicated by the string written to this |
294 | attribute (which must be one of the names listed by the | 295 | attribute (which must be one of the names listed by the |
295 | ``scaling_available_governors`` attribute described above). | 296 | ``scaling_available_governors`` attribute described above). |
296 | 297 | ||
@@ -619,7 +620,7 @@ This file is located under :file:`/sys/devices/system/cpu/cpufreq/` and controls | |||
619 | the "boost" setting for the whole system. It is not present if the underlying | 620 | the "boost" setting for the whole system. It is not present if the underlying |
620 | scaling driver does not support the frequency boost mechanism (or supports it, | 621 | scaling driver does not support the frequency boost mechanism (or supports it, |
621 | but provides a driver-specific interface for controlling it, like | 622 | but provides a driver-specific interface for controlling it, like |
622 | ``intel_pstate``). | 623 | |intel_pstate|). |
623 | 624 | ||
624 | If the value in this file is 1, the frequency boost mechanism is enabled. This | 625 | If the value in this file is 1, the frequency boost mechanism is enabled. This |
625 | means that either the hardware can be put into states in which it is able to | 626 | means that either the hardware can be put into states in which it is able to |
diff --git a/Documentation/admin-guide/pm/index.rst b/Documentation/admin-guide/pm/index.rst index c80f087321fc..7f148f76f432 100644 --- a/Documentation/admin-guide/pm/index.rst +++ b/Documentation/admin-guide/pm/index.rst | |||
@@ -6,6 +6,7 @@ Power Management | |||
6 | :maxdepth: 2 | 6 | :maxdepth: 2 |
7 | 7 | ||
8 | cpufreq | 8 | cpufreq |
9 | intel_pstate | ||
9 | 10 | ||
10 | .. only:: subproject and html | 11 | .. only:: subproject and html |
11 | 12 | ||
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst new file mode 100644 index 000000000000..33d703989ea8 --- /dev/null +++ b/Documentation/admin-guide/pm/intel_pstate.rst | |||
@@ -0,0 +1,755 @@ | |||
1 | =============================================== | ||
2 | ``intel_pstate`` CPU Performance Scaling Driver | ||
3 | =============================================== | ||
4 | |||
5 | :: | ||
6 | |||
7 | Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
8 | |||
9 | |||
10 | General Information | ||
11 | =================== | ||
12 | |||
13 | ``intel_pstate`` is a part of the | ||
14 | :doc:`CPU performance scaling subsystem <cpufreq>` in the Linux kernel | ||
15 | (``CPUFreq``). It is a scaling driver for the Sandy Bridge and later | ||
16 | generations of Intel processors. Note, however, that some of those processors | ||
17 | may not be supported. [To understand ``intel_pstate`` it is necessary to know | ||
18 | how ``CPUFreq`` works in general, so this is the time to read :doc:`cpufreq` if | ||
19 | you have not done that yet.] | ||
20 | |||
21 | For the processors supported by ``intel_pstate``, the P-state concept is broader | ||
22 | than just an operating frequency or an operating performance point (see the | ||
23 | `LinuxCon Europe 2015 presentation by Kristen Accardi <LCEU2015_>`_ for more | ||
24 | information about that). For this reason, the representation of P-states used | ||
25 | by ``intel_pstate`` internally follows the hardware specification (for details | ||
26 | refer to `Intel® 64 and IA-32 Architectures Software Developer’s Manual | ||
27 | Volume 3: System Programming Guide <SDM_>`_). However, the ``CPUFreq`` core | ||
28 | uses frequencies for identifying operating performance points of CPUs and | ||
29 | frequencies are involved in the user space interface exposed by it, so | ||
30 | ``intel_pstate`` maps its internal representation of P-states to frequencies too | ||
31 | (fortunately, that mapping is unambiguous). At the same time, it would not be | ||
32 | practical for ``intel_pstate`` to supply the ``CPUFreq`` core with a table of | ||
33 | available frequencies due to the possible size of it, so the driver does not do | ||
34 | that. Some functionality of the core is limited by that. | ||
35 | |||
36 | Since the hardware P-state selection interface used by ``intel_pstate`` is | ||
37 | available at the logical CPU level, the driver always works with individual | ||
38 | CPUs. Consequently, if ``intel_pstate`` is in use, every ``CPUFreq`` policy | ||
39 | object corresponds to one logical CPU and ``CPUFreq`` policies are effectively | ||
40 | equivalent to CPUs. In particular, this means that they become "inactive" every | ||
41 | time the corresponding CPU is taken offline and need to be re-initialized when | ||
42 | it goes back online. | ||
43 | |||
44 | ``intel_pstate`` is not modular, so it cannot be unloaded, which means that the | ||
45 | only way to pass early-configuration-time parameters to it is via the kernel | ||
46 | command line. However, its configuration can be adjusted via ``sysfs`` to a | ||
47 | great extent. In some configurations it even is possible to unregister it via | ||
48 | ``sysfs`` which allows another ``CPUFreq`` scaling driver to be loaded and | ||
49 | registered (see `below <status_attr_>`_). | ||
50 | |||
51 | |||
52 | Operation Modes | ||
53 | =============== | ||
54 | |||
55 | ``intel_pstate`` can operate in three different modes: in the active mode with | ||
56 | or without hardware-managed P-states support and in the passive mode. Which of | ||
57 | them will be in effect depends on what kernel command line options are used and | ||
58 | on the capabilities of the processor. | ||
59 | |||
60 | Active Mode | ||
61 | ----------- | ||
62 | |||
63 | This is the default operation mode of ``intel_pstate``. If it works in this | ||
64 | mode, the ``scaling_driver`` policy attribute in ``sysfs`` for all ``CPUFreq`` | ||
65 | policies contains the string "intel_pstate". | ||
66 | |||
67 | In this mode the driver bypasses the scaling governors layer of ``CPUFreq`` and | ||
68 | provides its own scaling algorithms for P-state selection. Those algorithms | ||
69 | can be applied to ``CPUFreq`` policies in the same way as generic scaling | ||
70 | governors (that is, through the ``scaling_governor`` policy attribute in | ||
71 | ``sysfs``). [Note that different P-state selection algorithms may be chosen for | ||
72 | different policies, but that is not recommended.] | ||
73 | |||
74 | They are not generic scaling governors, but their names are the same as the | ||
75 | names of some of those governors. Moreover, confusingly enough, they generally | ||
76 | do not work in the same way as the generic governors they share the names with. | ||
77 | For example, the ``powersave`` P-state selection algorithm provided by | ||
78 | ``intel_pstate`` is not a counterpart of the generic ``powersave`` governor | ||
79 | (roughly, it corresponds to the ``schedutil`` and ``ondemand`` governors). | ||
80 | |||
81 | There are two P-state selection algorithms provided by ``intel_pstate`` in the | ||
82 | active mode: ``powersave`` and ``performance``. The way they both operate | ||
83 | depends on whether or not the hardware-managed P-states (HWP) feature has been | ||
84 | enabled in the processor and possibly on the processor model. | ||
85 | |||
86 | Which of the P-state selection algorithms is used by default depends on the | ||
87 | :c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option. | ||
88 | Namely, if that option is set, the ``performance`` algorithm will be used by | ||
89 | default, and the other one will be used by default if it is not set. | ||
90 | |||
91 | Active Mode With HWP | ||
92 | ~~~~~~~~~~~~~~~~~~~~ | ||
93 | |||
94 | If the processor supports the HWP feature, it will be enabled during the | ||
95 | processor initialization and cannot be disabled after that. It is possible | ||
96 | to avoid enabling it by passing the ``intel_pstate=no_hwp`` argument to the | ||
97 | kernel in the command line. | ||
98 | |||
99 | If the HWP feature has been enabled, ``intel_pstate`` relies on the processor to | ||
100 | select P-states by itself, but still it can give hints to the processor's | ||
101 | internal P-state selection logic. What those hints are depends on which P-state | ||
102 | selection algorithm has been applied to the given policy (or to the CPU it | ||
103 | corresponds to). | ||
104 | |||
105 | Even though the P-state selection is carried out by the processor automatically, | ||
106 | ``intel_pstate`` registers utilization update callbacks with the CPU scheduler | ||
107 | in this mode. However, they are not used for running a P-state selection | ||
108 | algorithm, but for periodic updates of the current CPU frequency information to | ||
109 | be made available from the ``scaling_cur_freq`` policy attribute in ``sysfs``. | ||
110 | |||
111 | HWP + ``performance`` | ||
112 | ..................... | ||
113 | |||
114 | In this configuration ``intel_pstate`` will write 0 to the processor's | ||
115 | Energy-Performance Preference (EPP) knob (if supported) or its | ||
116 | Energy-Performance Bias (EPB) knob (otherwise), which means that the processor's | ||
117 | internal P-state selection logic is expected to focus entirely on performance. | ||
118 | |||
119 | This will override the EPP/EPB setting coming from the ``sysfs`` interface | ||
120 | (see `Energy vs Performance Hints`_ below). | ||
121 | |||
122 | Also, in this configuration the range of P-states available to the processor's | ||
123 | internal P-state selection logic is always restricted to the upper boundary | ||
124 | (that is, the maximum P-state that the driver is allowed to use). | ||
125 | |||
126 | HWP + ``powersave`` | ||
127 | ................... | ||
128 | |||
129 | In this configuration ``intel_pstate`` will set the processor's | ||
130 | Energy-Performance Preference (EPP) knob (if supported) or its | ||
131 | Energy-Performance Bias (EPB) knob (otherwise) to whatever value it was | ||
132 | previously set to via ``sysfs`` (or whatever default value it was | ||
133 | set to by the platform firmware). This usually causes the processor's | ||
134 | internal P-state selection logic to be less performance-focused. | ||
135 | |||
136 | Active Mode Without HWP | ||
137 | ~~~~~~~~~~~~~~~~~~~~~~~ | ||
138 | |||
139 | This is the default operation mode for processors that do not support the HWP | ||
140 | feature. It also is used by default with the ``intel_pstate=no_hwp`` argument | ||
141 | in the kernel command line. However, in this mode ``intel_pstate`` may refuse | ||
142 | to work with the given processor if it does not recognize it. [Note that | ||
143 | ``intel_pstate`` will never refuse to work with any processor with the HWP | ||
144 | feature enabled.] | ||
145 | |||
146 | In this mode ``intel_pstate`` registers utilization update callbacks with the | ||
147 | CPU scheduler in order to run a P-state selection algorithm, either | ||
148 | ``powersave`` or ``performance``, depending on the ``scaling_governor`` policy | ||
149 | setting in ``sysfs``. The current CPU frequency information to be made | ||
150 | available from the ``scaling_cur_freq`` policy attribute in ``sysfs`` is | ||
151 | periodically updated by those utilization update callbacks too. | ||
152 | |||
153 | ``performance`` | ||
154 | ............... | ||
155 | |||
156 | Without HWP, this P-state selection algorithm is always the same regardless of | ||
157 | the processor model and platform configuration. | ||
158 | |||
159 | It selects the maximum P-state it is allowed to use, subject to limits set via | ||
160 | ``sysfs``, every time the P-state selection computations are carried out by the | ||
161 | driver's utilization update callback for the given CPU (that does not happen | ||
162 | more often than every 10 ms), but the hardware configuration will not be changed | ||
163 | if the new P-state is the same as the current one. | ||
164 | |||
165 | This is the default P-state selection algorithm if the | ||
166 | :c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option | ||
167 | is set. | ||
168 | |||
169 | ``powersave`` | ||
170 | ............. | ||
171 | |||
172 | Without HWP, this P-state selection algorithm generally depends on the | ||
173 | processor model and/or the system profile setting in the ACPI tables and there | ||
174 | are two variants of it. | ||
175 | |||
176 | One of them is used with processors from the Atom line and (regardless of the | ||
177 | processor model) on platforms with the system profile in the ACPI tables set to | ||
178 | "mobile" (laptops mostly), "tablet", "appliance PC", "desktop", or | ||
179 | "workstation". It is also used with processors supporting the HWP feature if | ||
180 | that feature has not been enabled (that is, with the ``intel_pstate=no_hwp`` | ||
181 | argument in the kernel command line). It is similar to the algorithm | ||
182 | implemented by the generic ``schedutil`` scaling governor except that the | ||
183 | utilization metric used by it is based on numbers coming from feedback | ||
184 | registers of the CPU. It generally selects P-states proportional to the | ||
185 | current CPU utilization, so it is referred to as the "proportional" algorithm. | ||
186 | |||
187 | The second variant of the ``powersave`` P-state selection algorithm, used in all | ||
188 | of the other cases (generally, on processors from the Core line, so it is | ||
189 | referred to as the "Core" algorithm), is based on the values read from the APERF | ||
190 | and MPERF feedback registers and the previously requested target P-state. | ||
191 | It does not really take CPU utilization into account explicitly, but as a rule | ||
192 | it causes the CPU P-state to ramp up very quickly in response to increased | ||
193 | utilization which is generally desirable in server environments. | ||
194 | |||
195 | Regardless of the variant, this algorithm is run by the driver's utilization | ||
196 | update callback for the given CPU when it is invoked by the CPU scheduler, but | ||
197 | not more often than every 10 ms (that can be tweaked via ``debugfs`` in `this | ||
198 | particular case <Tuning Interface in debugfs_>`_). Like in the ``performance`` | ||
199 | case, the hardware configuration is not touched if the new P-state turns out to | ||
200 | be the same as the current one. | ||
201 | |||
202 | This is the default P-state selection algorithm if the | ||
203 | :c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option | ||
204 | is not set. | ||
205 | |||
206 | Passive Mode | ||
207 | ------------ | ||
208 | |||
209 | This mode is used if the ``intel_pstate=passive`` argument is passed to the | ||
210 | kernel in the command line (it implies the ``intel_pstate=no_hwp`` setting too). | ||
211 | Like in the active mode without HWP support, in this mode ``intel_pstate`` may | ||
212 | refuse to work with the given processor if it does not recognize it. | ||
213 | |||
214 | If the driver works in this mode, the ``scaling_driver`` policy attribute in | ||
215 | ``sysfs`` for all ``CPUFreq`` policies contains the string "intel_cpufreq". | ||
216 | Then, the driver behaves like a regular ``CPUFreq`` scaling driver. That is, | ||
217 | it is invoked by generic scaling governors when necessary to talk to the | ||
218 | hardware in order to change the P-state of a CPU (in particular, the | ||
219 | ``schedutil`` governor can invoke it directly from scheduler context). | ||
220 | |||
221 | While in this mode, ``intel_pstate`` can be used with all of the (generic) | ||
222 | scaling governors listed by the ``scaling_available_governors`` policy attribute | ||
223 | in ``sysfs`` (and the P-state selection algorithms described above are not | ||
224 | used). Then, it is responsible for the configuration of policy objects | ||
225 | corresponding to CPUs and provides the ``CPUFreq`` core (and the scaling | ||
226 | governors attached to the policy objects) with accurate information on the | ||
227 | maximum and minimum operating frequencies supported by the hardware (including | ||
228 | the so-called "turbo" frequency ranges). In other words, in the passive mode | ||
229 | the entire range of available P-states is exposed by ``intel_pstate`` to the | ||
230 | ``CPUFreq`` core. However, in this mode the driver does not register | ||
231 | utilization update callbacks with the CPU scheduler and the ``scaling_cur_freq`` | ||
232 | information comes from the ``CPUFreq`` core (and is the last frequency selected | ||
233 | by the current scaling governor for the given policy). | ||
234 | |||
235 | |||
236 | .. _turbo: | ||
237 | |||
238 | Turbo P-states Support | ||
239 | ====================== | ||
240 | |||
241 | In the majority of cases, the entire range of P-states available to | ||
242 | ``intel_pstate`` can be divided into two sub-ranges that correspond to | ||
243 | different types of processor behavior, above and below a boundary that | ||
244 | will be referred to as the "turbo threshold" in what follows. | ||
245 | |||
246 | The P-states above the turbo threshold are referred to as "turbo P-states" and | ||
247 | the whole sub-range of P-states they belong to is referred to as the "turbo | ||
248 | range". These names are related to the Turbo Boost technology allowing a | ||
249 | multicore processor to opportunistically increase the P-state of one or more | ||
250 | cores if there is enough power to do that and if that is not going to cause the | ||
251 | thermal envelope of the processor package to be exceeded. | ||
252 | |||
253 | Specifically, if software sets the P-state of a CPU core within the turbo range | ||
254 | (that is, above the turbo threshold), the processor is permitted to take over | ||
255 | performance scaling control for that core and put it into turbo P-states of its | ||
256 | choice going forward. However, that permission is interpreted differently by | ||
257 | different processor generations. Namely, the Sandy Bridge generation of | ||
258 | processors will never use any P-states above the last one set by software for | ||
259 | the given core, even if it is within the turbo range, whereas all of the later | ||
260 | processor generations will take it as a license to use any P-states from the | ||
261 | turbo range, even above the one set by software. In other words, on those | ||
262 | processors setting any P-state from the turbo range will enable the processor | ||
263 | to put the given core into all turbo P-states up to and including the maximum | ||
264 | supported one as it sees fit. | ||
265 | |||
266 | One important property of turbo P-states is that they are not sustainable. More | ||
267 | precisely, there is no guarantee that any CPUs will be able to stay in any of | ||
268 | those states indefinitely, because the power distribution within the processor | ||
269 | package may change over time or the thermal envelope it was designed for might | ||
270 | be exceeded if a turbo P-state was used for too long. | ||
271 | |||
272 | In turn, the P-states below the turbo threshold generally are sustainable. In | ||
273 | fact, if one of them is set by software, the processor is not expected to change | ||
274 | it to a lower one unless in a thermal stress or a power limit violation | ||
275 | situation (a higher P-state may still be used if it is set for another CPU in | ||
276 | the same package at the same time, for example). | ||
277 | |||
278 | Some processors allow multiple cores to be in turbo P-states at the same time, | ||
279 | but the maximum P-state that can be set for them generally depends on the number | ||
280 | of cores running concurrently. The maximum turbo P-state that can be set for 3 | ||
281 | cores at the same time usually is lower than the analogous maximum P-state for | ||
282 | 2 cores, which in turn usually is lower than the maximum turbo P-state that can | ||
283 | be set for 1 core. The one-core maximum turbo P-state is thus the maximum | ||
284 | supported one overall. | ||
285 | |||
286 | The maximum supported turbo P-state, the turbo threshold (the maximum supported | ||
287 | non-turbo P-state) and the minimum supported P-state are specific to the | ||
288 | processor model and can be determined by reading the processor's model-specific | ||
289 | registers (MSRs). Moreover, some processors support the Configurable TDP | ||
290 | (Thermal Design Power) feature and, when that feature is enabled, the turbo | ||
291 | threshold effectively becomes a configurable value that can be set by the | ||
292 | platform firmware. | ||
293 | |||
294 | Unlike ``_PSS`` objects in the ACPI tables, ``intel_pstate`` always exposes | ||
295 | the entire range of available P-states, including the whole turbo range, to the | ||
296 | ``CPUFreq`` core and (in the passive mode) to generic scaling governors. This | ||
297 | generally causes turbo P-states to be set more often when ``intel_pstate`` is | ||
298 | used relative to ACPI-based CPU performance scaling (see `below <acpi-cpufreq_>`_ | ||
299 | for more information). | ||
300 | |||
301 | Moreover, since ``intel_pstate`` always knows what the real turbo threshold is | ||
302 | (even if the Configurable TDP feature is enabled in the processor), its | ||
303 | ``no_turbo`` attribute in ``sysfs`` (described `below <no_turbo_attr_>`_) should | ||
304 | work as expected in all cases (that is, if set to disable turbo P-states, it | ||
305 | always should prevent ``intel_pstate`` from using them). | ||
306 | |||
307 | |||
308 | Processor Support | ||
309 | ================= | ||
310 | |||
311 | To handle a given processor ``intel_pstate`` requires a number of different | ||
312 | pieces of information on it to be known, including: | ||
313 | |||
314 | * The minimum supported P-state. | ||
315 | |||
316 | * The maximum supported `non-turbo P-state <turbo_>`_. | ||
317 | |||
318 | * Whether or not turbo P-states are supported at all. | ||
319 | |||
320 | * The maximum supported `one-core turbo P-state <turbo_>`_ (if turbo P-states | ||
321 | are supported). | ||
322 | |||
323 | * The scaling formula to translate the driver's internal representation | ||
324 | of P-states into frequencies and the other way around. | ||
325 | |||
326 | Generally, ways to obtain that information are specific to the processor model | ||
327 | or family. Although it often is possible to obtain all of it from the processor | ||
328 | itself (using model-specific registers), there are cases in which hardware | ||
329 | manuals need to be consulted to get to it too. | ||
330 | |||
331 | For this reason, there is a list of supported processors in ``intel_pstate`` and | ||
332 | the driver initialization will fail if the detected processor is not in that | ||
333 | list, unless it supports the `HWP feature <Active Mode_>`_. [The interface to | ||
334 | obtain all of the information listed above is the same for all of the processors | ||
335 | supporting the HWP feature, which is why they all are supported by | ||
336 | ``intel_pstate``.] | ||
337 | |||
338 | |||
339 | User Space Interface in ``sysfs`` | ||
340 | ================================= | ||
341 | |||
342 | Global Attributes | ||
343 | ----------------- | ||
344 | |||
345 | ``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to | ||
346 | control its functionality at the system level. They are located in the | ||
347 | ``/sys/devices/system/cpu/cpufreq/intel_pstate/`` directory and affect all | ||
348 | CPUs. | ||
349 | |||
350 | Some of them are not present if the ``intel_pstate=per_cpu_perf_limits`` | ||
351 | argument is passed to the kernel in the command line. | ||
352 | |||
353 | ``max_perf_pct`` | ||
354 | Maximum P-state the driver is allowed to set in percent of the | ||
355 | maximum supported performance level (the highest supported `turbo | ||
356 | P-state <turbo_>`_). | ||
357 | |||
358 | This attribute will not be exposed if the | ||
359 | ``intel_pstate=per_cpu_perf_limits`` argument is present in the kernel | ||
360 | command line. | ||
361 | |||
362 | ``min_perf_pct`` | ||
363 | Minimum P-state the driver is allowed to set in percent of the | ||
364 | maximum supported performance level (the highest supported `turbo | ||
365 | P-state <turbo_>`_). | ||
366 | |||
367 | This attribute will not be exposed if the | ||
368 | ``intel_pstate=per_cpu_perf_limits`` argument is present in the kernel | ||
369 | command line. | ||
370 | |||
371 | ``num_pstates`` | ||
372 | Number of P-states supported by the processor (between 0 and 255 | ||
373 | inclusive) including both turbo and non-turbo P-states (see | ||
374 | `Turbo P-states Support`_). | ||
375 | |||
376 | The value of this attribute is not affected by the ``no_turbo`` | ||
377 | setting described `below <no_turbo_attr_>`_. | ||
378 | |||
379 | This attribute is read-only. | ||
380 | |||
381 | ``turbo_pct`` | ||
382 | Ratio of the `turbo range <turbo_>`_ size to the size of the entire | ||
383 | range of supported P-states, in percent. | ||
384 | |||
385 | This attribute is read-only. | ||
386 | |||
387 | .. _no_turbo_attr: | ||
388 | |||
389 | ``no_turbo`` | ||
390 | If set (equal to 1), the driver is not allowed to set any turbo P-states | ||
391 | (see `Turbo P-states Support`_). If unset (equal to 0, which is the | ||
392 | default), turbo P-states can be set by the driver. | ||
393 | [Note that ``intel_pstate`` does not support the general ``boost`` | ||
394 | attribute (supported by some other scaling drivers) which is replaced | ||
395 | by this one.] | ||
396 | |||
397 | This attribute does not affect the maximum supported frequency value | ||
398 | supplied to the ``CPUFreq`` core and exposed via the policy interface, | ||
399 | but it affects the maximum possible value of per-policy P-state limits | ||
400 | (see `Interpretation of Policy Attributes`_ below for details). | ||
401 | |||
402 | .. _status_attr: | ||
403 | |||
404 | ``status`` | ||
405 | Operation mode of the driver: "active", "passive" or "off". | ||
406 | |||
407 | "active" | ||
408 | The driver is functional and in the `active mode | ||
409 | <Active Mode_>`_. | ||
410 | |||
411 | "passive" | ||
412 | The driver is functional and in the `passive mode | ||
413 | <Passive Mode_>`_. | ||
414 | |||
415 | "off" | ||
416 | The driver is not functional (it is not registered as a scaling | ||
417 | driver with the ``CPUFreq`` core). | ||
418 | |||
419 | This attribute can be written to in order to change the driver's | ||
420 | operation mode or to unregister it. The string written to it must be | ||
421 | one of the possible values of it and, if successful, the write will | ||
422 | cause the driver to switch over to the operation mode represented by | ||
423 | that string - or to be unregistered in the "off" case. [Actually, | ||
424 | switching over from the active mode to the passive mode or the other | ||
425 | way around causes the driver to be unregistered and registered again | ||
426 | with a different set of callbacks, so all of its settings (the global | ||
427 | as well as the per-policy ones) are then reset to their default | ||
428 | values, possibly depending on the target operation mode.] | ||
429 | |||
430 | That only is supported in some configurations, though (for example, if | ||
431 | the `HWP feature is enabled in the processor <Active Mode With HWP_>`_, | ||
432 | the operation mode of the driver cannot be changed), and if it is not | ||
433 | supported in the current configuration, writes to this attribute will | ||
434 | fail with an appropriate error. | ||
435 | |||
436 | Interpretation of Policy Attributes | ||
437 | ----------------------------------- | ||
438 | |||
439 | The interpretation of some ``CPUFreq`` policy attributes described in | ||
440 | :doc:`cpufreq` is special with ``intel_pstate`` as the current scaling driver | ||
441 | and it generally depends on the driver's `operation mode <Operation Modes_>`_. | ||
442 | |||
443 | First of all, the values of the ``cpuinfo_max_freq``, ``cpuinfo_min_freq`` and | ||
444 | ``scaling_cur_freq`` attributes are produced by applying a processor-specific | ||
445 | multiplier to the internal P-state representation used by ``intel_pstate``. | ||
446 | Also, the values of the ``scaling_max_freq`` and ``scaling_min_freq`` | ||
447 | attributes are capped by the frequency corresponding to the maximum P-state that | ||
448 | the driver is allowed to set. | ||
449 | |||
450 | If the ``no_turbo`` `global attribute <no_turbo_attr_>`_ is set, the driver is | ||
451 | not allowed to use turbo P-states, so the maximum value of ``scaling_max_freq`` | ||
452 | and ``scaling_min_freq`` is limited to the maximum non-turbo P-state frequency. | ||
453 | Accordingly, setting ``no_turbo`` causes ``scaling_max_freq`` and | ||
454 | ``scaling_min_freq`` to go down to that value if they were above it before. | ||
455 | However, the old values of ``scaling_max_freq`` and ``scaling_min_freq`` will be | ||
456 | restored after unsetting ``no_turbo``, unless these attributes have been written | ||
457 | to after ``no_turbo`` was set. | ||
458 | |||
459 | If ``no_turbo`` is not set, the maximum possible value of ``scaling_max_freq`` | ||
460 | and ``scaling_min_freq`` corresponds to the maximum supported turbo P-state, | ||
461 | which also is the value of ``cpuinfo_max_freq`` in either case. | ||
462 | |||
463 | Next, the following policy attributes have special meaning if | ||
464 | ``intel_pstate`` works in the `active mode <Active Mode_>`_: | ||
465 | |||
466 | ``scaling_available_governors`` | ||
467 | List of P-state selection algorithms provided by ``intel_pstate``. | ||
468 | |||
469 | ``scaling_governor`` | ||
470 | P-state selection algorithm provided by ``intel_pstate`` currently in | ||
471 | use with the given policy. | ||
472 | |||
473 | ``scaling_cur_freq`` | ||
474 | Frequency of the average P-state of the CPU represented by the given | ||
475 | policy for the time interval between the last two invocations of the | ||
476 | driver's utilization update callback by the CPU scheduler for that CPU. | ||
477 | |||
478 | The meaning of these attributes in the `passive mode <Passive Mode_>`_ is the | ||
479 | same as for other scaling drivers. | ||
480 | |||
481 | Additionally, the value of the ``scaling_driver`` attribute for ``intel_pstate`` | ||
482 | depends on the operation mode of the driver. Namely, it is either | ||
483 | "intel_pstate" (in the `active mode <Active Mode_>`_) or "intel_cpufreq" (in the | ||
484 | `passive mode <Passive Mode_>`_). | ||
485 | |||
486 | Coordination of P-State Limits | ||
487 | ------------------------------ | ||
488 | |||
489 | ``intel_pstate`` allows P-state limits to be set in two ways: with the help of | ||
490 | the ``max_perf_pct`` and ``min_perf_pct`` `global attributes | ||
491 | <Global Attributes_>`_ or via the ``scaling_max_freq`` and ``scaling_min_freq`` | ||
492 | ``CPUFreq`` policy attributes. The coordination between those limits is based | ||
493 | on the following rules, regardless of the current operation mode of the driver: | ||
494 | |||
495 | 1. All CPUs are affected by the global limits (that is, none of them can be | ||
496 | requested to run faster than the global maximum and none of them can be | ||
497 | requested to run slower than the global minimum). | ||
498 | |||
499 | 2. Each individual CPU is affected by its own per-policy limits (that is, it | ||
500 | cannot be requested to run faster than its own per-policy maximum and it | ||
501 | cannot be requested to run slower than its own per-policy minimum). | ||
502 | |||
503 | 3. The global and per-policy limits can be set independently. | ||
504 | |||
505 | If the `HWP feature is enabled in the processor <Active Mode With HWP_>`_, the | ||
506 | resulting effective values are written into its registers whenever the limits | ||
507 | change in order to request its internal P-state selection logic to always set | ||
508 | P-states within these limits. Otherwise, the limits are taken into account by | ||
509 | scaling governors (in the `passive mode <Passive Mode_>`_) and by the driver | ||
510 | every time before setting a new P-state for a CPU. | ||
511 | |||
512 | Additionally, if the ``intel_pstate=per_cpu_perf_limits`` command line argument | ||
513 | is passed to the kernel, ``max_perf_pct`` and ``min_perf_pct`` are not exposed | ||
514 | at all and the only way to set the limits is by using the policy attributes. | ||
515 | |||
516 | |||
517 | Energy vs Performance Hints | ||
518 | --------------------------- | ||
519 | |||
520 | If ``intel_pstate`` works in the `active mode with the HWP feature enabled | ||
521 | <Active Mode With HWP_>`_ in the processor, additional attributes are present | ||
522 | in every ``CPUFreq`` policy directory in ``sysfs``. They are intended to allow | ||
523 | user space to help ``intel_pstate`` to adjust the processor's internal P-state | ||
524 | selection logic by focusing it on performance or on energy-efficiency, or | ||
525 | somewhere between the two extremes: | ||
526 | |||
527 | ``energy_performance_preference`` | ||
528 | Current value of the energy vs performance hint for the given policy | ||
529 | (or the CPU represented by it). | ||
530 | |||
531 | The hint can be changed by writing to this attribute. | ||
532 | |||
533 | ``energy_performance_available_preferences`` | ||
534 | List of strings that can be written to the | ||
535 | ``energy_performance_preference`` attribute. | ||
536 | |||
537 | They represent different energy vs performance hints and should be | ||
538 | self-explanatory, except that ``default`` represents whatever hint | ||
539 | value was set by the platform firmware. | ||
540 | |||
541 | Strings written to the ``energy_performance_preference`` attribute are | ||
542 | internally translated to integer values written to the processor's | ||
543 | Energy-Performance Preference (EPP) knob (if supported) or its | ||
544 | Energy-Performance Bias (EPB) knob. | ||
545 | |||
546 | [Note that tasks may by migrated from one CPU to another by the scheduler's | ||
547 | load-balancing algorithm and if different energy vs performance hints are | ||
548 | set for those CPUs, that may lead to undesirable outcomes. To avoid such | ||
549 | issues it is better to set the same energy vs performance hint for all CPUs | ||
550 | or to pin every task potentially sensitive to them to a specific CPU.] | ||
551 | |||
552 | .. _acpi-cpufreq: | ||
553 | |||
554 | ``intel_pstate`` vs ``acpi-cpufreq`` | ||
555 | ==================================== | ||
556 | |||
557 | On the majority of systems supported by ``intel_pstate``, the ACPI tables | ||
558 | provided by the platform firmware contain ``_PSS`` objects returning information | ||
559 | that can be used for CPU performance scaling (refer to the `ACPI specification`_ | ||
560 | for details on the ``_PSS`` objects and the format of the information returned | ||
561 | by them). | ||
562 | |||
563 | The information returned by the ACPI ``_PSS`` objects is used by the | ||
564 | ``acpi-cpufreq`` scaling driver. On systems supported by ``intel_pstate`` | ||
565 | the ``acpi-cpufreq`` driver uses the same hardware CPU performance scaling | ||
566 | interface, but the set of P-states it can use is limited by the ``_PSS`` | ||
567 | output. | ||
568 | |||
569 | On those systems each ``_PSS`` object returns a list of P-states supported by | ||
570 | the corresponding CPU which basically is a subset of the P-states range that can | ||
571 | be used by ``intel_pstate`` on the same system, with one exception: the whole | ||
572 | `turbo range <turbo_>`_ is represented by one item in it (the topmost one). By | ||
573 | convention, the frequency returned by ``_PSS`` for that item is greater by 1 MHz | ||
574 | than the frequency of the highest non-turbo P-state listed by it, but the | ||
575 | corresponding P-state representation (following the hardware specification) | ||
576 | returned for it matches the maximum supported turbo P-state (or is the | ||
577 | special value 255 meaning essentially "go as high as you can get"). | ||
578 | |||
579 | The list of P-states returned by ``_PSS`` is reflected by the table of | ||
580 | available frequencies supplied by ``acpi-cpufreq`` to the ``CPUFreq`` core and | ||
581 | scaling governors and the minimum and maximum supported frequencies reported by | ||
582 | it come from that list as well. In particular, given the special representation | ||
583 | of the turbo range described above, this means that the maximum supported | ||
584 | frequency reported by ``acpi-cpufreq`` is higher by 1 MHz than the frequency | ||
585 | of the highest supported non-turbo P-state listed by ``_PSS`` which, of course, | ||
586 | affects decisions made by the scaling governors, except for ``powersave`` and | ||
587 | ``performance``. | ||
588 | |||
589 | For example, if a given governor attempts to select a frequency proportional to | ||
590 | estimated CPU load and maps the load of 100% to the maximum supported frequency | ||
591 | (possibly multiplied by a constant), then it will tend to choose P-states below | ||
592 | the turbo threshold if ``acpi-cpufreq`` is used as the scaling driver, because | ||
593 | in that case the turbo range corresponds to a small fraction of the frequency | ||
594 | band it can use (1 MHz vs 1 GHz or more). In consequence, it will only go to | ||
595 | the turbo range for the highest loads and the other loads above 50% that might | ||
596 | benefit from running at turbo frequencies will be given non-turbo P-states | ||
597 | instead. | ||
598 | |||
599 | One more issue related to that may appear on systems supporting the | ||
600 | `Configurable TDP feature <turbo_>`_ allowing the platform firmware to set the | ||
601 | turbo threshold. Namely, if that is not coordinated with the lists of P-states | ||
602 | returned by ``_PSS`` properly, there may be more than one item corresponding to | ||
603 | a turbo P-state in those lists and there may be a problem with avoiding the | ||
604 | turbo range (if desirable or necessary). Usually, to avoid using turbo | ||
605 | P-states overall, ``acpi-cpufreq`` simply avoids using the topmost state listed | ||
606 | by ``_PSS``, but that is not sufficient when there are other turbo P-states in | ||
607 | the list returned by it. | ||
608 | |||
609 | Apart from the above, ``acpi-cpufreq`` works like ``intel_pstate`` in the | ||
610 | `passive mode <Passive Mode_>`_, except that the number of P-states it can set | ||
611 | is limited to the ones listed by the ACPI ``_PSS`` objects. | ||
612 | |||
613 | |||
614 | Kernel Command Line Options for ``intel_pstate`` | ||
615 | ================================================ | ||
616 | |||
617 | Several kernel command line options can be used to pass early-configuration-time | ||
618 | parameters to ``intel_pstate`` in order to enforce specific behavior of it. All | ||
619 | of them have to be prepended with the ``intel_pstate=`` prefix. | ||
620 | |||
621 | ``disable`` | ||
622 | Do not register ``intel_pstate`` as the scaling driver even if the | ||
623 | processor is supported by it. | ||
624 | |||
625 | ``passive`` | ||
626 | Register ``intel_pstate`` in the `passive mode <Passive Mode_>`_ to | ||
627 | start with. | ||
628 | |||
629 | This option implies the ``no_hwp`` one described below. | ||
630 | |||
631 | ``force`` | ||
632 | Register ``intel_pstate`` as the scaling driver instead of | ||
633 | ``acpi-cpufreq`` even if the latter is preferred on the given system. | ||
634 | |||
635 | This may prevent some platform features (such as thermal controls and | ||
636 | power capping) that rely on the availability of ACPI P-states | ||
637 | information from functioning as expected, so it should be used with | ||
638 | caution. | ||
639 | |||
640 | This option does not work with processors that are not supported by | ||
641 | ``intel_pstate`` and on platforms where the ``pcc-cpufreq`` scaling | ||
642 | driver is used instead of ``acpi-cpufreq``. | ||
643 | |||
644 | ``no_hwp`` | ||
645 | Do not enable the `hardware-managed P-states (HWP) feature | ||
646 | <Active Mode With HWP_>`_ even if it is supported by the processor. | ||
647 | |||
648 | ``hwp_only`` | ||
649 | Register ``intel_pstate`` as the scaling driver only if the | ||
650 | `hardware-managed P-states (HWP) feature <Active Mode With HWP_>`_ is | ||
651 | supported by the processor. | ||
652 | |||
653 | ``support_acpi_ppc`` | ||
654 | Take ACPI ``_PPC`` performance limits into account. | ||
655 | |||
656 | If the preferred power management profile in the FADT (Fixed ACPI | ||
657 | Description Table) is set to "Enterprise Server" or "Performance | ||
658 | Server", the ACPI ``_PPC`` limits are taken into account by default | ||
659 | and this option has no effect. | ||
660 | |||
661 | ``per_cpu_perf_limits`` | ||
662 | Use per-logical-CPU P-State limits (see `Coordination of P-state | ||
663 | Limits`_ for details). | ||
664 | |||
665 | |||
666 | Diagnostics and Tuning | ||
667 | ====================== | ||
668 | |||
669 | Trace Events | ||
670 | ------------ | ||
671 | |||
672 | There are two static trace events that can be used for ``intel_pstate`` | ||
673 | diagnostics. One of them is the ``cpu_frequency`` trace event generally used | ||
674 | by ``CPUFreq``, and the other one is the ``pstate_sample`` trace event specific | ||
675 | to ``intel_pstate``. Both of them are triggered by ``intel_pstate`` only if | ||
676 | it works in the `active mode <Active Mode_>`_. | ||
677 | |||
678 | The following sequence of shell commands can be used to enable them and see | ||
679 | their output (if the kernel is generally configured to support event tracing):: | ||
680 | |||
681 | # cd /sys/kernel/debug/tracing/ | ||
682 | # echo 1 > events/power/pstate_sample/enable | ||
683 | # echo 1 > events/power/cpu_frequency/enable | ||
684 | # cat trace | ||
685 | gnome-terminal--4510 [001] ..s. 1177.680733: pstate_sample: core_busy=107 scaled=94 from=26 to=26 mperf=1143818 aperf=1230607 tsc=29838618 freq=2474476 | ||
686 | cat-5235 [002] ..s. 1177.681723: cpu_frequency: state=2900000 cpu_id=2 | ||
687 | |||
688 | If ``intel_pstate`` works in the `passive mode <Passive Mode_>`_, the | ||
689 | ``cpu_frequency`` trace event will be triggered either by the ``schedutil`` | ||
690 | scaling governor (for the policies it is attached to), or by the ``CPUFreq`` | ||
691 | core (for the policies with other scaling governors). | ||
692 | |||
693 | ``ftrace`` | ||
694 | ---------- | ||
695 | |||
696 | The ``ftrace`` interface can be used for low-level diagnostics of | ||
697 | ``intel_pstate``. For example, to check how often the function to set a | ||
698 | P-state is called, the ``ftrace`` filter can be set to | ||
699 | :c:func:`intel_pstate_set_pstate`:: | ||
700 | |||
701 | # cd /sys/kernel/debug/tracing/ | ||
702 | # cat available_filter_functions | grep -i pstate | ||
703 | intel_pstate_set_pstate | ||
704 | intel_pstate_cpu_init | ||
705 | ... | ||
706 | # echo intel_pstate_set_pstate > set_ftrace_filter | ||
707 | # echo function > current_tracer | ||
708 | # cat trace | head -15 | ||
709 | # tracer: function | ||
710 | # | ||
711 | # entries-in-buffer/entries-written: 80/80 #P:4 | ||
712 | # | ||
713 | # _-----=> irqs-off | ||
714 | # / _----=> need-resched | ||
715 | # | / _---=> hardirq/softirq | ||
716 | # || / _--=> preempt-depth | ||
717 | # ||| / delay | ||
718 | # TASK-PID CPU# |||| TIMESTAMP FUNCTION | ||
719 | # | | | |||| | | | ||
720 | Xorg-3129 [000] ..s. 2537.644844: intel_pstate_set_pstate <-intel_pstate_timer_func | ||
721 | gnome-terminal--4510 [002] ..s. 2537.649844: intel_pstate_set_pstate <-intel_pstate_timer_func | ||
722 | gnome-shell-3409 [001] ..s. 2537.650850: intel_pstate_set_pstate <-intel_pstate_timer_func | ||
723 | <idle>-0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func | ||
724 | |||
725 | Tuning Interface in ``debugfs`` | ||
726 | ------------------------------- | ||
727 | |||
728 | The ``powersave`` algorithm provided by ``intel_pstate`` for `the Core line of | ||
729 | processors in the active mode <powersave_>`_ is based on a `PID controller`_ | ||
730 | whose parameters were chosen to address a number of different use cases at the | ||
731 | same time. However, it still is possible to fine-tune it to a specific workload | ||
732 | and the ``debugfs`` interface under ``/sys/kernel/debug/pstate_snb/`` is | ||
733 | provided for this purpose. [Note that the ``pstate_snb`` directory will be | ||
734 | present only if the specific P-state selection algorithm matching the interface | ||
735 | in it actually is in use.] | ||
736 | |||
737 | The following files present in that directory can be used to modify the PID | ||
738 | controller parameters at run time: | ||
739 | |||
740 | | ``deadband`` | ||
741 | | ``d_gain_pct`` | ||
742 | | ``i_gain_pct`` | ||
743 | | ``p_gain_pct`` | ||
744 | | ``sample_rate_ms`` | ||
745 | | ``setpoint`` | ||
746 | |||
747 | Note, however, that achieving desirable results this way generally requires | ||
748 | expert-level understanding of the power vs performance tradeoff, so extra care | ||
749 | is recommended when attempting to do that. | ||
750 | |||
751 | |||
752 | .. _LCEU2015: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf | ||
753 | .. _SDM: http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html | ||
754 | .. _ACPI specification: http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf | ||
755 | .. _PID controller: https://en.wikipedia.org/wiki/PID_controller | ||
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt deleted file mode 100644 index 3fdcdfd968ba..000000000000 --- a/Documentation/cpu-freq/intel-pstate.txt +++ /dev/null | |||
@@ -1,281 +0,0 @@ | |||
1 | Intel P-State driver | ||
2 | -------------------- | ||
3 | |||
4 | This driver provides an interface to control the P-State selection for the | ||
5 | SandyBridge+ Intel processors. | ||
6 | |||
7 | The following document explains P-States: | ||
8 | http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf | ||
9 | As stated in the document, P-State doesn’t exactly mean a frequency. However, for | ||
10 | the sake of the relationship with cpufreq, P-State and frequency are used | ||
11 | interchangeably. | ||
12 | |||
13 | Understanding the cpufreq core governors and policies are important before | ||
14 | discussing more details about the Intel P-State driver. Based on what callbacks | ||
15 | a cpufreq driver provides to the cpufreq core, it can support two types of | ||
16 | drivers: | ||
17 | - with target_index() callback: In this mode, the drivers using cpufreq core | ||
18 | simply provide the minimum and maximum frequency limits and an additional | ||
19 | interface target_index() to set the current frequency. The cpufreq subsystem | ||
20 | has a number of scaling governors ("performance", "powersave", "ondemand", | ||
21 | etc.). Depending on which governor is in use, cpufreq core will call for | ||
22 | transitions to a specific frequency using target_index() callback. | ||
23 | - setpolicy() callback: In this mode, drivers do not provide target_index() | ||
24 | callback, so cpufreq core can't request a transition to a specific frequency. | ||
25 | The driver provides minimum and maximum frequency limits and callbacks to set a | ||
26 | policy. The policy in cpufreq sysfs is referred to as the "scaling governor". | ||
27 | The cpufreq core can request the driver to operate in any of the two policies: | ||
28 | "performance" and "powersave". The driver decides which frequency to use based | ||
29 | on the above policy selection considering minimum and maximum frequency limits. | ||
30 | |||
31 | The Intel P-State driver falls under the latter category, which implements the | ||
32 | setpolicy() callback. This driver decides what P-State to use based on the | ||
33 | requested policy from the cpufreq core. If the processor is capable of | ||
34 | selecting its next P-State internally, then the driver will offload this | ||
35 | responsibility to the processor (aka HWP: Hardware P-States). If not, the | ||
36 | driver implements algorithms to select the next P-State. | ||
37 | |||
38 | Since these policies are implemented in the driver, they are not same as the | ||
39 | cpufreq scaling governors implementation, even if they have the same name in | ||
40 | the cpufreq sysfs (scaling_governors). For example the "performance" policy is | ||
41 | similar to cpufreq’s "performance" governor, but "powersave" is completely | ||
42 | different than the cpufreq "powersave" governor. The strategy here is similar | ||
43 | to cpufreq "ondemand", where the requested P-State is related to the system load. | ||
44 | |||
45 | Sysfs Interface | ||
46 | |||
47 | In addition to the frequency-controlling interfaces provided by the cpufreq | ||
48 | core, the driver provides its own sysfs files to control the P-State selection. | ||
49 | These files have been added to /sys/devices/system/cpu/intel_pstate/. | ||
50 | Any changes made to these files are applicable to all CPUs (even in a | ||
51 | multi-package system, Refer to later section on placing "Per-CPU limits"). | ||
52 | |||
53 | max_perf_pct: Limits the maximum P-State that will be requested by | ||
54 | the driver. It states it as a percentage of the available performance. The | ||
55 | available (P-State) performance may be reduced by the no_turbo | ||
56 | setting described below. | ||
57 | |||
58 | min_perf_pct: Limits the minimum P-State that will be requested by | ||
59 | the driver. It states it as a percentage of the max (non-turbo) | ||
60 | performance level. | ||
61 | |||
62 | no_turbo: Limits the driver to selecting P-State below the turbo | ||
63 | frequency range. | ||
64 | |||
65 | turbo_pct: Displays the percentage of the total performance that | ||
66 | is supported by hardware that is in the turbo range. This number | ||
67 | is independent of whether turbo has been disabled or not. | ||
68 | |||
69 | num_pstates: Displays the number of P-States that are supported | ||
70 | by hardware. This number is independent of whether turbo has | ||
71 | been disabled or not. | ||
72 | |||
73 | For example, if a system has these parameters: | ||
74 | Max 1 core turbo ratio: 0x21 (Max 1 core ratio is the maximum P-State) | ||
75 | Max non turbo ratio: 0x17 | ||
76 | Minimum ratio : 0x08 (Here the ratio is called max efficiency ratio) | ||
77 | |||
78 | Sysfs will show : | ||
79 | max_perf_pct:100, which corresponds to 1 core ratio | ||
80 | min_perf_pct:24, max_efficiency_ratio / max 1 Core ratio | ||
81 | no_turbo:0, turbo is not disabled | ||
82 | num_pstates:26 = (max 1 Core ratio - Max Efficiency Ratio + 1) | ||
83 | turbo_pct:39 = (max 1 core ratio - max non turbo ratio) / num_pstates | ||
84 | |||
85 | Refer to "Intel® 64 and IA-32 Architectures Software Developer’s Manual | ||
86 | Volume 3: System Programming Guide" to understand ratios. | ||
87 | |||
88 | There is one more sysfs attribute in /sys/devices/system/cpu/intel_pstate/ | ||
89 | that can be used for controlling the operation mode of the driver: | ||
90 | |||
91 | status: Three settings are possible: | ||
92 | "off" - The driver is not in use at this time. | ||
93 | "active" - The driver works as a P-state governor (default). | ||
94 | "passive" - The driver works as a regular cpufreq one and collaborates | ||
95 | with the generic cpufreq governors (it sets P-states as | ||
96 | requested by those governors). | ||
97 | The current setting is returned by reads from this attribute. Writing one | ||
98 | of the above strings to it changes the operation mode as indicated by that | ||
99 | string, if possible. If HW-managed P-states (HWP) are enabled, it is not | ||
100 | possible to change the driver's operation mode and attempts to write to | ||
101 | this attribute will fail. | ||
102 | |||
103 | cpufreq sysfs for Intel P-State | ||
104 | |||
105 | Since this driver registers with cpufreq, cpufreq sysfs is also presented. | ||
106 | There are some important differences, which need to be considered. | ||
107 | |||
108 | scaling_cur_freq: This displays the real frequency which was used during | ||
109 | the last sample period instead of what is requested. Some other cpufreq driver, | ||
110 | like acpi-cpufreq, displays what is requested (Some changes are on the | ||
111 | way to fix this for acpi-cpufreq driver). The same is true for frequencies | ||
112 | displayed at /proc/cpuinfo. | ||
113 | |||
114 | scaling_governor: This displays current active policy. Since each CPU has a | ||
115 | cpufreq sysfs, it is possible to set a scaling governor to each CPU. But this | ||
116 | is not possible with Intel P-States, as there is one common policy for all | ||
117 | CPUs. Here, the last requested policy will be applicable to all CPUs. It is | ||
118 | suggested that one use the cpupower utility to change policy to all CPUs at the | ||
119 | same time. | ||
120 | |||
121 | scaling_setspeed: This attribute can never be used with Intel P-State. | ||
122 | |||
123 | scaling_max_freq/scaling_min_freq: This interface can be used similarly to | ||
124 | the max_perf_pct/min_perf_pct of Intel P-State sysfs. However since frequencies | ||
125 | are converted to nearest possible P-State, this is prone to rounding errors. | ||
126 | This method is not preferred to limit performance. | ||
127 | |||
128 | affected_cpus: Not used | ||
129 | related_cpus: Not used | ||
130 | |||
131 | For contemporary Intel processors, the frequency is controlled by the | ||
132 | processor itself and the P-State exposed to software is related to | ||
133 | performance levels. The idea that frequency can be set to a single | ||
134 | frequency is fictional for Intel Core processors. Even if the scaling | ||
135 | driver selects a single P-State, the actual frequency the processor | ||
136 | will run at is selected by the processor itself. | ||
137 | |||
138 | Per-CPU limits | ||
139 | |||
140 | The kernel command line option "intel_pstate=per_cpu_perf_limits" forces | ||
141 | the intel_pstate driver to use per-CPU performance limits. When it is set, | ||
142 | the sysfs control interface described above is subject to limitations. | ||
143 | - The following controls are not available for both read and write | ||
144 | /sys/devices/system/cpu/intel_pstate/max_perf_pct | ||
145 | /sys/devices/system/cpu/intel_pstate/min_perf_pct | ||
146 | - The following controls can be used to set performance limits, as far as the | ||
147 | architecture of the processor permits: | ||
148 | /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq | ||
149 | /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq | ||
150 | /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor | ||
151 | - User can still observe turbo percent and number of P-States from | ||
152 | /sys/devices/system/cpu/intel_pstate/turbo_pct | ||
153 | /sys/devices/system/cpu/intel_pstate/num_pstates | ||
154 | - User can read write system wide turbo status | ||
155 | /sys/devices/system/cpu/no_turbo | ||
156 | |||
157 | Support of energy performance hints | ||
158 | It is possible to provide hints to the HWP algorithms in the processor | ||
159 | to be more performance centric to more energy centric. When the driver | ||
160 | is using HWP, two additional cpufreq sysfs attributes are presented for | ||
161 | each logical CPU. | ||
162 | These attributes are: | ||
163 | - energy_performance_available_preferences | ||
164 | - energy_performance_preference | ||
165 | |||
166 | To get list of supported hints: | ||
167 | $ cat energy_performance_available_preferences | ||
168 | default performance balance_performance balance_power power | ||
169 | |||
170 | The current preference can be read or changed via cpufreq sysfs | ||
171 | attribute "energy_performance_preference". Reading from this attribute | ||
172 | will display current effective setting. User can write any of the valid | ||
173 | preference string to this attribute. User can always restore to power-on | ||
174 | default by writing "default". | ||
175 | |||
176 | Since threads can migrate to different CPUs, this is possible that the | ||
177 | new CPU may have different energy performance preference than the previous | ||
178 | one. To avoid such issues, either threads can be pinned to specific CPUs | ||
179 | or set the same energy performance preference value to all CPUs. | ||
180 | |||
181 | Tuning Intel P-State driver | ||
182 | |||
183 | When the performance can be tuned using PID (Proportional Integral | ||
184 | Derivative) controller, debugfs files are provided for adjusting performance. | ||
185 | They are presented under: | ||
186 | /sys/kernel/debug/pstate_snb/ | ||
187 | |||
188 | The PID tunable parameters are: | ||
189 | deadband | ||
190 | d_gain_pct | ||
191 | i_gain_pct | ||
192 | p_gain_pct | ||
193 | sample_rate_ms | ||
194 | setpoint | ||
195 | |||
196 | To adjust these parameters, some understanding of driver implementation is | ||
197 | necessary. There are some tweeks described here, but be very careful. Adjusting | ||
198 | them requires expert level understanding of power and performance relationship. | ||
199 | These limits are only useful when the "powersave" policy is active. | ||
200 | |||
201 | -To make the system more responsive to load changes, sample_rate_ms can | ||
202 | be adjusted (current default is 10ms). | ||
203 | -To make the system use higher performance, even if the load is lower, setpoint | ||
204 | can be adjusted to a lower number. This will also lead to faster ramp up time | ||
205 | to reach the maximum P-State. | ||
206 | If there are no derivative and integral coefficients, The next P-State will be | ||
207 | equal to: | ||
208 | current P-State - ((setpoint - current cpu load) * p_gain_pct) | ||
209 | |||
210 | For example, if the current PID parameters are (Which are defaults for the core | ||
211 | processors like SandyBridge): | ||
212 | deadband = 0 | ||
213 | d_gain_pct = 0 | ||
214 | i_gain_pct = 0 | ||
215 | p_gain_pct = 20 | ||
216 | sample_rate_ms = 10 | ||
217 | setpoint = 97 | ||
218 | |||
219 | If the current P-State = 0x08 and current load = 100, this will result in the | ||
220 | next P-State = 0x08 - ((97 - 100) * 0.2) = 8.6 (rounded to 9). Here the P-State | ||
221 | goes up by only 1. If during next sample interval the current load doesn't | ||
222 | change and still 100, then P-State goes up by one again. This process will | ||
223 | continue as long as the load is more than the setpoint until the maximum P-State | ||
224 | is reached. | ||
225 | |||
226 | For the same load at setpoint = 60, this will result in the next P-State | ||
227 | = 0x08 - ((60 - 100) * 0.2) = 16 | ||
228 | So by changing the setpoint from 97 to 60, there is an increase of the | ||
229 | next P-State from 9 to 16. So this will make processor execute at higher | ||
230 | P-State for the same CPU load. If the load continues to be more than the | ||
231 | setpoint during next sample intervals, then P-State will go up again till the | ||
232 | maximum P-State is reached. But the ramp up time to reach the maximum P-State | ||
233 | will be much faster when the setpoint is 60 compared to 97. | ||
234 | |||
235 | Debugging Intel P-State driver | ||
236 | |||
237 | Event tracing | ||
238 | To debug P-State transition, the Linux event tracing interface can be used. | ||
239 | There are two specific events, which can be enabled (Provided the kernel | ||
240 | configs related to event tracing are enabled). | ||
241 | |||
242 | # cd /sys/kernel/debug/tracing/ | ||
243 | # echo 1 > events/power/pstate_sample/enable | ||
244 | # echo 1 > events/power/cpu_frequency/enable | ||
245 | # cat trace | ||
246 | gnome-terminal--4510 [001] ..s. 1177.680733: pstate_sample: core_busy=107 | ||
247 | scaled=94 from=26 to=26 mperf=1143818 aperf=1230607 tsc=29838618 | ||
248 | freq=2474476 | ||
249 | cat-5235 [002] ..s. 1177.681723: cpu_frequency: state=2900000 cpu_id=2 | ||
250 | |||
251 | |||
252 | Using ftrace | ||
253 | |||
254 | If function level tracing is required, the Linux ftrace interface can be used. | ||
255 | For example if we want to check how often a function to set a P-State is | ||
256 | called, we can set ftrace filter to intel_pstate_set_pstate. | ||
257 | |||
258 | # cd /sys/kernel/debug/tracing/ | ||
259 | # cat available_filter_functions | grep -i pstate | ||
260 | intel_pstate_set_pstate | ||
261 | intel_pstate_cpu_init | ||
262 | ... | ||
263 | |||
264 | # echo intel_pstate_set_pstate > set_ftrace_filter | ||
265 | # echo function > current_tracer | ||
266 | # cat trace | head -15 | ||
267 | # tracer: function | ||
268 | # | ||
269 | # entries-in-buffer/entries-written: 80/80 #P:4 | ||
270 | # | ||
271 | # _-----=> irqs-off | ||
272 | # / _----=> need-resched | ||
273 | # | / _---=> hardirq/softirq | ||
274 | # || / _--=> preempt-depth | ||
275 | # ||| / delay | ||
276 | # TASK-PID CPU# |||| TIMESTAMP FUNCTION | ||
277 | # | | | |||| | | | ||
278 | Xorg-3129 [000] ..s. 2537.644844: intel_pstate_set_pstate <-intel_pstate_timer_func | ||
279 | gnome-terminal--4510 [002] ..s. 2537.649844: intel_pstate_set_pstate <-intel_pstate_timer_func | ||
280 | gnome-shell-3409 [001] ..s. 2537.650850: intel_pstate_set_pstate <-intel_pstate_timer_func | ||
281 | <idle>-0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func | ||
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt index 6db22103e2dd..025cf8c9324a 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt | |||
@@ -36,7 +36,7 @@ Optional properties: | |||
36 | control gpios | 36 | control gpios |
37 | 37 | ||
38 | - threshold: allows setting the "click"-threshold in the range | 38 | - threshold: allows setting the "click"-threshold in the range |
39 | from 20 to 80. | 39 | from 0 to 80. |
40 | 40 | ||
41 | - gain: allows setting the sensitivity in the range from 0 to | 41 | - gain: allows setting the sensitivity in the range from 0 to |
42 | 31. Note that lower values indicate higher | 42 | 31. Note that lower values indicate higher |
diff --git a/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt b/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt index 05485699d70e..9630ac0e4b56 100644 --- a/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt +++ b/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt | |||
@@ -16,6 +16,11 @@ Required properties: | |||
16 | - reg: Base address of PMIC on Hi6220 SoC. | 16 | - reg: Base address of PMIC on Hi6220 SoC. |
17 | - interrupt-controller: Hi655x has internal IRQs (has own IRQ domain). | 17 | - interrupt-controller: Hi655x has internal IRQs (has own IRQ domain). |
18 | - pmic-gpios: The GPIO used by PMIC IRQ. | 18 | - pmic-gpios: The GPIO used by PMIC IRQ. |
19 | - #clock-cells: From common clock binding; shall be set to 0 | ||
20 | |||
21 | Optional properties: | ||
22 | - clock-output-names: From common clock binding to override the | ||
23 | default output clock name | ||
19 | 24 | ||
20 | Example: | 25 | Example: |
21 | pmic: pmic@f8000000 { | 26 | pmic: pmic@f8000000 { |
@@ -24,4 +29,5 @@ Example: | |||
24 | interrupt-controller; | 29 | interrupt-controller; |
25 | #interrupt-cells = <2>; | 30 | #interrupt-cells = <2>; |
26 | pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; | 31 | pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; |
32 | #clock-cells = <0>; | ||
27 | } | 33 | } |
diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt index e25436861867..9029b45b8a22 100644 --- a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt +++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt | |||
@@ -18,6 +18,8 @@ Optional properties: | |||
18 | "ext_clock" (External clock provided to the card). | 18 | "ext_clock" (External clock provided to the card). |
19 | - post-power-on-delay-ms : Delay in ms after powering the card and | 19 | - post-power-on-delay-ms : Delay in ms after powering the card and |
20 | de-asserting the reset-gpios (if any) | 20 | de-asserting the reset-gpios (if any) |
21 | - power-off-delay-us : Delay in us after asserting the reset-gpios (if any) | ||
22 | during power off of the card. | ||
21 | 23 | ||
22 | Example: | 24 | Example: |
23 | 25 | ||
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index a1e3693cca16..6f55bdd52f8a 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt | |||
@@ -15,6 +15,10 @@ Optional properties: | |||
15 | - phy-reset-active-high : If present then the reset sequence using the GPIO | 15 | - phy-reset-active-high : If present then the reset sequence using the GPIO |
16 | specified in the "phy-reset-gpios" property is reversed (H=reset state, | 16 | specified in the "phy-reset-gpios" property is reversed (H=reset state, |
17 | L=operation state). | 17 | L=operation state). |
18 | - phy-reset-post-delay : Post reset delay in milliseconds. If present then | ||
19 | a delay of phy-reset-post-delay milliseconds will be observed after the | ||
20 | phy-reset-gpios has been toggled. Can be omitted thus no delay is | ||
21 | observed. Delay is in range of 1ms to 1000ms. Other delays are invalid. | ||
18 | - phy-supply : regulator that powers the Ethernet PHY. | 22 | - phy-supply : regulator that powers the Ethernet PHY. |
19 | - phy-handle : phandle to the PHY device connected to this device. | 23 | - phy-handle : phandle to the PHY device connected to this device. |
20 | - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. | 24 | - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. |
diff --git a/Documentation/devicetree/bindings/staging/ion/hi6220-ion.txt b/Documentation/devicetree/bindings/staging/ion/hi6220-ion.txt deleted file mode 100644 index c59e27c632c1..000000000000 --- a/Documentation/devicetree/bindings/staging/ion/hi6220-ion.txt +++ /dev/null | |||
@@ -1,31 +0,0 @@ | |||
1 | Hi6220 SoC ION | ||
2 | =================================================================== | ||
3 | Required properties: | ||
4 | - compatible : "hisilicon,hi6220-ion" | ||
5 | - list of the ION heaps | ||
6 | - heap name : maybe heap_sys_user@0 | ||
7 | - heap id : id should be unique in the system. | ||
8 | - heap base : base ddr address of the heap,0 means that | ||
9 | it is dynamic. | ||
10 | - heap size : memory size and 0 means it is dynamic. | ||
11 | - heap type : the heap type of the heap, please also | ||
12 | see the define in ion.h(drivers/staging/android/uapi/ion.h) | ||
13 | ------------------------------------------------------------------- | ||
14 | Example: | ||
15 | hi6220-ion { | ||
16 | compatible = "hisilicon,hi6220-ion"; | ||
17 | heap_sys_user@0 { | ||
18 | heap-name = "sys_user"; | ||
19 | heap-id = <0x0>; | ||
20 | heap-base = <0x0>; | ||
21 | heap-size = <0x0>; | ||
22 | heap-type = "ion_system"; | ||
23 | }; | ||
24 | heap_sys_contig@0 { | ||
25 | heap-name = "sys_contig"; | ||
26 | heap-id = <0x1>; | ||
27 | heap-base = <0x0>; | ||
28 | heap-size = <0x0>; | ||
29 | heap-type = "ion_system_contig"; | ||
30 | }; | ||
31 | }; | ||
diff --git a/Documentation/input/devices/edt-ft5x06.rst b/Documentation/input/devices/edt-ft5x06.rst index 2032f0b7a8fa..1ccc94b192b7 100644 --- a/Documentation/input/devices/edt-ft5x06.rst +++ b/Documentation/input/devices/edt-ft5x06.rst | |||
@@ -15,7 +15,7 @@ It has been tested with the following devices: | |||
15 | The driver allows configuration of the touch screen via a set of sysfs files: | 15 | The driver allows configuration of the touch screen via a set of sysfs files: |
16 | 16 | ||
17 | /sys/class/input/eventX/device/device/threshold: | 17 | /sys/class/input/eventX/device/device/threshold: |
18 | allows setting the "click"-threshold in the range from 20 to 80. | 18 | allows setting the "click"-threshold in the range from 0 to 80. |
19 | 19 | ||
20 | /sys/class/input/eventX/device/device/gain: | 20 | /sys/class/input/eventX/device/device/gain: |
21 | allows setting the sensitivity in the range from 0 to 31. Note that | 21 | allows setting the sensitivity in the range from 0 to 31. Note that |
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst index 5338673c88d9..773d2bfacc6c 100644 --- a/Documentation/sound/hd-audio/models.rst +++ b/Documentation/sound/hd-audio/models.rst | |||
@@ -16,6 +16,8 @@ ALC880 | |||
16 | 6-jack in back, 2-jack in front | 16 | 6-jack in back, 2-jack in front |
17 | 6stack-digout | 17 | 6stack-digout |
18 | 6-jack with a SPDIF out | 18 | 6-jack with a SPDIF out |
19 | 6stack-automute | ||
20 | 6-jack with headphone jack detection | ||
19 | 21 | ||
20 | ALC260 | 22 | ALC260 |
21 | ====== | 23 | ====== |
@@ -62,6 +64,8 @@ lenovo-dock | |||
62 | Enables docking station I/O for some Lenovos | 64 | Enables docking station I/O for some Lenovos |
63 | hp-gpio-led | 65 | hp-gpio-led |
64 | GPIO LED support on HP laptops | 66 | GPIO LED support on HP laptops |
67 | hp-dock-gpio-mic1-led | ||
68 | HP dock with mic LED support | ||
65 | dell-headset-multi | 69 | dell-headset-multi |
66 | Headset jack, which can also be used as mic-in | 70 | Headset jack, which can also be used as mic-in |
67 | dell-headset-dock | 71 | dell-headset-dock |
@@ -72,6 +76,12 @@ alc283-sense-combo | |||
72 | Combo jack sensing on ALC283 | 76 | Combo jack sensing on ALC283 |
73 | tpt440-dock | 77 | tpt440-dock |
74 | Pin configs for Lenovo Thinkpad Dock support | 78 | Pin configs for Lenovo Thinkpad Dock support |
79 | tpt440 | ||
80 | Lenovo Thinkpad T440s setup | ||
81 | tpt460 | ||
82 | Lenovo Thinkpad T460/560 setup | ||
83 | dual-codecs | ||
84 | Lenovo laptops with dual codecs | ||
75 | 85 | ||
76 | ALC66x/67x/892 | 86 | ALC66x/67x/892 |
77 | ============== | 87 | ============== |
@@ -97,6 +107,8 @@ inv-dmic | |||
97 | Inverted internal mic workaround | 107 | Inverted internal mic workaround |
98 | dell-headset-multi | 108 | dell-headset-multi |
99 | Headset jack, which can also be used as mic-in | 109 | Headset jack, which can also be used as mic-in |
110 | dual-codecs | ||
111 | Lenovo laptops with dual codecs | ||
100 | 112 | ||
101 | ALC680 | 113 | ALC680 |
102 | ====== | 114 | ====== |
@@ -114,6 +126,8 @@ inv-dmic | |||
114 | Inverted internal mic workaround | 126 | Inverted internal mic workaround |
115 | no-primary-hp | 127 | no-primary-hp |
116 | VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC) | 128 | VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC) |
129 | dual-codecs | ||
130 | ALC1220 dual codecs for Gaming mobos | ||
117 | 131 | ||
118 | ALC861/660 | 132 | ALC861/660 |
119 | ========== | 133 | ========== |
@@ -206,65 +220,47 @@ auto | |||
206 | 220 | ||
207 | Conexant 5045 | 221 | Conexant 5045 |
208 | ============= | 222 | ============= |
209 | laptop-hpsense | 223 | cap-mix-amp |
210 | Laptop with HP sense (old model laptop) | 224 | Fix max input level on mixer widget |
211 | laptop-micsense | 225 | toshiba-p105 |
212 | Laptop with Mic sense (old model fujitsu) | 226 | Toshiba P105 quirk |
213 | laptop-hpmicsense | 227 | hp-530 |
214 | Laptop with HP and Mic senses | 228 | HP 530 quirk |
215 | benq | ||
216 | Benq R55E | ||
217 | laptop-hp530 | ||
218 | HP 530 laptop | ||
219 | test | ||
220 | for testing/debugging purpose, almost all controls can be | ||
221 | adjusted. Appearing only when compiled with $CONFIG_SND_DEBUG=y | ||
222 | 229 | ||
223 | Conexant 5047 | 230 | Conexant 5047 |
224 | ============= | 231 | ============= |
225 | laptop | 232 | cap-mix-amp |
226 | Basic Laptop config | 233 | Fix max input level on mixer widget |
227 | laptop-hp | ||
228 | Laptop config for some HP models (subdevice 30A5) | ||
229 | laptop-eapd | ||
230 | Laptop config with EAPD support | ||
231 | test | ||
232 | for testing/debugging purpose, almost all controls can be | ||
233 | adjusted. Appearing only when compiled with $CONFIG_SND_DEBUG=y | ||
234 | 234 | ||
235 | Conexant 5051 | 235 | Conexant 5051 |
236 | ============= | 236 | ============= |
237 | laptop | 237 | lenovo-x200 |
238 | Basic Laptop config (default) | 238 | Lenovo X200 quirk |
239 | hp | ||
240 | HP Spartan laptop | ||
241 | hp-dv6736 | ||
242 | HP dv6736 | ||
243 | hp-f700 | ||
244 | HP Compaq Presario F700 | ||
245 | ideapad | ||
246 | Lenovo IdeaPad laptop | ||
247 | toshiba | ||
248 | Toshiba Satellite M300 | ||
249 | 239 | ||
250 | Conexant 5066 | 240 | Conexant 5066 |
251 | ============= | 241 | ============= |
252 | laptop | 242 | stereo-dmic |
253 | Basic Laptop config (default) | 243 | Workaround for inverted stereo digital mic |
254 | hp-laptop | 244 | gpio1 |
255 | HP laptops, e g G60 | 245 | Enable GPIO1 pin |
256 | asus | 246 | headphone-mic-pin |
257 | Asus K52JU, Lenovo G560 | 247 | Enable headphone mic NID 0x18 without detection |
258 | dell-laptop | 248 | tp410 |
259 | Dell laptops | 249 | Thinkpad T400 & co quirks |
260 | dell-vostro | ||
261 | Dell Vostro | ||
262 | olpc-xo-1_5 | ||
263 | OLPC XO 1.5 | ||
264 | ideapad | ||
265 | Lenovo IdeaPad U150 | ||
266 | thinkpad | 250 | thinkpad |
267 | Lenovo Thinkpad | 251 | Thinkpad mute/mic LED quirk |
252 | lemote-a1004 | ||
253 | Lemote A1004 quirk | ||
254 | lemote-a1205 | ||
255 | Lemote A1205 quirk | ||
256 | olpc-xo | ||
257 | OLPC XO quirk | ||
258 | mute-led-eapd | ||
259 | Mute LED control via EAPD | ||
260 | hp-dock | ||
261 | HP dock support | ||
262 | mute-led-gpio | ||
263 | Mute LED control via GPIO | ||
268 | 264 | ||
269 | STAC9200 | 265 | STAC9200 |
270 | ======== | 266 | ======== |
@@ -444,6 +440,8 @@ dell-eq | |||
444 | Dell desktops/laptops | 440 | Dell desktops/laptops |
445 | alienware | 441 | alienware |
446 | Alienware M17x | 442 | Alienware M17x |
443 | asus-mobo | ||
444 | Pin configs for ASUS mobo with 5.1/SPDIF out | ||
447 | auto | 445 | auto |
448 | BIOS setup (default) | 446 | BIOS setup (default) |
449 | 447 | ||
@@ -477,6 +475,8 @@ hp-envy-ts-bass | |||
477 | Pin fixup for HP Envy TS bass speaker (NID 0x10) | 475 | Pin fixup for HP Envy TS bass speaker (NID 0x10) |
478 | hp-bnb13-eq | 476 | hp-bnb13-eq |
479 | Hardware equalizer setup for HP laptops | 477 | Hardware equalizer setup for HP laptops |
478 | hp-envy-ts-bass | ||
479 | HP Envy TS bass support | ||
480 | auto | 480 | auto |
481 | BIOS setup (default) | 481 | BIOS setup (default) |
482 | 482 | ||
@@ -496,10 +496,22 @@ auto | |||
496 | 496 | ||
497 | Cirrus Logic CS4206/4207 | 497 | Cirrus Logic CS4206/4207 |
498 | ======================== | 498 | ======================== |
499 | mbp53 | ||
500 | MacBook Pro 5,3 | ||
499 | mbp55 | 501 | mbp55 |
500 | MacBook Pro 5,5 | 502 | MacBook Pro 5,5 |
501 | imac27 | 503 | imac27 |
502 | IMac 27 Inch | 504 | IMac 27 Inch |
505 | imac27_122 | ||
506 | iMac 12,2 | ||
507 | apple | ||
508 | Generic Apple quirk | ||
509 | mbp101 | ||
510 | MacBookPro 10,1 | ||
511 | mbp81 | ||
512 | MacBookPro 8,1 | ||
513 | mba42 | ||
514 | MacBookAir 4,2 | ||
503 | auto | 515 | auto |
504 | BIOS setup (default) | 516 | BIOS setup (default) |
505 | 517 | ||
@@ -509,6 +521,10 @@ mba6 | |||
509 | MacBook Air 6,1 and 6,2 | 521 | MacBook Air 6,1 and 6,2 |
510 | gpio0 | 522 | gpio0 |
511 | Enable GPIO 0 amp | 523 | Enable GPIO 0 amp |
524 | mbp11 | ||
525 | MacBookPro 11,2 | ||
526 | macmini | ||
527 | MacMini 7,1 | ||
512 | auto | 528 | auto |
513 | BIOS setup (default) | 529 | BIOS setup (default) |
514 | 530 | ||
diff --git a/Documentation/usb/typec.rst b/Documentation/usb/typec.rst index b67a46779de9..8a7249f2ff04 100644 --- a/Documentation/usb/typec.rst +++ b/Documentation/usb/typec.rst | |||
@@ -114,8 +114,7 @@ the details during registration. The class offers the following API for | |||
114 | registering/unregistering cables and their plugs: | 114 | registering/unregistering cables and their plugs: |
115 | 115 | ||
116 | .. kernel-doc:: drivers/usb/typec/typec.c | 116 | .. kernel-doc:: drivers/usb/typec/typec.c |
117 | :functions: typec_register_cable typec_unregister_cable typec_register_plug | 117 | :functions: typec_register_cable typec_unregister_cable typec_register_plug typec_unregister_plug |
118 | typec_unregister_plug | ||
119 | 118 | ||
120 | The class will provide a handle to struct typec_cable and struct typec_plug if | 119 | The class will provide a handle to struct typec_cable and struct typec_plug if |
121 | the registration is successful, or NULL if it isn't. | 120 | the registration is successful, or NULL if it isn't. |
@@ -137,8 +136,7 @@ during connection of a partner or cable, the port driver must use the following | |||
137 | APIs to report it to the class: | 136 | APIs to report it to the class: |
138 | 137 | ||
139 | .. kernel-doc:: drivers/usb/typec/typec.c | 138 | .. kernel-doc:: drivers/usb/typec/typec.c |
140 | :functions: typec_set_data_role typec_set_pwr_role typec_set_vconn_role | 139 | :functions: typec_set_data_role typec_set_pwr_role typec_set_vconn_role typec_set_pwr_opmode |
141 | typec_set_pwr_opmode | ||
142 | 140 | ||
143 | Alternate Modes | 141 | Alternate Modes |
144 | ~~~~~~~~~~~~~~~ | 142 | ~~~~~~~~~~~~~~~ |
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt index 4f7d86dd0a5d..914518aeb972 100644 --- a/Documentation/watchdog/watchdog-parameters.txt +++ b/Documentation/watchdog/watchdog-parameters.txt | |||
@@ -117,7 +117,7 @@ nowayout: Watchdog cannot be stopped once started | |||
117 | ------------------------------------------------- | 117 | ------------------------------------------------- |
118 | iTCO_wdt: | 118 | iTCO_wdt: |
119 | heartbeat: Watchdog heartbeat in seconds. | 119 | heartbeat: Watchdog heartbeat in seconds. |
120 | (2<heartbeat<39 (TCO v1) or 613 (TCO v2), default=30) | 120 | (5<=heartbeat<=74 (TCO v1) or 1226 (TCO v2), default=30) |
121 | nowayout: Watchdog cannot be stopped once started | 121 | nowayout: Watchdog cannot be stopped once started |
122 | (default=kernel config parameter) | 122 | (default=kernel config parameter) |
123 | ------------------------------------------------- | 123 | ------------------------------------------------- |
diff --git a/MAINTAINERS b/MAINTAINERS index f7d568b8f133..053c3bdd1fe5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -846,7 +846,6 @@ M: Laura Abbott <labbott@redhat.com> | |||
846 | M: Sumit Semwal <sumit.semwal@linaro.org> | 846 | M: Sumit Semwal <sumit.semwal@linaro.org> |
847 | L: devel@driverdev.osuosl.org | 847 | L: devel@driverdev.osuosl.org |
848 | S: Supported | 848 | S: Supported |
849 | F: Documentation/devicetree/bindings/staging/ion/ | ||
850 | F: drivers/staging/android/ion | 849 | F: drivers/staging/android/ion |
851 | F: drivers/staging/android/uapi/ion.h | 850 | F: drivers/staging/android/uapi/ion.h |
852 | F: drivers/staging/android/uapi/ion_test.h | 851 | F: drivers/staging/android/uapi/ion_test.h |
@@ -3116,6 +3115,14 @@ F: drivers/net/ieee802154/cc2520.c | |||
3116 | F: include/linux/spi/cc2520.h | 3115 | F: include/linux/spi/cc2520.h |
3117 | F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt | 3116 | F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt |
3118 | 3117 | ||
3118 | CCREE ARM TRUSTZONE CRYPTOCELL 700 REE DRIVER | ||
3119 | M: Gilad Ben-Yossef <gilad@benyossef.com> | ||
3120 | L: linux-crypto@vger.kernel.org | ||
3121 | L: driverdev-devel@linuxdriverproject.org | ||
3122 | S: Supported | ||
3123 | F: drivers/staging/ccree/ | ||
3124 | W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family | ||
3125 | |||
3119 | CEC FRAMEWORK | 3126 | CEC FRAMEWORK |
3120 | M: Hans Verkuil <hans.verkuil@cisco.com> | 3127 | M: Hans Verkuil <hans.verkuil@cisco.com> |
3121 | L: linux-media@vger.kernel.org | 3128 | L: linux-media@vger.kernel.org |
@@ -5695,7 +5702,7 @@ M: Alex Elder <elder@kernel.org> | |||
5695 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 5702 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
5696 | S: Maintained | 5703 | S: Maintained |
5697 | F: drivers/staging/greybus/ | 5704 | F: drivers/staging/greybus/ |
5698 | L: greybus-dev@lists.linaro.org | 5705 | L: greybus-dev@lists.linaro.org (moderated for non-subscribers) |
5699 | 5706 | ||
5700 | GREYBUS AUDIO PROTOCOLS DRIVERS | 5707 | GREYBUS AUDIO PROTOCOLS DRIVERS |
5701 | M: Vaibhav Agarwal <vaibhav.sr@gmail.com> | 5708 | M: Vaibhav Agarwal <vaibhav.sr@gmail.com> |
@@ -7136,7 +7143,7 @@ S: Maintained | |||
7136 | F: drivers/media/platform/rcar_jpu.c | 7143 | F: drivers/media/platform/rcar_jpu.c |
7137 | 7144 | ||
7138 | JSM Neo PCI based serial card | 7145 | JSM Neo PCI based serial card |
7139 | M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com> | 7146 | M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com> |
7140 | L: linux-serial@vger.kernel.org | 7147 | L: linux-serial@vger.kernel.org |
7141 | S: Maintained | 7148 | S: Maintained |
7142 | F: drivers/tty/serial/jsm/ | 7149 | F: drivers/tty/serial/jsm/ |
@@ -9553,10 +9560,6 @@ F: drivers/net/wireless/intersil/orinoco/ | |||
9553 | 9560 | ||
9554 | OSD LIBRARY and FILESYSTEM | 9561 | OSD LIBRARY and FILESYSTEM |
9555 | M: Boaz Harrosh <ooo@electrozaur.com> | 9562 | M: Boaz Harrosh <ooo@electrozaur.com> |
9556 | M: Benny Halevy <bhalevy@primarydata.com> | ||
9557 | L: osd-dev@open-osd.org | ||
9558 | W: http://open-osd.org | ||
9559 | T: git git://git.open-osd.org/open-osd.git | ||
9560 | S: Maintained | 9563 | S: Maintained |
9561 | F: drivers/scsi/osd/ | 9564 | F: drivers/scsi/osd/ |
9562 | F: include/scsi/osd_* | 9565 | F: include/scsi/osd_* |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 12 | 2 | PATCHLEVEL = 12 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc3 |
5 | NAME = Fearless Coyote | 5 | NAME = Fearless Coyote |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -1172,7 +1172,7 @@ headers_check_all: headers_install_all | |||
1172 | PHONY += headers_check | 1172 | PHONY += headers_check |
1173 | headers_check: headers_install | 1173 | headers_check: headers_install |
1174 | $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1 | 1174 | $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1 |
1175 | $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/ $(hdr-dst) HDRCHECK=1 | 1175 | $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi $(hdr-dst) HDRCHECK=1 |
1176 | 1176 | ||
1177 | # --------------------------------------------------------------------------- | 1177 | # --------------------------------------------------------------------------- |
1178 | # Kernel selftest | 1178 | # Kernel selftest |
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 9ec56dc97374..ce93124a850b 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c | |||
@@ -1201,8 +1201,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, | |||
1201 | if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) | 1201 | if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) |
1202 | return -EFAULT; | 1202 | return -EFAULT; |
1203 | 1203 | ||
1204 | err = 0; | 1204 | err = put_user(status, ustatus); |
1205 | err |= put_user(status, ustatus); | 1205 | if (ret < 0) |
1206 | return err ? err : ret; | ||
1207 | |||
1206 | err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); | 1208 | err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); |
1207 | err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); | 1209 | err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); |
1208 | err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); | 1210 | err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); |
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi index 12c981e51134..9a0599f711ff 100644 --- a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi +++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi | |||
@@ -1,6 +1,6 @@ | |||
1 | / { | 1 | / { |
2 | aliases { | 2 | aliases { |
3 | ethernet = ðernet; | 3 | ethernet0 = ðernet; |
4 | }; | 4 | }; |
5 | }; | 5 | }; |
6 | 6 | ||
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi index 3f0a56ebcf1f..dc7ae776db5f 100644 --- a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi +++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi | |||
@@ -1,6 +1,6 @@ | |||
1 | / { | 1 | / { |
2 | aliases { | 2 | aliases { |
3 | ethernet = ðernet; | 3 | ethernet0 = ðernet; |
4 | }; | 4 | }; |
5 | }; | 5 | }; |
6 | 6 | ||
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 35cea3fcaf5c..561f27d8d922 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi | |||
@@ -198,8 +198,8 @@ | |||
198 | brcm,pins = <0 1>; | 198 | brcm,pins = <0 1>; |
199 | brcm,function = <BCM2835_FSEL_ALT0>; | 199 | brcm,function = <BCM2835_FSEL_ALT0>; |
200 | }; | 200 | }; |
201 | i2c0_gpio32: i2c0_gpio32 { | 201 | i2c0_gpio28: i2c0_gpio28 { |
202 | brcm,pins = <32 34>; | 202 | brcm,pins = <28 29>; |
203 | brcm,function = <BCM2835_FSEL_ALT0>; | 203 | brcm,function = <BCM2835_FSEL_ALT0>; |
204 | }; | 204 | }; |
205 | i2c0_gpio44: i2c0_gpio44 { | 205 | i2c0_gpio44: i2c0_gpio44 { |
@@ -295,20 +295,28 @@ | |||
295 | /* Separate from the uart0_gpio14 group | 295 | /* Separate from the uart0_gpio14 group |
296 | * because it conflicts with spi1_gpio16, and | 296 | * because it conflicts with spi1_gpio16, and |
297 | * people often run uart0 on the two pins | 297 | * people often run uart0 on the two pins |
298 | * without flow contrl. | 298 | * without flow control. |
299 | */ | 299 | */ |
300 | uart0_ctsrts_gpio16: uart0_ctsrts_gpio16 { | 300 | uart0_ctsrts_gpio16: uart0_ctsrts_gpio16 { |
301 | brcm,pins = <16 17>; | 301 | brcm,pins = <16 17>; |
302 | brcm,function = <BCM2835_FSEL_ALT3>; | 302 | brcm,function = <BCM2835_FSEL_ALT3>; |
303 | }; | 303 | }; |
304 | uart0_gpio30: uart0_gpio30 { | 304 | uart0_ctsrts_gpio30: uart0_ctsrts_gpio30 { |
305 | brcm,pins = <30 31>; | 305 | brcm,pins = <30 31>; |
306 | brcm,function = <BCM2835_FSEL_ALT3>; | 306 | brcm,function = <BCM2835_FSEL_ALT3>; |
307 | }; | 307 | }; |
308 | uart0_ctsrts_gpio32: uart0_ctsrts_gpio32 { | 308 | uart0_gpio32: uart0_gpio32 { |
309 | brcm,pins = <32 33>; | 309 | brcm,pins = <32 33>; |
310 | brcm,function = <BCM2835_FSEL_ALT3>; | 310 | brcm,function = <BCM2835_FSEL_ALT3>; |
311 | }; | 311 | }; |
312 | uart0_gpio36: uart0_gpio36 { | ||
313 | brcm,pins = <36 37>; | ||
314 | brcm,function = <BCM2835_FSEL_ALT2>; | ||
315 | }; | ||
316 | uart0_ctsrts_gpio38: uart0_ctsrts_gpio38 { | ||
317 | brcm,pins = <38 39>; | ||
318 | brcm,function = <BCM2835_FSEL_ALT2>; | ||
319 | }; | ||
312 | 320 | ||
313 | uart1_gpio14: uart1_gpio14 { | 321 | uart1_gpio14: uart1_gpio14 { |
314 | brcm,pins = <14 15>; | 322 | brcm,pins = <14 15>; |
@@ -326,10 +334,6 @@ | |||
326 | brcm,pins = <30 31>; | 334 | brcm,pins = <30 31>; |
327 | brcm,function = <BCM2835_FSEL_ALT5>; | 335 | brcm,function = <BCM2835_FSEL_ALT5>; |
328 | }; | 336 | }; |
329 | uart1_gpio36: uart1_gpio36 { | ||
330 | brcm,pins = <36 37 38 39>; | ||
331 | brcm,function = <BCM2835_FSEL_ALT2>; | ||
332 | }; | ||
333 | uart1_gpio40: uart1_gpio40 { | 337 | uart1_gpio40: uart1_gpio40 { |
334 | brcm,pins = <40 41>; | 338 | brcm,pins = <40 41>; |
335 | brcm,function = <BCM2835_FSEL_ALT5>; | 339 | brcm,function = <BCM2835_FSEL_ALT5>; |
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 4bc4b575c99b..31a9e061ddd0 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts | |||
@@ -204,6 +204,8 @@ | |||
204 | tps659038: tps659038@58 { | 204 | tps659038: tps659038@58 { |
205 | compatible = "ti,tps659038"; | 205 | compatible = "ti,tps659038"; |
206 | reg = <0x58>; | 206 | reg = <0x58>; |
207 | ti,palmas-override-powerhold; | ||
208 | ti,system-power-controller; | ||
207 | 209 | ||
208 | tps659038_pmic { | 210 | tps659038_pmic { |
209 | compatible = "ti,tps659038-pmic"; | 211 | compatible = "ti,tps659038-pmic"; |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 57892f264cea..e7144662af45 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi | |||
@@ -2017,4 +2017,8 @@ | |||
2017 | coefficients = <0 2000>; | 2017 | coefficients = <0 2000>; |
2018 | }; | 2018 | }; |
2019 | 2019 | ||
2020 | &cpu_crit { | ||
2021 | temperature = <120000>; /* milli Celsius */ | ||
2022 | }; | ||
2023 | |||
2020 | /include/ "dra7xx-clocks.dtsi" | 2024 | /include/ "dra7xx-clocks.dtsi" |
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts index de2215832372..4e103a905dc9 100644 --- a/arch/arm/boot/dts/imx53-qsrb.dts +++ b/arch/arm/boot/dts/imx53-qsrb.dts | |||
@@ -23,7 +23,7 @@ | |||
23 | imx53-qsrb { | 23 | imx53-qsrb { |
24 | pinctrl_pmic: pmicgrp { | 24 | pinctrl_pmic: pmicgrp { |
25 | fsl,pins = < | 25 | fsl,pins = < |
26 | MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */ | 26 | MX53_PAD_CSI0_DAT5__GPIO5_23 0x1c4 /* IRQ */ |
27 | >; | 27 | >; |
28 | }; | 28 | }; |
29 | }; | 29 | }; |
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts index 5bb8fd57e7f5..d71da30c9cff 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dts +++ b/arch/arm/boot/dts/imx6sx-sdb.dts | |||
@@ -12,23 +12,6 @@ | |||
12 | model = "Freescale i.MX6 SoloX SDB RevB Board"; | 12 | model = "Freescale i.MX6 SoloX SDB RevB Board"; |
13 | }; | 13 | }; |
14 | 14 | ||
15 | &cpu0 { | ||
16 | operating-points = < | ||
17 | /* kHz uV */ | ||
18 | 996000 1250000 | ||
19 | 792000 1175000 | ||
20 | 396000 1175000 | ||
21 | 198000 1175000 | ||
22 | >; | ||
23 | fsl,soc-operating-points = < | ||
24 | /* ARM kHz SOC uV */ | ||
25 | 996000 1250000 | ||
26 | 792000 1175000 | ||
27 | 396000 1175000 | ||
28 | 198000 1175000 | ||
29 | >; | ||
30 | }; | ||
31 | |||
32 | &i2c1 { | 15 | &i2c1 { |
33 | clock-frequency = <100000>; | 16 | clock-frequency = <100000>; |
34 | pinctrl-names = "default"; | 17 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/include/arm b/arch/arm/boot/dts/include/arm deleted file mode 120000 index a96aa0ea9d8c..000000000000 --- a/arch/arm/boot/dts/include/arm +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | .. \ No newline at end of file | ||
diff --git a/arch/arm/boot/dts/include/arm64 b/arch/arm/boot/dts/include/arm64 deleted file mode 120000 index 074a835fca3e..000000000000 --- a/arch/arm/boot/dts/include/arm64 +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../../../../arm64/boot/dts \ No newline at end of file | ||
diff --git a/arch/arm/boot/dts/include/dt-bindings b/arch/arm/boot/dts/include/dt-bindings deleted file mode 120000 index 08c00e4972fa..000000000000 --- a/arch/arm/boot/dts/include/dt-bindings +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../../../../../include/dt-bindings \ No newline at end of file | ||
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts index 08cce17a25a0..43e9364083de 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts | |||
@@ -249,9 +249,9 @@ | |||
249 | OMAP3_CORE1_IOPAD(0x2110, PIN_INPUT | MUX_MODE0) /* cam_xclka.cam_xclka */ | 249 | OMAP3_CORE1_IOPAD(0x2110, PIN_INPUT | MUX_MODE0) /* cam_xclka.cam_xclka */ |
250 | OMAP3_CORE1_IOPAD(0x2112, PIN_INPUT | MUX_MODE0) /* cam_pclk.cam_pclk */ | 250 | OMAP3_CORE1_IOPAD(0x2112, PIN_INPUT | MUX_MODE0) /* cam_pclk.cam_pclk */ |
251 | 251 | ||
252 | OMAP3_CORE1_IOPAD(0x2114, PIN_INPUT | MUX_MODE0) /* cam_d0.cam_d0 */ | 252 | OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0) /* cam_d0.cam_d0 */ |
253 | OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0) /* cam_d1.cam_d1 */ | 253 | OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0) /* cam_d1.cam_d1 */ |
254 | OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0) /* cam_d2.cam_d2 */ | 254 | OMAP3_CORE1_IOPAD(0x211a, PIN_INPUT | MUX_MODE0) /* cam_d2.cam_d2 */ |
255 | OMAP3_CORE1_IOPAD(0x211c, PIN_INPUT | MUX_MODE0) /* cam_d3.cam_d3 */ | 255 | OMAP3_CORE1_IOPAD(0x211c, PIN_INPUT | MUX_MODE0) /* cam_d3.cam_d3 */ |
256 | OMAP3_CORE1_IOPAD(0x211e, PIN_INPUT | MUX_MODE0) /* cam_d4.cam_d4 */ | 256 | OMAP3_CORE1_IOPAD(0x211e, PIN_INPUT | MUX_MODE0) /* cam_d4.cam_d4 */ |
257 | OMAP3_CORE1_IOPAD(0x2120, PIN_INPUT | MUX_MODE0) /* cam_d5.cam_d5 */ | 257 | OMAP3_CORE1_IOPAD(0x2120, PIN_INPUT | MUX_MODE0) /* cam_d5.cam_d5 */ |
diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index 402579ab70d2..3a9e9b6aea68 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi | |||
@@ -72,6 +72,8 @@ | |||
72 | <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>, | 72 | <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>, |
73 | <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>, | 73 | <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>, |
74 | <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; | 74 | <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; |
75 | clock-frequency = <13000000>; | ||
76 | arm,cpu-registers-not-fw-configured; | ||
75 | }; | 77 | }; |
76 | 78 | ||
77 | watchdog: watchdog@10007000 { | 79 | watchdog: watchdog@10007000 { |
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index b3a8b1f24499..9ec737069369 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi | |||
@@ -55,7 +55,8 @@ | |||
55 | simple-audio-card,bitclock-master = <&telephony_link_master>; | 55 | simple-audio-card,bitclock-master = <&telephony_link_master>; |
56 | simple-audio-card,frame-master = <&telephony_link_master>; | 56 | simple-audio-card,frame-master = <&telephony_link_master>; |
57 | simple-audio-card,format = "i2s"; | 57 | simple-audio-card,format = "i2s"; |
58 | 58 | simple-audio-card,bitclock-inversion; | |
59 | simple-audio-card,frame-inversion; | ||
59 | simple-audio-card,cpu { | 60 | simple-audio-card,cpu { |
60 | sound-dai = <&mcbsp4>; | 61 | sound-dai = <&mcbsp4>; |
61 | }; | 62 | }; |
diff --git a/arch/arm/boot/dts/omap4-panda-a4.dts b/arch/arm/boot/dts/omap4-panda-a4.dts index 78d363177762..f1a6476af371 100644 --- a/arch/arm/boot/dts/omap4-panda-a4.dts +++ b/arch/arm/boot/dts/omap4-panda-a4.dts | |||
@@ -13,7 +13,7 @@ | |||
13 | /* Pandaboard Rev A4+ have external pullups on SCL & SDA */ | 13 | /* Pandaboard Rev A4+ have external pullups on SCL & SDA */ |
14 | &dss_hdmi_pins { | 14 | &dss_hdmi_pins { |
15 | pinctrl-single,pins = < | 15 | pinctrl-single,pins = < |
16 | OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ | 16 | OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */ |
17 | OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */ | 17 | OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */ |
18 | OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */ | 18 | OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */ |
19 | >; | 19 | >; |
diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts index 119f8e657edc..940fe4f7c5f6 100644 --- a/arch/arm/boot/dts/omap4-panda-es.dts +++ b/arch/arm/boot/dts/omap4-panda-es.dts | |||
@@ -34,7 +34,7 @@ | |||
34 | /* PandaboardES has external pullups on SCL & SDA */ | 34 | /* PandaboardES has external pullups on SCL & SDA */ |
35 | &dss_hdmi_pins { | 35 | &dss_hdmi_pins { |
36 | pinctrl-single,pins = < | 36 | pinctrl-single,pins = < |
37 | OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ | 37 | OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */ |
38 | OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */ | 38 | OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */ |
39 | OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */ | 39 | OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */ |
40 | >; | 40 | >; |
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig new file mode 100644 index 000000000000..d2d75fa664a6 --- /dev/null +++ b/arch/arm/configs/gemini_defconfig | |||
@@ -0,0 +1,68 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | ||
2 | CONFIG_SYSVIPC=y | ||
3 | CONFIG_NO_HZ_IDLE=y | ||
4 | CONFIG_BSD_PROCESS_ACCT=y | ||
5 | CONFIG_USER_NS=y | ||
6 | CONFIG_RELAY=y | ||
7 | CONFIG_BLK_DEV_INITRD=y | ||
8 | CONFIG_PARTITION_ADVANCED=y | ||
9 | CONFIG_ARCH_MULTI_V4=y | ||
10 | # CONFIG_ARCH_MULTI_V7 is not set | ||
11 | CONFIG_ARCH_GEMINI=y | ||
12 | CONFIG_PCI=y | ||
13 | CONFIG_PREEMPT=y | ||
14 | CONFIG_AEABI=y | ||
15 | CONFIG_CMDLINE="console=ttyS0,115200n8" | ||
16 | CONFIG_KEXEC=y | ||
17 | CONFIG_BINFMT_MISC=y | ||
18 | CONFIG_PM=y | ||
19 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
20 | CONFIG_DEVTMPFS=y | ||
21 | CONFIG_MTD=y | ||
22 | CONFIG_MTD_BLOCK=y | ||
23 | CONFIG_MTD_CFI=y | ||
24 | CONFIG_MTD_CFI_INTELEXT=y | ||
25 | CONFIG_MTD_CFI_AMDSTD=y | ||
26 | CONFIG_MTD_CFI_STAA=y | ||
27 | CONFIG_MTD_PHYSMAP=y | ||
28 | CONFIG_MTD_PHYSMAP_OF=y | ||
29 | CONFIG_BLK_DEV_RAM=y | ||
30 | CONFIG_BLK_DEV_RAM_SIZE=16384 | ||
31 | # CONFIG_SCSI_PROC_FS is not set | ||
32 | CONFIG_BLK_DEV_SD=y | ||
33 | # CONFIG_SCSI_LOWLEVEL is not set | ||
34 | CONFIG_ATA=y | ||
35 | CONFIG_INPUT_EVDEV=y | ||
36 | CONFIG_KEYBOARD_GPIO=y | ||
37 | # CONFIG_INPUT_MOUSE is not set | ||
38 | # CONFIG_LEGACY_PTYS is not set | ||
39 | CONFIG_SERIAL_8250=y | ||
40 | CONFIG_SERIAL_8250_CONSOLE=y | ||
41 | CONFIG_SERIAL_8250_NR_UARTS=1 | ||
42 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 | ||
43 | CONFIG_SERIAL_OF_PLATFORM=y | ||
44 | # CONFIG_HW_RANDOM is not set | ||
45 | # CONFIG_HWMON is not set | ||
46 | CONFIG_WATCHDOG=y | ||
47 | CONFIG_GEMINI_WATCHDOG=y | ||
48 | CONFIG_USB=y | ||
49 | CONFIG_USB_MON=y | ||
50 | CONFIG_USB_FOTG210_HCD=y | ||
51 | CONFIG_USB_STORAGE=y | ||
52 | CONFIG_NEW_LEDS=y | ||
53 | CONFIG_LEDS_CLASS=y | ||
54 | CONFIG_LEDS_GPIO=y | ||
55 | CONFIG_LEDS_TRIGGERS=y | ||
56 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | ||
57 | CONFIG_RTC_CLASS=y | ||
58 | CONFIG_RTC_DRV_GEMINI=y | ||
59 | CONFIG_DMADEVICES=y | ||
60 | # CONFIG_DNOTIFY is not set | ||
61 | CONFIG_TMPFS=y | ||
62 | CONFIG_TMPFS_POSIX_ACL=y | ||
63 | CONFIG_ROMFS_FS=y | ||
64 | CONFIG_NLS_CODEPAGE_437=y | ||
65 | CONFIG_NLS_ISO8859_1=y | ||
66 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | ||
67 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
68 | CONFIG_DEBUG_FS=y | ||
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h index 4917c2f7e459..e74ab0fbab79 100644 --- a/arch/arm/include/asm/kvm_coproc.h +++ b/arch/arm/include/asm/kvm_coproc.h | |||
@@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table); | |||
31 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); | 31 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); |
32 | int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); | 32 | int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); |
33 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); | 33 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); |
34 | int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); | 34 | int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run); |
35 | int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
35 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); | 36 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); |
36 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); | 37 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); |
37 | 38 | ||
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 2c14b69511e9..6d1d2e26dfe5 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/vfp.h> | 32 | #include <asm/vfp.h> |
33 | #include "../vfp/vfpinstr.h" | 33 | #include "../vfp/vfpinstr.h" |
34 | 34 | ||
35 | #define CREATE_TRACE_POINTS | ||
35 | #include "trace.h" | 36 | #include "trace.h" |
36 | #include "coproc.h" | 37 | #include "coproc.h" |
37 | 38 | ||
@@ -111,12 +112,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
111 | return 1; | 112 | return 1; |
112 | } | 113 | } |
113 | 114 | ||
114 | int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
115 | { | ||
116 | kvm_inject_undefined(vcpu); | ||
117 | return 1; | ||
118 | } | ||
119 | |||
120 | static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | 115 | static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) |
121 | { | 116 | { |
122 | /* | 117 | /* |
@@ -284,7 +279,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu, | |||
284 | * must always support PMCCNTR (the cycle counter): we just RAZ/WI for | 279 | * must always support PMCCNTR (the cycle counter): we just RAZ/WI for |
285 | * all PM registers, which doesn't crash the guest kernel at least. | 280 | * all PM registers, which doesn't crash the guest kernel at least. |
286 | */ | 281 | */ |
287 | static bool pm_fake(struct kvm_vcpu *vcpu, | 282 | static bool trap_raz_wi(struct kvm_vcpu *vcpu, |
288 | const struct coproc_params *p, | 283 | const struct coproc_params *p, |
289 | const struct coproc_reg *r) | 284 | const struct coproc_reg *r) |
290 | { | 285 | { |
@@ -294,19 +289,19 @@ static bool pm_fake(struct kvm_vcpu *vcpu, | |||
294 | return read_zero(vcpu, p); | 289 | return read_zero(vcpu, p); |
295 | } | 290 | } |
296 | 291 | ||
297 | #define access_pmcr pm_fake | 292 | #define access_pmcr trap_raz_wi |
298 | #define access_pmcntenset pm_fake | 293 | #define access_pmcntenset trap_raz_wi |
299 | #define access_pmcntenclr pm_fake | 294 | #define access_pmcntenclr trap_raz_wi |
300 | #define access_pmovsr pm_fake | 295 | #define access_pmovsr trap_raz_wi |
301 | #define access_pmselr pm_fake | 296 | #define access_pmselr trap_raz_wi |
302 | #define access_pmceid0 pm_fake | 297 | #define access_pmceid0 trap_raz_wi |
303 | #define access_pmceid1 pm_fake | 298 | #define access_pmceid1 trap_raz_wi |
304 | #define access_pmccntr pm_fake | 299 | #define access_pmccntr trap_raz_wi |
305 | #define access_pmxevtyper pm_fake | 300 | #define access_pmxevtyper trap_raz_wi |
306 | #define access_pmxevcntr pm_fake | 301 | #define access_pmxevcntr trap_raz_wi |
307 | #define access_pmuserenr pm_fake | 302 | #define access_pmuserenr trap_raz_wi |
308 | #define access_pmintenset pm_fake | 303 | #define access_pmintenset trap_raz_wi |
309 | #define access_pmintenclr pm_fake | 304 | #define access_pmintenclr trap_raz_wi |
310 | 305 | ||
311 | /* Architected CP15 registers. | 306 | /* Architected CP15 registers. |
312 | * CRn denotes the primary register number, but is copied to the CRm in the | 307 | * CRn denotes the primary register number, but is copied to the CRm in the |
@@ -532,12 +527,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu, | |||
532 | return 1; | 527 | return 1; |
533 | } | 528 | } |
534 | 529 | ||
535 | /** | 530 | static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu) |
536 | * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access | ||
537 | * @vcpu: The VCPU pointer | ||
538 | * @run: The kvm_run struct | ||
539 | */ | ||
540 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
541 | { | 531 | { |
542 | struct coproc_params params; | 532 | struct coproc_params params; |
543 | 533 | ||
@@ -551,9 +541,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
551 | params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; | 541 | params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; |
552 | params.CRm = 0; | 542 | params.CRm = 0; |
553 | 543 | ||
544 | return params; | ||
545 | } | ||
546 | |||
547 | /** | ||
548 | * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access | ||
549 | * @vcpu: The VCPU pointer | ||
550 | * @run: The kvm_run struct | ||
551 | */ | ||
552 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
553 | { | ||
554 | struct coproc_params params = decode_64bit_hsr(vcpu); | ||
555 | |||
554 | return emulate_cp15(vcpu, ¶ms); | 556 | return emulate_cp15(vcpu, ¶ms); |
555 | } | 557 | } |
556 | 558 | ||
559 | /** | ||
560 | * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access | ||
561 | * @vcpu: The VCPU pointer | ||
562 | * @run: The kvm_run struct | ||
563 | */ | ||
564 | int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
565 | { | ||
566 | struct coproc_params params = decode_64bit_hsr(vcpu); | ||
567 | |||
568 | /* raz_wi cp14 */ | ||
569 | trap_raz_wi(vcpu, ¶ms, NULL); | ||
570 | |||
571 | /* handled */ | ||
572 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
573 | return 1; | ||
574 | } | ||
575 | |||
557 | static void reset_coproc_regs(struct kvm_vcpu *vcpu, | 576 | static void reset_coproc_regs(struct kvm_vcpu *vcpu, |
558 | const struct coproc_reg *table, size_t num) | 577 | const struct coproc_reg *table, size_t num) |
559 | { | 578 | { |
@@ -564,12 +583,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu, | |||
564 | table[i].reset(vcpu, &table[i]); | 583 | table[i].reset(vcpu, &table[i]); |
565 | } | 584 | } |
566 | 585 | ||
567 | /** | 586 | static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu) |
568 | * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access | ||
569 | * @vcpu: The VCPU pointer | ||
570 | * @run: The kvm_run struct | ||
571 | */ | ||
572 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
573 | { | 587 | { |
574 | struct coproc_params params; | 588 | struct coproc_params params; |
575 | 589 | ||
@@ -583,9 +597,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
583 | params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; | 597 | params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; |
584 | params.Rt2 = 0; | 598 | params.Rt2 = 0; |
585 | 599 | ||
600 | return params; | ||
601 | } | ||
602 | |||
603 | /** | ||
604 | * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access | ||
605 | * @vcpu: The VCPU pointer | ||
606 | * @run: The kvm_run struct | ||
607 | */ | ||
608 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
609 | { | ||
610 | struct coproc_params params = decode_32bit_hsr(vcpu); | ||
586 | return emulate_cp15(vcpu, ¶ms); | 611 | return emulate_cp15(vcpu, ¶ms); |
587 | } | 612 | } |
588 | 613 | ||
614 | /** | ||
615 | * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access | ||
616 | * @vcpu: The VCPU pointer | ||
617 | * @run: The kvm_run struct | ||
618 | */ | ||
619 | int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
620 | { | ||
621 | struct coproc_params params = decode_32bit_hsr(vcpu); | ||
622 | |||
623 | /* raz_wi cp14 */ | ||
624 | trap_raz_wi(vcpu, ¶ms, NULL); | ||
625 | |||
626 | /* handled */ | ||
627 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
628 | return 1; | ||
629 | } | ||
630 | |||
589 | /****************************************************************************** | 631 | /****************************************************************************** |
590 | * Userspace API | 632 | * Userspace API |
591 | *****************************************************************************/ | 633 | *****************************************************************************/ |
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 5fd7968cdae9..f86a9aaef462 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c | |||
@@ -95,9 +95,9 @@ static exit_handle_fn arm_exit_handlers[] = { | |||
95 | [HSR_EC_WFI] = kvm_handle_wfx, | 95 | [HSR_EC_WFI] = kvm_handle_wfx, |
96 | [HSR_EC_CP15_32] = kvm_handle_cp15_32, | 96 | [HSR_EC_CP15_32] = kvm_handle_cp15_32, |
97 | [HSR_EC_CP15_64] = kvm_handle_cp15_64, | 97 | [HSR_EC_CP15_64] = kvm_handle_cp15_64, |
98 | [HSR_EC_CP14_MR] = kvm_handle_cp14_access, | 98 | [HSR_EC_CP14_MR] = kvm_handle_cp14_32, |
99 | [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, | 99 | [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, |
100 | [HSR_EC_CP14_64] = kvm_handle_cp14_access, | 100 | [HSR_EC_CP14_64] = kvm_handle_cp14_64, |
101 | [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, | 101 | [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, |
102 | [HSR_EC_CP10_ID] = kvm_handle_cp10_id, | 102 | [HSR_EC_CP10_ID] = kvm_handle_cp10_id, |
103 | [HSR_EC_HVC] = handle_hvc, | 103 | [HSR_EC_HVC] = handle_hvc, |
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 3023bb530edf..8679405b0b2b 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile | |||
@@ -2,6 +2,8 @@ | |||
2 | # Makefile for Kernel-based Virtual Machine module, HYP part | 2 | # Makefile for Kernel-based Virtual Machine module, HYP part |
3 | # | 3 | # |
4 | 4 | ||
5 | ccflags-y += -fno-stack-protector | ||
6 | |||
5 | KVM=../../../../virt/kvm | 7 | KVM=../../../../virt/kvm |
6 | 8 | ||
7 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o | 9 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o |
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c index 92678b7bd046..624a510d31df 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c | |||
@@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host) | |||
48 | write_sysreg(HSTR_T(15), HSTR); | 48 | write_sysreg(HSTR_T(15), HSTR); |
49 | write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR); | 49 | write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR); |
50 | val = read_sysreg(HDCR); | 50 | val = read_sysreg(HDCR); |
51 | write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR); | 51 | val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */ |
52 | val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */ | ||
53 | write_sysreg(val, HDCR); | ||
52 | } | 54 | } |
53 | 55 | ||
54 | static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) | 56 | static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) |
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h index fc0943776db2..b0d10648c486 100644 --- a/arch/arm/kvm/trace.h +++ b/arch/arm/kvm/trace.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) | 1 | #if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ) |
2 | #define _TRACE_KVM_H | 2 | #define _TRACE_ARM_KVM_H |
3 | 3 | ||
4 | #include <linux/tracepoint.h> | 4 | #include <linux/tracepoint.h> |
5 | 5 | ||
@@ -74,10 +74,10 @@ TRACE_EVENT(kvm_hvc, | |||
74 | __entry->vcpu_pc, __entry->r0, __entry->imm) | 74 | __entry->vcpu_pc, __entry->r0, __entry->imm) |
75 | ); | 75 | ); |
76 | 76 | ||
77 | #endif /* _TRACE_KVM_H */ | 77 | #endif /* _TRACE_ARM_KVM_H */ |
78 | 78 | ||
79 | #undef TRACE_INCLUDE_PATH | 79 | #undef TRACE_INCLUDE_PATH |
80 | #define TRACE_INCLUDE_PATH arch/arm/kvm | 80 | #define TRACE_INCLUDE_PATH . |
81 | #undef TRACE_INCLUDE_FILE | 81 | #undef TRACE_INCLUDE_FILE |
82 | #define TRACE_INCLUDE_FILE trace | 82 | #define TRACE_INCLUDE_FILE trace |
83 | 83 | ||
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 2cd27c830ab6..283e79ab587d 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c | |||
@@ -335,7 +335,7 @@ static const struct ramc_info ramc_infos[] __initconst = { | |||
335 | { .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR}, | 335 | { .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR}, |
336 | }; | 336 | }; |
337 | 337 | ||
338 | static const struct of_device_id const ramc_ids[] __initconst = { | 338 | static const struct of_device_id ramc_ids[] __initconst = { |
339 | { .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] }, | 339 | { .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] }, |
340 | { .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] }, | 340 | { .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] }, |
341 | { .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] }, | 341 | { .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] }, |
diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c index cf3f8658f0e5..a55a7ecf146a 100644 --- a/arch/arm/mach-bcm/bcm_kona_smc.c +++ b/arch/arm/mach-bcm/bcm_kona_smc.c | |||
@@ -33,7 +33,7 @@ struct bcm_kona_smc_data { | |||
33 | unsigned result; | 33 | unsigned result; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | static const struct of_device_id const bcm_kona_smc_ids[] __initconst = { | 36 | static const struct of_device_id bcm_kona_smc_ids[] __initconst = { |
37 | {.compatible = "brcm,kona-smc"}, | 37 | {.compatible = "brcm,kona-smc"}, |
38 | {.compatible = "bcm,kona-smc"}, /* deprecated name */ | 38 | {.compatible = "bcm,kona-smc"}, /* deprecated name */ |
39 | {}, | 39 | {}, |
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index 03da3813f1ab..7d5a44a06648 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c | |||
@@ -346,7 +346,7 @@ static struct usb_ohci_pdata cns3xxx_usb_ohci_pdata = { | |||
346 | .power_off = csn3xxx_usb_power_off, | 346 | .power_off = csn3xxx_usb_power_off, |
347 | }; | 347 | }; |
348 | 348 | ||
349 | static const struct of_dev_auxdata const cns3xxx_auxdata[] __initconst = { | 349 | static const struct of_dev_auxdata cns3xxx_auxdata[] __initconst = { |
350 | { "intel,usb-ehci", CNS3XXX_USB_BASE, "ehci-platform", &cns3xxx_usb_ehci_pdata }, | 350 | { "intel,usb-ehci", CNS3XXX_USB_BASE, "ehci-platform", &cns3xxx_usb_ehci_pdata }, |
351 | { "intel,usb-ohci", CNS3XXX_USB_OHCI_BASE, "ohci-platform", &cns3xxx_usb_ohci_pdata }, | 351 | { "intel,usb-ohci", CNS3XXX_USB_OHCI_BASE, "ohci-platform", &cns3xxx_usb_ohci_pdata }, |
352 | { "cavium,cns3420-ahci", CNS3XXX_SATA2_BASE, "ahci", NULL }, | 352 | { "cavium,cns3420-ahci", CNS3XXX_SATA2_BASE, "ahci", NULL }, |
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 3089d3bfa19b..8cc6338fcb12 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h | |||
@@ -266,11 +266,12 @@ extern int omap4_cpu_kill(unsigned int cpu); | |||
266 | extern const struct smp_operations omap4_smp_ops; | 266 | extern const struct smp_operations omap4_smp_ops; |
267 | #endif | 267 | #endif |
268 | 268 | ||
269 | extern u32 omap4_get_cpu1_ns_pa_addr(void); | ||
270 | |||
269 | #if defined(CONFIG_SMP) && defined(CONFIG_PM) | 271 | #if defined(CONFIG_SMP) && defined(CONFIG_PM) |
270 | extern int omap4_mpuss_init(void); | 272 | extern int omap4_mpuss_init(void); |
271 | extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); | 273 | extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); |
272 | extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); | 274 | extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); |
273 | extern u32 omap4_get_cpu1_ns_pa_addr(void); | ||
274 | #else | 275 | #else |
275 | static inline int omap4_enter_lowpower(unsigned int cpu, | 276 | static inline int omap4_enter_lowpower(unsigned int cpu, |
276 | unsigned int power_state) | 277 | unsigned int power_state) |
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 03ec6d307c82..4cfc4f9b2c69 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c | |||
@@ -213,11 +213,6 @@ static void __init save_l2x0_context(void) | |||
213 | {} | 213 | {} |
214 | #endif | 214 | #endif |
215 | 215 | ||
216 | u32 omap4_get_cpu1_ns_pa_addr(void) | ||
217 | { | ||
218 | return old_cpu1_ns_pa_addr; | ||
219 | } | ||
220 | |||
221 | /** | 216 | /** |
222 | * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function | 217 | * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function |
223 | * The purpose of this function is to manage low power programming | 218 | * The purpose of this function is to manage low power programming |
@@ -457,6 +452,11 @@ int __init omap4_mpuss_init(void) | |||
457 | 452 | ||
458 | #endif | 453 | #endif |
459 | 454 | ||
455 | u32 omap4_get_cpu1_ns_pa_addr(void) | ||
456 | { | ||
457 | return old_cpu1_ns_pa_addr; | ||
458 | } | ||
459 | |||
460 | /* | 460 | /* |
461 | * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to | 461 | * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to |
462 | * current kernel's secondary_startup() early before | 462 | * current kernel's secondary_startup() early before |
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 3faf454ba487..33e4953c61a8 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c | |||
@@ -306,7 +306,6 @@ static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c) | |||
306 | 306 | ||
307 | cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base + | 307 | cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base + |
308 | OMAP_AUX_CORE_BOOT_1); | 308 | OMAP_AUX_CORE_BOOT_1); |
309 | cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr(); | ||
310 | 309 | ||
311 | /* Did the configured secondary_startup() get overwritten? */ | 310 | /* Did the configured secondary_startup() get overwritten? */ |
312 | if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa)) | 311 | if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa)) |
@@ -316,9 +315,13 @@ static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c) | |||
316 | * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a | 315 | * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a |
317 | * deeper idle state in WFI and will wake to an invalid address. | 316 | * deeper idle state in WFI and will wake to an invalid address. |
318 | */ | 317 | */ |
319 | if ((soc_is_omap44xx() || soc_is_omap54xx()) && | 318 | if ((soc_is_omap44xx() || soc_is_omap54xx())) { |
320 | !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr)) | 319 | cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr(); |
321 | needs_reset = true; | 320 | if (!omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr)) |
321 | needs_reset = true; | ||
322 | } else { | ||
323 | cpu1_ns_pa_addr = 0; | ||
324 | } | ||
322 | 325 | ||
323 | if (!needs_reset || !c->cpu1_rstctrl_va) | 326 | if (!needs_reset || !c->cpu1_rstctrl_va) |
324 | return; | 327 | return; |
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 2b138b65129a..dc11841ca334 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c | |||
@@ -711,7 +711,7 @@ static struct omap_prcm_init_data scrm_data __initdata = { | |||
711 | }; | 711 | }; |
712 | #endif | 712 | #endif |
713 | 713 | ||
714 | static const struct of_device_id const omap_prcm_dt_match_table[] __initconst = { | 714 | static const struct of_device_id omap_prcm_dt_match_table[] __initconst = { |
715 | #ifdef CONFIG_SOC_AM33XX | 715 | #ifdef CONFIG_SOC_AM33XX |
716 | { .compatible = "ti,am3-prcm", .data = &am3_prm_data }, | 716 | { .compatible = "ti,am3-prcm", .data = &am3_prm_data }, |
717 | #endif | 717 | #endif |
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c index 2028167fff31..d76b1e5eb8ba 100644 --- a/arch/arm/mach-omap2/vc.c +++ b/arch/arm/mach-omap2/vc.c | |||
@@ -559,7 +559,7 @@ struct i2c_init_data { | |||
559 | u8 hsscll_12; | 559 | u8 hsscll_12; |
560 | }; | 560 | }; |
561 | 561 | ||
562 | static const struct i2c_init_data const omap4_i2c_timing_data[] __initconst = { | 562 | static const struct i2c_init_data omap4_i2c_timing_data[] __initconst = { |
563 | { | 563 | { |
564 | .load = 50, | 564 | .load = 50, |
565 | .loadbits = 0x3, | 565 | .loadbits = 0x3, |
diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c index 4878ba90026d..289e036c9c30 100644 --- a/arch/arm/mach-spear/time.c +++ b/arch/arm/mach-spear/time.c | |||
@@ -204,7 +204,7 @@ static void __init spear_clockevent_init(int irq) | |||
204 | setup_irq(irq, &spear_timer_irq); | 204 | setup_irq(irq, &spear_timer_irq); |
205 | } | 205 | } |
206 | 206 | ||
207 | static const struct of_device_id const timer_of_match[] __initconst = { | 207 | static const struct of_device_id timer_of_match[] __initconst = { |
208 | { .compatible = "st,spear-timer", }, | 208 | { .compatible = "st,spear-timer", }, |
209 | { }, | 209 | { }, |
210 | }; | 210 | }; |
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 4afcffcb46cb..73272f43ca01 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms | |||
@@ -106,8 +106,13 @@ config ARCH_MVEBU | |||
106 | select ARMADA_AP806_SYSCON | 106 | select ARMADA_AP806_SYSCON |
107 | select ARMADA_CP110_SYSCON | 107 | select ARMADA_CP110_SYSCON |
108 | select ARMADA_37XX_CLK | 108 | select ARMADA_37XX_CLK |
109 | select GPIOLIB | ||
110 | select GPIOLIB_IRQCHIP | ||
109 | select MVEBU_ODMI | 111 | select MVEBU_ODMI |
110 | select MVEBU_PIC | 112 | select MVEBU_PIC |
113 | select OF_GPIO | ||
114 | select PINCTRL | ||
115 | select PINCTRL_ARMADA_37XX | ||
111 | help | 116 | help |
112 | This enables support for Marvell EBU familly, including: | 117 | This enables support for Marvell EBU familly, including: |
113 | - Armada 3700 SoC Family | 118 | - Armada 3700 SoC Family |
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index 75bce2d0b1a8..49f6a6242cf9 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | |||
@@ -81,6 +81,45 @@ | |||
81 | }; | 81 | }; |
82 | }; | 82 | }; |
83 | 83 | ||
84 | reg_sys_5v: regulator@0 { | ||
85 | compatible = "regulator-fixed"; | ||
86 | regulator-name = "SYS_5V"; | ||
87 | regulator-min-microvolt = <5000000>; | ||
88 | regulator-max-microvolt = <5000000>; | ||
89 | regulator-boot-on; | ||
90 | regulator-always-on; | ||
91 | }; | ||
92 | |||
93 | reg_vdd_3v3: regulator@1 { | ||
94 | compatible = "regulator-fixed"; | ||
95 | regulator-name = "VDD_3V3"; | ||
96 | regulator-min-microvolt = <3300000>; | ||
97 | regulator-max-microvolt = <3300000>; | ||
98 | regulator-boot-on; | ||
99 | regulator-always-on; | ||
100 | vin-supply = <®_sys_5v>; | ||
101 | }; | ||
102 | |||
103 | reg_5v_hub: regulator@2 { | ||
104 | compatible = "regulator-fixed"; | ||
105 | regulator-name = "5V_HUB"; | ||
106 | regulator-min-microvolt = <5000000>; | ||
107 | regulator-max-microvolt = <5000000>; | ||
108 | regulator-boot-on; | ||
109 | gpio = <&gpio0 7 0>; | ||
110 | regulator-always-on; | ||
111 | vin-supply = <®_sys_5v>; | ||
112 | }; | ||
113 | |||
114 | wl1835_pwrseq: wl1835-pwrseq { | ||
115 | compatible = "mmc-pwrseq-simple"; | ||
116 | /* WLAN_EN GPIO */ | ||
117 | reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>; | ||
118 | clocks = <&pmic>; | ||
119 | clock-names = "ext_clock"; | ||
120 | power-off-delay-us = <10>; | ||
121 | }; | ||
122 | |||
84 | soc { | 123 | soc { |
85 | spi0: spi@f7106000 { | 124 | spi0: spi@f7106000 { |
86 | status = "ok"; | 125 | status = "ok"; |
@@ -256,11 +295,31 @@ | |||
256 | 295 | ||
257 | /* GPIO blocks 16 thru 19 do not appear to be routed to pins */ | 296 | /* GPIO blocks 16 thru 19 do not appear to be routed to pins */ |
258 | 297 | ||
298 | dwmmc_0: dwmmc0@f723d000 { | ||
299 | cap-mmc-highspeed; | ||
300 | non-removable; | ||
301 | bus-width = <0x8>; | ||
302 | vmmc-supply = <&ldo19>; | ||
303 | }; | ||
304 | |||
305 | dwmmc_1: dwmmc1@f723e000 { | ||
306 | card-detect-delay = <200>; | ||
307 | cap-sd-highspeed; | ||
308 | sd-uhs-sdr12; | ||
309 | sd-uhs-sdr25; | ||
310 | sd-uhs-sdr50; | ||
311 | vqmmc-supply = <&ldo7>; | ||
312 | vmmc-supply = <&ldo10>; | ||
313 | bus-width = <0x4>; | ||
314 | disable-wp; | ||
315 | cd-gpios = <&gpio1 0 1>; | ||
316 | }; | ||
317 | |||
259 | dwmmc_2: dwmmc2@f723f000 { | 318 | dwmmc_2: dwmmc2@f723f000 { |
260 | ti,non-removable; | 319 | bus-width = <0x4>; |
261 | non-removable; | 320 | non-removable; |
262 | /* WL_EN */ | 321 | vmmc-supply = <®_vdd_3v3>; |
263 | vmmc-supply = <&wlan_en_reg>; | 322 | mmc-pwrseq = <&wl1835_pwrseq>; |
264 | 323 | ||
265 | #address-cells = <0x1>; | 324 | #address-cells = <0x1>; |
266 | #size-cells = <0x0>; | 325 | #size-cells = <0x0>; |
@@ -272,18 +331,6 @@ | |||
272 | interrupts = <3 IRQ_TYPE_EDGE_RISING>; | 331 | interrupts = <3 IRQ_TYPE_EDGE_RISING>; |
273 | }; | 332 | }; |
274 | }; | 333 | }; |
275 | |||
276 | wlan_en_reg: regulator@1 { | ||
277 | compatible = "regulator-fixed"; | ||
278 | regulator-name = "wlan-en-regulator"; | ||
279 | regulator-min-microvolt = <1800000>; | ||
280 | regulator-max-microvolt = <1800000>; | ||
281 | /* WLAN_EN GPIO */ | ||
282 | gpio = <&gpio0 5 0>; | ||
283 | /* WLAN card specific delay */ | ||
284 | startup-delay-us = <70000>; | ||
285 | enable-active-high; | ||
286 | }; | ||
287 | }; | 334 | }; |
288 | 335 | ||
289 | leds { | 336 | leds { |
@@ -330,6 +377,7 @@ | |||
330 | pmic: pmic@f8000000 { | 377 | pmic: pmic@f8000000 { |
331 | compatible = "hisilicon,hi655x-pmic"; | 378 | compatible = "hisilicon,hi655x-pmic"; |
332 | reg = <0x0 0xf8000000 0x0 0x1000>; | 379 | reg = <0x0 0xf8000000 0x0 0x1000>; |
380 | #clock-cells = <0>; | ||
333 | interrupt-controller; | 381 | interrupt-controller; |
334 | #interrupt-cells = <2>; | 382 | #interrupt-cells = <2>; |
335 | pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; | 383 | pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; |
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi index 1e5129b19280..5013e4b2ea71 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi | |||
@@ -725,20 +725,10 @@ | |||
725 | status = "disabled"; | 725 | status = "disabled"; |
726 | }; | 726 | }; |
727 | 727 | ||
728 | fixed_5v_hub: regulator@0 { | ||
729 | compatible = "regulator-fixed"; | ||
730 | regulator-name = "fixed_5v_hub"; | ||
731 | regulator-min-microvolt = <5000000>; | ||
732 | regulator-max-microvolt = <5000000>; | ||
733 | regulator-boot-on; | ||
734 | gpio = <&gpio0 7 0>; | ||
735 | regulator-always-on; | ||
736 | }; | ||
737 | |||
738 | usb_phy: usbphy { | 728 | usb_phy: usbphy { |
739 | compatible = "hisilicon,hi6220-usb-phy"; | 729 | compatible = "hisilicon,hi6220-usb-phy"; |
740 | #phy-cells = <0>; | 730 | #phy-cells = <0>; |
741 | phy-supply = <&fixed_5v_hub>; | 731 | phy-supply = <®_5v_hub>; |
742 | hisilicon,peripheral-syscon = <&sys_ctrl>; | 732 | hisilicon,peripheral-syscon = <&sys_ctrl>; |
743 | }; | 733 | }; |
744 | 734 | ||
@@ -766,17 +756,12 @@ | |||
766 | 756 | ||
767 | dwmmc_0: dwmmc0@f723d000 { | 757 | dwmmc_0: dwmmc0@f723d000 { |
768 | compatible = "hisilicon,hi6220-dw-mshc"; | 758 | compatible = "hisilicon,hi6220-dw-mshc"; |
769 | num-slots = <0x1>; | ||
770 | cap-mmc-highspeed; | ||
771 | non-removable; | ||
772 | reg = <0x0 0xf723d000 0x0 0x1000>; | 759 | reg = <0x0 0xf723d000 0x0 0x1000>; |
773 | interrupts = <0x0 0x48 0x4>; | 760 | interrupts = <0x0 0x48 0x4>; |
774 | clocks = <&sys_ctrl 2>, <&sys_ctrl 1>; | 761 | clocks = <&sys_ctrl 2>, <&sys_ctrl 1>; |
775 | clock-names = "ciu", "biu"; | 762 | clock-names = "ciu", "biu"; |
776 | resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>; | 763 | resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>; |
777 | reset-names = "reset"; | 764 | reset-names = "reset"; |
778 | bus-width = <0x8>; | ||
779 | vmmc-supply = <&ldo19>; | ||
780 | pinctrl-names = "default"; | 765 | pinctrl-names = "default"; |
781 | pinctrl-0 = <&emmc_pmx_func &emmc_clk_cfg_func | 766 | pinctrl-0 = <&emmc_pmx_func &emmc_clk_cfg_func |
782 | &emmc_cfg_func &emmc_rst_cfg_func>; | 767 | &emmc_cfg_func &emmc_rst_cfg_func>; |
@@ -784,13 +769,7 @@ | |||
784 | 769 | ||
785 | dwmmc_1: dwmmc1@f723e000 { | 770 | dwmmc_1: dwmmc1@f723e000 { |
786 | compatible = "hisilicon,hi6220-dw-mshc"; | 771 | compatible = "hisilicon,hi6220-dw-mshc"; |
787 | num-slots = <0x1>; | ||
788 | card-detect-delay = <200>; | ||
789 | hisilicon,peripheral-syscon = <&ao_ctrl>; | 772 | hisilicon,peripheral-syscon = <&ao_ctrl>; |
790 | cap-sd-highspeed; | ||
791 | sd-uhs-sdr12; | ||
792 | sd-uhs-sdr25; | ||
793 | sd-uhs-sdr50; | ||
794 | reg = <0x0 0xf723e000 0x0 0x1000>; | 773 | reg = <0x0 0xf723e000 0x0 0x1000>; |
795 | interrupts = <0x0 0x49 0x4>; | 774 | interrupts = <0x0 0x49 0x4>; |
796 | #address-cells = <0x1>; | 775 | #address-cells = <0x1>; |
@@ -799,11 +778,6 @@ | |||
799 | clock-names = "ciu", "biu"; | 778 | clock-names = "ciu", "biu"; |
800 | resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>; | 779 | resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>; |
801 | reset-names = "reset"; | 780 | reset-names = "reset"; |
802 | vqmmc-supply = <&ldo7>; | ||
803 | vmmc-supply = <&ldo10>; | ||
804 | bus-width = <0x4>; | ||
805 | disable-wp; | ||
806 | cd-gpios = <&gpio1 0 1>; | ||
807 | pinctrl-names = "default", "idle"; | 781 | pinctrl-names = "default", "idle"; |
808 | pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>; | 782 | pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>; |
809 | pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>; | 783 | pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>; |
@@ -811,15 +785,12 @@ | |||
811 | 785 | ||
812 | dwmmc_2: dwmmc2@f723f000 { | 786 | dwmmc_2: dwmmc2@f723f000 { |
813 | compatible = "hisilicon,hi6220-dw-mshc"; | 787 | compatible = "hisilicon,hi6220-dw-mshc"; |
814 | num-slots = <0x1>; | ||
815 | reg = <0x0 0xf723f000 0x0 0x1000>; | 788 | reg = <0x0 0xf723f000 0x0 0x1000>; |
816 | interrupts = <0x0 0x4a 0x4>; | 789 | interrupts = <0x0 0x4a 0x4>; |
817 | clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>; | 790 | clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>; |
818 | clock-names = "ciu", "biu"; | 791 | clock-names = "ciu", "biu"; |
819 | resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>; | 792 | resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>; |
820 | reset-names = "reset"; | 793 | reset-names = "reset"; |
821 | bus-width = <0x4>; | ||
822 | broken-cd; | ||
823 | pinctrl-names = "default", "idle"; | 794 | pinctrl-names = "default", "idle"; |
824 | pinctrl-0 = <&sdio_pmx_func &sdio_clk_cfg_func &sdio_cfg_func>; | 795 | pinctrl-0 = <&sdio_pmx_func &sdio_clk_cfg_func &sdio_cfg_func>; |
825 | pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>; | 796 | pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>; |
diff --git a/arch/arm64/boot/dts/include/arm b/arch/arm64/boot/dts/include/arm deleted file mode 120000 index cf63d80e2b93..000000000000 --- a/arch/arm64/boot/dts/include/arm +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../../../../arm/boot/dts \ No newline at end of file | ||
diff --git a/arch/arm64/boot/dts/include/arm64 b/arch/arm64/boot/dts/include/arm64 deleted file mode 120000 index a96aa0ea9d8c..000000000000 --- a/arch/arm64/boot/dts/include/arm64 +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | .. \ No newline at end of file | ||
diff --git a/arch/arm64/boot/dts/include/dt-bindings b/arch/arm64/boot/dts/include/dt-bindings deleted file mode 120000 index 08c00e4972fa..000000000000 --- a/arch/arm64/boot/dts/include/dt-bindings +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../../../../../include/dt-bindings \ No newline at end of file | ||
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts index cef5f976bc0f..a89855f57091 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts | |||
@@ -79,6 +79,8 @@ | |||
79 | }; | 79 | }; |
80 | 80 | ||
81 | &i2c0 { | 81 | &i2c0 { |
82 | pinctrl-names = "default"; | ||
83 | pinctrl-0 = <&i2c1_pins>; | ||
82 | status = "okay"; | 84 | status = "okay"; |
83 | 85 | ||
84 | gpio_exp: pca9555@22 { | 86 | gpio_exp: pca9555@22 { |
@@ -113,6 +115,8 @@ | |||
113 | 115 | ||
114 | &spi0 { | 116 | &spi0 { |
115 | status = "okay"; | 117 | status = "okay"; |
118 | pinctrl-names = "default"; | ||
119 | pinctrl-0 = <&spi_quad_pins>; | ||
116 | 120 | ||
117 | m25p80@0 { | 121 | m25p80@0 { |
118 | compatible = "jedec,spi-nor"; | 122 | compatible = "jedec,spi-nor"; |
@@ -143,6 +147,8 @@ | |||
143 | 147 | ||
144 | /* Exported on the micro USB connector CON32 through an FTDI */ | 148 | /* Exported on the micro USB connector CON32 through an FTDI */ |
145 | &uart0 { | 149 | &uart0 { |
150 | pinctrl-names = "default"; | ||
151 | pinctrl-0 = <&uart1_pins>; | ||
146 | status = "okay"; | 152 | status = "okay"; |
147 | }; | 153 | }; |
148 | 154 | ||
@@ -184,6 +190,8 @@ | |||
184 | }; | 190 | }; |
185 | 191 | ||
186 | ð0 { | 192 | ð0 { |
193 | pinctrl-names = "default"; | ||
194 | pinctrl-0 = <&rgmii_pins>; | ||
187 | phy-mode = "rgmii-id"; | 195 | phy-mode = "rgmii-id"; |
188 | phy = <&phy0>; | 196 | phy = <&phy0>; |
189 | status = "okay"; | 197 | status = "okay"; |
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index 58ae9e095af2..4d495ec39202 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi | |||
@@ -161,16 +161,83 @@ | |||
161 | #clock-cells = <1>; | 161 | #clock-cells = <1>; |
162 | }; | 162 | }; |
163 | 163 | ||
164 | gpio1: gpio@13800 { | 164 | pinctrl_nb: pinctrl@13800 { |
165 | compatible = "marvell,mvebu-gpio-3700", | 165 | compatible = "marvell,armada3710-nb-pinctrl", |
166 | "syscon", "simple-mfd"; | 166 | "syscon", "simple-mfd"; |
167 | reg = <0x13800 0x500>; | 167 | reg = <0x13800 0x100>, <0x13C00 0x20>; |
168 | gpionb: gpio { | ||
169 | #gpio-cells = <2>; | ||
170 | gpio-ranges = <&pinctrl_nb 0 0 36>; | ||
171 | gpio-controller; | ||
172 | interrupts = | ||
173 | <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>, | ||
174 | <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>, | ||
175 | <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>, | ||
176 | <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, | ||
177 | <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>, | ||
178 | <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>, | ||
179 | <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>, | ||
180 | <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>, | ||
181 | <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>, | ||
182 | <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>, | ||
183 | <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>, | ||
184 | <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>; | ||
185 | |||
186 | }; | ||
168 | 187 | ||
169 | xtalclk: xtal-clk { | 188 | xtalclk: xtal-clk { |
170 | compatible = "marvell,armada-3700-xtal-clock"; | 189 | compatible = "marvell,armada-3700-xtal-clock"; |
171 | clock-output-names = "xtal"; | 190 | clock-output-names = "xtal"; |
172 | #clock-cells = <0>; | 191 | #clock-cells = <0>; |
173 | }; | 192 | }; |
193 | |||
194 | spi_quad_pins: spi-quad-pins { | ||
195 | groups = "spi_quad"; | ||
196 | function = "spi"; | ||
197 | }; | ||
198 | |||
199 | i2c1_pins: i2c1-pins { | ||
200 | groups = "i2c1"; | ||
201 | function = "i2c"; | ||
202 | }; | ||
203 | |||
204 | i2c2_pins: i2c2-pins { | ||
205 | groups = "i2c2"; | ||
206 | function = "i2c"; | ||
207 | }; | ||
208 | |||
209 | uart1_pins: uart1-pins { | ||
210 | groups = "uart1"; | ||
211 | function = "uart"; | ||
212 | }; | ||
213 | |||
214 | uart2_pins: uart2-pins { | ||
215 | groups = "uart2"; | ||
216 | function = "uart"; | ||
217 | }; | ||
218 | }; | ||
219 | |||
220 | pinctrl_sb: pinctrl@18800 { | ||
221 | compatible = "marvell,armada3710-sb-pinctrl", | ||
222 | "syscon", "simple-mfd"; | ||
223 | reg = <0x18800 0x100>, <0x18C00 0x20>; | ||
224 | gpiosb: gpio { | ||
225 | #gpio-cells = <2>; | ||
226 | gpio-ranges = <&pinctrl_sb 0 0 29>; | ||
227 | gpio-controller; | ||
228 | interrupts = | ||
229 | <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, | ||
230 | <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>, | ||
231 | <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>, | ||
232 | <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>, | ||
233 | <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>; | ||
234 | }; | ||
235 | |||
236 | rgmii_pins: mii-pins { | ||
237 | groups = "rgmii"; | ||
238 | function = "mii"; | ||
239 | }; | ||
240 | |||
174 | }; | 241 | }; |
175 | 242 | ||
176 | eth0: ethernet@30000 { | 243 | eth0: ethernet@30000 { |
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts index 0ecaad4333a7..1c3634fa94bf 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts | |||
@@ -134,6 +134,9 @@ | |||
134 | bus-width = <8>; | 134 | bus-width = <8>; |
135 | max-frequency = <50000000>; | 135 | max-frequency = <50000000>; |
136 | cap-mmc-highspeed; | 136 | cap-mmc-highspeed; |
137 | mediatek,hs200-cmd-int-delay=<26>; | ||
138 | mediatek,hs400-cmd-int-delay=<14>; | ||
139 | mediatek,hs400-cmd-resp-sel-rising; | ||
137 | vmmc-supply = <&mt6397_vemc_3v3_reg>; | 140 | vmmc-supply = <&mt6397_vemc_3v3_reg>; |
138 | vqmmc-supply = <&mt6397_vio18_reg>; | 141 | vqmmc-supply = <&mt6397_vio18_reg>; |
139 | non-removable; | 142 | non-removable; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts index 658bb9dc9dfd..7bd31066399b 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts | |||
@@ -44,7 +44,7 @@ | |||
44 | 44 | ||
45 | /dts-v1/; | 45 | /dts-v1/; |
46 | #include "rk3399-gru.dtsi" | 46 | #include "rk3399-gru.dtsi" |
47 | #include <include/dt-bindings/input/linux-event-codes.h> | 47 | #include <dt-bindings/input/linux-event-codes.h> |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * Kevin-specific things | 50 | * Kevin-specific things |
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index ce072859e3b2..65cdd878cfbd 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig | |||
@@ -30,7 +30,6 @@ CONFIG_PROFILING=y | |||
30 | CONFIG_JUMP_LABEL=y | 30 | CONFIG_JUMP_LABEL=y |
31 | CONFIG_MODULES=y | 31 | CONFIG_MODULES=y |
32 | CONFIG_MODULE_UNLOAD=y | 32 | CONFIG_MODULE_UNLOAD=y |
33 | # CONFIG_BLK_DEV_BSG is not set | ||
34 | # CONFIG_IOSCHED_DEADLINE is not set | 33 | # CONFIG_IOSCHED_DEADLINE is not set |
35 | CONFIG_ARCH_SUNXI=y | 34 | CONFIG_ARCH_SUNXI=y |
36 | CONFIG_ARCH_ALPINE=y | 35 | CONFIG_ARCH_ALPINE=y |
@@ -62,16 +61,15 @@ CONFIG_ARCH_XGENE=y | |||
62 | CONFIG_ARCH_ZX=y | 61 | CONFIG_ARCH_ZX=y |
63 | CONFIG_ARCH_ZYNQMP=y | 62 | CONFIG_ARCH_ZYNQMP=y |
64 | CONFIG_PCI=y | 63 | CONFIG_PCI=y |
65 | CONFIG_PCI_MSI=y | ||
66 | CONFIG_PCI_IOV=y | 64 | CONFIG_PCI_IOV=y |
67 | CONFIG_PCI_AARDVARK=y | ||
68 | CONFIG_PCIE_RCAR=y | ||
69 | CONFIG_PCI_HOST_GENERIC=y | ||
70 | CONFIG_PCI_XGENE=y | ||
71 | CONFIG_PCI_LAYERSCAPE=y | 65 | CONFIG_PCI_LAYERSCAPE=y |
72 | CONFIG_PCI_HISI=y | 66 | CONFIG_PCI_HISI=y |
73 | CONFIG_PCIE_QCOM=y | 67 | CONFIG_PCIE_QCOM=y |
74 | CONFIG_PCIE_ARMADA_8K=y | 68 | CONFIG_PCIE_ARMADA_8K=y |
69 | CONFIG_PCI_AARDVARK=y | ||
70 | CONFIG_PCIE_RCAR=y | ||
71 | CONFIG_PCI_HOST_GENERIC=y | ||
72 | CONFIG_PCI_XGENE=y | ||
75 | CONFIG_ARM64_VA_BITS_48=y | 73 | CONFIG_ARM64_VA_BITS_48=y |
76 | CONFIG_SCHED_MC=y | 74 | CONFIG_SCHED_MC=y |
77 | CONFIG_NUMA=y | 75 | CONFIG_NUMA=y |
@@ -80,12 +78,11 @@ CONFIG_KSM=y | |||
80 | CONFIG_TRANSPARENT_HUGEPAGE=y | 78 | CONFIG_TRANSPARENT_HUGEPAGE=y |
81 | CONFIG_CMA=y | 79 | CONFIG_CMA=y |
82 | CONFIG_SECCOMP=y | 80 | CONFIG_SECCOMP=y |
83 | CONFIG_XEN=y | ||
84 | CONFIG_KEXEC=y | 81 | CONFIG_KEXEC=y |
85 | CONFIG_CRASH_DUMP=y | 82 | CONFIG_CRASH_DUMP=y |
83 | CONFIG_XEN=y | ||
86 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 84 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
87 | CONFIG_COMPAT=y | 85 | CONFIG_COMPAT=y |
88 | CONFIG_CPU_IDLE=y | ||
89 | CONFIG_HIBERNATION=y | 86 | CONFIG_HIBERNATION=y |
90 | CONFIG_ARM_CPUIDLE=y | 87 | CONFIG_ARM_CPUIDLE=y |
91 | CONFIG_CPU_FREQ=y | 88 | CONFIG_CPU_FREQ=y |
@@ -155,8 +152,8 @@ CONFIG_MTD_SPI_NOR=y | |||
155 | CONFIG_BLK_DEV_LOOP=y | 152 | CONFIG_BLK_DEV_LOOP=y |
156 | CONFIG_BLK_DEV_NBD=m | 153 | CONFIG_BLK_DEV_NBD=m |
157 | CONFIG_VIRTIO_BLK=y | 154 | CONFIG_VIRTIO_BLK=y |
158 | CONFIG_EEPROM_AT25=m | ||
159 | CONFIG_SRAM=y | 155 | CONFIG_SRAM=y |
156 | CONFIG_EEPROM_AT25=m | ||
160 | # CONFIG_SCSI_PROC_FS is not set | 157 | # CONFIG_SCSI_PROC_FS is not set |
161 | CONFIG_BLK_DEV_SD=y | 158 | CONFIG_BLK_DEV_SD=y |
162 | CONFIG_SCSI_SAS_ATA=y | 159 | CONFIG_SCSI_SAS_ATA=y |
@@ -168,8 +165,8 @@ CONFIG_AHCI_CEVA=y | |||
168 | CONFIG_AHCI_MVEBU=y | 165 | CONFIG_AHCI_MVEBU=y |
169 | CONFIG_AHCI_XGENE=y | 166 | CONFIG_AHCI_XGENE=y |
170 | CONFIG_AHCI_QORIQ=y | 167 | CONFIG_AHCI_QORIQ=y |
171 | CONFIG_SATA_RCAR=y | ||
172 | CONFIG_SATA_SIL24=y | 168 | CONFIG_SATA_SIL24=y |
169 | CONFIG_SATA_RCAR=y | ||
173 | CONFIG_PATA_PLATFORM=y | 170 | CONFIG_PATA_PLATFORM=y |
174 | CONFIG_PATA_OF_PLATFORM=y | 171 | CONFIG_PATA_OF_PLATFORM=y |
175 | CONFIG_NETDEVICES=y | 172 | CONFIG_NETDEVICES=y |
@@ -186,18 +183,17 @@ CONFIG_HNS_ENET=y | |||
186 | CONFIG_E1000E=y | 183 | CONFIG_E1000E=y |
187 | CONFIG_IGB=y | 184 | CONFIG_IGB=y |
188 | CONFIG_IGBVF=y | 185 | CONFIG_IGBVF=y |
189 | CONFIG_MVPP2=y | ||
190 | CONFIG_MVNETA=y | 186 | CONFIG_MVNETA=y |
187 | CONFIG_MVPP2=y | ||
191 | CONFIG_SKY2=y | 188 | CONFIG_SKY2=y |
192 | CONFIG_RAVB=y | 189 | CONFIG_RAVB=y |
193 | CONFIG_SMC91X=y | 190 | CONFIG_SMC91X=y |
194 | CONFIG_SMSC911X=y | 191 | CONFIG_SMSC911X=y |
195 | CONFIG_STMMAC_ETH=m | 192 | CONFIG_STMMAC_ETH=m |
196 | CONFIG_REALTEK_PHY=m | 193 | CONFIG_MDIO_BUS_MUX_MMIOREG=y |
197 | CONFIG_MESON_GXL_PHY=m | 194 | CONFIG_MESON_GXL_PHY=m |
198 | CONFIG_MICREL_PHY=y | 195 | CONFIG_MICREL_PHY=y |
199 | CONFIG_MDIO_BUS_MUX=y | 196 | CONFIG_REALTEK_PHY=m |
200 | CONFIG_MDIO_BUS_MUX_MMIOREG=y | ||
201 | CONFIG_USB_PEGASUS=m | 197 | CONFIG_USB_PEGASUS=m |
202 | CONFIG_USB_RTL8150=m | 198 | CONFIG_USB_RTL8150=m |
203 | CONFIG_USB_RTL8152=m | 199 | CONFIG_USB_RTL8152=m |
@@ -230,14 +226,14 @@ CONFIG_SERIAL_8250_UNIPHIER=y | |||
230 | CONFIG_SERIAL_OF_PLATFORM=y | 226 | CONFIG_SERIAL_OF_PLATFORM=y |
231 | CONFIG_SERIAL_AMBA_PL011=y | 227 | CONFIG_SERIAL_AMBA_PL011=y |
232 | CONFIG_SERIAL_AMBA_PL011_CONSOLE=y | 228 | CONFIG_SERIAL_AMBA_PL011_CONSOLE=y |
229 | CONFIG_SERIAL_MESON=y | ||
230 | CONFIG_SERIAL_MESON_CONSOLE=y | ||
233 | CONFIG_SERIAL_SAMSUNG=y | 231 | CONFIG_SERIAL_SAMSUNG=y |
234 | CONFIG_SERIAL_SAMSUNG_CONSOLE=y | 232 | CONFIG_SERIAL_SAMSUNG_CONSOLE=y |
235 | CONFIG_SERIAL_TEGRA=y | 233 | CONFIG_SERIAL_TEGRA=y |
236 | CONFIG_SERIAL_SH_SCI=y | 234 | CONFIG_SERIAL_SH_SCI=y |
237 | CONFIG_SERIAL_SH_SCI_NR_UARTS=11 | 235 | CONFIG_SERIAL_SH_SCI_NR_UARTS=11 |
238 | CONFIG_SERIAL_SH_SCI_CONSOLE=y | 236 | CONFIG_SERIAL_SH_SCI_CONSOLE=y |
239 | CONFIG_SERIAL_MESON=y | ||
240 | CONFIG_SERIAL_MESON_CONSOLE=y | ||
241 | CONFIG_SERIAL_MSM=y | 237 | CONFIG_SERIAL_MSM=y |
242 | CONFIG_SERIAL_MSM_CONSOLE=y | 238 | CONFIG_SERIAL_MSM_CONSOLE=y |
243 | CONFIG_SERIAL_XILINX_PS_UART=y | 239 | CONFIG_SERIAL_XILINX_PS_UART=y |
@@ -261,14 +257,14 @@ CONFIG_I2C_UNIPHIER_F=y | |||
261 | CONFIG_I2C_RCAR=y | 257 | CONFIG_I2C_RCAR=y |
262 | CONFIG_I2C_CROS_EC_TUNNEL=y | 258 | CONFIG_I2C_CROS_EC_TUNNEL=y |
263 | CONFIG_SPI=y | 259 | CONFIG_SPI=y |
264 | CONFIG_SPI_MESON_SPIFC=m | ||
265 | CONFIG_SPI_BCM2835=m | 260 | CONFIG_SPI_BCM2835=m |
266 | CONFIG_SPI_BCM2835AUX=m | 261 | CONFIG_SPI_BCM2835AUX=m |
262 | CONFIG_SPI_MESON_SPIFC=m | ||
267 | CONFIG_SPI_ORION=y | 263 | CONFIG_SPI_ORION=y |
268 | CONFIG_SPI_PL022=y | 264 | CONFIG_SPI_PL022=y |
269 | CONFIG_SPI_QUP=y | 265 | CONFIG_SPI_QUP=y |
270 | CONFIG_SPI_SPIDEV=m | ||
271 | CONFIG_SPI_S3C64XX=y | 266 | CONFIG_SPI_S3C64XX=y |
267 | CONFIG_SPI_SPIDEV=m | ||
272 | CONFIG_SPMI=y | 268 | CONFIG_SPMI=y |
273 | CONFIG_PINCTRL_SINGLE=y | 269 | CONFIG_PINCTRL_SINGLE=y |
274 | CONFIG_PINCTRL_MAX77620=y | 270 | CONFIG_PINCTRL_MAX77620=y |
@@ -286,33 +282,30 @@ CONFIG_GPIO_PCA953X=y | |||
286 | CONFIG_GPIO_PCA953X_IRQ=y | 282 | CONFIG_GPIO_PCA953X_IRQ=y |
287 | CONFIG_GPIO_MAX77620=y | 283 | CONFIG_GPIO_MAX77620=y |
288 | CONFIG_POWER_RESET_MSM=y | 284 | CONFIG_POWER_RESET_MSM=y |
289 | CONFIG_BATTERY_BQ27XXX=y | ||
290 | CONFIG_POWER_RESET_XGENE=y | 285 | CONFIG_POWER_RESET_XGENE=y |
291 | CONFIG_POWER_RESET_SYSCON=y | 286 | CONFIG_POWER_RESET_SYSCON=y |
287 | CONFIG_BATTERY_BQ27XXX=y | ||
288 | CONFIG_SENSORS_ARM_SCPI=y | ||
292 | CONFIG_SENSORS_LM90=m | 289 | CONFIG_SENSORS_LM90=m |
293 | CONFIG_SENSORS_INA2XX=m | 290 | CONFIG_SENSORS_INA2XX=m |
294 | CONFIG_SENSORS_ARM_SCPI=y | ||
295 | CONFIG_THERMAL=y | ||
296 | CONFIG_THERMAL_EMULATION=y | ||
297 | CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y | 291 | CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y |
298 | CONFIG_CPU_THERMAL=y | 292 | CONFIG_CPU_THERMAL=y |
299 | CONFIG_BCM2835_THERMAL=y | 293 | CONFIG_THERMAL_EMULATION=y |
300 | CONFIG_EXYNOS_THERMAL=y | 294 | CONFIG_EXYNOS_THERMAL=y |
301 | CONFIG_WATCHDOG=y | 295 | CONFIG_WATCHDOG=y |
302 | CONFIG_BCM2835_WDT=y | ||
303 | CONFIG_RENESAS_WDT=y | ||
304 | CONFIG_S3C2410_WATCHDOG=y | 296 | CONFIG_S3C2410_WATCHDOG=y |
305 | CONFIG_MESON_GXBB_WATCHDOG=m | 297 | CONFIG_MESON_GXBB_WATCHDOG=m |
306 | CONFIG_MESON_WATCHDOG=m | 298 | CONFIG_MESON_WATCHDOG=m |
299 | CONFIG_RENESAS_WDT=y | ||
300 | CONFIG_BCM2835_WDT=y | ||
301 | CONFIG_MFD_CROS_EC=y | ||
302 | CONFIG_MFD_CROS_EC_I2C=y | ||
307 | CONFIG_MFD_EXYNOS_LPASS=m | 303 | CONFIG_MFD_EXYNOS_LPASS=m |
304 | CONFIG_MFD_HI655X_PMIC=y | ||
308 | CONFIG_MFD_MAX77620=y | 305 | CONFIG_MFD_MAX77620=y |
309 | CONFIG_MFD_RK808=y | ||
310 | CONFIG_MFD_SPMI_PMIC=y | 306 | CONFIG_MFD_SPMI_PMIC=y |
307 | CONFIG_MFD_RK808=y | ||
311 | CONFIG_MFD_SEC_CORE=y | 308 | CONFIG_MFD_SEC_CORE=y |
312 | CONFIG_MFD_HI655X_PMIC=y | ||
313 | CONFIG_REGULATOR=y | ||
314 | CONFIG_MFD_CROS_EC=y | ||
315 | CONFIG_MFD_CROS_EC_I2C=y | ||
316 | CONFIG_REGULATOR_FIXED_VOLTAGE=y | 309 | CONFIG_REGULATOR_FIXED_VOLTAGE=y |
317 | CONFIG_REGULATOR_GPIO=y | 310 | CONFIG_REGULATOR_GPIO=y |
318 | CONFIG_REGULATOR_HI655X=y | 311 | CONFIG_REGULATOR_HI655X=y |
@@ -345,13 +338,12 @@ CONFIG_DRM_EXYNOS_DSI=y | |||
345 | CONFIG_DRM_EXYNOS_HDMI=y | 338 | CONFIG_DRM_EXYNOS_HDMI=y |
346 | CONFIG_DRM_EXYNOS_MIC=y | 339 | CONFIG_DRM_EXYNOS_MIC=y |
347 | CONFIG_DRM_RCAR_DU=m | 340 | CONFIG_DRM_RCAR_DU=m |
348 | CONFIG_DRM_RCAR_HDMI=y | ||
349 | CONFIG_DRM_RCAR_LVDS=y | 341 | CONFIG_DRM_RCAR_LVDS=y |
350 | CONFIG_DRM_RCAR_VSP=y | 342 | CONFIG_DRM_RCAR_VSP=y |
351 | CONFIG_DRM_TEGRA=m | 343 | CONFIG_DRM_TEGRA=m |
352 | CONFIG_DRM_VC4=m | ||
353 | CONFIG_DRM_PANEL_SIMPLE=m | 344 | CONFIG_DRM_PANEL_SIMPLE=m |
354 | CONFIG_DRM_I2C_ADV7511=m | 345 | CONFIG_DRM_I2C_ADV7511=m |
346 | CONFIG_DRM_VC4=m | ||
355 | CONFIG_DRM_HISI_KIRIN=m | 347 | CONFIG_DRM_HISI_KIRIN=m |
356 | CONFIG_DRM_MESON=m | 348 | CONFIG_DRM_MESON=m |
357 | CONFIG_FB=y | 349 | CONFIG_FB=y |
@@ -366,39 +358,37 @@ CONFIG_SOUND=y | |||
366 | CONFIG_SND=y | 358 | CONFIG_SND=y |
367 | CONFIG_SND_SOC=y | 359 | CONFIG_SND_SOC=y |
368 | CONFIG_SND_BCM2835_SOC_I2S=m | 360 | CONFIG_SND_BCM2835_SOC_I2S=m |
369 | CONFIG_SND_SOC_RCAR=y | ||
370 | CONFIG_SND_SOC_SAMSUNG=y | 361 | CONFIG_SND_SOC_SAMSUNG=y |
362 | CONFIG_SND_SOC_RCAR=y | ||
371 | CONFIG_SND_SOC_AK4613=y | 363 | CONFIG_SND_SOC_AK4613=y |
372 | CONFIG_USB=y | 364 | CONFIG_USB=y |
373 | CONFIG_USB_OTG=y | 365 | CONFIG_USB_OTG=y |
374 | CONFIG_USB_XHCI_HCD=y | 366 | CONFIG_USB_XHCI_HCD=y |
375 | CONFIG_USB_XHCI_PLATFORM=y | ||
376 | CONFIG_USB_XHCI_RCAR=y | ||
377 | CONFIG_USB_EHCI_EXYNOS=y | ||
378 | CONFIG_USB_XHCI_TEGRA=y | 367 | CONFIG_USB_XHCI_TEGRA=y |
379 | CONFIG_USB_EHCI_HCD=y | 368 | CONFIG_USB_EHCI_HCD=y |
380 | CONFIG_USB_EHCI_MSM=y | 369 | CONFIG_USB_EHCI_MSM=y |
370 | CONFIG_USB_EHCI_EXYNOS=y | ||
381 | CONFIG_USB_EHCI_HCD_PLATFORM=y | 371 | CONFIG_USB_EHCI_HCD_PLATFORM=y |
382 | CONFIG_USB_OHCI_EXYNOS=y | ||
383 | CONFIG_USB_OHCI_HCD=y | 372 | CONFIG_USB_OHCI_HCD=y |
373 | CONFIG_USB_OHCI_EXYNOS=y | ||
384 | CONFIG_USB_OHCI_HCD_PLATFORM=y | 374 | CONFIG_USB_OHCI_HCD_PLATFORM=y |
385 | CONFIG_USB_RENESAS_USBHS=m | 375 | CONFIG_USB_RENESAS_USBHS=m |
386 | CONFIG_USB_STORAGE=y | 376 | CONFIG_USB_STORAGE=y |
387 | CONFIG_USB_DWC2=y | ||
388 | CONFIG_USB_DWC3=y | 377 | CONFIG_USB_DWC3=y |
378 | CONFIG_USB_DWC2=y | ||
389 | CONFIG_USB_CHIPIDEA=y | 379 | CONFIG_USB_CHIPIDEA=y |
390 | CONFIG_USB_CHIPIDEA_UDC=y | 380 | CONFIG_USB_CHIPIDEA_UDC=y |
391 | CONFIG_USB_CHIPIDEA_HOST=y | 381 | CONFIG_USB_CHIPIDEA_HOST=y |
392 | CONFIG_USB_ISP1760=y | 382 | CONFIG_USB_ISP1760=y |
393 | CONFIG_USB_HSIC_USB3503=y | 383 | CONFIG_USB_HSIC_USB3503=y |
394 | CONFIG_USB_MSM_OTG=y | 384 | CONFIG_USB_MSM_OTG=y |
385 | CONFIG_USB_QCOM_8X16_PHY=y | ||
395 | CONFIG_USB_ULPI=y | 386 | CONFIG_USB_ULPI=y |
396 | CONFIG_USB_GADGET=y | 387 | CONFIG_USB_GADGET=y |
397 | CONFIG_USB_RENESAS_USBHS_UDC=m | 388 | CONFIG_USB_RENESAS_USBHS_UDC=m |
398 | CONFIG_MMC=y | 389 | CONFIG_MMC=y |
399 | CONFIG_MMC_BLOCK_MINORS=32 | 390 | CONFIG_MMC_BLOCK_MINORS=32 |
400 | CONFIG_MMC_ARMMMCI=y | 391 | CONFIG_MMC_ARMMMCI=y |
401 | CONFIG_MMC_MESON_GX=y | ||
402 | CONFIG_MMC_SDHCI=y | 392 | CONFIG_MMC_SDHCI=y |
403 | CONFIG_MMC_SDHCI_ACPI=y | 393 | CONFIG_MMC_SDHCI_ACPI=y |
404 | CONFIG_MMC_SDHCI_PLTFM=y | 394 | CONFIG_MMC_SDHCI_PLTFM=y |
@@ -406,6 +396,7 @@ CONFIG_MMC_SDHCI_OF_ARASAN=y | |||
406 | CONFIG_MMC_SDHCI_OF_ESDHC=y | 396 | CONFIG_MMC_SDHCI_OF_ESDHC=y |
407 | CONFIG_MMC_SDHCI_CADENCE=y | 397 | CONFIG_MMC_SDHCI_CADENCE=y |
408 | CONFIG_MMC_SDHCI_TEGRA=y | 398 | CONFIG_MMC_SDHCI_TEGRA=y |
399 | CONFIG_MMC_MESON_GX=y | ||
409 | CONFIG_MMC_SDHCI_MSM=y | 400 | CONFIG_MMC_SDHCI_MSM=y |
410 | CONFIG_MMC_SPI=y | 401 | CONFIG_MMC_SPI=y |
411 | CONFIG_MMC_SDHI=y | 402 | CONFIG_MMC_SDHI=y |
@@ -414,32 +405,31 @@ CONFIG_MMC_DW_EXYNOS=y | |||
414 | CONFIG_MMC_DW_K3=y | 405 | CONFIG_MMC_DW_K3=y |
415 | CONFIG_MMC_DW_ROCKCHIP=y | 406 | CONFIG_MMC_DW_ROCKCHIP=y |
416 | CONFIG_MMC_SUNXI=y | 407 | CONFIG_MMC_SUNXI=y |
417 | CONFIG_MMC_SDHCI_XENON=y | ||
418 | CONFIG_MMC_BCM2835=y | 408 | CONFIG_MMC_BCM2835=y |
409 | CONFIG_MMC_SDHCI_XENON=y | ||
419 | CONFIG_NEW_LEDS=y | 410 | CONFIG_NEW_LEDS=y |
420 | CONFIG_LEDS_CLASS=y | 411 | CONFIG_LEDS_CLASS=y |
421 | CONFIG_LEDS_GPIO=y | 412 | CONFIG_LEDS_GPIO=y |
422 | CONFIG_LEDS_PWM=y | 413 | CONFIG_LEDS_PWM=y |
423 | CONFIG_LEDS_SYSCON=y | 414 | CONFIG_LEDS_SYSCON=y |
424 | CONFIG_LEDS_TRIGGERS=y | ||
425 | CONFIG_LEDS_TRIGGER_DEFAULT_ON=y | ||
426 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | 415 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y |
427 | CONFIG_LEDS_TRIGGER_CPU=y | 416 | CONFIG_LEDS_TRIGGER_CPU=y |
417 | CONFIG_LEDS_TRIGGER_DEFAULT_ON=y | ||
428 | CONFIG_RTC_CLASS=y | 418 | CONFIG_RTC_CLASS=y |
429 | CONFIG_RTC_DRV_MAX77686=y | 419 | CONFIG_RTC_DRV_MAX77686=y |
420 | CONFIG_RTC_DRV_RK808=m | ||
430 | CONFIG_RTC_DRV_S5M=y | 421 | CONFIG_RTC_DRV_S5M=y |
431 | CONFIG_RTC_DRV_DS3232=y | 422 | CONFIG_RTC_DRV_DS3232=y |
432 | CONFIG_RTC_DRV_EFI=y | 423 | CONFIG_RTC_DRV_EFI=y |
424 | CONFIG_RTC_DRV_S3C=y | ||
433 | CONFIG_RTC_DRV_PL031=y | 425 | CONFIG_RTC_DRV_PL031=y |
434 | CONFIG_RTC_DRV_SUN6I=y | 426 | CONFIG_RTC_DRV_SUN6I=y |
435 | CONFIG_RTC_DRV_RK808=m | ||
436 | CONFIG_RTC_DRV_TEGRA=y | 427 | CONFIG_RTC_DRV_TEGRA=y |
437 | CONFIG_RTC_DRV_XGENE=y | 428 | CONFIG_RTC_DRV_XGENE=y |
438 | CONFIG_RTC_DRV_S3C=y | ||
439 | CONFIG_DMADEVICES=y | 429 | CONFIG_DMADEVICES=y |
430 | CONFIG_DMA_BCM2835=m | ||
440 | CONFIG_MV_XOR_V2=y | 431 | CONFIG_MV_XOR_V2=y |
441 | CONFIG_PL330_DMA=y | 432 | CONFIG_PL330_DMA=y |
442 | CONFIG_DMA_BCM2835=m | ||
443 | CONFIG_TEGRA20_APB_DMA=y | 433 | CONFIG_TEGRA20_APB_DMA=y |
444 | CONFIG_QCOM_BAM_DMA=y | 434 | CONFIG_QCOM_BAM_DMA=y |
445 | CONFIG_QCOM_HIDMA_MGMT=y | 435 | CONFIG_QCOM_HIDMA_MGMT=y |
@@ -452,52 +442,53 @@ CONFIG_VIRTIO_BALLOON=y | |||
452 | CONFIG_VIRTIO_MMIO=y | 442 | CONFIG_VIRTIO_MMIO=y |
453 | CONFIG_XEN_GNTDEV=y | 443 | CONFIG_XEN_GNTDEV=y |
454 | CONFIG_XEN_GRANT_DEV_ALLOC=y | 444 | CONFIG_XEN_GRANT_DEV_ALLOC=y |
445 | CONFIG_COMMON_CLK_RK808=y | ||
455 | CONFIG_COMMON_CLK_SCPI=y | 446 | CONFIG_COMMON_CLK_SCPI=y |
456 | CONFIG_COMMON_CLK_CS2000_CP=y | 447 | CONFIG_COMMON_CLK_CS2000_CP=y |
457 | CONFIG_COMMON_CLK_S2MPS11=y | 448 | CONFIG_COMMON_CLK_S2MPS11=y |
458 | CONFIG_COMMON_CLK_PWM=y | ||
459 | CONFIG_COMMON_CLK_RK808=y | ||
460 | CONFIG_CLK_QORIQ=y | 449 | CONFIG_CLK_QORIQ=y |
450 | CONFIG_COMMON_CLK_PWM=y | ||
461 | CONFIG_COMMON_CLK_QCOM=y | 451 | CONFIG_COMMON_CLK_QCOM=y |
452 | CONFIG_QCOM_CLK_SMD_RPM=y | ||
462 | CONFIG_MSM_GCC_8916=y | 453 | CONFIG_MSM_GCC_8916=y |
463 | CONFIG_MSM_GCC_8994=y | 454 | CONFIG_MSM_GCC_8994=y |
464 | CONFIG_MSM_MMCC_8996=y | 455 | CONFIG_MSM_MMCC_8996=y |
465 | CONFIG_HWSPINLOCK_QCOM=y | 456 | CONFIG_HWSPINLOCK_QCOM=y |
466 | CONFIG_MAILBOX=y | ||
467 | CONFIG_ARM_MHU=y | 457 | CONFIG_ARM_MHU=y |
468 | CONFIG_PLATFORM_MHU=y | 458 | CONFIG_PLATFORM_MHU=y |
469 | CONFIG_BCM2835_MBOX=y | 459 | CONFIG_BCM2835_MBOX=y |
470 | CONFIG_HI6220_MBOX=y | 460 | CONFIG_HI6220_MBOX=y |
471 | CONFIG_ARM_SMMU=y | 461 | CONFIG_ARM_SMMU=y |
472 | CONFIG_ARM_SMMU_V3=y | 462 | CONFIG_ARM_SMMU_V3=y |
463 | CONFIG_RPMSG_QCOM_SMD=y | ||
473 | CONFIG_RASPBERRYPI_POWER=y | 464 | CONFIG_RASPBERRYPI_POWER=y |
474 | CONFIG_QCOM_SMEM=y | 465 | CONFIG_QCOM_SMEM=y |
475 | CONFIG_QCOM_SMD=y | ||
476 | CONFIG_QCOM_SMD_RPM=y | 466 | CONFIG_QCOM_SMD_RPM=y |
467 | CONFIG_QCOM_SMP2P=y | ||
468 | CONFIG_QCOM_SMSM=y | ||
477 | CONFIG_ROCKCHIP_PM_DOMAINS=y | 469 | CONFIG_ROCKCHIP_PM_DOMAINS=y |
478 | CONFIG_ARCH_TEGRA_132_SOC=y | 470 | CONFIG_ARCH_TEGRA_132_SOC=y |
479 | CONFIG_ARCH_TEGRA_210_SOC=y | 471 | CONFIG_ARCH_TEGRA_210_SOC=y |
480 | CONFIG_ARCH_TEGRA_186_SOC=y | 472 | CONFIG_ARCH_TEGRA_186_SOC=y |
481 | CONFIG_EXTCON_USB_GPIO=y | 473 | CONFIG_EXTCON_USB_GPIO=y |
474 | CONFIG_IIO=y | ||
475 | CONFIG_EXYNOS_ADC=y | ||
482 | CONFIG_PWM=y | 476 | CONFIG_PWM=y |
483 | CONFIG_PWM_BCM2835=m | 477 | CONFIG_PWM_BCM2835=m |
478 | CONFIG_PWM_MESON=m | ||
484 | CONFIG_PWM_ROCKCHIP=y | 479 | CONFIG_PWM_ROCKCHIP=y |
480 | CONFIG_PWM_SAMSUNG=y | ||
485 | CONFIG_PWM_TEGRA=m | 481 | CONFIG_PWM_TEGRA=m |
486 | CONFIG_PWM_MESON=m | ||
487 | CONFIG_COMMON_RESET_HI6220=y | ||
488 | CONFIG_PHY_RCAR_GEN3_USB2=y | 482 | CONFIG_PHY_RCAR_GEN3_USB2=y |
489 | CONFIG_PHY_HI6220_USB=y | 483 | CONFIG_PHY_HI6220_USB=y |
484 | CONFIG_PHY_SUN4I_USB=y | ||
490 | CONFIG_PHY_ROCKCHIP_INNO_USB2=y | 485 | CONFIG_PHY_ROCKCHIP_INNO_USB2=y |
491 | CONFIG_PHY_ROCKCHIP_EMMC=y | 486 | CONFIG_PHY_ROCKCHIP_EMMC=y |
492 | CONFIG_PHY_SUN4I_USB=y | ||
493 | CONFIG_PHY_XGENE=y | 487 | CONFIG_PHY_XGENE=y |
494 | CONFIG_PHY_TEGRA_XUSB=y | 488 | CONFIG_PHY_TEGRA_XUSB=y |
495 | CONFIG_ARM_SCPI_PROTOCOL=y | 489 | CONFIG_ARM_SCPI_PROTOCOL=y |
496 | CONFIG_ACPI=y | ||
497 | CONFIG_IIO=y | ||
498 | CONFIG_EXYNOS_ADC=y | ||
499 | CONFIG_PWM_SAMSUNG=y | ||
500 | CONFIG_RASPBERRYPI_FIRMWARE=y | 490 | CONFIG_RASPBERRYPI_FIRMWARE=y |
491 | CONFIG_ACPI=y | ||
501 | CONFIG_EXT2_FS=y | 492 | CONFIG_EXT2_FS=y |
502 | CONFIG_EXT3_FS=y | 493 | CONFIG_EXT3_FS=y |
503 | CONFIG_EXT4_FS_POSIX_ACL=y | 494 | CONFIG_EXT4_FS_POSIX_ACL=y |
@@ -511,7 +502,6 @@ CONFIG_FUSE_FS=m | |||
511 | CONFIG_CUSE=m | 502 | CONFIG_CUSE=m |
512 | CONFIG_OVERLAY_FS=m | 503 | CONFIG_OVERLAY_FS=m |
513 | CONFIG_VFAT_FS=y | 504 | CONFIG_VFAT_FS=y |
514 | CONFIG_TMPFS=y | ||
515 | CONFIG_HUGETLBFS=y | 505 | CONFIG_HUGETLBFS=y |
516 | CONFIG_CONFIGFS_FS=y | 506 | CONFIG_CONFIGFS_FS=y |
517 | CONFIG_EFIVAR_FS=y | 507 | CONFIG_EFIVAR_FS=y |
@@ -539,11 +529,9 @@ CONFIG_MEMTEST=y | |||
539 | CONFIG_SECURITY=y | 529 | CONFIG_SECURITY=y |
540 | CONFIG_CRYPTO_ECHAINIV=y | 530 | CONFIG_CRYPTO_ECHAINIV=y |
541 | CONFIG_CRYPTO_ANSI_CPRNG=y | 531 | CONFIG_CRYPTO_ANSI_CPRNG=y |
542 | CONFIG_CRYPTO_DEV_SAFEXCEL=m | ||
543 | CONFIG_ARM64_CRYPTO=y | 532 | CONFIG_ARM64_CRYPTO=y |
544 | CONFIG_CRYPTO_SHA1_ARM64_CE=y | 533 | CONFIG_CRYPTO_SHA1_ARM64_CE=y |
545 | CONFIG_CRYPTO_SHA2_ARM64_CE=y | 534 | CONFIG_CRYPTO_SHA2_ARM64_CE=y |
546 | CONFIG_CRYPTO_GHASH_ARM64_CE=y | 535 | CONFIG_CRYPTO_GHASH_ARM64_CE=y |
547 | CONFIG_CRYPTO_AES_ARM64_CE_CCM=y | 536 | CONFIG_CRYPTO_AES_ARM64_CE_CCM=y |
548 | CONFIG_CRYPTO_AES_ARM64_CE_BLK=y | 537 | CONFIG_CRYPTO_AES_ARM64_CE_BLK=y |
549 | # CONFIG_CRYPTO_AES_ARM64_NEON_BLK is not set | ||
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index f819fdcff1ac..f5a2d09afb38 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h | |||
@@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \ | |||
264 | " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \ | 264 | " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \ |
265 | " cbnz %w[tmp], 1b\n" \ | 265 | " cbnz %w[tmp], 1b\n" \ |
266 | " " #mb "\n" \ | 266 | " " #mb "\n" \ |
267 | " mov %" #w "[oldval], %" #w "[old]\n" \ | ||
268 | "2:" \ | 267 | "2:" \ |
269 | : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ | 268 | : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ |
270 | [v] "+Q" (*(unsigned long *)ptr) \ | 269 | [v] "+Q" (*(unsigned long *)ptr) \ |
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index e7f84a7b4465..428ee1f2468c 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h | |||
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities { | |||
115 | 115 | ||
116 | extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); | 116 | extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); |
117 | extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; | 117 | extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; |
118 | extern struct static_key_false arm64_const_caps_ready; | ||
118 | 119 | ||
119 | bool this_cpu_has_cap(unsigned int cap); | 120 | bool this_cpu_has_cap(unsigned int cap); |
120 | 121 | ||
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num) | |||
124 | } | 125 | } |
125 | 126 | ||
126 | /* System capability check for constant caps */ | 127 | /* System capability check for constant caps */ |
127 | static inline bool cpus_have_const_cap(int num) | 128 | static inline bool __cpus_have_const_cap(int num) |
128 | { | 129 | { |
129 | if (num >= ARM64_NCAPS) | 130 | if (num >= ARM64_NCAPS) |
130 | return false; | 131 | return false; |
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num) | |||
138 | return test_bit(num, cpu_hwcaps); | 139 | return test_bit(num, cpu_hwcaps); |
139 | } | 140 | } |
140 | 141 | ||
142 | static inline bool cpus_have_const_cap(int num) | ||
143 | { | ||
144 | if (static_branch_likely(&arm64_const_caps_ready)) | ||
145 | return __cpus_have_const_cap(num); | ||
146 | else | ||
147 | return cpus_have_cap(num); | ||
148 | } | ||
149 | |||
141 | static inline void cpus_set_cap(unsigned int num) | 150 | static inline void cpus_set_cap(unsigned int num) |
142 | { | 151 | { |
143 | if (num >= ARM64_NCAPS) { | 152 | if (num >= ARM64_NCAPS) { |
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num) | |||
145 | num, ARM64_NCAPS); | 154 | num, ARM64_NCAPS); |
146 | } else { | 155 | } else { |
147 | __set_bit(num, cpu_hwcaps); | 156 | __set_bit(num, cpu_hwcaps); |
148 | static_branch_enable(&cpu_hwcap_keys[num]); | ||
149 | } | 157 | } |
150 | } | 158 | } |
151 | 159 | ||
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5e19165c5fa8..1f252a95bc02 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/kvm_types.h> | 26 | #include <linux/kvm_types.h> |
27 | #include <asm/cpufeature.h> | ||
27 | #include <asm/kvm.h> | 28 | #include <asm/kvm.h> |
28 | #include <asm/kvm_asm.h> | 29 | #include <asm/kvm_asm.h> |
29 | #include <asm/kvm_mmio.h> | 30 | #include <asm/kvm_mmio.h> |
@@ -355,9 +356,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, | |||
355 | unsigned long vector_ptr) | 356 | unsigned long vector_ptr) |
356 | { | 357 | { |
357 | /* | 358 | /* |
358 | * Call initialization code, and switch to the full blown | 359 | * Call initialization code, and switch to the full blown HYP code. |
359 | * HYP code. | 360 | * If the cpucaps haven't been finalized yet, something has gone very |
361 | * wrong, and hyp will crash and burn when it uses any | ||
362 | * cpus_have_const_cap() wrapper. | ||
360 | */ | 363 | */ |
364 | BUG_ON(!static_branch_likely(&arm64_const_caps_ready)); | ||
361 | __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr); | 365 | __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr); |
362 | } | 366 | } |
363 | 367 | ||
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 94b8f7fc3310..817ce3365e20 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, | |||
985 | */ | 985 | */ |
986 | void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) | 986 | void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) |
987 | { | 987 | { |
988 | for (; caps->matches; caps++) | 988 | for (; caps->matches; caps++) { |
989 | if (caps->enable && cpus_have_cap(caps->capability)) | 989 | unsigned int num = caps->capability; |
990 | |||
991 | if (!cpus_have_cap(num)) | ||
992 | continue; | ||
993 | |||
994 | /* Ensure cpus_have_const_cap(num) works */ | ||
995 | static_branch_enable(&cpu_hwcap_keys[num]); | ||
996 | |||
997 | if (caps->enable) { | ||
990 | /* | 998 | /* |
991 | * Use stop_machine() as it schedules the work allowing | 999 | * Use stop_machine() as it schedules the work allowing |
992 | * us to modify PSTATE, instead of on_each_cpu() which | 1000 | * us to modify PSTATE, instead of on_each_cpu() which |
@@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) | |||
994 | * we return. | 1002 | * we return. |
995 | */ | 1003 | */ |
996 | stop_machine(caps->enable, NULL, cpu_online_mask); | 1004 | stop_machine(caps->enable, NULL, cpu_online_mask); |
1005 | } | ||
1006 | } | ||
997 | } | 1007 | } |
998 | 1008 | ||
999 | /* | 1009 | /* |
@@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void) | |||
1096 | enable_cpu_capabilities(arm64_features); | 1106 | enable_cpu_capabilities(arm64_features); |
1097 | } | 1107 | } |
1098 | 1108 | ||
1109 | DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); | ||
1110 | EXPORT_SYMBOL(arm64_const_caps_ready); | ||
1111 | |||
1112 | static void __init mark_const_caps_ready(void) | ||
1113 | { | ||
1114 | static_branch_enable(&arm64_const_caps_ready); | ||
1115 | } | ||
1116 | |||
1099 | /* | 1117 | /* |
1100 | * Check if the current CPU has a given feature capability. | 1118 | * Check if the current CPU has a given feature capability. |
1101 | * Should be called from non-preemptible context. | 1119 | * Should be called from non-preemptible context. |
@@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void) | |||
1131 | /* Set the CPU feature capabilies */ | 1149 | /* Set the CPU feature capabilies */ |
1132 | setup_feature_capabilities(); | 1150 | setup_feature_capabilities(); |
1133 | enable_errata_workarounds(); | 1151 | enable_errata_workarounds(); |
1152 | mark_const_caps_ready(); | ||
1134 | setup_elf_hwcaps(arm64_elf_hwcaps); | 1153 | setup_elf_hwcaps(arm64_elf_hwcaps); |
1135 | 1154 | ||
1136 | if (system_supports_32bit_el0()) | 1155 | if (system_supports_32bit_el0()) |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index bcc79471b38e..83a1b1ad189f 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, | |||
877 | 877 | ||
878 | if (attr->exclude_idle) | 878 | if (attr->exclude_idle) |
879 | return -EPERM; | 879 | return -EPERM; |
880 | if (is_kernel_in_hyp_mode() && | 880 | |
881 | attr->exclude_kernel != attr->exclude_hv) | 881 | /* |
882 | return -EINVAL; | 882 | * If we're running in hyp mode, then we *are* the hypervisor. |
883 | * Therefore we ignore exclude_hv in this configuration, since | ||
884 | * there's no hypervisor to sample anyway. This is consistent | ||
885 | * with other architectures (x86 and Power). | ||
886 | */ | ||
887 | if (is_kernel_in_hyp_mode()) { | ||
888 | if (!attr->exclude_kernel) | ||
889 | config_base |= ARMV8_PMU_INCLUDE_EL2; | ||
890 | } else { | ||
891 | if (attr->exclude_kernel) | ||
892 | config_base |= ARMV8_PMU_EXCLUDE_EL1; | ||
893 | if (!attr->exclude_hv) | ||
894 | config_base |= ARMV8_PMU_INCLUDE_EL2; | ||
895 | } | ||
883 | if (attr->exclude_user) | 896 | if (attr->exclude_user) |
884 | config_base |= ARMV8_PMU_EXCLUDE_EL0; | 897 | config_base |= ARMV8_PMU_EXCLUDE_EL0; |
885 | if (!is_kernel_in_hyp_mode() && attr->exclude_kernel) | ||
886 | config_base |= ARMV8_PMU_EXCLUDE_EL1; | ||
887 | if (!attr->exclude_hv) | ||
888 | config_base |= ARMV8_PMU_INCLUDE_EL2; | ||
889 | 898 | ||
890 | /* | 899 | /* |
891 | * Install the filter into config_base as this is used to | 900 | * Install the filter into config_base as this is used to |
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index aaf42ae8d8c3..14c4e3b14bcb 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile | |||
@@ -2,6 +2,8 @@ | |||
2 | # Makefile for Kernel-based Virtual Machine module, HYP part | 2 | # Makefile for Kernel-based Virtual Machine module, HYP part |
3 | # | 3 | # |
4 | 4 | ||
5 | ccflags-y += -fno-stack-protector | ||
6 | |||
5 | KVM=../../../../virt/kvm | 7 | KVM=../../../../virt/kvm |
6 | 8 | ||
7 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o | 9 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o |
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index c6e53580aefe..71f930501ade 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c | |||
@@ -253,8 +253,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) | |||
253 | */ | 253 | */ |
254 | off = offsetof(struct bpf_array, ptrs); | 254 | off = offsetof(struct bpf_array, ptrs); |
255 | emit_a64_mov_i64(tmp, off, ctx); | 255 | emit_a64_mov_i64(tmp, off, ctx); |
256 | emit(A64_LDR64(tmp, r2, tmp), ctx); | 256 | emit(A64_ADD(1, tmp, r2, tmp), ctx); |
257 | emit(A64_LDR64(prg, tmp, r3), ctx); | 257 | emit(A64_LSL(1, prg, r3, 3), ctx); |
258 | emit(A64_LDR64(prg, tmp, prg), ctx); | ||
258 | emit(A64_CBZ(1, prg, jmp_offset), ctx); | 259 | emit(A64_CBZ(1, prg, jmp_offset), ctx); |
259 | 260 | ||
260 | /* goto *(prog->bpf_func + prologue_size); */ | 261 | /* goto *(prog->bpf_func + prologue_size); */ |
diff --git a/arch/cris/boot/dts/include/dt-bindings b/arch/cris/boot/dts/include/dt-bindings deleted file mode 120000 index 08c00e4972fa..000000000000 --- a/arch/cris/boot/dts/include/dt-bindings +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../../../../../include/dt-bindings \ No newline at end of file | ||
diff --git a/arch/metag/boot/dts/include/dt-bindings b/arch/metag/boot/dts/include/dt-bindings deleted file mode 120000 index 08c00e4972fa..000000000000 --- a/arch/metag/boot/dts/include/dt-bindings +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../../../../../include/dt-bindings \ No newline at end of file | ||
diff --git a/arch/mips/boot/dts/include/dt-bindings b/arch/mips/boot/dts/include/dt-bindings deleted file mode 120000 index 08c00e4972fa..000000000000 --- a/arch/mips/boot/dts/include/dt-bindings +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../../../../../include/dt-bindings \ No newline at end of file | ||
diff --git a/arch/powerpc/boot/dts/include/dt-bindings b/arch/powerpc/boot/dts/include/dt-bindings deleted file mode 120000 index 08c00e4972fa..000000000000 --- a/arch/powerpc/boot/dts/include/dt-bindings +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../../../../../include/dt-bindings \ No newline at end of file | ||
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index 53885512b8d3..6c0132c7212f 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h | |||
@@ -14,6 +14,10 @@ | |||
14 | #include <asm-generic/module.h> | 14 | #include <asm-generic/module.h> |
15 | 15 | ||
16 | 16 | ||
17 | #ifdef CC_USING_MPROFILE_KERNEL | ||
18 | #define MODULE_ARCH_VERMAGIC "mprofile-kernel" | ||
19 | #endif | ||
20 | |||
17 | #ifndef __powerpc64__ | 21 | #ifndef __powerpc64__ |
18 | /* | 22 | /* |
19 | * Thanks to Paul M for explaining this. | 23 | * Thanks to Paul M for explaining this. |
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 2a32483c7b6c..8da5d4c1cab2 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h | |||
@@ -132,7 +132,19 @@ extern long long virt_phys_offset; | |||
132 | #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) | 132 | #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) |
133 | #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) | 133 | #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) |
134 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 134 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
135 | |||
136 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
137 | /* | ||
138 | * On hash the vmalloc and other regions alias to the kernel region when passed | ||
139 | * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can | ||
140 | * return true for some vmalloc addresses, which is incorrect. So explicitly | ||
141 | * check that the address is in the kernel region. | ||
142 | */ | ||
143 | #define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \ | ||
144 | pfn_valid(virt_to_pfn(kaddr))) | ||
145 | #else | ||
135 | #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) | 146 | #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) |
147 | #endif | ||
136 | 148 | ||
137 | /* | 149 | /* |
138 | * On Book-E parts we need __va to parse the device tree and we can't | 150 | * On Book-E parts we need __va to parse the device tree and we can't |
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h index 3e7ce86d5c13..4d877144f377 100644 --- a/arch/powerpc/include/uapi/asm/cputable.h +++ b/arch/powerpc/include/uapi/asm/cputable.h | |||
@@ -46,6 +46,8 @@ | |||
46 | #define PPC_FEATURE2_HTM_NOSC 0x01000000 | 46 | #define PPC_FEATURE2_HTM_NOSC 0x01000000 |
47 | #define PPC_FEATURE2_ARCH_3_00 0x00800000 /* ISA 3.00 */ | 47 | #define PPC_FEATURE2_ARCH_3_00 0x00800000 /* ISA 3.00 */ |
48 | #define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* VSX IEEE Binary Float 128-bit */ | 48 | #define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* VSX IEEE Binary Float 128-bit */ |
49 | #define PPC_FEATURE2_DARN 0x00200000 /* darn random number insn */ | ||
50 | #define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */ | ||
49 | 51 | ||
50 | /* | 52 | /* |
51 | * IMPORTANT! | 53 | * IMPORTANT! |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 9b3e88b1a9c8..6f849832a669 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -124,7 +124,8 @@ extern void __restore_cpu_e6500(void); | |||
124 | #define COMMON_USER_POWER9 COMMON_USER_POWER8 | 124 | #define COMMON_USER_POWER9 COMMON_USER_POWER8 |
125 | #define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \ | 125 | #define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \ |
126 | PPC_FEATURE2_ARCH_3_00 | \ | 126 | PPC_FEATURE2_ARCH_3_00 | \ |
127 | PPC_FEATURE2_HAS_IEEE128) | 127 | PPC_FEATURE2_HAS_IEEE128 | \ |
128 | PPC_FEATURE2_DARN ) | ||
128 | 129 | ||
129 | #ifdef CONFIG_PPC_BOOK3E_64 | 130 | #ifdef CONFIG_PPC_BOOK3E_64 |
130 | #define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE) | 131 | #define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE) |
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 07d4e0ad60db..4898d676dcae 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S | |||
@@ -416,7 +416,7 @@ power9_dd1_recover_paca: | |||
416 | * which needs to be restored from the stack. | 416 | * which needs to be restored from the stack. |
417 | */ | 417 | */ |
418 | li r3, 1 | 418 | li r3, 1 |
419 | stb r0,PACA_NAPSTATELOST(r13) | 419 | stb r3,PACA_NAPSTATELOST(r13) |
420 | blr | 420 | blr |
421 | 421 | ||
422 | /* | 422 | /* |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 160ae0fa7d0d..fc4343514bed 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c | |||
@@ -305,16 +305,17 @@ int kprobe_handler(struct pt_regs *regs) | |||
305 | save_previous_kprobe(kcb); | 305 | save_previous_kprobe(kcb); |
306 | set_current_kprobe(p, regs, kcb); | 306 | set_current_kprobe(p, regs, kcb); |
307 | kprobes_inc_nmissed_count(p); | 307 | kprobes_inc_nmissed_count(p); |
308 | prepare_singlestep(p, regs); | ||
309 | kcb->kprobe_status = KPROBE_REENTER; | 308 | kcb->kprobe_status = KPROBE_REENTER; |
310 | if (p->ainsn.boostable >= 0) { | 309 | if (p->ainsn.boostable >= 0) { |
311 | ret = try_to_emulate(p, regs); | 310 | ret = try_to_emulate(p, regs); |
312 | 311 | ||
313 | if (ret > 0) { | 312 | if (ret > 0) { |
314 | restore_previous_kprobe(kcb); | 313 | restore_previous_kprobe(kcb); |
314 | preempt_enable_no_resched(); | ||
315 | return 1; | 315 | return 1; |
316 | } | 316 | } |
317 | } | 317 | } |
318 | prepare_singlestep(p, regs); | ||
318 | return 1; | 319 | return 1; |
319 | } else { | 320 | } else { |
320 | if (*addr != BREAKPOINT_INSTRUCTION) { | 321 | if (*addr != BREAKPOINT_INSTRUCTION) { |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index d645da302bf2..baae104b16c7 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -864,6 +864,25 @@ static void tm_reclaim_thread(struct thread_struct *thr, | |||
864 | if (!MSR_TM_SUSPENDED(mfmsr())) | 864 | if (!MSR_TM_SUSPENDED(mfmsr())) |
865 | return; | 865 | return; |
866 | 866 | ||
867 | /* | ||
868 | * If we are in a transaction and FP is off then we can't have | ||
869 | * used FP inside that transaction. Hence the checkpointed | ||
870 | * state is the same as the live state. We need to copy the | ||
871 | * live state to the checkpointed state so that when the | ||
872 | * transaction is restored, the checkpointed state is correct | ||
873 | * and the aborted transaction sees the correct state. We use | ||
874 | * ckpt_regs.msr here as that's what tm_reclaim will use to | ||
875 | * determine if it's going to write the checkpointed state or | ||
876 | * not. So either this will write the checkpointed registers, | ||
877 | * or reclaim will. Similarly for VMX. | ||
878 | */ | ||
879 | if ((thr->ckpt_regs.msr & MSR_FP) == 0) | ||
880 | memcpy(&thr->ckfp_state, &thr->fp_state, | ||
881 | sizeof(struct thread_fp_state)); | ||
882 | if ((thr->ckpt_regs.msr & MSR_VEC) == 0) | ||
883 | memcpy(&thr->ckvr_state, &thr->vr_state, | ||
884 | sizeof(struct thread_vr_state)); | ||
885 | |||
867 | giveup_all(container_of(thr, struct task_struct, thread)); | 886 | giveup_all(container_of(thr, struct task_struct, thread)); |
868 | 887 | ||
869 | tm_reclaim(thr, thr->ckpt_regs.msr, cause); | 888 | tm_reclaim(thr, thr->ckpt_regs.msr, cause); |
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 40c4887c27b6..f83056297441 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -161,7 +161,9 @@ static struct ibm_pa_feature { | |||
161 | { .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL }, | 161 | { .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL }, |
162 | { .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE }, | 162 | { .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE }, |
163 | { .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE }, | 163 | { .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE }, |
164 | #ifdef CONFIG_PPC_RADIX_MMU | ||
164 | { .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX }, | 165 | { .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX }, |
166 | #endif | ||
165 | { .pabyte = 1, .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN }, | 167 | { .pabyte = 1, .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN }, |
166 | { .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE, | 168 | { .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE, |
167 | .cpu_user_ftrs = PPC_FEATURE_TRUE_LE }, | 169 | .cpu_user_ftrs = PPC_FEATURE_TRUE_LE }, |
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 24de532c1736..0c52cb5d43f5 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig | |||
@@ -67,7 +67,7 @@ config KVM_BOOK3S_64 | |||
67 | select KVM_BOOK3S_64_HANDLER | 67 | select KVM_BOOK3S_64_HANDLER |
68 | select KVM | 68 | select KVM |
69 | select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE | 69 | select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE |
70 | select SPAPR_TCE_IOMMU if IOMMU_SUPPORT | 70 | select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV) |
71 | ---help--- | 71 | ---help--- |
72 | Support running unmodified book3s_64 and book3s_32 guest kernels | 72 | Support running unmodified book3s_64 and book3s_32 guest kernels |
73 | in virtual machines on book3s_64 host processors. | 73 | in virtual machines on book3s_64 host processors. |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index d91a2604c496..381a6ec0ff3b 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile | |||
@@ -46,7 +46,7 @@ kvm-e500mc-objs := \ | |||
46 | e500_emulate.o | 46 | e500_emulate.o |
47 | kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) | 47 | kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) |
48 | 48 | ||
49 | kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \ | 49 | kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \ |
50 | book3s_64_vio_hv.o | 50 | book3s_64_vio_hv.o |
51 | 51 | ||
52 | kvm-pr-y := \ | 52 | kvm-pr-y := \ |
@@ -90,11 +90,11 @@ kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \ | |||
90 | book3s_xics.o | 90 | book3s_xics.o |
91 | 91 | ||
92 | kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o | 92 | kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o |
93 | kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o | ||
93 | 94 | ||
94 | kvm-book3s_64-module-objs := \ | 95 | kvm-book3s_64-module-objs := \ |
95 | $(common-objs-y) \ | 96 | $(common-objs-y) \ |
96 | book3s.o \ | 97 | book3s.o \ |
97 | book3s_64_vio.o \ | ||
98 | book3s_rtas.o \ | 98 | book3s_rtas.o \ |
99 | $(kvm-book3s_64-objs-y) | 99 | $(kvm-book3s_64-objs-y) |
100 | 100 | ||
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index eda0a8f6fae8..3adfd2f5301c 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c | |||
@@ -301,6 +301,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
301 | /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ | 301 | /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ |
302 | /* liobn, ioba, tce); */ | 302 | /* liobn, ioba, tce); */ |
303 | 303 | ||
304 | /* For radix, we might be in virtual mode, so punt */ | ||
305 | if (kvm_is_radix(vcpu->kvm)) | ||
306 | return H_TOO_HARD; | ||
307 | |||
304 | stt = kvmppc_find_table(vcpu->kvm, liobn); | 308 | stt = kvmppc_find_table(vcpu->kvm, liobn); |
305 | if (!stt) | 309 | if (!stt) |
306 | return H_TOO_HARD; | 310 | return H_TOO_HARD; |
@@ -381,6 +385,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, | |||
381 | bool prereg = false; | 385 | bool prereg = false; |
382 | struct kvmppc_spapr_tce_iommu_table *stit; | 386 | struct kvmppc_spapr_tce_iommu_table *stit; |
383 | 387 | ||
388 | /* For radix, we might be in virtual mode, so punt */ | ||
389 | if (kvm_is_radix(vcpu->kvm)) | ||
390 | return H_TOO_HARD; | ||
391 | |||
384 | stt = kvmppc_find_table(vcpu->kvm, liobn); | 392 | stt = kvmppc_find_table(vcpu->kvm, liobn); |
385 | if (!stt) | 393 | if (!stt) |
386 | return H_TOO_HARD; | 394 | return H_TOO_HARD; |
@@ -491,6 +499,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, | |||
491 | long i, ret; | 499 | long i, ret; |
492 | struct kvmppc_spapr_tce_iommu_table *stit; | 500 | struct kvmppc_spapr_tce_iommu_table *stit; |
493 | 501 | ||
502 | /* For radix, we might be in virtual mode, so punt */ | ||
503 | if (kvm_is_radix(vcpu->kvm)) | ||
504 | return H_TOO_HARD; | ||
505 | |||
494 | stt = kvmppc_find_table(vcpu->kvm, liobn); | 506 | stt = kvmppc_find_table(vcpu->kvm, liobn); |
495 | if (!stt) | 507 | if (!stt) |
496 | return H_TOO_HARD; | 508 | return H_TOO_HARD; |
@@ -527,6 +539,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, | |||
527 | return H_SUCCESS; | 539 | return H_SUCCESS; |
528 | } | 540 | } |
529 | 541 | ||
542 | /* This can be called in either virtual mode or real mode */ | ||
530 | long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | 543 | long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, |
531 | unsigned long ioba) | 544 | unsigned long ioba) |
532 | { | 545 | { |
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 88a65923c649..ee4c2558c305 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c | |||
@@ -207,7 +207,14 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present); | |||
207 | 207 | ||
208 | long kvmppc_h_random(struct kvm_vcpu *vcpu) | 208 | long kvmppc_h_random(struct kvm_vcpu *vcpu) |
209 | { | 209 | { |
210 | if (powernv_get_random_real_mode(&vcpu->arch.gpr[4])) | 210 | int r; |
211 | |||
212 | /* Only need to do the expensive mfmsr() on radix */ | ||
213 | if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR)) | ||
214 | r = powernv_get_random_long(&vcpu->arch.gpr[4]); | ||
215 | else | ||
216 | r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]); | ||
217 | if (r) | ||
211 | return H_SUCCESS; | 218 | return H_SUCCESS; |
212 | 219 | ||
213 | return H_HARDWARE; | 220 | return H_HARDWARE; |
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index bcbeeb62dd13..8a4205fa774f 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c | |||
@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) | |||
50 | pteg_addr = get_pteg_addr(vcpu, pte_index); | 50 | pteg_addr = get_pteg_addr(vcpu, pte_index); |
51 | 51 | ||
52 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | 52 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
53 | copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); | 53 | ret = H_FUNCTION; |
54 | if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg))) | ||
55 | goto done; | ||
54 | hpte = pteg; | 56 | hpte = pteg; |
55 | 57 | ||
56 | ret = H_PTEG_FULL; | 58 | ret = H_PTEG_FULL; |
@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) | |||
71 | hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); | 73 | hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); |
72 | hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); | 74 | hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); |
73 | pteg_addr += i * HPTE_SIZE; | 75 | pteg_addr += i * HPTE_SIZE; |
74 | copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); | 76 | ret = H_FUNCTION; |
77 | if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE)) | ||
78 | goto done; | ||
75 | kvmppc_set_gpr(vcpu, 4, pte_index | i); | 79 | kvmppc_set_gpr(vcpu, 4, pte_index | i); |
76 | ret = H_SUCCESS; | 80 | ret = H_SUCCESS; |
77 | 81 | ||
@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) | |||
93 | 97 | ||
94 | pteg = get_pteg_addr(vcpu, pte_index); | 98 | pteg = get_pteg_addr(vcpu, pte_index); |
95 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | 99 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
96 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 100 | ret = H_FUNCTION; |
101 | if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) | ||
102 | goto done; | ||
97 | pte[0] = be64_to_cpu((__force __be64)pte[0]); | 103 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
98 | pte[1] = be64_to_cpu((__force __be64)pte[1]); | 104 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
99 | 105 | ||
@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) | |||
103 | ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) | 109 | ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) |
104 | goto done; | 110 | goto done; |
105 | 111 | ||
106 | copy_to_user((void __user *)pteg, &v, sizeof(v)); | 112 | ret = H_FUNCTION; |
113 | if (copy_to_user((void __user *)pteg, &v, sizeof(v))) | ||
114 | goto done; | ||
107 | 115 | ||
108 | rb = compute_tlbie_rb(pte[0], pte[1], pte_index); | 116 | rb = compute_tlbie_rb(pte[0], pte[1], pte_index); |
109 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); | 117 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) | |||
171 | } | 179 | } |
172 | 180 | ||
173 | pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); | 181 | pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); |
174 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 182 | if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) { |
183 | ret = H_FUNCTION; | ||
184 | break; | ||
185 | } | ||
175 | pte[0] = be64_to_cpu((__force __be64)pte[0]); | 186 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
176 | pte[1] = be64_to_cpu((__force __be64)pte[1]); | 187 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
177 | 188 | ||
@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) | |||
184 | tsh |= H_BULK_REMOVE_NOT_FOUND; | 195 | tsh |= H_BULK_REMOVE_NOT_FOUND; |
185 | } else { | 196 | } else { |
186 | /* Splat the pteg in (userland) hpt */ | 197 | /* Splat the pteg in (userland) hpt */ |
187 | copy_to_user((void __user *)pteg, &v, sizeof(v)); | 198 | if (copy_to_user((void __user *)pteg, &v, sizeof(v))) { |
199 | ret = H_FUNCTION; | ||
200 | break; | ||
201 | } | ||
188 | 202 | ||
189 | rb = compute_tlbie_rb(pte[0], pte[1], | 203 | rb = compute_tlbie_rb(pte[0], pte[1], |
190 | tsh & H_BULK_REMOVE_PTEX); | 204 | tsh & H_BULK_REMOVE_PTEX); |
@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
211 | 225 | ||
212 | pteg = get_pteg_addr(vcpu, pte_index); | 226 | pteg = get_pteg_addr(vcpu, pte_index); |
213 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | 227 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
214 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 228 | ret = H_FUNCTION; |
229 | if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) | ||
230 | goto done; | ||
215 | pte[0] = be64_to_cpu((__force __be64)pte[0]); | 231 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
216 | pte[1] = be64_to_cpu((__force __be64)pte[1]); | 232 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
217 | 233 | ||
@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
234 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); | 250 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
235 | pte[0] = (__force u64)cpu_to_be64(pte[0]); | 251 | pte[0] = (__force u64)cpu_to_be64(pte[0]); |
236 | pte[1] = (__force u64)cpu_to_be64(pte[1]); | 252 | pte[1] = (__force u64)cpu_to_be64(pte[1]); |
237 | copy_to_user((void __user *)pteg, pte, sizeof(pte)); | 253 | ret = H_FUNCTION; |
254 | if (copy_to_user((void __user *)pteg, pte, sizeof(pte))) | ||
255 | goto done; | ||
238 | ret = H_SUCCESS; | 256 | ret = H_SUCCESS; |
239 | 257 | ||
240 | done: | 258 | done: |
@@ -244,36 +262,37 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
244 | return EMULATE_DONE; | 262 | return EMULATE_DONE; |
245 | } | 263 | } |
246 | 264 | ||
247 | static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) | 265 | static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu) |
248 | { | 266 | { |
249 | unsigned long liobn = kvmppc_get_gpr(vcpu, 4); | ||
250 | unsigned long ioba = kvmppc_get_gpr(vcpu, 5); | ||
251 | unsigned long tce = kvmppc_get_gpr(vcpu, 6); | ||
252 | long rc; | 267 | long rc; |
253 | 268 | ||
254 | rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce); | 269 | rc = kvmppc_h_logical_ci_load(vcpu); |
255 | if (rc == H_TOO_HARD) | 270 | if (rc == H_TOO_HARD) |
256 | return EMULATE_FAIL; | 271 | return EMULATE_FAIL; |
257 | kvmppc_set_gpr(vcpu, 3, rc); | 272 | kvmppc_set_gpr(vcpu, 3, rc); |
258 | return EMULATE_DONE; | 273 | return EMULATE_DONE; |
259 | } | 274 | } |
260 | 275 | ||
261 | static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu) | 276 | static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu) |
262 | { | 277 | { |
263 | long rc; | 278 | long rc; |
264 | 279 | ||
265 | rc = kvmppc_h_logical_ci_load(vcpu); | 280 | rc = kvmppc_h_logical_ci_store(vcpu); |
266 | if (rc == H_TOO_HARD) | 281 | if (rc == H_TOO_HARD) |
267 | return EMULATE_FAIL; | 282 | return EMULATE_FAIL; |
268 | kvmppc_set_gpr(vcpu, 3, rc); | 283 | kvmppc_set_gpr(vcpu, 3, rc); |
269 | return EMULATE_DONE; | 284 | return EMULATE_DONE; |
270 | } | 285 | } |
271 | 286 | ||
272 | static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu) | 287 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
288 | static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) | ||
273 | { | 289 | { |
290 | unsigned long liobn = kvmppc_get_gpr(vcpu, 4); | ||
291 | unsigned long ioba = kvmppc_get_gpr(vcpu, 5); | ||
292 | unsigned long tce = kvmppc_get_gpr(vcpu, 6); | ||
274 | long rc; | 293 | long rc; |
275 | 294 | ||
276 | rc = kvmppc_h_logical_ci_store(vcpu); | 295 | rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce); |
277 | if (rc == H_TOO_HARD) | 296 | if (rc == H_TOO_HARD) |
278 | return EMULATE_FAIL; | 297 | return EMULATE_FAIL; |
279 | kvmppc_set_gpr(vcpu, 3, rc); | 298 | kvmppc_set_gpr(vcpu, 3, rc); |
@@ -311,6 +330,23 @@ static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu) | |||
311 | return EMULATE_DONE; | 330 | return EMULATE_DONE; |
312 | } | 331 | } |
313 | 332 | ||
333 | #else /* CONFIG_SPAPR_TCE_IOMMU */ | ||
334 | static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) | ||
335 | { | ||
336 | return EMULATE_FAIL; | ||
337 | } | ||
338 | |||
339 | static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu) | ||
340 | { | ||
341 | return EMULATE_FAIL; | ||
342 | } | ||
343 | |||
344 | static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu) | ||
345 | { | ||
346 | return EMULATE_FAIL; | ||
347 | } | ||
348 | #endif /* CONFIG_SPAPR_TCE_IOMMU */ | ||
349 | |||
314 | static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) | 350 | static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) |
315 | { | 351 | { |
316 | long rc = kvmppc_xics_hcall(vcpu, cmd); | 352 | long rc = kvmppc_xics_hcall(vcpu, cmd); |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index f7cf2cd564ef..7f71ab5fcad1 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -1749,7 +1749,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
1749 | r = kvm_vm_ioctl_enable_cap(kvm, &cap); | 1749 | r = kvm_vm_ioctl_enable_cap(kvm, &cap); |
1750 | break; | 1750 | break; |
1751 | } | 1751 | } |
1752 | #ifdef CONFIG_PPC_BOOK3S_64 | 1752 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
1753 | case KVM_CREATE_SPAPR_TCE_64: { | 1753 | case KVM_CREATE_SPAPR_TCE_64: { |
1754 | struct kvm_create_spapr_tce_64 create_tce_64; | 1754 | struct kvm_create_spapr_tce_64 create_tce_64; |
1755 | 1755 | ||
@@ -1780,6 +1780,8 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
1780 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); | 1780 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); |
1781 | goto out; | 1781 | goto out; |
1782 | } | 1782 | } |
1783 | #endif | ||
1784 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
1783 | case KVM_PPC_GET_SMMU_INFO: { | 1785 | case KVM_PPC_GET_SMMU_INFO: { |
1784 | struct kvm_ppc_smmu_info info; | 1786 | struct kvm_ppc_smmu_info info; |
1785 | struct kvm *kvm = filp->private_data; | 1787 | struct kvm *kvm = filp->private_data; |
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c index d659345a98d6..44fe4833910f 100644 --- a/arch/powerpc/mm/dump_linuxpagetables.c +++ b/arch/powerpc/mm/dump_linuxpagetables.c | |||
@@ -16,6 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | #include <linux/debugfs.h> | 17 | #include <linux/debugfs.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/hugetlb.h> | ||
19 | #include <linux/io.h> | 20 | #include <linux/io.h> |
20 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
21 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
@@ -391,7 +392,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) | |||
391 | 392 | ||
392 | for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { | 393 | for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { |
393 | addr = start + i * PMD_SIZE; | 394 | addr = start + i * PMD_SIZE; |
394 | if (!pmd_none(*pmd)) | 395 | if (!pmd_none(*pmd) && !pmd_huge(*pmd)) |
395 | /* pmd exists */ | 396 | /* pmd exists */ |
396 | walk_pte(st, pmd, addr); | 397 | walk_pte(st, pmd, addr); |
397 | else | 398 | else |
@@ -407,7 +408,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) | |||
407 | 408 | ||
408 | for (i = 0; i < PTRS_PER_PUD; i++, pud++) { | 409 | for (i = 0; i < PTRS_PER_PUD; i++, pud++) { |
409 | addr = start + i * PUD_SIZE; | 410 | addr = start + i * PUD_SIZE; |
410 | if (!pud_none(*pud)) | 411 | if (!pud_none(*pud) && !pud_huge(*pud)) |
411 | /* pud exists */ | 412 | /* pud exists */ |
412 | walk_pmd(st, pud, addr); | 413 | walk_pmd(st, pud, addr); |
413 | else | 414 | else |
@@ -427,7 +428,7 @@ static void walk_pagetables(struct pg_state *st) | |||
427 | */ | 428 | */ |
428 | for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { | 429 | for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { |
429 | addr = KERN_VIRT_START + i * PGDIR_SIZE; | 430 | addr = KERN_VIRT_START + i * PGDIR_SIZE; |
430 | if (!pgd_none(*pgd)) | 431 | if (!pgd_none(*pgd) && !pgd_huge(*pgd)) |
431 | /* pgd exists */ | 432 | /* pgd exists */ |
432 | walk_pud(st, pgd, addr); | 433 | walk_pud(st, pgd, addr); |
433 | else | 434 | else |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 96c2b8a40630..0c45cdbac4cf 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) | |||
197 | (REGION_ID(ea) != USER_REGION_ID)) { | 197 | (REGION_ID(ea) != USER_REGION_ID)) { |
198 | 198 | ||
199 | spin_unlock(&spu->register_lock); | 199 | spin_unlock(&spu->register_lock); |
200 | ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr); | 200 | ret = hash_page(ea, |
201 | _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED, | ||
202 | 0x300, dsisr); | ||
201 | spin_lock(&spu->register_lock); | 203 | spin_lock(&spu->register_lock); |
202 | 204 | ||
203 | if (!ret) { | 205 | if (!ret) { |
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 067defeea691..78fa9395b8c5 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c | |||
@@ -714,7 +714,7 @@ static void pnv_npu2_release_context(struct kref *kref) | |||
714 | void pnv_npu2_destroy_context(struct npu_context *npu_context, | 714 | void pnv_npu2_destroy_context(struct npu_context *npu_context, |
715 | struct pci_dev *gpdev) | 715 | struct pci_dev *gpdev) |
716 | { | 716 | { |
717 | struct pnv_phb *nphb, *phb; | 717 | struct pnv_phb *nphb; |
718 | struct npu *npu; | 718 | struct npu *npu; |
719 | struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); | 719 | struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); |
720 | struct device_node *nvlink_dn; | 720 | struct device_node *nvlink_dn; |
@@ -728,13 +728,12 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context, | |||
728 | 728 | ||
729 | nphb = pci_bus_to_host(npdev->bus)->private_data; | 729 | nphb = pci_bus_to_host(npdev->bus)->private_data; |
730 | npu = &nphb->npu; | 730 | npu = &nphb->npu; |
731 | phb = pci_bus_to_host(gpdev->bus)->private_data; | ||
732 | nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0); | 731 | nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0); |
733 | if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", | 732 | if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", |
734 | &nvlink_index))) | 733 | &nvlink_index))) |
735 | return; | 734 | return; |
736 | npu_context->npdev[npu->index][nvlink_index] = NULL; | 735 | npu_context->npdev[npu->index][nvlink_index] = NULL; |
737 | opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id, | 736 | opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, |
738 | PCI_DEVID(gpdev->bus->number, gpdev->devfn)); | 737 | PCI_DEVID(gpdev->bus->number, gpdev->devfn)); |
739 | kref_put(&npu_context->kref, pnv_npu2_release_context); | 738 | kref_put(&npu_context->kref, pnv_npu2_release_context); |
740 | } | 739 | } |
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index 0206c8052328..df7b54ea956d 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/spinlock.h> | 10 | #include <linux/spinlock.h> |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/time.h> | 12 | #include <linux/time.h> |
13 | #include <linux/refcount.h> | ||
13 | #include <uapi/asm/debug.h> | 14 | #include <uapi/asm/debug.h> |
14 | 15 | ||
15 | #define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */ | 16 | #define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */ |
@@ -31,7 +32,7 @@ struct debug_view; | |||
31 | typedef struct debug_info { | 32 | typedef struct debug_info { |
32 | struct debug_info* next; | 33 | struct debug_info* next; |
33 | struct debug_info* prev; | 34 | struct debug_info* prev; |
34 | atomic_t ref_count; | 35 | refcount_t ref_count; |
35 | spinlock_t lock; | 36 | spinlock_t lock; |
36 | int level; | 37 | int level; |
37 | int nr_areas; | 38 | int nr_areas; |
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h index 60323c21938b..37f617dfbede 100644 --- a/arch/s390/include/asm/dis.h +++ b/arch/s390/include/asm/dis.h | |||
@@ -40,6 +40,8 @@ static inline int insn_length(unsigned char code) | |||
40 | return ((((int) code + 64) >> 7) + 1) << 1; | 40 | return ((((int) code + 64) >> 7) + 1) << 1; |
41 | } | 41 | } |
42 | 42 | ||
43 | struct pt_regs; | ||
44 | |||
43 | void show_code(struct pt_regs *regs); | 45 | void show_code(struct pt_regs *regs); |
44 | void print_fn_code(unsigned char *code, unsigned long len); | 46 | void print_fn_code(unsigned char *code, unsigned long len); |
45 | int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len); | 47 | int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len); |
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h index 1293c4066cfc..28792ef82c83 100644 --- a/arch/s390/include/asm/kprobes.h +++ b/arch/s390/include/asm/kprobes.h | |||
@@ -27,12 +27,21 @@ | |||
27 | * 2005-Dec Used as a template for s390 by Mike Grundy | 27 | * 2005-Dec Used as a template for s390 by Mike Grundy |
28 | * <grundym@us.ibm.com> | 28 | * <grundym@us.ibm.com> |
29 | */ | 29 | */ |
30 | #include <linux/types.h> | ||
30 | #include <asm-generic/kprobes.h> | 31 | #include <asm-generic/kprobes.h> |
31 | 32 | ||
32 | #define BREAKPOINT_INSTRUCTION 0x0002 | 33 | #define BREAKPOINT_INSTRUCTION 0x0002 |
33 | 34 | ||
35 | #define FIXUP_PSW_NORMAL 0x08 | ||
36 | #define FIXUP_BRANCH_NOT_TAKEN 0x04 | ||
37 | #define FIXUP_RETURN_REGISTER 0x02 | ||
38 | #define FIXUP_NOT_REQUIRED 0x01 | ||
39 | |||
40 | int probe_is_prohibited_opcode(u16 *insn); | ||
41 | int probe_get_fixup_type(u16 *insn); | ||
42 | int probe_is_insn_relative_long(u16 *insn); | ||
43 | |||
34 | #ifdef CONFIG_KPROBES | 44 | #ifdef CONFIG_KPROBES |
35 | #include <linux/types.h> | ||
36 | #include <linux/ptrace.h> | 45 | #include <linux/ptrace.h> |
37 | #include <linux/percpu.h> | 46 | #include <linux/percpu.h> |
38 | #include <linux/sched/task_stack.h> | 47 | #include <linux/sched/task_stack.h> |
@@ -56,11 +65,6 @@ typedef u16 kprobe_opcode_t; | |||
56 | 65 | ||
57 | #define KPROBE_SWAP_INST 0x10 | 66 | #define KPROBE_SWAP_INST 0x10 |
58 | 67 | ||
59 | #define FIXUP_PSW_NORMAL 0x08 | ||
60 | #define FIXUP_BRANCH_NOT_TAKEN 0x04 | ||
61 | #define FIXUP_RETURN_REGISTER 0x02 | ||
62 | #define FIXUP_NOT_REQUIRED 0x01 | ||
63 | |||
64 | /* Architecture specific copy of original instruction */ | 68 | /* Architecture specific copy of original instruction */ |
65 | struct arch_specific_insn { | 69 | struct arch_specific_insn { |
66 | /* copy of original instruction */ | 70 | /* copy of original instruction */ |
@@ -90,10 +94,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr); | |||
90 | int kprobe_exceptions_notify(struct notifier_block *self, | 94 | int kprobe_exceptions_notify(struct notifier_block *self, |
91 | unsigned long val, void *data); | 95 | unsigned long val, void *data); |
92 | 96 | ||
93 | int probe_is_prohibited_opcode(u16 *insn); | ||
94 | int probe_get_fixup_type(u16 *insn); | ||
95 | int probe_is_insn_relative_long(u16 *insn); | ||
96 | |||
97 | #define flush_insn_slot(p) do { } while (0) | 97 | #define flush_insn_slot(p) do { } while (0) |
98 | 98 | ||
99 | #endif /* CONFIG_KPROBES */ | 99 | #endif /* CONFIG_KPROBES */ |
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index 73bff45ced55..e784bed6ed7f 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h | |||
@@ -146,7 +146,7 @@ extern int topology_max_mnest; | |||
146 | * Returns the maximum nesting level supported by the cpu topology code. | 146 | * Returns the maximum nesting level supported by the cpu topology code. |
147 | * The current maximum level is 4 which is the drawer level. | 147 | * The current maximum level is 4 which is the drawer level. |
148 | */ | 148 | */ |
149 | static inline int topology_mnest_limit(void) | 149 | static inline unsigned char topology_mnest_limit(void) |
150 | { | 150 | { |
151 | return min(topology_max_mnest, 4); | 151 | return min(topology_max_mnest, 4); |
152 | } | 152 | } |
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 530226b6cb19..86b3e74f569e 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c | |||
@@ -277,7 +277,7 @@ debug_info_alloc(const char *name, int pages_per_area, int nr_areas, | |||
277 | memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *)); | 277 | memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *)); |
278 | memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS * | 278 | memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS * |
279 | sizeof(struct dentry*)); | 279 | sizeof(struct dentry*)); |
280 | atomic_set(&(rc->ref_count), 0); | 280 | refcount_set(&(rc->ref_count), 0); |
281 | 281 | ||
282 | return rc; | 282 | return rc; |
283 | 283 | ||
@@ -361,7 +361,7 @@ debug_info_create(const char *name, int pages_per_area, int nr_areas, | |||
361 | debug_area_last = rc; | 361 | debug_area_last = rc; |
362 | rc->next = NULL; | 362 | rc->next = NULL; |
363 | 363 | ||
364 | debug_info_get(rc); | 364 | refcount_set(&rc->ref_count, 1); |
365 | out: | 365 | out: |
366 | return rc; | 366 | return rc; |
367 | } | 367 | } |
@@ -416,7 +416,7 @@ static void | |||
416 | debug_info_get(debug_info_t * db_info) | 416 | debug_info_get(debug_info_t * db_info) |
417 | { | 417 | { |
418 | if (db_info) | 418 | if (db_info) |
419 | atomic_inc(&db_info->ref_count); | 419 | refcount_inc(&db_info->ref_count); |
420 | } | 420 | } |
421 | 421 | ||
422 | /* | 422 | /* |
@@ -431,7 +431,7 @@ debug_info_put(debug_info_t *db_info) | |||
431 | 431 | ||
432 | if (!db_info) | 432 | if (!db_info) |
433 | return; | 433 | return; |
434 | if (atomic_dec_and_test(&db_info->ref_count)) { | 434 | if (refcount_dec_and_test(&db_info->ref_count)) { |
435 | for (i = 0; i < DEBUG_MAX_VIEWS; i++) { | 435 | for (i = 0; i < DEBUG_MAX_VIEWS; i++) { |
436 | if (!db_info->views[i]) | 436 | if (!db_info->views[i]) |
437 | continue; | 437 | continue; |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index a5f5d3bb3dbc..e408d9cc5b96 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -312,6 +312,7 @@ ENTRY(system_call) | |||
312 | lg %r14,__LC_VDSO_PER_CPU | 312 | lg %r14,__LC_VDSO_PER_CPU |
313 | lmg %r0,%r10,__PT_R0(%r11) | 313 | lmg %r0,%r10,__PT_R0(%r11) |
314 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | 314 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) |
315 | .Lsysc_exit_timer: | ||
315 | stpt __LC_EXIT_TIMER | 316 | stpt __LC_EXIT_TIMER |
316 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | 317 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
317 | lmg %r11,%r15,__PT_R11(%r11) | 318 | lmg %r11,%r15,__PT_R11(%r11) |
@@ -623,6 +624,7 @@ ENTRY(io_int_handler) | |||
623 | lg %r14,__LC_VDSO_PER_CPU | 624 | lg %r14,__LC_VDSO_PER_CPU |
624 | lmg %r0,%r10,__PT_R0(%r11) | 625 | lmg %r0,%r10,__PT_R0(%r11) |
625 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | 626 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) |
627 | .Lio_exit_timer: | ||
626 | stpt __LC_EXIT_TIMER | 628 | stpt __LC_EXIT_TIMER |
627 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | 629 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
628 | lmg %r11,%r15,__PT_R11(%r11) | 630 | lmg %r11,%r15,__PT_R11(%r11) |
@@ -1174,15 +1176,23 @@ cleanup_critical: | |||
1174 | br %r14 | 1176 | br %r14 |
1175 | 1177 | ||
1176 | .Lcleanup_sysc_restore: | 1178 | .Lcleanup_sysc_restore: |
1179 | # check if stpt has been executed | ||
1177 | clg %r9,BASED(.Lcleanup_sysc_restore_insn) | 1180 | clg %r9,BASED(.Lcleanup_sysc_restore_insn) |
1181 | jh 0f | ||
1182 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
1183 | cghi %r11,__LC_SAVE_AREA_ASYNC | ||
1178 | je 0f | 1184 | je 0f |
1185 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
1186 | 0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8) | ||
1187 | je 1f | ||
1179 | lg %r9,24(%r11) # get saved pointer to pt_regs | 1188 | lg %r9,24(%r11) # get saved pointer to pt_regs |
1180 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) | 1189 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
1181 | mvc 0(64,%r11),__PT_R8(%r9) | 1190 | mvc 0(64,%r11),__PT_R8(%r9) |
1182 | lmg %r0,%r7,__PT_R0(%r9) | 1191 | lmg %r0,%r7,__PT_R0(%r9) |
1183 | 0: lmg %r8,%r9,__LC_RETURN_PSW | 1192 | 1: lmg %r8,%r9,__LC_RETURN_PSW |
1184 | br %r14 | 1193 | br %r14 |
1185 | .Lcleanup_sysc_restore_insn: | 1194 | .Lcleanup_sysc_restore_insn: |
1195 | .quad .Lsysc_exit_timer | ||
1186 | .quad .Lsysc_done - 4 | 1196 | .quad .Lsysc_done - 4 |
1187 | 1197 | ||
1188 | .Lcleanup_io_tif: | 1198 | .Lcleanup_io_tif: |
@@ -1190,15 +1200,20 @@ cleanup_critical: | |||
1190 | br %r14 | 1200 | br %r14 |
1191 | 1201 | ||
1192 | .Lcleanup_io_restore: | 1202 | .Lcleanup_io_restore: |
1203 | # check if stpt has been executed | ||
1193 | clg %r9,BASED(.Lcleanup_io_restore_insn) | 1204 | clg %r9,BASED(.Lcleanup_io_restore_insn) |
1194 | je 0f | 1205 | jh 0f |
1206 | mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
1207 | 0: clg %r9,BASED(.Lcleanup_io_restore_insn+8) | ||
1208 | je 1f | ||
1195 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs | 1209 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs |
1196 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) | 1210 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
1197 | mvc 0(64,%r11),__PT_R8(%r9) | 1211 | mvc 0(64,%r11),__PT_R8(%r9) |
1198 | lmg %r0,%r7,__PT_R0(%r9) | 1212 | lmg %r0,%r7,__PT_R0(%r9) |
1199 | 0: lmg %r8,%r9,__LC_RETURN_PSW | 1213 | 1: lmg %r8,%r9,__LC_RETURN_PSW |
1200 | br %r14 | 1214 | br %r14 |
1201 | .Lcleanup_io_restore_insn: | 1215 | .Lcleanup_io_restore_insn: |
1216 | .quad .Lio_exit_timer | ||
1202 | .quad .Lio_done - 4 | 1217 | .quad .Lio_done - 4 |
1203 | 1218 | ||
1204 | .Lcleanup_idle: | 1219 | .Lcleanup_idle: |
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 27477f34cc0a..d03a6d12c4bd 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -173,6 +173,8 @@ int __init ftrace_dyn_arch_init(void) | |||
173 | return 0; | 173 | return 0; |
174 | } | 174 | } |
175 | 175 | ||
176 | #ifdef CONFIG_MODULES | ||
177 | |||
176 | static int __init ftrace_plt_init(void) | 178 | static int __init ftrace_plt_init(void) |
177 | { | 179 | { |
178 | unsigned int *ip; | 180 | unsigned int *ip; |
@@ -191,6 +193,8 @@ static int __init ftrace_plt_init(void) | |||
191 | } | 193 | } |
192 | device_initcall(ftrace_plt_init); | 194 | device_initcall(ftrace_plt_init); |
193 | 195 | ||
196 | #endif /* CONFIG_MODULES */ | ||
197 | |||
194 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 198 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
195 | /* | 199 | /* |
196 | * Hook the return address and push it in the stack of return addresses | 200 | * Hook the return address and push it in the stack of return addresses |
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 72307f108c40..6e2c42bd1c3b 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -31,8 +31,14 @@ SECTIONS | |||
31 | { | 31 | { |
32 | . = 0x00000000; | 32 | . = 0x00000000; |
33 | .text : { | 33 | .text : { |
34 | _text = .; /* Text and read-only data */ | 34 | /* Text and read-only data */ |
35 | HEAD_TEXT | 35 | HEAD_TEXT |
36 | /* | ||
37 | * E.g. perf doesn't like symbols starting at address zero, | ||
38 | * therefore skip the initial PSW and channel program located | ||
39 | * at address zero and let _text start at 0x200. | ||
40 | */ | ||
41 | _text = 0x200; | ||
36 | TEXT_TEXT | 42 | TEXT_TEXT |
37 | SCHED_TEXT | 43 | SCHED_TEXT |
38 | CPUIDLE_TEXT | 44 | CPUIDLE_TEXT |
diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c index ae90e1ae3607..1963ddbf4ab3 100644 --- a/arch/s390/lib/probes.c +++ b/arch/s390/lib/probes.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Copyright IBM Corp. 2014 | 4 | * Copyright IBM Corp. 2014 |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/errno.h> | ||
7 | #include <asm/kprobes.h> | 8 | #include <asm/kprobes.h> |
8 | #include <asm/dis.h> | 9 | #include <asm/dis.h> |
9 | 10 | ||
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index 1e5bb2b86c42..b3bd3f23b8e8 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c | |||
@@ -337,8 +337,8 @@ long __strncpy_from_user(char *dst, const char __user *src, long size) | |||
337 | return 0; | 337 | return 0; |
338 | done = 0; | 338 | done = 0; |
339 | do { | 339 | do { |
340 | offset = (size_t)src & ~PAGE_MASK; | 340 | offset = (size_t)src & (L1_CACHE_BYTES - 1); |
341 | len = min(size - done, PAGE_SIZE - offset); | 341 | len = min(size - done, L1_CACHE_BYTES - offset); |
342 | if (copy_from_user(dst, src, len)) | 342 | if (copy_from_user(dst, src, len)) |
343 | return -EFAULT; | 343 | return -EFAULT; |
344 | len_str = strnlen(dst, len); | 344 | len_str = strnlen(dst, len); |
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index dcbf985ab243..d1f837dc77a4 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h | |||
@@ -24,9 +24,11 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, | |||
24 | static inline int prepare_hugepage_range(struct file *file, | 24 | static inline int prepare_hugepage_range(struct file *file, |
25 | unsigned long addr, unsigned long len) | 25 | unsigned long addr, unsigned long len) |
26 | { | 26 | { |
27 | if (len & ~HPAGE_MASK) | 27 | struct hstate *h = hstate_file(file); |
28 | |||
29 | if (len & ~huge_page_mask(h)) | ||
28 | return -EINVAL; | 30 | return -EINVAL; |
29 | if (addr & ~HPAGE_MASK) | 31 | if (addr & ~huge_page_mask(h)) |
30 | return -EINVAL; | 32 | return -EINVAL; |
31 | return 0; | 33 | return 0; |
32 | } | 34 | } |
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index ce6f56980aef..cf190728360b 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h | |||
@@ -91,9 +91,9 @@ extern unsigned long pfn_base; | |||
91 | * ZERO_PAGE is a global shared page that is always zero: used | 91 | * ZERO_PAGE is a global shared page that is always zero: used |
92 | * for zero-mapped memory areas etc.. | 92 | * for zero-mapped memory areas etc.. |
93 | */ | 93 | */ |
94 | extern unsigned long empty_zero_page; | 94 | extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; |
95 | 95 | ||
96 | #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) | 96 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
97 | 97 | ||
98 | /* | 98 | /* |
99 | * In general all page table modifications should use the V8 atomic | 99 | * In general all page table modifications should use the V8 atomic |
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h index 478bf6bb4598..3fae200dd251 100644 --- a/arch/sparc/include/asm/setup.h +++ b/arch/sparc/include/asm/setup.h | |||
@@ -16,7 +16,7 @@ extern char reboot_command[]; | |||
16 | */ | 16 | */ |
17 | extern unsigned char boot_cpu_id; | 17 | extern unsigned char boot_cpu_id; |
18 | 18 | ||
19 | extern unsigned long empty_zero_page; | 19 | extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; |
20 | 20 | ||
21 | extern int serial_console; | 21 | extern int serial_console; |
22 | static inline int con_is_present(void) | 22 | static inline int con_is_present(void) |
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 6bcff698069b..cec54dc4ab81 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c | |||
@@ -130,17 +130,16 @@ unsigned long prepare_ftrace_return(unsigned long parent, | |||
130 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 130 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
131 | return parent + 8UL; | 131 | return parent + 8UL; |
132 | 132 | ||
133 | if (ftrace_push_return_trace(parent, self_addr, &trace.depth, | ||
134 | frame_pointer, NULL) == -EBUSY) | ||
135 | return parent + 8UL; | ||
136 | |||
137 | trace.func = self_addr; | 133 | trace.func = self_addr; |
134 | trace.depth = current->curr_ret_stack + 1; | ||
138 | 135 | ||
139 | /* Only trace if the calling function expects to */ | 136 | /* Only trace if the calling function expects to */ |
140 | if (!ftrace_graph_entry(&trace)) { | 137 | if (!ftrace_graph_entry(&trace)) |
141 | current->curr_ret_stack--; | 138 | return parent + 8UL; |
139 | |||
140 | if (ftrace_push_return_trace(parent, self_addr, &trace.depth, | ||
141 | frame_pointer, NULL) == -EBUSY) | ||
142 | return parent + 8UL; | 142 | return parent + 8UL; |
143 | } | ||
144 | 143 | ||
145 | return return_hooker; | 144 | return return_hooker; |
146 | } | 145 | } |
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index c6afe98de4d9..3bd0d513bddb 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c | |||
@@ -290,7 +290,7 @@ void __init mem_init(void) | |||
290 | 290 | ||
291 | 291 | ||
292 | /* Saves us work later. */ | 292 | /* Saves us work later. */ |
293 | memset((void *)&empty_zero_page, 0, PAGE_SIZE); | 293 | memset((void *)empty_zero_page, 0, PAGE_SIZE); |
294 | 294 | ||
295 | i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); | 295 | i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); |
296 | i += 1; | 296 | i += 1; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cd18994a9555..4ccfacc7232a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -360,7 +360,7 @@ config SMP | |||
360 | Management" code will be disabled if you say Y here. | 360 | Management" code will be disabled if you say Y here. |
361 | 361 | ||
362 | See also <file:Documentation/x86/i386/IO-APIC.txt>, | 362 | See also <file:Documentation/x86/i386/IO-APIC.txt>, |
363 | <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at | 363 | <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at |
364 | <http://www.tldp.org/docs.html#howto>. | 364 | <http://www.tldp.org/docs.html#howto>. |
365 | 365 | ||
366 | If you don't know what to do here, say N. | 366 | If you don't know what to do here, say N. |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 5851411e60fb..bf240b920473 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -159,7 +159,7 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER | |||
159 | # If '-Os' is enabled, disable it and print a warning. | 159 | # If '-Os' is enabled, disable it and print a warning. |
160 | ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE | 160 | ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE |
161 | undefine CONFIG_CC_OPTIMIZE_FOR_SIZE | 161 | undefine CONFIG_CC_OPTIMIZE_FOR_SIZE |
162 | $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.) | 162 | $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.) |
163 | endif | 163 | endif |
164 | 164 | ||
165 | endif | 165 | endif |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 44163e8c3868..2c860ad4fe06 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o | |||
94 | quiet_cmd_check_data_rel = DATAREL $@ | 94 | quiet_cmd_check_data_rel = DATAREL $@ |
95 | define cmd_check_data_rel | 95 | define cmd_check_data_rel |
96 | for obj in $(filter %.o,$^); do \ | 96 | for obj in $(filter %.o,$^); do \ |
97 | readelf -S $$obj | grep -qF .rel.local && { \ | 97 | ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \ |
98 | echo "error: $$obj has data relocations!" >&2; \ | 98 | echo "error: $$obj has data relocations!" >&2; \ |
99 | exit 1; \ | 99 | exit 1; \ |
100 | } || true; \ | 100 | } || true; \ |
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 50bc26949e9e..48ef7bb32c42 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -252,6 +252,23 @@ ENTRY(__switch_to_asm) | |||
252 | END(__switch_to_asm) | 252 | END(__switch_to_asm) |
253 | 253 | ||
254 | /* | 254 | /* |
255 | * The unwinder expects the last frame on the stack to always be at the same | ||
256 | * offset from the end of the page, which allows it to validate the stack. | ||
257 | * Calling schedule_tail() directly would break that convention because its an | ||
258 | * asmlinkage function so its argument has to be pushed on the stack. This | ||
259 | * wrapper creates a proper "end of stack" frame header before the call. | ||
260 | */ | ||
261 | ENTRY(schedule_tail_wrapper) | ||
262 | FRAME_BEGIN | ||
263 | |||
264 | pushl %eax | ||
265 | call schedule_tail | ||
266 | popl %eax | ||
267 | |||
268 | FRAME_END | ||
269 | ret | ||
270 | ENDPROC(schedule_tail_wrapper) | ||
271 | /* | ||
255 | * A newly forked process directly context switches into this address. | 272 | * A newly forked process directly context switches into this address. |
256 | * | 273 | * |
257 | * eax: prev task we switched from | 274 | * eax: prev task we switched from |
@@ -259,24 +276,15 @@ END(__switch_to_asm) | |||
259 | * edi: kernel thread arg | 276 | * edi: kernel thread arg |
260 | */ | 277 | */ |
261 | ENTRY(ret_from_fork) | 278 | ENTRY(ret_from_fork) |
262 | FRAME_BEGIN /* help unwinder find end of stack */ | 279 | call schedule_tail_wrapper |
263 | |||
264 | /* | ||
265 | * schedule_tail() is asmlinkage so we have to put its 'prev' argument | ||
266 | * on the stack. | ||
267 | */ | ||
268 | pushl %eax | ||
269 | call schedule_tail | ||
270 | popl %eax | ||
271 | 280 | ||
272 | testl %ebx, %ebx | 281 | testl %ebx, %ebx |
273 | jnz 1f /* kernel threads are uncommon */ | 282 | jnz 1f /* kernel threads are uncommon */ |
274 | 283 | ||
275 | 2: | 284 | 2: |
276 | /* When we fork, we trace the syscall return in the child, too. */ | 285 | /* When we fork, we trace the syscall return in the child, too. */ |
277 | leal FRAME_OFFSET(%esp), %eax | 286 | movl %esp, %eax |
278 | call syscall_return_slowpath | 287 | call syscall_return_slowpath |
279 | FRAME_END | ||
280 | jmp restore_all | 288 | jmp restore_all |
281 | 289 | ||
282 | /* kernel thread */ | 290 | /* kernel thread */ |
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 607d72c4a485..4a4c0834f965 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <asm/smap.h> | 36 | #include <asm/smap.h> |
37 | #include <asm/pgtable_types.h> | 37 | #include <asm/pgtable_types.h> |
38 | #include <asm/export.h> | 38 | #include <asm/export.h> |
39 | #include <asm/frame.h> | ||
40 | #include <linux/err.h> | 39 | #include <linux/err.h> |
41 | 40 | ||
42 | .code64 | 41 | .code64 |
@@ -406,19 +405,17 @@ END(__switch_to_asm) | |||
406 | * r12: kernel thread arg | 405 | * r12: kernel thread arg |
407 | */ | 406 | */ |
408 | ENTRY(ret_from_fork) | 407 | ENTRY(ret_from_fork) |
409 | FRAME_BEGIN /* help unwinder find end of stack */ | ||
410 | movq %rax, %rdi | 408 | movq %rax, %rdi |
411 | call schedule_tail /* rdi: 'prev' task parameter */ | 409 | call schedule_tail /* rdi: 'prev' task parameter */ |
412 | 410 | ||
413 | testq %rbx, %rbx /* from kernel_thread? */ | 411 | testq %rbx, %rbx /* from kernel_thread? */ |
414 | jnz 1f /* kernel threads are uncommon */ | 412 | jnz 1f /* kernel threads are uncommon */ |
415 | 413 | ||
416 | 2: | 414 | 2: |
417 | leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */ | 415 | movq %rsp, %rdi |
418 | call syscall_return_slowpath /* returns with IRQs disabled */ | 416 | call syscall_return_slowpath /* returns with IRQs disabled */ |
419 | TRACE_IRQS_ON /* user mode is traced as IRQS on */ | 417 | TRACE_IRQS_ON /* user mode is traced as IRQS on */ |
420 | SWAPGS | 418 | SWAPGS |
421 | FRAME_END | ||
422 | jmp restore_regs_and_iret | 419 | jmp restore_regs_and_iret |
423 | 420 | ||
424 | 1: | 421 | 1: |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9c761fea0c98..695605eb1dfb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -43,7 +43,7 @@ | |||
43 | #define KVM_PRIVATE_MEM_SLOTS 3 | 43 | #define KVM_PRIVATE_MEM_SLOTS 3 |
44 | #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) | 44 | #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) |
45 | 45 | ||
46 | #define KVM_HALT_POLL_NS_DEFAULT 400000 | 46 | #define KVM_HALT_POLL_NS_DEFAULT 200000 |
47 | 47 | ||
48 | #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS | 48 | #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS |
49 | 49 | ||
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 4fd5195deed0..3f9a3d2a5209 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -266,6 +266,7 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s | |||
266 | #endif | 266 | #endif |
267 | 267 | ||
268 | int mce_available(struct cpuinfo_x86 *c); | 268 | int mce_available(struct cpuinfo_x86 *c); |
269 | bool mce_is_memory_error(struct mce *m); | ||
269 | 270 | ||
270 | DECLARE_PER_CPU(unsigned, mce_exception_count); | 271 | DECLARE_PER_CPU(unsigned, mce_exception_count); |
271 | DECLARE_PER_CPU(unsigned, mce_poll_count); | 272 | DECLARE_PER_CPU(unsigned, mce_poll_count); |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 68766b276d9e..a059aac9e937 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -319,10 +319,10 @@ do { \ | |||
319 | #define __get_user_asm_u64(x, ptr, retval, errret) \ | 319 | #define __get_user_asm_u64(x, ptr, retval, errret) \ |
320 | ({ \ | 320 | ({ \ |
321 | __typeof__(ptr) __ptr = (ptr); \ | 321 | __typeof__(ptr) __ptr = (ptr); \ |
322 | asm volatile(ASM_STAC "\n" \ | 322 | asm volatile("\n" \ |
323 | "1: movl %2,%%eax\n" \ | 323 | "1: movl %2,%%eax\n" \ |
324 | "2: movl %3,%%edx\n" \ | 324 | "2: movl %3,%%edx\n" \ |
325 | "3: " ASM_CLAC "\n" \ | 325 | "3:\n" \ |
326 | ".section .fixup,\"ax\"\n" \ | 326 | ".section .fixup,\"ax\"\n" \ |
327 | "4: mov %4,%0\n" \ | 327 | "4: mov %4,%0\n" \ |
328 | " xorl %%eax,%%eax\n" \ | 328 | " xorl %%eax,%%eax\n" \ |
@@ -331,7 +331,7 @@ do { \ | |||
331 | ".previous\n" \ | 331 | ".previous\n" \ |
332 | _ASM_EXTABLE(1b, 4b) \ | 332 | _ASM_EXTABLE(1b, 4b) \ |
333 | _ASM_EXTABLE(2b, 4b) \ | 333 | _ASM_EXTABLE(2b, 4b) \ |
334 | : "=r" (retval), "=A"(x) \ | 334 | : "=r" (retval), "=&A"(x) \ |
335 | : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ | 335 | : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ |
336 | "i" (errret), "0" (retval)); \ | 336 | "i" (errret), "0" (retval)); \ |
337 | }) | 337 | }) |
@@ -703,14 +703,15 @@ extern struct movsl_mask { | |||
703 | #define unsafe_put_user(x, ptr, err_label) \ | 703 | #define unsafe_put_user(x, ptr, err_label) \ |
704 | do { \ | 704 | do { \ |
705 | int __pu_err; \ | 705 | int __pu_err; \ |
706 | __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ | 706 | __typeof__(*(ptr)) __pu_val = (x); \ |
707 | __put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ | ||
707 | if (unlikely(__pu_err)) goto err_label; \ | 708 | if (unlikely(__pu_err)) goto err_label; \ |
708 | } while (0) | 709 | } while (0) |
709 | 710 | ||
710 | #define unsafe_get_user(x, ptr, err_label) \ | 711 | #define unsafe_get_user(x, ptr, err_label) \ |
711 | do { \ | 712 | do { \ |
712 | int __gu_err; \ | 713 | int __gu_err; \ |
713 | unsigned long __gu_val; \ | 714 | __inttype(*(ptr)) __gu_val; \ |
714 | __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ | 715 | __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ |
715 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ | 716 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
716 | if (unlikely(__gu_err)) goto err_label; \ | 717 | if (unlikely(__gu_err)) goto err_label; \ |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c5b8f760473c..32e14d137416 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, | |||
409 | memcpy(insnbuf, replacement, a->replacementlen); | 409 | memcpy(insnbuf, replacement, a->replacementlen); |
410 | insnbuf_sz = a->replacementlen; | 410 | insnbuf_sz = a->replacementlen; |
411 | 411 | ||
412 | /* 0xe8 is a relative jump; fix the offset. */ | 412 | /* |
413 | if (*insnbuf == 0xe8 && a->replacementlen == 5) { | 413 | * 0xe8 is a relative jump; fix the offset. |
414 | * | ||
415 | * Instruction length is checked before the opcode to avoid | ||
416 | * accessing uninitialized bytes for zero-length replacements. | ||
417 | */ | ||
418 | if (a->replacementlen == 5 && *insnbuf == 0xe8) { | ||
414 | *(s32 *)(insnbuf + 1) += replacement - instr; | 419 | *(s32 *)(insnbuf + 1) += replacement - instr; |
415 | DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx", | 420 | DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx", |
416 | *(s32 *)(insnbuf + 1), | 421 | *(s32 *)(insnbuf + 1), |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 5abd4bf73d6e..5cfbaeb6529a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -499,16 +499,14 @@ static int mce_usable_address(struct mce *m) | |||
499 | return 1; | 499 | return 1; |
500 | } | 500 | } |
501 | 501 | ||
502 | static bool memory_error(struct mce *m) | 502 | bool mce_is_memory_error(struct mce *m) |
503 | { | 503 | { |
504 | struct cpuinfo_x86 *c = &boot_cpu_data; | 504 | if (m->cpuvendor == X86_VENDOR_AMD) { |
505 | |||
506 | if (c->x86_vendor == X86_VENDOR_AMD) { | ||
507 | /* ErrCodeExt[20:16] */ | 505 | /* ErrCodeExt[20:16] */ |
508 | u8 xec = (m->status >> 16) & 0x1f; | 506 | u8 xec = (m->status >> 16) & 0x1f; |
509 | 507 | ||
510 | return (xec == 0x0 || xec == 0x8); | 508 | return (xec == 0x0 || xec == 0x8); |
511 | } else if (c->x86_vendor == X86_VENDOR_INTEL) { | 509 | } else if (m->cpuvendor == X86_VENDOR_INTEL) { |
512 | /* | 510 | /* |
513 | * Intel SDM Volume 3B - 15.9.2 Compound Error Codes | 511 | * Intel SDM Volume 3B - 15.9.2 Compound Error Codes |
514 | * | 512 | * |
@@ -529,6 +527,7 @@ static bool memory_error(struct mce *m) | |||
529 | 527 | ||
530 | return false; | 528 | return false; |
531 | } | 529 | } |
530 | EXPORT_SYMBOL_GPL(mce_is_memory_error); | ||
532 | 531 | ||
533 | static bool cec_add_mce(struct mce *m) | 532 | static bool cec_add_mce(struct mce *m) |
534 | { | 533 | { |
@@ -536,7 +535,7 @@ static bool cec_add_mce(struct mce *m) | |||
536 | return false; | 535 | return false; |
537 | 536 | ||
538 | /* We eat only correctable DRAM errors with usable addresses. */ | 537 | /* We eat only correctable DRAM errors with usable addresses. */ |
539 | if (memory_error(m) && | 538 | if (mce_is_memory_error(m) && |
540 | !(m->status & MCI_STATUS_UC) && | 539 | !(m->status & MCI_STATUS_UC) && |
541 | mce_usable_address(m)) | 540 | mce_usable_address(m)) |
542 | if (!cec_add_elem(m->addr >> PAGE_SHIFT)) | 541 | if (!cec_add_elem(m->addr >> PAGE_SHIFT)) |
@@ -713,7 +712,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
713 | 712 | ||
714 | severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); | 713 | severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); |
715 | 714 | ||
716 | if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) | 715 | if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m)) |
717 | if (m.status & MCI_STATUS_ADDRV) | 716 | if (m.status & MCI_STATUS_ADDRV) |
718 | m.severity = severity; | 717 | m.severity = severity; |
719 | 718 | ||
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index c2f8dde3255c..d5d44c452624 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c | |||
@@ -90,6 +90,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c) | |||
90 | * Boot time FPU feature detection code: | 90 | * Boot time FPU feature detection code: |
91 | */ | 91 | */ |
92 | unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; | 92 | unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
93 | EXPORT_SYMBOL_GPL(mxcsr_feature_mask); | ||
93 | 94 | ||
94 | static void __init fpu__init_system_mxcsr(void) | 95 | static void __init fpu__init_system_mxcsr(void) |
95 | { | 96 | { |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 0651e974dcb3..9bef1bbeba63 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size) | |||
689 | { | 689 | { |
690 | return module_alloc(size); | 690 | return module_alloc(size); |
691 | } | 691 | } |
692 | static inline void tramp_free(void *tramp) | 692 | static inline void tramp_free(void *tramp, int size) |
693 | { | 693 | { |
694 | int npages = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
695 | |||
696 | set_memory_nx((unsigned long)tramp, npages); | ||
697 | set_memory_rw((unsigned long)tramp, npages); | ||
694 | module_memfree(tramp); | 698 | module_memfree(tramp); |
695 | } | 699 | } |
696 | #else | 700 | #else |
@@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size) | |||
699 | { | 703 | { |
700 | return NULL; | 704 | return NULL; |
701 | } | 705 | } |
702 | static inline void tramp_free(void *tramp) { } | 706 | static inline void tramp_free(void *tramp, int size) { } |
703 | #endif | 707 | #endif |
704 | 708 | ||
705 | /* Defined as markers to the end of the ftrace default trampolines */ | 709 | /* Defined as markers to the end of the ftrace default trampolines */ |
@@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) | |||
771 | /* Copy ftrace_caller onto the trampoline memory */ | 775 | /* Copy ftrace_caller onto the trampoline memory */ |
772 | ret = probe_kernel_read(trampoline, (void *)start_offset, size); | 776 | ret = probe_kernel_read(trampoline, (void *)start_offset, size); |
773 | if (WARN_ON(ret < 0)) { | 777 | if (WARN_ON(ret < 0)) { |
774 | tramp_free(trampoline); | 778 | tramp_free(trampoline, *tramp_size); |
775 | return 0; | 779 | return 0; |
776 | } | 780 | } |
777 | 781 | ||
@@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) | |||
797 | 801 | ||
798 | /* Are we pointing to the reference? */ | 802 | /* Are we pointing to the reference? */ |
799 | if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { | 803 | if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { |
800 | tramp_free(trampoline); | 804 | tramp_free(trampoline, *tramp_size); |
801 | return 0; | 805 | return 0; |
802 | } | 806 | } |
803 | 807 | ||
@@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) | |||
839 | unsigned long offset; | 843 | unsigned long offset; |
840 | unsigned long ip; | 844 | unsigned long ip; |
841 | unsigned int size; | 845 | unsigned int size; |
842 | int ret; | 846 | int ret, npages; |
843 | 847 | ||
844 | if (ops->trampoline) { | 848 | if (ops->trampoline) { |
845 | /* | 849 | /* |
@@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) | |||
848 | */ | 852 | */ |
849 | if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) | 853 | if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) |
850 | return; | 854 | return; |
855 | npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT; | ||
856 | set_memory_rw(ops->trampoline, npages); | ||
851 | } else { | 857 | } else { |
852 | ops->trampoline = create_trampoline(ops, &size); | 858 | ops->trampoline = create_trampoline(ops, &size); |
853 | if (!ops->trampoline) | 859 | if (!ops->trampoline) |
854 | return; | 860 | return; |
855 | ops->trampoline_size = size; | 861 | ops->trampoline_size = size; |
862 | npages = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
856 | } | 863 | } |
857 | 864 | ||
858 | offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); | 865 | offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); |
@@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) | |||
863 | /* Do a safe modify in case the trampoline is executing */ | 870 | /* Do a safe modify in case the trampoline is executing */ |
864 | new = ftrace_call_replace(ip, (unsigned long)func); | 871 | new = ftrace_call_replace(ip, (unsigned long)func); |
865 | ret = update_ftrace_func(ip, new); | 872 | ret = update_ftrace_func(ip, new); |
873 | set_memory_ro(ops->trampoline, npages); | ||
866 | 874 | ||
867 | /* The update should never fail */ | 875 | /* The update should never fail */ |
868 | WARN_ON(ret); | 876 | WARN_ON(ret); |
@@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops) | |||
939 | if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) | 947 | if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) |
940 | return; | 948 | return; |
941 | 949 | ||
942 | tramp_free((void *)ops->trampoline); | 950 | tramp_free((void *)ops->trampoline, ops->trampoline_size); |
943 | ops->trampoline = 0; | 951 | ops->trampoline = 0; |
944 | } | 952 | } |
945 | 953 | ||
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 5b2bbfbb3712..6b877807598b 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/ftrace.h> | 52 | #include <linux/ftrace.h> |
53 | #include <linux/frame.h> | 53 | #include <linux/frame.h> |
54 | #include <linux/kasan.h> | 54 | #include <linux/kasan.h> |
55 | #include <linux/moduleloader.h> | ||
55 | 56 | ||
56 | #include <asm/text-patching.h> | 57 | #include <asm/text-patching.h> |
57 | #include <asm/cacheflush.h> | 58 | #include <asm/cacheflush.h> |
@@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn) | |||
417 | } | 418 | } |
418 | } | 419 | } |
419 | 420 | ||
421 | /* Recover page to RW mode before releasing it */ | ||
422 | void free_insn_page(void *page) | ||
423 | { | ||
424 | set_memory_nx((unsigned long)page & PAGE_MASK, 1); | ||
425 | set_memory_rw((unsigned long)page & PAGE_MASK, 1); | ||
426 | module_memfree(page); | ||
427 | } | ||
428 | |||
420 | static int arch_copy_kprobe(struct kprobe *p) | 429 | static int arch_copy_kprobe(struct kprobe *p) |
421 | { | 430 | { |
422 | struct insn insn; | 431 | struct insn insn; |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 0b4d3c686b1e..f81823695014 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p) | |||
980 | */ | 980 | */ |
981 | x86_configure_nx(); | 981 | x86_configure_nx(); |
982 | 982 | ||
983 | simple_udelay_calibration(); | ||
984 | |||
985 | parse_early_param(); | 983 | parse_early_param(); |
986 | 984 | ||
987 | #ifdef CONFIG_MEMORY_HOTPLUG | 985 | #ifdef CONFIG_MEMORY_HOTPLUG |
@@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p) | |||
1041 | */ | 1039 | */ |
1042 | init_hypervisor_platform(); | 1040 | init_hypervisor_platform(); |
1043 | 1041 | ||
1042 | simple_udelay_calibration(); | ||
1043 | |||
1044 | x86_init.resources.probe_roms(); | 1044 | x86_init.resources.probe_roms(); |
1045 | 1045 | ||
1046 | /* after parse_early_param, so could debug it */ | 1046 | /* after parse_early_param, so could debug it */ |
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 82c6d7f1fd73..b9389d72b2f7 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c | |||
@@ -104,6 +104,11 @@ static inline unsigned long *last_frame(struct unwind_state *state) | |||
104 | return (unsigned long *)task_pt_regs(state->task) - 2; | 104 | return (unsigned long *)task_pt_regs(state->task) - 2; |
105 | } | 105 | } |
106 | 106 | ||
107 | static bool is_last_frame(struct unwind_state *state) | ||
108 | { | ||
109 | return state->bp == last_frame(state); | ||
110 | } | ||
111 | |||
107 | #ifdef CONFIG_X86_32 | 112 | #ifdef CONFIG_X86_32 |
108 | #define GCC_REALIGN_WORDS 3 | 113 | #define GCC_REALIGN_WORDS 3 |
109 | #else | 114 | #else |
@@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state) | |||
115 | return last_frame(state) - GCC_REALIGN_WORDS; | 120 | return last_frame(state) - GCC_REALIGN_WORDS; |
116 | } | 121 | } |
117 | 122 | ||
118 | static bool is_last_task_frame(struct unwind_state *state) | 123 | static bool is_last_aligned_frame(struct unwind_state *state) |
119 | { | 124 | { |
120 | unsigned long *last_bp = last_frame(state); | 125 | unsigned long *last_bp = last_frame(state); |
121 | unsigned long *aligned_bp = last_aligned_frame(state); | 126 | unsigned long *aligned_bp = last_aligned_frame(state); |
122 | 127 | ||
123 | /* | 128 | /* |
124 | * We have to check for the last task frame at two different locations | 129 | * GCC can occasionally decide to realign the stack pointer and change |
125 | * because gcc can occasionally decide to realign the stack pointer and | 130 | * the offset of the stack frame in the prologue of a function called |
126 | * change the offset of the stack frame in the prologue of a function | 131 | * by head/entry code. Examples: |
127 | * called by head/entry code. Examples: | ||
128 | * | 132 | * |
129 | * <start_secondary>: | 133 | * <start_secondary>: |
130 | * push %edi | 134 | * push %edi |
@@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state) | |||
141 | * push %rbp | 145 | * push %rbp |
142 | * mov %rsp,%rbp | 146 | * mov %rsp,%rbp |
143 | * | 147 | * |
144 | * Note that after aligning the stack, it pushes a duplicate copy of | 148 | * After aligning the stack, it pushes a duplicate copy of the return |
145 | * the return address before pushing the frame pointer. | 149 | * address before pushing the frame pointer. |
150 | */ | ||
151 | return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1)); | ||
152 | } | ||
153 | |||
154 | static bool is_last_ftrace_frame(struct unwind_state *state) | ||
155 | { | ||
156 | unsigned long *last_bp = last_frame(state); | ||
157 | unsigned long *last_ftrace_bp = last_bp - 3; | ||
158 | |||
159 | /* | ||
160 | * When unwinding from an ftrace handler of a function called by entry | ||
161 | * code, the stack layout of the last frame is: | ||
162 | * | ||
163 | * bp | ||
164 | * parent ret addr | ||
165 | * bp | ||
166 | * function ret addr | ||
167 | * parent ret addr | ||
168 | * pt_regs | ||
169 | * ----------------- | ||
146 | */ | 170 | */ |
147 | return (state->bp == last_bp || | 171 | return (state->bp == last_ftrace_bp && |
148 | (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1))); | 172 | *state->bp == *(state->bp + 2) && |
173 | *(state->bp + 1) == *(state->bp + 4)); | ||
174 | } | ||
175 | |||
176 | static bool is_last_task_frame(struct unwind_state *state) | ||
177 | { | ||
178 | return is_last_frame(state) || is_last_aligned_frame(state) || | ||
179 | is_last_ftrace_frame(state); | ||
149 | } | 180 | } |
150 | 181 | ||
151 | /* | 182 | /* |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index c25cfaf584e7..0816ab2e8adc 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -4173,7 +4173,7 @@ static int check_dr_write(struct x86_emulate_ctxt *ctxt) | |||
4173 | 4173 | ||
4174 | static int check_svme(struct x86_emulate_ctxt *ctxt) | 4174 | static int check_svme(struct x86_emulate_ctxt *ctxt) |
4175 | { | 4175 | { |
4176 | u64 efer; | 4176 | u64 efer = 0; |
4177 | 4177 | ||
4178 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | 4178 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); |
4179 | 4179 | ||
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 56241746abbd..b0454c7e4cff 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -283,11 +283,13 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, | |||
283 | pt_element_t pte; | 283 | pt_element_t pte; |
284 | pt_element_t __user *uninitialized_var(ptep_user); | 284 | pt_element_t __user *uninitialized_var(ptep_user); |
285 | gfn_t table_gfn; | 285 | gfn_t table_gfn; |
286 | unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey; | 286 | u64 pt_access, pte_access; |
287 | unsigned index, accessed_dirty, pte_pkey; | ||
287 | unsigned nested_access; | 288 | unsigned nested_access; |
288 | gpa_t pte_gpa; | 289 | gpa_t pte_gpa; |
289 | bool have_ad; | 290 | bool have_ad; |
290 | int offset; | 291 | int offset; |
292 | u64 walk_nx_mask = 0; | ||
291 | const int write_fault = access & PFERR_WRITE_MASK; | 293 | const int write_fault = access & PFERR_WRITE_MASK; |
292 | const int user_fault = access & PFERR_USER_MASK; | 294 | const int user_fault = access & PFERR_USER_MASK; |
293 | const int fetch_fault = access & PFERR_FETCH_MASK; | 295 | const int fetch_fault = access & PFERR_FETCH_MASK; |
@@ -302,6 +304,7 @@ retry_walk: | |||
302 | have_ad = PT_HAVE_ACCESSED_DIRTY(mmu); | 304 | have_ad = PT_HAVE_ACCESSED_DIRTY(mmu); |
303 | 305 | ||
304 | #if PTTYPE == 64 | 306 | #if PTTYPE == 64 |
307 | walk_nx_mask = 1ULL << PT64_NX_SHIFT; | ||
305 | if (walker->level == PT32E_ROOT_LEVEL) { | 308 | if (walker->level == PT32E_ROOT_LEVEL) { |
306 | pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); | 309 | pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); |
307 | trace_kvm_mmu_paging_element(pte, walker->level); | 310 | trace_kvm_mmu_paging_element(pte, walker->level); |
@@ -313,8 +316,6 @@ retry_walk: | |||
313 | walker->max_level = walker->level; | 316 | walker->max_level = walker->level; |
314 | ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); | 317 | ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); |
315 | 318 | ||
316 | accessed_dirty = have_ad ? PT_GUEST_ACCESSED_MASK : 0; | ||
317 | |||
318 | /* | 319 | /* |
319 | * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging | 320 | * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging |
320 | * by the MOV to CR instruction are treated as reads and do not cause the | 321 | * by the MOV to CR instruction are treated as reads and do not cause the |
@@ -322,14 +323,14 @@ retry_walk: | |||
322 | */ | 323 | */ |
323 | nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK; | 324 | nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK; |
324 | 325 | ||
325 | pt_access = pte_access = ACC_ALL; | 326 | pte_access = ~0; |
326 | ++walker->level; | 327 | ++walker->level; |
327 | 328 | ||
328 | do { | 329 | do { |
329 | gfn_t real_gfn; | 330 | gfn_t real_gfn; |
330 | unsigned long host_addr; | 331 | unsigned long host_addr; |
331 | 332 | ||
332 | pt_access &= pte_access; | 333 | pt_access = pte_access; |
333 | --walker->level; | 334 | --walker->level; |
334 | 335 | ||
335 | index = PT_INDEX(addr, walker->level); | 336 | index = PT_INDEX(addr, walker->level); |
@@ -371,6 +372,12 @@ retry_walk: | |||
371 | 372 | ||
372 | trace_kvm_mmu_paging_element(pte, walker->level); | 373 | trace_kvm_mmu_paging_element(pte, walker->level); |
373 | 374 | ||
375 | /* | ||
376 | * Inverting the NX it lets us AND it like other | ||
377 | * permission bits. | ||
378 | */ | ||
379 | pte_access = pt_access & (pte ^ walk_nx_mask); | ||
380 | |||
374 | if (unlikely(!FNAME(is_present_gpte)(pte))) | 381 | if (unlikely(!FNAME(is_present_gpte)(pte))) |
375 | goto error; | 382 | goto error; |
376 | 383 | ||
@@ -379,14 +386,16 @@ retry_walk: | |||
379 | goto error; | 386 | goto error; |
380 | } | 387 | } |
381 | 388 | ||
382 | accessed_dirty &= pte; | ||
383 | pte_access = pt_access & FNAME(gpte_access)(vcpu, pte); | ||
384 | |||
385 | walker->ptes[walker->level - 1] = pte; | 389 | walker->ptes[walker->level - 1] = pte; |
386 | } while (!is_last_gpte(mmu, walker->level, pte)); | 390 | } while (!is_last_gpte(mmu, walker->level, pte)); |
387 | 391 | ||
388 | pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); | 392 | pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); |
389 | errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access); | 393 | accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0; |
394 | |||
395 | /* Convert to ACC_*_MASK flags for struct guest_walker. */ | ||
396 | walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask); | ||
397 | walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask); | ||
398 | errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access); | ||
390 | if (unlikely(errcode)) | 399 | if (unlikely(errcode)) |
391 | goto error; | 400 | goto error; |
392 | 401 | ||
@@ -403,7 +412,7 @@ retry_walk: | |||
403 | walker->gfn = real_gpa >> PAGE_SHIFT; | 412 | walker->gfn = real_gpa >> PAGE_SHIFT; |
404 | 413 | ||
405 | if (!write_fault) | 414 | if (!write_fault) |
406 | FNAME(protect_clean_gpte)(mmu, &pte_access, pte); | 415 | FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte); |
407 | else | 416 | else |
408 | /* | 417 | /* |
409 | * On a write fault, fold the dirty bit into accessed_dirty. | 418 | * On a write fault, fold the dirty bit into accessed_dirty. |
@@ -421,10 +430,8 @@ retry_walk: | |||
421 | goto retry_walk; | 430 | goto retry_walk; |
422 | } | 431 | } |
423 | 432 | ||
424 | walker->pt_access = pt_access; | ||
425 | walker->pte_access = pte_access; | ||
426 | pgprintk("%s: pte %llx pte_access %x pt_access %x\n", | 433 | pgprintk("%s: pte %llx pte_access %x pt_access %x\n", |
427 | __func__, (u64)pte, pte_access, pt_access); | 434 | __func__, (u64)pte, walker->pte_access, walker->pt_access); |
428 | return 1; | 435 | return 1; |
429 | 436 | ||
430 | error: | 437 | error: |
@@ -452,7 +459,7 @@ error: | |||
452 | */ | 459 | */ |
453 | if (!(errcode & PFERR_RSVD_MASK)) { | 460 | if (!(errcode & PFERR_RSVD_MASK)) { |
454 | vcpu->arch.exit_qualification &= 0x187; | 461 | vcpu->arch.exit_qualification &= 0x187; |
455 | vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3; | 462 | vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3; |
456 | } | 463 | } |
457 | #endif | 464 | #endif |
458 | walker->fault.address = addr; | 465 | walker->fault.address = addr; |
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c index 9d4a8504a95a..5ab4a364348e 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/pmu_intel.c | |||
@@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) | |||
294 | ((u64)1 << edx.split.bit_width_fixed) - 1; | 294 | ((u64)1 << edx.split.bit_width_fixed) - 1; |
295 | } | 295 | } |
296 | 296 | ||
297 | pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | | 297 | pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | |
298 | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); | 298 | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); |
299 | pmu->global_ctrl_mask = ~pmu->global_ctrl; | 299 | pmu->global_ctrl_mask = ~pmu->global_ctrl; |
300 | 300 | ||
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index c27ac6923a18..183ddb235fb4 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1272,7 +1272,8 @@ static void init_vmcb(struct vcpu_svm *svm) | |||
1272 | 1272 | ||
1273 | } | 1273 | } |
1274 | 1274 | ||
1275 | static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, int index) | 1275 | static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, |
1276 | unsigned int index) | ||
1276 | { | 1277 | { |
1277 | u64 *avic_physical_id_table; | 1278 | u64 *avic_physical_id_table; |
1278 | struct kvm_arch *vm_data = &vcpu->kvm->arch; | 1279 | struct kvm_arch *vm_data = &vcpu->kvm->arch; |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c6f4ad44aa95..72f78396bc09 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -6504,7 +6504,7 @@ static __init int hardware_setup(void) | |||
6504 | enable_ept_ad_bits = 0; | 6504 | enable_ept_ad_bits = 0; |
6505 | } | 6505 | } |
6506 | 6506 | ||
6507 | if (!cpu_has_vmx_ept_ad_bits()) | 6507 | if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) |
6508 | enable_ept_ad_bits = 0; | 6508 | enable_ept_ad_bits = 0; |
6509 | 6509 | ||
6510 | if (!cpu_has_vmx_unrestricted_guest()) | 6510 | if (!cpu_has_vmx_unrestricted_guest()) |
@@ -11213,7 +11213,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) | |||
11213 | if (!nested_cpu_has_pml(vmcs12)) | 11213 | if (!nested_cpu_has_pml(vmcs12)) |
11214 | return 0; | 11214 | return 0; |
11215 | 11215 | ||
11216 | if (vmcs12->guest_pml_index > PML_ENTITY_NUM) { | 11216 | if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { |
11217 | vmx->nested.pml_full = true; | 11217 | vmx->nested.pml_full = true; |
11218 | return 1; | 11218 | return 1; |
11219 | } | 11219 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 464da936c53d..02363e37d4a6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1763,6 +1763,7 @@ u64 get_kvmclock_ns(struct kvm *kvm) | |||
1763 | { | 1763 | { |
1764 | struct kvm_arch *ka = &kvm->arch; | 1764 | struct kvm_arch *ka = &kvm->arch; |
1765 | struct pvclock_vcpu_time_info hv_clock; | 1765 | struct pvclock_vcpu_time_info hv_clock; |
1766 | u64 ret; | ||
1766 | 1767 | ||
1767 | spin_lock(&ka->pvclock_gtod_sync_lock); | 1768 | spin_lock(&ka->pvclock_gtod_sync_lock); |
1768 | if (!ka->use_master_clock) { | 1769 | if (!ka->use_master_clock) { |
@@ -1774,10 +1775,17 @@ u64 get_kvmclock_ns(struct kvm *kvm) | |||
1774 | hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; | 1775 | hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; |
1775 | spin_unlock(&ka->pvclock_gtod_sync_lock); | 1776 | spin_unlock(&ka->pvclock_gtod_sync_lock); |
1776 | 1777 | ||
1778 | /* both __this_cpu_read() and rdtsc() should be on the same cpu */ | ||
1779 | get_cpu(); | ||
1780 | |||
1777 | kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, | 1781 | kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, |
1778 | &hv_clock.tsc_shift, | 1782 | &hv_clock.tsc_shift, |
1779 | &hv_clock.tsc_to_system_mul); | 1783 | &hv_clock.tsc_to_system_mul); |
1780 | return __pvclock_read_cycles(&hv_clock, rdtsc()); | 1784 | ret = __pvclock_read_cycles(&hv_clock, rdtsc()); |
1785 | |||
1786 | put_cpu(); | ||
1787 | |||
1788 | return ret; | ||
1781 | } | 1789 | } |
1782 | 1790 | ||
1783 | static void kvm_setup_pvclock_page(struct kvm_vcpu *v) | 1791 | static void kvm_setup_pvclock_page(struct kvm_vcpu *v) |
@@ -3288,11 +3296,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, | |||
3288 | } | 3296 | } |
3289 | } | 3297 | } |
3290 | 3298 | ||
3299 | #define XSAVE_MXCSR_OFFSET 24 | ||
3300 | |||
3291 | static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, | 3301 | static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, |
3292 | struct kvm_xsave *guest_xsave) | 3302 | struct kvm_xsave *guest_xsave) |
3293 | { | 3303 | { |
3294 | u64 xstate_bv = | 3304 | u64 xstate_bv = |
3295 | *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; | 3305 | *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; |
3306 | u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]; | ||
3296 | 3307 | ||
3297 | if (boot_cpu_has(X86_FEATURE_XSAVE)) { | 3308 | if (boot_cpu_has(X86_FEATURE_XSAVE)) { |
3298 | /* | 3309 | /* |
@@ -3300,11 +3311,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, | |||
3300 | * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility | 3311 | * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility |
3301 | * with old userspace. | 3312 | * with old userspace. |
3302 | */ | 3313 | */ |
3303 | if (xstate_bv & ~kvm_supported_xcr0()) | 3314 | if (xstate_bv & ~kvm_supported_xcr0() || |
3315 | mxcsr & ~mxcsr_feature_mask) | ||
3304 | return -EINVAL; | 3316 | return -EINVAL; |
3305 | load_xsave(vcpu, (u8 *)guest_xsave->region); | 3317 | load_xsave(vcpu, (u8 *)guest_xsave->region); |
3306 | } else { | 3318 | } else { |
3307 | if (xstate_bv & ~XFEATURE_MASK_FPSSE) | 3319 | if (xstate_bv & ~XFEATURE_MASK_FPSSE || |
3320 | mxcsr & ~mxcsr_feature_mask) | ||
3308 | return -EINVAL; | 3321 | return -EINVAL; |
3309 | memcpy(&vcpu->arch.guest_fpu.state.fxsave, | 3322 | memcpy(&vcpu->arch.guest_fpu.state.fxsave, |
3310 | guest_xsave->region, sizeof(struct fxregs_state)); | 3323 | guest_xsave->region, sizeof(struct fxregs_state)); |
@@ -4818,16 +4831,20 @@ emul_write: | |||
4818 | 4831 | ||
4819 | static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) | 4832 | static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) |
4820 | { | 4833 | { |
4821 | /* TODO: String I/O for in kernel device */ | 4834 | int r = 0, i; |
4822 | int r; | ||
4823 | 4835 | ||
4824 | if (vcpu->arch.pio.in) | 4836 | for (i = 0; i < vcpu->arch.pio.count; i++) { |
4825 | r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, | 4837 | if (vcpu->arch.pio.in) |
4826 | vcpu->arch.pio.size, pd); | 4838 | r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, |
4827 | else | 4839 | vcpu->arch.pio.size, pd); |
4828 | r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, | 4840 | else |
4829 | vcpu->arch.pio.port, vcpu->arch.pio.size, | 4841 | r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, |
4830 | pd); | 4842 | vcpu->arch.pio.port, vcpu->arch.pio.size, |
4843 | pd); | ||
4844 | if (r) | ||
4845 | break; | ||
4846 | pd += vcpu->arch.pio.size; | ||
4847 | } | ||
4831 | return r; | 4848 | return r; |
4832 | } | 4849 | } |
4833 | 4850 | ||
@@ -4865,6 +4882,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, | |||
4865 | if (vcpu->arch.pio.count) | 4882 | if (vcpu->arch.pio.count) |
4866 | goto data_avail; | 4883 | goto data_avail; |
4867 | 4884 | ||
4885 | memset(vcpu->arch.pio_data, 0, size * count); | ||
4886 | |||
4868 | ret = emulator_pio_in_out(vcpu, size, port, val, count, true); | 4887 | ret = emulator_pio_in_out(vcpu, size, port, val, count, true); |
4869 | if (ret) { | 4888 | if (ret) { |
4870 | data_avail: | 4889 | data_avail: |
@@ -5048,6 +5067,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, | |||
5048 | 5067 | ||
5049 | if (var.unusable) { | 5068 | if (var.unusable) { |
5050 | memset(desc, 0, sizeof(*desc)); | 5069 | memset(desc, 0, sizeof(*desc)); |
5070 | if (base3) | ||
5071 | *base3 = 0; | ||
5051 | return false; | 5072 | return false; |
5052 | } | 5073 | } |
5053 | 5074 | ||
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 1dcd2be4cce4..c8520b2c62d2 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -186,7 +186,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache) | |||
186 | unsigned int i, level; | 186 | unsigned int i, level; |
187 | unsigned long addr; | 187 | unsigned long addr; |
188 | 188 | ||
189 | BUG_ON(irqs_disabled()); | 189 | BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); |
190 | WARN_ON(PAGE_ALIGN(start) != start); | 190 | WARN_ON(PAGE_ALIGN(start) != start); |
191 | 191 | ||
192 | on_each_cpu(__cpa_flush_range, NULL, 1); | 192 | on_each_cpu(__cpa_flush_range, NULL, 1); |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 9b78685b66e6..83a59a67757a 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -65,9 +65,11 @@ static int __init nopat(char *str) | |||
65 | } | 65 | } |
66 | early_param("nopat", nopat); | 66 | early_param("nopat", nopat); |
67 | 67 | ||
68 | static bool __read_mostly __pat_initialized = false; | ||
69 | |||
68 | bool pat_enabled(void) | 70 | bool pat_enabled(void) |
69 | { | 71 | { |
70 | return !!__pat_enabled; | 72 | return __pat_initialized; |
71 | } | 73 | } |
72 | EXPORT_SYMBOL_GPL(pat_enabled); | 74 | EXPORT_SYMBOL_GPL(pat_enabled); |
73 | 75 | ||
@@ -225,13 +227,14 @@ static void pat_bsp_init(u64 pat) | |||
225 | } | 227 | } |
226 | 228 | ||
227 | wrmsrl(MSR_IA32_CR_PAT, pat); | 229 | wrmsrl(MSR_IA32_CR_PAT, pat); |
230 | __pat_initialized = true; | ||
228 | 231 | ||
229 | __init_cache_modes(pat); | 232 | __init_cache_modes(pat); |
230 | } | 233 | } |
231 | 234 | ||
232 | static void pat_ap_init(u64 pat) | 235 | static void pat_ap_init(u64 pat) |
233 | { | 236 | { |
234 | if (!boot_cpu_has(X86_FEATURE_PAT)) { | 237 | if (!this_cpu_has(X86_FEATURE_PAT)) { |
235 | /* | 238 | /* |
236 | * If this happens we are on a secondary CPU, but switched to | 239 | * If this happens we are on a secondary CPU, but switched to |
237 | * PAT on the boot CPU. We have no way to undo PAT. | 240 | * PAT on the boot CPU. We have no way to undo PAT. |
@@ -306,7 +309,7 @@ void pat_init(void) | |||
306 | u64 pat; | 309 | u64 pat; |
307 | struct cpuinfo_x86 *c = &boot_cpu_data; | 310 | struct cpuinfo_x86 *c = &boot_cpu_data; |
308 | 311 | ||
309 | if (!pat_enabled()) { | 312 | if (!__pat_enabled) { |
310 | init_cache_modes(); | 313 | init_cache_modes(); |
311 | return; | 314 | return; |
312 | } | 315 | } |
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 7cd442690f9d..f33eef4ebd12 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c | |||
@@ -142,9 +142,7 @@ static void __init xen_banner(void) | |||
142 | struct xen_extraversion extra; | 142 | struct xen_extraversion extra; |
143 | HYPERVISOR_xen_version(XENVER_extraversion, &extra); | 143 | HYPERVISOR_xen_version(XENVER_extraversion, &extra); |
144 | 144 | ||
145 | pr_info("Booting paravirtualized kernel %son %s\n", | 145 | pr_info("Booting paravirtualized kernel on %s\n", pv_info.name); |
146 | xen_feature(XENFEAT_auto_translated_physmap) ? | ||
147 | "with PVH extensions " : "", pv_info.name); | ||
148 | printk(KERN_INFO "Xen version: %d.%d%s%s\n", | 146 | printk(KERN_INFO "Xen version: %d.%d%s%s\n", |
149 | version >> 16, version & 0xffff, extra.extraversion, | 147 | version >> 16, version & 0xffff, extra.extraversion, |
150 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); | 148 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); |
@@ -957,15 +955,10 @@ static void xen_write_msr(unsigned int msr, unsigned low, unsigned high) | |||
957 | 955 | ||
958 | void xen_setup_shared_info(void) | 956 | void xen_setup_shared_info(void) |
959 | { | 957 | { |
960 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | 958 | set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info); |
961 | set_fixmap(FIX_PARAVIRT_BOOTMAP, | ||
962 | xen_start_info->shared_info); | ||
963 | 959 | ||
964 | HYPERVISOR_shared_info = | 960 | HYPERVISOR_shared_info = |
965 | (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); | 961 | (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); |
966 | } else | ||
967 | HYPERVISOR_shared_info = | ||
968 | (struct shared_info *)__va(xen_start_info->shared_info); | ||
969 | 962 | ||
970 | #ifndef CONFIG_SMP | 963 | #ifndef CONFIG_SMP |
971 | /* In UP this is as good a place as any to set up shared info */ | 964 | /* In UP this is as good a place as any to set up shared info */ |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5e375a5e815f..3be06f3caf3c 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -42,7 +42,7 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr) | |||
42 | } | 42 | } |
43 | EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); | 43 | EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); |
44 | 44 | ||
45 | void xen_flush_tlb_all(void) | 45 | static void xen_flush_tlb_all(void) |
46 | { | 46 | { |
47 | struct mmuext_op *op; | 47 | struct mmuext_op *op; |
48 | struct multicall_space mcs; | 48 | struct multicall_space mcs; |
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 7397d8b8459d..1f386d7fdf70 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
@@ -355,10 +355,8 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) | |||
355 | pteval_t flags = val & PTE_FLAGS_MASK; | 355 | pteval_t flags = val & PTE_FLAGS_MASK; |
356 | unsigned long mfn; | 356 | unsigned long mfn; |
357 | 357 | ||
358 | if (!xen_feature(XENFEAT_auto_translated_physmap)) | 358 | mfn = __pfn_to_mfn(pfn); |
359 | mfn = __pfn_to_mfn(pfn); | 359 | |
360 | else | ||
361 | mfn = pfn; | ||
362 | /* | 360 | /* |
363 | * If there's no mfn for the pfn, then just create an | 361 | * If there's no mfn for the pfn, then just create an |
364 | * empty non-present pte. Unfortunately this loses | 362 | * empty non-present pte. Unfortunately this loses |
@@ -647,9 +645,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, | |||
647 | limit--; | 645 | limit--; |
648 | BUG_ON(limit >= FIXADDR_TOP); | 646 | BUG_ON(limit >= FIXADDR_TOP); |
649 | 647 | ||
650 | if (xen_feature(XENFEAT_auto_translated_physmap)) | ||
651 | return 0; | ||
652 | |||
653 | /* | 648 | /* |
654 | * 64-bit has a great big hole in the middle of the address | 649 | * 64-bit has a great big hole in the middle of the address |
655 | * space, which contains the Xen mappings. On 32-bit these | 650 | * space, which contains the Xen mappings. On 32-bit these |
@@ -1289,9 +1284,6 @@ static void __init xen_pagetable_cleanhighmap(void) | |||
1289 | 1284 | ||
1290 | static void __init xen_pagetable_p2m_setup(void) | 1285 | static void __init xen_pagetable_p2m_setup(void) |
1291 | { | 1286 | { |
1292 | if (xen_feature(XENFEAT_auto_translated_physmap)) | ||
1293 | return; | ||
1294 | |||
1295 | xen_vmalloc_p2m_tree(); | 1287 | xen_vmalloc_p2m_tree(); |
1296 | 1288 | ||
1297 | #ifdef CONFIG_X86_64 | 1289 | #ifdef CONFIG_X86_64 |
@@ -1314,8 +1306,7 @@ static void __init xen_pagetable_init(void) | |||
1314 | xen_build_mfn_list_list(); | 1306 | xen_build_mfn_list_list(); |
1315 | 1307 | ||
1316 | /* Remap memory freed due to conflicts with E820 map */ | 1308 | /* Remap memory freed due to conflicts with E820 map */ |
1317 | if (!xen_feature(XENFEAT_auto_translated_physmap)) | 1309 | xen_remap_memory(); |
1318 | xen_remap_memory(); | ||
1319 | 1310 | ||
1320 | xen_setup_shared_info(); | 1311 | xen_setup_shared_info(); |
1321 | } | 1312 | } |
@@ -1925,21 +1916,20 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1925 | /* Zap identity mapping */ | 1916 | /* Zap identity mapping */ |
1926 | init_level4_pgt[0] = __pgd(0); | 1917 | init_level4_pgt[0] = __pgd(0); |
1927 | 1918 | ||
1928 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | 1919 | /* Pre-constructed entries are in pfn, so convert to mfn */ |
1929 | /* Pre-constructed entries are in pfn, so convert to mfn */ | 1920 | /* L4[272] -> level3_ident_pgt */ |
1930 | /* L4[272] -> level3_ident_pgt | 1921 | /* L4[511] -> level3_kernel_pgt */ |
1931 | * L4[511] -> level3_kernel_pgt */ | 1922 | convert_pfn_mfn(init_level4_pgt); |
1932 | convert_pfn_mfn(init_level4_pgt); | ||
1933 | 1923 | ||
1934 | /* L3_i[0] -> level2_ident_pgt */ | 1924 | /* L3_i[0] -> level2_ident_pgt */ |
1935 | convert_pfn_mfn(level3_ident_pgt); | 1925 | convert_pfn_mfn(level3_ident_pgt); |
1936 | /* L3_k[510] -> level2_kernel_pgt | 1926 | /* L3_k[510] -> level2_kernel_pgt */ |
1937 | * L3_k[511] -> level2_fixmap_pgt */ | 1927 | /* L3_k[511] -> level2_fixmap_pgt */ |
1938 | convert_pfn_mfn(level3_kernel_pgt); | 1928 | convert_pfn_mfn(level3_kernel_pgt); |
1929 | |||
1930 | /* L3_k[511][506] -> level1_fixmap_pgt */ | ||
1931 | convert_pfn_mfn(level2_fixmap_pgt); | ||
1939 | 1932 | ||
1940 | /* L3_k[511][506] -> level1_fixmap_pgt */ | ||
1941 | convert_pfn_mfn(level2_fixmap_pgt); | ||
1942 | } | ||
1943 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ | 1933 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ |
1944 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); | 1934 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); |
1945 | l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); | 1935 | l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); |
@@ -1962,34 +1952,30 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1962 | if (i && i < pgd_index(__START_KERNEL_map)) | 1952 | if (i && i < pgd_index(__START_KERNEL_map)) |
1963 | init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i]; | 1953 | init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i]; |
1964 | 1954 | ||
1965 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | 1955 | /* Make pagetable pieces RO */ |
1966 | /* Make pagetable pieces RO */ | 1956 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); |
1967 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); | 1957 | set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); |
1968 | set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); | 1958 | set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); |
1969 | set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); | 1959 | set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); |
1970 | set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); | 1960 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); |
1971 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); | 1961 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); |
1972 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); | 1962 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); |
1973 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); | 1963 | set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); |
1974 | set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); | 1964 | |
1975 | 1965 | /* Pin down new L4 */ | |
1976 | /* Pin down new L4 */ | 1966 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, |
1977 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, | 1967 | PFN_DOWN(__pa_symbol(init_level4_pgt))); |
1978 | PFN_DOWN(__pa_symbol(init_level4_pgt))); | 1968 | |
1979 | 1969 | /* Unpin Xen-provided one */ | |
1980 | /* Unpin Xen-provided one */ | 1970 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); |
1981 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | ||
1982 | 1971 | ||
1983 | /* | 1972 | /* |
1984 | * At this stage there can be no user pgd, and no page | 1973 | * At this stage there can be no user pgd, and no page structure to |
1985 | * structure to attach it to, so make sure we just set kernel | 1974 | * attach it to, so make sure we just set kernel pgd. |
1986 | * pgd. | 1975 | */ |
1987 | */ | 1976 | xen_mc_batch(); |
1988 | xen_mc_batch(); | 1977 | __xen_write_cr3(true, __pa(init_level4_pgt)); |
1989 | __xen_write_cr3(true, __pa(init_level4_pgt)); | 1978 | xen_mc_issue(PARAVIRT_LAZY_CPU); |
1990 | xen_mc_issue(PARAVIRT_LAZY_CPU); | ||
1991 | } else | ||
1992 | native_write_cr3(__pa(init_level4_pgt)); | ||
1993 | 1979 | ||
1994 | /* We can't that easily rip out L3 and L2, as the Xen pagetables are | 1980 | /* We can't that easily rip out L3 and L2, as the Xen pagetables are |
1995 | * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for | 1981 | * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for |
@@ -2403,9 +2389,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |||
2403 | 2389 | ||
2404 | static void __init xen_post_allocator_init(void) | 2390 | static void __init xen_post_allocator_init(void) |
2405 | { | 2391 | { |
2406 | if (xen_feature(XENFEAT_auto_translated_physmap)) | ||
2407 | return; | ||
2408 | |||
2409 | pv_mmu_ops.set_pte = xen_set_pte; | 2392 | pv_mmu_ops.set_pte = xen_set_pte; |
2410 | pv_mmu_ops.set_pmd = xen_set_pmd; | 2393 | pv_mmu_ops.set_pmd = xen_set_pmd; |
2411 | pv_mmu_ops.set_pud = xen_set_pud; | 2394 | pv_mmu_ops.set_pud = xen_set_pud; |
@@ -2511,9 +2494,6 @@ void __init xen_init_mmu_ops(void) | |||
2511 | { | 2494 | { |
2512 | x86_init.paging.pagetable_init = xen_pagetable_init; | 2495 | x86_init.paging.pagetable_init = xen_pagetable_init; |
2513 | 2496 | ||
2514 | if (xen_feature(XENFEAT_auto_translated_physmap)) | ||
2515 | return; | ||
2516 | |||
2517 | pv_mmu_ops = xen_mmu_ops; | 2497 | pv_mmu_ops = xen_mmu_ops; |
2518 | 2498 | ||
2519 | memset(dummy_mapping, 0xff, PAGE_SIZE); | 2499 | memset(dummy_mapping, 0xff, PAGE_SIZE); |
@@ -2650,9 +2630,6 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, | |||
2650 | * this function are redundant and can be ignored. | 2630 | * this function are redundant and can be ignored. |
2651 | */ | 2631 | */ |
2652 | 2632 | ||
2653 | if (xen_feature(XENFEAT_auto_translated_physmap)) | ||
2654 | return 0; | ||
2655 | |||
2656 | if (unlikely(order > MAX_CONTIG_ORDER)) | 2633 | if (unlikely(order > MAX_CONTIG_ORDER)) |
2657 | return -ENOMEM; | 2634 | return -ENOMEM; |
2658 | 2635 | ||
@@ -2689,9 +2666,6 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) | |||
2689 | int success; | 2666 | int success; |
2690 | unsigned long vstart; | 2667 | unsigned long vstart; |
2691 | 2668 | ||
2692 | if (xen_feature(XENFEAT_auto_translated_physmap)) | ||
2693 | return; | ||
2694 | |||
2695 | if (unlikely(order > MAX_CONTIG_ORDER)) | 2669 | if (unlikely(order > MAX_CONTIG_ORDER)) |
2696 | return; | 2670 | return; |
2697 | 2671 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index a69ad122ed66..f2224ffd225d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -628,25 +628,6 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, | |||
628 | } | 628 | } |
629 | EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); | 629 | EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); |
630 | 630 | ||
631 | void blk_mq_abort_requeue_list(struct request_queue *q) | ||
632 | { | ||
633 | unsigned long flags; | ||
634 | LIST_HEAD(rq_list); | ||
635 | |||
636 | spin_lock_irqsave(&q->requeue_lock, flags); | ||
637 | list_splice_init(&q->requeue_list, &rq_list); | ||
638 | spin_unlock_irqrestore(&q->requeue_lock, flags); | ||
639 | |||
640 | while (!list_empty(&rq_list)) { | ||
641 | struct request *rq; | ||
642 | |||
643 | rq = list_first_entry(&rq_list, struct request, queuelist); | ||
644 | list_del_init(&rq->queuelist); | ||
645 | blk_mq_end_request(rq, -EIO); | ||
646 | } | ||
647 | } | ||
648 | EXPORT_SYMBOL(blk_mq_abort_requeue_list); | ||
649 | |||
650 | struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) | 631 | struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) |
651 | { | 632 | { |
652 | if (tag < tags->nr_tags) { | 633 | if (tag < tags->nr_tags) { |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 504fee940052..712b018e9f54 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -887,10 +887,10 @@ int blk_register_queue(struct gendisk *disk) | |||
887 | goto unlock; | 887 | goto unlock; |
888 | } | 888 | } |
889 | 889 | ||
890 | if (q->mq_ops) | 890 | if (q->mq_ops) { |
891 | __blk_mq_register_dev(dev, q); | 891 | __blk_mq_register_dev(dev, q); |
892 | 892 | blk_mq_debugfs_register(q); | |
893 | blk_mq_debugfs_register(q); | 893 | } |
894 | 894 | ||
895 | kobject_uevent(&q->kobj, KOBJ_ADD); | 895 | kobject_uevent(&q->kobj, KOBJ_ADD); |
896 | 896 | ||
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index b78db2e5fdff..fc13dd0c6e39 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -22,11 +22,11 @@ static int throtl_quantum = 32; | |||
22 | #define DFL_THROTL_SLICE_HD (HZ / 10) | 22 | #define DFL_THROTL_SLICE_HD (HZ / 10) |
23 | #define DFL_THROTL_SLICE_SSD (HZ / 50) | 23 | #define DFL_THROTL_SLICE_SSD (HZ / 50) |
24 | #define MAX_THROTL_SLICE (HZ) | 24 | #define MAX_THROTL_SLICE (HZ) |
25 | #define DFL_IDLE_THRESHOLD_SSD (1000L) /* 1 ms */ | ||
26 | #define DFL_IDLE_THRESHOLD_HD (100L * 1000) /* 100 ms */ | ||
27 | #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */ | 25 | #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */ |
28 | /* default latency target is 0, eg, guarantee IO latency by default */ | 26 | #define MIN_THROTL_BPS (320 * 1024) |
29 | #define DFL_LATENCY_TARGET (0) | 27 | #define MIN_THROTL_IOPS (10) |
28 | #define DFL_LATENCY_TARGET (-1L) | ||
29 | #define DFL_IDLE_THRESHOLD (0) | ||
30 | 30 | ||
31 | #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT) | 31 | #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT) |
32 | 32 | ||
@@ -157,6 +157,7 @@ struct throtl_grp { | |||
157 | unsigned long last_check_time; | 157 | unsigned long last_check_time; |
158 | 158 | ||
159 | unsigned long latency_target; /* us */ | 159 | unsigned long latency_target; /* us */ |
160 | unsigned long latency_target_conf; /* us */ | ||
160 | /* When did we start a new slice */ | 161 | /* When did we start a new slice */ |
161 | unsigned long slice_start[2]; | 162 | unsigned long slice_start[2]; |
162 | unsigned long slice_end[2]; | 163 | unsigned long slice_end[2]; |
@@ -165,6 +166,7 @@ struct throtl_grp { | |||
165 | unsigned long checked_last_finish_time; /* ns / 1024 */ | 166 | unsigned long checked_last_finish_time; /* ns / 1024 */ |
166 | unsigned long avg_idletime; /* ns / 1024 */ | 167 | unsigned long avg_idletime; /* ns / 1024 */ |
167 | unsigned long idletime_threshold; /* us */ | 168 | unsigned long idletime_threshold; /* us */ |
169 | unsigned long idletime_threshold_conf; /* us */ | ||
168 | 170 | ||
169 | unsigned int bio_cnt; /* total bios */ | 171 | unsigned int bio_cnt; /* total bios */ |
170 | unsigned int bad_bio_cnt; /* bios exceeding latency threshold */ | 172 | unsigned int bad_bio_cnt; /* bios exceeding latency threshold */ |
@@ -201,8 +203,6 @@ struct throtl_data | |||
201 | unsigned int limit_index; | 203 | unsigned int limit_index; |
202 | bool limit_valid[LIMIT_CNT]; | 204 | bool limit_valid[LIMIT_CNT]; |
203 | 205 | ||
204 | unsigned long dft_idletime_threshold; /* us */ | ||
205 | |||
206 | unsigned long low_upgrade_time; | 206 | unsigned long low_upgrade_time; |
207 | unsigned long low_downgrade_time; | 207 | unsigned long low_downgrade_time; |
208 | 208 | ||
@@ -294,8 +294,14 @@ static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw) | |||
294 | 294 | ||
295 | td = tg->td; | 295 | td = tg->td; |
296 | ret = tg->bps[rw][td->limit_index]; | 296 | ret = tg->bps[rw][td->limit_index]; |
297 | if (ret == 0 && td->limit_index == LIMIT_LOW) | 297 | if (ret == 0 && td->limit_index == LIMIT_LOW) { |
298 | return tg->bps[rw][LIMIT_MAX]; | 298 | /* intermediate node or iops isn't 0 */ |
299 | if (!list_empty(&blkg->blkcg->css.children) || | ||
300 | tg->iops[rw][td->limit_index]) | ||
301 | return U64_MAX; | ||
302 | else | ||
303 | return MIN_THROTL_BPS; | ||
304 | } | ||
299 | 305 | ||
300 | if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && | 306 | if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && |
301 | tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) { | 307 | tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) { |
@@ -315,10 +321,17 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) | |||
315 | 321 | ||
316 | if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) | 322 | if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) |
317 | return UINT_MAX; | 323 | return UINT_MAX; |
324 | |||
318 | td = tg->td; | 325 | td = tg->td; |
319 | ret = tg->iops[rw][td->limit_index]; | 326 | ret = tg->iops[rw][td->limit_index]; |
320 | if (ret == 0 && tg->td->limit_index == LIMIT_LOW) | 327 | if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { |
321 | return tg->iops[rw][LIMIT_MAX]; | 328 | /* intermediate node or bps isn't 0 */ |
329 | if (!list_empty(&blkg->blkcg->css.children) || | ||
330 | tg->bps[rw][td->limit_index]) | ||
331 | return UINT_MAX; | ||
332 | else | ||
333 | return MIN_THROTL_IOPS; | ||
334 | } | ||
322 | 335 | ||
323 | if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && | 336 | if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && |
324 | tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) { | 337 | tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) { |
@@ -482,6 +495,9 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node) | |||
482 | /* LIMIT_LOW will have default value 0 */ | 495 | /* LIMIT_LOW will have default value 0 */ |
483 | 496 | ||
484 | tg->latency_target = DFL_LATENCY_TARGET; | 497 | tg->latency_target = DFL_LATENCY_TARGET; |
498 | tg->latency_target_conf = DFL_LATENCY_TARGET; | ||
499 | tg->idletime_threshold = DFL_IDLE_THRESHOLD; | ||
500 | tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD; | ||
485 | 501 | ||
486 | return &tg->pd; | 502 | return &tg->pd; |
487 | } | 503 | } |
@@ -510,8 +526,6 @@ static void throtl_pd_init(struct blkg_policy_data *pd) | |||
510 | if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) | 526 | if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) |
511 | sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; | 527 | sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; |
512 | tg->td = td; | 528 | tg->td = td; |
513 | |||
514 | tg->idletime_threshold = td->dft_idletime_threshold; | ||
515 | } | 529 | } |
516 | 530 | ||
517 | /* | 531 | /* |
@@ -1349,7 +1363,7 @@ static int tg_print_conf_uint(struct seq_file *sf, void *v) | |||
1349 | return 0; | 1363 | return 0; |
1350 | } | 1364 | } |
1351 | 1365 | ||
1352 | static void tg_conf_updated(struct throtl_grp *tg) | 1366 | static void tg_conf_updated(struct throtl_grp *tg, bool global) |
1353 | { | 1367 | { |
1354 | struct throtl_service_queue *sq = &tg->service_queue; | 1368 | struct throtl_service_queue *sq = &tg->service_queue; |
1355 | struct cgroup_subsys_state *pos_css; | 1369 | struct cgroup_subsys_state *pos_css; |
@@ -1367,8 +1381,26 @@ static void tg_conf_updated(struct throtl_grp *tg) | |||
1367 | * restrictions in the whole hierarchy and allows them to bypass | 1381 | * restrictions in the whole hierarchy and allows them to bypass |
1368 | * blk-throttle. | 1382 | * blk-throttle. |
1369 | */ | 1383 | */ |
1370 | blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg)) | 1384 | blkg_for_each_descendant_pre(blkg, pos_css, |
1371 | tg_update_has_rules(blkg_to_tg(blkg)); | 1385 | global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { |
1386 | struct throtl_grp *this_tg = blkg_to_tg(blkg); | ||
1387 | struct throtl_grp *parent_tg; | ||
1388 | |||
1389 | tg_update_has_rules(this_tg); | ||
1390 | /* ignore root/second level */ | ||
1391 | if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent || | ||
1392 | !blkg->parent->parent) | ||
1393 | continue; | ||
1394 | parent_tg = blkg_to_tg(blkg->parent); | ||
1395 | /* | ||
1396 | * make sure all children has lower idle time threshold and | ||
1397 | * higher latency target | ||
1398 | */ | ||
1399 | this_tg->idletime_threshold = min(this_tg->idletime_threshold, | ||
1400 | parent_tg->idletime_threshold); | ||
1401 | this_tg->latency_target = max(this_tg->latency_target, | ||
1402 | parent_tg->latency_target); | ||
1403 | } | ||
1372 | 1404 | ||
1373 | /* | 1405 | /* |
1374 | * We're already holding queue_lock and know @tg is valid. Let's | 1406 | * We're already holding queue_lock and know @tg is valid. Let's |
@@ -1413,7 +1445,7 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of, | |||
1413 | else | 1445 | else |
1414 | *(unsigned int *)((void *)tg + of_cft(of)->private) = v; | 1446 | *(unsigned int *)((void *)tg + of_cft(of)->private) = v; |
1415 | 1447 | ||
1416 | tg_conf_updated(tg); | 1448 | tg_conf_updated(tg, false); |
1417 | ret = 0; | 1449 | ret = 0; |
1418 | out_finish: | 1450 | out_finish: |
1419 | blkg_conf_finish(&ctx); | 1451 | blkg_conf_finish(&ctx); |
@@ -1497,34 +1529,34 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd, | |||
1497 | tg->iops_conf[READ][off] == iops_dft && | 1529 | tg->iops_conf[READ][off] == iops_dft && |
1498 | tg->iops_conf[WRITE][off] == iops_dft && | 1530 | tg->iops_conf[WRITE][off] == iops_dft && |
1499 | (off != LIMIT_LOW || | 1531 | (off != LIMIT_LOW || |
1500 | (tg->idletime_threshold == tg->td->dft_idletime_threshold && | 1532 | (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD && |
1501 | tg->latency_target == DFL_LATENCY_TARGET))) | 1533 | tg->latency_target_conf == DFL_LATENCY_TARGET))) |
1502 | return 0; | 1534 | return 0; |
1503 | 1535 | ||
1504 | if (tg->bps_conf[READ][off] != bps_dft) | 1536 | if (tg->bps_conf[READ][off] != U64_MAX) |
1505 | snprintf(bufs[0], sizeof(bufs[0]), "%llu", | 1537 | snprintf(bufs[0], sizeof(bufs[0]), "%llu", |
1506 | tg->bps_conf[READ][off]); | 1538 | tg->bps_conf[READ][off]); |
1507 | if (tg->bps_conf[WRITE][off] != bps_dft) | 1539 | if (tg->bps_conf[WRITE][off] != U64_MAX) |
1508 | snprintf(bufs[1], sizeof(bufs[1]), "%llu", | 1540 | snprintf(bufs[1], sizeof(bufs[1]), "%llu", |
1509 | tg->bps_conf[WRITE][off]); | 1541 | tg->bps_conf[WRITE][off]); |
1510 | if (tg->iops_conf[READ][off] != iops_dft) | 1542 | if (tg->iops_conf[READ][off] != UINT_MAX) |
1511 | snprintf(bufs[2], sizeof(bufs[2]), "%u", | 1543 | snprintf(bufs[2], sizeof(bufs[2]), "%u", |
1512 | tg->iops_conf[READ][off]); | 1544 | tg->iops_conf[READ][off]); |
1513 | if (tg->iops_conf[WRITE][off] != iops_dft) | 1545 | if (tg->iops_conf[WRITE][off] != UINT_MAX) |
1514 | snprintf(bufs[3], sizeof(bufs[3]), "%u", | 1546 | snprintf(bufs[3], sizeof(bufs[3]), "%u", |
1515 | tg->iops_conf[WRITE][off]); | 1547 | tg->iops_conf[WRITE][off]); |
1516 | if (off == LIMIT_LOW) { | 1548 | if (off == LIMIT_LOW) { |
1517 | if (tg->idletime_threshold == ULONG_MAX) | 1549 | if (tg->idletime_threshold_conf == ULONG_MAX) |
1518 | strcpy(idle_time, " idle=max"); | 1550 | strcpy(idle_time, " idle=max"); |
1519 | else | 1551 | else |
1520 | snprintf(idle_time, sizeof(idle_time), " idle=%lu", | 1552 | snprintf(idle_time, sizeof(idle_time), " idle=%lu", |
1521 | tg->idletime_threshold); | 1553 | tg->idletime_threshold_conf); |
1522 | 1554 | ||
1523 | if (tg->latency_target == ULONG_MAX) | 1555 | if (tg->latency_target_conf == ULONG_MAX) |
1524 | strcpy(latency_time, " latency=max"); | 1556 | strcpy(latency_time, " latency=max"); |
1525 | else | 1557 | else |
1526 | snprintf(latency_time, sizeof(latency_time), | 1558 | snprintf(latency_time, sizeof(latency_time), |
1527 | " latency=%lu", tg->latency_target); | 1559 | " latency=%lu", tg->latency_target_conf); |
1528 | } | 1560 | } |
1529 | 1561 | ||
1530 | seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n", | 1562 | seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n", |
@@ -1563,8 +1595,8 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, | |||
1563 | v[2] = tg->iops_conf[READ][index]; | 1595 | v[2] = tg->iops_conf[READ][index]; |
1564 | v[3] = tg->iops_conf[WRITE][index]; | 1596 | v[3] = tg->iops_conf[WRITE][index]; |
1565 | 1597 | ||
1566 | idle_time = tg->idletime_threshold; | 1598 | idle_time = tg->idletime_threshold_conf; |
1567 | latency_time = tg->latency_target; | 1599 | latency_time = tg->latency_target_conf; |
1568 | while (true) { | 1600 | while (true) { |
1569 | char tok[27]; /* wiops=18446744073709551616 */ | 1601 | char tok[27]; /* wiops=18446744073709551616 */ |
1570 | char *p; | 1602 | char *p; |
@@ -1623,17 +1655,33 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, | |||
1623 | tg->iops_conf[READ][LIMIT_MAX]); | 1655 | tg->iops_conf[READ][LIMIT_MAX]); |
1624 | tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], | 1656 | tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], |
1625 | tg->iops_conf[WRITE][LIMIT_MAX]); | 1657 | tg->iops_conf[WRITE][LIMIT_MAX]); |
1658 | tg->idletime_threshold_conf = idle_time; | ||
1659 | tg->latency_target_conf = latency_time; | ||
1660 | |||
1661 | /* force user to configure all settings for low limit */ | ||
1662 | if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] || | ||
1663 | tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || | ||
1664 | tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD || | ||
1665 | tg->latency_target_conf == DFL_LATENCY_TARGET) { | ||
1666 | tg->bps[READ][LIMIT_LOW] = 0; | ||
1667 | tg->bps[WRITE][LIMIT_LOW] = 0; | ||
1668 | tg->iops[READ][LIMIT_LOW] = 0; | ||
1669 | tg->iops[WRITE][LIMIT_LOW] = 0; | ||
1670 | tg->idletime_threshold = DFL_IDLE_THRESHOLD; | ||
1671 | tg->latency_target = DFL_LATENCY_TARGET; | ||
1672 | } else if (index == LIMIT_LOW) { | ||
1673 | tg->idletime_threshold = tg->idletime_threshold_conf; | ||
1674 | tg->latency_target = tg->latency_target_conf; | ||
1675 | } | ||
1626 | 1676 | ||
1627 | if (index == LIMIT_LOW) { | 1677 | blk_throtl_update_limit_valid(tg->td); |
1628 | blk_throtl_update_limit_valid(tg->td); | 1678 | if (tg->td->limit_valid[LIMIT_LOW]) { |
1629 | if (tg->td->limit_valid[LIMIT_LOW]) | 1679 | if (index == LIMIT_LOW) |
1630 | tg->td->limit_index = LIMIT_LOW; | 1680 | tg->td->limit_index = LIMIT_LOW; |
1631 | tg->idletime_threshold = (idle_time == ULONG_MAX) ? | 1681 | } else |
1632 | ULONG_MAX : idle_time; | 1682 | tg->td->limit_index = LIMIT_MAX; |
1633 | tg->latency_target = (latency_time == ULONG_MAX) ? | 1683 | tg_conf_updated(tg, index == LIMIT_LOW && |
1634 | ULONG_MAX : latency_time; | 1684 | tg->td->limit_valid[LIMIT_LOW]); |
1635 | } | ||
1636 | tg_conf_updated(tg); | ||
1637 | ret = 0; | 1685 | ret = 0; |
1638 | out_finish: | 1686 | out_finish: |
1639 | blkg_conf_finish(&ctx); | 1687 | blkg_conf_finish(&ctx); |
@@ -1722,17 +1770,25 @@ static bool throtl_tg_is_idle(struct throtl_grp *tg) | |||
1722 | /* | 1770 | /* |
1723 | * cgroup is idle if: | 1771 | * cgroup is idle if: |
1724 | * - single idle is too long, longer than a fixed value (in case user | 1772 | * - single idle is too long, longer than a fixed value (in case user |
1725 | * configure a too big threshold) or 4 times of slice | 1773 | * configure a too big threshold) or 4 times of idletime threshold |
1726 | * - average think time is more than threshold | 1774 | * - average think time is more than threshold |
1727 | * - IO latency is largely below threshold | 1775 | * - IO latency is largely below threshold |
1728 | */ | 1776 | */ |
1729 | unsigned long time = jiffies_to_usecs(4 * tg->td->throtl_slice); | 1777 | unsigned long time; |
1730 | 1778 | bool ret; | |
1731 | time = min_t(unsigned long, MAX_IDLE_TIME, time); | 1779 | |
1732 | return (ktime_get_ns() >> 10) - tg->last_finish_time > time || | 1780 | time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold); |
1733 | tg->avg_idletime > tg->idletime_threshold || | 1781 | ret = tg->latency_target == DFL_LATENCY_TARGET || |
1734 | (tg->latency_target && tg->bio_cnt && | 1782 | tg->idletime_threshold == DFL_IDLE_THRESHOLD || |
1783 | (ktime_get_ns() >> 10) - tg->last_finish_time > time || | ||
1784 | tg->avg_idletime > tg->idletime_threshold || | ||
1785 | (tg->latency_target && tg->bio_cnt && | ||
1735 | tg->bad_bio_cnt * 5 < tg->bio_cnt); | 1786 | tg->bad_bio_cnt * 5 < tg->bio_cnt); |
1787 | throtl_log(&tg->service_queue, | ||
1788 | "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d", | ||
1789 | tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt, | ||
1790 | tg->bio_cnt, ret, tg->td->scale); | ||
1791 | return ret; | ||
1736 | } | 1792 | } |
1737 | 1793 | ||
1738 | static bool throtl_tg_can_upgrade(struct throtl_grp *tg) | 1794 | static bool throtl_tg_can_upgrade(struct throtl_grp *tg) |
@@ -1828,6 +1884,7 @@ static void throtl_upgrade_state(struct throtl_data *td) | |||
1828 | struct cgroup_subsys_state *pos_css; | 1884 | struct cgroup_subsys_state *pos_css; |
1829 | struct blkcg_gq *blkg; | 1885 | struct blkcg_gq *blkg; |
1830 | 1886 | ||
1887 | throtl_log(&td->service_queue, "upgrade to max"); | ||
1831 | td->limit_index = LIMIT_MAX; | 1888 | td->limit_index = LIMIT_MAX; |
1832 | td->low_upgrade_time = jiffies; | 1889 | td->low_upgrade_time = jiffies; |
1833 | td->scale = 0; | 1890 | td->scale = 0; |
@@ -1850,6 +1907,7 @@ static void throtl_downgrade_state(struct throtl_data *td, int new) | |||
1850 | { | 1907 | { |
1851 | td->scale /= 2; | 1908 | td->scale /= 2; |
1852 | 1909 | ||
1910 | throtl_log(&td->service_queue, "downgrade, scale %d", td->scale); | ||
1853 | if (td->scale) { | 1911 | if (td->scale) { |
1854 | td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; | 1912 | td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; |
1855 | return; | 1913 | return; |
@@ -2023,6 +2081,11 @@ static void throtl_update_latency_buckets(struct throtl_data *td) | |||
2023 | td->avg_buckets[i].valid = true; | 2081 | td->avg_buckets[i].valid = true; |
2024 | last_latency = td->avg_buckets[i].latency; | 2082 | last_latency = td->avg_buckets[i].latency; |
2025 | } | 2083 | } |
2084 | |||
2085 | for (i = 0; i < LATENCY_BUCKET_SIZE; i++) | ||
2086 | throtl_log(&td->service_queue, | ||
2087 | "Latency bucket %d: latency=%ld, valid=%d", i, | ||
2088 | td->avg_buckets[i].latency, td->avg_buckets[i].valid); | ||
2026 | } | 2089 | } |
2027 | #else | 2090 | #else |
2028 | static inline void throtl_update_latency_buckets(struct throtl_data *td) | 2091 | static inline void throtl_update_latency_buckets(struct throtl_data *td) |
@@ -2354,19 +2417,14 @@ void blk_throtl_exit(struct request_queue *q) | |||
2354 | void blk_throtl_register_queue(struct request_queue *q) | 2417 | void blk_throtl_register_queue(struct request_queue *q) |
2355 | { | 2418 | { |
2356 | struct throtl_data *td; | 2419 | struct throtl_data *td; |
2357 | struct cgroup_subsys_state *pos_css; | ||
2358 | struct blkcg_gq *blkg; | ||
2359 | 2420 | ||
2360 | td = q->td; | 2421 | td = q->td; |
2361 | BUG_ON(!td); | 2422 | BUG_ON(!td); |
2362 | 2423 | ||
2363 | if (blk_queue_nonrot(q)) { | 2424 | if (blk_queue_nonrot(q)) |
2364 | td->throtl_slice = DFL_THROTL_SLICE_SSD; | 2425 | td->throtl_slice = DFL_THROTL_SLICE_SSD; |
2365 | td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_SSD; | 2426 | else |
2366 | } else { | ||
2367 | td->throtl_slice = DFL_THROTL_SLICE_HD; | 2427 | td->throtl_slice = DFL_THROTL_SLICE_HD; |
2368 | td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_HD; | ||
2369 | } | ||
2370 | #ifndef CONFIG_BLK_DEV_THROTTLING_LOW | 2428 | #ifndef CONFIG_BLK_DEV_THROTTLING_LOW |
2371 | /* if no low limit, use previous default */ | 2429 | /* if no low limit, use previous default */ |
2372 | td->throtl_slice = DFL_THROTL_SLICE_HD; | 2430 | td->throtl_slice = DFL_THROTL_SLICE_HD; |
@@ -2375,18 +2433,6 @@ void blk_throtl_register_queue(struct request_queue *q) | |||
2375 | td->track_bio_latency = !q->mq_ops && !q->request_fn; | 2433 | td->track_bio_latency = !q->mq_ops && !q->request_fn; |
2376 | if (!td->track_bio_latency) | 2434 | if (!td->track_bio_latency) |
2377 | blk_stat_enable_accounting(q); | 2435 | blk_stat_enable_accounting(q); |
2378 | |||
2379 | /* | ||
2380 | * some tg are created before queue is fully initialized, eg, nonrot | ||
2381 | * isn't initialized yet | ||
2382 | */ | ||
2383 | rcu_read_lock(); | ||
2384 | blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) { | ||
2385 | struct throtl_grp *tg = blkg_to_tg(blkg); | ||
2386 | |||
2387 | tg->idletime_threshold = td->dft_idletime_threshold; | ||
2388 | } | ||
2389 | rcu_read_unlock(); | ||
2390 | } | 2436 | } |
2391 | 2437 | ||
2392 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW | 2438 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
diff --git a/block/partition-generic.c b/block/partition-generic.c index ff07b9143ca4..c5ec8246e25e 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c | |||
@@ -320,8 +320,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, | |||
320 | 320 | ||
321 | if (info) { | 321 | if (info) { |
322 | struct partition_meta_info *pinfo = alloc_part_info(disk); | 322 | struct partition_meta_info *pinfo = alloc_part_info(disk); |
323 | if (!pinfo) | 323 | if (!pinfo) { |
324 | err = -ENOMEM; | ||
324 | goto out_free_stats; | 325 | goto out_free_stats; |
326 | } | ||
325 | memcpy(pinfo, info, sizeof(*info)); | 327 | memcpy(pinfo, info, sizeof(*info)); |
326 | p->info = pinfo; | 328 | p->info = pinfo; |
327 | } | 329 | } |
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c index 93e7c1b32edd..5610cd537da7 100644 --- a/block/partitions/msdos.c +++ b/block/partitions/msdos.c | |||
@@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state, | |||
300 | continue; | 300 | continue; |
301 | bsd_start = le32_to_cpu(p->p_offset); | 301 | bsd_start = le32_to_cpu(p->p_offset); |
302 | bsd_size = le32_to_cpu(p->p_size); | 302 | bsd_size = le32_to_cpu(p->p_size); |
303 | if (memcmp(flavour, "bsd\0", 4) == 0) | ||
304 | bsd_start += offset; | ||
303 | if (offset == bsd_start && size == bsd_size) | 305 | if (offset == bsd_start && size == bsd_size) |
304 | /* full parent partition, we have it already */ | 306 | /* full parent partition, we have it already */ |
305 | continue; | 307 | continue; |
diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 014af741fc6a..4faa0fd53b0c 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c | |||
@@ -764,6 +764,44 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) | |||
764 | return 0; | 764 | return 0; |
765 | } | 765 | } |
766 | 766 | ||
767 | static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm, | ||
768 | const u8 *key, unsigned int keylen) | ||
769 | { | ||
770 | unsigned long alignmask = crypto_skcipher_alignmask(tfm); | ||
771 | struct skcipher_alg *cipher = crypto_skcipher_alg(tfm); | ||
772 | u8 *buffer, *alignbuffer; | ||
773 | unsigned long absize; | ||
774 | int ret; | ||
775 | |||
776 | absize = keylen + alignmask; | ||
777 | buffer = kmalloc(absize, GFP_ATOMIC); | ||
778 | if (!buffer) | ||
779 | return -ENOMEM; | ||
780 | |||
781 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | ||
782 | memcpy(alignbuffer, key, keylen); | ||
783 | ret = cipher->setkey(tfm, alignbuffer, keylen); | ||
784 | kzfree(buffer); | ||
785 | return ret; | ||
786 | } | ||
787 | |||
788 | static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, | ||
789 | unsigned int keylen) | ||
790 | { | ||
791 | struct skcipher_alg *cipher = crypto_skcipher_alg(tfm); | ||
792 | unsigned long alignmask = crypto_skcipher_alignmask(tfm); | ||
793 | |||
794 | if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { | ||
795 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
796 | return -EINVAL; | ||
797 | } | ||
798 | |||
799 | if ((unsigned long)key & alignmask) | ||
800 | return skcipher_setkey_unaligned(tfm, key, keylen); | ||
801 | |||
802 | return cipher->setkey(tfm, key, keylen); | ||
803 | } | ||
804 | |||
767 | static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) | 805 | static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) |
768 | { | 806 | { |
769 | struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); | 807 | struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); |
@@ -784,7 +822,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) | |||
784 | tfm->__crt_alg->cra_type == &crypto_givcipher_type) | 822 | tfm->__crt_alg->cra_type == &crypto_givcipher_type) |
785 | return crypto_init_skcipher_ops_ablkcipher(tfm); | 823 | return crypto_init_skcipher_ops_ablkcipher(tfm); |
786 | 824 | ||
787 | skcipher->setkey = alg->setkey; | 825 | skcipher->setkey = skcipher_setkey; |
788 | skcipher->encrypt = alg->encrypt; | 826 | skcipher->encrypt = alg->encrypt; |
789 | skcipher->decrypt = alg->decrypt; | 827 | skcipher->decrypt = alg->decrypt; |
790 | skcipher->ivsize = alg->ivsize; | 828 | skcipher->ivsize = alg->ivsize; |
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index b7c2a06963d6..25aba9b107dd 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c | |||
@@ -57,6 +57,7 @@ | |||
57 | 57 | ||
58 | #define ACPI_BUTTON_LID_INIT_IGNORE 0x00 | 58 | #define ACPI_BUTTON_LID_INIT_IGNORE 0x00 |
59 | #define ACPI_BUTTON_LID_INIT_OPEN 0x01 | 59 | #define ACPI_BUTTON_LID_INIT_OPEN 0x01 |
60 | #define ACPI_BUTTON_LID_INIT_METHOD 0x02 | ||
60 | 61 | ||
61 | #define _COMPONENT ACPI_BUTTON_COMPONENT | 62 | #define _COMPONENT ACPI_BUTTON_COMPONENT |
62 | ACPI_MODULE_NAME("button"); | 63 | ACPI_MODULE_NAME("button"); |
@@ -376,6 +377,9 @@ static void acpi_lid_initialize_state(struct acpi_device *device) | |||
376 | case ACPI_BUTTON_LID_INIT_OPEN: | 377 | case ACPI_BUTTON_LID_INIT_OPEN: |
377 | (void)acpi_lid_notify_state(device, 1); | 378 | (void)acpi_lid_notify_state(device, 1); |
378 | break; | 379 | break; |
380 | case ACPI_BUTTON_LID_INIT_METHOD: | ||
381 | (void)acpi_lid_update_state(device); | ||
382 | break; | ||
379 | case ACPI_BUTTON_LID_INIT_IGNORE: | 383 | case ACPI_BUTTON_LID_INIT_IGNORE: |
380 | default: | 384 | default: |
381 | break; | 385 | break; |
@@ -560,6 +564,9 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp) | |||
560 | if (!strncmp(val, "open", sizeof("open") - 1)) { | 564 | if (!strncmp(val, "open", sizeof("open") - 1)) { |
561 | lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; | 565 | lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; |
562 | pr_info("Notify initial lid state as open\n"); | 566 | pr_info("Notify initial lid state as open\n"); |
567 | } else if (!strncmp(val, "method", sizeof("method") - 1)) { | ||
568 | lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; | ||
569 | pr_info("Notify initial lid state with _LID return value\n"); | ||
563 | } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) { | 570 | } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) { |
564 | lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE; | 571 | lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE; |
565 | pr_info("Do not notify initial lid state\n"); | 572 | pr_info("Do not notify initial lid state\n"); |
@@ -573,6 +580,8 @@ static int param_get_lid_init_state(char *buffer, struct kernel_param *kp) | |||
573 | switch (lid_init_state) { | 580 | switch (lid_init_state) { |
574 | case ACPI_BUTTON_LID_INIT_OPEN: | 581 | case ACPI_BUTTON_LID_INIT_OPEN: |
575 | return sprintf(buffer, "open"); | 582 | return sprintf(buffer, "open"); |
583 | case ACPI_BUTTON_LID_INIT_METHOD: | ||
584 | return sprintf(buffer, "method"); | ||
576 | case ACPI_BUTTON_LID_INIT_IGNORE: | 585 | case ACPI_BUTTON_LID_INIT_IGNORE: |
577 | return sprintf(buffer, "ignore"); | 586 | return sprintf(buffer, "ignore"); |
578 | default: | 587 | default: |
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index 3ba1c3472cf9..fd86bec98dea 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c | |||
@@ -26,7 +26,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, | |||
26 | struct nfit_spa *nfit_spa; | 26 | struct nfit_spa *nfit_spa; |
27 | 27 | ||
28 | /* We only care about memory errors */ | 28 | /* We only care about memory errors */ |
29 | if (!(mce->status & MCACOD)) | 29 | if (!mce_is_memory_error(mce)) |
30 | return NOTIFY_DONE; | 30 | return NOTIFY_DONE; |
31 | 31 | ||
32 | /* | 32 | /* |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index f62082fdd670..9c36b27996fc 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -512,13 +512,12 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws) | |||
512 | /** | 512 | /** |
513 | * wakup_source_activate - Mark given wakeup source as active. | 513 | * wakup_source_activate - Mark given wakeup source as active. |
514 | * @ws: Wakeup source to handle. | 514 | * @ws: Wakeup source to handle. |
515 | * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. | ||
516 | * | 515 | * |
517 | * Update the @ws' statistics and, if @ws has just been activated, notify the PM | 516 | * Update the @ws' statistics and, if @ws has just been activated, notify the PM |
518 | * core of the event by incrementing the counter of of wakeup events being | 517 | * core of the event by incrementing the counter of of wakeup events being |
519 | * processed. | 518 | * processed. |
520 | */ | 519 | */ |
521 | static void wakeup_source_activate(struct wakeup_source *ws, bool hard) | 520 | static void wakeup_source_activate(struct wakeup_source *ws) |
522 | { | 521 | { |
523 | unsigned int cec; | 522 | unsigned int cec; |
524 | 523 | ||
@@ -526,9 +525,6 @@ static void wakeup_source_activate(struct wakeup_source *ws, bool hard) | |||
526 | "unregistered wakeup source\n")) | 525 | "unregistered wakeup source\n")) |
527 | return; | 526 | return; |
528 | 527 | ||
529 | if (hard) | ||
530 | pm_system_wakeup(); | ||
531 | |||
532 | ws->active = true; | 528 | ws->active = true; |
533 | ws->active_count++; | 529 | ws->active_count++; |
534 | ws->last_time = ktime_get(); | 530 | ws->last_time = ktime_get(); |
@@ -554,7 +550,10 @@ static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) | |||
554 | ws->wakeup_count++; | 550 | ws->wakeup_count++; |
555 | 551 | ||
556 | if (!ws->active) | 552 | if (!ws->active) |
557 | wakeup_source_activate(ws, hard); | 553 | wakeup_source_activate(ws); |
554 | |||
555 | if (hard) | ||
556 | pm_system_wakeup(); | ||
558 | } | 557 | } |
559 | 558 | ||
560 | /** | 559 | /** |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index b5730e17b455..656624314f0d 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -315,24 +315,32 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) | |||
315 | } | 315 | } |
316 | 316 | ||
317 | /* still holds resource->req_lock */ | 317 | /* still holds resource->req_lock */ |
318 | static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) | 318 | static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) |
319 | { | 319 | { |
320 | struct drbd_device *device = req->device; | 320 | struct drbd_device *device = req->device; |
321 | D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); | 321 | D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); |
322 | 322 | ||
323 | if (!put) | ||
324 | return; | ||
325 | |||
323 | if (!atomic_sub_and_test(put, &req->completion_ref)) | 326 | if (!atomic_sub_and_test(put, &req->completion_ref)) |
324 | return 0; | 327 | return; |
325 | 328 | ||
326 | drbd_req_complete(req, m); | 329 | drbd_req_complete(req, m); |
327 | 330 | ||
331 | /* local completion may still come in later, | ||
332 | * we need to keep the req object around. */ | ||
333 | if (req->rq_state & RQ_LOCAL_ABORTED) | ||
334 | return; | ||
335 | |||
328 | if (req->rq_state & RQ_POSTPONED) { | 336 | if (req->rq_state & RQ_POSTPONED) { |
329 | /* don't destroy the req object just yet, | 337 | /* don't destroy the req object just yet, |
330 | * but queue it for retry */ | 338 | * but queue it for retry */ |
331 | drbd_restart_request(req); | 339 | drbd_restart_request(req); |
332 | return 0; | 340 | return; |
333 | } | 341 | } |
334 | 342 | ||
335 | return 1; | 343 | kref_put(&req->kref, drbd_req_destroy); |
336 | } | 344 | } |
337 | 345 | ||
338 | static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) | 346 | static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) |
@@ -519,12 +527,8 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, | |||
519 | if (req->i.waiting) | 527 | if (req->i.waiting) |
520 | wake_up(&device->misc_wait); | 528 | wake_up(&device->misc_wait); |
521 | 529 | ||
522 | if (c_put) { | 530 | drbd_req_put_completion_ref(req, m, c_put); |
523 | if (drbd_req_put_completion_ref(req, m, c_put)) | 531 | kref_put(&req->kref, drbd_req_destroy); |
524 | kref_put(&req->kref, drbd_req_destroy); | ||
525 | } else { | ||
526 | kref_put(&req->kref, drbd_req_destroy); | ||
527 | } | ||
528 | } | 532 | } |
529 | 533 | ||
530 | static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) | 534 | static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) |
@@ -1366,8 +1370,7 @@ nodata: | |||
1366 | } | 1370 | } |
1367 | 1371 | ||
1368 | out: | 1372 | out: |
1369 | if (drbd_req_put_completion_ref(req, &m, 1)) | 1373 | drbd_req_put_completion_ref(req, &m, 1); |
1370 | kref_put(&req->kref, drbd_req_destroy); | ||
1371 | spin_unlock_irq(&resource->req_lock); | 1374 | spin_unlock_irq(&resource->req_lock); |
1372 | 1375 | ||
1373 | /* Even though above is a kref_put(), this is safe. | 1376 | /* Even though above is a kref_put(), this is safe. |
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 8fe61b5dc5a6..1f3dfaa54d87 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
@@ -504,11 +504,13 @@ static int xen_blkbk_remove(struct xenbus_device *dev) | |||
504 | 504 | ||
505 | dev_set_drvdata(&dev->dev, NULL); | 505 | dev_set_drvdata(&dev->dev, NULL); |
506 | 506 | ||
507 | if (be->blkif) | 507 | if (be->blkif) { |
508 | xen_blkif_disconnect(be->blkif); | 508 | xen_blkif_disconnect(be->blkif); |
509 | 509 | ||
510 | /* Put the reference we set in xen_blkif_alloc(). */ | 510 | /* Put the reference we set in xen_blkif_alloc(). */ |
511 | xen_blkif_put(be->blkif); | 511 | xen_blkif_put(be->blkif); |
512 | } | ||
513 | |||
512 | kfree(be->mode); | 514 | kfree(be->mode); |
513 | kfree(be); | 515 | kfree(be); |
514 | return 0; | 516 | return 0; |
diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 565e4cf04a02..8249762192d5 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c | |||
@@ -859,7 +859,11 @@ static int __init lp_setup (char *str) | |||
859 | } else if (!strcmp(str, "auto")) { | 859 | } else if (!strcmp(str, "auto")) { |
860 | parport_nr[0] = LP_PARPORT_AUTO; | 860 | parport_nr[0] = LP_PARPORT_AUTO; |
861 | } else if (!strcmp(str, "none")) { | 861 | } else if (!strcmp(str, "none")) { |
862 | parport_nr[parport_ptr++] = LP_PARPORT_NONE; | 862 | if (parport_ptr < LP_NO) |
863 | parport_nr[parport_ptr++] = LP_PARPORT_NONE; | ||
864 | else | ||
865 | printk(KERN_INFO "lp: too many ports, %s ignored.\n", | ||
866 | str); | ||
863 | } else if (!strcmp(str, "reset")) { | 867 | } else if (!strcmp(str, "reset")) { |
864 | reset = 1; | 868 | reset = 1; |
865 | } | 869 | } |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 7e4a9d1296bb..6e0cbe092220 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -340,6 +340,11 @@ static const struct vm_operations_struct mmap_mem_ops = { | |||
340 | static int mmap_mem(struct file *file, struct vm_area_struct *vma) | 340 | static int mmap_mem(struct file *file, struct vm_area_struct *vma) |
341 | { | 341 | { |
342 | size_t size = vma->vm_end - vma->vm_start; | 342 | size_t size = vma->vm_end - vma->vm_start; |
343 | phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; | ||
344 | |||
345 | /* It's illegal to wrap around the end of the physical address space. */ | ||
346 | if (offset + (phys_addr_t)size < offset) | ||
347 | return -EINVAL; | ||
343 | 348 | ||
344 | if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) | 349 | if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) |
345 | return -EINVAL; | 350 | return -EINVAL; |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 74ed7e9a7f27..2011fec2d6ad 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -71,6 +71,15 @@ config ARM_HIGHBANK_CPUFREQ | |||
71 | 71 | ||
72 | If in doubt, say N. | 72 | If in doubt, say N. |
73 | 73 | ||
74 | config ARM_DB8500_CPUFREQ | ||
75 | tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500 | ||
76 | default ARCH_U8500 | ||
77 | depends on HAS_IOMEM | ||
78 | depends on !CPU_THERMAL || THERMAL | ||
79 | help | ||
80 | This adds the CPUFreq driver for ST-Ericsson Ux500 (DB8500) SoC | ||
81 | series. | ||
82 | |||
74 | config ARM_IMX6Q_CPUFREQ | 83 | config ARM_IMX6Q_CPUFREQ |
75 | tristate "Freescale i.MX6 cpufreq support" | 84 | tristate "Freescale i.MX6 cpufreq support" |
76 | depends on ARCH_MXC | 85 | depends on ARCH_MXC |
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index b7e78f063c4f..ab3a42cd29ef 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
@@ -53,7 +53,7 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o | |||
53 | 53 | ||
54 | obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o | 54 | obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o |
55 | obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o | 55 | obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o |
56 | obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o | 56 | obj-$(CONFIG_ARM_DB8500_CPUFREQ) += dbx500-cpufreq.o |
57 | obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o | 57 | obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o |
58 | obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o | 58 | obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o |
59 | obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o | 59 | obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o |
diff --git a/drivers/dax/super.c b/drivers/dax/super.c index ebf43f531ada..6ed32aac8bbe 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c | |||
@@ -44,6 +44,7 @@ void dax_read_unlock(int id) | |||
44 | } | 44 | } |
45 | EXPORT_SYMBOL_GPL(dax_read_unlock); | 45 | EXPORT_SYMBOL_GPL(dax_read_unlock); |
46 | 46 | ||
47 | #ifdef CONFIG_BLOCK | ||
47 | int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, | 48 | int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, |
48 | pgoff_t *pgoff) | 49 | pgoff_t *pgoff) |
49 | { | 50 | { |
@@ -112,6 +113,7 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize) | |||
112 | return 0; | 113 | return 0; |
113 | } | 114 | } |
114 | EXPORT_SYMBOL_GPL(__bdev_dax_supported); | 115 | EXPORT_SYMBOL_GPL(__bdev_dax_supported); |
116 | #endif | ||
115 | 117 | ||
116 | /** | 118 | /** |
117 | * struct dax_device - anchor object for dax services | 119 | * struct dax_device - anchor object for dax services |
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 82dab1692264..3aea55698165 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -782,24 +782,26 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) | |||
782 | 782 | ||
783 | static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) | 783 | static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) |
784 | { | 784 | { |
785 | u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; | 785 | int dimm, size0, size1, cs0, cs1; |
786 | int dimm, size0, size1; | ||
787 | 786 | ||
788 | edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); | 787 | edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); |
789 | 788 | ||
790 | for (dimm = 0; dimm < 4; dimm++) { | 789 | for (dimm = 0; dimm < 4; dimm++) { |
791 | size0 = 0; | 790 | size0 = 0; |
791 | cs0 = dimm * 2; | ||
792 | 792 | ||
793 | if (dcsb[dimm*2] & DCSB_CS_ENABLE) | 793 | if (csrow_enabled(cs0, ctrl, pvt)) |
794 | size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); | 794 | size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0); |
795 | 795 | ||
796 | size1 = 0; | 796 | size1 = 0; |
797 | if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) | 797 | cs1 = dimm * 2 + 1; |
798 | size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); | 798 | |
799 | if (csrow_enabled(cs1, ctrl, pvt)) | ||
800 | size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1); | ||
799 | 801 | ||
800 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", | 802 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", |
801 | dimm * 2, size0, | 803 | cs0, size0, |
802 | dimm * 2 + 1, size1); | 804 | cs1, size1); |
803 | } | 805 | } |
804 | } | 806 | } |
805 | 807 | ||
@@ -2756,26 +2758,22 @@ skip: | |||
2756 | * encompasses | 2758 | * encompasses |
2757 | * | 2759 | * |
2758 | */ | 2760 | */ |
2759 | static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) | 2761 | static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig) |
2760 | { | 2762 | { |
2761 | u32 cs_mode, nr_pages; | ||
2762 | u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; | 2763 | u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; |
2764 | int csrow_nr = csrow_nr_orig; | ||
2765 | u32 cs_mode, nr_pages; | ||
2763 | 2766 | ||
2767 | if (!pvt->umc) | ||
2768 | csrow_nr >>= 1; | ||
2764 | 2769 | ||
2765 | /* | 2770 | cs_mode = DBAM_DIMM(csrow_nr, dbam); |
2766 | * The math on this doesn't look right on the surface because x/2*4 can | ||
2767 | * be simplified to x*2 but this expression makes use of the fact that | ||
2768 | * it is integral math where 1/2=0. This intermediate value becomes the | ||
2769 | * number of bits to shift the DBAM register to extract the proper CSROW | ||
2770 | * field. | ||
2771 | */ | ||
2772 | cs_mode = DBAM_DIMM(csrow_nr / 2, dbam); | ||
2773 | 2771 | ||
2774 | nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2)) | 2772 | nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr); |
2775 | << (20 - PAGE_SHIFT); | 2773 | nr_pages <<= 20 - PAGE_SHIFT; |
2776 | 2774 | ||
2777 | edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", | 2775 | edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", |
2778 | csrow_nr, dct, cs_mode); | 2776 | csrow_nr_orig, dct, cs_mode); |
2779 | edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); | 2777 | edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); |
2780 | 2778 | ||
2781 | return nr_pages; | 2779 | return nr_pages; |
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index ed3137c1ceb0..ef1fafdad400 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c | |||
@@ -53,6 +53,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, | |||
53 | if (sscanf(name, "dump-type%u-%u-%d-%lu-%c", | 53 | if (sscanf(name, "dump-type%u-%u-%d-%lu-%c", |
54 | &record->type, &part, &cnt, &time, &data_type) == 5) { | 54 | &record->type, &part, &cnt, &time, &data_type) == 5) { |
55 | record->id = generic_id(time, part, cnt); | 55 | record->id = generic_id(time, part, cnt); |
56 | record->part = part; | ||
56 | record->count = cnt; | 57 | record->count = cnt; |
57 | record->time.tv_sec = time; | 58 | record->time.tv_sec = time; |
58 | record->time.tv_nsec = 0; | 59 | record->time.tv_nsec = 0; |
@@ -64,6 +65,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, | |||
64 | } else if (sscanf(name, "dump-type%u-%u-%d-%lu", | 65 | } else if (sscanf(name, "dump-type%u-%u-%d-%lu", |
65 | &record->type, &part, &cnt, &time) == 4) { | 66 | &record->type, &part, &cnt, &time) == 4) { |
66 | record->id = generic_id(time, part, cnt); | 67 | record->id = generic_id(time, part, cnt); |
68 | record->part = part; | ||
67 | record->count = cnt; | 69 | record->count = cnt; |
68 | record->time.tv_sec = time; | 70 | record->time.tv_sec = time; |
69 | record->time.tv_nsec = 0; | 71 | record->time.tv_nsec = 0; |
@@ -77,6 +79,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, | |||
77 | * multiple logs, remains. | 79 | * multiple logs, remains. |
78 | */ | 80 | */ |
79 | record->id = generic_id(time, part, 0); | 81 | record->id = generic_id(time, part, 0); |
82 | record->part = part; | ||
80 | record->count = 0; | 83 | record->count = 0; |
81 | record->time.tv_sec = time; | 84 | record->time.tv_sec = time; |
82 | record->time.tv_nsec = 0; | 85 | record->time.tv_nsec = 0; |
@@ -155,19 +158,14 @@ static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos, | |||
155 | * efi_pstore_sysfs_entry_iter | 158 | * efi_pstore_sysfs_entry_iter |
156 | * | 159 | * |
157 | * @record: pstore record to pass to callback | 160 | * @record: pstore record to pass to callback |
158 | * @pos: entry to begin iterating from | ||
159 | * | 161 | * |
160 | * You MUST call efivar_enter_iter_begin() before this function, and | 162 | * You MUST call efivar_enter_iter_begin() before this function, and |
161 | * efivar_entry_iter_end() afterwards. | 163 | * efivar_entry_iter_end() afterwards. |
162 | * | 164 | * |
163 | * It is possible to begin iteration from an arbitrary entry within | ||
164 | * the list by passing @pos. @pos is updated on return to point to | ||
165 | * the next entry of the last one passed to efi_pstore_read_func(). | ||
166 | * To begin iterating from the beginning of the list @pos must be %NULL. | ||
167 | */ | 165 | */ |
168 | static int efi_pstore_sysfs_entry_iter(struct pstore_record *record, | 166 | static int efi_pstore_sysfs_entry_iter(struct pstore_record *record) |
169 | struct efivar_entry **pos) | ||
170 | { | 167 | { |
168 | struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data; | ||
171 | struct efivar_entry *entry, *n; | 169 | struct efivar_entry *entry, *n; |
172 | struct list_head *head = &efivar_sysfs_list; | 170 | struct list_head *head = &efivar_sysfs_list; |
173 | int size = 0; | 171 | int size = 0; |
@@ -218,7 +216,6 @@ static int efi_pstore_sysfs_entry_iter(struct pstore_record *record, | |||
218 | */ | 216 | */ |
219 | static ssize_t efi_pstore_read(struct pstore_record *record) | 217 | static ssize_t efi_pstore_read(struct pstore_record *record) |
220 | { | 218 | { |
221 | struct efivar_entry *entry = (struct efivar_entry *)record->psi->data; | ||
222 | ssize_t size; | 219 | ssize_t size; |
223 | 220 | ||
224 | record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); | 221 | record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); |
@@ -229,7 +226,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record) | |||
229 | size = -EINTR; | 226 | size = -EINTR; |
230 | goto out; | 227 | goto out; |
231 | } | 228 | } |
232 | size = efi_pstore_sysfs_entry_iter(record, &entry); | 229 | size = efi_pstore_sysfs_entry_iter(record); |
233 | efivar_entry_iter_end(); | 230 | efivar_entry_iter_end(); |
234 | 231 | ||
235 | out: | 232 | out: |
@@ -247,9 +244,15 @@ static int efi_pstore_write(struct pstore_record *record) | |||
247 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; | 244 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; |
248 | int i, ret = 0; | 245 | int i, ret = 0; |
249 | 246 | ||
247 | record->time.tv_sec = get_seconds(); | ||
248 | record->time.tv_nsec = 0; | ||
249 | |||
250 | record->id = generic_id(record->time.tv_sec, record->part, | ||
251 | record->count); | ||
252 | |||
250 | snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu-%c", | 253 | snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu-%c", |
251 | record->type, record->part, record->count, | 254 | record->type, record->part, record->count, |
252 | get_seconds(), record->compressed ? 'C' : 'D'); | 255 | record->time.tv_sec, record->compressed ? 'C' : 'D'); |
253 | 256 | ||
254 | for (i = 0; i < DUMP_NAME_LEN; i++) | 257 | for (i = 0; i < DUMP_NAME_LEN; i++) |
255 | efi_name[i] = name[i]; | 258 | efi_name[i] = name[i]; |
@@ -261,7 +264,6 @@ static int efi_pstore_write(struct pstore_record *record) | |||
261 | if (record->reason == KMSG_DUMP_OOPS) | 264 | if (record->reason == KMSG_DUMP_OOPS) |
262 | efivar_run_worker(); | 265 | efivar_run_worker(); |
263 | 266 | ||
264 | record->id = record->part; | ||
265 | return ret; | 267 | return ret; |
266 | }; | 268 | }; |
267 | 269 | ||
@@ -293,7 +295,7 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) | |||
293 | * holding multiple logs, remains. | 295 | * holding multiple logs, remains. |
294 | */ | 296 | */ |
295 | snprintf(name_old, sizeof(name_old), "dump-type%u-%u-%lu", | 297 | snprintf(name_old, sizeof(name_old), "dump-type%u-%u-%lu", |
296 | ed->record->type, (unsigned int)ed->record->id, | 298 | ed->record->type, ed->record->part, |
297 | ed->record->time.tv_sec); | 299 | ed->record->time.tv_sec); |
298 | 300 | ||
299 | for (i = 0; i < DUMP_NAME_LEN; i++) | 301 | for (i = 0; i < DUMP_NAME_LEN; i++) |
@@ -326,10 +328,7 @@ static int efi_pstore_erase(struct pstore_record *record) | |||
326 | char name[DUMP_NAME_LEN]; | 328 | char name[DUMP_NAME_LEN]; |
327 | efi_char16_t efi_name[DUMP_NAME_LEN]; | 329 | efi_char16_t efi_name[DUMP_NAME_LEN]; |
328 | int found, i; | 330 | int found, i; |
329 | unsigned int part; | ||
330 | 331 | ||
331 | do_div(record->id, 1000); | ||
332 | part = do_div(record->id, 100); | ||
333 | snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu", | 332 | snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu", |
334 | record->type, record->part, record->count, | 333 | record->type, record->part, record->count, |
335 | record->time.tv_sec); | 334 | record->time.tv_sec); |
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 3ce813110d5e..1e7860f02f4f 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c | |||
@@ -116,9 +116,13 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len, | |||
116 | return VPD_OK; | 116 | return VPD_OK; |
117 | 117 | ||
118 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 118 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
119 | info->key = kzalloc(key_len + 1, GFP_KERNEL); | 119 | if (!info) |
120 | if (!info->key) | ||
121 | return -ENOMEM; | 120 | return -ENOMEM; |
121 | info->key = kzalloc(key_len + 1, GFP_KERNEL); | ||
122 | if (!info->key) { | ||
123 | ret = -ENOMEM; | ||
124 | goto free_info; | ||
125 | } | ||
122 | 126 | ||
123 | memcpy(info->key, key, key_len); | 127 | memcpy(info->key, key, key_len); |
124 | 128 | ||
@@ -135,12 +139,17 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len, | |||
135 | list_add_tail(&info->list, &sec->attribs); | 139 | list_add_tail(&info->list, &sec->attribs); |
136 | 140 | ||
137 | ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); | 141 | ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); |
138 | if (ret) { | 142 | if (ret) |
139 | kfree(info->key); | 143 | goto free_info_key; |
140 | return ret; | ||
141 | } | ||
142 | 144 | ||
143 | return 0; | 145 | return 0; |
146 | |||
147 | free_info_key: | ||
148 | kfree(info->key); | ||
149 | free_info: | ||
150 | kfree(info); | ||
151 | |||
152 | return ret; | ||
144 | } | 153 | } |
145 | 154 | ||
146 | static void vpd_section_attrib_destroy(struct vpd_section *sec) | 155 | static void vpd_section_attrib_destroy(struct vpd_section *sec) |
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 874ff32db366..00cfed3c3e1a 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c | |||
@@ -202,7 +202,8 @@ static int ti_sci_debugfs_create(struct platform_device *pdev, | |||
202 | info->debug_buffer[info->debug_region_size] = 0; | 202 | info->debug_buffer[info->debug_region_size] = 0; |
203 | 203 | ||
204 | info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), | 204 | info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), |
205 | sizeof(debug_name)), | 205 | sizeof(debug_name) - |
206 | sizeof("ti_sci_debug@")), | ||
206 | 0444, NULL, info, &ti_sci_debug_fops); | 207 | 0444, NULL, info, &ti_sci_debug_fops); |
207 | if (IS_ERR(info->d)) | 208 | if (IS_ERR(info->d)) |
208 | return PTR_ERR(info->d); | 209 | return PTR_ERR(info->d); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 236d9950221b..c0d8c6ff6380 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | |||
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) | |||
425 | 425 | ||
426 | void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) | 426 | void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) |
427 | { | 427 | { |
428 | struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; | 428 | struct amdgpu_fbdev *afbdev; |
429 | struct drm_fb_helper *fb_helper; | 429 | struct drm_fb_helper *fb_helper; |
430 | int ret; | 430 | int ret; |
431 | 431 | ||
432 | if (!adev) | ||
433 | return; | ||
434 | |||
435 | afbdev = adev->mode_info.rfbdev; | ||
436 | |||
432 | if (!afbdev) | 437 | if (!afbdev) |
433 | return; | 438 | return; |
434 | 439 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 07ff3b1514f1..8ecf82c5fe74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) | |||
634 | mutex_unlock(&id_mgr->lock); | 634 | mutex_unlock(&id_mgr->lock); |
635 | } | 635 | } |
636 | 636 | ||
637 | if (gds_switch_needed) { | 637 | if (ring->funcs->emit_gds_switch && gds_switch_needed) { |
638 | id->gds_base = job->gds_base; | 638 | id->gds_base = job->gds_base; |
639 | id->gds_size = job->gds_size; | 639 | id->gds_size = job->gds_size; |
640 | id->gws_base = job->gws_base; | 640 | id->gws_base = job->gws_base; |
@@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, | |||
672 | struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; | 672 | struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; |
673 | struct amdgpu_vm_id *id = &id_mgr->ids[vmid]; | 673 | struct amdgpu_vm_id *id = &id_mgr->ids[vmid]; |
674 | 674 | ||
675 | atomic64_set(&id->owner, 0); | ||
675 | id->gds_base = 0; | 676 | id->gds_base = 0; |
676 | id->gds_size = 0; | 677 | id->gds_size = 0; |
677 | id->gws_base = 0; | 678 | id->gws_base = 0; |
@@ -681,6 +682,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, | |||
681 | } | 682 | } |
682 | 683 | ||
683 | /** | 684 | /** |
685 | * amdgpu_vm_reset_all_id - reset VMID to zero | ||
686 | * | ||
687 | * @adev: amdgpu device structure | ||
688 | * | ||
689 | * Reset VMID to force flush on next use | ||
690 | */ | ||
691 | void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev) | ||
692 | { | ||
693 | unsigned i, j; | ||
694 | |||
695 | for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { | ||
696 | struct amdgpu_vm_id_manager *id_mgr = | ||
697 | &adev->vm_manager.id_mgr[i]; | ||
698 | |||
699 | for (j = 1; j < id_mgr->num_ids; ++j) | ||
700 | amdgpu_vm_reset_id(adev, i, j); | ||
701 | } | ||
702 | } | ||
703 | |||
704 | /** | ||
684 | * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo | 705 | * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo |
685 | * | 706 | * |
686 | * @vm: requested vm | 707 | * @vm: requested vm |
@@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) | |||
2270 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | 2291 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) |
2271 | adev->vm_manager.seqno[i] = 0; | 2292 | adev->vm_manager.seqno[i] = 0; |
2272 | 2293 | ||
2273 | |||
2274 | atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); | 2294 | atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); |
2275 | atomic64_set(&adev->vm_manager.client_counter, 0); | 2295 | atomic64_set(&adev->vm_manager.client_counter, 0); |
2276 | spin_lock_init(&adev->vm_manager.prt_lock); | 2296 | spin_lock_init(&adev->vm_manager.prt_lock); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index d97e28b4bdc4..e1d951ece433 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
@@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
204 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); | 204 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); |
205 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, | 205 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, |
206 | unsigned vmid); | 206 | unsigned vmid); |
207 | void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev); | ||
207 | int amdgpu_vm_update_directories(struct amdgpu_device *adev, | 208 | int amdgpu_vm_update_directories(struct amdgpu_device *adev, |
208 | struct amdgpu_vm *vm); | 209 | struct amdgpu_vm *vm); |
209 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | 210 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 6dc1410b380f..ec93714e4524 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev) | |||
906 | u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); | 906 | u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); |
907 | u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; | 907 | u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; |
908 | 908 | ||
909 | /* disable mclk switching if the refresh is >120Hz, even if the | ||
910 | * blanking period would allow it | ||
911 | */ | ||
912 | if (amdgpu_dpm_get_vrefresh(adev) > 120) | ||
913 | return true; | ||
914 | |||
909 | if (vblank_time < switch_limit) | 915 | if (vblank_time < switch_limit) |
910 | return true; | 916 | return true; |
911 | else | 917 | else |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index a572979f186c..d860939152df 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
@@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle) | |||
950 | { | 950 | { |
951 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 951 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
952 | 952 | ||
953 | if (adev->vm_manager.enabled) { | ||
954 | gmc_v6_0_vm_fini(adev); | ||
955 | adev->vm_manager.enabled = false; | ||
956 | } | ||
957 | gmc_v6_0_hw_fini(adev); | 953 | gmc_v6_0_hw_fini(adev); |
958 | 954 | ||
959 | return 0; | 955 | return 0; |
@@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle) | |||
968 | if (r) | 964 | if (r) |
969 | return r; | 965 | return r; |
970 | 966 | ||
971 | if (!adev->vm_manager.enabled) { | 967 | amdgpu_vm_reset_all_ids(adev); |
972 | r = gmc_v6_0_vm_init(adev); | ||
973 | if (r) { | ||
974 | dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); | ||
975 | return r; | ||
976 | } | ||
977 | adev->vm_manager.enabled = true; | ||
978 | } | ||
979 | 968 | ||
980 | return r; | 969 | return 0; |
981 | } | 970 | } |
982 | 971 | ||
983 | static bool gmc_v6_0_is_idle(void *handle) | 972 | static bool gmc_v6_0_is_idle(void *handle) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index a9083a16a250..2750e5c23813 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle) | |||
1117 | { | 1117 | { |
1118 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1118 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1119 | 1119 | ||
1120 | if (adev->vm_manager.enabled) { | ||
1121 | gmc_v7_0_vm_fini(adev); | ||
1122 | adev->vm_manager.enabled = false; | ||
1123 | } | ||
1124 | gmc_v7_0_hw_fini(adev); | 1120 | gmc_v7_0_hw_fini(adev); |
1125 | 1121 | ||
1126 | return 0; | 1122 | return 0; |
@@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle) | |||
1135 | if (r) | 1131 | if (r) |
1136 | return r; | 1132 | return r; |
1137 | 1133 | ||
1138 | if (!adev->vm_manager.enabled) { | 1134 | amdgpu_vm_reset_all_ids(adev); |
1139 | r = gmc_v7_0_vm_init(adev); | ||
1140 | if (r) { | ||
1141 | dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); | ||
1142 | return r; | ||
1143 | } | ||
1144 | adev->vm_manager.enabled = true; | ||
1145 | } | ||
1146 | 1135 | ||
1147 | return r; | 1136 | return 0; |
1148 | } | 1137 | } |
1149 | 1138 | ||
1150 | static bool gmc_v7_0_is_idle(void *handle) | 1139 | static bool gmc_v7_0_is_idle(void *handle) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 4ac99784160a..f56b4089ee9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle) | |||
1209 | { | 1209 | { |
1210 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1210 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1211 | 1211 | ||
1212 | if (adev->vm_manager.enabled) { | ||
1213 | gmc_v8_0_vm_fini(adev); | ||
1214 | adev->vm_manager.enabled = false; | ||
1215 | } | ||
1216 | gmc_v8_0_hw_fini(adev); | 1212 | gmc_v8_0_hw_fini(adev); |
1217 | 1213 | ||
1218 | return 0; | 1214 | return 0; |
@@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle) | |||
1227 | if (r) | 1223 | if (r) |
1228 | return r; | 1224 | return r; |
1229 | 1225 | ||
1230 | if (!adev->vm_manager.enabled) { | 1226 | amdgpu_vm_reset_all_ids(adev); |
1231 | r = gmc_v8_0_vm_init(adev); | ||
1232 | if (r) { | ||
1233 | dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); | ||
1234 | return r; | ||
1235 | } | ||
1236 | adev->vm_manager.enabled = true; | ||
1237 | } | ||
1238 | 1227 | ||
1239 | return r; | 1228 | return 0; |
1240 | } | 1229 | } |
1241 | 1230 | ||
1242 | static bool gmc_v8_0_is_idle(void *handle) | 1231 | static bool gmc_v8_0_is_idle(void *handle) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index dc1e1c1d6b24..f936332a069d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
@@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle) | |||
791 | { | 791 | { |
792 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 792 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
793 | 793 | ||
794 | if (adev->vm_manager.enabled) { | ||
795 | gmc_v9_0_vm_fini(adev); | ||
796 | adev->vm_manager.enabled = false; | ||
797 | } | ||
798 | gmc_v9_0_hw_fini(adev); | 794 | gmc_v9_0_hw_fini(adev); |
799 | 795 | ||
800 | return 0; | 796 | return 0; |
@@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle) | |||
809 | if (r) | 805 | if (r) |
810 | return r; | 806 | return r; |
811 | 807 | ||
812 | if (!adev->vm_manager.enabled) { | 808 | amdgpu_vm_reset_all_ids(adev); |
813 | r = gmc_v9_0_vm_init(adev); | ||
814 | if (r) { | ||
815 | dev_err(adev->dev, | ||
816 | "vm manager initialization failed (%d).\n", r); | ||
817 | return r; | ||
818 | } | ||
819 | adev->vm_manager.enabled = true; | ||
820 | } | ||
821 | 809 | ||
822 | return r; | 810 | return 0; |
823 | } | 811 | } |
824 | 812 | ||
825 | static bool gmc_v9_0_is_idle(void *handle) | 813 | static bool gmc_v9_0_is_idle(void *handle) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index a74a3db3056c..102eb6d029fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) | |||
2655 | return sizeof(struct smu7_power_state); | 2655 | return sizeof(struct smu7_power_state); |
2656 | } | 2656 | } |
2657 | 2657 | ||
2658 | static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, | ||
2659 | uint32_t vblank_time_us) | ||
2660 | { | ||
2661 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | ||
2662 | uint32_t switch_limit_us; | ||
2663 | |||
2664 | switch (hwmgr->chip_id) { | ||
2665 | case CHIP_POLARIS10: | ||
2666 | case CHIP_POLARIS11: | ||
2667 | case CHIP_POLARIS12: | ||
2668 | switch_limit_us = data->is_memory_gddr5 ? 190 : 150; | ||
2669 | break; | ||
2670 | default: | ||
2671 | switch_limit_us = data->is_memory_gddr5 ? 450 : 150; | ||
2672 | break; | ||
2673 | } | ||
2674 | |||
2675 | if (vblank_time_us < switch_limit_us) | ||
2676 | return true; | ||
2677 | else | ||
2678 | return false; | ||
2679 | } | ||
2658 | 2680 | ||
2659 | static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | 2681 | static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, |
2660 | struct pp_power_state *request_ps, | 2682 | struct pp_power_state *request_ps, |
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |||
2669 | bool disable_mclk_switching; | 2691 | bool disable_mclk_switching; |
2670 | bool disable_mclk_switching_for_frame_lock; | 2692 | bool disable_mclk_switching_for_frame_lock; |
2671 | struct cgs_display_info info = {0}; | 2693 | struct cgs_display_info info = {0}; |
2694 | struct cgs_mode_info mode_info = {0}; | ||
2672 | const struct phm_clock_and_voltage_limits *max_limits; | 2695 | const struct phm_clock_and_voltage_limits *max_limits; |
2673 | uint32_t i; | 2696 | uint32_t i; |
2674 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 2697 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |||
2677 | int32_t count; | 2700 | int32_t count; |
2678 | int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; | 2701 | int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; |
2679 | 2702 | ||
2703 | info.mode_info = &mode_info; | ||
2680 | data->battery_state = (PP_StateUILabel_Battery == | 2704 | data->battery_state = (PP_StateUILabel_Battery == |
2681 | request_ps->classification.ui_label); | 2705 | request_ps->classification.ui_label); |
2682 | 2706 | ||
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |||
2703 | 2727 | ||
2704 | cgs_get_active_displays_info(hwmgr->device, &info); | 2728 | cgs_get_active_displays_info(hwmgr->device, &info); |
2705 | 2729 | ||
2706 | /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ | ||
2707 | |||
2708 | minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; | 2730 | minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; |
2709 | minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; | 2731 | minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; |
2710 | 2732 | ||
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |||
2769 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); | 2791 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); |
2770 | 2792 | ||
2771 | 2793 | ||
2772 | disable_mclk_switching = (1 < info.display_count) || | 2794 | disable_mclk_switching = ((1 < info.display_count) || |
2773 | disable_mclk_switching_for_frame_lock; | 2795 | disable_mclk_switching_for_frame_lock || |
2796 | smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || | ||
2797 | (mode_info.refresh_rate > 120)); | ||
2774 | 2798 | ||
2775 | sclk = smu7_ps->performance_levels[0].engine_clock; | 2799 | sclk = smu7_ps->performance_levels[0].engine_clock; |
2776 | mclk = smu7_ps->performance_levels[0].memory_clock; | 2800 | mclk = smu7_ps->performance_levels[0].memory_clock; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index ad30f5d3a10d..2614af2f553f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | |||
@@ -4186,7 +4186,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, | |||
4186 | enum pp_clock_type type, uint32_t mask) | 4186 | enum pp_clock_type type, uint32_t mask) |
4187 | { | 4187 | { |
4188 | struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); | 4188 | struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); |
4189 | uint32_t i; | 4189 | int i; |
4190 | 4190 | ||
4191 | if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) | 4191 | if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) |
4192 | return -EINVAL; | 4192 | return -EINVAL; |
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 798a3cc480a2..1a3359c0f6cd 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <drm/drmP.h> | 12 | #include <drm/drmP.h> |
13 | #include <drm/drm_atomic.h> | ||
13 | #include <drm/drm_atomic_helper.h> | 14 | #include <drm/drm_atomic_helper.h> |
14 | #include <drm/drm_crtc.h> | 15 | #include <drm/drm_crtc.h> |
15 | #include <drm/drm_crtc_helper.h> | 16 | #include <drm/drm_crtc_helper.h> |
@@ -226,16 +227,33 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { | |||
226 | static int hdlcd_plane_atomic_check(struct drm_plane *plane, | 227 | static int hdlcd_plane_atomic_check(struct drm_plane *plane, |
227 | struct drm_plane_state *state) | 228 | struct drm_plane_state *state) |
228 | { | 229 | { |
229 | u32 src_w, src_h; | 230 | struct drm_rect clip = { 0 }; |
231 | struct drm_crtc_state *crtc_state; | ||
232 | u32 src_h = state->src_h >> 16; | ||
230 | 233 | ||
231 | src_w = state->src_w >> 16; | 234 | /* only the HDLCD_REG_FB_LINE_COUNT register has a limit */ |
232 | src_h = state->src_h >> 16; | 235 | if (src_h >= HDLCD_MAX_YRES) { |
236 | DRM_DEBUG_KMS("Invalid source width: %d\n", src_h); | ||
237 | return -EINVAL; | ||
238 | } | ||
239 | |||
240 | if (!state->fb || !state->crtc) | ||
241 | return 0; | ||
233 | 242 | ||
234 | /* we can't do any scaling of the plane source */ | 243 | crtc_state = drm_atomic_get_existing_crtc_state(state->state, |
235 | if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) | 244 | state->crtc); |
245 | if (!crtc_state) { | ||
246 | DRM_DEBUG_KMS("Invalid crtc state\n"); | ||
236 | return -EINVAL; | 247 | return -EINVAL; |
248 | } | ||
237 | 249 | ||
238 | return 0; | 250 | clip.x2 = crtc_state->adjusted_mode.hdisplay; |
251 | clip.y2 = crtc_state->adjusted_mode.vdisplay; | ||
252 | |||
253 | return drm_plane_helper_check_state(state, &clip, | ||
254 | DRM_PLANE_HELPER_NO_SCALING, | ||
255 | DRM_PLANE_HELPER_NO_SCALING, | ||
256 | false, true); | ||
239 | } | 257 | } |
240 | 258 | ||
241 | static void hdlcd_plane_atomic_update(struct drm_plane *plane, | 259 | static void hdlcd_plane_atomic_update(struct drm_plane *plane, |
@@ -244,21 +262,20 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane, | |||
244 | struct drm_framebuffer *fb = plane->state->fb; | 262 | struct drm_framebuffer *fb = plane->state->fb; |
245 | struct hdlcd_drm_private *hdlcd; | 263 | struct hdlcd_drm_private *hdlcd; |
246 | struct drm_gem_cma_object *gem; | 264 | struct drm_gem_cma_object *gem; |
247 | u32 src_w, src_h, dest_w, dest_h; | 265 | u32 src_x, src_y, dest_h; |
248 | dma_addr_t scanout_start; | 266 | dma_addr_t scanout_start; |
249 | 267 | ||
250 | if (!fb) | 268 | if (!fb) |
251 | return; | 269 | return; |
252 | 270 | ||
253 | src_w = plane->state->src_w >> 16; | 271 | src_x = plane->state->src.x1 >> 16; |
254 | src_h = plane->state->src_h >> 16; | 272 | src_y = plane->state->src.y1 >> 16; |
255 | dest_w = plane->state->crtc_w; | 273 | dest_h = drm_rect_height(&plane->state->dst); |
256 | dest_h = plane->state->crtc_h; | ||
257 | gem = drm_fb_cma_get_gem_obj(fb, 0); | 274 | gem = drm_fb_cma_get_gem_obj(fb, 0); |
275 | |||
258 | scanout_start = gem->paddr + fb->offsets[0] + | 276 | scanout_start = gem->paddr + fb->offsets[0] + |
259 | plane->state->crtc_y * fb->pitches[0] + | 277 | src_y * fb->pitches[0] + |
260 | plane->state->crtc_x * | 278 | src_x * fb->format->cpp[0]; |
261 | fb->format->cpp[0]; | ||
262 | 279 | ||
263 | hdlcd = plane->dev->dev_private; | 280 | hdlcd = plane->dev->dev_private; |
264 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); | 281 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); |
@@ -305,7 +322,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) | |||
305 | formats, ARRAY_SIZE(formats), | 322 | formats, ARRAY_SIZE(formats), |
306 | DRM_PLANE_TYPE_PRIMARY, NULL); | 323 | DRM_PLANE_TYPE_PRIMARY, NULL); |
307 | if (ret) { | 324 | if (ret) { |
308 | devm_kfree(drm->dev, plane); | ||
309 | return ERR_PTR(ret); | 325 | return ERR_PTR(ret); |
310 | } | 326 | } |
311 | 327 | ||
@@ -329,7 +345,6 @@ int hdlcd_setup_crtc(struct drm_device *drm) | |||
329 | &hdlcd_crtc_funcs, NULL); | 345 | &hdlcd_crtc_funcs, NULL); |
330 | if (ret) { | 346 | if (ret) { |
331 | hdlcd_plane_destroy(primary); | 347 | hdlcd_plane_destroy(primary); |
332 | devm_kfree(drm->dev, primary); | ||
333 | return ret; | 348 | return ret; |
334 | } | 349 | } |
335 | 350 | ||
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 65a3bd7a0c00..423dda2785d4 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | |||
@@ -152,8 +152,7 @@ static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = { | |||
152 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 152 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, | 155 | static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint) |
156 | const struct device_node *np) | ||
157 | { | 156 | { |
158 | struct atmel_hlcdc_dc *dc = dev->dev_private; | 157 | struct atmel_hlcdc_dc *dc = dev->dev_private; |
159 | struct atmel_hlcdc_rgb_output *output; | 158 | struct atmel_hlcdc_rgb_output *output; |
@@ -161,6 +160,11 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, | |||
161 | struct drm_bridge *bridge; | 160 | struct drm_bridge *bridge; |
162 | int ret; | 161 | int ret; |
163 | 162 | ||
163 | ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint, | ||
164 | &panel, &bridge); | ||
165 | if (ret) | ||
166 | return ret; | ||
167 | |||
164 | output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL); | 168 | output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL); |
165 | if (!output) | 169 | if (!output) |
166 | return -EINVAL; | 170 | return -EINVAL; |
@@ -177,10 +181,6 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, | |||
177 | 181 | ||
178 | output->encoder.possible_crtcs = 0x1; | 182 | output->encoder.possible_crtcs = 0x1; |
179 | 183 | ||
180 | ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge); | ||
181 | if (ret) | ||
182 | return ret; | ||
183 | |||
184 | if (panel) { | 184 | if (panel) { |
185 | output->connector.dpms = DRM_MODE_DPMS_OFF; | 185 | output->connector.dpms = DRM_MODE_DPMS_OFF; |
186 | output->connector.polled = DRM_CONNECTOR_POLL_CONNECT; | 186 | output->connector.polled = DRM_CONNECTOR_POLL_CONNECT; |
@@ -220,22 +220,14 @@ err_encoder_cleanup: | |||
220 | 220 | ||
221 | int atmel_hlcdc_create_outputs(struct drm_device *dev) | 221 | int atmel_hlcdc_create_outputs(struct drm_device *dev) |
222 | { | 222 | { |
223 | struct device_node *remote; | 223 | int endpoint, ret = 0; |
224 | int ret = -ENODEV; | 224 | |
225 | int endpoint = 0; | 225 | for (endpoint = 0; !ret; endpoint++) |
226 | 226 | ret = atmel_hlcdc_attach_endpoint(dev, endpoint); | |
227 | while (true) { | 227 | |
228 | /* Loop thru possible multiple connections to the output */ | 228 | /* At least one device was successfully attached.*/ |
229 | remote = of_graph_get_remote_node(dev->dev->of_node, 0, | 229 | if (ret == -ENODEV && endpoint) |
230 | endpoint++); | 230 | return 0; |
231 | if (!remote) | ||
232 | break; | ||
233 | |||
234 | ret = atmel_hlcdc_attach_endpoint(dev, remote); | ||
235 | of_node_put(remote); | ||
236 | if (ret) | ||
237 | return ret; | ||
238 | } | ||
239 | 231 | ||
240 | return ret; | 232 | return ret; |
241 | } | 233 | } |
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index fedd4d60d9cd..5dc8c4350602 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c | |||
@@ -948,8 +948,6 @@ retry: | |||
948 | } | 948 | } |
949 | 949 | ||
950 | out: | 950 | out: |
951 | if (ret && crtc->funcs->page_flip_target) | ||
952 | drm_crtc_vblank_put(crtc); | ||
953 | if (fb) | 951 | if (fb) |
954 | drm_framebuffer_put(fb); | 952 | drm_framebuffer_put(fb); |
955 | if (crtc->primary->old_fb) | 953 | if (crtc->primary->old_fb) |
@@ -964,5 +962,8 @@ out: | |||
964 | drm_modeset_drop_locks(&ctx); | 962 | drm_modeset_drop_locks(&ctx); |
965 | drm_modeset_acquire_fini(&ctx); | 963 | drm_modeset_acquire_fini(&ctx); |
966 | 964 | ||
965 | if (ret && crtc->funcs->page_flip_target) | ||
966 | drm_crtc_vblank_put(crtc); | ||
967 | |||
967 | return ret; | 968 | return ret; |
968 | } | 969 | } |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index e1909429837e..de80ee1b71df 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | |||
@@ -44,6 +44,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev, | |||
44 | 44 | ||
45 | /* initially, until copy_from_user() and bo lookup succeeds: */ | 45 | /* initially, until copy_from_user() and bo lookup succeeds: */ |
46 | submit->nr_bos = 0; | 46 | submit->nr_bos = 0; |
47 | submit->fence = NULL; | ||
47 | 48 | ||
48 | ww_acquire_init(&submit->ticket, &reservation_ww_class); | 49 | ww_acquire_init(&submit->ticket, &reservation_ww_class); |
49 | } | 50 | } |
@@ -294,7 +295,8 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit) | |||
294 | } | 295 | } |
295 | 296 | ||
296 | ww_acquire_fini(&submit->ticket); | 297 | ww_acquire_fini(&submit->ticket); |
297 | dma_fence_put(submit->fence); | 298 | if (submit->fence) |
299 | dma_fence_put(submit->fence); | ||
298 | kfree(submit); | 300 | kfree(submit); |
299 | } | 301 | } |
300 | 302 | ||
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 0066fe7e622e..be3eefec5152 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c | |||
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev, | |||
759 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { | 759 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { |
760 | mode_dev->panel_fixed_mode = | 760 | mode_dev->panel_fixed_mode = |
761 | drm_mode_duplicate(dev, scan); | 761 | drm_mode_duplicate(dev, scan); |
762 | DRM_DEBUG_KMS("Using mode from DDC\n"); | ||
762 | goto out; /* FIXME: check for quirks */ | 763 | goto out; /* FIXME: check for quirks */ |
763 | } | 764 | } |
764 | } | 765 | } |
765 | 766 | ||
766 | /* Failed to get EDID, what about VBT? do we need this? */ | 767 | /* Failed to get EDID, what about VBT? do we need this? */ |
767 | if (mode_dev->vbt_mode) | 768 | if (dev_priv->lfp_lvds_vbt_mode) { |
768 | mode_dev->panel_fixed_mode = | 769 | mode_dev->panel_fixed_mode = |
769 | drm_mode_duplicate(dev, mode_dev->vbt_mode); | 770 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); |
770 | 771 | ||
771 | if (!mode_dev->panel_fixed_mode) | 772 | if (mode_dev->panel_fixed_mode) { |
772 | if (dev_priv->lfp_lvds_vbt_mode) | 773 | mode_dev->panel_fixed_mode->type |= |
773 | mode_dev->panel_fixed_mode = | 774 | DRM_MODE_TYPE_PREFERRED; |
774 | drm_mode_duplicate(dev, | 775 | DRM_DEBUG_KMS("Using mode from VBT\n"); |
775 | dev_priv->lfp_lvds_vbt_mode); | 776 | goto out; |
777 | } | ||
778 | } | ||
776 | 779 | ||
777 | /* | 780 | /* |
778 | * If we didn't get EDID, try checking if the panel is already turned | 781 | * If we didn't get EDID, try checking if the panel is already turned |
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev, | |||
789 | if (mode_dev->panel_fixed_mode) { | 792 | if (mode_dev->panel_fixed_mode) { |
790 | mode_dev->panel_fixed_mode->type |= | 793 | mode_dev->panel_fixed_mode->type |= |
791 | DRM_MODE_TYPE_PREFERRED; | 794 | DRM_MODE_TYPE_PREFERRED; |
795 | DRM_DEBUG_KMS("Using pre-programmed mode\n"); | ||
792 | goto out; /* FIXME: check for quirks */ | 796 | goto out; /* FIXME: check for quirks */ |
793 | } | 797 | } |
794 | } | 798 | } |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 0ad1a508e2af..c995e540ff96 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -1244,7 +1244,7 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
1244 | mode = vgpu_vreg(vgpu, offset); | 1244 | mode = vgpu_vreg(vgpu, offset); |
1245 | 1245 | ||
1246 | if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { | 1246 | if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { |
1247 | WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n", | 1247 | WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n", |
1248 | vgpu->id); | 1248 | vgpu->id); |
1249 | return 0; | 1249 | return 0; |
1250 | } | 1250 | } |
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index c6e7972ac21d..a5e11d89df2f 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c | |||
@@ -340,6 +340,9 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id) | |||
340 | } else | 340 | } else |
341 | v = mmio->value; | 341 | v = mmio->value; |
342 | 342 | ||
343 | if (mmio->in_context) | ||
344 | continue; | ||
345 | |||
343 | I915_WRITE(mmio->reg, v); | 346 | I915_WRITE(mmio->reg, v); |
344 | POSTING_READ(mmio->reg); | 347 | POSTING_READ(mmio->reg); |
345 | 348 | ||
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 79ba4b3440aa..f25ff133865f 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
@@ -129,9 +129,13 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) | |||
129 | struct vgpu_sched_data *vgpu_data; | 129 | struct vgpu_sched_data *vgpu_data; |
130 | ktime_t cur_time; | 130 | ktime_t cur_time; |
131 | 131 | ||
132 | /* no target to schedule */ | 132 | /* no need to schedule if next_vgpu is the same with current_vgpu, |
133 | if (!scheduler->next_vgpu) | 133 | * let scheduler chose next_vgpu again by setting it to NULL. |
134 | */ | ||
135 | if (scheduler->next_vgpu == scheduler->current_vgpu) { | ||
136 | scheduler->next_vgpu = NULL; | ||
134 | return; | 137 | return; |
138 | } | ||
135 | 139 | ||
136 | /* | 140 | /* |
137 | * after the flag is set, workload dispatch thread will | 141 | * after the flag is set, workload dispatch thread will |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 2aa6b97fd22f..a0563e18d753 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -195,9 +195,12 @@ static int ppgtt_bind_vma(struct i915_vma *vma, | |||
195 | u32 pte_flags; | 195 | u32 pte_flags; |
196 | int ret; | 196 | int ret; |
197 | 197 | ||
198 | ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size); | 198 | if (!(vma->flags & I915_VMA_LOCAL_BIND)) { |
199 | if (ret) | 199 | ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, |
200 | return ret; | 200 | vma->size); |
201 | if (ret) | ||
202 | return ret; | ||
203 | } | ||
201 | 204 | ||
202 | vma->pages = vma->obj->mm.pages; | 205 | vma->pages = vma->obj->mm.pages; |
203 | 206 | ||
@@ -2306,7 +2309,8 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, | |||
2306 | if (flags & I915_VMA_LOCAL_BIND) { | 2309 | if (flags & I915_VMA_LOCAL_BIND) { |
2307 | struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; | 2310 | struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; |
2308 | 2311 | ||
2309 | if (appgtt->base.allocate_va_range) { | 2312 | if (!(vma->flags & I915_VMA_LOCAL_BIND) && |
2313 | appgtt->base.allocate_va_range) { | ||
2310 | ret = appgtt->base.allocate_va_range(&appgtt->base, | 2314 | ret = appgtt->base.allocate_va_range(&appgtt->base, |
2311 | vma->node.start, | 2315 | vma->node.start, |
2312 | vma->node.size); | 2316 | vma->node.size); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 11b12f412492..5a7c63e64381 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -3051,10 +3051,14 @@ enum skl_disp_power_wells { | |||
3051 | #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ | 3051 | #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ |
3052 | #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ | 3052 | #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ |
3053 | #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ | 3053 | #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ |
3054 | #define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */ | ||
3054 | #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ | 3055 | #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ |
3055 | /* Note, below two are guess */ | 3056 | /* |
3056 | #define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ | 3057 | * Note that on at least on ELK the below value is reported for both |
3057 | #define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ | 3058 | * 333 and 400 MHz BIOS FSB setting, but given that the gmch datasheet |
3059 | * lists only 200/266/333 MHz FSB as supported let's decode it as 333 MHz. | ||
3060 | */ | ||
3061 | #define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */ | ||
3058 | #define CLKCFG_FSB_MASK (7 << 0) | 3062 | #define CLKCFG_FSB_MASK (7 << 0) |
3059 | #define CLKCFG_MEM_533 (1 << 4) | 3063 | #define CLKCFG_MEM_533 (1 << 4) |
3060 | #define CLKCFG_MEM_667 (2 << 4) | 3064 | #define CLKCFG_MEM_667 (2 << 4) |
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index dd3ad52b7dfe..f29a226e24d8 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c | |||
@@ -1798,13 +1798,11 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv) | |||
1798 | case CLKCFG_FSB_800: | 1798 | case CLKCFG_FSB_800: |
1799 | return 200000; | 1799 | return 200000; |
1800 | case CLKCFG_FSB_1067: | 1800 | case CLKCFG_FSB_1067: |
1801 | case CLKCFG_FSB_1067_ALT: | ||
1801 | return 266667; | 1802 | return 266667; |
1802 | case CLKCFG_FSB_1333: | 1803 | case CLKCFG_FSB_1333: |
1804 | case CLKCFG_FSB_1333_ALT: | ||
1803 | return 333333; | 1805 | return 333333; |
1804 | /* these two are just a guess; one of them might be right */ | ||
1805 | case CLKCFG_FSB_1600: | ||
1806 | case CLKCFG_FSB_1600_ALT: | ||
1807 | return 400000; | ||
1808 | default: | 1806 | default: |
1809 | return 133333; | 1807 | return 133333; |
1810 | } | 1808 | } |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 3ffe8b1f1d48..fc0ef492252a 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -410,11 +410,10 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) | |||
410 | val |= (ULPS_STATE_ENTER | DEVICE_READY); | 410 | val |= (ULPS_STATE_ENTER | DEVICE_READY); |
411 | I915_WRITE(MIPI_DEVICE_READY(port), val); | 411 | I915_WRITE(MIPI_DEVICE_READY(port), val); |
412 | 412 | ||
413 | /* Wait for ULPS Not active */ | 413 | /* Wait for ULPS active */ |
414 | if (intel_wait_for_register(dev_priv, | 414 | if (intel_wait_for_register(dev_priv, |
415 | MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, | 415 | MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20)) |
416 | GLK_ULPS_NOT_ACTIVE, 20)) | 416 | DRM_ERROR("ULPS not active\n"); |
417 | DRM_ERROR("ULPS is still active\n"); | ||
418 | 417 | ||
419 | /* Exit ULPS */ | 418 | /* Exit ULPS */ |
420 | val = I915_READ(MIPI_DEVICE_READY(port)); | 419 | val = I915_READ(MIPI_DEVICE_READY(port)); |
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 25d8e76489e4..668f00480d97 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <linux/acpi.h> | 63 | #include <linux/acpi.h> |
64 | #include <linux/device.h> | 64 | #include <linux/device.h> |
65 | #include <linux/pci.h> | 65 | #include <linux/pci.h> |
66 | #include <linux/pm_runtime.h> | ||
66 | 67 | ||
67 | #include "i915_drv.h" | 68 | #include "i915_drv.h" |
68 | #include <linux/delay.h> | 69 | #include <linux/delay.h> |
@@ -121,6 +122,10 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) | |||
121 | 122 | ||
122 | kfree(rsc); | 123 | kfree(rsc); |
123 | 124 | ||
125 | pm_runtime_forbid(&platdev->dev); | ||
126 | pm_runtime_set_active(&platdev->dev); | ||
127 | pm_runtime_enable(&platdev->dev); | ||
128 | |||
124 | return platdev; | 129 | return platdev; |
125 | 130 | ||
126 | err: | 131 | err: |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 21b10f9840c9..549763f5e17d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -360,6 +360,8 @@ nouveau_display_hpd_work(struct work_struct *work) | |||
360 | pm_runtime_get_sync(drm->dev->dev); | 360 | pm_runtime_get_sync(drm->dev->dev); |
361 | 361 | ||
362 | drm_helper_hpd_irq_event(drm->dev); | 362 | drm_helper_hpd_irq_event(drm->dev); |
363 | /* enable polling for external displays */ | ||
364 | drm_kms_helper_poll_enable(drm->dev); | ||
363 | 365 | ||
364 | pm_runtime_mark_last_busy(drm->dev->dev); | 366 | pm_runtime_mark_last_busy(drm->dev->dev); |
365 | pm_runtime_put_sync(drm->dev->dev); | 367 | pm_runtime_put_sync(drm->dev->dev); |
@@ -413,10 +415,6 @@ nouveau_display_init(struct drm_device *dev) | |||
413 | if (ret) | 415 | if (ret) |
414 | return ret; | 416 | return ret; |
415 | 417 | ||
416 | /* enable polling for external displays */ | ||
417 | if (!dev->mode_config.poll_enabled) | ||
418 | drm_kms_helper_poll_enable(dev); | ||
419 | |||
420 | /* enable hotplug interrupts */ | 418 | /* enable hotplug interrupts */ |
421 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 419 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
422 | struct nouveau_connector *conn = nouveau_connector(connector); | 420 | struct nouveau_connector *conn = nouveau_connector(connector); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2b6ac24ce690..36268e1802b5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -502,6 +502,9 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
502 | pm_runtime_allow(dev->dev); | 502 | pm_runtime_allow(dev->dev); |
503 | pm_runtime_mark_last_busy(dev->dev); | 503 | pm_runtime_mark_last_busy(dev->dev); |
504 | pm_runtime_put(dev->dev); | 504 | pm_runtime_put(dev->dev); |
505 | } else { | ||
506 | /* enable polling for external displays */ | ||
507 | drm_kms_helper_poll_enable(dev); | ||
505 | } | 508 | } |
506 | return 0; | 509 | return 0; |
507 | 510 | ||
@@ -774,9 +777,6 @@ nouveau_pmops_runtime_resume(struct device *dev) | |||
774 | 777 | ||
775 | ret = nouveau_do_resume(drm_dev, true); | 778 | ret = nouveau_do_resume(drm_dev, true); |
776 | 779 | ||
777 | if (!drm_dev->mode_config.poll_enabled) | ||
778 | drm_kms_helper_poll_enable(drm_dev); | ||
779 | |||
780 | /* do magic */ | 780 | /* do magic */ |
781 | nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); | 781 | nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); |
782 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); | 782 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index 3a24788c3185..a7e55c422501 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | |||
@@ -148,7 +148,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) | |||
148 | case NVKM_MEM_TARGET_NCOH: target = 3; break; | 148 | case NVKM_MEM_TARGET_NCOH: target = 3; break; |
149 | default: | 149 | default: |
150 | WARN_ON(1); | 150 | WARN_ON(1); |
151 | return; | 151 | goto unlock; |
152 | } | 152 | } |
153 | 153 | ||
154 | nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | | 154 | nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | |
@@ -160,6 +160,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) | |||
160 | & 0x00100000), | 160 | & 0x00100000), |
161 | msecs_to_jiffies(2000)) == 0) | 161 | msecs_to_jiffies(2000)) == 0) |
162 | nvkm_error(subdev, "runlist %d update timeout\n", runl); | 162 | nvkm_error(subdev, "runlist %d update timeout\n", runl); |
163 | unlock: | ||
163 | mutex_unlock(&subdev->mutex); | 164 | mutex_unlock(&subdev->mutex); |
164 | } | 165 | } |
165 | 166 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c index d1cf02d22db1..1b0c793c0192 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c | |||
@@ -116,6 +116,7 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img, | |||
116 | ret = nvkm_firmware_get(subdev->device, f, &sig); | 116 | ret = nvkm_firmware_get(subdev->device, f, &sig); |
117 | if (ret) | 117 | if (ret) |
118 | goto free_data; | 118 | goto free_data; |
119 | |||
119 | img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); | 120 | img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); |
120 | if (!img->sig) { | 121 | if (!img->sig) { |
121 | ret = -ENOMEM; | 122 | ret = -ENOMEM; |
@@ -126,8 +127,9 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img, | |||
126 | img->ucode_data = ls_ucode_img_build(bl, code, data, | 127 | img->ucode_data = ls_ucode_img_build(bl, code, data, |
127 | &img->ucode_desc); | 128 | &img->ucode_desc); |
128 | if (IS_ERR(img->ucode_data)) { | 129 | if (IS_ERR(img->ucode_data)) { |
130 | kfree(img->sig); | ||
129 | ret = PTR_ERR(img->ucode_data); | 131 | ret = PTR_ERR(img->ucode_data); |
130 | goto free_data; | 132 | goto free_sig; |
131 | } | 133 | } |
132 | img->ucode_size = img->ucode_desc.image_size; | 134 | img->ucode_size = img->ucode_desc.image_size; |
133 | 135 | ||
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 058340a002c2..4a340efd8ba6 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
@@ -575,8 +575,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, | |||
575 | if (ret) | 575 | if (ret) |
576 | return; | 576 | return; |
577 | 577 | ||
578 | cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release); | ||
579 | |||
580 | if (fb != old_state->fb) { | 578 | if (fb != old_state->fb) { |
581 | obj = to_qxl_framebuffer(fb)->obj; | 579 | obj = to_qxl_framebuffer(fb)->obj; |
582 | user_bo = gem_to_qxl_bo(obj); | 580 | user_bo = gem_to_qxl_bo(obj); |
@@ -614,6 +612,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, | |||
614 | qxl_bo_kunmap(cursor_bo); | 612 | qxl_bo_kunmap(cursor_bo); |
615 | qxl_bo_kunmap(user_bo); | 613 | qxl_bo_kunmap(user_bo); |
616 | 614 | ||
615 | cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release); | ||
617 | cmd->u.set.visible = 1; | 616 | cmd->u.set.visible = 1; |
618 | cmd->u.set.shape = qxl_bo_physical_address(qdev, | 617 | cmd->u.set.shape = qxl_bo_physical_address(qdev, |
619 | cursor_bo, 0); | 618 | cursor_bo, 0); |
@@ -624,6 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, | |||
624 | if (ret) | 623 | if (ret) |
625 | goto out_free_release; | 624 | goto out_free_release; |
626 | 625 | ||
626 | cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release); | ||
627 | cmd->type = QXL_CURSOR_MOVE; | 627 | cmd->type = QXL_CURSOR_MOVE; |
628 | } | 628 | } |
629 | 629 | ||
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 7ba450832e6b..ea36dc4dd5d2 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c | |||
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev) | |||
776 | u32 vblank_time = r600_dpm_get_vblank_time(rdev); | 776 | u32 vblank_time = r600_dpm_get_vblank_time(rdev); |
777 | u32 switch_limit = pi->mem_gddr5 ? 450 : 300; | 777 | u32 switch_limit = pi->mem_gddr5 ? 450 : 300; |
778 | 778 | ||
779 | /* disable mclk switching if the refresh is >120Hz, even if the | ||
780 | * blanking period would allow it | ||
781 | */ | ||
782 | if (r600_dpm_get_vrefresh(rdev) > 120) | ||
783 | return true; | ||
784 | |||
779 | if (vblank_time < switch_limit) | 785 | if (vblank_time < switch_limit) |
780 | return true; | 786 | return true; |
781 | else | 787 | else |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ccebe0f8d2e1..008c145b7f29 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) | |||
7401 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 7401 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
7402 | } | 7402 | } |
7403 | if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { | 7403 | if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
7404 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 7404 | tmp = RREG32(DC_HPD6_INT_CONTROL); |
7405 | tmp |= DC_HPDx_INT_ACK; | 7405 | tmp |= DC_HPDx_INT_ACK; |
7406 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 7406 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
7407 | } | 7407 | } |
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) | |||
7431 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 7431 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
7432 | } | 7432 | } |
7433 | if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { | 7433 | if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { |
7434 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 7434 | tmp = RREG32(DC_HPD6_INT_CONTROL); |
7435 | tmp |= DC_HPDx_RX_INT_ACK; | 7435 | tmp |= DC_HPDx_RX_INT_ACK; |
7436 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 7436 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
7437 | } | 7437 | } |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f130ec41ee4b..0bf103536404 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) | |||
4927 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 4927 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
4928 | } | 4928 | } |
4929 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { | 4929 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
4930 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 4930 | tmp = RREG32(DC_HPD6_INT_CONTROL); |
4931 | tmp |= DC_HPDx_INT_ACK; | 4931 | tmp |= DC_HPDx_INT_ACK; |
4932 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 4932 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
4933 | } | 4933 | } |
@@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) | |||
4958 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 4958 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
4959 | } | 4959 | } |
4960 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { | 4960 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { |
4961 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 4961 | tmp = RREG32(DC_HPD6_INT_CONTROL); |
4962 | tmp |= DC_HPDx_RX_INT_ACK; | 4962 | tmp |= DC_HPDx_RX_INT_ACK; |
4963 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 4963 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
4964 | } | 4964 | } |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 0a085176e79b..e06e2d8feab3 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev) | |||
3988 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 3988 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
3989 | } | 3989 | } |
3990 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { | 3990 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { |
3991 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 3991 | tmp = RREG32(DC_HPD6_INT_CONTROL); |
3992 | tmp |= DC_HPDx_INT_ACK; | 3992 | tmp |= DC_HPDx_INT_ACK; |
3993 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 3993 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
3994 | } | 3994 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index e3e7cb1d10a2..4761f27f2ca2 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
116 | if ((radeon_runtime_pm != 0) && | 116 | if ((radeon_runtime_pm != 0) && |
117 | radeon_has_atpx() && | 117 | radeon_has_atpx() && |
118 | ((flags & RADEON_IS_IGP) == 0) && | 118 | ((flags & RADEON_IS_IGP) == 0) && |
119 | !pci_is_thunderbolt_attached(rdev->pdev)) | 119 | !pci_is_thunderbolt_attached(dev->pdev)) |
120 | flags |= RADEON_IS_PX; | 120 | flags |= RADEON_IS_PX; |
121 | 121 | ||
122 | /* radeon_device_init should report only fatal error | 122 | /* radeon_device_init should report only fatal error |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ceee87f029d9..76d1888528e6 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) | |||
6317 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 6317 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
6318 | } | 6318 | } |
6319 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { | 6319 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
6320 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 6320 | tmp = RREG32(DC_HPD6_INT_CONTROL); |
6321 | tmp |= DC_HPDx_INT_ACK; | 6321 | tmp |= DC_HPDx_INT_ACK; |
6322 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 6322 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
6323 | } | 6323 | } |
@@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) | |||
6348 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 6348 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
6349 | } | 6349 | } |
6350 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { | 6350 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { |
6351 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 6351 | tmp = RREG32(DC_HPD6_INT_CONTROL); |
6352 | tmp |= DC_HPDx_RX_INT_ACK; | 6352 | tmp |= DC_HPDx_RX_INT_ACK; |
6353 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 6353 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
6354 | } | 6354 | } |
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig index b2fd029d67b3..91916326957f 100644 --- a/drivers/gpu/host1x/Kconfig +++ b/drivers/gpu/host1x/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config TEGRA_HOST1X | 1 | config TEGRA_HOST1X |
2 | tristate "NVIDIA Tegra host1x driver" | 2 | tristate "NVIDIA Tegra host1x driver" |
3 | depends on ARCH_TEGRA || (ARM && COMPILE_TEST) | 3 | depends on ARCH_TEGRA || (ARM && COMPILE_TEST) |
4 | select IOMMU_IOVA if IOMMU_SUPPORT | ||
4 | help | 5 | help |
5 | Driver for the NVIDIA Tegra host1x hardware. | 6 | Driver for the NVIDIA Tegra host1x hardware. |
6 | 7 | ||
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 3ac4c03ba77b..c13a4fd86b3c 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -605,6 +605,13 @@ static int coretemp_cpu_online(unsigned int cpu) | |||
605 | struct platform_data *pdata; | 605 | struct platform_data *pdata; |
606 | 606 | ||
607 | /* | 607 | /* |
608 | * Don't execute this on resume as the offline callback did | ||
609 | * not get executed on suspend. | ||
610 | */ | ||
611 | if (cpuhp_tasks_frozen) | ||
612 | return 0; | ||
613 | |||
614 | /* | ||
608 | * CPUID.06H.EAX[0] indicates whether the CPU has thermal | 615 | * CPUID.06H.EAX[0] indicates whether the CPU has thermal |
609 | * sensors. We check this bit only, all the early CPUs | 616 | * sensors. We check this bit only, all the early CPUs |
610 | * without thermal sensors will be filtered out. | 617 | * without thermal sensors will be filtered out. |
@@ -654,6 +661,13 @@ static int coretemp_cpu_offline(unsigned int cpu) | |||
654 | struct temp_data *tdata; | 661 | struct temp_data *tdata; |
655 | int indx, target; | 662 | int indx, target; |
656 | 663 | ||
664 | /* | ||
665 | * Don't execute this on suspend as the device remove locks | ||
666 | * up the machine. | ||
667 | */ | ||
668 | if (cpuhp_tasks_frozen) | ||
669 | return 0; | ||
670 | |||
657 | /* If the physical CPU device does not exist, just return */ | 671 | /* If the physical CPU device does not exist, just return */ |
658 | if (!pdev) | 672 | if (!pdev) |
659 | return 0; | 673 | return 0; |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index f2acd4b6bf01..d1263b82d646 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -94,6 +94,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], | |||
94 | static int dw_i2c_acpi_configure(struct platform_device *pdev) | 94 | static int dw_i2c_acpi_configure(struct platform_device *pdev) |
95 | { | 95 | { |
96 | struct dw_i2c_dev *dev = platform_get_drvdata(pdev); | 96 | struct dw_i2c_dev *dev = platform_get_drvdata(pdev); |
97 | u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; | ||
97 | acpi_handle handle = ACPI_HANDLE(&pdev->dev); | 98 | acpi_handle handle = ACPI_HANDLE(&pdev->dev); |
98 | const struct acpi_device_id *id; | 99 | const struct acpi_device_id *id; |
99 | struct acpi_device *adev; | 100 | struct acpi_device *adev; |
@@ -107,23 +108,24 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) | |||
107 | * Try to get SDA hold time and *CNT values from an ACPI method for | 108 | * Try to get SDA hold time and *CNT values from an ACPI method for |
108 | * selected speed modes. | 109 | * selected speed modes. |
109 | */ | 110 | */ |
111 | dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht); | ||
112 | dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht); | ||
113 | dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht); | ||
114 | dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht); | ||
115 | |||
110 | switch (dev->clk_freq) { | 116 | switch (dev->clk_freq) { |
111 | case 100000: | 117 | case 100000: |
112 | dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, | 118 | dev->sda_hold_time = ss_ht; |
113 | &dev->sda_hold_time); | ||
114 | break; | 119 | break; |
115 | case 1000000: | 120 | case 1000000: |
116 | dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, | 121 | dev->sda_hold_time = fp_ht; |
117 | &dev->sda_hold_time); | ||
118 | break; | 122 | break; |
119 | case 3400000: | 123 | case 3400000: |
120 | dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, | 124 | dev->sda_hold_time = hs_ht; |
121 | &dev->sda_hold_time); | ||
122 | break; | 125 | break; |
123 | case 400000: | 126 | case 400000: |
124 | default: | 127 | default: |
125 | dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, | 128 | dev->sda_hold_time = fs_ht; |
126 | &dev->sda_hold_time); | ||
127 | break; | 129 | break; |
128 | } | 130 | } |
129 | 131 | ||
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index cf737ec8563b..5c4db65c5019 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c | |||
@@ -819,7 +819,6 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, | |||
819 | rc = -EINVAL; | 819 | rc = -EINVAL; |
820 | goto out; | 820 | goto out; |
821 | } | 821 | } |
822 | drv_data->irq = irq_of_parse_and_map(np, 0); | ||
823 | 822 | ||
824 | drv_data->rstc = devm_reset_control_get_optional(dev, NULL); | 823 | drv_data->rstc = devm_reset_control_get_optional(dev, NULL); |
825 | if (IS_ERR(drv_data->rstc)) { | 824 | if (IS_ERR(drv_data->rstc)) { |
@@ -902,10 +901,11 @@ mv64xxx_i2c_probe(struct platform_device *pd) | |||
902 | if (!IS_ERR(drv_data->clk)) | 901 | if (!IS_ERR(drv_data->clk)) |
903 | clk_prepare_enable(drv_data->clk); | 902 | clk_prepare_enable(drv_data->clk); |
904 | 903 | ||
904 | drv_data->irq = platform_get_irq(pd, 0); | ||
905 | |||
905 | if (pdata) { | 906 | if (pdata) { |
906 | drv_data->freq_m = pdata->freq_m; | 907 | drv_data->freq_m = pdata->freq_m; |
907 | drv_data->freq_n = pdata->freq_n; | 908 | drv_data->freq_n = pdata->freq_n; |
908 | drv_data->irq = platform_get_irq(pd, 0); | ||
909 | drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout); | 909 | drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout); |
910 | drv_data->offload_enabled = false; | 910 | drv_data->offload_enabled = false; |
911 | memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets)); | 911 | memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets)); |
@@ -915,7 +915,7 @@ mv64xxx_i2c_probe(struct platform_device *pd) | |||
915 | goto exit_clk; | 915 | goto exit_clk; |
916 | } | 916 | } |
917 | if (drv_data->irq < 0) { | 917 | if (drv_data->irq < 0) { |
918 | rc = -ENXIO; | 918 | rc = drv_data->irq; |
919 | goto exit_reset; | 919 | goto exit_reset; |
920 | } | 920 | } |
921 | 921 | ||
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index 0ed77eeff31e..a2e3dd715380 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c | |||
@@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd, | |||
178 | int value, int index, void *data, int len) | 178 | int value, int index, void *data, int len) |
179 | { | 179 | { |
180 | struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; | 180 | struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; |
181 | void *dmadata = kmalloc(len, GFP_KERNEL); | ||
182 | int ret; | ||
183 | |||
184 | if (!dmadata) | ||
185 | return -ENOMEM; | ||
181 | 186 | ||
182 | /* do control transfer */ | 187 | /* do control transfer */ |
183 | return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), | 188 | ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), |
184 | cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | | 189 | cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | |
185 | USB_DIR_IN, value, index, data, len, 2000); | 190 | USB_DIR_IN, value, index, dmadata, len, 2000); |
191 | |||
192 | memcpy(data, dmadata, len); | ||
193 | kfree(dmadata); | ||
194 | return ret; | ||
186 | } | 195 | } |
187 | 196 | ||
188 | static int usb_write(struct i2c_adapter *adapter, int cmd, | 197 | static int usb_write(struct i2c_adapter *adapter, int cmd, |
189 | int value, int index, void *data, int len) | 198 | int value, int index, void *data, int len) |
190 | { | 199 | { |
191 | struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; | 200 | struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; |
201 | void *dmadata = kmemdup(data, len, GFP_KERNEL); | ||
202 | int ret; | ||
203 | |||
204 | if (!dmadata) | ||
205 | return -ENOMEM; | ||
192 | 206 | ||
193 | /* do control transfer */ | 207 | /* do control transfer */ |
194 | return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), | 208 | ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), |
195 | cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE, | 209 | cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE, |
196 | value, index, data, len, 2000); | 210 | value, index, dmadata, len, 2000); |
211 | |||
212 | kfree(dmadata); | ||
213 | return ret; | ||
197 | } | 214 | } |
198 | 215 | ||
199 | static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev) | 216 | static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev) |
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index dbe7e44c9321..6ba6c83ca8f1 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c | |||
@@ -416,6 +416,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev) | |||
416 | adapter->class = I2C_CLASS_HWMON; | 416 | adapter->class = I2C_CLASS_HWMON; |
417 | adapter->dev.parent = &pdev->dev; | 417 | adapter->dev.parent = &pdev->dev; |
418 | adapter->dev.of_node = pdev->dev.of_node; | 418 | adapter->dev.of_node = pdev->dev.of_node; |
419 | ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev)); | ||
419 | i2c_set_adapdata(adapter, ctx); | 420 | i2c_set_adapdata(adapter, ctx); |
420 | rc = i2c_add_adapter(adapter); | 421 | rc = i2c_add_adapter(adapter); |
421 | if (rc) { | 422 | if (rc) { |
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 26f7237558ba..9669ca4937b8 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c | |||
@@ -395,18 +395,20 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc, | |||
395 | if (force_nr) { | 395 | if (force_nr) { |
396 | priv->adap.nr = force_nr; | 396 | priv->adap.nr = force_nr; |
397 | ret = i2c_add_numbered_adapter(&priv->adap); | 397 | ret = i2c_add_numbered_adapter(&priv->adap); |
398 | dev_err(&parent->dev, | 398 | if (ret < 0) { |
399 | "failed to add mux-adapter %u as bus %u (error=%d)\n", | 399 | dev_err(&parent->dev, |
400 | chan_id, force_nr, ret); | 400 | "failed to add mux-adapter %u as bus %u (error=%d)\n", |
401 | chan_id, force_nr, ret); | ||
402 | goto err_free_priv; | ||
403 | } | ||
401 | } else { | 404 | } else { |
402 | ret = i2c_add_adapter(&priv->adap); | 405 | ret = i2c_add_adapter(&priv->adap); |
403 | dev_err(&parent->dev, | 406 | if (ret < 0) { |
404 | "failed to add mux-adapter %u (error=%d)\n", | 407 | dev_err(&parent->dev, |
405 | chan_id, ret); | 408 | "failed to add mux-adapter %u (error=%d)\n", |
406 | } | 409 | chan_id, ret); |
407 | if (ret < 0) { | 410 | goto err_free_priv; |
408 | kfree(priv); | 411 | } |
409 | return ret; | ||
410 | } | 412 | } |
411 | 413 | ||
412 | WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj, | 414 | WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj, |
@@ -422,6 +424,10 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc, | |||
422 | 424 | ||
423 | muxc->adapter[muxc->num_adapters++] = &priv->adap; | 425 | muxc->adapter[muxc->num_adapters++] = &priv->adap; |
424 | return 0; | 426 | return 0; |
427 | |||
428 | err_free_priv: | ||
429 | kfree(priv); | ||
430 | return ret; | ||
425 | } | 431 | } |
426 | EXPORT_SYMBOL_GPL(i2c_mux_add_adapter); | 432 | EXPORT_SYMBOL_GPL(i2c_mux_add_adapter); |
427 | 433 | ||
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index 406d5059072c..d97031804de8 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c | |||
@@ -196,20 +196,25 @@ static int i2c_mux_reg_probe(struct platform_device *pdev) | |||
196 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 196 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
197 | mux->data.reg_size = resource_size(res); | 197 | mux->data.reg_size = resource_size(res); |
198 | mux->data.reg = devm_ioremap_resource(&pdev->dev, res); | 198 | mux->data.reg = devm_ioremap_resource(&pdev->dev, res); |
199 | if (IS_ERR(mux->data.reg)) | 199 | if (IS_ERR(mux->data.reg)) { |
200 | return PTR_ERR(mux->data.reg); | 200 | ret = PTR_ERR(mux->data.reg); |
201 | goto err_put_parent; | ||
202 | } | ||
201 | } | 203 | } |
202 | 204 | ||
203 | if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && | 205 | if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && |
204 | mux->data.reg_size != 1) { | 206 | mux->data.reg_size != 1) { |
205 | dev_err(&pdev->dev, "Invalid register size\n"); | 207 | dev_err(&pdev->dev, "Invalid register size\n"); |
206 | return -EINVAL; | 208 | ret = -EINVAL; |
209 | goto err_put_parent; | ||
207 | } | 210 | } |
208 | 211 | ||
209 | muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0, | 212 | muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0, |
210 | i2c_mux_reg_select, NULL); | 213 | i2c_mux_reg_select, NULL); |
211 | if (!muxc) | 214 | if (!muxc) { |
212 | return -ENOMEM; | 215 | ret = -ENOMEM; |
216 | goto err_put_parent; | ||
217 | } | ||
213 | muxc->priv = mux; | 218 | muxc->priv = mux; |
214 | 219 | ||
215 | platform_set_drvdata(pdev, muxc); | 220 | platform_set_drvdata(pdev, muxc); |
@@ -223,7 +228,7 @@ static int i2c_mux_reg_probe(struct platform_device *pdev) | |||
223 | 228 | ||
224 | ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class); | 229 | ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class); |
225 | if (ret) | 230 | if (ret) |
226 | goto add_adapter_failed; | 231 | goto err_del_mux_adapters; |
227 | } | 232 | } |
228 | 233 | ||
229 | dev_dbg(&pdev->dev, "%d port mux on %s adapter\n", | 234 | dev_dbg(&pdev->dev, "%d port mux on %s adapter\n", |
@@ -231,8 +236,10 @@ static int i2c_mux_reg_probe(struct platform_device *pdev) | |||
231 | 236 | ||
232 | return 0; | 237 | return 0; |
233 | 238 | ||
234 | add_adapter_failed: | 239 | err_del_mux_adapters: |
235 | i2c_mux_del_adapters(muxc); | 240 | i2c_mux_del_adapters(muxc); |
241 | err_put_parent: | ||
242 | i2c_put_adapter(parent); | ||
236 | 243 | ||
237 | return ret; | 244 | return ret; |
238 | } | 245 | } |
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index a679e56c44cd..f431da07f861 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c | |||
@@ -554,32 +554,34 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client, | |||
554 | struct completion *completion) | 554 | struct completion *completion) |
555 | { | 555 | { |
556 | struct device *dev = &client->dev; | 556 | struct device *dev = &client->dev; |
557 | long ret; | ||
558 | int error; | 557 | int error; |
559 | int len; | 558 | int len; |
560 | u8 buffer[ETP_I2C_INF_LENGTH]; | 559 | u8 buffer[ETP_I2C_REPORT_LEN]; |
560 | |||
561 | len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN); | ||
562 | if (len != ETP_I2C_REPORT_LEN) { | ||
563 | error = len < 0 ? len : -EIO; | ||
564 | dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n", | ||
565 | error, len); | ||
566 | } | ||
561 | 567 | ||
562 | reinit_completion(completion); | 568 | reinit_completion(completion); |
563 | enable_irq(client->irq); | 569 | enable_irq(client->irq); |
564 | 570 | ||
565 | error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET); | 571 | error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET); |
566 | if (!error) | ||
567 | ret = wait_for_completion_interruptible_timeout(completion, | ||
568 | msecs_to_jiffies(300)); | ||
569 | disable_irq(client->irq); | ||
570 | |||
571 | if (error) { | 572 | if (error) { |
572 | dev_err(dev, "device reset failed: %d\n", error); | 573 | dev_err(dev, "device reset failed: %d\n", error); |
573 | return error; | 574 | } else if (!wait_for_completion_timeout(completion, |
574 | } else if (ret == 0) { | 575 | msecs_to_jiffies(300))) { |
575 | dev_err(dev, "timeout waiting for device reset\n"); | 576 | dev_err(dev, "timeout waiting for device reset\n"); |
576 | return -ETIMEDOUT; | 577 | error = -ETIMEDOUT; |
577 | } else if (ret < 0) { | ||
578 | error = ret; | ||
579 | dev_err(dev, "error waiting for device reset: %d\n", error); | ||
580 | return error; | ||
581 | } | 578 | } |
582 | 579 | ||
580 | disable_irq(client->irq); | ||
581 | |||
582 | if (error) | ||
583 | return error; | ||
584 | |||
583 | len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH); | 585 | len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH); |
584 | if (len != ETP_I2C_INF_LENGTH) { | 586 | if (len != ETP_I2C_INF_LENGTH) { |
585 | error = len < 0 ? len : -EIO; | 587 | error = len < 0 ? len : -EIO; |
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 2302aef2b2d4..dd042a9b0aaa 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c | |||
@@ -350,6 +350,7 @@ static bool mxt_object_readable(unsigned int type) | |||
350 | case MXT_TOUCH_KEYARRAY_T15: | 350 | case MXT_TOUCH_KEYARRAY_T15: |
351 | case MXT_TOUCH_PROXIMITY_T23: | 351 | case MXT_TOUCH_PROXIMITY_T23: |
352 | case MXT_TOUCH_PROXKEY_T52: | 352 | case MXT_TOUCH_PROXKEY_T52: |
353 | case MXT_TOUCH_MULTITOUCHSCREEN_T100: | ||
353 | case MXT_PROCI_GRIPFACE_T20: | 354 | case MXT_PROCI_GRIPFACE_T20: |
354 | case MXT_PROCG_NOISE_T22: | 355 | case MXT_PROCG_NOISE_T22: |
355 | case MXT_PROCI_ONETOUCH_T24: | 356 | case MXT_PROCI_ONETOUCH_T24: |
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 8cf8d8d5d4ef..f872817e81e4 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c | |||
@@ -471,7 +471,7 @@ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN, | |||
471 | static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, | 471 | static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, |
472 | M09_REGISTER_OFFSET, 0, 31); | 472 | M09_REGISTER_OFFSET, 0, 31); |
473 | static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD, | 473 | static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD, |
474 | M09_REGISTER_THRESHOLD, 20, 80); | 474 | M09_REGISTER_THRESHOLD, 0, 80); |
475 | static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE, | 475 | static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE, |
476 | NO_REGISTER, 3, 14); | 476 | NO_REGISTER, 3, 14); |
477 | 477 | ||
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 8348f366ddd1..62618e77bedc 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
@@ -396,13 +396,13 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, | |||
396 | dma_addr_t iova, size_t size) | 396 | dma_addr_t iova, size_t size) |
397 | { | 397 | { |
398 | struct iova_domain *iovad = &cookie->iovad; | 398 | struct iova_domain *iovad = &cookie->iovad; |
399 | unsigned long shift = iova_shift(iovad); | ||
400 | 399 | ||
401 | /* The MSI case is only ever cleaning up its most recent allocation */ | 400 | /* The MSI case is only ever cleaning up its most recent allocation */ |
402 | if (cookie->type == IOMMU_DMA_MSI_COOKIE) | 401 | if (cookie->type == IOMMU_DMA_MSI_COOKIE) |
403 | cookie->msi_iova -= size; | 402 | cookie->msi_iova -= size; |
404 | else | 403 | else |
405 | free_iova_fast(iovad, iova >> shift, size >> shift); | 404 | free_iova_fast(iovad, iova_pfn(iovad, iova), |
405 | size >> iova_shift(iovad)); | ||
406 | } | 406 | } |
407 | 407 | ||
408 | static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, | 408 | static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, |
@@ -617,11 +617,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, | |||
617 | { | 617 | { |
618 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 618 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
619 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | 619 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
620 | struct iova_domain *iovad = &cookie->iovad; | 620 | size_t iova_off = 0; |
621 | size_t iova_off = iova_offset(iovad, phys); | ||
622 | dma_addr_t iova; | 621 | dma_addr_t iova; |
623 | 622 | ||
624 | size = iova_align(iovad, size + iova_off); | 623 | if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { |
624 | iova_off = iova_offset(&cookie->iovad, phys); | ||
625 | size = iova_align(&cookie->iovad, size + iova_off); | ||
626 | } | ||
627 | |||
625 | iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); | 628 | iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); |
626 | if (!iova) | 629 | if (!iova) |
627 | return DMA_ERROR_CODE; | 630 | return DMA_ERROR_CODE; |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 90ab0115d78e..fc2765ccdb57 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -2055,11 +2055,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain, | |||
2055 | if (context_copied(context)) { | 2055 | if (context_copied(context)) { |
2056 | u16 did_old = context_domain_id(context); | 2056 | u16 did_old = context_domain_id(context); |
2057 | 2057 | ||
2058 | if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) | 2058 | if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) { |
2059 | iommu->flush.flush_context(iommu, did_old, | 2059 | iommu->flush.flush_context(iommu, did_old, |
2060 | (((u16)bus) << 8) | devfn, | 2060 | (((u16)bus) << 8) | devfn, |
2061 | DMA_CCMD_MASK_NOBIT, | 2061 | DMA_CCMD_MASK_NOBIT, |
2062 | DMA_CCMD_DEVICE_INVL); | 2062 | DMA_CCMD_DEVICE_INVL); |
2063 | iommu->flush.flush_iotlb(iommu, did_old, 0, 0, | ||
2064 | DMA_TLB_DSI_FLUSH); | ||
2065 | } | ||
2063 | } | 2066 | } |
2064 | 2067 | ||
2065 | pgd = domain->pgd; | 2068 | pgd = domain->pgd; |
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index a27ef570c328..bc1efbfb9ddf 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/component.h> | 19 | #include <linux/component.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/dma-iommu.h> | 22 | #include <linux/dma-iommu.h> |
22 | #include <linux/err.h> | 23 | #include <linux/err.h> |
23 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index d2306c821ebb..31d6b5a582d2 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c | |||
@@ -106,10 +106,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, | |||
106 | static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, | 106 | static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, |
107 | u32 *mask, u32 *addr) | 107 | u32 *mask, u32 *addr) |
108 | { | 108 | { |
109 | unsigned int ofst; | 109 | unsigned int ofst = (hwirq / 32) * 4; |
110 | |||
111 | hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP; | ||
112 | ofst = hwirq / 32 * 4; | ||
113 | 110 | ||
114 | *mask = 1 << (hwirq % 32); | 111 | *mask = 1 << (hwirq % 32); |
115 | *addr = ofst + REG_MBIGEN_CLEAR_OFFSET; | 112 | *addr = ofst + REG_MBIGEN_CLEAR_OFFSET; |
@@ -337,9 +334,15 @@ static int mbigen_device_probe(struct platform_device *pdev) | |||
337 | mgn_chip->pdev = pdev; | 334 | mgn_chip->pdev = pdev; |
338 | 335 | ||
339 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 336 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
340 | mgn_chip->base = devm_ioremap_resource(&pdev->dev, res); | 337 | if (!res) |
341 | if (IS_ERR(mgn_chip->base)) | 338 | return -EINVAL; |
342 | return PTR_ERR(mgn_chip->base); | 339 | |
340 | mgn_chip->base = devm_ioremap(&pdev->dev, res->start, | ||
341 | resource_size(res)); | ||
342 | if (!mgn_chip->base) { | ||
343 | dev_err(&pdev->dev, "failed to ioremap %pR\n", res); | ||
344 | return -ENOMEM; | ||
345 | } | ||
343 | 346 | ||
344 | if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) | 347 | if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) |
345 | err = mbigen_of_create_domain(pdev, mgn_chip); | 348 | err = mbigen_of_create_domain(pdev, mgn_chip); |
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 78a7ce816a47..9a873118ea5f 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c | |||
@@ -285,7 +285,7 @@ static int pca955x_probe(struct i2c_client *client, | |||
285 | "slave address 0x%02x\n", | 285 | "slave address 0x%02x\n", |
286 | client->name, chip->bits, client->addr); | 286 | client->name, chip->bits, client->addr); |
287 | 287 | ||
288 | if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) | 288 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) |
289 | return -EIO; | 289 | return -EIO; |
290 | 290 | ||
291 | if (pdata) { | 291 | if (pdata) { |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 5db11a405129..cd8139593ccd 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -218,7 +218,7 @@ static DEFINE_SPINLOCK(param_spinlock); | |||
218 | * Buffers are freed after this timeout | 218 | * Buffers are freed after this timeout |
219 | */ | 219 | */ |
220 | static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; | 220 | static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; |
221 | static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; | 221 | static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; |
222 | 222 | ||
223 | static unsigned long dm_bufio_peak_allocated; | 223 | static unsigned long dm_bufio_peak_allocated; |
224 | static unsigned long dm_bufio_allocated_kmem_cache; | 224 | static unsigned long dm_bufio_allocated_kmem_cache; |
@@ -1558,10 +1558,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) | |||
1558 | return true; | 1558 | return true; |
1559 | } | 1559 | } |
1560 | 1560 | ||
1561 | static unsigned get_retain_buffers(struct dm_bufio_client *c) | 1561 | static unsigned long get_retain_buffers(struct dm_bufio_client *c) |
1562 | { | 1562 | { |
1563 | unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); | 1563 | unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); |
1564 | return retain_bytes / c->block_size; | 1564 | return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); |
1565 | } | 1565 | } |
1566 | 1566 | ||
1567 | static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, | 1567 | static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, |
@@ -1571,7 +1571,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, | |||
1571 | struct dm_buffer *b, *tmp; | 1571 | struct dm_buffer *b, *tmp; |
1572 | unsigned long freed = 0; | 1572 | unsigned long freed = 0; |
1573 | unsigned long count = nr_to_scan; | 1573 | unsigned long count = nr_to_scan; |
1574 | unsigned retain_target = get_retain_buffers(c); | 1574 | unsigned long retain_target = get_retain_buffers(c); |
1575 | 1575 | ||
1576 | for (l = 0; l < LIST_SIZE; l++) { | 1576 | for (l = 0; l < LIST_SIZE; l++) { |
1577 | list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { | 1577 | list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { |
@@ -1794,8 +1794,8 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz) | |||
1794 | static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) | 1794 | static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) |
1795 | { | 1795 | { |
1796 | struct dm_buffer *b, *tmp; | 1796 | struct dm_buffer *b, *tmp; |
1797 | unsigned retain_target = get_retain_buffers(c); | 1797 | unsigned long retain_target = get_retain_buffers(c); |
1798 | unsigned count; | 1798 | unsigned long count; |
1799 | LIST_HEAD(write_list); | 1799 | LIST_HEAD(write_list); |
1800 | 1800 | ||
1801 | dm_bufio_lock(c); | 1801 | dm_bufio_lock(c); |
@@ -1955,7 +1955,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache"); | |||
1955 | module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR); | 1955 | module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR); |
1956 | MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); | 1956 | MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); |
1957 | 1957 | ||
1958 | module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR); | 1958 | module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR); |
1959 | MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); | 1959 | MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); |
1960 | 1960 | ||
1961 | module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR); | 1961 | module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR); |
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c index 9b1afdfb13f0..707233891291 100644 --- a/drivers/md/dm-cache-background-tracker.c +++ b/drivers/md/dm-cache-background-tracker.c | |||
@@ -33,6 +33,11 @@ struct background_tracker *btracker_create(unsigned max_work) | |||
33 | { | 33 | { |
34 | struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL); | 34 | struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL); |
35 | 35 | ||
36 | if (!b) { | ||
37 | DMERR("couldn't create background_tracker"); | ||
38 | return NULL; | ||
39 | } | ||
40 | |||
36 | b->max_work = max_work; | 41 | b->max_work = max_work; |
37 | atomic_set(&b->pending_promotes, 0); | 42 | atomic_set(&b->pending_promotes, 0); |
38 | atomic_set(&b->pending_writebacks, 0); | 43 | atomic_set(&b->pending_writebacks, 0); |
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c index 72479bd61e11..e5eb9c9b4bc8 100644 --- a/drivers/md/dm-cache-policy-smq.c +++ b/drivers/md/dm-cache-policy-smq.c | |||
@@ -1120,8 +1120,6 @@ static bool clean_target_met(struct smq_policy *mq, bool idle) | |||
1120 | * Cache entries may not be populated. So we cannot rely on the | 1120 | * Cache entries may not be populated. So we cannot rely on the |
1121 | * size of the clean queue. | 1121 | * size of the clean queue. |
1122 | */ | 1122 | */ |
1123 | unsigned nr_clean; | ||
1124 | |||
1125 | if (idle) { | 1123 | if (idle) { |
1126 | /* | 1124 | /* |
1127 | * We'd like to clean everything. | 1125 | * We'd like to clean everything. |
@@ -1129,18 +1127,16 @@ static bool clean_target_met(struct smq_policy *mq, bool idle) | |||
1129 | return q_size(&mq->dirty) == 0u; | 1127 | return q_size(&mq->dirty) == 0u; |
1130 | } | 1128 | } |
1131 | 1129 | ||
1132 | nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty); | 1130 | /* |
1133 | return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >= | 1131 | * If we're busy we don't worry about cleaning at all. |
1134 | percent_to_target(mq, CLEAN_TARGET); | 1132 | */ |
1133 | return true; | ||
1135 | } | 1134 | } |
1136 | 1135 | ||
1137 | static bool free_target_met(struct smq_policy *mq, bool idle) | 1136 | static bool free_target_met(struct smq_policy *mq) |
1138 | { | 1137 | { |
1139 | unsigned nr_free; | 1138 | unsigned nr_free; |
1140 | 1139 | ||
1141 | if (!idle) | ||
1142 | return true; | ||
1143 | |||
1144 | nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; | 1140 | nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; |
1145 | return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= | 1141 | return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= |
1146 | percent_to_target(mq, FREE_TARGET); | 1142 | percent_to_target(mq, FREE_TARGET); |
@@ -1190,9 +1186,9 @@ static void queue_demotion(struct smq_policy *mq) | |||
1190 | if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed))) | 1186 | if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed))) |
1191 | return; | 1187 | return; |
1192 | 1188 | ||
1193 | e = q_peek(&mq->clean, mq->clean.nr_levels, true); | 1189 | e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); |
1194 | if (!e) { | 1190 | if (!e) { |
1195 | if (!clean_target_met(mq, false)) | 1191 | if (!clean_target_met(mq, true)) |
1196 | queue_writeback(mq); | 1192 | queue_writeback(mq); |
1197 | return; | 1193 | return; |
1198 | } | 1194 | } |
@@ -1220,7 +1216,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock, | |||
1220 | * We always claim to be 'idle' to ensure some demotions happen | 1216 | * We always claim to be 'idle' to ensure some demotions happen |
1221 | * with continuous loads. | 1217 | * with continuous loads. |
1222 | */ | 1218 | */ |
1223 | if (!free_target_met(mq, true)) | 1219 | if (!free_target_met(mq)) |
1224 | queue_demotion(mq); | 1220 | queue_demotion(mq); |
1225 | return; | 1221 | return; |
1226 | } | 1222 | } |
@@ -1421,14 +1417,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle, | |||
1421 | spin_lock_irqsave(&mq->lock, flags); | 1417 | spin_lock_irqsave(&mq->lock, flags); |
1422 | r = btracker_issue(mq->bg_work, result); | 1418 | r = btracker_issue(mq->bg_work, result); |
1423 | if (r == -ENODATA) { | 1419 | if (r == -ENODATA) { |
1424 | /* find some writeback work to do */ | 1420 | if (!clean_target_met(mq, idle)) { |
1425 | if (mq->migrations_allowed && !free_target_met(mq, idle)) | ||
1426 | queue_demotion(mq); | ||
1427 | |||
1428 | else if (!clean_target_met(mq, idle)) | ||
1429 | queue_writeback(mq); | 1421 | queue_writeback(mq); |
1430 | 1422 | r = btracker_issue(mq->bg_work, result); | |
1431 | r = btracker_issue(mq->bg_work, result); | 1423 | } |
1432 | } | 1424 | } |
1433 | spin_unlock_irqrestore(&mq->lock, flags); | 1425 | spin_unlock_irqrestore(&mq->lock, flags); |
1434 | 1426 | ||
@@ -1452,6 +1444,7 @@ static void __complete_background_work(struct smq_policy *mq, | |||
1452 | clear_pending(mq, e); | 1444 | clear_pending(mq, e); |
1453 | if (success) { | 1445 | if (success) { |
1454 | e->oblock = work->oblock; | 1446 | e->oblock = work->oblock; |
1447 | e->level = NR_CACHE_LEVELS - 1; | ||
1455 | push(mq, e); | 1448 | push(mq, e); |
1456 | // h, q, a | 1449 | // h, q, a |
1457 | } else { | 1450 | } else { |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1db375f50a13..d682a0511381 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -94,6 +94,9 @@ static void iot_io_begin(struct io_tracker *iot, sector_t len) | |||
94 | 94 | ||
95 | static void __iot_io_end(struct io_tracker *iot, sector_t len) | 95 | static void __iot_io_end(struct io_tracker *iot, sector_t len) |
96 | { | 96 | { |
97 | if (!len) | ||
98 | return; | ||
99 | |||
97 | iot->in_flight -= len; | 100 | iot->in_flight -= len; |
98 | if (!iot->in_flight) | 101 | if (!iot->in_flight) |
99 | iot->idle_time = jiffies; | 102 | iot->idle_time = jiffies; |
@@ -474,7 +477,7 @@ struct cache { | |||
474 | spinlock_t invalidation_lock; | 477 | spinlock_t invalidation_lock; |
475 | struct list_head invalidation_requests; | 478 | struct list_head invalidation_requests; |
476 | 479 | ||
477 | struct io_tracker origin_tracker; | 480 | struct io_tracker tracker; |
478 | 481 | ||
479 | struct work_struct commit_ws; | 482 | struct work_struct commit_ws; |
480 | struct batcher committer; | 483 | struct batcher committer; |
@@ -901,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) | |||
901 | 904 | ||
902 | static bool accountable_bio(struct cache *cache, struct bio *bio) | 905 | static bool accountable_bio(struct cache *cache, struct bio *bio) |
903 | { | 906 | { |
904 | return ((bio->bi_bdev == cache->origin_dev->bdev) && | 907 | return bio_op(bio) != REQ_OP_DISCARD; |
905 | bio_op(bio) != REQ_OP_DISCARD); | ||
906 | } | 908 | } |
907 | 909 | ||
908 | static void accounted_begin(struct cache *cache, struct bio *bio) | 910 | static void accounted_begin(struct cache *cache, struct bio *bio) |
@@ -912,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio) | |||
912 | 914 | ||
913 | if (accountable_bio(cache, bio)) { | 915 | if (accountable_bio(cache, bio)) { |
914 | pb->len = bio_sectors(bio); | 916 | pb->len = bio_sectors(bio); |
915 | iot_io_begin(&cache->origin_tracker, pb->len); | 917 | iot_io_begin(&cache->tracker, pb->len); |
916 | } | 918 | } |
917 | } | 919 | } |
918 | 920 | ||
@@ -921,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio) | |||
921 | size_t pb_data_size = get_per_bio_data_size(cache); | 923 | size_t pb_data_size = get_per_bio_data_size(cache); |
922 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | 924 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); |
923 | 925 | ||
924 | iot_io_end(&cache->origin_tracker, pb->len); | 926 | iot_io_end(&cache->tracker, pb->len); |
925 | } | 927 | } |
926 | 928 | ||
927 | static void accounted_request(struct cache *cache, struct bio *bio) | 929 | static void accounted_request(struct cache *cache, struct bio *bio) |
@@ -1716,20 +1718,19 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock, | |||
1716 | 1718 | ||
1717 | enum busy { | 1719 | enum busy { |
1718 | IDLE, | 1720 | IDLE, |
1719 | MODERATE, | ||
1720 | BUSY | 1721 | BUSY |
1721 | }; | 1722 | }; |
1722 | 1723 | ||
1723 | static enum busy spare_migration_bandwidth(struct cache *cache) | 1724 | static enum busy spare_migration_bandwidth(struct cache *cache) |
1724 | { | 1725 | { |
1725 | bool idle = iot_idle_for(&cache->origin_tracker, HZ); | 1726 | bool idle = iot_idle_for(&cache->tracker, HZ); |
1726 | sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * | 1727 | sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * |
1727 | cache->sectors_per_block; | 1728 | cache->sectors_per_block; |
1728 | 1729 | ||
1729 | if (current_volume <= cache->migration_threshold) | 1730 | if (idle && current_volume <= cache->migration_threshold) |
1730 | return idle ? IDLE : MODERATE; | 1731 | return IDLE; |
1731 | else | 1732 | else |
1732 | return idle ? MODERATE : BUSY; | 1733 | return BUSY; |
1733 | } | 1734 | } |
1734 | 1735 | ||
1735 | static void inc_hit_counter(struct cache *cache, struct bio *bio) | 1736 | static void inc_hit_counter(struct cache *cache, struct bio *bio) |
@@ -2045,8 +2046,6 @@ static void check_migrations(struct work_struct *ws) | |||
2045 | 2046 | ||
2046 | for (;;) { | 2047 | for (;;) { |
2047 | b = spare_migration_bandwidth(cache); | 2048 | b = spare_migration_bandwidth(cache); |
2048 | if (b == BUSY) | ||
2049 | break; | ||
2050 | 2049 | ||
2051 | r = policy_get_background_work(cache->policy, b == IDLE, &op); | 2050 | r = policy_get_background_work(cache->policy, b == IDLE, &op); |
2052 | if (r == -ENODATA) | 2051 | if (r == -ENODATA) |
@@ -2717,7 +2716,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
2717 | 2716 | ||
2718 | batcher_init(&cache->committer, commit_op, cache, | 2717 | batcher_init(&cache->committer, commit_op, cache, |
2719 | issue_op, cache, cache->wq); | 2718 | issue_op, cache, cache->wq); |
2720 | iot_init(&cache->origin_tracker); | 2719 | iot_init(&cache->tracker); |
2721 | 2720 | ||
2722 | init_rwsem(&cache->background_work_lock); | 2721 | init_rwsem(&cache->background_work_lock); |
2723 | prevent_background_work(cache); | 2722 | prevent_background_work(cache); |
@@ -2941,7 +2940,7 @@ static void cache_postsuspend(struct dm_target *ti) | |||
2941 | 2940 | ||
2942 | cancel_delayed_work(&cache->waker); | 2941 | cancel_delayed_work(&cache->waker); |
2943 | flush_workqueue(cache->wq); | 2942 | flush_workqueue(cache->wq); |
2944 | WARN_ON(cache->origin_tracker.in_flight); | 2943 | WARN_ON(cache->tracker.in_flight); |
2945 | 2944 | ||
2946 | /* | 2945 | /* |
2947 | * If it's a flush suspend there won't be any deferred bios, so this | 2946 | * If it's a flush suspend there won't be any deferred bios, so this |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 926a6bcb32c8..3df056b73b66 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -447,7 +447,7 @@ failed: | |||
447 | * it has been invoked. | 447 | * it has been invoked. |
448 | */ | 448 | */ |
449 | #define dm_report_EIO(m) \ | 449 | #define dm_report_EIO(m) \ |
450 | ({ \ | 450 | do { \ |
451 | struct mapped_device *md = dm_table_get_md((m)->ti->table); \ | 451 | struct mapped_device *md = dm_table_get_md((m)->ti->table); \ |
452 | \ | 452 | \ |
453 | pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \ | 453 | pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \ |
@@ -455,8 +455,7 @@ failed: | |||
455 | test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \ | 455 | test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \ |
456 | test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \ | 456 | test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \ |
457 | dm_noflush_suspending((m)->ti)); \ | 457 | dm_noflush_suspending((m)->ti)); \ |
458 | -EIO; \ | 458 | } while (0) |
459 | }) | ||
460 | 459 | ||
461 | /* | 460 | /* |
462 | * Map cloned requests (request-based multipath) | 461 | * Map cloned requests (request-based multipath) |
@@ -481,7 +480,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, | |||
481 | if (!pgpath) { | 480 | if (!pgpath) { |
482 | if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) | 481 | if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) |
483 | return DM_MAPIO_DELAY_REQUEUE; | 482 | return DM_MAPIO_DELAY_REQUEUE; |
484 | return dm_report_EIO(m); /* Failed */ | 483 | dm_report_EIO(m); /* Failed */ |
484 | return DM_MAPIO_KILL; | ||
485 | } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) || | 485 | } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) || |
486 | test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) { | 486 | test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) { |
487 | if (pg_init_all_paths(m)) | 487 | if (pg_init_all_paths(m)) |
@@ -558,7 +558,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m | |||
558 | if (!pgpath) { | 558 | if (!pgpath) { |
559 | if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) | 559 | if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) |
560 | return DM_MAPIO_REQUEUE; | 560 | return DM_MAPIO_REQUEUE; |
561 | return dm_report_EIO(m); | 561 | dm_report_EIO(m); |
562 | return -EIO; | ||
562 | } | 563 | } |
563 | 564 | ||
564 | mpio->pgpath = pgpath; | 565 | mpio->pgpath = pgpath; |
@@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone, | |||
1493 | if (atomic_read(&m->nr_valid_paths) == 0 && | 1494 | if (atomic_read(&m->nr_valid_paths) == 0 && |
1494 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { | 1495 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { |
1495 | if (error == -EIO) | 1496 | if (error == -EIO) |
1496 | error = dm_report_EIO(m); | 1497 | dm_report_EIO(m); |
1497 | /* complete with the original error */ | 1498 | /* complete with the original error */ |
1498 | r = DM_ENDIO_DONE; | 1499 | r = DM_ENDIO_DONE; |
1499 | } | 1500 | } |
@@ -1524,8 +1525,10 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone, | |||
1524 | fail_path(mpio->pgpath); | 1525 | fail_path(mpio->pgpath); |
1525 | 1526 | ||
1526 | if (atomic_read(&m->nr_valid_paths) == 0 && | 1527 | if (atomic_read(&m->nr_valid_paths) == 0 && |
1527 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) | 1528 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { |
1528 | return dm_report_EIO(m); | 1529 | dm_report_EIO(m); |
1530 | return -EIO; | ||
1531 | } | ||
1529 | 1532 | ||
1530 | /* Queue for the daemon to resubmit */ | 1533 | /* Queue for the daemon to resubmit */ |
1531 | dm_bio_restore(get_bio_details_from_bio(clone), clone); | 1534 | dm_bio_restore(get_bio_details_from_bio(clone), clone); |
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 2af27026aa2e..b639fa7246ee 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
@@ -507,6 +507,7 @@ static int map_request(struct dm_rq_target_io *tio) | |||
507 | case DM_MAPIO_KILL: | 507 | case DM_MAPIO_KILL: |
508 | /* The target wants to complete the I/O */ | 508 | /* The target wants to complete the I/O */ |
509 | dm_kill_unmapped_request(rq, -EIO); | 509 | dm_kill_unmapped_request(rq, -EIO); |
510 | break; | ||
510 | default: | 511 | default: |
511 | DMWARN("unimplemented target map return value: %d", r); | 512 | DMWARN("unimplemented target map return value: %d", r); |
512 | BUG(); | 513 | BUG(); |
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 0f0251d0d337..d31d18d9727c 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c | |||
@@ -484,11 +484,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd) | |||
484 | if (r < 0) | 484 | if (r < 0) |
485 | return r; | 485 | return r; |
486 | 486 | ||
487 | r = save_sm_roots(pmd); | 487 | r = dm_tm_pre_commit(pmd->tm); |
488 | if (r < 0) | 488 | if (r < 0) |
489 | return r; | 489 | return r; |
490 | 490 | ||
491 | r = dm_tm_pre_commit(pmd->tm); | 491 | r = save_sm_roots(pmd); |
492 | if (r < 0) | 492 | if (r < 0) |
493 | return r; | 493 | return r; |
494 | 494 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index 82f798be964f..10367ffe92e3 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -8022,18 +8022,15 @@ EXPORT_SYMBOL(md_write_end); | |||
8022 | * may proceed without blocking. It is important to call this before | 8022 | * may proceed without blocking. It is important to call this before |
8023 | * attempting a GFP_KERNEL allocation while holding the mddev lock. | 8023 | * attempting a GFP_KERNEL allocation while holding the mddev lock. |
8024 | * Must be called with mddev_lock held. | 8024 | * Must be called with mddev_lock held. |
8025 | * | ||
8026 | * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock | ||
8027 | * is dropped, so return -EAGAIN after notifying userspace. | ||
8028 | */ | 8025 | */ |
8029 | int md_allow_write(struct mddev *mddev) | 8026 | void md_allow_write(struct mddev *mddev) |
8030 | { | 8027 | { |
8031 | if (!mddev->pers) | 8028 | if (!mddev->pers) |
8032 | return 0; | 8029 | return; |
8033 | if (mddev->ro) | 8030 | if (mddev->ro) |
8034 | return 0; | 8031 | return; |
8035 | if (!mddev->pers->sync_request) | 8032 | if (!mddev->pers->sync_request) |
8036 | return 0; | 8033 | return; |
8037 | 8034 | ||
8038 | spin_lock(&mddev->lock); | 8035 | spin_lock(&mddev->lock); |
8039 | if (mddev->in_sync) { | 8036 | if (mddev->in_sync) { |
@@ -8046,13 +8043,12 @@ int md_allow_write(struct mddev *mddev) | |||
8046 | spin_unlock(&mddev->lock); | 8043 | spin_unlock(&mddev->lock); |
8047 | md_update_sb(mddev, 0); | 8044 | md_update_sb(mddev, 0); |
8048 | sysfs_notify_dirent_safe(mddev->sysfs_state); | 8045 | sysfs_notify_dirent_safe(mddev->sysfs_state); |
8046 | /* wait for the dirty state to be recorded in the metadata */ | ||
8047 | wait_event(mddev->sb_wait, | ||
8048 | !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) && | ||
8049 | !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); | ||
8049 | } else | 8050 | } else |
8050 | spin_unlock(&mddev->lock); | 8051 | spin_unlock(&mddev->lock); |
8051 | |||
8052 | if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) | ||
8053 | return -EAGAIN; | ||
8054 | else | ||
8055 | return 0; | ||
8056 | } | 8052 | } |
8057 | EXPORT_SYMBOL_GPL(md_allow_write); | 8053 | EXPORT_SYMBOL_GPL(md_allow_write); |
8058 | 8054 | ||
diff --git a/drivers/md/md.h b/drivers/md/md.h index 4e75d121bfcc..11f15146ce51 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -665,7 +665,7 @@ extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, | |||
665 | bool metadata_op); | 665 | bool metadata_op); |
666 | extern void md_do_sync(struct md_thread *thread); | 666 | extern void md_do_sync(struct md_thread *thread); |
667 | extern void md_new_event(struct mddev *mddev); | 667 | extern void md_new_event(struct mddev *mddev); |
668 | extern int md_allow_write(struct mddev *mddev); | 668 | extern void md_allow_write(struct mddev *mddev); |
669 | extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); | 669 | extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); |
670 | extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); | 670 | extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); |
671 | extern int md_check_no_bitmap(struct mddev *mddev); | 671 | extern int md_check_no_bitmap(struct mddev *mddev); |
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index ebb280a14325..32adf6b4a9c7 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c | |||
@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b) | |||
142 | 142 | ||
143 | static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b) | 143 | static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b) |
144 | { | 144 | { |
145 | int r; | ||
146 | uint32_t old_count; | ||
145 | enum allocation_event ev; | 147 | enum allocation_event ev; |
146 | struct sm_disk *smd = container_of(sm, struct sm_disk, sm); | 148 | struct sm_disk *smd = container_of(sm, struct sm_disk, sm); |
147 | 149 | ||
148 | return sm_ll_dec(&smd->ll, b, &ev); | 150 | r = sm_ll_dec(&smd->ll, b, &ev); |
151 | if (!r && (ev == SM_FREE)) { | ||
152 | /* | ||
153 | * It's only free if it's also free in the last | ||
154 | * transaction. | ||
155 | */ | ||
156 | r = sm_ll_lookup(&smd->old_ll, b, &old_count); | ||
157 | if (!r && !old_count) | ||
158 | smd->nr_allocated_this_transaction--; | ||
159 | } | ||
160 | |||
161 | return r; | ||
149 | } | 162 | } |
150 | 163 | ||
151 | static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) | 164 | static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 84e58596594d..d6c0bc76e837 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -385,7 +385,7 @@ static int raid0_run(struct mddev *mddev) | |||
385 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); | 385 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); |
386 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); | 386 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); |
387 | blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); | 387 | blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); |
388 | blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); | 388 | blk_queue_max_discard_sectors(mddev->queue, UINT_MAX); |
389 | 389 | ||
390 | blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); | 390 | blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); |
391 | blk_queue_io_opt(mddev->queue, | 391 | blk_queue_io_opt(mddev->queue, |
@@ -459,6 +459,95 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev, | |||
459 | } | 459 | } |
460 | } | 460 | } |
461 | 461 | ||
462 | static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) | ||
463 | { | ||
464 | struct r0conf *conf = mddev->private; | ||
465 | struct strip_zone *zone; | ||
466 | sector_t start = bio->bi_iter.bi_sector; | ||
467 | sector_t end; | ||
468 | unsigned int stripe_size; | ||
469 | sector_t first_stripe_index, last_stripe_index; | ||
470 | sector_t start_disk_offset; | ||
471 | unsigned int start_disk_index; | ||
472 | sector_t end_disk_offset; | ||
473 | unsigned int end_disk_index; | ||
474 | unsigned int disk; | ||
475 | |||
476 | zone = find_zone(conf, &start); | ||
477 | |||
478 | if (bio_end_sector(bio) > zone->zone_end) { | ||
479 | struct bio *split = bio_split(bio, | ||
480 | zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO, | ||
481 | mddev->bio_set); | ||
482 | bio_chain(split, bio); | ||
483 | generic_make_request(bio); | ||
484 | bio = split; | ||
485 | end = zone->zone_end; | ||
486 | } else | ||
487 | end = bio_end_sector(bio); | ||
488 | |||
489 | if (zone != conf->strip_zone) | ||
490 | end = end - zone[-1].zone_end; | ||
491 | |||
492 | /* Now start and end is the offset in zone */ | ||
493 | stripe_size = zone->nb_dev * mddev->chunk_sectors; | ||
494 | |||
495 | first_stripe_index = start; | ||
496 | sector_div(first_stripe_index, stripe_size); | ||
497 | last_stripe_index = end; | ||
498 | sector_div(last_stripe_index, stripe_size); | ||
499 | |||
500 | start_disk_index = (int)(start - first_stripe_index * stripe_size) / | ||
501 | mddev->chunk_sectors; | ||
502 | start_disk_offset = ((int)(start - first_stripe_index * stripe_size) % | ||
503 | mddev->chunk_sectors) + | ||
504 | first_stripe_index * mddev->chunk_sectors; | ||
505 | end_disk_index = (int)(end - last_stripe_index * stripe_size) / | ||
506 | mddev->chunk_sectors; | ||
507 | end_disk_offset = ((int)(end - last_stripe_index * stripe_size) % | ||
508 | mddev->chunk_sectors) + | ||
509 | last_stripe_index * mddev->chunk_sectors; | ||
510 | |||
511 | for (disk = 0; disk < zone->nb_dev; disk++) { | ||
512 | sector_t dev_start, dev_end; | ||
513 | struct bio *discard_bio = NULL; | ||
514 | struct md_rdev *rdev; | ||
515 | |||
516 | if (disk < start_disk_index) | ||
517 | dev_start = (first_stripe_index + 1) * | ||
518 | mddev->chunk_sectors; | ||
519 | else if (disk > start_disk_index) | ||
520 | dev_start = first_stripe_index * mddev->chunk_sectors; | ||
521 | else | ||
522 | dev_start = start_disk_offset; | ||
523 | |||
524 | if (disk < end_disk_index) | ||
525 | dev_end = (last_stripe_index + 1) * mddev->chunk_sectors; | ||
526 | else if (disk > end_disk_index) | ||
527 | dev_end = last_stripe_index * mddev->chunk_sectors; | ||
528 | else | ||
529 | dev_end = end_disk_offset; | ||
530 | |||
531 | if (dev_end <= dev_start) | ||
532 | continue; | ||
533 | |||
534 | rdev = conf->devlist[(zone - conf->strip_zone) * | ||
535 | conf->strip_zone[0].nb_dev + disk]; | ||
536 | if (__blkdev_issue_discard(rdev->bdev, | ||
537 | dev_start + zone->dev_start + rdev->data_offset, | ||
538 | dev_end - dev_start, GFP_NOIO, 0, &discard_bio) || | ||
539 | !discard_bio) | ||
540 | continue; | ||
541 | bio_chain(discard_bio, bio); | ||
542 | if (mddev->gendisk) | ||
543 | trace_block_bio_remap(bdev_get_queue(rdev->bdev), | ||
544 | discard_bio, disk_devt(mddev->gendisk), | ||
545 | bio->bi_iter.bi_sector); | ||
546 | generic_make_request(discard_bio); | ||
547 | } | ||
548 | bio_endio(bio); | ||
549 | } | ||
550 | |||
462 | static void raid0_make_request(struct mddev *mddev, struct bio *bio) | 551 | static void raid0_make_request(struct mddev *mddev, struct bio *bio) |
463 | { | 552 | { |
464 | struct strip_zone *zone; | 553 | struct strip_zone *zone; |
@@ -473,6 +562,11 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
473 | return; | 562 | return; |
474 | } | 563 | } |
475 | 564 | ||
565 | if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) { | ||
566 | raid0_handle_discard(mddev, bio); | ||
567 | return; | ||
568 | } | ||
569 | |||
476 | bio_sector = bio->bi_iter.bi_sector; | 570 | bio_sector = bio->bi_iter.bi_sector; |
477 | sector = bio_sector; | 571 | sector = bio_sector; |
478 | chunk_sects = mddev->chunk_sectors; | 572 | chunk_sects = mddev->chunk_sectors; |
@@ -498,19 +592,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
498 | bio->bi_iter.bi_sector = sector + zone->dev_start + | 592 | bio->bi_iter.bi_sector = sector + zone->dev_start + |
499 | tmp_dev->data_offset; | 593 | tmp_dev->data_offset; |
500 | 594 | ||
501 | if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 595 | if (mddev->gendisk) |
502 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { | 596 | trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), |
503 | /* Just ignore it */ | 597 | bio, disk_devt(mddev->gendisk), |
504 | bio_endio(bio); | 598 | bio_sector); |
505 | } else { | 599 | mddev_check_writesame(mddev, bio); |
506 | if (mddev->gendisk) | 600 | mddev_check_write_zeroes(mddev, bio); |
507 | trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), | 601 | generic_make_request(bio); |
508 | bio, disk_devt(mddev->gendisk), | ||
509 | bio_sector); | ||
510 | mddev_check_writesame(mddev, bio); | ||
511 | mddev_check_write_zeroes(mddev, bio); | ||
512 | generic_make_request(bio); | ||
513 | } | ||
514 | } | 602 | } |
515 | 603 | ||
516 | static void raid0_status(struct seq_file *seq, struct mddev *mddev) | 604 | static void raid0_status(struct seq_file *seq, struct mddev *mddev) |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7ed59351fe97..af5056d56878 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -666,8 +666,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect | |||
666 | break; | 666 | break; |
667 | } | 667 | } |
668 | continue; | 668 | continue; |
669 | } else | 669 | } else { |
670 | if ((sectors > best_good_sectors) && (best_disk >= 0)) | ||
671 | best_disk = -1; | ||
670 | best_good_sectors = sectors; | 672 | best_good_sectors = sectors; |
673 | } | ||
671 | 674 | ||
672 | if (best_disk >= 0) | 675 | if (best_disk >= 0) |
673 | /* At least two disks to choose from so failfast is OK */ | 676 | /* At least two disks to choose from so failfast is OK */ |
@@ -1529,17 +1532,16 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, | |||
1529 | plug = container_of(cb, struct raid1_plug_cb, cb); | 1532 | plug = container_of(cb, struct raid1_plug_cb, cb); |
1530 | else | 1533 | else |
1531 | plug = NULL; | 1534 | plug = NULL; |
1532 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1533 | if (plug) { | 1535 | if (plug) { |
1534 | bio_list_add(&plug->pending, mbio); | 1536 | bio_list_add(&plug->pending, mbio); |
1535 | plug->pending_cnt++; | 1537 | plug->pending_cnt++; |
1536 | } else { | 1538 | } else { |
1539 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1537 | bio_list_add(&conf->pending_bio_list, mbio); | 1540 | bio_list_add(&conf->pending_bio_list, mbio); |
1538 | conf->pending_count++; | 1541 | conf->pending_count++; |
1539 | } | 1542 | spin_unlock_irqrestore(&conf->device_lock, flags); |
1540 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1541 | if (!plug) | ||
1542 | md_wakeup_thread(mddev->thread); | 1543 | md_wakeup_thread(mddev->thread); |
1544 | } | ||
1543 | } | 1545 | } |
1544 | 1546 | ||
1545 | r1_bio_write_done(r1_bio); | 1547 | r1_bio_write_done(r1_bio); |
@@ -3197,7 +3199,7 @@ static int raid1_reshape(struct mddev *mddev) | |||
3197 | struct r1conf *conf = mddev->private; | 3199 | struct r1conf *conf = mddev->private; |
3198 | int cnt, raid_disks; | 3200 | int cnt, raid_disks; |
3199 | unsigned long flags; | 3201 | unsigned long flags; |
3200 | int d, d2, err; | 3202 | int d, d2; |
3201 | 3203 | ||
3202 | /* Cannot change chunk_size, layout, or level */ | 3204 | /* Cannot change chunk_size, layout, or level */ |
3203 | if (mddev->chunk_sectors != mddev->new_chunk_sectors || | 3205 | if (mddev->chunk_sectors != mddev->new_chunk_sectors || |
@@ -3209,11 +3211,8 @@ static int raid1_reshape(struct mddev *mddev) | |||
3209 | return -EINVAL; | 3211 | return -EINVAL; |
3210 | } | 3212 | } |
3211 | 3213 | ||
3212 | if (!mddev_is_clustered(mddev)) { | 3214 | if (!mddev_is_clustered(mddev)) |
3213 | err = md_allow_write(mddev); | 3215 | md_allow_write(mddev); |
3214 | if (err) | ||
3215 | return err; | ||
3216 | } | ||
3217 | 3216 | ||
3218 | raid_disks = mddev->raid_disks + mddev->delta_disks; | 3217 | raid_disks = mddev->raid_disks + mddev->delta_disks; |
3219 | 3218 | ||
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 6b86a0032cf8..4343d7ff9916 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -1282,17 +1282,16 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, | |||
1282 | plug = container_of(cb, struct raid10_plug_cb, cb); | 1282 | plug = container_of(cb, struct raid10_plug_cb, cb); |
1283 | else | 1283 | else |
1284 | plug = NULL; | 1284 | plug = NULL; |
1285 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1286 | if (plug) { | 1285 | if (plug) { |
1287 | bio_list_add(&plug->pending, mbio); | 1286 | bio_list_add(&plug->pending, mbio); |
1288 | plug->pending_cnt++; | 1287 | plug->pending_cnt++; |
1289 | } else { | 1288 | } else { |
1289 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1290 | bio_list_add(&conf->pending_bio_list, mbio); | 1290 | bio_list_add(&conf->pending_bio_list, mbio); |
1291 | conf->pending_count++; | 1291 | conf->pending_count++; |
1292 | } | 1292 | spin_unlock_irqrestore(&conf->device_lock, flags); |
1293 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1294 | if (!plug) | ||
1295 | md_wakeup_thread(mddev->thread); | 1293 | md_wakeup_thread(mddev->thread); |
1294 | } | ||
1296 | } | 1295 | } |
1297 | 1296 | ||
1298 | static void raid10_write_request(struct mddev *mddev, struct bio *bio, | 1297 | static void raid10_write_request(struct mddev *mddev, struct bio *bio, |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 26ba09282e7c..4c00bc248287 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include "md.h" | 24 | #include "md.h" |
25 | #include "raid5.h" | 25 | #include "raid5.h" |
26 | #include "bitmap.h" | 26 | #include "bitmap.h" |
27 | #include "raid5-log.h" | ||
27 | 28 | ||
28 | /* | 29 | /* |
29 | * metadata/data stored in disk with 4k size unit (a block) regardless | 30 | * metadata/data stored in disk with 4k size unit (a block) regardless |
@@ -622,20 +623,30 @@ static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io) | |||
622 | __r5l_set_io_unit_state(io, IO_UNIT_IO_START); | 623 | __r5l_set_io_unit_state(io, IO_UNIT_IO_START); |
623 | spin_unlock_irqrestore(&log->io_list_lock, flags); | 624 | spin_unlock_irqrestore(&log->io_list_lock, flags); |
624 | 625 | ||
626 | /* | ||
627 | * In case of journal device failures, submit_bio will get error | ||
628 | * and calls endio, then active stripes will continue write | ||
629 | * process. Therefore, it is not necessary to check Faulty bit | ||
630 | * of journal device here. | ||
631 | * | ||
632 | * We can't check split_bio after current_bio is submitted. If | ||
633 | * io->split_bio is null, after current_bio is submitted, current_bio | ||
634 | * might already be completed and the io_unit is freed. We submit | ||
635 | * split_bio first to avoid the issue. | ||
636 | */ | ||
637 | if (io->split_bio) { | ||
638 | if (io->has_flush) | ||
639 | io->split_bio->bi_opf |= REQ_PREFLUSH; | ||
640 | if (io->has_fua) | ||
641 | io->split_bio->bi_opf |= REQ_FUA; | ||
642 | submit_bio(io->split_bio); | ||
643 | } | ||
644 | |||
625 | if (io->has_flush) | 645 | if (io->has_flush) |
626 | io->current_bio->bi_opf |= REQ_PREFLUSH; | 646 | io->current_bio->bi_opf |= REQ_PREFLUSH; |
627 | if (io->has_fua) | 647 | if (io->has_fua) |
628 | io->current_bio->bi_opf |= REQ_FUA; | 648 | io->current_bio->bi_opf |= REQ_FUA; |
629 | submit_bio(io->current_bio); | 649 | submit_bio(io->current_bio); |
630 | |||
631 | if (!io->split_bio) | ||
632 | return; | ||
633 | |||
634 | if (io->has_flush) | ||
635 | io->split_bio->bi_opf |= REQ_PREFLUSH; | ||
636 | if (io->has_fua) | ||
637 | io->split_bio->bi_opf |= REQ_FUA; | ||
638 | submit_bio(io->split_bio); | ||
639 | } | 650 | } |
640 | 651 | ||
641 | /* deferred io_unit will be dispatched here */ | 652 | /* deferred io_unit will be dispatched here */ |
@@ -670,6 +681,11 @@ static void r5c_disable_writeback_async(struct work_struct *work) | |||
670 | return; | 681 | return; |
671 | pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", | 682 | pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", |
672 | mdname(mddev)); | 683 | mdname(mddev)); |
684 | |||
685 | /* wait superblock change before suspend */ | ||
686 | wait_event(mddev->sb_wait, | ||
687 | !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); | ||
688 | |||
673 | mddev_suspend(mddev); | 689 | mddev_suspend(mddev); |
674 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | 690 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; |
675 | mddev_resume(mddev); | 691 | mddev_resume(mddev); |
@@ -2621,8 +2637,11 @@ int r5c_try_caching_write(struct r5conf *conf, | |||
2621 | * When run in degraded mode, array is set to write-through mode. | 2637 | * When run in degraded mode, array is set to write-through mode. |
2622 | * This check helps drain pending write safely in the transition to | 2638 | * This check helps drain pending write safely in the transition to |
2623 | * write-through mode. | 2639 | * write-through mode. |
2640 | * | ||
2641 | * When a stripe is syncing, the write is also handled in write | ||
2642 | * through mode. | ||
2624 | */ | 2643 | */ |
2625 | if (s->failed) { | 2644 | if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) { |
2626 | r5c_make_stripe_write_out(sh); | 2645 | r5c_make_stripe_write_out(sh); |
2627 | return -EAGAIN; | 2646 | return -EAGAIN; |
2628 | } | 2647 | } |
@@ -2825,6 +2844,9 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, | |||
2825 | } | 2844 | } |
2826 | 2845 | ||
2827 | r5l_append_flush_payload(log, sh->sector); | 2846 | r5l_append_flush_payload(log, sh->sector); |
2847 | /* stripe is flused to raid disks, we can do resync now */ | ||
2848 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) | ||
2849 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2828 | } | 2850 | } |
2829 | 2851 | ||
2830 | int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) | 2852 | int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) |
@@ -2973,7 +2995,7 @@ ioerr: | |||
2973 | return ret; | 2995 | return ret; |
2974 | } | 2996 | } |
2975 | 2997 | ||
2976 | void r5c_update_on_rdev_error(struct mddev *mddev) | 2998 | void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev) |
2977 | { | 2999 | { |
2978 | struct r5conf *conf = mddev->private; | 3000 | struct r5conf *conf = mddev->private; |
2979 | struct r5l_log *log = conf->log; | 3001 | struct r5l_log *log = conf->log; |
@@ -2981,7 +3003,8 @@ void r5c_update_on_rdev_error(struct mddev *mddev) | |||
2981 | if (!log) | 3003 | if (!log) |
2982 | return; | 3004 | return; |
2983 | 3005 | ||
2984 | if (raid5_calc_degraded(conf) > 0 && | 3006 | if ((raid5_calc_degraded(conf) > 0 || |
3007 | test_bit(Journal, &rdev->flags)) && | ||
2985 | conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) | 3008 | conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) |
2986 | schedule_work(&log->disable_writeback_work); | 3009 | schedule_work(&log->disable_writeback_work); |
2987 | } | 3010 | } |
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index 27097101ccca..328d67aedda4 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h | |||
@@ -28,7 +28,8 @@ extern void r5c_flush_cache(struct r5conf *conf, int num); | |||
28 | extern void r5c_check_stripe_cache_usage(struct r5conf *conf); | 28 | extern void r5c_check_stripe_cache_usage(struct r5conf *conf); |
29 | extern void r5c_check_cached_full_stripe(struct r5conf *conf); | 29 | extern void r5c_check_cached_full_stripe(struct r5conf *conf); |
30 | extern struct md_sysfs_entry r5c_journal_mode; | 30 | extern struct md_sysfs_entry r5c_journal_mode; |
31 | extern void r5c_update_on_rdev_error(struct mddev *mddev); | 31 | extern void r5c_update_on_rdev_error(struct mddev *mddev, |
32 | struct md_rdev *rdev); | ||
32 | extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect); | 33 | extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect); |
33 | 34 | ||
34 | extern struct dma_async_tx_descriptor * | 35 | extern struct dma_async_tx_descriptor * |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2e38cfac5b1d..9c4f7659f8b1 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -103,8 +103,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) | |||
103 | static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) | 103 | static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) |
104 | { | 104 | { |
105 | int i; | 105 | int i; |
106 | local_irq_disable(); | 106 | spin_lock_irq(conf->hash_locks); |
107 | spin_lock(conf->hash_locks); | ||
108 | for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) | 107 | for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) |
109 | spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); | 108 | spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); |
110 | spin_lock(&conf->device_lock); | 109 | spin_lock(&conf->device_lock); |
@@ -114,9 +113,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) | |||
114 | { | 113 | { |
115 | int i; | 114 | int i; |
116 | spin_unlock(&conf->device_lock); | 115 | spin_unlock(&conf->device_lock); |
117 | for (i = NR_STRIPE_HASH_LOCKS; i; i--) | 116 | for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--) |
118 | spin_unlock(conf->hash_locks + i - 1); | 117 | spin_unlock(conf->hash_locks + i); |
119 | local_irq_enable(); | 118 | spin_unlock_irq(conf->hash_locks); |
120 | } | 119 | } |
121 | 120 | ||
122 | /* Find first data disk in a raid6 stripe */ | 121 | /* Find first data disk in a raid6 stripe */ |
@@ -234,11 +233,15 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
234 | if (test_bit(R5_InJournal, &sh->dev[i].flags)) | 233 | if (test_bit(R5_InJournal, &sh->dev[i].flags)) |
235 | injournal++; | 234 | injournal++; |
236 | /* | 235 | /* |
237 | * When quiesce in r5c write back, set STRIPE_HANDLE for stripes with | 236 | * In the following cases, the stripe cannot be released to cached |
238 | * data in journal, so they are not released to cached lists | 237 | * lists. Therefore, we make the stripe write out and set |
238 | * STRIPE_HANDLE: | ||
239 | * 1. when quiesce in r5c write back; | ||
240 | * 2. when resync is requested fot the stripe. | ||
239 | */ | 241 | */ |
240 | if (conf->quiesce && r5c_is_writeback(conf->log) && | 242 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || |
241 | !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) { | 243 | (conf->quiesce && r5c_is_writeback(conf->log) && |
244 | !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { | ||
242 | if (test_bit(STRIPE_R5C_CACHING, &sh->state)) | 245 | if (test_bit(STRIPE_R5C_CACHING, &sh->state)) |
243 | r5c_make_stripe_write_out(sh); | 246 | r5c_make_stripe_write_out(sh); |
244 | set_bit(STRIPE_HANDLE, &sh->state); | 247 | set_bit(STRIPE_HANDLE, &sh->state); |
@@ -714,12 +717,11 @@ static bool is_full_stripe_write(struct stripe_head *sh) | |||
714 | 717 | ||
715 | static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) | 718 | static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) |
716 | { | 719 | { |
717 | local_irq_disable(); | ||
718 | if (sh1 > sh2) { | 720 | if (sh1 > sh2) { |
719 | spin_lock(&sh2->stripe_lock); | 721 | spin_lock_irq(&sh2->stripe_lock); |
720 | spin_lock_nested(&sh1->stripe_lock, 1); | 722 | spin_lock_nested(&sh1->stripe_lock, 1); |
721 | } else { | 723 | } else { |
722 | spin_lock(&sh1->stripe_lock); | 724 | spin_lock_irq(&sh1->stripe_lock); |
723 | spin_lock_nested(&sh2->stripe_lock, 1); | 725 | spin_lock_nested(&sh2->stripe_lock, 1); |
724 | } | 726 | } |
725 | } | 727 | } |
@@ -727,8 +729,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) | |||
727 | static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) | 729 | static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) |
728 | { | 730 | { |
729 | spin_unlock(&sh1->stripe_lock); | 731 | spin_unlock(&sh1->stripe_lock); |
730 | spin_unlock(&sh2->stripe_lock); | 732 | spin_unlock_irq(&sh2->stripe_lock); |
731 | local_irq_enable(); | ||
732 | } | 733 | } |
733 | 734 | ||
734 | /* Only freshly new full stripe normal write stripe can be added to a batch list */ | 735 | /* Only freshly new full stripe normal write stripe can be added to a batch list */ |
@@ -2312,14 +2313,12 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2312 | struct stripe_head *osh, *nsh; | 2313 | struct stripe_head *osh, *nsh; |
2313 | LIST_HEAD(newstripes); | 2314 | LIST_HEAD(newstripes); |
2314 | struct disk_info *ndisks; | 2315 | struct disk_info *ndisks; |
2315 | int err; | 2316 | int err = 0; |
2316 | struct kmem_cache *sc; | 2317 | struct kmem_cache *sc; |
2317 | int i; | 2318 | int i; |
2318 | int hash, cnt; | 2319 | int hash, cnt; |
2319 | 2320 | ||
2320 | err = md_allow_write(conf->mddev); | 2321 | md_allow_write(conf->mddev); |
2321 | if (err) | ||
2322 | return err; | ||
2323 | 2322 | ||
2324 | /* Step 1 */ | 2323 | /* Step 1 */ |
2325 | sc = kmem_cache_create(conf->cache_name[1-conf->active_name], | 2324 | sc = kmem_cache_create(conf->cache_name[1-conf->active_name], |
@@ -2694,7 +2693,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) | |||
2694 | bdevname(rdev->bdev, b), | 2693 | bdevname(rdev->bdev, b), |
2695 | mdname(mddev), | 2694 | mdname(mddev), |
2696 | conf->raid_disks - mddev->degraded); | 2695 | conf->raid_disks - mddev->degraded); |
2697 | r5c_update_on_rdev_error(mddev); | 2696 | r5c_update_on_rdev_error(mddev, rdev); |
2698 | } | 2697 | } |
2699 | 2698 | ||
2700 | /* | 2699 | /* |
@@ -3055,6 +3054,11 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
3055 | * When LOG_CRITICAL, stripes with injournal == 0 will be sent to | 3054 | * When LOG_CRITICAL, stripes with injournal == 0 will be sent to |
3056 | * no_space_stripes list. | 3055 | * no_space_stripes list. |
3057 | * | 3056 | * |
3057 | * 3. during journal failure | ||
3058 | * In journal failure, we try to flush all cached data to raid disks | ||
3059 | * based on data in stripe cache. The array is read-only to upper | ||
3060 | * layers, so we would skip all pending writes. | ||
3061 | * | ||
3058 | */ | 3062 | */ |
3059 | static inline bool delay_towrite(struct r5conf *conf, | 3063 | static inline bool delay_towrite(struct r5conf *conf, |
3060 | struct r5dev *dev, | 3064 | struct r5dev *dev, |
@@ -3068,6 +3072,9 @@ static inline bool delay_towrite(struct r5conf *conf, | |||
3068 | if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && | 3072 | if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && |
3069 | s->injournal > 0) | 3073 | s->injournal > 0) |
3070 | return true; | 3074 | return true; |
3075 | /* case 3 above */ | ||
3076 | if (s->log_failed && s->injournal) | ||
3077 | return true; | ||
3071 | return false; | 3078 | return false; |
3072 | } | 3079 | } |
3073 | 3080 | ||
@@ -4653,8 +4660,13 @@ static void handle_stripe(struct stripe_head *sh) | |||
4653 | 4660 | ||
4654 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { | 4661 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { |
4655 | spin_lock(&sh->stripe_lock); | 4662 | spin_lock(&sh->stripe_lock); |
4656 | /* Cannot process 'sync' concurrently with 'discard' */ | 4663 | /* |
4657 | if (!test_bit(STRIPE_DISCARD, &sh->state) && | 4664 | * Cannot process 'sync' concurrently with 'discard'. |
4665 | * Flush data in r5cache before 'sync'. | ||
4666 | */ | ||
4667 | if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && | ||
4668 | !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && | ||
4669 | !test_bit(STRIPE_DISCARD, &sh->state) && | ||
4658 | test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { | 4670 | test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { |
4659 | set_bit(STRIPE_SYNCING, &sh->state); | 4671 | set_bit(STRIPE_SYNCING, &sh->state); |
4660 | clear_bit(STRIPE_INSYNC, &sh->state); | 4672 | clear_bit(STRIPE_INSYNC, &sh->state); |
@@ -4701,10 +4713,15 @@ static void handle_stripe(struct stripe_head *sh) | |||
4701 | " to_write=%d failed=%d failed_num=%d,%d\n", | 4713 | " to_write=%d failed=%d failed_num=%d,%d\n", |
4702 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, | 4714 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, |
4703 | s.failed_num[0], s.failed_num[1]); | 4715 | s.failed_num[0], s.failed_num[1]); |
4704 | /* check if the array has lost more than max_degraded devices and, | 4716 | /* |
4717 | * check if the array has lost more than max_degraded devices and, | ||
4705 | * if so, some requests might need to be failed. | 4718 | * if so, some requests might need to be failed. |
4719 | * | ||
4720 | * When journal device failed (log_failed), we will only process | ||
4721 | * the stripe if there is data need write to raid disks | ||
4706 | */ | 4722 | */ |
4707 | if (s.failed > conf->max_degraded || s.log_failed) { | 4723 | if (s.failed > conf->max_degraded || |
4724 | (s.log_failed && s.injournal == 0)) { | ||
4708 | sh->check_state = 0; | 4725 | sh->check_state = 0; |
4709 | sh->reconstruct_state = 0; | 4726 | sh->reconstruct_state = 0; |
4710 | break_stripe_batch_list(sh, 0); | 4727 | break_stripe_batch_list(sh, 0); |
@@ -5277,8 +5294,10 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) | |||
5277 | struct stripe_head *sh, *tmp; | 5294 | struct stripe_head *sh, *tmp; |
5278 | struct list_head *handle_list = NULL; | 5295 | struct list_head *handle_list = NULL; |
5279 | struct r5worker_group *wg; | 5296 | struct r5worker_group *wg; |
5280 | bool second_try = !r5c_is_writeback(conf->log); | 5297 | bool second_try = !r5c_is_writeback(conf->log) && |
5281 | bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state); | 5298 | !r5l_log_disk_error(conf); |
5299 | bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || | ||
5300 | r5l_log_disk_error(conf); | ||
5282 | 5301 | ||
5283 | again: | 5302 | again: |
5284 | wg = NULL; | 5303 | wg = NULL; |
@@ -6313,7 +6332,6 @@ int | |||
6313 | raid5_set_cache_size(struct mddev *mddev, int size) | 6332 | raid5_set_cache_size(struct mddev *mddev, int size) |
6314 | { | 6333 | { |
6315 | struct r5conf *conf = mddev->private; | 6334 | struct r5conf *conf = mddev->private; |
6316 | int err; | ||
6317 | 6335 | ||
6318 | if (size <= 16 || size > 32768) | 6336 | if (size <= 16 || size > 32768) |
6319 | return -EINVAL; | 6337 | return -EINVAL; |
@@ -6325,10 +6343,7 @@ raid5_set_cache_size(struct mddev *mddev, int size) | |||
6325 | ; | 6343 | ; |
6326 | mutex_unlock(&conf->cache_size_mutex); | 6344 | mutex_unlock(&conf->cache_size_mutex); |
6327 | 6345 | ||
6328 | 6346 | md_allow_write(mddev); | |
6329 | err = md_allow_write(mddev); | ||
6330 | if (err) | ||
6331 | return err; | ||
6332 | 6347 | ||
6333 | mutex_lock(&conf->cache_size_mutex); | 6348 | mutex_lock(&conf->cache_size_mutex); |
6334 | while (size > conf->max_nr_stripes) | 6349 | while (size > conf->max_nr_stripes) |
@@ -7530,7 +7545,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
7530 | * neilb: there is no locking about new writes here, | 7545 | * neilb: there is no locking about new writes here, |
7531 | * so this cannot be safe. | 7546 | * so this cannot be safe. |
7532 | */ | 7547 | */ |
7533 | if (atomic_read(&conf->active_stripes)) { | 7548 | if (atomic_read(&conf->active_stripes) || |
7549 | atomic_read(&conf->r5c_cached_full_stripes) || | ||
7550 | atomic_read(&conf->r5c_cached_partial_stripes)) { | ||
7534 | return -EBUSY; | 7551 | return -EBUSY; |
7535 | } | 7552 | } |
7536 | log_exit(conf); | 7553 | log_exit(conf); |
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index bf0fe0137dfe..a80e17de906d 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c | |||
@@ -460,12 +460,12 @@ static int get_gpmc_timing_reg( | |||
460 | if (l) | 460 | if (l) |
461 | time_ns_min = gpmc_clk_ticks_to_ns(l - 1, cs, cd) + 1; | 461 | time_ns_min = gpmc_clk_ticks_to_ns(l - 1, cs, cd) + 1; |
462 | time_ns = gpmc_clk_ticks_to_ns(l, cs, cd); | 462 | time_ns = gpmc_clk_ticks_to_ns(l, cs, cd); |
463 | pr_info("gpmc,%s = <%u> /* %u ns - %u ns; %i ticks%s*/\n", | 463 | pr_info("gpmc,%s = <%u>; /* %u ns - %u ns; %i ticks%s*/\n", |
464 | name, time_ns, time_ns_min, time_ns, l, | 464 | name, time_ns, time_ns_min, time_ns, l, |
465 | invalid ? "; invalid " : " "); | 465 | invalid ? "; invalid " : " "); |
466 | } else { | 466 | } else { |
467 | /* raw format */ | 467 | /* raw format */ |
468 | pr_info("gpmc,%s = <%u>%s\n", name, l, | 468 | pr_info("gpmc,%s = <%u>;%s\n", name, l, |
469 | invalid ? " /* invalid */" : ""); | 469 | invalid ? " /* invalid */" : ""); |
470 | } | 470 | } |
471 | 471 | ||
@@ -512,7 +512,7 @@ static void gpmc_cs_show_timings(int cs, const char *desc) | |||
512 | pr_info("gpmc cs%i access configuration:\n", cs); | 512 | pr_info("gpmc cs%i access configuration:\n", cs); |
513 | GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity"); | 513 | GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity"); |
514 | GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data"); | 514 | GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data"); |
515 | GPMC_GET_RAW_MAX(GPMC_CS_CONFIG1, 12, 13, | 515 | GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 12, 13, 1, |
516 | GPMC_CONFIG1_DEVICESIZE_MAX, "device-width"); | 516 | GPMC_CONFIG1_DEVICESIZE_MAX, "device-width"); |
517 | GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin"); | 517 | GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin"); |
518 | GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write"); | 518 | GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write"); |
@@ -2083,8 +2083,11 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, | |||
2083 | } else { | 2083 | } else { |
2084 | ret = of_property_read_u32(child, "bank-width", | 2084 | ret = of_property_read_u32(child, "bank-width", |
2085 | &gpmc_s.device_width); | 2085 | &gpmc_s.device_width); |
2086 | if (ret < 0) | 2086 | if (ret < 0) { |
2087 | dev_err(&pdev->dev, "%s has no 'bank-width' property\n", | ||
2088 | child->full_name); | ||
2087 | goto err; | 2089 | goto err; |
2090 | } | ||
2088 | } | 2091 | } |
2089 | 2092 | ||
2090 | /* Reserve wait pin if it is required and valid */ | 2093 | /* Reserve wait pin if it is required and valid */ |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 2cba76e6fa3c..07bbd4cc1852 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -492,6 +492,7 @@ config ASPEED_LPC_CTRL | |||
492 | 492 | ||
493 | config PCI_ENDPOINT_TEST | 493 | config PCI_ENDPOINT_TEST |
494 | depends on PCI | 494 | depends on PCI |
495 | select CRC32 | ||
495 | tristate "PCI Endpoint Test driver" | 496 | tristate "PCI Endpoint Test driver" |
496 | ---help--- | 497 | ---help--- |
497 | Enable this configuration option to enable the host side test driver | 498 | Enable this configuration option to enable the host side test driver |
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index 1304160de168..13ef162cf066 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c | |||
@@ -27,6 +27,7 @@ struct mmc_pwrseq_simple { | |||
27 | struct mmc_pwrseq pwrseq; | 27 | struct mmc_pwrseq pwrseq; |
28 | bool clk_enabled; | 28 | bool clk_enabled; |
29 | u32 post_power_on_delay_ms; | 29 | u32 post_power_on_delay_ms; |
30 | u32 power_off_delay_us; | ||
30 | struct clk *ext_clk; | 31 | struct clk *ext_clk; |
31 | struct gpio_descs *reset_gpios; | 32 | struct gpio_descs *reset_gpios; |
32 | }; | 33 | }; |
@@ -78,6 +79,10 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host) | |||
78 | 79 | ||
79 | mmc_pwrseq_simple_set_gpios_value(pwrseq, 1); | 80 | mmc_pwrseq_simple_set_gpios_value(pwrseq, 1); |
80 | 81 | ||
82 | if (pwrseq->power_off_delay_us) | ||
83 | usleep_range(pwrseq->power_off_delay_us, | ||
84 | 2 * pwrseq->power_off_delay_us); | ||
85 | |||
81 | if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) { | 86 | if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) { |
82 | clk_disable_unprepare(pwrseq->ext_clk); | 87 | clk_disable_unprepare(pwrseq->ext_clk); |
83 | pwrseq->clk_enabled = false; | 88 | pwrseq->clk_enabled = false; |
@@ -119,6 +124,8 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev) | |||
119 | 124 | ||
120 | device_property_read_u32(dev, "post-power-on-delay-ms", | 125 | device_property_read_u32(dev, "post-power-on-delay-ms", |
121 | &pwrseq->post_power_on_delay_ms); | 126 | &pwrseq->post_power_on_delay_ms); |
127 | device_property_read_u32(dev, "power-off-delay-us", | ||
128 | &pwrseq->power_off_delay_us); | ||
122 | 129 | ||
123 | pwrseq->pwrseq.dev = dev; | 130 | pwrseq->pwrseq.dev = dev; |
124 | pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops; | 131 | pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops; |
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c index 772d0900026d..951d2cdd7888 100644 --- a/drivers/mmc/host/cavium-octeon.c +++ b/drivers/mmc/host/cavium-octeon.c | |||
@@ -108,7 +108,7 @@ static void octeon_mmc_release_bus(struct cvm_mmc_host *host) | |||
108 | static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val) | 108 | static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val) |
109 | { | 109 | { |
110 | writeq(val, host->base + MIO_EMM_INT(host)); | 110 | writeq(val, host->base + MIO_EMM_INT(host)); |
111 | if (!host->dma_active || (host->dma_active && !host->has_ciu3)) | 111 | if (!host->has_ciu3) |
112 | writeq(val, host->base + MIO_EMM_INT_EN(host)); | 112 | writeq(val, host->base + MIO_EMM_INT_EN(host)); |
113 | } | 113 | } |
114 | 114 | ||
@@ -267,7 +267,7 @@ static int octeon_mmc_probe(struct platform_device *pdev) | |||
267 | } | 267 | } |
268 | 268 | ||
269 | host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev, | 269 | host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev, |
270 | "power-gpios", | 270 | "power", |
271 | GPIOD_OUT_HIGH); | 271 | GPIOD_OUT_HIGH); |
272 | if (IS_ERR(host->global_pwr_gpiod)) { | 272 | if (IS_ERR(host->global_pwr_gpiod)) { |
273 | dev_err(&pdev->dev, "Invalid power GPIO\n"); | 273 | dev_err(&pdev->dev, "Invalid power GPIO\n"); |
@@ -288,11 +288,20 @@ static int octeon_mmc_probe(struct platform_device *pdev) | |||
288 | if (ret) { | 288 | if (ret) { |
289 | dev_err(&pdev->dev, "Error populating slots\n"); | 289 | dev_err(&pdev->dev, "Error populating slots\n"); |
290 | octeon_mmc_set_shared_power(host, 0); | 290 | octeon_mmc_set_shared_power(host, 0); |
291 | return ret; | 291 | goto error; |
292 | } | 292 | } |
293 | i++; | 293 | i++; |
294 | } | 294 | } |
295 | return 0; | 295 | return 0; |
296 | |||
297 | error: | ||
298 | for (i = 0; i < CAVIUM_MAX_MMC; i++) { | ||
299 | if (host->slot[i]) | ||
300 | cvm_mmc_of_slot_remove(host->slot[i]); | ||
301 | if (host->slot_pdev[i]) | ||
302 | of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); | ||
303 | } | ||
304 | return ret; | ||
296 | } | 305 | } |
297 | 306 | ||
298 | static int octeon_mmc_remove(struct platform_device *pdev) | 307 | static int octeon_mmc_remove(struct platform_device *pdev) |
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index fe3d77267cd6..b9cc95998799 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c | |||
@@ -146,6 +146,12 @@ static int thunder_mmc_probe(struct pci_dev *pdev, | |||
146 | return 0; | 146 | return 0; |
147 | 147 | ||
148 | error: | 148 | error: |
149 | for (i = 0; i < CAVIUM_MAX_MMC; i++) { | ||
150 | if (host->slot[i]) | ||
151 | cvm_mmc_of_slot_remove(host->slot[i]); | ||
152 | if (host->slot_pdev[i]) | ||
153 | of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); | ||
154 | } | ||
149 | clk_disable_unprepare(host->clk); | 155 | clk_disable_unprepare(host->clk); |
150 | return ret; | 156 | return ret; |
151 | } | 157 | } |
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c index 58b51ba6aabd..b8aaf0fdb77c 100644 --- a/drivers/mmc/host/cavium.c +++ b/drivers/mmc/host/cavium.c | |||
@@ -839,14 +839,14 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
839 | cvm_mmc_reset_bus(slot); | 839 | cvm_mmc_reset_bus(slot); |
840 | if (host->global_pwr_gpiod) | 840 | if (host->global_pwr_gpiod) |
841 | host->set_shared_power(host, 0); | 841 | host->set_shared_power(host, 0); |
842 | else | 842 | else if (!IS_ERR(mmc->supply.vmmc)) |
843 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); | 843 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); |
844 | break; | 844 | break; |
845 | 845 | ||
846 | case MMC_POWER_UP: | 846 | case MMC_POWER_UP: |
847 | if (host->global_pwr_gpiod) | 847 | if (host->global_pwr_gpiod) |
848 | host->set_shared_power(host, 1); | 848 | host->set_shared_power(host, 1); |
849 | else | 849 | else if (!IS_ERR(mmc->supply.vmmc)) |
850 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); | 850 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); |
851 | break; | 851 | break; |
852 | } | 852 | } |
@@ -968,20 +968,15 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot) | |||
968 | return -EINVAL; | 968 | return -EINVAL; |
969 | } | 969 | } |
970 | 970 | ||
971 | mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); | 971 | ret = mmc_regulator_get_supply(mmc); |
972 | if (IS_ERR(mmc->supply.vmmc)) { | 972 | if (ret == -EPROBE_DEFER) |
973 | if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) | 973 | return ret; |
974 | return -EPROBE_DEFER; | 974 | /* |
975 | /* | 975 | * Legacy Octeon firmware has no regulator entry, fall-back to |
976 | * Legacy Octeon firmware has no regulator entry, fall-back to | 976 | * a hard-coded voltage to get a sane OCR. |
977 | * a hard-coded voltage to get a sane OCR. | 977 | */ |
978 | */ | 978 | if (IS_ERR(mmc->supply.vmmc)) |
979 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | 979 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
980 | } else { | ||
981 | ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); | ||
982 | if (ret > 0) | ||
983 | mmc->ocr_avail = ret; | ||
984 | } | ||
985 | 980 | ||
986 | /* Common MMC bindings */ | 981 | /* Common MMC bindings */ |
987 | ret = mmc_of_parse(mmc); | 982 | ret = mmc_of_parse(mmc); |
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 3275d4995812..61666d269771 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c | |||
@@ -187,7 +187,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = { | |||
187 | }; | 187 | }; |
188 | 188 | ||
189 | static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { | 189 | static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { |
190 | .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, | 190 | .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | |
191 | SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, | ||
191 | .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, | 192 | .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, |
192 | .ops = &sdhci_iproc_ops, | 193 | .ops = &sdhci_iproc_ops, |
193 | }; | 194 | }; |
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index 6356781f1cca..f7e26b031e76 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c | |||
@@ -787,14 +787,6 @@ int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios) | |||
787 | return ret; | 787 | return ret; |
788 | } | 788 | } |
789 | 789 | ||
790 | void xenon_clean_phy(struct sdhci_host *host) | ||
791 | { | ||
792 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
793 | struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); | ||
794 | |||
795 | kfree(priv->phy_params); | ||
796 | } | ||
797 | |||
798 | static int xenon_add_phy(struct device_node *np, struct sdhci_host *host, | 790 | static int xenon_add_phy(struct device_node *np, struct sdhci_host *host, |
799 | const char *phy_name) | 791 | const char *phy_name) |
800 | { | 792 | { |
@@ -819,11 +811,7 @@ static int xenon_add_phy(struct device_node *np, struct sdhci_host *host, | |||
819 | if (ret) | 811 | if (ret) |
820 | return ret; | 812 | return ret; |
821 | 813 | ||
822 | ret = xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params); | 814 | return xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params); |
823 | if (ret) | ||
824 | xenon_clean_phy(host); | ||
825 | |||
826 | return ret; | ||
827 | } | 815 | } |
828 | 816 | ||
829 | int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host) | 817 | int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host) |
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 67246655315b..bc1781bb070b 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c | |||
@@ -486,7 +486,7 @@ static int xenon_probe(struct platform_device *pdev) | |||
486 | 486 | ||
487 | err = xenon_sdhc_prepare(host); | 487 | err = xenon_sdhc_prepare(host); |
488 | if (err) | 488 | if (err) |
489 | goto clean_phy_param; | 489 | goto err_clk; |
490 | 490 | ||
491 | err = sdhci_add_host(host); | 491 | err = sdhci_add_host(host); |
492 | if (err) | 492 | if (err) |
@@ -496,8 +496,6 @@ static int xenon_probe(struct platform_device *pdev) | |||
496 | 496 | ||
497 | remove_sdhc: | 497 | remove_sdhc: |
498 | xenon_sdhc_unprepare(host); | 498 | xenon_sdhc_unprepare(host); |
499 | clean_phy_param: | ||
500 | xenon_clean_phy(host); | ||
501 | err_clk: | 499 | err_clk: |
502 | clk_disable_unprepare(pltfm_host->clk); | 500 | clk_disable_unprepare(pltfm_host->clk); |
503 | free_pltfm: | 501 | free_pltfm: |
@@ -510,8 +508,6 @@ static int xenon_remove(struct platform_device *pdev) | |||
510 | struct sdhci_host *host = platform_get_drvdata(pdev); | 508 | struct sdhci_host *host = platform_get_drvdata(pdev); |
511 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 509 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
512 | 510 | ||
513 | xenon_clean_phy(host); | ||
514 | |||
515 | sdhci_remove_host(host, 0); | 511 | sdhci_remove_host(host, 0); |
516 | 512 | ||
517 | xenon_sdhc_unprepare(host); | 513 | xenon_sdhc_unprepare(host); |
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h index 6e6523ea01ce..73debb42dc2f 100644 --- a/drivers/mmc/host/sdhci-xenon.h +++ b/drivers/mmc/host/sdhci-xenon.h | |||
@@ -93,7 +93,6 @@ struct xenon_priv { | |||
93 | }; | 93 | }; |
94 | 94 | ||
95 | int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios); | 95 | int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios); |
96 | void xenon_clean_phy(struct sdhci_host *host); | ||
97 | int xenon_phy_parse_dt(struct device_node *np, | 96 | int xenon_phy_parse_dt(struct device_node *np, |
98 | struct sdhci_host *host); | 97 | struct sdhci_host *host); |
99 | void xenon_soc_pad_ctrl(struct sdhci_host *host, | 98 | void xenon_soc_pad_ctrl(struct sdhci_host *host, |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index c5fd4259da33..b44a6aeb346d 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -2577,7 +2577,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond, | |||
2577 | return -1; | 2577 | return -1; |
2578 | 2578 | ||
2579 | ad_info->aggregator_id = aggregator->aggregator_identifier; | 2579 | ad_info->aggregator_id = aggregator->aggregator_identifier; |
2580 | ad_info->ports = aggregator->num_of_ports; | 2580 | ad_info->ports = __agg_active_ports(aggregator); |
2581 | ad_info->actor_key = aggregator->actor_oper_aggregator_key; | 2581 | ad_info->actor_key = aggregator->actor_oper_aggregator_key; |
2582 | ad_info->partner_key = aggregator->partner_oper_aggregator_key; | 2582 | ad_info->partner_key = aggregator->partner_oper_aggregator_key; |
2583 | ether_addr_copy(ad_info->partner_system, | 2583 | ether_addr_copy(ad_info->partner_system, |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2be78807fd6e..2359478b977f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2612,11 +2612,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) | |||
2612 | bond_for_each_slave_rcu(bond, slave, iter) { | 2612 | bond_for_each_slave_rcu(bond, slave, iter) { |
2613 | unsigned long trans_start = dev_trans_start(slave->dev); | 2613 | unsigned long trans_start = dev_trans_start(slave->dev); |
2614 | 2614 | ||
2615 | slave->new_link = BOND_LINK_NOCHANGE; | ||
2616 | |||
2615 | if (slave->link != BOND_LINK_UP) { | 2617 | if (slave->link != BOND_LINK_UP) { |
2616 | if (bond_time_in_interval(bond, trans_start, 1) && | 2618 | if (bond_time_in_interval(bond, trans_start, 1) && |
2617 | bond_time_in_interval(bond, slave->last_rx, 1)) { | 2619 | bond_time_in_interval(bond, slave->last_rx, 1)) { |
2618 | 2620 | ||
2619 | slave->link = BOND_LINK_UP; | 2621 | slave->new_link = BOND_LINK_UP; |
2620 | slave_state_changed = 1; | 2622 | slave_state_changed = 1; |
2621 | 2623 | ||
2622 | /* primary_slave has no meaning in round-robin | 2624 | /* primary_slave has no meaning in round-robin |
@@ -2643,7 +2645,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) | |||
2643 | if (!bond_time_in_interval(bond, trans_start, 2) || | 2645 | if (!bond_time_in_interval(bond, trans_start, 2) || |
2644 | !bond_time_in_interval(bond, slave->last_rx, 2)) { | 2646 | !bond_time_in_interval(bond, slave->last_rx, 2)) { |
2645 | 2647 | ||
2646 | slave->link = BOND_LINK_DOWN; | 2648 | slave->new_link = BOND_LINK_DOWN; |
2647 | slave_state_changed = 1; | 2649 | slave_state_changed = 1; |
2648 | 2650 | ||
2649 | if (slave->link_failure_count < UINT_MAX) | 2651 | if (slave->link_failure_count < UINT_MAX) |
@@ -2674,6 +2676,11 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) | |||
2674 | if (!rtnl_trylock()) | 2676 | if (!rtnl_trylock()) |
2675 | goto re_arm; | 2677 | goto re_arm; |
2676 | 2678 | ||
2679 | bond_for_each_slave(bond, slave, iter) { | ||
2680 | if (slave->new_link != BOND_LINK_NOCHANGE) | ||
2681 | slave->link = slave->new_link; | ||
2682 | } | ||
2683 | |||
2677 | if (slave_state_changed) { | 2684 | if (slave_state_changed) { |
2678 | bond_slave_state_change(bond); | 2685 | bond_slave_state_change(bond); |
2679 | if (BOND_MODE(bond) == BOND_MODE_XOR) | 2686 | if (BOND_MODE(bond) == BOND_MODE_XOR) |
@@ -4271,10 +4278,10 @@ static int bond_check_params(struct bond_params *params) | |||
4271 | int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; | 4278 | int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; |
4272 | struct bond_opt_value newval; | 4279 | struct bond_opt_value newval; |
4273 | const struct bond_opt_value *valptr; | 4280 | const struct bond_opt_value *valptr; |
4274 | int arp_all_targets_value; | 4281 | int arp_all_targets_value = 0; |
4275 | u16 ad_actor_sys_prio = 0; | 4282 | u16 ad_actor_sys_prio = 0; |
4276 | u16 ad_user_port_key = 0; | 4283 | u16 ad_user_port_key = 0; |
4277 | __be32 arp_target[BOND_MAX_ARP_TARGETS]; | 4284 | __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 }; |
4278 | int arp_ip_count; | 4285 | int arp_ip_count; |
4279 | int bond_mode = BOND_MODE_ROUNDROBIN; | 4286 | int bond_mode = BOND_MODE_ROUNDROBIN; |
4280 | int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; | 4287 | int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; |
@@ -4501,7 +4508,6 @@ static int bond_check_params(struct bond_params *params) | |||
4501 | arp_validate_value = 0; | 4508 | arp_validate_value = 0; |
4502 | } | 4509 | } |
4503 | 4510 | ||
4504 | arp_all_targets_value = 0; | ||
4505 | if (arp_all_targets) { | 4511 | if (arp_all_targets) { |
4506 | bond_opt_initstr(&newval, arp_all_targets); | 4512 | bond_opt_initstr(&newval, arp_all_targets); |
4507 | valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS), | 4513 | valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS), |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 19581d783d8e..d034d8cd7d22 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -849,6 +849,9 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, | |||
849 | mv88e6xxx_g1_stats_read(chip, reg, &low); | 849 | mv88e6xxx_g1_stats_read(chip, reg, &low); |
850 | if (s->sizeof_stat == 8) | 850 | if (s->sizeof_stat == 8) |
851 | mv88e6xxx_g1_stats_read(chip, reg + 1, &high); | 851 | mv88e6xxx_g1_stats_read(chip, reg + 1, &high); |
852 | break; | ||
853 | default: | ||
854 | return UINT64_MAX; | ||
852 | } | 855 | } |
853 | value = (((u64)high) << 16) | low; | 856 | value = (((u64)high) << 16) | low; |
854 | return value; | 857 | return value; |
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index b0a3b85fc6f8..db02bc2fb4b2 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c | |||
@@ -748,13 +748,13 @@ static int ax_init_dev(struct net_device *dev) | |||
748 | 748 | ||
749 | ret = ax_mii_init(dev); | 749 | ret = ax_mii_init(dev); |
750 | if (ret) | 750 | if (ret) |
751 | goto out_irq; | 751 | goto err_out; |
752 | 752 | ||
753 | ax_NS8390_init(dev, 0); | 753 | ax_NS8390_init(dev, 0); |
754 | 754 | ||
755 | ret = register_netdev(dev); | 755 | ret = register_netdev(dev); |
756 | if (ret) | 756 | if (ret) |
757 | goto out_irq; | 757 | goto err_out; |
758 | 758 | ||
759 | netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", | 759 | netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", |
760 | ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr, | 760 | ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr, |
@@ -762,9 +762,6 @@ static int ax_init_dev(struct net_device *dev) | |||
762 | 762 | ||
763 | return 0; | 763 | return 0; |
764 | 764 | ||
765 | out_irq: | ||
766 | /* cleanup irq */ | ||
767 | free_irq(dev->irq, dev); | ||
768 | err_out: | 765 | err_out: |
769 | return ret; | 766 | return ret; |
770 | } | 767 | } |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 4ee15ff06a44..faeb4935ef3e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | |||
@@ -200,29 +200,18 @@ err_exit: | |||
200 | static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, | 200 | static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, |
201 | struct aq_nic_cfg_s *aq_nic_cfg) | 201 | struct aq_nic_cfg_s *aq_nic_cfg) |
202 | { | 202 | { |
203 | int err = 0; | ||
204 | |||
205 | /* TX checksums offloads*/ | 203 | /* TX checksums offloads*/ |
206 | tpo_ipv4header_crc_offload_en_set(self, 1); | 204 | tpo_ipv4header_crc_offload_en_set(self, 1); |
207 | tpo_tcp_udp_crc_offload_en_set(self, 1); | 205 | tpo_tcp_udp_crc_offload_en_set(self, 1); |
208 | if (err < 0) | ||
209 | goto err_exit; | ||
210 | 206 | ||
211 | /* RX checksums offloads*/ | 207 | /* RX checksums offloads*/ |
212 | rpo_ipv4header_crc_offload_en_set(self, 1); | 208 | rpo_ipv4header_crc_offload_en_set(self, 1); |
213 | rpo_tcp_udp_crc_offload_en_set(self, 1); | 209 | rpo_tcp_udp_crc_offload_en_set(self, 1); |
214 | if (err < 0) | ||
215 | goto err_exit; | ||
216 | 210 | ||
217 | /* LSO offloads*/ | 211 | /* LSO offloads*/ |
218 | tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); | 212 | tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); |
219 | if (err < 0) | ||
220 | goto err_exit; | ||
221 | |||
222 | err = aq_hw_err_from_flags(self); | ||
223 | 213 | ||
224 | err_exit: | 214 | return aq_hw_err_from_flags(self); |
225 | return err; | ||
226 | } | 215 | } |
227 | 216 | ||
228 | static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self) | 217 | static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self) |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 42150708191d..1bceb7358e5c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | |||
@@ -200,25 +200,18 @@ err_exit: | |||
200 | static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, | 200 | static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, |
201 | struct aq_nic_cfg_s *aq_nic_cfg) | 201 | struct aq_nic_cfg_s *aq_nic_cfg) |
202 | { | 202 | { |
203 | int err = 0; | ||
204 | unsigned int i; | 203 | unsigned int i; |
205 | 204 | ||
206 | /* TX checksums offloads*/ | 205 | /* TX checksums offloads*/ |
207 | tpo_ipv4header_crc_offload_en_set(self, 1); | 206 | tpo_ipv4header_crc_offload_en_set(self, 1); |
208 | tpo_tcp_udp_crc_offload_en_set(self, 1); | 207 | tpo_tcp_udp_crc_offload_en_set(self, 1); |
209 | if (err < 0) | ||
210 | goto err_exit; | ||
211 | 208 | ||
212 | /* RX checksums offloads*/ | 209 | /* RX checksums offloads*/ |
213 | rpo_ipv4header_crc_offload_en_set(self, 1); | 210 | rpo_ipv4header_crc_offload_en_set(self, 1); |
214 | rpo_tcp_udp_crc_offload_en_set(self, 1); | 211 | rpo_tcp_udp_crc_offload_en_set(self, 1); |
215 | if (err < 0) | ||
216 | goto err_exit; | ||
217 | 212 | ||
218 | /* LSO offloads*/ | 213 | /* LSO offloads*/ |
219 | tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); | 214 | tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); |
220 | if (err < 0) | ||
221 | goto err_exit; | ||
222 | 215 | ||
223 | /* LRO offloads */ | 216 | /* LRO offloads */ |
224 | { | 217 | { |
@@ -245,10 +238,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, | |||
245 | 238 | ||
246 | rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); | 239 | rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); |
247 | } | 240 | } |
248 | err = aq_hw_err_from_flags(self); | 241 | return aq_hw_err_from_flags(self); |
249 | |||
250 | err_exit: | ||
251 | return err; | ||
252 | } | 242 | } |
253 | 243 | ||
254 | static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) | 244 | static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) |
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 63f2deec2a52..77a1c03255de 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c | |||
@@ -1353,6 +1353,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1353 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && | 1353 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && |
1354 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { | 1354 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { |
1355 | printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n"); | 1355 | printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n"); |
1356 | err = -EIO; | ||
1356 | goto err_dma; | 1357 | goto err_dma; |
1357 | } | 1358 | } |
1358 | 1359 | ||
@@ -1366,10 +1367,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1366 | * pcibios_set_master to do the needed arch specific settings */ | 1367 | * pcibios_set_master to do the needed arch specific settings */ |
1367 | pci_set_master(pdev); | 1368 | pci_set_master(pdev); |
1368 | 1369 | ||
1369 | err = -ENOMEM; | ||
1370 | netdev = alloc_etherdev(sizeof(struct atl2_adapter)); | 1370 | netdev = alloc_etherdev(sizeof(struct atl2_adapter)); |
1371 | if (!netdev) | 1371 | if (!netdev) { |
1372 | err = -ENOMEM; | ||
1372 | goto err_alloc_etherdev; | 1373 | goto err_alloc_etherdev; |
1374 | } | ||
1373 | 1375 | ||
1374 | SET_NETDEV_DEV(netdev, &pdev->dev); | 1376 | SET_NETDEV_DEV(netdev, &pdev->dev); |
1375 | 1377 | ||
@@ -1408,8 +1410,6 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1408 | if (err) | 1410 | if (err) |
1409 | goto err_sw_init; | 1411 | goto err_sw_init; |
1410 | 1412 | ||
1411 | err = -EIO; | ||
1412 | |||
1413 | netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX; | 1413 | netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX; |
1414 | netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); | 1414 | netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); |
1415 | 1415 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b56c54d68d5e..03f55daecb20 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -7630,8 +7630,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7630 | dev->min_mtu = ETH_ZLEN; | 7630 | dev->min_mtu = ETH_ZLEN; |
7631 | dev->max_mtu = BNXT_MAX_MTU; | 7631 | dev->max_mtu = BNXT_MAX_MTU; |
7632 | 7632 | ||
7633 | bnxt_dcb_init(bp); | ||
7634 | |||
7635 | #ifdef CONFIG_BNXT_SRIOV | 7633 | #ifdef CONFIG_BNXT_SRIOV |
7636 | init_waitqueue_head(&bp->sriov_cfg_wait); | 7634 | init_waitqueue_head(&bp->sriov_cfg_wait); |
7637 | #endif | 7635 | #endif |
@@ -7669,6 +7667,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7669 | bnxt_hwrm_func_qcfg(bp); | 7667 | bnxt_hwrm_func_qcfg(bp); |
7670 | bnxt_hwrm_port_led_qcaps(bp); | 7668 | bnxt_hwrm_port_led_qcaps(bp); |
7671 | bnxt_ethtool_init(bp); | 7669 | bnxt_ethtool_init(bp); |
7670 | bnxt_dcb_init(bp); | ||
7672 | 7671 | ||
7673 | bnxt_set_rx_skb_mode(bp, false); | 7672 | bnxt_set_rx_skb_mode(bp, false); |
7674 | bnxt_set_tpa_flags(bp); | 7673 | bnxt_set_tpa_flags(bp); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 46de2f8ff024..5c6dd0ce209f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | |||
@@ -553,8 +553,10 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode) | |||
553 | if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) | 553 | if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) |
554 | return 1; | 554 | return 1; |
555 | 555 | ||
556 | if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp)) | 556 | if (mode & DCB_CAP_DCBX_HOST) { |
557 | return 1; | 557 | if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) |
558 | return 1; | ||
559 | } | ||
558 | 560 | ||
559 | if (mode == bp->dcbx_cap) | 561 | if (mode == bp->dcbx_cap) |
560 | return 0; | 562 | return 0; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index fa376444e57c..3549d3876278 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | |||
@@ -37,7 +37,7 @@ | |||
37 | 37 | ||
38 | #define T4FW_VERSION_MAJOR 0x01 | 38 | #define T4FW_VERSION_MAJOR 0x01 |
39 | #define T4FW_VERSION_MINOR 0x10 | 39 | #define T4FW_VERSION_MINOR 0x10 |
40 | #define T4FW_VERSION_MICRO 0x21 | 40 | #define T4FW_VERSION_MICRO 0x2B |
41 | #define T4FW_VERSION_BUILD 0x00 | 41 | #define T4FW_VERSION_BUILD 0x00 |
42 | 42 | ||
43 | #define T4FW_MIN_VERSION_MAJOR 0x01 | 43 | #define T4FW_MIN_VERSION_MAJOR 0x01 |
@@ -46,7 +46,7 @@ | |||
46 | 46 | ||
47 | #define T5FW_VERSION_MAJOR 0x01 | 47 | #define T5FW_VERSION_MAJOR 0x01 |
48 | #define T5FW_VERSION_MINOR 0x10 | 48 | #define T5FW_VERSION_MINOR 0x10 |
49 | #define T5FW_VERSION_MICRO 0x21 | 49 | #define T5FW_VERSION_MICRO 0x2B |
50 | #define T5FW_VERSION_BUILD 0x00 | 50 | #define T5FW_VERSION_BUILD 0x00 |
51 | 51 | ||
52 | #define T5FW_MIN_VERSION_MAJOR 0x00 | 52 | #define T5FW_MIN_VERSION_MAJOR 0x00 |
@@ -55,7 +55,7 @@ | |||
55 | 55 | ||
56 | #define T6FW_VERSION_MAJOR 0x01 | 56 | #define T6FW_VERSION_MAJOR 0x01 |
57 | #define T6FW_VERSION_MINOR 0x10 | 57 | #define T6FW_VERSION_MINOR 0x10 |
58 | #define T6FW_VERSION_MICRO 0x21 | 58 | #define T6FW_VERSION_MICRO 0x2B |
59 | #define T6FW_VERSION_BUILD 0x00 | 59 | #define T6FW_VERSION_BUILD 0x00 |
60 | 60 | ||
61 | #define T6FW_MIN_VERSION_MAJOR 0x00 | 61 | #define T6FW_MIN_VERSION_MAJOR 0x00 |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index f3a09ab55900..4eee18ce9be4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -5078,9 +5078,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb, | |||
5078 | struct be_adapter *adapter = netdev_priv(dev); | 5078 | struct be_adapter *adapter = netdev_priv(dev); |
5079 | u8 l4_hdr = 0; | 5079 | u8 l4_hdr = 0; |
5080 | 5080 | ||
5081 | /* The code below restricts offload features for some tunneled packets. | 5081 | /* The code below restricts offload features for some tunneled and |
5082 | * Q-in-Q packets. | ||
5082 | * Offload features for normal (non tunnel) packets are unchanged. | 5083 | * Offload features for normal (non tunnel) packets are unchanged. |
5083 | */ | 5084 | */ |
5085 | features = vlan_features_check(skb, features); | ||
5084 | if (!skb->encapsulation || | 5086 | if (!skb->encapsulation || |
5085 | !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) | 5087 | !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) |
5086 | return features; | 5088 | return features; |
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 6ac336b546e6..1536356e2ea8 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c | |||
@@ -1174,11 +1174,17 @@ static int ftmac100_remove(struct platform_device *pdev) | |||
1174 | return 0; | 1174 | return 0; |
1175 | } | 1175 | } |
1176 | 1176 | ||
1177 | static const struct of_device_id ftmac100_of_ids[] = { | ||
1178 | { .compatible = "andestech,atmac100" }, | ||
1179 | { } | ||
1180 | }; | ||
1181 | |||
1177 | static struct platform_driver ftmac100_driver = { | 1182 | static struct platform_driver ftmac100_driver = { |
1178 | .probe = ftmac100_probe, | 1183 | .probe = ftmac100_probe, |
1179 | .remove = ftmac100_remove, | 1184 | .remove = ftmac100_remove, |
1180 | .driver = { | 1185 | .driver = { |
1181 | .name = DRV_NAME, | 1186 | .name = DRV_NAME, |
1187 | .of_match_table = ftmac100_of_ids | ||
1182 | }, | 1188 | }, |
1183 | }; | 1189 | }; |
1184 | 1190 | ||
@@ -1202,3 +1208,4 @@ module_exit(ftmac100_exit); | |||
1202 | MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); | 1208 | MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); |
1203 | MODULE_DESCRIPTION("FTMAC100 driver"); | 1209 | MODULE_DESCRIPTION("FTMAC100 driver"); |
1204 | MODULE_LICENSE("GPL"); | 1210 | MODULE_LICENSE("GPL"); |
1211 | MODULE_DEVICE_TABLE(of, ftmac100_of_ids); | ||
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 56a563f90b0b..f7c8649fd28f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -3192,7 +3192,7 @@ static int fec_reset_phy(struct platform_device *pdev) | |||
3192 | { | 3192 | { |
3193 | int err, phy_reset; | 3193 | int err, phy_reset; |
3194 | bool active_high = false; | 3194 | bool active_high = false; |
3195 | int msec = 1; | 3195 | int msec = 1, phy_post_delay = 0; |
3196 | struct device_node *np = pdev->dev.of_node; | 3196 | struct device_node *np = pdev->dev.of_node; |
3197 | 3197 | ||
3198 | if (!np) | 3198 | if (!np) |
@@ -3209,6 +3209,11 @@ static int fec_reset_phy(struct platform_device *pdev) | |||
3209 | else if (!gpio_is_valid(phy_reset)) | 3209 | else if (!gpio_is_valid(phy_reset)) |
3210 | return 0; | 3210 | return 0; |
3211 | 3211 | ||
3212 | err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); | ||
3213 | /* valid reset duration should be less than 1s */ | ||
3214 | if (!err && phy_post_delay > 1000) | ||
3215 | return -EINVAL; | ||
3216 | |||
3212 | active_high = of_property_read_bool(np, "phy-reset-active-high"); | 3217 | active_high = of_property_read_bool(np, "phy-reset-active-high"); |
3213 | 3218 | ||
3214 | err = devm_gpio_request_one(&pdev->dev, phy_reset, | 3219 | err = devm_gpio_request_one(&pdev->dev, phy_reset, |
@@ -3226,6 +3231,15 @@ static int fec_reset_phy(struct platform_device *pdev) | |||
3226 | 3231 | ||
3227 | gpio_set_value_cansleep(phy_reset, !active_high); | 3232 | gpio_set_value_cansleep(phy_reset, !active_high); |
3228 | 3233 | ||
3234 | if (!phy_post_delay) | ||
3235 | return 0; | ||
3236 | |||
3237 | if (phy_post_delay > 20) | ||
3238 | msleep(phy_post_delay); | ||
3239 | else | ||
3240 | usleep_range(phy_post_delay * 1000, | ||
3241 | phy_post_delay * 1000 + 1000); | ||
3242 | |||
3229 | return 0; | 3243 | return 0; |
3230 | } | 3244 | } |
3231 | #else /* CONFIG_OF */ | 3245 | #else /* CONFIG_OF */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 703205475524..83aab1e4c8c8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -2862,12 +2862,10 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) | |||
2862 | int port = 0; | 2862 | int port = 0; |
2863 | 2863 | ||
2864 | if (msi_x) { | 2864 | if (msi_x) { |
2865 | int nreq = dev->caps.num_ports * num_online_cpus() + 1; | 2865 | int nreq = min3(dev->caps.num_ports * |
2866 | 2866 | (int)num_online_cpus() + 1, | |
2867 | nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, | 2867 | dev->caps.num_eqs - dev->caps.reserved_eqs, |
2868 | nreq); | 2868 | MAX_MSIX); |
2869 | if (nreq > MAX_MSIX) | ||
2870 | nreq = MAX_MSIX; | ||
2871 | 2869 | ||
2872 | entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); | 2870 | entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); |
2873 | if (!entries) | 2871 | if (!entries) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index fc52d742b7f7..27251a78075c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig | |||
@@ -13,7 +13,7 @@ config MLX5_CORE | |||
13 | 13 | ||
14 | config MLX5_CORE_EN | 14 | config MLX5_CORE_EN |
15 | bool "Mellanox Technologies ConnectX-4 Ethernet support" | 15 | bool "Mellanox Technologies ConnectX-4 Ethernet support" |
16 | depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE | 16 | depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE |
17 | depends on IPV6=y || IPV6=n || MLX5_CORE=m | 17 | depends on IPV6=y || IPV6=n || MLX5_CORE=m |
18 | imply PTP_1588_CLOCK | 18 | imply PTP_1588_CLOCK |
19 | default n | 19 | default n |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 5bdaf3d545b2..10d282841f5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -774,7 +774,7 @@ static void cb_timeout_handler(struct work_struct *work) | |||
774 | mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", | 774 | mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", |
775 | mlx5_command_str(msg_to_opcode(ent->in)), | 775 | mlx5_command_str(msg_to_opcode(ent->in)), |
776 | msg_to_opcode(ent->in)); | 776 | msg_to_opcode(ent->in)); |
777 | mlx5_cmd_comp_handler(dev, 1UL << ent->idx); | 777 | mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); |
778 | } | 778 | } |
779 | 779 | ||
780 | static void cmd_work_handler(struct work_struct *work) | 780 | static void cmd_work_handler(struct work_struct *work) |
@@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work) | |||
804 | } | 804 | } |
805 | 805 | ||
806 | cmd->ent_arr[ent->idx] = ent; | 806 | cmd->ent_arr[ent->idx] = ent; |
807 | set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); | ||
807 | lay = get_inst(cmd, ent->idx); | 808 | lay = get_inst(cmd, ent->idx); |
808 | ent->lay = lay; | 809 | ent->lay = lay; |
809 | memset(lay, 0, sizeof(*lay)); | 810 | memset(lay, 0, sizeof(*lay)); |
@@ -825,6 +826,20 @@ static void cmd_work_handler(struct work_struct *work) | |||
825 | if (ent->callback) | 826 | if (ent->callback) |
826 | schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); | 827 | schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); |
827 | 828 | ||
829 | /* Skip sending command to fw if internal error */ | ||
830 | if (pci_channel_offline(dev->pdev) || | ||
831 | dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { | ||
832 | u8 status = 0; | ||
833 | u32 drv_synd; | ||
834 | |||
835 | ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status); | ||
836 | MLX5_SET(mbox_out, ent->out, status, status); | ||
837 | MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); | ||
838 | |||
839 | mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); | ||
840 | return; | ||
841 | } | ||
842 | |||
828 | /* ring doorbell after the descriptor is valid */ | 843 | /* ring doorbell after the descriptor is valid */ |
829 | mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); | 844 | mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); |
830 | wmb(); | 845 | wmb(); |
@@ -835,7 +850,7 @@ static void cmd_work_handler(struct work_struct *work) | |||
835 | poll_timeout(ent); | 850 | poll_timeout(ent); |
836 | /* make sure we read the descriptor after ownership is SW */ | 851 | /* make sure we read the descriptor after ownership is SW */ |
837 | rmb(); | 852 | rmb(); |
838 | mlx5_cmd_comp_handler(dev, 1UL << ent->idx); | 853 | mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT)); |
839 | } | 854 | } |
840 | } | 855 | } |
841 | 856 | ||
@@ -879,7 +894,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) | |||
879 | wait_for_completion(&ent->done); | 894 | wait_for_completion(&ent->done); |
880 | } else if (!wait_for_completion_timeout(&ent->done, timeout)) { | 895 | } else if (!wait_for_completion_timeout(&ent->done, timeout)) { |
881 | ent->ret = -ETIMEDOUT; | 896 | ent->ret = -ETIMEDOUT; |
882 | mlx5_cmd_comp_handler(dev, 1UL << ent->idx); | 897 | mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); |
883 | } | 898 | } |
884 | 899 | ||
885 | err = ent->ret; | 900 | err = ent->ret; |
@@ -1375,7 +1390,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) | |||
1375 | } | 1390 | } |
1376 | } | 1391 | } |
1377 | 1392 | ||
1378 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) | 1393 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) |
1379 | { | 1394 | { |
1380 | struct mlx5_cmd *cmd = &dev->cmd; | 1395 | struct mlx5_cmd *cmd = &dev->cmd; |
1381 | struct mlx5_cmd_work_ent *ent; | 1396 | struct mlx5_cmd_work_ent *ent; |
@@ -1395,6 +1410,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) | |||
1395 | struct semaphore *sem; | 1410 | struct semaphore *sem; |
1396 | 1411 | ||
1397 | ent = cmd->ent_arr[i]; | 1412 | ent = cmd->ent_arr[i]; |
1413 | |||
1414 | /* if we already completed the command, ignore it */ | ||
1415 | if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, | ||
1416 | &ent->state)) { | ||
1417 | /* only real completion can free the cmd slot */ | ||
1418 | if (!forced) { | ||
1419 | mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", | ||
1420 | ent->idx); | ||
1421 | free_ent(cmd, ent->idx); | ||
1422 | } | ||
1423 | continue; | ||
1424 | } | ||
1425 | |||
1398 | if (ent->callback) | 1426 | if (ent->callback) |
1399 | cancel_delayed_work(&ent->cb_timeout_work); | 1427 | cancel_delayed_work(&ent->cb_timeout_work); |
1400 | if (ent->page_queue) | 1428 | if (ent->page_queue) |
@@ -1417,7 +1445,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) | |||
1417 | mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", | 1445 | mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", |
1418 | ent->ret, deliv_status_to_str(ent->status), ent->status); | 1446 | ent->ret, deliv_status_to_str(ent->status), ent->status); |
1419 | } | 1447 | } |
1420 | free_ent(cmd, ent->idx); | 1448 | |
1449 | /* only real completion will free the entry slot */ | ||
1450 | if (!forced) | ||
1451 | free_ent(cmd, ent->idx); | ||
1421 | 1452 | ||
1422 | if (ent->callback) { | 1453 | if (ent->callback) { |
1423 | ds = ent->ts2 - ent->ts1; | 1454 | ds = ent->ts2 - ent->ts1; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0099a3e397bc..2fd044b23875 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -1003,7 +1003,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv); | |||
1003 | void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); | 1003 | void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); |
1004 | void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); | 1004 | void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); |
1005 | 1005 | ||
1006 | int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn); | 1006 | int mlx5e_create_ttc_table(struct mlx5e_priv *priv); |
1007 | void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); | 1007 | void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); |
1008 | 1008 | ||
1009 | int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, | 1009 | int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index ce7b09d72ff6..8209affa75c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -794,7 +794,6 @@ static void get_supported(u32 eth_proto_cap, | |||
794 | ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); | 794 | ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); |
795 | ptys2ethtool_supported_link(supported, eth_proto_cap); | 795 | ptys2ethtool_supported_link(supported, eth_proto_cap); |
796 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); | 796 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); |
797 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause); | ||
798 | } | 797 | } |
799 | 798 | ||
800 | static void get_advertising(u32 eth_proto_cap, u8 tx_pause, | 799 | static void get_advertising(u32 eth_proto_cap, u8 tx_pause, |
@@ -804,7 +803,7 @@ static void get_advertising(u32 eth_proto_cap, u8 tx_pause, | |||
804 | unsigned long *advertising = link_ksettings->link_modes.advertising; | 803 | unsigned long *advertising = link_ksettings->link_modes.advertising; |
805 | 804 | ||
806 | ptys2ethtool_adver_link(advertising, eth_proto_cap); | 805 | ptys2ethtool_adver_link(advertising, eth_proto_cap); |
807 | if (tx_pause) | 806 | if (rx_pause) |
808 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); | 807 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); |
809 | if (tx_pause ^ rx_pause) | 808 | if (tx_pause ^ rx_pause) |
810 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); | 809 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); |
@@ -849,6 +848,8 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, | |||
849 | struct mlx5e_priv *priv = netdev_priv(netdev); | 848 | struct mlx5e_priv *priv = netdev_priv(netdev); |
850 | struct mlx5_core_dev *mdev = priv->mdev; | 849 | struct mlx5_core_dev *mdev = priv->mdev; |
851 | u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; | 850 | u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; |
851 | u32 rx_pause = 0; | ||
852 | u32 tx_pause = 0; | ||
852 | u32 eth_proto_cap; | 853 | u32 eth_proto_cap; |
853 | u32 eth_proto_admin; | 854 | u32 eth_proto_admin; |
854 | u32 eth_proto_lp; | 855 | u32 eth_proto_lp; |
@@ -871,11 +872,13 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, | |||
871 | an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); | 872 | an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); |
872 | an_status = MLX5_GET(ptys_reg, out, an_status); | 873 | an_status = MLX5_GET(ptys_reg, out, an_status); |
873 | 874 | ||
875 | mlx5_query_port_pause(mdev, &rx_pause, &tx_pause); | ||
876 | |||
874 | ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); | 877 | ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); |
875 | ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); | 878 | ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); |
876 | 879 | ||
877 | get_supported(eth_proto_cap, link_ksettings); | 880 | get_supported(eth_proto_cap, link_ksettings); |
878 | get_advertising(eth_proto_admin, 0, 0, link_ksettings); | 881 | get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings); |
879 | get_speed_duplex(netdev, eth_proto_oper, link_ksettings); | 882 | get_speed_duplex(netdev, eth_proto_oper, link_ksettings); |
880 | 883 | ||
881 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; | 884 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 576d6787b484..53ed58320a24 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | |||
@@ -800,7 +800,7 @@ void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) | |||
800 | mlx5e_destroy_flow_table(&ttc->ft); | 800 | mlx5e_destroy_flow_table(&ttc->ft); |
801 | } | 801 | } |
802 | 802 | ||
803 | int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn) | 803 | int mlx5e_create_ttc_table(struct mlx5e_priv *priv) |
804 | { | 804 | { |
805 | struct mlx5e_ttc_table *ttc = &priv->fs.ttc; | 805 | struct mlx5e_ttc_table *ttc = &priv->fs.ttc; |
806 | struct mlx5_flow_table_attr ft_attr = {}; | 806 | struct mlx5_flow_table_attr ft_attr = {}; |
@@ -810,7 +810,6 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn) | |||
810 | ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE; | 810 | ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE; |
811 | ft_attr.level = MLX5E_TTC_FT_LEVEL; | 811 | ft_attr.level = MLX5E_TTC_FT_LEVEL; |
812 | ft_attr.prio = MLX5E_NIC_PRIO; | 812 | ft_attr.prio = MLX5E_NIC_PRIO; |
813 | ft_attr.underlay_qpn = underlay_qpn; | ||
814 | 813 | ||
815 | ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); | 814 | ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); |
816 | if (IS_ERR(ft->t)) { | 815 | if (IS_ERR(ft->t)) { |
@@ -1147,7 +1146,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) | |||
1147 | priv->netdev->hw_features &= ~NETIF_F_NTUPLE; | 1146 | priv->netdev->hw_features &= ~NETIF_F_NTUPLE; |
1148 | } | 1147 | } |
1149 | 1148 | ||
1150 | err = mlx5e_create_ttc_table(priv, 0); | 1149 | err = mlx5e_create_ttc_table(priv); |
1151 | if (err) { | 1150 | if (err) { |
1152 | netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", | 1151 | netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", |
1153 | err); | 1152 | err); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a61b71b6fff3..41cd22a223dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -2976,7 +2976,7 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) | |||
2976 | new_channels.params = priv->channels.params; | 2976 | new_channels.params = priv->channels.params; |
2977 | new_channels.params.num_tc = tc ? tc : 1; | 2977 | new_channels.params.num_tc = tc ? tc : 1; |
2978 | 2978 | ||
2979 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { | 2979 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
2980 | priv->channels.params = new_channels.params; | 2980 | priv->channels.params = new_channels.params; |
2981 | goto out; | 2981 | goto out; |
2982 | } | 2982 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 7b1566f0ae58..66b5fec15313 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
@@ -1041,6 +1041,8 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) | |||
1041 | #define MLX5_IB_GRH_BYTES 40 | 1041 | #define MLX5_IB_GRH_BYTES 40 |
1042 | #define MLX5_IPOIB_ENCAP_LEN 4 | 1042 | #define MLX5_IPOIB_ENCAP_LEN 4 |
1043 | #define MLX5_GID_SIZE 16 | 1043 | #define MLX5_GID_SIZE 16 |
1044 | #define MLX5_IPOIB_PSEUDO_LEN 20 | ||
1045 | #define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN) | ||
1044 | 1046 | ||
1045 | static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, | 1047 | static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, |
1046 | struct mlx5_cqe64 *cqe, | 1048 | struct mlx5_cqe64 *cqe, |
@@ -1048,6 +1050,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, | |||
1048 | struct sk_buff *skb) | 1050 | struct sk_buff *skb) |
1049 | { | 1051 | { |
1050 | struct net_device *netdev = rq->netdev; | 1052 | struct net_device *netdev = rq->netdev; |
1053 | char *pseudo_header; | ||
1051 | u8 *dgid; | 1054 | u8 *dgid; |
1052 | u8 g; | 1055 | u8 g; |
1053 | 1056 | ||
@@ -1076,8 +1079,11 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, | |||
1076 | if (likely(netdev->features & NETIF_F_RXHASH)) | 1079 | if (likely(netdev->features & NETIF_F_RXHASH)) |
1077 | mlx5e_skb_set_hash(cqe, skb); | 1080 | mlx5e_skb_set_hash(cqe, skb); |
1078 | 1081 | ||
1082 | /* 20 bytes of ipoib header and 4 for encap existing */ | ||
1083 | pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN); | ||
1084 | memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN); | ||
1079 | skb_reset_mac_header(skb); | 1085 | skb_reset_mac_header(skb); |
1080 | skb_pull(skb, MLX5_IPOIB_ENCAP_LEN); | 1086 | skb_pull(skb, MLX5_IPOIB_HARD_LEN); |
1081 | 1087 | ||
1082 | skb->dev = netdev; | 1088 | skb->dev = netdev; |
1083 | 1089 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 11c27e4fadf6..ec63158ab643 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <net/tc_act/tc_vlan.h> | 43 | #include <net/tc_act/tc_vlan.h> |
44 | #include <net/tc_act/tc_tunnel_key.h> | 44 | #include <net/tc_act/tc_tunnel_key.h> |
45 | #include <net/tc_act/tc_pedit.h> | 45 | #include <net/tc_act/tc_pedit.h> |
46 | #include <net/tc_act/tc_csum.h> | ||
46 | #include <net/vxlan.h> | 47 | #include <net/vxlan.h> |
47 | #include <net/arp.h> | 48 | #include <net/arp.h> |
48 | #include "en.h" | 49 | #include "en.h" |
@@ -384,7 +385,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, | |||
384 | if (e->flags & MLX5_ENCAP_ENTRY_VALID) | 385 | if (e->flags & MLX5_ENCAP_ENTRY_VALID) |
385 | mlx5_encap_dealloc(priv->mdev, e->encap_id); | 386 | mlx5_encap_dealloc(priv->mdev, e->encap_id); |
386 | 387 | ||
387 | hlist_del_rcu(&e->encap_hlist); | 388 | hash_del_rcu(&e->encap_hlist); |
388 | kfree(e->encap_header); | 389 | kfree(e->encap_header); |
389 | kfree(e); | 390 | kfree(e); |
390 | } | 391 | } |
@@ -925,11 +926,11 @@ static int offload_pedit_fields(struct pedit_headers *masks, | |||
925 | struct mlx5e_tc_flow_parse_attr *parse_attr) | 926 | struct mlx5e_tc_flow_parse_attr *parse_attr) |
926 | { | 927 | { |
927 | struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; | 928 | struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; |
928 | int i, action_size, nactions, max_actions, first, last; | 929 | int i, action_size, nactions, max_actions, first, last, first_z; |
929 | void *s_masks_p, *a_masks_p, *vals_p; | 930 | void *s_masks_p, *a_masks_p, *vals_p; |
930 | u32 s_mask, a_mask, val; | ||
931 | struct mlx5_fields *f; | 931 | struct mlx5_fields *f; |
932 | u8 cmd, field_bsize; | 932 | u8 cmd, field_bsize; |
933 | u32 s_mask, a_mask; | ||
933 | unsigned long mask; | 934 | unsigned long mask; |
934 | void *action; | 935 | void *action; |
935 | 936 | ||
@@ -946,7 +947,8 @@ static int offload_pedit_fields(struct pedit_headers *masks, | |||
946 | for (i = 0; i < ARRAY_SIZE(fields); i++) { | 947 | for (i = 0; i < ARRAY_SIZE(fields); i++) { |
947 | f = &fields[i]; | 948 | f = &fields[i]; |
948 | /* avoid seeing bits set from previous iterations */ | 949 | /* avoid seeing bits set from previous iterations */ |
949 | s_mask = a_mask = mask = val = 0; | 950 | s_mask = 0; |
951 | a_mask = 0; | ||
950 | 952 | ||
951 | s_masks_p = (void *)set_masks + f->offset; | 953 | s_masks_p = (void *)set_masks + f->offset; |
952 | a_masks_p = (void *)add_masks + f->offset; | 954 | a_masks_p = (void *)add_masks + f->offset; |
@@ -981,12 +983,12 @@ static int offload_pedit_fields(struct pedit_headers *masks, | |||
981 | memset(a_masks_p, 0, f->size); | 983 | memset(a_masks_p, 0, f->size); |
982 | } | 984 | } |
983 | 985 | ||
984 | memcpy(&val, vals_p, f->size); | ||
985 | |||
986 | field_bsize = f->size * BITS_PER_BYTE; | 986 | field_bsize = f->size * BITS_PER_BYTE; |
987 | |||
988 | first_z = find_first_zero_bit(&mask, field_bsize); | ||
987 | first = find_first_bit(&mask, field_bsize); | 989 | first = find_first_bit(&mask, field_bsize); |
988 | last = find_last_bit(&mask, field_bsize); | 990 | last = find_last_bit(&mask, field_bsize); |
989 | if (first > 0 || last != (field_bsize - 1)) { | 991 | if (first > 0 || last != (field_bsize - 1) || first_z < last) { |
990 | printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n", | 992 | printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n", |
991 | mask); | 993 | mask); |
992 | return -EOPNOTSUPP; | 994 | return -EOPNOTSUPP; |
@@ -1002,11 +1004,11 @@ static int offload_pedit_fields(struct pedit_headers *masks, | |||
1002 | } | 1004 | } |
1003 | 1005 | ||
1004 | if (field_bsize == 32) | 1006 | if (field_bsize == 32) |
1005 | MLX5_SET(set_action_in, action, data, ntohl(val)); | 1007 | MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p)); |
1006 | else if (field_bsize == 16) | 1008 | else if (field_bsize == 16) |
1007 | MLX5_SET(set_action_in, action, data, ntohs(val)); | 1009 | MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p)); |
1008 | else if (field_bsize == 8) | 1010 | else if (field_bsize == 8) |
1009 | MLX5_SET(set_action_in, action, data, val); | 1011 | MLX5_SET(set_action_in, action, data, *(u8 *)vals_p); |
1010 | 1012 | ||
1011 | action += action_size; | 1013 | action += action_size; |
1012 | nactions++; | 1014 | nactions++; |
@@ -1109,6 +1111,28 @@ out_err: | |||
1109 | return err; | 1111 | return err; |
1110 | } | 1112 | } |
1111 | 1113 | ||
1114 | static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags) | ||
1115 | { | ||
1116 | u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | | ||
1117 | TCA_CSUM_UPDATE_FLAG_UDP; | ||
1118 | |||
1119 | /* The HW recalcs checksums only if re-writing headers */ | ||
1120 | if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { | ||
1121 | netdev_warn(priv->netdev, | ||
1122 | "TC csum action is only offloaded with pedit\n"); | ||
1123 | return false; | ||
1124 | } | ||
1125 | |||
1126 | if (update_flags & ~prot_flags) { | ||
1127 | netdev_warn(priv->netdev, | ||
1128 | "can't offload TC csum action for some header/s - flags %#x\n", | ||
1129 | update_flags); | ||
1130 | return false; | ||
1131 | } | ||
1132 | |||
1133 | return true; | ||
1134 | } | ||
1135 | |||
1112 | static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | 1136 | static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, |
1113 | struct mlx5e_tc_flow_parse_attr *parse_attr, | 1137 | struct mlx5e_tc_flow_parse_attr *parse_attr, |
1114 | struct mlx5e_tc_flow *flow) | 1138 | struct mlx5e_tc_flow *flow) |
@@ -1149,6 +1173,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
1149 | continue; | 1173 | continue; |
1150 | } | 1174 | } |
1151 | 1175 | ||
1176 | if (is_tcf_csum(a)) { | ||
1177 | if (csum_offload_supported(priv, attr->action, | ||
1178 | tcf_csum_update_flags(a))) | ||
1179 | continue; | ||
1180 | |||
1181 | return -EOPNOTSUPP; | ||
1182 | } | ||
1183 | |||
1152 | if (is_tcf_skbedit_mark(a)) { | 1184 | if (is_tcf_skbedit_mark(a)) { |
1153 | u32 mark = tcf_skbedit_mark(a); | 1185 | u32 mark = tcf_skbedit_mark(a); |
1154 | 1186 | ||
@@ -1651,6 +1683,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
1651 | continue; | 1683 | continue; |
1652 | } | 1684 | } |
1653 | 1685 | ||
1686 | if (is_tcf_csum(a)) { | ||
1687 | if (csum_offload_supported(priv, attr->action, | ||
1688 | tcf_csum_update_flags(a))) | ||
1689 | continue; | ||
1690 | |||
1691 | return -EOPNOTSUPP; | ||
1692 | } | ||
1693 | |||
1654 | if (is_tcf_mirred_egress_redirect(a)) { | 1694 | if (is_tcf_mirred_egress_redirect(a)) { |
1655 | int ifindex = tcf_mirred_ifindex(a); | 1695 | int ifindex = tcf_mirred_ifindex(a); |
1656 | struct net_device *out_dev, *encap_dev = NULL; | 1696 | struct net_device *out_dev, *encap_dev = NULL; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ea5d8d37a75c..33eae5ad2fb0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -422,7 +422,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) | |||
422 | break; | 422 | break; |
423 | 423 | ||
424 | case MLX5_EVENT_TYPE_CMD: | 424 | case MLX5_EVENT_TYPE_CMD: |
425 | mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); | 425 | mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); |
426 | break; | 426 | break; |
427 | 427 | ||
428 | case MLX5_EVENT_TYPE_PORT_CHANGE: | 428 | case MLX5_EVENT_TYPE_PORT_CHANGE: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 19e3d2fc2099..fcec7bedd3cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | |||
@@ -40,28 +40,25 @@ | |||
40 | #include "eswitch.h" | 40 | #include "eswitch.h" |
41 | 41 | ||
42 | int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, | 42 | int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, |
43 | struct mlx5_flow_table *ft) | 43 | struct mlx5_flow_table *ft, u32 underlay_qpn) |
44 | { | 44 | { |
45 | u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; | 45 | u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; |
46 | u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; | 46 | u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; |
47 | 47 | ||
48 | if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && | 48 | if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && |
49 | ft->underlay_qpn == 0) | 49 | underlay_qpn == 0) |
50 | return 0; | 50 | return 0; |
51 | 51 | ||
52 | MLX5_SET(set_flow_table_root_in, in, opcode, | 52 | MLX5_SET(set_flow_table_root_in, in, opcode, |
53 | MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); | 53 | MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); |
54 | MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); | 54 | MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); |
55 | MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); | 55 | MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); |
56 | MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn); | ||
56 | if (ft->vport) { | 57 | if (ft->vport) { |
57 | MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); | 58 | MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); |
58 | MLX5_SET(set_flow_table_root_in, in, other_vport, 1); | 59 | MLX5_SET(set_flow_table_root_in, in, other_vport, 1); |
59 | } | 60 | } |
60 | 61 | ||
61 | if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && | ||
62 | ft->underlay_qpn != 0) | ||
63 | MLX5_SET(set_flow_table_root_in, in, underlay_qpn, ft->underlay_qpn); | ||
64 | |||
65 | return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); | 62 | return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
66 | } | 63 | } |
67 | 64 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 8fad80688536..0f98a7cf4877 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | |||
@@ -71,7 +71,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, | |||
71 | unsigned int index); | 71 | unsigned int index); |
72 | 72 | ||
73 | int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, | 73 | int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, |
74 | struct mlx5_flow_table *ft); | 74 | struct mlx5_flow_table *ft, |
75 | u32 underlay_qpn); | ||
75 | 76 | ||
76 | int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); | 77 | int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); |
77 | int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); | 78 | int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index b8a176503d38..0e487e8ca634 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -650,7 +650,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio | |||
650 | if (ft->level >= min_level) | 650 | if (ft->level >= min_level) |
651 | return 0; | 651 | return 0; |
652 | 652 | ||
653 | err = mlx5_cmd_update_root_ft(root->dev, ft); | 653 | err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn); |
654 | if (err) | 654 | if (err) |
655 | mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", | 655 | mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", |
656 | ft->id); | 656 | ft->id); |
@@ -818,8 +818,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa | |||
818 | goto unlock_root; | 818 | goto unlock_root; |
819 | } | 819 | } |
820 | 820 | ||
821 | ft->underlay_qpn = ft_attr->underlay_qpn; | ||
822 | |||
823 | tree_init_node(&ft->node, 1, del_flow_table); | 821 | tree_init_node(&ft->node, 1, del_flow_table); |
824 | log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; | 822 | log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; |
825 | next_ft = find_next_chained_ft(fs_prio); | 823 | next_ft = find_next_chained_ft(fs_prio); |
@@ -1489,7 +1487,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) | |||
1489 | 1487 | ||
1490 | new_root_ft = find_next_ft(ft); | 1488 | new_root_ft = find_next_ft(ft); |
1491 | if (new_root_ft) { | 1489 | if (new_root_ft) { |
1492 | int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft); | 1490 | int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, |
1491 | root->underlay_qpn); | ||
1493 | 1492 | ||
1494 | if (err) { | 1493 | if (err) { |
1495 | mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", | 1494 | mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", |
@@ -2062,3 +2061,21 @@ err: | |||
2062 | mlx5_cleanup_fs(dev); | 2061 | mlx5_cleanup_fs(dev); |
2063 | return err; | 2062 | return err; |
2064 | } | 2063 | } |
2064 | |||
2065 | int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) | ||
2066 | { | ||
2067 | struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns; | ||
2068 | |||
2069 | root->underlay_qpn = underlay_qpn; | ||
2070 | return 0; | ||
2071 | } | ||
2072 | EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn); | ||
2073 | |||
2074 | int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) | ||
2075 | { | ||
2076 | struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns; | ||
2077 | |||
2078 | root->underlay_qpn = 0; | ||
2079 | return 0; | ||
2080 | } | ||
2081 | EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn); | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 81eafc7b9dd9..990acee6fb09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | |||
@@ -118,7 +118,6 @@ struct mlx5_flow_table { | |||
118 | /* FWD rules that point on this flow table */ | 118 | /* FWD rules that point on this flow table */ |
119 | struct list_head fwd_rules; | 119 | struct list_head fwd_rules; |
120 | u32 flags; | 120 | u32 flags; |
121 | u32 underlay_qpn; | ||
122 | }; | 121 | }; |
123 | 122 | ||
124 | struct mlx5_fc_cache { | 123 | struct mlx5_fc_cache { |
@@ -195,6 +194,7 @@ struct mlx5_flow_root_namespace { | |||
195 | struct mlx5_flow_table *root_ft; | 194 | struct mlx5_flow_table *root_ft; |
196 | /* Should be held when chaining flow tables */ | 195 | /* Should be held when chaining flow tables */ |
197 | struct mutex chain_lock; | 196 | struct mutex chain_lock; |
197 | u32 underlay_qpn; | ||
198 | }; | 198 | }; |
199 | 199 | ||
200 | int mlx5_init_fc_stats(struct mlx5_core_dev *dev); | 200 | int mlx5_init_fc_stats(struct mlx5_core_dev *dev); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index d0515391d33b..44f59b1d6f0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev) | |||
90 | spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); | 90 | spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); |
91 | 91 | ||
92 | mlx5_core_dbg(dev, "vector 0x%llx\n", vector); | 92 | mlx5_core_dbg(dev, "vector 0x%llx\n", vector); |
93 | mlx5_cmd_comp_handler(dev, vector); | 93 | mlx5_cmd_comp_handler(dev, vector, true); |
94 | return; | 94 | return; |
95 | 95 | ||
96 | no_trig: | 96 | no_trig: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c index 019c230da498..cc1858752e70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c | |||
@@ -66,6 +66,10 @@ static void mlx5i_init(struct mlx5_core_dev *mdev, | |||
66 | 66 | ||
67 | mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); | 67 | mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); |
68 | 68 | ||
69 | /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */ | ||
70 | mlx5e_set_rq_type_params(mdev, &priv->channels.params, MLX5_WQ_TYPE_LINKED_LIST); | ||
71 | priv->channels.params.lro_en = false; | ||
72 | |||
69 | mutex_init(&priv->state_lock); | 73 | mutex_init(&priv->state_lock); |
70 | 74 | ||
71 | netdev->hw_features |= NETIF_F_SG; | 75 | netdev->hw_features |= NETIF_F_SG; |
@@ -156,6 +160,8 @@ out: | |||
156 | 160 | ||
157 | static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) | 161 | static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) |
158 | { | 162 | { |
163 | mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn); | ||
164 | |||
159 | mlx5_core_destroy_qp(mdev, qp); | 165 | mlx5_core_destroy_qp(mdev, qp); |
160 | } | 166 | } |
161 | 167 | ||
@@ -170,6 +176,8 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) | |||
170 | return err; | 176 | return err; |
171 | } | 177 | } |
172 | 178 | ||
179 | mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); | ||
180 | |||
173 | err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); | 181 | err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); |
174 | if (err) { | 182 | if (err) { |
175 | mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); | 183 | mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); |
@@ -189,7 +197,6 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) | |||
189 | 197 | ||
190 | static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) | 198 | static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) |
191 | { | 199 | { |
192 | struct mlx5i_priv *ipriv = priv->ppriv; | ||
193 | int err; | 200 | int err; |
194 | 201 | ||
195 | priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, | 202 | priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, |
@@ -205,7 +212,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) | |||
205 | priv->netdev->hw_features &= ~NETIF_F_NTUPLE; | 212 | priv->netdev->hw_features &= ~NETIF_F_NTUPLE; |
206 | } | 213 | } |
207 | 214 | ||
208 | err = mlx5e_create_ttc_table(priv, ipriv->qp.qpn); | 215 | err = mlx5e_create_ttc_table(priv); |
209 | if (err) { | 216 | if (err) { |
210 | netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", | 217 | netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", |
211 | err); | 218 | err); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 0c123d571b4c..fe5546bb4153 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -612,7 +612,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) | |||
612 | struct mlx5_priv *priv = &mdev->priv; | 612 | struct mlx5_priv *priv = &mdev->priv; |
613 | struct msix_entry *msix = priv->msix_arr; | 613 | struct msix_entry *msix = priv->msix_arr; |
614 | int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; | 614 | int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; |
615 | int err; | ||
616 | 615 | ||
617 | if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { | 616 | if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { |
618 | mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); | 617 | mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); |
@@ -622,18 +621,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) | |||
622 | cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), | 621 | cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), |
623 | priv->irq_info[i].mask); | 622 | priv->irq_info[i].mask); |
624 | 623 | ||
625 | err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); | 624 | #ifdef CONFIG_SMP |
626 | if (err) { | 625 | if (irq_set_affinity_hint(irq, priv->irq_info[i].mask)) |
627 | mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", | 626 | mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); |
628 | irq); | 627 | #endif |
629 | goto err_clear_mask; | ||
630 | } | ||
631 | 628 | ||
632 | return 0; | 629 | return 0; |
633 | |||
634 | err_clear_mask: | ||
635 | free_cpumask_var(priv->irq_info[i].mask); | ||
636 | return err; | ||
637 | } | 630 | } |
638 | 631 | ||
639 | static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) | 632 | static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index ea56f6ade6b4..5f0a7bc692a4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | |||
@@ -199,10 +199,11 @@ static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp, | |||
199 | 199 | ||
200 | entry->counter_valid = false; | 200 | entry->counter_valid = false; |
201 | entry->counter = 0; | 201 | entry->counter = 0; |
202 | entry->index = mlxsw_sp_rif_index(rif); | ||
203 | |||
202 | if (!counters_enabled) | 204 | if (!counters_enabled) |
203 | return 0; | 205 | return 0; |
204 | 206 | ||
205 | entry->index = mlxsw_sp_rif_index(rif); | ||
206 | err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif, | 207 | err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif, |
207 | MLXSW_SP_RIF_COUNTER_EGRESS, | 208 | MLXSW_SP_RIF_COUNTER_EGRESS, |
208 | &cnt); | 209 | &cnt); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 33cec1cc1642..9f89c4137d21 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -206,6 +206,9 @@ void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp, | |||
206 | { | 206 | { |
207 | unsigned int *p_counter_index; | 207 | unsigned int *p_counter_index; |
208 | 208 | ||
209 | if (!mlxsw_sp_rif_counter_valid_get(rif, dir)) | ||
210 | return; | ||
211 | |||
209 | p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); | 212 | p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); |
210 | if (WARN_ON(!p_counter_index)) | 213 | if (WARN_ON(!p_counter_index)) |
211 | return; | 214 | return; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 0d8411f1f954..f4bb0c0b7c1d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -1497,8 +1497,7 @@ do_fdb_op: | |||
1497 | err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, | 1497 | err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, |
1498 | adding, true); | 1498 | adding, true); |
1499 | if (err) { | 1499 | if (err) { |
1500 | if (net_ratelimit()) | 1500 | dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"); |
1501 | netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); | ||
1502 | return; | 1501 | return; |
1503 | } | 1502 | } |
1504 | 1503 | ||
@@ -1558,8 +1557,7 @@ do_fdb_op: | |||
1558 | err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, | 1557 | err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, |
1559 | adding, true); | 1558 | adding, true); |
1560 | if (err) { | 1559 | if (err) { |
1561 | if (net_ratelimit()) | 1560 | dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"); |
1562 | netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); | ||
1563 | return; | 1561 | return; |
1564 | } | 1562 | } |
1565 | 1563 | ||
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index b8d5270359cd..e30676515529 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | |||
@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) | |||
247 | cmd.req.arg3 = 0; | 247 | cmd.req.arg3 = 0; |
248 | 248 | ||
249 | if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) | 249 | if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) |
250 | netxen_issue_cmd(adapter, &cmd); | 250 | rcode = netxen_issue_cmd(adapter, &cmd); |
251 | 251 | ||
252 | if (rcode != NX_RCODE_SUCCESS) | 252 | if (rcode != NX_RCODE_SUCCESS) |
253 | return -EIO; | 253 | return -EIO; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 67200c5498ab..0a8fde629991 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | |||
@@ -983,7 +983,7 @@ void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn, | |||
983 | memset(&camline, 0, sizeof(union gft_cam_line_union)); | 983 | memset(&camline, 0, sizeof(union gft_cam_line_union)); |
984 | qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, | 984 | qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, |
985 | camline.cam_line_mapped.camline); | 985 | camline.cam_line_mapped.camline); |
986 | memset(&ramline, 0, sizeof(union gft_cam_line_union)); | 986 | memset(&ramline, 0, sizeof(ramline)); |
987 | 987 | ||
988 | for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) { | 988 | for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) { |
989 | u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM; | 989 | u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 49bad00a0f8f..7245b1072518 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -37,8 +37,8 @@ | |||
37 | 37 | ||
38 | #define _QLCNIC_LINUX_MAJOR 5 | 38 | #define _QLCNIC_LINUX_MAJOR 5 |
39 | #define _QLCNIC_LINUX_MINOR 3 | 39 | #define _QLCNIC_LINUX_MINOR 3 |
40 | #define _QLCNIC_LINUX_SUBVERSION 65 | 40 | #define _QLCNIC_LINUX_SUBVERSION 66 |
41 | #define QLCNIC_LINUX_VERSIONID "5.3.65" | 41 | #define QLCNIC_LINUX_VERSIONID "5.3.66" |
42 | #define QLCNIC_DRV_IDC_VER 0x01 | 42 | #define QLCNIC_DRV_IDC_VER 0x01 |
43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ | 43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ |
44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) | 44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 718bf58a7da6..4fb68797630e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -3168,6 +3168,40 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, | |||
3168 | return 0; | 3168 | return 0; |
3169 | } | 3169 | } |
3170 | 3170 | ||
3171 | void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter) | ||
3172 | { | ||
3173 | struct qlcnic_hardware_context *ahw = adapter->ahw; | ||
3174 | struct qlcnic_cmd_args cmd; | ||
3175 | u32 config; | ||
3176 | int err; | ||
3177 | |||
3178 | err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); | ||
3179 | if (err) | ||
3180 | return; | ||
3181 | |||
3182 | err = qlcnic_issue_cmd(adapter, &cmd); | ||
3183 | if (err) { | ||
3184 | dev_info(&adapter->pdev->dev, | ||
3185 | "Get Link Status Command failed: 0x%x\n", err); | ||
3186 | goto out; | ||
3187 | } else { | ||
3188 | config = cmd.rsp.arg[3]; | ||
3189 | |||
3190 | switch (QLC_83XX_SFP_MODULE_TYPE(config)) { | ||
3191 | case QLC_83XX_MODULE_FIBRE_1000BASE_SX: | ||
3192 | case QLC_83XX_MODULE_FIBRE_1000BASE_LX: | ||
3193 | case QLC_83XX_MODULE_FIBRE_1000BASE_CX: | ||
3194 | case QLC_83XX_MODULE_TP_1000BASE_T: | ||
3195 | ahw->port_type = QLCNIC_GBE; | ||
3196 | break; | ||
3197 | default: | ||
3198 | ahw->port_type = QLCNIC_XGBE; | ||
3199 | } | ||
3200 | } | ||
3201 | out: | ||
3202 | qlcnic_free_mbx_args(&cmd); | ||
3203 | } | ||
3204 | |||
3171 | int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) | 3205 | int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) |
3172 | { | 3206 | { |
3173 | u8 pci_func; | 3207 | u8 pci_func; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 3dfe8e27b51c..b75a81246856 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | |||
@@ -637,6 +637,7 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, | |||
637 | int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, | 637 | int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, |
638 | struct ethtool_pauseparam *); | 638 | struct ethtool_pauseparam *); |
639 | int qlcnic_83xx_test_link(struct qlcnic_adapter *); | 639 | int qlcnic_83xx_test_link(struct qlcnic_adapter *); |
640 | void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter); | ||
640 | int qlcnic_83xx_reg_test(struct qlcnic_adapter *); | 641 | int qlcnic_83xx_reg_test(struct qlcnic_adapter *); |
641 | int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); | 642 | int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); |
642 | int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); | 643 | int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 9a869c15d8bf..7f7deeaf1cf0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
@@ -486,6 +486,9 @@ static int qlcnic_set_link_ksettings(struct net_device *dev, | |||
486 | u32 ret = 0; | 486 | u32 ret = 0; |
487 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 487 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
488 | 488 | ||
489 | if (qlcnic_83xx_check(adapter)) | ||
490 | qlcnic_83xx_get_port_type(adapter); | ||
491 | |||
489 | if (adapter->ahw->port_type != QLCNIC_GBE) | 492 | if (adapter->ahw->port_type != QLCNIC_GBE) |
490 | return -EOPNOTSUPP; | 493 | return -EOPNOTSUPP; |
491 | 494 | ||
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 513e6c74e199..24ca7df15d07 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c | |||
@@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca) | |||
296 | 296 | ||
297 | /* Allocate rx SKB if we don't have one available. */ | 297 | /* Allocate rx SKB if we don't have one available. */ |
298 | if (!qca->rx_skb) { | 298 | if (!qca->rx_skb) { |
299 | qca->rx_skb = netdev_alloc_skb(net_dev, | 299 | qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, |
300 | net_dev->mtu + VLAN_ETH_HLEN); | 300 | net_dev->mtu + |
301 | VLAN_ETH_HLEN); | ||
301 | if (!qca->rx_skb) { | 302 | if (!qca->rx_skb) { |
302 | netdev_dbg(net_dev, "out of RX resources\n"); | 303 | netdev_dbg(net_dev, "out of RX resources\n"); |
303 | qca->stats.out_of_mem++; | 304 | qca->stats.out_of_mem++; |
@@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca) | |||
377 | qca->rx_skb, qca->rx_skb->dev); | 378 | qca->rx_skb, qca->rx_skb->dev); |
378 | qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; | 379 | qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; |
379 | netif_rx_ni(qca->rx_skb); | 380 | netif_rx_ni(qca->rx_skb); |
380 | qca->rx_skb = netdev_alloc_skb(net_dev, | 381 | qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, |
381 | net_dev->mtu + VLAN_ETH_HLEN); | 382 | net_dev->mtu + VLAN_ETH_HLEN); |
382 | if (!qca->rx_skb) { | 383 | if (!qca->rx_skb) { |
383 | netdev_dbg(net_dev, "out of RX resources\n"); | 384 | netdev_dbg(net_dev, "out of RX resources\n"); |
@@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev) | |||
759 | if (!qca->rx_buffer) | 760 | if (!qca->rx_buffer) |
760 | return -ENOBUFS; | 761 | return -ENOBUFS; |
761 | 762 | ||
762 | qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); | 763 | qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu + |
764 | VLAN_ETH_HLEN); | ||
763 | if (!qca->rx_skb) { | 765 | if (!qca->rx_skb) { |
764 | kfree(qca->rx_buffer); | 766 | kfree(qca->rx_buffer); |
765 | netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); | 767 | netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f68c4db656ed..2d686ccf971b 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -3220,7 +3220,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
3220 | /* MDIO bus init */ | 3220 | /* MDIO bus init */ |
3221 | ret = sh_mdio_init(mdp, pd); | 3221 | ret = sh_mdio_init(mdp, pd); |
3222 | if (ret) { | 3222 | if (ret) { |
3223 | dev_err(&ndev->dev, "failed to initialise MDIO\n"); | 3223 | if (ret != -EPROBE_DEFER) |
3224 | dev_err(&pdev->dev, "MDIO init failed: %d\n", ret); | ||
3224 | goto out_release; | 3225 | goto out_release; |
3225 | } | 3226 | } |
3226 | 3227 | ||
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 7b916aa21bde..4d7fb8af880d 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h | |||
@@ -18,8 +18,12 @@ | |||
18 | #include "mcdi.h" | 18 | #include "mcdi.h" |
19 | 19 | ||
20 | enum { | 20 | enum { |
21 | EFX_REV_SIENA_A0 = 0, | 21 | /* Revisions 0-2 were Falcon A0, A1 and B0 respectively. |
22 | EFX_REV_HUNT_A0 = 1, | 22 | * They are not supported by this driver but these revision numbers |
23 | * form part of the ethtool API for register dumping. | ||
24 | */ | ||
25 | EFX_REV_SIENA_A0 = 3, | ||
26 | EFX_REV_HUNT_A0 = 4, | ||
23 | }; | 27 | }; |
24 | 28 | ||
25 | static inline int efx_nic_rev(struct efx_nic *efx) | 29 | static inline int efx_nic_rev(struct efx_nic *efx) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index cd8c60132390..a74c481401c4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -3725,7 +3725,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc, | |||
3725 | ep++; | 3725 | ep++; |
3726 | } else { | 3726 | } else { |
3727 | seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", | 3727 | seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
3728 | i, (unsigned int)virt_to_phys(ep), | 3728 | i, (unsigned int)virt_to_phys(p), |
3729 | le32_to_cpu(p->des0), le32_to_cpu(p->des1), | 3729 | le32_to_cpu(p->des0), le32_to_cpu(p->des1), |
3730 | le32_to_cpu(p->des2), le32_to_cpu(p->des3)); | 3730 | le32_to_cpu(p->des2), le32_to_cpu(p->des3)); |
3731 | p++; | 3731 | p++; |
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 5a90fed06260..5b56c24b6ed2 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c | |||
@@ -411,13 +411,14 @@ static int vsw_port_remove(struct vio_dev *vdev) | |||
411 | 411 | ||
412 | if (port) { | 412 | if (port) { |
413 | del_timer_sync(&port->vio.timer); | 413 | del_timer_sync(&port->vio.timer); |
414 | del_timer_sync(&port->clean_timer); | ||
414 | 415 | ||
415 | napi_disable(&port->napi); | 416 | napi_disable(&port->napi); |
417 | unregister_netdev(port->dev); | ||
416 | 418 | ||
417 | list_del_rcu(&port->list); | 419 | list_del_rcu(&port->list); |
418 | 420 | ||
419 | synchronize_rcu(); | 421 | synchronize_rcu(); |
420 | del_timer_sync(&port->clean_timer); | ||
421 | spin_lock_irqsave(&port->vp->lock, flags); | 422 | spin_lock_irqsave(&port->vp->lock, flags); |
422 | sunvnet_port_rm_txq_common(port); | 423 | sunvnet_port_rm_txq_common(port); |
423 | spin_unlock_irqrestore(&port->vp->lock, flags); | 424 | spin_unlock_irqrestore(&port->vp->lock, flags); |
@@ -427,7 +428,6 @@ static int vsw_port_remove(struct vio_dev *vdev) | |||
427 | 428 | ||
428 | dev_set_drvdata(&vdev->dev, NULL); | 429 | dev_set_drvdata(&vdev->dev, NULL); |
429 | 430 | ||
430 | unregister_netdev(port->dev); | ||
431 | free_netdev(port->dev); | 431 | free_netdev(port->dev); |
432 | } | 432 | } |
433 | 433 | ||
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 729a7da90b5b..e6222e535019 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c | |||
@@ -1353,9 +1353,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) | |||
1353 | 1353 | ||
1354 | tx_pipe->dma_channel = knav_dma_open_channel(dev, | 1354 | tx_pipe->dma_channel = knav_dma_open_channel(dev, |
1355 | tx_pipe->dma_chan_name, &config); | 1355 | tx_pipe->dma_chan_name, &config); |
1356 | if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) { | 1356 | if (IS_ERR(tx_pipe->dma_channel)) { |
1357 | dev_err(dev, "failed opening tx chan(%s)\n", | 1357 | dev_err(dev, "failed opening tx chan(%s)\n", |
1358 | tx_pipe->dma_chan_name); | 1358 | tx_pipe->dma_chan_name); |
1359 | ret = PTR_ERR(tx_pipe->dma_channel); | ||
1359 | goto err; | 1360 | goto err; |
1360 | } | 1361 | } |
1361 | 1362 | ||
@@ -1673,9 +1674,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev) | |||
1673 | 1674 | ||
1674 | netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, | 1675 | netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, |
1675 | netcp->dma_chan_name, &config); | 1676 | netcp->dma_chan_name, &config); |
1676 | if (IS_ERR_OR_NULL(netcp->rx_channel)) { | 1677 | if (IS_ERR(netcp->rx_channel)) { |
1677 | dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", | 1678 | dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", |
1678 | netcp->dma_chan_name); | 1679 | netcp->dma_chan_name); |
1680 | ret = PTR_ERR(netcp->rx_channel); | ||
1679 | goto fail; | 1681 | goto fail; |
1680 | } | 1682 | } |
1681 | 1683 | ||
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 897176fc5043..dd92950a4615 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c | |||
@@ -2651,7 +2651,6 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) | |||
2651 | case HWTSTAMP_FILTER_NONE: | 2651 | case HWTSTAMP_FILTER_NONE: |
2652 | cpts_rx_enable(cpts, 0); | 2652 | cpts_rx_enable(cpts, 0); |
2653 | break; | 2653 | break; |
2654 | case HWTSTAMP_FILTER_ALL: | ||
2655 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | 2654 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: |
2656 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | 2655 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: |
2657 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | 2656 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index dec5d563ab19..959fd12d2e67 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -1293,7 +1293,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
1293 | if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) | 1293 | if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) |
1294 | goto nla_put_failure; | 1294 | goto nla_put_failure; |
1295 | 1295 | ||
1296 | if (ip_tunnel_info_af(info) == AF_INET) { | 1296 | if (rtnl_dereference(geneve->sock4)) { |
1297 | if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, | 1297 | if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, |
1298 | info->key.u.ipv4.dst)) | 1298 | info->key.u.ipv4.dst)) |
1299 | goto nla_put_failure; | 1299 | goto nla_put_failure; |
@@ -1302,8 +1302,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
1302 | !!(info->key.tun_flags & TUNNEL_CSUM))) | 1302 | !!(info->key.tun_flags & TUNNEL_CSUM))) |
1303 | goto nla_put_failure; | 1303 | goto nla_put_failure; |
1304 | 1304 | ||
1305 | } | ||
1306 | |||
1305 | #if IS_ENABLED(CONFIG_IPV6) | 1307 | #if IS_ENABLED(CONFIG_IPV6) |
1306 | } else { | 1308 | if (rtnl_dereference(geneve->sock6)) { |
1307 | if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, | 1309 | if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, |
1308 | &info->key.u.ipv6.dst)) | 1310 | &info->key.u.ipv6.dst)) |
1309 | goto nla_put_failure; | 1311 | goto nla_put_failure; |
@@ -1315,8 +1317,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
1315 | if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, | 1317 | if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, |
1316 | !geneve->use_udp6_rx_checksums)) | 1318 | !geneve->use_udp6_rx_checksums)) |
1317 | goto nla_put_failure; | 1319 | goto nla_put_failure; |
1318 | #endif | ||
1319 | } | 1320 | } |
1321 | #endif | ||
1320 | 1322 | ||
1321 | if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || | 1323 | if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || |
1322 | nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || | 1324 | nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || |
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 4fea1b3dfbb4..7b652bb7ebe4 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c | |||
@@ -873,7 +873,7 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[]) | |||
873 | 873 | ||
874 | /* Check if there's an existing gtpX device to configure */ | 874 | /* Check if there's an existing gtpX device to configure */ |
875 | dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK])); | 875 | dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK])); |
876 | if (dev->netdev_ops == >p_netdev_ops) | 876 | if (dev && dev->netdev_ops == >p_netdev_ops) |
877 | gtp = netdev_priv(dev); | 877 | gtp = netdev_priv(dev); |
878 | 878 | ||
879 | put_net(net); | 879 | put_net(net); |
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 8716b8c07feb..6f3c805f7211 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c | |||
@@ -1077,7 +1077,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self) | |||
1077 | * are "42101001.sb" or "42101002.sb" | 1077 | * are "42101001.sb" or "42101002.sb" |
1078 | */ | 1078 | */ |
1079 | sprintf(stir421x_fw_name, "4210%4X.sb", | 1079 | sprintf(stir421x_fw_name, "4210%4X.sb", |
1080 | self->usbdev->descriptor.bcdDevice); | 1080 | le16_to_cpu(self->usbdev->descriptor.bcdDevice)); |
1081 | ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev); | 1081 | ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev); |
1082 | if (ret < 0) | 1082 | if (ret < 0) |
1083 | return ret; | 1083 | return ret; |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b34eaaae03fd..346ad2ff3998 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -789,10 +789,12 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) | |||
789 | */ | 789 | */ |
790 | static struct lock_class_key macvlan_netdev_addr_lock_key; | 790 | static struct lock_class_key macvlan_netdev_addr_lock_key; |
791 | 791 | ||
792 | #define ALWAYS_ON_FEATURES \ | 792 | #define ALWAYS_ON_OFFLOADS \ |
793 | (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \ | 793 | (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ |
794 | NETIF_F_GSO_ROBUST) | 794 | NETIF_F_GSO_ROBUST) |
795 | 795 | ||
796 | #define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX) | ||
797 | |||
796 | #define MACVLAN_FEATURES \ | 798 | #define MACVLAN_FEATURES \ |
797 | (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ | 799 | (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ |
798 | NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ | 800 | NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ |
@@ -827,6 +829,7 @@ static int macvlan_init(struct net_device *dev) | |||
827 | dev->features |= ALWAYS_ON_FEATURES; | 829 | dev->features |= ALWAYS_ON_FEATURES; |
828 | dev->hw_features |= NETIF_F_LRO; | 830 | dev->hw_features |= NETIF_F_LRO; |
829 | dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; | 831 | dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; |
832 | dev->vlan_features |= ALWAYS_ON_OFFLOADS; | ||
830 | dev->gso_max_size = lowerdev->gso_max_size; | 833 | dev->gso_max_size = lowerdev->gso_max_size; |
831 | dev->gso_max_segs = lowerdev->gso_max_segs; | 834 | dev->gso_max_segs = lowerdev->gso_max_segs; |
832 | dev->hard_header_len = lowerdev->hard_header_len; | 835 | dev->hard_header_len = lowerdev->hard_header_len; |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 60ffc9da6a28..c360dd6ead22 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -108,7 +108,7 @@ config MDIO_MOXART | |||
108 | config MDIO_OCTEON | 108 | config MDIO_OCTEON |
109 | tristate "Octeon and some ThunderX SOCs MDIO buses" | 109 | tristate "Octeon and some ThunderX SOCs MDIO buses" |
110 | depends on 64BIT | 110 | depends on 64BIT |
111 | depends on HAS_IOMEM | 111 | depends on HAS_IOMEM && OF_MDIO |
112 | select MDIO_CAVIUM | 112 | select MDIO_CAVIUM |
113 | help | 113 | help |
114 | This module provides a driver for the Octeon and ThunderX MDIO | 114 | This module provides a driver for the Octeon and ThunderX MDIO |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 272b051a0199..9097e42bec2e 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -255,34 +255,6 @@ static int marvell_config_aneg(struct phy_device *phydev) | |||
255 | { | 255 | { |
256 | int err; | 256 | int err; |
257 | 257 | ||
258 | /* The Marvell PHY has an errata which requires | ||
259 | * that certain registers get written in order | ||
260 | * to restart autonegotiation */ | ||
261 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); | ||
262 | |||
263 | if (err < 0) | ||
264 | return err; | ||
265 | |||
266 | err = phy_write(phydev, 0x1d, 0x1f); | ||
267 | if (err < 0) | ||
268 | return err; | ||
269 | |||
270 | err = phy_write(phydev, 0x1e, 0x200c); | ||
271 | if (err < 0) | ||
272 | return err; | ||
273 | |||
274 | err = phy_write(phydev, 0x1d, 0x5); | ||
275 | if (err < 0) | ||
276 | return err; | ||
277 | |||
278 | err = phy_write(phydev, 0x1e, 0); | ||
279 | if (err < 0) | ||
280 | return err; | ||
281 | |||
282 | err = phy_write(phydev, 0x1e, 0x100); | ||
283 | if (err < 0) | ||
284 | return err; | ||
285 | |||
286 | err = marvell_set_polarity(phydev, phydev->mdix_ctrl); | 258 | err = marvell_set_polarity(phydev, phydev->mdix_ctrl); |
287 | if (err < 0) | 259 | if (err < 0) |
288 | return err; | 260 | return err; |
@@ -316,6 +288,42 @@ static int marvell_config_aneg(struct phy_device *phydev) | |||
316 | return 0; | 288 | return 0; |
317 | } | 289 | } |
318 | 290 | ||
291 | static int m88e1101_config_aneg(struct phy_device *phydev) | ||
292 | { | ||
293 | int err; | ||
294 | |||
295 | /* This Marvell PHY has an errata which requires | ||
296 | * that certain registers get written in order | ||
297 | * to restart autonegotiation | ||
298 | */ | ||
299 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); | ||
300 | |||
301 | if (err < 0) | ||
302 | return err; | ||
303 | |||
304 | err = phy_write(phydev, 0x1d, 0x1f); | ||
305 | if (err < 0) | ||
306 | return err; | ||
307 | |||
308 | err = phy_write(phydev, 0x1e, 0x200c); | ||
309 | if (err < 0) | ||
310 | return err; | ||
311 | |||
312 | err = phy_write(phydev, 0x1d, 0x5); | ||
313 | if (err < 0) | ||
314 | return err; | ||
315 | |||
316 | err = phy_write(phydev, 0x1e, 0); | ||
317 | if (err < 0) | ||
318 | return err; | ||
319 | |||
320 | err = phy_write(phydev, 0x1e, 0x100); | ||
321 | if (err < 0) | ||
322 | return err; | ||
323 | |||
324 | return marvell_config_aneg(phydev); | ||
325 | } | ||
326 | |||
319 | static int m88e1111_config_aneg(struct phy_device *phydev) | 327 | static int m88e1111_config_aneg(struct phy_device *phydev) |
320 | { | 328 | { |
321 | int err; | 329 | int err; |
@@ -1892,7 +1900,7 @@ static struct phy_driver marvell_drivers[] = { | |||
1892 | .flags = PHY_HAS_INTERRUPT, | 1900 | .flags = PHY_HAS_INTERRUPT, |
1893 | .probe = marvell_probe, | 1901 | .probe = marvell_probe, |
1894 | .config_init = &marvell_config_init, | 1902 | .config_init = &marvell_config_init, |
1895 | .config_aneg = &marvell_config_aneg, | 1903 | .config_aneg = &m88e1101_config_aneg, |
1896 | .read_status = &genphy_read_status, | 1904 | .read_status = &genphy_read_status, |
1897 | .ack_interrupt = &marvell_ack_interrupt, | 1905 | .ack_interrupt = &marvell_ack_interrupt, |
1898 | .config_intr = &marvell_config_intr, | 1906 | .config_intr = &marvell_config_intr, |
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 963838d4fac1..599ce24c514f 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c | |||
@@ -122,10 +122,9 @@ int mdio_mux_init(struct device *dev, | |||
122 | pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); | 122 | pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); |
123 | if (pb == NULL) { | 123 | if (pb == NULL) { |
124 | ret_val = -ENOMEM; | 124 | ret_val = -ENOMEM; |
125 | goto err_parent_bus; | 125 | goto err_pb_kz; |
126 | } | 126 | } |
127 | 127 | ||
128 | |||
129 | pb->switch_data = data; | 128 | pb->switch_data = data; |
130 | pb->switch_fn = switch_fn; | 129 | pb->switch_fn = switch_fn; |
131 | pb->current_child = -1; | 130 | pb->current_child = -1; |
@@ -154,6 +153,7 @@ int mdio_mux_init(struct device *dev, | |||
154 | cb->mii_bus = mdiobus_alloc(); | 153 | cb->mii_bus = mdiobus_alloc(); |
155 | if (!cb->mii_bus) { | 154 | if (!cb->mii_bus) { |
156 | ret_val = -ENOMEM; | 155 | ret_val = -ENOMEM; |
156 | devm_kfree(dev, cb); | ||
157 | of_node_put(child_bus_node); | 157 | of_node_put(child_bus_node); |
158 | break; | 158 | break; |
159 | } | 159 | } |
@@ -170,7 +170,6 @@ int mdio_mux_init(struct device *dev, | |||
170 | mdiobus_free(cb->mii_bus); | 170 | mdiobus_free(cb->mii_bus); |
171 | devm_kfree(dev, cb); | 171 | devm_kfree(dev, cb); |
172 | } else { | 172 | } else { |
173 | of_node_get(child_bus_node); | ||
174 | cb->next = pb->children; | 173 | cb->next = pb->children; |
175 | pb->children = cb; | 174 | pb->children = cb; |
176 | } | 175 | } |
@@ -181,9 +180,11 @@ int mdio_mux_init(struct device *dev, | |||
181 | return 0; | 180 | return 0; |
182 | } | 181 | } |
183 | 182 | ||
183 | devm_kfree(dev, pb); | ||
184 | err_pb_kz: | ||
184 | /* balance the reference of_mdio_find_bus() took */ | 185 | /* balance the reference of_mdio_find_bus() took */ |
185 | put_device(&pb->mii_bus->dev); | 186 | if (!mux_bus) |
186 | 187 | put_device(&parent_bus->dev); | |
187 | err_parent_bus: | 188 | err_parent_bus: |
188 | of_node_put(parent_bus_node); | 189 | of_node_put(parent_bus_node); |
189 | return ret_val; | 190 | return ret_val; |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index a898e5c4ef1b..8e73f5f36e71 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -364,9 +364,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) | |||
364 | 364 | ||
365 | mutex_init(&bus->mdio_lock); | 365 | mutex_init(&bus->mdio_lock); |
366 | 366 | ||
367 | if (bus->reset) | ||
368 | bus->reset(bus); | ||
369 | |||
370 | /* de-assert bus level PHY GPIO resets */ | 367 | /* de-assert bus level PHY GPIO resets */ |
371 | if (bus->num_reset_gpios > 0) { | 368 | if (bus->num_reset_gpios > 0) { |
372 | bus->reset_gpiod = devm_kcalloc(&bus->dev, | 369 | bus->reset_gpiod = devm_kcalloc(&bus->dev, |
@@ -396,6 +393,9 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) | |||
396 | } | 393 | } |
397 | } | 394 | } |
398 | 395 | ||
396 | if (bus->reset) | ||
397 | bus->reset(bus); | ||
398 | |||
399 | for (i = 0; i < PHY_MAX_ADDR; i++) { | 399 | for (i = 0; i < PHY_MAX_ADDR; i++) { |
400 | if ((bus->phy_mask & (1 << i)) == 0) { | 400 | if ((bus->phy_mask & (1 << i)) == 0) { |
401 | struct phy_device *phydev; | 401 | struct phy_device *phydev; |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index f3ae88fdf332..8ab281b478f2 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -310,6 +310,26 @@ skip: | |||
310 | return -ENODEV; | 310 | return -ENODEV; |
311 | } | 311 | } |
312 | 312 | ||
313 | return 0; | ||
314 | |||
315 | bad_desc: | ||
316 | dev_info(&dev->udev->dev, "bad CDC descriptors\n"); | ||
317 | return -ENODEV; | ||
318 | } | ||
319 | EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); | ||
320 | |||
321 | |||
322 | /* like usbnet_generic_cdc_bind() but handles filter initialization | ||
323 | * correctly | ||
324 | */ | ||
325 | int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf) | ||
326 | { | ||
327 | int rv; | ||
328 | |||
329 | rv = usbnet_generic_cdc_bind(dev, intf); | ||
330 | if (rv < 0) | ||
331 | goto bail_out; | ||
332 | |||
313 | /* Some devices don't initialise properly. In particular | 333 | /* Some devices don't initialise properly. In particular |
314 | * the packet filter is not reset. There are devices that | 334 | * the packet filter is not reset. There are devices that |
315 | * don't do reset all the way. So the packet filter should | 335 | * don't do reset all the way. So the packet filter should |
@@ -317,13 +337,10 @@ skip: | |||
317 | */ | 337 | */ |
318 | usbnet_cdc_update_filter(dev); | 338 | usbnet_cdc_update_filter(dev); |
319 | 339 | ||
320 | return 0; | 340 | bail_out: |
321 | 341 | return rv; | |
322 | bad_desc: | ||
323 | dev_info(&dev->udev->dev, "bad CDC descriptors\n"); | ||
324 | return -ENODEV; | ||
325 | } | 342 | } |
326 | EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); | 343 | EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind); |
327 | 344 | ||
328 | void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf) | 345 | void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf) |
329 | { | 346 | { |
@@ -417,7 +434,7 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf) | |||
417 | BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) | 434 | BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) |
418 | < sizeof(struct cdc_state))); | 435 | < sizeof(struct cdc_state))); |
419 | 436 | ||
420 | status = usbnet_generic_cdc_bind(dev, intf); | 437 | status = usbnet_ether_cdc_bind(dev, intf); |
421 | if (status < 0) | 438 | if (status < 0) |
422 | return status; | 439 | return status; |
423 | 440 | ||
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c index c4f1c363e24b..9df3c1ffff35 100644 --- a/drivers/net/usb/ch9200.c +++ b/drivers/net/usb/ch9200.c | |||
@@ -310,8 +310,8 @@ static int get_mac_address(struct usbnet *dev, unsigned char *data) | |||
310 | int rd_mac_len = 0; | 310 | int rd_mac_len = 0; |
311 | 311 | ||
312 | netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", | 312 | netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", |
313 | dev->udev->descriptor.idVendor, | 313 | le16_to_cpu(dev->udev->descriptor.idVendor), |
314 | dev->udev->descriptor.idProduct); | 314 | le16_to_cpu(dev->udev->descriptor.idProduct)); |
315 | 315 | ||
316 | memset(mac_addr, 0, sizeof(mac_addr)); | 316 | memset(mac_addr, 0, sizeof(mac_addr)); |
317 | rd_mac_len = control_read(dev, REQUEST_READ, 0, | 317 | rd_mac_len = control_read(dev, REQUEST_READ, 0, |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index d7165767ca9d..8f923a147fa9 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1196,6 +1196,8 @@ static const struct usb_device_id products[] = { | |||
1196 | {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ | 1196 | {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ |
1197 | {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ | 1197 | {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ |
1198 | {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ | 1198 | {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ |
1199 | {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ | ||
1200 | {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ | ||
1199 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ | 1201 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ |
1200 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ | 1202 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ |
1201 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ | 1203 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 765400b62168..2dfca96a63b6 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -681,7 +681,7 @@ static int smsc95xx_set_features(struct net_device *netdev, | |||
681 | if (ret < 0) | 681 | if (ret < 0) |
682 | return ret; | 682 | return ret; |
683 | 683 | ||
684 | if (features & NETIF_F_HW_CSUM) | 684 | if (features & NETIF_F_IP_CSUM) |
685 | read_buf |= Tx_COE_EN_; | 685 | read_buf |= Tx_COE_EN_; |
686 | else | 686 | else |
687 | read_buf &= ~Tx_COE_EN_; | 687 | read_buf &= ~Tx_COE_EN_; |
@@ -1279,12 +1279,19 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) | |||
1279 | 1279 | ||
1280 | spin_lock_init(&pdata->mac_cr_lock); | 1280 | spin_lock_init(&pdata->mac_cr_lock); |
1281 | 1281 | ||
1282 | /* LAN95xx devices do not alter the computed checksum of 0 to 0xffff. | ||
1283 | * RFC 2460, ipv6 UDP calculated checksum yields a result of zero must | ||
1284 | * be changed to 0xffff. RFC 768, ipv4 UDP computed checksum is zero, | ||
1285 | * it is transmitted as all ones. The zero transmitted checksum means | ||
1286 | * transmitter generated no checksum. Hence, enable csum offload only | ||
1287 | * for ipv4 packets. | ||
1288 | */ | ||
1282 | if (DEFAULT_TX_CSUM_ENABLE) | 1289 | if (DEFAULT_TX_CSUM_ENABLE) |
1283 | dev->net->features |= NETIF_F_HW_CSUM; | 1290 | dev->net->features |= NETIF_F_IP_CSUM; |
1284 | if (DEFAULT_RX_CSUM_ENABLE) | 1291 | if (DEFAULT_RX_CSUM_ENABLE) |
1285 | dev->net->features |= NETIF_F_RXCSUM; | 1292 | dev->net->features |= NETIF_F_RXCSUM; |
1286 | 1293 | ||
1287 | dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM; | 1294 | dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; |
1288 | 1295 | ||
1289 | smsc95xx_init_mac_address(dev); | 1296 | smsc95xx_init_mac_address(dev); |
1290 | 1297 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 9320d96a1632..3e9246cc49c3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -1989,6 +1989,7 @@ static const struct net_device_ops virtnet_netdev = { | |||
1989 | .ndo_poll_controller = virtnet_netpoll, | 1989 | .ndo_poll_controller = virtnet_netpoll, |
1990 | #endif | 1990 | #endif |
1991 | .ndo_xdp = virtnet_xdp, | 1991 | .ndo_xdp = virtnet_xdp, |
1992 | .ndo_features_check = passthru_features_check, | ||
1992 | }; | 1993 | }; |
1993 | 1994 | ||
1994 | static void virtnet_config_changed_work(struct work_struct *work) | 1995 | static void virtnet_config_changed_work(struct work_struct *work) |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 25bc764ae7dc..d1c7029ded7c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -2962,6 +2962,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter) | |||
2962 | /* we need to enable NAPI, otherwise dev_close will deadlock */ | 2962 | /* we need to enable NAPI, otherwise dev_close will deadlock */ |
2963 | for (i = 0; i < adapter->num_rx_queues; i++) | 2963 | for (i = 0; i < adapter->num_rx_queues; i++) |
2964 | napi_enable(&adapter->rx_queue[i].napi); | 2964 | napi_enable(&adapter->rx_queue[i].napi); |
2965 | /* | ||
2966 | * Need to clear the quiesce bit to ensure that vmxnet3_close | ||
2967 | * can quiesce the device properly | ||
2968 | */ | ||
2969 | clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); | ||
2965 | dev_close(adapter->netdev); | 2970 | dev_close(adapter->netdev); |
2966 | } | 2971 | } |
2967 | 2972 | ||
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ceda5861da78..db882493875c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
@@ -989,6 +989,7 @@ static u32 vrf_fib_table(const struct net_device *dev) | |||
989 | 989 | ||
990 | static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | 990 | static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
991 | { | 991 | { |
992 | kfree_skb(skb); | ||
992 | return 0; | 993 | return 0; |
993 | } | 994 | } |
994 | 995 | ||
@@ -998,7 +999,7 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook, | |||
998 | { | 999 | { |
999 | struct net *net = dev_net(dev); | 1000 | struct net *net = dev_net(dev); |
1000 | 1001 | ||
1001 | if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0) | 1002 | if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1) |
1002 | skb = NULL; /* kfree_skb(skb) handled by nf code */ | 1003 | skb = NULL; /* kfree_skb(skb) handled by nf code */ |
1003 | 1004 | ||
1004 | return skb; | 1005 | return skb; |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 6ffc482550c1..7b61adb6270c 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -1934,8 +1934,7 @@ abort_transaction_no_dev_fatal: | |||
1934 | xennet_disconnect_backend(info); | 1934 | xennet_disconnect_backend(info); |
1935 | xennet_destroy_queues(info); | 1935 | xennet_destroy_queues(info); |
1936 | out: | 1936 | out: |
1937 | unregister_netdev(info->netdev); | 1937 | device_unregister(&dev->dev); |
1938 | xennet_free_netdev(info->netdev); | ||
1939 | return err; | 1938 | return err; |
1940 | } | 1939 | } |
1941 | 1940 | ||
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index d5e0906262ea..a60926410438 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -925,6 +925,29 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
925 | } | 925 | } |
926 | 926 | ||
927 | #ifdef CONFIG_BLK_DEV_INTEGRITY | 927 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
928 | static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id, | ||
929 | u16 bs) | ||
930 | { | ||
931 | struct nvme_ns *ns = disk->private_data; | ||
932 | u16 old_ms = ns->ms; | ||
933 | u8 pi_type = 0; | ||
934 | |||
935 | ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); | ||
936 | ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); | ||
937 | |||
938 | /* PI implementation requires metadata equal t10 pi tuple size */ | ||
939 | if (ns->ms == sizeof(struct t10_pi_tuple)) | ||
940 | pi_type = id->dps & NVME_NS_DPS_PI_MASK; | ||
941 | |||
942 | if (blk_get_integrity(disk) && | ||
943 | (ns->pi_type != pi_type || ns->ms != old_ms || | ||
944 | bs != queue_logical_block_size(disk->queue) || | ||
945 | (ns->ms && ns->ext))) | ||
946 | blk_integrity_unregister(disk); | ||
947 | |||
948 | ns->pi_type = pi_type; | ||
949 | } | ||
950 | |||
928 | static void nvme_init_integrity(struct nvme_ns *ns) | 951 | static void nvme_init_integrity(struct nvme_ns *ns) |
929 | { | 952 | { |
930 | struct blk_integrity integrity; | 953 | struct blk_integrity integrity; |
@@ -951,6 +974,10 @@ static void nvme_init_integrity(struct nvme_ns *ns) | |||
951 | blk_queue_max_integrity_segments(ns->queue, 1); | 974 | blk_queue_max_integrity_segments(ns->queue, 1); |
952 | } | 975 | } |
953 | #else | 976 | #else |
977 | static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id, | ||
978 | u16 bs) | ||
979 | { | ||
980 | } | ||
954 | static void nvme_init_integrity(struct nvme_ns *ns) | 981 | static void nvme_init_integrity(struct nvme_ns *ns) |
955 | { | 982 | { |
956 | } | 983 | } |
@@ -997,37 +1024,22 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id) | |||
997 | static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) | 1024 | static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) |
998 | { | 1025 | { |
999 | struct nvme_ns *ns = disk->private_data; | 1026 | struct nvme_ns *ns = disk->private_data; |
1000 | u8 lbaf, pi_type; | 1027 | u16 bs; |
1001 | u16 old_ms; | ||
1002 | unsigned short bs; | ||
1003 | |||
1004 | old_ms = ns->ms; | ||
1005 | lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; | ||
1006 | ns->lba_shift = id->lbaf[lbaf].ds; | ||
1007 | ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); | ||
1008 | ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); | ||
1009 | 1028 | ||
1010 | /* | 1029 | /* |
1011 | * If identify namespace failed, use default 512 byte block size so | 1030 | * If identify namespace failed, use default 512 byte block size so |
1012 | * block layer can use before failing read/write for 0 capacity. | 1031 | * block layer can use before failing read/write for 0 capacity. |
1013 | */ | 1032 | */ |
1033 | ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds; | ||
1014 | if (ns->lba_shift == 0) | 1034 | if (ns->lba_shift == 0) |
1015 | ns->lba_shift = 9; | 1035 | ns->lba_shift = 9; |
1016 | bs = 1 << ns->lba_shift; | 1036 | bs = 1 << ns->lba_shift; |
1017 | /* XXX: PI implementation requires metadata equal t10 pi tuple size */ | ||
1018 | pi_type = ns->ms == sizeof(struct t10_pi_tuple) ? | ||
1019 | id->dps & NVME_NS_DPS_PI_MASK : 0; | ||
1020 | 1037 | ||
1021 | blk_mq_freeze_queue(disk->queue); | 1038 | blk_mq_freeze_queue(disk->queue); |
1022 | if (blk_get_integrity(disk) && (ns->pi_type != pi_type || | ||
1023 | ns->ms != old_ms || | ||
1024 | bs != queue_logical_block_size(disk->queue) || | ||
1025 | (ns->ms && ns->ext))) | ||
1026 | blk_integrity_unregister(disk); | ||
1027 | 1039 | ||
1028 | ns->pi_type = pi_type; | 1040 | if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) |
1041 | nvme_prep_integrity(disk, id, bs); | ||
1029 | blk_queue_logical_block_size(ns->queue, bs); | 1042 | blk_queue_logical_block_size(ns->queue, bs); |
1030 | |||
1031 | if (ns->ms && !blk_get_integrity(disk) && !ns->ext) | 1043 | if (ns->ms && !blk_get_integrity(disk) && !ns->ext) |
1032 | nvme_init_integrity(ns); | 1044 | nvme_init_integrity(ns); |
1033 | if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk)) | 1045 | if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk)) |
@@ -1605,7 +1617,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) | |||
1605 | } | 1617 | } |
1606 | memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); | 1618 | memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); |
1607 | 1619 | ||
1608 | if (ctrl->ops->is_fabrics) { | 1620 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
1609 | ctrl->icdoff = le16_to_cpu(id->icdoff); | 1621 | ctrl->icdoff = le16_to_cpu(id->icdoff); |
1610 | ctrl->ioccsz = le32_to_cpu(id->ioccsz); | 1622 | ctrl->ioccsz = le32_to_cpu(id->ioccsz); |
1611 | ctrl->iorcsz = le32_to_cpu(id->iorcsz); | 1623 | ctrl->iorcsz = le32_to_cpu(id->iorcsz); |
@@ -2098,7 +2110,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) | |||
2098 | if (ns->ndev) | 2110 | if (ns->ndev) |
2099 | nvme_nvm_unregister_sysfs(ns); | 2111 | nvme_nvm_unregister_sysfs(ns); |
2100 | del_gendisk(ns->disk); | 2112 | del_gendisk(ns->disk); |
2101 | blk_mq_abort_requeue_list(ns->queue); | ||
2102 | blk_cleanup_queue(ns->queue); | 2113 | blk_cleanup_queue(ns->queue); |
2103 | } | 2114 | } |
2104 | 2115 | ||
@@ -2436,8 +2447,16 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) | |||
2436 | continue; | 2447 | continue; |
2437 | revalidate_disk(ns->disk); | 2448 | revalidate_disk(ns->disk); |
2438 | blk_set_queue_dying(ns->queue); | 2449 | blk_set_queue_dying(ns->queue); |
2439 | blk_mq_abort_requeue_list(ns->queue); | 2450 | |
2440 | blk_mq_start_stopped_hw_queues(ns->queue, true); | 2451 | /* |
2452 | * Forcibly start all queues to avoid having stuck requests. | ||
2453 | * Note that we must ensure the queues are not stopped | ||
2454 | * when the final removal happens. | ||
2455 | */ | ||
2456 | blk_mq_start_hw_queues(ns->queue); | ||
2457 | |||
2458 | /* draining requests in requeue list */ | ||
2459 | blk_mq_kick_requeue_list(ns->queue); | ||
2441 | } | 2460 | } |
2442 | mutex_unlock(&ctrl->namespaces_mutex); | 2461 | mutex_unlock(&ctrl->namespaces_mutex); |
2443 | } | 2462 | } |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 70e689bf1cad..5b14cbefb724 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
@@ -45,8 +45,6 @@ enum nvme_fc_queue_flags { | |||
45 | 45 | ||
46 | #define NVMEFC_QUEUE_DELAY 3 /* ms units */ | 46 | #define NVMEFC_QUEUE_DELAY 3 /* ms units */ |
47 | 47 | ||
48 | #define NVME_FC_MAX_CONNECT_ATTEMPTS 1 | ||
49 | |||
50 | struct nvme_fc_queue { | 48 | struct nvme_fc_queue { |
51 | struct nvme_fc_ctrl *ctrl; | 49 | struct nvme_fc_ctrl *ctrl; |
52 | struct device *dev; | 50 | struct device *dev; |
@@ -165,8 +163,6 @@ struct nvme_fc_ctrl { | |||
165 | struct work_struct delete_work; | 163 | struct work_struct delete_work; |
166 | struct work_struct reset_work; | 164 | struct work_struct reset_work; |
167 | struct delayed_work connect_work; | 165 | struct delayed_work connect_work; |
168 | int reconnect_delay; | ||
169 | int connect_attempts; | ||
170 | 166 | ||
171 | struct kref ref; | 167 | struct kref ref; |
172 | u32 flags; | 168 | u32 flags; |
@@ -1376,9 +1372,9 @@ done: | |||
1376 | complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); | 1372 | complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); |
1377 | if (!complete_rq) { | 1373 | if (!complete_rq) { |
1378 | if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) { | 1374 | if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) { |
1379 | status = cpu_to_le16(NVME_SC_ABORT_REQ); | 1375 | status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); |
1380 | if (blk_queue_dying(rq->q)) | 1376 | if (blk_queue_dying(rq->q)) |
1381 | status |= cpu_to_le16(NVME_SC_DNR); | 1377 | status |= cpu_to_le16(NVME_SC_DNR << 1); |
1382 | } | 1378 | } |
1383 | nvme_end_request(rq, status, result); | 1379 | nvme_end_request(rq, status, result); |
1384 | } else | 1380 | } else |
@@ -1751,9 +1747,13 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) | |||
1751 | dev_warn(ctrl->ctrl.device, | 1747 | dev_warn(ctrl->ctrl.device, |
1752 | "NVME-FC{%d}: transport association error detected: %s\n", | 1748 | "NVME-FC{%d}: transport association error detected: %s\n", |
1753 | ctrl->cnum, errmsg); | 1749 | ctrl->cnum, errmsg); |
1754 | dev_info(ctrl->ctrl.device, | 1750 | dev_warn(ctrl->ctrl.device, |
1755 | "NVME-FC{%d}: resetting controller\n", ctrl->cnum); | 1751 | "NVME-FC{%d}: resetting controller\n", ctrl->cnum); |
1756 | 1752 | ||
1753 | /* stop the queues on error, cleanup is in reset thread */ | ||
1754 | if (ctrl->queue_count > 1) | ||
1755 | nvme_stop_queues(&ctrl->ctrl); | ||
1756 | |||
1757 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { | 1757 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { |
1758 | dev_err(ctrl->ctrl.device, | 1758 | dev_err(ctrl->ctrl.device, |
1759 | "NVME-FC{%d}: error_recovery: Couldn't change state " | 1759 | "NVME-FC{%d}: error_recovery: Couldn't change state " |
@@ -2191,9 +2191,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) | |||
2191 | if (!opts->nr_io_queues) | 2191 | if (!opts->nr_io_queues) |
2192 | return 0; | 2192 | return 0; |
2193 | 2193 | ||
2194 | dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", | ||
2195 | opts->nr_io_queues); | ||
2196 | |||
2197 | nvme_fc_init_io_queues(ctrl); | 2194 | nvme_fc_init_io_queues(ctrl); |
2198 | 2195 | ||
2199 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); | 2196 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); |
@@ -2264,9 +2261,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) | |||
2264 | if (ctrl->queue_count == 1) | 2261 | if (ctrl->queue_count == 1) |
2265 | return 0; | 2262 | return 0; |
2266 | 2263 | ||
2267 | dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n", | ||
2268 | opts->nr_io_queues); | ||
2269 | |||
2270 | nvme_fc_init_io_queues(ctrl); | 2264 | nvme_fc_init_io_queues(ctrl); |
2271 | 2265 | ||
2272 | ret = blk_mq_reinit_tagset(&ctrl->tag_set); | 2266 | ret = blk_mq_reinit_tagset(&ctrl->tag_set); |
@@ -2302,7 +2296,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2302 | int ret; | 2296 | int ret; |
2303 | bool changed; | 2297 | bool changed; |
2304 | 2298 | ||
2305 | ctrl->connect_attempts++; | 2299 | ++ctrl->ctrl.opts->nr_reconnects; |
2306 | 2300 | ||
2307 | /* | 2301 | /* |
2308 | * Create the admin queue | 2302 | * Create the admin queue |
@@ -2399,9 +2393,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2399 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); | 2393 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); |
2400 | WARN_ON_ONCE(!changed); | 2394 | WARN_ON_ONCE(!changed); |
2401 | 2395 | ||
2402 | ctrl->connect_attempts = 0; | 2396 | ctrl->ctrl.opts->nr_reconnects = 0; |
2403 | |||
2404 | kref_get(&ctrl->ctrl.kref); | ||
2405 | 2397 | ||
2406 | if (ctrl->queue_count > 1) { | 2398 | if (ctrl->queue_count > 1) { |
2407 | nvme_start_queues(&ctrl->ctrl); | 2399 | nvme_start_queues(&ctrl->ctrl); |
@@ -2532,26 +2524,32 @@ nvme_fc_delete_ctrl_work(struct work_struct *work) | |||
2532 | 2524 | ||
2533 | /* | 2525 | /* |
2534 | * tear down the controller | 2526 | * tear down the controller |
2535 | * This will result in the last reference on the nvme ctrl to | 2527 | * After the last reference on the nvme ctrl is removed, |
2536 | * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback. | 2528 | * the transport nvme_fc_nvme_ctrl_freed() callback will be |
2537 | * From there, the transport will tear down it's logical queues and | 2529 | * invoked. From there, the transport will tear down it's |
2538 | * association. | 2530 | * logical queues and association. |
2539 | */ | 2531 | */ |
2540 | nvme_uninit_ctrl(&ctrl->ctrl); | 2532 | nvme_uninit_ctrl(&ctrl->ctrl); |
2541 | 2533 | ||
2542 | nvme_put_ctrl(&ctrl->ctrl); | 2534 | nvme_put_ctrl(&ctrl->ctrl); |
2543 | } | 2535 | } |
2544 | 2536 | ||
2545 | static int | 2537 | static bool |
2546 | __nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl) | 2538 | __nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl) |
2547 | { | 2539 | { |
2548 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) | 2540 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) |
2549 | return -EBUSY; | 2541 | return true; |
2550 | 2542 | ||
2551 | if (!queue_work(nvme_fc_wq, &ctrl->delete_work)) | 2543 | if (!queue_work(nvme_fc_wq, &ctrl->delete_work)) |
2552 | return -EBUSY; | 2544 | return true; |
2553 | 2545 | ||
2554 | return 0; | 2546 | return false; |
2547 | } | ||
2548 | |||
2549 | static int | ||
2550 | __nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl) | ||
2551 | { | ||
2552 | return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0; | ||
2555 | } | 2553 | } |
2556 | 2554 | ||
2557 | /* | 2555 | /* |
@@ -2577,6 +2575,35 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl) | |||
2577 | } | 2575 | } |
2578 | 2576 | ||
2579 | static void | 2577 | static void |
2578 | nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) | ||
2579 | { | ||
2580 | /* If we are resetting/deleting then do nothing */ | ||
2581 | if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) { | ||
2582 | WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || | ||
2583 | ctrl->ctrl.state == NVME_CTRL_LIVE); | ||
2584 | return; | ||
2585 | } | ||
2586 | |||
2587 | dev_info(ctrl->ctrl.device, | ||
2588 | "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", | ||
2589 | ctrl->cnum, status); | ||
2590 | |||
2591 | if (nvmf_should_reconnect(&ctrl->ctrl)) { | ||
2592 | dev_info(ctrl->ctrl.device, | ||
2593 | "NVME-FC{%d}: Reconnect attempt in %d seconds.\n", | ||
2594 | ctrl->cnum, ctrl->ctrl.opts->reconnect_delay); | ||
2595 | queue_delayed_work(nvme_fc_wq, &ctrl->connect_work, | ||
2596 | ctrl->ctrl.opts->reconnect_delay * HZ); | ||
2597 | } else { | ||
2598 | dev_warn(ctrl->ctrl.device, | ||
2599 | "NVME-FC{%d}: Max reconnect attempts (%d) " | ||
2600 | "reached. Removing controller\n", | ||
2601 | ctrl->cnum, ctrl->ctrl.opts->nr_reconnects); | ||
2602 | WARN_ON(__nvme_fc_schedule_delete_work(ctrl)); | ||
2603 | } | ||
2604 | } | ||
2605 | |||
2606 | static void | ||
2580 | nvme_fc_reset_ctrl_work(struct work_struct *work) | 2607 | nvme_fc_reset_ctrl_work(struct work_struct *work) |
2581 | { | 2608 | { |
2582 | struct nvme_fc_ctrl *ctrl = | 2609 | struct nvme_fc_ctrl *ctrl = |
@@ -2587,34 +2614,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work) | |||
2587 | nvme_fc_delete_association(ctrl); | 2614 | nvme_fc_delete_association(ctrl); |
2588 | 2615 | ||
2589 | ret = nvme_fc_create_association(ctrl); | 2616 | ret = nvme_fc_create_association(ctrl); |
2590 | if (ret) { | 2617 | if (ret) |
2591 | dev_warn(ctrl->ctrl.device, | 2618 | nvme_fc_reconnect_or_delete(ctrl, ret); |
2592 | "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", | 2619 | else |
2593 | ctrl->cnum, ret); | ||
2594 | if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) { | ||
2595 | dev_warn(ctrl->ctrl.device, | ||
2596 | "NVME-FC{%d}: Max reconnect attempts (%d) " | ||
2597 | "reached. Removing controller\n", | ||
2598 | ctrl->cnum, ctrl->connect_attempts); | ||
2599 | |||
2600 | if (!nvme_change_ctrl_state(&ctrl->ctrl, | ||
2601 | NVME_CTRL_DELETING)) { | ||
2602 | dev_err(ctrl->ctrl.device, | ||
2603 | "NVME-FC{%d}: failed to change state " | ||
2604 | "to DELETING\n", ctrl->cnum); | ||
2605 | return; | ||
2606 | } | ||
2607 | |||
2608 | WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work)); | ||
2609 | return; | ||
2610 | } | ||
2611 | |||
2612 | dev_warn(ctrl->ctrl.device, | ||
2613 | "NVME-FC{%d}: Reconnect attempt in %d seconds.\n", | ||
2614 | ctrl->cnum, ctrl->reconnect_delay); | ||
2615 | queue_delayed_work(nvme_fc_wq, &ctrl->connect_work, | ||
2616 | ctrl->reconnect_delay * HZ); | ||
2617 | } else | ||
2618 | dev_info(ctrl->ctrl.device, | 2620 | dev_info(ctrl->ctrl.device, |
2619 | "NVME-FC{%d}: controller reset complete\n", ctrl->cnum); | 2621 | "NVME-FC{%d}: controller reset complete\n", ctrl->cnum); |
2620 | } | 2622 | } |
@@ -2628,7 +2630,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl) | |||
2628 | { | 2630 | { |
2629 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | 2631 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); |
2630 | 2632 | ||
2631 | dev_warn(ctrl->ctrl.device, | 2633 | dev_info(ctrl->ctrl.device, |
2632 | "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum); | 2634 | "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum); |
2633 | 2635 | ||
2634 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) | 2636 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) |
@@ -2645,7 +2647,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl) | |||
2645 | static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { | 2647 | static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { |
2646 | .name = "fc", | 2648 | .name = "fc", |
2647 | .module = THIS_MODULE, | 2649 | .module = THIS_MODULE, |
2648 | .is_fabrics = true, | 2650 | .flags = NVME_F_FABRICS, |
2649 | .reg_read32 = nvmf_reg_read32, | 2651 | .reg_read32 = nvmf_reg_read32, |
2650 | .reg_read64 = nvmf_reg_read64, | 2652 | .reg_read64 = nvmf_reg_read64, |
2651 | .reg_write32 = nvmf_reg_write32, | 2653 | .reg_write32 = nvmf_reg_write32, |
@@ -2667,34 +2669,9 @@ nvme_fc_connect_ctrl_work(struct work_struct *work) | |||
2667 | struct nvme_fc_ctrl, connect_work); | 2669 | struct nvme_fc_ctrl, connect_work); |
2668 | 2670 | ||
2669 | ret = nvme_fc_create_association(ctrl); | 2671 | ret = nvme_fc_create_association(ctrl); |
2670 | if (ret) { | 2672 | if (ret) |
2671 | dev_warn(ctrl->ctrl.device, | 2673 | nvme_fc_reconnect_or_delete(ctrl, ret); |
2672 | "NVME-FC{%d}: Reconnect attempt failed (%d)\n", | 2674 | else |
2673 | ctrl->cnum, ret); | ||
2674 | if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) { | ||
2675 | dev_warn(ctrl->ctrl.device, | ||
2676 | "NVME-FC{%d}: Max reconnect attempts (%d) " | ||
2677 | "reached. Removing controller\n", | ||
2678 | ctrl->cnum, ctrl->connect_attempts); | ||
2679 | |||
2680 | if (!nvme_change_ctrl_state(&ctrl->ctrl, | ||
2681 | NVME_CTRL_DELETING)) { | ||
2682 | dev_err(ctrl->ctrl.device, | ||
2683 | "NVME-FC{%d}: failed to change state " | ||
2684 | "to DELETING\n", ctrl->cnum); | ||
2685 | return; | ||
2686 | } | ||
2687 | |||
2688 | WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work)); | ||
2689 | return; | ||
2690 | } | ||
2691 | |||
2692 | dev_warn(ctrl->ctrl.device, | ||
2693 | "NVME-FC{%d}: Reconnect attempt in %d seconds.\n", | ||
2694 | ctrl->cnum, ctrl->reconnect_delay); | ||
2695 | queue_delayed_work(nvme_fc_wq, &ctrl->connect_work, | ||
2696 | ctrl->reconnect_delay * HZ); | ||
2697 | } else | ||
2698 | dev_info(ctrl->ctrl.device, | 2675 | dev_info(ctrl->ctrl.device, |
2699 | "NVME-FC{%d}: controller reconnect complete\n", | 2676 | "NVME-FC{%d}: controller reconnect complete\n", |
2700 | ctrl->cnum); | 2677 | ctrl->cnum); |
@@ -2720,6 +2697,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2720 | unsigned long flags; | 2697 | unsigned long flags; |
2721 | int ret, idx; | 2698 | int ret, idx; |
2722 | 2699 | ||
2700 | if (!(rport->remoteport.port_role & | ||
2701 | (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { | ||
2702 | ret = -EBADR; | ||
2703 | goto out_fail; | ||
2704 | } | ||
2705 | |||
2723 | ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); | 2706 | ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); |
2724 | if (!ctrl) { | 2707 | if (!ctrl) { |
2725 | ret = -ENOMEM; | 2708 | ret = -ENOMEM; |
@@ -2745,7 +2728,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2745 | INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work); | 2728 | INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work); |
2746 | INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work); | 2729 | INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work); |
2747 | INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); | 2730 | INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); |
2748 | ctrl->reconnect_delay = opts->reconnect_delay; | ||
2749 | spin_lock_init(&ctrl->lock); | 2731 | spin_lock_init(&ctrl->lock); |
2750 | 2732 | ||
2751 | /* io queue count */ | 2733 | /* io queue count */ |
@@ -2809,7 +2791,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2809 | ctrl->ctrl.opts = NULL; | 2791 | ctrl->ctrl.opts = NULL; |
2810 | /* initiate nvme ctrl ref counting teardown */ | 2792 | /* initiate nvme ctrl ref counting teardown */ |
2811 | nvme_uninit_ctrl(&ctrl->ctrl); | 2793 | nvme_uninit_ctrl(&ctrl->ctrl); |
2812 | nvme_put_ctrl(&ctrl->ctrl); | ||
2813 | 2794 | ||
2814 | /* as we're past the point where we transition to the ref | 2795 | /* as we're past the point where we transition to the ref |
2815 | * counting teardown path, if we return a bad pointer here, | 2796 | * counting teardown path, if we return a bad pointer here, |
@@ -2825,6 +2806,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2825 | return ERR_PTR(ret); | 2806 | return ERR_PTR(ret); |
2826 | } | 2807 | } |
2827 | 2808 | ||
2809 | kref_get(&ctrl->ctrl.kref); | ||
2810 | |||
2828 | dev_info(ctrl->ctrl.device, | 2811 | dev_info(ctrl->ctrl.device, |
2829 | "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", | 2812 | "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", |
2830 | ctrl->cnum, ctrl->ctrl.opts->subsysnqn); | 2813 | ctrl->cnum, ctrl->ctrl.opts->subsysnqn); |
@@ -2961,7 +2944,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) | |||
2961 | static struct nvmf_transport_ops nvme_fc_transport = { | 2944 | static struct nvmf_transport_ops nvme_fc_transport = { |
2962 | .name = "fc", | 2945 | .name = "fc", |
2963 | .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, | 2946 | .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, |
2964 | .allowed_opts = NVMF_OPT_RECONNECT_DELAY, | 2947 | .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, |
2965 | .create_ctrl = nvme_fc_create_ctrl, | 2948 | .create_ctrl = nvme_fc_create_ctrl, |
2966 | }; | 2949 | }; |
2967 | 2950 | ||
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 29c708ca9621..9d6a070d4391 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -208,7 +208,9 @@ struct nvme_ns { | |||
208 | struct nvme_ctrl_ops { | 208 | struct nvme_ctrl_ops { |
209 | const char *name; | 209 | const char *name; |
210 | struct module *module; | 210 | struct module *module; |
211 | bool is_fabrics; | 211 | unsigned int flags; |
212 | #define NVME_F_FABRICS (1 << 0) | ||
213 | #define NVME_F_METADATA_SUPPORTED (1 << 1) | ||
212 | int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val); | 214 | int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val); |
213 | int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); | 215 | int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); |
214 | int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); | 216 | int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index fed803232edc..d52701df7245 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -263,7 +263,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev) | |||
263 | c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); | 263 | c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); |
264 | 264 | ||
265 | if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { | 265 | if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { |
266 | dev_warn(dev->dev, "unable to set dbbuf\n"); | 266 | dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); |
267 | /* Free memory and continue on */ | 267 | /* Free memory and continue on */ |
268 | nvme_dbbuf_dma_free(dev); | 268 | nvme_dbbuf_dma_free(dev); |
269 | } | 269 | } |
@@ -1394,11 +1394,11 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) | |||
1394 | result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, | 1394 | result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, |
1395 | &pci_status); | 1395 | &pci_status); |
1396 | if (result == PCIBIOS_SUCCESSFUL) | 1396 | if (result == PCIBIOS_SUCCESSFUL) |
1397 | dev_warn(dev->dev, | 1397 | dev_warn(dev->ctrl.device, |
1398 | "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", | 1398 | "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", |
1399 | csts, pci_status); | 1399 | csts, pci_status); |
1400 | else | 1400 | else |
1401 | dev_warn(dev->dev, | 1401 | dev_warn(dev->ctrl.device, |
1402 | "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", | 1402 | "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", |
1403 | csts, result); | 1403 | csts, result); |
1404 | } | 1404 | } |
@@ -1506,6 +1506,11 @@ static inline void nvme_release_cmb(struct nvme_dev *dev) | |||
1506 | if (dev->cmb) { | 1506 | if (dev->cmb) { |
1507 | iounmap(dev->cmb); | 1507 | iounmap(dev->cmb); |
1508 | dev->cmb = NULL; | 1508 | dev->cmb = NULL; |
1509 | if (dev->cmbsz) { | ||
1510 | sysfs_remove_file_from_group(&dev->ctrl.device->kobj, | ||
1511 | &dev_attr_cmb.attr, NULL); | ||
1512 | dev->cmbsz = 0; | ||
1513 | } | ||
1509 | } | 1514 | } |
1510 | } | 1515 | } |
1511 | 1516 | ||
@@ -1735,8 +1740,8 @@ static int nvme_pci_enable(struct nvme_dev *dev) | |||
1735 | */ | 1740 | */ |
1736 | if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { | 1741 | if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { |
1737 | dev->q_depth = 2; | 1742 | dev->q_depth = 2; |
1738 | dev_warn(dev->dev, "detected Apple NVMe controller, set " | 1743 | dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " |
1739 | "queue depth=%u to work around controller resets\n", | 1744 | "set queue depth=%u to work around controller resets\n", |
1740 | dev->q_depth); | 1745 | dev->q_depth); |
1741 | } | 1746 | } |
1742 | 1747 | ||
@@ -1754,7 +1759,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) | |||
1754 | if (dev->cmbsz) { | 1759 | if (dev->cmbsz) { |
1755 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, | 1760 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, |
1756 | &dev_attr_cmb.attr, NULL)) | 1761 | &dev_attr_cmb.attr, NULL)) |
1757 | dev_warn(dev->dev, | 1762 | dev_warn(dev->ctrl.device, |
1758 | "failed to add sysfs attribute for CMB\n"); | 1763 | "failed to add sysfs attribute for CMB\n"); |
1759 | } | 1764 | } |
1760 | } | 1765 | } |
@@ -1779,6 +1784,7 @@ static void nvme_pci_disable(struct nvme_dev *dev) | |||
1779 | { | 1784 | { |
1780 | struct pci_dev *pdev = to_pci_dev(dev->dev); | 1785 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
1781 | 1786 | ||
1787 | nvme_release_cmb(dev); | ||
1782 | pci_free_irq_vectors(pdev); | 1788 | pci_free_irq_vectors(pdev); |
1783 | 1789 | ||
1784 | if (pci_is_enabled(pdev)) { | 1790 | if (pci_is_enabled(pdev)) { |
@@ -2041,6 +2047,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl) | |||
2041 | static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { | 2047 | static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { |
2042 | .name = "pcie", | 2048 | .name = "pcie", |
2043 | .module = THIS_MODULE, | 2049 | .module = THIS_MODULE, |
2050 | .flags = NVME_F_METADATA_SUPPORTED, | ||
2044 | .reg_read32 = nvme_pci_reg_read32, | 2051 | .reg_read32 = nvme_pci_reg_read32, |
2045 | .reg_write32 = nvme_pci_reg_write32, | 2052 | .reg_write32 = nvme_pci_reg_write32, |
2046 | .reg_read64 = nvme_pci_reg_read64, | 2053 | .reg_read64 = nvme_pci_reg_read64, |
@@ -2184,7 +2191,6 @@ static void nvme_remove(struct pci_dev *pdev) | |||
2184 | nvme_dev_disable(dev, true); | 2191 | nvme_dev_disable(dev, true); |
2185 | nvme_dev_remove_admin(dev); | 2192 | nvme_dev_remove_admin(dev); |
2186 | nvme_free_queues(dev, 0); | 2193 | nvme_free_queues(dev, 0); |
2187 | nvme_release_cmb(dev); | ||
2188 | nvme_release_prp_pools(dev); | 2194 | nvme_release_prp_pools(dev); |
2189 | nvme_dev_unmap(dev); | 2195 | nvme_dev_unmap(dev); |
2190 | nvme_put_ctrl(&dev->ctrl); | 2196 | nvme_put_ctrl(&dev->ctrl); |
@@ -2288,6 +2294,8 @@ static const struct pci_device_id nvme_id_table[] = { | |||
2288 | { PCI_VDEVICE(INTEL, 0x0a54), | 2294 | { PCI_VDEVICE(INTEL, 0x0a54), |
2289 | .driver_data = NVME_QUIRK_STRIPE_SIZE | | 2295 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
2290 | NVME_QUIRK_DEALLOCATE_ZEROES, }, | 2296 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
2297 | { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ | ||
2298 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS }, | ||
2291 | { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ | 2299 | { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ |
2292 | .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, | 2300 | .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, |
2293 | { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ | 2301 | { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index dd1c6deef82f..28bd255c144d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -1038,6 +1038,19 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) | |||
1038 | nvme_rdma_wr_error(cq, wc, "SEND"); | 1038 | nvme_rdma_wr_error(cq, wc, "SEND"); |
1039 | } | 1039 | } |
1040 | 1040 | ||
1041 | static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue) | ||
1042 | { | ||
1043 | int sig_limit; | ||
1044 | |||
1045 | /* | ||
1046 | * We signal completion every queue depth/2 and also handle the | ||
1047 | * degenerated case of a device with queue_depth=1, where we | ||
1048 | * would need to signal every message. | ||
1049 | */ | ||
1050 | sig_limit = max(queue->queue_size / 2, 1); | ||
1051 | return (++queue->sig_count % sig_limit) == 0; | ||
1052 | } | ||
1053 | |||
1041 | static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, | 1054 | static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, |
1042 | struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, | 1055 | struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, |
1043 | struct ib_send_wr *first, bool flush) | 1056 | struct ib_send_wr *first, bool flush) |
@@ -1065,9 +1078,6 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, | |||
1065 | * Would have been way to obvious to handle this in hardware or | 1078 | * Would have been way to obvious to handle this in hardware or |
1066 | * at least the RDMA stack.. | 1079 | * at least the RDMA stack.. |
1067 | * | 1080 | * |
1068 | * This messy and racy code sniplet is copy and pasted from the iSER | ||
1069 | * initiator, and the magic '32' comes from there as well. | ||
1070 | * | ||
1071 | * Always signal the flushes. The magic request used for the flush | 1081 | * Always signal the flushes. The magic request used for the flush |
1072 | * sequencer is not allocated in our driver's tagset and it's | 1082 | * sequencer is not allocated in our driver's tagset and it's |
1073 | * triggered to be freed by blk_cleanup_queue(). So we need to | 1083 | * triggered to be freed by blk_cleanup_queue(). So we need to |
@@ -1075,7 +1085,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, | |||
1075 | * embedded in request's payload, is not freed when __ib_process_cq() | 1085 | * embedded in request's payload, is not freed when __ib_process_cq() |
1076 | * calls wr_cqe->done(). | 1086 | * calls wr_cqe->done(). |
1077 | */ | 1087 | */ |
1078 | if ((++queue->sig_count % 32) == 0 || flush) | 1088 | if (nvme_rdma_queue_sig_limit(queue) || flush) |
1079 | wr.send_flags |= IB_SEND_SIGNALED; | 1089 | wr.send_flags |= IB_SEND_SIGNALED; |
1080 | 1090 | ||
1081 | if (first) | 1091 | if (first) |
@@ -1782,7 +1792,7 @@ static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl) | |||
1782 | static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { | 1792 | static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { |
1783 | .name = "rdma", | 1793 | .name = "rdma", |
1784 | .module = THIS_MODULE, | 1794 | .module = THIS_MODULE, |
1785 | .is_fabrics = true, | 1795 | .flags = NVME_F_FABRICS, |
1786 | .reg_read32 = nvmf_reg_read32, | 1796 | .reg_read32 = nvmf_reg_read32, |
1787 | .reg_read64 = nvmf_reg_read64, | 1797 | .reg_read64 = nvmf_reg_read64, |
1788 | .reg_write32 = nvmf_reg_write32, | 1798 | .reg_write32 = nvmf_reg_write32, |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index cf90713043da..eb9399ac97cf 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -529,6 +529,12 @@ fail: | |||
529 | } | 529 | } |
530 | EXPORT_SYMBOL_GPL(nvmet_req_init); | 530 | EXPORT_SYMBOL_GPL(nvmet_req_init); |
531 | 531 | ||
532 | void nvmet_req_uninit(struct nvmet_req *req) | ||
533 | { | ||
534 | percpu_ref_put(&req->sq->ref); | ||
535 | } | ||
536 | EXPORT_SYMBOL_GPL(nvmet_req_uninit); | ||
537 | |||
532 | static inline bool nvmet_cc_en(u32 cc) | 538 | static inline bool nvmet_cc_en(u32 cc) |
533 | { | 539 | { |
534 | return cc & 0x1; | 540 | return cc & 0x1; |
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 62eba29c85fb..2006fae61980 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c | |||
@@ -517,9 +517,7 @@ nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid) | |||
517 | { | 517 | { |
518 | int cpu, idx, cnt; | 518 | int cpu, idx, cnt; |
519 | 519 | ||
520 | if (!(tgtport->ops->target_features & | 520 | if (tgtport->ops->max_hw_queues == 1) |
521 | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) || | ||
522 | tgtport->ops->max_hw_queues == 1) | ||
523 | return WORK_CPU_UNBOUND; | 521 | return WORK_CPU_UNBOUND; |
524 | 522 | ||
525 | /* Simple cpu selection based on qid modulo active cpu count */ | 523 | /* Simple cpu selection based on qid modulo active cpu count */ |
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 15551ef79c8c..294a6611fb24 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c | |||
@@ -698,7 +698,6 @@ static struct nvmet_fc_target_template tgttemplate = { | |||
698 | .dma_boundary = FCLOOP_DMABOUND_4G, | 698 | .dma_boundary = FCLOOP_DMABOUND_4G, |
699 | /* optional features */ | 699 | /* optional features */ |
700 | .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR | | 700 | .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR | |
701 | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED | | ||
702 | NVMET_FCTGTFEAT_OPDONE_IN_ISR, | 701 | NVMET_FCTGTFEAT_OPDONE_IN_ISR, |
703 | /* sizes of additional private data for data structures */ | 702 | /* sizes of additional private data for data structures */ |
704 | .target_priv_sz = sizeof(struct fcloop_tport), | 703 | .target_priv_sz = sizeof(struct fcloop_tport), |
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index feb497134aee..e503cfff0337 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c | |||
@@ -558,7 +558,7 @@ static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl) | |||
558 | static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { | 558 | static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { |
559 | .name = "loop", | 559 | .name = "loop", |
560 | .module = THIS_MODULE, | 560 | .module = THIS_MODULE, |
561 | .is_fabrics = true, | 561 | .flags = NVME_F_FABRICS, |
562 | .reg_read32 = nvmf_reg_read32, | 562 | .reg_read32 = nvmf_reg_read32, |
563 | .reg_read64 = nvmf_reg_read64, | 563 | .reg_read64 = nvmf_reg_read64, |
564 | .reg_write32 = nvmf_reg_write32, | 564 | .reg_write32 = nvmf_reg_write32, |
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 7cb77ba5993b..cfc5c7fb0ab7 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h | |||
@@ -261,6 +261,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req); | |||
261 | 261 | ||
262 | bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, | 262 | bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, |
263 | struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops); | 263 | struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops); |
264 | void nvmet_req_uninit(struct nvmet_req *req); | ||
264 | void nvmet_req_complete(struct nvmet_req *req, u16 status); | 265 | void nvmet_req_complete(struct nvmet_req *req, u16 status); |
265 | 266 | ||
266 | void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, | 267 | void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, |
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 99c69018a35f..9e45cde63376 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
@@ -567,6 +567,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) | |||
567 | rsp->n_rdma = 0; | 567 | rsp->n_rdma = 0; |
568 | 568 | ||
569 | if (unlikely(wc->status != IB_WC_SUCCESS)) { | 569 | if (unlikely(wc->status != IB_WC_SUCCESS)) { |
570 | nvmet_req_uninit(&rsp->req); | ||
570 | nvmet_rdma_release_rsp(rsp); | 571 | nvmet_rdma_release_rsp(rsp); |
571 | if (wc->status != IB_WC_WR_FLUSH_ERR) { | 572 | if (wc->status != IB_WC_WR_FLUSH_ERR) { |
572 | pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n", | 573 | pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n", |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 3080d9dd031d..43bd69dceabf 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -507,6 +507,9 @@ void *__unflatten_device_tree(const void *blob, | |||
507 | 507 | ||
508 | /* Allocate memory for the expanded device tree */ | 508 | /* Allocate memory for the expanded device tree */ |
509 | mem = dt_alloc(size + 4, __alignof__(struct device_node)); | 509 | mem = dt_alloc(size + 4, __alignof__(struct device_node)); |
510 | if (!mem) | ||
511 | return NULL; | ||
512 | |||
510 | memset(mem, 0, size); | 513 | memset(mem, 0, size); |
511 | 514 | ||
512 | *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef); | 515 | *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef); |
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 4dec07ea510f..d507c3569a88 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
@@ -197,7 +197,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem) | |||
197 | const struct of_device_id *i; | 197 | const struct of_device_id *i; |
198 | 198 | ||
199 | for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { | 199 | for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { |
200 | int const (*initfn)(struct reserved_mem *rmem) = i->data; | 200 | reservedmem_of_init_fn initfn = i->data; |
201 | const char *compat = i->compatible; | 201 | const char *compat = i->compatible; |
202 | 202 | ||
203 | if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) | 203 | if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) |
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 71fecc2debfc..703a42118ffc 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
@@ -523,7 +523,7 @@ static int __init of_platform_default_populate_init(void) | |||
523 | arch_initcall_sync(of_platform_default_populate_init); | 523 | arch_initcall_sync(of_platform_default_populate_init); |
524 | #endif | 524 | #endif |
525 | 525 | ||
526 | static int of_platform_device_destroy(struct device *dev, void *data) | 526 | int of_platform_device_destroy(struct device *dev, void *data) |
527 | { | 527 | { |
528 | /* Do not touch devices not populated from the device tree */ | 528 | /* Do not touch devices not populated from the device tree */ |
529 | if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) | 529 | if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) |
@@ -544,6 +544,7 @@ static int of_platform_device_destroy(struct device *dev, void *data) | |||
544 | of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); | 544 | of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); |
545 | return 0; | 545 | return 0; |
546 | } | 546 | } |
547 | EXPORT_SYMBOL_GPL(of_platform_device_destroy); | ||
547 | 548 | ||
548 | /** | 549 | /** |
549 | * of_platform_depopulate() - Remove devices populated from device tree | 550 | * of_platform_depopulate() - Remove devices populated from device tree |
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c index a98cba55c7f0..19a289b8cc94 100644 --- a/drivers/pci/dwc/pci-imx6.c +++ b/drivers/pci/dwc/pci-imx6.c | |||
@@ -252,7 +252,34 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) | |||
252 | static int imx6q_pcie_abort_handler(unsigned long addr, | 252 | static int imx6q_pcie_abort_handler(unsigned long addr, |
253 | unsigned int fsr, struct pt_regs *regs) | 253 | unsigned int fsr, struct pt_regs *regs) |
254 | { | 254 | { |
255 | return 0; | 255 | unsigned long pc = instruction_pointer(regs); |
256 | unsigned long instr = *(unsigned long *)pc; | ||
257 | int reg = (instr >> 12) & 15; | ||
258 | |||
259 | /* | ||
260 | * If the instruction being executed was a read, | ||
261 | * make it look like it read all-ones. | ||
262 | */ | ||
263 | if ((instr & 0x0c100000) == 0x04100000) { | ||
264 | unsigned long val; | ||
265 | |||
266 | if (instr & 0x00400000) | ||
267 | val = 255; | ||
268 | else | ||
269 | val = -1; | ||
270 | |||
271 | regs->uregs[reg] = val; | ||
272 | regs->ARM_pc += 4; | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | if ((instr & 0x0e100090) == 0x00100090) { | ||
277 | regs->uregs[reg] = -1; | ||
278 | regs->ARM_pc += 4; | ||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | return 1; | ||
256 | } | 283 | } |
257 | 284 | ||
258 | static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) | 285 | static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) |
@@ -819,8 +846,8 @@ static int __init imx6_pcie_init(void) | |||
819 | * we can install the handler here without risking it | 846 | * we can install the handler here without risking it |
820 | * accessing some uninitialized driver state. | 847 | * accessing some uninitialized driver state. |
821 | */ | 848 | */ |
822 | hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, | 849 | hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, |
823 | "imprecise external abort"); | 850 | "external abort on non-linefetch"); |
824 | 851 | ||
825 | return platform_driver_register(&imx6_pcie_driver); | 852 | return platform_driver_register(&imx6_pcie_driver); |
826 | } | 853 | } |
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig index c23f146fb5a6..c09623ca8c3b 100644 --- a/drivers/pci/endpoint/Kconfig +++ b/drivers/pci/endpoint/Kconfig | |||
@@ -6,6 +6,7 @@ menu "PCI Endpoint" | |||
6 | 6 | ||
7 | config PCI_ENDPOINT | 7 | config PCI_ENDPOINT |
8 | bool "PCI Endpoint Support" | 8 | bool "PCI Endpoint Support" |
9 | depends on HAS_DMA | ||
9 | help | 10 | help |
10 | Enable this configuration option to support configurable PCI | 11 | Enable this configuration option to support configurable PCI |
11 | endpoint. This should be enabled if the platform has a PCI | 12 | endpoint. This should be enabled if the platform has a PCI |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b01bd5bba8e6..563901cd9c06 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -2144,7 +2144,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev) | |||
2144 | 2144 | ||
2145 | if (!pm_runtime_suspended(dev) | 2145 | if (!pm_runtime_suspended(dev) |
2146 | || pci_target_state(pci_dev) != pci_dev->current_state | 2146 | || pci_target_state(pci_dev) != pci_dev->current_state |
2147 | || platform_pci_need_resume(pci_dev)) | 2147 | || platform_pci_need_resume(pci_dev) |
2148 | || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) | ||
2148 | return false; | 2149 | return false; |
2149 | 2150 | ||
2150 | /* | 2151 | /* |
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index cc6e085008fb..f6a63406c76e 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c | |||
@@ -1291,7 +1291,6 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev) | |||
1291 | cdev = &stdev->cdev; | 1291 | cdev = &stdev->cdev; |
1292 | cdev_init(cdev, &switchtec_fops); | 1292 | cdev_init(cdev, &switchtec_fops); |
1293 | cdev->owner = THIS_MODULE; | 1293 | cdev->owner = THIS_MODULE; |
1294 | cdev->kobj.parent = &dev->kobj; | ||
1295 | 1294 | ||
1296 | return stdev; | 1295 | return stdev; |
1297 | 1296 | ||
@@ -1442,12 +1441,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev, | |||
1442 | stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET; | 1441 | stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET; |
1443 | stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET; | 1442 | stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET; |
1444 | stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET; | 1443 | stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET; |
1445 | stdev->partition = ioread8(&stdev->mmio_ntb->partition_id); | 1444 | stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id); |
1446 | stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count); | 1445 | stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count); |
1447 | stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET; | 1446 | stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET; |
1448 | stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition]; | 1447 | stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition]; |
1449 | stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET; | 1448 | stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET; |
1450 | 1449 | ||
1450 | if (stdev->partition_count < 1) | ||
1451 | stdev->partition_count = 1; | ||
1452 | |||
1451 | init_pff(stdev); | 1453 | init_pff(stdev); |
1452 | 1454 | ||
1453 | pci_set_drvdata(pdev, stdev); | 1455 | pci_set_drvdata(pdev, stdev); |
@@ -1479,11 +1481,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev, | |||
1479 | SWITCHTEC_EVENT_EN_IRQ, | 1481 | SWITCHTEC_EVENT_EN_IRQ, |
1480 | &stdev->mmio_part_cfg->mrpc_comp_hdr); | 1482 | &stdev->mmio_part_cfg->mrpc_comp_hdr); |
1481 | 1483 | ||
1482 | rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1); | 1484 | rc = cdev_device_add(&stdev->cdev, &stdev->dev); |
1483 | if (rc) | ||
1484 | goto err_put; | ||
1485 | |||
1486 | rc = device_add(&stdev->dev); | ||
1487 | if (rc) | 1485 | if (rc) |
1488 | goto err_devadd; | 1486 | goto err_devadd; |
1489 | 1487 | ||
@@ -1492,7 +1490,6 @@ static int switchtec_pci_probe(struct pci_dev *pdev, | |||
1492 | return 0; | 1490 | return 0; |
1493 | 1491 | ||
1494 | err_devadd: | 1492 | err_devadd: |
1495 | cdev_del(&stdev->cdev); | ||
1496 | stdev_kill(stdev); | 1493 | stdev_kill(stdev); |
1497 | err_put: | 1494 | err_put: |
1498 | ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); | 1495 | ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); |
@@ -1506,8 +1503,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev) | |||
1506 | 1503 | ||
1507 | pci_set_drvdata(pdev, NULL); | 1504 | pci_set_drvdata(pdev, NULL); |
1508 | 1505 | ||
1509 | device_del(&stdev->dev); | 1506 | cdev_device_del(&stdev->cdev, &stdev->dev); |
1510 | cdev_del(&stdev->cdev); | ||
1511 | ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); | 1507 | ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); |
1512 | dev_info(&stdev->dev, "unregistered.\n"); | 1508 | dev_info(&stdev->dev, "unregistered.\n"); |
1513 | 1509 | ||
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 14bde0db8c24..5b10b50f8686 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c | |||
@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone( | |||
538 | 538 | ||
539 | power_zone->id = result; | 539 | power_zone->id = result; |
540 | idr_init(&power_zone->idr); | 540 | idr_init(&power_zone->idr); |
541 | result = -ENOMEM; | ||
541 | power_zone->name = kstrdup(name, GFP_KERNEL); | 542 | power_zone->name = kstrdup(name, GFP_KERNEL); |
542 | if (!power_zone->name) | 543 | if (!power_zone->name) |
543 | goto err_name_alloc; | 544 | goto err_name_alloc; |
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index b3de973a6260..9dca53df3584 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
@@ -1088,7 +1088,7 @@ static u32 rtc_handler(void *context) | |||
1088 | } | 1088 | } |
1089 | spin_unlock_irqrestore(&rtc_lock, flags); | 1089 | spin_unlock_irqrestore(&rtc_lock, flags); |
1090 | 1090 | ||
1091 | pm_wakeup_event(dev, 0); | 1091 | pm_wakeup_hard_event(dev); |
1092 | acpi_clear_event(ACPI_EVENT_RTC); | 1092 | acpi_clear_event(ACPI_EVENT_RTC); |
1093 | acpi_disable_event(ACPI_EVENT_RTC, 0); | 1093 | acpi_disable_event(ACPI_EVENT_RTC, 0); |
1094 | return ACPI_INTERRUPT_HANDLED; | 1094 | return ACPI_INTERRUPT_HANDLED; |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index e443b0d0b236..34b9ad6b3143 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -35,7 +35,7 @@ static struct bus_type ccwgroup_bus_type; | |||
35 | static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) | 35 | static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) |
36 | { | 36 | { |
37 | int i; | 37 | int i; |
38 | char str[8]; | 38 | char str[16]; |
39 | 39 | ||
40 | for (i = 0; i < gdev->count; i++) { | 40 | for (i = 0; i < gdev->count; i++) { |
41 | sprintf(str, "cdev%d", i); | 41 | sprintf(str, "cdev%d", i); |
@@ -238,7 +238,7 @@ static void ccwgroup_release(struct device *dev) | |||
238 | 238 | ||
239 | static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) | 239 | static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) |
240 | { | 240 | { |
241 | char str[8]; | 241 | char str[16]; |
242 | int i, rc; | 242 | int i, rc; |
243 | 243 | ||
244 | for (i = 0; i < gdev->count; i++) { | 244 | for (i = 0; i < gdev->count; i++) { |
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h index f33ce8577619..1d595d17bf11 100644 --- a/drivers/s390/cio/qdio_debug.h +++ b/drivers/s390/cio/qdio_debug.h | |||
@@ -11,7 +11,7 @@ | |||
11 | #include "qdio.h" | 11 | #include "qdio.h" |
12 | 12 | ||
13 | /* that gives us 15 characters in the text event views */ | 13 | /* that gives us 15 characters in the text event views */ |
14 | #define QDIO_DBF_LEN 16 | 14 | #define QDIO_DBF_LEN 32 |
15 | 15 | ||
16 | extern debug_info_t *qdio_dbf_setup; | 16 | extern debug_info_t *qdio_dbf_setup; |
17 | extern debug_info_t *qdio_dbf_error; | 17 | extern debug_info_t *qdio_dbf_error; |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index f6aa21176d89..30bc6105aac3 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -701,6 +701,7 @@ enum qeth_discipline_id { | |||
701 | }; | 701 | }; |
702 | 702 | ||
703 | struct qeth_discipline { | 703 | struct qeth_discipline { |
704 | const struct device_type *devtype; | ||
704 | void (*start_poll)(struct ccw_device *, int, unsigned long); | 705 | void (*start_poll)(struct ccw_device *, int, unsigned long); |
705 | qdio_handler_t *input_handler; | 706 | qdio_handler_t *input_handler; |
706 | qdio_handler_t *output_handler; | 707 | qdio_handler_t *output_handler; |
@@ -875,6 +876,9 @@ extern struct qeth_discipline qeth_l2_discipline; | |||
875 | extern struct qeth_discipline qeth_l3_discipline; | 876 | extern struct qeth_discipline qeth_l3_discipline; |
876 | extern const struct attribute_group *qeth_generic_attr_groups[]; | 877 | extern const struct attribute_group *qeth_generic_attr_groups[]; |
877 | extern const struct attribute_group *qeth_osn_attr_groups[]; | 878 | extern const struct attribute_group *qeth_osn_attr_groups[]; |
879 | extern const struct attribute_group qeth_device_attr_group; | ||
880 | extern const struct attribute_group qeth_device_blkt_group; | ||
881 | extern const struct device_type qeth_generic_devtype; | ||
878 | extern struct workqueue_struct *qeth_wq; | 882 | extern struct workqueue_struct *qeth_wq; |
879 | 883 | ||
880 | int qeth_card_hw_is_reachable(struct qeth_card *); | 884 | int qeth_card_hw_is_reachable(struct qeth_card *); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 38114a8d56e0..fc6d85f2b38d 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -5530,10 +5530,12 @@ void qeth_core_free_discipline(struct qeth_card *card) | |||
5530 | card->discipline = NULL; | 5530 | card->discipline = NULL; |
5531 | } | 5531 | } |
5532 | 5532 | ||
5533 | static const struct device_type qeth_generic_devtype = { | 5533 | const struct device_type qeth_generic_devtype = { |
5534 | .name = "qeth_generic", | 5534 | .name = "qeth_generic", |
5535 | .groups = qeth_generic_attr_groups, | 5535 | .groups = qeth_generic_attr_groups, |
5536 | }; | 5536 | }; |
5537 | EXPORT_SYMBOL_GPL(qeth_generic_devtype); | ||
5538 | |||
5537 | static const struct device_type qeth_osn_devtype = { | 5539 | static const struct device_type qeth_osn_devtype = { |
5538 | .name = "qeth_osn", | 5540 | .name = "qeth_osn", |
5539 | .groups = qeth_osn_attr_groups, | 5541 | .groups = qeth_osn_attr_groups, |
@@ -5659,23 +5661,22 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
5659 | goto err_card; | 5661 | goto err_card; |
5660 | } | 5662 | } |
5661 | 5663 | ||
5662 | if (card->info.type == QETH_CARD_TYPE_OSN) | ||
5663 | gdev->dev.type = &qeth_osn_devtype; | ||
5664 | else | ||
5665 | gdev->dev.type = &qeth_generic_devtype; | ||
5666 | |||
5667 | switch (card->info.type) { | 5664 | switch (card->info.type) { |
5668 | case QETH_CARD_TYPE_OSN: | 5665 | case QETH_CARD_TYPE_OSN: |
5669 | case QETH_CARD_TYPE_OSM: | 5666 | case QETH_CARD_TYPE_OSM: |
5670 | rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); | 5667 | rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); |
5671 | if (rc) | 5668 | if (rc) |
5672 | goto err_card; | 5669 | goto err_card; |
5670 | |||
5671 | gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN) | ||
5672 | ? card->discipline->devtype | ||
5673 | : &qeth_osn_devtype; | ||
5673 | rc = card->discipline->setup(card->gdev); | 5674 | rc = card->discipline->setup(card->gdev); |
5674 | if (rc) | 5675 | if (rc) |
5675 | goto err_disc; | 5676 | goto err_disc; |
5676 | case QETH_CARD_TYPE_OSD: | 5677 | break; |
5677 | case QETH_CARD_TYPE_OSX: | ||
5678 | default: | 5678 | default: |
5679 | gdev->dev.type = &qeth_generic_devtype; | ||
5679 | break; | 5680 | break; |
5680 | } | 5681 | } |
5681 | 5682 | ||
@@ -5731,8 +5732,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev) | |||
5731 | if (rc) | 5732 | if (rc) |
5732 | goto err; | 5733 | goto err; |
5733 | rc = card->discipline->setup(card->gdev); | 5734 | rc = card->discipline->setup(card->gdev); |
5734 | if (rc) | 5735 | if (rc) { |
5736 | qeth_core_free_discipline(card); | ||
5735 | goto err; | 5737 | goto err; |
5738 | } | ||
5736 | } | 5739 | } |
5737 | rc = card->discipline->set_online(gdev); | 5740 | rc = card->discipline->set_online(gdev); |
5738 | err: | 5741 | err: |
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 75b29fd2fcf4..db6a285d41e0 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c | |||
@@ -413,12 +413,16 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
413 | 413 | ||
414 | if (card->options.layer2 == newdis) | 414 | if (card->options.layer2 == newdis) |
415 | goto out; | 415 | goto out; |
416 | else { | 416 | if (card->info.type == QETH_CARD_TYPE_OSM) { |
417 | card->info.mac_bits = 0; | 417 | /* fixed layer, can't switch */ |
418 | if (card->discipline) { | 418 | rc = -EOPNOTSUPP; |
419 | card->discipline->remove(card->gdev); | 419 | goto out; |
420 | qeth_core_free_discipline(card); | 420 | } |
421 | } | 421 | |
422 | card->info.mac_bits = 0; | ||
423 | if (card->discipline) { | ||
424 | card->discipline->remove(card->gdev); | ||
425 | qeth_core_free_discipline(card); | ||
422 | } | 426 | } |
423 | 427 | ||
424 | rc = qeth_core_load_discipline(card, newdis); | 428 | rc = qeth_core_load_discipline(card, newdis); |
@@ -426,6 +430,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
426 | goto out; | 430 | goto out; |
427 | 431 | ||
428 | rc = card->discipline->setup(card->gdev); | 432 | rc = card->discipline->setup(card->gdev); |
433 | if (rc) | ||
434 | qeth_core_free_discipline(card); | ||
429 | out: | 435 | out: |
430 | mutex_unlock(&card->discipline_mutex); | 436 | mutex_unlock(&card->discipline_mutex); |
431 | return rc ? rc : count; | 437 | return rc ? rc : count; |
@@ -703,10 +709,11 @@ static struct attribute *qeth_blkt_device_attrs[] = { | |||
703 | &dev_attr_inter_jumbo.attr, | 709 | &dev_attr_inter_jumbo.attr, |
704 | NULL, | 710 | NULL, |
705 | }; | 711 | }; |
706 | static struct attribute_group qeth_device_blkt_group = { | 712 | const struct attribute_group qeth_device_blkt_group = { |
707 | .name = "blkt", | 713 | .name = "blkt", |
708 | .attrs = qeth_blkt_device_attrs, | 714 | .attrs = qeth_blkt_device_attrs, |
709 | }; | 715 | }; |
716 | EXPORT_SYMBOL_GPL(qeth_device_blkt_group); | ||
710 | 717 | ||
711 | static struct attribute *qeth_device_attrs[] = { | 718 | static struct attribute *qeth_device_attrs[] = { |
712 | &dev_attr_state.attr, | 719 | &dev_attr_state.attr, |
@@ -726,9 +733,10 @@ static struct attribute *qeth_device_attrs[] = { | |||
726 | &dev_attr_switch_attrs.attr, | 733 | &dev_attr_switch_attrs.attr, |
727 | NULL, | 734 | NULL, |
728 | }; | 735 | }; |
729 | static struct attribute_group qeth_device_attr_group = { | 736 | const struct attribute_group qeth_device_attr_group = { |
730 | .attrs = qeth_device_attrs, | 737 | .attrs = qeth_device_attrs, |
731 | }; | 738 | }; |
739 | EXPORT_SYMBOL_GPL(qeth_device_attr_group); | ||
732 | 740 | ||
733 | const struct attribute_group *qeth_generic_attr_groups[] = { | 741 | const struct attribute_group *qeth_generic_attr_groups[] = { |
734 | &qeth_device_attr_group, | 742 | &qeth_device_attr_group, |
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h index 29d9fb3890ad..0d59f9a45ea9 100644 --- a/drivers/s390/net/qeth_l2.h +++ b/drivers/s390/net/qeth_l2.h | |||
@@ -8,6 +8,8 @@ | |||
8 | 8 | ||
9 | #include "qeth_core.h" | 9 | #include "qeth_core.h" |
10 | 10 | ||
11 | extern const struct attribute_group *qeth_l2_attr_groups[]; | ||
12 | |||
11 | int qeth_l2_create_device_attributes(struct device *); | 13 | int qeth_l2_create_device_attributes(struct device *); |
12 | void qeth_l2_remove_device_attributes(struct device *); | 14 | void qeth_l2_remove_device_attributes(struct device *); |
13 | void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); | 15 | void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 1b07f382d74c..bd2df62a5cdf 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -880,11 +880,21 @@ static int qeth_l2_stop(struct net_device *dev) | |||
880 | return 0; | 880 | return 0; |
881 | } | 881 | } |
882 | 882 | ||
883 | static const struct device_type qeth_l2_devtype = { | ||
884 | .name = "qeth_layer2", | ||
885 | .groups = qeth_l2_attr_groups, | ||
886 | }; | ||
887 | |||
883 | static int qeth_l2_probe_device(struct ccwgroup_device *gdev) | 888 | static int qeth_l2_probe_device(struct ccwgroup_device *gdev) |
884 | { | 889 | { |
885 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | 890 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); |
891 | int rc; | ||
886 | 892 | ||
887 | qeth_l2_create_device_attributes(&gdev->dev); | 893 | if (gdev->dev.type == &qeth_generic_devtype) { |
894 | rc = qeth_l2_create_device_attributes(&gdev->dev); | ||
895 | if (rc) | ||
896 | return rc; | ||
897 | } | ||
888 | INIT_LIST_HEAD(&card->vid_list); | 898 | INIT_LIST_HEAD(&card->vid_list); |
889 | hash_init(card->mac_htable); | 899 | hash_init(card->mac_htable); |
890 | card->options.layer2 = 1; | 900 | card->options.layer2 = 1; |
@@ -896,7 +906,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) | |||
896 | { | 906 | { |
897 | struct qeth_card *card = dev_get_drvdata(&cgdev->dev); | 907 | struct qeth_card *card = dev_get_drvdata(&cgdev->dev); |
898 | 908 | ||
899 | qeth_l2_remove_device_attributes(&cgdev->dev); | 909 | if (cgdev->dev.type == &qeth_generic_devtype) |
910 | qeth_l2_remove_device_attributes(&cgdev->dev); | ||
900 | qeth_set_allowed_threads(card, 0, 1); | 911 | qeth_set_allowed_threads(card, 0, 1); |
901 | wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); | 912 | wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); |
902 | 913 | ||
@@ -954,7 +965,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) | |||
954 | case QETH_CARD_TYPE_OSN: | 965 | case QETH_CARD_TYPE_OSN: |
955 | card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, | 966 | card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, |
956 | ether_setup); | 967 | ether_setup); |
957 | card->dev->flags |= IFF_NOARP; | ||
958 | break; | 968 | break; |
959 | default: | 969 | default: |
960 | card->dev = alloc_etherdev(0); | 970 | card->dev = alloc_etherdev(0); |
@@ -969,9 +979,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) | |||
969 | card->dev->min_mtu = 64; | 979 | card->dev->min_mtu = 64; |
970 | card->dev->max_mtu = ETH_MAX_MTU; | 980 | card->dev->max_mtu = ETH_MAX_MTU; |
971 | card->dev->netdev_ops = &qeth_l2_netdev_ops; | 981 | card->dev->netdev_ops = &qeth_l2_netdev_ops; |
972 | card->dev->ethtool_ops = | 982 | if (card->info.type == QETH_CARD_TYPE_OSN) { |
973 | (card->info.type != QETH_CARD_TYPE_OSN) ? | 983 | card->dev->ethtool_ops = &qeth_l2_osn_ops; |
974 | &qeth_l2_ethtool_ops : &qeth_l2_osn_ops; | 984 | card->dev->flags |= IFF_NOARP; |
985 | } else { | ||
986 | card->dev->ethtool_ops = &qeth_l2_ethtool_ops; | ||
987 | } | ||
975 | card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; | 988 | card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
976 | if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { | 989 | if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { |
977 | card->dev->hw_features = NETIF_F_SG; | 990 | card->dev->hw_features = NETIF_F_SG; |
@@ -1269,6 +1282,7 @@ static int qeth_l2_control_event(struct qeth_card *card, | |||
1269 | } | 1282 | } |
1270 | 1283 | ||
1271 | struct qeth_discipline qeth_l2_discipline = { | 1284 | struct qeth_discipline qeth_l2_discipline = { |
1285 | .devtype = &qeth_l2_devtype, | ||
1272 | .start_poll = qeth_qdio_start_poll, | 1286 | .start_poll = qeth_qdio_start_poll, |
1273 | .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, | 1287 | .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, |
1274 | .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, | 1288 | .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, |
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index 687972356d6b..9696baa49e2d 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c | |||
@@ -269,3 +269,11 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) | |||
269 | } else | 269 | } else |
270 | qeth_bridgeport_an_set(card, 0); | 270 | qeth_bridgeport_an_set(card, 0); |
271 | } | 271 | } |
272 | |||
273 | const struct attribute_group *qeth_l2_attr_groups[] = { | ||
274 | &qeth_device_attr_group, | ||
275 | &qeth_device_blkt_group, | ||
276 | /* l2 specific, see l2_{create,remove}_device_attributes(): */ | ||
277 | &qeth_l2_bridgeport_attr_group, | ||
278 | NULL, | ||
279 | }; | ||
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 6e0354ef4b86..d8df1e635163 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -3039,8 +3039,13 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) | |||
3039 | static int qeth_l3_probe_device(struct ccwgroup_device *gdev) | 3039 | static int qeth_l3_probe_device(struct ccwgroup_device *gdev) |
3040 | { | 3040 | { |
3041 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | 3041 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); |
3042 | int rc; | ||
3042 | 3043 | ||
3043 | qeth_l3_create_device_attributes(&gdev->dev); | 3044 | rc = qeth_l3_create_device_attributes(&gdev->dev); |
3045 | if (rc) | ||
3046 | return rc; | ||
3047 | hash_init(card->ip_htable); | ||
3048 | hash_init(card->ip_mc_htable); | ||
3044 | card->options.layer2 = 0; | 3049 | card->options.layer2 = 0; |
3045 | card->info.hwtrap = 0; | 3050 | card->info.hwtrap = 0; |
3046 | return 0; | 3051 | return 0; |
@@ -3306,6 +3311,7 @@ static int qeth_l3_control_event(struct qeth_card *card, | |||
3306 | } | 3311 | } |
3307 | 3312 | ||
3308 | struct qeth_discipline qeth_l3_discipline = { | 3313 | struct qeth_discipline qeth_l3_discipline = { |
3314 | .devtype = &qeth_generic_devtype, | ||
3309 | .start_poll = qeth_qdio_start_poll, | 3315 | .start_poll = qeth_qdio_start_poll, |
3310 | .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, | 3316 | .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, |
3311 | .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, | 3317 | .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, |
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 2a76ea78a0bf..b18fe2014cf2 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c | |||
@@ -87,7 +87,7 @@ struct vq_info_block { | |||
87 | } __packed; | 87 | } __packed; |
88 | 88 | ||
89 | struct virtio_feature_desc { | 89 | struct virtio_feature_desc { |
90 | __u32 features; | 90 | __le32 features; |
91 | __u8 index; | 91 | __u8 index; |
92 | } __packed; | 92 | } __packed; |
93 | 93 | ||
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 622bdabc8894..dab195f04da7 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c | |||
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) | |||
1769 | goto bye; | 1769 | goto bye; |
1770 | } | 1770 | } |
1771 | 1771 | ||
1772 | mempool_free(mbp, hw->mb_mempool); | ||
1773 | if (finicsum != cfcsum) { | 1772 | if (finicsum != cfcsum) { |
1774 | csio_warn(hw, | 1773 | csio_warn(hw, |
1775 | "Config File checksum mismatch: csum=%#x, computed=%#x\n", | 1774 | "Config File checksum mismatch: csum=%#x, computed=%#x\n", |
@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) | |||
1780 | rv = csio_hw_validate_caps(hw, mbp); | 1779 | rv = csio_hw_validate_caps(hw, mbp); |
1781 | if (rv != 0) | 1780 | if (rv != 0) |
1782 | goto bye; | 1781 | goto bye; |
1782 | |||
1783 | mempool_free(mbp, hw->mb_mempool); | ||
1784 | mbp = NULL; | ||
1785 | |||
1783 | /* | 1786 | /* |
1784 | * Note that we're operating with parameters | 1787 | * Note that we're operating with parameters |
1785 | * not supplied by the driver, rather than from hard-wired | 1788 | * not supplied by the driver, rather than from hard-wired |
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig index c052104e523e..a011c5dbf214 100644 --- a/drivers/scsi/cxlflash/Kconfig +++ b/drivers/scsi/cxlflash/Kconfig | |||
@@ -5,6 +5,7 @@ | |||
5 | config CXLFLASH | 5 | config CXLFLASH |
6 | tristate "Support for IBM CAPI Flash" | 6 | tristate "Support for IBM CAPI Flash" |
7 | depends on PCI && SCSI && CXL && EEH | 7 | depends on PCI && SCSI && CXL && EEH |
8 | select IRQ_POLL | ||
8 | default m | 9 | default m |
9 | help | 10 | help |
10 | Allows CAPI Accelerated IO to Flash | 11 | Allows CAPI Accelerated IO to Flash |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index a808e8ef1d08..234352da5c3c 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -407,11 +407,12 @@ unlock: | |||
407 | * can_queue. Eventually we will hit the point where we run | 407 | * can_queue. Eventually we will hit the point where we run |
408 | * on all reserved structs. | 408 | * on all reserved structs. |
409 | */ | 409 | */ |
410 | static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) | 410 | static bool fc_fcp_can_queue_ramp_down(struct fc_lport *lport) |
411 | { | 411 | { |
412 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); | 412 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
413 | unsigned long flags; | 413 | unsigned long flags; |
414 | int can_queue; | 414 | int can_queue; |
415 | bool changed = false; | ||
415 | 416 | ||
416 | spin_lock_irqsave(lport->host->host_lock, flags); | 417 | spin_lock_irqsave(lport->host->host_lock, flags); |
417 | 418 | ||
@@ -427,9 +428,11 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) | |||
427 | if (!can_queue) | 428 | if (!can_queue) |
428 | can_queue = 1; | 429 | can_queue = 1; |
429 | lport->host->can_queue = can_queue; | 430 | lport->host->can_queue = can_queue; |
431 | changed = true; | ||
430 | 432 | ||
431 | unlock: | 433 | unlock: |
432 | spin_unlock_irqrestore(lport->host->host_lock, flags); | 434 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
435 | return changed; | ||
433 | } | 436 | } |
434 | 437 | ||
435 | /* | 438 | /* |
@@ -1896,11 +1899,11 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd) | |||
1896 | 1899 | ||
1897 | if (!fc_fcp_lport_queue_ready(lport)) { | 1900 | if (!fc_fcp_lport_queue_ready(lport)) { |
1898 | if (lport->qfull) { | 1901 | if (lport->qfull) { |
1899 | fc_fcp_can_queue_ramp_down(lport); | 1902 | if (fc_fcp_can_queue_ramp_down(lport)) |
1900 | shost_printk(KERN_ERR, lport->host, | 1903 | shost_printk(KERN_ERR, lport->host, |
1901 | "libfc: queue full, " | 1904 | "libfc: queue full, " |
1902 | "reducing can_queue to %d.\n", | 1905 | "reducing can_queue to %d.\n", |
1903 | lport->host->can_queue); | 1906 | lport->host->can_queue); |
1904 | } | 1907 | } |
1905 | rc = SCSI_MLQUEUE_HOST_BUSY; | 1908 | rc = SCSI_MLQUEUE_HOST_BUSY; |
1906 | goto out; | 1909 | goto out; |
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index b44c3136eb51..520325867e2b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -1422,7 +1422,7 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata, | |||
1422 | fp = fc_frame_alloc(lport, sizeof(*rtv)); | 1422 | fp = fc_frame_alloc(lport, sizeof(*rtv)); |
1423 | if (!fp) { | 1423 | if (!fp) { |
1424 | rjt_data.reason = ELS_RJT_UNAB; | 1424 | rjt_data.reason = ELS_RJT_UNAB; |
1425 | rjt_data.reason = ELS_EXPL_INSUF_RES; | 1425 | rjt_data.explan = ELS_EXPL_INSUF_RES; |
1426 | fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); | 1426 | fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); |
1427 | goto drop; | 1427 | goto drop; |
1428 | } | 1428 | } |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 6d7840b096e6..f2c0ba6ced78 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -141,6 +141,13 @@ struct lpfc_dmabuf { | |||
141 | uint32_t buffer_tag; /* used for tagged queue ring */ | 141 | uint32_t buffer_tag; /* used for tagged queue ring */ |
142 | }; | 142 | }; |
143 | 143 | ||
144 | struct lpfc_nvmet_ctxbuf { | ||
145 | struct list_head list; | ||
146 | struct lpfc_nvmet_rcv_ctx *context; | ||
147 | struct lpfc_iocbq *iocbq; | ||
148 | struct lpfc_sglq *sglq; | ||
149 | }; | ||
150 | |||
144 | struct lpfc_dma_pool { | 151 | struct lpfc_dma_pool { |
145 | struct lpfc_dmabuf *elements; | 152 | struct lpfc_dmabuf *elements; |
146 | uint32_t max_count; | 153 | uint32_t max_count; |
@@ -163,9 +170,7 @@ struct rqb_dmabuf { | |||
163 | struct lpfc_dmabuf dbuf; | 170 | struct lpfc_dmabuf dbuf; |
164 | uint16_t total_size; | 171 | uint16_t total_size; |
165 | uint16_t bytes_recv; | 172 | uint16_t bytes_recv; |
166 | void *context; | 173 | uint16_t idx; |
167 | struct lpfc_iocbq *iocbq; | ||
168 | struct lpfc_sglq *sglq; | ||
169 | struct lpfc_queue *hrq; /* ptr to associated Header RQ */ | 174 | struct lpfc_queue *hrq; /* ptr to associated Header RQ */ |
170 | struct lpfc_queue *drq; /* ptr to associated Data RQ */ | 175 | struct lpfc_queue *drq; /* ptr to associated Data RQ */ |
171 | }; | 176 | }; |
@@ -670,6 +675,8 @@ struct lpfc_hba { | |||
670 | /* INIT_LINK mailbox command */ | 675 | /* INIT_LINK mailbox command */ |
671 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ | 676 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ |
672 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ | 677 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ |
678 | #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ | ||
679 | #define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ | ||
673 | 680 | ||
674 | uint32_t hba_flag; /* hba generic flags */ | 681 | uint32_t hba_flag; /* hba generic flags */ |
675 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ | 682 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ |
@@ -777,7 +784,6 @@ struct lpfc_hba { | |||
777 | uint32_t cfg_nvme_oas; | 784 | uint32_t cfg_nvme_oas; |
778 | uint32_t cfg_nvme_io_channel; | 785 | uint32_t cfg_nvme_io_channel; |
779 | uint32_t cfg_nvmet_mrq; | 786 | uint32_t cfg_nvmet_mrq; |
780 | uint32_t cfg_nvmet_mrq_post; | ||
781 | uint32_t cfg_enable_nvmet; | 787 | uint32_t cfg_enable_nvmet; |
782 | uint32_t cfg_nvme_enable_fb; | 788 | uint32_t cfg_nvme_enable_fb; |
783 | uint32_t cfg_nvmet_fb_size; | 789 | uint32_t cfg_nvmet_fb_size; |
@@ -943,6 +949,7 @@ struct lpfc_hba { | |||
943 | struct pci_pool *lpfc_mbuf_pool; | 949 | struct pci_pool *lpfc_mbuf_pool; |
944 | struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ | 950 | struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ |
945 | struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ | 951 | struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ |
952 | struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */ | ||
946 | struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ | 953 | struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ |
947 | struct pci_pool *txrdy_payload_pool; | 954 | struct pci_pool *txrdy_payload_pool; |
948 | struct lpfc_dma_pool lpfc_mbuf_safety_pool; | 955 | struct lpfc_dma_pool lpfc_mbuf_safety_pool; |
@@ -1228,7 +1235,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) | |||
1228 | static inline struct lpfc_sli_ring * | 1235 | static inline struct lpfc_sli_ring * |
1229 | lpfc_phba_elsring(struct lpfc_hba *phba) | 1236 | lpfc_phba_elsring(struct lpfc_hba *phba) |
1230 | { | 1237 | { |
1231 | if (phba->sli_rev == LPFC_SLI_REV4) | 1238 | if (phba->sli_rev == LPFC_SLI_REV4) { |
1232 | return phba->sli4_hba.els_wq->pring; | 1239 | if (phba->sli4_hba.els_wq) |
1240 | return phba->sli4_hba.els_wq->pring; | ||
1241 | else | ||
1242 | return NULL; | ||
1243 | } | ||
1233 | return &phba->sli.sli3_ring[LPFC_ELS_RING]; | 1244 | return &phba->sli.sli3_ring[LPFC_ELS_RING]; |
1234 | } | 1245 | } |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4830370bfab1..bb2d9e238225 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -60,9 +60,9 @@ | |||
60 | #define LPFC_MIN_DEVLOSS_TMO 1 | 60 | #define LPFC_MIN_DEVLOSS_TMO 1 |
61 | #define LPFC_MAX_DEVLOSS_TMO 255 | 61 | #define LPFC_MAX_DEVLOSS_TMO 255 |
62 | 62 | ||
63 | #define LPFC_DEF_MRQ_POST 256 | 63 | #define LPFC_DEF_MRQ_POST 512 |
64 | #define LPFC_MIN_MRQ_POST 32 | 64 | #define LPFC_MIN_MRQ_POST 512 |
65 | #define LPFC_MAX_MRQ_POST 512 | 65 | #define LPFC_MAX_MRQ_POST 2048 |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * Write key size should be multiple of 4. If write key is changed | 68 | * Write key size should be multiple of 4. If write key is changed |
@@ -205,8 +205,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
205 | atomic_read(&tgtp->xmt_ls_rsp_error)); | 205 | atomic_read(&tgtp->xmt_ls_rsp_error)); |
206 | 206 | ||
207 | len += snprintf(buf+len, PAGE_SIZE-len, | 207 | len += snprintf(buf+len, PAGE_SIZE-len, |
208 | "FCP: Rcv %08x Drop %08x\n", | 208 | "FCP: Rcv %08x Release %08x Drop %08x\n", |
209 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 209 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
210 | atomic_read(&tgtp->xmt_fcp_release), | ||
210 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | 211 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); |
211 | 212 | ||
212 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != | 213 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != |
@@ -218,15 +219,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
218 | } | 219 | } |
219 | 220 | ||
220 | len += snprintf(buf+len, PAGE_SIZE-len, | 221 | len += snprintf(buf+len, PAGE_SIZE-len, |
221 | "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n", | 222 | "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " |
223 | "drop %08x\n", | ||
222 | atomic_read(&tgtp->xmt_fcp_read), | 224 | atomic_read(&tgtp->xmt_fcp_read), |
223 | atomic_read(&tgtp->xmt_fcp_read_rsp), | 225 | atomic_read(&tgtp->xmt_fcp_read_rsp), |
224 | atomic_read(&tgtp->xmt_fcp_write), | 226 | atomic_read(&tgtp->xmt_fcp_write), |
225 | atomic_read(&tgtp->xmt_fcp_rsp)); | 227 | atomic_read(&tgtp->xmt_fcp_rsp), |
226 | |||
227 | len += snprintf(buf+len, PAGE_SIZE-len, | ||
228 | "FCP Rsp: abort %08x drop %08x\n", | ||
229 | atomic_read(&tgtp->xmt_fcp_abort), | ||
230 | atomic_read(&tgtp->xmt_fcp_drop)); | 228 | atomic_read(&tgtp->xmt_fcp_drop)); |
231 | 229 | ||
232 | len += snprintf(buf+len, PAGE_SIZE-len, | 230 | len += snprintf(buf+len, PAGE_SIZE-len, |
@@ -236,10 +234,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
236 | atomic_read(&tgtp->xmt_fcp_rsp_drop)); | 234 | atomic_read(&tgtp->xmt_fcp_rsp_drop)); |
237 | 235 | ||
238 | len += snprintf(buf+len, PAGE_SIZE-len, | 236 | len += snprintf(buf+len, PAGE_SIZE-len, |
239 | "ABORT: Xmt %08x Err %08x Cmpl %08x", | 237 | "ABORT: Xmt %08x Cmpl %08x\n", |
238 | atomic_read(&tgtp->xmt_fcp_abort), | ||
239 | atomic_read(&tgtp->xmt_fcp_abort_cmpl)); | ||
240 | |||
241 | len += snprintf(buf + len, PAGE_SIZE - len, | ||
242 | "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x", | ||
243 | atomic_read(&tgtp->xmt_abort_sol), | ||
244 | atomic_read(&tgtp->xmt_abort_unsol), | ||
240 | atomic_read(&tgtp->xmt_abort_rsp), | 245 | atomic_read(&tgtp->xmt_abort_rsp), |
241 | atomic_read(&tgtp->xmt_abort_rsp_error), | 246 | atomic_read(&tgtp->xmt_abort_rsp_error)); |
242 | atomic_read(&tgtp->xmt_abort_cmpl)); | 247 | |
248 | len += snprintf(buf + len, PAGE_SIZE - len, | ||
249 | "IO_CTX: %08x outstanding %08x total %x", | ||
250 | phba->sli4_hba.nvmet_ctx_cnt, | ||
251 | phba->sli4_hba.nvmet_io_wait_cnt, | ||
252 | phba->sli4_hba.nvmet_io_wait_total); | ||
243 | 253 | ||
244 | len += snprintf(buf+len, PAGE_SIZE-len, "\n"); | 254 | len += snprintf(buf+len, PAGE_SIZE-len, "\n"); |
245 | return len; | 255 | return len; |
@@ -3312,14 +3322,6 @@ LPFC_ATTR_R(nvmet_mrq, | |||
3312 | "Specify number of RQ pairs for processing NVMET cmds"); | 3322 | "Specify number of RQ pairs for processing NVMET cmds"); |
3313 | 3323 | ||
3314 | /* | 3324 | /* |
3315 | * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ | ||
3316 | * | ||
3317 | */ | ||
3318 | LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST, | ||
3319 | LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST, | ||
3320 | "Specify number of buffers to post on every MRQ"); | ||
3321 | |||
3322 | /* | ||
3323 | * lpfc_enable_fc4_type: Defines what FC4 types are supported. | 3325 | * lpfc_enable_fc4_type: Defines what FC4 types are supported. |
3324 | * Supported Values: 1 - register just FCP | 3326 | * Supported Values: 1 - register just FCP |
3325 | * 3 - register both FCP and NVME | 3327 | * 3 - register both FCP and NVME |
@@ -5154,7 +5156,6 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
5154 | &dev_attr_lpfc_suppress_rsp, | 5156 | &dev_attr_lpfc_suppress_rsp, |
5155 | &dev_attr_lpfc_nvme_io_channel, | 5157 | &dev_attr_lpfc_nvme_io_channel, |
5156 | &dev_attr_lpfc_nvmet_mrq, | 5158 | &dev_attr_lpfc_nvmet_mrq, |
5157 | &dev_attr_lpfc_nvmet_mrq_post, | ||
5158 | &dev_attr_lpfc_nvme_enable_fb, | 5159 | &dev_attr_lpfc_nvme_enable_fb, |
5159 | &dev_attr_lpfc_nvmet_fb_size, | 5160 | &dev_attr_lpfc_nvmet_fb_size, |
5160 | &dev_attr_lpfc_enable_bg, | 5161 | &dev_attr_lpfc_enable_bg, |
@@ -6194,7 +6195,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
6194 | 6195 | ||
6195 | lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); | 6196 | lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); |
6196 | lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); | 6197 | lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); |
6197 | lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post); | ||
6198 | 6198 | ||
6199 | /* Initialize first burst. Target vs Initiator are different. */ | 6199 | /* Initialize first burst. Target vs Initiator are different. */ |
6200 | lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); | 6200 | lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); |
@@ -6291,7 +6291,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) | |||
6291 | /* Not NVME Target mode. Turn off Target parameters. */ | 6291 | /* Not NVME Target mode. Turn off Target parameters. */ |
6292 | phba->nvmet_support = 0; | 6292 | phba->nvmet_support = 0; |
6293 | phba->cfg_nvmet_mrq = 0; | 6293 | phba->cfg_nvmet_mrq = 0; |
6294 | phba->cfg_nvmet_mrq_post = 0; | ||
6295 | phba->cfg_nvmet_fb_size = 0; | 6294 | phba->cfg_nvmet_fb_size = 0; |
6296 | } | 6295 | } |
6297 | 6296 | ||
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 944b32ca4931..8912767e7bc8 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -75,6 +75,10 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
75 | void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); | 75 | void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); |
76 | void lpfc_retry_pport_discovery(struct lpfc_hba *); | 76 | void lpfc_retry_pport_discovery(struct lpfc_hba *); |
77 | void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t); | 77 | void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t); |
78 | int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt); | ||
79 | void lpfc_free_iocb_list(struct lpfc_hba *phba); | ||
80 | int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, | ||
81 | struct lpfc_queue *drq, int count, int idx); | ||
78 | 82 | ||
79 | void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); | 83 | void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); |
80 | void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 84 | void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
@@ -246,16 +250,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); | |||
246 | void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); | 250 | void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); |
247 | struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); | 251 | struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); |
248 | void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); | 252 | void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); |
249 | void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, | 253 | void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, |
250 | struct lpfc_dmabuf *mp); | 254 | struct lpfc_nvmet_ctxbuf *ctxp); |
251 | int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, | 255 | int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, |
252 | struct fc_frame_header *fc_hdr); | 256 | struct fc_frame_header *fc_hdr); |
253 | void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, | 257 | void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, |
254 | uint16_t); | 258 | uint16_t); |
255 | int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, | 259 | int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, |
256 | struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe); | 260 | struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe); |
257 | int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq, | ||
258 | struct lpfc_queue *dq, int count); | ||
259 | int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq); | 261 | int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq); |
260 | void lpfc_unregister_fcf(struct lpfc_hba *); | 262 | void lpfc_unregister_fcf(struct lpfc_hba *); |
261 | void lpfc_unregister_fcf_rescan(struct lpfc_hba *); | 263 | void lpfc_unregister_fcf_rescan(struct lpfc_hba *); |
@@ -271,6 +273,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); | |||
271 | void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); | 273 | void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); |
272 | 274 | ||
273 | int lpfc_mem_alloc(struct lpfc_hba *, int align); | 275 | int lpfc_mem_alloc(struct lpfc_hba *, int align); |
276 | int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba); | ||
274 | int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *); | 277 | int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *); |
275 | void lpfc_mem_free(struct lpfc_hba *); | 278 | void lpfc_mem_free(struct lpfc_hba *); |
276 | void lpfc_mem_free_all(struct lpfc_hba *); | 279 | void lpfc_mem_free_all(struct lpfc_hba *); |
@@ -294,6 +297,7 @@ int lpfc_selective_reset(struct lpfc_hba *); | |||
294 | void lpfc_reset_barrier(struct lpfc_hba *); | 297 | void lpfc_reset_barrier(struct lpfc_hba *); |
295 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); | 298 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); |
296 | int lpfc_sli_brdkill(struct lpfc_hba *); | 299 | int lpfc_sli_brdkill(struct lpfc_hba *); |
300 | int lpfc_sli_chipset_init(struct lpfc_hba *phba); | ||
297 | int lpfc_sli_brdreset(struct lpfc_hba *); | 301 | int lpfc_sli_brdreset(struct lpfc_hba *); |
298 | int lpfc_sli_brdrestart(struct lpfc_hba *); | 302 | int lpfc_sli_brdrestart(struct lpfc_hba *); |
299 | int lpfc_sli_hba_setup(struct lpfc_hba *); | 303 | int lpfc_sli_hba_setup(struct lpfc_hba *); |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 1487406aea77..f2cd19c6c2df 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -630,7 +630,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, | |||
630 | NLP_EVT_DEVICE_RECOVERY); | 630 | NLP_EVT_DEVICE_RECOVERY); |
631 | spin_lock_irq(shost->host_lock); | 631 | spin_lock_irq(shost->host_lock); |
632 | ndlp->nlp_flag &= ~NLP_NVMET_RECOV; | 632 | ndlp->nlp_flag &= ~NLP_NVMET_RECOV; |
633 | spin_lock_irq(shost->host_lock); | 633 | spin_unlock_irq(shost->host_lock); |
634 | } | 634 | } |
635 | } | 635 | } |
636 | 636 | ||
@@ -2092,6 +2092,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, | |||
2092 | 2092 | ||
2093 | ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ | 2093 | ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ |
2094 | ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ | 2094 | ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ |
2095 | ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */ | ||
2095 | ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ | 2096 | ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ |
2096 | size = FOURBYTES + 32; | 2097 | size = FOURBYTES + 32; |
2097 | ad->AttrLen = cpu_to_be16(size); | 2098 | ad->AttrLen = cpu_to_be16(size); |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index fce549a91911..4bcb92c844ca 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
@@ -798,21 +798,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) | |||
798 | atomic_read(&tgtp->xmt_fcp_rsp)); | 798 | atomic_read(&tgtp->xmt_fcp_rsp)); |
799 | 799 | ||
800 | len += snprintf(buf + len, size - len, | 800 | len += snprintf(buf + len, size - len, |
801 | "FCP Rsp: abort %08x drop %08x\n", | ||
802 | atomic_read(&tgtp->xmt_fcp_abort), | ||
803 | atomic_read(&tgtp->xmt_fcp_drop)); | ||
804 | |||
805 | len += snprintf(buf + len, size - len, | ||
806 | "FCP Rsp Cmpl: %08x err %08x drop %08x\n", | 801 | "FCP Rsp Cmpl: %08x err %08x drop %08x\n", |
807 | atomic_read(&tgtp->xmt_fcp_rsp_cmpl), | 802 | atomic_read(&tgtp->xmt_fcp_rsp_cmpl), |
808 | atomic_read(&tgtp->xmt_fcp_rsp_error), | 803 | atomic_read(&tgtp->xmt_fcp_rsp_error), |
809 | atomic_read(&tgtp->xmt_fcp_rsp_drop)); | 804 | atomic_read(&tgtp->xmt_fcp_rsp_drop)); |
810 | 805 | ||
811 | len += snprintf(buf + len, size - len, | 806 | len += snprintf(buf + len, size - len, |
812 | "ABORT: Xmt %08x Err %08x Cmpl %08x", | 807 | "ABORT: Xmt %08x Cmpl %08x\n", |
808 | atomic_read(&tgtp->xmt_fcp_abort), | ||
809 | atomic_read(&tgtp->xmt_fcp_abort_cmpl)); | ||
810 | |||
811 | len += snprintf(buf + len, size - len, | ||
812 | "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x", | ||
813 | atomic_read(&tgtp->xmt_abort_sol), | ||
814 | atomic_read(&tgtp->xmt_abort_unsol), | ||
813 | atomic_read(&tgtp->xmt_abort_rsp), | 815 | atomic_read(&tgtp->xmt_abort_rsp), |
814 | atomic_read(&tgtp->xmt_abort_rsp_error), | 816 | atomic_read(&tgtp->xmt_abort_rsp_error)); |
815 | atomic_read(&tgtp->xmt_abort_cmpl)); | ||
816 | 817 | ||
817 | len += snprintf(buf + len, size - len, "\n"); | 818 | len += snprintf(buf + len, size - len, "\n"); |
818 | 819 | ||
@@ -841,6 +842,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) | |||
841 | } | 842 | } |
842 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | 843 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
843 | } | 844 | } |
845 | |||
846 | len += snprintf(buf + len, size - len, | ||
847 | "IO_CTX: %08x outstanding %08x total %08x\n", | ||
848 | phba->sli4_hba.nvmet_ctx_cnt, | ||
849 | phba->sli4_hba.nvmet_io_wait_cnt, | ||
850 | phba->sli4_hba.nvmet_io_wait_total); | ||
844 | } else { | 851 | } else { |
845 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) | 852 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) |
846 | return len; | 853 | return len; |
@@ -1959,6 +1966,7 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, | |||
1959 | atomic_set(&tgtp->rcv_ls_req_out, 0); | 1966 | atomic_set(&tgtp->rcv_ls_req_out, 0); |
1960 | atomic_set(&tgtp->rcv_ls_req_drop, 0); | 1967 | atomic_set(&tgtp->rcv_ls_req_drop, 0); |
1961 | atomic_set(&tgtp->xmt_ls_abort, 0); | 1968 | atomic_set(&tgtp->xmt_ls_abort, 0); |
1969 | atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); | ||
1962 | atomic_set(&tgtp->xmt_ls_rsp, 0); | 1970 | atomic_set(&tgtp->xmt_ls_rsp, 0); |
1963 | atomic_set(&tgtp->xmt_ls_drop, 0); | 1971 | atomic_set(&tgtp->xmt_ls_drop, 0); |
1964 | atomic_set(&tgtp->xmt_ls_rsp_error, 0); | 1972 | atomic_set(&tgtp->xmt_ls_rsp_error, 0); |
@@ -1967,19 +1975,22 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, | |||
1967 | atomic_set(&tgtp->rcv_fcp_cmd_in, 0); | 1975 | atomic_set(&tgtp->rcv_fcp_cmd_in, 0); |
1968 | atomic_set(&tgtp->rcv_fcp_cmd_out, 0); | 1976 | atomic_set(&tgtp->rcv_fcp_cmd_out, 0); |
1969 | atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); | 1977 | atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); |
1970 | atomic_set(&tgtp->xmt_fcp_abort, 0); | ||
1971 | atomic_set(&tgtp->xmt_fcp_drop, 0); | 1978 | atomic_set(&tgtp->xmt_fcp_drop, 0); |
1972 | atomic_set(&tgtp->xmt_fcp_read_rsp, 0); | 1979 | atomic_set(&tgtp->xmt_fcp_read_rsp, 0); |
1973 | atomic_set(&tgtp->xmt_fcp_read, 0); | 1980 | atomic_set(&tgtp->xmt_fcp_read, 0); |
1974 | atomic_set(&tgtp->xmt_fcp_write, 0); | 1981 | atomic_set(&tgtp->xmt_fcp_write, 0); |
1975 | atomic_set(&tgtp->xmt_fcp_rsp, 0); | 1982 | atomic_set(&tgtp->xmt_fcp_rsp, 0); |
1983 | atomic_set(&tgtp->xmt_fcp_release, 0); | ||
1976 | atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); | 1984 | atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); |
1977 | atomic_set(&tgtp->xmt_fcp_rsp_error, 0); | 1985 | atomic_set(&tgtp->xmt_fcp_rsp_error, 0); |
1978 | atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); | 1986 | atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); |
1979 | 1987 | ||
1988 | atomic_set(&tgtp->xmt_fcp_abort, 0); | ||
1989 | atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); | ||
1990 | atomic_set(&tgtp->xmt_abort_sol, 0); | ||
1991 | atomic_set(&tgtp->xmt_abort_unsol, 0); | ||
1980 | atomic_set(&tgtp->xmt_abort_rsp, 0); | 1992 | atomic_set(&tgtp->xmt_abort_rsp, 0); |
1981 | atomic_set(&tgtp->xmt_abort_rsp_error, 0); | 1993 | atomic_set(&tgtp->xmt_abort_rsp_error, 0); |
1982 | atomic_set(&tgtp->xmt_abort_cmpl, 0); | ||
1983 | } | 1994 | } |
1984 | return nbytes; | 1995 | return nbytes; |
1985 | } | 1996 | } |
@@ -3070,11 +3081,11 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype, | |||
3070 | qp->assoc_qid, qp->q_cnt_1, | 3081 | qp->assoc_qid, qp->q_cnt_1, |
3071 | (unsigned long long)qp->q_cnt_4); | 3082 | (unsigned long long)qp->q_cnt_4); |
3072 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, | 3083 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, |
3073 | "\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " | 3084 | "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " |
3074 | "HOST-IDX[%04d], PORT-IDX[%04d]", | 3085 | "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", |
3075 | qp->queue_id, qp->entry_count, | 3086 | qp->queue_id, qp->entry_count, |
3076 | qp->entry_size, qp->host_index, | 3087 | qp->entry_size, qp->host_index, |
3077 | qp->hba_index); | 3088 | qp->hba_index, qp->entry_repost); |
3078 | len += snprintf(pbuffer + len, | 3089 | len += snprintf(pbuffer + len, |
3079 | LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); | 3090 | LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); |
3080 | return len; | 3091 | return len; |
@@ -3121,11 +3132,11 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype, | |||
3121 | qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, | 3132 | qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, |
3122 | qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); | 3133 | qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); |
3123 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, | 3134 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, |
3124 | "\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " | 3135 | "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " |
3125 | "HOST-IDX[%04d], PORT-IDX[%04d]", | 3136 | "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", |
3126 | qp->queue_id, qp->entry_count, | 3137 | qp->queue_id, qp->entry_count, |
3127 | qp->entry_size, qp->host_index, | 3138 | qp->entry_size, qp->host_index, |
3128 | qp->hba_index); | 3139 | qp->hba_index, qp->entry_repost); |
3129 | 3140 | ||
3130 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); | 3141 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); |
3131 | 3142 | ||
@@ -3143,20 +3154,20 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp, | |||
3143 | "\t\t%s RQ info: ", rqtype); | 3154 | "\t\t%s RQ info: ", rqtype); |
3144 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, | 3155 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, |
3145 | "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " | 3156 | "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " |
3146 | "trunc:x%x rcv:x%llx]\n", | 3157 | "posted:x%x rcv:x%llx]\n", |
3147 | qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, | 3158 | qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, |
3148 | qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); | 3159 | qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); |
3149 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, | 3160 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, |
3150 | "\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " | 3161 | "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " |
3151 | "HOST-IDX[%04d], PORT-IDX[%04d]\n", | 3162 | "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n", |
3152 | qp->queue_id, qp->entry_count, qp->entry_size, | 3163 | qp->queue_id, qp->entry_count, qp->entry_size, |
3153 | qp->host_index, qp->hba_index); | 3164 | qp->host_index, qp->hba_index, qp->entry_repost); |
3154 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, | 3165 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, |
3155 | "\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " | 3166 | "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " |
3156 | "HOST-IDX[%04d], PORT-IDX[%04d]\n", | 3167 | "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n", |
3157 | datqp->queue_id, datqp->entry_count, | 3168 | datqp->queue_id, datqp->entry_count, |
3158 | datqp->entry_size, datqp->host_index, | 3169 | datqp->entry_size, datqp->host_index, |
3159 | datqp->hba_index); | 3170 | datqp->hba_index, datqp->entry_repost); |
3160 | return len; | 3171 | return len; |
3161 | } | 3172 | } |
3162 | 3173 | ||
@@ -3242,10 +3253,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, | |||
3242 | eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, | 3253 | eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, |
3243 | (unsigned long long)qp->q_cnt_4); | 3254 | (unsigned long long)qp->q_cnt_4); |
3244 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, | 3255 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, |
3245 | "EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " | 3256 | "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " |
3246 | "HOST-IDX[%04d], PORT-IDX[%04d]", | 3257 | "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", |
3247 | qp->queue_id, qp->entry_count, qp->entry_size, | 3258 | qp->queue_id, qp->entry_count, qp->entry_size, |
3248 | qp->host_index, qp->hba_index); | 3259 | qp->host_index, qp->hba_index, qp->entry_repost); |
3249 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); | 3260 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); |
3250 | 3261 | ||
3251 | return len; | 3262 | return len; |
@@ -5855,8 +5866,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) | |||
5855 | atomic_dec(&lpfc_debugfs_hba_count); | 5866 | atomic_dec(&lpfc_debugfs_hba_count); |
5856 | } | 5867 | } |
5857 | 5868 | ||
5858 | debugfs_remove(lpfc_debugfs_root); /* lpfc */ | 5869 | if (atomic_read(&lpfc_debugfs_hba_count) == 0) { |
5859 | lpfc_debugfs_root = NULL; | 5870 | debugfs_remove(lpfc_debugfs_root); /* lpfc */ |
5871 | lpfc_debugfs_root = NULL; | ||
5872 | } | ||
5860 | } | 5873 | } |
5861 | #endif | 5874 | #endif |
5862 | return; | 5875 | return; |
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 9d5a379f4b15..094c97b9e5f7 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h | |||
@@ -90,6 +90,7 @@ struct lpfc_nodelist { | |||
90 | #define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */ | 90 | #define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */ |
91 | #define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */ | 91 | #define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */ |
92 | #define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */ | 92 | #define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */ |
93 | #define NLP_NVME_DISCOVERY 0x80 /* entry has NVME disc srvc */ | ||
93 | 94 | ||
94 | uint16_t nlp_fc4_type; /* FC types node supports. */ | 95 | uint16_t nlp_fc4_type; /* FC types node supports. */ |
95 | /* Assigned from GID_FF, only | 96 | /* Assigned from GID_FF, only |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 67827e397431..8e532b39ae93 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -1047,6 +1047,13 @@ stop_rr_fcf_flogi: | |||
1047 | irsp->ulpStatus, irsp->un.ulpWord[4], | 1047 | irsp->ulpStatus, irsp->un.ulpWord[4], |
1048 | irsp->ulpTimeout); | 1048 | irsp->ulpTimeout); |
1049 | 1049 | ||
1050 | |||
1051 | /* If this is not a loop open failure, bail out */ | ||
1052 | if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && | ||
1053 | ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == | ||
1054 | IOERR_LOOP_OPEN_FAILURE))) | ||
1055 | goto flogifail; | ||
1056 | |||
1050 | /* FLOGI failed, so there is no fabric */ | 1057 | /* FLOGI failed, so there is no fabric */ |
1051 | spin_lock_irq(shost->host_lock); | 1058 | spin_lock_irq(shost->host_lock); |
1052 | vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); | 1059 | vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); |
@@ -2077,16 +2084,19 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2077 | 2084 | ||
2078 | if (irsp->ulpStatus) { | 2085 | if (irsp->ulpStatus) { |
2079 | /* Check for retry */ | 2086 | /* Check for retry */ |
2087 | ndlp->fc4_prli_sent--; | ||
2080 | if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { | 2088 | if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { |
2081 | /* ELS command is being retried */ | 2089 | /* ELS command is being retried */ |
2082 | ndlp->fc4_prli_sent--; | ||
2083 | goto out; | 2090 | goto out; |
2084 | } | 2091 | } |
2092 | |||
2085 | /* PRLI failed */ | 2093 | /* PRLI failed */ |
2086 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 2094 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
2087 | "2754 PRLI failure DID:%06X Status:x%x/x%x\n", | 2095 | "2754 PRLI failure DID:%06X Status:x%x/x%x, " |
2096 | "data: x%x\n", | ||
2088 | ndlp->nlp_DID, irsp->ulpStatus, | 2097 | ndlp->nlp_DID, irsp->ulpStatus, |
2089 | irsp->un.ulpWord[4]); | 2098 | irsp->un.ulpWord[4], ndlp->fc4_prli_sent); |
2099 | |||
2090 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ | 2100 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
2091 | if (lpfc_error_lost_link(irsp)) | 2101 | if (lpfc_error_lost_link(irsp)) |
2092 | goto out; | 2102 | goto out; |
@@ -7441,6 +7451,13 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) | |||
7441 | */ | 7451 | */ |
7442 | spin_lock_irq(&phba->hbalock); | 7452 | spin_lock_irq(&phba->hbalock); |
7443 | pring = lpfc_phba_elsring(phba); | 7453 | pring = lpfc_phba_elsring(phba); |
7454 | |||
7455 | /* Bail out if we've no ELS wq, like in PCI error recovery case. */ | ||
7456 | if (unlikely(!pring)) { | ||
7457 | spin_unlock_irq(&phba->hbalock); | ||
7458 | return; | ||
7459 | } | ||
7460 | |||
7444 | if (phba->sli_rev == LPFC_SLI_REV4) | 7461 | if (phba->sli_rev == LPFC_SLI_REV4) |
7445 | spin_lock(&pring->ring_lock); | 7462 | spin_lock(&pring->ring_lock); |
7446 | 7463 | ||
@@ -8667,7 +8684,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
8667 | lpfc_do_scr_ns_plogi(phba, vport); | 8684 | lpfc_do_scr_ns_plogi(phba, vport); |
8668 | goto out; | 8685 | goto out; |
8669 | fdisc_failed: | 8686 | fdisc_failed: |
8670 | if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS) | 8687 | if (vport->fc_vport && |
8688 | (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) | ||
8671 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 8689 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
8672 | /* Cancel discovery timer */ | 8690 | /* Cancel discovery timer */ |
8673 | lpfc_can_disctmo(vport); | 8691 | lpfc_can_disctmo(vport); |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 0482c5580331..3ffcd9215ca8 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -693,15 +693,16 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
693 | pring = lpfc_phba_elsring(phba); | 693 | pring = lpfc_phba_elsring(phba); |
694 | status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); | 694 | status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); |
695 | status >>= (4*LPFC_ELS_RING); | 695 | status >>= (4*LPFC_ELS_RING); |
696 | if ((status & HA_RXMASK) || | 696 | if (pring && (status & HA_RXMASK || |
697 | (pring->flag & LPFC_DEFERRED_RING_EVENT) || | 697 | pring->flag & LPFC_DEFERRED_RING_EVENT || |
698 | (phba->hba_flag & HBA_SP_QUEUE_EVT)) { | 698 | phba->hba_flag & HBA_SP_QUEUE_EVT)) { |
699 | if (pring->flag & LPFC_STOP_IOCB_EVENT) { | 699 | if (pring->flag & LPFC_STOP_IOCB_EVENT) { |
700 | pring->flag |= LPFC_DEFERRED_RING_EVENT; | 700 | pring->flag |= LPFC_DEFERRED_RING_EVENT; |
701 | /* Set the lpfc data pending flag */ | 701 | /* Set the lpfc data pending flag */ |
702 | set_bit(LPFC_DATA_READY, &phba->data_flags); | 702 | set_bit(LPFC_DATA_READY, &phba->data_flags); |
703 | } else { | 703 | } else { |
704 | if (phba->link_state >= LPFC_LINK_UP) { | 704 | if (phba->link_state >= LPFC_LINK_UP || |
705 | phba->link_flag & LS_MDS_LOOPBACK) { | ||
705 | pring->flag &= ~LPFC_DEFERRED_RING_EVENT; | 706 | pring->flag &= ~LPFC_DEFERRED_RING_EVENT; |
706 | lpfc_sli_handle_slow_ring_event(phba, pring, | 707 | lpfc_sli_handle_slow_ring_event(phba, pring, |
707 | (status & | 708 | (status & |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1d12f2be36bc..e0a5fce416ae 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -1356,6 +1356,7 @@ struct lpfc_mbx_wq_destroy { | |||
1356 | 1356 | ||
1357 | #define LPFC_HDR_BUF_SIZE 128 | 1357 | #define LPFC_HDR_BUF_SIZE 128 |
1358 | #define LPFC_DATA_BUF_SIZE 2048 | 1358 | #define LPFC_DATA_BUF_SIZE 2048 |
1359 | #define LPFC_NVMET_DATA_BUF_SIZE 128 | ||
1359 | struct rq_context { | 1360 | struct rq_context { |
1360 | uint32_t word0; | 1361 | uint32_t word0; |
1361 | #define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ | 1362 | #define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ |
@@ -4420,6 +4421,19 @@ struct fcp_treceive64_wqe { | |||
4420 | }; | 4421 | }; |
4421 | #define TXRDY_PAYLOAD_LEN 12 | 4422 | #define TXRDY_PAYLOAD_LEN 12 |
4422 | 4423 | ||
4424 | #define CMD_SEND_FRAME 0xE1 | ||
4425 | |||
4426 | struct send_frame_wqe { | ||
4427 | struct ulp_bde64 bde; /* words 0-2 */ | ||
4428 | uint32_t frame_len; /* word 3 */ | ||
4429 | uint32_t fc_hdr_wd0; /* word 4 */ | ||
4430 | uint32_t fc_hdr_wd1; /* word 5 */ | ||
4431 | struct wqe_common wqe_com; /* words 6-11 */ | ||
4432 | uint32_t fc_hdr_wd2; /* word 12 */ | ||
4433 | uint32_t fc_hdr_wd3; /* word 13 */ | ||
4434 | uint32_t fc_hdr_wd4; /* word 14 */ | ||
4435 | uint32_t fc_hdr_wd5; /* word 15 */ | ||
4436 | }; | ||
4423 | 4437 | ||
4424 | union lpfc_wqe { | 4438 | union lpfc_wqe { |
4425 | uint32_t words[16]; | 4439 | uint32_t words[16]; |
@@ -4438,7 +4452,7 @@ union lpfc_wqe { | |||
4438 | struct fcp_trsp64_wqe fcp_trsp; | 4452 | struct fcp_trsp64_wqe fcp_trsp; |
4439 | struct fcp_tsend64_wqe fcp_tsend; | 4453 | struct fcp_tsend64_wqe fcp_tsend; |
4440 | struct fcp_treceive64_wqe fcp_treceive; | 4454 | struct fcp_treceive64_wqe fcp_treceive; |
4441 | 4455 | struct send_frame_wqe send_frame; | |
4442 | }; | 4456 | }; |
4443 | 4457 | ||
4444 | union lpfc_wqe128 { | 4458 | union lpfc_wqe128 { |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 90ae354a9c45..9add9473cae5 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) | |||
1099 | 1099 | ||
1100 | list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { | 1100 | list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { |
1101 | ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); | 1101 | ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); |
1102 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); | 1102 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
1103 | } | 1103 | } |
1104 | } | 1104 | } |
1105 | 1105 | ||
@@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) | |||
3381 | { | 3381 | { |
3382 | struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; | 3382 | struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; |
3383 | uint16_t i, lxri, xri_cnt, els_xri_cnt; | 3383 | uint16_t i, lxri, xri_cnt, els_xri_cnt; |
3384 | uint16_t nvmet_xri_cnt, tot_cnt; | 3384 | uint16_t nvmet_xri_cnt; |
3385 | LIST_HEAD(nvmet_sgl_list); | 3385 | LIST_HEAD(nvmet_sgl_list); |
3386 | int rc; | 3386 | int rc; |
3387 | 3387 | ||
@@ -3389,15 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) | |||
3389 | * update on pci function's nvmet xri-sgl list | 3389 | * update on pci function's nvmet xri-sgl list |
3390 | */ | 3390 | */ |
3391 | els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); | 3391 | els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); |
3392 | nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; | 3392 | |
3393 | tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; | 3393 | /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ |
3394 | if (nvmet_xri_cnt > tot_cnt) { | 3394 | nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; |
3395 | phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq; | ||
3396 | nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; | ||
3397 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
3398 | "6301 NVMET post-sgl count changed to %d\n", | ||
3399 | phba->cfg_nvmet_mrq_post); | ||
3400 | } | ||
3401 | 3395 | ||
3402 | if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { | 3396 | if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { |
3403 | /* els xri-sgl expanded */ | 3397 | /* els xri-sgl expanded */ |
@@ -3602,6 +3596,13 @@ lpfc_get_wwpn(struct lpfc_hba *phba) | |||
3602 | LPFC_MBOXQ_t *mboxq; | 3596 | LPFC_MBOXQ_t *mboxq; |
3603 | MAILBOX_t *mb; | 3597 | MAILBOX_t *mb; |
3604 | 3598 | ||
3599 | if (phba->sli_rev < LPFC_SLI_REV4) { | ||
3600 | /* Reset the port first */ | ||
3601 | lpfc_sli_brdrestart(phba); | ||
3602 | rc = lpfc_sli_chipset_init(phba); | ||
3603 | if (rc) | ||
3604 | return (uint64_t)-1; | ||
3605 | } | ||
3605 | 3606 | ||
3606 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, | 3607 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, |
3607 | GFP_KERNEL); | 3608 | GFP_KERNEL); |
@@ -4539,6 +4540,19 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) | |||
4539 | pmb->vport = phba->pport; | 4540 | pmb->vport = phba->pport; |
4540 | 4541 | ||
4541 | if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { | 4542 | if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { |
4543 | phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); | ||
4544 | |||
4545 | switch (phba->sli4_hba.link_state.status) { | ||
4546 | case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: | ||
4547 | phba->link_flag |= LS_MDS_LINK_DOWN; | ||
4548 | break; | ||
4549 | case LPFC_FC_LA_TYPE_MDS_LOOPBACK: | ||
4550 | phba->link_flag |= LS_MDS_LOOPBACK; | ||
4551 | break; | ||
4552 | default: | ||
4553 | break; | ||
4554 | } | ||
4555 | |||
4542 | /* Parse and translate status field */ | 4556 | /* Parse and translate status field */ |
4543 | mb = &pmb->u.mb; | 4557 | mb = &pmb->u.mb; |
4544 | mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, | 4558 | mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, |
@@ -5823,6 +5837,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
5823 | spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); | 5837 | spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); |
5824 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); | 5838 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); |
5825 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); | 5839 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); |
5840 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list); | ||
5841 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); | ||
5842 | |||
5826 | /* Fast-path XRI aborted CQ Event work queue list */ | 5843 | /* Fast-path XRI aborted CQ Event work queue list */ |
5827 | INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue); | 5844 | INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue); |
5828 | } | 5845 | } |
@@ -5830,6 +5847,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
5830 | /* This abort list used by worker thread */ | 5847 | /* This abort list used by worker thread */ |
5831 | spin_lock_init(&phba->sli4_hba.sgl_list_lock); | 5848 | spin_lock_init(&phba->sli4_hba.sgl_list_lock); |
5832 | spin_lock_init(&phba->sli4_hba.nvmet_io_lock); | 5849 | spin_lock_init(&phba->sli4_hba.nvmet_io_lock); |
5850 | spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); | ||
5833 | 5851 | ||
5834 | /* | 5852 | /* |
5835 | * Initialize driver internal slow-path work queues | 5853 | * Initialize driver internal slow-path work queues |
@@ -5944,16 +5962,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
5944 | for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { | 5962 | for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { |
5945 | if (wwn == lpfc_enable_nvmet[i]) { | 5963 | if (wwn == lpfc_enable_nvmet[i]) { |
5946 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) | 5964 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
5965 | if (lpfc_nvmet_mem_alloc(phba)) | ||
5966 | break; | ||
5967 | |||
5968 | phba->nvmet_support = 1; /* a match */ | ||
5969 | |||
5947 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5970 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
5948 | "6017 NVME Target %016llx\n", | 5971 | "6017 NVME Target %016llx\n", |
5949 | wwn); | 5972 | wwn); |
5950 | phba->nvmet_support = 1; /* a match */ | ||
5951 | #else | 5973 | #else |
5952 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5974 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
5953 | "6021 Can't enable NVME Target." | 5975 | "6021 Can't enable NVME Target." |
5954 | " NVME_TARGET_FC infrastructure" | 5976 | " NVME_TARGET_FC infrastructure" |
5955 | " is not in kernel\n"); | 5977 | " is not in kernel\n"); |
5956 | #endif | 5978 | #endif |
5979 | break; | ||
5957 | } | 5980 | } |
5958 | } | 5981 | } |
5959 | } | 5982 | } |
@@ -6262,7 +6285,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) | |||
6262 | * | 6285 | * |
6263 | * This routine is invoked to free the driver's IOCB list and memory. | 6286 | * This routine is invoked to free the driver's IOCB list and memory. |
6264 | **/ | 6287 | **/ |
6265 | static void | 6288 | void |
6266 | lpfc_free_iocb_list(struct lpfc_hba *phba) | 6289 | lpfc_free_iocb_list(struct lpfc_hba *phba) |
6267 | { | 6290 | { |
6268 | struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; | 6291 | struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; |
@@ -6290,7 +6313,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba) | |||
6290 | * 0 - successful | 6313 | * 0 - successful |
6291 | * other values - error | 6314 | * other values - error |
6292 | **/ | 6315 | **/ |
6293 | static int | 6316 | int |
6294 | lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) | 6317 | lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) |
6295 | { | 6318 | { |
6296 | struct lpfc_iocbq *iocbq_entry = NULL; | 6319 | struct lpfc_iocbq *iocbq_entry = NULL; |
@@ -6518,7 +6541,6 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) | |||
6518 | uint16_t rpi_limit, curr_rpi_range; | 6541 | uint16_t rpi_limit, curr_rpi_range; |
6519 | struct lpfc_dmabuf *dmabuf; | 6542 | struct lpfc_dmabuf *dmabuf; |
6520 | struct lpfc_rpi_hdr *rpi_hdr; | 6543 | struct lpfc_rpi_hdr *rpi_hdr; |
6521 | uint32_t rpi_count; | ||
6522 | 6544 | ||
6523 | /* | 6545 | /* |
6524 | * If the SLI4 port supports extents, posting the rpi header isn't | 6546 | * If the SLI4 port supports extents, posting the rpi header isn't |
@@ -6531,8 +6553,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) | |||
6531 | return NULL; | 6553 | return NULL; |
6532 | 6554 | ||
6533 | /* The limit on the logical index is just the max_rpi count. */ | 6555 | /* The limit on the logical index is just the max_rpi count. */ |
6534 | rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + | 6556 | rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; |
6535 | phba->sli4_hba.max_cfg_param.max_rpi - 1; | ||
6536 | 6557 | ||
6537 | spin_lock_irq(&phba->hbalock); | 6558 | spin_lock_irq(&phba->hbalock); |
6538 | /* | 6559 | /* |
@@ -6543,18 +6564,10 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) | |||
6543 | curr_rpi_range = phba->sli4_hba.next_rpi; | 6564 | curr_rpi_range = phba->sli4_hba.next_rpi; |
6544 | spin_unlock_irq(&phba->hbalock); | 6565 | spin_unlock_irq(&phba->hbalock); |
6545 | 6566 | ||
6546 | /* | 6567 | /* Reached full RPI range */ |
6547 | * The port has a limited number of rpis. The increment here | 6568 | if (curr_rpi_range == rpi_limit) |
6548 | * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value | ||
6549 | * and to allow the full max_rpi range per port. | ||
6550 | */ | ||
6551 | if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) | ||
6552 | rpi_count = rpi_limit - curr_rpi_range; | ||
6553 | else | ||
6554 | rpi_count = LPFC_RPI_HDR_COUNT; | ||
6555 | |||
6556 | if (!rpi_count) | ||
6557 | return NULL; | 6569 | return NULL; |
6570 | |||
6558 | /* | 6571 | /* |
6559 | * First allocate the protocol header region for the port. The | 6572 | * First allocate the protocol header region for the port. The |
6560 | * port expects a 4KB DMA-mapped memory region that is 4K aligned. | 6573 | * port expects a 4KB DMA-mapped memory region that is 4K aligned. |
@@ -6588,13 +6601,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) | |||
6588 | 6601 | ||
6589 | /* The rpi_hdr stores the logical index only. */ | 6602 | /* The rpi_hdr stores the logical index only. */ |
6590 | rpi_hdr->start_rpi = curr_rpi_range; | 6603 | rpi_hdr->start_rpi = curr_rpi_range; |
6604 | rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; | ||
6591 | list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); | 6605 | list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); |
6592 | 6606 | ||
6593 | /* | ||
6594 | * The next_rpi stores the next logical module-64 rpi value used | ||
6595 | * to post physical rpis in subsequent rpi postings. | ||
6596 | */ | ||
6597 | phba->sli4_hba.next_rpi += rpi_count; | ||
6598 | spin_unlock_irq(&phba->hbalock); | 6607 | spin_unlock_irq(&phba->hbalock); |
6599 | return rpi_hdr; | 6608 | return rpi_hdr; |
6600 | 6609 | ||
@@ -8165,7 +8174,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8165 | /* Create NVMET Receive Queue for header */ | 8174 | /* Create NVMET Receive Queue for header */ |
8166 | qdesc = lpfc_sli4_queue_alloc(phba, | 8175 | qdesc = lpfc_sli4_queue_alloc(phba, |
8167 | phba->sli4_hba.rq_esize, | 8176 | phba->sli4_hba.rq_esize, |
8168 | phba->sli4_hba.rq_ecount); | 8177 | LPFC_NVMET_RQE_DEF_COUNT); |
8169 | if (!qdesc) { | 8178 | if (!qdesc) { |
8170 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8179 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
8171 | "3146 Failed allocate " | 8180 | "3146 Failed allocate " |
@@ -8187,7 +8196,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8187 | /* Create NVMET Receive Queue for data */ | 8196 | /* Create NVMET Receive Queue for data */ |
8188 | qdesc = lpfc_sli4_queue_alloc(phba, | 8197 | qdesc = lpfc_sli4_queue_alloc(phba, |
8189 | phba->sli4_hba.rq_esize, | 8198 | phba->sli4_hba.rq_esize, |
8190 | phba->sli4_hba.rq_ecount); | 8199 | LPFC_NVMET_RQE_DEF_COUNT); |
8191 | if (!qdesc) { | 8200 | if (!qdesc) { |
8192 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8201 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
8193 | "3156 Failed allocate " | 8202 | "3156 Failed allocate " |
@@ -8319,46 +8328,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) | |||
8319 | } | 8328 | } |
8320 | 8329 | ||
8321 | int | 8330 | int |
8322 | lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, | ||
8323 | struct lpfc_queue *drq, int count) | ||
8324 | { | ||
8325 | int rc, i; | ||
8326 | struct lpfc_rqe hrqe; | ||
8327 | struct lpfc_rqe drqe; | ||
8328 | struct lpfc_rqb *rqbp; | ||
8329 | struct rqb_dmabuf *rqb_buffer; | ||
8330 | LIST_HEAD(rqb_buf_list); | ||
8331 | |||
8332 | rqbp = hrq->rqbp; | ||
8333 | for (i = 0; i < count; i++) { | ||
8334 | rqb_buffer = (rqbp->rqb_alloc_buffer)(phba); | ||
8335 | if (!rqb_buffer) | ||
8336 | break; | ||
8337 | rqb_buffer->hrq = hrq; | ||
8338 | rqb_buffer->drq = drq; | ||
8339 | list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); | ||
8340 | } | ||
8341 | while (!list_empty(&rqb_buf_list)) { | ||
8342 | list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, | ||
8343 | hbuf.list); | ||
8344 | |||
8345 | hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); | ||
8346 | hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); | ||
8347 | drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); | ||
8348 | drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); | ||
8349 | rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); | ||
8350 | if (rc < 0) { | ||
8351 | (rqbp->rqb_free_buffer)(phba, rqb_buffer); | ||
8352 | } else { | ||
8353 | list_add_tail(&rqb_buffer->hbuf.list, | ||
8354 | &rqbp->rqb_buffer_list); | ||
8355 | rqbp->buffer_count++; | ||
8356 | } | ||
8357 | } | ||
8358 | return 1; | ||
8359 | } | ||
8360 | |||
8361 | int | ||
8362 | lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) | 8331 | lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) |
8363 | { | 8332 | { |
8364 | struct lpfc_rqb *rqbp; | 8333 | struct lpfc_rqb *rqbp; |
@@ -8777,9 +8746,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) | |||
8777 | goto out_destroy; | 8746 | goto out_destroy; |
8778 | } | 8747 | } |
8779 | 8748 | ||
8780 | lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); | ||
8781 | lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); | ||
8782 | |||
8783 | rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, | 8749 | rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, |
8784 | phba->sli4_hba.els_cq, LPFC_USOL); | 8750 | phba->sli4_hba.els_cq, LPFC_USOL); |
8785 | if (rc) { | 8751 | if (rc) { |
@@ -8847,7 +8813,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) | |||
8847 | lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); | 8813 | lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); |
8848 | 8814 | ||
8849 | /* Unset ELS work queue */ | 8815 | /* Unset ELS work queue */ |
8850 | if (phba->sli4_hba.els_cq) | 8816 | if (phba->sli4_hba.els_wq) |
8851 | lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); | 8817 | lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); |
8852 | 8818 | ||
8853 | /* Unset unsolicited receive queue */ | 8819 | /* Unset unsolicited receive queue */ |
@@ -11103,7 +11069,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
11103 | struct lpfc_hba *phba; | 11069 | struct lpfc_hba *phba; |
11104 | struct lpfc_vport *vport = NULL; | 11070 | struct lpfc_vport *vport = NULL; |
11105 | struct Scsi_Host *shost = NULL; | 11071 | struct Scsi_Host *shost = NULL; |
11106 | int error, cnt; | 11072 | int error; |
11107 | uint32_t cfg_mode, intr_mode; | 11073 | uint32_t cfg_mode, intr_mode; |
11108 | 11074 | ||
11109 | /* Allocate memory for HBA structure */ | 11075 | /* Allocate memory for HBA structure */ |
@@ -11137,22 +11103,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
11137 | goto out_unset_pci_mem_s4; | 11103 | goto out_unset_pci_mem_s4; |
11138 | } | 11104 | } |
11139 | 11105 | ||
11140 | cnt = phba->cfg_iocb_cnt * 1024; | ||
11141 | if (phba->nvmet_support) | ||
11142 | cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq; | ||
11143 | |||
11144 | /* Initialize and populate the iocb list per host */ | ||
11145 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
11146 | "2821 initialize iocb list %d total %d\n", | ||
11147 | phba->cfg_iocb_cnt, cnt); | ||
11148 | error = lpfc_init_iocb_list(phba, cnt); | ||
11149 | |||
11150 | if (error) { | ||
11151 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
11152 | "1413 Failed to initialize iocb list.\n"); | ||
11153 | goto out_unset_driver_resource_s4; | ||
11154 | } | ||
11155 | |||
11156 | INIT_LIST_HEAD(&phba->active_rrq_list); | 11106 | INIT_LIST_HEAD(&phba->active_rrq_list); |
11157 | INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); | 11107 | INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); |
11158 | 11108 | ||
@@ -11161,7 +11111,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
11161 | if (error) { | 11111 | if (error) { |
11162 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 11112 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
11163 | "1414 Failed to set up driver resource.\n"); | 11113 | "1414 Failed to set up driver resource.\n"); |
11164 | goto out_free_iocb_list; | 11114 | goto out_unset_driver_resource_s4; |
11165 | } | 11115 | } |
11166 | 11116 | ||
11167 | /* Get the default values for Model Name and Description */ | 11117 | /* Get the default values for Model Name and Description */ |
@@ -11261,8 +11211,6 @@ out_destroy_shost: | |||
11261 | lpfc_destroy_shost(phba); | 11211 | lpfc_destroy_shost(phba); |
11262 | out_unset_driver_resource: | 11212 | out_unset_driver_resource: |
11263 | lpfc_unset_driver_resource_phase2(phba); | 11213 | lpfc_unset_driver_resource_phase2(phba); |
11264 | out_free_iocb_list: | ||
11265 | lpfc_free_iocb_list(phba); | ||
11266 | out_unset_driver_resource_s4: | 11214 | out_unset_driver_resource_s4: |
11267 | lpfc_sli4_driver_resource_unset(phba); | 11215 | lpfc_sli4_driver_resource_unset(phba); |
11268 | out_unset_pci_mem_s4: | 11216 | out_unset_pci_mem_s4: |
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 5986c7957199..fcc05a1517c2 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -214,6 +214,21 @@ fail_free_drb_pool: | |||
214 | return -ENOMEM; | 214 | return -ENOMEM; |
215 | } | 215 | } |
216 | 216 | ||
217 | int | ||
218 | lpfc_nvmet_mem_alloc(struct lpfc_hba *phba) | ||
219 | { | ||
220 | phba->lpfc_nvmet_drb_pool = | ||
221 | pci_pool_create("lpfc_nvmet_drb_pool", | ||
222 | phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE, | ||
223 | SGL_ALIGN_SZ, 0); | ||
224 | if (!phba->lpfc_nvmet_drb_pool) { | ||
225 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
226 | "6024 Can't enable NVME Target - no memory\n"); | ||
227 | return -ENOMEM; | ||
228 | } | ||
229 | return 0; | ||
230 | } | ||
231 | |||
217 | /** | 232 | /** |
218 | * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc | 233 | * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc |
219 | * @phba: HBA to free memory for | 234 | * @phba: HBA to free memory for |
@@ -232,6 +247,9 @@ lpfc_mem_free(struct lpfc_hba *phba) | |||
232 | 247 | ||
233 | /* Free HBQ pools */ | 248 | /* Free HBQ pools */ |
234 | lpfc_sli_hbqbuf_free_all(phba); | 249 | lpfc_sli_hbqbuf_free_all(phba); |
250 | if (phba->lpfc_nvmet_drb_pool) | ||
251 | pci_pool_destroy(phba->lpfc_nvmet_drb_pool); | ||
252 | phba->lpfc_nvmet_drb_pool = NULL; | ||
235 | if (phba->lpfc_drb_pool) | 253 | if (phba->lpfc_drb_pool) |
236 | pci_pool_destroy(phba->lpfc_drb_pool); | 254 | pci_pool_destroy(phba->lpfc_drb_pool); |
237 | phba->lpfc_drb_pool = NULL; | 255 | phba->lpfc_drb_pool = NULL; |
@@ -611,8 +629,6 @@ struct rqb_dmabuf * | |||
611 | lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) | 629 | lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) |
612 | { | 630 | { |
613 | struct rqb_dmabuf *dma_buf; | 631 | struct rqb_dmabuf *dma_buf; |
614 | struct lpfc_iocbq *nvmewqe; | ||
615 | union lpfc_wqe128 *wqe; | ||
616 | 632 | ||
617 | dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL); | 633 | dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL); |
618 | if (!dma_buf) | 634 | if (!dma_buf) |
@@ -624,69 +640,15 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) | |||
624 | kfree(dma_buf); | 640 | kfree(dma_buf); |
625 | return NULL; | 641 | return NULL; |
626 | } | 642 | } |
627 | dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, | 643 | dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool, |
628 | &dma_buf->dbuf.phys); | 644 | GFP_KERNEL, &dma_buf->dbuf.phys); |
629 | if (!dma_buf->dbuf.virt) { | 645 | if (!dma_buf->dbuf.virt) { |
630 | pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, | 646 | pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, |
631 | dma_buf->hbuf.phys); | 647 | dma_buf->hbuf.phys); |
632 | kfree(dma_buf); | 648 | kfree(dma_buf); |
633 | return NULL; | 649 | return NULL; |
634 | } | 650 | } |
635 | dma_buf->total_size = LPFC_DATA_BUF_SIZE; | 651 | dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE; |
636 | |||
637 | dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), | ||
638 | GFP_KERNEL); | ||
639 | if (!dma_buf->context) { | ||
640 | pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, | ||
641 | dma_buf->dbuf.phys); | ||
642 | pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, | ||
643 | dma_buf->hbuf.phys); | ||
644 | kfree(dma_buf); | ||
645 | return NULL; | ||
646 | } | ||
647 | |||
648 | dma_buf->iocbq = lpfc_sli_get_iocbq(phba); | ||
649 | if (!dma_buf->iocbq) { | ||
650 | kfree(dma_buf->context); | ||
651 | pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, | ||
652 | dma_buf->dbuf.phys); | ||
653 | pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, | ||
654 | dma_buf->hbuf.phys); | ||
655 | kfree(dma_buf); | ||
656 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, | ||
657 | "2621 Ran out of nvmet iocb/WQEs\n"); | ||
658 | return NULL; | ||
659 | } | ||
660 | dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET; | ||
661 | nvmewqe = dma_buf->iocbq; | ||
662 | wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; | ||
663 | /* Initialize WQE */ | ||
664 | memset(wqe, 0, sizeof(union lpfc_wqe)); | ||
665 | /* Word 7 */ | ||
666 | bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI); | ||
667 | bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); | ||
668 | bf_set(wqe_pu, &wqe->generic.wqe_com, 1); | ||
669 | /* Word 10 */ | ||
670 | bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); | ||
671 | bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); | ||
672 | bf_set(wqe_qosd, &wqe->generic.wqe_com, 0); | ||
673 | |||
674 | dma_buf->iocbq->context1 = NULL; | ||
675 | spin_lock(&phba->sli4_hba.sgl_list_lock); | ||
676 | dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq); | ||
677 | spin_unlock(&phba->sli4_hba.sgl_list_lock); | ||
678 | if (!dma_buf->sglq) { | ||
679 | lpfc_sli_release_iocbq(phba, dma_buf->iocbq); | ||
680 | kfree(dma_buf->context); | ||
681 | pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, | ||
682 | dma_buf->dbuf.phys); | ||
683 | pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, | ||
684 | dma_buf->hbuf.phys); | ||
685 | kfree(dma_buf); | ||
686 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, | ||
687 | "6132 Ran out of nvmet XRIs\n"); | ||
688 | return NULL; | ||
689 | } | ||
690 | return dma_buf; | 652 | return dma_buf; |
691 | } | 653 | } |
692 | 654 | ||
@@ -705,20 +667,9 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) | |||
705 | void | 667 | void |
706 | lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) | 668 | lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) |
707 | { | 669 | { |
708 | unsigned long flags; | ||
709 | |||
710 | __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag); | ||
711 | dmab->sglq->state = SGL_FREED; | ||
712 | dmab->sglq->ndlp = NULL; | ||
713 | |||
714 | spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags); | ||
715 | list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list); | ||
716 | spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags); | ||
717 | |||
718 | lpfc_sli_release_iocbq(phba, dmab->iocbq); | ||
719 | kfree(dmab->context); | ||
720 | pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); | 670 | pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); |
721 | pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); | 671 | pci_pool_free(phba->lpfc_nvmet_drb_pool, |
672 | dmab->dbuf.virt, dmab->dbuf.phys); | ||
722 | kfree(dmab); | 673 | kfree(dmab); |
723 | } | 674 | } |
724 | 675 | ||
@@ -803,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) | |||
803 | rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); | 754 | rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); |
804 | if (rc < 0) { | 755 | if (rc < 0) { |
805 | (rqbp->rqb_free_buffer)(phba, rqb_entry); | 756 | (rqbp->rqb_free_buffer)(phba, rqb_entry); |
757 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
758 | "6409 Cannot post to RQ %d: %x %x\n", | ||
759 | rqb_entry->hrq->queue_id, | ||
760 | rqb_entry->hrq->host_index, | ||
761 | rqb_entry->hrq->hba_index); | ||
806 | } else { | 762 | } else { |
807 | list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); | 763 | list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); |
808 | rqbp->buffer_count++; | 764 | rqbp->buffer_count++; |
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 8777c2d5f50d..bff3de053df4 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -1944,7 +1944,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1944 | 1944 | ||
1945 | /* Target driver cannot solicit NVME FB. */ | 1945 | /* Target driver cannot solicit NVME FB. */ |
1946 | if (bf_get_be32(prli_tgt, nvpr)) { | 1946 | if (bf_get_be32(prli_tgt, nvpr)) { |
1947 | /* Complete the nvme target roles. The transport | ||
1948 | * needs to know if the rport is capable of | ||
1949 | * discovery in addition to its role. | ||
1950 | */ | ||
1947 | ndlp->nlp_type |= NLP_NVME_TARGET; | 1951 | ndlp->nlp_type |= NLP_NVME_TARGET; |
1952 | if (bf_get_be32(prli_disc, nvpr)) | ||
1953 | ndlp->nlp_type |= NLP_NVME_DISCOVERY; | ||
1948 | if ((bf_get_be32(prli_fba, nvpr) == 1) && | 1954 | if ((bf_get_be32(prli_fba, nvpr) == 1) && |
1949 | (bf_get_be32(prli_fb_sz, nvpr) > 0) && | 1955 | (bf_get_be32(prli_fb_sz, nvpr) > 0) && |
1950 | (phba->cfg_nvme_enable_fb) && | 1956 | (phba->cfg_nvme_enable_fb) && |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 94434e621c33..074a6b5e7763 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c | |||
@@ -142,7 +142,7 @@ out: | |||
142 | } | 142 | } |
143 | 143 | ||
144 | /** | 144 | /** |
145 | * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context | 145 | * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context |
146 | * @phba: HBA buffer is associated with | 146 | * @phba: HBA buffer is associated with |
147 | * @ctxp: context to clean up | 147 | * @ctxp: context to clean up |
148 | * @mp: Buffer to free | 148 | * @mp: Buffer to free |
@@ -155,24 +155,113 @@ out: | |||
155 | * Returns: None | 155 | * Returns: None |
156 | **/ | 156 | **/ |
157 | void | 157 | void |
158 | lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, | 158 | lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) |
159 | struct lpfc_dmabuf *mp) | ||
160 | { | 159 | { |
161 | if (ctxp) { | 160 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
162 | if (ctxp->flag) | 161 | struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; |
163 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | 162 | struct lpfc_nvmet_tgtport *tgtp; |
164 | "6314 rq_post ctx xri x%x flag x%x\n", | 163 | struct fc_frame_header *fc_hdr; |
165 | ctxp->oxid, ctxp->flag); | 164 | struct rqb_dmabuf *nvmebuf; |
166 | 165 | struct lpfc_dmabuf *hbufp; | |
167 | if (ctxp->txrdy) { | 166 | uint32_t *payload; |
168 | pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, | 167 | uint32_t size, oxid, sid, rc; |
169 | ctxp->txrdy_phys); | 168 | unsigned long iflag; |
170 | ctxp->txrdy = NULL; | 169 | |
171 | ctxp->txrdy_phys = 0; | 170 | if (ctxp->txrdy) { |
171 | pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, | ||
172 | ctxp->txrdy_phys); | ||
173 | ctxp->txrdy = NULL; | ||
174 | ctxp->txrdy_phys = 0; | ||
175 | } | ||
176 | ctxp->state = LPFC_NVMET_STE_FREE; | ||
177 | |||
178 | spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); | ||
179 | if (phba->sli4_hba.nvmet_io_wait_cnt) { | ||
180 | hbufp = &nvmebuf->hbuf; | ||
181 | list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list, | ||
182 | nvmebuf, struct rqb_dmabuf, | ||
183 | hbuf.list); | ||
184 | phba->sli4_hba.nvmet_io_wait_cnt--; | ||
185 | spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, | ||
186 | iflag); | ||
187 | |||
188 | fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); | ||
189 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); | ||
190 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | ||
191 | payload = (uint32_t *)(nvmebuf->dbuf.virt); | ||
192 | size = nvmebuf->bytes_recv; | ||
193 | sid = sli4_sid_from_fc_hdr(fc_hdr); | ||
194 | |||
195 | ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; | ||
196 | memset(ctxp, 0, sizeof(ctxp->ctx)); | ||
197 | ctxp->wqeq = NULL; | ||
198 | ctxp->txrdy = NULL; | ||
199 | ctxp->offset = 0; | ||
200 | ctxp->phba = phba; | ||
201 | ctxp->size = size; | ||
202 | ctxp->oxid = oxid; | ||
203 | ctxp->sid = sid; | ||
204 | ctxp->state = LPFC_NVMET_STE_RCV; | ||
205 | ctxp->entry_cnt = 1; | ||
206 | ctxp->flag = 0; | ||
207 | ctxp->ctxbuf = ctx_buf; | ||
208 | spin_lock_init(&ctxp->ctxlock); | ||
209 | |||
210 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | ||
211 | if (phba->ktime_on) { | ||
212 | ctxp->ts_cmd_nvme = ktime_get_ns(); | ||
213 | ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme; | ||
214 | ctxp->ts_nvme_data = 0; | ||
215 | ctxp->ts_data_wqput = 0; | ||
216 | ctxp->ts_isr_data = 0; | ||
217 | ctxp->ts_data_nvme = 0; | ||
218 | ctxp->ts_nvme_status = 0; | ||
219 | ctxp->ts_status_wqput = 0; | ||
220 | ctxp->ts_isr_status = 0; | ||
221 | ctxp->ts_status_nvme = 0; | ||
172 | } | 222 | } |
173 | ctxp->state = LPFC_NVMET_STE_FREE; | 223 | #endif |
224 | atomic_inc(&tgtp->rcv_fcp_cmd_in); | ||
225 | /* | ||
226 | * The calling sequence should be: | ||
227 | * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done | ||
228 | * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. | ||
229 | * When we return from nvmet_fc_rcv_fcp_req, all relevant info | ||
230 | * the NVME command / FC header is stored. | ||
231 | * A buffer has already been reposted for this IO, so just free | ||
232 | * the nvmebuf. | ||
233 | */ | ||
234 | rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, | ||
235 | payload, size); | ||
236 | |||
237 | /* Process FCP command */ | ||
238 | if (rc == 0) { | ||
239 | atomic_inc(&tgtp->rcv_fcp_cmd_out); | ||
240 | nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); | ||
241 | return; | ||
242 | } | ||
243 | |||
244 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); | ||
245 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | ||
246 | "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", | ||
247 | ctxp->oxid, rc, | ||
248 | atomic_read(&tgtp->rcv_fcp_cmd_in), | ||
249 | atomic_read(&tgtp->rcv_fcp_cmd_out), | ||
250 | atomic_read(&tgtp->xmt_fcp_release)); | ||
251 | |||
252 | lpfc_nvmet_defer_release(phba, ctxp); | ||
253 | lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); | ||
254 | nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); | ||
255 | return; | ||
174 | } | 256 | } |
175 | lpfc_rq_buf_free(phba, mp); | 257 | spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); |
258 | |||
259 | spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag); | ||
260 | list_add_tail(&ctx_buf->list, | ||
261 | &phba->sli4_hba.lpfc_nvmet_ctx_list); | ||
262 | phba->sli4_hba.nvmet_ctx_cnt++; | ||
263 | spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag); | ||
264 | #endif | ||
176 | } | 265 | } |
177 | 266 | ||
178 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 267 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
@@ -502,6 +591,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, | |||
502 | "6150 LS Drop IO x%x: Prep\n", | 591 | "6150 LS Drop IO x%x: Prep\n", |
503 | ctxp->oxid); | 592 | ctxp->oxid); |
504 | lpfc_in_buf_free(phba, &nvmebuf->dbuf); | 593 | lpfc_in_buf_free(phba, &nvmebuf->dbuf); |
594 | atomic_inc(&nvmep->xmt_ls_abort); | ||
505 | lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, | 595 | lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, |
506 | ctxp->sid, ctxp->oxid); | 596 | ctxp->sid, ctxp->oxid); |
507 | return -ENOMEM; | 597 | return -ENOMEM; |
@@ -545,6 +635,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, | |||
545 | lpfc_nlp_put(nvmewqeq->context1); | 635 | lpfc_nlp_put(nvmewqeq->context1); |
546 | 636 | ||
547 | lpfc_in_buf_free(phba, &nvmebuf->dbuf); | 637 | lpfc_in_buf_free(phba, &nvmebuf->dbuf); |
638 | atomic_inc(&nvmep->xmt_ls_abort); | ||
548 | lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); | 639 | lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); |
549 | return -ENXIO; | 640 | return -ENXIO; |
550 | } | 641 | } |
@@ -612,9 +703,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, | |||
612 | lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", | 703 | lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", |
613 | ctxp->oxid, rsp->op, rsp->rsplen); | 704 | ctxp->oxid, rsp->op, rsp->rsplen); |
614 | 705 | ||
706 | ctxp->flag |= LPFC_NVMET_IO_INP; | ||
615 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); | 707 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); |
616 | if (rc == WQE_SUCCESS) { | 708 | if (rc == WQE_SUCCESS) { |
617 | ctxp->flag |= LPFC_NVMET_IO_INP; | ||
618 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 709 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
619 | if (!phba->ktime_on) | 710 | if (!phba->ktime_on) |
620 | return 0; | 711 | return 0; |
@@ -692,6 +783,7 @@ static void | |||
692 | lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, | 783 | lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, |
693 | struct nvmefc_tgt_fcp_req *rsp) | 784 | struct nvmefc_tgt_fcp_req *rsp) |
694 | { | 785 | { |
786 | struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; | ||
695 | struct lpfc_nvmet_rcv_ctx *ctxp = | 787 | struct lpfc_nvmet_rcv_ctx *ctxp = |
696 | container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); | 788 | container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); |
697 | struct lpfc_hba *phba = ctxp->phba; | 789 | struct lpfc_hba *phba = ctxp->phba; |
@@ -710,10 +802,12 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, | |||
710 | lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, | 802 | lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, |
711 | ctxp->state, 0); | 803 | ctxp->state, 0); |
712 | 804 | ||
805 | atomic_inc(&lpfc_nvmep->xmt_fcp_release); | ||
806 | |||
713 | if (aborting) | 807 | if (aborting) |
714 | return; | 808 | return; |
715 | 809 | ||
716 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); | 810 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
717 | } | 811 | } |
718 | 812 | ||
719 | static struct nvmet_fc_target_template lpfc_tgttemplate = { | 813 | static struct nvmet_fc_target_template lpfc_tgttemplate = { |
@@ -734,17 +828,128 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { | |||
734 | .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), | 828 | .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), |
735 | }; | 829 | }; |
736 | 830 | ||
831 | void | ||
832 | lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) | ||
833 | { | ||
834 | struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf; | ||
835 | unsigned long flags; | ||
836 | |||
837 | list_for_each_entry_safe( | ||
838 | ctx_buf, next_ctx_buf, | ||
839 | &phba->sli4_hba.lpfc_nvmet_ctx_list, list) { | ||
840 | spin_lock_irqsave( | ||
841 | &phba->sli4_hba.abts_nvme_buf_list_lock, flags); | ||
842 | list_del_init(&ctx_buf->list); | ||
843 | spin_unlock_irqrestore( | ||
844 | &phba->sli4_hba.abts_nvme_buf_list_lock, flags); | ||
845 | __lpfc_clear_active_sglq(phba, | ||
846 | ctx_buf->sglq->sli4_lxritag); | ||
847 | ctx_buf->sglq->state = SGL_FREED; | ||
848 | ctx_buf->sglq->ndlp = NULL; | ||
849 | |||
850 | spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags); | ||
851 | list_add_tail(&ctx_buf->sglq->list, | ||
852 | &phba->sli4_hba.lpfc_nvmet_sgl_list); | ||
853 | spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, | ||
854 | flags); | ||
855 | |||
856 | lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); | ||
857 | kfree(ctx_buf->context); | ||
858 | } | ||
859 | } | ||
860 | |||
861 | int | ||
862 | lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) | ||
863 | { | ||
864 | struct lpfc_nvmet_ctxbuf *ctx_buf; | ||
865 | struct lpfc_iocbq *nvmewqe; | ||
866 | union lpfc_wqe128 *wqe; | ||
867 | int i; | ||
868 | |||
869 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME, | ||
870 | "6403 Allocate NVMET resources for %d XRIs\n", | ||
871 | phba->sli4_hba.nvmet_xri_cnt); | ||
872 | |||
873 | /* For all nvmet xris, allocate resources needed to process a | ||
874 | * received command on a per xri basis. | ||
875 | */ | ||
876 | for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { | ||
877 | ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); | ||
878 | if (!ctx_buf) { | ||
879 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, | ||
880 | "6404 Ran out of memory for NVMET\n"); | ||
881 | return -ENOMEM; | ||
882 | } | ||
883 | |||
884 | ctx_buf->context = kzalloc(sizeof(*ctx_buf->context), | ||
885 | GFP_KERNEL); | ||
886 | if (!ctx_buf->context) { | ||
887 | kfree(ctx_buf); | ||
888 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, | ||
889 | "6405 Ran out of NVMET " | ||
890 | "context memory\n"); | ||
891 | return -ENOMEM; | ||
892 | } | ||
893 | ctx_buf->context->ctxbuf = ctx_buf; | ||
894 | |||
895 | ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); | ||
896 | if (!ctx_buf->iocbq) { | ||
897 | kfree(ctx_buf->context); | ||
898 | kfree(ctx_buf); | ||
899 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, | ||
900 | "6406 Ran out of NVMET iocb/WQEs\n"); | ||
901 | return -ENOMEM; | ||
902 | } | ||
903 | ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET; | ||
904 | nvmewqe = ctx_buf->iocbq; | ||
905 | wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; | ||
906 | /* Initialize WQE */ | ||
907 | memset(wqe, 0, sizeof(union lpfc_wqe)); | ||
908 | /* Word 7 */ | ||
909 | bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI); | ||
910 | bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); | ||
911 | bf_set(wqe_pu, &wqe->generic.wqe_com, 1); | ||
912 | /* Word 10 */ | ||
913 | bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); | ||
914 | bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); | ||
915 | bf_set(wqe_qosd, &wqe->generic.wqe_com, 0); | ||
916 | |||
917 | ctx_buf->iocbq->context1 = NULL; | ||
918 | spin_lock(&phba->sli4_hba.sgl_list_lock); | ||
919 | ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq); | ||
920 | spin_unlock(&phba->sli4_hba.sgl_list_lock); | ||
921 | if (!ctx_buf->sglq) { | ||
922 | lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); | ||
923 | kfree(ctx_buf->context); | ||
924 | kfree(ctx_buf); | ||
925 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, | ||
926 | "6407 Ran out of NVMET XRIs\n"); | ||
927 | return -ENOMEM; | ||
928 | } | ||
929 | spin_lock(&phba->sli4_hba.nvmet_io_lock); | ||
930 | list_add_tail(&ctx_buf->list, | ||
931 | &phba->sli4_hba.lpfc_nvmet_ctx_list); | ||
932 | spin_unlock(&phba->sli4_hba.nvmet_io_lock); | ||
933 | } | ||
934 | phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt; | ||
935 | return 0; | ||
936 | } | ||
937 | |||
737 | int | 938 | int |
738 | lpfc_nvmet_create_targetport(struct lpfc_hba *phba) | 939 | lpfc_nvmet_create_targetport(struct lpfc_hba *phba) |
739 | { | 940 | { |
740 | struct lpfc_vport *vport = phba->pport; | 941 | struct lpfc_vport *vport = phba->pport; |
741 | struct lpfc_nvmet_tgtport *tgtp; | 942 | struct lpfc_nvmet_tgtport *tgtp; |
742 | struct nvmet_fc_port_info pinfo; | 943 | struct nvmet_fc_port_info pinfo; |
743 | int error = 0; | 944 | int error; |
744 | 945 | ||
745 | if (phba->targetport) | 946 | if (phba->targetport) |
746 | return 0; | 947 | return 0; |
747 | 948 | ||
949 | error = lpfc_nvmet_setup_io_context(phba); | ||
950 | if (error) | ||
951 | return error; | ||
952 | |||
748 | memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); | 953 | memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); |
749 | pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); | 954 | pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); |
750 | pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); | 955 | pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); |
@@ -764,7 +969,6 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) | |||
764 | lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; | 969 | lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; |
765 | lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; | 970 | lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; |
766 | lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | | 971 | lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | |
767 | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED | | ||
768 | NVMET_FCTGTFEAT_CMD_IN_ISR | | 972 | NVMET_FCTGTFEAT_CMD_IN_ISR | |
769 | NVMET_FCTGTFEAT_OPDONE_IN_ISR; | 973 | NVMET_FCTGTFEAT_OPDONE_IN_ISR; |
770 | 974 | ||
@@ -773,13 +977,16 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) | |||
773 | &phba->pcidev->dev, | 977 | &phba->pcidev->dev, |
774 | &phba->targetport); | 978 | &phba->targetport); |
775 | #else | 979 | #else |
776 | error = -ENOMEM; | 980 | error = -ENOENT; |
777 | #endif | 981 | #endif |
778 | if (error) { | 982 | if (error) { |
779 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, | 983 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, |
780 | "6025 Cannot register NVME targetport " | 984 | "6025 Cannot register NVME targetport " |
781 | "x%x\n", error); | 985 | "x%x\n", error); |
782 | phba->targetport = NULL; | 986 | phba->targetport = NULL; |
987 | |||
988 | lpfc_nvmet_cleanup_io_context(phba); | ||
989 | |||
783 | } else { | 990 | } else { |
784 | tgtp = (struct lpfc_nvmet_tgtport *) | 991 | tgtp = (struct lpfc_nvmet_tgtport *) |
785 | phba->targetport->private; | 992 | phba->targetport->private; |
@@ -796,6 +1003,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) | |||
796 | atomic_set(&tgtp->rcv_ls_req_out, 0); | 1003 | atomic_set(&tgtp->rcv_ls_req_out, 0); |
797 | atomic_set(&tgtp->rcv_ls_req_drop, 0); | 1004 | atomic_set(&tgtp->rcv_ls_req_drop, 0); |
798 | atomic_set(&tgtp->xmt_ls_abort, 0); | 1005 | atomic_set(&tgtp->xmt_ls_abort, 0); |
1006 | atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); | ||
799 | atomic_set(&tgtp->xmt_ls_rsp, 0); | 1007 | atomic_set(&tgtp->xmt_ls_rsp, 0); |
800 | atomic_set(&tgtp->xmt_ls_drop, 0); | 1008 | atomic_set(&tgtp->xmt_ls_drop, 0); |
801 | atomic_set(&tgtp->xmt_ls_rsp_error, 0); | 1009 | atomic_set(&tgtp->xmt_ls_rsp_error, 0); |
@@ -803,18 +1011,21 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) | |||
803 | atomic_set(&tgtp->rcv_fcp_cmd_in, 0); | 1011 | atomic_set(&tgtp->rcv_fcp_cmd_in, 0); |
804 | atomic_set(&tgtp->rcv_fcp_cmd_out, 0); | 1012 | atomic_set(&tgtp->rcv_fcp_cmd_out, 0); |
805 | atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); | 1013 | atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); |
806 | atomic_set(&tgtp->xmt_fcp_abort, 0); | ||
807 | atomic_set(&tgtp->xmt_fcp_drop, 0); | 1014 | atomic_set(&tgtp->xmt_fcp_drop, 0); |
808 | atomic_set(&tgtp->xmt_fcp_read_rsp, 0); | 1015 | atomic_set(&tgtp->xmt_fcp_read_rsp, 0); |
809 | atomic_set(&tgtp->xmt_fcp_read, 0); | 1016 | atomic_set(&tgtp->xmt_fcp_read, 0); |
810 | atomic_set(&tgtp->xmt_fcp_write, 0); | 1017 | atomic_set(&tgtp->xmt_fcp_write, 0); |
811 | atomic_set(&tgtp->xmt_fcp_rsp, 0); | 1018 | atomic_set(&tgtp->xmt_fcp_rsp, 0); |
1019 | atomic_set(&tgtp->xmt_fcp_release, 0); | ||
812 | atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); | 1020 | atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); |
813 | atomic_set(&tgtp->xmt_fcp_rsp_error, 0); | 1021 | atomic_set(&tgtp->xmt_fcp_rsp_error, 0); |
814 | atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); | 1022 | atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); |
1023 | atomic_set(&tgtp->xmt_fcp_abort, 0); | ||
1024 | atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); | ||
1025 | atomic_set(&tgtp->xmt_abort_unsol, 0); | ||
1026 | atomic_set(&tgtp->xmt_abort_sol, 0); | ||
815 | atomic_set(&tgtp->xmt_abort_rsp, 0); | 1027 | atomic_set(&tgtp->xmt_abort_rsp, 0); |
816 | atomic_set(&tgtp->xmt_abort_rsp_error, 0); | 1028 | atomic_set(&tgtp->xmt_abort_rsp_error, 0); |
817 | atomic_set(&tgtp->xmt_abort_cmpl, 0); | ||
818 | } | 1029 | } |
819 | return error; | 1030 | return error; |
820 | } | 1031 | } |
@@ -865,7 +1076,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, | |||
865 | list_for_each_entry_safe(ctxp, next_ctxp, | 1076 | list_for_each_entry_safe(ctxp, next_ctxp, |
866 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, | 1077 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, |
867 | list) { | 1078 | list) { |
868 | if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) | 1079 | if (ctxp->ctxbuf->sglq->sli4_xritag != xri) |
869 | continue; | 1080 | continue; |
870 | 1081 | ||
871 | /* Check if we already received a free context call | 1082 | /* Check if we already received a free context call |
@@ -886,7 +1097,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, | |||
886 | (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || | 1097 | (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || |
887 | ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { | 1098 | ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { |
888 | lpfc_set_rrq_active(phba, ndlp, | 1099 | lpfc_set_rrq_active(phba, ndlp, |
889 | ctxp->rqb_buffer->sglq->sli4_lxritag, | 1100 | ctxp->ctxbuf->sglq->sli4_lxritag, |
890 | rxid, 1); | 1101 | rxid, 1); |
891 | lpfc_sli4_abts_err_handler(phba, ndlp, axri); | 1102 | lpfc_sli4_abts_err_handler(phba, ndlp, axri); |
892 | } | 1103 | } |
@@ -895,8 +1106,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, | |||
895 | "6318 XB aborted %x flg x%x (%x)\n", | 1106 | "6318 XB aborted %x flg x%x (%x)\n", |
896 | ctxp->oxid, ctxp->flag, released); | 1107 | ctxp->oxid, ctxp->flag, released); |
897 | if (released) | 1108 | if (released) |
898 | lpfc_nvmet_rq_post(phba, ctxp, | 1109 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
899 | &ctxp->rqb_buffer->hbuf); | 1110 | |
900 | if (rrq_empty) | 1111 | if (rrq_empty) |
901 | lpfc_worker_wake_up(phba); | 1112 | lpfc_worker_wake_up(phba); |
902 | return; | 1113 | return; |
@@ -924,7 +1135,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, | |||
924 | list_for_each_entry_safe(ctxp, next_ctxp, | 1135 | list_for_each_entry_safe(ctxp, next_ctxp, |
925 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, | 1136 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, |
926 | list) { | 1137 | list) { |
927 | if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) | 1138 | if (ctxp->ctxbuf->sglq->sli4_xritag != xri) |
928 | continue; | 1139 | continue; |
929 | 1140 | ||
930 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | 1141 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
@@ -976,6 +1187,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) | |||
976 | init_completion(&tgtp->tport_unreg_done); | 1187 | init_completion(&tgtp->tport_unreg_done); |
977 | nvmet_fc_unregister_targetport(phba->targetport); | 1188 | nvmet_fc_unregister_targetport(phba->targetport); |
978 | wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); | 1189 | wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); |
1190 | lpfc_nvmet_cleanup_io_context(phba); | ||
979 | } | 1191 | } |
980 | phba->targetport = NULL; | 1192 | phba->targetport = NULL; |
981 | #endif | 1193 | #endif |
@@ -1011,6 +1223,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
1011 | oxid = 0; | 1223 | oxid = 0; |
1012 | size = 0; | 1224 | size = 0; |
1013 | sid = 0; | 1225 | sid = 0; |
1226 | ctxp = NULL; | ||
1014 | goto dropit; | 1227 | goto dropit; |
1015 | } | 1228 | } |
1016 | 1229 | ||
@@ -1105,39 +1318,71 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, | |||
1105 | struct lpfc_nvmet_rcv_ctx *ctxp; | 1318 | struct lpfc_nvmet_rcv_ctx *ctxp; |
1106 | struct lpfc_nvmet_tgtport *tgtp; | 1319 | struct lpfc_nvmet_tgtport *tgtp; |
1107 | struct fc_frame_header *fc_hdr; | 1320 | struct fc_frame_header *fc_hdr; |
1321 | struct lpfc_nvmet_ctxbuf *ctx_buf; | ||
1108 | uint32_t *payload; | 1322 | uint32_t *payload; |
1109 | uint32_t size, oxid, sid, rc; | 1323 | uint32_t size, oxid, sid, rc, qno; |
1324 | unsigned long iflag; | ||
1110 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 1325 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1111 | uint32_t id; | 1326 | uint32_t id; |
1112 | #endif | 1327 | #endif |
1113 | 1328 | ||
1329 | ctx_buf = NULL; | ||
1114 | if (!nvmebuf || !phba->targetport) { | 1330 | if (!nvmebuf || !phba->targetport) { |
1115 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1331 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
1116 | "6157 FCP Drop IO\n"); | 1332 | "6157 NVMET FCP Drop IO\n"); |
1117 | oxid = 0; | 1333 | oxid = 0; |
1118 | size = 0; | 1334 | size = 0; |
1119 | sid = 0; | 1335 | sid = 0; |
1336 | ctxp = NULL; | ||
1120 | goto dropit; | 1337 | goto dropit; |
1121 | } | 1338 | } |
1122 | 1339 | ||
1340 | spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag); | ||
1341 | if (phba->sli4_hba.nvmet_ctx_cnt) { | ||
1342 | list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list, | ||
1343 | ctx_buf, struct lpfc_nvmet_ctxbuf, list); | ||
1344 | phba->sli4_hba.nvmet_ctx_cnt--; | ||
1345 | } | ||
1346 | spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag); | ||
1123 | 1347 | ||
1124 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | ||
1125 | payload = (uint32_t *)(nvmebuf->dbuf.virt); | ||
1126 | fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); | 1348 | fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); |
1127 | size = nvmebuf->bytes_recv; | ||
1128 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); | 1349 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); |
1129 | sid = sli4_sid_from_fc_hdr(fc_hdr); | 1350 | size = nvmebuf->bytes_recv; |
1130 | 1351 | ||
1131 | ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context; | 1352 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1132 | if (ctxp == NULL) { | 1353 | if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { |
1133 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); | 1354 | id = smp_processor_id(); |
1134 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1355 | if (id < LPFC_CHECK_CPU_CNT) |
1135 | "6158 FCP Drop IO x%x: Alloc\n", | 1356 | phba->cpucheck_rcv_io[id]++; |
1136 | oxid); | 1357 | } |
1137 | lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); | 1358 | #endif |
1138 | /* Cannot send ABTS without context */ | 1359 | |
1360 | lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", | ||
1361 | oxid, size, smp_processor_id()); | ||
1362 | |||
1363 | if (!ctx_buf) { | ||
1364 | /* Queue this NVME IO to process later */ | ||
1365 | spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); | ||
1366 | list_add_tail(&nvmebuf->hbuf.list, | ||
1367 | &phba->sli4_hba.lpfc_nvmet_io_wait_list); | ||
1368 | phba->sli4_hba.nvmet_io_wait_cnt++; | ||
1369 | phba->sli4_hba.nvmet_io_wait_total++; | ||
1370 | spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, | ||
1371 | iflag); | ||
1372 | |||
1373 | /* Post a brand new DMA buffer to RQ */ | ||
1374 | qno = nvmebuf->idx; | ||
1375 | lpfc_post_rq_buffer( | ||
1376 | phba, phba->sli4_hba.nvmet_mrq_hdr[qno], | ||
1377 | phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); | ||
1139 | return; | 1378 | return; |
1140 | } | 1379 | } |
1380 | |||
1381 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | ||
1382 | payload = (uint32_t *)(nvmebuf->dbuf.virt); | ||
1383 | sid = sli4_sid_from_fc_hdr(fc_hdr); | ||
1384 | |||
1385 | ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; | ||
1141 | memset(ctxp, 0, sizeof(ctxp->ctx)); | 1386 | memset(ctxp, 0, sizeof(ctxp->ctx)); |
1142 | ctxp->wqeq = NULL; | 1387 | ctxp->wqeq = NULL; |
1143 | ctxp->txrdy = NULL; | 1388 | ctxp->txrdy = NULL; |
@@ -1147,9 +1392,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, | |||
1147 | ctxp->oxid = oxid; | 1392 | ctxp->oxid = oxid; |
1148 | ctxp->sid = sid; | 1393 | ctxp->sid = sid; |
1149 | ctxp->state = LPFC_NVMET_STE_RCV; | 1394 | ctxp->state = LPFC_NVMET_STE_RCV; |
1150 | ctxp->rqb_buffer = nvmebuf; | ||
1151 | ctxp->entry_cnt = 1; | 1395 | ctxp->entry_cnt = 1; |
1152 | ctxp->flag = 0; | 1396 | ctxp->flag = 0; |
1397 | ctxp->ctxbuf = ctx_buf; | ||
1153 | spin_lock_init(&ctxp->ctxlock); | 1398 | spin_lock_init(&ctxp->ctxlock); |
1154 | 1399 | ||
1155 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 1400 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
@@ -1165,22 +1410,16 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, | |||
1165 | ctxp->ts_isr_status = 0; | 1410 | ctxp->ts_isr_status = 0; |
1166 | ctxp->ts_status_nvme = 0; | 1411 | ctxp->ts_status_nvme = 0; |
1167 | } | 1412 | } |
1168 | |||
1169 | if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { | ||
1170 | id = smp_processor_id(); | ||
1171 | if (id < LPFC_CHECK_CPU_CNT) | ||
1172 | phba->cpucheck_rcv_io[id]++; | ||
1173 | } | ||
1174 | #endif | 1413 | #endif |
1175 | 1414 | ||
1176 | lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", | ||
1177 | oxid, size, smp_processor_id()); | ||
1178 | |||
1179 | atomic_inc(&tgtp->rcv_fcp_cmd_in); | 1415 | atomic_inc(&tgtp->rcv_fcp_cmd_in); |
1180 | /* | 1416 | /* |
1181 | * The calling sequence should be: | 1417 | * The calling sequence should be: |
1182 | * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done | 1418 | * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done |
1183 | * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. | 1419 | * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. |
1420 | * When we return from nvmet_fc_rcv_fcp_req, all relevant info in | ||
1421 | * the NVME command / FC header is stored, so we are free to repost | ||
1422 | * the buffer. | ||
1184 | */ | 1423 | */ |
1185 | rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, | 1424 | rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, |
1186 | payload, size); | 1425 | payload, size); |
@@ -1188,26 +1427,32 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, | |||
1188 | /* Process FCP command */ | 1427 | /* Process FCP command */ |
1189 | if (rc == 0) { | 1428 | if (rc == 0) { |
1190 | atomic_inc(&tgtp->rcv_fcp_cmd_out); | 1429 | atomic_inc(&tgtp->rcv_fcp_cmd_out); |
1430 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ | ||
1191 | return; | 1431 | return; |
1192 | } | 1432 | } |
1193 | 1433 | ||
1194 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); | 1434 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); |
1195 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1435 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
1196 | "6159 FCP Drop IO x%x: err x%x\n", | 1436 | "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", |
1197 | ctxp->oxid, rc); | 1437 | ctxp->oxid, rc, |
1438 | atomic_read(&tgtp->rcv_fcp_cmd_in), | ||
1439 | atomic_read(&tgtp->rcv_fcp_cmd_out), | ||
1440 | atomic_read(&tgtp->xmt_fcp_release)); | ||
1198 | dropit: | 1441 | dropit: |
1199 | lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", | 1442 | lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", |
1200 | oxid, size, sid); | 1443 | oxid, size, sid); |
1201 | if (oxid) { | 1444 | if (oxid) { |
1445 | lpfc_nvmet_defer_release(phba, ctxp); | ||
1202 | lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); | 1446 | lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); |
1447 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ | ||
1203 | return; | 1448 | return; |
1204 | } | 1449 | } |
1205 | 1450 | ||
1206 | if (nvmebuf) { | 1451 | if (ctx_buf) |
1207 | nvmebuf->iocbq->hba_wqidx = 0; | 1452 | lpfc_nvmet_ctxbuf_post(phba, ctx_buf); |
1208 | /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ | 1453 | |
1209 | lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); | 1454 | if (nvmebuf) |
1210 | } | 1455 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ |
1211 | #endif | 1456 | #endif |
1212 | } | 1457 | } |
1213 | 1458 | ||
@@ -1259,7 +1504,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, | |||
1259 | uint64_t isr_timestamp) | 1504 | uint64_t isr_timestamp) |
1260 | { | 1505 | { |
1261 | if (phba->nvmet_support == 0) { | 1506 | if (phba->nvmet_support == 0) { |
1262 | lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); | 1507 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf); |
1263 | return; | 1508 | return; |
1264 | } | 1509 | } |
1265 | lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, | 1510 | lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, |
@@ -1460,7 +1705,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, | |||
1460 | nvmewqe = ctxp->wqeq; | 1705 | nvmewqe = ctxp->wqeq; |
1461 | if (nvmewqe == NULL) { | 1706 | if (nvmewqe == NULL) { |
1462 | /* Allocate buffer for command wqe */ | 1707 | /* Allocate buffer for command wqe */ |
1463 | nvmewqe = ctxp->rqb_buffer->iocbq; | 1708 | nvmewqe = ctxp->ctxbuf->iocbq; |
1464 | if (nvmewqe == NULL) { | 1709 | if (nvmewqe == NULL) { |
1465 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1710 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
1466 | "6110 lpfc_nvmet_prep_fcp_wqe: No " | 1711 | "6110 lpfc_nvmet_prep_fcp_wqe: No " |
@@ -1487,7 +1732,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, | |||
1487 | return NULL; | 1732 | return NULL; |
1488 | } | 1733 | } |
1489 | 1734 | ||
1490 | sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl; | 1735 | sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl; |
1491 | switch (rsp->op) { | 1736 | switch (rsp->op) { |
1492 | case NVMET_FCOP_READDATA: | 1737 | case NVMET_FCOP_READDATA: |
1493 | case NVMET_FCOP_READDATA_RSP: | 1738 | case NVMET_FCOP_READDATA_RSP: |
@@ -1812,7 +2057,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1812 | result = wcqe->parameter; | 2057 | result = wcqe->parameter; |
1813 | 2058 | ||
1814 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 2059 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
1815 | atomic_inc(&tgtp->xmt_abort_cmpl); | 2060 | if (ctxp->flag & LPFC_NVMET_ABORT_OP) |
2061 | atomic_inc(&tgtp->xmt_fcp_abort_cmpl); | ||
1816 | 2062 | ||
1817 | ctxp->state = LPFC_NVMET_STE_DONE; | 2063 | ctxp->state = LPFC_NVMET_STE_DONE; |
1818 | 2064 | ||
@@ -1827,6 +2073,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1827 | } | 2073 | } |
1828 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | 2074 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
1829 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | 2075 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
2076 | atomic_inc(&tgtp->xmt_abort_rsp); | ||
1830 | 2077 | ||
1831 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, | 2078 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
1832 | "6165 ABORT cmpl: xri x%x flg x%x (%d) " | 2079 | "6165 ABORT cmpl: xri x%x flg x%x (%d) " |
@@ -1835,15 +2082,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1835 | wcqe->word0, wcqe->total_data_placed, | 2082 | wcqe->word0, wcqe->total_data_placed, |
1836 | result, wcqe->word3); | 2083 | result, wcqe->word3); |
1837 | 2084 | ||
2085 | cmdwqe->context2 = NULL; | ||
2086 | cmdwqe->context3 = NULL; | ||
1838 | /* | 2087 | /* |
1839 | * if transport has released ctx, then can reuse it. Otherwise, | 2088 | * if transport has released ctx, then can reuse it. Otherwise, |
1840 | * will be recycled by transport release call. | 2089 | * will be recycled by transport release call. |
1841 | */ | 2090 | */ |
1842 | if (released) | 2091 | if (released) |
1843 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); | 2092 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
1844 | 2093 | ||
1845 | cmdwqe->context2 = NULL; | 2094 | /* This is the iocbq for the abort, not the command */ |
1846 | cmdwqe->context3 = NULL; | ||
1847 | lpfc_sli_release_iocbq(phba, cmdwqe); | 2095 | lpfc_sli_release_iocbq(phba, cmdwqe); |
1848 | 2096 | ||
1849 | /* Since iaab/iaar are NOT set, there is no work left. | 2097 | /* Since iaab/iaar are NOT set, there is no work left. |
@@ -1877,7 +2125,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1877 | result = wcqe->parameter; | 2125 | result = wcqe->parameter; |
1878 | 2126 | ||
1879 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 2127 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
1880 | atomic_inc(&tgtp->xmt_abort_cmpl); | 2128 | if (ctxp->flag & LPFC_NVMET_ABORT_OP) |
2129 | atomic_inc(&tgtp->xmt_fcp_abort_cmpl); | ||
1881 | 2130 | ||
1882 | if (!ctxp) { | 2131 | if (!ctxp) { |
1883 | /* if context is clear, related io alrady complete */ | 2132 | /* if context is clear, related io alrady complete */ |
@@ -1907,6 +2156,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1907 | } | 2156 | } |
1908 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | 2157 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
1909 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | 2158 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
2159 | atomic_inc(&tgtp->xmt_abort_rsp); | ||
1910 | 2160 | ||
1911 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | 2161 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1912 | "6316 ABTS cmpl xri x%x flg x%x (%x) " | 2162 | "6316 ABTS cmpl xri x%x flg x%x (%x) " |
@@ -1914,15 +2164,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1914 | ctxp->oxid, ctxp->flag, released, | 2164 | ctxp->oxid, ctxp->flag, released, |
1915 | wcqe->word0, wcqe->total_data_placed, | 2165 | wcqe->word0, wcqe->total_data_placed, |
1916 | result, wcqe->word3); | 2166 | result, wcqe->word3); |
2167 | |||
2168 | cmdwqe->context2 = NULL; | ||
2169 | cmdwqe->context3 = NULL; | ||
1917 | /* | 2170 | /* |
1918 | * if transport has released ctx, then can reuse it. Otherwise, | 2171 | * if transport has released ctx, then can reuse it. Otherwise, |
1919 | * will be recycled by transport release call. | 2172 | * will be recycled by transport release call. |
1920 | */ | 2173 | */ |
1921 | if (released) | 2174 | if (released) |
1922 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); | 2175 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
1923 | |||
1924 | cmdwqe->context2 = NULL; | ||
1925 | cmdwqe->context3 = NULL; | ||
1926 | 2176 | ||
1927 | /* Since iaab/iaar are NOT set, there is no work left. | 2177 | /* Since iaab/iaar are NOT set, there is no work left. |
1928 | * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted | 2178 | * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted |
@@ -1953,7 +2203,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1953 | result = wcqe->parameter; | 2203 | result = wcqe->parameter; |
1954 | 2204 | ||
1955 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 2205 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
1956 | atomic_inc(&tgtp->xmt_abort_cmpl); | 2206 | atomic_inc(&tgtp->xmt_ls_abort_cmpl); |
1957 | 2207 | ||
1958 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | 2208 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1959 | "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", | 2209 | "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", |
@@ -1984,10 +2234,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, | |||
1984 | sid, xri, ctxp->wqeq->sli4_xritag); | 2234 | sid, xri, ctxp->wqeq->sli4_xritag); |
1985 | 2235 | ||
1986 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 2236 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
1987 | if (!ctxp->wqeq) { | ||
1988 | ctxp->wqeq = ctxp->rqb_buffer->iocbq; | ||
1989 | ctxp->wqeq->hba_wqidx = 0; | ||
1990 | } | ||
1991 | 2237 | ||
1992 | ndlp = lpfc_findnode_did(phba->pport, sid); | 2238 | ndlp = lpfc_findnode_did(phba->pport, sid); |
1993 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || | 2239 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || |
@@ -2083,7 +2329,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
2083 | 2329 | ||
2084 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 2330 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
2085 | if (!ctxp->wqeq) { | 2331 | if (!ctxp->wqeq) { |
2086 | ctxp->wqeq = ctxp->rqb_buffer->iocbq; | 2332 | ctxp->wqeq = ctxp->ctxbuf->iocbq; |
2087 | ctxp->wqeq->hba_wqidx = 0; | 2333 | ctxp->wqeq->hba_wqidx = 0; |
2088 | } | 2334 | } |
2089 | 2335 | ||
@@ -2104,6 +2350,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
2104 | /* Issue ABTS for this WQE based on iotag */ | 2350 | /* Issue ABTS for this WQE based on iotag */ |
2105 | ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); | 2351 | ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); |
2106 | if (!ctxp->abort_wqeq) { | 2352 | if (!ctxp->abort_wqeq) { |
2353 | atomic_inc(&tgtp->xmt_abort_rsp_error); | ||
2107 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, | 2354 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, |
2108 | "6161 ABORT failed: No wqeqs: " | 2355 | "6161 ABORT failed: No wqeqs: " |
2109 | "xri: x%x\n", ctxp->oxid); | 2356 | "xri: x%x\n", ctxp->oxid); |
@@ -2128,6 +2375,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
2128 | /* driver queued commands are in process of being flushed */ | 2375 | /* driver queued commands are in process of being flushed */ |
2129 | if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { | 2376 | if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { |
2130 | spin_unlock_irqrestore(&phba->hbalock, flags); | 2377 | spin_unlock_irqrestore(&phba->hbalock, flags); |
2378 | atomic_inc(&tgtp->xmt_abort_rsp_error); | ||
2131 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, | 2379 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, |
2132 | "6163 Driver in reset cleanup - flushing " | 2380 | "6163 Driver in reset cleanup - flushing " |
2133 | "NVME Req now. hba_flag x%x oxid x%x\n", | 2381 | "NVME Req now. hba_flag x%x oxid x%x\n", |
@@ -2140,6 +2388,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
2140 | /* Outstanding abort is in progress */ | 2388 | /* Outstanding abort is in progress */ |
2141 | if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { | 2389 | if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { |
2142 | spin_unlock_irqrestore(&phba->hbalock, flags); | 2390 | spin_unlock_irqrestore(&phba->hbalock, flags); |
2391 | atomic_inc(&tgtp->xmt_abort_rsp_error); | ||
2143 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, | 2392 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, |
2144 | "6164 Outstanding NVME I/O Abort Request " | 2393 | "6164 Outstanding NVME I/O Abort Request " |
2145 | "still pending on oxid x%x\n", | 2394 | "still pending on oxid x%x\n", |
@@ -2190,9 +2439,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
2190 | abts_wqeq->context2 = ctxp; | 2439 | abts_wqeq->context2 = ctxp; |
2191 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); | 2440 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); |
2192 | spin_unlock_irqrestore(&phba->hbalock, flags); | 2441 | spin_unlock_irqrestore(&phba->hbalock, flags); |
2193 | if (rc == WQE_SUCCESS) | 2442 | if (rc == WQE_SUCCESS) { |
2443 | atomic_inc(&tgtp->xmt_abort_sol); | ||
2194 | return 0; | 2444 | return 0; |
2445 | } | ||
2195 | 2446 | ||
2447 | atomic_inc(&tgtp->xmt_abort_rsp_error); | ||
2196 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | 2448 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
2197 | lpfc_sli_release_iocbq(phba, abts_wqeq); | 2449 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
2198 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, | 2450 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
@@ -2215,7 +2467,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, | |||
2215 | 2467 | ||
2216 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 2468 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
2217 | if (!ctxp->wqeq) { | 2469 | if (!ctxp->wqeq) { |
2218 | ctxp->wqeq = ctxp->rqb_buffer->iocbq; | 2470 | ctxp->wqeq = ctxp->ctxbuf->iocbq; |
2219 | ctxp->wqeq->hba_wqidx = 0; | 2471 | ctxp->wqeq->hba_wqidx = 0; |
2220 | } | 2472 | } |
2221 | 2473 | ||
@@ -2231,11 +2483,11 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, | |||
2231 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); | 2483 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); |
2232 | spin_unlock_irqrestore(&phba->hbalock, flags); | 2484 | spin_unlock_irqrestore(&phba->hbalock, flags); |
2233 | if (rc == WQE_SUCCESS) { | 2485 | if (rc == WQE_SUCCESS) { |
2234 | atomic_inc(&tgtp->xmt_abort_rsp); | ||
2235 | return 0; | 2486 | return 0; |
2236 | } | 2487 | } |
2237 | 2488 | ||
2238 | aerr: | 2489 | aerr: |
2490 | atomic_inc(&tgtp->xmt_abort_rsp_error); | ||
2239 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | 2491 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
2240 | atomic_inc(&tgtp->xmt_abort_rsp_error); | 2492 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
2241 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, | 2493 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, |
@@ -2270,6 +2522,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, | |||
2270 | } | 2522 | } |
2271 | abts_wqeq = ctxp->wqeq; | 2523 | abts_wqeq = ctxp->wqeq; |
2272 | wqe_abts = &abts_wqeq->wqe; | 2524 | wqe_abts = &abts_wqeq->wqe; |
2525 | |||
2273 | lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); | 2526 | lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); |
2274 | 2527 | ||
2275 | spin_lock_irqsave(&phba->hbalock, flags); | 2528 | spin_lock_irqsave(&phba->hbalock, flags); |
@@ -2279,7 +2532,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, | |||
2279 | rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); | 2532 | rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); |
2280 | spin_unlock_irqrestore(&phba->hbalock, flags); | 2533 | spin_unlock_irqrestore(&phba->hbalock, flags); |
2281 | if (rc == WQE_SUCCESS) { | 2534 | if (rc == WQE_SUCCESS) { |
2282 | atomic_inc(&tgtp->xmt_abort_rsp); | 2535 | atomic_inc(&tgtp->xmt_abort_unsol); |
2283 | return 0; | 2536 | return 0; |
2284 | } | 2537 | } |
2285 | 2538 | ||
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 128759fe6650..6eb2f5d8d4ed 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h | |||
@@ -22,6 +22,7 @@ | |||
22 | ********************************************************************/ | 22 | ********************************************************************/ |
23 | 23 | ||
24 | #define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */ | 24 | #define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */ |
25 | #define LPFC_NVMET_RQE_DEF_COUNT 512 | ||
25 | #define LPFC_NVMET_SUCCESS_LEN 12 | 26 | #define LPFC_NVMET_SUCCESS_LEN 12 |
26 | 27 | ||
27 | /* Used for NVME Target */ | 28 | /* Used for NVME Target */ |
@@ -34,6 +35,7 @@ struct lpfc_nvmet_tgtport { | |||
34 | atomic_t rcv_ls_req_out; | 35 | atomic_t rcv_ls_req_out; |
35 | atomic_t rcv_ls_req_drop; | 36 | atomic_t rcv_ls_req_drop; |
36 | atomic_t xmt_ls_abort; | 37 | atomic_t xmt_ls_abort; |
38 | atomic_t xmt_ls_abort_cmpl; | ||
37 | 39 | ||
38 | /* Stats counters - lpfc_nvmet_xmt_ls_rsp */ | 40 | /* Stats counters - lpfc_nvmet_xmt_ls_rsp */ |
39 | atomic_t xmt_ls_rsp; | 41 | atomic_t xmt_ls_rsp; |
@@ -47,9 +49,9 @@ struct lpfc_nvmet_tgtport { | |||
47 | atomic_t rcv_fcp_cmd_in; | 49 | atomic_t rcv_fcp_cmd_in; |
48 | atomic_t rcv_fcp_cmd_out; | 50 | atomic_t rcv_fcp_cmd_out; |
49 | atomic_t rcv_fcp_cmd_drop; | 51 | atomic_t rcv_fcp_cmd_drop; |
52 | atomic_t xmt_fcp_release; | ||
50 | 53 | ||
51 | /* Stats counters - lpfc_nvmet_xmt_fcp_op */ | 54 | /* Stats counters - lpfc_nvmet_xmt_fcp_op */ |
52 | atomic_t xmt_fcp_abort; | ||
53 | atomic_t xmt_fcp_drop; | 55 | atomic_t xmt_fcp_drop; |
54 | atomic_t xmt_fcp_read_rsp; | 56 | atomic_t xmt_fcp_read_rsp; |
55 | atomic_t xmt_fcp_read; | 57 | atomic_t xmt_fcp_read; |
@@ -62,12 +64,13 @@ struct lpfc_nvmet_tgtport { | |||
62 | atomic_t xmt_fcp_rsp_drop; | 64 | atomic_t xmt_fcp_rsp_drop; |
63 | 65 | ||
64 | 66 | ||
65 | /* Stats counters - lpfc_nvmet_unsol_issue_abort */ | 67 | /* Stats counters - lpfc_nvmet_xmt_fcp_abort */ |
68 | atomic_t xmt_fcp_abort; | ||
69 | atomic_t xmt_fcp_abort_cmpl; | ||
70 | atomic_t xmt_abort_sol; | ||
71 | atomic_t xmt_abort_unsol; | ||
66 | atomic_t xmt_abort_rsp; | 72 | atomic_t xmt_abort_rsp; |
67 | atomic_t xmt_abort_rsp_error; | 73 | atomic_t xmt_abort_rsp_error; |
68 | |||
69 | /* Stats counters - lpfc_nvmet_xmt_abort_cmp */ | ||
70 | atomic_t xmt_abort_cmpl; | ||
71 | }; | 74 | }; |
72 | 75 | ||
73 | struct lpfc_nvmet_rcv_ctx { | 76 | struct lpfc_nvmet_rcv_ctx { |
@@ -103,6 +106,7 @@ struct lpfc_nvmet_rcv_ctx { | |||
103 | #define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ | 106 | #define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ |
104 | #define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ | 107 | #define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ |
105 | struct rqb_dmabuf *rqb_buffer; | 108 | struct rqb_dmabuf *rqb_buffer; |
109 | struct lpfc_nvmet_ctxbuf *ctxbuf; | ||
106 | 110 | ||
107 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 111 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
108 | uint64_t ts_isr_cmd; | 112 | uint64_t ts_isr_cmd; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index cf19f4976f5f..d6b184839bc2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -74,6 +74,8 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, | |||
74 | struct lpfc_iocbq *); | 74 | struct lpfc_iocbq *); |
75 | static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, | 75 | static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, |
76 | struct hbq_dmabuf *); | 76 | struct hbq_dmabuf *); |
77 | static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, | ||
78 | struct hbq_dmabuf *dmabuf); | ||
77 | static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, | 79 | static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, |
78 | struct lpfc_cqe *); | 80 | struct lpfc_cqe *); |
79 | static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, | 81 | static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, |
@@ -479,22 +481,23 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, | |||
479 | if (unlikely(!hq) || unlikely(!dq)) | 481 | if (unlikely(!hq) || unlikely(!dq)) |
480 | return -ENOMEM; | 482 | return -ENOMEM; |
481 | put_index = hq->host_index; | 483 | put_index = hq->host_index; |
482 | temp_hrqe = hq->qe[hq->host_index].rqe; | 484 | temp_hrqe = hq->qe[put_index].rqe; |
483 | temp_drqe = dq->qe[dq->host_index].rqe; | 485 | temp_drqe = dq->qe[dq->host_index].rqe; |
484 | 486 | ||
485 | if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) | 487 | if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) |
486 | return -EINVAL; | 488 | return -EINVAL; |
487 | if (hq->host_index != dq->host_index) | 489 | if (put_index != dq->host_index) |
488 | return -EINVAL; | 490 | return -EINVAL; |
489 | /* If the host has not yet processed the next entry then we are done */ | 491 | /* If the host has not yet processed the next entry then we are done */ |
490 | if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index) | 492 | if (((put_index + 1) % hq->entry_count) == hq->hba_index) |
491 | return -EBUSY; | 493 | return -EBUSY; |
492 | lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); | 494 | lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); |
493 | lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); | 495 | lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); |
494 | 496 | ||
495 | /* Update the host index to point to the next slot */ | 497 | /* Update the host index to point to the next slot */ |
496 | hq->host_index = ((hq->host_index + 1) % hq->entry_count); | 498 | hq->host_index = ((put_index + 1) % hq->entry_count); |
497 | dq->host_index = ((dq->host_index + 1) % dq->entry_count); | 499 | dq->host_index = ((dq->host_index + 1) % dq->entry_count); |
500 | hq->RQ_buf_posted++; | ||
498 | 501 | ||
499 | /* Ring The Header Receive Queue Doorbell */ | 502 | /* Ring The Header Receive Queue Doorbell */ |
500 | if (!(hq->host_index % hq->entry_repost)) { | 503 | if (!(hq->host_index % hq->entry_repost)) { |
@@ -4204,13 +4207,16 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) | |||
4204 | /* Reset HBA */ | 4207 | /* Reset HBA */ |
4205 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 4208 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
4206 | "0325 Reset HBA Data: x%x x%x\n", | 4209 | "0325 Reset HBA Data: x%x x%x\n", |
4207 | phba->pport->port_state, psli->sli_flag); | 4210 | (phba->pport) ? phba->pport->port_state : 0, |
4211 | psli->sli_flag); | ||
4208 | 4212 | ||
4209 | /* perform board reset */ | 4213 | /* perform board reset */ |
4210 | phba->fc_eventTag = 0; | 4214 | phba->fc_eventTag = 0; |
4211 | phba->link_events = 0; | 4215 | phba->link_events = 0; |
4212 | phba->pport->fc_myDID = 0; | 4216 | if (phba->pport) { |
4213 | phba->pport->fc_prevDID = 0; | 4217 | phba->pport->fc_myDID = 0; |
4218 | phba->pport->fc_prevDID = 0; | ||
4219 | } | ||
4214 | 4220 | ||
4215 | /* Turn off parity checking and serr during the physical reset */ | 4221 | /* Turn off parity checking and serr during the physical reset */ |
4216 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); | 4222 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); |
@@ -4336,7 +4342,8 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) | |||
4336 | /* Restart HBA */ | 4342 | /* Restart HBA */ |
4337 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 4343 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
4338 | "0337 Restart HBA Data: x%x x%x\n", | 4344 | "0337 Restart HBA Data: x%x x%x\n", |
4339 | phba->pport->port_state, psli->sli_flag); | 4345 | (phba->pport) ? phba->pport->port_state : 0, |
4346 | psli->sli_flag); | ||
4340 | 4347 | ||
4341 | word0 = 0; | 4348 | word0 = 0; |
4342 | mb = (MAILBOX_t *) &word0; | 4349 | mb = (MAILBOX_t *) &word0; |
@@ -4350,7 +4357,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) | |||
4350 | readl(to_slim); /* flush */ | 4357 | readl(to_slim); /* flush */ |
4351 | 4358 | ||
4352 | /* Only skip post after fc_ffinit is completed */ | 4359 | /* Only skip post after fc_ffinit is completed */ |
4353 | if (phba->pport->port_state) | 4360 | if (phba->pport && phba->pport->port_state) |
4354 | word0 = 1; /* This is really setting up word1 */ | 4361 | word0 = 1; /* This is really setting up word1 */ |
4355 | else | 4362 | else |
4356 | word0 = 0; /* This is really setting up word1 */ | 4363 | word0 = 0; /* This is really setting up word1 */ |
@@ -4359,7 +4366,8 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) | |||
4359 | readl(to_slim); /* flush */ | 4366 | readl(to_slim); /* flush */ |
4360 | 4367 | ||
4361 | lpfc_sli_brdreset(phba); | 4368 | lpfc_sli_brdreset(phba); |
4362 | phba->pport->stopped = 0; | 4369 | if (phba->pport) |
4370 | phba->pport->stopped = 0; | ||
4363 | phba->link_state = LPFC_INIT_START; | 4371 | phba->link_state = LPFC_INIT_START; |
4364 | phba->hba_flag = 0; | 4372 | phba->hba_flag = 0; |
4365 | spin_unlock_irq(&phba->hbalock); | 4373 | spin_unlock_irq(&phba->hbalock); |
@@ -4446,7 +4454,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |||
4446 | * iteration, the function will restart the HBA again. The function returns | 4454 | * iteration, the function will restart the HBA again. The function returns |
4447 | * zero if HBA successfully restarted else returns negative error code. | 4455 | * zero if HBA successfully restarted else returns negative error code. |
4448 | **/ | 4456 | **/ |
4449 | static int | 4457 | int |
4450 | lpfc_sli_chipset_init(struct lpfc_hba *phba) | 4458 | lpfc_sli_chipset_init(struct lpfc_hba *phba) |
4451 | { | 4459 | { |
4452 | uint32_t status, i = 0; | 4460 | uint32_t status, i = 0; |
@@ -5901,7 +5909,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, | |||
5901 | bf_set(lpfc_mbx_set_feature_mds, | 5909 | bf_set(lpfc_mbx_set_feature_mds, |
5902 | &mbox->u.mqe.un.set_feature, 1); | 5910 | &mbox->u.mqe.un.set_feature, 1); |
5903 | bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, | 5911 | bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, |
5904 | &mbox->u.mqe.un.set_feature, 0); | 5912 | &mbox->u.mqe.un.set_feature, 1); |
5905 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; | 5913 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; |
5906 | mbox->u.mqe.un.set_feature.param_len = 8; | 5914 | mbox->u.mqe.un.set_feature.param_len = 8; |
5907 | break; | 5915 | break; |
@@ -6507,6 +6515,50 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) | |||
6507 | (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); | 6515 | (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); |
6508 | } | 6516 | } |
6509 | 6517 | ||
6518 | int | ||
6519 | lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, | ||
6520 | struct lpfc_queue *drq, int count, int idx) | ||
6521 | { | ||
6522 | int rc, i; | ||
6523 | struct lpfc_rqe hrqe; | ||
6524 | struct lpfc_rqe drqe; | ||
6525 | struct lpfc_rqb *rqbp; | ||
6526 | struct rqb_dmabuf *rqb_buffer; | ||
6527 | LIST_HEAD(rqb_buf_list); | ||
6528 | |||
6529 | rqbp = hrq->rqbp; | ||
6530 | for (i = 0; i < count; i++) { | ||
6531 | /* IF RQ is already full, don't bother */ | ||
6532 | if (rqbp->buffer_count + i >= rqbp->entry_count - 1) | ||
6533 | break; | ||
6534 | rqb_buffer = rqbp->rqb_alloc_buffer(phba); | ||
6535 | if (!rqb_buffer) | ||
6536 | break; | ||
6537 | rqb_buffer->hrq = hrq; | ||
6538 | rqb_buffer->drq = drq; | ||
6539 | rqb_buffer->idx = idx; | ||
6540 | list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); | ||
6541 | } | ||
6542 | while (!list_empty(&rqb_buf_list)) { | ||
6543 | list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, | ||
6544 | hbuf.list); | ||
6545 | |||
6546 | hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); | ||
6547 | hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); | ||
6548 | drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); | ||
6549 | drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); | ||
6550 | rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); | ||
6551 | if (rc < 0) { | ||
6552 | rqbp->rqb_free_buffer(phba, rqb_buffer); | ||
6553 | } else { | ||
6554 | list_add_tail(&rqb_buffer->hbuf.list, | ||
6555 | &rqbp->rqb_buffer_list); | ||
6556 | rqbp->buffer_count++; | ||
6557 | } | ||
6558 | } | ||
6559 | return 1; | ||
6560 | } | ||
6561 | |||
6510 | /** | 6562 | /** |
6511 | * lpfc_sli4_hba_setup - SLI4 device initialization PCI function | 6563 | * lpfc_sli4_hba_setup - SLI4 device initialization PCI function |
6512 | * @phba: Pointer to HBA context object. | 6564 | * @phba: Pointer to HBA context object. |
@@ -6519,7 +6571,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) | |||
6519 | int | 6571 | int |
6520 | lpfc_sli4_hba_setup(struct lpfc_hba *phba) | 6572 | lpfc_sli4_hba_setup(struct lpfc_hba *phba) |
6521 | { | 6573 | { |
6522 | int rc, i; | 6574 | int rc, i, cnt; |
6523 | LPFC_MBOXQ_t *mboxq; | 6575 | LPFC_MBOXQ_t *mboxq; |
6524 | struct lpfc_mqe *mqe; | 6576 | struct lpfc_mqe *mqe; |
6525 | uint8_t *vpd; | 6577 | uint8_t *vpd; |
@@ -6870,6 +6922,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
6870 | goto out_destroy_queue; | 6922 | goto out_destroy_queue; |
6871 | } | 6923 | } |
6872 | phba->sli4_hba.nvmet_xri_cnt = rc; | 6924 | phba->sli4_hba.nvmet_xri_cnt = rc; |
6925 | |||
6926 | cnt = phba->cfg_iocb_cnt * 1024; | ||
6927 | /* We need 1 iocbq for every SGL, for IO processing */ | ||
6928 | cnt += phba->sli4_hba.nvmet_xri_cnt; | ||
6929 | /* Initialize and populate the iocb list per host */ | ||
6930 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
6931 | "2821 initialize iocb list %d total %d\n", | ||
6932 | phba->cfg_iocb_cnt, cnt); | ||
6933 | rc = lpfc_init_iocb_list(phba, cnt); | ||
6934 | if (rc) { | ||
6935 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
6936 | "1413 Failed to init iocb list.\n"); | ||
6937 | goto out_destroy_queue; | ||
6938 | } | ||
6939 | |||
6873 | lpfc_nvmet_create_targetport(phba); | 6940 | lpfc_nvmet_create_targetport(phba); |
6874 | } else { | 6941 | } else { |
6875 | /* update host scsi xri-sgl sizes and mappings */ | 6942 | /* update host scsi xri-sgl sizes and mappings */ |
@@ -6889,28 +6956,34 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
6889 | "and mapping: %d\n", rc); | 6956 | "and mapping: %d\n", rc); |
6890 | goto out_destroy_queue; | 6957 | goto out_destroy_queue; |
6891 | } | 6958 | } |
6959 | |||
6960 | cnt = phba->cfg_iocb_cnt * 1024; | ||
6961 | /* Initialize and populate the iocb list per host */ | ||
6962 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
6963 | "2820 initialize iocb list %d total %d\n", | ||
6964 | phba->cfg_iocb_cnt, cnt); | ||
6965 | rc = lpfc_init_iocb_list(phba, cnt); | ||
6966 | if (rc) { | ||
6967 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
6968 | "6301 Failed to init iocb list.\n"); | ||
6969 | goto out_destroy_queue; | ||
6970 | } | ||
6892 | } | 6971 | } |
6893 | 6972 | ||
6894 | if (phba->nvmet_support && phba->cfg_nvmet_mrq) { | 6973 | if (phba->nvmet_support && phba->cfg_nvmet_mrq) { |
6895 | |||
6896 | /* Post initial buffers to all RQs created */ | 6974 | /* Post initial buffers to all RQs created */ |
6897 | for (i = 0; i < phba->cfg_nvmet_mrq; i++) { | 6975 | for (i = 0; i < phba->cfg_nvmet_mrq; i++) { |
6898 | rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; | 6976 | rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; |
6899 | INIT_LIST_HEAD(&rqbp->rqb_buffer_list); | 6977 | INIT_LIST_HEAD(&rqbp->rqb_buffer_list); |
6900 | rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; | 6978 | rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; |
6901 | rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; | 6979 | rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; |
6902 | rqbp->entry_count = 256; | 6980 | rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; |
6903 | rqbp->buffer_count = 0; | 6981 | rqbp->buffer_count = 0; |
6904 | 6982 | ||
6905 | /* Divide by 4 and round down to multiple of 16 */ | ||
6906 | rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8; | ||
6907 | phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc; | ||
6908 | phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc; | ||
6909 | |||
6910 | lpfc_post_rq_buffer( | 6983 | lpfc_post_rq_buffer( |
6911 | phba, phba->sli4_hba.nvmet_mrq_hdr[i], | 6984 | phba, phba->sli4_hba.nvmet_mrq_hdr[i], |
6912 | phba->sli4_hba.nvmet_mrq_data[i], | 6985 | phba->sli4_hba.nvmet_mrq_data[i], |
6913 | phba->cfg_nvmet_mrq_post); | 6986 | LPFC_NVMET_RQE_DEF_COUNT, i); |
6914 | } | 6987 | } |
6915 | } | 6988 | } |
6916 | 6989 | ||
@@ -7077,6 +7150,7 @@ out_unset_queue: | |||
7077 | /* Unset all the queues set up in this routine when error out */ | 7150 | /* Unset all the queues set up in this routine when error out */ |
7078 | lpfc_sli4_queue_unset(phba); | 7151 | lpfc_sli4_queue_unset(phba); |
7079 | out_destroy_queue: | 7152 | out_destroy_queue: |
7153 | lpfc_free_iocb_list(phba); | ||
7080 | lpfc_sli4_queue_destroy(phba); | 7154 | lpfc_sli4_queue_destroy(phba); |
7081 | out_stop_timers: | 7155 | out_stop_timers: |
7082 | lpfc_stop_hba_timers(phba); | 7156 | lpfc_stop_hba_timers(phba); |
@@ -8616,8 +8690,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
8616 | memset(wqe, 0, sizeof(union lpfc_wqe128)); | 8690 | memset(wqe, 0, sizeof(union lpfc_wqe128)); |
8617 | /* Some of the fields are in the right position already */ | 8691 | /* Some of the fields are in the right position already */ |
8618 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); | 8692 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); |
8619 | wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ | 8693 | if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { |
8620 | wqe->generic.wqe_com.word10 = 0; | 8694 | /* The ct field has moved so reset */ |
8695 | wqe->generic.wqe_com.word7 = 0; | ||
8696 | wqe->generic.wqe_com.word10 = 0; | ||
8697 | } | ||
8621 | 8698 | ||
8622 | abort_tag = (uint32_t) iocbq->iotag; | 8699 | abort_tag = (uint32_t) iocbq->iotag; |
8623 | xritag = iocbq->sli4_xritag; | 8700 | xritag = iocbq->sli4_xritag; |
@@ -9111,6 +9188,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
9111 | } | 9188 | } |
9112 | 9189 | ||
9113 | break; | 9190 | break; |
9191 | case CMD_SEND_FRAME: | ||
9192 | bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); | ||
9193 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); | ||
9194 | return 0; | ||
9114 | case CMD_XRI_ABORTED_CX: | 9195 | case CMD_XRI_ABORTED_CX: |
9115 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ | 9196 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ |
9116 | case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ | 9197 | case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ |
@@ -12783,6 +12864,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
12783 | struct fc_frame_header *fc_hdr; | 12864 | struct fc_frame_header *fc_hdr; |
12784 | struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; | 12865 | struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; |
12785 | struct lpfc_queue *drq = phba->sli4_hba.dat_rq; | 12866 | struct lpfc_queue *drq = phba->sli4_hba.dat_rq; |
12867 | struct lpfc_nvmet_tgtport *tgtp; | ||
12786 | struct hbq_dmabuf *dma_buf; | 12868 | struct hbq_dmabuf *dma_buf; |
12787 | uint32_t status, rq_id; | 12869 | uint32_t status, rq_id; |
12788 | unsigned long iflags; | 12870 | unsigned long iflags; |
@@ -12803,7 +12885,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
12803 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | 12885 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
12804 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 12886 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
12805 | "2537 Receive Frame Truncated!!\n"); | 12887 | "2537 Receive Frame Truncated!!\n"); |
12806 | hrq->RQ_buf_trunc++; | ||
12807 | case FC_STATUS_RQ_SUCCESS: | 12888 | case FC_STATUS_RQ_SUCCESS: |
12808 | lpfc_sli4_rq_release(hrq, drq); | 12889 | lpfc_sli4_rq_release(hrq, drq); |
12809 | spin_lock_irqsave(&phba->hbalock, iflags); | 12890 | spin_lock_irqsave(&phba->hbalock, iflags); |
@@ -12814,6 +12895,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
12814 | goto out; | 12895 | goto out; |
12815 | } | 12896 | } |
12816 | hrq->RQ_rcv_buf++; | 12897 | hrq->RQ_rcv_buf++; |
12898 | hrq->RQ_buf_posted--; | ||
12817 | memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); | 12899 | memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); |
12818 | 12900 | ||
12819 | /* If a NVME LS event (type 0x28), treat it as Fast path */ | 12901 | /* If a NVME LS event (type 0x28), treat it as Fast path */ |
@@ -12827,8 +12909,21 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
12827 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 12909 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
12828 | workposted = true; | 12910 | workposted = true; |
12829 | break; | 12911 | break; |
12830 | case FC_STATUS_INSUFF_BUF_NEED_BUF: | ||
12831 | case FC_STATUS_INSUFF_BUF_FRM_DISC: | 12912 | case FC_STATUS_INSUFF_BUF_FRM_DISC: |
12913 | if (phba->nvmet_support) { | ||
12914 | tgtp = phba->targetport->private; | ||
12915 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, | ||
12916 | "6402 RQE Error x%x, posted %d err_cnt " | ||
12917 | "%d: %x %x %x\n", | ||
12918 | status, hrq->RQ_buf_posted, | ||
12919 | hrq->RQ_no_posted_buf, | ||
12920 | atomic_read(&tgtp->rcv_fcp_cmd_in), | ||
12921 | atomic_read(&tgtp->rcv_fcp_cmd_out), | ||
12922 | atomic_read(&tgtp->xmt_fcp_release)); | ||
12923 | } | ||
12924 | /* fallthrough */ | ||
12925 | |||
12926 | case FC_STATUS_INSUFF_BUF_NEED_BUF: | ||
12832 | hrq->RQ_no_posted_buf++; | 12927 | hrq->RQ_no_posted_buf++; |
12833 | /* Post more buffers if possible */ | 12928 | /* Post more buffers if possible */ |
12834 | spin_lock_irqsave(&phba->hbalock, iflags); | 12929 | spin_lock_irqsave(&phba->hbalock, iflags); |
@@ -12946,7 +13041,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, | |||
12946 | while ((cqe = lpfc_sli4_cq_get(cq))) { | 13041 | while ((cqe = lpfc_sli4_cq_get(cq))) { |
12947 | workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); | 13042 | workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); |
12948 | if (!(++ecount % cq->entry_repost)) | 13043 | if (!(++ecount % cq->entry_repost)) |
12949 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | 13044 | break; |
12950 | cq->CQ_mbox++; | 13045 | cq->CQ_mbox++; |
12951 | } | 13046 | } |
12952 | break; | 13047 | break; |
@@ -12960,7 +13055,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, | |||
12960 | workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, | 13055 | workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, |
12961 | cqe); | 13056 | cqe); |
12962 | if (!(++ecount % cq->entry_repost)) | 13057 | if (!(++ecount % cq->entry_repost)) |
12963 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | 13058 | break; |
12964 | } | 13059 | } |
12965 | 13060 | ||
12966 | /* Track the max number of CQEs processed in 1 EQ */ | 13061 | /* Track the max number of CQEs processed in 1 EQ */ |
@@ -13130,6 +13225,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
13130 | struct lpfc_queue *drq; | 13225 | struct lpfc_queue *drq; |
13131 | struct rqb_dmabuf *dma_buf; | 13226 | struct rqb_dmabuf *dma_buf; |
13132 | struct fc_frame_header *fc_hdr; | 13227 | struct fc_frame_header *fc_hdr; |
13228 | struct lpfc_nvmet_tgtport *tgtp; | ||
13133 | uint32_t status, rq_id; | 13229 | uint32_t status, rq_id; |
13134 | unsigned long iflags; | 13230 | unsigned long iflags; |
13135 | uint32_t fctl, idx; | 13231 | uint32_t fctl, idx; |
@@ -13160,8 +13256,6 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
13160 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | 13256 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
13161 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 13257 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
13162 | "6126 Receive Frame Truncated!!\n"); | 13258 | "6126 Receive Frame Truncated!!\n"); |
13163 | hrq->RQ_buf_trunc++; | ||
13164 | break; | ||
13165 | case FC_STATUS_RQ_SUCCESS: | 13259 | case FC_STATUS_RQ_SUCCESS: |
13166 | lpfc_sli4_rq_release(hrq, drq); | 13260 | lpfc_sli4_rq_release(hrq, drq); |
13167 | spin_lock_irqsave(&phba->hbalock, iflags); | 13261 | spin_lock_irqsave(&phba->hbalock, iflags); |
@@ -13173,6 +13267,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
13173 | } | 13267 | } |
13174 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 13268 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
13175 | hrq->RQ_rcv_buf++; | 13269 | hrq->RQ_rcv_buf++; |
13270 | hrq->RQ_buf_posted--; | ||
13176 | fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; | 13271 | fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; |
13177 | 13272 | ||
13178 | /* Just some basic sanity checks on FCP Command frame */ | 13273 | /* Just some basic sanity checks on FCP Command frame */ |
@@ -13195,14 +13290,23 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
13195 | drop: | 13290 | drop: |
13196 | lpfc_in_buf_free(phba, &dma_buf->dbuf); | 13291 | lpfc_in_buf_free(phba, &dma_buf->dbuf); |
13197 | break; | 13292 | break; |
13198 | case FC_STATUS_INSUFF_BUF_NEED_BUF: | ||
13199 | case FC_STATUS_INSUFF_BUF_FRM_DISC: | 13293 | case FC_STATUS_INSUFF_BUF_FRM_DISC: |
13294 | if (phba->nvmet_support) { | ||
13295 | tgtp = phba->targetport->private; | ||
13296 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, | ||
13297 | "6401 RQE Error x%x, posted %d err_cnt " | ||
13298 | "%d: %x %x %x\n", | ||
13299 | status, hrq->RQ_buf_posted, | ||
13300 | hrq->RQ_no_posted_buf, | ||
13301 | atomic_read(&tgtp->rcv_fcp_cmd_in), | ||
13302 | atomic_read(&tgtp->rcv_fcp_cmd_out), | ||
13303 | atomic_read(&tgtp->xmt_fcp_release)); | ||
13304 | } | ||
13305 | /* fallthrough */ | ||
13306 | |||
13307 | case FC_STATUS_INSUFF_BUF_NEED_BUF: | ||
13200 | hrq->RQ_no_posted_buf++; | 13308 | hrq->RQ_no_posted_buf++; |
13201 | /* Post more buffers if possible */ | 13309 | /* Post more buffers if possible */ |
13202 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
13203 | phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; | ||
13204 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
13205 | workposted = true; | ||
13206 | break; | 13310 | break; |
13207 | } | 13311 | } |
13208 | out: | 13312 | out: |
@@ -13356,7 +13460,7 @@ process_cq: | |||
13356 | while ((cqe = lpfc_sli4_cq_get(cq))) { | 13460 | while ((cqe = lpfc_sli4_cq_get(cq))) { |
13357 | workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); | 13461 | workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); |
13358 | if (!(++ecount % cq->entry_repost)) | 13462 | if (!(++ecount % cq->entry_repost)) |
13359 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | 13463 | break; |
13360 | } | 13464 | } |
13361 | 13465 | ||
13362 | /* Track the max number of CQEs processed in 1 EQ */ | 13466 | /* Track the max number of CQEs processed in 1 EQ */ |
@@ -13447,7 +13551,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) | |||
13447 | while ((cqe = lpfc_sli4_cq_get(cq))) { | 13551 | while ((cqe = lpfc_sli4_cq_get(cq))) { |
13448 | workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); | 13552 | workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); |
13449 | if (!(++ecount % cq->entry_repost)) | 13553 | if (!(++ecount % cq->entry_repost)) |
13450 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | 13554 | break; |
13451 | } | 13555 | } |
13452 | 13556 | ||
13453 | /* Track the max number of CQEs processed in 1 EQ */ | 13557 | /* Track the max number of CQEs processed in 1 EQ */ |
@@ -13529,7 +13633,7 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id) | |||
13529 | while ((eqe = lpfc_sli4_eq_get(eq))) { | 13633 | while ((eqe = lpfc_sli4_eq_get(eq))) { |
13530 | lpfc_sli4_fof_handle_eqe(phba, eqe); | 13634 | lpfc_sli4_fof_handle_eqe(phba, eqe); |
13531 | if (!(++ecount % eq->entry_repost)) | 13635 | if (!(++ecount % eq->entry_repost)) |
13532 | lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM); | 13636 | break; |
13533 | eq->EQ_processed++; | 13637 | eq->EQ_processed++; |
13534 | } | 13638 | } |
13535 | 13639 | ||
@@ -13646,7 +13750,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) | |||
13646 | 13750 | ||
13647 | lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); | 13751 | lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); |
13648 | if (!(++ecount % fpeq->entry_repost)) | 13752 | if (!(++ecount % fpeq->entry_repost)) |
13649 | lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); | 13753 | break; |
13650 | fpeq->EQ_processed++; | 13754 | fpeq->EQ_processed++; |
13651 | } | 13755 | } |
13652 | 13756 | ||
@@ -13827,17 +13931,10 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, | |||
13827 | } | 13931 | } |
13828 | queue->entry_size = entry_size; | 13932 | queue->entry_size = entry_size; |
13829 | queue->entry_count = entry_count; | 13933 | queue->entry_count = entry_count; |
13830 | |||
13831 | /* | ||
13832 | * entry_repost is calculated based on the number of entries in the | ||
13833 | * queue. This works out except for RQs. If buffers are NOT initially | ||
13834 | * posted for every RQE, entry_repost should be adjusted accordingly. | ||
13835 | */ | ||
13836 | queue->entry_repost = (entry_count >> 3); | ||
13837 | if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST) | ||
13838 | queue->entry_repost = LPFC_QUEUE_MIN_REPOST; | ||
13839 | queue->phba = phba; | 13934 | queue->phba = phba; |
13840 | 13935 | ||
13936 | /* entry_repost will be set during q creation */ | ||
13937 | |||
13841 | return queue; | 13938 | return queue; |
13842 | out_fail: | 13939 | out_fail: |
13843 | lpfc_sli4_queue_free(queue); | 13940 | lpfc_sli4_queue_free(queue); |
@@ -14068,6 +14165,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) | |||
14068 | status = -ENXIO; | 14165 | status = -ENXIO; |
14069 | eq->host_index = 0; | 14166 | eq->host_index = 0; |
14070 | eq->hba_index = 0; | 14167 | eq->hba_index = 0; |
14168 | eq->entry_repost = LPFC_EQ_REPOST; | ||
14071 | 14169 | ||
14072 | mempool_free(mbox, phba->mbox_mem_pool); | 14170 | mempool_free(mbox, phba->mbox_mem_pool); |
14073 | return status; | 14171 | return status; |
@@ -14141,9 +14239,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
14141 | default: | 14239 | default: |
14142 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 14240 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
14143 | "0361 Unsupported CQ count: " | 14241 | "0361 Unsupported CQ count: " |
14144 | "entry cnt %d sz %d pg cnt %d repost %d\n", | 14242 | "entry cnt %d sz %d pg cnt %d\n", |
14145 | cq->entry_count, cq->entry_size, | 14243 | cq->entry_count, cq->entry_size, |
14146 | cq->page_count, cq->entry_repost); | 14244 | cq->page_count); |
14147 | if (cq->entry_count < 256) { | 14245 | if (cq->entry_count < 256) { |
14148 | status = -EINVAL; | 14246 | status = -EINVAL; |
14149 | goto out; | 14247 | goto out; |
@@ -14196,6 +14294,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
14196 | cq->assoc_qid = eq->queue_id; | 14294 | cq->assoc_qid = eq->queue_id; |
14197 | cq->host_index = 0; | 14295 | cq->host_index = 0; |
14198 | cq->hba_index = 0; | 14296 | cq->hba_index = 0; |
14297 | cq->entry_repost = LPFC_CQ_REPOST; | ||
14199 | 14298 | ||
14200 | out: | 14299 | out: |
14201 | mempool_free(mbox, phba->mbox_mem_pool); | 14300 | mempool_free(mbox, phba->mbox_mem_pool); |
@@ -14387,6 +14486,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, | |||
14387 | cq->assoc_qid = eq->queue_id; | 14486 | cq->assoc_qid = eq->queue_id; |
14388 | cq->host_index = 0; | 14487 | cq->host_index = 0; |
14389 | cq->hba_index = 0; | 14488 | cq->hba_index = 0; |
14489 | cq->entry_repost = LPFC_CQ_REPOST; | ||
14390 | 14490 | ||
14391 | rc = 0; | 14491 | rc = 0; |
14392 | list_for_each_entry(dmabuf, &cq->page_list, list) { | 14492 | list_for_each_entry(dmabuf, &cq->page_list, list) { |
@@ -14635,6 +14735,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
14635 | mq->subtype = subtype; | 14735 | mq->subtype = subtype; |
14636 | mq->host_index = 0; | 14736 | mq->host_index = 0; |
14637 | mq->hba_index = 0; | 14737 | mq->hba_index = 0; |
14738 | mq->entry_repost = LPFC_MQ_REPOST; | ||
14638 | 14739 | ||
14639 | /* link the mq onto the parent cq child list */ | 14740 | /* link the mq onto the parent cq child list */ |
14640 | list_add_tail(&mq->list, &cq->child_list); | 14741 | list_add_tail(&mq->list, &cq->child_list); |
@@ -14860,34 +14961,6 @@ out: | |||
14860 | } | 14961 | } |
14861 | 14962 | ||
14862 | /** | 14963 | /** |
14863 | * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ | ||
14864 | * @phba: HBA structure that indicates port to create a queue on. | ||
14865 | * @rq: The queue structure to use for the receive queue. | ||
14866 | * @qno: The associated HBQ number | ||
14867 | * | ||
14868 | * | ||
14869 | * For SLI4 we need to adjust the RQ repost value based on | ||
14870 | * the number of buffers that are initially posted to the RQ. | ||
14871 | */ | ||
14872 | void | ||
14873 | lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno) | ||
14874 | { | ||
14875 | uint32_t cnt; | ||
14876 | |||
14877 | /* sanity check on queue memory */ | ||
14878 | if (!rq) | ||
14879 | return; | ||
14880 | cnt = lpfc_hbq_defs[qno]->entry_count; | ||
14881 | |||
14882 | /* Recalc repost for RQs based on buffers initially posted */ | ||
14883 | cnt = (cnt >> 3); | ||
14884 | if (cnt < LPFC_QUEUE_MIN_REPOST) | ||
14885 | cnt = LPFC_QUEUE_MIN_REPOST; | ||
14886 | |||
14887 | rq->entry_repost = cnt; | ||
14888 | } | ||
14889 | |||
14890 | /** | ||
14891 | * lpfc_rq_create - Create a Receive Queue on the HBA | 14964 | * lpfc_rq_create - Create a Receive Queue on the HBA |
14892 | * @phba: HBA structure that indicates port to create a queue on. | 14965 | * @phba: HBA structure that indicates port to create a queue on. |
14893 | * @hrq: The queue structure to use to create the header receive queue. | 14966 | * @hrq: The queue structure to use to create the header receive queue. |
@@ -15072,6 +15145,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
15072 | hrq->subtype = subtype; | 15145 | hrq->subtype = subtype; |
15073 | hrq->host_index = 0; | 15146 | hrq->host_index = 0; |
15074 | hrq->hba_index = 0; | 15147 | hrq->hba_index = 0; |
15148 | hrq->entry_repost = LPFC_RQ_REPOST; | ||
15075 | 15149 | ||
15076 | /* now create the data queue */ | 15150 | /* now create the data queue */ |
15077 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | 15151 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
@@ -15082,7 +15156,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
15082 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { | 15156 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { |
15083 | bf_set(lpfc_rq_context_rqe_count_1, | 15157 | bf_set(lpfc_rq_context_rqe_count_1, |
15084 | &rq_create->u.request.context, hrq->entry_count); | 15158 | &rq_create->u.request.context, hrq->entry_count); |
15085 | rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; | 15159 | if (subtype == LPFC_NVMET) |
15160 | rq_create->u.request.context.buffer_size = | ||
15161 | LPFC_NVMET_DATA_BUF_SIZE; | ||
15162 | else | ||
15163 | rq_create->u.request.context.buffer_size = | ||
15164 | LPFC_DATA_BUF_SIZE; | ||
15086 | bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, | 15165 | bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, |
15087 | LPFC_RQE_SIZE_8); | 15166 | LPFC_RQE_SIZE_8); |
15088 | bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, | 15167 | bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, |
@@ -15119,8 +15198,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
15119 | LPFC_RQ_RING_SIZE_4096); | 15198 | LPFC_RQ_RING_SIZE_4096); |
15120 | break; | 15199 | break; |
15121 | } | 15200 | } |
15122 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | 15201 | if (subtype == LPFC_NVMET) |
15123 | LPFC_DATA_BUF_SIZE); | 15202 | bf_set(lpfc_rq_context_buf_size, |
15203 | &rq_create->u.request.context, | ||
15204 | LPFC_NVMET_DATA_BUF_SIZE); | ||
15205 | else | ||
15206 | bf_set(lpfc_rq_context_buf_size, | ||
15207 | &rq_create->u.request.context, | ||
15208 | LPFC_DATA_BUF_SIZE); | ||
15124 | } | 15209 | } |
15125 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, | 15210 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, |
15126 | cq->queue_id); | 15211 | cq->queue_id); |
@@ -15153,6 +15238,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
15153 | drq->subtype = subtype; | 15238 | drq->subtype = subtype; |
15154 | drq->host_index = 0; | 15239 | drq->host_index = 0; |
15155 | drq->hba_index = 0; | 15240 | drq->hba_index = 0; |
15241 | drq->entry_repost = LPFC_RQ_REPOST; | ||
15156 | 15242 | ||
15157 | /* link the header and data RQs onto the parent cq child list */ | 15243 | /* link the header and data RQs onto the parent cq child list */ |
15158 | list_add_tail(&hrq->list, &cq->child_list); | 15244 | list_add_tail(&hrq->list, &cq->child_list); |
@@ -15265,7 +15351,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, | |||
15265 | cq->queue_id); | 15351 | cq->queue_id); |
15266 | bf_set(lpfc_rq_context_data_size, | 15352 | bf_set(lpfc_rq_context_data_size, |
15267 | &rq_create->u.request.context, | 15353 | &rq_create->u.request.context, |
15268 | LPFC_DATA_BUF_SIZE); | 15354 | LPFC_NVMET_DATA_BUF_SIZE); |
15269 | bf_set(lpfc_rq_context_hdr_size, | 15355 | bf_set(lpfc_rq_context_hdr_size, |
15270 | &rq_create->u.request.context, | 15356 | &rq_create->u.request.context, |
15271 | LPFC_HDR_BUF_SIZE); | 15357 | LPFC_HDR_BUF_SIZE); |
@@ -15310,6 +15396,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, | |||
15310 | hrq->subtype = subtype; | 15396 | hrq->subtype = subtype; |
15311 | hrq->host_index = 0; | 15397 | hrq->host_index = 0; |
15312 | hrq->hba_index = 0; | 15398 | hrq->hba_index = 0; |
15399 | hrq->entry_repost = LPFC_RQ_REPOST; | ||
15313 | 15400 | ||
15314 | drq->db_format = LPFC_DB_RING_FORMAT; | 15401 | drq->db_format = LPFC_DB_RING_FORMAT; |
15315 | drq->db_regaddr = phba->sli4_hba.RQDBregaddr; | 15402 | drq->db_regaddr = phba->sli4_hba.RQDBregaddr; |
@@ -15318,6 +15405,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, | |||
15318 | drq->subtype = subtype; | 15405 | drq->subtype = subtype; |
15319 | drq->host_index = 0; | 15406 | drq->host_index = 0; |
15320 | drq->hba_index = 0; | 15407 | drq->hba_index = 0; |
15408 | drq->entry_repost = LPFC_RQ_REPOST; | ||
15321 | 15409 | ||
15322 | list_add_tail(&hrq->list, &cq->child_list); | 15410 | list_add_tail(&hrq->list, &cq->child_list); |
15323 | list_add_tail(&drq->list, &cq->child_list); | 15411 | list_add_tail(&drq->list, &cq->child_list); |
@@ -16058,6 +16146,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
16058 | struct fc_vft_header *fc_vft_hdr; | 16146 | struct fc_vft_header *fc_vft_hdr; |
16059 | uint32_t *header = (uint32_t *) fc_hdr; | 16147 | uint32_t *header = (uint32_t *) fc_hdr; |
16060 | 16148 | ||
16149 | #define FC_RCTL_MDS_DIAGS 0xF4 | ||
16150 | |||
16061 | switch (fc_hdr->fh_r_ctl) { | 16151 | switch (fc_hdr->fh_r_ctl) { |
16062 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ | 16152 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ |
16063 | case FC_RCTL_DD_SOL_DATA: /* solicited data */ | 16153 | case FC_RCTL_DD_SOL_DATA: /* solicited data */ |
@@ -16085,6 +16175,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
16085 | case FC_RCTL_F_BSY: /* fabric busy to data frame */ | 16175 | case FC_RCTL_F_BSY: /* fabric busy to data frame */ |
16086 | case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ | 16176 | case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ |
16087 | case FC_RCTL_LCR: /* link credit reset */ | 16177 | case FC_RCTL_LCR: /* link credit reset */ |
16178 | case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ | ||
16088 | case FC_RCTL_END: /* end */ | 16179 | case FC_RCTL_END: /* end */ |
16089 | break; | 16180 | break; |
16090 | case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ | 16181 | case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ |
@@ -16094,12 +16185,16 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
16094 | default: | 16185 | default: |
16095 | goto drop; | 16186 | goto drop; |
16096 | } | 16187 | } |
16188 | |||
16189 | #define FC_TYPE_VENDOR_UNIQUE 0xFF | ||
16190 | |||
16097 | switch (fc_hdr->fh_type) { | 16191 | switch (fc_hdr->fh_type) { |
16098 | case FC_TYPE_BLS: | 16192 | case FC_TYPE_BLS: |
16099 | case FC_TYPE_ELS: | 16193 | case FC_TYPE_ELS: |
16100 | case FC_TYPE_FCP: | 16194 | case FC_TYPE_FCP: |
16101 | case FC_TYPE_CT: | 16195 | case FC_TYPE_CT: |
16102 | case FC_TYPE_NVME: | 16196 | case FC_TYPE_NVME: |
16197 | case FC_TYPE_VENDOR_UNIQUE: | ||
16103 | break; | 16198 | break; |
16104 | case FC_TYPE_IP: | 16199 | case FC_TYPE_IP: |
16105 | case FC_TYPE_ILS: | 16200 | case FC_TYPE_ILS: |
@@ -16110,12 +16205,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
16110 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 16205 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
16111 | "2538 Received frame rctl:%s (x%x), type:%s (x%x), " | 16206 | "2538 Received frame rctl:%s (x%x), type:%s (x%x), " |
16112 | "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", | 16207 | "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", |
16208 | (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" : | ||
16113 | lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, | 16209 | lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, |
16114 | lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type, | 16210 | (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ? |
16115 | be32_to_cpu(header[0]), be32_to_cpu(header[1]), | 16211 | "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type], |
16116 | be32_to_cpu(header[2]), be32_to_cpu(header[3]), | 16212 | fc_hdr->fh_type, be32_to_cpu(header[0]), |
16117 | be32_to_cpu(header[4]), be32_to_cpu(header[5]), | 16213 | be32_to_cpu(header[1]), be32_to_cpu(header[2]), |
16118 | be32_to_cpu(header[6])); | 16214 | be32_to_cpu(header[3]), be32_to_cpu(header[4]), |
16215 | be32_to_cpu(header[5]), be32_to_cpu(header[6])); | ||
16119 | return 0; | 16216 | return 0; |
16120 | drop: | 16217 | drop: |
16121 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, | 16218 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, |
@@ -16921,6 +17018,96 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, | |||
16921 | lpfc_sli_release_iocbq(phba, iocbq); | 17018 | lpfc_sli_release_iocbq(phba, iocbq); |
16922 | } | 17019 | } |
16923 | 17020 | ||
17021 | static void | ||
17022 | lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | ||
17023 | struct lpfc_iocbq *rspiocb) | ||
17024 | { | ||
17025 | struct lpfc_dmabuf *pcmd = cmdiocb->context2; | ||
17026 | |||
17027 | if (pcmd && pcmd->virt) | ||
17028 | pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); | ||
17029 | kfree(pcmd); | ||
17030 | lpfc_sli_release_iocbq(phba, cmdiocb); | ||
17031 | } | ||
17032 | |||
17033 | static void | ||
17034 | lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, | ||
17035 | struct hbq_dmabuf *dmabuf) | ||
17036 | { | ||
17037 | struct fc_frame_header *fc_hdr; | ||
17038 | struct lpfc_hba *phba = vport->phba; | ||
17039 | struct lpfc_iocbq *iocbq = NULL; | ||
17040 | union lpfc_wqe *wqe; | ||
17041 | struct lpfc_dmabuf *pcmd = NULL; | ||
17042 | uint32_t frame_len; | ||
17043 | int rc; | ||
17044 | |||
17045 | fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; | ||
17046 | frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); | ||
17047 | |||
17048 | /* Send the received frame back */ | ||
17049 | iocbq = lpfc_sli_get_iocbq(phba); | ||
17050 | if (!iocbq) | ||
17051 | goto exit; | ||
17052 | |||
17053 | /* Allocate buffer for command payload */ | ||
17054 | pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
17055 | if (pcmd) | ||
17056 | pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, | ||
17057 | &pcmd->phys); | ||
17058 | if (!pcmd || !pcmd->virt) | ||
17059 | goto exit; | ||
17060 | |||
17061 | INIT_LIST_HEAD(&pcmd->list); | ||
17062 | |||
17063 | /* copyin the payload */ | ||
17064 | memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); | ||
17065 | |||
17066 | /* fill in BDE's for command */ | ||
17067 | iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); | ||
17068 | iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); | ||
17069 | iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; | ||
17070 | iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; | ||
17071 | |||
17072 | iocbq->context2 = pcmd; | ||
17073 | iocbq->vport = vport; | ||
17074 | iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; | ||
17075 | iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; | ||
17076 | |||
17077 | /* | ||
17078 | * Setup rest of the iocb as though it were a WQE | ||
17079 | * Build the SEND_FRAME WQE | ||
17080 | */ | ||
17081 | wqe = (union lpfc_wqe *)&iocbq->iocb; | ||
17082 | |||
17083 | wqe->send_frame.frame_len = frame_len; | ||
17084 | wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); | ||
17085 | wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); | ||
17086 | wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); | ||
17087 | wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); | ||
17088 | wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); | ||
17089 | wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); | ||
17090 | |||
17091 | iocbq->iocb.ulpCommand = CMD_SEND_FRAME; | ||
17092 | iocbq->iocb.ulpLe = 1; | ||
17093 | iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; | ||
17094 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); | ||
17095 | if (rc == IOCB_ERROR) | ||
17096 | goto exit; | ||
17097 | |||
17098 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | ||
17099 | return; | ||
17100 | |||
17101 | exit: | ||
17102 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
17103 | "2023 Unable to process MDS loopback frame\n"); | ||
17104 | if (pcmd && pcmd->virt) | ||
17105 | pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); | ||
17106 | kfree(pcmd); | ||
17107 | lpfc_sli_release_iocbq(phba, iocbq); | ||
17108 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | ||
17109 | } | ||
17110 | |||
16924 | /** | 17111 | /** |
16925 | * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware | 17112 | * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware |
16926 | * @phba: Pointer to HBA context object. | 17113 | * @phba: Pointer to HBA context object. |
@@ -16959,6 +17146,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, | |||
16959 | fcfi = bf_get(lpfc_rcqe_fcf_id, | 17146 | fcfi = bf_get(lpfc_rcqe_fcf_id, |
16960 | &dmabuf->cq_event.cqe.rcqe_cmpl); | 17147 | &dmabuf->cq_event.cqe.rcqe_cmpl); |
16961 | 17148 | ||
17149 | if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { | ||
17150 | vport = phba->pport; | ||
17151 | /* Handle MDS Loopback frames */ | ||
17152 | lpfc_sli4_handle_mds_loopback(vport, dmabuf); | ||
17153 | return; | ||
17154 | } | ||
17155 | |||
16962 | /* d_id this frame is directed to */ | 17156 | /* d_id this frame is directed to */ |
16963 | did = sli4_did_from_fc_hdr(fc_hdr); | 17157 | did = sli4_did_from_fc_hdr(fc_hdr); |
16964 | 17158 | ||
@@ -17132,6 +17326,14 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) | |||
17132 | "status x%x add_status x%x, mbx status x%x\n", | 17326 | "status x%x add_status x%x, mbx status x%x\n", |
17133 | shdr_status, shdr_add_status, rc); | 17327 | shdr_status, shdr_add_status, rc); |
17134 | rc = -ENXIO; | 17328 | rc = -ENXIO; |
17329 | } else { | ||
17330 | /* | ||
17331 | * The next_rpi stores the next logical module-64 rpi value used | ||
17332 | * to post physical rpis in subsequent rpi postings. | ||
17333 | */ | ||
17334 | spin_lock_irq(&phba->hbalock); | ||
17335 | phba->sli4_hba.next_rpi = rpi_page->next_rpi; | ||
17336 | spin_unlock_irq(&phba->hbalock); | ||
17135 | } | 17337 | } |
17136 | return rc; | 17338 | return rc; |
17137 | } | 17339 | } |
@@ -18712,7 +18914,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, | |||
18712 | 18914 | ||
18713 | spin_lock_irqsave(&pring->ring_lock, iflags); | 18915 | spin_lock_irqsave(&pring->ring_lock, iflags); |
18714 | ctxp = pwqe->context2; | 18916 | ctxp = pwqe->context2; |
18715 | sglq = ctxp->rqb_buffer->sglq; | 18917 | sglq = ctxp->ctxbuf->sglq; |
18716 | if (pwqe->sli4_xritag == NO_XRI) { | 18918 | if (pwqe->sli4_xritag == NO_XRI) { |
18717 | pwqe->sli4_lxritag = sglq->sli4_lxritag; | 18919 | pwqe->sli4_lxritag = sglq->sli4_lxritag; |
18718 | pwqe->sli4_xritag = sglq->sli4_xritag; | 18920 | pwqe->sli4_xritag = sglq->sli4_xritag; |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index da46471337c8..cf863db27700 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -24,7 +24,6 @@ | |||
24 | #define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 | 24 | #define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 |
25 | #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 | 25 | #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 |
26 | #define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 | 26 | #define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 |
27 | #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 | ||
28 | #define LPFC_RPI_LOW_WATER_MARK 10 | 27 | #define LPFC_RPI_LOW_WATER_MARK 10 |
29 | 28 | ||
30 | #define LPFC_UNREG_FCF 1 | 29 | #define LPFC_UNREG_FCF 1 |
@@ -155,7 +154,11 @@ struct lpfc_queue { | |||
155 | uint32_t entry_count; /* Number of entries to support on the queue */ | 154 | uint32_t entry_count; /* Number of entries to support on the queue */ |
156 | uint32_t entry_size; /* Size of each queue entry. */ | 155 | uint32_t entry_size; /* Size of each queue entry. */ |
157 | uint32_t entry_repost; /* Count of entries before doorbell is rung */ | 156 | uint32_t entry_repost; /* Count of entries before doorbell is rung */ |
158 | #define LPFC_QUEUE_MIN_REPOST 8 | 157 | #define LPFC_EQ_REPOST 8 |
158 | #define LPFC_MQ_REPOST 8 | ||
159 | #define LPFC_CQ_REPOST 64 | ||
160 | #define LPFC_RQ_REPOST 64 | ||
161 | #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */ | ||
159 | uint32_t queue_id; /* Queue ID assigned by the hardware */ | 162 | uint32_t queue_id; /* Queue ID assigned by the hardware */ |
160 | uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ | 163 | uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ |
161 | uint32_t page_count; /* Number of pages allocated for this queue */ | 164 | uint32_t page_count; /* Number of pages allocated for this queue */ |
@@ -195,7 +198,7 @@ struct lpfc_queue { | |||
195 | /* defines for RQ stats */ | 198 | /* defines for RQ stats */ |
196 | #define RQ_no_posted_buf q_cnt_1 | 199 | #define RQ_no_posted_buf q_cnt_1 |
197 | #define RQ_no_buf_found q_cnt_2 | 200 | #define RQ_no_buf_found q_cnt_2 |
198 | #define RQ_buf_trunc q_cnt_3 | 201 | #define RQ_buf_posted q_cnt_3 |
199 | #define RQ_rcv_buf q_cnt_4 | 202 | #define RQ_rcv_buf q_cnt_4 |
200 | 203 | ||
201 | uint64_t isr_timestamp; | 204 | uint64_t isr_timestamp; |
@@ -617,12 +620,17 @@ struct lpfc_sli4_hba { | |||
617 | uint16_t scsi_xri_start; | 620 | uint16_t scsi_xri_start; |
618 | uint16_t els_xri_cnt; | 621 | uint16_t els_xri_cnt; |
619 | uint16_t nvmet_xri_cnt; | 622 | uint16_t nvmet_xri_cnt; |
623 | uint16_t nvmet_ctx_cnt; | ||
624 | uint16_t nvmet_io_wait_cnt; | ||
625 | uint16_t nvmet_io_wait_total; | ||
620 | struct list_head lpfc_els_sgl_list; | 626 | struct list_head lpfc_els_sgl_list; |
621 | struct list_head lpfc_abts_els_sgl_list; | 627 | struct list_head lpfc_abts_els_sgl_list; |
622 | struct list_head lpfc_nvmet_sgl_list; | 628 | struct list_head lpfc_nvmet_sgl_list; |
623 | struct list_head lpfc_abts_nvmet_ctx_list; | 629 | struct list_head lpfc_abts_nvmet_ctx_list; |
624 | struct list_head lpfc_abts_scsi_buf_list; | 630 | struct list_head lpfc_abts_scsi_buf_list; |
625 | struct list_head lpfc_abts_nvme_buf_list; | 631 | struct list_head lpfc_abts_nvme_buf_list; |
632 | struct list_head lpfc_nvmet_ctx_list; | ||
633 | struct list_head lpfc_nvmet_io_wait_list; | ||
626 | struct lpfc_sglq **lpfc_sglq_active_list; | 634 | struct lpfc_sglq **lpfc_sglq_active_list; |
627 | struct list_head lpfc_rpi_hdr_list; | 635 | struct list_head lpfc_rpi_hdr_list; |
628 | unsigned long *rpi_bmask; | 636 | unsigned long *rpi_bmask; |
@@ -654,6 +662,7 @@ struct lpfc_sli4_hba { | |||
654 | spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ | 662 | spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ |
655 | spinlock_t sgl_list_lock; /* list of aborted els IOs */ | 663 | spinlock_t sgl_list_lock; /* list of aborted els IOs */ |
656 | spinlock_t nvmet_io_lock; | 664 | spinlock_t nvmet_io_lock; |
665 | spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */ | ||
657 | uint32_t physical_port; | 666 | uint32_t physical_port; |
658 | 667 | ||
659 | /* CPU to vector mapping information */ | 668 | /* CPU to vector mapping information */ |
@@ -661,8 +670,6 @@ struct lpfc_sli4_hba { | |||
661 | uint16_t num_online_cpu; | 670 | uint16_t num_online_cpu; |
662 | uint16_t num_present_cpu; | 671 | uint16_t num_present_cpu; |
663 | uint16_t curr_disp_cpu; | 672 | uint16_t curr_disp_cpu; |
664 | |||
665 | uint16_t nvmet_mrq_post_idx; | ||
666 | }; | 673 | }; |
667 | 674 | ||
668 | enum lpfc_sge_type { | 675 | enum lpfc_sge_type { |
@@ -698,6 +705,7 @@ struct lpfc_rpi_hdr { | |||
698 | struct lpfc_dmabuf *dmabuf; | 705 | struct lpfc_dmabuf *dmabuf; |
699 | uint32_t page_count; | 706 | uint32_t page_count; |
700 | uint32_t start_rpi; | 707 | uint32_t start_rpi; |
708 | uint16_t next_rpi; | ||
701 | }; | 709 | }; |
702 | 710 | ||
703 | struct lpfc_rsrc_blks { | 711 | struct lpfc_rsrc_blks { |
@@ -762,7 +770,6 @@ int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, | |||
762 | int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, | 770 | int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, |
763 | struct lpfc_queue **drqp, struct lpfc_queue **cqp, | 771 | struct lpfc_queue **drqp, struct lpfc_queue **cqp, |
764 | uint32_t subtype); | 772 | uint32_t subtype); |
765 | void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int); | ||
766 | int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); | 773 | int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); |
767 | int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); | 774 | int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); |
768 | int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); | 775 | int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 1c26dc67151b..c2653244221c 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -20,7 +20,7 @@ | |||
20 | * included with this package. * | 20 | * included with this package. * |
21 | *******************************************************************/ | 21 | *******************************************************************/ |
22 | 22 | ||
23 | #define LPFC_DRIVER_VERSION "11.2.0.12" | 23 | #define LPFC_DRIVER_VERSION "11.2.0.14" |
24 | #define LPFC_DRIVER_NAME "lpfc" | 24 | #define LPFC_DRIVER_NAME "lpfc" |
25 | 25 | ||
26 | /* Used for SLI 2/3 */ | 26 | /* Used for SLI 2/3 */ |
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index a4aadf5f4dc6..1cc814f1505a 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
@@ -3770,9 +3770,6 @@ static long pmcraid_ioctl_passthrough( | |||
3770 | pmcraid_err("couldn't build passthrough ioadls\n"); | 3770 | pmcraid_err("couldn't build passthrough ioadls\n"); |
3771 | goto out_free_cmd; | 3771 | goto out_free_cmd; |
3772 | } | 3772 | } |
3773 | } else if (request_size < 0) { | ||
3774 | rc = -EINVAL; | ||
3775 | goto out_free_cmd; | ||
3776 | } | 3773 | } |
3777 | 3774 | ||
3778 | /* If data is being written into the device, copy the data from user | 3775 | /* If data is being written into the device, copy the data from user |
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 40aeb6bb96a2..07ee88200e91 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h | |||
@@ -259,7 +259,7 @@ struct qedf_io_log { | |||
259 | uint16_t task_id; | 259 | uint16_t task_id; |
260 | uint32_t port_id; /* Remote port fabric ID */ | 260 | uint32_t port_id; /* Remote port fabric ID */ |
261 | int lun; | 261 | int lun; |
262 | char op; /* SCSI CDB */ | 262 | unsigned char op; /* SCSI CDB */ |
263 | uint8_t lba[4]; | 263 | uint8_t lba[4]; |
264 | unsigned int bufflen; /* SCSI buffer length */ | 264 | unsigned int bufflen; /* SCSI buffer length */ |
265 | unsigned int sg_count; /* Number of SG elements */ | 265 | unsigned int sg_count; /* Number of SG elements */ |
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index c505d41f6dc8..90627033bde6 100644 --- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c | |||
@@ -109,7 +109,7 @@ retry_els: | |||
109 | did = fcport->rdata->ids.port_id; | 109 | did = fcport->rdata->ids.port_id; |
110 | sid = fcport->sid; | 110 | sid = fcport->sid; |
111 | 111 | ||
112 | __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did, | 112 | __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, |
113 | FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | | 113 | FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | |
114 | FC_FC_SEQ_INIT, 0); | 114 | FC_FC_SEQ_INIT, 0); |
115 | 115 | ||
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index cceddd995a4b..a5c97342fd5d 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c | |||
@@ -2895,7 +2895,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) | |||
2895 | slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; | 2895 | slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; |
2896 | slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; | 2896 | slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; |
2897 | slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; | 2897 | slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; |
2898 | memcpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); | 2898 | strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); |
2899 | rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); | 2899 | rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); |
2900 | if (rc) { | 2900 | if (rc) { |
2901 | QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); | 2901 | QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 7bfbcfa7af40..61cdd99ae41e 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -763,6 +763,8 @@ struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, | |||
763 | struct scsi_device *sdev; | 763 | struct scsi_device *sdev; |
764 | 764 | ||
765 | list_for_each_entry(sdev, &shost->__devices, siblings) { | 765 | list_for_each_entry(sdev, &shost->__devices, siblings) { |
766 | if (sdev->sdev_state == SDEV_DEL) | ||
767 | continue; | ||
766 | if (sdev->channel == channel && sdev->id == id && | 768 | if (sdev->channel == channel && sdev->id == id && |
767 | sdev->lun ==lun) | 769 | sdev->lun ==lun) |
768 | return sdev; | 770 | return sdev; |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 814a4bd8405d..99e16ac479e3 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <scsi/scsi_driver.h> | 30 | #include <scsi/scsi_driver.h> |
31 | #include <scsi/scsi_eh.h> | 31 | #include <scsi/scsi_eh.h> |
32 | #include <scsi/scsi_host.h> | 32 | #include <scsi/scsi_host.h> |
33 | #include <scsi/scsi_transport.h> /* __scsi_init_queue() */ | ||
33 | #include <scsi/scsi_dh.h> | 34 | #include <scsi/scsi_dh.h> |
34 | 35 | ||
35 | #include <trace/events/scsi.h> | 36 | #include <trace/events/scsi.h> |
@@ -1850,7 +1851,7 @@ static int scsi_mq_prep_fn(struct request *req) | |||
1850 | 1851 | ||
1851 | /* zero out the cmd, except for the embedded scsi_request */ | 1852 | /* zero out the cmd, except for the embedded scsi_request */ |
1852 | memset((char *)cmd + sizeof(cmd->req), 0, | 1853 | memset((char *)cmd + sizeof(cmd->req), 0, |
1853 | sizeof(*cmd) - sizeof(cmd->req)); | 1854 | sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size); |
1854 | 1855 | ||
1855 | req->special = cmd; | 1856 | req->special = cmd; |
1856 | 1857 | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index f9d1432d7cc5..b6bb4e0ce0e3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -827,21 +827,32 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) | |||
827 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); | 827 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); |
828 | u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); | 828 | u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); |
829 | u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); | 829 | u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); |
830 | int ret; | ||
830 | 831 | ||
831 | if (!(rq->cmd_flags & REQ_NOUNMAP)) { | 832 | if (!(rq->cmd_flags & REQ_NOUNMAP)) { |
832 | switch (sdkp->zeroing_mode) { | 833 | switch (sdkp->zeroing_mode) { |
833 | case SD_ZERO_WS16_UNMAP: | 834 | case SD_ZERO_WS16_UNMAP: |
834 | return sd_setup_write_same16_cmnd(cmd, true); | 835 | ret = sd_setup_write_same16_cmnd(cmd, true); |
836 | goto out; | ||
835 | case SD_ZERO_WS10_UNMAP: | 837 | case SD_ZERO_WS10_UNMAP: |
836 | return sd_setup_write_same10_cmnd(cmd, true); | 838 | ret = sd_setup_write_same10_cmnd(cmd, true); |
839 | goto out; | ||
837 | } | 840 | } |
838 | } | 841 | } |
839 | 842 | ||
840 | if (sdp->no_write_same) | 843 | if (sdp->no_write_same) |
841 | return BLKPREP_INVALID; | 844 | return BLKPREP_INVALID; |
845 | |||
842 | if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) | 846 | if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) |
843 | return sd_setup_write_same16_cmnd(cmd, false); | 847 | ret = sd_setup_write_same16_cmnd(cmd, false); |
844 | return sd_setup_write_same10_cmnd(cmd, false); | 848 | else |
849 | ret = sd_setup_write_same10_cmnd(cmd, false); | ||
850 | |||
851 | out: | ||
852 | if (sd_is_zoned(sdkp) && ret == BLKPREP_OK) | ||
853 | return sd_zbc_write_lock_zone(cmd); | ||
854 | |||
855 | return ret; | ||
845 | } | 856 | } |
846 | 857 | ||
847 | static void sd_config_write_same(struct scsi_disk *sdkp) | 858 | static void sd_config_write_same(struct scsi_disk *sdkp) |
@@ -948,6 +959,10 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) | |||
948 | rq->__data_len = sdp->sector_size; | 959 | rq->__data_len = sdp->sector_size; |
949 | ret = scsi_init_io(cmd); | 960 | ret = scsi_init_io(cmd); |
950 | rq->__data_len = nr_bytes; | 961 | rq->__data_len = nr_bytes; |
962 | |||
963 | if (sd_is_zoned(sdkp) && ret != BLKPREP_OK) | ||
964 | sd_zbc_write_unlock_zone(cmd); | ||
965 | |||
951 | return ret; | 966 | return ret; |
952 | } | 967 | } |
953 | 968 | ||
@@ -1567,17 +1582,21 @@ out: | |||
1567 | return retval; | 1582 | return retval; |
1568 | } | 1583 | } |
1569 | 1584 | ||
1570 | static int sd_sync_cache(struct scsi_disk *sdkp) | 1585 | static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) |
1571 | { | 1586 | { |
1572 | int retries, res; | 1587 | int retries, res; |
1573 | struct scsi_device *sdp = sdkp->device; | 1588 | struct scsi_device *sdp = sdkp->device; |
1574 | const int timeout = sdp->request_queue->rq_timeout | 1589 | const int timeout = sdp->request_queue->rq_timeout |
1575 | * SD_FLUSH_TIMEOUT_MULTIPLIER; | 1590 | * SD_FLUSH_TIMEOUT_MULTIPLIER; |
1576 | struct scsi_sense_hdr sshdr; | 1591 | struct scsi_sense_hdr my_sshdr; |
1577 | 1592 | ||
1578 | if (!scsi_device_online(sdp)) | 1593 | if (!scsi_device_online(sdp)) |
1579 | return -ENODEV; | 1594 | return -ENODEV; |
1580 | 1595 | ||
1596 | /* caller might not be interested in sense, but we need it */ | ||
1597 | if (!sshdr) | ||
1598 | sshdr = &my_sshdr; | ||
1599 | |||
1581 | for (retries = 3; retries > 0; --retries) { | 1600 | for (retries = 3; retries > 0; --retries) { |
1582 | unsigned char cmd[10] = { 0 }; | 1601 | unsigned char cmd[10] = { 0 }; |
1583 | 1602 | ||
@@ -1586,7 +1605,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp) | |||
1586 | * Leave the rest of the command zero to indicate | 1605 | * Leave the rest of the command zero to indicate |
1587 | * flush everything. | 1606 | * flush everything. |
1588 | */ | 1607 | */ |
1589 | res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, | 1608 | res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr, |
1590 | timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL); | 1609 | timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL); |
1591 | if (res == 0) | 1610 | if (res == 0) |
1592 | break; | 1611 | break; |
@@ -1596,11 +1615,12 @@ static int sd_sync_cache(struct scsi_disk *sdkp) | |||
1596 | sd_print_result(sdkp, "Synchronize Cache(10) failed", res); | 1615 | sd_print_result(sdkp, "Synchronize Cache(10) failed", res); |
1597 | 1616 | ||
1598 | if (driver_byte(res) & DRIVER_SENSE) | 1617 | if (driver_byte(res) & DRIVER_SENSE) |
1599 | sd_print_sense_hdr(sdkp, &sshdr); | 1618 | sd_print_sense_hdr(sdkp, sshdr); |
1619 | |||
1600 | /* we need to evaluate the error return */ | 1620 | /* we need to evaluate the error return */ |
1601 | if (scsi_sense_valid(&sshdr) && | 1621 | if (scsi_sense_valid(sshdr) && |
1602 | (sshdr.asc == 0x3a || /* medium not present */ | 1622 | (sshdr->asc == 0x3a || /* medium not present */ |
1603 | sshdr.asc == 0x20)) /* invalid command */ | 1623 | sshdr->asc == 0x20)) /* invalid command */ |
1604 | /* this is no error here */ | 1624 | /* this is no error here */ |
1605 | return 0; | 1625 | return 0; |
1606 | 1626 | ||
@@ -3444,7 +3464,7 @@ static void sd_shutdown(struct device *dev) | |||
3444 | 3464 | ||
3445 | if (sdkp->WCE && sdkp->media_present) { | 3465 | if (sdkp->WCE && sdkp->media_present) { |
3446 | sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); | 3466 | sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); |
3447 | sd_sync_cache(sdkp); | 3467 | sd_sync_cache(sdkp, NULL); |
3448 | } | 3468 | } |
3449 | 3469 | ||
3450 | if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { | 3470 | if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { |
@@ -3456,6 +3476,7 @@ static void sd_shutdown(struct device *dev) | |||
3456 | static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) | 3476 | static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) |
3457 | { | 3477 | { |
3458 | struct scsi_disk *sdkp = dev_get_drvdata(dev); | 3478 | struct scsi_disk *sdkp = dev_get_drvdata(dev); |
3479 | struct scsi_sense_hdr sshdr; | ||
3459 | int ret = 0; | 3480 | int ret = 0; |
3460 | 3481 | ||
3461 | if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ | 3482 | if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ |
@@ -3463,12 +3484,23 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) | |||
3463 | 3484 | ||
3464 | if (sdkp->WCE && sdkp->media_present) { | 3485 | if (sdkp->WCE && sdkp->media_present) { |
3465 | sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); | 3486 | sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); |
3466 | ret = sd_sync_cache(sdkp); | 3487 | ret = sd_sync_cache(sdkp, &sshdr); |
3488 | |||
3467 | if (ret) { | 3489 | if (ret) { |
3468 | /* ignore OFFLINE device */ | 3490 | /* ignore OFFLINE device */ |
3469 | if (ret == -ENODEV) | 3491 | if (ret == -ENODEV) |
3470 | ret = 0; | 3492 | return 0; |
3471 | goto done; | 3493 | |
3494 | if (!scsi_sense_valid(&sshdr) || | ||
3495 | sshdr.sense_key != ILLEGAL_REQUEST) | ||
3496 | return ret; | ||
3497 | |||
3498 | /* | ||
3499 | * sshdr.sense_key == ILLEGAL_REQUEST means this drive | ||
3500 | * doesn't support sync. There's not much to do and | ||
3501 | * suspend shouldn't fail. | ||
3502 | */ | ||
3503 | ret = 0; | ||
3472 | } | 3504 | } |
3473 | } | 3505 | } |
3474 | 3506 | ||
@@ -3480,7 +3512,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) | |||
3480 | ret = 0; | 3512 | ret = 0; |
3481 | } | 3513 | } |
3482 | 3514 | ||
3483 | done: | ||
3484 | return ret; | 3515 | return ret; |
3485 | } | 3516 | } |
3486 | 3517 | ||
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 0a38ba01b7b4..82c33a6edbea 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -2074,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id) | |||
2074 | if ((1 == resp->done) && (!resp->sg_io_owned) && | 2074 | if ((1 == resp->done) && (!resp->sg_io_owned) && |
2075 | ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { | 2075 | ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { |
2076 | resp->done = 2; /* guard against other readers */ | 2076 | resp->done = 2; /* guard against other readers */ |
2077 | break; | 2077 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2078 | return resp; | ||
2078 | } | 2079 | } |
2079 | } | 2080 | } |
2080 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); | 2081 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2081 | return resp; | 2082 | return NULL; |
2082 | } | 2083 | } |
2083 | 2084 | ||
2084 | /* always adds to end of list */ | 2085 | /* always adds to end of list */ |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index abc7e87937cc..ffe8d8608818 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -7698,6 +7698,12 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba) | |||
7698 | ufshcd_add_spm_lvl_sysfs_nodes(hba); | 7698 | ufshcd_add_spm_lvl_sysfs_nodes(hba); |
7699 | } | 7699 | } |
7700 | 7700 | ||
7701 | static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba) | ||
7702 | { | ||
7703 | device_remove_file(hba->dev, &hba->rpm_lvl_attr); | ||
7704 | device_remove_file(hba->dev, &hba->spm_lvl_attr); | ||
7705 | } | ||
7706 | |||
7701 | /** | 7707 | /** |
7702 | * ufshcd_shutdown - shutdown routine | 7708 | * ufshcd_shutdown - shutdown routine |
7703 | * @hba: per adapter instance | 7709 | * @hba: per adapter instance |
@@ -7735,6 +7741,7 @@ EXPORT_SYMBOL(ufshcd_shutdown); | |||
7735 | */ | 7741 | */ |
7736 | void ufshcd_remove(struct ufs_hba *hba) | 7742 | void ufshcd_remove(struct ufs_hba *hba) |
7737 | { | 7743 | { |
7744 | ufshcd_remove_sysfs_nodes(hba); | ||
7738 | scsi_remove_host(hba->host); | 7745 | scsi_remove_host(hba->host); |
7739 | /* disable interrupts */ | 7746 | /* disable interrupts */ |
7740 | ufshcd_disable_intr(hba, hba->intr_mask); | 7747 | ufshcd_disable_intr(hba, hba->intr_mask); |
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c index b6195fdf0d00..22e98a90468c 100644 --- a/drivers/soc/bcm/brcmstb/common.c +++ b/drivers/soc/bcm/brcmstb/common.c | |||
@@ -49,7 +49,7 @@ static const struct of_device_id sun_top_ctrl_match[] = { | |||
49 | { .compatible = "brcm,bcm7420-sun-top-ctrl", }, | 49 | { .compatible = "brcm,bcm7420-sun-top-ctrl", }, |
50 | { .compatible = "brcm,bcm7425-sun-top-ctrl", }, | 50 | { .compatible = "brcm,bcm7425-sun-top-ctrl", }, |
51 | { .compatible = "brcm,bcm7429-sun-top-ctrl", }, | 51 | { .compatible = "brcm,bcm7429-sun-top-ctrl", }, |
52 | { .compatible = "brcm,bcm7425-sun-top-ctrl", }, | 52 | { .compatible = "brcm,bcm7435-sun-top-ctrl", }, |
53 | { .compatible = "brcm,brcmstb-sun-top-ctrl", }, | 53 | { .compatible = "brcm,brcmstb-sun-top-ctrl", }, |
54 | { } | 54 | { } |
55 | }; | 55 | }; |
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index 357a5d8f8da0..a5b86a28f343 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig | |||
@@ -2,8 +2,9 @@ menu "i.MX SoC drivers" | |||
2 | 2 | ||
3 | config IMX7_PM_DOMAINS | 3 | config IMX7_PM_DOMAINS |
4 | bool "i.MX7 PM domains" | 4 | bool "i.MX7 PM domains" |
5 | select PM_GENERIC_DOMAINS | ||
6 | depends on SOC_IMX7D || (COMPILE_TEST && OF) | 5 | depends on SOC_IMX7D || (COMPILE_TEST && OF) |
6 | depends on PM | ||
7 | select PM_GENERIC_DOMAINS | ||
7 | default y if SOC_IMX7D | 8 | default y if SOC_IMX7D |
8 | 9 | ||
9 | endmenu | 10 | endmenu |
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c index ecebe2eecc3a..026182d3b27c 100644 --- a/drivers/soc/ti/knav_dma.c +++ b/drivers/soc/ti/knav_dma.c | |||
@@ -413,7 +413,7 @@ static int of_channel_match_helper(struct device_node *np, const char *name, | |||
413 | * @name: slave channel name | 413 | * @name: slave channel name |
414 | * @config: dma configuration parameters | 414 | * @config: dma configuration parameters |
415 | * | 415 | * |
416 | * Returns pointer to appropriate DMA channel on success or NULL. | 416 | * Returns pointer to appropriate DMA channel on success or error. |
417 | */ | 417 | */ |
418 | void *knav_dma_open_channel(struct device *dev, const char *name, | 418 | void *knav_dma_open_channel(struct device *dev, const char *name, |
419 | struct knav_dma_cfg *config) | 419 | struct knav_dma_cfg *config) |
diff --git a/drivers/staging/android/ion/devicetree.txt b/drivers/staging/android/ion/devicetree.txt deleted file mode 100644 index 168715271f06..000000000000 --- a/drivers/staging/android/ion/devicetree.txt +++ /dev/null | |||
@@ -1,51 +0,0 @@ | |||
1 | Ion Memory Manager | ||
2 | |||
3 | Ion is a memory manager that allows for sharing of buffers via dma-buf. | ||
4 | Ion allows for different types of allocation via an abstraction called | ||
5 | a 'heap'. A heap represents a specific type of memory. Each heap has | ||
6 | a different type. There can be multiple instances of the same heap | ||
7 | type. | ||
8 | |||
9 | Specific heap instances are tied to heap IDs. Heap IDs are not to be specified | ||
10 | in the devicetree. | ||
11 | |||
12 | Required properties for Ion | ||
13 | |||
14 | - compatible: "linux,ion" PLUS a compatible property for the device | ||
15 | |||
16 | All child nodes of a linux,ion node are interpreted as heaps | ||
17 | |||
18 | required properties for heaps | ||
19 | |||
20 | - compatible: compatible string for a heap type PLUS a compatible property | ||
21 | for the specific instance of the heap. Current heap types | ||
22 | -- linux,ion-heap-system | ||
23 | -- linux,ion-heap-system-contig | ||
24 | -- linux,ion-heap-carveout | ||
25 | -- linux,ion-heap-chunk | ||
26 | -- linux,ion-heap-dma | ||
27 | -- linux,ion-heap-custom | ||
28 | |||
29 | Optional properties | ||
30 | - memory-region: A phandle to a memory region. Required for DMA heap type | ||
31 | (see reserved-memory.txt for details on the reservation) | ||
32 | |||
33 | Example: | ||
34 | |||
35 | ion { | ||
36 | compatbile = "hisilicon,ion", "linux,ion"; | ||
37 | |||
38 | ion-system-heap { | ||
39 | compatbile = "hisilicon,system-heap", "linux,ion-heap-system" | ||
40 | }; | ||
41 | |||
42 | ion-camera-region { | ||
43 | compatible = "hisilicon,camera-heap", "linux,ion-heap-dma" | ||
44 | memory-region = <&camera_region>; | ||
45 | }; | ||
46 | |||
47 | ion-fb-region { | ||
48 | compatbile = "hisilicon,fb-heap", "linux,ion-heap-dma" | ||
49 | memory-region = <&fb_region>; | ||
50 | }; | ||
51 | } | ||
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c index 522bd62c102e..8611adf3bb2e 100644 --- a/drivers/staging/ccree/ssi_request_mgr.c +++ b/drivers/staging/ccree/ssi_request_mgr.c | |||
@@ -376,7 +376,6 @@ int send_request( | |||
376 | rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev); | 376 | rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev); |
377 | if (rc != 0) { | 377 | if (rc != 0) { |
378 | SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc); | 378 | SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc); |
379 | spin_unlock_bh(&req_mgr_h->hw_lock); | ||
380 | return rc; | 379 | return rc; |
381 | } | 380 | } |
382 | #endif | 381 | #endif |
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig index 2e325cb747ae..730fd6d4db33 100644 --- a/drivers/staging/fsl-dpaa2/Kconfig +++ b/drivers/staging/fsl-dpaa2/Kconfig | |||
@@ -12,6 +12,7 @@ config FSL_DPAA2 | |||
12 | config FSL_DPAA2_ETH | 12 | config FSL_DPAA2_ETH |
13 | tristate "Freescale DPAA2 Ethernet" | 13 | tristate "Freescale DPAA2 Ethernet" |
14 | depends on FSL_DPAA2 && FSL_MC_DPIO | 14 | depends on FSL_DPAA2 && FSL_MC_DPIO |
15 | depends on NETDEVICES && ETHERNET | ||
15 | ---help--- | 16 | ---help--- |
16 | Ethernet driver for Freescale DPAA2 SoCs, using the | 17 | Ethernet driver for Freescale DPAA2 SoCs, using the |
17 | Freescale MC bus driver | 18 | Freescale MC bus driver |
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c index 4723a0bd5067..1c6ed5b2a6f9 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c | |||
@@ -97,8 +97,9 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val) | |||
97 | 97 | ||
98 | switch (variable) { | 98 | switch (variable) { |
99 | case HW_VAR_BSSID: | 99 | case HW_VAR_BSSID: |
100 | rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]); | 100 | /* BSSIDR 2 byte alignment */ |
101 | rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]); | 101 | rtl92e_writew(dev, BSSIDR, *(u16 *)val); |
102 | rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2)); | ||
102 | break; | 103 | break; |
103 | 104 | ||
104 | case HW_VAR_MEDIA_STATUS: | 105 | case HW_VAR_MEDIA_STATUS: |
@@ -624,7 +625,7 @@ void rtl92e_get_eeprom_size(struct net_device *dev) | |||
624 | struct r8192_priv *priv = rtllib_priv(dev); | 625 | struct r8192_priv *priv = rtllib_priv(dev); |
625 | 626 | ||
626 | RT_TRACE(COMP_INIT, "===========>%s()\n", __func__); | 627 | RT_TRACE(COMP_INIT, "===========>%s()\n", __func__); |
627 | curCR = rtl92e_readl(dev, EPROM_CMD); | 628 | curCR = rtl92e_readw(dev, EPROM_CMD); |
628 | RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, | 629 | RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, |
629 | curCR); | 630 | curCR); |
630 | priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 : | 631 | priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 : |
@@ -961,8 +962,8 @@ static void _rtl92e_net_update(struct net_device *dev) | |||
961 | rtl92e_config_rate(dev, &rate_config); | 962 | rtl92e_config_rate(dev, &rate_config); |
962 | priv->dot11CurrentPreambleMode = PREAMBLE_AUTO; | 963 | priv->dot11CurrentPreambleMode = PREAMBLE_AUTO; |
963 | priv->basic_rate = rate_config &= 0x15f; | 964 | priv->basic_rate = rate_config &= 0x15f; |
964 | rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]); | 965 | rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid); |
965 | rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]); | 966 | rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2)); |
966 | 967 | ||
967 | if (priv->rtllib->iw_mode == IW_MODE_ADHOC) { | 968 | if (priv->rtllib->iw_mode == IW_MODE_ADHOC) { |
968 | rtl92e_writew(dev, ATIMWND, 2); | 969 | rtl92e_writew(dev, ATIMWND, 2); |
@@ -1182,8 +1183,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, | |||
1182 | struct cb_desc *cb_desc, struct sk_buff *skb) | 1183 | struct cb_desc *cb_desc, struct sk_buff *skb) |
1183 | { | 1184 | { |
1184 | struct r8192_priv *priv = rtllib_priv(dev); | 1185 | struct r8192_priv *priv = rtllib_priv(dev); |
1185 | dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len, | 1186 | dma_addr_t mapping; |
1186 | PCI_DMA_TODEVICE); | ||
1187 | struct tx_fwinfo_8190pci *pTxFwInfo; | 1187 | struct tx_fwinfo_8190pci *pTxFwInfo; |
1188 | 1188 | ||
1189 | pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; | 1189 | pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; |
@@ -1194,8 +1194,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, | |||
1194 | pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT, | 1194 | pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT, |
1195 | pTxFwInfo->TxRate, cb_desc); | 1195 | pTxFwInfo->TxRate, cb_desc); |
1196 | 1196 | ||
1197 | if (pci_dma_mapping_error(priv->pdev, mapping)) | ||
1198 | netdev_err(dev, "%s(): DMA Mapping error\n", __func__); | ||
1199 | if (cb_desc->bAMPDUEnable) { | 1197 | if (cb_desc->bAMPDUEnable) { |
1200 | pTxFwInfo->AllowAggregation = 1; | 1198 | pTxFwInfo->AllowAggregation = 1; |
1201 | pTxFwInfo->RxMF = cb_desc->ampdu_factor; | 1199 | pTxFwInfo->RxMF = cb_desc->ampdu_factor; |
@@ -1230,6 +1228,14 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, | |||
1230 | } | 1228 | } |
1231 | 1229 | ||
1232 | memset((u8 *)pdesc, 0, 12); | 1230 | memset((u8 *)pdesc, 0, 12); |
1231 | |||
1232 | mapping = pci_map_single(priv->pdev, skb->data, skb->len, | ||
1233 | PCI_DMA_TODEVICE); | ||
1234 | if (pci_dma_mapping_error(priv->pdev, mapping)) { | ||
1235 | netdev_err(dev, "%s(): DMA Mapping error\n", __func__); | ||
1236 | return; | ||
1237 | } | ||
1238 | |||
1233 | pdesc->LINIP = 0; | 1239 | pdesc->LINIP = 0; |
1234 | pdesc->CmdInit = 1; | 1240 | pdesc->CmdInit = 1; |
1235 | pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8; | 1241 | pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8; |
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c index 48bbd9e8a52f..dcc4eb691889 100644 --- a/drivers/staging/rtl8192e/rtl819x_TSProc.c +++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c | |||
@@ -306,11 +306,6 @@ static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr, | |||
306 | pTsCommonInfo->TClasNum = TCLAS_Num; | 306 | pTsCommonInfo->TClasNum = TCLAS_Num; |
307 | } | 307 | } |
308 | 308 | ||
309 | static bool IsACValid(unsigned int tid) | ||
310 | { | ||
311 | return tid < 7; | ||
312 | } | ||
313 | |||
314 | bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, | 309 | bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, |
315 | u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs) | 310 | u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs) |
316 | { | 311 | { |
@@ -328,12 +323,6 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, | |||
328 | if (ieee->current_network.qos_data.supported == 0) { | 323 | if (ieee->current_network.qos_data.supported == 0) { |
329 | UP = 0; | 324 | UP = 0; |
330 | } else { | 325 | } else { |
331 | if (!IsACValid(TID)) { | ||
332 | netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n", | ||
333 | __func__, TID); | ||
334 | return false; | ||
335 | } | ||
336 | |||
337 | switch (TID) { | 326 | switch (TID) { |
338 | case 0: | 327 | case 0: |
339 | case 3: | 328 | case 3: |
@@ -351,6 +340,10 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, | |||
351 | case 7: | 340 | case 7: |
352 | UP = 7; | 341 | UP = 7; |
353 | break; | 342 | break; |
343 | default: | ||
344 | netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n", | ||
345 | __func__, TID); | ||
346 | return false; | ||
354 | } | 347 | } |
355 | } | 348 | } |
356 | 349 | ||
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index 5e7a61f24f8d..36c3189fc4b7 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | |||
@@ -3531,7 +3531,6 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev) | |||
3531 | pwdev_priv->power_mgmt = true; | 3531 | pwdev_priv->power_mgmt = true; |
3532 | else | 3532 | else |
3533 | pwdev_priv->power_mgmt = false; | 3533 | pwdev_priv->power_mgmt = false; |
3534 | kfree((u8 *)wdev); | ||
3535 | 3534 | ||
3536 | return ret; | 3535 | return ret; |
3537 | 3536 | ||
diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/staging/typec/fusb302/fusb302.c index 2cee9a952c9b..4a356e509fe4 100644 --- a/drivers/staging/typec/fusb302/fusb302.c +++ b/drivers/staging/typec/fusb302/fusb302.c | |||
@@ -264,22 +264,36 @@ static void fusb302_debugfs_exit(const struct fusb302_chip *chip) { } | |||
264 | 264 | ||
265 | #define FUSB302_RESUME_RETRY 10 | 265 | #define FUSB302_RESUME_RETRY 10 |
266 | #define FUSB302_RESUME_RETRY_SLEEP 50 | 266 | #define FUSB302_RESUME_RETRY_SLEEP 50 |
267 | static int fusb302_i2c_write(struct fusb302_chip *chip, | 267 | |
268 | u8 address, u8 data) | 268 | static bool fusb302_is_suspended(struct fusb302_chip *chip) |
269 | { | 269 | { |
270 | int retry_cnt; | 270 | int retry_cnt; |
271 | int ret = 0; | ||
272 | 271 | ||
273 | atomic_set(&chip->i2c_busy, 1); | ||
274 | for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { | 272 | for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { |
275 | if (atomic_read(&chip->pm_suspend)) { | 273 | if (atomic_read(&chip->pm_suspend)) { |
276 | pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", | 274 | dev_err(chip->dev, "i2c: pm suspend, retry %d/%d\n", |
277 | retry_cnt + 1, FUSB302_RESUME_RETRY); | 275 | retry_cnt + 1, FUSB302_RESUME_RETRY); |
278 | msleep(FUSB302_RESUME_RETRY_SLEEP); | 276 | msleep(FUSB302_RESUME_RETRY_SLEEP); |
279 | } else { | 277 | } else { |
280 | break; | 278 | return false; |
281 | } | 279 | } |
282 | } | 280 | } |
281 | |||
282 | return true; | ||
283 | } | ||
284 | |||
285 | static int fusb302_i2c_write(struct fusb302_chip *chip, | ||
286 | u8 address, u8 data) | ||
287 | { | ||
288 | int ret = 0; | ||
289 | |||
290 | atomic_set(&chip->i2c_busy, 1); | ||
291 | |||
292 | if (fusb302_is_suspended(chip)) { | ||
293 | atomic_set(&chip->i2c_busy, 0); | ||
294 | return -ETIMEDOUT; | ||
295 | } | ||
296 | |||
283 | ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data); | 297 | ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data); |
284 | if (ret < 0) | 298 | if (ret < 0) |
285 | fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d", | 299 | fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d", |
@@ -292,21 +306,17 @@ static int fusb302_i2c_write(struct fusb302_chip *chip, | |||
292 | static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address, | 306 | static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address, |
293 | u8 length, const u8 *data) | 307 | u8 length, const u8 *data) |
294 | { | 308 | { |
295 | int retry_cnt; | ||
296 | int ret = 0; | 309 | int ret = 0; |
297 | 310 | ||
298 | if (length <= 0) | 311 | if (length <= 0) |
299 | return ret; | 312 | return ret; |
300 | atomic_set(&chip->i2c_busy, 1); | 313 | atomic_set(&chip->i2c_busy, 1); |
301 | for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { | 314 | |
302 | if (atomic_read(&chip->pm_suspend)) { | 315 | if (fusb302_is_suspended(chip)) { |
303 | pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", | 316 | atomic_set(&chip->i2c_busy, 0); |
304 | retry_cnt + 1, FUSB302_RESUME_RETRY); | 317 | return -ETIMEDOUT; |
305 | msleep(FUSB302_RESUME_RETRY_SLEEP); | ||
306 | } else { | ||
307 | break; | ||
308 | } | ||
309 | } | 318 | } |
319 | |||
310 | ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address, | 320 | ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address, |
311 | length, data); | 321 | length, data); |
312 | if (ret < 0) | 322 | if (ret < 0) |
@@ -320,19 +330,15 @@ static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address, | |||
320 | static int fusb302_i2c_read(struct fusb302_chip *chip, | 330 | static int fusb302_i2c_read(struct fusb302_chip *chip, |
321 | u8 address, u8 *data) | 331 | u8 address, u8 *data) |
322 | { | 332 | { |
323 | int retry_cnt; | ||
324 | int ret = 0; | 333 | int ret = 0; |
325 | 334 | ||
326 | atomic_set(&chip->i2c_busy, 1); | 335 | atomic_set(&chip->i2c_busy, 1); |
327 | for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { | 336 | |
328 | if (atomic_read(&chip->pm_suspend)) { | 337 | if (fusb302_is_suspended(chip)) { |
329 | pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", | 338 | atomic_set(&chip->i2c_busy, 0); |
330 | retry_cnt + 1, FUSB302_RESUME_RETRY); | 339 | return -ETIMEDOUT; |
331 | msleep(FUSB302_RESUME_RETRY_SLEEP); | ||
332 | } else { | ||
333 | break; | ||
334 | } | ||
335 | } | 340 | } |
341 | |||
336 | ret = i2c_smbus_read_byte_data(chip->i2c_client, address); | 342 | ret = i2c_smbus_read_byte_data(chip->i2c_client, address); |
337 | *data = (u8)ret; | 343 | *data = (u8)ret; |
338 | if (ret < 0) | 344 | if (ret < 0) |
@@ -345,33 +351,31 @@ static int fusb302_i2c_read(struct fusb302_chip *chip, | |||
345 | static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address, | 351 | static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address, |
346 | u8 length, u8 *data) | 352 | u8 length, u8 *data) |
347 | { | 353 | { |
348 | int retry_cnt; | ||
349 | int ret = 0; | 354 | int ret = 0; |
350 | 355 | ||
351 | if (length <= 0) | 356 | if (length <= 0) |
352 | return ret; | 357 | return ret; |
353 | atomic_set(&chip->i2c_busy, 1); | 358 | atomic_set(&chip->i2c_busy, 1); |
354 | for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { | 359 | |
355 | if (atomic_read(&chip->pm_suspend)) { | 360 | if (fusb302_is_suspended(chip)) { |
356 | pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", | 361 | atomic_set(&chip->i2c_busy, 0); |
357 | retry_cnt + 1, FUSB302_RESUME_RETRY); | 362 | return -ETIMEDOUT; |
358 | msleep(FUSB302_RESUME_RETRY_SLEEP); | ||
359 | } else { | ||
360 | break; | ||
361 | } | ||
362 | } | 363 | } |
364 | |||
363 | ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address, | 365 | ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address, |
364 | length, data); | 366 | length, data); |
365 | if (ret < 0) { | 367 | if (ret < 0) { |
366 | fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d", | 368 | fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d", |
367 | address, length, ret); | 369 | address, length, ret); |
368 | return ret; | 370 | goto done; |
369 | } | 371 | } |
370 | if (ret != length) { | 372 | if (ret != length) { |
371 | fusb302_log(chip, "only read %d/%d bytes from 0x%02x", | 373 | fusb302_log(chip, "only read %d/%d bytes from 0x%02x", |
372 | ret, length, address); | 374 | ret, length, address); |
373 | return -EIO; | 375 | ret = -EIO; |
374 | } | 376 | } |
377 | |||
378 | done: | ||
375 | atomic_set(&chip->i2c_busy, 0); | 379 | atomic_set(&chip->i2c_busy, 0); |
376 | 380 | ||
377 | return ret; | 381 | return ret; |
@@ -489,7 +493,7 @@ static int tcpm_init(struct tcpc_dev *dev) | |||
489 | ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data); | 493 | ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data); |
490 | if (ret < 0) | 494 | if (ret < 0) |
491 | return ret; | 495 | return ret; |
492 | chip->vbus_present = !!(FUSB_REG_STATUS0 & FUSB_REG_STATUS0_VBUSOK); | 496 | chip->vbus_present = !!(data & FUSB_REG_STATUS0_VBUSOK); |
493 | ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data); | 497 | ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data); |
494 | if (ret < 0) | 498 | if (ret < 0) |
495 | return ret; | 499 | return ret; |
@@ -1025,7 +1029,7 @@ static int fusb302_pd_send_message(struct fusb302_chip *chip, | |||
1025 | buf[pos++] = FUSB302_TKN_SYNC1; | 1029 | buf[pos++] = FUSB302_TKN_SYNC1; |
1026 | buf[pos++] = FUSB302_TKN_SYNC2; | 1030 | buf[pos++] = FUSB302_TKN_SYNC2; |
1027 | 1031 | ||
1028 | len = pd_header_cnt(msg->header) * 4; | 1032 | len = pd_header_cnt_le(msg->header) * 4; |
1029 | /* plug 2 for header */ | 1033 | /* plug 2 for header */ |
1030 | len += 2; | 1034 | len += 2; |
1031 | if (len > 0x1F) { | 1035 | if (len > 0x1F) { |
@@ -1481,7 +1485,7 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip, | |||
1481 | (u8 *)&msg->header); | 1485 | (u8 *)&msg->header); |
1482 | if (ret < 0) | 1486 | if (ret < 0) |
1483 | return ret; | 1487 | return ret; |
1484 | len = pd_header_cnt(msg->header) * 4; | 1488 | len = pd_header_cnt_le(msg->header) * 4; |
1485 | /* add 4 to length to include the CRC */ | 1489 | /* add 4 to length to include the CRC */ |
1486 | if (len > PD_MAX_PAYLOAD * 4) { | 1490 | if (len > PD_MAX_PAYLOAD * 4) { |
1487 | fusb302_log(chip, "PD message too long %d", len); | 1491 | fusb302_log(chip, "PD message too long %d", len); |
@@ -1663,14 +1667,12 @@ static int init_gpio(struct fusb302_chip *chip) | |||
1663 | if (ret < 0) { | 1667 | if (ret < 0) { |
1664 | fusb302_log(chip, | 1668 | fusb302_log(chip, |
1665 | "cannot set GPIO Int_N to input, ret=%d", ret); | 1669 | "cannot set GPIO Int_N to input, ret=%d", ret); |
1666 | gpio_free(chip->gpio_int_n); | ||
1667 | return ret; | 1670 | return ret; |
1668 | } | 1671 | } |
1669 | ret = gpio_to_irq(chip->gpio_int_n); | 1672 | ret = gpio_to_irq(chip->gpio_int_n); |
1670 | if (ret < 0) { | 1673 | if (ret < 0) { |
1671 | fusb302_log(chip, | 1674 | fusb302_log(chip, |
1672 | "cannot request IRQ for GPIO Int_N, ret=%d", ret); | 1675 | "cannot request IRQ for GPIO Int_N, ret=%d", ret); |
1673 | gpio_free(chip->gpio_int_n); | ||
1674 | return ret; | 1676 | return ret; |
1675 | } | 1677 | } |
1676 | chip->gpio_int_n_irq = ret; | 1678 | chip->gpio_int_n_irq = ret; |
@@ -1787,11 +1789,13 @@ static const struct of_device_id fusb302_dt_match[] = { | |||
1787 | {.compatible = "fcs,fusb302"}, | 1789 | {.compatible = "fcs,fusb302"}, |
1788 | {}, | 1790 | {}, |
1789 | }; | 1791 | }; |
1792 | MODULE_DEVICE_TABLE(of, fusb302_dt_match); | ||
1790 | 1793 | ||
1791 | static const struct i2c_device_id fusb302_i2c_device_id[] = { | 1794 | static const struct i2c_device_id fusb302_i2c_device_id[] = { |
1792 | {"typec_fusb302", 0}, | 1795 | {"typec_fusb302", 0}, |
1793 | {}, | 1796 | {}, |
1794 | }; | 1797 | }; |
1798 | MODULE_DEVICE_TABLE(i2c, fusb302_i2c_device_id); | ||
1795 | 1799 | ||
1796 | static const struct dev_pm_ops fusb302_pm_ops = { | 1800 | static const struct dev_pm_ops fusb302_pm_ops = { |
1797 | .suspend = fusb302_pm_suspend, | 1801 | .suspend = fusb302_pm_suspend, |
diff --git a/drivers/staging/typec/pd.h b/drivers/staging/typec/pd.h index 8d97bdb95f23..510ef7279900 100644 --- a/drivers/staging/typec/pd.h +++ b/drivers/staging/typec/pd.h | |||
@@ -92,6 +92,16 @@ static inline unsigned int pd_header_type_le(__le16 header) | |||
92 | return pd_header_type(le16_to_cpu(header)); | 92 | return pd_header_type(le16_to_cpu(header)); |
93 | } | 93 | } |
94 | 94 | ||
95 | static inline unsigned int pd_header_msgid(u16 header) | ||
96 | { | ||
97 | return (header >> PD_HEADER_ID_SHIFT) & PD_HEADER_ID_MASK; | ||
98 | } | ||
99 | |||
100 | static inline unsigned int pd_header_msgid_le(__le16 header) | ||
101 | { | ||
102 | return pd_header_msgid(le16_to_cpu(header)); | ||
103 | } | ||
104 | |||
95 | #define PD_MAX_PAYLOAD 7 | 105 | #define PD_MAX_PAYLOAD 7 |
96 | 106 | ||
97 | struct pd_message { | 107 | struct pd_message { |
diff --git a/drivers/staging/typec/pd_vdo.h b/drivers/staging/typec/pd_vdo.h index dba172e0e0d1..d92259f8de0a 100644 --- a/drivers/staging/typec/pd_vdo.h +++ b/drivers/staging/typec/pd_vdo.h | |||
@@ -22,6 +22,9 @@ | |||
22 | * VDM object is minimum of VDM header + 6 additional data objects. | 22 | * VDM object is minimum of VDM header + 6 additional data objects. |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #define VDO_MAX_OBJECTS 6 | ||
26 | #define VDO_MAX_SIZE (VDO_MAX_OBJECTS + 1) | ||
27 | |||
25 | /* | 28 | /* |
26 | * VDM header | 29 | * VDM header |
27 | * ---------- | 30 | * ---------- |
@@ -34,7 +37,6 @@ | |||
34 | * <5> :: reserved (SVDM), command type (UVDM) | 37 | * <5> :: reserved (SVDM), command type (UVDM) |
35 | * <4:0> :: command | 38 | * <4:0> :: command |
36 | */ | 39 | */ |
37 | #define VDO_MAX_SIZE 7 | ||
38 | #define VDO(vid, type, custom) \ | 40 | #define VDO(vid, type, custom) \ |
39 | (((vid) << 16) | \ | 41 | (((vid) << 16) | \ |
40 | ((type) << 15) | \ | 42 | ((type) << 15) | \ |
diff --git a/drivers/staging/typec/tcpci.c b/drivers/staging/typec/tcpci.c index 5e5be74c7850..df72d8b01e73 100644 --- a/drivers/staging/typec/tcpci.c +++ b/drivers/staging/typec/tcpci.c | |||
@@ -425,7 +425,7 @@ static const struct regmap_config tcpci_regmap_config = { | |||
425 | .max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */ | 425 | .max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */ |
426 | }; | 426 | }; |
427 | 427 | ||
428 | const struct tcpc_config tcpci_tcpc_config = { | 428 | static const struct tcpc_config tcpci_tcpc_config = { |
429 | .type = TYPEC_PORT_DFP, | 429 | .type = TYPEC_PORT_DFP, |
430 | .default_role = TYPEC_SINK, | 430 | .default_role = TYPEC_SINK, |
431 | }; | 431 | }; |
diff --git a/drivers/staging/typec/tcpm.c b/drivers/staging/typec/tcpm.c index abba655ba00a..20eb4ebcf8c3 100644 --- a/drivers/staging/typec/tcpm.c +++ b/drivers/staging/typec/tcpm.c | |||
@@ -238,6 +238,7 @@ struct tcpm_port { | |||
238 | unsigned int hard_reset_count; | 238 | unsigned int hard_reset_count; |
239 | bool pd_capable; | 239 | bool pd_capable; |
240 | bool explicit_contract; | 240 | bool explicit_contract; |
241 | unsigned int rx_msgid; | ||
241 | 242 | ||
242 | /* Partner capabilities/requests */ | 243 | /* Partner capabilities/requests */ |
243 | u32 sink_request; | 244 | u32 sink_request; |
@@ -251,6 +252,8 @@ struct tcpm_port { | |||
251 | unsigned int nr_src_pdo; | 252 | unsigned int nr_src_pdo; |
252 | u32 snk_pdo[PDO_MAX_OBJECTS]; | 253 | u32 snk_pdo[PDO_MAX_OBJECTS]; |
253 | unsigned int nr_snk_pdo; | 254 | unsigned int nr_snk_pdo; |
255 | u32 snk_vdo[VDO_MAX_OBJECTS]; | ||
256 | unsigned int nr_snk_vdo; | ||
254 | 257 | ||
255 | unsigned int max_snk_mv; | 258 | unsigned int max_snk_mv; |
256 | unsigned int max_snk_ma; | 259 | unsigned int max_snk_ma; |
@@ -997,6 +1000,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, | |||
997 | struct pd_mode_data *modep; | 1000 | struct pd_mode_data *modep; |
998 | int rlen = 0; | 1001 | int rlen = 0; |
999 | u16 svid; | 1002 | u16 svid; |
1003 | int i; | ||
1000 | 1004 | ||
1001 | tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d", | 1005 | tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d", |
1002 | p0, cmd_type, cmd, cnt); | 1006 | p0, cmd_type, cmd, cnt); |
@@ -1007,6 +1011,14 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, | |||
1007 | case CMDT_INIT: | 1011 | case CMDT_INIT: |
1008 | switch (cmd) { | 1012 | switch (cmd) { |
1009 | case CMD_DISCOVER_IDENT: | 1013 | case CMD_DISCOVER_IDENT: |
1014 | /* 6.4.4.3.1: Only respond as UFP (device) */ | ||
1015 | if (port->data_role == TYPEC_DEVICE && | ||
1016 | port->nr_snk_vdo) { | ||
1017 | for (i = 0; i < port->nr_snk_vdo; i++) | ||
1018 | response[i + 1] | ||
1019 | = cpu_to_le32(port->snk_vdo[i]); | ||
1020 | rlen = port->nr_snk_vdo + 1; | ||
1021 | } | ||
1010 | break; | 1022 | break; |
1011 | case CMD_DISCOVER_SVID: | 1023 | case CMD_DISCOVER_SVID: |
1012 | break; | 1024 | break; |
@@ -1415,6 +1427,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, | |||
1415 | break; | 1427 | break; |
1416 | case SOFT_RESET_SEND: | 1428 | case SOFT_RESET_SEND: |
1417 | port->message_id = 0; | 1429 | port->message_id = 0; |
1430 | port->rx_msgid = -1; | ||
1418 | if (port->pwr_role == TYPEC_SOURCE) | 1431 | if (port->pwr_role == TYPEC_SOURCE) |
1419 | next_state = SRC_SEND_CAPABILITIES; | 1432 | next_state = SRC_SEND_CAPABILITIES; |
1420 | else | 1433 | else |
@@ -1503,6 +1516,22 @@ static void tcpm_pd_rx_handler(struct work_struct *work) | |||
1503 | port->attached); | 1516 | port->attached); |
1504 | 1517 | ||
1505 | if (port->attached) { | 1518 | if (port->attached) { |
1519 | enum pd_ctrl_msg_type type = pd_header_type_le(msg->header); | ||
1520 | unsigned int msgid = pd_header_msgid_le(msg->header); | ||
1521 | |||
1522 | /* | ||
1523 | * USB PD standard, 6.6.1.2: | ||
1524 | * "... if MessageID value in a received Message is the | ||
1525 | * same as the stored value, the receiver shall return a | ||
1526 | * GoodCRC Message with that MessageID value and drop | ||
1527 | * the Message (this is a retry of an already received | ||
1528 | * Message). Note: this shall not apply to the Soft_Reset | ||
1529 | * Message which always has a MessageID value of zero." | ||
1530 | */ | ||
1531 | if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET) | ||
1532 | goto done; | ||
1533 | port->rx_msgid = msgid; | ||
1534 | |||
1506 | /* | 1535 | /* |
1507 | * If both ends believe to be DFP/host, we have a data role | 1536 | * If both ends believe to be DFP/host, we have a data role |
1508 | * mismatch. | 1537 | * mismatch. |
@@ -1520,6 +1549,7 @@ static void tcpm_pd_rx_handler(struct work_struct *work) | |||
1520 | } | 1549 | } |
1521 | } | 1550 | } |
1522 | 1551 | ||
1552 | done: | ||
1523 | mutex_unlock(&port->lock); | 1553 | mutex_unlock(&port->lock); |
1524 | kfree(event); | 1554 | kfree(event); |
1525 | } | 1555 | } |
@@ -1719,8 +1749,7 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo) | |||
1719 | } | 1749 | } |
1720 | ma = min(ma, port->max_snk_ma); | 1750 | ma = min(ma, port->max_snk_ma); |
1721 | 1751 | ||
1722 | /* XXX: Any other flags need to be set? */ | 1752 | flags = RDO_USB_COMM | RDO_NO_SUSPEND; |
1723 | flags = 0; | ||
1724 | 1753 | ||
1725 | /* Set mismatch bit if offered power is less than operating power */ | 1754 | /* Set mismatch bit if offered power is less than operating power */ |
1726 | mw = ma * mv / 1000; | 1755 | mw = ma * mv / 1000; |
@@ -1957,6 +1986,12 @@ static void tcpm_reset_port(struct tcpm_port *port) | |||
1957 | port->attached = false; | 1986 | port->attached = false; |
1958 | port->pd_capable = false; | 1987 | port->pd_capable = false; |
1959 | 1988 | ||
1989 | /* | ||
1990 | * First Rx ID should be 0; set this to a sentinel of -1 so that | ||
1991 | * we can check tcpm_pd_rx_handler() if we had seen it before. | ||
1992 | */ | ||
1993 | port->rx_msgid = -1; | ||
1994 | |||
1960 | port->tcpc->set_pd_rx(port->tcpc, false); | 1995 | port->tcpc->set_pd_rx(port->tcpc, false); |
1961 | tcpm_init_vbus(port); /* also disables charging */ | 1996 | tcpm_init_vbus(port); /* also disables charging */ |
1962 | tcpm_init_vconn(port); | 1997 | tcpm_init_vconn(port); |
@@ -2170,6 +2205,7 @@ static void run_state_machine(struct tcpm_port *port) | |||
2170 | port->pwr_opmode = TYPEC_PWR_MODE_USB; | 2205 | port->pwr_opmode = TYPEC_PWR_MODE_USB; |
2171 | port->caps_count = 0; | 2206 | port->caps_count = 0; |
2172 | port->message_id = 0; | 2207 | port->message_id = 0; |
2208 | port->rx_msgid = -1; | ||
2173 | port->explicit_contract = false; | 2209 | port->explicit_contract = false; |
2174 | tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); | 2210 | tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); |
2175 | break; | 2211 | break; |
@@ -2329,6 +2365,7 @@ static void run_state_machine(struct tcpm_port *port) | |||
2329 | typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB); | 2365 | typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB); |
2330 | port->pwr_opmode = TYPEC_PWR_MODE_USB; | 2366 | port->pwr_opmode = TYPEC_PWR_MODE_USB; |
2331 | port->message_id = 0; | 2367 | port->message_id = 0; |
2368 | port->rx_msgid = -1; | ||
2332 | port->explicit_contract = false; | 2369 | port->explicit_contract = false; |
2333 | tcpm_set_state(port, SNK_DISCOVERY, 0); | 2370 | tcpm_set_state(port, SNK_DISCOVERY, 0); |
2334 | break; | 2371 | break; |
@@ -2496,6 +2533,7 @@ static void run_state_machine(struct tcpm_port *port) | |||
2496 | /* Soft_Reset states */ | 2533 | /* Soft_Reset states */ |
2497 | case SOFT_RESET: | 2534 | case SOFT_RESET: |
2498 | port->message_id = 0; | 2535 | port->message_id = 0; |
2536 | port->rx_msgid = -1; | ||
2499 | tcpm_pd_send_control(port, PD_CTRL_ACCEPT); | 2537 | tcpm_pd_send_control(port, PD_CTRL_ACCEPT); |
2500 | if (port->pwr_role == TYPEC_SOURCE) | 2538 | if (port->pwr_role == TYPEC_SOURCE) |
2501 | tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); | 2539 | tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); |
@@ -2504,6 +2542,7 @@ static void run_state_machine(struct tcpm_port *port) | |||
2504 | break; | 2542 | break; |
2505 | case SOFT_RESET_SEND: | 2543 | case SOFT_RESET_SEND: |
2506 | port->message_id = 0; | 2544 | port->message_id = 0; |
2545 | port->rx_msgid = -1; | ||
2507 | if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET)) | 2546 | if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET)) |
2508 | tcpm_set_state_cond(port, hard_reset_state(port), 0); | 2547 | tcpm_set_state_cond(port, hard_reset_state(port), 0); |
2509 | else | 2548 | else |
@@ -2568,6 +2607,14 @@ static void run_state_machine(struct tcpm_port *port) | |||
2568 | break; | 2607 | break; |
2569 | case PR_SWAP_SRC_SNK_SOURCE_OFF: | 2608 | case PR_SWAP_SRC_SNK_SOURCE_OFF: |
2570 | tcpm_set_cc(port, TYPEC_CC_RD); | 2609 | tcpm_set_cc(port, TYPEC_CC_RD); |
2610 | /* | ||
2611 | * USB-PD standard, 6.2.1.4, Port Power Role: | ||
2612 | * "During the Power Role Swap Sequence, for the initial Source | ||
2613 | * Port, the Port Power Role field shall be set to Sink in the | ||
2614 | * PS_RDY Message indicating that the initial Source’s power | ||
2615 | * supply is turned off" | ||
2616 | */ | ||
2617 | tcpm_set_pwr_role(port, TYPEC_SINK); | ||
2571 | if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) { | 2618 | if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) { |
2572 | tcpm_set_state(port, ERROR_RECOVERY, 0); | 2619 | tcpm_set_state(port, ERROR_RECOVERY, 0); |
2573 | break; | 2620 | break; |
@@ -2575,7 +2622,6 @@ static void run_state_machine(struct tcpm_port *port) | |||
2575 | tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON); | 2622 | tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON); |
2576 | break; | 2623 | break; |
2577 | case PR_SWAP_SRC_SNK_SINK_ON: | 2624 | case PR_SWAP_SRC_SNK_SINK_ON: |
2578 | tcpm_set_pwr_role(port, TYPEC_SINK); | ||
2579 | tcpm_swap_complete(port, 0); | 2625 | tcpm_swap_complete(port, 0); |
2580 | tcpm_set_state(port, SNK_STARTUP, 0); | 2626 | tcpm_set_state(port, SNK_STARTUP, 0); |
2581 | break; | 2627 | break; |
@@ -2587,8 +2633,15 @@ static void run_state_machine(struct tcpm_port *port) | |||
2587 | case PR_SWAP_SNK_SRC_SOURCE_ON: | 2633 | case PR_SWAP_SNK_SRC_SOURCE_ON: |
2588 | tcpm_set_cc(port, tcpm_rp_cc(port)); | 2634 | tcpm_set_cc(port, tcpm_rp_cc(port)); |
2589 | tcpm_set_vbus(port, true); | 2635 | tcpm_set_vbus(port, true); |
2590 | tcpm_pd_send_control(port, PD_CTRL_PS_RDY); | 2636 | /* |
2637 | * USB PD standard, 6.2.1.4: | ||
2638 | * "Subsequent Messages initiated by the Policy Engine, | ||
2639 | * such as the PS_RDY Message sent to indicate that Vbus | ||
2640 | * is ready, will have the Port Power Role field set to | ||
2641 | * Source." | ||
2642 | */ | ||
2591 | tcpm_set_pwr_role(port, TYPEC_SOURCE); | 2643 | tcpm_set_pwr_role(port, TYPEC_SOURCE); |
2644 | tcpm_pd_send_control(port, PD_CTRL_PS_RDY); | ||
2592 | tcpm_swap_complete(port, 0); | 2645 | tcpm_swap_complete(port, 0); |
2593 | tcpm_set_state(port, SRC_STARTUP, 0); | 2646 | tcpm_set_state(port, SRC_STARTUP, 0); |
2594 | break; | 2647 | break; |
@@ -3292,6 +3345,20 @@ static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo, | |||
3292 | return nr_pdo; | 3345 | return nr_pdo; |
3293 | } | 3346 | } |
3294 | 3347 | ||
3348 | static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo, | ||
3349 | unsigned int nr_vdo) | ||
3350 | { | ||
3351 | unsigned int i; | ||
3352 | |||
3353 | if (nr_vdo > VDO_MAX_OBJECTS) | ||
3354 | nr_vdo = VDO_MAX_OBJECTS; | ||
3355 | |||
3356 | for (i = 0; i < nr_vdo; i++) | ||
3357 | dest_vdo[i] = src_vdo[i]; | ||
3358 | |||
3359 | return nr_vdo; | ||
3360 | } | ||
3361 | |||
3295 | void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, | 3362 | void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, |
3296 | unsigned int nr_pdo) | 3363 | unsigned int nr_pdo) |
3297 | { | 3364 | { |
@@ -3382,6 +3449,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) | |||
3382 | tcpc->config->nr_src_pdo); | 3449 | tcpc->config->nr_src_pdo); |
3383 | port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, | 3450 | port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, |
3384 | tcpc->config->nr_snk_pdo); | 3451 | tcpc->config->nr_snk_pdo); |
3452 | port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo, | ||
3453 | tcpc->config->nr_snk_vdo); | ||
3385 | 3454 | ||
3386 | port->max_snk_mv = tcpc->config->max_snk_mv; | 3455 | port->max_snk_mv = tcpc->config->max_snk_mv; |
3387 | port->max_snk_ma = tcpc->config->max_snk_ma; | 3456 | port->max_snk_ma = tcpc->config->max_snk_ma; |
diff --git a/drivers/staging/typec/tcpm.h b/drivers/staging/typec/tcpm.h index 969b365e6549..19c307d31a5a 100644 --- a/drivers/staging/typec/tcpm.h +++ b/drivers/staging/typec/tcpm.h | |||
@@ -60,6 +60,9 @@ struct tcpc_config { | |||
60 | const u32 *snk_pdo; | 60 | const u32 *snk_pdo; |
61 | unsigned int nr_snk_pdo; | 61 | unsigned int nr_snk_pdo; |
62 | 62 | ||
63 | const u32 *snk_vdo; | ||
64 | unsigned int nr_snk_vdo; | ||
65 | |||
63 | unsigned int max_snk_mv; | 66 | unsigned int max_snk_mv; |
64 | unsigned int max_snk_ma; | 67 | unsigned int max_snk_ma; |
65 | unsigned int max_snk_mw; | 68 | unsigned int max_snk_mw; |
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index 988ee61fb4a7..d04db3f55519 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | |||
@@ -502,8 +502,15 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, | |||
502 | */ | 502 | */ |
503 | sg_init_table(scatterlist, num_pages); | 503 | sg_init_table(scatterlist, num_pages); |
504 | /* Now set the pages for each scatterlist */ | 504 | /* Now set the pages for each scatterlist */ |
505 | for (i = 0; i < num_pages; i++) | 505 | for (i = 0; i < num_pages; i++) { |
506 | sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0); | 506 | unsigned int len = PAGE_SIZE - offset; |
507 | |||
508 | if (len > count) | ||
509 | len = count; | ||
510 | sg_set_page(scatterlist + i, pages[i], len, offset); | ||
511 | offset = 0; | ||
512 | count -= len; | ||
513 | } | ||
507 | 514 | ||
508 | dma_buffers = dma_map_sg(g_dev, | 515 | dma_buffers = dma_map_sg(g_dev, |
509 | scatterlist, | 516 | scatterlist, |
@@ -524,20 +531,20 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, | |||
524 | u32 addr = sg_dma_address(sg); | 531 | u32 addr = sg_dma_address(sg); |
525 | 532 | ||
526 | /* Note: addrs is the address + page_count - 1 | 533 | /* Note: addrs is the address + page_count - 1 |
527 | * The firmware expects the block to be page | 534 | * The firmware expects blocks after the first to be page- |
528 | * aligned and a multiple of the page size | 535 | * aligned and a multiple of the page size |
529 | */ | 536 | */ |
530 | WARN_ON(len == 0); | 537 | WARN_ON(len == 0); |
531 | WARN_ON(len & ~PAGE_MASK); | 538 | WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK)); |
532 | WARN_ON(addr & ~PAGE_MASK); | 539 | WARN_ON(i && (addr & ~PAGE_MASK)); |
533 | if (k > 0 && | 540 | if (k > 0 && |
534 | ((addrs[k - 1] & PAGE_MASK) | | 541 | ((addrs[k - 1] & PAGE_MASK) + |
535 | ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT) | 542 | (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)) |
536 | == addr) { | 543 | == (addr & PAGE_MASK)) |
537 | addrs[k - 1] += (len >> PAGE_SHIFT); | 544 | addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT); |
538 | } else { | 545 | else |
539 | addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1); | 546 | addrs[k++] = (addr & PAGE_MASK) | |
540 | } | 547 | (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1); |
541 | } | 548 | } |
542 | 549 | ||
543 | /* Partial cache lines (fragments) require special measures */ | 550 | /* Partial cache lines (fragments) require special measures */ |
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig index 2330a4eb4e8b..a6df12d88f90 100644 --- a/drivers/tee/Kconfig +++ b/drivers/tee/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | # Generic Trusted Execution Environment Configuration | 1 | # Generic Trusted Execution Environment Configuration |
2 | config TEE | 2 | config TEE |
3 | tristate "Trusted Execution Environment support" | 3 | tristate "Trusted Execution Environment support" |
4 | depends on HAVE_ARM_SMCCC || COMPILE_TEST | ||
4 | select DMA_SHARED_BUFFER | 5 | select DMA_SHARED_BUFFER |
5 | select GENERIC_ALLOCATOR | 6 | select GENERIC_ALLOCATOR |
6 | help | 7 | help |
diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig index ab08af4654ef..42c098e86f84 100644 --- a/drivers/thermal/broadcom/Kconfig +++ b/drivers/thermal/broadcom/Kconfig | |||
@@ -9,8 +9,9 @@ config BCM2835_THERMAL | |||
9 | config BCM_NS_THERMAL | 9 | config BCM_NS_THERMAL |
10 | tristate "Northstar thermal driver" | 10 | tristate "Northstar thermal driver" |
11 | depends on ARCH_BCM_IPROC || COMPILE_TEST | 11 | depends on ARCH_BCM_IPROC || COMPILE_TEST |
12 | default y if ARCH_BCM_IPROC | ||
12 | help | 13 | help |
13 | Northstar is a family of SoCs that includes e.g. BCM4708, BCM47081, | 14 | Support for the Northstar and Northstar Plus family of SoCs (e.g. |
14 | BCM4709 and BCM47094. It contains DMU (Device Management Unit) block | 15 | BCM4708, BCM4709, BCM5301x, BCM95852X, etc). It contains DMU (Device |
15 | with a thermal sensor that allows checking CPU temperature. This | 16 | Management Unit) block with a thermal sensor that allows checking CPU |
16 | driver provides support for it. | 17 | temperature. |
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index 644ba526d9ea..4362a69ac88d 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c | |||
@@ -195,7 +195,6 @@ static struct thermal_zone_of_device_ops tmu_tz_ops = { | |||
195 | static int qoriq_tmu_probe(struct platform_device *pdev) | 195 | static int qoriq_tmu_probe(struct platform_device *pdev) |
196 | { | 196 | { |
197 | int ret; | 197 | int ret; |
198 | const struct thermal_trip *trip; | ||
199 | struct qoriq_tmu_data *data; | 198 | struct qoriq_tmu_data *data; |
200 | struct device_node *np = pdev->dev.of_node; | 199 | struct device_node *np = pdev->dev.of_node; |
201 | u32 site = 0; | 200 | u32 site = 0; |
@@ -243,8 +242,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
243 | goto err_tmu; | 242 | goto err_tmu; |
244 | } | 243 | } |
245 | 244 | ||
246 | trip = of_thermal_get_trip_points(data->tz); | ||
247 | |||
248 | /* Enable monitoring */ | 245 | /* Enable monitoring */ |
249 | site |= 0x1 << (15 - data->sensor_id); | 246 | site |= 0x1 << (15 - data->sensor_id); |
250 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); | 247 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); |
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index b21b9cc2c8d6..5a51c740e372 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c | |||
@@ -359,7 +359,7 @@ static DECLARE_DELAYED_WORK(thermal_emergency_poweroff_work, | |||
359 | * This may be called from any critical situation to trigger a system shutdown | 359 | * This may be called from any critical situation to trigger a system shutdown |
360 | * after a known period of time. By default this is not scheduled. | 360 | * after a known period of time. By default this is not scheduled. |
361 | */ | 361 | */ |
362 | void thermal_emergency_poweroff(void) | 362 | static void thermal_emergency_poweroff(void) |
363 | { | 363 | { |
364 | int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS; | 364 | int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS; |
365 | /* | 365 | /* |
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index ba9c302454fb..696ab3046b87 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c | |||
@@ -1010,7 +1010,7 @@ ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id) | |||
1010 | } | 1010 | } |
1011 | 1011 | ||
1012 | /** | 1012 | /** |
1013 | * ti_bandgap_set_continous_mode() - One time enabling of continuous mode | 1013 | * ti_bandgap_set_continuous_mode() - One time enabling of continuous mode |
1014 | * @bgp: pointer to struct ti_bandgap | 1014 | * @bgp: pointer to struct ti_bandgap |
1015 | * | 1015 | * |
1016 | * Call this function only if HAS(MODE_CONFIG) is set. As this driver may | 1016 | * Call this function only if HAS(MODE_CONFIG) is set. As this driver may |
@@ -1214,22 +1214,18 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev) | |||
1214 | } | 1214 | } |
1215 | 1215 | ||
1216 | bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); | 1216 | bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); |
1217 | if (!bgp) { | 1217 | if (!bgp) |
1218 | dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n"); | ||
1219 | return ERR_PTR(-ENOMEM); | 1218 | return ERR_PTR(-ENOMEM); |
1220 | } | ||
1221 | 1219 | ||
1222 | of_id = of_match_device(of_ti_bandgap_match, &pdev->dev); | 1220 | of_id = of_match_device(of_ti_bandgap_match, &pdev->dev); |
1223 | if (of_id) | 1221 | if (of_id) |
1224 | bgp->conf = of_id->data; | 1222 | bgp->conf = of_id->data; |
1225 | 1223 | ||
1226 | /* register shadow for context save and restore */ | 1224 | /* register shadow for context save and restore */ |
1227 | bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) * | 1225 | bgp->regval = devm_kcalloc(&pdev->dev, bgp->conf->sensor_count, |
1228 | bgp->conf->sensor_count, GFP_KERNEL); | 1226 | sizeof(*bgp->regval), GFP_KERNEL); |
1229 | if (!bgp->regval) { | 1227 | if (!bgp->regval) |
1230 | dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n"); | ||
1231 | return ERR_PTR(-ENOMEM); | 1228 | return ERR_PTR(-ENOMEM); |
1232 | } | ||
1233 | 1229 | ||
1234 | i = 0; | 1230 | i = 0; |
1235 | do { | 1231 | do { |
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c index 7ac9bcdf1e61..61fe8d6fd24e 100644 --- a/drivers/tty/ehv_bytechan.c +++ b/drivers/tty/ehv_bytechan.c | |||
@@ -764,7 +764,7 @@ static int __init ehv_bc_init(void) | |||
764 | ehv_bc_driver = alloc_tty_driver(count); | 764 | ehv_bc_driver = alloc_tty_driver(count); |
765 | if (!ehv_bc_driver) { | 765 | if (!ehv_bc_driver) { |
766 | ret = -ENOMEM; | 766 | ret = -ENOMEM; |
767 | goto error; | 767 | goto err_free_bcs; |
768 | } | 768 | } |
769 | 769 | ||
770 | ehv_bc_driver->driver_name = "ehv-bc"; | 770 | ehv_bc_driver->driver_name = "ehv-bc"; |
@@ -778,24 +778,23 @@ static int __init ehv_bc_init(void) | |||
778 | ret = tty_register_driver(ehv_bc_driver); | 778 | ret = tty_register_driver(ehv_bc_driver); |
779 | if (ret) { | 779 | if (ret) { |
780 | pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret); | 780 | pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret); |
781 | goto error; | 781 | goto err_put_tty_driver; |
782 | } | 782 | } |
783 | 783 | ||
784 | ret = platform_driver_register(&ehv_bc_tty_driver); | 784 | ret = platform_driver_register(&ehv_bc_tty_driver); |
785 | if (ret) { | 785 | if (ret) { |
786 | pr_err("ehv-bc: could not register platform driver (ret=%i)\n", | 786 | pr_err("ehv-bc: could not register platform driver (ret=%i)\n", |
787 | ret); | 787 | ret); |
788 | goto error; | 788 | goto err_deregister_tty_driver; |
789 | } | 789 | } |
790 | 790 | ||
791 | return 0; | 791 | return 0; |
792 | 792 | ||
793 | error: | 793 | err_deregister_tty_driver: |
794 | if (ehv_bc_driver) { | 794 | tty_unregister_driver(ehv_bc_driver); |
795 | tty_unregister_driver(ehv_bc_driver); | 795 | err_put_tty_driver: |
796 | put_tty_driver(ehv_bc_driver); | 796 | put_tty_driver(ehv_bc_driver); |
797 | } | 797 | err_free_bcs: |
798 | |||
799 | kfree(bcs); | 798 | kfree(bcs); |
800 | 799 | ||
801 | return ret; | 800 | return ret; |
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 433de5ea9b02..f71b47334149 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c | |||
@@ -122,6 +122,18 @@ void serdev_device_write_wakeup(struct serdev_device *serdev) | |||
122 | } | 122 | } |
123 | EXPORT_SYMBOL_GPL(serdev_device_write_wakeup); | 123 | EXPORT_SYMBOL_GPL(serdev_device_write_wakeup); |
124 | 124 | ||
125 | int serdev_device_write_buf(struct serdev_device *serdev, | ||
126 | const unsigned char *buf, size_t count) | ||
127 | { | ||
128 | struct serdev_controller *ctrl = serdev->ctrl; | ||
129 | |||
130 | if (!ctrl || !ctrl->ops->write_buf) | ||
131 | return -EINVAL; | ||
132 | |||
133 | return ctrl->ops->write_buf(ctrl, buf, count); | ||
134 | } | ||
135 | EXPORT_SYMBOL_GPL(serdev_device_write_buf); | ||
136 | |||
125 | int serdev_device_write(struct serdev_device *serdev, | 137 | int serdev_device_write(struct serdev_device *serdev, |
126 | const unsigned char *buf, size_t count, | 138 | const unsigned char *buf, size_t count, |
127 | unsigned long timeout) | 139 | unsigned long timeout) |
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index 487c88f6aa0e..d0a021c93986 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c | |||
@@ -102,9 +102,6 @@ static int ttyport_open(struct serdev_controller *ctrl) | |||
102 | return PTR_ERR(tty); | 102 | return PTR_ERR(tty); |
103 | serport->tty = tty; | 103 | serport->tty = tty; |
104 | 104 | ||
105 | serport->port->client_ops = &client_ops; | ||
106 | serport->port->client_data = ctrl; | ||
107 | |||
108 | if (tty->ops->open) | 105 | if (tty->ops->open) |
109 | tty->ops->open(serport->tty, NULL); | 106 | tty->ops->open(serport->tty, NULL); |
110 | else | 107 | else |
@@ -215,6 +212,7 @@ struct device *serdev_tty_port_register(struct tty_port *port, | |||
215 | struct device *parent, | 212 | struct device *parent, |
216 | struct tty_driver *drv, int idx) | 213 | struct tty_driver *drv, int idx) |
217 | { | 214 | { |
215 | const struct tty_port_client_operations *old_ops; | ||
218 | struct serdev_controller *ctrl; | 216 | struct serdev_controller *ctrl; |
219 | struct serport *serport; | 217 | struct serport *serport; |
220 | int ret; | 218 | int ret; |
@@ -233,28 +231,37 @@ struct device *serdev_tty_port_register(struct tty_port *port, | |||
233 | 231 | ||
234 | ctrl->ops = &ctrl_ops; | 232 | ctrl->ops = &ctrl_ops; |
235 | 233 | ||
234 | old_ops = port->client_ops; | ||
235 | port->client_ops = &client_ops; | ||
236 | port->client_data = ctrl; | ||
237 | |||
236 | ret = serdev_controller_add(ctrl); | 238 | ret = serdev_controller_add(ctrl); |
237 | if (ret) | 239 | if (ret) |
238 | goto err_controller_put; | 240 | goto err_reset_data; |
239 | 241 | ||
240 | dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx); | 242 | dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx); |
241 | return &ctrl->dev; | 243 | return &ctrl->dev; |
242 | 244 | ||
243 | err_controller_put: | 245 | err_reset_data: |
246 | port->client_data = NULL; | ||
247 | port->client_ops = old_ops; | ||
244 | serdev_controller_put(ctrl); | 248 | serdev_controller_put(ctrl); |
249 | |||
245 | return ERR_PTR(ret); | 250 | return ERR_PTR(ret); |
246 | } | 251 | } |
247 | 252 | ||
248 | void serdev_tty_port_unregister(struct tty_port *port) | 253 | int serdev_tty_port_unregister(struct tty_port *port) |
249 | { | 254 | { |
250 | struct serdev_controller *ctrl = port->client_data; | 255 | struct serdev_controller *ctrl = port->client_data; |
251 | struct serport *serport = serdev_controller_get_drvdata(ctrl); | 256 | struct serport *serport = serdev_controller_get_drvdata(ctrl); |
252 | 257 | ||
253 | if (!serport) | 258 | if (!serport) |
254 | return; | 259 | return -ENODEV; |
255 | 260 | ||
256 | serdev_controller_remove(ctrl); | 261 | serdev_controller_remove(ctrl); |
257 | port->client_ops = NULL; | 262 | port->client_ops = NULL; |
258 | port->client_data = NULL; | 263 | port->client_data = NULL; |
259 | serdev_controller_put(ctrl); | 264 | serdev_controller_put(ctrl); |
265 | |||
266 | return 0; | ||
260 | } | 267 | } |
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 09a65a3ec7f7..68fd045a7025 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c | |||
@@ -47,6 +47,7 @@ | |||
47 | /* | 47 | /* |
48 | * These are definitions for the Exar XR17V35X and XR17(C|D)15X | 48 | * These are definitions for the Exar XR17V35X and XR17(C|D)15X |
49 | */ | 49 | */ |
50 | #define UART_EXAR_INT0 0x80 | ||
50 | #define UART_EXAR_SLEEP 0x8b /* Sleep mode */ | 51 | #define UART_EXAR_SLEEP 0x8b /* Sleep mode */ |
51 | #define UART_EXAR_DVID 0x8d /* Device identification */ | 52 | #define UART_EXAR_DVID 0x8d /* Device identification */ |
52 | 53 | ||
@@ -1337,7 +1338,7 @@ out_lock: | |||
1337 | /* | 1338 | /* |
1338 | * Check if the device is a Fintek F81216A | 1339 | * Check if the device is a Fintek F81216A |
1339 | */ | 1340 | */ |
1340 | if (port->type == PORT_16550A) | 1341 | if (port->type == PORT_16550A && port->iotype == UPIO_PORT) |
1341 | fintek_8250_probe(up); | 1342 | fintek_8250_probe(up); |
1342 | 1343 | ||
1343 | if (up->capabilities != old_capabilities) { | 1344 | if (up->capabilities != old_capabilities) { |
@@ -1869,17 +1870,13 @@ static int serial8250_default_handle_irq(struct uart_port *port) | |||
1869 | static int exar_handle_irq(struct uart_port *port) | 1870 | static int exar_handle_irq(struct uart_port *port) |
1870 | { | 1871 | { |
1871 | unsigned int iir = serial_port_in(port, UART_IIR); | 1872 | unsigned int iir = serial_port_in(port, UART_IIR); |
1872 | int ret; | 1873 | int ret = 0; |
1873 | 1874 | ||
1874 | ret = serial8250_handle_irq(port, iir); | 1875 | if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) && |
1876 | serial_port_in(port, UART_EXAR_INT0) != 0) | ||
1877 | ret = 1; | ||
1875 | 1878 | ||
1876 | if ((port->type == PORT_XR17V35X) || | 1879 | ret |= serial8250_handle_irq(port, iir); |
1877 | (port->type == PORT_XR17D15X)) { | ||
1878 | serial_port_in(port, 0x80); | ||
1879 | serial_port_in(port, 0x81); | ||
1880 | serial_port_in(port, 0x82); | ||
1881 | serial_port_in(port, 0x83); | ||
1882 | } | ||
1883 | 1880 | ||
1884 | return ret; | 1881 | return ret; |
1885 | } | 1882 | } |
@@ -2177,6 +2174,8 @@ int serial8250_do_startup(struct uart_port *port) | |||
2177 | serial_port_in(port, UART_RX); | 2174 | serial_port_in(port, UART_RX); |
2178 | serial_port_in(port, UART_IIR); | 2175 | serial_port_in(port, UART_IIR); |
2179 | serial_port_in(port, UART_MSR); | 2176 | serial_port_in(port, UART_MSR); |
2177 | if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) | ||
2178 | serial_port_in(port, UART_EXAR_INT0); | ||
2180 | 2179 | ||
2181 | /* | 2180 | /* |
2182 | * At this point, there's no way the LSR could still be 0xff; | 2181 | * At this point, there's no way the LSR could still be 0xff; |
@@ -2335,6 +2334,8 @@ dont_test_tx_en: | |||
2335 | serial_port_in(port, UART_RX); | 2334 | serial_port_in(port, UART_RX); |
2336 | serial_port_in(port, UART_IIR); | 2335 | serial_port_in(port, UART_IIR); |
2337 | serial_port_in(port, UART_MSR); | 2336 | serial_port_in(port, UART_MSR); |
2337 | if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) | ||
2338 | serial_port_in(port, UART_EXAR_INT0); | ||
2338 | up->lsr_saved_flags = 0; | 2339 | up->lsr_saved_flags = 0; |
2339 | up->msr_saved_flags = 0; | 2340 | up->msr_saved_flags = 0; |
2340 | 2341 | ||
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c index 18e3f8342b85..0475f5d261ce 100644 --- a/drivers/tty/serial/altera_jtaguart.c +++ b/drivers/tty/serial/altera_jtaguart.c | |||
@@ -478,6 +478,7 @@ static int altera_jtaguart_remove(struct platform_device *pdev) | |||
478 | 478 | ||
479 | port = &altera_jtaguart_ports[i].port; | 479 | port = &altera_jtaguart_ports[i].port; |
480 | uart_remove_one_port(&altera_jtaguart_driver, port); | 480 | uart_remove_one_port(&altera_jtaguart_driver, port); |
481 | iounmap(port->membase); | ||
481 | 482 | ||
482 | return 0; | 483 | return 0; |
483 | } | 484 | } |
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 46d3438a0d27..3e4b717670d7 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c | |||
@@ -615,6 +615,7 @@ static int altera_uart_remove(struct platform_device *pdev) | |||
615 | if (port) { | 615 | if (port) { |
616 | uart_remove_one_port(&altera_uart_driver, port); | 616 | uart_remove_one_port(&altera_uart_driver, port); |
617 | port->mapbase = 0; | 617 | port->mapbase = 0; |
618 | iounmap(port->membase); | ||
618 | } | 619 | } |
619 | 620 | ||
620 | return 0; | 621 | return 0; |
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c index ebd8569f9ad5..9fff25be87f9 100644 --- a/drivers/tty/serial/efm32-uart.c +++ b/drivers/tty/serial/efm32-uart.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #define UARTn_FRAME 0x04 | 27 | #define UARTn_FRAME 0x04 |
28 | #define UARTn_FRAME_DATABITS__MASK 0x000f | 28 | #define UARTn_FRAME_DATABITS__MASK 0x000f |
29 | #define UARTn_FRAME_DATABITS(n) ((n) - 3) | 29 | #define UARTn_FRAME_DATABITS(n) ((n) - 3) |
30 | #define UARTn_FRAME_PARITY__MASK 0x0300 | ||
30 | #define UARTn_FRAME_PARITY_NONE 0x0000 | 31 | #define UARTn_FRAME_PARITY_NONE 0x0000 |
31 | #define UARTn_FRAME_PARITY_EVEN 0x0200 | 32 | #define UARTn_FRAME_PARITY_EVEN 0x0200 |
32 | #define UARTn_FRAME_PARITY_ODD 0x0300 | 33 | #define UARTn_FRAME_PARITY_ODD 0x0300 |
@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port, | |||
572 | 16 * (4 + (clkdiv >> 6))); | 573 | 16 * (4 + (clkdiv >> 6))); |
573 | 574 | ||
574 | frame = efm32_uart_read32(efm_port, UARTn_FRAME); | 575 | frame = efm32_uart_read32(efm_port, UARTn_FRAME); |
575 | if (frame & UARTn_FRAME_PARITY_ODD) | 576 | switch (frame & UARTn_FRAME_PARITY__MASK) { |
577 | case UARTn_FRAME_PARITY_ODD: | ||
576 | *parity = 'o'; | 578 | *parity = 'o'; |
577 | else if (frame & UARTn_FRAME_PARITY_EVEN) | 579 | break; |
580 | case UARTn_FRAME_PARITY_EVEN: | ||
578 | *parity = 'e'; | 581 | *parity = 'e'; |
579 | else | 582 | break; |
583 | default: | ||
580 | *parity = 'n'; | 584 | *parity = 'n'; |
585 | } | ||
581 | 586 | ||
582 | *bits = (frame & UARTn_FRAME_DATABITS__MASK) - | 587 | *bits = (frame & UARTn_FRAME_DATABITS__MASK) - |
583 | UARTn_FRAME_DATABITS(4) + 4; | 588 | UARTn_FRAME_DATABITS(4) + 4; |
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 157883653256..f190a84a0246 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c | |||
@@ -1382,9 +1382,9 @@ static struct spi_driver ifx_spi_driver = { | |||
1382 | static void __exit ifx_spi_exit(void) | 1382 | static void __exit ifx_spi_exit(void) |
1383 | { | 1383 | { |
1384 | /* unregister */ | 1384 | /* unregister */ |
1385 | spi_unregister_driver(&ifx_spi_driver); | ||
1385 | tty_unregister_driver(tty_drv); | 1386 | tty_unregister_driver(tty_drv); |
1386 | put_tty_driver(tty_drv); | 1387 | put_tty_driver(tty_drv); |
1387 | spi_unregister_driver(&ifx_spi_driver); | ||
1388 | unregister_reboot_notifier(&ifx_modem_reboot_notifier_block); | 1388 | unregister_reboot_notifier(&ifx_modem_reboot_notifier_block); |
1389 | } | 1389 | } |
1390 | 1390 | ||
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 33509b4beaec..bbefddd92bfe 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -2184,7 +2184,9 @@ static int serial_imx_probe(struct platform_device *pdev) | |||
2184 | * and DCD (when they are outputs) or enables the respective | 2184 | * and DCD (when they are outputs) or enables the respective |
2185 | * irqs. So set this bit early, i.e. before requesting irqs. | 2185 | * irqs. So set this bit early, i.e. before requesting irqs. |
2186 | */ | 2186 | */ |
2187 | writel(UFCR_DCEDTE, sport->port.membase + UFCR); | 2187 | reg = readl(sport->port.membase + UFCR); |
2188 | if (!(reg & UFCR_DCEDTE)) | ||
2189 | writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR); | ||
2188 | 2190 | ||
2189 | /* | 2191 | /* |
2190 | * Disable UCR3_RI and UCR3_DCD irqs. They are also not | 2192 | * Disable UCR3_RI and UCR3_DCD irqs. They are also not |
@@ -2195,7 +2197,15 @@ static int serial_imx_probe(struct platform_device *pdev) | |||
2195 | sport->port.membase + UCR3); | 2197 | sport->port.membase + UCR3); |
2196 | 2198 | ||
2197 | } else { | 2199 | } else { |
2198 | writel(0, sport->port.membase + UFCR); | 2200 | unsigned long ucr3 = UCR3_DSR; |
2201 | |||
2202 | reg = readl(sport->port.membase + UFCR); | ||
2203 | if (reg & UFCR_DCEDTE) | ||
2204 | writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR); | ||
2205 | |||
2206 | if (!is_imx1_uart(sport)) | ||
2207 | ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP; | ||
2208 | writel(ucr3, sport->port.membase + UCR3); | ||
2199 | } | 2209 | } |
2200 | 2210 | ||
2201 | clk_disable_unprepare(sport->clk_ipg); | 2211 | clk_disable_unprepare(sport->clk_ipg); |
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 0f45b7884a2c..13bfd5dcffce 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
@@ -2083,7 +2083,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) | |||
2083 | mutex_lock(&port->mutex); | 2083 | mutex_lock(&port->mutex); |
2084 | 2084 | ||
2085 | tty_dev = device_find_child(uport->dev, &match, serial_match_port); | 2085 | tty_dev = device_find_child(uport->dev, &match, serial_match_port); |
2086 | if (device_may_wakeup(tty_dev)) { | 2086 | if (tty_dev && device_may_wakeup(tty_dev)) { |
2087 | if (!enable_irq_wake(uport->irq)) | 2087 | if (!enable_irq_wake(uport->irq)) |
2088 | uport->irq_wake = 1; | 2088 | uport->irq_wake = 1; |
2089 | put_device(tty_dev); | 2089 | put_device(tty_dev); |
@@ -2782,7 +2782,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) | |||
2782 | * Register the port whether it's detected or not. This allows | 2782 | * Register the port whether it's detected or not. This allows |
2783 | * setserial to be used to alter this port's parameters. | 2783 | * setserial to be used to alter this port's parameters. |
2784 | */ | 2784 | */ |
2785 | tty_dev = tty_port_register_device_attr(port, drv->tty_driver, | 2785 | tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver, |
2786 | uport->line, uport->dev, port, uport->tty_groups); | 2786 | uport->line, uport->dev, port, uport->tty_groups); |
2787 | if (likely(!IS_ERR(tty_dev))) { | 2787 | if (likely(!IS_ERR(tty_dev))) { |
2788 | device_set_wakeup_capable(tty_dev, 1); | 2788 | device_set_wakeup_capable(tty_dev, 1); |
@@ -2845,7 +2845,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport) | |||
2845 | /* | 2845 | /* |
2846 | * Remove the devices from the tty layer | 2846 | * Remove the devices from the tty layer |
2847 | */ | 2847 | */ |
2848 | tty_unregister_device(drv->tty_driver, uport->line); | 2848 | tty_port_unregister_device(port, drv->tty_driver, uport->line); |
2849 | 2849 | ||
2850 | tty = tty_port_tty_get(port); | 2850 | tty = tty_port_tty_get(port); |
2851 | if (tty) { | 2851 | if (tty) { |
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 1d21a9c1d33e..4fb3165384c4 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c | |||
@@ -34,7 +34,9 @@ static int tty_port_default_receive_buf(struct tty_port *port, | |||
34 | if (!disc) | 34 | if (!disc) |
35 | return 0; | 35 | return 0; |
36 | 36 | ||
37 | mutex_lock(&tty->atomic_write_lock); | ||
37 | ret = tty_ldisc_receive_buf(disc, p, (char *)f, count); | 38 | ret = tty_ldisc_receive_buf(disc, p, (char *)f, count); |
39 | mutex_unlock(&tty->atomic_write_lock); | ||
38 | 40 | ||
39 | tty_ldisc_deref(disc); | 41 | tty_ldisc_deref(disc); |
40 | 42 | ||
@@ -129,19 +131,85 @@ struct device *tty_port_register_device_attr(struct tty_port *port, | |||
129 | struct device *device, void *drvdata, | 131 | struct device *device, void *drvdata, |
130 | const struct attribute_group **attr_grp) | 132 | const struct attribute_group **attr_grp) |
131 | { | 133 | { |
134 | tty_port_link_device(port, driver, index); | ||
135 | return tty_register_device_attr(driver, index, device, drvdata, | ||
136 | attr_grp); | ||
137 | } | ||
138 | EXPORT_SYMBOL_GPL(tty_port_register_device_attr); | ||
139 | |||
140 | /** | ||
141 | * tty_port_register_device_attr_serdev - register tty or serdev device | ||
142 | * @port: tty_port of the device | ||
143 | * @driver: tty_driver for this device | ||
144 | * @index: index of the tty | ||
145 | * @device: parent if exists, otherwise NULL | ||
146 | * @drvdata: driver data for the device | ||
147 | * @attr_grp: attribute group for the device | ||
148 | * | ||
149 | * Register a serdev or tty device depending on if the parent device has any | ||
150 | * defined serdev clients or not. | ||
151 | */ | ||
152 | struct device *tty_port_register_device_attr_serdev(struct tty_port *port, | ||
153 | struct tty_driver *driver, unsigned index, | ||
154 | struct device *device, void *drvdata, | ||
155 | const struct attribute_group **attr_grp) | ||
156 | { | ||
132 | struct device *dev; | 157 | struct device *dev; |
133 | 158 | ||
134 | tty_port_link_device(port, driver, index); | 159 | tty_port_link_device(port, driver, index); |
135 | 160 | ||
136 | dev = serdev_tty_port_register(port, device, driver, index); | 161 | dev = serdev_tty_port_register(port, device, driver, index); |
137 | if (PTR_ERR(dev) != -ENODEV) | 162 | if (PTR_ERR(dev) != -ENODEV) { |
138 | /* Skip creating cdev if we registered a serdev device */ | 163 | /* Skip creating cdev if we registered a serdev device */ |
139 | return dev; | 164 | return dev; |
165 | } | ||
140 | 166 | ||
141 | return tty_register_device_attr(driver, index, device, drvdata, | 167 | return tty_register_device_attr(driver, index, device, drvdata, |
142 | attr_grp); | 168 | attr_grp); |
143 | } | 169 | } |
144 | EXPORT_SYMBOL_GPL(tty_port_register_device_attr); | 170 | EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev); |
171 | |||
172 | /** | ||
173 | * tty_port_register_device_serdev - register tty or serdev device | ||
174 | * @port: tty_port of the device | ||
175 | * @driver: tty_driver for this device | ||
176 | * @index: index of the tty | ||
177 | * @device: parent if exists, otherwise NULL | ||
178 | * | ||
179 | * Register a serdev or tty device depending on if the parent device has any | ||
180 | * defined serdev clients or not. | ||
181 | */ | ||
182 | struct device *tty_port_register_device_serdev(struct tty_port *port, | ||
183 | struct tty_driver *driver, unsigned index, | ||
184 | struct device *device) | ||
185 | { | ||
186 | return tty_port_register_device_attr_serdev(port, driver, index, | ||
187 | device, NULL, NULL); | ||
188 | } | ||
189 | EXPORT_SYMBOL_GPL(tty_port_register_device_serdev); | ||
190 | |||
191 | /** | ||
192 | * tty_port_unregister_device - deregister a tty or serdev device | ||
193 | * @port: tty_port of the device | ||
194 | * @driver: tty_driver for this device | ||
195 | * @index: index of the tty | ||
196 | * | ||
197 | * If a tty or serdev device is registered with a call to | ||
198 | * tty_port_register_device_serdev() then this function must be called when | ||
199 | * the device is gone. | ||
200 | */ | ||
201 | void tty_port_unregister_device(struct tty_port *port, | ||
202 | struct tty_driver *driver, unsigned index) | ||
203 | { | ||
204 | int ret; | ||
205 | |||
206 | ret = serdev_tty_port_unregister(port); | ||
207 | if (ret == 0) | ||
208 | return; | ||
209 | |||
210 | tty_unregister_device(driver, index); | ||
211 | } | ||
212 | EXPORT_SYMBOL_GPL(tty_port_unregister_device); | ||
145 | 213 | ||
146 | int tty_port_alloc_xmit_buf(struct tty_port *port) | 214 | int tty_port_alloc_xmit_buf(struct tty_port *port) |
147 | { | 215 | { |
@@ -189,9 +257,6 @@ static void tty_port_destructor(struct kref *kref) | |||
189 | /* check if last port ref was dropped before tty release */ | 257 | /* check if last port ref was dropped before tty release */ |
190 | if (WARN_ON(port->itty)) | 258 | if (WARN_ON(port->itty)) |
191 | return; | 259 | return; |
192 | |||
193 | serdev_tty_port_unregister(port); | ||
194 | |||
195 | if (port->xmit_buf) | 260 | if (port->xmit_buf) |
196 | free_page((unsigned long)port->xmit_buf); | 261 | free_page((unsigned long)port->xmit_buf); |
197 | tty_port_destroy(port); | 262 | tty_port_destroy(port); |
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 1c196f87e9d9..ff04b7f8549f 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c | |||
@@ -279,7 +279,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) | |||
279 | map = kzalloc(sizeof(*map), GFP_KERNEL); | 279 | map = kzalloc(sizeof(*map), GFP_KERNEL); |
280 | if (!map) { | 280 | if (!map) { |
281 | ret = -ENOMEM; | 281 | ret = -ENOMEM; |
282 | goto err_map_kobj; | 282 | goto err_map; |
283 | } | 283 | } |
284 | kobject_init(&map->kobj, &map_attr_type); | 284 | kobject_init(&map->kobj, &map_attr_type); |
285 | map->mem = mem; | 285 | map->mem = mem; |
@@ -289,7 +289,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) | |||
289 | goto err_map_kobj; | 289 | goto err_map_kobj; |
290 | ret = kobject_uevent(&map->kobj, KOBJ_ADD); | 290 | ret = kobject_uevent(&map->kobj, KOBJ_ADD); |
291 | if (ret) | 291 | if (ret) |
292 | goto err_map; | 292 | goto err_map_kobj; |
293 | } | 293 | } |
294 | 294 | ||
295 | for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { | 295 | for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { |
@@ -308,7 +308,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) | |||
308 | portio = kzalloc(sizeof(*portio), GFP_KERNEL); | 308 | portio = kzalloc(sizeof(*portio), GFP_KERNEL); |
309 | if (!portio) { | 309 | if (!portio) { |
310 | ret = -ENOMEM; | 310 | ret = -ENOMEM; |
311 | goto err_portio_kobj; | 311 | goto err_portio; |
312 | } | 312 | } |
313 | kobject_init(&portio->kobj, &portio_attr_type); | 313 | kobject_init(&portio->kobj, &portio_attr_type); |
314 | portio->port = port; | 314 | portio->port = port; |
@@ -319,7 +319,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) | |||
319 | goto err_portio_kobj; | 319 | goto err_portio_kobj; |
320 | ret = kobject_uevent(&portio->kobj, KOBJ_ADD); | 320 | ret = kobject_uevent(&portio->kobj, KOBJ_ADD); |
321 | if (ret) | 321 | if (ret) |
322 | goto err_portio; | 322 | goto err_portio_kobj; |
323 | } | 323 | } |
324 | 324 | ||
325 | return 0; | 325 | return 0; |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index cfc3cff6e8d5..8e6ef671be9b 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -475,11 +475,11 @@ static void snoop_urb(struct usb_device *udev, | |||
475 | 475 | ||
476 | if (userurb) { /* Async */ | 476 | if (userurb) { /* Async */ |
477 | if (when == SUBMIT) | 477 | if (when == SUBMIT) |
478 | dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " | 478 | dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " |
479 | "length %u\n", | 479 | "length %u\n", |
480 | userurb, ep, t, d, length); | 480 | userurb, ep, t, d, length); |
481 | else | 481 | else |
482 | dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " | 482 | dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " |
483 | "actual_length %u status %d\n", | 483 | "actual_length %u status %d\n", |
484 | userurb, ep, t, d, length, | 484 | userurb, ep, t, d, length, |
485 | timeout_or_status); | 485 | timeout_or_status); |
@@ -1895,7 +1895,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg) | |||
1895 | if (as) { | 1895 | if (as) { |
1896 | int retval; | 1896 | int retval; |
1897 | 1897 | ||
1898 | snoop(&ps->dev->dev, "reap %p\n", as->userurb); | 1898 | snoop(&ps->dev->dev, "reap %pK\n", as->userurb); |
1899 | retval = processcompl(as, (void __user * __user *)arg); | 1899 | retval = processcompl(as, (void __user * __user *)arg); |
1900 | free_async(as); | 1900 | free_async(as); |
1901 | return retval; | 1901 | return retval; |
@@ -1912,7 +1912,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg) | |||
1912 | 1912 | ||
1913 | as = async_getcompleted(ps); | 1913 | as = async_getcompleted(ps); |
1914 | if (as) { | 1914 | if (as) { |
1915 | snoop(&ps->dev->dev, "reap %p\n", as->userurb); | 1915 | snoop(&ps->dev->dev, "reap %pK\n", as->userurb); |
1916 | retval = processcompl(as, (void __user * __user *)arg); | 1916 | retval = processcompl(as, (void __user * __user *)arg); |
1917 | free_async(as); | 1917 | free_async(as); |
1918 | } else { | 1918 | } else { |
@@ -2043,7 +2043,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg) | |||
2043 | if (as) { | 2043 | if (as) { |
2044 | int retval; | 2044 | int retval; |
2045 | 2045 | ||
2046 | snoop(&ps->dev->dev, "reap %p\n", as->userurb); | 2046 | snoop(&ps->dev->dev, "reap %pK\n", as->userurb); |
2047 | retval = processcompl_compat(as, (void __user * __user *)arg); | 2047 | retval = processcompl_compat(as, (void __user * __user *)arg); |
2048 | free_async(as); | 2048 | free_async(as); |
2049 | return retval; | 2049 | return retval; |
@@ -2060,7 +2060,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar | |||
2060 | 2060 | ||
2061 | as = async_getcompleted(ps); | 2061 | as = async_getcompleted(ps); |
2062 | if (as) { | 2062 | if (as) { |
2063 | snoop(&ps->dev->dev, "reap %p\n", as->userurb); | 2063 | snoop(&ps->dev->dev, "reap %pK\n", as->userurb); |
2064 | retval = processcompl_compat(as, (void __user * __user *)arg); | 2064 | retval = processcompl_compat(as, (void __user * __user *)arg); |
2065 | free_async(as); | 2065 | free_async(as); |
2066 | } else { | 2066 | } else { |
@@ -2489,7 +2489,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, | |||
2489 | #endif | 2489 | #endif |
2490 | 2490 | ||
2491 | case USBDEVFS_DISCARDURB: | 2491 | case USBDEVFS_DISCARDURB: |
2492 | snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p); | 2492 | snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p); |
2493 | ret = proc_unlinkurb(ps, p); | 2493 | ret = proc_unlinkurb(ps, p); |
2494 | break; | 2494 | break; |
2495 | 2495 | ||
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 49550790a3cb..5dea98358c05 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -1723,7 +1723,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) | |||
1723 | if (retval == 0) | 1723 | if (retval == 0) |
1724 | retval = -EINPROGRESS; | 1724 | retval = -EINPROGRESS; |
1725 | else if (retval != -EIDRM && retval != -EBUSY) | 1725 | else if (retval != -EIDRM && retval != -EBUSY) |
1726 | dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n", | 1726 | dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n", |
1727 | urb, retval); | 1727 | urb, retval); |
1728 | usb_put_dev(udev); | 1728 | usb_put_dev(udev); |
1729 | } | 1729 | } |
@@ -1890,7 +1890,7 @@ rescan: | |||
1890 | /* kick hcd */ | 1890 | /* kick hcd */ |
1891 | unlink1(hcd, urb, -ESHUTDOWN); | 1891 | unlink1(hcd, urb, -ESHUTDOWN); |
1892 | dev_dbg (hcd->self.controller, | 1892 | dev_dbg (hcd->self.controller, |
1893 | "shutdown urb %p ep%d%s%s\n", | 1893 | "shutdown urb %pK ep%d%s%s\n", |
1894 | urb, usb_endpoint_num(&ep->desc), | 1894 | urb, usb_endpoint_num(&ep->desc), |
1895 | is_in ? "in" : "out", | 1895 | is_in ? "in" : "out", |
1896 | ({ char *s; | 1896 | ({ char *s; |
@@ -2520,6 +2520,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, | |||
2520 | hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), | 2520 | hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), |
2521 | GFP_KERNEL); | 2521 | GFP_KERNEL); |
2522 | if (!hcd->bandwidth_mutex) { | 2522 | if (!hcd->bandwidth_mutex) { |
2523 | kfree(hcd->address0_mutex); | ||
2523 | kfree(hcd); | 2524 | kfree(hcd); |
2524 | dev_dbg(dev, "hcd bandwidth mutex alloc failed\n"); | 2525 | dev_dbg(dev, "hcd bandwidth mutex alloc failed\n"); |
2525 | return NULL; | 2526 | return NULL; |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 9dca59ef18b3..b8bb20d7acdb 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -362,7 +362,8 @@ static void usb_set_lpm_parameters(struct usb_device *udev) | |||
362 | } | 362 | } |
363 | 363 | ||
364 | /* USB 2.0 spec Section 11.24.4.5 */ | 364 | /* USB 2.0 spec Section 11.24.4.5 */ |
365 | static int get_hub_descriptor(struct usb_device *hdev, void *data) | 365 | static int get_hub_descriptor(struct usb_device *hdev, |
366 | struct usb_hub_descriptor *desc) | ||
366 | { | 367 | { |
367 | int i, ret, size; | 368 | int i, ret, size; |
368 | unsigned dtype; | 369 | unsigned dtype; |
@@ -378,10 +379,18 @@ static int get_hub_descriptor(struct usb_device *hdev, void *data) | |||
378 | for (i = 0; i < 3; i++) { | 379 | for (i = 0; i < 3; i++) { |
379 | ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), | 380 | ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), |
380 | USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, | 381 | USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, |
381 | dtype << 8, 0, data, size, | 382 | dtype << 8, 0, desc, size, |
382 | USB_CTRL_GET_TIMEOUT); | 383 | USB_CTRL_GET_TIMEOUT); |
383 | if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2)) | 384 | if (hub_is_superspeed(hdev)) { |
385 | if (ret == size) | ||
386 | return ret; | ||
387 | } else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) { | ||
388 | /* Make sure we have the DeviceRemovable field. */ | ||
389 | size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1; | ||
390 | if (ret < size) | ||
391 | return -EMSGSIZE; | ||
384 | return ret; | 392 | return ret; |
393 | } | ||
385 | } | 394 | } |
386 | return -EINVAL; | 395 | return -EINVAL; |
387 | } | 396 | } |
@@ -1313,7 +1322,7 @@ static int hub_configure(struct usb_hub *hub, | |||
1313 | } | 1322 | } |
1314 | mutex_init(&hub->status_mutex); | 1323 | mutex_init(&hub->status_mutex); |
1315 | 1324 | ||
1316 | hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL); | 1325 | hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL); |
1317 | if (!hub->descriptor) { | 1326 | if (!hub->descriptor) { |
1318 | ret = -ENOMEM; | 1327 | ret = -ENOMEM; |
1319 | goto fail; | 1328 | goto fail; |
@@ -1321,13 +1330,19 @@ static int hub_configure(struct usb_hub *hub, | |||
1321 | 1330 | ||
1322 | /* Request the entire hub descriptor. | 1331 | /* Request the entire hub descriptor. |
1323 | * hub->descriptor can handle USB_MAXCHILDREN ports, | 1332 | * hub->descriptor can handle USB_MAXCHILDREN ports, |
1324 | * but the hub can/will return fewer bytes here. | 1333 | * but a (non-SS) hub can/will return fewer bytes here. |
1325 | */ | 1334 | */ |
1326 | ret = get_hub_descriptor(hdev, hub->descriptor); | 1335 | ret = get_hub_descriptor(hdev, hub->descriptor); |
1327 | if (ret < 0) { | 1336 | if (ret < 0) { |
1328 | message = "can't read hub descriptor"; | 1337 | message = "can't read hub descriptor"; |
1329 | goto fail; | 1338 | goto fail; |
1330 | } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { | 1339 | } |
1340 | |||
1341 | maxchild = USB_MAXCHILDREN; | ||
1342 | if (hub_is_superspeed(hdev)) | ||
1343 | maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS); | ||
1344 | |||
1345 | if (hub->descriptor->bNbrPorts > maxchild) { | ||
1331 | message = "hub has too many ports!"; | 1346 | message = "hub has too many ports!"; |
1332 | ret = -ENODEV; | 1347 | ret = -ENODEV; |
1333 | goto fail; | 1348 | goto fail; |
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c index d787f195a9a6..d563cbcf76cf 100644 --- a/drivers/usb/core/of.c +++ b/drivers/usb/core/of.c | |||
@@ -53,6 +53,9 @@ EXPORT_SYMBOL_GPL(usb_of_get_child_node); | |||
53 | * | 53 | * |
54 | * Find the companion device from platform bus. | 54 | * Find the companion device from platform bus. |
55 | * | 55 | * |
56 | * Takes a reference to the returned struct device which needs to be dropped | ||
57 | * after use. | ||
58 | * | ||
56 | * Return: On success, a pointer to the companion device, %NULL on failure. | 59 | * Return: On success, a pointer to the companion device, %NULL on failure. |
57 | */ | 60 | */ |
58 | struct device *usb_of_get_companion_dev(struct device *dev) | 61 | struct device *usb_of_get_companion_dev(struct device *dev) |
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index d75cb8c0f7df..47903d510955 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c | |||
@@ -338,7 +338,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) | |||
338 | if (!urb || !urb->complete) | 338 | if (!urb || !urb->complete) |
339 | return -EINVAL; | 339 | return -EINVAL; |
340 | if (urb->hcpriv) { | 340 | if (urb->hcpriv) { |
341 | WARN_ONCE(1, "URB %p submitted while active\n", urb); | 341 | WARN_ONCE(1, "URB %pK submitted while active\n", urb); |
342 | return -EBUSY; | 342 | return -EBUSY; |
343 | } | 343 | } |
344 | 344 | ||
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c index 72664700b8a2..12ee23f53cdd 100644 --- a/drivers/usb/dwc3/dwc3-keystone.c +++ b/drivers/usb/dwc3/dwc3-keystone.c | |||
@@ -107,6 +107,10 @@ static int kdwc3_probe(struct platform_device *pdev) | |||
107 | return PTR_ERR(kdwc->usbss); | 107 | return PTR_ERR(kdwc->usbss); |
108 | 108 | ||
109 | kdwc->clk = devm_clk_get(kdwc->dev, "usb"); | 109 | kdwc->clk = devm_clk_get(kdwc->dev, "usb"); |
110 | if (IS_ERR(kdwc->clk)) { | ||
111 | dev_err(kdwc->dev, "unable to get usb clock\n"); | ||
112 | return PTR_ERR(kdwc->clk); | ||
113 | } | ||
110 | 114 | ||
111 | error = clk_prepare_enable(kdwc->clk); | 115 | error = clk_prepare_enable(kdwc->clk); |
112 | if (error < 0) { | 116 | if (error < 0) { |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index a15ec71d0423..84a2cebfc712 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
@@ -39,6 +39,8 @@ | |||
39 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa | 39 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa |
40 | #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 | 40 | #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 |
41 | #define PCI_DEVICE_ID_INTEL_GLK 0x31aa | 41 | #define PCI_DEVICE_ID_INTEL_GLK 0x31aa |
42 | #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee | ||
43 | #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e | ||
42 | 44 | ||
43 | #define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" | 45 | #define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" |
44 | #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 | 46 | #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 |
@@ -270,6 +272,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = { | |||
270 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, | 272 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, |
271 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, | 273 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, |
272 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, | 274 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, |
275 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), }, | ||
276 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), }, | ||
273 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, | 277 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, |
274 | { } /* Terminating Entry */ | 278 | { } /* Terminating Entry */ |
275 | }; | 279 | }; |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 6f6f0b3be3ad..aea9a5b948b4 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -1261,14 +1261,24 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) | |||
1261 | __dwc3_gadget_start_isoc(dwc, dep, cur_uf); | 1261 | __dwc3_gadget_start_isoc(dwc, dep, cur_uf); |
1262 | dep->flags &= ~DWC3_EP_PENDING_REQUEST; | 1262 | dep->flags &= ~DWC3_EP_PENDING_REQUEST; |
1263 | } | 1263 | } |
1264 | return 0; | ||
1264 | } | 1265 | } |
1265 | return 0; | 1266 | |
1267 | if ((dep->flags & DWC3_EP_BUSY) && | ||
1268 | !(dep->flags & DWC3_EP_MISSED_ISOC)) { | ||
1269 | WARN_ON_ONCE(!dep->resource_index); | ||
1270 | ret = __dwc3_gadget_kick_transfer(dep, | ||
1271 | dep->resource_index); | ||
1272 | } | ||
1273 | |||
1274 | goto out; | ||
1266 | } | 1275 | } |
1267 | 1276 | ||
1268 | if (!dwc3_calc_trbs_left(dep)) | 1277 | if (!dwc3_calc_trbs_left(dep)) |
1269 | return 0; | 1278 | return 0; |
1270 | 1279 | ||
1271 | ret = __dwc3_gadget_kick_transfer(dep, 0); | 1280 | ret = __dwc3_gadget_kick_transfer(dep, 0); |
1281 | out: | ||
1272 | if (ret == -EBUSY) | 1282 | if (ret == -EBUSY) |
1273 | ret = 0; | 1283 | ret = 0; |
1274 | 1284 | ||
@@ -3026,6 +3036,15 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) | |||
3026 | return IRQ_HANDLED; | 3036 | return IRQ_HANDLED; |
3027 | } | 3037 | } |
3028 | 3038 | ||
3039 | /* | ||
3040 | * With PCIe legacy interrupt, test shows that top-half irq handler can | ||
3041 | * be called again after HW interrupt deassertion. Check if bottom-half | ||
3042 | * irq event handler completes before caching new event to prevent | ||
3043 | * losing events. | ||
3044 | */ | ||
3045 | if (evt->flags & DWC3_EVENT_PENDING) | ||
3046 | return IRQ_HANDLED; | ||
3047 | |||
3029 | count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); | 3048 | count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); |
3030 | count &= DWC3_GEVNTCOUNT_MASK; | 3049 | count &= DWC3_GEVNTCOUNT_MASK; |
3031 | if (!count) | 3050 | if (!count) |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 71dd27c0d7f2..47dda3450abd 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -1858,12 +1858,12 @@ static int ffs_func_eps_enable(struct ffs_function *func) | |||
1858 | ep->ep->driver_data = ep; | 1858 | ep->ep->driver_data = ep; |
1859 | ep->ep->desc = ds; | 1859 | ep->ep->desc = ds; |
1860 | 1860 | ||
1861 | comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + | 1861 | if (needs_comp_desc) { |
1862 | USB_DT_ENDPOINT_SIZE); | 1862 | comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + |
1863 | ep->ep->maxburst = comp_desc->bMaxBurst + 1; | 1863 | USB_DT_ENDPOINT_SIZE); |
1864 | 1864 | ep->ep->maxburst = comp_desc->bMaxBurst + 1; | |
1865 | if (needs_comp_desc) | ||
1866 | ep->ep->comp_desc = comp_desc; | 1865 | ep->ep->comp_desc = comp_desc; |
1866 | } | ||
1867 | 1867 | ||
1868 | ret = usb_ep_enable(ep->ep); | 1868 | ret = usb_ep_enable(ep->ep); |
1869 | if (likely(!ret)) { | 1869 | if (likely(!ret)) { |
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 000677c991b0..9b0805f55ad7 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c | |||
@@ -1256,7 +1256,7 @@ static void gserial_console_exit(void) | |||
1256 | struct gscons_info *info = &gscons_info; | 1256 | struct gscons_info *info = &gscons_info; |
1257 | 1257 | ||
1258 | unregister_console(&gserial_cons); | 1258 | unregister_console(&gserial_cons); |
1259 | if (info->console_thread != NULL) | 1259 | if (!IS_ERR_OR_NULL(info->console_thread)) |
1260 | kthread_stop(info->console_thread); | 1260 | kthread_stop(info->console_thread); |
1261 | gs_buf_free(&info->con_buf); | 1261 | gs_buf_free(&info->con_buf); |
1262 | } | 1262 | } |
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index c79081952ea0..ccabb51cb98d 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
@@ -2008,7 +2008,7 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc) | |||
2008 | HUB_CHAR_COMMON_OCPM); | 2008 | HUB_CHAR_COMMON_OCPM); |
2009 | desc->bNbrPorts = 1; | 2009 | desc->bNbrPorts = 1; |
2010 | desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ | 2010 | desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ |
2011 | desc->u.ss.DeviceRemovable = 0xffff; | 2011 | desc->u.ss.DeviceRemovable = 0; |
2012 | } | 2012 | } |
2013 | 2013 | ||
2014 | static inline void hub_descriptor(struct usb_hub_descriptor *desc) | 2014 | static inline void hub_descriptor(struct usb_hub_descriptor *desc) |
@@ -2020,8 +2020,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc) | |||
2020 | HUB_CHAR_INDV_PORT_LPSM | | 2020 | HUB_CHAR_INDV_PORT_LPSM | |
2021 | HUB_CHAR_COMMON_OCPM); | 2021 | HUB_CHAR_COMMON_OCPM); |
2022 | desc->bNbrPorts = 1; | 2022 | desc->bNbrPorts = 1; |
2023 | desc->u.hs.DeviceRemovable[0] = 0xff; | 2023 | desc->u.hs.DeviceRemovable[0] = 0; |
2024 | desc->u.hs.DeviceRemovable[1] = 0xff; | 2024 | desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */ |
2025 | } | 2025 | } |
2026 | 2026 | ||
2027 | static int dummy_hub_control( | 2027 | static int dummy_hub_control( |
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index bc7b9be12f54..f1908ea9fbd8 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c | |||
@@ -384,8 +384,10 @@ static int ehci_platform_resume(struct device *dev) | |||
384 | } | 384 | } |
385 | 385 | ||
386 | companion_dev = usb_of_get_companion_dev(hcd->self.controller); | 386 | companion_dev = usb_of_get_companion_dev(hcd->self.controller); |
387 | if (companion_dev) | 387 | if (companion_dev) { |
388 | device_pm_wait_for_dev(hcd->self.controller, companion_dev); | 388 | device_pm_wait_for_dev(hcd->self.controller, companion_dev); |
389 | put_device(companion_dev); | ||
390 | } | ||
389 | 391 | ||
390 | ehci_resume(hcd, priv->reset_on_resume); | 392 | ehci_resume(hcd, priv->reset_on_resume); |
391 | return 0; | 393 | return 0; |
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index bfa7fa3d2eea..7bf78be1fd32 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
@@ -1269,7 +1269,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td) | |||
1269 | time = 30; | 1269 | time = 30; |
1270 | break; | 1270 | break; |
1271 | default: | 1271 | default: |
1272 | time = 300; | 1272 | time = 50; |
1273 | break; | 1273 | break; |
1274 | } | 1274 | } |
1275 | 1275 | ||
@@ -1785,6 +1785,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597) | |||
1785 | pipe = td->pipe; | 1785 | pipe = td->pipe; |
1786 | pipe_stop(r8a66597, pipe); | 1786 | pipe_stop(r8a66597, pipe); |
1787 | 1787 | ||
1788 | /* Select a different address or endpoint */ | ||
1788 | new_td = td; | 1789 | new_td = td; |
1789 | do { | 1790 | do { |
1790 | list_move_tail(&new_td->queue, | 1791 | list_move_tail(&new_td->queue, |
@@ -1794,7 +1795,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597) | |||
1794 | new_td = td; | 1795 | new_td = td; |
1795 | break; | 1796 | break; |
1796 | } | 1797 | } |
1797 | } while (td != new_td && td->address == new_td->address); | 1798 | } while (td != new_td && td->address == new_td->address && |
1799 | td->pipe->info.epnum == new_td->pipe->info.epnum); | ||
1798 | 1800 | ||
1799 | start_transfer(r8a66597, new_td); | 1801 | start_transfer(r8a66597, new_td); |
1800 | 1802 | ||
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 5e3e9d4c6956..0dde49c35dd2 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -419,7 +419,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) | |||
419 | wait_for_completion(cmd->completion); | 419 | wait_for_completion(cmd->completion); |
420 | 420 | ||
421 | if (cmd->status == COMP_COMMAND_ABORTED || | 421 | if (cmd->status == COMP_COMMAND_ABORTED || |
422 | cmd->status == COMP_STOPPED) { | 422 | cmd->status == COMP_COMMAND_RING_STOPPED) { |
423 | xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); | 423 | xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); |
424 | ret = -ETIME; | 424 | ret = -ETIME; |
425 | } | 425 | } |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index bbe22bcc550a..1f1687e888d6 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -56,7 +56,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, | |||
56 | } | 56 | } |
57 | 57 | ||
58 | if (max_packet) { | 58 | if (max_packet) { |
59 | seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA); | 59 | seg->bounce_buf = kzalloc(max_packet, flags); |
60 | if (!seg->bounce_buf) { | 60 | if (!seg->bounce_buf) { |
61 | dma_pool_free(xhci->segment_pool, seg->trbs, dma); | 61 | dma_pool_free(xhci->segment_pool, seg->trbs, dma); |
62 | kfree(seg); | 62 | kfree(seg); |
@@ -1724,7 +1724,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) | |||
1724 | xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); | 1724 | xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); |
1725 | for (i = 0; i < num_sp; i++) { | 1725 | for (i = 0; i < num_sp; i++) { |
1726 | dma_addr_t dma; | 1726 | dma_addr_t dma; |
1727 | void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, | 1727 | void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, |
1728 | flags); | 1728 | flags); |
1729 | if (!buf) | 1729 | if (!buf) |
1730 | goto fail_sp4; | 1730 | goto fail_sp4; |
@@ -2307,10 +2307,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) | |||
2307 | /* Place limits on the number of roothub ports so that the hub | 2307 | /* Place limits on the number of roothub ports so that the hub |
2308 | * descriptors aren't longer than the USB core will allocate. | 2308 | * descriptors aren't longer than the USB core will allocate. |
2309 | */ | 2309 | */ |
2310 | if (xhci->num_usb3_ports > 15) { | 2310 | if (xhci->num_usb3_ports > USB_SS_MAXPORTS) { |
2311 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 2311 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
2312 | "Limiting USB 3.0 roothub ports to 15."); | 2312 | "Limiting USB 3.0 roothub ports to %u.", |
2313 | xhci->num_usb3_ports = 15; | 2313 | USB_SS_MAXPORTS); |
2314 | xhci->num_usb3_ports = USB_SS_MAXPORTS; | ||
2314 | } | 2315 | } |
2315 | if (xhci->num_usb2_ports > USB_MAXCHILDREN) { | 2316 | if (xhci->num_usb2_ports > USB_MAXCHILDREN) { |
2316 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 2317 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 7b86508ac8cf..fcf1f3f63e7a 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 | 52 | #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 |
53 | #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 | 53 | #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 |
54 | #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 | 54 | #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 |
55 | #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 | ||
55 | 56 | ||
56 | static const char hcd_name[] = "xhci_hcd"; | 57 | static const char hcd_name[] = "xhci_hcd"; |
57 | 58 | ||
@@ -166,7 +167,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
166 | pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || | 167 | pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || |
167 | pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || | 168 | pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || |
168 | pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || | 169 | pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || |
169 | pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) { | 170 | pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || |
171 | pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) { | ||
170 | xhci->quirks |= XHCI_PME_STUCK_QUIRK; | 172 | xhci->quirks |= XHCI_PME_STUCK_QUIRK; |
171 | } | 173 | } |
172 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 174 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
@@ -175,7 +177,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
175 | } | 177 | } |
176 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 178 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
177 | (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || | 179 | (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || |
178 | pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) | 180 | pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || |
181 | pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) | ||
179 | xhci->quirks |= XHCI_MISSING_CAS; | 182 | xhci->quirks |= XHCI_MISSING_CAS; |
180 | 183 | ||
181 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && | 184 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && |
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 7c2a9e7c8e0f..c04144b25a67 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
@@ -177,7 +177,7 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
177 | 177 | ||
178 | irq = platform_get_irq(pdev, 0); | 178 | irq = platform_get_irq(pdev, 0); |
179 | if (irq < 0) | 179 | if (irq < 0) |
180 | return -ENODEV; | 180 | return irq; |
181 | 181 | ||
182 | /* | 182 | /* |
183 | * sysdev must point to a device that is known to the system firmware | 183 | * sysdev must point to a device that is known to the system firmware |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 74bf5c60a260..03f63f50afb6 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -323,7 +323,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, | |||
323 | if (i_cmd->status != COMP_COMMAND_ABORTED) | 323 | if (i_cmd->status != COMP_COMMAND_ABORTED) |
324 | continue; | 324 | continue; |
325 | 325 | ||
326 | i_cmd->status = COMP_STOPPED; | 326 | i_cmd->status = COMP_COMMAND_RING_STOPPED; |
327 | 327 | ||
328 | xhci_dbg(xhci, "Turn aborted command %p to no-op\n", | 328 | xhci_dbg(xhci, "Turn aborted command %p to no-op\n", |
329 | i_cmd->command_trb); | 329 | i_cmd->command_trb); |
@@ -641,8 +641,8 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, | |||
641 | xhci_urb_free_priv(urb_priv); | 641 | xhci_urb_free_priv(urb_priv); |
642 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 642 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
643 | spin_unlock(&xhci->lock); | 643 | spin_unlock(&xhci->lock); |
644 | usb_hcd_giveback_urb(hcd, urb, status); | ||
645 | trace_xhci_urb_giveback(urb); | 644 | trace_xhci_urb_giveback(urb); |
645 | usb_hcd_giveback_urb(hcd, urb, status); | ||
646 | spin_lock(&xhci->lock); | 646 | spin_lock(&xhci->lock); |
647 | } | 647 | } |
648 | 648 | ||
@@ -1380,7 +1380,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1380 | cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); | 1380 | cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); |
1381 | 1381 | ||
1382 | /* If CMD ring stopped we own the trbs between enqueue and dequeue */ | 1382 | /* If CMD ring stopped we own the trbs between enqueue and dequeue */ |
1383 | if (cmd_comp_code == COMP_STOPPED) { | 1383 | if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { |
1384 | complete_all(&xhci->cmd_ring_stop_completion); | 1384 | complete_all(&xhci->cmd_ring_stop_completion); |
1385 | return; | 1385 | return; |
1386 | } | 1386 | } |
@@ -1436,8 +1436,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1436 | break; | 1436 | break; |
1437 | case TRB_CMD_NOOP: | 1437 | case TRB_CMD_NOOP: |
1438 | /* Is this an aborted command turned to NO-OP? */ | 1438 | /* Is this an aborted command turned to NO-OP? */ |
1439 | if (cmd->status == COMP_STOPPED) | 1439 | if (cmd->status == COMP_COMMAND_RING_STOPPED) |
1440 | cmd_comp_code = COMP_STOPPED; | 1440 | cmd_comp_code = COMP_COMMAND_RING_STOPPED; |
1441 | break; | 1441 | break; |
1442 | case TRB_RESET_EP: | 1442 | case TRB_RESET_EP: |
1443 | WARN_ON(slot_id != TRB_TO_SLOT_ID( | 1443 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
@@ -2677,11 +2677,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) | |||
2677 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 2677 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
2678 | union xhci_trb *event_ring_deq; | 2678 | union xhci_trb *event_ring_deq; |
2679 | irqreturn_t ret = IRQ_NONE; | 2679 | irqreturn_t ret = IRQ_NONE; |
2680 | unsigned long flags; | ||
2680 | dma_addr_t deq; | 2681 | dma_addr_t deq; |
2681 | u64 temp_64; | 2682 | u64 temp_64; |
2682 | u32 status; | 2683 | u32 status; |
2683 | 2684 | ||
2684 | spin_lock(&xhci->lock); | 2685 | spin_lock_irqsave(&xhci->lock, flags); |
2685 | /* Check if the xHC generated the interrupt, or the irq is shared */ | 2686 | /* Check if the xHC generated the interrupt, or the irq is shared */ |
2686 | status = readl(&xhci->op_regs->status); | 2687 | status = readl(&xhci->op_regs->status); |
2687 | if (status == ~(u32)0) { | 2688 | if (status == ~(u32)0) { |
@@ -2707,12 +2708,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) | |||
2707 | */ | 2708 | */ |
2708 | status |= STS_EINT; | 2709 | status |= STS_EINT; |
2709 | writel(status, &xhci->op_regs->status); | 2710 | writel(status, &xhci->op_regs->status); |
2710 | /* FIXME when MSI-X is supported and there are multiple vectors */ | ||
2711 | /* Clear the MSI-X event interrupt status */ | ||
2712 | 2711 | ||
2713 | if (hcd->irq) { | 2712 | if (!hcd->msi_enabled) { |
2714 | u32 irq_pending; | 2713 | u32 irq_pending; |
2715 | /* Acknowledge the PCI interrupt */ | ||
2716 | irq_pending = readl(&xhci->ir_set->irq_pending); | 2714 | irq_pending = readl(&xhci->ir_set->irq_pending); |
2717 | irq_pending |= IMAN_IP; | 2715 | irq_pending |= IMAN_IP; |
2718 | writel(irq_pending, &xhci->ir_set->irq_pending); | 2716 | writel(irq_pending, &xhci->ir_set->irq_pending); |
@@ -2757,7 +2755,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) | |||
2757 | ret = IRQ_HANDLED; | 2755 | ret = IRQ_HANDLED; |
2758 | 2756 | ||
2759 | out: | 2757 | out: |
2760 | spin_unlock(&xhci->lock); | 2758 | spin_unlock_irqrestore(&xhci->lock, flags); |
2761 | 2759 | ||
2762 | return ret; | 2760 | return ret; |
2763 | } | 2761 | } |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2d1310220832..30f47d92a610 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -359,9 +359,10 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) | |||
359 | /* fall back to msi*/ | 359 | /* fall back to msi*/ |
360 | ret = xhci_setup_msi(xhci); | 360 | ret = xhci_setup_msi(xhci); |
361 | 361 | ||
362 | if (!ret) | 362 | if (!ret) { |
363 | /* hcd->irq is 0, we have MSI */ | 363 | hcd->msi_enabled = 1; |
364 | return 0; | 364 | return 0; |
365 | } | ||
365 | 366 | ||
366 | if (!pdev->irq) { | 367 | if (!pdev->irq) { |
367 | xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); | 368 | xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); |
@@ -1763,7 +1764,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, | |||
1763 | 1764 | ||
1764 | switch (*cmd_status) { | 1765 | switch (*cmd_status) { |
1765 | case COMP_COMMAND_ABORTED: | 1766 | case COMP_COMMAND_ABORTED: |
1766 | case COMP_STOPPED: | 1767 | case COMP_COMMAND_RING_STOPPED: |
1767 | xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); | 1768 | xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); |
1768 | ret = -ETIME; | 1769 | ret = -ETIME; |
1769 | break; | 1770 | break; |
@@ -1813,7 +1814,7 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci, | |||
1813 | 1814 | ||
1814 | switch (*cmd_status) { | 1815 | switch (*cmd_status) { |
1815 | case COMP_COMMAND_ABORTED: | 1816 | case COMP_COMMAND_ABORTED: |
1816 | case COMP_STOPPED: | 1817 | case COMP_COMMAND_RING_STOPPED: |
1817 | xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); | 1818 | xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); |
1818 | ret = -ETIME; | 1819 | ret = -ETIME; |
1819 | break; | 1820 | break; |
@@ -3432,7 +3433,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, | |||
3432 | ret = reset_device_cmd->status; | 3433 | ret = reset_device_cmd->status; |
3433 | switch (ret) { | 3434 | switch (ret) { |
3434 | case COMP_COMMAND_ABORTED: | 3435 | case COMP_COMMAND_ABORTED: |
3435 | case COMP_STOPPED: | 3436 | case COMP_COMMAND_RING_STOPPED: |
3436 | xhci_warn(xhci, "Timeout waiting for reset device command\n"); | 3437 | xhci_warn(xhci, "Timeout waiting for reset device command\n"); |
3437 | ret = -ETIME; | 3438 | ret = -ETIME; |
3438 | goto command_cleanup; | 3439 | goto command_cleanup; |
@@ -3817,7 +3818,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, | |||
3817 | */ | 3818 | */ |
3818 | switch (command->status) { | 3819 | switch (command->status) { |
3819 | case COMP_COMMAND_ABORTED: | 3820 | case COMP_COMMAND_ABORTED: |
3820 | case COMP_STOPPED: | 3821 | case COMP_COMMAND_RING_STOPPED: |
3821 | xhci_warn(xhci, "Timeout while waiting for setup device command\n"); | 3822 | xhci_warn(xhci, "Timeout while waiting for setup device command\n"); |
3822 | ret = -ETIME; | 3823 | ret = -ETIME; |
3823 | break; | 3824 | break; |
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index e9cae4d82af2..15d4e64d3b65 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c | |||
@@ -192,7 +192,7 @@ static int chaoskey_probe(struct usb_interface *interface, | |||
192 | 192 | ||
193 | dev->in_ep = in_ep; | 193 | dev->in_ep = in_ep; |
194 | 194 | ||
195 | if (udev->descriptor.idVendor != ALEA_VENDOR_ID) | 195 | if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID) |
196 | dev->reads_started = 1; | 196 | dev->reads_started = 1; |
197 | 197 | ||
198 | dev->size = size; | 198 | dev->size = size; |
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 77569531b78a..83b05a287b0c 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c | |||
@@ -554,7 +554,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, | |||
554 | info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice); | 554 | info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice); |
555 | 555 | ||
556 | /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */ | 556 | /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */ |
557 | info.speed = le16_to_cpu(dev->udev->speed); | 557 | info.speed = dev->udev->speed; |
558 | info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber; | 558 | info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber; |
559 | info.report_size = dev->report_size; | 559 | info.report_size = dev->report_size; |
560 | 560 | ||
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index aa3c280fdf8d..0782ac6f5edf 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c | |||
@@ -926,6 +926,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device | |||
926 | USB_MAJOR, dev->minor); | 926 | USB_MAJOR, dev->minor); |
927 | 927 | ||
928 | exit: | 928 | exit: |
929 | kfree(get_version_reply); | ||
929 | return retval; | 930 | return retval; |
930 | 931 | ||
931 | error: | 932 | error: |
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c index 3c6948af726a..f019d80ca9e4 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ b/drivers/usb/misc/sisusbvga/sisusb_con.c | |||
@@ -973,7 +973,7 @@ sisusbcon_set_origin(struct vc_data *c) | |||
973 | 973 | ||
974 | mutex_unlock(&sisusb->lock); | 974 | mutex_unlock(&sisusb->lock); |
975 | 975 | ||
976 | return 1; | 976 | return true; |
977 | } | 977 | } |
978 | 978 | ||
979 | /* Interface routine */ | 979 | /* Interface routine */ |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index ac3a4952abb4..dbe617a735d8 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -2780,10 +2780,11 @@ int musb_host_setup(struct musb *musb, int power_budget) | |||
2780 | int ret; | 2780 | int ret; |
2781 | struct usb_hcd *hcd = musb->hcd; | 2781 | struct usb_hcd *hcd = musb->hcd; |
2782 | 2782 | ||
2783 | MUSB_HST_MODE(musb); | 2783 | if (musb->port_mode == MUSB_PORT_MODE_HOST) { |
2784 | musb->xceiv->otg->default_a = 1; | 2784 | MUSB_HST_MODE(musb); |
2785 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; | 2785 | musb->xceiv->otg->default_a = 1; |
2786 | 2786 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; | |
2787 | } | ||
2787 | otg_set_host(musb->xceiv->otg, &hcd->self); | 2788 | otg_set_host(musb->xceiv->otg, &hcd->self); |
2788 | hcd->self.otg_port = 1; | 2789 | hcd->self.otg_port = 1; |
2789 | musb->xceiv->otg->host = &hcd->self; | 2790 | musb->xceiv->otg->host = &hcd->self; |
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c index 8b43c4b99f04..7870b37e0ea5 100644 --- a/drivers/usb/musb/tusb6010_omap.c +++ b/drivers/usb/musb/tusb6010_omap.c | |||
@@ -219,6 +219,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, | |||
219 | u32 dma_remaining; | 219 | u32 dma_remaining; |
220 | int src_burst, dst_burst; | 220 | int src_burst, dst_burst; |
221 | u16 csr; | 221 | u16 csr; |
222 | u32 psize; | ||
222 | int ch; | 223 | int ch; |
223 | s8 dmareq; | 224 | s8 dmareq; |
224 | s8 sync_dev; | 225 | s8 sync_dev; |
@@ -390,15 +391,19 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, | |||
390 | 391 | ||
391 | if (chdat->tx) { | 392 | if (chdat->tx) { |
392 | /* Send transfer_packet_sz packets at a time */ | 393 | /* Send transfer_packet_sz packets at a time */ |
393 | musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, | 394 | psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET); |
394 | chdat->transfer_packet_sz); | 395 | psize &= ~0x7ff; |
396 | psize |= chdat->transfer_packet_sz; | ||
397 | musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize); | ||
395 | 398 | ||
396 | musb_writel(ep_conf, TUSB_EP_TX_OFFSET, | 399 | musb_writel(ep_conf, TUSB_EP_TX_OFFSET, |
397 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); | 400 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); |
398 | } else { | 401 | } else { |
399 | /* Receive transfer_packet_sz packets at a time */ | 402 | /* Receive transfer_packet_sz packets at a time */ |
400 | musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, | 403 | psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET); |
401 | chdat->transfer_packet_sz << 16); | 404 | psize &= ~(0x7ff << 16); |
405 | psize |= (chdat->transfer_packet_sz << 16); | ||
406 | musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize); | ||
402 | 407 | ||
403 | musb_writel(ep_conf, TUSB_EP_RX_OFFSET, | 408 | musb_writel(ep_conf, TUSB_EP_RX_OFFSET, |
404 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); | 409 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index d38780fa8788..aba74f817dc6 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -809,10 +809,10 @@ static const struct usb_device_id id_table_combined[] = { | |||
809 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, | 809 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, |
810 | { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), | 810 | { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), |
811 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 811 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
812 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), | 812 | { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) }, |
813 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 813 | { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) }, |
814 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), | 814 | { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) }, |
815 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 815 | { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) }, |
816 | { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), | 816 | { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), |
817 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 817 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
818 | { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), | 818 | { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), |
@@ -1527,9 +1527,9 @@ static int set_serial_info(struct tty_struct *tty, | |||
1527 | (new_serial.flags & ASYNC_FLAGS)); | 1527 | (new_serial.flags & ASYNC_FLAGS)); |
1528 | priv->custom_divisor = new_serial.custom_divisor; | 1528 | priv->custom_divisor = new_serial.custom_divisor; |
1529 | 1529 | ||
1530 | check_and_exit: | ||
1530 | write_latency_timer(port); | 1531 | write_latency_timer(port); |
1531 | 1532 | ||
1532 | check_and_exit: | ||
1533 | if ((old_priv.flags & ASYNC_SPD_MASK) != | 1533 | if ((old_priv.flags & ASYNC_SPD_MASK) != |
1534 | (priv->flags & ASYNC_SPD_MASK)) { | 1534 | (priv->flags & ASYNC_SPD_MASK)) { |
1535 | if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) | 1535 | if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 71fb9e59db71..4fcf1cecb6d7 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -882,6 +882,8 @@ | |||
882 | /* Olimex */ | 882 | /* Olimex */ |
883 | #define OLIMEX_VID 0x15BA | 883 | #define OLIMEX_VID 0x15BA |
884 | #define OLIMEX_ARM_USB_OCD_PID 0x0003 | 884 | #define OLIMEX_ARM_USB_OCD_PID 0x0003 |
885 | #define OLIMEX_ARM_USB_TINY_PID 0x0004 | ||
886 | #define OLIMEX_ARM_USB_TINY_H_PID 0x002a | ||
885 | #define OLIMEX_ARM_USB_OCD_H_PID 0x002b | 887 | #define OLIMEX_ARM_USB_OCD_H_PID 0x002b |
886 | 888 | ||
887 | /* | 889 | /* |
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 87798e625d6c..6cefb9cb133d 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c | |||
@@ -2336,8 +2336,11 @@ static void change_port_settings(struct tty_struct *tty, | |||
2336 | if (!baud) { | 2336 | if (!baud) { |
2337 | /* pick a default, any default... */ | 2337 | /* pick a default, any default... */ |
2338 | baud = 9600; | 2338 | baud = 9600; |
2339 | } else | 2339 | } else { |
2340 | /* Avoid a zero divisor. */ | ||
2341 | baud = min(baud, 461550); | ||
2340 | tty_encode_baud_rate(tty, baud, baud); | 2342 | tty_encode_baud_rate(tty, baud, baud); |
2343 | } | ||
2341 | 2344 | ||
2342 | edge_port->baud_rate = baud; | 2345 | edge_port->baud_rate = baud; |
2343 | config->wBaudRate = (__u16)((461550L + baud/2) / baud); | 2346 | config->wBaudRate = (__u16)((461550L + baud/2) / baud); |
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c index 73956d48a0c5..f9734a96d516 100644 --- a/drivers/usb/serial/ir-usb.c +++ b/drivers/usb/serial/ir-usb.c | |||
@@ -197,6 +197,7 @@ static u8 ir_xbof_change(u8 xbof) | |||
197 | static int ir_startup(struct usb_serial *serial) | 197 | static int ir_startup(struct usb_serial *serial) |
198 | { | 198 | { |
199 | struct usb_irda_cs_descriptor *irda_desc; | 199 | struct usb_irda_cs_descriptor *irda_desc; |
200 | int rates; | ||
200 | 201 | ||
201 | irda_desc = irda_usb_find_class_desc(serial, 0); | 202 | irda_desc = irda_usb_find_class_desc(serial, 0); |
202 | if (!irda_desc) { | 203 | if (!irda_desc) { |
@@ -205,18 +206,20 @@ static int ir_startup(struct usb_serial *serial) | |||
205 | return -ENODEV; | 206 | return -ENODEV; |
206 | } | 207 | } |
207 | 208 | ||
209 | rates = le16_to_cpu(irda_desc->wBaudRate); | ||
210 | |||
208 | dev_dbg(&serial->dev->dev, | 211 | dev_dbg(&serial->dev->dev, |
209 | "%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n", | 212 | "%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n", |
210 | __func__, | 213 | __func__, |
211 | (irda_desc->wBaudRate & USB_IRDA_BR_2400) ? " 2400" : "", | 214 | (rates & USB_IRDA_BR_2400) ? " 2400" : "", |
212 | (irda_desc->wBaudRate & USB_IRDA_BR_9600) ? " 9600" : "", | 215 | (rates & USB_IRDA_BR_9600) ? " 9600" : "", |
213 | (irda_desc->wBaudRate & USB_IRDA_BR_19200) ? " 19200" : "", | 216 | (rates & USB_IRDA_BR_19200) ? " 19200" : "", |
214 | (irda_desc->wBaudRate & USB_IRDA_BR_38400) ? " 38400" : "", | 217 | (rates & USB_IRDA_BR_38400) ? " 38400" : "", |
215 | (irda_desc->wBaudRate & USB_IRDA_BR_57600) ? " 57600" : "", | 218 | (rates & USB_IRDA_BR_57600) ? " 57600" : "", |
216 | (irda_desc->wBaudRate & USB_IRDA_BR_115200) ? " 115200" : "", | 219 | (rates & USB_IRDA_BR_115200) ? " 115200" : "", |
217 | (irda_desc->wBaudRate & USB_IRDA_BR_576000) ? " 576000" : "", | 220 | (rates & USB_IRDA_BR_576000) ? " 576000" : "", |
218 | (irda_desc->wBaudRate & USB_IRDA_BR_1152000) ? " 1152000" : "", | 221 | (rates & USB_IRDA_BR_1152000) ? " 1152000" : "", |
219 | (irda_desc->wBaudRate & USB_IRDA_BR_4000000) ? " 4000000" : ""); | 222 | (rates & USB_IRDA_BR_4000000) ? " 4000000" : ""); |
220 | 223 | ||
221 | switch (irda_desc->bmAdditionalBOFs) { | 224 | switch (irda_desc->bmAdditionalBOFs) { |
222 | case USB_IRDA_AB_48: | 225 | case USB_IRDA_AB_48: |
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index edbc81f205c2..70f346f1aa86 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c | |||
@@ -189,7 +189,7 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty, | |||
189 | return -ENOMEM; | 189 | return -ENOMEM; |
190 | 190 | ||
191 | divisor = mct_u232_calculate_baud_rate(serial, value, &speed); | 191 | divisor = mct_u232_calculate_baud_rate(serial, value, &speed); |
192 | put_unaligned_le32(cpu_to_le32(divisor), buf); | 192 | put_unaligned_le32(divisor, buf); |
193 | rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), | 193 | rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), |
194 | MCT_U232_SET_BAUD_RATE_REQUEST, | 194 | MCT_U232_SET_BAUD_RATE_REQUEST, |
195 | MCT_U232_SET_REQUEST_TYPE, | 195 | MCT_U232_SET_REQUEST_TYPE, |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index af67a0de6b5d..3bf61acfc26b 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -281,6 +281,7 @@ static void option_instat_callback(struct urb *urb); | |||
281 | #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 | 281 | #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 |
282 | #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 | 282 | #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 |
283 | #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 | 283 | #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 |
284 | #define TELIT_PRODUCT_ME910 0x1100 | ||
284 | #define TELIT_PRODUCT_LE920 0x1200 | 285 | #define TELIT_PRODUCT_LE920 0x1200 |
285 | #define TELIT_PRODUCT_LE910 0x1201 | 286 | #define TELIT_PRODUCT_LE910 0x1201 |
286 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 | 287 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 |
@@ -640,6 +641,11 @@ static const struct option_blacklist_info simcom_sim7100e_blacklist = { | |||
640 | .reserved = BIT(5) | BIT(6), | 641 | .reserved = BIT(5) | BIT(6), |
641 | }; | 642 | }; |
642 | 643 | ||
644 | static const struct option_blacklist_info telit_me910_blacklist = { | ||
645 | .sendsetup = BIT(0), | ||
646 | .reserved = BIT(1) | BIT(3), | ||
647 | }; | ||
648 | |||
643 | static const struct option_blacklist_info telit_le910_blacklist = { | 649 | static const struct option_blacklist_info telit_le910_blacklist = { |
644 | .sendsetup = BIT(0), | 650 | .sendsetup = BIT(0), |
645 | .reserved = BIT(1) | BIT(2), | 651 | .reserved = BIT(1) | BIT(2), |
@@ -1235,6 +1241,8 @@ static const struct usb_device_id option_ids[] = { | |||
1235 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | 1241 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, |
1236 | { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), | 1242 | { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), |
1237 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, | 1243 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, |
1244 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), | ||
1245 | .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, | ||
1238 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), | 1246 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), |
1239 | .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, | 1247 | .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, |
1240 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), | 1248 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 38b3f0d8cd58..fd509ed6cf70 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = { | |||
162 | {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ | 162 | {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ |
163 | {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ | 163 | {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ |
164 | {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ | 164 | {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ |
165 | {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ | ||
166 | {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ | ||
165 | {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ | 167 | {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ |
166 | {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ | 168 | {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ |
167 | {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ | 169 | {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ |
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 369f3c24815a..44af719194b2 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c | |||
@@ -446,6 +446,10 @@ struct ms_lib_ctrl { | |||
446 | #define SD_BLOCK_LEN 9 | 446 | #define SD_BLOCK_LEN 9 |
447 | 447 | ||
448 | struct ene_ub6250_info { | 448 | struct ene_ub6250_info { |
449 | |||
450 | /* I/O bounce buffer */ | ||
451 | u8 *bbuf; | ||
452 | |||
449 | /* for 6250 code */ | 453 | /* for 6250 code */ |
450 | struct SD_STATUS SD_Status; | 454 | struct SD_STATUS SD_Status; |
451 | struct MS_STATUS MS_Status; | 455 | struct MS_STATUS MS_Status; |
@@ -493,8 +497,11 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag); | |||
493 | 497 | ||
494 | static void ene_ub6250_info_destructor(void *extra) | 498 | static void ene_ub6250_info_destructor(void *extra) |
495 | { | 499 | { |
500 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra; | ||
501 | |||
496 | if (!extra) | 502 | if (!extra) |
497 | return; | 503 | return; |
504 | kfree(info->bbuf); | ||
498 | } | 505 | } |
499 | 506 | ||
500 | static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg) | 507 | static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg) |
@@ -860,8 +867,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, | |||
860 | u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat) | 867 | u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat) |
861 | { | 868 | { |
862 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; | 869 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; |
870 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; | ||
871 | u8 *bbuf = info->bbuf; | ||
863 | int result; | 872 | int result; |
864 | u8 ExtBuf[4]; | ||
865 | u32 bn = PhyBlockAddr * 0x20 + PageNum; | 873 | u32 bn = PhyBlockAddr * 0x20 + PageNum; |
866 | 874 | ||
867 | result = ene_load_bincode(us, MS_RW_PATTERN); | 875 | result = ene_load_bincode(us, MS_RW_PATTERN); |
@@ -901,7 +909,7 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, | |||
901 | bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); | 909 | bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); |
902 | bcb->CDB[6] = 0x01; | 910 | bcb->CDB[6] = 0x01; |
903 | 911 | ||
904 | result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); | 912 | result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); |
905 | if (result != USB_STOR_XFER_GOOD) | 913 | if (result != USB_STOR_XFER_GOOD) |
906 | return USB_STOR_TRANSPORT_ERROR; | 914 | return USB_STOR_TRANSPORT_ERROR; |
907 | 915 | ||
@@ -910,9 +918,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, | |||
910 | ExtraDat->status0 = 0x10; /* Not yet,fireware support */ | 918 | ExtraDat->status0 = 0x10; /* Not yet,fireware support */ |
911 | 919 | ||
912 | ExtraDat->status1 = 0x00; /* Not yet,fireware support */ | 920 | ExtraDat->status1 = 0x00; /* Not yet,fireware support */ |
913 | ExtraDat->ovrflg = ExtBuf[0]; | 921 | ExtraDat->ovrflg = bbuf[0]; |
914 | ExtraDat->mngflg = ExtBuf[1]; | 922 | ExtraDat->mngflg = bbuf[1]; |
915 | ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); | 923 | ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); |
916 | 924 | ||
917 | return USB_STOR_TRANSPORT_GOOD; | 925 | return USB_STOR_TRANSPORT_GOOD; |
918 | } | 926 | } |
@@ -1332,8 +1340,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, | |||
1332 | u8 PageNum, struct ms_lib_type_extdat *ExtraDat) | 1340 | u8 PageNum, struct ms_lib_type_extdat *ExtraDat) |
1333 | { | 1341 | { |
1334 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; | 1342 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; |
1343 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; | ||
1344 | u8 *bbuf = info->bbuf; | ||
1335 | int result; | 1345 | int result; |
1336 | u8 ExtBuf[4]; | ||
1337 | 1346 | ||
1338 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); | 1347 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
1339 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | 1348 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
@@ -1347,7 +1356,7 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, | |||
1347 | bcb->CDB[2] = (unsigned char)(PhyBlock>>16); | 1356 | bcb->CDB[2] = (unsigned char)(PhyBlock>>16); |
1348 | bcb->CDB[6] = 0x01; | 1357 | bcb->CDB[6] = 0x01; |
1349 | 1358 | ||
1350 | result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); | 1359 | result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); |
1351 | if (result != USB_STOR_XFER_GOOD) | 1360 | if (result != USB_STOR_XFER_GOOD) |
1352 | return USB_STOR_TRANSPORT_ERROR; | 1361 | return USB_STOR_TRANSPORT_ERROR; |
1353 | 1362 | ||
@@ -1355,9 +1364,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, | |||
1355 | ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */ | 1364 | ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */ |
1356 | ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */ | 1365 | ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */ |
1357 | ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */ | 1366 | ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */ |
1358 | ExtraDat->ovrflg = ExtBuf[0]; | 1367 | ExtraDat->ovrflg = bbuf[0]; |
1359 | ExtraDat->mngflg = ExtBuf[1]; | 1368 | ExtraDat->mngflg = bbuf[1]; |
1360 | ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); | 1369 | ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); |
1361 | 1370 | ||
1362 | return USB_STOR_TRANSPORT_GOOD; | 1371 | return USB_STOR_TRANSPORT_GOOD; |
1363 | } | 1372 | } |
@@ -1556,9 +1565,9 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st) | |||
1556 | u16 PhyBlock, newblk, i; | 1565 | u16 PhyBlock, newblk, i; |
1557 | u16 LogStart, LogEnde; | 1566 | u16 LogStart, LogEnde; |
1558 | struct ms_lib_type_extdat extdat; | 1567 | struct ms_lib_type_extdat extdat; |
1559 | u8 buf[0x200]; | ||
1560 | u32 count = 0, index = 0; | 1568 | u32 count = 0, index = 0; |
1561 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; | 1569 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; |
1570 | u8 *bbuf = info->bbuf; | ||
1562 | 1571 | ||
1563 | for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) { | 1572 | for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) { |
1564 | ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde); | 1573 | ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde); |
@@ -1572,14 +1581,16 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st) | |||
1572 | } | 1581 | } |
1573 | 1582 | ||
1574 | if (count == PhyBlock) { | 1583 | if (count == PhyBlock) { |
1575 | ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf); | 1584 | ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, |
1585 | bbuf); | ||
1576 | count += 0x80; | 1586 | count += 0x80; |
1577 | } | 1587 | } |
1578 | index = (PhyBlock % 0x80) * 4; | 1588 | index = (PhyBlock % 0x80) * 4; |
1579 | 1589 | ||
1580 | extdat.ovrflg = buf[index]; | 1590 | extdat.ovrflg = bbuf[index]; |
1581 | extdat.mngflg = buf[index+1]; | 1591 | extdat.mngflg = bbuf[index+1]; |
1582 | extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]); | 1592 | extdat.logadr = memstick_logaddr(bbuf[index+2], |
1593 | bbuf[index+3]); | ||
1583 | 1594 | ||
1584 | if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { | 1595 | if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { |
1585 | ms_lib_setacquired_errorblock(us, PhyBlock); | 1596 | ms_lib_setacquired_errorblock(us, PhyBlock); |
@@ -2062,9 +2073,9 @@ static int ene_ms_init(struct us_data *us) | |||
2062 | { | 2073 | { |
2063 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; | 2074 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; |
2064 | int result; | 2075 | int result; |
2065 | u8 buf[0x200]; | ||
2066 | u16 MSP_BlockSize, MSP_UserAreaBlocks; | 2076 | u16 MSP_BlockSize, MSP_UserAreaBlocks; |
2067 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; | 2077 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; |
2078 | u8 *bbuf = info->bbuf; | ||
2068 | 2079 | ||
2069 | printk(KERN_INFO "transport --- ENE_MSInit\n"); | 2080 | printk(KERN_INFO "transport --- ENE_MSInit\n"); |
2070 | 2081 | ||
@@ -2083,13 +2094,13 @@ static int ene_ms_init(struct us_data *us) | |||
2083 | bcb->CDB[0] = 0xF1; | 2094 | bcb->CDB[0] = 0xF1; |
2084 | bcb->CDB[1] = 0x01; | 2095 | bcb->CDB[1] = 0x01; |
2085 | 2096 | ||
2086 | result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); | 2097 | result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); |
2087 | if (result != USB_STOR_XFER_GOOD) { | 2098 | if (result != USB_STOR_XFER_GOOD) { |
2088 | printk(KERN_ERR "Execution MS Init Code Fail !!\n"); | 2099 | printk(KERN_ERR "Execution MS Init Code Fail !!\n"); |
2089 | return USB_STOR_TRANSPORT_ERROR; | 2100 | return USB_STOR_TRANSPORT_ERROR; |
2090 | } | 2101 | } |
2091 | /* the same part to test ENE */ | 2102 | /* the same part to test ENE */ |
2092 | info->MS_Status = *(struct MS_STATUS *)&buf[0]; | 2103 | info->MS_Status = *(struct MS_STATUS *) bbuf; |
2093 | 2104 | ||
2094 | if (info->MS_Status.Insert && info->MS_Status.Ready) { | 2105 | if (info->MS_Status.Insert && info->MS_Status.Ready) { |
2095 | printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); | 2106 | printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); |
@@ -2098,15 +2109,15 @@ static int ene_ms_init(struct us_data *us) | |||
2098 | printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); | 2109 | printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); |
2099 | printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); | 2110 | printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); |
2100 | if (info->MS_Status.IsMSPro) { | 2111 | if (info->MS_Status.IsMSPro) { |
2101 | MSP_BlockSize = (buf[6] << 8) | buf[7]; | 2112 | MSP_BlockSize = (bbuf[6] << 8) | bbuf[7]; |
2102 | MSP_UserAreaBlocks = (buf[10] << 8) | buf[11]; | 2113 | MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11]; |
2103 | info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; | 2114 | info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; |
2104 | } else { | 2115 | } else { |
2105 | ms_card_init(us); /* Card is MS (to ms.c)*/ | 2116 | ms_card_init(us); /* Card is MS (to ms.c)*/ |
2106 | } | 2117 | } |
2107 | usb_stor_dbg(us, "MS Init Code OK !!\n"); | 2118 | usb_stor_dbg(us, "MS Init Code OK !!\n"); |
2108 | } else { | 2119 | } else { |
2109 | usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]); | 2120 | usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]); |
2110 | return USB_STOR_TRANSPORT_ERROR; | 2121 | return USB_STOR_TRANSPORT_ERROR; |
2111 | } | 2122 | } |
2112 | 2123 | ||
@@ -2116,9 +2127,9 @@ static int ene_ms_init(struct us_data *us) | |||
2116 | static int ene_sd_init(struct us_data *us) | 2127 | static int ene_sd_init(struct us_data *us) |
2117 | { | 2128 | { |
2118 | int result; | 2129 | int result; |
2119 | u8 buf[0x200]; | ||
2120 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; | 2130 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; |
2121 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; | 2131 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; |
2132 | u8 *bbuf = info->bbuf; | ||
2122 | 2133 | ||
2123 | usb_stor_dbg(us, "transport --- ENE_SDInit\n"); | 2134 | usb_stor_dbg(us, "transport --- ENE_SDInit\n"); |
2124 | /* SD Init Part-1 */ | 2135 | /* SD Init Part-1 */ |
@@ -2152,17 +2163,17 @@ static int ene_sd_init(struct us_data *us) | |||
2152 | bcb->Flags = US_BULK_FLAG_IN; | 2163 | bcb->Flags = US_BULK_FLAG_IN; |
2153 | bcb->CDB[0] = 0xF1; | 2164 | bcb->CDB[0] = 0xF1; |
2154 | 2165 | ||
2155 | result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); | 2166 | result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); |
2156 | if (result != USB_STOR_XFER_GOOD) { | 2167 | if (result != USB_STOR_XFER_GOOD) { |
2157 | usb_stor_dbg(us, "Execution SD Init Code Fail !!\n"); | 2168 | usb_stor_dbg(us, "Execution SD Init Code Fail !!\n"); |
2158 | return USB_STOR_TRANSPORT_ERROR; | 2169 | return USB_STOR_TRANSPORT_ERROR; |
2159 | } | 2170 | } |
2160 | 2171 | ||
2161 | info->SD_Status = *(struct SD_STATUS *)&buf[0]; | 2172 | info->SD_Status = *(struct SD_STATUS *) bbuf; |
2162 | if (info->SD_Status.Insert && info->SD_Status.Ready) { | 2173 | if (info->SD_Status.Insert && info->SD_Status.Ready) { |
2163 | struct SD_STATUS *s = &info->SD_Status; | 2174 | struct SD_STATUS *s = &info->SD_Status; |
2164 | 2175 | ||
2165 | ene_get_card_status(us, (unsigned char *)&buf); | 2176 | ene_get_card_status(us, bbuf); |
2166 | usb_stor_dbg(us, "Insert = %x\n", s->Insert); | 2177 | usb_stor_dbg(us, "Insert = %x\n", s->Insert); |
2167 | usb_stor_dbg(us, "Ready = %x\n", s->Ready); | 2178 | usb_stor_dbg(us, "Ready = %x\n", s->Ready); |
2168 | usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); | 2179 | usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); |
@@ -2170,7 +2181,7 @@ static int ene_sd_init(struct us_data *us) | |||
2170 | usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); | 2181 | usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); |
2171 | usb_stor_dbg(us, "WtP = %x\n", s->WtP); | 2182 | usb_stor_dbg(us, "WtP = %x\n", s->WtP); |
2172 | } else { | 2183 | } else { |
2173 | usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]); | 2184 | usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]); |
2174 | return USB_STOR_TRANSPORT_ERROR; | 2185 | return USB_STOR_TRANSPORT_ERROR; |
2175 | } | 2186 | } |
2176 | return USB_STOR_TRANSPORT_GOOD; | 2187 | return USB_STOR_TRANSPORT_GOOD; |
@@ -2180,13 +2191,15 @@ static int ene_sd_init(struct us_data *us) | |||
2180 | static int ene_init(struct us_data *us) | 2191 | static int ene_init(struct us_data *us) |
2181 | { | 2192 | { |
2182 | int result; | 2193 | int result; |
2183 | u8 misc_reg03 = 0; | 2194 | u8 misc_reg03; |
2184 | struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); | 2195 | struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); |
2196 | u8 *bbuf = info->bbuf; | ||
2185 | 2197 | ||
2186 | result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); | 2198 | result = ene_get_card_type(us, REG_CARD_STATUS, bbuf); |
2187 | if (result != USB_STOR_XFER_GOOD) | 2199 | if (result != USB_STOR_XFER_GOOD) |
2188 | return USB_STOR_TRANSPORT_ERROR; | 2200 | return USB_STOR_TRANSPORT_ERROR; |
2189 | 2201 | ||
2202 | misc_reg03 = bbuf[0]; | ||
2190 | if (misc_reg03 & 0x01) { | 2203 | if (misc_reg03 & 0x01) { |
2191 | if (!info->SD_Status.Ready) { | 2204 | if (!info->SD_Status.Ready) { |
2192 | result = ene_sd_init(us); | 2205 | result = ene_sd_init(us); |
@@ -2303,8 +2316,9 @@ static int ene_ub6250_probe(struct usb_interface *intf, | |||
2303 | const struct usb_device_id *id) | 2316 | const struct usb_device_id *id) |
2304 | { | 2317 | { |
2305 | int result; | 2318 | int result; |
2306 | u8 misc_reg03 = 0; | 2319 | u8 misc_reg03; |
2307 | struct us_data *us; | 2320 | struct us_data *us; |
2321 | struct ene_ub6250_info *info; | ||
2308 | 2322 | ||
2309 | result = usb_stor_probe1(&us, intf, id, | 2323 | result = usb_stor_probe1(&us, intf, id, |
2310 | (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list, | 2324 | (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list, |
@@ -2313,11 +2327,16 @@ static int ene_ub6250_probe(struct usb_interface *intf, | |||
2313 | return result; | 2327 | return result; |
2314 | 2328 | ||
2315 | /* FIXME: where should the code alloc extra buf ? */ | 2329 | /* FIXME: where should the code alloc extra buf ? */ |
2316 | if (!us->extra) { | 2330 | us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); |
2317 | us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); | 2331 | if (!us->extra) |
2318 | if (!us->extra) | 2332 | return -ENOMEM; |
2319 | return -ENOMEM; | 2333 | us->extra_destructor = ene_ub6250_info_destructor; |
2320 | us->extra_destructor = ene_ub6250_info_destructor; | 2334 | |
2335 | info = (struct ene_ub6250_info *)(us->extra); | ||
2336 | info->bbuf = kmalloc(512, GFP_KERNEL); | ||
2337 | if (!info->bbuf) { | ||
2338 | kfree(us->extra); | ||
2339 | return -ENOMEM; | ||
2321 | } | 2340 | } |
2322 | 2341 | ||
2323 | us->transport_name = "ene_ub6250"; | 2342 | us->transport_name = "ene_ub6250"; |
@@ -2329,12 +2348,13 @@ static int ene_ub6250_probe(struct usb_interface *intf, | |||
2329 | return result; | 2348 | return result; |
2330 | 2349 | ||
2331 | /* probe card type */ | 2350 | /* probe card type */ |
2332 | result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); | 2351 | result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf); |
2333 | if (result != USB_STOR_XFER_GOOD) { | 2352 | if (result != USB_STOR_XFER_GOOD) { |
2334 | usb_stor_disconnect(intf); | 2353 | usb_stor_disconnect(intf); |
2335 | return USB_STOR_TRANSPORT_ERROR; | 2354 | return USB_STOR_TRANSPORT_ERROR; |
2336 | } | 2355 | } |
2337 | 2356 | ||
2357 | misc_reg03 = info->bbuf[0]; | ||
2338 | if (!(misc_reg03 & 0x01)) { | 2358 | if (!(misc_reg03 & 0x01)) { |
2339 | pr_info("ums_eneub6250: This driver only supports SD/MS cards. " | 2359 | pr_info("ums_eneub6250: This driver only supports SD/MS cards. " |
2340 | "It does not support SM cards.\n"); | 2360 | "It does not support SM cards.\n"); |
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 5d8b2c261940..0585078638db 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c | |||
@@ -235,14 +235,19 @@ done: | |||
235 | 235 | ||
236 | static inline void hub_descriptor(struct usb_hub_descriptor *desc) | 236 | static inline void hub_descriptor(struct usb_hub_descriptor *desc) |
237 | { | 237 | { |
238 | int width; | ||
239 | |||
238 | memset(desc, 0, sizeof(*desc)); | 240 | memset(desc, 0, sizeof(*desc)); |
239 | desc->bDescriptorType = USB_DT_HUB; | 241 | desc->bDescriptorType = USB_DT_HUB; |
240 | desc->bDescLength = 9; | ||
241 | desc->wHubCharacteristics = cpu_to_le16( | 242 | desc->wHubCharacteristics = cpu_to_le16( |
242 | HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); | 243 | HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); |
244 | |||
243 | desc->bNbrPorts = VHCI_HC_PORTS; | 245 | desc->bNbrPorts = VHCI_HC_PORTS; |
244 | desc->u.hs.DeviceRemovable[0] = 0xff; | 246 | BUILD_BUG_ON(VHCI_HC_PORTS > USB_MAXCHILDREN); |
245 | desc->u.hs.DeviceRemovable[1] = 0xff; | 247 | width = desc->bNbrPorts / 8 + 1; |
248 | desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width; | ||
249 | memset(&desc->u.hs.DeviceRemovable[0], 0, width); | ||
250 | memset(&desc->u.hs.DeviceRemovable[width], 0xff, width); | ||
246 | } | 251 | } |
247 | 252 | ||
248 | static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | 253 | static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, |
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index 6345e85822a4..a50cf45e530f 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c | |||
@@ -341,6 +341,7 @@ error_submit_ep1: | |||
341 | static | 341 | static |
342 | int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) | 342 | int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) |
343 | { | 343 | { |
344 | struct usb_device *udev = interface_to_usbdev(iface); | ||
344 | struct i1480_usb *i1480_usb; | 345 | struct i1480_usb *i1480_usb; |
345 | struct i1480 *i1480; | 346 | struct i1480 *i1480; |
346 | struct device *dev = &iface->dev; | 347 | struct device *dev = &iface->dev; |
@@ -352,8 +353,8 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) | |||
352 | iface->cur_altsetting->desc.bInterfaceNumber); | 353 | iface->cur_altsetting->desc.bInterfaceNumber); |
353 | goto error; | 354 | goto error; |
354 | } | 355 | } |
355 | if (iface->num_altsetting > 1 | 356 | if (iface->num_altsetting > 1 && |
356 | && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) { | 357 | le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) { |
357 | /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ | 358 | /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ |
358 | result = usb_set_interface(interface_to_usbdev(iface), 0, 1); | 359 | result = usb_set_interface(interface_to_usbdev(iface), 0, 1); |
359 | if (result < 0) | 360 | if (result < 0) |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 52a70ee6014f..8b9049dac094 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -452,7 +452,7 @@ config DAVINCI_WATCHDOG | |||
452 | 452 | ||
453 | config ORION_WATCHDOG | 453 | config ORION_WATCHDOG |
454 | tristate "Orion watchdog" | 454 | tristate "Orion watchdog" |
455 | depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || COMPILE_TEST | 455 | depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || (COMPILE_TEST && !ARCH_EBSA110) |
456 | depends on ARM | 456 | depends on ARM |
457 | select WATCHDOG_CORE | 457 | select WATCHDOG_CORE |
458 | help | 458 | help |
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c index 6fce17d5b9f1..a5775dfd8d5f 100644 --- a/drivers/watchdog/bcm_kona_wdt.c +++ b/drivers/watchdog/bcm_kona_wdt.c | |||
@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) | |||
304 | if (!wdt) | 304 | if (!wdt) |
305 | return -ENOMEM; | 305 | return -ENOMEM; |
306 | 306 | ||
307 | spin_lock_init(&wdt->lock); | ||
308 | |||
307 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 309 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
308 | wdt->base = devm_ioremap_resource(dev, res); | 310 | wdt->base = devm_ioremap_resource(dev, res); |
309 | if (IS_ERR(wdt->base)) | 311 | if (IS_ERR(wdt->base)) |
@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) | |||
316 | return ret; | 318 | return ret; |
317 | } | 319 | } |
318 | 320 | ||
319 | spin_lock_init(&wdt->lock); | ||
320 | platform_set_drvdata(pdev, wdt); | 321 | platform_set_drvdata(pdev, wdt); |
321 | watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); | 322 | watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); |
322 | bcm_kona_wdt_wdd.parent = &pdev->dev; | 323 | bcm_kona_wdt_wdd.parent = &pdev->dev; |
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c index 8d61e8bfe60b..86e0b5d2e761 100644 --- a/drivers/watchdog/cadence_wdt.c +++ b/drivers/watchdog/cadence_wdt.c | |||
@@ -49,7 +49,7 @@ | |||
49 | /* Counter maximum value */ | 49 | /* Counter maximum value */ |
50 | #define CDNS_WDT_COUNTER_MAX 0xFFF | 50 | #define CDNS_WDT_COUNTER_MAX 0xFFF |
51 | 51 | ||
52 | static int wdt_timeout = CDNS_WDT_DEFAULT_TIMEOUT; | 52 | static int wdt_timeout; |
53 | static int nowayout = WATCHDOG_NOWAYOUT; | 53 | static int nowayout = WATCHDOG_NOWAYOUT; |
54 | 54 | ||
55 | module_param(wdt_timeout, int, 0); | 55 | module_param(wdt_timeout, int, 0); |
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 347f0389b089..c4f65873bfa4 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c | |||
@@ -306,16 +306,15 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev) | |||
306 | 306 | ||
307 | iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout); | 307 | iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout); |
308 | 308 | ||
309 | /* Reset the timeout status bit so that the timer | ||
310 | * needs to count down twice again before rebooting */ | ||
311 | outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */ | ||
312 | |||
309 | /* Reload the timer by writing to the TCO Timer Counter register */ | 313 | /* Reload the timer by writing to the TCO Timer Counter register */ |
310 | if (p->iTCO_version >= 2) { | 314 | if (p->iTCO_version >= 2) |
311 | outw(0x01, TCO_RLD(p)); | 315 | outw(0x01, TCO_RLD(p)); |
312 | } else if (p->iTCO_version == 1) { | 316 | else if (p->iTCO_version == 1) |
313 | /* Reset the timeout status bit so that the timer | ||
314 | * needs to count down twice again before rebooting */ | ||
315 | outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */ | ||
316 | |||
317 | outb(0x01, TCO_RLD(p)); | 317 | outb(0x01, TCO_RLD(p)); |
318 | } | ||
319 | 318 | ||
320 | spin_unlock(&p->io_lock); | 319 | spin_unlock(&p->io_lock); |
321 | return 0; | 320 | return 0; |
@@ -328,11 +327,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t) | |||
328 | unsigned char val8; | 327 | unsigned char val8; |
329 | unsigned int tmrval; | 328 | unsigned int tmrval; |
330 | 329 | ||
331 | tmrval = seconds_to_ticks(p, t); | 330 | /* The timer counts down twice before rebooting */ |
332 | 331 | tmrval = seconds_to_ticks(p, t) / 2; | |
333 | /* For TCO v1 the timer counts down twice before rebooting */ | ||
334 | if (p->iTCO_version == 1) | ||
335 | tmrval /= 2; | ||
336 | 332 | ||
337 | /* from the specs: */ | 333 | /* from the specs: */ |
338 | /* "Values of 0h-3h are ignored and should not be attempted" */ | 334 | /* "Values of 0h-3h are ignored and should not be attempted" */ |
@@ -385,6 +381,8 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev) | |||
385 | spin_lock(&p->io_lock); | 381 | spin_lock(&p->io_lock); |
386 | val16 = inw(TCO_RLD(p)); | 382 | val16 = inw(TCO_RLD(p)); |
387 | val16 &= 0x3ff; | 383 | val16 &= 0x3ff; |
384 | if (!(inw(TCO1_STS(p)) & 0x0008)) | ||
385 | val16 += (inw(TCOv2_TMR(p)) & 0x3ff); | ||
388 | spin_unlock(&p->io_lock); | 386 | spin_unlock(&p->io_lock); |
389 | 387 | ||
390 | time_left = ticks_to_seconds(p, val16); | 388 | time_left = ticks_to_seconds(p, val16); |
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 99ebf6ea3de6..5615f4013924 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c | |||
@@ -630,6 +630,9 @@ static int usb_pcwd_probe(struct usb_interface *interface, | |||
630 | return -ENODEV; | 630 | return -ENODEV; |
631 | } | 631 | } |
632 | 632 | ||
633 | if (iface_desc->desc.bNumEndpoints < 1) | ||
634 | return -ENODEV; | ||
635 | |||
633 | /* check out the endpoint: it has to be Interrupt & IN */ | 636 | /* check out the endpoint: it has to be Interrupt & IN */ |
634 | endpoint = &iface_desc->endpoint[0].desc; | 637 | endpoint = &iface_desc->endpoint[0].desc; |
635 | 638 | ||
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c index f709962018ac..362fd229786d 100644 --- a/drivers/watchdog/sama5d4_wdt.c +++ b/drivers/watchdog/sama5d4_wdt.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Licensed under GPLv2. | 6 | * Licensed under GPLv2. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/delay.h> | ||
9 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
10 | #include <linux/io.h> | 11 | #include <linux/io.h> |
11 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
@@ -29,6 +30,7 @@ struct sama5d4_wdt { | |||
29 | struct watchdog_device wdd; | 30 | struct watchdog_device wdd; |
30 | void __iomem *reg_base; | 31 | void __iomem *reg_base; |
31 | u32 mr; | 32 | u32 mr; |
33 | unsigned long last_ping; | ||
32 | }; | 34 | }; |
33 | 35 | ||
34 | static int wdt_timeout = WDT_DEFAULT_TIMEOUT; | 36 | static int wdt_timeout = WDT_DEFAULT_TIMEOUT; |
@@ -44,11 +46,34 @@ MODULE_PARM_DESC(nowayout, | |||
44 | "Watchdog cannot be stopped once started (default=" | 46 | "Watchdog cannot be stopped once started (default=" |
45 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | 47 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); |
46 | 48 | ||
49 | #define wdt_enabled (!(wdt->mr & AT91_WDT_WDDIS)) | ||
50 | |||
47 | #define wdt_read(wdt, field) \ | 51 | #define wdt_read(wdt, field) \ |
48 | readl_relaxed((wdt)->reg_base + (field)) | 52 | readl_relaxed((wdt)->reg_base + (field)) |
49 | 53 | ||
50 | #define wdt_write(wtd, field, val) \ | 54 | /* 4 slow clock periods is 4/32768 = 122.07µs*/ |
51 | writel_relaxed((val), (wdt)->reg_base + (field)) | 55 | #define WDT_DELAY usecs_to_jiffies(123) |
56 | |||
57 | static void wdt_write(struct sama5d4_wdt *wdt, u32 field, u32 val) | ||
58 | { | ||
59 | /* | ||
60 | * WDT_CR and WDT_MR must not be modified within three slow clock | ||
61 | * periods following a restart of the watchdog performed by a write | ||
62 | * access in WDT_CR. | ||
63 | */ | ||
64 | while (time_before(jiffies, wdt->last_ping + WDT_DELAY)) | ||
65 | usleep_range(30, 125); | ||
66 | writel_relaxed(val, wdt->reg_base + field); | ||
67 | wdt->last_ping = jiffies; | ||
68 | } | ||
69 | |||
70 | static void wdt_write_nosleep(struct sama5d4_wdt *wdt, u32 field, u32 val) | ||
71 | { | ||
72 | if (time_before(jiffies, wdt->last_ping + WDT_DELAY)) | ||
73 | udelay(123); | ||
74 | writel_relaxed(val, wdt->reg_base + field); | ||
75 | wdt->last_ping = jiffies; | ||
76 | } | ||
52 | 77 | ||
53 | static int sama5d4_wdt_start(struct watchdog_device *wdd) | 78 | static int sama5d4_wdt_start(struct watchdog_device *wdd) |
54 | { | 79 | { |
@@ -89,7 +114,16 @@ static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd, | |||
89 | wdt->mr &= ~AT91_WDT_WDD; | 114 | wdt->mr &= ~AT91_WDT_WDD; |
90 | wdt->mr |= AT91_WDT_SET_WDV(value); | 115 | wdt->mr |= AT91_WDT_SET_WDV(value); |
91 | wdt->mr |= AT91_WDT_SET_WDD(value); | 116 | wdt->mr |= AT91_WDT_SET_WDD(value); |
92 | wdt_write(wdt, AT91_WDT_MR, wdt->mr); | 117 | |
118 | /* | ||
119 | * WDDIS has to be 0 when updating WDD/WDV. The datasheet states: When | ||
120 | * setting the WDDIS bit, and while it is set, the fields WDV and WDD | ||
121 | * must not be modified. | ||
122 | * If the watchdog is enabled, then the timeout can be updated. Else, | ||
123 | * wait that the user enables it. | ||
124 | */ | ||
125 | if (wdt_enabled) | ||
126 | wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS); | ||
93 | 127 | ||
94 | wdd->timeout = timeout; | 128 | wdd->timeout = timeout; |
95 | 129 | ||
@@ -145,23 +179,21 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt) | |||
145 | 179 | ||
146 | static int sama5d4_wdt_init(struct sama5d4_wdt *wdt) | 180 | static int sama5d4_wdt_init(struct sama5d4_wdt *wdt) |
147 | { | 181 | { |
148 | struct watchdog_device *wdd = &wdt->wdd; | ||
149 | u32 value = WDT_SEC2TICKS(wdd->timeout); | ||
150 | u32 reg; | 182 | u32 reg; |
151 | |||
152 | /* | 183 | /* |
153 | * Because the fields WDV and WDD must not be modified when the WDDIS | 184 | * When booting and resuming, the bootloader may have changed the |
154 | * bit is set, so clear the WDDIS bit before writing the WDT_MR. | 185 | * watchdog configuration. |
186 | * If the watchdog is already running, we can safely update it. | ||
187 | * Else, we have to disable it properly. | ||
155 | */ | 188 | */ |
156 | reg = wdt_read(wdt, AT91_WDT_MR); | 189 | if (wdt_enabled) { |
157 | reg &= ~AT91_WDT_WDDIS; | 190 | wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr); |
158 | wdt_write(wdt, AT91_WDT_MR, reg); | 191 | } else { |
159 | 192 | reg = wdt_read(wdt, AT91_WDT_MR); | |
160 | wdt->mr |= AT91_WDT_SET_WDD(value); | 193 | if (!(reg & AT91_WDT_WDDIS)) |
161 | wdt->mr |= AT91_WDT_SET_WDV(value); | 194 | wdt_write_nosleep(wdt, AT91_WDT_MR, |
162 | 195 | reg | AT91_WDT_WDDIS); | |
163 | wdt_write(wdt, AT91_WDT_MR, wdt->mr); | 196 | } |
164 | |||
165 | return 0; | 197 | return 0; |
166 | } | 198 | } |
167 | 199 | ||
@@ -172,6 +204,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev) | |||
172 | struct resource *res; | 204 | struct resource *res; |
173 | void __iomem *regs; | 205 | void __iomem *regs; |
174 | u32 irq = 0; | 206 | u32 irq = 0; |
207 | u32 timeout; | ||
175 | int ret; | 208 | int ret; |
176 | 209 | ||
177 | wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); | 210 | wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); |
@@ -184,6 +217,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev) | |||
184 | wdd->ops = &sama5d4_wdt_ops; | 217 | wdd->ops = &sama5d4_wdt_ops; |
185 | wdd->min_timeout = MIN_WDT_TIMEOUT; | 218 | wdd->min_timeout = MIN_WDT_TIMEOUT; |
186 | wdd->max_timeout = MAX_WDT_TIMEOUT; | 219 | wdd->max_timeout = MAX_WDT_TIMEOUT; |
220 | wdt->last_ping = jiffies; | ||
187 | 221 | ||
188 | watchdog_set_drvdata(wdd, wdt); | 222 | watchdog_set_drvdata(wdd, wdt); |
189 | 223 | ||
@@ -221,6 +255,11 @@ static int sama5d4_wdt_probe(struct platform_device *pdev) | |||
221 | return ret; | 255 | return ret; |
222 | } | 256 | } |
223 | 257 | ||
258 | timeout = WDT_SEC2TICKS(wdd->timeout); | ||
259 | |||
260 | wdt->mr |= AT91_WDT_SET_WDD(timeout); | ||
261 | wdt->mr |= AT91_WDT_SET_WDV(timeout); | ||
262 | |||
224 | ret = sama5d4_wdt_init(wdt); | 263 | ret = sama5d4_wdt_init(wdt); |
225 | if (ret) | 264 | if (ret) |
226 | return ret; | 265 | return ret; |
@@ -263,9 +302,7 @@ static int sama5d4_wdt_resume(struct device *dev) | |||
263 | { | 302 | { |
264 | struct sama5d4_wdt *wdt = dev_get_drvdata(dev); | 303 | struct sama5d4_wdt *wdt = dev_get_drvdata(dev); |
265 | 304 | ||
266 | wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS); | 305 | sama5d4_wdt_init(wdt); |
267 | if (wdt->mr & AT91_WDT_WDDIS) | ||
268 | wdt_write(wdt, AT91_WDT_MR, wdt->mr); | ||
269 | 306 | ||
270 | return 0; | 307 | return 0; |
271 | } | 308 | } |
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c index 48b2c058b009..bc7addc2dc06 100644 --- a/drivers/watchdog/wdt_pci.c +++ b/drivers/watchdog/wdt_pci.c | |||
@@ -332,7 +332,7 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id) | |||
332 | pr_crit("Would Reboot\n"); | 332 | pr_crit("Would Reboot\n"); |
333 | #else | 333 | #else |
334 | pr_crit("Initiating system reboot\n"); | 334 | pr_crit("Initiating system reboot\n"); |
335 | emergency_restart(NULL); | 335 | emergency_restart(); |
336 | #endif | 336 | #endif |
337 | #else | 337 | #else |
338 | pr_crit("Reset in 5ms\n"); | 338 | pr_crit("Reset in 5ms\n"); |
diff --git a/drivers/watchdog/zx2967_wdt.c b/drivers/watchdog/zx2967_wdt.c index e290d5a13a6d..c98252733c30 100644 --- a/drivers/watchdog/zx2967_wdt.c +++ b/drivers/watchdog/zx2967_wdt.c | |||
@@ -211,10 +211,8 @@ static int zx2967_wdt_probe(struct platform_device *pdev) | |||
211 | 211 | ||
212 | base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 212 | base = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
213 | wdt->reg_base = devm_ioremap_resource(dev, base); | 213 | wdt->reg_base = devm_ioremap_resource(dev, base); |
214 | if (IS_ERR(wdt->reg_base)) { | 214 | if (IS_ERR(wdt->reg_base)) |
215 | dev_err(dev, "ioremap failed\n"); | ||
216 | return PTR_ERR(wdt->reg_base); | 215 | return PTR_ERR(wdt->reg_base); |
217 | } | ||
218 | 216 | ||
219 | zx2967_wdt_reset_sysctrl(dev); | 217 | zx2967_wdt_reset_sysctrl(dev); |
220 | 218 | ||
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 3fdde0b283c9..29308a80d66f 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
@@ -1671,8 +1671,12 @@ static long ceph_fallocate(struct file *file, int mode, | |||
1671 | } | 1671 | } |
1672 | 1672 | ||
1673 | size = i_size_read(inode); | 1673 | size = i_size_read(inode); |
1674 | if (!(mode & FALLOC_FL_KEEP_SIZE)) | 1674 | if (!(mode & FALLOC_FL_KEEP_SIZE)) { |
1675 | endoff = offset + length; | 1675 | endoff = offset + length; |
1676 | ret = inode_newsize_ok(inode, endoff); | ||
1677 | if (ret) | ||
1678 | goto unlock; | ||
1679 | } | ||
1676 | 1680 | ||
1677 | if (fi->fmode & CEPH_FILE_MODE_LAZY) | 1681 | if (fi->fmode & CEPH_FILE_MODE_LAZY) |
1678 | want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; | 1682 | want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; |
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 15bac390dff9..b98436f5c7c7 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -1135,20 +1135,19 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, | |||
1135 | u32 acllen = 0; | 1135 | u32 acllen = 0; |
1136 | int rc = 0; | 1136 | int rc = 0; |
1137 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); | 1137 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); |
1138 | struct cifs_tcon *tcon; | 1138 | struct smb_version_operations *ops; |
1139 | 1139 | ||
1140 | cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); | 1140 | cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); |
1141 | 1141 | ||
1142 | if (IS_ERR(tlink)) | 1142 | if (IS_ERR(tlink)) |
1143 | return PTR_ERR(tlink); | 1143 | return PTR_ERR(tlink); |
1144 | tcon = tlink_tcon(tlink); | ||
1145 | 1144 | ||
1146 | if (pfid && (tcon->ses->server->ops->get_acl_by_fid)) | 1145 | ops = tlink_tcon(tlink)->ses->server->ops; |
1147 | pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid, | 1146 | |
1148 | &acllen); | 1147 | if (pfid && (ops->get_acl_by_fid)) |
1149 | else if (tcon->ses->server->ops->get_acl) | 1148 | pntsd = ops->get_acl_by_fid(cifs_sb, pfid, &acllen); |
1150 | pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path, | 1149 | else if (ops->get_acl) |
1151 | &acllen); | 1150 | pntsd = ops->get_acl(cifs_sb, inode, path, &acllen); |
1152 | else { | 1151 | else { |
1153 | cifs_put_tlink(tlink); | 1152 | cifs_put_tlink(tlink); |
1154 | return -EOPNOTSUPP; | 1153 | return -EOPNOTSUPP; |
@@ -1181,23 +1180,23 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode, | |||
1181 | struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ | 1180 | struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ |
1182 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 1181 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
1183 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); | 1182 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); |
1184 | struct cifs_tcon *tcon; | 1183 | struct smb_version_operations *ops; |
1185 | 1184 | ||
1186 | if (IS_ERR(tlink)) | 1185 | if (IS_ERR(tlink)) |
1187 | return PTR_ERR(tlink); | 1186 | return PTR_ERR(tlink); |
1188 | tcon = tlink_tcon(tlink); | 1187 | |
1188 | ops = tlink_tcon(tlink)->ses->server->ops; | ||
1189 | 1189 | ||
1190 | cifs_dbg(NOISY, "set ACL from mode for %s\n", path); | 1190 | cifs_dbg(NOISY, "set ACL from mode for %s\n", path); |
1191 | 1191 | ||
1192 | /* Get the security descriptor */ | 1192 | /* Get the security descriptor */ |
1193 | 1193 | ||
1194 | if (tcon->ses->server->ops->get_acl == NULL) { | 1194 | if (ops->get_acl == NULL) { |
1195 | cifs_put_tlink(tlink); | 1195 | cifs_put_tlink(tlink); |
1196 | return -EOPNOTSUPP; | 1196 | return -EOPNOTSUPP; |
1197 | } | 1197 | } |
1198 | 1198 | ||
1199 | pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path, | 1199 | pntsd = ops->get_acl(cifs_sb, inode, path, &secdesclen); |
1200 | &secdesclen); | ||
1201 | if (IS_ERR(pntsd)) { | 1200 | if (IS_ERR(pntsd)) { |
1202 | rc = PTR_ERR(pntsd); | 1201 | rc = PTR_ERR(pntsd); |
1203 | cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); | 1202 | cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); |
@@ -1224,13 +1223,12 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode, | |||
1224 | 1223 | ||
1225 | cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc); | 1224 | cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc); |
1226 | 1225 | ||
1227 | if (tcon->ses->server->ops->set_acl == NULL) | 1226 | if (ops->set_acl == NULL) |
1228 | rc = -EOPNOTSUPP; | 1227 | rc = -EOPNOTSUPP; |
1229 | 1228 | ||
1230 | if (!rc) { | 1229 | if (!rc) { |
1231 | /* Set the security descriptor */ | 1230 | /* Set the security descriptor */ |
1232 | rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode, | 1231 | rc = ops->set_acl(pnntsd, secdesclen, inode, path, aclflag); |
1233 | path, aclflag); | ||
1234 | cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc); | 1232 | cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc); |
1235 | } | 1233 | } |
1236 | cifs_put_tlink(tlink); | 1234 | cifs_put_tlink(tlink); |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 8be55be70faf..bcc7d9acad64 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -418,7 +418,7 @@ struct smb_version_operations { | |||
418 | int (*validate_negotiate)(const unsigned int, struct cifs_tcon *); | 418 | int (*validate_negotiate)(const unsigned int, struct cifs_tcon *); |
419 | ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *, | 419 | ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *, |
420 | const unsigned char *, const unsigned char *, char *, | 420 | const unsigned char *, const unsigned char *, char *, |
421 | size_t, const struct nls_table *, int); | 421 | size_t, struct cifs_sb_info *); |
422 | int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *, | 422 | int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *, |
423 | const char *, const void *, const __u16, | 423 | const char *, const void *, const __u16, |
424 | const struct nls_table *, int); | 424 | const struct nls_table *, int); |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index e49958c3f8bb..6eb3147132e3 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -480,8 +480,7 @@ extern int CIFSSMBCopy(unsigned int xid, | |||
480 | extern ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, | 480 | extern ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, |
481 | const unsigned char *searchName, | 481 | const unsigned char *searchName, |
482 | const unsigned char *ea_name, char *EAData, | 482 | const unsigned char *ea_name, char *EAData, |
483 | size_t bufsize, const struct nls_table *nls_codepage, | 483 | size_t bufsize, struct cifs_sb_info *cifs_sb); |
484 | int remap_special_chars); | ||
485 | extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon, | 484 | extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon, |
486 | const char *fileName, const char *ea_name, | 485 | const char *fileName, const char *ea_name, |
487 | const void *ea_value, const __u16 ea_value_len, | 486 | const void *ea_value, const __u16 ea_value_len, |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 4c01b3f9abf0..fbb0d4cbda41 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -697,9 +697,7 @@ cifs_echo_callback(struct mid_q_entry *mid) | |||
697 | { | 697 | { |
698 | struct TCP_Server_Info *server = mid->callback_data; | 698 | struct TCP_Server_Info *server = mid->callback_data; |
699 | 699 | ||
700 | mutex_lock(&server->srv_mutex); | ||
701 | DeleteMidQEntry(mid); | 700 | DeleteMidQEntry(mid); |
702 | mutex_unlock(&server->srv_mutex); | ||
703 | add_credits(server, 1, CIFS_ECHO_OP); | 701 | add_credits(server, 1, CIFS_ECHO_OP); |
704 | } | 702 | } |
705 | 703 | ||
@@ -1599,9 +1597,7 @@ cifs_readv_callback(struct mid_q_entry *mid) | |||
1599 | } | 1597 | } |
1600 | 1598 | ||
1601 | queue_work(cifsiod_wq, &rdata->work); | 1599 | queue_work(cifsiod_wq, &rdata->work); |
1602 | mutex_lock(&server->srv_mutex); | ||
1603 | DeleteMidQEntry(mid); | 1600 | DeleteMidQEntry(mid); |
1604 | mutex_unlock(&server->srv_mutex); | ||
1605 | add_credits(server, 1, 0); | 1601 | add_credits(server, 1, 0); |
1606 | } | 1602 | } |
1607 | 1603 | ||
@@ -2058,7 +2054,6 @@ cifs_writev_callback(struct mid_q_entry *mid) | |||
2058 | { | 2054 | { |
2059 | struct cifs_writedata *wdata = mid->callback_data; | 2055 | struct cifs_writedata *wdata = mid->callback_data; |
2060 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); | 2056 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); |
2061 | struct TCP_Server_Info *server = tcon->ses->server; | ||
2062 | unsigned int written; | 2057 | unsigned int written; |
2063 | WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; | 2058 | WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; |
2064 | 2059 | ||
@@ -2095,9 +2090,7 @@ cifs_writev_callback(struct mid_q_entry *mid) | |||
2095 | } | 2090 | } |
2096 | 2091 | ||
2097 | queue_work(cifsiod_wq, &wdata->work); | 2092 | queue_work(cifsiod_wq, &wdata->work); |
2098 | mutex_lock(&server->srv_mutex); | ||
2099 | DeleteMidQEntry(mid); | 2093 | DeleteMidQEntry(mid); |
2100 | mutex_unlock(&server->srv_mutex); | ||
2101 | add_credits(tcon->ses->server, 1, 0); | 2094 | add_credits(tcon->ses->server, 1, 0); |
2102 | } | 2095 | } |
2103 | 2096 | ||
@@ -6076,11 +6069,13 @@ ssize_t | |||
6076 | CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, | 6069 | CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, |
6077 | const unsigned char *searchName, const unsigned char *ea_name, | 6070 | const unsigned char *searchName, const unsigned char *ea_name, |
6078 | char *EAData, size_t buf_size, | 6071 | char *EAData, size_t buf_size, |
6079 | const struct nls_table *nls_codepage, int remap) | 6072 | struct cifs_sb_info *cifs_sb) |
6080 | { | 6073 | { |
6081 | /* BB assumes one setup word */ | 6074 | /* BB assumes one setup word */ |
6082 | TRANSACTION2_QPI_REQ *pSMB = NULL; | 6075 | TRANSACTION2_QPI_REQ *pSMB = NULL; |
6083 | TRANSACTION2_QPI_RSP *pSMBr = NULL; | 6076 | TRANSACTION2_QPI_RSP *pSMBr = NULL; |
6077 | int remap = cifs_remap(cifs_sb); | ||
6078 | struct nls_table *nls_codepage = cifs_sb->local_nls; | ||
6084 | int rc = 0; | 6079 | int rc = 0; |
6085 | int bytes_returned; | 6080 | int bytes_returned; |
6086 | int list_len; | 6081 | int list_len; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 6ef78ad838e6..0fd081bd2a2f 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -582,7 +582,7 @@ cifs_relock_file(struct cifsFileInfo *cfile) | |||
582 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); | 582 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
583 | int rc = 0; | 583 | int rc = 0; |
584 | 584 | ||
585 | down_read(&cinode->lock_sem); | 585 | down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); |
586 | if (cinode->can_cache_brlcks) { | 586 | if (cinode->can_cache_brlcks) { |
587 | /* can cache locks - no need to relock */ | 587 | /* can cache locks - no need to relock */ |
588 | up_read(&cinode->lock_sem); | 588 | up_read(&cinode->lock_sem); |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index c3b2fa0b2ec8..4d1fcd76d022 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -563,8 +563,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path, | |||
563 | 563 | ||
564 | rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path, | 564 | rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path, |
565 | "SETFILEBITS", ea_value, 4 /* size of buf */, | 565 | "SETFILEBITS", ea_value, 4 /* size of buf */, |
566 | cifs_sb->local_nls, | 566 | cifs_sb); |
567 | cifs_remap(cifs_sb)); | ||
568 | cifs_put_tlink(tlink); | 567 | cifs_put_tlink(tlink); |
569 | if (rc < 0) | 568 | if (rc < 0) |
570 | return (int)rc; | 569 | return (int)rc; |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 48ff7703b919..e4afdaae743f 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -1240,15 +1240,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
1240 | goto tcon_exit; | 1240 | goto tcon_exit; |
1241 | } | 1241 | } |
1242 | 1242 | ||
1243 | if (rsp->ShareType & SMB2_SHARE_TYPE_DISK) | 1243 | switch (rsp->ShareType) { |
1244 | case SMB2_SHARE_TYPE_DISK: | ||
1244 | cifs_dbg(FYI, "connection to disk share\n"); | 1245 | cifs_dbg(FYI, "connection to disk share\n"); |
1245 | else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) { | 1246 | break; |
1247 | case SMB2_SHARE_TYPE_PIPE: | ||
1246 | tcon->ipc = true; | 1248 | tcon->ipc = true; |
1247 | cifs_dbg(FYI, "connection to pipe share\n"); | 1249 | cifs_dbg(FYI, "connection to pipe share\n"); |
1248 | } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) { | 1250 | break; |
1249 | tcon->print = true; | 1251 | case SMB2_SHARE_TYPE_PRINT: |
1252 | tcon->ipc = true; | ||
1250 | cifs_dbg(FYI, "connection to printer\n"); | 1253 | cifs_dbg(FYI, "connection to printer\n"); |
1251 | } else { | 1254 | break; |
1255 | default: | ||
1252 | cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); | 1256 | cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); |
1253 | rc = -EOPNOTSUPP; | 1257 | rc = -EOPNOTSUPP; |
1254 | goto tcon_error_exit; | 1258 | goto tcon_error_exit; |
@@ -2173,9 +2177,7 @@ smb2_echo_callback(struct mid_q_entry *mid) | |||
2173 | if (mid->mid_state == MID_RESPONSE_RECEIVED) | 2177 | if (mid->mid_state == MID_RESPONSE_RECEIVED) |
2174 | credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); | 2178 | credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); |
2175 | 2179 | ||
2176 | mutex_lock(&server->srv_mutex); | ||
2177 | DeleteMidQEntry(mid); | 2180 | DeleteMidQEntry(mid); |
2178 | mutex_unlock(&server->srv_mutex); | ||
2179 | add_credits(server, credits_received, CIFS_ECHO_OP); | 2181 | add_credits(server, credits_received, CIFS_ECHO_OP); |
2180 | } | 2182 | } |
2181 | 2183 | ||
@@ -2433,9 +2435,7 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
2433 | cifs_stats_fail_inc(tcon, SMB2_READ_HE); | 2435 | cifs_stats_fail_inc(tcon, SMB2_READ_HE); |
2434 | 2436 | ||
2435 | queue_work(cifsiod_wq, &rdata->work); | 2437 | queue_work(cifsiod_wq, &rdata->work); |
2436 | mutex_lock(&server->srv_mutex); | ||
2437 | DeleteMidQEntry(mid); | 2438 | DeleteMidQEntry(mid); |
2438 | mutex_unlock(&server->srv_mutex); | ||
2439 | add_credits(server, credits_received, 0); | 2439 | add_credits(server, credits_received, 0); |
2440 | } | 2440 | } |
2441 | 2441 | ||
@@ -2594,7 +2594,6 @@ smb2_writev_callback(struct mid_q_entry *mid) | |||
2594 | { | 2594 | { |
2595 | struct cifs_writedata *wdata = mid->callback_data; | 2595 | struct cifs_writedata *wdata = mid->callback_data; |
2596 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); | 2596 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); |
2597 | struct TCP_Server_Info *server = tcon->ses->server; | ||
2598 | unsigned int written; | 2597 | unsigned int written; |
2599 | struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; | 2598 | struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; |
2600 | unsigned int credits_received = 1; | 2599 | unsigned int credits_received = 1; |
@@ -2634,9 +2633,7 @@ smb2_writev_callback(struct mid_q_entry *mid) | |||
2634 | cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); | 2633 | cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); |
2635 | 2634 | ||
2636 | queue_work(cifsiod_wq, &wdata->work); | 2635 | queue_work(cifsiod_wq, &wdata->work); |
2637 | mutex_lock(&server->srv_mutex); | ||
2638 | DeleteMidQEntry(mid); | 2636 | DeleteMidQEntry(mid); |
2639 | mutex_unlock(&server->srv_mutex); | ||
2640 | add_credits(tcon->ses->server, credits_received, 0); | 2637 | add_credits(tcon->ses->server, credits_received, 0); |
2641 | } | 2638 | } |
2642 | 2639 | ||
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 4d64b5b8fc9c..47a125ece11e 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -94,7 +94,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) | |||
94 | now = jiffies; | 94 | now = jiffies; |
95 | /* commands taking longer than one second are indications that | 95 | /* commands taking longer than one second are indications that |
96 | something is wrong, unless it is quite a slow link or server */ | 96 | something is wrong, unless it is quite a slow link or server */ |
97 | if ((now - midEntry->when_alloc) > HZ) { | 97 | if (time_after(now, midEntry->when_alloc + HZ)) { |
98 | if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) { | 98 | if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) { |
99 | pr_debug(" CIFS slow rsp: cmd %d mid %llu", | 99 | pr_debug(" CIFS slow rsp: cmd %d mid %llu", |
100 | midEntry->command, midEntry->mid); | 100 | midEntry->command, midEntry->mid); |
@@ -613,9 +613,7 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) | |||
613 | } | 613 | } |
614 | spin_unlock(&GlobalMid_Lock); | 614 | spin_unlock(&GlobalMid_Lock); |
615 | 615 | ||
616 | mutex_lock(&server->srv_mutex); | ||
617 | DeleteMidQEntry(mid); | 616 | DeleteMidQEntry(mid); |
618 | mutex_unlock(&server->srv_mutex); | ||
619 | return rc; | 617 | return rc; |
620 | } | 618 | } |
621 | 619 | ||
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 20af5187ba63..3cb5c9e2d4e7 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c | |||
@@ -235,8 +235,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler, | |||
235 | 235 | ||
236 | if (pTcon->ses->server->ops->query_all_EAs) | 236 | if (pTcon->ses->server->ops->query_all_EAs) |
237 | rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, | 237 | rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, |
238 | full_path, name, value, size, | 238 | full_path, name, value, size, cifs_sb); |
239 | cifs_sb->local_nls, cifs_remap(cifs_sb)); | ||
240 | break; | 239 | break; |
241 | 240 | ||
242 | case XATTR_CIFS_ACL: { | 241 | case XATTR_CIFS_ACL: { |
@@ -336,8 +335,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size) | |||
336 | 335 | ||
337 | if (pTcon->ses->server->ops->query_all_EAs) | 336 | if (pTcon->ses->server->ops->query_all_EAs) |
338 | rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, | 337 | rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, |
339 | full_path, NULL, data, buf_size, | 338 | full_path, NULL, data, buf_size, cifs_sb); |
340 | cifs_sb->local_nls, cifs_remap(cifs_sb)); | ||
341 | list_ea_exit: | 339 | list_ea_exit: |
342 | kfree(full_path); | 340 | kfree(full_path); |
343 | free_xid(xid); | 341 | free_xid(xid); |
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 26d77f9f8c12..2dcbd5698884 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
@@ -817,7 +817,7 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length, | |||
817 | iomap->bdev = bdev; | 817 | iomap->bdev = bdev; |
818 | iomap->offset = (u64)first_block << blkbits; | 818 | iomap->offset = (u64)first_block << blkbits; |
819 | if (blk_queue_dax(bdev->bd_queue)) | 819 | if (blk_queue_dax(bdev->bd_queue)) |
820 | iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); | 820 | iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); |
821 | else | 821 | else |
822 | iomap->dax_dev = NULL; | 822 | iomap->dax_dev = NULL; |
823 | 823 | ||
@@ -841,7 +841,7 @@ static int | |||
841 | ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length, | 841 | ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length, |
842 | ssize_t written, unsigned flags, struct iomap *iomap) | 842 | ssize_t written, unsigned flags, struct iomap *iomap) |
843 | { | 843 | { |
844 | put_dax(iomap->dax_dev); | 844 | fs_put_dax(iomap->dax_dev); |
845 | if (iomap->type == IOMAP_MAPPED && | 845 | if (iomap->type == IOMAP_MAPPED && |
846 | written < length && | 846 | written < length && |
847 | (flags & IOMAP_WRITE)) | 847 | (flags & IOMAP_WRITE)) |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5834c4d76be8..1bd0bfa547f6 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -3412,7 +3412,7 @@ retry: | |||
3412 | bdev = inode->i_sb->s_bdev; | 3412 | bdev = inode->i_sb->s_bdev; |
3413 | iomap->bdev = bdev; | 3413 | iomap->bdev = bdev; |
3414 | if (blk_queue_dax(bdev->bd_queue)) | 3414 | if (blk_queue_dax(bdev->bd_queue)) |
3415 | iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); | 3415 | iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); |
3416 | else | 3416 | else |
3417 | iomap->dax_dev = NULL; | 3417 | iomap->dax_dev = NULL; |
3418 | iomap->offset = first_block << blkbits; | 3418 | iomap->offset = first_block << blkbits; |
@@ -3447,7 +3447,7 @@ static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, | |||
3447 | int blkbits = inode->i_blkbits; | 3447 | int blkbits = inode->i_blkbits; |
3448 | bool truncate = false; | 3448 | bool truncate = false; |
3449 | 3449 | ||
3450 | put_dax(iomap->dax_dev); | 3450 | fs_put_dax(iomap->dax_dev); |
3451 | if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT)) | 3451 | if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT)) |
3452 | return 0; | 3452 | return 0; |
3453 | 3453 | ||
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 5a1b58f8fef4..65c88379a3a1 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -975,8 +975,15 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) | |||
975 | int err; | 975 | int err; |
976 | char *suffix = ""; | 976 | char *suffix = ""; |
977 | 977 | ||
978 | if (sb->s_bdev) | 978 | if (sb->s_bdev) { |
979 | suffix = "-fuseblk"; | 979 | suffix = "-fuseblk"; |
980 | /* | ||
981 | * sb->s_bdi points to blkdev's bdi however we want to redirect | ||
982 | * it to our private bdi... | ||
983 | */ | ||
984 | bdi_put(sb->s_bdi); | ||
985 | sb->s_bdi = &noop_backing_dev_info; | ||
986 | } | ||
980 | err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev), | 987 | err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev), |
981 | MINOR(fc->dev), suffix); | 988 | MINOR(fc->dev), suffix); |
982 | if (err) | 989 | if (err) |
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index f02eb7673392..a7048eafa8e6 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
@@ -1280,7 +1280,6 @@ xfs_bmap_read_extents( | |||
1280 | xfs_bmbt_rec_t *frp; | 1280 | xfs_bmbt_rec_t *frp; |
1281 | xfs_fsblock_t nextbno; | 1281 | xfs_fsblock_t nextbno; |
1282 | xfs_extnum_t num_recs; | 1282 | xfs_extnum_t num_recs; |
1283 | xfs_extnum_t start; | ||
1284 | 1283 | ||
1285 | num_recs = xfs_btree_get_numrecs(block); | 1284 | num_recs = xfs_btree_get_numrecs(block); |
1286 | if (unlikely(i + num_recs > room)) { | 1285 | if (unlikely(i + num_recs > room)) { |
@@ -1303,7 +1302,6 @@ xfs_bmap_read_extents( | |||
1303 | * Copy records into the extent records. | 1302 | * Copy records into the extent records. |
1304 | */ | 1303 | */ |
1305 | frp = XFS_BMBT_REC_ADDR(mp, block, 1); | 1304 | frp = XFS_BMBT_REC_ADDR(mp, block, 1); |
1306 | start = i; | ||
1307 | for (j = 0; j < num_recs; j++, i++, frp++) { | 1305 | for (j = 0; j < num_recs; j++, i++, frp++) { |
1308 | xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i); | 1306 | xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i); |
1309 | trp->l0 = be64_to_cpu(frp->l0); | 1307 | trp->l0 = be64_to_cpu(frp->l0); |
@@ -2065,8 +2063,10 @@ xfs_bmap_add_extent_delay_real( | |||
2065 | } | 2063 | } |
2066 | temp = xfs_bmap_worst_indlen(bma->ip, temp); | 2064 | temp = xfs_bmap_worst_indlen(bma->ip, temp); |
2067 | temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); | 2065 | temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); |
2068 | diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - | 2066 | diff = (int)(temp + temp2 - |
2069 | (bma->cur ? bma->cur->bc_private.b.allocated : 0)); | 2067 | (startblockval(PREV.br_startblock) - |
2068 | (bma->cur ? | ||
2069 | bma->cur->bc_private.b.allocated : 0))); | ||
2070 | if (diff > 0) { | 2070 | if (diff > 0) { |
2071 | error = xfs_mod_fdblocks(bma->ip->i_mount, | 2071 | error = xfs_mod_fdblocks(bma->ip->i_mount, |
2072 | -((int64_t)diff), false); | 2072 | -((int64_t)diff), false); |
@@ -2123,7 +2123,6 @@ xfs_bmap_add_extent_delay_real( | |||
2123 | temp = da_new; | 2123 | temp = da_new; |
2124 | if (bma->cur) | 2124 | if (bma->cur) |
2125 | temp += bma->cur->bc_private.b.allocated; | 2125 | temp += bma->cur->bc_private.b.allocated; |
2126 | ASSERT(temp <= da_old); | ||
2127 | if (temp < da_old) | 2126 | if (temp < da_old) |
2128 | xfs_mod_fdblocks(bma->ip->i_mount, | 2127 | xfs_mod_fdblocks(bma->ip->i_mount, |
2129 | (int64_t)(da_old - temp), false); | 2128 | (int64_t)(da_old - temp), false); |
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 5392674bf893..3a673ba201aa 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c | |||
@@ -4395,7 +4395,7 @@ xfs_btree_visit_blocks( | |||
4395 | xfs_btree_readahead_ptr(cur, ptr, 1); | 4395 | xfs_btree_readahead_ptr(cur, ptr, 1); |
4396 | 4396 | ||
4397 | /* save for the next iteration of the loop */ | 4397 | /* save for the next iteration of the loop */ |
4398 | lptr = *ptr; | 4398 | xfs_btree_copy_ptrs(cur, &lptr, ptr, 1); |
4399 | } | 4399 | } |
4400 | 4400 | ||
4401 | /* for each buffer in the level */ | 4401 | /* for each buffer in the level */ |
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c index b177ef33cd4c..82a38d86ebad 100644 --- a/fs/xfs/libxfs/xfs_refcount.c +++ b/fs/xfs/libxfs/xfs_refcount.c | |||
@@ -1629,13 +1629,28 @@ xfs_refcount_recover_cow_leftovers( | |||
1629 | if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START) | 1629 | if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START) |
1630 | return -EOPNOTSUPP; | 1630 | return -EOPNOTSUPP; |
1631 | 1631 | ||
1632 | error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); | 1632 | INIT_LIST_HEAD(&debris); |
1633 | |||
1634 | /* | ||
1635 | * In this first part, we use an empty transaction to gather up | ||
1636 | * all the leftover CoW extents so that we can subsequently | ||
1637 | * delete them. The empty transaction is used to avoid | ||
1638 | * a buffer lock deadlock if there happens to be a loop in the | ||
1639 | * refcountbt because we're allowed to re-grab a buffer that is | ||
1640 | * already attached to our transaction. When we're done | ||
1641 | * recording the CoW debris we cancel the (empty) transaction | ||
1642 | * and everything goes away cleanly. | ||
1643 | */ | ||
1644 | error = xfs_trans_alloc_empty(mp, &tp); | ||
1633 | if (error) | 1645 | if (error) |
1634 | return error; | 1646 | return error; |
1635 | cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL); | 1647 | |
1648 | error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp); | ||
1649 | if (error) | ||
1650 | goto out_trans; | ||
1651 | cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL); | ||
1636 | 1652 | ||
1637 | /* Find all the leftover CoW staging extents. */ | 1653 | /* Find all the leftover CoW staging extents. */ |
1638 | INIT_LIST_HEAD(&debris); | ||
1639 | memset(&low, 0, sizeof(low)); | 1654 | memset(&low, 0, sizeof(low)); |
1640 | memset(&high, 0, sizeof(high)); | 1655 | memset(&high, 0, sizeof(high)); |
1641 | low.rc.rc_startblock = XFS_REFC_COW_START; | 1656 | low.rc.rc_startblock = XFS_REFC_COW_START; |
@@ -1645,10 +1660,11 @@ xfs_refcount_recover_cow_leftovers( | |||
1645 | if (error) | 1660 | if (error) |
1646 | goto out_cursor; | 1661 | goto out_cursor; |
1647 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | 1662 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); |
1648 | xfs_buf_relse(agbp); | 1663 | xfs_trans_brelse(tp, agbp); |
1664 | xfs_trans_cancel(tp); | ||
1649 | 1665 | ||
1650 | /* Now iterate the list to free the leftovers */ | 1666 | /* Now iterate the list to free the leftovers */ |
1651 | list_for_each_entry(rr, &debris, rr_list) { | 1667 | list_for_each_entry_safe(rr, n, &debris, rr_list) { |
1652 | /* Set up transaction. */ | 1668 | /* Set up transaction. */ |
1653 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp); | 1669 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp); |
1654 | if (error) | 1670 | if (error) |
@@ -1676,8 +1692,16 @@ xfs_refcount_recover_cow_leftovers( | |||
1676 | error = xfs_trans_commit(tp); | 1692 | error = xfs_trans_commit(tp); |
1677 | if (error) | 1693 | if (error) |
1678 | goto out_free; | 1694 | goto out_free; |
1695 | |||
1696 | list_del(&rr->rr_list); | ||
1697 | kmem_free(rr); | ||
1679 | } | 1698 | } |
1680 | 1699 | ||
1700 | return error; | ||
1701 | out_defer: | ||
1702 | xfs_defer_cancel(&dfops); | ||
1703 | out_trans: | ||
1704 | xfs_trans_cancel(tp); | ||
1681 | out_free: | 1705 | out_free: |
1682 | /* Free the leftover list */ | 1706 | /* Free the leftover list */ |
1683 | list_for_each_entry_safe(rr, n, &debris, rr_list) { | 1707 | list_for_each_entry_safe(rr, n, &debris, rr_list) { |
@@ -1688,11 +1712,6 @@ out_free: | |||
1688 | 1712 | ||
1689 | out_cursor: | 1713 | out_cursor: |
1690 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | 1714 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); |
1691 | xfs_buf_relse(agbp); | 1715 | xfs_trans_brelse(tp, agbp); |
1692 | goto out_free; | 1716 | goto out_trans; |
1693 | |||
1694 | out_defer: | ||
1695 | xfs_defer_cancel(&dfops); | ||
1696 | xfs_trans_cancel(tp); | ||
1697 | goto out_free; | ||
1698 | } | 1717 | } |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 2b954308a1d6..9e3cc2146d5b 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
@@ -582,9 +582,13 @@ xfs_getbmap( | |||
582 | } | 582 | } |
583 | break; | 583 | break; |
584 | default: | 584 | default: |
585 | /* Local format data forks report no extents. */ | ||
586 | if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) { | ||
587 | bmv->bmv_entries = 0; | ||
588 | return 0; | ||
589 | } | ||
585 | if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && | 590 | if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && |
586 | ip->i_d.di_format != XFS_DINODE_FMT_BTREE && | 591 | ip->i_d.di_format != XFS_DINODE_FMT_BTREE) |
587 | ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) | ||
588 | return -EINVAL; | 592 | return -EINVAL; |
589 | 593 | ||
590 | if (xfs_get_extsz_hint(ip) || | 594 | if (xfs_get_extsz_hint(ip) || |
@@ -712,7 +716,7 @@ xfs_getbmap( | |||
712 | * extents. | 716 | * extents. |
713 | */ | 717 | */ |
714 | if (map[i].br_startblock == DELAYSTARTBLOCK && | 718 | if (map[i].br_startblock == DELAYSTARTBLOCK && |
715 | map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip))) | 719 | map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip))) |
716 | ASSERT((iflags & BMV_IF_DELALLOC) != 0); | 720 | ASSERT((iflags & BMV_IF_DELALLOC) != 0); |
717 | 721 | ||
718 | if (map[i].br_startblock == HOLESTARTBLOCK && | 722 | if (map[i].br_startblock == HOLESTARTBLOCK && |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 35703a801372..5fb5a0958a14 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -1043,49 +1043,17 @@ xfs_find_get_desired_pgoff( | |||
1043 | 1043 | ||
1044 | index = startoff >> PAGE_SHIFT; | 1044 | index = startoff >> PAGE_SHIFT; |
1045 | endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount); | 1045 | endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount); |
1046 | end = endoff >> PAGE_SHIFT; | 1046 | end = (endoff - 1) >> PAGE_SHIFT; |
1047 | do { | 1047 | do { |
1048 | int want; | 1048 | int want; |
1049 | unsigned nr_pages; | 1049 | unsigned nr_pages; |
1050 | unsigned int i; | 1050 | unsigned int i; |
1051 | 1051 | ||
1052 | want = min_t(pgoff_t, end - index, PAGEVEC_SIZE); | 1052 | want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1; |
1053 | nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, | 1053 | nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, |
1054 | want); | 1054 | want); |
1055 | /* | 1055 | if (nr_pages == 0) |
1056 | * No page mapped into given range. If we are searching holes | ||
1057 | * and if this is the first time we got into the loop, it means | ||
1058 | * that the given offset is landed in a hole, return it. | ||
1059 | * | ||
1060 | * If we have already stepped through some block buffers to find | ||
1061 | * holes but they all contains data. In this case, the last | ||
1062 | * offset is already updated and pointed to the end of the last | ||
1063 | * mapped page, if it does not reach the endpoint to search, | ||
1064 | * that means there should be a hole between them. | ||
1065 | */ | ||
1066 | if (nr_pages == 0) { | ||
1067 | /* Data search found nothing */ | ||
1068 | if (type == DATA_OFF) | ||
1069 | break; | ||
1070 | |||
1071 | ASSERT(type == HOLE_OFF); | ||
1072 | if (lastoff == startoff || lastoff < endoff) { | ||
1073 | found = true; | ||
1074 | *offset = lastoff; | ||
1075 | } | ||
1076 | break; | ||
1077 | } | ||
1078 | |||
1079 | /* | ||
1080 | * At lease we found one page. If this is the first time we | ||
1081 | * step into the loop, and if the first page index offset is | ||
1082 | * greater than the given search offset, a hole was found. | ||
1083 | */ | ||
1084 | if (type == HOLE_OFF && lastoff == startoff && | ||
1085 | lastoff < page_offset(pvec.pages[0])) { | ||
1086 | found = true; | ||
1087 | break; | 1056 | break; |
1088 | } | ||
1089 | 1057 | ||
1090 | for (i = 0; i < nr_pages; i++) { | 1058 | for (i = 0; i < nr_pages; i++) { |
1091 | struct page *page = pvec.pages[i]; | 1059 | struct page *page = pvec.pages[i]; |
@@ -1098,18 +1066,18 @@ xfs_find_get_desired_pgoff( | |||
1098 | * file mapping. However, page->index will not change | 1066 | * file mapping. However, page->index will not change |
1099 | * because we have a reference on the page. | 1067 | * because we have a reference on the page. |
1100 | * | 1068 | * |
1101 | * Searching done if the page index is out of range. | 1069 | * If current page offset is beyond where we've ended, |
1102 | * If the current offset is not reaches the end of | 1070 | * we've found a hole. |
1103 | * the specified search range, there should be a hole | ||
1104 | * between them. | ||
1105 | */ | 1071 | */ |
1106 | if (page->index > end) { | 1072 | if (type == HOLE_OFF && lastoff < endoff && |
1107 | if (type == HOLE_OFF && lastoff < endoff) { | 1073 | lastoff < page_offset(pvec.pages[i])) { |
1108 | *offset = lastoff; | 1074 | found = true; |
1109 | found = true; | 1075 | *offset = lastoff; |
1110 | } | ||
1111 | goto out; | 1076 | goto out; |
1112 | } | 1077 | } |
1078 | /* Searching done if the page index is out of range. */ | ||
1079 | if (page->index > end) | ||
1080 | goto out; | ||
1113 | 1081 | ||
1114 | lock_page(page); | 1082 | lock_page(page); |
1115 | /* | 1083 | /* |
@@ -1151,21 +1119,20 @@ xfs_find_get_desired_pgoff( | |||
1151 | 1119 | ||
1152 | /* | 1120 | /* |
1153 | * The number of returned pages less than our desired, search | 1121 | * The number of returned pages less than our desired, search |
1154 | * done. In this case, nothing was found for searching data, | 1122 | * done. |
1155 | * but we found a hole behind the last offset. | ||
1156 | */ | 1123 | */ |
1157 | if (nr_pages < want) { | 1124 | if (nr_pages < want) |
1158 | if (type == HOLE_OFF) { | ||
1159 | *offset = lastoff; | ||
1160 | found = true; | ||
1161 | } | ||
1162 | break; | 1125 | break; |
1163 | } | ||
1164 | 1126 | ||
1165 | index = pvec.pages[i - 1]->index + 1; | 1127 | index = pvec.pages[i - 1]->index + 1; |
1166 | pagevec_release(&pvec); | 1128 | pagevec_release(&pvec); |
1167 | } while (index <= end); | 1129 | } while (index <= end); |
1168 | 1130 | ||
1131 | /* No page at lastoff and we are not done - we found a hole. */ | ||
1132 | if (type == HOLE_OFF && lastoff < endoff) { | ||
1133 | *offset = lastoff; | ||
1134 | found = true; | ||
1135 | } | ||
1169 | out: | 1136 | out: |
1170 | pagevec_release(&pvec); | 1137 | pagevec_release(&pvec); |
1171 | return found; | 1138 | return found; |
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index 3683819887a5..814ed729881d 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c | |||
@@ -828,6 +828,7 @@ xfs_getfsmap( | |||
828 | struct xfs_fsmap dkeys[2]; /* per-dev keys */ | 828 | struct xfs_fsmap dkeys[2]; /* per-dev keys */ |
829 | struct xfs_getfsmap_dev handlers[XFS_GETFSMAP_DEVS]; | 829 | struct xfs_getfsmap_dev handlers[XFS_GETFSMAP_DEVS]; |
830 | struct xfs_getfsmap_info info = { NULL }; | 830 | struct xfs_getfsmap_info info = { NULL }; |
831 | bool use_rmap; | ||
831 | int i; | 832 | int i; |
832 | int error = 0; | 833 | int error = 0; |
833 | 834 | ||
@@ -837,12 +838,14 @@ xfs_getfsmap( | |||
837 | !xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1])) | 838 | !xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1])) |
838 | return -EINVAL; | 839 | return -EINVAL; |
839 | 840 | ||
841 | use_rmap = capable(CAP_SYS_ADMIN) && | ||
842 | xfs_sb_version_hasrmapbt(&mp->m_sb); | ||
840 | head->fmh_entries = 0; | 843 | head->fmh_entries = 0; |
841 | 844 | ||
842 | /* Set up our device handlers. */ | 845 | /* Set up our device handlers. */ |
843 | memset(handlers, 0, sizeof(handlers)); | 846 | memset(handlers, 0, sizeof(handlers)); |
844 | handlers[0].dev = new_encode_dev(mp->m_ddev_targp->bt_dev); | 847 | handlers[0].dev = new_encode_dev(mp->m_ddev_targp->bt_dev); |
845 | if (xfs_sb_version_hasrmapbt(&mp->m_sb)) | 848 | if (use_rmap) |
846 | handlers[0].fn = xfs_getfsmap_datadev_rmapbt; | 849 | handlers[0].fn = xfs_getfsmap_datadev_rmapbt; |
847 | else | 850 | else |
848 | handlers[0].fn = xfs_getfsmap_datadev_bnobt; | 851 | handlers[0].fn = xfs_getfsmap_datadev_bnobt; |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index a63f61c256bd..94e5bdf7304c 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -1068,7 +1068,7 @@ xfs_file_iomap_begin( | |||
1068 | /* optionally associate a dax device with the iomap bdev */ | 1068 | /* optionally associate a dax device with the iomap bdev */ |
1069 | bdev = iomap->bdev; | 1069 | bdev = iomap->bdev; |
1070 | if (blk_queue_dax(bdev->bd_queue)) | 1070 | if (blk_queue_dax(bdev->bd_queue)) |
1071 | iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); | 1071 | iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); |
1072 | else | 1072 | else |
1073 | iomap->dax_dev = NULL; | 1073 | iomap->dax_dev = NULL; |
1074 | 1074 | ||
@@ -1149,7 +1149,7 @@ xfs_file_iomap_end( | |||
1149 | unsigned flags, | 1149 | unsigned flags, |
1150 | struct iomap *iomap) | 1150 | struct iomap *iomap) |
1151 | { | 1151 | { |
1152 | put_dax(iomap->dax_dev); | 1152 | fs_put_dax(iomap->dax_dev); |
1153 | if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) | 1153 | if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) |
1154 | return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, | 1154 | return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, |
1155 | length, written, iomap); | 1155 | length, written, iomap); |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 97b8d3728b31..ef718586321c 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -195,7 +195,10 @@ struct vgic_dist { | |||
195 | /* either a GICv2 CPU interface */ | 195 | /* either a GICv2 CPU interface */ |
196 | gpa_t vgic_cpu_base; | 196 | gpa_t vgic_cpu_base; |
197 | /* or a number of GICv3 redistributor regions */ | 197 | /* or a number of GICv3 redistributor regions */ |
198 | gpa_t vgic_redist_base; | 198 | struct { |
199 | gpa_t vgic_redist_base; | ||
200 | gpa_t vgic_redist_free_offset; | ||
201 | }; | ||
199 | }; | 202 | }; |
200 | 203 | ||
201 | /* distributor enabled */ | 204 | /* distributor enabled */ |
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index c47aa248c640..fcd641032f8d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -238,7 +238,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, | |||
238 | bool kick_requeue_list); | 238 | bool kick_requeue_list); |
239 | void blk_mq_kick_requeue_list(struct request_queue *q); | 239 | void blk_mq_kick_requeue_list(struct request_queue *q); |
240 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); | 240 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); |
241 | void blk_mq_abort_requeue_list(struct request_queue *q); | ||
242 | void blk_mq_complete_request(struct request *rq); | 241 | void blk_mq_complete_request(struct request *rq); |
243 | 242 | ||
244 | bool blk_mq_queue_stopped(struct request_queue *q); | 243 | bool blk_mq_queue_stopped(struct request_queue *q); |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 5efb4db44e1e..d5093b52b485 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
@@ -40,6 +40,9 @@ struct bpf_reg_state { | |||
40 | */ | 40 | */ |
41 | s64 min_value; | 41 | s64 min_value; |
42 | u64 max_value; | 42 | u64 max_value; |
43 | u32 min_align; | ||
44 | u32 aux_off; | ||
45 | u32 aux_off_align; | ||
43 | }; | 46 | }; |
44 | 47 | ||
45 | enum bpf_stack_slot_type { | 48 | enum bpf_stack_slot_type { |
@@ -87,6 +90,7 @@ struct bpf_verifier_env { | |||
87 | struct bpf_prog *prog; /* eBPF program being verified */ | 90 | struct bpf_prog *prog; /* eBPF program being verified */ |
88 | struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ | 91 | struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ |
89 | int stack_size; /* number of states to be processed */ | 92 | int stack_size; /* number of states to be processed */ |
93 | bool strict_alignment; /* perform strict pointer alignment checks */ | ||
90 | struct bpf_verifier_state cur_state; /* current verifier state */ | 94 | struct bpf_verifier_state cur_state; /* current verifier state */ |
91 | struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ | 95 | struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ |
92 | const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */ | 96 | const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */ |
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h index aa2e19182d99..51c5bd64bd00 100644 --- a/include/linux/ceph/ceph_debug.h +++ b/include/linux/ceph/ceph_debug.h | |||
@@ -3,6 +3,8 @@ | |||
3 | 3 | ||
4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
5 | 5 | ||
6 | #include <linux/string.h> | ||
7 | |||
6 | #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG | 8 | #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG |
7 | 9 | ||
8 | /* | 10 | /* |
@@ -12,12 +14,10 @@ | |||
12 | */ | 14 | */ |
13 | 15 | ||
14 | # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) | 16 | # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) |
15 | extern const char *ceph_file_part(const char *s, int len); | ||
16 | # define dout(fmt, ...) \ | 17 | # define dout(fmt, ...) \ |
17 | pr_debug("%.*s %12.12s:%-4d : " fmt, \ | 18 | pr_debug("%.*s %12.12s:%-4d : " fmt, \ |
18 | 8 - (int)sizeof(KBUILD_MODNAME), " ", \ | 19 | 8 - (int)sizeof(KBUILD_MODNAME), " ", \ |
19 | ceph_file_part(__FILE__, sizeof(__FILE__)), \ | 20 | kbasename(__FILE__), __LINE__, ##__VA_ARGS__) |
20 | __LINE__, ##__VA_ARGS__) | ||
21 | # else | 21 | # else |
22 | /* faux printk call just to see any compiler warnings. */ | 22 | /* faux printk call just to see any compiler warnings. */ |
23 | # define dout(fmt, ...) do { \ | 23 | # define dout(fmt, ...) do { \ |
diff --git a/include/linux/dax.h b/include/linux/dax.h index 00ebac854bb7..5ec1f6c47716 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h | |||
@@ -18,6 +18,20 @@ struct dax_operations { | |||
18 | void **, pfn_t *); | 18 | void **, pfn_t *); |
19 | }; | 19 | }; |
20 | 20 | ||
21 | #if IS_ENABLED(CONFIG_DAX) | ||
22 | struct dax_device *dax_get_by_host(const char *host); | ||
23 | void put_dax(struct dax_device *dax_dev); | ||
24 | #else | ||
25 | static inline struct dax_device *dax_get_by_host(const char *host) | ||
26 | { | ||
27 | return NULL; | ||
28 | } | ||
29 | |||
30 | static inline void put_dax(struct dax_device *dax_dev) | ||
31 | { | ||
32 | } | ||
33 | #endif | ||
34 | |||
21 | int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); | 35 | int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); |
22 | #if IS_ENABLED(CONFIG_FS_DAX) | 36 | #if IS_ENABLED(CONFIG_FS_DAX) |
23 | int __bdev_dax_supported(struct super_block *sb, int blocksize); | 37 | int __bdev_dax_supported(struct super_block *sb, int blocksize); |
@@ -25,23 +39,29 @@ static inline int bdev_dax_supported(struct super_block *sb, int blocksize) | |||
25 | { | 39 | { |
26 | return __bdev_dax_supported(sb, blocksize); | 40 | return __bdev_dax_supported(sb, blocksize); |
27 | } | 41 | } |
42 | |||
43 | static inline struct dax_device *fs_dax_get_by_host(const char *host) | ||
44 | { | ||
45 | return dax_get_by_host(host); | ||
46 | } | ||
47 | |||
48 | static inline void fs_put_dax(struct dax_device *dax_dev) | ||
49 | { | ||
50 | put_dax(dax_dev); | ||
51 | } | ||
52 | |||
28 | #else | 53 | #else |
29 | static inline int bdev_dax_supported(struct super_block *sb, int blocksize) | 54 | static inline int bdev_dax_supported(struct super_block *sb, int blocksize) |
30 | { | 55 | { |
31 | return -EOPNOTSUPP; | 56 | return -EOPNOTSUPP; |
32 | } | 57 | } |
33 | #endif | ||
34 | 58 | ||
35 | #if IS_ENABLED(CONFIG_DAX) | 59 | static inline struct dax_device *fs_dax_get_by_host(const char *host) |
36 | struct dax_device *dax_get_by_host(const char *host); | ||
37 | void put_dax(struct dax_device *dax_dev); | ||
38 | #else | ||
39 | static inline struct dax_device *dax_get_by_host(const char *host) | ||
40 | { | 60 | { |
41 | return NULL; | 61 | return NULL; |
42 | } | 62 | } |
43 | 63 | ||
44 | static inline void put_dax(struct dax_device *dax_dev) | 64 | static inline void fs_put_dax(struct dax_device *dax_dev) |
45 | { | 65 | { |
46 | } | 66 | } |
47 | #endif | 67 | #endif |
diff --git a/include/linux/filter.h b/include/linux/filter.h index 56197f82af45..62d948f80730 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -272,6 +272,16 @@ struct bpf_prog_aux; | |||
272 | .off = OFF, \ | 272 | .off = OFF, \ |
273 | .imm = IMM }) | 273 | .imm = IMM }) |
274 | 274 | ||
275 | /* Unconditional jumps, goto pc + off16 */ | ||
276 | |||
277 | #define BPF_JMP_A(OFF) \ | ||
278 | ((struct bpf_insn) { \ | ||
279 | .code = BPF_JMP | BPF_JA, \ | ||
280 | .dst_reg = 0, \ | ||
281 | .src_reg = 0, \ | ||
282 | .off = OFF, \ | ||
283 | .imm = 0 }) | ||
284 | |||
275 | /* Function call */ | 285 | /* Function call */ |
276 | 286 | ||
277 | #define BPF_EMIT_CALL(FUNC) \ | 287 | #define BPF_EMIT_CALL(FUNC) \ |
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 8d5fcd6284ce..283dc2f5364d 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
@@ -614,14 +614,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) | |||
614 | static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, | 614 | static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, |
615 | netdev_features_t features) | 615 | netdev_features_t features) |
616 | { | 616 | { |
617 | if (skb_vlan_tagged_multi(skb)) | 617 | if (skb_vlan_tagged_multi(skb)) { |
618 | features = netdev_intersect_features(features, | 618 | /* In the case of multi-tagged packets, use a direct mask |
619 | NETIF_F_SG | | 619 | * instead of using netdev_interesect_features(), to make |
620 | NETIF_F_HIGHDMA | | 620 | * sure that only devices supporting NETIF_F_HW_CSUM will |
621 | NETIF_F_FRAGLIST | | 621 | * have checksum offloading support. |
622 | NETIF_F_HW_CSUM | | 622 | */ |
623 | NETIF_F_HW_VLAN_CTAG_TX | | 623 | features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | |
624 | NETIF_F_HW_VLAN_STAG_TX); | 624 | NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | |
625 | NETIF_F_HW_VLAN_STAG_TX; | ||
626 | } | ||
625 | 627 | ||
626 | return features; | 628 | return features; |
627 | } | 629 | } |
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 30f90c1a0aaf..541df0b5b815 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -349,6 +349,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, | |||
349 | int write, void __user *buffer, | 349 | int write, void __user *buffer, |
350 | size_t *length, loff_t *ppos); | 350 | size_t *length, loff_t *ppos); |
351 | #endif | 351 | #endif |
352 | extern void wait_for_kprobe_optimizer(void); | ||
353 | #else | ||
354 | static inline void wait_for_kprobe_optimizer(void) { } | ||
352 | #endif /* CONFIG_OPTPROBES */ | 355 | #endif /* CONFIG_OPTPROBES */ |
353 | #ifdef CONFIG_KPROBES_ON_FTRACE | 356 | #ifdef CONFIG_KPROBES_ON_FTRACE |
354 | extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, | 357 | extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index dd9a263ed368..a940ec6a046c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -787,8 +787,14 @@ enum { | |||
787 | }; | 787 | }; |
788 | 788 | ||
789 | enum { | 789 | enum { |
790 | CQE_RSS_HTYPE_IP = 0x3 << 6, | 790 | CQE_RSS_HTYPE_IP = 0x3 << 2, |
791 | CQE_RSS_HTYPE_L4 = 0x3 << 2, | 791 | /* cqe->rss_hash_type[3:2] - IP destination selected for hash |
792 | * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved) | ||
793 | */ | ||
794 | CQE_RSS_HTYPE_L4 = 0x3 << 6, | ||
795 | /* cqe->rss_hash_type[7:6] - L4 destination selected for hash | ||
796 | * (00 = none, 01 = TCP. 10 = UDP, 11 = IPSEC.SPI | ||
797 | */ | ||
792 | }; | 798 | }; |
793 | 799 | ||
794 | enum { | 800 | enum { |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index bcdf739ee41a..93273d9ea4d1 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -787,7 +787,12 @@ enum { | |||
787 | 787 | ||
788 | typedef void (*mlx5_cmd_cbk_t)(int status, void *context); | 788 | typedef void (*mlx5_cmd_cbk_t)(int status, void *context); |
789 | 789 | ||
790 | enum { | ||
791 | MLX5_CMD_ENT_STATE_PENDING_COMP, | ||
792 | }; | ||
793 | |||
790 | struct mlx5_cmd_work_ent { | 794 | struct mlx5_cmd_work_ent { |
795 | unsigned long state; | ||
791 | struct mlx5_cmd_msg *in; | 796 | struct mlx5_cmd_msg *in; |
792 | struct mlx5_cmd_msg *out; | 797 | struct mlx5_cmd_msg *out; |
793 | void *uout; | 798 | void *uout; |
@@ -976,7 +981,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); | |||
976 | void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); | 981 | void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); |
977 | void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); | 982 | void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); |
978 | struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); | 983 | struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); |
979 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); | 984 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); |
980 | void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); | 985 | void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); |
981 | int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, | 986 | int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, |
982 | int nent, u64 mask, const char *name, | 987 | int nent, u64 mask, const char *name, |
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 1b166d2e19c5..b25e7baa273e 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h | |||
@@ -109,7 +109,6 @@ struct mlx5_flow_table_attr { | |||
109 | int max_fte; | 109 | int max_fte; |
110 | u32 level; | 110 | u32 level; |
111 | u32 flags; | 111 | u32 flags; |
112 | u32 underlay_qpn; | ||
113 | }; | 112 | }; |
114 | 113 | ||
115 | struct mlx5_flow_table * | 114 | struct mlx5_flow_table * |
@@ -167,4 +166,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); | |||
167 | void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); | 166 | void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); |
168 | void mlx5_fc_query_cached(struct mlx5_fc *counter, | 167 | void mlx5_fc_query_cached(struct mlx5_fc *counter, |
169 | u64 *bytes, u64 *packets, u64 *lastuse); | 168 | u64 *bytes, u64 *packets, u64 *lastuse); |
169 | int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); | ||
170 | int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); | ||
171 | |||
170 | #endif | 172 | #endif |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9c23bd2efb56..3f39d27decf4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -3296,11 +3296,15 @@ int dev_get_phys_port_id(struct net_device *dev, | |||
3296 | int dev_get_phys_port_name(struct net_device *dev, | 3296 | int dev_get_phys_port_name(struct net_device *dev, |
3297 | char *name, size_t len); | 3297 | char *name, size_t len); |
3298 | int dev_change_proto_down(struct net_device *dev, bool proto_down); | 3298 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
3299 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, | ||
3300 | int fd, u32 flags); | ||
3301 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); | 3299 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); |
3302 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 3300 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
3303 | struct netdev_queue *txq, int *ret); | 3301 | struct netdev_queue *txq, int *ret); |
3302 | |||
3303 | typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp); | ||
3304 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, | ||
3305 | int fd, u32 flags); | ||
3306 | bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op); | ||
3307 | |||
3304 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | 3308 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
3305 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | 3309 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
3306 | bool is_skb_forwardable(const struct net_device *dev, | 3310 | bool is_skb_forwardable(const struct net_device *dev, |
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index be378cf47fcc..b3044c2c62cb 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
@@ -294,7 +294,7 @@ int xt_match_to_user(const struct xt_entry_match *m, | |||
294 | int xt_target_to_user(const struct xt_entry_target *t, | 294 | int xt_target_to_user(const struct xt_entry_target *t, |
295 | struct xt_entry_target __user *u); | 295 | struct xt_entry_target __user *u); |
296 | int xt_data_to_user(void __user *dst, const void *src, | 296 | int xt_data_to_user(void __user *dst, const void *src, |
297 | int usersize, int size); | 297 | int usersize, int size, int aligned_size); |
298 | 298 | ||
299 | void *xt_copy_counters_from_user(const void __user *user, unsigned int len, | 299 | void *xt_copy_counters_from_user(const void __user *user, unsigned int len, |
300 | struct xt_counters_info *info, bool compat); | 300 | struct xt_counters_info *info, bool compat); |
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index a30efb437e6d..e0cbf17af780 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h | |||
@@ -125,4 +125,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb, | |||
125 | /* True if the target is not a standard target */ | 125 | /* True if the target is not a standard target */ |
126 | #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) | 126 | #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) |
127 | 127 | ||
128 | static inline bool ebt_invalid_target(int target) | ||
129 | { | ||
130 | return (target < -NUM_STANDARD_TARGETS || target >= 0); | ||
131 | } | ||
132 | |||
128 | #endif | 133 | #endif |
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 0db37158a61d..6c8c5d8041b7 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h | |||
@@ -27,8 +27,8 @@ | |||
27 | 27 | ||
28 | /* FC Port role bitmask - can merge with FC Port Roles in fc transport */ | 28 | /* FC Port role bitmask - can merge with FC Port Roles in fc transport */ |
29 | #define FC_PORT_ROLE_NVME_INITIATOR 0x10 | 29 | #define FC_PORT_ROLE_NVME_INITIATOR 0x10 |
30 | #define FC_PORT_ROLE_NVME_TARGET 0x11 | 30 | #define FC_PORT_ROLE_NVME_TARGET 0x20 |
31 | #define FC_PORT_ROLE_NVME_DISCOVERY 0x12 | 31 | #define FC_PORT_ROLE_NVME_DISCOVERY 0x40 |
32 | 32 | ||
33 | 33 | ||
34 | /** | 34 | /** |
@@ -642,15 +642,7 @@ enum { | |||
642 | * sequence in one LLDD operation. Errors during Data | 642 | * sequence in one LLDD operation. Errors during Data |
643 | * sequence transmit must not allow RSP sequence to be sent. | 643 | * sequence transmit must not allow RSP sequence to be sent. |
644 | */ | 644 | */ |
645 | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1), | 645 | NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1), |
646 | /* Bit 1: When 0, the LLDD will deliver FCP CMD | ||
647 | * on the CPU it should be affinitized to. Thus work will | ||
648 | * be scheduled on the cpu received on. When 1, the LLDD | ||
649 | * may not deliver the CMD on the CPU it should be worked | ||
650 | * on. The transport should pick a cpu to schedule the work | ||
651 | * on. | ||
652 | */ | ||
653 | NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2), | ||
654 | /* Bit 2: When 0, the LLDD is calling the cmd rcv handler | 646 | /* Bit 2: When 0, the LLDD is calling the cmd rcv handler |
655 | * in a non-isr context, allowing the transport to finish | 647 | * in a non-isr context, allowing the transport to finish |
656 | * op completion in the calling context. When 1, the LLDD | 648 | * op completion in the calling context. When 1, the LLDD |
@@ -658,7 +650,7 @@ enum { | |||
658 | * requiring the transport to transition to a workqueue | 650 | * requiring the transport to transition to a workqueue |
659 | * for op completion. | 651 | * for op completion. |
660 | */ | 652 | */ |
661 | NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3), | 653 | NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2), |
662 | /* Bit 3: When 0, the LLDD is calling the op done handler | 654 | /* Bit 3: When 0, the LLDD is calling the op done handler |
663 | * in a non-isr context, allowing the transport to finish | 655 | * in a non-isr context, allowing the transport to finish |
664 | * op completion in the calling context. When 1, the LLDD | 656 | * op completion in the calling context. When 1, the LLDD |
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index ec6b11deb773..1e0deb8e8494 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <linux/ioport.h> | 8 | #include <linux/ioport.h> |
9 | #include <linux/of.h> | 9 | #include <linux/of.h> |
10 | 10 | ||
11 | typedef int const (*of_irq_init_cb_t)(struct device_node *, struct device_node *); | 11 | typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * Workarounds only applied to 32bit powermac machines | 14 | * Workarounds only applied to 32bit powermac machines |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index dc8224ae28d5..e0d1946270f3 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
@@ -64,6 +64,7 @@ extern struct platform_device *of_platform_device_create(struct device_node *np, | |||
64 | const char *bus_id, | 64 | const char *bus_id, |
65 | struct device *parent); | 65 | struct device *parent); |
66 | 66 | ||
67 | extern int of_platform_device_destroy(struct device *dev, void *data); | ||
67 | extern int of_platform_bus_probe(struct device_node *root, | 68 | extern int of_platform_bus_probe(struct device_node *root, |
68 | const struct of_device_id *matches, | 69 | const struct of_device_id *matches, |
69 | struct device *parent); | 70 | struct device *parent); |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 33c2b0b77429..8039f9f0ca05 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -183,6 +183,11 @@ enum pci_dev_flags { | |||
183 | PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), | 183 | PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), |
184 | /* Do not use FLR even if device advertises PCI_AF_CAP */ | 184 | /* Do not use FLR even if device advertises PCI_AF_CAP */ |
185 | PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), | 185 | PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), |
186 | /* | ||
187 | * Resume before calling the driver's system suspend hooks, disabling | ||
188 | * the direct_complete optimization. | ||
189 | */ | ||
190 | PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11), | ||
186 | }; | 191 | }; |
187 | 192 | ||
188 | enum pci_irq_reroute_variant { | 193 | enum pci_irq_reroute_variant { |
@@ -1342,9 +1347,9 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
1342 | unsigned int max_vecs, unsigned int flags, | 1347 | unsigned int max_vecs, unsigned int flags, |
1343 | const struct irq_affinity *aff_desc) | 1348 | const struct irq_affinity *aff_desc) |
1344 | { | 1349 | { |
1345 | if (min_vecs > 1) | 1350 | if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) |
1346 | return -EINVAL; | 1351 | return 1; |
1347 | return 1; | 1352 | return -ENOSPC; |
1348 | } | 1353 | } |
1349 | 1354 | ||
1350 | static inline void pci_free_irq_vectors(struct pci_dev *dev) | 1355 | static inline void pci_free_irq_vectors(struct pci_dev *dev) |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 422bc2e4cb6a..ef3eb8bbfee4 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request, | |||
54 | unsigned long addr, unsigned long data); | 54 | unsigned long addr, unsigned long data); |
55 | extern void ptrace_notify(int exit_code); | 55 | extern void ptrace_notify(int exit_code); |
56 | extern void __ptrace_link(struct task_struct *child, | 56 | extern void __ptrace_link(struct task_struct *child, |
57 | struct task_struct *new_parent); | 57 | struct task_struct *new_parent, |
58 | const struct cred *ptracer_cred); | ||
58 | extern void __ptrace_unlink(struct task_struct *child); | 59 | extern void __ptrace_unlink(struct task_struct *child); |
59 | extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); | 60 | extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); |
60 | #define PTRACE_MODE_READ 0x01 | 61 | #define PTRACE_MODE_READ 0x01 |
@@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) | |||
206 | 207 | ||
207 | if (unlikely(ptrace) && current->ptrace) { | 208 | if (unlikely(ptrace) && current->ptrace) { |
208 | child->ptrace = current->ptrace; | 209 | child->ptrace = current->ptrace; |
209 | __ptrace_link(child, current->parent); | 210 | __ptrace_link(child, current->parent, current->ptracer_cred); |
210 | 211 | ||
211 | if (child->ptrace & PT_SEIZED) | 212 | if (child->ptrace & PT_SEIZED) |
212 | task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); | 213 | task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); |
@@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) | |||
215 | 216 | ||
216 | set_tsk_thread_flag(child, TIF_SIGPENDING); | 217 | set_tsk_thread_flag(child, TIF_SIGPENDING); |
217 | } | 218 | } |
219 | else | ||
220 | child->ptracer_cred = NULL; | ||
218 | } | 221 | } |
219 | 222 | ||
220 | /** | 223 | /** |
diff --git a/include/linux/serdev.h b/include/linux/serdev.h index cda76c6506ca..e69402d4a8ae 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h | |||
@@ -195,6 +195,7 @@ int serdev_device_open(struct serdev_device *); | |||
195 | void serdev_device_close(struct serdev_device *); | 195 | void serdev_device_close(struct serdev_device *); |
196 | unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); | 196 | unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); |
197 | void serdev_device_set_flow_control(struct serdev_device *, bool); | 197 | void serdev_device_set_flow_control(struct serdev_device *, bool); |
198 | int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); | ||
198 | void serdev_device_wait_until_sent(struct serdev_device *, long); | 199 | void serdev_device_wait_until_sent(struct serdev_device *, long); |
199 | int serdev_device_get_tiocm(struct serdev_device *); | 200 | int serdev_device_get_tiocm(struct serdev_device *); |
200 | int serdev_device_set_tiocm(struct serdev_device *, int, int); | 201 | int serdev_device_set_tiocm(struct serdev_device *, int, int); |
@@ -236,6 +237,12 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev | |||
236 | return 0; | 237 | return 0; |
237 | } | 238 | } |
238 | static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} | 239 | static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} |
240 | static inline int serdev_device_write_buf(struct serdev_device *serdev, | ||
241 | const unsigned char *buf, | ||
242 | size_t count) | ||
243 | { | ||
244 | return -ENODEV; | ||
245 | } | ||
239 | static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} | 246 | static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} |
240 | static inline int serdev_device_get_tiocm(struct serdev_device *serdev) | 247 | static inline int serdev_device_get_tiocm(struct serdev_device *serdev) |
241 | { | 248 | { |
@@ -301,7 +308,7 @@ struct tty_driver; | |||
301 | struct device *serdev_tty_port_register(struct tty_port *port, | 308 | struct device *serdev_tty_port_register(struct tty_port *port, |
302 | struct device *parent, | 309 | struct device *parent, |
303 | struct tty_driver *drv, int idx); | 310 | struct tty_driver *drv, int idx); |
304 | void serdev_tty_port_unregister(struct tty_port *port); | 311 | int serdev_tty_port_unregister(struct tty_port *port); |
305 | #else | 312 | #else |
306 | static inline struct device *serdev_tty_port_register(struct tty_port *port, | 313 | static inline struct device *serdev_tty_port_register(struct tty_port *port, |
307 | struct device *parent, | 314 | struct device *parent, |
@@ -309,14 +316,10 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port, | |||
309 | { | 316 | { |
310 | return ERR_PTR(-ENODEV); | 317 | return ERR_PTR(-ENODEV); |
311 | } | 318 | } |
312 | static inline void serdev_tty_port_unregister(struct tty_port *port) {} | 319 | static inline int serdev_tty_port_unregister(struct tty_port *port) |
313 | #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ | ||
314 | |||
315 | static inline int serdev_device_write_buf(struct serdev_device *serdev, | ||
316 | const unsigned char *data, | ||
317 | size_t count) | ||
318 | { | 320 | { |
319 | return serdev_device_write(serdev, data, count, 0); | 321 | return -ENODEV; |
320 | } | 322 | } |
323 | #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ | ||
321 | 324 | ||
322 | #endif /*_LINUX_SERDEV_H */ | 325 | #endif /*_LINUX_SERDEV_H */ |
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h index a18e0783946b..787e7ad53d45 100644 --- a/include/linux/soc/renesas/rcar-rst.h +++ b/include/linux/soc/renesas/rcar-rst.h | |||
@@ -1,6 +1,11 @@ | |||
1 | #ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ | 1 | #ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ |
2 | #define __LINUX_SOC_RENESAS_RCAR_RST_H__ | 2 | #define __LINUX_SOC_RENESAS_RCAR_RST_H__ |
3 | 3 | ||
4 | #if defined(CONFIG_ARCH_RCAR_GEN1) || defined(CONFIG_ARCH_RCAR_GEN2) || \ | ||
5 | defined(CONFIG_ARCH_R8A7795) || defined(CONFIG_ARCH_R8A7796) | ||
4 | int rcar_rst_read_mode_pins(u32 *mode); | 6 | int rcar_rst_read_mode_pins(u32 *mode); |
7 | #else | ||
8 | static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; } | ||
9 | #endif | ||
5 | 10 | ||
6 | #endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ | 11 | #endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ |
diff --git a/include/linux/tty.h b/include/linux/tty.h index d07cd2105a6c..eccb4ec30a8a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -558,6 +558,15 @@ extern struct device *tty_port_register_device_attr(struct tty_port *port, | |||
558 | struct tty_driver *driver, unsigned index, | 558 | struct tty_driver *driver, unsigned index, |
559 | struct device *device, void *drvdata, | 559 | struct device *device, void *drvdata, |
560 | const struct attribute_group **attr_grp); | 560 | const struct attribute_group **attr_grp); |
561 | extern struct device *tty_port_register_device_serdev(struct tty_port *port, | ||
562 | struct tty_driver *driver, unsigned index, | ||
563 | struct device *device); | ||
564 | extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port, | ||
565 | struct tty_driver *driver, unsigned index, | ||
566 | struct device *device, void *drvdata, | ||
567 | const struct attribute_group **attr_grp); | ||
568 | extern void tty_port_unregister_device(struct tty_port *port, | ||
569 | struct tty_driver *driver, unsigned index); | ||
561 | extern int tty_port_alloc_xmit_buf(struct tty_port *port); | 570 | extern int tty_port_alloc_xmit_buf(struct tty_port *port); |
562 | extern void tty_port_free_xmit_buf(struct tty_port *port); | 571 | extern void tty_port_free_xmit_buf(struct tty_port *port); |
563 | extern void tty_port_destroy(struct tty_port *port); | 572 | extern void tty_port_destroy(struct tty_port *port); |
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index a469999a106d..50398b69ca44 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
@@ -148,6 +148,7 @@ struct usb_hcd { | |||
148 | unsigned rh_registered:1;/* is root hub registered? */ | 148 | unsigned rh_registered:1;/* is root hub registered? */ |
149 | unsigned rh_pollable:1; /* may we poll the root hub? */ | 149 | unsigned rh_pollable:1; /* may we poll the root hub? */ |
150 | unsigned msix_enabled:1; /* driver has MSI-X enabled? */ | 150 | unsigned msix_enabled:1; /* driver has MSI-X enabled? */ |
151 | unsigned msi_enabled:1; /* driver has MSI enabled? */ | ||
151 | unsigned remove_phy:1; /* auto-remove USB phy */ | 152 | unsigned remove_phy:1; /* auto-remove USB phy */ |
152 | 153 | ||
153 | /* The next flag is a stopgap, to be removed when all the HCDs | 154 | /* The next flag is a stopgap, to be removed when all the HCDs |
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 7dffa5624ea6..97116379db5f 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
@@ -206,6 +206,7 @@ struct cdc_state { | |||
206 | }; | 206 | }; |
207 | 207 | ||
208 | extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); | 208 | extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); |
209 | extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf); | ||
209 | extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); | 210 | extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); |
210 | extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); | 211 | extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); |
211 | extern void usbnet_cdc_status(struct usbnet *, struct urb *); | 212 | extern void usbnet_cdc_status(struct usbnet *, struct urb *); |
diff --git a/include/net/dst.h b/include/net/dst.h index 049af33da3b6..cfc043784166 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -107,10 +107,16 @@ struct dst_entry { | |||
107 | }; | 107 | }; |
108 | }; | 108 | }; |
109 | 109 | ||
110 | struct dst_metrics { | ||
111 | u32 metrics[RTAX_MAX]; | ||
112 | atomic_t refcnt; | ||
113 | }; | ||
114 | extern const struct dst_metrics dst_default_metrics; | ||
115 | |||
110 | u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); | 116 | u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); |
111 | extern const u32 dst_default_metrics[]; | ||
112 | 117 | ||
113 | #define DST_METRICS_READ_ONLY 0x1UL | 118 | #define DST_METRICS_READ_ONLY 0x1UL |
119 | #define DST_METRICS_REFCOUNTED 0x2UL | ||
114 | #define DST_METRICS_FLAGS 0x3UL | 120 | #define DST_METRICS_FLAGS 0x3UL |
115 | #define __DST_METRICS_PTR(Y) \ | 121 | #define __DST_METRICS_PTR(Y) \ |
116 | ((u32 *)((Y) & ~DST_METRICS_FLAGS)) | 122 | ((u32 *)((Y) & ~DST_METRICS_FLAGS)) |
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 6692c5758b33..f7f6aa789c61 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -114,11 +114,11 @@ struct fib_info { | |||
114 | __be32 fib_prefsrc; | 114 | __be32 fib_prefsrc; |
115 | u32 fib_tb_id; | 115 | u32 fib_tb_id; |
116 | u32 fib_priority; | 116 | u32 fib_priority; |
117 | u32 *fib_metrics; | 117 | struct dst_metrics *fib_metrics; |
118 | #define fib_mtu fib_metrics[RTAX_MTU-1] | 118 | #define fib_mtu fib_metrics->metrics[RTAX_MTU-1] |
119 | #define fib_window fib_metrics[RTAX_WINDOW-1] | 119 | #define fib_window fib_metrics->metrics[RTAX_WINDOW-1] |
120 | #define fib_rtt fib_metrics[RTAX_RTT-1] | 120 | #define fib_rtt fib_metrics->metrics[RTAX_RTT-1] |
121 | #define fib_advmss fib_metrics[RTAX_ADVMSS-1] | 121 | #define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1] |
122 | int fib_nhs; | 122 | int fib_nhs; |
123 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 123 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
124 | int fib_weight; | 124 | int fib_weight; |
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index e04fa7691e5d..c519bb5b5bb8 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #ifndef _NF_CONNTRACK_HELPER_H | 10 | #ifndef _NF_CONNTRACK_HELPER_H |
11 | #define _NF_CONNTRACK_HELPER_H | 11 | #define _NF_CONNTRACK_HELPER_H |
12 | #include <linux/refcount.h> | ||
12 | #include <net/netfilter/nf_conntrack.h> | 13 | #include <net/netfilter/nf_conntrack.h> |
13 | #include <net/netfilter/nf_conntrack_extend.h> | 14 | #include <net/netfilter/nf_conntrack_extend.h> |
14 | #include <net/netfilter/nf_conntrack_expect.h> | 15 | #include <net/netfilter/nf_conntrack_expect.h> |
@@ -26,6 +27,7 @@ struct nf_conntrack_helper { | |||
26 | struct hlist_node hnode; /* Internal use. */ | 27 | struct hlist_node hnode; /* Internal use. */ |
27 | 28 | ||
28 | char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */ | 29 | char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */ |
30 | refcount_t refcnt; | ||
29 | struct module *me; /* pointer to self */ | 31 | struct module *me; /* pointer to self */ |
30 | const struct nf_conntrack_expect_policy *expect_policy; | 32 | const struct nf_conntrack_expect_policy *expect_policy; |
31 | 33 | ||
@@ -79,6 +81,8 @@ struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name, | |||
79 | struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name, | 81 | struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name, |
80 | u16 l3num, | 82 | u16 l3num, |
81 | u8 protonum); | 83 | u8 protonum); |
84 | void nf_conntrack_helper_put(struct nf_conntrack_helper *helper); | ||
85 | |||
82 | void nf_ct_helper_init(struct nf_conntrack_helper *helper, | 86 | void nf_ct_helper_init(struct nf_conntrack_helper *helper, |
83 | u16 l3num, u16 protonum, const char *name, | 87 | u16 l3num, u16 protonum, const char *name, |
84 | u16 default_port, u16 spec_port, u32 id, | 88 | u16 default_port, u16 spec_port, u32 id, |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 028faec8fc27..8a8bab8d7b15 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -176,7 +176,7 @@ struct nft_data_desc { | |||
176 | int nft_data_init(const struct nft_ctx *ctx, | 176 | int nft_data_init(const struct nft_ctx *ctx, |
177 | struct nft_data *data, unsigned int size, | 177 | struct nft_data *data, unsigned int size, |
178 | struct nft_data_desc *desc, const struct nlattr *nla); | 178 | struct nft_data_desc *desc, const struct nlattr *nla); |
179 | void nft_data_uninit(const struct nft_data *data, enum nft_data_types type); | 179 | void nft_data_release(const struct nft_data *data, enum nft_data_types type); |
180 | int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, | 180 | int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, |
181 | enum nft_data_types type, unsigned int len); | 181 | enum nft_data_types type, unsigned int len); |
182 | 182 | ||
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h index f31fb6331a53..3248beaf16b0 100644 --- a/include/net/tc_act/tc_csum.h +++ b/include/net/tc_act/tc_csum.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <net/act_api.h> | 5 | #include <net/act_api.h> |
6 | #include <linux/tc_act/tc_csum.h> | ||
6 | 7 | ||
7 | struct tcf_csum { | 8 | struct tcf_csum { |
8 | struct tc_action common; | 9 | struct tc_action common; |
@@ -11,4 +12,18 @@ struct tcf_csum { | |||
11 | }; | 12 | }; |
12 | #define to_tcf_csum(a) ((struct tcf_csum *)a) | 13 | #define to_tcf_csum(a) ((struct tcf_csum *)a) |
13 | 14 | ||
15 | static inline bool is_tcf_csum(const struct tc_action *a) | ||
16 | { | ||
17 | #ifdef CONFIG_NET_CLS_ACT | ||
18 | if (a->ops && a->ops->type == TCA_ACT_CSUM) | ||
19 | return true; | ||
20 | #endif | ||
21 | return false; | ||
22 | } | ||
23 | |||
24 | static inline u32 tcf_csum_update_flags(const struct tc_action *a) | ||
25 | { | ||
26 | return to_tcf_csum(a)->update_flags; | ||
27 | } | ||
28 | |||
14 | #endif /* __NET_TC_CSUM_H */ | 29 | #endif /* __NET_TC_CSUM_H */ |
diff --git a/include/net/x25.h b/include/net/x25.h index c383aa4edbf0..6d30a01d281d 100644 --- a/include/net/x25.h +++ b/include/net/x25.h | |||
@@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *); | |||
298 | 298 | ||
299 | /* sysctl_net_x25.c */ | 299 | /* sysctl_net_x25.c */ |
300 | #ifdef CONFIG_SYSCTL | 300 | #ifdef CONFIG_SYSCTL |
301 | void x25_register_sysctl(void); | 301 | int x25_register_sysctl(void); |
302 | void x25_unregister_sysctl(void); | 302 | void x25_unregister_sysctl(void); |
303 | #else | 303 | #else |
304 | static inline void x25_register_sysctl(void) {}; | 304 | static inline int x25_register_sysctl(void) { return 0; }; |
305 | static inline void x25_unregister_sysctl(void) {}; | 305 | static inline void x25_unregister_sysctl(void) {}; |
306 | #endif /* CONFIG_SYSCTL */ | 306 | #endif /* CONFIG_SYSCTL */ |
307 | 307 | ||
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6793a30c66b1..7e7e2b0d2915 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -979,10 +979,6 @@ struct xfrm_dst { | |||
979 | struct flow_cache_object flo; | 979 | struct flow_cache_object flo; |
980 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; | 980 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; |
981 | int num_pols, num_xfrms; | 981 | int num_pols, num_xfrms; |
982 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
983 | struct flowi *origin; | ||
984 | struct xfrm_selector *partner; | ||
985 | #endif | ||
986 | u32 xfrm_genid; | 982 | u32 xfrm_genid; |
987 | u32 policy_genid; | 983 | u32 policy_genid; |
988 | u32 route_mtu_cached; | 984 | u32 route_mtu_cached; |
@@ -998,12 +994,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) | |||
998 | dst_release(xdst->route); | 994 | dst_release(xdst->route); |
999 | if (likely(xdst->u.dst.xfrm)) | 995 | if (likely(xdst->u.dst.xfrm)) |
1000 | xfrm_state_put(xdst->u.dst.xfrm); | 996 | xfrm_state_put(xdst->u.dst.xfrm); |
1001 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1002 | kfree(xdst->origin); | ||
1003 | xdst->origin = NULL; | ||
1004 | kfree(xdst->partner); | ||
1005 | xdst->partner = NULL; | ||
1006 | #endif | ||
1007 | } | 997 | } |
1008 | #endif | 998 | #endif |
1009 | 999 | ||
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 945a1f5f63c5..94dfa9def355 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -132,6 +132,13 @@ enum bpf_attach_type { | |||
132 | */ | 132 | */ |
133 | #define BPF_F_ALLOW_OVERRIDE (1U << 0) | 133 | #define BPF_F_ALLOW_OVERRIDE (1U << 0) |
134 | 134 | ||
135 | /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the | ||
136 | * verifier will perform strict alignment checking as if the kernel | ||
137 | * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, | ||
138 | * and NET_IP_ALIGN defined to 2. | ||
139 | */ | ||
140 | #define BPF_F_STRICT_ALIGNMENT (1U << 0) | ||
141 | |||
135 | #define BPF_PSEUDO_MAP_FD 1 | 142 | #define BPF_PSEUDO_MAP_FD 1 |
136 | 143 | ||
137 | /* flags for BPF_MAP_UPDATE_ELEM command */ | 144 | /* flags for BPF_MAP_UPDATE_ELEM command */ |
@@ -177,6 +184,7 @@ union bpf_attr { | |||
177 | __u32 log_size; /* size of user buffer */ | 184 | __u32 log_size; /* size of user buffer */ |
178 | __aligned_u64 log_buf; /* user supplied buffer */ | 185 | __aligned_u64 log_buf; /* user supplied buffer */ |
179 | __u32 kern_version; /* checked when prog_type=kprobe */ | 186 | __u32 kern_version; /* checked when prog_type=kprobe */ |
187 | __u32 prog_flags; | ||
180 | }; | 188 | }; |
181 | 189 | ||
182 | struct { /* anonymous struct used by BPF_OBJ_* commands */ | 190 | struct { /* anonymous struct used by BPF_OBJ_* commands */ |
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 8e56ac70e0d1..15ac20382aba 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h | |||
@@ -888,9 +888,18 @@ enum { | |||
888 | /* XDP section */ | 888 | /* XDP section */ |
889 | 889 | ||
890 | #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) | 890 | #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) |
891 | #define XDP_FLAGS_SKB_MODE (2U << 0) | 891 | #define XDP_FLAGS_SKB_MODE (1U << 1) |
892 | #define XDP_FLAGS_DRV_MODE (1U << 2) | ||
892 | #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ | 893 | #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ |
893 | XDP_FLAGS_SKB_MODE) | 894 | XDP_FLAGS_SKB_MODE | \ |
895 | XDP_FLAGS_DRV_MODE) | ||
896 | |||
897 | /* These are stored into IFLA_XDP_ATTACHED on dump. */ | ||
898 | enum { | ||
899 | XDP_ATTACHED_NONE = 0, | ||
900 | XDP_ATTACHED_DRV, | ||
901 | XDP_ATTACHED_SKB, | ||
902 | }; | ||
894 | 903 | ||
895 | enum { | 904 | enum { |
896 | IFLA_XDP_UNSPEC, | 905 | IFLA_XDP_UNSPEC, |
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h index 361297e96f58..576c704e3fb8 100644 --- a/include/uapi/linux/usb/ch11.h +++ b/include/uapi/linux/usb/ch11.h | |||
@@ -22,6 +22,9 @@ | |||
22 | */ | 22 | */ |
23 | #define USB_MAXCHILDREN 31 | 23 | #define USB_MAXCHILDREN 31 |
24 | 24 | ||
25 | /* See USB 3.1 spec Table 10-5 */ | ||
26 | #define USB_SS_MAXPORTS 15 | ||
27 | |||
25 | /* | 28 | /* |
26 | * Hub request types | 29 | * Hub request types |
27 | */ | 30 | */ |
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 5e00b2333c26..172dc8ee0e3b 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c | |||
@@ -86,6 +86,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
86 | array->map.key_size = attr->key_size; | 86 | array->map.key_size = attr->key_size; |
87 | array->map.value_size = attr->value_size; | 87 | array->map.value_size = attr->value_size; |
88 | array->map.max_entries = attr->max_entries; | 88 | array->map.max_entries = attr->max_entries; |
89 | array->map.map_flags = attr->map_flags; | ||
89 | array->elem_size = elem_size; | 90 | array->elem_size = elem_size; |
90 | 91 | ||
91 | if (!percpu) | 92 | if (!percpu) |
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 39cfafd895b8..b09185f0f17d 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c | |||
@@ -432,6 +432,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) | |||
432 | trie->map.key_size = attr->key_size; | 432 | trie->map.key_size = attr->key_size; |
433 | trie->map.value_size = attr->value_size; | 433 | trie->map.value_size = attr->value_size; |
434 | trie->map.max_entries = attr->max_entries; | 434 | trie->map.max_entries = attr->max_entries; |
435 | trie->map.map_flags = attr->map_flags; | ||
435 | trie->data_size = attr->key_size - | 436 | trie->data_size = attr->key_size - |
436 | offsetof(struct bpf_lpm_trie_key, data); | 437 | offsetof(struct bpf_lpm_trie_key, data); |
437 | trie->max_prefixlen = trie->data_size * 8; | 438 | trie->max_prefixlen = trie->data_size * 8; |
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 4dfd6f2ec2f9..31147d730abf 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c | |||
@@ -88,6 +88,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) | |||
88 | smap->map.key_size = attr->key_size; | 88 | smap->map.key_size = attr->key_size; |
89 | smap->map.value_size = value_size; | 89 | smap->map.value_size = value_size; |
90 | smap->map.max_entries = attr->max_entries; | 90 | smap->map.max_entries = attr->max_entries; |
91 | smap->map.map_flags = attr->map_flags; | ||
91 | smap->n_buckets = n_buckets; | 92 | smap->n_buckets = n_buckets; |
92 | smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; | 93 | smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; |
93 | 94 | ||
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index fd2411fd6914..265a0d854e33 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -783,7 +783,7 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) | |||
783 | EXPORT_SYMBOL_GPL(bpf_prog_get_type); | 783 | EXPORT_SYMBOL_GPL(bpf_prog_get_type); |
784 | 784 | ||
785 | /* last field in 'union bpf_attr' used by this command */ | 785 | /* last field in 'union bpf_attr' used by this command */ |
786 | #define BPF_PROG_LOAD_LAST_FIELD kern_version | 786 | #define BPF_PROG_LOAD_LAST_FIELD prog_flags |
787 | 787 | ||
788 | static int bpf_prog_load(union bpf_attr *attr) | 788 | static int bpf_prog_load(union bpf_attr *attr) |
789 | { | 789 | { |
@@ -796,6 +796,9 @@ static int bpf_prog_load(union bpf_attr *attr) | |||
796 | if (CHECK_ATTR(BPF_PROG_LOAD)) | 796 | if (CHECK_ATTR(BPF_PROG_LOAD)) |
797 | return -EINVAL; | 797 | return -EINVAL; |
798 | 798 | ||
799 | if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT) | ||
800 | return -EINVAL; | ||
801 | |||
799 | /* copy eBPF program license from user space */ | 802 | /* copy eBPF program license from user space */ |
800 | if (strncpy_from_user(license, u64_to_user_ptr(attr->license), | 803 | if (strncpy_from_user(license, u64_to_user_ptr(attr->license), |
801 | sizeof(license) - 1) < 0) | 804 | sizeof(license) - 1) < 0) |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c5b56c92f8e2..339c8a1371de 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -140,7 +140,7 @@ struct bpf_verifier_stack_elem { | |||
140 | struct bpf_verifier_stack_elem *next; | 140 | struct bpf_verifier_stack_elem *next; |
141 | }; | 141 | }; |
142 | 142 | ||
143 | #define BPF_COMPLEXITY_LIMIT_INSNS 65536 | 143 | #define BPF_COMPLEXITY_LIMIT_INSNS 98304 |
144 | #define BPF_COMPLEXITY_LIMIT_STACK 1024 | 144 | #define BPF_COMPLEXITY_LIMIT_STACK 1024 |
145 | 145 | ||
146 | #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) | 146 | #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) |
@@ -241,6 +241,12 @@ static void print_verifier_state(struct bpf_verifier_state *state) | |||
241 | if (reg->max_value != BPF_REGISTER_MAX_RANGE) | 241 | if (reg->max_value != BPF_REGISTER_MAX_RANGE) |
242 | verbose(",max_value=%llu", | 242 | verbose(",max_value=%llu", |
243 | (unsigned long long)reg->max_value); | 243 | (unsigned long long)reg->max_value); |
244 | if (reg->min_align) | ||
245 | verbose(",min_align=%u", reg->min_align); | ||
246 | if (reg->aux_off) | ||
247 | verbose(",aux_off=%u", reg->aux_off); | ||
248 | if (reg->aux_off_align) | ||
249 | verbose(",aux_off_align=%u", reg->aux_off_align); | ||
244 | } | 250 | } |
245 | for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { | 251 | for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { |
246 | if (state->stack_slot_type[i] == STACK_SPILL) | 252 | if (state->stack_slot_type[i] == STACK_SPILL) |
@@ -457,16 +463,22 @@ static const int caller_saved[CALLER_SAVED_REGS] = { | |||
457 | BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 | 463 | BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 |
458 | }; | 464 | }; |
459 | 465 | ||
466 | static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno) | ||
467 | { | ||
468 | BUG_ON(regno >= MAX_BPF_REG); | ||
469 | |||
470 | memset(®s[regno], 0, sizeof(regs[regno])); | ||
471 | regs[regno].type = NOT_INIT; | ||
472 | regs[regno].min_value = BPF_REGISTER_MIN_RANGE; | ||
473 | regs[regno].max_value = BPF_REGISTER_MAX_RANGE; | ||
474 | } | ||
475 | |||
460 | static void init_reg_state(struct bpf_reg_state *regs) | 476 | static void init_reg_state(struct bpf_reg_state *regs) |
461 | { | 477 | { |
462 | int i; | 478 | int i; |
463 | 479 | ||
464 | for (i = 0; i < MAX_BPF_REG; i++) { | 480 | for (i = 0; i < MAX_BPF_REG; i++) |
465 | regs[i].type = NOT_INIT; | 481 | mark_reg_not_init(regs, i); |
466 | regs[i].imm = 0; | ||
467 | regs[i].min_value = BPF_REGISTER_MIN_RANGE; | ||
468 | regs[i].max_value = BPF_REGISTER_MAX_RANGE; | ||
469 | } | ||
470 | 482 | ||
471 | /* frame pointer */ | 483 | /* frame pointer */ |
472 | regs[BPF_REG_FP].type = FRAME_PTR; | 484 | regs[BPF_REG_FP].type = FRAME_PTR; |
@@ -492,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) | |||
492 | { | 504 | { |
493 | regs[regno].min_value = BPF_REGISTER_MIN_RANGE; | 505 | regs[regno].min_value = BPF_REGISTER_MIN_RANGE; |
494 | regs[regno].max_value = BPF_REGISTER_MAX_RANGE; | 506 | regs[regno].max_value = BPF_REGISTER_MAX_RANGE; |
507 | regs[regno].min_align = 0; | ||
495 | } | 508 | } |
496 | 509 | ||
497 | static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs, | 510 | static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs, |
@@ -779,17 +792,37 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno) | |||
779 | } | 792 | } |
780 | 793 | ||
781 | static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, | 794 | static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, |
782 | int off, int size) | 795 | int off, int size, bool strict) |
783 | { | 796 | { |
784 | if (reg->id && size != 1) { | 797 | int ip_align; |
785 | verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n"); | 798 | int reg_off; |
786 | return -EACCES; | 799 | |
800 | /* Byte size accesses are always allowed. */ | ||
801 | if (!strict || size == 1) | ||
802 | return 0; | ||
803 | |||
804 | reg_off = reg->off; | ||
805 | if (reg->id) { | ||
806 | if (reg->aux_off_align % size) { | ||
807 | verbose("Packet access is only %u byte aligned, %d byte access not allowed\n", | ||
808 | reg->aux_off_align, size); | ||
809 | return -EACCES; | ||
810 | } | ||
811 | reg_off += reg->aux_off; | ||
787 | } | 812 | } |
788 | 813 | ||
789 | /* skb->data is NET_IP_ALIGN-ed */ | 814 | /* For platforms that do not have a Kconfig enabling |
790 | if ((NET_IP_ALIGN + reg->off + off) % size != 0) { | 815 | * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of |
816 | * NET_IP_ALIGN is universally set to '2'. And on platforms | ||
817 | * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get | ||
818 | * to this code only in strict mode where we want to emulate | ||
819 | * the NET_IP_ALIGN==2 checking. Therefore use an | ||
820 | * unconditional IP align value of '2'. | ||
821 | */ | ||
822 | ip_align = 2; | ||
823 | if ((ip_align + reg_off + off) % size != 0) { | ||
791 | verbose("misaligned packet access off %d+%d+%d size %d\n", | 824 | verbose("misaligned packet access off %d+%d+%d size %d\n", |
792 | NET_IP_ALIGN, reg->off, off, size); | 825 | ip_align, reg_off, off, size); |
793 | return -EACCES; | 826 | return -EACCES; |
794 | } | 827 | } |
795 | 828 | ||
@@ -797,9 +830,9 @@ static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, | |||
797 | } | 830 | } |
798 | 831 | ||
799 | static int check_val_ptr_alignment(const struct bpf_reg_state *reg, | 832 | static int check_val_ptr_alignment(const struct bpf_reg_state *reg, |
800 | int size) | 833 | int size, bool strict) |
801 | { | 834 | { |
802 | if (size != 1) { | 835 | if (strict && size != 1) { |
803 | verbose("Unknown alignment. Only byte-sized access allowed in value access.\n"); | 836 | verbose("Unknown alignment. Only byte-sized access allowed in value access.\n"); |
804 | return -EACCES; | 837 | return -EACCES; |
805 | } | 838 | } |
@@ -807,16 +840,17 @@ static int check_val_ptr_alignment(const struct bpf_reg_state *reg, | |||
807 | return 0; | 840 | return 0; |
808 | } | 841 | } |
809 | 842 | ||
810 | static int check_ptr_alignment(const struct bpf_reg_state *reg, | 843 | static int check_ptr_alignment(struct bpf_verifier_env *env, |
844 | const struct bpf_reg_state *reg, | ||
811 | int off, int size) | 845 | int off, int size) |
812 | { | 846 | { |
847 | bool strict = env->strict_alignment; | ||
848 | |||
813 | switch (reg->type) { | 849 | switch (reg->type) { |
814 | case PTR_TO_PACKET: | 850 | case PTR_TO_PACKET: |
815 | return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 : | 851 | return check_pkt_ptr_alignment(reg, off, size, strict); |
816 | check_pkt_ptr_alignment(reg, off, size); | ||
817 | case PTR_TO_MAP_VALUE_ADJ: | 852 | case PTR_TO_MAP_VALUE_ADJ: |
818 | return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 : | 853 | return check_val_ptr_alignment(reg, size, strict); |
819 | check_val_ptr_alignment(reg, size); | ||
820 | default: | 854 | default: |
821 | if (off % size != 0) { | 855 | if (off % size != 0) { |
822 | verbose("misaligned access off %d size %d\n", | 856 | verbose("misaligned access off %d size %d\n", |
@@ -849,7 +883,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, | |||
849 | if (size < 0) | 883 | if (size < 0) |
850 | return size; | 884 | return size; |
851 | 885 | ||
852 | err = check_ptr_alignment(reg, off, size); | 886 | err = check_ptr_alignment(env, reg, off, size); |
853 | if (err) | 887 | if (err) |
854 | return err; | 888 | return err; |
855 | 889 | ||
@@ -883,6 +917,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, | |||
883 | value_regno); | 917 | value_regno); |
884 | /* note that reg.[id|off|range] == 0 */ | 918 | /* note that reg.[id|off|range] == 0 */ |
885 | state->regs[value_regno].type = reg_type; | 919 | state->regs[value_regno].type = reg_type; |
920 | state->regs[value_regno].aux_off = 0; | ||
921 | state->regs[value_regno].aux_off_align = 0; | ||
886 | } | 922 | } |
887 | 923 | ||
888 | } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { | 924 | } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { |
@@ -1313,7 +1349,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) | |||
1313 | struct bpf_verifier_state *state = &env->cur_state; | 1349 | struct bpf_verifier_state *state = &env->cur_state; |
1314 | const struct bpf_func_proto *fn = NULL; | 1350 | const struct bpf_func_proto *fn = NULL; |
1315 | struct bpf_reg_state *regs = state->regs; | 1351 | struct bpf_reg_state *regs = state->regs; |
1316 | struct bpf_reg_state *reg; | ||
1317 | struct bpf_call_arg_meta meta; | 1352 | struct bpf_call_arg_meta meta; |
1318 | bool changes_data; | 1353 | bool changes_data; |
1319 | int i, err; | 1354 | int i, err; |
@@ -1380,11 +1415,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) | |||
1380 | } | 1415 | } |
1381 | 1416 | ||
1382 | /* reset caller saved regs */ | 1417 | /* reset caller saved regs */ |
1383 | for (i = 0; i < CALLER_SAVED_REGS; i++) { | 1418 | for (i = 0; i < CALLER_SAVED_REGS; i++) |
1384 | reg = regs + caller_saved[i]; | 1419 | mark_reg_not_init(regs, caller_saved[i]); |
1385 | reg->type = NOT_INIT; | ||
1386 | reg->imm = 0; | ||
1387 | } | ||
1388 | 1420 | ||
1389 | /* update return register */ | 1421 | /* update return register */ |
1390 | if (fn->ret_type == RET_INTEGER) { | 1422 | if (fn->ret_type == RET_INTEGER) { |
@@ -1455,6 +1487,8 @@ add_imm: | |||
1455 | */ | 1487 | */ |
1456 | dst_reg->off += imm; | 1488 | dst_reg->off += imm; |
1457 | } else { | 1489 | } else { |
1490 | bool had_id; | ||
1491 | |||
1458 | if (src_reg->type == PTR_TO_PACKET) { | 1492 | if (src_reg->type == PTR_TO_PACKET) { |
1459 | /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ | 1493 | /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ |
1460 | tmp_reg = *dst_reg; /* save r7 state */ | 1494 | tmp_reg = *dst_reg; /* save r7 state */ |
@@ -1488,14 +1522,23 @@ add_imm: | |||
1488 | src_reg->imm); | 1522 | src_reg->imm); |
1489 | return -EACCES; | 1523 | return -EACCES; |
1490 | } | 1524 | } |
1525 | |||
1526 | had_id = (dst_reg->id != 0); | ||
1527 | |||
1491 | /* dst_reg stays as pkt_ptr type and since some positive | 1528 | /* dst_reg stays as pkt_ptr type and since some positive |
1492 | * integer value was added to the pointer, increment its 'id' | 1529 | * integer value was added to the pointer, increment its 'id' |
1493 | */ | 1530 | */ |
1494 | dst_reg->id = ++env->id_gen; | 1531 | dst_reg->id = ++env->id_gen; |
1495 | 1532 | ||
1496 | /* something was added to pkt_ptr, set range and off to zero */ | 1533 | /* something was added to pkt_ptr, set range to zero */ |
1534 | dst_reg->aux_off += dst_reg->off; | ||
1497 | dst_reg->off = 0; | 1535 | dst_reg->off = 0; |
1498 | dst_reg->range = 0; | 1536 | dst_reg->range = 0; |
1537 | if (had_id) | ||
1538 | dst_reg->aux_off_align = min(dst_reg->aux_off_align, | ||
1539 | src_reg->min_align); | ||
1540 | else | ||
1541 | dst_reg->aux_off_align = src_reg->min_align; | ||
1499 | } | 1542 | } |
1500 | return 0; | 1543 | return 0; |
1501 | } | 1544 | } |
@@ -1669,6 +1712,13 @@ static void check_reg_overflow(struct bpf_reg_state *reg) | |||
1669 | reg->min_value = BPF_REGISTER_MIN_RANGE; | 1712 | reg->min_value = BPF_REGISTER_MIN_RANGE; |
1670 | } | 1713 | } |
1671 | 1714 | ||
1715 | static u32 calc_align(u32 imm) | ||
1716 | { | ||
1717 | if (!imm) | ||
1718 | return 1U << 31; | ||
1719 | return imm - ((imm - 1) & imm); | ||
1720 | } | ||
1721 | |||
1672 | static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | 1722 | static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, |
1673 | struct bpf_insn *insn) | 1723 | struct bpf_insn *insn) |
1674 | { | 1724 | { |
@@ -1676,8 +1726,10 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | |||
1676 | s64 min_val = BPF_REGISTER_MIN_RANGE; | 1726 | s64 min_val = BPF_REGISTER_MIN_RANGE; |
1677 | u64 max_val = BPF_REGISTER_MAX_RANGE; | 1727 | u64 max_val = BPF_REGISTER_MAX_RANGE; |
1678 | u8 opcode = BPF_OP(insn->code); | 1728 | u8 opcode = BPF_OP(insn->code); |
1729 | u32 dst_align, src_align; | ||
1679 | 1730 | ||
1680 | dst_reg = ®s[insn->dst_reg]; | 1731 | dst_reg = ®s[insn->dst_reg]; |
1732 | src_align = 0; | ||
1681 | if (BPF_SRC(insn->code) == BPF_X) { | 1733 | if (BPF_SRC(insn->code) == BPF_X) { |
1682 | check_reg_overflow(®s[insn->src_reg]); | 1734 | check_reg_overflow(®s[insn->src_reg]); |
1683 | min_val = regs[insn->src_reg].min_value; | 1735 | min_val = regs[insn->src_reg].min_value; |
@@ -1693,12 +1745,18 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | |||
1693 | regs[insn->src_reg].type != UNKNOWN_VALUE) { | 1745 | regs[insn->src_reg].type != UNKNOWN_VALUE) { |
1694 | min_val = BPF_REGISTER_MIN_RANGE; | 1746 | min_val = BPF_REGISTER_MIN_RANGE; |
1695 | max_val = BPF_REGISTER_MAX_RANGE; | 1747 | max_val = BPF_REGISTER_MAX_RANGE; |
1748 | src_align = 0; | ||
1749 | } else { | ||
1750 | src_align = regs[insn->src_reg].min_align; | ||
1696 | } | 1751 | } |
1697 | } else if (insn->imm < BPF_REGISTER_MAX_RANGE && | 1752 | } else if (insn->imm < BPF_REGISTER_MAX_RANGE && |
1698 | (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { | 1753 | (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { |
1699 | min_val = max_val = insn->imm; | 1754 | min_val = max_val = insn->imm; |
1755 | src_align = calc_align(insn->imm); | ||
1700 | } | 1756 | } |
1701 | 1757 | ||
1758 | dst_align = dst_reg->min_align; | ||
1759 | |||
1702 | /* We don't know anything about what was done to this register, mark it | 1760 | /* We don't know anything about what was done to this register, mark it |
1703 | * as unknown. | 1761 | * as unknown. |
1704 | */ | 1762 | */ |
@@ -1723,18 +1781,21 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | |||
1723 | dst_reg->min_value += min_val; | 1781 | dst_reg->min_value += min_val; |
1724 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) | 1782 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) |
1725 | dst_reg->max_value += max_val; | 1783 | dst_reg->max_value += max_val; |
1784 | dst_reg->min_align = min(src_align, dst_align); | ||
1726 | break; | 1785 | break; |
1727 | case BPF_SUB: | 1786 | case BPF_SUB: |
1728 | if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) | 1787 | if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) |
1729 | dst_reg->min_value -= min_val; | 1788 | dst_reg->min_value -= min_val; |
1730 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) | 1789 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) |
1731 | dst_reg->max_value -= max_val; | 1790 | dst_reg->max_value -= max_val; |
1791 | dst_reg->min_align = min(src_align, dst_align); | ||
1732 | break; | 1792 | break; |
1733 | case BPF_MUL: | 1793 | case BPF_MUL: |
1734 | if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) | 1794 | if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) |
1735 | dst_reg->min_value *= min_val; | 1795 | dst_reg->min_value *= min_val; |
1736 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) | 1796 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) |
1737 | dst_reg->max_value *= max_val; | 1797 | dst_reg->max_value *= max_val; |
1798 | dst_reg->min_align = max(src_align, dst_align); | ||
1738 | break; | 1799 | break; |
1739 | case BPF_AND: | 1800 | case BPF_AND: |
1740 | /* Disallow AND'ing of negative numbers, ain't nobody got time | 1801 | /* Disallow AND'ing of negative numbers, ain't nobody got time |
@@ -1746,17 +1807,23 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | |||
1746 | else | 1807 | else |
1747 | dst_reg->min_value = 0; | 1808 | dst_reg->min_value = 0; |
1748 | dst_reg->max_value = max_val; | 1809 | dst_reg->max_value = max_val; |
1810 | dst_reg->min_align = max(src_align, dst_align); | ||
1749 | break; | 1811 | break; |
1750 | case BPF_LSH: | 1812 | case BPF_LSH: |
1751 | /* Gotta have special overflow logic here, if we're shifting | 1813 | /* Gotta have special overflow logic here, if we're shifting |
1752 | * more than MAX_RANGE then just assume we have an invalid | 1814 | * more than MAX_RANGE then just assume we have an invalid |
1753 | * range. | 1815 | * range. |
1754 | */ | 1816 | */ |
1755 | if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) | 1817 | if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) { |
1756 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; | 1818 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; |
1757 | else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) | 1819 | dst_reg->min_align = 1; |
1758 | dst_reg->min_value <<= min_val; | 1820 | } else { |
1759 | 1821 | if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) | |
1822 | dst_reg->min_value <<= min_val; | ||
1823 | if (!dst_reg->min_align) | ||
1824 | dst_reg->min_align = 1; | ||
1825 | dst_reg->min_align <<= min_val; | ||
1826 | } | ||
1760 | if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) | 1827 | if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) |
1761 | dst_reg->max_value = BPF_REGISTER_MAX_RANGE; | 1828 | dst_reg->max_value = BPF_REGISTER_MAX_RANGE; |
1762 | else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) | 1829 | else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) |
@@ -1766,11 +1833,19 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | |||
1766 | /* RSH by a negative number is undefined, and the BPF_RSH is an | 1833 | /* RSH by a negative number is undefined, and the BPF_RSH is an |
1767 | * unsigned shift, so make the appropriate casts. | 1834 | * unsigned shift, so make the appropriate casts. |
1768 | */ | 1835 | */ |
1769 | if (min_val < 0 || dst_reg->min_value < 0) | 1836 | if (min_val < 0 || dst_reg->min_value < 0) { |
1770 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; | 1837 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; |
1771 | else | 1838 | } else { |
1772 | dst_reg->min_value = | 1839 | dst_reg->min_value = |
1773 | (u64)(dst_reg->min_value) >> min_val; | 1840 | (u64)(dst_reg->min_value) >> min_val; |
1841 | } | ||
1842 | if (min_val < 0) { | ||
1843 | dst_reg->min_align = 1; | ||
1844 | } else { | ||
1845 | dst_reg->min_align >>= (u64) min_val; | ||
1846 | if (!dst_reg->min_align) | ||
1847 | dst_reg->min_align = 1; | ||
1848 | } | ||
1774 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) | 1849 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) |
1775 | dst_reg->max_value >>= max_val; | 1850 | dst_reg->max_value >>= max_val; |
1776 | break; | 1851 | break; |
@@ -1872,6 +1947,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
1872 | regs[insn->dst_reg].imm = insn->imm; | 1947 | regs[insn->dst_reg].imm = insn->imm; |
1873 | regs[insn->dst_reg].max_value = insn->imm; | 1948 | regs[insn->dst_reg].max_value = insn->imm; |
1874 | regs[insn->dst_reg].min_value = insn->imm; | 1949 | regs[insn->dst_reg].min_value = insn->imm; |
1950 | regs[insn->dst_reg].min_align = calc_align(insn->imm); | ||
1875 | } | 1951 | } |
1876 | 1952 | ||
1877 | } else if (opcode > BPF_END) { | 1953 | } else if (opcode > BPF_END) { |
@@ -2368,7 +2444,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
2368 | { | 2444 | { |
2369 | struct bpf_reg_state *regs = env->cur_state.regs; | 2445 | struct bpf_reg_state *regs = env->cur_state.regs; |
2370 | u8 mode = BPF_MODE(insn->code); | 2446 | u8 mode = BPF_MODE(insn->code); |
2371 | struct bpf_reg_state *reg; | ||
2372 | int i, err; | 2447 | int i, err; |
2373 | 2448 | ||
2374 | if (!may_access_skb(env->prog->type)) { | 2449 | if (!may_access_skb(env->prog->type)) { |
@@ -2401,11 +2476,8 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
2401 | } | 2476 | } |
2402 | 2477 | ||
2403 | /* reset caller saved regs to unreadable */ | 2478 | /* reset caller saved regs to unreadable */ |
2404 | for (i = 0; i < CALLER_SAVED_REGS; i++) { | 2479 | for (i = 0; i < CALLER_SAVED_REGS; i++) |
2405 | reg = regs + caller_saved[i]; | 2480 | mark_reg_not_init(regs, caller_saved[i]); |
2406 | reg->type = NOT_INIT; | ||
2407 | reg->imm = 0; | ||
2408 | } | ||
2409 | 2481 | ||
2410 | /* mark destination R0 register as readable, since it contains | 2482 | /* mark destination R0 register as readable, since it contains |
2411 | * the value fetched from the packet | 2483 | * the value fetched from the packet |
@@ -2564,6 +2636,7 @@ peek_stack: | |||
2564 | env->explored_states[t + 1] = STATE_LIST_MARK; | 2636 | env->explored_states[t + 1] = STATE_LIST_MARK; |
2565 | } else { | 2637 | } else { |
2566 | /* conditional jump with two edges */ | 2638 | /* conditional jump with two edges */ |
2639 | env->explored_states[t] = STATE_LIST_MARK; | ||
2567 | ret = push_insn(t, t + 1, FALLTHROUGH, env); | 2640 | ret = push_insn(t, t + 1, FALLTHROUGH, env); |
2568 | if (ret == 1) | 2641 | if (ret == 1) |
2569 | goto peek_stack; | 2642 | goto peek_stack; |
@@ -2615,7 +2688,8 @@ err_free: | |||
2615 | /* the following conditions reduce the number of explored insns | 2688 | /* the following conditions reduce the number of explored insns |
2616 | * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet | 2689 | * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet |
2617 | */ | 2690 | */ |
2618 | static bool compare_ptrs_to_packet(struct bpf_reg_state *old, | 2691 | static bool compare_ptrs_to_packet(struct bpf_verifier_env *env, |
2692 | struct bpf_reg_state *old, | ||
2619 | struct bpf_reg_state *cur) | 2693 | struct bpf_reg_state *cur) |
2620 | { | 2694 | { |
2621 | if (old->id != cur->id) | 2695 | if (old->id != cur->id) |
@@ -2658,7 +2732,7 @@ static bool compare_ptrs_to_packet(struct bpf_reg_state *old, | |||
2658 | * 'if (R4 > data_end)' and all further insn were already good with r=20, | 2732 | * 'if (R4 > data_end)' and all further insn were already good with r=20, |
2659 | * so they will be good with r=30 and we can prune the search. | 2733 | * so they will be good with r=30 and we can prune the search. |
2660 | */ | 2734 | */ |
2661 | if (old->off <= cur->off && | 2735 | if (!env->strict_alignment && old->off <= cur->off && |
2662 | old->off >= old->range && cur->off >= cur->range) | 2736 | old->off >= old->range && cur->off >= cur->range) |
2663 | return true; | 2737 | return true; |
2664 | 2738 | ||
@@ -2722,8 +2796,14 @@ static bool states_equal(struct bpf_verifier_env *env, | |||
2722 | rcur->type != NOT_INIT)) | 2796 | rcur->type != NOT_INIT)) |
2723 | continue; | 2797 | continue; |
2724 | 2798 | ||
2799 | /* Don't care about the reg->id in this case. */ | ||
2800 | if (rold->type == PTR_TO_MAP_VALUE_OR_NULL && | ||
2801 | rcur->type == PTR_TO_MAP_VALUE_OR_NULL && | ||
2802 | rold->map_ptr == rcur->map_ptr) | ||
2803 | continue; | ||
2804 | |||
2725 | if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && | 2805 | if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && |
2726 | compare_ptrs_to_packet(rold, rcur)) | 2806 | compare_ptrs_to_packet(env, rold, rcur)) |
2727 | continue; | 2807 | continue; |
2728 | 2808 | ||
2729 | return false; | 2809 | return false; |
@@ -2856,8 +2936,15 @@ static int do_check(struct bpf_verifier_env *env) | |||
2856 | goto process_bpf_exit; | 2936 | goto process_bpf_exit; |
2857 | } | 2937 | } |
2858 | 2938 | ||
2859 | if (log_level && do_print_state) { | 2939 | if (need_resched()) |
2860 | verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); | 2940 | cond_resched(); |
2941 | |||
2942 | if (log_level > 1 || (log_level && do_print_state)) { | ||
2943 | if (log_level > 1) | ||
2944 | verbose("%d:", insn_idx); | ||
2945 | else | ||
2946 | verbose("\nfrom %d to %d:", | ||
2947 | prev_insn_idx, insn_idx); | ||
2861 | print_verifier_state(&env->cur_state); | 2948 | print_verifier_state(&env->cur_state); |
2862 | do_print_state = false; | 2949 | do_print_state = false; |
2863 | } | 2950 | } |
@@ -3495,6 +3582,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) | |||
3495 | log_level = 0; | 3582 | log_level = 0; |
3496 | } | 3583 | } |
3497 | 3584 | ||
3585 | env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); | ||
3586 | if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) | ||
3587 | env->strict_alignment = true; | ||
3588 | |||
3498 | ret = replace_map_fd_with_map_ptr(env); | 3589 | ret = replace_map_fd_with_map_ptr(env); |
3499 | if (ret < 0) | 3590 | if (ret < 0) |
3500 | goto skip_full_check; | 3591 | goto skip_full_check; |
@@ -3600,6 +3691,10 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, | |||
3600 | 3691 | ||
3601 | log_level = 0; | 3692 | log_level = 0; |
3602 | 3693 | ||
3694 | env->strict_alignment = false; | ||
3695 | if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) | ||
3696 | env->strict_alignment = true; | ||
3697 | |||
3603 | env->explored_states = kcalloc(env->prog->len, | 3698 | env->explored_states = kcalloc(env->prog->len, |
3604 | sizeof(struct bpf_verifier_state_list *), | 3699 | sizeof(struct bpf_verifier_state_list *), |
3605 | GFP_KERNEL); | 3700 | GFP_KERNEL); |
diff --git a/kernel/fork.c b/kernel/fork.c index 06d759ab4c62..e53770d2bf95 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1577,6 +1577,18 @@ static __latent_entropy struct task_struct *copy_process( | |||
1577 | if (!p) | 1577 | if (!p) |
1578 | goto fork_out; | 1578 | goto fork_out; |
1579 | 1579 | ||
1580 | /* | ||
1581 | * This _must_ happen before we call free_task(), i.e. before we jump | ||
1582 | * to any of the bad_fork_* labels. This is to avoid freeing | ||
1583 | * p->set_child_tid which is (ab)used as a kthread's data pointer for | ||
1584 | * kernel threads (PF_KTHREAD). | ||
1585 | */ | ||
1586 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | ||
1587 | /* | ||
1588 | * Clear TID on mm_release()? | ||
1589 | */ | ||
1590 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; | ||
1591 | |||
1580 | ftrace_graph_init_task(p); | 1592 | ftrace_graph_init_task(p); |
1581 | 1593 | ||
1582 | rt_mutex_init_task(p); | 1594 | rt_mutex_init_task(p); |
@@ -1743,11 +1755,6 @@ static __latent_entropy struct task_struct *copy_process( | |||
1743 | } | 1755 | } |
1744 | } | 1756 | } |
1745 | 1757 | ||
1746 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | ||
1747 | /* | ||
1748 | * Clear TID on mm_release()? | ||
1749 | */ | ||
1750 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; | ||
1751 | #ifdef CONFIG_BLOCK | 1758 | #ifdef CONFIG_BLOCK |
1752 | p->plug = NULL; | 1759 | p->plug = NULL; |
1753 | #endif | 1760 | #endif |
@@ -1845,11 +1852,13 @@ static __latent_entropy struct task_struct *copy_process( | |||
1845 | */ | 1852 | */ |
1846 | recalc_sigpending(); | 1853 | recalc_sigpending(); |
1847 | if (signal_pending(current)) { | 1854 | if (signal_pending(current)) { |
1848 | spin_unlock(¤t->sighand->siglock); | ||
1849 | write_unlock_irq(&tasklist_lock); | ||
1850 | retval = -ERESTARTNOINTR; | 1855 | retval = -ERESTARTNOINTR; |
1851 | goto bad_fork_cancel_cgroup; | 1856 | goto bad_fork_cancel_cgroup; |
1852 | } | 1857 | } |
1858 | if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) { | ||
1859 | retval = -ENOMEM; | ||
1860 | goto bad_fork_cancel_cgroup; | ||
1861 | } | ||
1853 | 1862 | ||
1854 | if (likely(p->pid)) { | 1863 | if (likely(p->pid)) { |
1855 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); | 1864 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); |
@@ -1907,6 +1916,8 @@ static __latent_entropy struct task_struct *copy_process( | |||
1907 | return p; | 1916 | return p; |
1908 | 1917 | ||
1909 | bad_fork_cancel_cgroup: | 1918 | bad_fork_cancel_cgroup: |
1919 | spin_unlock(¤t->sighand->siglock); | ||
1920 | write_unlock_irq(&tasklist_lock); | ||
1910 | cgroup_cancel_fork(p); | 1921 | cgroup_cancel_fork(p); |
1911 | bad_fork_free_pid: | 1922 | bad_fork_free_pid: |
1912 | cgroup_threadgroup_change_end(current); | 1923 | cgroup_threadgroup_change_end(current); |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 686be4b73018..c94da688ee9b 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -880,8 +880,8 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, | |||
880 | if (!desc) | 880 | if (!desc) |
881 | return; | 881 | return; |
882 | 882 | ||
883 | __irq_do_set_handler(desc, handle, 1, NULL); | ||
884 | desc->irq_common_data.handler_data = data; | 883 | desc->irq_common_data.handler_data = data; |
884 | __irq_do_set_handler(desc, handle, 1, NULL); | ||
885 | 885 | ||
886 | irq_put_desc_busunlock(desc, flags); | 886 | irq_put_desc_busunlock(desc, flags); |
887 | } | 887 | } |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 7367e0ec6f81..adfe3b4cfe05 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -122,7 +122,7 @@ static void *alloc_insn_page(void) | |||
122 | return module_alloc(PAGE_SIZE); | 122 | return module_alloc(PAGE_SIZE); |
123 | } | 123 | } |
124 | 124 | ||
125 | static void free_insn_page(void *page) | 125 | void __weak free_insn_page(void *page) |
126 | { | 126 | { |
127 | module_memfree(page); | 127 | module_memfree(page); |
128 | } | 128 | } |
@@ -595,7 +595,7 @@ static void kprobe_optimizer(struct work_struct *work) | |||
595 | } | 595 | } |
596 | 596 | ||
597 | /* Wait for completing optimization and unoptimization */ | 597 | /* Wait for completing optimization and unoptimization */ |
598 | static void wait_for_kprobe_optimizer(void) | 598 | void wait_for_kprobe_optimizer(void) |
599 | { | 599 | { |
600 | mutex_lock(&kprobe_mutex); | 600 | mutex_lock(&kprobe_mutex); |
601 | 601 | ||
@@ -2183,6 +2183,12 @@ static int kprobes_module_callback(struct notifier_block *nb, | |||
2183 | * The vaddr this probe is installed will soon | 2183 | * The vaddr this probe is installed will soon |
2184 | * be vfreed buy not synced to disk. Hence, | 2184 | * be vfreed buy not synced to disk. Hence, |
2185 | * disarming the breakpoint isn't needed. | 2185 | * disarming the breakpoint isn't needed. |
2186 | * | ||
2187 | * Note, this will also move any optimized probes | ||
2188 | * that are pending to be removed from their | ||
2189 | * corresponding lists to the freeing_list and | ||
2190 | * will not be touched by the delayed | ||
2191 | * kprobe_optimizer work handler. | ||
2186 | */ | 2192 | */ |
2187 | kill_kprobe(p); | 2193 | kill_kprobe(p); |
2188 | } | 2194 | } |
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index b95509416909..28cd09e635ed 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
@@ -1785,12 +1785,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, | |||
1785 | int ret; | 1785 | int ret; |
1786 | 1786 | ||
1787 | raw_spin_lock_irq(&lock->wait_lock); | 1787 | raw_spin_lock_irq(&lock->wait_lock); |
1788 | |||
1789 | set_current_state(TASK_INTERRUPTIBLE); | ||
1790 | |||
1791 | /* sleep on the mutex */ | 1788 | /* sleep on the mutex */ |
1789 | set_current_state(TASK_INTERRUPTIBLE); | ||
1792 | ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); | 1790 | ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); |
1793 | 1791 | /* | |
1792 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | ||
1793 | * have to fix that up. | ||
1794 | */ | ||
1795 | fixup_rt_mutex_waiters(lock); | ||
1794 | raw_spin_unlock_irq(&lock->wait_lock); | 1796 | raw_spin_unlock_irq(&lock->wait_lock); |
1795 | 1797 | ||
1796 | return ret; | 1798 | return ret; |
@@ -1822,15 +1824,25 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, | |||
1822 | 1824 | ||
1823 | raw_spin_lock_irq(&lock->wait_lock); | 1825 | raw_spin_lock_irq(&lock->wait_lock); |
1824 | /* | 1826 | /* |
1827 | * Do an unconditional try-lock, this deals with the lock stealing | ||
1828 | * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter() | ||
1829 | * sets a NULL owner. | ||
1830 | * | ||
1831 | * We're not interested in the return value, because the subsequent | ||
1832 | * test on rt_mutex_owner() will infer that. If the trylock succeeded, | ||
1833 | * we will own the lock and it will have removed the waiter. If we | ||
1834 | * failed the trylock, we're still not owner and we need to remove | ||
1835 | * ourselves. | ||
1836 | */ | ||
1837 | try_to_take_rt_mutex(lock, current, waiter); | ||
1838 | /* | ||
1825 | * Unless we're the owner; we're still enqueued on the wait_list. | 1839 | * Unless we're the owner; we're still enqueued on the wait_list. |
1826 | * So check if we became owner, if not, take us off the wait_list. | 1840 | * So check if we became owner, if not, take us off the wait_list. |
1827 | */ | 1841 | */ |
1828 | if (rt_mutex_owner(lock) != current) { | 1842 | if (rt_mutex_owner(lock) != current) { |
1829 | remove_waiter(lock, waiter); | 1843 | remove_waiter(lock, waiter); |
1830 | fixup_rt_mutex_waiters(lock); | ||
1831 | cleanup = true; | 1844 | cleanup = true; |
1832 | } | 1845 | } |
1833 | |||
1834 | /* | 1846 | /* |
1835 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | 1847 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might |
1836 | * have to fix that up. | 1848 | * have to fix that up. |
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index d1f3e9f558b8..74a5a7255b4d 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
@@ -277,7 +277,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) | |||
277 | * if reparented. | 277 | * if reparented. |
278 | */ | 278 | */ |
279 | for (;;) { | 279 | for (;;) { |
280 | set_current_state(TASK_UNINTERRUPTIBLE); | 280 | set_current_state(TASK_INTERRUPTIBLE); |
281 | if (pid_ns->nr_hashed == init_pids) | 281 | if (pid_ns->nr_hashed == init_pids) |
282 | break; | 282 | break; |
283 | schedule(); | 283 | schedule(); |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 3b1e0f3ad07f..fa46606f3356 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -1425,7 +1425,7 @@ static unsigned int nr_meta_pages; | |||
1425 | * Numbers of normal and highmem page frames allocated for hibernation image | 1425 | * Numbers of normal and highmem page frames allocated for hibernation image |
1426 | * before suspending devices. | 1426 | * before suspending devices. |
1427 | */ | 1427 | */ |
1428 | unsigned int alloc_normal, alloc_highmem; | 1428 | static unsigned int alloc_normal, alloc_highmem; |
1429 | /* | 1429 | /* |
1430 | * Memory bitmap used for marking saveable pages (during hibernation) or | 1430 | * Memory bitmap used for marking saveable pages (during hibernation) or |
1431 | * hibernation image pages (during restore) | 1431 | * hibernation image pages (during restore) |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 266ddcc1d8bb..60f356d91060 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -60,19 +60,25 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, | |||
60 | } | 60 | } |
61 | 61 | ||
62 | 62 | ||
63 | void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, | ||
64 | const struct cred *ptracer_cred) | ||
65 | { | ||
66 | BUG_ON(!list_empty(&child->ptrace_entry)); | ||
67 | list_add(&child->ptrace_entry, &new_parent->ptraced); | ||
68 | child->parent = new_parent; | ||
69 | child->ptracer_cred = get_cred(ptracer_cred); | ||
70 | } | ||
71 | |||
63 | /* | 72 | /* |
64 | * ptrace a task: make the debugger its new parent and | 73 | * ptrace a task: make the debugger its new parent and |
65 | * move it to the ptrace list. | 74 | * move it to the ptrace list. |
66 | * | 75 | * |
67 | * Must be called with the tasklist lock write-held. | 76 | * Must be called with the tasklist lock write-held. |
68 | */ | 77 | */ |
69 | void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) | 78 | static void ptrace_link(struct task_struct *child, struct task_struct *new_parent) |
70 | { | 79 | { |
71 | BUG_ON(!list_empty(&child->ptrace_entry)); | ||
72 | list_add(&child->ptrace_entry, &new_parent->ptraced); | ||
73 | child->parent = new_parent; | ||
74 | rcu_read_lock(); | 80 | rcu_read_lock(); |
75 | child->ptracer_cred = get_cred(__task_cred(new_parent)); | 81 | __ptrace_link(child, new_parent, __task_cred(new_parent)); |
76 | rcu_read_unlock(); | 82 | rcu_read_unlock(); |
77 | } | 83 | } |
78 | 84 | ||
@@ -386,7 +392,7 @@ static int ptrace_attach(struct task_struct *task, long request, | |||
386 | flags |= PT_SEIZED; | 392 | flags |= PT_SEIZED; |
387 | task->ptrace = flags; | 393 | task->ptrace = flags; |
388 | 394 | ||
389 | __ptrace_link(task, current); | 395 | ptrace_link(task, current); |
390 | 396 | ||
391 | /* SEIZE doesn't trap tracee on attach */ | 397 | /* SEIZE doesn't trap tracee on attach */ |
392 | if (!seize) | 398 | if (!seize) |
@@ -459,7 +465,7 @@ static int ptrace_traceme(void) | |||
459 | */ | 465 | */ |
460 | if (!ret && !(current->real_parent->flags & PF_EXITING)) { | 466 | if (!ret && !(current->real_parent->flags & PF_EXITING)) { |
461 | current->ptrace = PT_PTRACED; | 467 | current->ptrace = PT_PTRACED; |
462 | __ptrace_link(current, current->real_parent); | 468 | ptrace_link(current, current->real_parent); |
463 | } | 469 | } |
464 | } | 470 | } |
465 | write_unlock_irq(&tasklist_lock); | 471 | write_unlock_irq(&tasklist_lock); |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 759f4bd52cd6..803c3bc274c4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -3502,6 +3502,31 @@ asmlinkage __visible void __sched schedule(void) | |||
3502 | } | 3502 | } |
3503 | EXPORT_SYMBOL(schedule); | 3503 | EXPORT_SYMBOL(schedule); |
3504 | 3504 | ||
3505 | /* | ||
3506 | * synchronize_rcu_tasks() makes sure that no task is stuck in preempted | ||
3507 | * state (have scheduled out non-voluntarily) by making sure that all | ||
3508 | * tasks have either left the run queue or have gone into user space. | ||
3509 | * As idle tasks do not do either, they must not ever be preempted | ||
3510 | * (schedule out non-voluntarily). | ||
3511 | * | ||
3512 | * schedule_idle() is similar to schedule_preempt_disable() except that it | ||
3513 | * never enables preemption because it does not call sched_submit_work(). | ||
3514 | */ | ||
3515 | void __sched schedule_idle(void) | ||
3516 | { | ||
3517 | /* | ||
3518 | * As this skips calling sched_submit_work(), which the idle task does | ||
3519 | * regardless because that function is a nop when the task is in a | ||
3520 | * TASK_RUNNING state, make sure this isn't used someplace that the | ||
3521 | * current task can be in any other state. Note, idle is always in the | ||
3522 | * TASK_RUNNING state. | ||
3523 | */ | ||
3524 | WARN_ON_ONCE(current->state); | ||
3525 | do { | ||
3526 | __schedule(false); | ||
3527 | } while (need_resched()); | ||
3528 | } | ||
3529 | |||
3505 | #ifdef CONFIG_CONTEXT_TRACKING | 3530 | #ifdef CONFIG_CONTEXT_TRACKING |
3506 | asmlinkage __visible void __sched schedule_user(void) | 3531 | asmlinkage __visible void __sched schedule_user(void) |
3507 | { | 3532 | { |
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 76877a62b5fa..622eed1b7658 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c | |||
@@ -245,11 +245,10 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, | |||
245 | sugov_update_commit(sg_policy, time, next_f); | 245 | sugov_update_commit(sg_policy, time, next_f); |
246 | } | 246 | } |
247 | 247 | ||
248 | static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu) | 248 | static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) |
249 | { | 249 | { |
250 | struct sugov_policy *sg_policy = sg_cpu->sg_policy; | 250 | struct sugov_policy *sg_policy = sg_cpu->sg_policy; |
251 | struct cpufreq_policy *policy = sg_policy->policy; | 251 | struct cpufreq_policy *policy = sg_policy->policy; |
252 | u64 last_freq_update_time = sg_policy->last_freq_update_time; | ||
253 | unsigned long util = 0, max = 1; | 252 | unsigned long util = 0, max = 1; |
254 | unsigned int j; | 253 | unsigned int j; |
255 | 254 | ||
@@ -265,7 +264,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu) | |||
265 | * enough, don't take the CPU into account as it probably is | 264 | * enough, don't take the CPU into account as it probably is |
266 | * idle now (and clear iowait_boost for it). | 265 | * idle now (and clear iowait_boost for it). |
267 | */ | 266 | */ |
268 | delta_ns = last_freq_update_time - j_sg_cpu->last_update; | 267 | delta_ns = time - j_sg_cpu->last_update; |
269 | if (delta_ns > TICK_NSEC) { | 268 | if (delta_ns > TICK_NSEC) { |
270 | j_sg_cpu->iowait_boost = 0; | 269 | j_sg_cpu->iowait_boost = 0; |
271 | continue; | 270 | continue; |
@@ -309,7 +308,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time, | |||
309 | if (flags & SCHED_CPUFREQ_RT_DL) | 308 | if (flags & SCHED_CPUFREQ_RT_DL) |
310 | next_f = sg_policy->policy->cpuinfo.max_freq; | 309 | next_f = sg_policy->policy->cpuinfo.max_freq; |
311 | else | 310 | else |
312 | next_f = sugov_next_freq_shared(sg_cpu); | 311 | next_f = sugov_next_freq_shared(sg_cpu, time); |
313 | 312 | ||
314 | sugov_update_commit(sg_policy, time, next_f); | 313 | sugov_update_commit(sg_policy, time, next_f); |
315 | } | 314 | } |
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 2a25a9ec2c6e..ef63adce0c9c 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
@@ -265,7 +265,7 @@ static void do_idle(void) | |||
265 | smp_mb__after_atomic(); | 265 | smp_mb__after_atomic(); |
266 | 266 | ||
267 | sched_ttwu_pending(); | 267 | sched_ttwu_pending(); |
268 | schedule_preempt_disabled(); | 268 | schedule_idle(); |
269 | 269 | ||
270 | if (unlikely(klp_patch_pending(current))) | 270 | if (unlikely(klp_patch_pending(current))) |
271 | klp_update_patch_state(current); | 271 | klp_update_patch_state(current); |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7808ab050599..6dda2aab731e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -1467,6 +1467,8 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq) | |||
1467 | } | 1467 | } |
1468 | #endif | 1468 | #endif |
1469 | 1469 | ||
1470 | extern void schedule_idle(void); | ||
1471 | |||
1470 | extern void sysrq_sched_debug_show(void); | 1472 | extern void sysrq_sched_debug_show(void); |
1471 | extern void sched_init_granularity(void); | 1473 | extern void sched_init_granularity(void); |
1472 | extern void update_max_interval(void); | 1474 | extern void update_max_interval(void); |
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 1370f067fb51..d2a1e6dd0291 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c | |||
@@ -825,8 +825,10 @@ static void check_thread_timers(struct task_struct *tsk, | |||
825 | * At the hard limit, we just die. | 825 | * At the hard limit, we just die. |
826 | * No need to calculate anything else now. | 826 | * No need to calculate anything else now. |
827 | */ | 827 | */ |
828 | pr_info("CPU Watchdog Timeout (hard): %s[%d]\n", | 828 | if (print_fatal_signals) { |
829 | tsk->comm, task_pid_nr(tsk)); | 829 | pr_info("CPU Watchdog Timeout (hard): %s[%d]\n", |
830 | tsk->comm, task_pid_nr(tsk)); | ||
831 | } | ||
830 | __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); | 832 | __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); |
831 | return; | 833 | return; |
832 | } | 834 | } |
@@ -838,8 +840,10 @@ static void check_thread_timers(struct task_struct *tsk, | |||
838 | soft += USEC_PER_SEC; | 840 | soft += USEC_PER_SEC; |
839 | sig->rlim[RLIMIT_RTTIME].rlim_cur = soft; | 841 | sig->rlim[RLIMIT_RTTIME].rlim_cur = soft; |
840 | } | 842 | } |
841 | pr_info("RT Watchdog Timeout (soft): %s[%d]\n", | 843 | if (print_fatal_signals) { |
842 | tsk->comm, task_pid_nr(tsk)); | 844 | pr_info("RT Watchdog Timeout (soft): %s[%d]\n", |
845 | tsk->comm, task_pid_nr(tsk)); | ||
846 | } | ||
843 | __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); | 847 | __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); |
844 | } | 848 | } |
845 | } | 849 | } |
@@ -936,8 +940,10 @@ static void check_process_timers(struct task_struct *tsk, | |||
936 | * At the hard limit, we just die. | 940 | * At the hard limit, we just die. |
937 | * No need to calculate anything else now. | 941 | * No need to calculate anything else now. |
938 | */ | 942 | */ |
939 | pr_info("RT Watchdog Timeout (hard): %s[%d]\n", | 943 | if (print_fatal_signals) { |
940 | tsk->comm, task_pid_nr(tsk)); | 944 | pr_info("RT Watchdog Timeout (hard): %s[%d]\n", |
945 | tsk->comm, task_pid_nr(tsk)); | ||
946 | } | ||
941 | __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); | 947 | __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); |
942 | return; | 948 | return; |
943 | } | 949 | } |
@@ -945,8 +951,10 @@ static void check_process_timers(struct task_struct *tsk, | |||
945 | /* | 951 | /* |
946 | * At the soft limit, send a SIGXCPU every second. | 952 | * At the soft limit, send a SIGXCPU every second. |
947 | */ | 953 | */ |
948 | pr_info("CPU Watchdog Timeout (soft): %s[%d]\n", | 954 | if (print_fatal_signals) { |
949 | tsk->comm, task_pid_nr(tsk)); | 955 | pr_info("CPU Watchdog Timeout (soft): %s[%d]\n", |
956 | tsk->comm, task_pid_nr(tsk)); | ||
957 | } | ||
950 | __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); | 958 | __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); |
951 | if (soft < hard) { | 959 | if (soft < hard) { |
952 | soft++; | 960 | soft++; |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index bd8ae8d5ae9c..193c5f5e3f79 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -1662,14 +1662,14 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, | |||
1662 | goto out; | 1662 | goto out; |
1663 | 1663 | ||
1664 | if (attr == &dev_attr_act_mask) { | 1664 | if (attr == &dev_attr_act_mask) { |
1665 | if (sscanf(buf, "%llx", &value) != 1) { | 1665 | if (kstrtoull(buf, 0, &value)) { |
1666 | /* Assume it is a list of trace category names */ | 1666 | /* Assume it is a list of trace category names */ |
1667 | ret = blk_trace_str2mask(buf); | 1667 | ret = blk_trace_str2mask(buf); |
1668 | if (ret < 0) | 1668 | if (ret < 0) |
1669 | goto out; | 1669 | goto out; |
1670 | value = ret; | 1670 | value = ret; |
1671 | } | 1671 | } |
1672 | } else if (sscanf(buf, "%llu", &value) != 1) | 1672 | } else if (kstrtoull(buf, 0, &value)) |
1673 | goto out; | 1673 | goto out; |
1674 | 1674 | ||
1675 | ret = -ENXIO; | 1675 | ret = -ENXIO; |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 39dca4e86a94..9e5841dc14b5 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -4144,9 +4144,9 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, | |||
4144 | int i, ret = -ENODEV; | 4144 | int i, ret = -ENODEV; |
4145 | int size; | 4145 | int size; |
4146 | 4146 | ||
4147 | if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) | 4147 | if (!glob || !strlen(glob) || !strcmp(glob, "*")) |
4148 | func_g.search = NULL; | 4148 | func_g.search = NULL; |
4149 | else if (glob) { | 4149 | else { |
4150 | int not; | 4150 | int not; |
4151 | 4151 | ||
4152 | func_g.type = filter_parse_regex(glob, strlen(glob), | 4152 | func_g.type = filter_parse_regex(glob, strlen(glob), |
@@ -4256,6 +4256,14 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, | |||
4256 | return ret; | 4256 | return ret; |
4257 | } | 4257 | } |
4258 | 4258 | ||
4259 | void clear_ftrace_function_probes(struct trace_array *tr) | ||
4260 | { | ||
4261 | struct ftrace_func_probe *probe, *n; | ||
4262 | |||
4263 | list_for_each_entry_safe(probe, n, &tr->func_probes, list) | ||
4264 | unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); | ||
4265 | } | ||
4266 | |||
4259 | static LIST_HEAD(ftrace_commands); | 4267 | static LIST_HEAD(ftrace_commands); |
4260 | static DEFINE_MUTEX(ftrace_cmd_mutex); | 4268 | static DEFINE_MUTEX(ftrace_cmd_mutex); |
4261 | 4269 | ||
@@ -5055,7 +5063,7 @@ ftrace_graph_release(struct inode *inode, struct file *file) | |||
5055 | } | 5063 | } |
5056 | 5064 | ||
5057 | out: | 5065 | out: |
5058 | kfree(fgd->new_hash); | 5066 | free_ftrace_hash(fgd->new_hash); |
5059 | kfree(fgd); | 5067 | kfree(fgd); |
5060 | 5068 | ||
5061 | return ret; | 5069 | return ret; |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c4536c449021..1122f151466f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1558,7 +1558,7 @@ static __init int init_trace_selftests(void) | |||
1558 | 1558 | ||
1559 | return 0; | 1559 | return 0; |
1560 | } | 1560 | } |
1561 | early_initcall(init_trace_selftests); | 1561 | core_initcall(init_trace_selftests); |
1562 | #else | 1562 | #else |
1563 | static inline int run_tracer_selftest(struct tracer *type) | 1563 | static inline int run_tracer_selftest(struct tracer *type) |
1564 | { | 1564 | { |
@@ -2568,7 +2568,36 @@ static inline void ftrace_trace_stack(struct trace_array *tr, | |||
2568 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | 2568 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
2569 | int pc) | 2569 | int pc) |
2570 | { | 2570 | { |
2571 | __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL); | 2571 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
2572 | |||
2573 | if (rcu_is_watching()) { | ||
2574 | __ftrace_trace_stack(buffer, flags, skip, pc, NULL); | ||
2575 | return; | ||
2576 | } | ||
2577 | |||
2578 | /* | ||
2579 | * When an NMI triggers, RCU is enabled via rcu_nmi_enter(), | ||
2580 | * but if the above rcu_is_watching() failed, then the NMI | ||
2581 | * triggered someplace critical, and rcu_irq_enter() should | ||
2582 | * not be called from NMI. | ||
2583 | */ | ||
2584 | if (unlikely(in_nmi())) | ||
2585 | return; | ||
2586 | |||
2587 | /* | ||
2588 | * It is possible that a function is being traced in a | ||
2589 | * location that RCU is not watching. A call to | ||
2590 | * rcu_irq_enter() will make sure that it is, but there's | ||
2591 | * a few internal rcu functions that could be traced | ||
2592 | * where that wont work either. In those cases, we just | ||
2593 | * do nothing. | ||
2594 | */ | ||
2595 | if (unlikely(rcu_irq_enter_disabled())) | ||
2596 | return; | ||
2597 | |||
2598 | rcu_irq_enter_irqson(); | ||
2599 | __ftrace_trace_stack(buffer, flags, skip, pc, NULL); | ||
2600 | rcu_irq_exit_irqson(); | ||
2572 | } | 2601 | } |
2573 | 2602 | ||
2574 | /** | 2603 | /** |
@@ -7550,6 +7579,7 @@ static int instance_rmdir(const char *name) | |||
7550 | } | 7579 | } |
7551 | 7580 | ||
7552 | tracing_set_nop(tr); | 7581 | tracing_set_nop(tr); |
7582 | clear_ftrace_function_probes(tr); | ||
7553 | event_trace_del_tracer(tr); | 7583 | event_trace_del_tracer(tr); |
7554 | ftrace_clear_pids(tr); | 7584 | ftrace_clear_pids(tr); |
7555 | ftrace_destroy_function_files(tr); | 7585 | ftrace_destroy_function_files(tr); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 291a1bca5748..39fd77330aab 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -980,6 +980,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, | |||
980 | extern int | 980 | extern int |
981 | unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, | 981 | unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, |
982 | struct ftrace_probe_ops *ops); | 982 | struct ftrace_probe_ops *ops); |
983 | extern void clear_ftrace_function_probes(struct trace_array *tr); | ||
983 | 984 | ||
984 | int register_ftrace_command(struct ftrace_func_command *cmd); | 985 | int register_ftrace_command(struct ftrace_func_command *cmd); |
985 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | 986 | int unregister_ftrace_command(struct ftrace_func_command *cmd); |
@@ -998,6 +999,10 @@ static inline __init int unregister_ftrace_command(char *cmd_name) | |||
998 | { | 999 | { |
999 | return -EINVAL; | 1000 | return -EINVAL; |
1000 | } | 1001 | } |
1002 | static inline void clear_ftrace_function_probes(struct trace_array *tr) | ||
1003 | { | ||
1004 | } | ||
1005 | |||
1001 | /* | 1006 | /* |
1002 | * The ops parameter passed in is usually undefined. | 1007 | * The ops parameter passed in is usually undefined. |
1003 | * This must be a macro. | 1008 | * This must be a macro. |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8485f6738a87..c129fca6ec99 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -1535,6 +1535,11 @@ static __init int kprobe_trace_self_tests_init(void) | |||
1535 | 1535 | ||
1536 | end: | 1536 | end: |
1537 | release_all_trace_kprobes(); | 1537 | release_all_trace_kprobes(); |
1538 | /* | ||
1539 | * Wait for the optimizer work to finish. Otherwise it might fiddle | ||
1540 | * with probes in already freed __init text. | ||
1541 | */ | ||
1542 | wait_for_kprobe_optimizer(); | ||
1538 | if (warn) | 1543 | if (warn) |
1539 | pr_cont("NG: Some tests are failed. Please check them.\n"); | 1544 | pr_cont("NG: Some tests are failed. Please check them.\n"); |
1540 | else | 1545 | else |
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 889bc31785be..be88cbaadde3 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
@@ -4504,6 +4504,44 @@ static struct bpf_test tests[] = { | |||
4504 | { }, | 4504 | { }, |
4505 | { { 0, 1 } }, | 4505 | { { 0, 1 } }, |
4506 | }, | 4506 | }, |
4507 | { | ||
4508 | "JMP_JSGE_K: Signed jump: value walk 1", | ||
4509 | .u.insns_int = { | ||
4510 | BPF_ALU32_IMM(BPF_MOV, R0, 0), | ||
4511 | BPF_LD_IMM64(R1, -3), | ||
4512 | BPF_JMP_IMM(BPF_JSGE, R1, 0, 6), | ||
4513 | BPF_ALU64_IMM(BPF_ADD, R1, 1), | ||
4514 | BPF_JMP_IMM(BPF_JSGE, R1, 0, 4), | ||
4515 | BPF_ALU64_IMM(BPF_ADD, R1, 1), | ||
4516 | BPF_JMP_IMM(BPF_JSGE, R1, 0, 2), | ||
4517 | BPF_ALU64_IMM(BPF_ADD, R1, 1), | ||
4518 | BPF_JMP_IMM(BPF_JSGE, R1, 0, 1), | ||
4519 | BPF_EXIT_INSN(), /* bad exit */ | ||
4520 | BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ | ||
4521 | BPF_EXIT_INSN(), | ||
4522 | }, | ||
4523 | INTERNAL, | ||
4524 | { }, | ||
4525 | { { 0, 1 } }, | ||
4526 | }, | ||
4527 | { | ||
4528 | "JMP_JSGE_K: Signed jump: value walk 2", | ||
4529 | .u.insns_int = { | ||
4530 | BPF_ALU32_IMM(BPF_MOV, R0, 0), | ||
4531 | BPF_LD_IMM64(R1, -3), | ||
4532 | BPF_JMP_IMM(BPF_JSGE, R1, 0, 4), | ||
4533 | BPF_ALU64_IMM(BPF_ADD, R1, 2), | ||
4534 | BPF_JMP_IMM(BPF_JSGE, R1, 0, 2), | ||
4535 | BPF_ALU64_IMM(BPF_ADD, R1, 2), | ||
4536 | BPF_JMP_IMM(BPF_JSGE, R1, 0, 1), | ||
4537 | BPF_EXIT_INSN(), /* bad exit */ | ||
4538 | BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ | ||
4539 | BPF_EXIT_INSN(), | ||
4540 | }, | ||
4541 | INTERNAL, | ||
4542 | { }, | ||
4543 | { { 0, 1 } }, | ||
4544 | }, | ||
4507 | /* BPF_JMP | BPF_JGT | BPF_K */ | 4545 | /* BPF_JMP | BPF_JGT | BPF_K */ |
4508 | { | 4546 | { |
4509 | "JMP_JGT_K: if (3 > 2) return 1", | 4547 | "JMP_JGT_K: if (3 > 2) return 1", |
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 71e85643b3f9..6ad3e043c617 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c | |||
@@ -454,8 +454,8 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev, | |||
454 | goto error_xenbus; | 454 | goto error_xenbus; |
455 | } | 455 | } |
456 | priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL); | 456 | priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL); |
457 | if (!priv->tag) { | 457 | if (IS_ERR(priv->tag)) { |
458 | ret = -EINVAL; | 458 | ret = PTR_ERR(priv->tag); |
459 | goto error_xenbus; | 459 | goto error_xenbus; |
460 | } | 460 | } |
461 | ret = xenbus_transaction_end(xbt, 0); | 461 | ret = xenbus_transaction_end(xbt, 0); |
@@ -525,7 +525,7 @@ static struct xenbus_driver xen_9pfs_front_driver = { | |||
525 | .otherend_changed = xen_9pfs_front_changed, | 525 | .otherend_changed = xen_9pfs_front_changed, |
526 | }; | 526 | }; |
527 | 527 | ||
528 | int p9_trans_xen_init(void) | 528 | static int p9_trans_xen_init(void) |
529 | { | 529 | { |
530 | if (!xen_domain()) | 530 | if (!xen_domain()) |
531 | return -ENODEV; | 531 | return -ENODEV; |
@@ -537,7 +537,7 @@ int p9_trans_xen_init(void) | |||
537 | } | 537 | } |
538 | module_init(p9_trans_xen_init); | 538 | module_init(p9_trans_xen_init); |
539 | 539 | ||
540 | void p9_trans_xen_exit(void) | 540 | static void p9_trans_xen_exit(void) |
541 | { | 541 | { |
542 | v9fs_unregister_trans(&p9_xen_trans); | 542 | v9fs_unregister_trans(&p9_xen_trans); |
543 | return xenbus_unregister_driver(&xen_9pfs_front_driver); | 543 | return xenbus_unregister_driver(&xen_9pfs_front_driver); |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index c5ce7745b230..574f78824d8a 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -835,6 +835,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
835 | return -EPROTONOSUPPORT; | 835 | return -EPROTONOSUPPORT; |
836 | } | 836 | } |
837 | } | 837 | } |
838 | |||
839 | if (data[IFLA_BR_VLAN_DEFAULT_PVID]) { | ||
840 | __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]); | ||
841 | |||
842 | if (defpvid >= VLAN_VID_MASK) | ||
843 | return -EINVAL; | ||
844 | } | ||
838 | #endif | 845 | #endif |
839 | 846 | ||
840 | return 0; | 847 | return 0; |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 08341d2aa9c9..0db8102995a5 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
@@ -179,6 +179,7 @@ static void br_stp_start(struct net_bridge *br) | |||
179 | br_debug(br, "using kernel STP\n"); | 179 | br_debug(br, "using kernel STP\n"); |
180 | 180 | ||
181 | /* To start timers on any ports left in blocking */ | 181 | /* To start timers on any ports left in blocking */ |
182 | mod_timer(&br->hello_timer, jiffies + br->hello_time); | ||
182 | br_port_state_selection(br); | 183 | br_port_state_selection(br); |
183 | } | 184 | } |
184 | 185 | ||
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index c98b3e5c140a..60b6fe277a8b 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c | |||
@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg) | |||
40 | if (br->dev->flags & IFF_UP) { | 40 | if (br->dev->flags & IFF_UP) { |
41 | br_config_bpdu_generation(br); | 41 | br_config_bpdu_generation(br); |
42 | 42 | ||
43 | if (br->stp_enabled != BR_USER_STP) | 43 | if (br->stp_enabled == BR_KERNEL_STP) |
44 | mod_timer(&br->hello_timer, | 44 | mod_timer(&br->hello_timer, |
45 | round_jiffies(jiffies + br->hello_time)); | 45 | round_jiffies(jiffies + br->hello_time)); |
46 | } | 46 | } |
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c index 5929309beaa1..db85230e49c3 100644 --- a/net/bridge/netfilter/ebt_arpreply.c +++ b/net/bridge/netfilter/ebt_arpreply.c | |||
@@ -68,6 +68,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par) | |||
68 | if (e->ethproto != htons(ETH_P_ARP) || | 68 | if (e->ethproto != htons(ETH_P_ARP) || |
69 | e->invflags & EBT_IPROTO) | 69 | e->invflags & EBT_IPROTO) |
70 | return -EINVAL; | 70 | return -EINVAL; |
71 | if (ebt_invalid_target(info->target)) | ||
72 | return -EINVAL; | ||
73 | |||
71 | return 0; | 74 | return 0; |
72 | } | 75 | } |
73 | 76 | ||
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 9ec0c9f908fa..9c6e619f452b 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1373,7 +1373,8 @@ static inline int ebt_obj_to_user(char __user *um, const char *_name, | |||
1373 | strlcpy(name, _name, sizeof(name)); | 1373 | strlcpy(name, _name, sizeof(name)); |
1374 | if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) || | 1374 | if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) || |
1375 | put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) || | 1375 | put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) || |
1376 | xt_data_to_user(um + entrysize, data, usersize, datasize)) | 1376 | xt_data_to_user(um + entrysize, data, usersize, datasize, |
1377 | XT_ALIGN(datasize))) | ||
1377 | return -EFAULT; | 1378 | return -EFAULT; |
1378 | 1379 | ||
1379 | return 0; | 1380 | return 0; |
@@ -1658,7 +1659,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, | |||
1658 | if (match->compat_to_user(cm->data, m->data)) | 1659 | if (match->compat_to_user(cm->data, m->data)) |
1659 | return -EFAULT; | 1660 | return -EFAULT; |
1660 | } else { | 1661 | } else { |
1661 | if (xt_data_to_user(cm->data, m->data, match->usersize, msize)) | 1662 | if (xt_data_to_user(cm->data, m->data, match->usersize, msize, |
1663 | COMPAT_XT_ALIGN(msize))) | ||
1662 | return -EFAULT; | 1664 | return -EFAULT; |
1663 | } | 1665 | } |
1664 | 1666 | ||
@@ -1687,7 +1689,8 @@ static int compat_target_to_user(struct ebt_entry_target *t, | |||
1687 | if (target->compat_to_user(cm->data, t->data)) | 1689 | if (target->compat_to_user(cm->data, t->data)) |
1688 | return -EFAULT; | 1690 | return -EFAULT; |
1689 | } else { | 1691 | } else { |
1690 | if (xt_data_to_user(cm->data, t->data, target->usersize, tsize)) | 1692 | if (xt_data_to_user(cm->data, t->data, target->usersize, tsize, |
1693 | COMPAT_XT_ALIGN(tsize))) | ||
1691 | return -EFAULT; | 1694 | return -EFAULT; |
1692 | } | 1695 | } |
1693 | 1696 | ||
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 2034fb926670..8757fb87dab8 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c | |||
@@ -151,7 +151,7 @@ static int process_one_ticket(struct ceph_auth_client *ac, | |||
151 | struct timespec validity; | 151 | struct timespec validity; |
152 | void *tp, *tpend; | 152 | void *tp, *tpend; |
153 | void **ptp; | 153 | void **ptp; |
154 | struct ceph_crypto_key new_session_key; | 154 | struct ceph_crypto_key new_session_key = { 0 }; |
155 | struct ceph_buffer *new_ticket_blob; | 155 | struct ceph_buffer *new_ticket_blob; |
156 | unsigned long new_expires, new_renew_after; | 156 | unsigned long new_expires, new_renew_after; |
157 | u64 new_secret_id; | 157 | u64 new_secret_id; |
@@ -215,6 +215,9 @@ static int process_one_ticket(struct ceph_auth_client *ac, | |||
215 | dout(" ticket blob is %d bytes\n", dlen); | 215 | dout(" ticket blob is %d bytes\n", dlen); |
216 | ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad); | 216 | ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad); |
217 | blob_struct_v = ceph_decode_8(ptp); | 217 | blob_struct_v = ceph_decode_8(ptp); |
218 | if (blob_struct_v != 1) | ||
219 | goto bad; | ||
220 | |||
218 | new_secret_id = ceph_decode_64(ptp); | 221 | new_secret_id = ceph_decode_64(ptp); |
219 | ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend); | 222 | ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend); |
220 | if (ret) | 223 | if (ret) |
@@ -234,13 +237,13 @@ static int process_one_ticket(struct ceph_auth_client *ac, | |||
234 | type, ceph_entity_type_name(type), th->secret_id, | 237 | type, ceph_entity_type_name(type), th->secret_id, |
235 | (int)th->ticket_blob->vec.iov_len); | 238 | (int)th->ticket_blob->vec.iov_len); |
236 | xi->have_keys |= th->service; | 239 | xi->have_keys |= th->service; |
237 | 240 | return 0; | |
238 | out: | ||
239 | return ret; | ||
240 | 241 | ||
241 | bad: | 242 | bad: |
242 | ret = -EINVAL; | 243 | ret = -EINVAL; |
243 | goto out; | 244 | out: |
245 | ceph_crypto_key_destroy(&new_session_key); | ||
246 | return ret; | ||
244 | } | 247 | } |
245 | 248 | ||
246 | static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, | 249 | static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 4fd02831beed..47e94b560ba0 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
@@ -56,19 +56,6 @@ static const struct kernel_param_ops param_ops_supported_features = { | |||
56 | module_param_cb(supported_features, ¶m_ops_supported_features, NULL, | 56 | module_param_cb(supported_features, ¶m_ops_supported_features, NULL, |
57 | S_IRUGO); | 57 | S_IRUGO); |
58 | 58 | ||
59 | /* | ||
60 | * find filename portion of a path (/foo/bar/baz -> baz) | ||
61 | */ | ||
62 | const char *ceph_file_part(const char *s, int len) | ||
63 | { | ||
64 | const char *e = s + len; | ||
65 | |||
66 | while (e != s && *(e-1) != '/') | ||
67 | e--; | ||
68 | return e; | ||
69 | } | ||
70 | EXPORT_SYMBOL(ceph_file_part); | ||
71 | |||
72 | const char *ceph_msg_type_name(int type) | 59 | const char *ceph_msg_type_name(int type) |
73 | { | 60 | { |
74 | switch (type) { | 61 | switch (type) { |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 5766a6c896c4..588a91930051 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -1174,8 +1174,8 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, | |||
1174 | * Returns true if the result moves the cursor on to the next piece | 1174 | * Returns true if the result moves the cursor on to the next piece |
1175 | * of the data item. | 1175 | * of the data item. |
1176 | */ | 1176 | */ |
1177 | static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, | 1177 | static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, |
1178 | size_t bytes) | 1178 | size_t bytes) |
1179 | { | 1179 | { |
1180 | bool new_piece; | 1180 | bool new_piece; |
1181 | 1181 | ||
@@ -1207,8 +1207,6 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, | |||
1207 | new_piece = true; | 1207 | new_piece = true; |
1208 | } | 1208 | } |
1209 | cursor->need_crc = new_piece; | 1209 | cursor->need_crc = new_piece; |
1210 | |||
1211 | return new_piece; | ||
1212 | } | 1210 | } |
1213 | 1211 | ||
1214 | static size_t sizeof_footer(struct ceph_connection *con) | 1212 | static size_t sizeof_footer(struct ceph_connection *con) |
@@ -1577,7 +1575,6 @@ static int write_partial_message_data(struct ceph_connection *con) | |||
1577 | size_t page_offset; | 1575 | size_t page_offset; |
1578 | size_t length; | 1576 | size_t length; |
1579 | bool last_piece; | 1577 | bool last_piece; |
1580 | bool need_crc; | ||
1581 | int ret; | 1578 | int ret; |
1582 | 1579 | ||
1583 | page = ceph_msg_data_next(cursor, &page_offset, &length, | 1580 | page = ceph_msg_data_next(cursor, &page_offset, &length, |
@@ -1592,7 +1589,7 @@ static int write_partial_message_data(struct ceph_connection *con) | |||
1592 | } | 1589 | } |
1593 | if (do_datacrc && cursor->need_crc) | 1590 | if (do_datacrc && cursor->need_crc) |
1594 | crc = ceph_crc32c_page(crc, page, page_offset, length); | 1591 | crc = ceph_crc32c_page(crc, page, page_offset, length); |
1595 | need_crc = ceph_msg_data_advance(cursor, (size_t)ret); | 1592 | ceph_msg_data_advance(cursor, (size_t)ret); |
1596 | } | 1593 | } |
1597 | 1594 | ||
1598 | dout("%s %p msg %p done\n", __func__, con, msg); | 1595 | dout("%s %p msg %p done\n", __func__, con, msg); |
@@ -2231,10 +2228,18 @@ static void process_ack(struct ceph_connection *con) | |||
2231 | struct ceph_msg *m; | 2228 | struct ceph_msg *m; |
2232 | u64 ack = le64_to_cpu(con->in_temp_ack); | 2229 | u64 ack = le64_to_cpu(con->in_temp_ack); |
2233 | u64 seq; | 2230 | u64 seq; |
2231 | bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ); | ||
2232 | struct list_head *list = reconnect ? &con->out_queue : &con->out_sent; | ||
2234 | 2233 | ||
2235 | while (!list_empty(&con->out_sent)) { | 2234 | /* |
2236 | m = list_first_entry(&con->out_sent, struct ceph_msg, | 2235 | * In the reconnect case, con_fault() has requeued messages |
2237 | list_head); | 2236 | * in out_sent. We should cleanup old messages according to |
2237 | * the reconnect seq. | ||
2238 | */ | ||
2239 | while (!list_empty(list)) { | ||
2240 | m = list_first_entry(list, struct ceph_msg, list_head); | ||
2241 | if (reconnect && m->needs_out_seq) | ||
2242 | break; | ||
2238 | seq = le64_to_cpu(m->hdr.seq); | 2243 | seq = le64_to_cpu(m->hdr.seq); |
2239 | if (seq > ack) | 2244 | if (seq > ack) |
2240 | break; | 2245 | break; |
@@ -2243,6 +2248,7 @@ static void process_ack(struct ceph_connection *con) | |||
2243 | m->ack_stamp = jiffies; | 2248 | m->ack_stamp = jiffies; |
2244 | ceph_msg_remove(m); | 2249 | ceph_msg_remove(m); |
2245 | } | 2250 | } |
2251 | |||
2246 | prepare_read_tag(con); | 2252 | prepare_read_tag(con); |
2247 | } | 2253 | } |
2248 | 2254 | ||
@@ -2299,7 +2305,7 @@ static int read_partial_msg_data(struct ceph_connection *con) | |||
2299 | 2305 | ||
2300 | if (do_datacrc) | 2306 | if (do_datacrc) |
2301 | crc = ceph_crc32c_page(crc, page, page_offset, ret); | 2307 | crc = ceph_crc32c_page(crc, page, page_offset, ret); |
2302 | (void) ceph_msg_data_advance(cursor, (size_t)ret); | 2308 | ceph_msg_data_advance(cursor, (size_t)ret); |
2303 | } | 2309 | } |
2304 | if (do_datacrc) | 2310 | if (do_datacrc) |
2305 | con->in_data_crc = crc; | 2311 | con->in_data_crc = crc; |
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 29a0ef351c5e..250f11f78609 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c | |||
@@ -43,15 +43,13 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end) | |||
43 | int i, err = -EINVAL; | 43 | int i, err = -EINVAL; |
44 | struct ceph_fsid fsid; | 44 | struct ceph_fsid fsid; |
45 | u32 epoch, num_mon; | 45 | u32 epoch, num_mon; |
46 | u16 version; | ||
47 | u32 len; | 46 | u32 len; |
48 | 47 | ||
49 | ceph_decode_32_safe(&p, end, len, bad); | 48 | ceph_decode_32_safe(&p, end, len, bad); |
50 | ceph_decode_need(&p, end, len, bad); | 49 | ceph_decode_need(&p, end, len, bad); |
51 | 50 | ||
52 | dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p)); | 51 | dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p)); |
53 | 52 | p += sizeof(u16); /* skip version */ | |
54 | ceph_decode_16_safe(&p, end, version, bad); | ||
55 | 53 | ||
56 | ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); | 54 | ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); |
57 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | 55 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); |
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index ffe9e904d4d1..55e3a477f92d 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
@@ -317,6 +317,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end) | |||
317 | u32 yes; | 317 | u32 yes; |
318 | struct crush_rule *r; | 318 | struct crush_rule *r; |
319 | 319 | ||
320 | err = -EINVAL; | ||
320 | ceph_decode_32_safe(p, end, yes, bad); | 321 | ceph_decode_32_safe(p, end, yes, bad); |
321 | if (!yes) { | 322 | if (!yes) { |
322 | dout("crush_decode NO rule %d off %x %p to %p\n", | 323 | dout("crush_decode NO rule %d off %x %p to %p\n", |
diff --git a/net/core/dev.c b/net/core/dev.c index 96cf83da0d66..fca407b4a6ea 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -6852,6 +6852,32 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down) | |||
6852 | } | 6852 | } |
6853 | EXPORT_SYMBOL(dev_change_proto_down); | 6853 | EXPORT_SYMBOL(dev_change_proto_down); |
6854 | 6854 | ||
6855 | bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op) | ||
6856 | { | ||
6857 | struct netdev_xdp xdp; | ||
6858 | |||
6859 | memset(&xdp, 0, sizeof(xdp)); | ||
6860 | xdp.command = XDP_QUERY_PROG; | ||
6861 | |||
6862 | /* Query must always succeed. */ | ||
6863 | WARN_ON(xdp_op(dev, &xdp) < 0); | ||
6864 | return xdp.prog_attached; | ||
6865 | } | ||
6866 | |||
6867 | static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op, | ||
6868 | struct netlink_ext_ack *extack, | ||
6869 | struct bpf_prog *prog) | ||
6870 | { | ||
6871 | struct netdev_xdp xdp; | ||
6872 | |||
6873 | memset(&xdp, 0, sizeof(xdp)); | ||
6874 | xdp.command = XDP_SETUP_PROG; | ||
6875 | xdp.extack = extack; | ||
6876 | xdp.prog = prog; | ||
6877 | |||
6878 | return xdp_op(dev, &xdp); | ||
6879 | } | ||
6880 | |||
6855 | /** | 6881 | /** |
6856 | * dev_change_xdp_fd - set or clear a bpf program for a device rx path | 6882 | * dev_change_xdp_fd - set or clear a bpf program for a device rx path |
6857 | * @dev: device | 6883 | * @dev: device |
@@ -6864,41 +6890,34 @@ EXPORT_SYMBOL(dev_change_proto_down); | |||
6864 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, | 6890 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, |
6865 | int fd, u32 flags) | 6891 | int fd, u32 flags) |
6866 | { | 6892 | { |
6867 | int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp); | ||
6868 | const struct net_device_ops *ops = dev->netdev_ops; | 6893 | const struct net_device_ops *ops = dev->netdev_ops; |
6869 | struct bpf_prog *prog = NULL; | 6894 | struct bpf_prog *prog = NULL; |
6870 | struct netdev_xdp xdp; | 6895 | xdp_op_t xdp_op, xdp_chk; |
6871 | int err; | 6896 | int err; |
6872 | 6897 | ||
6873 | ASSERT_RTNL(); | 6898 | ASSERT_RTNL(); |
6874 | 6899 | ||
6875 | xdp_op = ops->ndo_xdp; | 6900 | xdp_op = xdp_chk = ops->ndo_xdp; |
6901 | if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE)) | ||
6902 | return -EOPNOTSUPP; | ||
6876 | if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE)) | 6903 | if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE)) |
6877 | xdp_op = generic_xdp_install; | 6904 | xdp_op = generic_xdp_install; |
6905 | if (xdp_op == xdp_chk) | ||
6906 | xdp_chk = generic_xdp_install; | ||
6878 | 6907 | ||
6879 | if (fd >= 0) { | 6908 | if (fd >= 0) { |
6880 | if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) { | 6909 | if (xdp_chk && __dev_xdp_attached(dev, xdp_chk)) |
6881 | memset(&xdp, 0, sizeof(xdp)); | 6910 | return -EEXIST; |
6882 | xdp.command = XDP_QUERY_PROG; | 6911 | if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && |
6883 | 6912 | __dev_xdp_attached(dev, xdp_op)) | |
6884 | err = xdp_op(dev, &xdp); | 6913 | return -EBUSY; |
6885 | if (err < 0) | ||
6886 | return err; | ||
6887 | if (xdp.prog_attached) | ||
6888 | return -EBUSY; | ||
6889 | } | ||
6890 | 6914 | ||
6891 | prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); | 6915 | prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); |
6892 | if (IS_ERR(prog)) | 6916 | if (IS_ERR(prog)) |
6893 | return PTR_ERR(prog); | 6917 | return PTR_ERR(prog); |
6894 | } | 6918 | } |
6895 | 6919 | ||
6896 | memset(&xdp, 0, sizeof(xdp)); | 6920 | err = dev_xdp_install(dev, xdp_op, extack, prog); |
6897 | xdp.command = XDP_SETUP_PROG; | ||
6898 | xdp.extack = extack; | ||
6899 | xdp.prog = prog; | ||
6900 | |||
6901 | err = xdp_op(dev, &xdp); | ||
6902 | if (err < 0 && prog) | 6921 | if (err < 0 && prog) |
6903 | bpf_prog_put(prog); | 6922 | bpf_prog_put(prog); |
6904 | 6923 | ||
diff --git a/net/core/dst.c b/net/core/dst.c index 960e503b5a52..6192f11beec9 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
151 | } | 151 | } |
152 | EXPORT_SYMBOL(dst_discard_out); | 152 | EXPORT_SYMBOL(dst_discard_out); |
153 | 153 | ||
154 | const u32 dst_default_metrics[RTAX_MAX + 1] = { | 154 | const struct dst_metrics dst_default_metrics = { |
155 | /* This initializer is needed to force linker to place this variable | 155 | /* This initializer is needed to force linker to place this variable |
156 | * into const section. Otherwise it might end into bss section. | 156 | * into const section. Otherwise it might end into bss section. |
157 | * We really want to avoid false sharing on this variable, and catch | 157 | * We really want to avoid false sharing on this variable, and catch |
158 | * any writes on it. | 158 | * any writes on it. |
159 | */ | 159 | */ |
160 | [RTAX_MAX] = 0xdeadbeef, | 160 | .refcnt = ATOMIC_INIT(1), |
161 | }; | 161 | }; |
162 | 162 | ||
163 | void dst_init(struct dst_entry *dst, struct dst_ops *ops, | 163 | void dst_init(struct dst_entry *dst, struct dst_ops *ops, |
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, | |||
169 | if (dev) | 169 | if (dev) |
170 | dev_hold(dev); | 170 | dev_hold(dev); |
171 | dst->ops = ops; | 171 | dst->ops = ops; |
172 | dst_init_metrics(dst, dst_default_metrics, true); | 172 | dst_init_metrics(dst, dst_default_metrics.metrics, true); |
173 | dst->expires = 0UL; | 173 | dst->expires = 0UL; |
174 | dst->path = dst; | 174 | dst->path = dst; |
175 | dst->from = NULL; | 175 | dst->from = NULL; |
@@ -314,25 +314,30 @@ EXPORT_SYMBOL(dst_release); | |||
314 | 314 | ||
315 | u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) | 315 | u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) |
316 | { | 316 | { |
317 | u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC); | 317 | struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC); |
318 | 318 | ||
319 | if (p) { | 319 | if (p) { |
320 | u32 *old_p = __DST_METRICS_PTR(old); | 320 | struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old); |
321 | unsigned long prev, new; | 321 | unsigned long prev, new; |
322 | 322 | ||
323 | memcpy(p, old_p, sizeof(u32) * RTAX_MAX); | 323 | atomic_set(&p->refcnt, 1); |
324 | memcpy(p->metrics, old_p->metrics, sizeof(p->metrics)); | ||
324 | 325 | ||
325 | new = (unsigned long) p; | 326 | new = (unsigned long) p; |
326 | prev = cmpxchg(&dst->_metrics, old, new); | 327 | prev = cmpxchg(&dst->_metrics, old, new); |
327 | 328 | ||
328 | if (prev != old) { | 329 | if (prev != old) { |
329 | kfree(p); | 330 | kfree(p); |
330 | p = __DST_METRICS_PTR(prev); | 331 | p = (struct dst_metrics *)__DST_METRICS_PTR(prev); |
331 | if (prev & DST_METRICS_READ_ONLY) | 332 | if (prev & DST_METRICS_READ_ONLY) |
332 | p = NULL; | 333 | p = NULL; |
334 | } else if (prev & DST_METRICS_REFCOUNTED) { | ||
335 | if (atomic_dec_and_test(&old_p->refcnt)) | ||
336 | kfree(old_p); | ||
333 | } | 337 | } |
334 | } | 338 | } |
335 | return p; | 339 | BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0); |
340 | return (u32 *)p; | ||
336 | } | 341 | } |
337 | EXPORT_SYMBOL(dst_cow_metrics_generic); | 342 | EXPORT_SYMBOL(dst_cow_metrics_generic); |
338 | 343 | ||
@@ -341,7 +346,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) | |||
341 | { | 346 | { |
342 | unsigned long prev, new; | 347 | unsigned long prev, new; |
343 | 348 | ||
344 | new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY; | 349 | new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY; |
345 | prev = cmpxchg(&dst->_metrics, old, new); | 350 | prev = cmpxchg(&dst->_metrics, old, new); |
346 | if (prev == old) | 351 | if (prev == old) |
347 | kfree(__DST_METRICS_PTR(old)); | 352 | kfree(__DST_METRICS_PTR(old)); |
diff --git a/net/core/filter.c b/net/core/filter.c index a253a6197e6b..a6bb95fa87b2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2281,6 +2281,7 @@ bool bpf_helper_changes_pkt_data(void *func) | |||
2281 | func == bpf_skb_change_head || | 2281 | func == bpf_skb_change_head || |
2282 | func == bpf_skb_change_tail || | 2282 | func == bpf_skb_change_tail || |
2283 | func == bpf_skb_pull_data || | 2283 | func == bpf_skb_pull_data || |
2284 | func == bpf_clone_redirect || | ||
2284 | func == bpf_l3_csum_replace || | 2285 | func == bpf_l3_csum_replace || |
2285 | func == bpf_l4_csum_replace || | 2286 | func == bpf_l4_csum_replace || |
2286 | func == bpf_xdp_adjust_head) | 2287 | func == bpf_xdp_adjust_head) |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 58b0bcc125b5..d274f81fcc2c 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1132,10 +1132,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1132 | lladdr = neigh->ha; | 1132 | lladdr = neigh->ha; |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | if (new & NUD_CONNECTED) | ||
1136 | neigh->confirmed = jiffies; | ||
1137 | neigh->updated = jiffies; | ||
1138 | |||
1139 | /* If entry was valid and address is not changed, | 1135 | /* If entry was valid and address is not changed, |
1140 | do not change entry state, if new one is STALE. | 1136 | do not change entry state, if new one is STALE. |
1141 | */ | 1137 | */ |
@@ -1157,6 +1153,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1157 | } | 1153 | } |
1158 | } | 1154 | } |
1159 | 1155 | ||
1156 | /* Update timestamps only once we know we will make a change to the | ||
1157 | * neighbour entry. Otherwise we risk to move the locktime window with | ||
1158 | * noop updates and ignore relevant ARP updates. | ||
1159 | */ | ||
1160 | if (new != old || lladdr != neigh->ha) { | ||
1161 | if (new & NUD_CONNECTED) | ||
1162 | neigh->confirmed = jiffies; | ||
1163 | neigh->updated = jiffies; | ||
1164 | } | ||
1165 | |||
1160 | if (new != old) { | 1166 | if (new != old) { |
1161 | neigh_del_timer(neigh); | 1167 | neigh_del_timer(neigh); |
1162 | if (new & NUD_PROBE) | 1168 | if (new & NUD_PROBE) |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 1934efd4a9d4..26bbfababff2 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -315,6 +315,25 @@ out_undo: | |||
315 | goto out; | 315 | goto out; |
316 | } | 316 | } |
317 | 317 | ||
318 | static int __net_init net_defaults_init_net(struct net *net) | ||
319 | { | ||
320 | net->core.sysctl_somaxconn = SOMAXCONN; | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static struct pernet_operations net_defaults_ops = { | ||
325 | .init = net_defaults_init_net, | ||
326 | }; | ||
327 | |||
328 | static __init int net_defaults_init(void) | ||
329 | { | ||
330 | if (register_pernet_subsys(&net_defaults_ops)) | ||
331 | panic("Cannot initialize net default settings"); | ||
332 | |||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | core_initcall(net_defaults_init); | ||
318 | 337 | ||
319 | #ifdef CONFIG_NET_NS | 338 | #ifdef CONFIG_NET_NS |
320 | static struct ucounts *inc_net_namespaces(struct user_namespace *ns) | 339 | static struct ucounts *inc_net_namespaces(struct user_namespace *ns) |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index bcb0f610ee42..9e2c0a7cb325 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -899,8 +899,7 @@ static size_t rtnl_port_size(const struct net_device *dev, | |||
899 | static size_t rtnl_xdp_size(void) | 899 | static size_t rtnl_xdp_size(void) |
900 | { | 900 | { |
901 | size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ | 901 | size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ |
902 | nla_total_size(1) + /* XDP_ATTACHED */ | 902 | nla_total_size(1); /* XDP_ATTACHED */ |
903 | nla_total_size(4); /* XDP_FLAGS */ | ||
904 | 903 | ||
905 | return xdp_size; | 904 | return xdp_size; |
906 | } | 905 | } |
@@ -1247,37 +1246,34 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) | |||
1247 | return 0; | 1246 | return 0; |
1248 | } | 1247 | } |
1249 | 1248 | ||
1249 | static u8 rtnl_xdp_attached_mode(struct net_device *dev) | ||
1250 | { | ||
1251 | const struct net_device_ops *ops = dev->netdev_ops; | ||
1252 | |||
1253 | ASSERT_RTNL(); | ||
1254 | |||
1255 | if (rcu_access_pointer(dev->xdp_prog)) | ||
1256 | return XDP_ATTACHED_SKB; | ||
1257 | if (ops->ndo_xdp && __dev_xdp_attached(dev, ops->ndo_xdp)) | ||
1258 | return XDP_ATTACHED_DRV; | ||
1259 | |||
1260 | return XDP_ATTACHED_NONE; | ||
1261 | } | ||
1262 | |||
1250 | static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) | 1263 | static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) |
1251 | { | 1264 | { |
1252 | struct nlattr *xdp; | 1265 | struct nlattr *xdp; |
1253 | u32 xdp_flags = 0; | ||
1254 | u8 val = 0; | ||
1255 | int err; | 1266 | int err; |
1256 | 1267 | ||
1257 | xdp = nla_nest_start(skb, IFLA_XDP); | 1268 | xdp = nla_nest_start(skb, IFLA_XDP); |
1258 | if (!xdp) | 1269 | if (!xdp) |
1259 | return -EMSGSIZE; | 1270 | return -EMSGSIZE; |
1260 | if (rcu_access_pointer(dev->xdp_prog)) { | 1271 | |
1261 | xdp_flags = XDP_FLAGS_SKB_MODE; | 1272 | err = nla_put_u8(skb, IFLA_XDP_ATTACHED, |
1262 | val = 1; | 1273 | rtnl_xdp_attached_mode(dev)); |
1263 | } else if (dev->netdev_ops->ndo_xdp) { | ||
1264 | struct netdev_xdp xdp_op = {}; | ||
1265 | |||
1266 | xdp_op.command = XDP_QUERY_PROG; | ||
1267 | err = dev->netdev_ops->ndo_xdp(dev, &xdp_op); | ||
1268 | if (err) | ||
1269 | goto err_cancel; | ||
1270 | val = xdp_op.prog_attached; | ||
1271 | } | ||
1272 | err = nla_put_u8(skb, IFLA_XDP_ATTACHED, val); | ||
1273 | if (err) | 1274 | if (err) |
1274 | goto err_cancel; | 1275 | goto err_cancel; |
1275 | 1276 | ||
1276 | if (xdp_flags) { | ||
1277 | err = nla_put_u32(skb, IFLA_XDP_FLAGS, xdp_flags); | ||
1278 | if (err) | ||
1279 | goto err_cancel; | ||
1280 | } | ||
1281 | nla_nest_end(skb, xdp); | 1277 | nla_nest_end(skb, xdp); |
1282 | return 0; | 1278 | return 0; |
1283 | 1279 | ||
@@ -1631,13 +1627,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
1631 | cb->nlh->nlmsg_seq, 0, | 1627 | cb->nlh->nlmsg_seq, 0, |
1632 | flags, | 1628 | flags, |
1633 | ext_filter_mask); | 1629 | ext_filter_mask); |
1634 | /* If we ran out of room on the first message, | ||
1635 | * we're in trouble | ||
1636 | */ | ||
1637 | WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); | ||
1638 | 1630 | ||
1639 | if (err < 0) | 1631 | if (err < 0) { |
1640 | goto out; | 1632 | if (likely(skb->len)) |
1633 | goto out; | ||
1634 | |||
1635 | goto out_err; | ||
1636 | } | ||
1641 | 1637 | ||
1642 | nl_dump_check_consistent(cb, nlmsg_hdr(skb)); | 1638 | nl_dump_check_consistent(cb, nlmsg_hdr(skb)); |
1643 | cont: | 1639 | cont: |
@@ -1645,10 +1641,12 @@ cont: | |||
1645 | } | 1641 | } |
1646 | } | 1642 | } |
1647 | out: | 1643 | out: |
1644 | err = skb->len; | ||
1645 | out_err: | ||
1648 | cb->args[1] = idx; | 1646 | cb->args[1] = idx; |
1649 | cb->args[0] = h; | 1647 | cb->args[0] = h; |
1650 | 1648 | ||
1651 | return skb->len; | 1649 | return err; |
1652 | } | 1650 | } |
1653 | 1651 | ||
1654 | int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, | 1652 | int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, |
@@ -2199,6 +2197,11 @@ static int do_setlink(const struct sk_buff *skb, | |||
2199 | err = -EINVAL; | 2197 | err = -EINVAL; |
2200 | goto errout; | 2198 | goto errout; |
2201 | } | 2199 | } |
2200 | if ((xdp_flags & XDP_FLAGS_SKB_MODE) && | ||
2201 | (xdp_flags & XDP_FLAGS_DRV_MODE)) { | ||
2202 | err = -EINVAL; | ||
2203 | goto errout; | ||
2204 | } | ||
2202 | } | 2205 | } |
2203 | 2206 | ||
2204 | if (xdp[IFLA_XDP_FD]) { | 2207 | if (xdp[IFLA_XDP_FD]) { |
@@ -3228,8 +3231,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
3228 | int err = 0; | 3231 | int err = 0; |
3229 | int fidx = 0; | 3232 | int fidx = 0; |
3230 | 3233 | ||
3231 | if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, | 3234 | err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, |
3232 | IFLA_MAX, ifla_policy, NULL) == 0) { | 3235 | IFLA_MAX, ifla_policy, NULL); |
3236 | if (err < 0) { | ||
3237 | return -EINVAL; | ||
3238 | } else if (err == 0) { | ||
3233 | if (tb[IFLA_MASTER]) | 3239 | if (tb[IFLA_MASTER]) |
3234 | br_idx = nla_get_u32(tb[IFLA_MASTER]); | 3240 | br_idx = nla_get_u32(tb[IFLA_MASTER]); |
3235 | } | 3241 | } |
@@ -3452,8 +3458,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | |||
3452 | err = br_dev->netdev_ops->ndo_bridge_getlink( | 3458 | err = br_dev->netdev_ops->ndo_bridge_getlink( |
3453 | skb, portid, seq, dev, | 3459 | skb, portid, seq, dev, |
3454 | filter_mask, NLM_F_MULTI); | 3460 | filter_mask, NLM_F_MULTI); |
3455 | if (err < 0 && err != -EOPNOTSUPP) | 3461 | if (err < 0 && err != -EOPNOTSUPP) { |
3456 | break; | 3462 | if (likely(skb->len)) |
3463 | break; | ||
3464 | |||
3465 | goto out_err; | ||
3466 | } | ||
3457 | } | 3467 | } |
3458 | idx++; | 3468 | idx++; |
3459 | } | 3469 | } |
@@ -3464,16 +3474,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | |||
3464 | seq, dev, | 3474 | seq, dev, |
3465 | filter_mask, | 3475 | filter_mask, |
3466 | NLM_F_MULTI); | 3476 | NLM_F_MULTI); |
3467 | if (err < 0 && err != -EOPNOTSUPP) | 3477 | if (err < 0 && err != -EOPNOTSUPP) { |
3468 | break; | 3478 | if (likely(skb->len)) |
3479 | break; | ||
3480 | |||
3481 | goto out_err; | ||
3482 | } | ||
3469 | } | 3483 | } |
3470 | idx++; | 3484 | idx++; |
3471 | } | 3485 | } |
3472 | } | 3486 | } |
3487 | err = skb->len; | ||
3488 | out_err: | ||
3473 | rcu_read_unlock(); | 3489 | rcu_read_unlock(); |
3474 | cb->args[0] = idx; | 3490 | cb->args[0] = idx; |
3475 | 3491 | ||
3476 | return skb->len; | 3492 | return err; |
3477 | } | 3493 | } |
3478 | 3494 | ||
3479 | static inline size_t bridge_nlmsg_size(void) | 3495 | static inline size_t bridge_nlmsg_size(void) |
diff --git a/net/core/sock.c b/net/core/sock.c index 79c6aee6af9b..727f924b7f91 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -139,10 +139,7 @@ | |||
139 | 139 | ||
140 | #include <trace/events/sock.h> | 140 | #include <trace/events/sock.h> |
141 | 141 | ||
142 | #ifdef CONFIG_INET | ||
143 | #include <net/tcp.h> | 142 | #include <net/tcp.h> |
144 | #endif | ||
145 | |||
146 | #include <net/busy_poll.h> | 143 | #include <net/busy_poll.h> |
147 | 144 | ||
148 | static DEFINE_MUTEX(proto_list_mutex); | 145 | static DEFINE_MUTEX(proto_list_mutex); |
@@ -1803,28 +1800,24 @@ EXPORT_SYMBOL(skb_set_owner_w); | |||
1803 | * delay queue. We want to allow the owner socket to send more | 1800 | * delay queue. We want to allow the owner socket to send more |
1804 | * packets, as if they were already TX completed by a typical driver. | 1801 | * packets, as if they were already TX completed by a typical driver. |
1805 | * But we also want to keep skb->sk set because some packet schedulers | 1802 | * But we also want to keep skb->sk set because some packet schedulers |
1806 | * rely on it (sch_fq for example). So we set skb->truesize to a small | 1803 | * rely on it (sch_fq for example). |
1807 | * amount (1) and decrease sk_wmem_alloc accordingly. | ||
1808 | */ | 1804 | */ |
1809 | void skb_orphan_partial(struct sk_buff *skb) | 1805 | void skb_orphan_partial(struct sk_buff *skb) |
1810 | { | 1806 | { |
1811 | /* If this skb is a TCP pure ACK or already went here, | 1807 | if (skb_is_tcp_pure_ack(skb)) |
1812 | * we have nothing to do. 2 is already a very small truesize. | ||
1813 | */ | ||
1814 | if (skb->truesize <= 2) | ||
1815 | return; | 1808 | return; |
1816 | 1809 | ||
1817 | /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc, | ||
1818 | * so we do not completely orphan skb, but transfert all | ||
1819 | * accounted bytes but one, to avoid unexpected reorders. | ||
1820 | */ | ||
1821 | if (skb->destructor == sock_wfree | 1810 | if (skb->destructor == sock_wfree |
1822 | #ifdef CONFIG_INET | 1811 | #ifdef CONFIG_INET |
1823 | || skb->destructor == tcp_wfree | 1812 | || skb->destructor == tcp_wfree |
1824 | #endif | 1813 | #endif |
1825 | ) { | 1814 | ) { |
1826 | atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); | 1815 | struct sock *sk = skb->sk; |
1827 | skb->truesize = 1; | 1816 | |
1817 | if (atomic_inc_not_zero(&sk->sk_refcnt)) { | ||
1818 | atomic_sub(skb->truesize, &sk->sk_wmem_alloc); | ||
1819 | skb->destructor = sock_efree; | ||
1820 | } | ||
1828 | } else { | 1821 | } else { |
1829 | skb_orphan(skb); | 1822 | skb_orphan(skb); |
1830 | } | 1823 | } |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index ea23254b2457..b7cd9aafe99e 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -479,8 +479,6 @@ static __net_init int sysctl_core_net_init(struct net *net) | |||
479 | { | 479 | { |
480 | struct ctl_table *tbl; | 480 | struct ctl_table *tbl; |
481 | 481 | ||
482 | net->core.sysctl_somaxconn = SOMAXCONN; | ||
483 | |||
484 | tbl = netns_core_table; | 482 | tbl = netns_core_table; |
485 | if (!net_eq(net, &init_net)) { | 483 | if (!net_eq(net, &init_net)) { |
486 | tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); | 484 | tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 840f14aaa016..992621172220 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -426,6 +426,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
426 | newsk->sk_backlog_rcv = dccp_v4_do_rcv; | 426 | newsk->sk_backlog_rcv = dccp_v4_do_rcv; |
427 | newnp->pktoptions = NULL; | 427 | newnp->pktoptions = NULL; |
428 | newnp->opt = NULL; | 428 | newnp->opt = NULL; |
429 | newnp->ipv6_mc_list = NULL; | ||
430 | newnp->ipv6_ac_list = NULL; | ||
431 | newnp->ipv6_fl_list = NULL; | ||
429 | newnp->mcast_oif = inet6_iif(skb); | 432 | newnp->mcast_oif = inet6_iif(skb); |
430 | newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; | 433 | newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; |
431 | 434 | ||
@@ -490,6 +493,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
490 | /* Clone RX bits */ | 493 | /* Clone RX bits */ |
491 | newnp->rxopt.all = np->rxopt.all; | 494 | newnp->rxopt.all = np->rxopt.all; |
492 | 495 | ||
496 | newnp->ipv6_mc_list = NULL; | ||
497 | newnp->ipv6_ac_list = NULL; | ||
498 | newnp->ipv6_fl_list = NULL; | ||
493 | newnp->pktoptions = NULL; | 499 | newnp->pktoptions = NULL; |
494 | newnp->opt = NULL; | 500 | newnp->opt = NULL; |
495 | newnp->mcast_oif = inet6_iif(skb); | 501 | newnp->mcast_oif = inet6_iif(skb); |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 0937b34c27ca..e9f3386a528b 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -641,6 +641,32 @@ void arp_xmit(struct sk_buff *skb) | |||
641 | } | 641 | } |
642 | EXPORT_SYMBOL(arp_xmit); | 642 | EXPORT_SYMBOL(arp_xmit); |
643 | 643 | ||
644 | static bool arp_is_garp(struct net *net, struct net_device *dev, | ||
645 | int *addr_type, __be16 ar_op, | ||
646 | __be32 sip, __be32 tip, | ||
647 | unsigned char *sha, unsigned char *tha) | ||
648 | { | ||
649 | bool is_garp = tip == sip; | ||
650 | |||
651 | /* Gratuitous ARP _replies_ also require target hwaddr to be | ||
652 | * the same as source. | ||
653 | */ | ||
654 | if (is_garp && ar_op == htons(ARPOP_REPLY)) | ||
655 | is_garp = | ||
656 | /* IPv4 over IEEE 1394 doesn't provide target | ||
657 | * hardware address field in its ARP payload. | ||
658 | */ | ||
659 | tha && | ||
660 | !memcmp(tha, sha, dev->addr_len); | ||
661 | |||
662 | if (is_garp) { | ||
663 | *addr_type = inet_addr_type_dev_table(net, dev, sip); | ||
664 | if (*addr_type != RTN_UNICAST) | ||
665 | is_garp = false; | ||
666 | } | ||
667 | return is_garp; | ||
668 | } | ||
669 | |||
644 | /* | 670 | /* |
645 | * Process an arp request. | 671 | * Process an arp request. |
646 | */ | 672 | */ |
@@ -653,6 +679,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
653 | unsigned char *arp_ptr; | 679 | unsigned char *arp_ptr; |
654 | struct rtable *rt; | 680 | struct rtable *rt; |
655 | unsigned char *sha; | 681 | unsigned char *sha; |
682 | unsigned char *tha = NULL; | ||
656 | __be32 sip, tip; | 683 | __be32 sip, tip; |
657 | u16 dev_type = dev->type; | 684 | u16 dev_type = dev->type; |
658 | int addr_type; | 685 | int addr_type; |
@@ -724,6 +751,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
724 | break; | 751 | break; |
725 | #endif | 752 | #endif |
726 | default: | 753 | default: |
754 | tha = arp_ptr; | ||
727 | arp_ptr += dev->addr_len; | 755 | arp_ptr += dev->addr_len; |
728 | } | 756 | } |
729 | memcpy(&tip, arp_ptr, 4); | 757 | memcpy(&tip, arp_ptr, 4); |
@@ -835,19 +863,25 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
835 | 863 | ||
836 | n = __neigh_lookup(&arp_tbl, &sip, dev, 0); | 864 | n = __neigh_lookup(&arp_tbl, &sip, dev, 0); |
837 | 865 | ||
838 | if (IN_DEV_ARP_ACCEPT(in_dev)) { | 866 | addr_type = -1; |
839 | unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip); | 867 | if (n || IN_DEV_ARP_ACCEPT(in_dev)) { |
868 | is_garp = arp_is_garp(net, dev, &addr_type, arp->ar_op, | ||
869 | sip, tip, sha, tha); | ||
870 | } | ||
840 | 871 | ||
872 | if (IN_DEV_ARP_ACCEPT(in_dev)) { | ||
841 | /* Unsolicited ARP is not accepted by default. | 873 | /* Unsolicited ARP is not accepted by default. |
842 | It is possible, that this option should be enabled for some | 874 | It is possible, that this option should be enabled for some |
843 | devices (strip is candidate) | 875 | devices (strip is candidate) |
844 | */ | 876 | */ |
845 | is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && | ||
846 | addr_type == RTN_UNICAST; | ||
847 | |||
848 | if (!n && | 877 | if (!n && |
849 | ((arp->ar_op == htons(ARPOP_REPLY) && | 878 | (is_garp || |
850 | addr_type == RTN_UNICAST) || is_garp)) | 879 | (arp->ar_op == htons(ARPOP_REPLY) && |
880 | (addr_type == RTN_UNICAST || | ||
881 | (addr_type < 0 && | ||
882 | /* postpone calculation to as late as possible */ | ||
883 | inet_addr_type_dev_table(net, dev, sip) == | ||
884 | RTN_UNICAST))))) | ||
851 | n = __neigh_lookup(&arp_tbl, &sip, dev, 1); | 885 | n = __neigh_lookup(&arp_tbl, &sip, dev, 1); |
852 | } | 886 | } |
853 | 887 | ||
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 65cc02bd82bc..93322f895eab 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -248,6 +248,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * | |||
248 | u8 *tail; | 248 | u8 *tail; |
249 | u8 *vaddr; | 249 | u8 *vaddr; |
250 | int nfrags; | 250 | int nfrags; |
251 | int esph_offset; | ||
251 | struct page *page; | 252 | struct page *page; |
252 | struct sk_buff *trailer; | 253 | struct sk_buff *trailer; |
253 | int tailen = esp->tailen; | 254 | int tailen = esp->tailen; |
@@ -313,11 +314,13 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * | |||
313 | } | 314 | } |
314 | 315 | ||
315 | cow: | 316 | cow: |
317 | esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb); | ||
318 | |||
316 | nfrags = skb_cow_data(skb, tailen, &trailer); | 319 | nfrags = skb_cow_data(skb, tailen, &trailer); |
317 | if (nfrags < 0) | 320 | if (nfrags < 0) |
318 | goto out; | 321 | goto out; |
319 | tail = skb_tail_pointer(trailer); | 322 | tail = skb_tail_pointer(trailer); |
320 | esp->esph = ip_esp_hdr(skb); | 323 | esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset); |
321 | 324 | ||
322 | skip_cow: | 325 | skip_cow: |
323 | esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); | 326 | esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 39bd1edee676..83e3ed258467 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -763,7 +763,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
763 | unsigned int e = 0, s_e; | 763 | unsigned int e = 0, s_e; |
764 | struct fib_table *tb; | 764 | struct fib_table *tb; |
765 | struct hlist_head *head; | 765 | struct hlist_head *head; |
766 | int dumped = 0; | 766 | int dumped = 0, err; |
767 | 767 | ||
768 | if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && | 768 | if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && |
769 | ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) | 769 | ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) |
@@ -783,20 +783,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
783 | if (dumped) | 783 | if (dumped) |
784 | memset(&cb->args[2], 0, sizeof(cb->args) - | 784 | memset(&cb->args[2], 0, sizeof(cb->args) - |
785 | 2 * sizeof(cb->args[0])); | 785 | 2 * sizeof(cb->args[0])); |
786 | if (fib_table_dump(tb, skb, cb) < 0) | 786 | err = fib_table_dump(tb, skb, cb); |
787 | goto out; | 787 | if (err < 0) { |
788 | if (likely(skb->len)) | ||
789 | goto out; | ||
790 | |||
791 | goto out_err; | ||
792 | } | ||
788 | dumped = 1; | 793 | dumped = 1; |
789 | next: | 794 | next: |
790 | e++; | 795 | e++; |
791 | } | 796 | } |
792 | } | 797 | } |
793 | out: | 798 | out: |
799 | err = skb->len; | ||
800 | out_err: | ||
794 | rcu_read_unlock(); | 801 | rcu_read_unlock(); |
795 | 802 | ||
796 | cb->args[1] = e; | 803 | cb->args[1] = e; |
797 | cb->args[0] = h; | 804 | cb->args[0] = h; |
798 | 805 | ||
799 | return skb->len; | 806 | return err; |
800 | } | 807 | } |
801 | 808 | ||
802 | /* Prepare and feed intra-kernel routing request. | 809 | /* Prepare and feed intra-kernel routing request. |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index da449ddb8cc1..ad9ad4aab5da 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -203,6 +203,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp) | |||
203 | static void free_fib_info_rcu(struct rcu_head *head) | 203 | static void free_fib_info_rcu(struct rcu_head *head) |
204 | { | 204 | { |
205 | struct fib_info *fi = container_of(head, struct fib_info, rcu); | 205 | struct fib_info *fi = container_of(head, struct fib_info, rcu); |
206 | struct dst_metrics *m; | ||
206 | 207 | ||
207 | change_nexthops(fi) { | 208 | change_nexthops(fi) { |
208 | if (nexthop_nh->nh_dev) | 209 | if (nexthop_nh->nh_dev) |
@@ -213,8 +214,9 @@ static void free_fib_info_rcu(struct rcu_head *head) | |||
213 | rt_fibinfo_free(&nexthop_nh->nh_rth_input); | 214 | rt_fibinfo_free(&nexthop_nh->nh_rth_input); |
214 | } endfor_nexthops(fi); | 215 | } endfor_nexthops(fi); |
215 | 216 | ||
216 | if (fi->fib_metrics != (u32 *) dst_default_metrics) | 217 | m = fi->fib_metrics; |
217 | kfree(fi->fib_metrics); | 218 | if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt)) |
219 | kfree(m); | ||
218 | kfree(fi); | 220 | kfree(fi); |
219 | } | 221 | } |
220 | 222 | ||
@@ -971,11 +973,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg) | |||
971 | val = 255; | 973 | val = 255; |
972 | if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) | 974 | if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) |
973 | return -EINVAL; | 975 | return -EINVAL; |
974 | fi->fib_metrics[type - 1] = val; | 976 | fi->fib_metrics->metrics[type - 1] = val; |
975 | } | 977 | } |
976 | 978 | ||
977 | if (ecn_ca) | 979 | if (ecn_ca) |
978 | fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; | 980 | fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; |
979 | 981 | ||
980 | return 0; | 982 | return 0; |
981 | } | 983 | } |
@@ -1033,11 +1035,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg) | |||
1033 | goto failure; | 1035 | goto failure; |
1034 | fib_info_cnt++; | 1036 | fib_info_cnt++; |
1035 | if (cfg->fc_mx) { | 1037 | if (cfg->fc_mx) { |
1036 | fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); | 1038 | fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); |
1037 | if (!fi->fib_metrics) | 1039 | if (!fi->fib_metrics) |
1038 | goto failure; | 1040 | goto failure; |
1041 | atomic_set(&fi->fib_metrics->refcnt, 1); | ||
1039 | } else | 1042 | } else |
1040 | fi->fib_metrics = (u32 *) dst_default_metrics; | 1043 | fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; |
1041 | 1044 | ||
1042 | fi->fib_net = net; | 1045 | fi->fib_net = net; |
1043 | fi->fib_protocol = cfg->fc_protocol; | 1046 | fi->fib_protocol = cfg->fc_protocol; |
@@ -1238,7 +1241,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, | |||
1238 | if (fi->fib_priority && | 1241 | if (fi->fib_priority && |
1239 | nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) | 1242 | nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) |
1240 | goto nla_put_failure; | 1243 | goto nla_put_failure; |
1241 | if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) | 1244 | if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0) |
1242 | goto nla_put_failure; | 1245 | goto nla_put_failure; |
1243 | 1246 | ||
1244 | if (fi->fib_prefsrc && | 1247 | if (fi->fib_prefsrc && |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 1201409ba1dc..51182ff2b441 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1983,6 +1983,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, | |||
1983 | 1983 | ||
1984 | /* rcu_read_lock is hold by caller */ | 1984 | /* rcu_read_lock is hold by caller */ |
1985 | hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { | 1985 | hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { |
1986 | int err; | ||
1987 | |||
1986 | if (i < s_i) { | 1988 | if (i < s_i) { |
1987 | i++; | 1989 | i++; |
1988 | continue; | 1990 | continue; |
@@ -1993,17 +1995,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, | |||
1993 | continue; | 1995 | continue; |
1994 | } | 1996 | } |
1995 | 1997 | ||
1996 | if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid, | 1998 | err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid, |
1997 | cb->nlh->nlmsg_seq, | 1999 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, |
1998 | RTM_NEWROUTE, | 2000 | tb->tb_id, fa->fa_type, |
1999 | tb->tb_id, | 2001 | xkey, KEYLENGTH - fa->fa_slen, |
2000 | fa->fa_type, | 2002 | fa->fa_tos, fa->fa_info, NLM_F_MULTI); |
2001 | xkey, | 2003 | if (err < 0) { |
2002 | KEYLENGTH - fa->fa_slen, | ||
2003 | fa->fa_tos, | ||
2004 | fa->fa_info, NLM_F_MULTI) < 0) { | ||
2005 | cb->args[4] = i; | 2004 | cb->args[4] = i; |
2006 | return -1; | 2005 | return err; |
2007 | } | 2006 | } |
2008 | i++; | 2007 | i++; |
2009 | } | 2008 | } |
@@ -2025,10 +2024,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, | |||
2025 | t_key key = cb->args[3]; | 2024 | t_key key = cb->args[3]; |
2026 | 2025 | ||
2027 | while ((l = leaf_walk_rcu(&tp, key)) != NULL) { | 2026 | while ((l = leaf_walk_rcu(&tp, key)) != NULL) { |
2028 | if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { | 2027 | int err; |
2028 | |||
2029 | err = fn_trie_dump_leaf(l, tb, skb, cb); | ||
2030 | if (err < 0) { | ||
2029 | cb->args[3] = key; | 2031 | cb->args[3] = key; |
2030 | cb->args[2] = count; | 2032 | cb->args[2] = count; |
2031 | return -1; | 2033 | return err; |
2032 | } | 2034 | } |
2033 | 2035 | ||
2034 | ++count; | 2036 | ++count; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 3a02d52ed50e..551de4d023a8 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -1980,6 +1980,20 @@ int ip_mr_input(struct sk_buff *skb) | |||
1980 | struct net *net = dev_net(skb->dev); | 1980 | struct net *net = dev_net(skb->dev); |
1981 | int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; | 1981 | int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; |
1982 | struct mr_table *mrt; | 1982 | struct mr_table *mrt; |
1983 | struct net_device *dev; | ||
1984 | |||
1985 | /* skb->dev passed in is the loX master dev for vrfs. | ||
1986 | * As there are no vifs associated with loopback devices, | ||
1987 | * get the proper interface that does have a vif associated with it. | ||
1988 | */ | ||
1989 | dev = skb->dev; | ||
1990 | if (netif_is_l3_master(skb->dev)) { | ||
1991 | dev = dev_get_by_index_rcu(net, IPCB(skb)->iif); | ||
1992 | if (!dev) { | ||
1993 | kfree_skb(skb); | ||
1994 | return -ENODEV; | ||
1995 | } | ||
1996 | } | ||
1983 | 1997 | ||
1984 | /* Packet is looped back after forward, it should not be | 1998 | /* Packet is looped back after forward, it should not be |
1985 | * forwarded second time, but still can be delivered locally. | 1999 | * forwarded second time, but still can be delivered locally. |
@@ -2017,7 +2031,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
2017 | /* already under rcu_read_lock() */ | 2031 | /* already under rcu_read_lock() */ |
2018 | cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); | 2032 | cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); |
2019 | if (!cache) { | 2033 | if (!cache) { |
2020 | int vif = ipmr_find_vif(mrt, skb->dev); | 2034 | int vif = ipmr_find_vif(mrt, dev); |
2021 | 2035 | ||
2022 | if (vif >= 0) | 2036 | if (vif >= 0) |
2023 | cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, | 2037 | cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, |
@@ -2037,7 +2051,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
2037 | } | 2051 | } |
2038 | 2052 | ||
2039 | read_lock(&mrt_lock); | 2053 | read_lock(&mrt_lock); |
2040 | vif = ipmr_find_vif(mrt, skb->dev); | 2054 | vif = ipmr_find_vif(mrt, dev); |
2041 | if (vif >= 0) { | 2055 | if (vif >= 0) { |
2042 | int err2 = ipmr_cache_unresolved(mrt, vif, skb); | 2056 | int err2 = ipmr_cache_unresolved(mrt, vif, skb); |
2043 | read_unlock(&mrt_lock); | 2057 | read_unlock(&mrt_lock); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 655d9eebe43e..6883b3d4ba8f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1385,8 +1385,12 @@ static void rt_add_uncached_list(struct rtable *rt) | |||
1385 | 1385 | ||
1386 | static void ipv4_dst_destroy(struct dst_entry *dst) | 1386 | static void ipv4_dst_destroy(struct dst_entry *dst) |
1387 | { | 1387 | { |
1388 | struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); | ||
1388 | struct rtable *rt = (struct rtable *) dst; | 1389 | struct rtable *rt = (struct rtable *) dst; |
1389 | 1390 | ||
1391 | if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt)) | ||
1392 | kfree(p); | ||
1393 | |||
1390 | if (!list_empty(&rt->rt_uncached)) { | 1394 | if (!list_empty(&rt->rt_uncached)) { |
1391 | struct uncached_list *ul = rt->rt_uncached_list; | 1395 | struct uncached_list *ul = rt->rt_uncached_list; |
1392 | 1396 | ||
@@ -1438,7 +1442,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, | |||
1438 | rt->rt_gateway = nh->nh_gw; | 1442 | rt->rt_gateway = nh->nh_gw; |
1439 | rt->rt_uses_gateway = 1; | 1443 | rt->rt_uses_gateway = 1; |
1440 | } | 1444 | } |
1441 | dst_init_metrics(&rt->dst, fi->fib_metrics, true); | 1445 | dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true); |
1446 | if (fi->fib_metrics != &dst_default_metrics) { | ||
1447 | rt->dst._metrics |= DST_METRICS_REFCOUNTED; | ||
1448 | atomic_inc(&fi->fib_metrics->refcnt); | ||
1449 | } | ||
1442 | #ifdef CONFIG_IP_ROUTE_CLASSID | 1450 | #ifdef CONFIG_IP_ROUTE_CLASSID |
1443 | rt->dst.tclassid = nh->nh_tclassid; | 1451 | rt->dst.tclassid = nh->nh_tclassid; |
1444 | #endif | 1452 | #endif |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1e4c76d2b827..59792d283ff8 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1084,9 +1084,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, | |||
1084 | { | 1084 | { |
1085 | struct tcp_sock *tp = tcp_sk(sk); | 1085 | struct tcp_sock *tp = tcp_sk(sk); |
1086 | struct inet_sock *inet = inet_sk(sk); | 1086 | struct inet_sock *inet = inet_sk(sk); |
1087 | struct sockaddr *uaddr = msg->msg_name; | ||
1087 | int err, flags; | 1088 | int err, flags; |
1088 | 1089 | ||
1089 | if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) | 1090 | if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) || |
1091 | (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && | ||
1092 | uaddr->sa_family == AF_UNSPEC)) | ||
1090 | return -EOPNOTSUPP; | 1093 | return -EOPNOTSUPP; |
1091 | if (tp->fastopen_req) | 1094 | if (tp->fastopen_req) |
1092 | return -EALREADY; /* Another Fast Open is in progress */ | 1095 | return -EALREADY; /* Another Fast Open is in progress */ |
@@ -1108,7 +1111,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, | |||
1108 | } | 1111 | } |
1109 | } | 1112 | } |
1110 | flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; | 1113 | flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; |
1111 | err = __inet_stream_connect(sk->sk_socket, msg->msg_name, | 1114 | err = __inet_stream_connect(sk->sk_socket, uaddr, |
1112 | msg->msg_namelen, flags, 1); | 1115 | msg->msg_namelen, flags, 1); |
1113 | /* fastopen_req could already be freed in __inet_stream_connect | 1116 | /* fastopen_req could already be freed in __inet_stream_connect |
1114 | * if the connection times out or gets rst | 1117 | * if the connection times out or gets rst |
@@ -2320,6 +2323,10 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2320 | tcp_set_ca_state(sk, TCP_CA_Open); | 2323 | tcp_set_ca_state(sk, TCP_CA_Open); |
2321 | tcp_clear_retrans(tp); | 2324 | tcp_clear_retrans(tp); |
2322 | inet_csk_delack_init(sk); | 2325 | inet_csk_delack_init(sk); |
2326 | /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 | ||
2327 | * issue in __tcp_select_window() | ||
2328 | */ | ||
2329 | icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; | ||
2323 | tcp_init_send_head(sk); | 2330 | tcp_init_send_head(sk); |
2324 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); | 2331 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); |
2325 | __sk_dst_reset(sk); | 2332 | __sk_dst_reset(sk); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5a3ad09e2786..174d4376baa5 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1179,13 +1179,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, | |||
1179 | */ | 1179 | */ |
1180 | if (pkt_len > mss) { | 1180 | if (pkt_len > mss) { |
1181 | unsigned int new_len = (pkt_len / mss) * mss; | 1181 | unsigned int new_len = (pkt_len / mss) * mss; |
1182 | if (!in_sack && new_len < pkt_len) { | 1182 | if (!in_sack && new_len < pkt_len) |
1183 | new_len += mss; | 1183 | new_len += mss; |
1184 | if (new_len >= skb->len) | ||
1185 | return 0; | ||
1186 | } | ||
1187 | pkt_len = new_len; | 1184 | pkt_len = new_len; |
1188 | } | 1185 | } |
1186 | |||
1187 | if (pkt_len >= skb->len && !in_sack) | ||
1188 | return 0; | ||
1189 | |||
1189 | err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); | 1190 | err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); |
1190 | if (err < 0) | 1191 | if (err < 0) |
1191 | return err; | 1192 | return err; |
@@ -3189,7 +3190,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
3189 | int delta; | 3190 | int delta; |
3190 | 3191 | ||
3191 | /* Non-retransmitted hole got filled? That's reordering */ | 3192 | /* Non-retransmitted hole got filled? That's reordering */ |
3192 | if (reord < prior_fackets) | 3193 | if (reord < prior_fackets && reord <= tp->fackets_out) |
3193 | tcp_update_reordering(sk, tp->fackets_out - reord, 0); | 3194 | tcp_update_reordering(sk, tp->fackets_out - reord, 0); |
3194 | 3195 | ||
3195 | delta = tcp_is_fack(tp) ? pkts_acked : | 3196 | delta = tcp_is_fack(tp) ? pkts_acked : |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ea6e4cff9faf..1d6219bf2d6b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1612,7 +1612,7 @@ static void udp_v4_rehash(struct sock *sk) | |||
1612 | udp_lib_rehash(sk, new_hash); | 1612 | udp_lib_rehash(sk, new_hash); |
1613 | } | 1613 | } |
1614 | 1614 | ||
1615 | int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1615 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1616 | { | 1616 | { |
1617 | int rc; | 1617 | int rc; |
1618 | 1618 | ||
@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL(udp_encap_enable); | |||
1657 | * Note that in the success and error cases, the skb is assumed to | 1657 | * Note that in the success and error cases, the skb is assumed to |
1658 | * have either been requeued or freed. | 1658 | * have either been requeued or freed. |
1659 | */ | 1659 | */ |
1660 | int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1660 | static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1661 | { | 1661 | { |
1662 | struct udp_sock *up = udp_sk(sk); | 1662 | struct udp_sock *up = udp_sk(sk); |
1663 | int is_udplite = IS_UDPLITE(sk); | 1663 | int is_udplite = IS_UDPLITE(sk); |
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index feb50a16398d..a8cf8c6fb60c 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h | |||
@@ -25,7 +25,6 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, | |||
25 | int flags, int *addr_len); | 25 | int flags, int *addr_len); |
26 | int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, | 26 | int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, |
27 | int flags); | 27 | int flags); |
28 | int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); | ||
29 | void udp_destroy_sock(struct sock *sk); | 28 | void udp_destroy_sock(struct sock *sk); |
30 | 29 | ||
31 | #ifdef CONFIG_PROC_FS | 30 | #ifdef CONFIG_PROC_FS |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8d297a79b568..6a4fb1e629fb 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1022,7 +1022,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, | |||
1022 | INIT_HLIST_NODE(&ifa->addr_lst); | 1022 | INIT_HLIST_NODE(&ifa->addr_lst); |
1023 | ifa->scope = scope; | 1023 | ifa->scope = scope; |
1024 | ifa->prefix_len = pfxlen; | 1024 | ifa->prefix_len = pfxlen; |
1025 | ifa->flags = flags | IFA_F_TENTATIVE; | 1025 | ifa->flags = flags; |
1026 | /* No need to add the TENTATIVE flag for addresses with NODAD */ | ||
1027 | if (!(flags & IFA_F_NODAD)) | ||
1028 | ifa->flags |= IFA_F_TENTATIVE; | ||
1026 | ifa->valid_lft = valid_lft; | 1029 | ifa->valid_lft = valid_lft; |
1027 | ifa->prefered_lft = prefered_lft; | 1030 | ifa->prefered_lft = prefered_lft; |
1028 | ifa->cstamp = ifa->tstamp = jiffies; | 1031 | ifa->cstamp = ifa->tstamp = jiffies; |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 8d128ba79b66..0c5b4caa1949 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -537,11 +537,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) | |||
537 | 537 | ||
538 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); | 538 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
539 | 539 | ||
540 | dsfield = ipv4_get_dsfield(iph); | ||
541 | |||
542 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) | 540 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) |
543 | fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) | 541 | dsfield = ipv4_get_dsfield(iph); |
544 | & IPV6_TCLASS_MASK; | 542 | else |
543 | dsfield = ip6_tclass(t->parms.flowinfo); | ||
545 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) | 544 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) |
546 | fl6.flowi6_mark = skb->mark; | 545 | fl6.flowi6_mark = skb->mark; |
547 | else | 546 | else |
@@ -598,9 +597,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) | |||
598 | 597 | ||
599 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); | 598 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
600 | 599 | ||
601 | dsfield = ipv6_get_dsfield(ipv6h); | ||
602 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) | 600 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) |
603 | fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK); | 601 | dsfield = ipv6_get_dsfield(ipv6h); |
602 | else | ||
603 | dsfield = ip6_tclass(t->parms.flowinfo); | ||
604 | |||
604 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) | 605 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) |
605 | fl6.flowlabel |= ip6_flowlabel(ipv6h); | 606 | fl6.flowlabel |= ip6_flowlabel(ipv6h); |
606 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) | 607 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) |
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 93e58a5e1837..280268f1dd7b 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
@@ -63,7 +63,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
63 | const struct net_offload *ops; | 63 | const struct net_offload *ops; |
64 | int proto; | 64 | int proto; |
65 | struct frag_hdr *fptr; | 65 | struct frag_hdr *fptr; |
66 | unsigned int unfrag_ip6hlen; | ||
67 | unsigned int payload_len; | 66 | unsigned int payload_len; |
68 | u8 *prevhdr; | 67 | u8 *prevhdr; |
69 | int offset = 0; | 68 | int offset = 0; |
@@ -116,8 +115,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
116 | skb->network_header = (u8 *)ipv6h - skb->head; | 115 | skb->network_header = (u8 *)ipv6h - skb->head; |
117 | 116 | ||
118 | if (udpfrag) { | 117 | if (udpfrag) { |
119 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | 118 | int err = ip6_find_1stfragopt(skb, &prevhdr); |
120 | fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); | 119 | if (err < 0) |
120 | return ERR_PTR(err); | ||
121 | fptr = (struct frag_hdr *)((u8 *)ipv6h + err); | ||
121 | fptr->frag_off = htons(offset); | 122 | fptr->frag_off = htons(offset); |
122 | if (skb->next) | 123 | if (skb->next) |
123 | fptr->frag_off |= htons(IP6_MF); | 124 | fptr->frag_off |= htons(IP6_MF); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 58f6288e9ba5..bf8a58a1c32d 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -597,7 +597,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
597 | int ptr, offset = 0, err = 0; | 597 | int ptr, offset = 0, err = 0; |
598 | u8 *prevhdr, nexthdr = 0; | 598 | u8 *prevhdr, nexthdr = 0; |
599 | 599 | ||
600 | hlen = ip6_find_1stfragopt(skb, &prevhdr); | 600 | err = ip6_find_1stfragopt(skb, &prevhdr); |
601 | if (err < 0) | ||
602 | goto fail; | ||
603 | hlen = err; | ||
601 | nexthdr = *prevhdr; | 604 | nexthdr = *prevhdr; |
602 | 605 | ||
603 | mtu = ip6_skb_dst_mtu(skb); | 606 | mtu = ip6_skb_dst_mtu(skb); |
@@ -1463,6 +1466,11 @@ alloc_new_skb: | |||
1463 | */ | 1466 | */ |
1464 | alloclen += sizeof(struct frag_hdr); | 1467 | alloclen += sizeof(struct frag_hdr); |
1465 | 1468 | ||
1469 | copy = datalen - transhdrlen - fraggap; | ||
1470 | if (copy < 0) { | ||
1471 | err = -EINVAL; | ||
1472 | goto error; | ||
1473 | } | ||
1466 | if (transhdrlen) { | 1474 | if (transhdrlen) { |
1467 | skb = sock_alloc_send_skb(sk, | 1475 | skb = sock_alloc_send_skb(sk, |
1468 | alloclen + hh_len, | 1476 | alloclen + hh_len, |
@@ -1512,13 +1520,9 @@ alloc_new_skb: | |||
1512 | data += fraggap; | 1520 | data += fraggap; |
1513 | pskb_trim_unique(skb_prev, maxfraglen); | 1521 | pskb_trim_unique(skb_prev, maxfraglen); |
1514 | } | 1522 | } |
1515 | copy = datalen - transhdrlen - fraggap; | 1523 | if (copy > 0 && |
1516 | 1524 | getfrag(from, data + transhdrlen, offset, | |
1517 | if (copy < 0) { | 1525 | copy, fraggap, skb) < 0) { |
1518 | err = -EINVAL; | ||
1519 | kfree_skb(skb); | ||
1520 | goto error; | ||
1521 | } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { | ||
1522 | err = -EFAULT; | 1526 | err = -EFAULT; |
1523 | kfree_skb(skb); | 1527 | kfree_skb(skb); |
1524 | goto error; | 1528 | goto error; |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 6eb2ae507500..7ae6c503f1ca 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1196,7 +1196,7 @@ route_lookup: | |||
1196 | skb_push(skb, sizeof(struct ipv6hdr)); | 1196 | skb_push(skb, sizeof(struct ipv6hdr)); |
1197 | skb_reset_network_header(skb); | 1197 | skb_reset_network_header(skb); |
1198 | ipv6h = ipv6_hdr(skb); | 1198 | ipv6h = ipv6_hdr(skb); |
1199 | ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), | 1199 | ip6_flow_hdr(ipv6h, dsfield, |
1200 | ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6)); | 1200 | ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6)); |
1201 | ipv6h->hop_limit = hop_limit; | 1201 | ipv6h->hop_limit = hop_limit; |
1202 | ipv6h->nexthdr = proto; | 1202 | ipv6h->nexthdr = proto; |
@@ -1231,8 +1231,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1231 | if (tproto != IPPROTO_IPIP && tproto != 0) | 1231 | if (tproto != IPPROTO_IPIP && tproto != 0) |
1232 | return -1; | 1232 | return -1; |
1233 | 1233 | ||
1234 | dsfield = ipv4_get_dsfield(iph); | ||
1235 | |||
1236 | if (t->parms.collect_md) { | 1234 | if (t->parms.collect_md) { |
1237 | struct ip_tunnel_info *tun_info; | 1235 | struct ip_tunnel_info *tun_info; |
1238 | const struct ip_tunnel_key *key; | 1236 | const struct ip_tunnel_key *key; |
@@ -1246,6 +1244,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1246 | fl6.flowi6_proto = IPPROTO_IPIP; | 1244 | fl6.flowi6_proto = IPPROTO_IPIP; |
1247 | fl6.daddr = key->u.ipv6.dst; | 1245 | fl6.daddr = key->u.ipv6.dst; |
1248 | fl6.flowlabel = key->label; | 1246 | fl6.flowlabel = key->label; |
1247 | dsfield = ip6_tclass(key->label); | ||
1249 | } else { | 1248 | } else { |
1250 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) | 1249 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) |
1251 | encap_limit = t->parms.encap_limit; | 1250 | encap_limit = t->parms.encap_limit; |
@@ -1254,8 +1253,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1254 | fl6.flowi6_proto = IPPROTO_IPIP; | 1253 | fl6.flowi6_proto = IPPROTO_IPIP; |
1255 | 1254 | ||
1256 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) | 1255 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) |
1257 | fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) | 1256 | dsfield = ipv4_get_dsfield(iph); |
1258 | & IPV6_TCLASS_MASK; | 1257 | else |
1258 | dsfield = ip6_tclass(t->parms.flowinfo); | ||
1259 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) | 1259 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) |
1260 | fl6.flowi6_mark = skb->mark; | 1260 | fl6.flowi6_mark = skb->mark; |
1261 | else | 1261 | else |
@@ -1267,6 +1267,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1267 | if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) | 1267 | if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) |
1268 | return -1; | 1268 | return -1; |
1269 | 1269 | ||
1270 | dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph)); | ||
1271 | |||
1270 | skb_set_inner_ipproto(skb, IPPROTO_IPIP); | 1272 | skb_set_inner_ipproto(skb, IPPROTO_IPIP); |
1271 | 1273 | ||
1272 | err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, | 1274 | err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, |
@@ -1300,8 +1302,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1300 | ip6_tnl_addr_conflict(t, ipv6h)) | 1302 | ip6_tnl_addr_conflict(t, ipv6h)) |
1301 | return -1; | 1303 | return -1; |
1302 | 1304 | ||
1303 | dsfield = ipv6_get_dsfield(ipv6h); | ||
1304 | |||
1305 | if (t->parms.collect_md) { | 1305 | if (t->parms.collect_md) { |
1306 | struct ip_tunnel_info *tun_info; | 1306 | struct ip_tunnel_info *tun_info; |
1307 | const struct ip_tunnel_key *key; | 1307 | const struct ip_tunnel_key *key; |
@@ -1315,6 +1315,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1315 | fl6.flowi6_proto = IPPROTO_IPV6; | 1315 | fl6.flowi6_proto = IPPROTO_IPV6; |
1316 | fl6.daddr = key->u.ipv6.dst; | 1316 | fl6.daddr = key->u.ipv6.dst; |
1317 | fl6.flowlabel = key->label; | 1317 | fl6.flowlabel = key->label; |
1318 | dsfield = ip6_tclass(key->label); | ||
1318 | } else { | 1319 | } else { |
1319 | offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); | 1320 | offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); |
1320 | /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ | 1321 | /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ |
@@ -1337,7 +1338,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1337 | fl6.flowi6_proto = IPPROTO_IPV6; | 1338 | fl6.flowi6_proto = IPPROTO_IPV6; |
1338 | 1339 | ||
1339 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) | 1340 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) |
1340 | fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK); | 1341 | dsfield = ipv6_get_dsfield(ipv6h); |
1342 | else | ||
1343 | dsfield = ip6_tclass(t->parms.flowinfo); | ||
1341 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) | 1344 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) |
1342 | fl6.flowlabel |= ip6_flowlabel(ipv6h); | 1345 | fl6.flowlabel |= ip6_flowlabel(ipv6h); |
1343 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) | 1346 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) |
@@ -1351,6 +1354,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1351 | if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) | 1354 | if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) |
1352 | return -1; | 1355 | return -1; |
1353 | 1356 | ||
1357 | dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h)); | ||
1358 | |||
1354 | skb_set_inner_ipproto(skb, IPPROTO_IPV6); | 1359 | skb_set_inner_ipproto(skb, IPPROTO_IPV6); |
1355 | 1360 | ||
1356 | err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, | 1361 | err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, |
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index cd4252346a32..e9065b8d3af8 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c | |||
@@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident); | |||
79 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) | 79 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) |
80 | { | 80 | { |
81 | u16 offset = sizeof(struct ipv6hdr); | 81 | u16 offset = sizeof(struct ipv6hdr); |
82 | struct ipv6_opt_hdr *exthdr = | ||
83 | (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); | ||
84 | unsigned int packet_len = skb_tail_pointer(skb) - | 82 | unsigned int packet_len = skb_tail_pointer(skb) - |
85 | skb_network_header(skb); | 83 | skb_network_header(skb); |
86 | int found_rhdr = 0; | 84 | int found_rhdr = 0; |
87 | *nexthdr = &ipv6_hdr(skb)->nexthdr; | 85 | *nexthdr = &ipv6_hdr(skb)->nexthdr; |
88 | 86 | ||
89 | while (offset + 1 <= packet_len) { | 87 | while (offset <= packet_len) { |
88 | struct ipv6_opt_hdr *exthdr; | ||
90 | 89 | ||
91 | switch (**nexthdr) { | 90 | switch (**nexthdr) { |
92 | 91 | ||
@@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) | |||
107 | return offset; | 106 | return offset; |
108 | } | 107 | } |
109 | 108 | ||
110 | offset += ipv6_optlen(exthdr); | 109 | if (offset + sizeof(struct ipv6_opt_hdr) > packet_len) |
111 | *nexthdr = &exthdr->nexthdr; | 110 | return -EINVAL; |
111 | |||
112 | exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + | 112 | exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + |
113 | offset); | 113 | offset); |
114 | offset += ipv6_optlen(exthdr); | ||
115 | *nexthdr = &exthdr->nexthdr; | ||
114 | } | 116 | } |
115 | 117 | ||
116 | return offset; | 118 | return -EINVAL; |
117 | } | 119 | } |
118 | EXPORT_SYMBOL(ip6_find_1stfragopt); | 120 | EXPORT_SYMBOL(ip6_find_1stfragopt); |
119 | 121 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7a8237acd210..4f4310a36a04 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1062,6 +1062,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
1062 | newtp->af_specific = &tcp_sock_ipv6_mapped_specific; | 1062 | newtp->af_specific = &tcp_sock_ipv6_mapped_specific; |
1063 | #endif | 1063 | #endif |
1064 | 1064 | ||
1065 | newnp->ipv6_mc_list = NULL; | ||
1065 | newnp->ipv6_ac_list = NULL; | 1066 | newnp->ipv6_ac_list = NULL; |
1066 | newnp->ipv6_fl_list = NULL; | 1067 | newnp->ipv6_fl_list = NULL; |
1067 | newnp->pktoptions = NULL; | 1068 | newnp->pktoptions = NULL; |
@@ -1131,6 +1132,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
1131 | First: no IPv4 options. | 1132 | First: no IPv4 options. |
1132 | */ | 1133 | */ |
1133 | newinet->inet_opt = NULL; | 1134 | newinet->inet_opt = NULL; |
1135 | newnp->ipv6_mc_list = NULL; | ||
1134 | newnp->ipv6_ac_list = NULL; | 1136 | newnp->ipv6_ac_list = NULL; |
1135 | newnp->ipv6_fl_list = NULL; | 1137 | newnp->ipv6_fl_list = NULL; |
1136 | 1138 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 04862abfe4ec..06ec39b79609 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -526,7 +526,7 @@ out: | |||
526 | return; | 526 | return; |
527 | } | 527 | } |
528 | 528 | ||
529 | int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 529 | static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
530 | { | 530 | { |
531 | int rc; | 531 | int rc; |
532 | 532 | ||
@@ -569,7 +569,7 @@ void udpv6_encap_enable(void) | |||
569 | } | 569 | } |
570 | EXPORT_SYMBOL(udpv6_encap_enable); | 570 | EXPORT_SYMBOL(udpv6_encap_enable); |
571 | 571 | ||
572 | int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 572 | static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
573 | { | 573 | { |
574 | struct udp_sock *up = udp_sk(sk); | 574 | struct udp_sock *up = udp_sk(sk); |
575 | int is_udplite = IS_UDPLITE(sk); | 575 | int is_udplite = IS_UDPLITE(sk); |
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index e78bdc76dcc3..f180b3d85e31 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h | |||
@@ -26,7 +26,6 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, | |||
26 | int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); | 26 | int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); |
27 | int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, | 27 | int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, |
28 | int flags, int *addr_len); | 28 | int flags, int *addr_len); |
29 | int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); | ||
30 | void udpv6_destroy_sock(struct sock *sk); | 29 | void udpv6_destroy_sock(struct sock *sk); |
31 | 30 | ||
32 | #ifdef CONFIG_PROC_FS | 31 | #ifdef CONFIG_PROC_FS |
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index ac858c480f2f..a2267f80febb 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c | |||
@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, | |||
29 | u8 frag_hdr_sz = sizeof(struct frag_hdr); | 29 | u8 frag_hdr_sz = sizeof(struct frag_hdr); |
30 | __wsum csum; | 30 | __wsum csum; |
31 | int tnl_hlen; | 31 | int tnl_hlen; |
32 | int err; | ||
32 | 33 | ||
33 | mss = skb_shinfo(skb)->gso_size; | 34 | mss = skb_shinfo(skb)->gso_size; |
34 | if (unlikely(skb->len <= mss)) | 35 | if (unlikely(skb->len <= mss)) |
@@ -90,7 +91,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, | |||
90 | /* Find the unfragmentable header and shift it left by frag_hdr_sz | 91 | /* Find the unfragmentable header and shift it left by frag_hdr_sz |
91 | * bytes to insert fragment header. | 92 | * bytes to insert fragment header. |
92 | */ | 93 | */ |
93 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | 94 | err = ip6_find_1stfragopt(skb, &prevhdr); |
95 | if (err < 0) | ||
96 | return ERR_PTR(err); | ||
97 | unfrag_ip6hlen = err; | ||
94 | nexthdr = *prevhdr; | 98 | nexthdr = *prevhdr; |
95 | *prevhdr = NEXTHDR_FRAGMENT; | 99 | *prevhdr = NEXTHDR_FRAGMENT; |
96 | unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + | 100 | unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + |
diff --git a/net/key/af_key.c b/net/key/af_key.c index c1950bb14735..512dc43d0ce6 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -3285,7 +3285,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, | |||
3285 | p += pol->sadb_x_policy_len*8; | 3285 | p += pol->sadb_x_policy_len*8; |
3286 | sec_ctx = (struct sadb_x_sec_ctx *)p; | 3286 | sec_ctx = (struct sadb_x_sec_ctx *)p; |
3287 | if (len < pol->sadb_x_policy_len*8 + | 3287 | if (len < pol->sadb_x_policy_len*8 + |
3288 | sec_ctx->sadb_x_sec_len) { | 3288 | sec_ctx->sadb_x_sec_len*8) { |
3289 | *dir = -EINVAL; | 3289 | *dir = -EINVAL; |
3290 | goto out; | 3290 | goto out; |
3291 | } | 3291 | } |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 8364fe5b59e4..c38d16f22d2a 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -311,6 +311,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) | |||
311 | int rc = -EINVAL; | 311 | int rc = -EINVAL; |
312 | 312 | ||
313 | dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); | 313 | dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); |
314 | |||
315 | lock_sock(sk); | ||
314 | if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) | 316 | if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) |
315 | goto out; | 317 | goto out; |
316 | rc = -EAFNOSUPPORT; | 318 | rc = -EAFNOSUPPORT; |
@@ -382,6 +384,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) | |||
382 | out_put: | 384 | out_put: |
383 | llc_sap_put(sap); | 385 | llc_sap_put(sap); |
384 | out: | 386 | out: |
387 | release_sock(sk); | ||
385 | return rc; | 388 | return rc; |
386 | } | 389 | } |
387 | 390 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 35f4c7d7a500..1f75280ba26c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2492,7 +2492,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
2492 | if (is_multicast_ether_addr(hdr->addr1)) { | 2492 | if (is_multicast_ether_addr(hdr->addr1)) { |
2493 | mpp_addr = hdr->addr3; | 2493 | mpp_addr = hdr->addr3; |
2494 | proxied_addr = mesh_hdr->eaddr1; | 2494 | proxied_addr = mesh_hdr->eaddr1; |
2495 | } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { | 2495 | } else if ((mesh_hdr->flags & MESH_FLAGS_AE) == |
2496 | MESH_FLAGS_AE_A5_A6) { | ||
2496 | /* has_a4 already checked in ieee80211_rx_mesh_check */ | 2497 | /* has_a4 already checked in ieee80211_rx_mesh_check */ |
2497 | mpp_addr = hdr->addr4; | 2498 | mpp_addr = hdr->addr4; |
2498 | proxied_addr = mesh_hdr->eaddr2; | 2499 | proxied_addr = mesh_hdr->eaddr2; |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index d2d7bdf1d510..ad99c1ceea6f 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -849,10 +849,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb, | |||
849 | { | 849 | { |
850 | unsigned int verdict = NF_DROP; | 850 | unsigned int verdict = NF_DROP; |
851 | 851 | ||
852 | if (IP_VS_FWD_METHOD(cp) != 0) { | 852 | if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) |
853 | pr_err("shouldn't reach here, because the box is on the " | 853 | goto ignore_cp; |
854 | "half connection in the tun/dr module.\n"); | ||
855 | } | ||
856 | 854 | ||
857 | /* Ensure the checksum is correct */ | 855 | /* Ensure the checksum is correct */ |
858 | if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { | 856 | if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { |
@@ -886,6 +884,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb, | |||
886 | ip_vs_notrack(skb); | 884 | ip_vs_notrack(skb); |
887 | else | 885 | else |
888 | ip_vs_update_conntrack(skb, cp, 0); | 886 | ip_vs_update_conntrack(skb, cp, 0); |
887 | |||
888 | ignore_cp: | ||
889 | verdict = NF_ACCEPT; | 889 | verdict = NF_ACCEPT; |
890 | 890 | ||
891 | out: | 891 | out: |
@@ -1385,8 +1385,11 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in | |||
1385 | */ | 1385 | */ |
1386 | cp = pp->conn_out_get(ipvs, af, skb, &iph); | 1386 | cp = pp->conn_out_get(ipvs, af, skb, &iph); |
1387 | 1387 | ||
1388 | if (likely(cp)) | 1388 | if (likely(cp)) { |
1389 | if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) | ||
1390 | goto ignore_cp; | ||
1389 | return handle_response(af, skb, pd, cp, &iph, hooknum); | 1391 | return handle_response(af, skb, pd, cp, &iph, hooknum); |
1392 | } | ||
1390 | 1393 | ||
1391 | /* Check for real-server-started requests */ | 1394 | /* Check for real-server-started requests */ |
1392 | if (atomic_read(&ipvs->conn_out_counter)) { | 1395 | if (atomic_read(&ipvs->conn_out_counter)) { |
@@ -1444,9 +1447,15 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in | |||
1444 | } | 1447 | } |
1445 | } | 1448 | } |
1446 | } | 1449 | } |
1450 | |||
1451 | out: | ||
1447 | IP_VS_DBG_PKT(12, af, pp, skb, iph.off, | 1452 | IP_VS_DBG_PKT(12, af, pp, skb, iph.off, |
1448 | "ip_vs_out: packet continues traversal as normal"); | 1453 | "ip_vs_out: packet continues traversal as normal"); |
1449 | return NF_ACCEPT; | 1454 | return NF_ACCEPT; |
1455 | |||
1456 | ignore_cp: | ||
1457 | __ip_vs_conn_put(cp); | ||
1458 | goto out; | ||
1450 | } | 1459 | } |
1451 | 1460 | ||
1452 | /* | 1461 | /* |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 3a60efa7799b..7f6100ca63be 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -174,6 +174,10 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum) | |||
174 | #endif | 174 | #endif |
175 | if (h != NULL && !try_module_get(h->me)) | 175 | if (h != NULL && !try_module_get(h->me)) |
176 | h = NULL; | 176 | h = NULL; |
177 | if (h != NULL && !refcount_inc_not_zero(&h->refcnt)) { | ||
178 | module_put(h->me); | ||
179 | h = NULL; | ||
180 | } | ||
177 | 181 | ||
178 | rcu_read_unlock(); | 182 | rcu_read_unlock(); |
179 | 183 | ||
@@ -181,6 +185,13 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum) | |||
181 | } | 185 | } |
182 | EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); | 186 | EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); |
183 | 187 | ||
188 | void nf_conntrack_helper_put(struct nf_conntrack_helper *helper) | ||
189 | { | ||
190 | refcount_dec(&helper->refcnt); | ||
191 | module_put(helper->me); | ||
192 | } | ||
193 | EXPORT_SYMBOL_GPL(nf_conntrack_helper_put); | ||
194 | |||
184 | struct nf_conn_help * | 195 | struct nf_conn_help * |
185 | nf_ct_helper_ext_add(struct nf_conn *ct, | 196 | nf_ct_helper_ext_add(struct nf_conn *ct, |
186 | struct nf_conntrack_helper *helper, gfp_t gfp) | 197 | struct nf_conntrack_helper *helper, gfp_t gfp) |
@@ -417,6 +428,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | |||
417 | } | 428 | } |
418 | } | 429 | } |
419 | } | 430 | } |
431 | refcount_set(&me->refcnt, 1); | ||
420 | hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); | 432 | hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); |
421 | nf_ct_helper_count++; | 433 | nf_ct_helper_count++; |
422 | out: | 434 | out: |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index dcf561b5c97a..9799a50bc604 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -45,6 +45,8 @@ | |||
45 | #include <net/netfilter/nf_conntrack_zones.h> | 45 | #include <net/netfilter/nf_conntrack_zones.h> |
46 | #include <net/netfilter/nf_conntrack_timestamp.h> | 46 | #include <net/netfilter/nf_conntrack_timestamp.h> |
47 | #include <net/netfilter/nf_conntrack_labels.h> | 47 | #include <net/netfilter/nf_conntrack_labels.h> |
48 | #include <net/netfilter/nf_conntrack_seqadj.h> | ||
49 | #include <net/netfilter/nf_conntrack_synproxy.h> | ||
48 | #ifdef CONFIG_NF_NAT_NEEDED | 50 | #ifdef CONFIG_NF_NAT_NEEDED |
49 | #include <net/netfilter/nf_nat_core.h> | 51 | #include <net/netfilter/nf_nat_core.h> |
50 | #include <net/netfilter/nf_nat_l4proto.h> | 52 | #include <net/netfilter/nf_nat_l4proto.h> |
@@ -1007,9 +1009,8 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = { | |||
1007 | 1009 | ||
1008 | static int | 1010 | static int |
1009 | ctnetlink_parse_tuple(const struct nlattr * const cda[], | 1011 | ctnetlink_parse_tuple(const struct nlattr * const cda[], |
1010 | struct nf_conntrack_tuple *tuple, | 1012 | struct nf_conntrack_tuple *tuple, u32 type, |
1011 | enum ctattr_type type, u_int8_t l3num, | 1013 | u_int8_t l3num, struct nf_conntrack_zone *zone) |
1012 | struct nf_conntrack_zone *zone) | ||
1013 | { | 1014 | { |
1014 | struct nlattr *tb[CTA_TUPLE_MAX+1]; | 1015 | struct nlattr *tb[CTA_TUPLE_MAX+1]; |
1015 | int err; | 1016 | int err; |
@@ -1828,6 +1829,8 @@ ctnetlink_create_conntrack(struct net *net, | |||
1828 | nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); | 1829 | nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); |
1829 | nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); | 1830 | nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); |
1830 | nf_ct_labels_ext_add(ct); | 1831 | nf_ct_labels_ext_add(ct); |
1832 | nfct_seqadj_ext_add(ct); | ||
1833 | nfct_synproxy_ext_add(ct); | ||
1831 | 1834 | ||
1832 | /* we must add conntrack extensions before confirmation. */ | 1835 | /* we must add conntrack extensions before confirmation. */ |
1833 | ct->status |= IPS_CONFIRMED; | 1836 | ct->status |= IPS_CONFIRMED; |
@@ -2447,7 +2450,7 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = { | |||
2447 | 2450 | ||
2448 | static int ctnetlink_exp_dump_tuple(struct sk_buff *skb, | 2451 | static int ctnetlink_exp_dump_tuple(struct sk_buff *skb, |
2449 | const struct nf_conntrack_tuple *tuple, | 2452 | const struct nf_conntrack_tuple *tuple, |
2450 | enum ctattr_expect type) | 2453 | u32 type) |
2451 | { | 2454 | { |
2452 | struct nlattr *nest_parms; | 2455 | struct nlattr *nest_parms; |
2453 | 2456 | ||
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index b48d6b5aae8a..ef0be325a0c6 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
@@ -409,6 +409,10 @@ nf_nat_setup_info(struct nf_conn *ct, | |||
409 | { | 409 | { |
410 | struct nf_conntrack_tuple curr_tuple, new_tuple; | 410 | struct nf_conntrack_tuple curr_tuple, new_tuple; |
411 | 411 | ||
412 | /* Can't setup nat info for confirmed ct. */ | ||
413 | if (nf_ct_is_confirmed(ct)) | ||
414 | return NF_ACCEPT; | ||
415 | |||
412 | NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC || | 416 | NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC || |
413 | maniptype == NF_NAT_MANIP_DST); | 417 | maniptype == NF_NAT_MANIP_DST); |
414 | BUG_ON(nf_nat_initialized(ct, maniptype)); | 418 | BUG_ON(nf_nat_initialized(ct, maniptype)); |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 559225029740..da314be0c048 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -3367,35 +3367,50 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx, | |||
3367 | return nf_tables_fill_setelem(args->skb, set, elem); | 3367 | return nf_tables_fill_setelem(args->skb, set, elem); |
3368 | } | 3368 | } |
3369 | 3369 | ||
3370 | struct nft_set_dump_ctx { | ||
3371 | const struct nft_set *set; | ||
3372 | struct nft_ctx ctx; | ||
3373 | }; | ||
3374 | |||
3370 | static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | 3375 | static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) |
3371 | { | 3376 | { |
3377 | struct nft_set_dump_ctx *dump_ctx = cb->data; | ||
3372 | struct net *net = sock_net(skb->sk); | 3378 | struct net *net = sock_net(skb->sk); |
3373 | u8 genmask = nft_genmask_cur(net); | 3379 | struct nft_af_info *afi; |
3380 | struct nft_table *table; | ||
3374 | struct nft_set *set; | 3381 | struct nft_set *set; |
3375 | struct nft_set_dump_args args; | 3382 | struct nft_set_dump_args args; |
3376 | struct nft_ctx ctx; | 3383 | bool set_found = false; |
3377 | struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1]; | ||
3378 | struct nfgenmsg *nfmsg; | 3384 | struct nfgenmsg *nfmsg; |
3379 | struct nlmsghdr *nlh; | 3385 | struct nlmsghdr *nlh; |
3380 | struct nlattr *nest; | 3386 | struct nlattr *nest; |
3381 | u32 portid, seq; | 3387 | u32 portid, seq; |
3382 | int event, err; | 3388 | int event; |
3383 | 3389 | ||
3384 | err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla, | 3390 | rcu_read_lock(); |
3385 | NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy, | 3391 | list_for_each_entry_rcu(afi, &net->nft.af_info, list) { |
3386 | NULL); | 3392 | if (afi != dump_ctx->ctx.afi) |
3387 | if (err < 0) | 3393 | continue; |
3388 | return err; | ||
3389 | 3394 | ||
3390 | err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, | 3395 | list_for_each_entry_rcu(table, &afi->tables, list) { |
3391 | (void *)nla, genmask); | 3396 | if (table != dump_ctx->ctx.table) |
3392 | if (err < 0) | 3397 | continue; |
3393 | return err; | ||
3394 | 3398 | ||
3395 | set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], | 3399 | list_for_each_entry_rcu(set, &table->sets, list) { |
3396 | genmask); | 3400 | if (set == dump_ctx->set) { |
3397 | if (IS_ERR(set)) | 3401 | set_found = true; |
3398 | return PTR_ERR(set); | 3402 | break; |
3403 | } | ||
3404 | } | ||
3405 | break; | ||
3406 | } | ||
3407 | break; | ||
3408 | } | ||
3409 | |||
3410 | if (!set_found) { | ||
3411 | rcu_read_unlock(); | ||
3412 | return -ENOENT; | ||
3413 | } | ||
3399 | 3414 | ||
3400 | event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM); | 3415 | event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM); |
3401 | portid = NETLINK_CB(cb->skb).portid; | 3416 | portid = NETLINK_CB(cb->skb).portid; |
@@ -3407,11 +3422,11 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | |||
3407 | goto nla_put_failure; | 3422 | goto nla_put_failure; |
3408 | 3423 | ||
3409 | nfmsg = nlmsg_data(nlh); | 3424 | nfmsg = nlmsg_data(nlh); |
3410 | nfmsg->nfgen_family = ctx.afi->family; | 3425 | nfmsg->nfgen_family = afi->family; |
3411 | nfmsg->version = NFNETLINK_V0; | 3426 | nfmsg->version = NFNETLINK_V0; |
3412 | nfmsg->res_id = htons(ctx.net->nft.base_seq & 0xffff); | 3427 | nfmsg->res_id = htons(net->nft.base_seq & 0xffff); |
3413 | 3428 | ||
3414 | if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name)) | 3429 | if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name)) |
3415 | goto nla_put_failure; | 3430 | goto nla_put_failure; |
3416 | if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name)) | 3431 | if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name)) |
3417 | goto nla_put_failure; | 3432 | goto nla_put_failure; |
@@ -3422,12 +3437,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | |||
3422 | 3437 | ||
3423 | args.cb = cb; | 3438 | args.cb = cb; |
3424 | args.skb = skb; | 3439 | args.skb = skb; |
3425 | args.iter.genmask = nft_genmask_cur(ctx.net); | 3440 | args.iter.genmask = nft_genmask_cur(net); |
3426 | args.iter.skip = cb->args[0]; | 3441 | args.iter.skip = cb->args[0]; |
3427 | args.iter.count = 0; | 3442 | args.iter.count = 0; |
3428 | args.iter.err = 0; | 3443 | args.iter.err = 0; |
3429 | args.iter.fn = nf_tables_dump_setelem; | 3444 | args.iter.fn = nf_tables_dump_setelem; |
3430 | set->ops->walk(&ctx, set, &args.iter); | 3445 | set->ops->walk(&dump_ctx->ctx, set, &args.iter); |
3446 | rcu_read_unlock(); | ||
3431 | 3447 | ||
3432 | nla_nest_end(skb, nest); | 3448 | nla_nest_end(skb, nest); |
3433 | nlmsg_end(skb, nlh); | 3449 | nlmsg_end(skb, nlh); |
@@ -3441,9 +3457,16 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | |||
3441 | return skb->len; | 3457 | return skb->len; |
3442 | 3458 | ||
3443 | nla_put_failure: | 3459 | nla_put_failure: |
3460 | rcu_read_unlock(); | ||
3444 | return -ENOSPC; | 3461 | return -ENOSPC; |
3445 | } | 3462 | } |
3446 | 3463 | ||
3464 | static int nf_tables_dump_set_done(struct netlink_callback *cb) | ||
3465 | { | ||
3466 | kfree(cb->data); | ||
3467 | return 0; | ||
3468 | } | ||
3469 | |||
3447 | static int nf_tables_getsetelem(struct net *net, struct sock *nlsk, | 3470 | static int nf_tables_getsetelem(struct net *net, struct sock *nlsk, |
3448 | struct sk_buff *skb, const struct nlmsghdr *nlh, | 3471 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
3449 | const struct nlattr * const nla[]) | 3472 | const struct nlattr * const nla[]) |
@@ -3465,7 +3488,18 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk, | |||
3465 | if (nlh->nlmsg_flags & NLM_F_DUMP) { | 3488 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
3466 | struct netlink_dump_control c = { | 3489 | struct netlink_dump_control c = { |
3467 | .dump = nf_tables_dump_set, | 3490 | .dump = nf_tables_dump_set, |
3491 | .done = nf_tables_dump_set_done, | ||
3468 | }; | 3492 | }; |
3493 | struct nft_set_dump_ctx *dump_ctx; | ||
3494 | |||
3495 | dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_KERNEL); | ||
3496 | if (!dump_ctx) | ||
3497 | return -ENOMEM; | ||
3498 | |||
3499 | dump_ctx->set = set; | ||
3500 | dump_ctx->ctx = ctx; | ||
3501 | |||
3502 | c.data = dump_ctx; | ||
3469 | return netlink_dump_start(nlsk, skb, nlh, &c); | 3503 | return netlink_dump_start(nlsk, skb, nlh, &c); |
3470 | } | 3504 | } |
3471 | return -EOPNOTSUPP; | 3505 | return -EOPNOTSUPP; |
@@ -3593,9 +3627,9 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, | |||
3593 | { | 3627 | { |
3594 | struct nft_set_ext *ext = nft_set_elem_ext(set, elem); | 3628 | struct nft_set_ext *ext = nft_set_elem_ext(set, elem); |
3595 | 3629 | ||
3596 | nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); | 3630 | nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE); |
3597 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) | 3631 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) |
3598 | nft_data_uninit(nft_set_ext_data(ext), set->dtype); | 3632 | nft_data_release(nft_set_ext_data(ext), set->dtype); |
3599 | if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) | 3633 | if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) |
3600 | nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); | 3634 | nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); |
3601 | if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) | 3635 | if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) |
@@ -3604,6 +3638,18 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, | |||
3604 | } | 3638 | } |
3605 | EXPORT_SYMBOL_GPL(nft_set_elem_destroy); | 3639 | EXPORT_SYMBOL_GPL(nft_set_elem_destroy); |
3606 | 3640 | ||
3641 | /* Only called from commit path, nft_set_elem_deactivate() already deals with | ||
3642 | * the refcounting from the preparation phase. | ||
3643 | */ | ||
3644 | static void nf_tables_set_elem_destroy(const struct nft_set *set, void *elem) | ||
3645 | { | ||
3646 | struct nft_set_ext *ext = nft_set_elem_ext(set, elem); | ||
3647 | |||
3648 | if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) | ||
3649 | nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); | ||
3650 | kfree(elem); | ||
3651 | } | ||
3652 | |||
3607 | static int nft_setelem_parse_flags(const struct nft_set *set, | 3653 | static int nft_setelem_parse_flags(const struct nft_set *set, |
3608 | const struct nlattr *attr, u32 *flags) | 3654 | const struct nlattr *attr, u32 *flags) |
3609 | { | 3655 | { |
@@ -3815,9 +3861,9 @@ err4: | |||
3815 | kfree(elem.priv); | 3861 | kfree(elem.priv); |
3816 | err3: | 3862 | err3: |
3817 | if (nla[NFTA_SET_ELEM_DATA] != NULL) | 3863 | if (nla[NFTA_SET_ELEM_DATA] != NULL) |
3818 | nft_data_uninit(&data, d2.type); | 3864 | nft_data_release(&data, d2.type); |
3819 | err2: | 3865 | err2: |
3820 | nft_data_uninit(&elem.key.val, d1.type); | 3866 | nft_data_release(&elem.key.val, d1.type); |
3821 | err1: | 3867 | err1: |
3822 | return err; | 3868 | return err; |
3823 | } | 3869 | } |
@@ -3862,6 +3908,53 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, | |||
3862 | return err; | 3908 | return err; |
3863 | } | 3909 | } |
3864 | 3910 | ||
3911 | /** | ||
3912 | * nft_data_hold - hold a nft_data item | ||
3913 | * | ||
3914 | * @data: struct nft_data to release | ||
3915 | * @type: type of data | ||
3916 | * | ||
3917 | * Hold a nft_data item. NFT_DATA_VALUE types can be silently discarded, | ||
3918 | * NFT_DATA_VERDICT bumps the reference to chains in case of NFT_JUMP and | ||
3919 | * NFT_GOTO verdicts. This function must be called on active data objects | ||
3920 | * from the second phase of the commit protocol. | ||
3921 | */ | ||
3922 | static void nft_data_hold(const struct nft_data *data, enum nft_data_types type) | ||
3923 | { | ||
3924 | if (type == NFT_DATA_VERDICT) { | ||
3925 | switch (data->verdict.code) { | ||
3926 | case NFT_JUMP: | ||
3927 | case NFT_GOTO: | ||
3928 | data->verdict.chain->use++; | ||
3929 | break; | ||
3930 | } | ||
3931 | } | ||
3932 | } | ||
3933 | |||
3934 | static void nft_set_elem_activate(const struct net *net, | ||
3935 | const struct nft_set *set, | ||
3936 | struct nft_set_elem *elem) | ||
3937 | { | ||
3938 | const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); | ||
3939 | |||
3940 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) | ||
3941 | nft_data_hold(nft_set_ext_data(ext), set->dtype); | ||
3942 | if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) | ||
3943 | (*nft_set_ext_obj(ext))->use++; | ||
3944 | } | ||
3945 | |||
3946 | static void nft_set_elem_deactivate(const struct net *net, | ||
3947 | const struct nft_set *set, | ||
3948 | struct nft_set_elem *elem) | ||
3949 | { | ||
3950 | const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); | ||
3951 | |||
3952 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) | ||
3953 | nft_data_release(nft_set_ext_data(ext), set->dtype); | ||
3954 | if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) | ||
3955 | (*nft_set_ext_obj(ext))->use--; | ||
3956 | } | ||
3957 | |||
3865 | static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, | 3958 | static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, |
3866 | const struct nlattr *attr) | 3959 | const struct nlattr *attr) |
3867 | { | 3960 | { |
@@ -3927,6 +4020,8 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, | |||
3927 | kfree(elem.priv); | 4020 | kfree(elem.priv); |
3928 | elem.priv = priv; | 4021 | elem.priv = priv; |
3929 | 4022 | ||
4023 | nft_set_elem_deactivate(ctx->net, set, &elem); | ||
4024 | |||
3930 | nft_trans_elem(trans) = elem; | 4025 | nft_trans_elem(trans) = elem; |
3931 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); | 4026 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); |
3932 | return 0; | 4027 | return 0; |
@@ -3936,7 +4031,7 @@ err4: | |||
3936 | err3: | 4031 | err3: |
3937 | kfree(elem.priv); | 4032 | kfree(elem.priv); |
3938 | err2: | 4033 | err2: |
3939 | nft_data_uninit(&elem.key.val, desc.type); | 4034 | nft_data_release(&elem.key.val, desc.type); |
3940 | err1: | 4035 | err1: |
3941 | return err; | 4036 | return err; |
3942 | } | 4037 | } |
@@ -4743,8 +4838,8 @@ static void nf_tables_commit_release(struct nft_trans *trans) | |||
4743 | nft_set_destroy(nft_trans_set(trans)); | 4838 | nft_set_destroy(nft_trans_set(trans)); |
4744 | break; | 4839 | break; |
4745 | case NFT_MSG_DELSETELEM: | 4840 | case NFT_MSG_DELSETELEM: |
4746 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 4841 | nf_tables_set_elem_destroy(nft_trans_elem_set(trans), |
4747 | nft_trans_elem(trans).priv, true); | 4842 | nft_trans_elem(trans).priv); |
4748 | break; | 4843 | break; |
4749 | case NFT_MSG_DELOBJ: | 4844 | case NFT_MSG_DELOBJ: |
4750 | nft_obj_destroy(nft_trans_obj(trans)); | 4845 | nft_obj_destroy(nft_trans_obj(trans)); |
@@ -4979,6 +5074,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) | |||
4979 | case NFT_MSG_DELSETELEM: | 5074 | case NFT_MSG_DELSETELEM: |
4980 | te = (struct nft_trans_elem *)trans->data; | 5075 | te = (struct nft_trans_elem *)trans->data; |
4981 | 5076 | ||
5077 | nft_set_elem_activate(net, te->set, &te->elem); | ||
4982 | te->set->ops->activate(net, te->set, &te->elem); | 5078 | te->set->ops->activate(net, te->set, &te->elem); |
4983 | te->set->ndeact--; | 5079 | te->set->ndeact--; |
4984 | 5080 | ||
@@ -5464,7 +5560,7 @@ int nft_data_init(const struct nft_ctx *ctx, | |||
5464 | EXPORT_SYMBOL_GPL(nft_data_init); | 5560 | EXPORT_SYMBOL_GPL(nft_data_init); |
5465 | 5561 | ||
5466 | /** | 5562 | /** |
5467 | * nft_data_uninit - release a nft_data item | 5563 | * nft_data_release - release a nft_data item |
5468 | * | 5564 | * |
5469 | * @data: struct nft_data to release | 5565 | * @data: struct nft_data to release |
5470 | * @type: type of data | 5566 | * @type: type of data |
@@ -5472,7 +5568,7 @@ EXPORT_SYMBOL_GPL(nft_data_init); | |||
5472 | * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded, | 5568 | * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded, |
5473 | * all others need to be released by calling this function. | 5569 | * all others need to be released by calling this function. |
5474 | */ | 5570 | */ |
5475 | void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) | 5571 | void nft_data_release(const struct nft_data *data, enum nft_data_types type) |
5476 | { | 5572 | { |
5477 | if (type < NFT_DATA_VERDICT) | 5573 | if (type < NFT_DATA_VERDICT) |
5478 | return; | 5574 | return; |
@@ -5483,7 +5579,7 @@ void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) | |||
5483 | WARN_ON(1); | 5579 | WARN_ON(1); |
5484 | } | 5580 | } |
5485 | } | 5581 | } |
5486 | EXPORT_SYMBOL_GPL(nft_data_uninit); | 5582 | EXPORT_SYMBOL_GPL(nft_data_release); |
5487 | 5583 | ||
5488 | int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, | 5584 | int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, |
5489 | enum nft_data_types type, unsigned int len) | 5585 | enum nft_data_types type, unsigned int len) |
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 950bf6eadc65..be678a323598 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
@@ -686,6 +686,7 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, | |||
686 | tuple_set = true; | 686 | tuple_set = true; |
687 | } | 687 | } |
688 | 688 | ||
689 | ret = -ENOENT; | ||
689 | list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { | 690 | list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { |
690 | cur = &nlcth->helper; | 691 | cur = &nlcth->helper; |
691 | j++; | 692 | j++; |
@@ -699,16 +700,20 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, | |||
699 | tuple.dst.protonum != cur->tuple.dst.protonum)) | 700 | tuple.dst.protonum != cur->tuple.dst.protonum)) |
700 | continue; | 701 | continue; |
701 | 702 | ||
702 | found = true; | 703 | if (refcount_dec_if_one(&cur->refcnt)) { |
703 | nf_conntrack_helper_unregister(cur); | 704 | found = true; |
704 | kfree(cur->expect_policy); | 705 | nf_conntrack_helper_unregister(cur); |
706 | kfree(cur->expect_policy); | ||
705 | 707 | ||
706 | list_del(&nlcth->list); | 708 | list_del(&nlcth->list); |
707 | kfree(nlcth); | 709 | kfree(nlcth); |
710 | } else { | ||
711 | ret = -EBUSY; | ||
712 | } | ||
708 | } | 713 | } |
709 | 714 | ||
710 | /* Make sure we return success if we flush and there is no helpers */ | 715 | /* Make sure we return success if we flush and there is no helpers */ |
711 | return (found || j == 0) ? 0 : -ENOENT; | 716 | return (found || j == 0) ? 0 : ret; |
712 | } | 717 | } |
713 | 718 | ||
714 | static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = { | 719 | static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = { |
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c index 877d9acd91ef..fff8073e2a56 100644 --- a/net/netfilter/nft_bitwise.c +++ b/net/netfilter/nft_bitwise.c | |||
@@ -83,17 +83,26 @@ static int nft_bitwise_init(const struct nft_ctx *ctx, | |||
83 | tb[NFTA_BITWISE_MASK]); | 83 | tb[NFTA_BITWISE_MASK]); |
84 | if (err < 0) | 84 | if (err < 0) |
85 | return err; | 85 | return err; |
86 | if (d1.len != priv->len) | 86 | if (d1.len != priv->len) { |
87 | return -EINVAL; | 87 | err = -EINVAL; |
88 | goto err1; | ||
89 | } | ||
88 | 90 | ||
89 | err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2, | 91 | err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2, |
90 | tb[NFTA_BITWISE_XOR]); | 92 | tb[NFTA_BITWISE_XOR]); |
91 | if (err < 0) | 93 | if (err < 0) |
92 | return err; | 94 | goto err1; |
93 | if (d2.len != priv->len) | 95 | if (d2.len != priv->len) { |
94 | return -EINVAL; | 96 | err = -EINVAL; |
97 | goto err2; | ||
98 | } | ||
95 | 99 | ||
96 | return 0; | 100 | return 0; |
101 | err2: | ||
102 | nft_data_release(&priv->xor, d2.type); | ||
103 | err1: | ||
104 | nft_data_release(&priv->mask, d1.type); | ||
105 | return err; | ||
97 | } | 106 | } |
98 | 107 | ||
99 | static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr) | 108 | static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr) |
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index 2b96effeadc1..c2945eb3397c 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c | |||
@@ -201,10 +201,18 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) | |||
201 | if (err < 0) | 201 | if (err < 0) |
202 | return ERR_PTR(err); | 202 | return ERR_PTR(err); |
203 | 203 | ||
204 | if (desc.type != NFT_DATA_VALUE) { | ||
205 | err = -EINVAL; | ||
206 | goto err1; | ||
207 | } | ||
208 | |||
204 | if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ) | 209 | if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ) |
205 | return &nft_cmp_fast_ops; | 210 | return &nft_cmp_fast_ops; |
206 | else | 211 | |
207 | return &nft_cmp_ops; | 212 | return &nft_cmp_ops; |
213 | err1: | ||
214 | nft_data_release(&data, desc.type); | ||
215 | return ERR_PTR(-EINVAL); | ||
208 | } | 216 | } |
209 | 217 | ||
210 | struct nft_expr_type nft_cmp_type __read_mostly = { | 218 | struct nft_expr_type nft_cmp_type __read_mostly = { |
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index a34ceb38fc55..1678e9e75e8e 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c | |||
@@ -826,9 +826,9 @@ static void nft_ct_helper_obj_destroy(struct nft_object *obj) | |||
826 | struct nft_ct_helper_obj *priv = nft_obj_data(obj); | 826 | struct nft_ct_helper_obj *priv = nft_obj_data(obj); |
827 | 827 | ||
828 | if (priv->helper4) | 828 | if (priv->helper4) |
829 | module_put(priv->helper4->me); | 829 | nf_conntrack_helper_put(priv->helper4); |
830 | if (priv->helper6) | 830 | if (priv->helper6) |
831 | module_put(priv->helper6->me); | 831 | nf_conntrack_helper_put(priv->helper6); |
832 | } | 832 | } |
833 | 833 | ||
834 | static void nft_ct_helper_obj_eval(struct nft_object *obj, | 834 | static void nft_ct_helper_obj_eval(struct nft_object *obj, |
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 728baf88295a..4717d7796927 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c | |||
@@ -65,7 +65,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx, | |||
65 | return 0; | 65 | return 0; |
66 | 66 | ||
67 | err1: | 67 | err1: |
68 | nft_data_uninit(&priv->data, desc.type); | 68 | nft_data_release(&priv->data, desc.type); |
69 | return err; | 69 | return err; |
70 | } | 70 | } |
71 | 71 | ||
@@ -73,7 +73,8 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, | |||
73 | const struct nft_expr *expr) | 73 | const struct nft_expr *expr) |
74 | { | 74 | { |
75 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); | 75 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); |
76 | return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg)); | 76 | |
77 | return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); | ||
77 | } | 78 | } |
78 | 79 | ||
79 | static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr) | 80 | static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr) |
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c index 9edc74eedc10..cedb96c3619f 100644 --- a/net/netfilter/nft_range.c +++ b/net/netfilter/nft_range.c | |||
@@ -102,9 +102,9 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr | |||
102 | priv->len = desc_from.len; | 102 | priv->len = desc_from.len; |
103 | return 0; | 103 | return 0; |
104 | err2: | 104 | err2: |
105 | nft_data_uninit(&priv->data_to, desc_to.type); | 105 | nft_data_release(&priv->data_to, desc_to.type); |
106 | err1: | 106 | err1: |
107 | nft_data_uninit(&priv->data_from, desc_from.type); | 107 | nft_data_release(&priv->data_from, desc_from.type); |
108 | return err; | 108 | return err; |
109 | } | 109 | } |
110 | 110 | ||
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 8ec086b6b56b..3d3a6df4ce70 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c | |||
@@ -222,7 +222,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set, | |||
222 | struct nft_set_elem elem; | 222 | struct nft_set_elem elem; |
223 | int err; | 223 | int err; |
224 | 224 | ||
225 | err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); | 225 | err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC); |
226 | iter->err = err; | 226 | iter->err = err; |
227 | if (err) | 227 | if (err) |
228 | return; | 228 | return; |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 8876b7da6884..1770c1d9b37f 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -283,28 +283,30 @@ static int xt_obj_to_user(u16 __user *psize, u16 size, | |||
283 | &U->u.user.revision, K->u.kernel.TYPE->revision) | 283 | &U->u.user.revision, K->u.kernel.TYPE->revision) |
284 | 284 | ||
285 | int xt_data_to_user(void __user *dst, const void *src, | 285 | int xt_data_to_user(void __user *dst, const void *src, |
286 | int usersize, int size) | 286 | int usersize, int size, int aligned_size) |
287 | { | 287 | { |
288 | usersize = usersize ? : size; | 288 | usersize = usersize ? : size; |
289 | if (copy_to_user(dst, src, usersize)) | 289 | if (copy_to_user(dst, src, usersize)) |
290 | return -EFAULT; | 290 | return -EFAULT; |
291 | if (usersize != size && clear_user(dst + usersize, size - usersize)) | 291 | if (usersize != aligned_size && |
292 | clear_user(dst + usersize, aligned_size - usersize)) | ||
292 | return -EFAULT; | 293 | return -EFAULT; |
293 | 294 | ||
294 | return 0; | 295 | return 0; |
295 | } | 296 | } |
296 | EXPORT_SYMBOL_GPL(xt_data_to_user); | 297 | EXPORT_SYMBOL_GPL(xt_data_to_user); |
297 | 298 | ||
298 | #define XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ | 299 | #define XT_DATA_TO_USER(U, K, TYPE) \ |
299 | xt_data_to_user(U->data, K->data, \ | 300 | xt_data_to_user(U->data, K->data, \ |
300 | K->u.kernel.TYPE->usersize, \ | 301 | K->u.kernel.TYPE->usersize, \ |
301 | C_SIZE ? : K->u.kernel.TYPE->TYPE##size) | 302 | K->u.kernel.TYPE->TYPE##size, \ |
303 | XT_ALIGN(K->u.kernel.TYPE->TYPE##size)) | ||
302 | 304 | ||
303 | int xt_match_to_user(const struct xt_entry_match *m, | 305 | int xt_match_to_user(const struct xt_entry_match *m, |
304 | struct xt_entry_match __user *u) | 306 | struct xt_entry_match __user *u) |
305 | { | 307 | { |
306 | return XT_OBJ_TO_USER(u, m, match, 0) || | 308 | return XT_OBJ_TO_USER(u, m, match, 0) || |
307 | XT_DATA_TO_USER(u, m, match, 0); | 309 | XT_DATA_TO_USER(u, m, match); |
308 | } | 310 | } |
309 | EXPORT_SYMBOL_GPL(xt_match_to_user); | 311 | EXPORT_SYMBOL_GPL(xt_match_to_user); |
310 | 312 | ||
@@ -312,7 +314,7 @@ int xt_target_to_user(const struct xt_entry_target *t, | |||
312 | struct xt_entry_target __user *u) | 314 | struct xt_entry_target __user *u) |
313 | { | 315 | { |
314 | return XT_OBJ_TO_USER(u, t, target, 0) || | 316 | return XT_OBJ_TO_USER(u, t, target, 0) || |
315 | XT_DATA_TO_USER(u, t, target, 0); | 317 | XT_DATA_TO_USER(u, t, target); |
316 | } | 318 | } |
317 | EXPORT_SYMBOL_GPL(xt_target_to_user); | 319 | EXPORT_SYMBOL_GPL(xt_target_to_user); |
318 | 320 | ||
@@ -611,6 +613,12 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, | |||
611 | } | 613 | } |
612 | EXPORT_SYMBOL_GPL(xt_compat_match_from_user); | 614 | EXPORT_SYMBOL_GPL(xt_compat_match_from_user); |
613 | 615 | ||
616 | #define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ | ||
617 | xt_data_to_user(U->data, K->data, \ | ||
618 | K->u.kernel.TYPE->usersize, \ | ||
619 | C_SIZE, \ | ||
620 | COMPAT_XT_ALIGN(C_SIZE)) | ||
621 | |||
614 | int xt_compat_match_to_user(const struct xt_entry_match *m, | 622 | int xt_compat_match_to_user(const struct xt_entry_match *m, |
615 | void __user **dstptr, unsigned int *size) | 623 | void __user **dstptr, unsigned int *size) |
616 | { | 624 | { |
@@ -626,7 +634,7 @@ int xt_compat_match_to_user(const struct xt_entry_match *m, | |||
626 | if (match->compat_to_user((void __user *)cm->data, m->data)) | 634 | if (match->compat_to_user((void __user *)cm->data, m->data)) |
627 | return -EFAULT; | 635 | return -EFAULT; |
628 | } else { | 636 | } else { |
629 | if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm))) | 637 | if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm))) |
630 | return -EFAULT; | 638 | return -EFAULT; |
631 | } | 639 | } |
632 | 640 | ||
@@ -972,7 +980,7 @@ int xt_compat_target_to_user(const struct xt_entry_target *t, | |||
972 | if (target->compat_to_user((void __user *)ct->data, t->data)) | 980 | if (target->compat_to_user((void __user *)ct->data, t->data)) |
973 | return -EFAULT; | 981 | return -EFAULT; |
974 | } else { | 982 | } else { |
975 | if (XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct))) | 983 | if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct))) |
976 | return -EFAULT; | 984 | return -EFAULT; |
977 | } | 985 | } |
978 | 986 | ||
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index bb7ad82dcd56..623ef37de886 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c | |||
@@ -96,7 +96,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name, | |||
96 | 96 | ||
97 | help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL); | 97 | help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL); |
98 | if (help == NULL) { | 98 | if (help == NULL) { |
99 | module_put(helper->me); | 99 | nf_conntrack_helper_put(helper); |
100 | return -ENOMEM; | 100 | return -ENOMEM; |
101 | } | 101 | } |
102 | 102 | ||
@@ -263,7 +263,7 @@ out: | |||
263 | err4: | 263 | err4: |
264 | help = nfct_help(ct); | 264 | help = nfct_help(ct); |
265 | if (help) | 265 | if (help) |
266 | module_put(help->helper->me); | 266 | nf_conntrack_helper_put(help->helper); |
267 | err3: | 267 | err3: |
268 | nf_ct_tmpl_free(ct); | 268 | nf_ct_tmpl_free(ct); |
269 | err2: | 269 | err2: |
@@ -346,7 +346,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par, | |||
346 | if (ct) { | 346 | if (ct) { |
347 | help = nfct_help(ct); | 347 | help = nfct_help(ct); |
348 | if (help) | 348 | if (help) |
349 | module_put(help->helper->me); | 349 | nf_conntrack_helper_put(help->helper); |
350 | 350 | ||
351 | nf_ct_netns_put(par->net, par->family); | 351 | nf_ct_netns_put(par->net, par->family); |
352 | 352 | ||
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index bf602e33c40a..08679ebb3068 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
@@ -1123,7 +1123,7 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name, | |||
1123 | 1123 | ||
1124 | help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL); | 1124 | help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL); |
1125 | if (!help) { | 1125 | if (!help) { |
1126 | module_put(helper->me); | 1126 | nf_conntrack_helper_put(helper); |
1127 | return -ENOMEM; | 1127 | return -ENOMEM; |
1128 | } | 1128 | } |
1129 | 1129 | ||
@@ -1584,7 +1584,7 @@ void ovs_ct_free_action(const struct nlattr *a) | |||
1584 | static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) | 1584 | static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) |
1585 | { | 1585 | { |
1586 | if (ct_info->helper) | 1586 | if (ct_info->helper) |
1587 | module_put(ct_info->helper->me); | 1587 | nf_conntrack_helper_put(ct_info->helper); |
1588 | if (ct_info->ct) | 1588 | if (ct_info->ct) |
1589 | nf_ct_tmpl_free(ct_info->ct); | 1589 | nf_ct_tmpl_free(ct_info->ct); |
1590 | } | 1590 | } |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f4001763134d..e3eeed19cc7a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2658,13 +2658,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
2658 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); | 2658 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); |
2659 | } | 2659 | } |
2660 | 2660 | ||
2661 | sockc.tsflags = po->sk.sk_tsflags; | ||
2662 | if (msg->msg_controllen) { | ||
2663 | err = sock_cmsg_send(&po->sk, msg, &sockc); | ||
2664 | if (unlikely(err)) | ||
2665 | goto out; | ||
2666 | } | ||
2667 | |||
2668 | err = -ENXIO; | 2661 | err = -ENXIO; |
2669 | if (unlikely(dev == NULL)) | 2662 | if (unlikely(dev == NULL)) |
2670 | goto out; | 2663 | goto out; |
@@ -2672,6 +2665,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
2672 | if (unlikely(!(dev->flags & IFF_UP))) | 2665 | if (unlikely(!(dev->flags & IFF_UP))) |
2673 | goto out_put; | 2666 | goto out_put; |
2674 | 2667 | ||
2668 | sockc.tsflags = po->sk.sk_tsflags; | ||
2669 | if (msg->msg_controllen) { | ||
2670 | err = sock_cmsg_send(&po->sk, msg, &sockc); | ||
2671 | if (unlikely(err)) | ||
2672 | goto out_put; | ||
2673 | } | ||
2674 | |||
2675 | if (po->sk.sk_socket->type == SOCK_RAW) | 2675 | if (po->sk.sk_socket->type == SOCK_RAW) |
2676 | reserve = dev->hard_header_len; | 2676 | reserve = dev->hard_header_len; |
2677 | size_max = po->tx_ring.frame_size | 2677 | size_max = po->tx_ring.frame_size |
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index dee469fed967..51859b8edd7e 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c | |||
@@ -203,7 +203,6 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, | |||
203 | 203 | ||
204 | *arg = (unsigned long) head; | 204 | *arg = (unsigned long) head; |
205 | rcu_assign_pointer(tp->root, new); | 205 | rcu_assign_pointer(tp->root, new); |
206 | call_rcu(&head->rcu, mall_destroy_rcu); | ||
207 | return 0; | 206 | return 0; |
208 | 207 | ||
209 | err_replace_hw_filter: | 208 | err_replace_hw_filter: |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index bbe57d57b67f..e88342fde1bc 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -1831,6 +1831,12 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, | |||
1831 | if (!qdisc_dev(root)) | 1831 | if (!qdisc_dev(root)) |
1832 | return 0; | 1832 | return 0; |
1833 | 1833 | ||
1834 | if (tcm->tcm_parent) { | ||
1835 | q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent)); | ||
1836 | if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) | ||
1837 | return -1; | ||
1838 | return 0; | ||
1839 | } | ||
1834 | hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { | 1840 | hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { |
1835 | if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) | 1841 | if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) |
1836 | return -1; | 1842 | return -1; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index a9708da28eb5..95238284c422 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1176,7 +1176,9 @@ void sctp_assoc_update(struct sctp_association *asoc, | |||
1176 | 1176 | ||
1177 | asoc->ctsn_ack_point = asoc->next_tsn - 1; | 1177 | asoc->ctsn_ack_point = asoc->next_tsn - 1; |
1178 | asoc->adv_peer_ack_point = asoc->ctsn_ack_point; | 1178 | asoc->adv_peer_ack_point = asoc->ctsn_ack_point; |
1179 | if (!asoc->stream) { | 1179 | |
1180 | if (sctp_state(asoc, COOKIE_WAIT)) { | ||
1181 | sctp_stream_free(asoc->stream); | ||
1180 | asoc->stream = new->stream; | 1182 | asoc->stream = new->stream; |
1181 | new->stream = NULL; | 1183 | new->stream = NULL; |
1182 | } | 1184 | } |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 0e06a278d2a9..ba9ad32fc447 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -473,15 +473,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, | |||
473 | struct sctp_association **app, | 473 | struct sctp_association **app, |
474 | struct sctp_transport **tpp) | 474 | struct sctp_transport **tpp) |
475 | { | 475 | { |
476 | struct sctp_init_chunk *chunkhdr, _chunkhdr; | ||
476 | union sctp_addr saddr; | 477 | union sctp_addr saddr; |
477 | union sctp_addr daddr; | 478 | union sctp_addr daddr; |
478 | struct sctp_af *af; | 479 | struct sctp_af *af; |
479 | struct sock *sk = NULL; | 480 | struct sock *sk = NULL; |
480 | struct sctp_association *asoc; | 481 | struct sctp_association *asoc; |
481 | struct sctp_transport *transport = NULL; | 482 | struct sctp_transport *transport = NULL; |
482 | struct sctp_init_chunk *chunkhdr; | ||
483 | __u32 vtag = ntohl(sctphdr->vtag); | 483 | __u32 vtag = ntohl(sctphdr->vtag); |
484 | int len = skb->len - ((void *)sctphdr - (void *)skb->data); | ||
485 | 484 | ||
486 | *app = NULL; *tpp = NULL; | 485 | *app = NULL; *tpp = NULL; |
487 | 486 | ||
@@ -516,13 +515,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, | |||
516 | * discard the packet. | 515 | * discard the packet. |
517 | */ | 516 | */ |
518 | if (vtag == 0) { | 517 | if (vtag == 0) { |
519 | chunkhdr = (void *)sctphdr + sizeof(struct sctphdr); | 518 | /* chunk header + first 4 octets of init header */ |
520 | if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) | 519 | chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) + |
521 | + sizeof(__be32) || | 520 | sizeof(struct sctphdr), |
521 | sizeof(struct sctp_chunkhdr) + | ||
522 | sizeof(__be32), &_chunkhdr); | ||
523 | if (!chunkhdr || | ||
522 | chunkhdr->chunk_hdr.type != SCTP_CID_INIT || | 524 | chunkhdr->chunk_hdr.type != SCTP_CID_INIT || |
523 | ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { | 525 | ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) |
524 | goto out; | 526 | goto out; |
525 | } | 527 | |
526 | } else if (vtag != asoc->c.peer_vtag) { | 528 | } else if (vtag != asoc->c.peer_vtag) { |
527 | goto out; | 529 | goto out; |
528 | } | 530 | } |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 961ee59f696a..f5b45b8b8b16 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -240,12 +240,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
240 | struct sctp_bind_addr *bp; | 240 | struct sctp_bind_addr *bp; |
241 | struct ipv6_pinfo *np = inet6_sk(sk); | 241 | struct ipv6_pinfo *np = inet6_sk(sk); |
242 | struct sctp_sockaddr_entry *laddr; | 242 | struct sctp_sockaddr_entry *laddr; |
243 | union sctp_addr *baddr = NULL; | ||
244 | union sctp_addr *daddr = &t->ipaddr; | 243 | union sctp_addr *daddr = &t->ipaddr; |
245 | union sctp_addr dst_saddr; | 244 | union sctp_addr dst_saddr; |
246 | struct in6_addr *final_p, final; | 245 | struct in6_addr *final_p, final; |
247 | __u8 matchlen = 0; | 246 | __u8 matchlen = 0; |
248 | __u8 bmatchlen; | ||
249 | sctp_scope_t scope; | 247 | sctp_scope_t scope; |
250 | 248 | ||
251 | memset(fl6, 0, sizeof(struct flowi6)); | 249 | memset(fl6, 0, sizeof(struct flowi6)); |
@@ -312,23 +310,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
312 | */ | 310 | */ |
313 | rcu_read_lock(); | 311 | rcu_read_lock(); |
314 | list_for_each_entry_rcu(laddr, &bp->address_list, list) { | 312 | list_for_each_entry_rcu(laddr, &bp->address_list, list) { |
315 | if (!laddr->valid) | 313 | struct dst_entry *bdst; |
314 | __u8 bmatchlen; | ||
315 | |||
316 | if (!laddr->valid || | ||
317 | laddr->state != SCTP_ADDR_SRC || | ||
318 | laddr->a.sa.sa_family != AF_INET6 || | ||
319 | scope > sctp_scope(&laddr->a)) | ||
316 | continue; | 320 | continue; |
317 | if ((laddr->state == SCTP_ADDR_SRC) && | 321 | |
318 | (laddr->a.sa.sa_family == AF_INET6) && | 322 | fl6->saddr = laddr->a.v6.sin6_addr; |
319 | (scope <= sctp_scope(&laddr->a))) { | 323 | fl6->fl6_sport = laddr->a.v6.sin6_port; |
320 | bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); | ||
321 | if (!baddr || (matchlen < bmatchlen)) { | ||
322 | baddr = &laddr->a; | ||
323 | matchlen = bmatchlen; | ||
324 | } | ||
325 | } | ||
326 | } | ||
327 | if (baddr) { | ||
328 | fl6->saddr = baddr->v6.sin6_addr; | ||
329 | fl6->fl6_sport = baddr->v6.sin6_port; | ||
330 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); | 324 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); |
331 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); | 325 | bdst = ip6_dst_lookup_flow(sk, fl6, final_p); |
326 | |||
327 | if (!IS_ERR(bdst) && | ||
328 | ipv6_chk_addr(dev_net(bdst->dev), | ||
329 | &laddr->a.v6.sin6_addr, bdst->dev, 1)) { | ||
330 | if (!IS_ERR_OR_NULL(dst)) | ||
331 | dst_release(dst); | ||
332 | dst = bdst; | ||
333 | break; | ||
334 | } | ||
335 | |||
336 | bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); | ||
337 | if (matchlen > bmatchlen) | ||
338 | continue; | ||
339 | |||
340 | if (!IS_ERR_OR_NULL(dst)) | ||
341 | dst_release(dst); | ||
342 | dst = bdst; | ||
343 | matchlen = bmatchlen; | ||
332 | } | 344 | } |
333 | rcu_read_unlock(); | 345 | rcu_read_unlock(); |
334 | 346 | ||
@@ -665,6 +677,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, | |||
665 | newnp = inet6_sk(newsk); | 677 | newnp = inet6_sk(newsk); |
666 | 678 | ||
667 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); | 679 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
680 | newnp->ipv6_mc_list = NULL; | ||
681 | newnp->ipv6_ac_list = NULL; | ||
682 | newnp->ipv6_fl_list = NULL; | ||
668 | 683 | ||
669 | rcu_read_lock(); | 684 | rcu_read_lock(); |
670 | opt = rcu_dereference(np->opt); | 685 | opt = rcu_dereference(np->opt); |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 8a08f13469c4..92e332e17391 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2454,16 +2454,11 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, | |||
2454 | * stream sequence number shall be set to 0. | 2454 | * stream sequence number shall be set to 0. |
2455 | */ | 2455 | */ |
2456 | 2456 | ||
2457 | /* Allocate storage for the negotiated streams if it is not a temporary | 2457 | if (sctp_stream_init(asoc, gfp)) |
2458 | * association. | 2458 | goto clean_up; |
2459 | */ | ||
2460 | if (!asoc->temp) { | ||
2461 | if (sctp_stream_init(asoc, gfp)) | ||
2462 | goto clean_up; | ||
2463 | 2459 | ||
2464 | if (sctp_assoc_set_id(asoc, gfp)) | 2460 | if (!asoc->temp && sctp_assoc_set_id(asoc, gfp)) |
2465 | goto clean_up; | 2461 | goto clean_up; |
2466 | } | ||
2467 | 2462 | ||
2468 | /* ADDIP Section 4.1 ASCONF Chunk Procedures | 2463 | /* ADDIP Section 4.1 ASCONF Chunk Procedures |
2469 | * | 2464 | * |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 4f5e6cfc7f60..f863b5573e42 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -2088,6 +2088,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, | |||
2088 | } | 2088 | } |
2089 | } | 2089 | } |
2090 | 2090 | ||
2091 | /* Set temp so that it won't be added into hashtable */ | ||
2092 | new_asoc->temp = 1; | ||
2093 | |||
2091 | /* Compare the tie_tag in cookie with the verification tag of | 2094 | /* Compare the tie_tag in cookie with the verification tag of |
2092 | * current association. | 2095 | * current association. |
2093 | */ | 2096 | */ |
diff --git a/net/smc/Kconfig b/net/smc/Kconfig index c717ef0896aa..33954852f3f8 100644 --- a/net/smc/Kconfig +++ b/net/smc/Kconfig | |||
@@ -8,6 +8,10 @@ config SMC | |||
8 | The Linux implementation of the SMC-R solution is designed as | 8 | The Linux implementation of the SMC-R solution is designed as |
9 | a separate socket family SMC. | 9 | a separate socket family SMC. |
10 | 10 | ||
11 | Warning: SMC will expose all memory for remote reads and writes | ||
12 | once a connection is established. Don't enable this option except | ||
13 | for tightly controlled lab environment. | ||
14 | |||
11 | Select this option if you want to run SMC socket applications | 15 | Select this option if you want to run SMC socket applications |
12 | 16 | ||
13 | config SMC_DIAG | 17 | config SMC_DIAG |
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index e41f594a1e1d..03ec058d18df 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c | |||
@@ -204,7 +204,7 @@ int smc_clc_send_confirm(struct smc_sock *smc) | |||
204 | memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); | 204 | memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); |
205 | hton24(cclc.qpn, link->roce_qp->qp_num); | 205 | hton24(cclc.qpn, link->roce_qp->qp_num); |
206 | cclc.rmb_rkey = | 206 | cclc.rmb_rkey = |
207 | htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); | 207 | htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]); |
208 | cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ | 208 | cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ |
209 | cclc.rmbe_alert_token = htonl(conn->alert_token_local); | 209 | cclc.rmbe_alert_token = htonl(conn->alert_token_local); |
210 | cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); | 210 | cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); |
@@ -256,7 +256,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) | |||
256 | memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); | 256 | memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); |
257 | hton24(aclc.qpn, link->roce_qp->qp_num); | 257 | hton24(aclc.qpn, link->roce_qp->qp_num); |
258 | aclc.rmb_rkey = | 258 | aclc.rmb_rkey = |
259 | htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); | 259 | htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]); |
260 | aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ | 260 | aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ |
261 | aclc.rmbe_alert_token = htonl(conn->alert_token_local); | 261 | aclc.rmbe_alert_token = htonl(conn->alert_token_local); |
262 | aclc.qp_mtu = link->path_mtu; | 262 | aclc.qp_mtu = link->path_mtu; |
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 65020e93ff21..3ac09a629ea1 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
@@ -613,19 +613,8 @@ int smc_rmb_create(struct smc_sock *smc) | |||
613 | rmb_desc = NULL; | 613 | rmb_desc = NULL; |
614 | continue; /* if mapping failed, try smaller one */ | 614 | continue; /* if mapping failed, try smaller one */ |
615 | } | 615 | } |
616 | rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd, | 616 | rmb_desc->rkey[SMC_SINGLE_LINK] = |
617 | IB_ACCESS_REMOTE_WRITE | | 617 | lgr->lnk[SMC_SINGLE_LINK].roce_pd->unsafe_global_rkey; |
618 | IB_ACCESS_LOCAL_WRITE, | ||
619 | &rmb_desc->mr_rx[SMC_SINGLE_LINK]); | ||
620 | if (rc) { | ||
621 | smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev, | ||
622 | tmp_bufsize, rmb_desc, | ||
623 | DMA_FROM_DEVICE); | ||
624 | kfree(rmb_desc->cpu_addr); | ||
625 | kfree(rmb_desc); | ||
626 | rmb_desc = NULL; | ||
627 | continue; | ||
628 | } | ||
629 | rmb_desc->used = 1; | 618 | rmb_desc->used = 1; |
630 | write_lock_bh(&lgr->rmbs_lock); | 619 | write_lock_bh(&lgr->rmbs_lock); |
631 | list_add(&rmb_desc->list, | 620 | list_add(&rmb_desc->list, |
@@ -668,6 +657,7 @@ int smc_rmb_rtoken_handling(struct smc_connection *conn, | |||
668 | 657 | ||
669 | for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { | 658 | for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { |
670 | if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) && | 659 | if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) && |
660 | (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) && | ||
671 | test_bit(i, lgr->rtokens_used_mask)) { | 661 | test_bit(i, lgr->rtokens_used_mask)) { |
672 | conn->rtoken_idx = i; | 662 | conn->rtoken_idx = i; |
673 | return 0; | 663 | return 0; |
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 27eb38056a27..b013cb43a327 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h | |||
@@ -93,7 +93,7 @@ struct smc_buf_desc { | |||
93 | u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; | 93 | u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; |
94 | /* mapped address of buffer */ | 94 | /* mapped address of buffer */ |
95 | void *cpu_addr; /* virtual address of buffer */ | 95 | void *cpu_addr; /* virtual address of buffer */ |
96 | struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; | 96 | u32 rkey[SMC_LINKS_PER_LGR_MAX]; |
97 | /* for rmb only: | 97 | /* for rmb only: |
98 | * rkey provided to peer | 98 | * rkey provided to peer |
99 | */ | 99 | */ |
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index cb69ab977cd7..b31715505a35 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c | |||
@@ -37,24 +37,6 @@ u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system | |||
37 | * identifier | 37 | * identifier |
38 | */ | 38 | */ |
39 | 39 | ||
40 | int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, | ||
41 | struct ib_mr **mr) | ||
42 | { | ||
43 | int rc; | ||
44 | |||
45 | if (*mr) | ||
46 | return 0; /* already done */ | ||
47 | |||
48 | /* obtain unique key - | ||
49 | * next invocation of get_dma_mr returns a different key! | ||
50 | */ | ||
51 | *mr = pd->device->get_dma_mr(pd, access_flags); | ||
52 | rc = PTR_ERR_OR_ZERO(*mr); | ||
53 | if (IS_ERR(*mr)) | ||
54 | *mr = NULL; | ||
55 | return rc; | ||
56 | } | ||
57 | |||
58 | static int smc_ib_modify_qp_init(struct smc_link *lnk) | 40 | static int smc_ib_modify_qp_init(struct smc_link *lnk) |
59 | { | 41 | { |
60 | struct ib_qp_attr qp_attr; | 42 | struct ib_qp_attr qp_attr; |
@@ -210,7 +192,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk) | |||
210 | { | 192 | { |
211 | int rc; | 193 | int rc; |
212 | 194 | ||
213 | lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); | 195 | lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, |
196 | IB_PD_UNSAFE_GLOBAL_RKEY); | ||
214 | rc = PTR_ERR_OR_ZERO(lnk->roce_pd); | 197 | rc = PTR_ERR_OR_ZERO(lnk->roce_pd); |
215 | if (IS_ERR(lnk->roce_pd)) | 198 | if (IS_ERR(lnk->roce_pd)) |
216 | lnk->roce_pd = NULL; | 199 | lnk->roce_pd = NULL; |
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index 7e1f0e24d177..b567152a526d 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h | |||
@@ -61,8 +61,6 @@ void smc_ib_dealloc_protection_domain(struct smc_link *lnk); | |||
61 | int smc_ib_create_protection_domain(struct smc_link *lnk); | 61 | int smc_ib_create_protection_domain(struct smc_link *lnk); |
62 | void smc_ib_destroy_queue_pair(struct smc_link *lnk); | 62 | void smc_ib_destroy_queue_pair(struct smc_link *lnk); |
63 | int smc_ib_create_queue_pair(struct smc_link *lnk); | 63 | int smc_ib_create_queue_pair(struct smc_link *lnk); |
64 | int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, | ||
65 | struct ib_mr **mr); | ||
66 | int smc_ib_ready_link(struct smc_link *lnk); | 64 | int smc_ib_ready_link(struct smc_link *lnk); |
67 | int smc_ib_modify_qp_rts(struct smc_link *lnk); | 65 | int smc_ib_modify_qp_rts(struct smc_link *lnk); |
68 | int smc_ib_modify_qp_reset(struct smc_link *lnk); | 66 | int smc_ib_modify_qp_reset(struct smc_link *lnk); |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 0d4f2f455a7c..1b92b72e812f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -362,25 +362,25 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout) | |||
362 | return 0; | 362 | return 0; |
363 | } | 363 | } |
364 | 364 | ||
365 | #define tipc_wait_for_cond(sock_, timeout_, condition_) \ | 365 | #define tipc_wait_for_cond(sock_, timeo_, condition_) \ |
366 | ({ \ | 366 | ({ \ |
367 | int rc_ = 0; \ | 367 | struct sock *sk_; \ |
368 | int done_ = 0; \ | 368 | int rc_; \ |
369 | \ | 369 | \ |
370 | while (!(condition_) && !done_) { \ | 370 | while ((rc_ = !(condition_))) { \ |
371 | struct sock *sk_ = sock->sk; \ | 371 | DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ |
372 | DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ | 372 | sk_ = (sock_)->sk; \ |
373 | \ | 373 | rc_ = tipc_sk_sock_err((sock_), timeo_); \ |
374 | rc_ = tipc_sk_sock_err(sock_, timeout_); \ | 374 | if (rc_) \ |
375 | if (rc_) \ | 375 | break; \ |
376 | break; \ | 376 | prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ |
377 | prepare_to_wait(sk_sleep(sk_), &wait_, \ | 377 | release_sock(sk_); \ |
378 | TASK_INTERRUPTIBLE); \ | 378 | *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ |
379 | done_ = sk_wait_event(sk_, timeout_, \ | 379 | sched_annotate_sleep(); \ |
380 | (condition_), &wait_); \ | 380 | lock_sock(sk_); \ |
381 | remove_wait_queue(sk_sleep(sk_), &wait_); \ | 381 | remove_wait_queue(sk_sleep(sk_), &wait_); \ |
382 | } \ | 382 | } \ |
383 | rc_; \ | 383 | rc_; \ |
384 | }) | 384 | }) |
385 | 385 | ||
386 | /** | 386 | /** |
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 6f7f6757ceef..dfc8c51e4d74 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
@@ -1540,8 +1540,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, | |||
1540 | long timeout; | 1540 | long timeout; |
1541 | int err; | 1541 | int err; |
1542 | struct vsock_transport_send_notify_data send_data; | 1542 | struct vsock_transport_send_notify_data send_data; |
1543 | 1543 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | |
1544 | DEFINE_WAIT(wait); | ||
1545 | 1544 | ||
1546 | sk = sock->sk; | 1545 | sk = sock->sk; |
1547 | vsk = vsock_sk(sk); | 1546 | vsk = vsock_sk(sk); |
@@ -1584,11 +1583,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, | |||
1584 | if (err < 0) | 1583 | if (err < 0) |
1585 | goto out; | 1584 | goto out; |
1586 | 1585 | ||
1587 | |||
1588 | while (total_written < len) { | 1586 | while (total_written < len) { |
1589 | ssize_t written; | 1587 | ssize_t written; |
1590 | 1588 | ||
1591 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 1589 | add_wait_queue(sk_sleep(sk), &wait); |
1592 | while (vsock_stream_has_space(vsk) == 0 && | 1590 | while (vsock_stream_has_space(vsk) == 0 && |
1593 | sk->sk_err == 0 && | 1591 | sk->sk_err == 0 && |
1594 | !(sk->sk_shutdown & SEND_SHUTDOWN) && | 1592 | !(sk->sk_shutdown & SEND_SHUTDOWN) && |
@@ -1597,33 +1595,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, | |||
1597 | /* Don't wait for non-blocking sockets. */ | 1595 | /* Don't wait for non-blocking sockets. */ |
1598 | if (timeout == 0) { | 1596 | if (timeout == 0) { |
1599 | err = -EAGAIN; | 1597 | err = -EAGAIN; |
1600 | finish_wait(sk_sleep(sk), &wait); | 1598 | remove_wait_queue(sk_sleep(sk), &wait); |
1601 | goto out_err; | 1599 | goto out_err; |
1602 | } | 1600 | } |
1603 | 1601 | ||
1604 | err = transport->notify_send_pre_block(vsk, &send_data); | 1602 | err = transport->notify_send_pre_block(vsk, &send_data); |
1605 | if (err < 0) { | 1603 | if (err < 0) { |
1606 | finish_wait(sk_sleep(sk), &wait); | 1604 | remove_wait_queue(sk_sleep(sk), &wait); |
1607 | goto out_err; | 1605 | goto out_err; |
1608 | } | 1606 | } |
1609 | 1607 | ||
1610 | release_sock(sk); | 1608 | release_sock(sk); |
1611 | timeout = schedule_timeout(timeout); | 1609 | timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout); |
1612 | lock_sock(sk); | 1610 | lock_sock(sk); |
1613 | if (signal_pending(current)) { | 1611 | if (signal_pending(current)) { |
1614 | err = sock_intr_errno(timeout); | 1612 | err = sock_intr_errno(timeout); |
1615 | finish_wait(sk_sleep(sk), &wait); | 1613 | remove_wait_queue(sk_sleep(sk), &wait); |
1616 | goto out_err; | 1614 | goto out_err; |
1617 | } else if (timeout == 0) { | 1615 | } else if (timeout == 0) { |
1618 | err = -EAGAIN; | 1616 | err = -EAGAIN; |
1619 | finish_wait(sk_sleep(sk), &wait); | 1617 | remove_wait_queue(sk_sleep(sk), &wait); |
1620 | goto out_err; | 1618 | goto out_err; |
1621 | } | 1619 | } |
1622 | |||
1623 | prepare_to_wait(sk_sleep(sk), &wait, | ||
1624 | TASK_INTERRUPTIBLE); | ||
1625 | } | 1620 | } |
1626 | finish_wait(sk_sleep(sk), &wait); | 1621 | remove_wait_queue(sk_sleep(sk), &wait); |
1627 | 1622 | ||
1628 | /* These checks occur both as part of and after the loop | 1623 | /* These checks occur both as part of and after the loop |
1629 | * conditional since we need to check before and after | 1624 | * conditional since we need to check before and after |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 14d5f0c8c45f..9f0901f3e42b 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -322,9 +322,9 @@ cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid) | |||
322 | { | 322 | { |
323 | struct cfg80211_sched_scan_request *pos; | 323 | struct cfg80211_sched_scan_request *pos; |
324 | 324 | ||
325 | ASSERT_RTNL(); | 325 | WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); |
326 | 326 | ||
327 | list_for_each_entry(pos, &rdev->sched_scan_req_list, list) { | 327 | list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list) { |
328 | if (pos->reqid == reqid) | 328 | if (pos->reqid == reqid) |
329 | return pos; | 329 | return pos; |
330 | } | 330 | } |
@@ -398,13 +398,13 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid) | |||
398 | trace_cfg80211_sched_scan_results(wiphy, reqid); | 398 | trace_cfg80211_sched_scan_results(wiphy, reqid); |
399 | /* ignore if we're not scanning */ | 399 | /* ignore if we're not scanning */ |
400 | 400 | ||
401 | rtnl_lock(); | 401 | rcu_read_lock(); |
402 | request = cfg80211_find_sched_scan_req(rdev, reqid); | 402 | request = cfg80211_find_sched_scan_req(rdev, reqid); |
403 | if (request) { | 403 | if (request) { |
404 | request->report_results = true; | 404 | request->report_results = true; |
405 | queue_work(cfg80211_wq, &rdev->sched_scan_res_wk); | 405 | queue_work(cfg80211_wq, &rdev->sched_scan_res_wk); |
406 | } | 406 | } |
407 | rtnl_unlock(); | 407 | rcu_read_unlock(); |
408 | } | 408 | } |
409 | EXPORT_SYMBOL(cfg80211_sched_scan_results); | 409 | EXPORT_SYMBOL(cfg80211_sched_scan_results); |
410 | 410 | ||
diff --git a/net/wireless/util.c b/net/wireless/util.c index 7198373e2920..4992f1025c9d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -454,6 +454,8 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, | |||
454 | if (iftype == NL80211_IFTYPE_MESH_POINT) | 454 | if (iftype == NL80211_IFTYPE_MESH_POINT) |
455 | skb_copy_bits(skb, hdrlen, &mesh_flags, 1); | 455 | skb_copy_bits(skb, hdrlen, &mesh_flags, 1); |
456 | 456 | ||
457 | mesh_flags &= MESH_FLAGS_AE; | ||
458 | |||
457 | switch (hdr->frame_control & | 459 | switch (hdr->frame_control & |
458 | cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { | 460 | cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { |
459 | case cpu_to_le16(IEEE80211_FCTL_TODS): | 461 | case cpu_to_le16(IEEE80211_FCTL_TODS): |
@@ -469,9 +471,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, | |||
469 | iftype != NL80211_IFTYPE_STATION)) | 471 | iftype != NL80211_IFTYPE_STATION)) |
470 | return -1; | 472 | return -1; |
471 | if (iftype == NL80211_IFTYPE_MESH_POINT) { | 473 | if (iftype == NL80211_IFTYPE_MESH_POINT) { |
472 | if (mesh_flags & MESH_FLAGS_AE_A4) | 474 | if (mesh_flags == MESH_FLAGS_AE_A4) |
473 | return -1; | 475 | return -1; |
474 | if (mesh_flags & MESH_FLAGS_AE_A5_A6) { | 476 | if (mesh_flags == MESH_FLAGS_AE_A5_A6) { |
475 | skb_copy_bits(skb, hdrlen + | 477 | skb_copy_bits(skb, hdrlen + |
476 | offsetof(struct ieee80211s_hdr, eaddr1), | 478 | offsetof(struct ieee80211s_hdr, eaddr1), |
477 | tmp.h_dest, 2 * ETH_ALEN); | 479 | tmp.h_dest, 2 * ETH_ALEN); |
@@ -487,9 +489,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, | |||
487 | ether_addr_equal(tmp.h_source, addr))) | 489 | ether_addr_equal(tmp.h_source, addr))) |
488 | return -1; | 490 | return -1; |
489 | if (iftype == NL80211_IFTYPE_MESH_POINT) { | 491 | if (iftype == NL80211_IFTYPE_MESH_POINT) { |
490 | if (mesh_flags & MESH_FLAGS_AE_A5_A6) | 492 | if (mesh_flags == MESH_FLAGS_AE_A5_A6) |
491 | return -1; | 493 | return -1; |
492 | if (mesh_flags & MESH_FLAGS_AE_A4) | 494 | if (mesh_flags == MESH_FLAGS_AE_A4) |
493 | skb_copy_bits(skb, hdrlen + | 495 | skb_copy_bits(skb, hdrlen + |
494 | offsetof(struct ieee80211s_hdr, eaddr1), | 496 | offsetof(struct ieee80211s_hdr, eaddr1), |
495 | tmp.h_source, ETH_ALEN); | 497 | tmp.h_source, ETH_ALEN); |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 8b911c29860e..5a1a98df3499 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -1791,32 +1791,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb) | |||
1791 | 1791 | ||
1792 | static int __init x25_init(void) | 1792 | static int __init x25_init(void) |
1793 | { | 1793 | { |
1794 | int rc = proto_register(&x25_proto, 0); | 1794 | int rc; |
1795 | 1795 | ||
1796 | if (rc != 0) | 1796 | rc = proto_register(&x25_proto, 0); |
1797 | if (rc) | ||
1797 | goto out; | 1798 | goto out; |
1798 | 1799 | ||
1799 | rc = sock_register(&x25_family_ops); | 1800 | rc = sock_register(&x25_family_ops); |
1800 | if (rc != 0) | 1801 | if (rc) |
1801 | goto out_proto; | 1802 | goto out_proto; |
1802 | 1803 | ||
1803 | dev_add_pack(&x25_packet_type); | 1804 | dev_add_pack(&x25_packet_type); |
1804 | 1805 | ||
1805 | rc = register_netdevice_notifier(&x25_dev_notifier); | 1806 | rc = register_netdevice_notifier(&x25_dev_notifier); |
1806 | if (rc != 0) | 1807 | if (rc) |
1807 | goto out_sock; | 1808 | goto out_sock; |
1808 | 1809 | ||
1809 | pr_info("Linux Version 0.2\n"); | 1810 | rc = x25_register_sysctl(); |
1811 | if (rc) | ||
1812 | goto out_dev; | ||
1810 | 1813 | ||
1811 | x25_register_sysctl(); | ||
1812 | rc = x25_proc_init(); | 1814 | rc = x25_proc_init(); |
1813 | if (rc != 0) | 1815 | if (rc) |
1814 | goto out_dev; | 1816 | goto out_sysctl; |
1817 | |||
1818 | pr_info("Linux Version 0.2\n"); | ||
1819 | |||
1815 | out: | 1820 | out: |
1816 | return rc; | 1821 | return rc; |
1822 | out_sysctl: | ||
1823 | x25_unregister_sysctl(); | ||
1817 | out_dev: | 1824 | out_dev: |
1818 | unregister_netdevice_notifier(&x25_dev_notifier); | 1825 | unregister_netdevice_notifier(&x25_dev_notifier); |
1819 | out_sock: | 1826 | out_sock: |
1827 | dev_remove_pack(&x25_packet_type); | ||
1820 | sock_unregister(AF_X25); | 1828 | sock_unregister(AF_X25); |
1821 | out_proto: | 1829 | out_proto: |
1822 | proto_unregister(&x25_proto); | 1830 | proto_unregister(&x25_proto); |
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c index a06dfe143c67..ba078c85f0a1 100644 --- a/net/x25/sysctl_net_x25.c +++ b/net/x25/sysctl_net_x25.c | |||
@@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = { | |||
73 | { }, | 73 | { }, |
74 | }; | 74 | }; |
75 | 75 | ||
76 | void __init x25_register_sysctl(void) | 76 | int __init x25_register_sysctl(void) |
77 | { | 77 | { |
78 | x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); | 78 | x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); |
79 | if (!x25_table_header) | ||
80 | return -ENOMEM; | ||
81 | return 0; | ||
79 | } | 82 | } |
80 | 83 | ||
81 | void x25_unregister_sysctl(void) | 84 | void x25_unregister_sysctl(void) |
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 8ec8a3fcf8d4..574e6f32f94f 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c | |||
@@ -170,7 +170,7 @@ static int xfrm_dev_feat_change(struct net_device *dev) | |||
170 | 170 | ||
171 | static int xfrm_dev_down(struct net_device *dev) | 171 | static int xfrm_dev_down(struct net_device *dev) |
172 | { | 172 | { |
173 | if (dev->hw_features & NETIF_F_HW_ESP) | 173 | if (dev->features & NETIF_F_HW_ESP) |
174 | xfrm_dev_state_flush(dev_net(dev), dev, true); | 174 | xfrm_dev_state_flush(dev_net(dev), dev, true); |
175 | 175 | ||
176 | xfrm_garbage_collect(dev_net(dev)); | 176 | xfrm_garbage_collect(dev_net(dev)); |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index b00a1d5a7f52..ed4e52d95172 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1797,43 +1797,6 @@ free_dst: | |||
1797 | goto out; | 1797 | goto out; |
1798 | } | 1798 | } |
1799 | 1799 | ||
1800 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1801 | static int xfrm_dst_alloc_copy(void **target, const void *src, int size) | ||
1802 | { | ||
1803 | if (!*target) { | ||
1804 | *target = kmalloc(size, GFP_ATOMIC); | ||
1805 | if (!*target) | ||
1806 | return -ENOMEM; | ||
1807 | } | ||
1808 | |||
1809 | memcpy(*target, src, size); | ||
1810 | return 0; | ||
1811 | } | ||
1812 | #endif | ||
1813 | |||
1814 | static int xfrm_dst_update_parent(struct dst_entry *dst, | ||
1815 | const struct xfrm_selector *sel) | ||
1816 | { | ||
1817 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1818 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; | ||
1819 | return xfrm_dst_alloc_copy((void **)&(xdst->partner), | ||
1820 | sel, sizeof(*sel)); | ||
1821 | #else | ||
1822 | return 0; | ||
1823 | #endif | ||
1824 | } | ||
1825 | |||
1826 | static int xfrm_dst_update_origin(struct dst_entry *dst, | ||
1827 | const struct flowi *fl) | ||
1828 | { | ||
1829 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1830 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; | ||
1831 | return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); | ||
1832 | #else | ||
1833 | return 0; | ||
1834 | #endif | ||
1835 | } | ||
1836 | |||
1837 | static int xfrm_expand_policies(const struct flowi *fl, u16 family, | 1800 | static int xfrm_expand_policies(const struct flowi *fl, u16 family, |
1838 | struct xfrm_policy **pols, | 1801 | struct xfrm_policy **pols, |
1839 | int *num_pols, int *num_xfrms) | 1802 | int *num_pols, int *num_xfrms) |
@@ -1905,16 +1868,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, | |||
1905 | 1868 | ||
1906 | xdst = (struct xfrm_dst *)dst; | 1869 | xdst = (struct xfrm_dst *)dst; |
1907 | xdst->num_xfrms = err; | 1870 | xdst->num_xfrms = err; |
1908 | if (num_pols > 1) | ||
1909 | err = xfrm_dst_update_parent(dst, &pols[1]->selector); | ||
1910 | else | ||
1911 | err = xfrm_dst_update_origin(dst, fl); | ||
1912 | if (unlikely(err)) { | ||
1913 | dst_free(dst); | ||
1914 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1915 | return ERR_PTR(err); | ||
1916 | } | ||
1917 | |||
1918 | xdst->num_pols = num_pols; | 1871 | xdst->num_pols = num_pols; |
1919 | memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); | 1872 | memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); |
1920 | xdst->policy_genid = atomic_read(&pols[0]->genid); | 1873 | xdst->policy_genid = atomic_read(&pols[0]->genid); |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index fc3c5aa38754..2e291bc5f1fc 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1383,6 +1383,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig) | |||
1383 | x->curlft.add_time = orig->curlft.add_time; | 1383 | x->curlft.add_time = orig->curlft.add_time; |
1384 | x->km.state = orig->km.state; | 1384 | x->km.state = orig->km.state; |
1385 | x->km.seq = orig->km.seq; | 1385 | x->km.seq = orig->km.seq; |
1386 | x->replay = orig->replay; | ||
1387 | x->preplay = orig->preplay; | ||
1386 | 1388 | ||
1387 | return x; | 1389 | return x; |
1388 | 1390 | ||
diff --git a/samples/bpf/cookie_uid_helper_example.c b/samples/bpf/cookie_uid_helper_example.c index b08ab4e88929..9d751e209f31 100644 --- a/samples/bpf/cookie_uid_helper_example.c +++ b/samples/bpf/cookie_uid_helper_example.c | |||
@@ -306,7 +306,9 @@ int main(int argc, char *argv[]) | |||
306 | prog_attach_iptables(argv[2]); | 306 | prog_attach_iptables(argv[2]); |
307 | if (cfg_test_traffic) { | 307 | if (cfg_test_traffic) { |
308 | if (signal(SIGINT, finish) == SIG_ERR) | 308 | if (signal(SIGINT, finish) == SIG_ERR) |
309 | error(1, errno, "register handler failed"); | 309 | error(1, errno, "register SIGINT handler failed"); |
310 | if (signal(SIGTERM, finish) == SIG_ERR) | ||
311 | error(1, errno, "register SIGTERM handler failed"); | ||
310 | while (!test_finish) { | 312 | while (!test_finish) { |
311 | print_table(); | 313 | print_table(); |
312 | printf("\n"); | 314 | printf("\n"); |
diff --git a/samples/bpf/offwaketime_user.c b/samples/bpf/offwaketime_user.c index 9cce2a66bd66..512f87a5fd20 100644 --- a/samples/bpf/offwaketime_user.c +++ b/samples/bpf/offwaketime_user.c | |||
@@ -100,6 +100,7 @@ int main(int argc, char **argv) | |||
100 | setrlimit(RLIMIT_MEMLOCK, &r); | 100 | setrlimit(RLIMIT_MEMLOCK, &r); |
101 | 101 | ||
102 | signal(SIGINT, int_exit); | 102 | signal(SIGINT, int_exit); |
103 | signal(SIGTERM, int_exit); | ||
103 | 104 | ||
104 | if (load_kallsyms()) { | 105 | if (load_kallsyms()) { |
105 | printf("failed to process /proc/kallsyms\n"); | 106 | printf("failed to process /proc/kallsyms\n"); |
diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c index be59d7dcbdde..4ed690b907ff 100644 --- a/samples/bpf/sampleip_user.c +++ b/samples/bpf/sampleip_user.c | |||
@@ -180,6 +180,7 @@ int main(int argc, char **argv) | |||
180 | return 1; | 180 | return 1; |
181 | } | 181 | } |
182 | signal(SIGINT, int_exit); | 182 | signal(SIGINT, int_exit); |
183 | signal(SIGTERM, int_exit); | ||
183 | 184 | ||
184 | /* do sampling */ | 185 | /* do sampling */ |
185 | printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n", | 186 | printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n", |
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c index 0c5561d193a4..fa4336423da5 100644 --- a/samples/bpf/trace_event_user.c +++ b/samples/bpf/trace_event_user.c | |||
@@ -192,6 +192,7 @@ int main(int argc, char **argv) | |||
192 | setrlimit(RLIMIT_MEMLOCK, &r); | 192 | setrlimit(RLIMIT_MEMLOCK, &r); |
193 | 193 | ||
194 | signal(SIGINT, int_exit); | 194 | signal(SIGINT, int_exit); |
195 | signal(SIGTERM, int_exit); | ||
195 | 196 | ||
196 | if (load_kallsyms()) { | 197 | if (load_kallsyms()) { |
197 | printf("failed to process /proc/kallsyms\n"); | 198 | printf("failed to process /proc/kallsyms\n"); |
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c index 7fee0f1ba9a3..7321a3f253c9 100644 --- a/samples/bpf/tracex2_user.c +++ b/samples/bpf/tracex2_user.c | |||
@@ -127,6 +127,7 @@ int main(int ac, char **argv) | |||
127 | } | 127 | } |
128 | 128 | ||
129 | signal(SIGINT, int_exit); | 129 | signal(SIGINT, int_exit); |
130 | signal(SIGTERM, int_exit); | ||
130 | 131 | ||
131 | /* start 'ping' in the background to have some kfree_skb events */ | 132 | /* start 'ping' in the background to have some kfree_skb events */ |
132 | f = popen("ping -c5 localhost", "r"); | 133 | f = popen("ping -c5 localhost", "r"); |
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index 378850c70eb8..2431c0321b71 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c | |||
@@ -62,13 +62,14 @@ static void usage(const char *prog) | |||
62 | fprintf(stderr, | 62 | fprintf(stderr, |
63 | "usage: %s [OPTS] IFINDEX\n\n" | 63 | "usage: %s [OPTS] IFINDEX\n\n" |
64 | "OPTS:\n" | 64 | "OPTS:\n" |
65 | " -S use skb-mode\n", | 65 | " -S use skb-mode\n" |
66 | " -N enforce native mode\n", | ||
66 | prog); | 67 | prog); |
67 | } | 68 | } |
68 | 69 | ||
69 | int main(int argc, char **argv) | 70 | int main(int argc, char **argv) |
70 | { | 71 | { |
71 | const char *optstr = "S"; | 72 | const char *optstr = "SN"; |
72 | char filename[256]; | 73 | char filename[256]; |
73 | int opt; | 74 | int opt; |
74 | 75 | ||
@@ -77,6 +78,9 @@ int main(int argc, char **argv) | |||
77 | case 'S': | 78 | case 'S': |
78 | xdp_flags |= XDP_FLAGS_SKB_MODE; | 79 | xdp_flags |= XDP_FLAGS_SKB_MODE; |
79 | break; | 80 | break; |
81 | case 'N': | ||
82 | xdp_flags |= XDP_FLAGS_DRV_MODE; | ||
83 | break; | ||
80 | default: | 84 | default: |
81 | usage(basename(argv[0])); | 85 | usage(basename(argv[0])); |
82 | return 1; | 86 | return 1; |
@@ -102,6 +106,7 @@ int main(int argc, char **argv) | |||
102 | } | 106 | } |
103 | 107 | ||
104 | signal(SIGINT, int_exit); | 108 | signal(SIGINT, int_exit); |
109 | signal(SIGTERM, int_exit); | ||
105 | 110 | ||
106 | if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) { | 111 | if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) { |
107 | printf("link set xdp fd failed\n"); | 112 | printf("link set xdp fd failed\n"); |
diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c index 92b8bde9337c..715cd12eaca5 100644 --- a/samples/bpf/xdp_tx_iptunnel_user.c +++ b/samples/bpf/xdp_tx_iptunnel_user.c | |||
@@ -79,6 +79,8 @@ static void usage(const char *cmd) | |||
79 | printf(" -m <dest-MAC> Used in sending the IP Tunneled pkt\n"); | 79 | printf(" -m <dest-MAC> Used in sending the IP Tunneled pkt\n"); |
80 | printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n"); | 80 | printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n"); |
81 | printf(" -P <IP-Protocol> Default is TCP\n"); | 81 | printf(" -P <IP-Protocol> Default is TCP\n"); |
82 | printf(" -S use skb-mode\n"); | ||
83 | printf(" -N enforce native mode\n"); | ||
82 | printf(" -h Display this help\n"); | 84 | printf(" -h Display this help\n"); |
83 | } | 85 | } |
84 | 86 | ||
@@ -138,7 +140,7 @@ int main(int argc, char **argv) | |||
138 | { | 140 | { |
139 | unsigned char opt_flags[256] = {}; | 141 | unsigned char opt_flags[256] = {}; |
140 | unsigned int kill_after_s = 0; | 142 | unsigned int kill_after_s = 0; |
141 | const char *optstr = "i:a:p:s:d:m:T:P:Sh"; | 143 | const char *optstr = "i:a:p:s:d:m:T:P:SNh"; |
142 | int min_port = 0, max_port = 0; | 144 | int min_port = 0, max_port = 0; |
143 | struct iptnl_info tnl = {}; | 145 | struct iptnl_info tnl = {}; |
144 | struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; | 146 | struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; |
@@ -206,6 +208,9 @@ int main(int argc, char **argv) | |||
206 | case 'S': | 208 | case 'S': |
207 | xdp_flags |= XDP_FLAGS_SKB_MODE; | 209 | xdp_flags |= XDP_FLAGS_SKB_MODE; |
208 | break; | 210 | break; |
211 | case 'N': | ||
212 | xdp_flags |= XDP_FLAGS_DRV_MODE; | ||
213 | break; | ||
209 | default: | 214 | default: |
210 | usage(argv[0]); | 215 | usage(argv[0]); |
211 | return 1; | 216 | return 1; |
@@ -239,6 +244,7 @@ int main(int argc, char **argv) | |||
239 | } | 244 | } |
240 | 245 | ||
241 | signal(SIGINT, int_exit); | 246 | signal(SIGINT, int_exit); |
247 | signal(SIGTERM, int_exit); | ||
242 | 248 | ||
243 | while (min_port <= max_port) { | 249 | while (min_port <= max_port) { |
244 | vip.dport = htons(min_port++); | 250 | vip.dport = htons(min_port++); |
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst index 6ba97a1f9c5a..ce753a408c56 100644 --- a/scripts/Makefile.headersinst +++ b/scripts/Makefile.headersinst | |||
@@ -8,6 +8,29 @@ | |||
8 | # | 8 | # |
9 | # ========================================================================== | 9 | # ========================================================================== |
10 | 10 | ||
11 | PHONY := __headers | ||
12 | __headers: | ||
13 | |||
14 | include scripts/Kbuild.include | ||
15 | |||
16 | srcdir := $(srctree)/$(obj) | ||
17 | subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.)) | ||
18 | # caller may set destination dir (when installing to asm/) | ||
19 | _dst := $(if $(dst),$(dst),$(obj)) | ||
20 | |||
21 | # Recursion | ||
22 | __headers: $(subdirs) | ||
23 | |||
24 | .PHONY: $(subdirs) | ||
25 | $(subdirs): | ||
26 | $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@ | ||
27 | |||
28 | # Skip header install/check for include/uapi and arch/$(hdr-arch)/include/uapi. | ||
29 | # We have only sub-directories there. | ||
30 | skip-inst := $(if $(filter %/uapi,$(obj)),1) | ||
31 | |||
32 | ifeq ($(skip-inst),) | ||
33 | |||
11 | # generated header directory | 34 | # generated header directory |
12 | gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj))) | 35 | gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj))) |
13 | 36 | ||
@@ -15,21 +38,14 @@ gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj))) | |||
15 | kbuild-file := $(srctree)/$(obj)/Kbuild | 38 | kbuild-file := $(srctree)/$(obj)/Kbuild |
16 | -include $(kbuild-file) | 39 | -include $(kbuild-file) |
17 | 40 | ||
18 | # called may set destination dir (when installing to asm/) | ||
19 | _dst := $(if $(dst),$(dst),$(obj)) | ||
20 | |||
21 | old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild | 41 | old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild |
22 | ifneq ($(wildcard $(old-kbuild-file)),) | 42 | ifneq ($(wildcard $(old-kbuild-file)),) |
23 | include $(old-kbuild-file) | 43 | include $(old-kbuild-file) |
24 | endif | 44 | endif |
25 | 45 | ||
26 | include scripts/Kbuild.include | ||
27 | |||
28 | installdir := $(INSTALL_HDR_PATH)/$(subst uapi/,,$(_dst)) | 46 | installdir := $(INSTALL_HDR_PATH)/$(subst uapi/,,$(_dst)) |
29 | 47 | ||
30 | srcdir := $(srctree)/$(obj) | ||
31 | gendir := $(objtree)/$(gen) | 48 | gendir := $(objtree)/$(gen) |
32 | subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.)) | ||
33 | header-files := $(notdir $(wildcard $(srcdir)/*.h)) | 49 | header-files := $(notdir $(wildcard $(srcdir)/*.h)) |
34 | header-files += $(notdir $(wildcard $(srcdir)/*.agh)) | 50 | header-files += $(notdir $(wildcard $(srcdir)/*.agh)) |
35 | header-files := $(filter-out $(no-export-headers), $(header-files)) | 51 | header-files := $(filter-out $(no-export-headers), $(header-files)) |
@@ -88,11 +104,9 @@ quiet_cmd_check = CHECK $(printdir) ($(words $(all-files)) files) | |||
88 | $(PERL) $< $(INSTALL_HDR_PATH)/include $(SRCARCH); \ | 104 | $(PERL) $< $(INSTALL_HDR_PATH)/include $(SRCARCH); \ |
89 | touch $@ | 105 | touch $@ |
90 | 106 | ||
91 | PHONY += __headersinst __headerscheck | ||
92 | |||
93 | ifndef HDRCHECK | 107 | ifndef HDRCHECK |
94 | # Rules for installing headers | 108 | # Rules for installing headers |
95 | __headersinst: $(subdirs) $(install-file) | 109 | __headers: $(install-file) |
96 | @: | 110 | @: |
97 | 111 | ||
98 | targets += $(install-file) | 112 | targets += $(install-file) |
@@ -104,7 +118,7 @@ $(install-file): scripts/headers_install.sh \ | |||
104 | $(call if_changed,install) | 118 | $(call if_changed,install) |
105 | 119 | ||
106 | else | 120 | else |
107 | __headerscheck: $(subdirs) $(check-file) | 121 | __headers: $(check-file) |
108 | @: | 122 | @: |
109 | 123 | ||
110 | targets += $(check-file) | 124 | targets += $(check-file) |
@@ -113,11 +127,6 @@ $(check-file): scripts/headers_check.pl $(output-files) FORCE | |||
113 | 127 | ||
114 | endif | 128 | endif |
115 | 129 | ||
116 | # Recursion | ||
117 | .PHONY: $(subdirs) | ||
118 | $(subdirs): | ||
119 | $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@ | ||
120 | |||
121 | targets := $(wildcard $(sort $(targets))) | 130 | targets := $(wildcard $(sort $(targets))) |
122 | cmd_files := $(wildcard \ | 131 | cmd_files := $(wildcard \ |
123 | $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd)) | 132 | $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd)) |
@@ -126,6 +135,8 @@ ifneq ($(cmd_files),) | |||
126 | include $(cmd_files) | 135 | include $(cmd_files) |
127 | endif | 136 | endif |
128 | 137 | ||
138 | endif # skip-inst | ||
139 | |||
129 | .PHONY: $(PHONY) | 140 | .PHONY: $(PHONY) |
130 | PHONY += FORCE | 141 | PHONY += FORCE |
131 | FORCE: ; | 142 | FORCE: ; |
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 6dc1eda13b8e..58c05e5d9870 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib | |||
@@ -175,7 +175,7 @@ ld_flags = $(LDFLAGS) $(ldflags-y) | |||
175 | 175 | ||
176 | dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \ | 176 | dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \ |
177 | -I$(srctree)/arch/$(SRCARCH)/boot/dts \ | 177 | -I$(srctree)/arch/$(SRCARCH)/boot/dts \ |
178 | -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \ | 178 | -I$(srctree)/scripts/dtc/include-prefixes \ |
179 | -I$(srctree)/drivers/of/testcase-data \ | 179 | -I$(srctree)/drivers/of/testcase-data \ |
180 | -undef -D__DTS__ | 180 | -undef -D__DTS__ |
181 | 181 | ||
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c index 5adfc8f52b4f..4b72b530c84f 100644 --- a/scripts/dtc/checks.c +++ b/scripts/dtc/checks.c | |||
@@ -873,7 +873,7 @@ static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct no | |||
873 | while (size--) | 873 | while (size--) |
874 | reg = (reg << 32) | fdt32_to_cpu(*(cells++)); | 874 | reg = (reg << 32) | fdt32_to_cpu(*(cells++)); |
875 | 875 | ||
876 | snprintf(unit_addr, sizeof(unit_addr), "%lx", reg); | 876 | snprintf(unit_addr, sizeof(unit_addr), "%zx", reg); |
877 | if (!streq(unitname, unit_addr)) | 877 | if (!streq(unitname, unit_addr)) |
878 | FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"", | 878 | FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"", |
879 | node->fullpath, unit_addr); | 879 | node->fullpath, unit_addr); |
diff --git a/scripts/dtc/include-prefixes/arc b/scripts/dtc/include-prefixes/arc new file mode 120000 index 000000000000..5d21b5a69a11 --- /dev/null +++ b/scripts/dtc/include-prefixes/arc | |||
@@ -0,0 +1 @@ | |||
../../../arch/arc/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/arm b/scripts/dtc/include-prefixes/arm new file mode 120000 index 000000000000..eb14d4515a57 --- /dev/null +++ b/scripts/dtc/include-prefixes/arm | |||
@@ -0,0 +1 @@ | |||
../../../arch/arm/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/arm64 b/scripts/dtc/include-prefixes/arm64 new file mode 120000 index 000000000000..275c42c21d71 --- /dev/null +++ b/scripts/dtc/include-prefixes/arm64 | |||
@@ -0,0 +1 @@ | |||
../../../arch/arm64/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/c6x b/scripts/dtc/include-prefixes/c6x new file mode 120000 index 000000000000..49ded4cae2be --- /dev/null +++ b/scripts/dtc/include-prefixes/c6x | |||
@@ -0,0 +1 @@ | |||
../../../arch/c6x/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/cris b/scripts/dtc/include-prefixes/cris new file mode 120000 index 000000000000..736d998ba506 --- /dev/null +++ b/scripts/dtc/include-prefixes/cris | |||
@@ -0,0 +1 @@ | |||
../../../arch/cris/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/dt-bindings b/scripts/dtc/include-prefixes/dt-bindings new file mode 120000 index 000000000000..04fdbb3af016 --- /dev/null +++ b/scripts/dtc/include-prefixes/dt-bindings | |||
@@ -0,0 +1 @@ | |||
../../../include/dt-bindings \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/h8300 b/scripts/dtc/include-prefixes/h8300 new file mode 120000 index 000000000000..3bdaa332c54c --- /dev/null +++ b/scripts/dtc/include-prefixes/h8300 | |||
@@ -0,0 +1 @@ | |||
../../../arch/h8300/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/metag b/scripts/dtc/include-prefixes/metag new file mode 120000 index 000000000000..87a3c847db8f --- /dev/null +++ b/scripts/dtc/include-prefixes/metag | |||
@@ -0,0 +1 @@ | |||
../../../arch/metag/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/microblaze b/scripts/dtc/include-prefixes/microblaze new file mode 120000 index 000000000000..d9830330a21d --- /dev/null +++ b/scripts/dtc/include-prefixes/microblaze | |||
@@ -0,0 +1 @@ | |||
../../../arch/microblaze/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/mips b/scripts/dtc/include-prefixes/mips new file mode 120000 index 000000000000..ae8d4948dc8d --- /dev/null +++ b/scripts/dtc/include-prefixes/mips | |||
@@ -0,0 +1 @@ | |||
../../../arch/mips/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/nios2 b/scripts/dtc/include-prefixes/nios2 new file mode 120000 index 000000000000..51772336d13f --- /dev/null +++ b/scripts/dtc/include-prefixes/nios2 | |||
@@ -0,0 +1 @@ | |||
../../../arch/nios2/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/openrisc b/scripts/dtc/include-prefixes/openrisc new file mode 120000 index 000000000000..71c3bc75c560 --- /dev/null +++ b/scripts/dtc/include-prefixes/openrisc | |||
@@ -0,0 +1 @@ | |||
../../../arch/openrisc/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/powerpc b/scripts/dtc/include-prefixes/powerpc new file mode 120000 index 000000000000..7cd6ec16e899 --- /dev/null +++ b/scripts/dtc/include-prefixes/powerpc | |||
@@ -0,0 +1 @@ | |||
../../../arch/powerpc/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/sh b/scripts/dtc/include-prefixes/sh new file mode 120000 index 000000000000..67d37808c599 --- /dev/null +++ b/scripts/dtc/include-prefixes/sh | |||
@@ -0,0 +1 @@ | |||
../../../arch/sh/boot/dts \ No newline at end of file | |||
diff --git a/scripts/dtc/include-prefixes/xtensa b/scripts/dtc/include-prefixes/xtensa new file mode 120000 index 000000000000..d1eaf6ec7a2b --- /dev/null +++ b/scripts/dtc/include-prefixes/xtensa | |||
@@ -0,0 +1 @@ | |||
../../../arch/xtensa/boot/dts \ No newline at end of file | |||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 58df440013c5..918e45268915 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -2328,6 +2328,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
2328 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), | 2328 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), |
2329 | SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), | 2329 | SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), |
2330 | SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), | 2330 | SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), |
2331 | SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), | ||
2331 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), | 2332 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), |
2332 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), | 2333 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), |
2333 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), | 2334 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), |
@@ -2342,6 +2343,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = { | |||
2342 | {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"}, | 2343 | {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"}, |
2343 | {.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"}, | 2344 | {.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"}, |
2344 | {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"}, | 2345 | {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"}, |
2346 | {.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"}, | ||
2345 | {} | 2347 | {} |
2346 | }; | 2348 | }; |
2347 | 2349 | ||
@@ -6014,6 +6016,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { | |||
6014 | {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, | 6016 | {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, |
6015 | {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, | 6017 | {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, |
6016 | {.id = ALC292_FIXUP_TPT460, .name = "tpt460"}, | 6018 | {.id = ALC292_FIXUP_TPT460, .name = "tpt460"}, |
6019 | {.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, | ||
6017 | {} | 6020 | {} |
6018 | }; | 6021 | }; |
6019 | #define ALC225_STANDARD_PINS \ | 6022 | #define ALC225_STANDARD_PINS \ |
@@ -6465,8 +6468,11 @@ static int patch_alc269(struct hda_codec *codec) | |||
6465 | break; | 6468 | break; |
6466 | case 0x10ec0225: | 6469 | case 0x10ec0225: |
6467 | case 0x10ec0295: | 6470 | case 0x10ec0295: |
6471 | spec->codec_variant = ALC269_TYPE_ALC225; | ||
6472 | break; | ||
6468 | case 0x10ec0299: | 6473 | case 0x10ec0299: |
6469 | spec->codec_variant = ALC269_TYPE_ALC225; | 6474 | spec->codec_variant = ALC269_TYPE_ALC225; |
6475 | spec->gen.mixer_nid = 0; /* no loopback on ALC299 */ | ||
6470 | break; | 6476 | break; |
6471 | case 0x10ec0234: | 6477 | case 0x10ec0234: |
6472 | case 0x10ec0274: | 6478 | case 0x10ec0274: |
@@ -7338,6 +7344,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = { | |||
7338 | {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"}, | 7344 | {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"}, |
7339 | {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"}, | 7345 | {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"}, |
7340 | {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, | 7346 | {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, |
7347 | {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, | ||
7341 | {} | 7348 | {} |
7342 | }; | 7349 | }; |
7343 | 7350 | ||
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index faa3d38bac0b..6cefdf6c0b75 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -1559,6 +1559,8 @@ static const struct snd_pci_quirk stac9200_fixup_tbl[] = { | |||
1559 | "Dell Inspiron 1501", STAC_9200_DELL_M26), | 1559 | "Dell Inspiron 1501", STAC_9200_DELL_M26), |
1560 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6, | 1560 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6, |
1561 | "unknown Dell", STAC_9200_DELL_M26), | 1561 | "unknown Dell", STAC_9200_DELL_M26), |
1562 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201, | ||
1563 | "Dell Latitude D430", STAC_9200_DELL_M22), | ||
1562 | /* Panasonic */ | 1564 | /* Panasonic */ |
1563 | SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC), | 1565 | SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC), |
1564 | /* Gateway machines needs EAPD to be set on resume */ | 1566 | /* Gateway machines needs EAPD to be set on resume */ |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 01eff6ce6401..d7b0b0a3a2db 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -1364,7 +1364,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, | |||
1364 | /* Amanero Combo384 USB interface with native DSD support */ | 1364 | /* Amanero Combo384 USB interface with native DSD support */ |
1365 | case USB_ID(0x16d0, 0x071a): | 1365 | case USB_ID(0x16d0, 0x071a): |
1366 | if (fp->altsetting == 2) { | 1366 | if (fp->altsetting == 2) { |
1367 | switch (chip->dev->descriptor.bcdDevice) { | 1367 | switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) { |
1368 | case 0x199: | 1368 | case 0x199: |
1369 | return SNDRV_PCM_FMTBIT_DSD_U32_LE; | 1369 | return SNDRV_PCM_FMTBIT_DSD_U32_LE; |
1370 | case 0x19b: | 1370 | case 0x19b: |
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c index 664b7fe206d6..b11d3920b9a5 100644 --- a/sound/x86/intel_hdmi_audio.c +++ b/sound/x86/intel_hdmi_audio.c | |||
@@ -1809,10 +1809,6 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev) | |||
1809 | pdata->notify_pending = false; | 1809 | pdata->notify_pending = false; |
1810 | spin_unlock_irq(&pdata->lpe_audio_slock); | 1810 | spin_unlock_irq(&pdata->lpe_audio_slock); |
1811 | 1811 | ||
1812 | /* runtime PM isn't enabled as default, since it won't save much on | ||
1813 | * BYT/CHT devices; user who want the runtime PM should adjust the | ||
1814 | * power/ontrol and power/autosuspend_delay_ms sysfs entries instead | ||
1815 | */ | ||
1816 | pm_runtime_use_autosuspend(&pdev->dev); | 1812 | pm_runtime_use_autosuspend(&pdev->dev); |
1817 | pm_runtime_mark_last_busy(&pdev->dev); | 1813 | pm_runtime_mark_last_busy(&pdev->dev); |
1818 | pm_runtime_set_active(&pdev->dev); | 1814 | pm_runtime_set_active(&pdev->dev); |
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 6ebd3e6a1fd1..5e3c673fa3f4 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #define __KVM_HAVE_IRQ_LINE | 27 | #define __KVM_HAVE_IRQ_LINE |
28 | #define __KVM_HAVE_READONLY_MEM | 28 | #define __KVM_HAVE_READONLY_MEM |
29 | 29 | ||
30 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
31 | |||
30 | #define KVM_REG_SIZE(id) \ | 32 | #define KVM_REG_SIZE(id) \ |
31 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) | 33 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) |
32 | 34 | ||
@@ -114,6 +116,8 @@ struct kvm_debug_exit_arch { | |||
114 | }; | 116 | }; |
115 | 117 | ||
116 | struct kvm_sync_regs { | 118 | struct kvm_sync_regs { |
119 | /* Used with KVM_CAP_ARM_USER_IRQ */ | ||
120 | __u64 device_irq_level; | ||
117 | }; | 121 | }; |
118 | 122 | ||
119 | struct kvm_arch_memory_slot { | 123 | struct kvm_arch_memory_slot { |
@@ -192,13 +196,17 @@ struct kvm_arch_memory_slot { | |||
192 | #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 | 196 | #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 |
193 | #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 | 197 | #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 |
194 | #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 | 198 | #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 |
199 | #define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 | ||
195 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 | 200 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 |
196 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ | 201 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ |
197 | (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) | 202 | (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) |
198 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff | 203 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff |
199 | #define VGIC_LEVEL_INFO_LINE_LEVEL 0 | 204 | #define VGIC_LEVEL_INFO_LINE_LEVEL 0 |
200 | 205 | ||
201 | #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 | 206 | #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 |
207 | #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 | ||
208 | #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 | ||
209 | #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 | ||
202 | 210 | ||
203 | /* KVM_IRQ_LINE irq field index values */ | 211 | /* KVM_IRQ_LINE irq field index values */ |
204 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 212 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index c2860358ae3e..70eea2ecc663 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -39,6 +39,8 @@ | |||
39 | #define __KVM_HAVE_IRQ_LINE | 39 | #define __KVM_HAVE_IRQ_LINE |
40 | #define __KVM_HAVE_READONLY_MEM | 40 | #define __KVM_HAVE_READONLY_MEM |
41 | 41 | ||
42 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
43 | |||
42 | #define KVM_REG_SIZE(id) \ | 44 | #define KVM_REG_SIZE(id) \ |
43 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) | 45 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) |
44 | 46 | ||
@@ -143,6 +145,8 @@ struct kvm_debug_exit_arch { | |||
143 | #define KVM_GUESTDBG_USE_HW (1 << 17) | 145 | #define KVM_GUESTDBG_USE_HW (1 << 17) |
144 | 146 | ||
145 | struct kvm_sync_regs { | 147 | struct kvm_sync_regs { |
148 | /* Used with KVM_CAP_ARM_USER_IRQ */ | ||
149 | __u64 device_irq_level; | ||
146 | }; | 150 | }; |
147 | 151 | ||
148 | struct kvm_arch_memory_slot { | 152 | struct kvm_arch_memory_slot { |
@@ -212,13 +216,17 @@ struct kvm_arch_memory_slot { | |||
212 | #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 | 216 | #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 |
213 | #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 | 217 | #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 |
214 | #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 | 218 | #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 |
219 | #define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 | ||
215 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 | 220 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 |
216 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ | 221 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ |
217 | (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) | 222 | (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) |
218 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff | 223 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff |
219 | #define VGIC_LEVEL_INFO_LINE_LEVEL 0 | 224 | #define VGIC_LEVEL_INFO_LINE_LEVEL 0 |
220 | 225 | ||
221 | #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 | 226 | #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 |
227 | #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 | ||
228 | #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 | ||
229 | #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 | ||
222 | 230 | ||
223 | /* Device Control API on vcpu fd */ | 231 | /* Device Control API on vcpu fd */ |
224 | #define KVM_ARM_VCPU_PMU_V3_CTRL 0 | 232 | #define KVM_ARM_VCPU_PMU_V3_CTRL 0 |
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index 4edbe4bb0e8b..07fbeb927834 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h | |||
@@ -29,6 +29,9 @@ | |||
29 | #define __KVM_HAVE_IRQ_LINE | 29 | #define __KVM_HAVE_IRQ_LINE |
30 | #define __KVM_HAVE_GUEST_DEBUG | 30 | #define __KVM_HAVE_GUEST_DEBUG |
31 | 31 | ||
32 | /* Not always available, but if it is, this is the correct offset. */ | ||
33 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
34 | |||
32 | struct kvm_regs { | 35 | struct kvm_regs { |
33 | __u64 pc; | 36 | __u64 pc; |
34 | __u64 cr; | 37 | __u64 cr; |
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 7f4fd65e9208..3dd2a1d308dd 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h | |||
@@ -26,6 +26,8 @@ | |||
26 | #define KVM_DEV_FLIC_ADAPTER_REGISTER 6 | 26 | #define KVM_DEV_FLIC_ADAPTER_REGISTER 6 |
27 | #define KVM_DEV_FLIC_ADAPTER_MODIFY 7 | 27 | #define KVM_DEV_FLIC_ADAPTER_MODIFY 7 |
28 | #define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 | 28 | #define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 |
29 | #define KVM_DEV_FLIC_AISM 9 | ||
30 | #define KVM_DEV_FLIC_AIRQ_INJECT 10 | ||
29 | /* | 31 | /* |
30 | * We can have up to 4*64k pending subchannels + 8 adapter interrupts, | 32 | * We can have up to 4*64k pending subchannels + 8 adapter interrupts, |
31 | * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. | 33 | * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. |
@@ -41,7 +43,14 @@ struct kvm_s390_io_adapter { | |||
41 | __u8 isc; | 43 | __u8 isc; |
42 | __u8 maskable; | 44 | __u8 maskable; |
43 | __u8 swap; | 45 | __u8 swap; |
44 | __u8 pad; | 46 | __u8 flags; |
47 | }; | ||
48 | |||
49 | #define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01 | ||
50 | |||
51 | struct kvm_s390_ais_req { | ||
52 | __u8 isc; | ||
53 | __u16 mode; | ||
45 | }; | 54 | }; |
46 | 55 | ||
47 | #define KVM_S390_IO_ADAPTER_MASK 1 | 56 | #define KVM_S390_IO_ADAPTER_MASK 1 |
@@ -110,6 +119,7 @@ struct kvm_s390_vm_cpu_machine { | |||
110 | #define KVM_S390_VM_CPU_FEAT_CMMA 10 | 119 | #define KVM_S390_VM_CPU_FEAT_CMMA 10 |
111 | #define KVM_S390_VM_CPU_FEAT_PFMFI 11 | 120 | #define KVM_S390_VM_CPU_FEAT_PFMFI 11 |
112 | #define KVM_S390_VM_CPU_FEAT_SIGPIF 12 | 121 | #define KVM_S390_VM_CPU_FEAT_SIGPIF 12 |
122 | #define KVM_S390_VM_CPU_FEAT_KSS 13 | ||
113 | struct kvm_s390_vm_cpu_feat { | 123 | struct kvm_s390_vm_cpu_feat { |
114 | __u64 feat[16]; | 124 | __u64 feat[16]; |
115 | }; | 125 | }; |
@@ -198,6 +208,10 @@ struct kvm_guest_debug_arch { | |||
198 | #define KVM_SYNC_VRS (1UL << 6) | 208 | #define KVM_SYNC_VRS (1UL << 6) |
199 | #define KVM_SYNC_RICCB (1UL << 7) | 209 | #define KVM_SYNC_RICCB (1UL << 7) |
200 | #define KVM_SYNC_FPRS (1UL << 8) | 210 | #define KVM_SYNC_FPRS (1UL << 8) |
211 | #define KVM_SYNC_GSCB (1UL << 9) | ||
212 | /* length and alignment of the sdnx as a power of two */ | ||
213 | #define SDNXC 8 | ||
214 | #define SDNXL (1UL << SDNXC) | ||
201 | /* definition of registers in kvm_run */ | 215 | /* definition of registers in kvm_run */ |
202 | struct kvm_sync_regs { | 216 | struct kvm_sync_regs { |
203 | __u64 prefix; /* prefix register */ | 217 | __u64 prefix; /* prefix register */ |
@@ -218,8 +232,16 @@ struct kvm_sync_regs { | |||
218 | }; | 232 | }; |
219 | __u8 reserved[512]; /* for future vector expansion */ | 233 | __u8 reserved[512]; /* for future vector expansion */ |
220 | __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ | 234 | __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ |
221 | __u8 padding[52]; /* riccb needs to be 64byte aligned */ | 235 | __u8 padding1[52]; /* riccb needs to be 64byte aligned */ |
222 | __u8 riccb[64]; /* runtime instrumentation controls block */ | 236 | __u8 riccb[64]; /* runtime instrumentation controls block */ |
237 | __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ | ||
238 | union { | ||
239 | __u8 sdnx[SDNXL]; /* state description annex */ | ||
240 | struct { | ||
241 | __u64 reserved1[2]; | ||
242 | __u64 gscb[4]; | ||
243 | }; | ||
244 | }; | ||
223 | }; | 245 | }; |
224 | 246 | ||
225 | #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) | 247 | #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) |
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 0fe00446f9ca..2701e5f8145b 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h | |||
@@ -202,6 +202,8 @@ | |||
202 | #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ | 202 | #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ |
203 | #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ | 203 | #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ |
204 | 204 | ||
205 | #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ | ||
206 | |||
205 | /* Virtualization flags: Linux defined, word 8 */ | 207 | /* Virtualization flags: Linux defined, word 8 */ |
206 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ | 208 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
207 | #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ | 209 | #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ |
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 85599ad4d024..5dff775af7cd 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h | |||
@@ -36,6 +36,12 @@ | |||
36 | # define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31)) | 36 | # define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31)) |
37 | #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ | 37 | #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ |
38 | 38 | ||
39 | #ifdef CONFIG_X86_5LEVEL | ||
40 | # define DISABLE_LA57 0 | ||
41 | #else | ||
42 | # define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) | ||
43 | #endif | ||
44 | |||
39 | /* | 45 | /* |
40 | * Make sure to add features to the correct mask | 46 | * Make sure to add features to the correct mask |
41 | */ | 47 | */ |
@@ -55,7 +61,7 @@ | |||
55 | #define DISABLED_MASK13 0 | 61 | #define DISABLED_MASK13 0 |
56 | #define DISABLED_MASK14 0 | 62 | #define DISABLED_MASK14 0 |
57 | #define DISABLED_MASK15 0 | 63 | #define DISABLED_MASK15 0 |
58 | #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) | 64 | #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) |
59 | #define DISABLED_MASK17 0 | 65 | #define DISABLED_MASK17 0 |
60 | #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) | 66 | #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) |
61 | 67 | ||
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index fac9a5c0abe9..d91ba04dd007 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h | |||
@@ -53,6 +53,12 @@ | |||
53 | # define NEED_MOVBE 0 | 53 | # define NEED_MOVBE 0 |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef CONFIG_X86_5LEVEL | ||
57 | # define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31)) | ||
58 | #else | ||
59 | # define NEED_LA57 0 | ||
60 | #endif | ||
61 | |||
56 | #ifdef CONFIG_X86_64 | 62 | #ifdef CONFIG_X86_64 |
57 | #ifdef CONFIG_PARAVIRT | 63 | #ifdef CONFIG_PARAVIRT |
58 | /* Paravirtualized systems may not have PSE or PGE available */ | 64 | /* Paravirtualized systems may not have PSE or PGE available */ |
@@ -98,7 +104,7 @@ | |||
98 | #define REQUIRED_MASK13 0 | 104 | #define REQUIRED_MASK13 0 |
99 | #define REQUIRED_MASK14 0 | 105 | #define REQUIRED_MASK14 0 |
100 | #define REQUIRED_MASK15 0 | 106 | #define REQUIRED_MASK15 0 |
101 | #define REQUIRED_MASK16 0 | 107 | #define REQUIRED_MASK16 (NEED_LA57) |
102 | #define REQUIRED_MASK17 0 | 108 | #define REQUIRED_MASK17 0 |
103 | #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) | 109 | #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) |
104 | 110 | ||
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 739c0c594022..c2824d02ba37 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h | |||
@@ -9,6 +9,9 @@ | |||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/ioctl.h> | 10 | #include <linux/ioctl.h> |
11 | 11 | ||
12 | #define KVM_PIO_PAGE_OFFSET 1 | ||
13 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 | ||
14 | |||
12 | #define DE_VECTOR 0 | 15 | #define DE_VECTOR 0 |
13 | #define DB_VECTOR 1 | 16 | #define DB_VECTOR 1 |
14 | #define BP_VECTOR 3 | 17 | #define BP_VECTOR 3 |
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index 14458658e988..690a2dcf4078 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h | |||
@@ -76,7 +76,11 @@ | |||
76 | #define EXIT_REASON_WBINVD 54 | 76 | #define EXIT_REASON_WBINVD 54 |
77 | #define EXIT_REASON_XSETBV 55 | 77 | #define EXIT_REASON_XSETBV 55 |
78 | #define EXIT_REASON_APIC_WRITE 56 | 78 | #define EXIT_REASON_APIC_WRITE 56 |
79 | #define EXIT_REASON_RDRAND 57 | ||
79 | #define EXIT_REASON_INVPCID 58 | 80 | #define EXIT_REASON_INVPCID 58 |
81 | #define EXIT_REASON_VMFUNC 59 | ||
82 | #define EXIT_REASON_ENCLS 60 | ||
83 | #define EXIT_REASON_RDSEED 61 | ||
80 | #define EXIT_REASON_PML_FULL 62 | 84 | #define EXIT_REASON_PML_FULL 62 |
81 | #define EXIT_REASON_XSAVES 63 | 85 | #define EXIT_REASON_XSAVES 63 |
82 | #define EXIT_REASON_XRSTORS 64 | 86 | #define EXIT_REASON_XRSTORS 64 |
@@ -90,6 +94,7 @@ | |||
90 | { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ | 94 | { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ |
91 | { EXIT_REASON_CPUID, "CPUID" }, \ | 95 | { EXIT_REASON_CPUID, "CPUID" }, \ |
92 | { EXIT_REASON_HLT, "HLT" }, \ | 96 | { EXIT_REASON_HLT, "HLT" }, \ |
97 | { EXIT_REASON_INVD, "INVD" }, \ | ||
93 | { EXIT_REASON_INVLPG, "INVLPG" }, \ | 98 | { EXIT_REASON_INVLPG, "INVLPG" }, \ |
94 | { EXIT_REASON_RDPMC, "RDPMC" }, \ | 99 | { EXIT_REASON_RDPMC, "RDPMC" }, \ |
95 | { EXIT_REASON_RDTSC, "RDTSC" }, \ | 100 | { EXIT_REASON_RDTSC, "RDTSC" }, \ |
@@ -108,6 +113,8 @@ | |||
108 | { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ | 113 | { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ |
109 | { EXIT_REASON_MSR_READ, "MSR_READ" }, \ | 114 | { EXIT_REASON_MSR_READ, "MSR_READ" }, \ |
110 | { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ | 115 | { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ |
116 | { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ | ||
117 | { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \ | ||
111 | { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ | 118 | { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ |
112 | { EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \ | 119 | { EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \ |
113 | { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ | 120 | { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ |
@@ -115,20 +122,24 @@ | |||
115 | { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ | 122 | { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ |
116 | { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ | 123 | { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ |
117 | { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ | 124 | { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ |
118 | { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \ | 125 | { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ |
119 | { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \ | 126 | { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \ |
127 | { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \ | ||
120 | { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ | 128 | { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ |
121 | { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ | 129 | { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ |
122 | { EXIT_REASON_INVEPT, "INVEPT" }, \ | 130 | { EXIT_REASON_INVEPT, "INVEPT" }, \ |
131 | { EXIT_REASON_RDTSCP, "RDTSCP" }, \ | ||
123 | { EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \ | 132 | { EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \ |
133 | { EXIT_REASON_INVVPID, "INVVPID" }, \ | ||
124 | { EXIT_REASON_WBINVD, "WBINVD" }, \ | 134 | { EXIT_REASON_WBINVD, "WBINVD" }, \ |
135 | { EXIT_REASON_XSETBV, "XSETBV" }, \ | ||
125 | { EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \ | 136 | { EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \ |
126 | { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ | 137 | { EXIT_REASON_RDRAND, "RDRAND" }, \ |
127 | { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ | ||
128 | { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \ | ||
129 | { EXIT_REASON_INVD, "INVD" }, \ | ||
130 | { EXIT_REASON_INVVPID, "INVVPID" }, \ | ||
131 | { EXIT_REASON_INVPCID, "INVPCID" }, \ | 138 | { EXIT_REASON_INVPCID, "INVPCID" }, \ |
139 | { EXIT_REASON_VMFUNC, "VMFUNC" }, \ | ||
140 | { EXIT_REASON_ENCLS, "ENCLS" }, \ | ||
141 | { EXIT_REASON_RDSEED, "RDSEED" }, \ | ||
142 | { EXIT_REASON_PML_FULL, "PML_FULL" }, \ | ||
132 | { EXIT_REASON_XSAVES, "XSAVES" }, \ | 143 | { EXIT_REASON_XSAVES, "XSAVES" }, \ |
133 | { EXIT_REASON_XRSTORS, "XRSTORS" } | 144 | { EXIT_REASON_XRSTORS, "XRSTORS" } |
134 | 145 | ||
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c index ebc6dceddb58..7598361ef1f1 100644 --- a/tools/build/feature/test-bpf.c +++ b/tools/build/feature/test-bpf.c | |||
@@ -29,6 +29,7 @@ int main(void) | |||
29 | attr.log_size = 0; | 29 | attr.log_size = 0; |
30 | attr.log_level = 0; | 30 | attr.log_level = 0; |
31 | attr.kern_version = 0; | 31 | attr.kern_version = 0; |
32 | attr.prog_flags = 0; | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Test existence of __NR_bpf and BPF_PROG_LOAD. | 35 | * Test existence of __NR_bpf and BPF_PROG_LOAD. |
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h index 390d7c9685fd..4ce25d43e8e3 100644 --- a/tools/include/linux/filter.h +++ b/tools/include/linux/filter.h | |||
@@ -208,6 +208,16 @@ | |||
208 | .off = OFF, \ | 208 | .off = OFF, \ |
209 | .imm = IMM }) | 209 | .imm = IMM }) |
210 | 210 | ||
211 | /* Unconditional jumps, goto pc + off16 */ | ||
212 | |||
213 | #define BPF_JMP_A(OFF) \ | ||
214 | ((struct bpf_insn) { \ | ||
215 | .code = BPF_JMP | BPF_JA, \ | ||
216 | .dst_reg = 0, \ | ||
217 | .src_reg = 0, \ | ||
218 | .off = OFF, \ | ||
219 | .imm = 0 }) | ||
220 | |||
211 | /* Function call */ | 221 | /* Function call */ |
212 | 222 | ||
213 | #define BPF_EMIT_CALL(FUNC) \ | 223 | #define BPF_EMIT_CALL(FUNC) \ |
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e553529929f6..94dfa9def355 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
@@ -132,6 +132,13 @@ enum bpf_attach_type { | |||
132 | */ | 132 | */ |
133 | #define BPF_F_ALLOW_OVERRIDE (1U << 0) | 133 | #define BPF_F_ALLOW_OVERRIDE (1U << 0) |
134 | 134 | ||
135 | /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the | ||
136 | * verifier will perform strict alignment checking as if the kernel | ||
137 | * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, | ||
138 | * and NET_IP_ALIGN defined to 2. | ||
139 | */ | ||
140 | #define BPF_F_STRICT_ALIGNMENT (1U << 0) | ||
141 | |||
135 | #define BPF_PSEUDO_MAP_FD 1 | 142 | #define BPF_PSEUDO_MAP_FD 1 |
136 | 143 | ||
137 | /* flags for BPF_MAP_UPDATE_ELEM command */ | 144 | /* flags for BPF_MAP_UPDATE_ELEM command */ |
@@ -177,6 +184,7 @@ union bpf_attr { | |||
177 | __u32 log_size; /* size of user buffer */ | 184 | __u32 log_size; /* size of user buffer */ |
178 | __aligned_u64 log_buf; /* user supplied buffer */ | 185 | __aligned_u64 log_buf; /* user supplied buffer */ |
179 | __u32 kern_version; /* checked when prog_type=kprobe */ | 186 | __u32 kern_version; /* checked when prog_type=kprobe */ |
187 | __u32 prog_flags; | ||
180 | }; | 188 | }; |
181 | 189 | ||
182 | struct { /* anonymous struct used by BPF_OBJ_* commands */ | 190 | struct { /* anonymous struct used by BPF_OBJ_* commands */ |
@@ -481,8 +489,7 @@ union bpf_attr { | |||
481 | * u32 bpf_get_socket_uid(skb) | 489 | * u32 bpf_get_socket_uid(skb) |
482 | * Get the owner uid of the socket stored inside sk_buff. | 490 | * Get the owner uid of the socket stored inside sk_buff. |
483 | * @skb: pointer to skb | 491 | * @skb: pointer to skb |
484 | * Return: uid of the socket owner on success or 0 if the socket pointer | 492 | * Return: uid of the socket owner on success or overflowuid if failed. |
485 | * inside sk_buff is NULL | ||
486 | */ | 493 | */ |
487 | #define __BPF_FUNC_MAPPER(FN) \ | 494 | #define __BPF_FUNC_MAPPER(FN) \ |
488 | FN(unspec), \ | 495 | FN(unspec), \ |
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h index d538897b8e08..17b10304c393 100644 --- a/tools/include/uapi/linux/stat.h +++ b/tools/include/uapi/linux/stat.h | |||
@@ -48,17 +48,13 @@ | |||
48 | * tv_sec holds the number of seconds before (negative) or after (positive) | 48 | * tv_sec holds the number of seconds before (negative) or after (positive) |
49 | * 00:00:00 1st January 1970 UTC. | 49 | * 00:00:00 1st January 1970 UTC. |
50 | * | 50 | * |
51 | * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is | 51 | * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time. |
52 | * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time. | ||
53 | * | ||
54 | * Note that if both tv_sec and tv_nsec are non-zero, then the two values must | ||
55 | * either be both positive or both negative. | ||
56 | * | 52 | * |
57 | * __reserved is held in case we need a yet finer resolution. | 53 | * __reserved is held in case we need a yet finer resolution. |
58 | */ | 54 | */ |
59 | struct statx_timestamp { | 55 | struct statx_timestamp { |
60 | __s64 tv_sec; | 56 | __s64 tv_sec; |
61 | __s32 tv_nsec; | 57 | __u32 tv_nsec; |
62 | __s32 __reserved; | 58 | __s32 __reserved; |
63 | }; | 59 | }; |
64 | 60 | ||
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 4fe444b8092e..6e178987af8e 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c | |||
@@ -117,6 +117,28 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, | |||
117 | return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 117 | return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); |
118 | } | 118 | } |
119 | 119 | ||
120 | int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, | ||
121 | size_t insns_cnt, int strict_alignment, | ||
122 | const char *license, __u32 kern_version, | ||
123 | char *log_buf, size_t log_buf_sz) | ||
124 | { | ||
125 | union bpf_attr attr; | ||
126 | |||
127 | bzero(&attr, sizeof(attr)); | ||
128 | attr.prog_type = type; | ||
129 | attr.insn_cnt = (__u32)insns_cnt; | ||
130 | attr.insns = ptr_to_u64(insns); | ||
131 | attr.license = ptr_to_u64(license); | ||
132 | attr.log_buf = ptr_to_u64(log_buf); | ||
133 | attr.log_size = log_buf_sz; | ||
134 | attr.log_level = 2; | ||
135 | log_buf[0] = 0; | ||
136 | attr.kern_version = kern_version; | ||
137 | attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0; | ||
138 | |||
139 | return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | ||
140 | } | ||
141 | |||
120 | int bpf_map_update_elem(int fd, const void *key, const void *value, | 142 | int bpf_map_update_elem(int fd, const void *key, const void *value, |
121 | __u64 flags) | 143 | __u64 flags) |
122 | { | 144 | { |
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index edb4daeff7a5..972bd8333eb7 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h | |||
@@ -35,6 +35,10 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, | |||
35 | size_t insns_cnt, const char *license, | 35 | size_t insns_cnt, const char *license, |
36 | __u32 kern_version, char *log_buf, | 36 | __u32 kern_version, char *log_buf, |
37 | size_t log_buf_sz); | 37 | size_t log_buf_sz); |
38 | int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, | ||
39 | size_t insns_cnt, int strict_alignment, | ||
40 | const char *license, __u32 kern_version, | ||
41 | char *log_buf, size_t log_buf_sz); | ||
38 | 42 | ||
39 | int bpf_map_update_elem(int fd, const void *key, const void *value, | 43 | int bpf_map_update_elem(int fd, const void *key, const void *value, |
40 | __u64 flags); | 44 | __u64 flags); |
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index cb0eda3925e6..3517e204a2b3 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt | |||
@@ -311,6 +311,10 @@ include::itrace.txt[] | |||
311 | Set the maximum number of program blocks to print with brstackasm for | 311 | Set the maximum number of program blocks to print with brstackasm for |
312 | each sample. | 312 | each sample. |
313 | 313 | ||
314 | --inline:: | ||
315 | If a callgraph address belongs to an inlined function, the inline stack | ||
316 | will be printed. Each entry has function name and file/line. | ||
317 | |||
314 | SEE ALSO | 318 | SEE ALSO |
315 | -------- | 319 | -------- |
316 | linkperf:perf-record[1], linkperf:perf-script-perl[1], | 320 | linkperf:perf-record[1], linkperf:perf-script-perl[1], |
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index d05aec491cff..4761b0d7fcb5 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -2494,6 +2494,8 @@ int cmd_script(int argc, const char **argv) | |||
2494 | "Enable kernel symbol demangling"), | 2494 | "Enable kernel symbol demangling"), |
2495 | OPT_STRING(0, "time", &script.time_str, "str", | 2495 | OPT_STRING(0, "time", &script.time_str, "str", |
2496 | "Time span of interest (start,stop)"), | 2496 | "Time span of interest (start,stop)"), |
2497 | OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name, | ||
2498 | "Show inline function"), | ||
2497 | OPT_END() | 2499 | OPT_END() |
2498 | }; | 2500 | }; |
2499 | const char * const script_subcommands[] = { "record", "report", NULL }; | 2501 | const char * const script_subcommands[] = { "record", "report", NULL }; |
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 59addd52d9cd..ddb2c6fbdf91 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c | |||
@@ -210,6 +210,8 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b, | |||
210 | return 0; | 210 | return 0; |
211 | 211 | ||
212 | ret = b->callchain->max_depth - a->callchain->max_depth; | 212 | ret = b->callchain->max_depth - a->callchain->max_depth; |
213 | if (callchain_param.order == ORDER_CALLER) | ||
214 | ret = -ret; | ||
213 | } | 215 | } |
214 | return ret; | 216 | return ret; |
215 | } | 217 | } |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 81fc29ac798f..b4204b43ed58 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -621,14 +621,19 @@ enum match_result { | |||
621 | static enum match_result match_chain_srcline(struct callchain_cursor_node *node, | 621 | static enum match_result match_chain_srcline(struct callchain_cursor_node *node, |
622 | struct callchain_list *cnode) | 622 | struct callchain_list *cnode) |
623 | { | 623 | { |
624 | char *left = get_srcline(cnode->ms.map->dso, | 624 | char *left = NULL; |
625 | char *right = NULL; | ||
626 | enum match_result ret = MATCH_EQ; | ||
627 | int cmp; | ||
628 | |||
629 | if (cnode->ms.map) | ||
630 | left = get_srcline(cnode->ms.map->dso, | ||
625 | map__rip_2objdump(cnode->ms.map, cnode->ip), | 631 | map__rip_2objdump(cnode->ms.map, cnode->ip), |
626 | cnode->ms.sym, true, false); | 632 | cnode->ms.sym, true, false); |
627 | char *right = get_srcline(node->map->dso, | 633 | if (node->map) |
634 | right = get_srcline(node->map->dso, | ||
628 | map__rip_2objdump(node->map, node->ip), | 635 | map__rip_2objdump(node->map, node->ip), |
629 | node->sym, true, false); | 636 | node->sym, true, false); |
630 | enum match_result ret = MATCH_EQ; | ||
631 | int cmp; | ||
632 | 637 | ||
633 | if (left && right) | 638 | if (left && right) |
634 | cmp = strcmp(left, right); | 639 | cmp = strcmp(left, right); |
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c index e415aee6a245..583f3a602506 100644 --- a/tools/perf/util/evsel_fprintf.c +++ b/tools/perf/util/evsel_fprintf.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include "map.h" | 7 | #include "map.h" |
8 | #include "strlist.h" | 8 | #include "strlist.h" |
9 | #include "symbol.h" | 9 | #include "symbol.h" |
10 | #include "srcline.h" | ||
10 | 11 | ||
11 | static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) | 12 | static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) |
12 | { | 13 | { |
@@ -168,6 +169,38 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment, | |||
168 | if (!print_oneline) | 169 | if (!print_oneline) |
169 | printed += fprintf(fp, "\n"); | 170 | printed += fprintf(fp, "\n"); |
170 | 171 | ||
172 | if (symbol_conf.inline_name && node->map) { | ||
173 | struct inline_node *inode; | ||
174 | |||
175 | addr = map__rip_2objdump(node->map, node->ip), | ||
176 | inode = dso__parse_addr_inlines(node->map->dso, addr); | ||
177 | |||
178 | if (inode) { | ||
179 | struct inline_list *ilist; | ||
180 | |||
181 | list_for_each_entry(ilist, &inode->val, list) { | ||
182 | if (print_arrow) | ||
183 | printed += fprintf(fp, " <-"); | ||
184 | |||
185 | /* IP is same, just skip it */ | ||
186 | if (print_ip) | ||
187 | printed += fprintf(fp, "%c%16s", | ||
188 | s, ""); | ||
189 | if (print_sym) | ||
190 | printed += fprintf(fp, " %s", | ||
191 | ilist->funcname); | ||
192 | if (print_srcline) | ||
193 | printed += fprintf(fp, "\n %s:%d", | ||
194 | ilist->filename, | ||
195 | ilist->line_nr); | ||
196 | if (!print_oneline) | ||
197 | printed += fprintf(fp, "\n"); | ||
198 | } | ||
199 | |||
200 | inline_node__delete(inode); | ||
201 | } | ||
202 | } | ||
203 | |||
171 | if (symbol_conf.bt_stop_list && | 204 | if (symbol_conf.bt_stop_list && |
172 | node->sym && | 205 | node->sym && |
173 | strlist__has_entry(symbol_conf.bt_stop_list, | 206 | strlist__has_entry(symbol_conf.bt_stop_list, |
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c index df051a52393c..ebc88a74e67b 100644 --- a/tools/perf/util/srcline.c +++ b/tools/perf/util/srcline.c | |||
@@ -56,7 +56,10 @@ static int inline_list__append(char *filename, char *funcname, int line_nr, | |||
56 | } | 56 | } |
57 | } | 57 | } |
58 | 58 | ||
59 | list_add_tail(&ilist->list, &node->val); | 59 | if (callchain_param.order == ORDER_CALLEE) |
60 | list_add_tail(&ilist->list, &node->val); | ||
61 | else | ||
62 | list_add(&ilist->list, &node->val); | ||
60 | 63 | ||
61 | return 0; | 64 | return 0; |
62 | } | 65 | } |
@@ -200,12 +203,14 @@ static void addr2line_cleanup(struct a2l_data *a2l) | |||
200 | 203 | ||
201 | #define MAX_INLINE_NEST 1024 | 204 | #define MAX_INLINE_NEST 1024 |
202 | 205 | ||
203 | static void inline_list__reverse(struct inline_node *node) | 206 | static int inline_list__append_dso_a2l(struct dso *dso, |
207 | struct inline_node *node) | ||
204 | { | 208 | { |
205 | struct inline_list *ilist, *n; | 209 | struct a2l_data *a2l = dso->a2l; |
210 | char *funcname = a2l->funcname ? strdup(a2l->funcname) : NULL; | ||
211 | char *filename = a2l->filename ? strdup(a2l->filename) : NULL; | ||
206 | 212 | ||
207 | list_for_each_entry_safe_reverse(ilist, n, &node->val, list) | 213 | return inline_list__append(filename, funcname, a2l->line, node, dso); |
208 | list_move_tail(&ilist->list, &node->val); | ||
209 | } | 214 | } |
210 | 215 | ||
211 | static int addr2line(const char *dso_name, u64 addr, | 216 | static int addr2line(const char *dso_name, u64 addr, |
@@ -230,36 +235,36 @@ static int addr2line(const char *dso_name, u64 addr, | |||
230 | 235 | ||
231 | bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l); | 236 | bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l); |
232 | 237 | ||
233 | if (a2l->found && unwind_inlines) { | 238 | if (!a2l->found) |
239 | return 0; | ||
240 | |||
241 | if (unwind_inlines) { | ||
234 | int cnt = 0; | 242 | int cnt = 0; |
235 | 243 | ||
244 | if (node && inline_list__append_dso_a2l(dso, node)) | ||
245 | return 0; | ||
246 | |||
236 | while (bfd_find_inliner_info(a2l->abfd, &a2l->filename, | 247 | while (bfd_find_inliner_info(a2l->abfd, &a2l->filename, |
237 | &a2l->funcname, &a2l->line) && | 248 | &a2l->funcname, &a2l->line) && |
238 | cnt++ < MAX_INLINE_NEST) { | 249 | cnt++ < MAX_INLINE_NEST) { |
239 | 250 | ||
240 | if (node != NULL) { | 251 | if (node != NULL) { |
241 | if (inline_list__append(strdup(a2l->filename), | 252 | if (inline_list__append_dso_a2l(dso, node)) |
242 | strdup(a2l->funcname), | ||
243 | a2l->line, node, | ||
244 | dso) != 0) | ||
245 | return 0; | 253 | return 0; |
254 | // found at least one inline frame | ||
255 | ret = 1; | ||
246 | } | 256 | } |
247 | } | 257 | } |
258 | } | ||
248 | 259 | ||
249 | if ((node != NULL) && | 260 | if (file) { |
250 | (callchain_param.order != ORDER_CALLEE)) { | 261 | *file = a2l->filename ? strdup(a2l->filename) : NULL; |
251 | inline_list__reverse(node); | 262 | ret = *file ? 1 : 0; |
252 | } | ||
253 | } | 263 | } |
254 | 264 | ||
255 | if (a2l->found && a2l->filename) { | 265 | if (line) |
256 | *file = strdup(a2l->filename); | ||
257 | *line = a2l->line; | 266 | *line = a2l->line; |
258 | 267 | ||
259 | if (*file) | ||
260 | ret = 1; | ||
261 | } | ||
262 | |||
263 | return ret; | 268 | return ret; |
264 | } | 269 | } |
265 | 270 | ||
@@ -278,8 +283,6 @@ void dso__free_a2l(struct dso *dso) | |||
278 | static struct inline_node *addr2inlines(const char *dso_name, u64 addr, | 283 | static struct inline_node *addr2inlines(const char *dso_name, u64 addr, |
279 | struct dso *dso) | 284 | struct dso *dso) |
280 | { | 285 | { |
281 | char *file = NULL; | ||
282 | unsigned int line = 0; | ||
283 | struct inline_node *node; | 286 | struct inline_node *node; |
284 | 287 | ||
285 | node = zalloc(sizeof(*node)); | 288 | node = zalloc(sizeof(*node)); |
@@ -291,7 +294,7 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr, | |||
291 | INIT_LIST_HEAD(&node->val); | 294 | INIT_LIST_HEAD(&node->val); |
292 | node->addr = addr; | 295 | node->addr = addr; |
293 | 296 | ||
294 | if (!addr2line(dso_name, addr, &file, &line, dso, TRUE, node)) | 297 | if (!addr2line(dso_name, addr, NULL, NULL, dso, TRUE, node)) |
295 | goto out_free_inline_node; | 298 | goto out_free_inline_node; |
296 | 299 | ||
297 | if (list_empty(&node->val)) | 300 | if (list_empty(&node->val)) |
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index f90e11a555b2..943a06291587 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c | |||
@@ -168,12 +168,16 @@ frame_callback(Dwfl_Frame *state, void *arg) | |||
168 | { | 168 | { |
169 | struct unwind_info *ui = arg; | 169 | struct unwind_info *ui = arg; |
170 | Dwarf_Addr pc; | 170 | Dwarf_Addr pc; |
171 | bool isactivation; | ||
171 | 172 | ||
172 | if (!dwfl_frame_pc(state, &pc, NULL)) { | 173 | if (!dwfl_frame_pc(state, &pc, &isactivation)) { |
173 | pr_err("%s", dwfl_errmsg(-1)); | 174 | pr_err("%s", dwfl_errmsg(-1)); |
174 | return DWARF_CB_ABORT; | 175 | return DWARF_CB_ABORT; |
175 | } | 176 | } |
176 | 177 | ||
178 | if (!isactivation) | ||
179 | --pc; | ||
180 | |||
177 | return entry(pc, ui) || !(--ui->max_stack) ? | 181 | return entry(pc, ui) || !(--ui->max_stack) ? |
178 | DWARF_CB_ABORT : DWARF_CB_OK; | 182 | DWARF_CB_ABORT : DWARF_CB_OK; |
179 | } | 183 | } |
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index f8455bed6e65..672c2ada9357 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c | |||
@@ -692,6 +692,17 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, | |||
692 | 692 | ||
693 | while (!ret && (unw_step(&c) > 0) && i < max_stack) { | 693 | while (!ret && (unw_step(&c) > 0) && i < max_stack) { |
694 | unw_get_reg(&c, UNW_REG_IP, &ips[i]); | 694 | unw_get_reg(&c, UNW_REG_IP, &ips[i]); |
695 | |||
696 | /* | ||
697 | * Decrement the IP for any non-activation frames. | ||
698 | * this is required to properly find the srcline | ||
699 | * for caller frames. | ||
700 | * See also the documentation for dwfl_frame_pc(), | ||
701 | * which this code tries to replicate. | ||
702 | */ | ||
703 | if (unw_is_signal_frame(&c) <= 0) | ||
704 | --ips[i]; | ||
705 | |||
695 | ++i; | 706 | ++i; |
696 | } | 707 | } |
697 | 708 | ||
diff --git a/tools/power/acpi/.gitignore b/tools/power/acpi/.gitignore new file mode 100644 index 000000000000..cba3d994995c --- /dev/null +++ b/tools/power/acpi/.gitignore | |||
@@ -0,0 +1,4 @@ | |||
1 | acpidbg | ||
2 | acpidump | ||
3 | ec | ||
4 | include | ||
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 91edd0566237..f389b02d43a0 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile | |||
@@ -11,7 +11,8 @@ endif | |||
11 | CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include | 11 | CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include |
12 | LDLIBS += -lcap -lelf | 12 | LDLIBS += -lcap -lelf |
13 | 13 | ||
14 | TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs | 14 | TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ |
15 | test_align | ||
15 | 16 | ||
16 | TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o | 17 | TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o |
17 | 18 | ||
@@ -34,6 +35,7 @@ $(BPFOBJ): force | |||
34 | CLANG ?= clang | 35 | CLANG ?= clang |
35 | 36 | ||
36 | %.o: %.c | 37 | %.o: %.c |
37 | $(CLANG) -I. -I../../../include/uapi -I../../../../samples/bpf/ \ | 38 | $(CLANG) -I. -I./include/uapi -I../../../include/uapi \ |
39 | -I../../../../samples/bpf/ \ | ||
38 | -Wno-compare-distinct-pointer-types \ | 40 | -Wno-compare-distinct-pointer-types \ |
39 | -O2 -target bpf -c $< -o $@ | 41 | -O2 -target bpf -c $< -o $@ |
diff --git a/tools/testing/selftests/bpf/include/uapi/linux/types.h b/tools/testing/selftests/bpf/include/uapi/linux/types.h new file mode 100644 index 000000000000..51841848fbfe --- /dev/null +++ b/tools/testing/selftests/bpf/include/uapi/linux/types.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef _UAPI_LINUX_TYPES_H | ||
2 | #define _UAPI_LINUX_TYPES_H | ||
3 | |||
4 | #include <asm-generic/int-ll64.h> | ||
5 | |||
6 | /* copied from linux:include/uapi/linux/types.h */ | ||
7 | #define __bitwise | ||
8 | typedef __u16 __bitwise __le16; | ||
9 | typedef __u16 __bitwise __be16; | ||
10 | typedef __u32 __bitwise __le32; | ||
11 | typedef __u32 __bitwise __be32; | ||
12 | typedef __u64 __bitwise __le64; | ||
13 | typedef __u64 __bitwise __be64; | ||
14 | |||
15 | typedef __u16 __bitwise __sum16; | ||
16 | typedef __u32 __bitwise __wsum; | ||
17 | |||
18 | #define __aligned_u64 __u64 __attribute__((aligned(8))) | ||
19 | #define __aligned_be64 __be64 __attribute__((aligned(8))) | ||
20 | #define __aligned_le64 __le64 __attribute__((aligned(8))) | ||
21 | |||
22 | #endif /* _UAPI_LINUX_TYPES_H */ | ||
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c new file mode 100644 index 000000000000..9644d4e069de --- /dev/null +++ b/tools/testing/selftests/bpf/test_align.c | |||
@@ -0,0 +1,453 @@ | |||
1 | #include <asm/types.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <stdint.h> | ||
4 | #include <stdio.h> | ||
5 | #include <stdlib.h> | ||
6 | #include <unistd.h> | ||
7 | #include <errno.h> | ||
8 | #include <string.h> | ||
9 | #include <stddef.h> | ||
10 | #include <stdbool.h> | ||
11 | |||
12 | #include <linux/unistd.h> | ||
13 | #include <linux/filter.h> | ||
14 | #include <linux/bpf_perf_event.h> | ||
15 | #include <linux/bpf.h> | ||
16 | |||
17 | #include <bpf/bpf.h> | ||
18 | |||
19 | #include "../../../include/linux/filter.h" | ||
20 | |||
21 | #ifndef ARRAY_SIZE | ||
22 | # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) | ||
23 | #endif | ||
24 | |||
25 | #define MAX_INSNS 512 | ||
26 | #define MAX_MATCHES 16 | ||
27 | |||
28 | struct bpf_align_test { | ||
29 | const char *descr; | ||
30 | struct bpf_insn insns[MAX_INSNS]; | ||
31 | enum { | ||
32 | UNDEF, | ||
33 | ACCEPT, | ||
34 | REJECT | ||
35 | } result; | ||
36 | enum bpf_prog_type prog_type; | ||
37 | const char *matches[MAX_MATCHES]; | ||
38 | }; | ||
39 | |||
40 | static struct bpf_align_test tests[] = { | ||
41 | { | ||
42 | .descr = "mov", | ||
43 | .insns = { | ||
44 | BPF_MOV64_IMM(BPF_REG_3, 2), | ||
45 | BPF_MOV64_IMM(BPF_REG_3, 4), | ||
46 | BPF_MOV64_IMM(BPF_REG_3, 8), | ||
47 | BPF_MOV64_IMM(BPF_REG_3, 16), | ||
48 | BPF_MOV64_IMM(BPF_REG_3, 32), | ||
49 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
50 | BPF_EXIT_INSN(), | ||
51 | }, | ||
52 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
53 | .matches = { | ||
54 | "1: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp", | ||
55 | "2: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", | ||
56 | "3: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp", | ||
57 | "4: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp", | ||
58 | "5: R1=ctx R3=imm32,min_value=32,max_value=32,min_align=32 R10=fp", | ||
59 | }, | ||
60 | }, | ||
61 | { | ||
62 | .descr = "shift", | ||
63 | .insns = { | ||
64 | BPF_MOV64_IMM(BPF_REG_3, 1), | ||
65 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), | ||
66 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), | ||
67 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), | ||
68 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), | ||
69 | BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4), | ||
70 | BPF_MOV64_IMM(BPF_REG_4, 32), | ||
71 | BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), | ||
72 | BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), | ||
73 | BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), | ||
74 | BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), | ||
75 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
76 | BPF_EXIT_INSN(), | ||
77 | }, | ||
78 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
79 | .matches = { | ||
80 | "1: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp", | ||
81 | "2: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp", | ||
82 | "3: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", | ||
83 | "4: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp", | ||
84 | "5: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp", | ||
85 | "6: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp", | ||
86 | "7: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm32,min_value=32,max_value=32,min_align=32 R10=fp", | ||
87 | "8: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm16,min_value=16,max_value=16,min_align=16 R10=fp", | ||
88 | "9: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp", | ||
89 | "10: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm4,min_value=4,max_value=4,min_align=4 R10=fp", | ||
90 | "11: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm2,min_value=2,max_value=2,min_align=2 R10=fp", | ||
91 | }, | ||
92 | }, | ||
93 | { | ||
94 | .descr = "addsub", | ||
95 | .insns = { | ||
96 | BPF_MOV64_IMM(BPF_REG_3, 4), | ||
97 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4), | ||
98 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2), | ||
99 | BPF_MOV64_IMM(BPF_REG_4, 8), | ||
100 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), | ||
101 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), | ||
102 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
103 | BPF_EXIT_INSN(), | ||
104 | }, | ||
105 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
106 | .matches = { | ||
107 | "1: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", | ||
108 | "2: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=4 R10=fp", | ||
109 | "3: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R10=fp", | ||
110 | "4: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp", | ||
111 | "5: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm12,min_value=12,max_value=12,min_align=4 R10=fp", | ||
112 | "6: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm14,min_value=14,max_value=14,min_align=2 R10=fp", | ||
113 | }, | ||
114 | }, | ||
115 | { | ||
116 | .descr = "mul", | ||
117 | .insns = { | ||
118 | BPF_MOV64_IMM(BPF_REG_3, 7), | ||
119 | BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1), | ||
120 | BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2), | ||
121 | BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4), | ||
122 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
123 | BPF_EXIT_INSN(), | ||
124 | }, | ||
125 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
126 | .matches = { | ||
127 | "1: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp", | ||
128 | "2: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp", | ||
129 | "3: R1=ctx R3=imm14,min_value=14,max_value=14,min_align=2 R10=fp", | ||
130 | "4: R1=ctx R3=imm56,min_value=56,max_value=56,min_align=4 R10=fp", | ||
131 | }, | ||
132 | }, | ||
133 | |||
134 | #define PREP_PKT_POINTERS \ | ||
135 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \ | ||
136 | offsetof(struct __sk_buff, data)), \ | ||
137 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \ | ||
138 | offsetof(struct __sk_buff, data_end)) | ||
139 | |||
140 | #define LOAD_UNKNOWN(DST_REG) \ | ||
141 | PREP_PKT_POINTERS, \ | ||
142 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \ | ||
143 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \ | ||
144 | BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \ | ||
145 | BPF_EXIT_INSN(), \ | ||
146 | BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0) | ||
147 | |||
148 | { | ||
149 | .descr = "unknown shift", | ||
150 | .insns = { | ||
151 | LOAD_UNKNOWN(BPF_REG_3), | ||
152 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), | ||
153 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), | ||
154 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), | ||
155 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), | ||
156 | LOAD_UNKNOWN(BPF_REG_4), | ||
157 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5), | ||
158 | BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), | ||
159 | BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), | ||
160 | BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), | ||
161 | BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), | ||
162 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
163 | BPF_EXIT_INSN(), | ||
164 | }, | ||
165 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
166 | .matches = { | ||
167 | "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp", | ||
168 | "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv55,min_align=2 R10=fp", | ||
169 | "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv54,min_align=4 R10=fp", | ||
170 | "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv53,min_align=8 R10=fp", | ||
171 | "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv52,min_align=16 R10=fp", | ||
172 | "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv56 R10=fp", | ||
173 | "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv51,min_align=32 R10=fp", | ||
174 | "20: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv52,min_align=16 R10=fp", | ||
175 | "21: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv53,min_align=8 R10=fp", | ||
176 | "22: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv54,min_align=4 R10=fp", | ||
177 | "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv55,min_align=2 R10=fp", | ||
178 | }, | ||
179 | }, | ||
180 | { | ||
181 | .descr = "unknown mul", | ||
182 | .insns = { | ||
183 | LOAD_UNKNOWN(BPF_REG_3), | ||
184 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), | ||
185 | BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1), | ||
186 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), | ||
187 | BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), | ||
188 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), | ||
189 | BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4), | ||
190 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), | ||
191 | BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8), | ||
192 | BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), | ||
193 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
194 | BPF_EXIT_INSN(), | ||
195 | }, | ||
196 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
197 | .matches = { | ||
198 | "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp", | ||
199 | "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", | ||
200 | "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv55,min_align=1 R10=fp", | ||
201 | "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", | ||
202 | "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv54,min_align=2 R10=fp", | ||
203 | "12: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", | ||
204 | "13: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv53,min_align=4 R10=fp", | ||
205 | "14: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", | ||
206 | "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv52,min_align=8 R10=fp", | ||
207 | "16: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv50,min_align=8 R10=fp" | ||
208 | }, | ||
209 | }, | ||
210 | { | ||
211 | .descr = "packet const offset", | ||
212 | .insns = { | ||
213 | PREP_PKT_POINTERS, | ||
214 | BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), | ||
215 | |||
216 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
217 | |||
218 | /* Skip over ethernet header. */ | ||
219 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), | ||
220 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), | ||
221 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), | ||
222 | BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), | ||
223 | BPF_EXIT_INSN(), | ||
224 | |||
225 | BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0), | ||
226 | BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1), | ||
227 | BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2), | ||
228 | BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3), | ||
229 | BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0), | ||
230 | BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2), | ||
231 | BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), | ||
232 | |||
233 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
234 | BPF_EXIT_INSN(), | ||
235 | }, | ||
236 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
237 | .matches = { | ||
238 | "4: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=0,r=0) R10=fp", | ||
239 | "5: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=14,r=0) R10=fp", | ||
240 | "6: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R4=pkt(id=0,off=14,r=0) R5=pkt(id=0,off=14,r=0) R10=fp", | ||
241 | "10: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv56 R5=pkt(id=0,off=14,r=18) R10=fp", | ||
242 | "14: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp", | ||
243 | "15: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp", | ||
244 | }, | ||
245 | }, | ||
246 | { | ||
247 | .descr = "packet variable offset", | ||
248 | .insns = { | ||
249 | LOAD_UNKNOWN(BPF_REG_6), | ||
250 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), | ||
251 | |||
252 | /* First, add a constant to the R5 packet pointer, | ||
253 | * then a variable with a known alignment. | ||
254 | */ | ||
255 | BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), | ||
256 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), | ||
257 | BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), | ||
258 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), | ||
259 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), | ||
260 | BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), | ||
261 | BPF_EXIT_INSN(), | ||
262 | BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), | ||
263 | |||
264 | /* Now, test in the other direction. Adding first | ||
265 | * the variable offset to R5, then the constant. | ||
266 | */ | ||
267 | BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), | ||
268 | BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), | ||
269 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), | ||
270 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), | ||
271 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), | ||
272 | BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), | ||
273 | BPF_EXIT_INSN(), | ||
274 | BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), | ||
275 | |||
276 | /* Test multiple accumulations of unknown values | ||
277 | * into a packet pointer. | ||
278 | */ | ||
279 | BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), | ||
280 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), | ||
281 | BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), | ||
282 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), | ||
283 | BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), | ||
284 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), | ||
285 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), | ||
286 | BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), | ||
287 | BPF_EXIT_INSN(), | ||
288 | BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), | ||
289 | |||
290 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
291 | BPF_EXIT_INSN(), | ||
292 | }, | ||
293 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
294 | .matches = { | ||
295 | /* Calculated offset in R6 has unknown value, but known | ||
296 | * alignment of 4. | ||
297 | */ | ||
298 | "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R6=inv54,min_align=4 R10=fp", | ||
299 | |||
300 | /* Offset is added to packet pointer R5, resulting in known | ||
301 | * auxiliary alignment and offset. | ||
302 | */ | ||
303 | "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R5=pkt(id=1,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", | ||
304 | |||
305 | /* At the time the word size load is performed from R5, | ||
306 | * it's total offset is NET_IP_ALIGN + reg->off (0) + | ||
307 | * reg->aux_off (14) which is 16. Then the variable | ||
308 | * offset is considered using reg->aux_off_align which | ||
309 | * is 4 and meets the load's requirements. | ||
310 | */ | ||
311 | "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=1,off=4,r=4),aux_off=14,aux_off_align=4 R5=pkt(id=1,off=0,r=4),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", | ||
312 | |||
313 | |||
314 | /* Variable offset is added to R5 packet pointer, | ||
315 | * resulting in auxiliary alignment of 4. | ||
316 | */ | ||
317 | "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=0,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp", | ||
318 | |||
319 | /* Constant offset is added to R5, resulting in | ||
320 | * reg->off of 14. | ||
321 | */ | ||
322 | "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=14,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp", | ||
323 | |||
324 | /* At the time the word size load is performed from R5, | ||
325 | * it's total offset is NET_IP_ALIGN + reg->off (14) which | ||
326 | * is 16. Then the variable offset is considered using | ||
327 | * reg->aux_off_align which is 4 and meets the load's | ||
328 | * requirements. | ||
329 | */ | ||
330 | "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=2,off=18,r=18),aux_off_align=4 R5=pkt(id=2,off=14,r=18),aux_off_align=4 R6=inv54,min_align=4 R10=fp", | ||
331 | |||
332 | /* Constant offset is added to R5 packet pointer, | ||
333 | * resulting in reg->off value of 14. | ||
334 | */ | ||
335 | "26: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=0,off=14,r=8) R6=inv54,min_align=4 R10=fp", | ||
336 | /* Variable offset is added to R5, resulting in an | ||
337 | * auxiliary offset of 14, and an auxiliary alignment of 4. | ||
338 | */ | ||
339 | "27: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", | ||
340 | /* Constant is added to R5 again, setting reg->off to 4. */ | ||
341 | "28: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=4,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", | ||
342 | /* And once more we add a variable, which causes an accumulation | ||
343 | * of reg->off into reg->aux_off_align, with resulting value of | ||
344 | * 18. The auxiliary alignment stays at 4. | ||
345 | */ | ||
346 | "29: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=4,off=0,r=0),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp", | ||
347 | /* At the time the word size load is performed from R5, | ||
348 | * it's total offset is NET_IP_ALIGN + reg->off (0) + | ||
349 | * reg->aux_off (18) which is 20. Then the variable offset | ||
350 | * is considered using reg->aux_off_align which is 4 and meets | ||
351 | * the load's requirements. | ||
352 | */ | ||
353 | "33: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=4,off=4,r=4),aux_off=18,aux_off_align=4 R5=pkt(id=4,off=0,r=4),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp", | ||
354 | }, | ||
355 | }, | ||
356 | }; | ||
357 | |||
358 | static int probe_filter_length(const struct bpf_insn *fp) | ||
359 | { | ||
360 | int len; | ||
361 | |||
362 | for (len = MAX_INSNS - 1; len > 0; --len) | ||
363 | if (fp[len].code != 0 || fp[len].imm != 0) | ||
364 | break; | ||
365 | return len + 1; | ||
366 | } | ||
367 | |||
368 | static char bpf_vlog[32768]; | ||
369 | |||
370 | static int do_test_single(struct bpf_align_test *test) | ||
371 | { | ||
372 | struct bpf_insn *prog = test->insns; | ||
373 | int prog_type = test->prog_type; | ||
374 | int prog_len, i; | ||
375 | int fd_prog; | ||
376 | int ret; | ||
377 | |||
378 | prog_len = probe_filter_length(prog); | ||
379 | fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, | ||
380 | prog, prog_len, 1, "GPL", 0, | ||
381 | bpf_vlog, sizeof(bpf_vlog)); | ||
382 | if (fd_prog < 0) { | ||
383 | printf("Failed to load program.\n"); | ||
384 | printf("%s", bpf_vlog); | ||
385 | ret = 1; | ||
386 | } else { | ||
387 | ret = 0; | ||
388 | for (i = 0; i < MAX_MATCHES; i++) { | ||
389 | const char *t, *m = test->matches[i]; | ||
390 | |||
391 | if (!m) | ||
392 | break; | ||
393 | t = strstr(bpf_vlog, m); | ||
394 | if (!t) { | ||
395 | printf("Failed to find match: %s\n", m); | ||
396 | ret = 1; | ||
397 | printf("%s", bpf_vlog); | ||
398 | break; | ||
399 | } | ||
400 | } | ||
401 | close(fd_prog); | ||
402 | } | ||
403 | return ret; | ||
404 | } | ||
405 | |||
406 | static int do_test(unsigned int from, unsigned int to) | ||
407 | { | ||
408 | int all_pass = 0; | ||
409 | int all_fail = 0; | ||
410 | unsigned int i; | ||
411 | |||
412 | for (i = from; i < to; i++) { | ||
413 | struct bpf_align_test *test = &tests[i]; | ||
414 | int fail; | ||
415 | |||
416 | printf("Test %3d: %s ... ", | ||
417 | i, test->descr); | ||
418 | fail = do_test_single(test); | ||
419 | if (fail) { | ||
420 | all_fail++; | ||
421 | printf("FAIL\n"); | ||
422 | } else { | ||
423 | all_pass++; | ||
424 | printf("PASS\n"); | ||
425 | } | ||
426 | } | ||
427 | printf("Results: %d pass %d fail\n", | ||
428 | all_pass, all_fail); | ||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | int main(int argc, char **argv) | ||
433 | { | ||
434 | unsigned int from = 0, to = ARRAY_SIZE(tests); | ||
435 | |||
436 | if (argc == 3) { | ||
437 | unsigned int l = atoi(argv[argc - 2]); | ||
438 | unsigned int u = atoi(argv[argc - 1]); | ||
439 | |||
440 | if (l < to && u < to) { | ||
441 | from = l; | ||
442 | to = u + 1; | ||
443 | } | ||
444 | } else if (argc == 2) { | ||
445 | unsigned int t = atoi(argv[argc - 1]); | ||
446 | |||
447 | if (t < to) { | ||
448 | from = t; | ||
449 | to = t + 1; | ||
450 | } | ||
451 | } | ||
452 | return do_test(from, to); | ||
453 | } | ||
diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/test_pkt_access.c index 39387bb7e08c..6e11ba11709e 100644 --- a/tools/testing/selftests/bpf/test_pkt_access.c +++ b/tools/testing/selftests/bpf/test_pkt_access.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * License as published by the Free Software Foundation. | 5 | * License as published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | #include <stddef.h> | 7 | #include <stddef.h> |
8 | #include <string.h> | ||
8 | #include <linux/bpf.h> | 9 | #include <linux/bpf.h> |
9 | #include <linux/if_ether.h> | 10 | #include <linux/if_ether.h> |
10 | #include <linux/if_packet.h> | 11 | #include <linux/if_packet.h> |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 3773562056da..cabb19b1e371 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #define MAX_NR_MAPS 4 | 49 | #define MAX_NR_MAPS 4 |
50 | 50 | ||
51 | #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) | 51 | #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) |
52 | #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1) | ||
52 | 53 | ||
53 | struct bpf_test { | 54 | struct bpf_test { |
54 | const char *descr; | 55 | const char *descr; |
@@ -2615,6 +2616,30 @@ static struct bpf_test tests[] = { | |||
2615 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | 2616 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, |
2616 | }, | 2617 | }, |
2617 | { | 2618 | { |
2619 | "direct packet access: test17 (pruning, alignment)", | ||
2620 | .insns = { | ||
2621 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
2622 | offsetof(struct __sk_buff, data)), | ||
2623 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
2624 | offsetof(struct __sk_buff, data_end)), | ||
2625 | BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, | ||
2626 | offsetof(struct __sk_buff, mark)), | ||
2627 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
2628 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14), | ||
2629 | BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4), | ||
2630 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), | ||
2631 | BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4), | ||
2632 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
2633 | BPF_EXIT_INSN(), | ||
2634 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), | ||
2635 | BPF_JMP_A(-6), | ||
2636 | }, | ||
2637 | .errstr = "misaligned packet access off 2+15+-4 size 4", | ||
2638 | .result = REJECT, | ||
2639 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
2640 | .flags = F_LOAD_WITH_STRICT_ALIGNMENT, | ||
2641 | }, | ||
2642 | { | ||
2618 | "helper access to packet: test1, valid packet_ptr range", | 2643 | "helper access to packet: test1, valid packet_ptr range", |
2619 | .insns = { | 2644 | .insns = { |
2620 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | 2645 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
@@ -3341,6 +3366,70 @@ static struct bpf_test tests[] = { | |||
3341 | .prog_type = BPF_PROG_TYPE_SCHED_CLS | 3366 | .prog_type = BPF_PROG_TYPE_SCHED_CLS |
3342 | }, | 3367 | }, |
3343 | { | 3368 | { |
3369 | "alu ops on ptr_to_map_value_or_null, 1", | ||
3370 | .insns = { | ||
3371 | BPF_MOV64_IMM(BPF_REG_1, 10), | ||
3372 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), | ||
3373 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
3374 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
3375 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
3376 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
3377 | BPF_FUNC_map_lookup_elem), | ||
3378 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), | ||
3379 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2), | ||
3380 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), | ||
3381 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), | ||
3382 | BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), | ||
3383 | BPF_EXIT_INSN(), | ||
3384 | }, | ||
3385 | .fixup_map1 = { 4 }, | ||
3386 | .errstr = "R4 invalid mem access", | ||
3387 | .result = REJECT, | ||
3388 | .prog_type = BPF_PROG_TYPE_SCHED_CLS | ||
3389 | }, | ||
3390 | { | ||
3391 | "alu ops on ptr_to_map_value_or_null, 2", | ||
3392 | .insns = { | ||
3393 | BPF_MOV64_IMM(BPF_REG_1, 10), | ||
3394 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), | ||
3395 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
3396 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
3397 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
3398 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
3399 | BPF_FUNC_map_lookup_elem), | ||
3400 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), | ||
3401 | BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1), | ||
3402 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), | ||
3403 | BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), | ||
3404 | BPF_EXIT_INSN(), | ||
3405 | }, | ||
3406 | .fixup_map1 = { 4 }, | ||
3407 | .errstr = "R4 invalid mem access", | ||
3408 | .result = REJECT, | ||
3409 | .prog_type = BPF_PROG_TYPE_SCHED_CLS | ||
3410 | }, | ||
3411 | { | ||
3412 | "alu ops on ptr_to_map_value_or_null, 3", | ||
3413 | .insns = { | ||
3414 | BPF_MOV64_IMM(BPF_REG_1, 10), | ||
3415 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), | ||
3416 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
3417 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
3418 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
3419 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
3420 | BPF_FUNC_map_lookup_elem), | ||
3421 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), | ||
3422 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1), | ||
3423 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), | ||
3424 | BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), | ||
3425 | BPF_EXIT_INSN(), | ||
3426 | }, | ||
3427 | .fixup_map1 = { 4 }, | ||
3428 | .errstr = "R4 invalid mem access", | ||
3429 | .result = REJECT, | ||
3430 | .prog_type = BPF_PROG_TYPE_SCHED_CLS | ||
3431 | }, | ||
3432 | { | ||
3344 | "invalid memory access with multiple map_lookup_elem calls", | 3433 | "invalid memory access with multiple map_lookup_elem calls", |
3345 | .insns = { | 3434 | .insns = { |
3346 | BPF_MOV64_IMM(BPF_REG_1, 10), | 3435 | BPF_MOV64_IMM(BPF_REG_1, 10), |
@@ -4937,7 +5026,149 @@ static struct bpf_test tests[] = { | |||
4937 | .fixup_map_in_map = { 3 }, | 5026 | .fixup_map_in_map = { 3 }, |
4938 | .errstr = "R1 type=map_value_or_null expected=map_ptr", | 5027 | .errstr = "R1 type=map_value_or_null expected=map_ptr", |
4939 | .result = REJECT, | 5028 | .result = REJECT, |
4940 | } | 5029 | }, |
5030 | { | ||
5031 | "ld_abs: check calling conv, r1", | ||
5032 | .insns = { | ||
5033 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5034 | BPF_MOV64_IMM(BPF_REG_1, 0), | ||
5035 | BPF_LD_ABS(BPF_W, -0x200000), | ||
5036 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), | ||
5037 | BPF_EXIT_INSN(), | ||
5038 | }, | ||
5039 | .errstr = "R1 !read_ok", | ||
5040 | .result = REJECT, | ||
5041 | }, | ||
5042 | { | ||
5043 | "ld_abs: check calling conv, r2", | ||
5044 | .insns = { | ||
5045 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5046 | BPF_MOV64_IMM(BPF_REG_2, 0), | ||
5047 | BPF_LD_ABS(BPF_W, -0x200000), | ||
5048 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
5049 | BPF_EXIT_INSN(), | ||
5050 | }, | ||
5051 | .errstr = "R2 !read_ok", | ||
5052 | .result = REJECT, | ||
5053 | }, | ||
5054 | { | ||
5055 | "ld_abs: check calling conv, r3", | ||
5056 | .insns = { | ||
5057 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5058 | BPF_MOV64_IMM(BPF_REG_3, 0), | ||
5059 | BPF_LD_ABS(BPF_W, -0x200000), | ||
5060 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), | ||
5061 | BPF_EXIT_INSN(), | ||
5062 | }, | ||
5063 | .errstr = "R3 !read_ok", | ||
5064 | .result = REJECT, | ||
5065 | }, | ||
5066 | { | ||
5067 | "ld_abs: check calling conv, r4", | ||
5068 | .insns = { | ||
5069 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5070 | BPF_MOV64_IMM(BPF_REG_4, 0), | ||
5071 | BPF_LD_ABS(BPF_W, -0x200000), | ||
5072 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), | ||
5073 | BPF_EXIT_INSN(), | ||
5074 | }, | ||
5075 | .errstr = "R4 !read_ok", | ||
5076 | .result = REJECT, | ||
5077 | }, | ||
5078 | { | ||
5079 | "ld_abs: check calling conv, r5", | ||
5080 | .insns = { | ||
5081 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5082 | BPF_MOV64_IMM(BPF_REG_5, 0), | ||
5083 | BPF_LD_ABS(BPF_W, -0x200000), | ||
5084 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), | ||
5085 | BPF_EXIT_INSN(), | ||
5086 | }, | ||
5087 | .errstr = "R5 !read_ok", | ||
5088 | .result = REJECT, | ||
5089 | }, | ||
5090 | { | ||
5091 | "ld_abs: check calling conv, r7", | ||
5092 | .insns = { | ||
5093 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5094 | BPF_MOV64_IMM(BPF_REG_7, 0), | ||
5095 | BPF_LD_ABS(BPF_W, -0x200000), | ||
5096 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), | ||
5097 | BPF_EXIT_INSN(), | ||
5098 | }, | ||
5099 | .result = ACCEPT, | ||
5100 | }, | ||
5101 | { | ||
5102 | "ld_ind: check calling conv, r1", | ||
5103 | .insns = { | ||
5104 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5105 | BPF_MOV64_IMM(BPF_REG_1, 1), | ||
5106 | BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000), | ||
5107 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), | ||
5108 | BPF_EXIT_INSN(), | ||
5109 | }, | ||
5110 | .errstr = "R1 !read_ok", | ||
5111 | .result = REJECT, | ||
5112 | }, | ||
5113 | { | ||
5114 | "ld_ind: check calling conv, r2", | ||
5115 | .insns = { | ||
5116 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5117 | BPF_MOV64_IMM(BPF_REG_2, 1), | ||
5118 | BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000), | ||
5119 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
5120 | BPF_EXIT_INSN(), | ||
5121 | }, | ||
5122 | .errstr = "R2 !read_ok", | ||
5123 | .result = REJECT, | ||
5124 | }, | ||
5125 | { | ||
5126 | "ld_ind: check calling conv, r3", | ||
5127 | .insns = { | ||
5128 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5129 | BPF_MOV64_IMM(BPF_REG_3, 1), | ||
5130 | BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000), | ||
5131 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), | ||
5132 | BPF_EXIT_INSN(), | ||
5133 | }, | ||
5134 | .errstr = "R3 !read_ok", | ||
5135 | .result = REJECT, | ||
5136 | }, | ||
5137 | { | ||
5138 | "ld_ind: check calling conv, r4", | ||
5139 | .insns = { | ||
5140 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5141 | BPF_MOV64_IMM(BPF_REG_4, 1), | ||
5142 | BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000), | ||
5143 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), | ||
5144 | BPF_EXIT_INSN(), | ||
5145 | }, | ||
5146 | .errstr = "R4 !read_ok", | ||
5147 | .result = REJECT, | ||
5148 | }, | ||
5149 | { | ||
5150 | "ld_ind: check calling conv, r5", | ||
5151 | .insns = { | ||
5152 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5153 | BPF_MOV64_IMM(BPF_REG_5, 1), | ||
5154 | BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000), | ||
5155 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), | ||
5156 | BPF_EXIT_INSN(), | ||
5157 | }, | ||
5158 | .errstr = "R5 !read_ok", | ||
5159 | .result = REJECT, | ||
5160 | }, | ||
5161 | { | ||
5162 | "ld_ind: check calling conv, r7", | ||
5163 | .insns = { | ||
5164 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
5165 | BPF_MOV64_IMM(BPF_REG_7, 1), | ||
5166 | BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), | ||
5167 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), | ||
5168 | BPF_EXIT_INSN(), | ||
5169 | }, | ||
5170 | .result = ACCEPT, | ||
5171 | }, | ||
4941 | }; | 5172 | }; |
4942 | 5173 | ||
4943 | static int probe_filter_length(const struct bpf_insn *fp) | 5174 | static int probe_filter_length(const struct bpf_insn *fp) |
@@ -5059,9 +5290,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv, | |||
5059 | 5290 | ||
5060 | do_test_fixup(test, prog, map_fds); | 5291 | do_test_fixup(test, prog, map_fds); |
5061 | 5292 | ||
5062 | fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, | 5293 | fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, |
5063 | prog, prog_len, "GPL", 0, bpf_vlog, | 5294 | prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT, |
5064 | sizeof(bpf_vlog)); | 5295 | "GPL", 0, bpf_vlog, sizeof(bpf_vlog)); |
5065 | 5296 | ||
5066 | expected_ret = unpriv && test->result_unpriv != UNDEF ? | 5297 | expected_ret = unpriv && test->result_unpriv != UNDEF ? |
5067 | test->result_unpriv : test->result; | 5298 | test->result_unpriv : test->result; |
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest index 32e6211e1c6e..717581145cfc 100755 --- a/tools/testing/selftests/ftrace/ftracetest +++ b/tools/testing/selftests/ftrace/ftracetest | |||
@@ -58,7 +58,7 @@ parse_opts() { # opts | |||
58 | ;; | 58 | ;; |
59 | --verbose|-v|-vv) | 59 | --verbose|-v|-vv) |
60 | VERBOSE=$((VERBOSE + 1)) | 60 | VERBOSE=$((VERBOSE + 1)) |
61 | [ $1 == '-vv' ] && VERBOSE=$((VERBOSE + 1)) | 61 | [ $1 = '-vv' ] && VERBOSE=$((VERBOSE + 1)) |
62 | shift 1 | 62 | shift 1 |
63 | ;; | 63 | ;; |
64 | --debug|-d) | 64 | --debug|-d) |
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc index 07bb3e5930b4..aa31368851c9 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc | |||
@@ -48,7 +48,7 @@ test_event_enabled() { | |||
48 | e=`cat $EVENT_ENABLE` | 48 | e=`cat $EVENT_ENABLE` |
49 | if [ "$e" != $val ]; then | 49 | if [ "$e" != $val ]; then |
50 | echo "Expected $val but found $e" | 50 | echo "Expected $val but found $e" |
51 | exit -1 | 51 | exit 1 |
52 | fi | 52 | fi |
53 | } | 53 | } |
54 | 54 | ||
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index 9aec6fcb7729..f2019b37370d 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions | |||
@@ -34,10 +34,10 @@ reset_ftrace_filter() { # reset all triggers in set_ftrace_filter | |||
34 | echo > set_ftrace_filter | 34 | echo > set_ftrace_filter |
35 | grep -v '^#' set_ftrace_filter | while read t; do | 35 | grep -v '^#' set_ftrace_filter | while read t; do |
36 | tr=`echo $t | cut -d: -f2` | 36 | tr=`echo $t | cut -d: -f2` |
37 | if [ "$tr" == "" ]; then | 37 | if [ "$tr" = "" ]; then |
38 | continue | 38 | continue |
39 | fi | 39 | fi |
40 | if [ $tr == "enable_event" -o $tr == "disable_event" ]; then | 40 | if [ $tr = "enable_event" -o $tr = "disable_event" ]; then |
41 | tr=`echo $t | cut -d: -f1-4` | 41 | tr=`echo $t | cut -d: -f1-4` |
42 | limit=`echo $t | cut -d: -f5` | 42 | limit=`echo $t | cut -d: -f5` |
43 | else | 43 | else |
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc index 4c5a061a5b4e..c73db7863adb 100644 --- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc +++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc | |||
@@ -75,9 +75,13 @@ rmdir foo | |||
75 | if [ -d foo ]; then | 75 | if [ -d foo ]; then |
76 | fail "foo still exists" | 76 | fail "foo still exists" |
77 | fi | 77 | fi |
78 | exit 0 | ||
79 | |||
80 | 78 | ||
79 | mkdir foo | ||
80 | echo "schedule:enable_event:sched:sched_switch" > foo/set_ftrace_filter | ||
81 | rmdir foo | ||
82 | if [ -d foo ]; then | ||
83 | fail "foo still exists" | ||
84 | fi | ||
81 | 85 | ||
82 | 86 | ||
83 | instance_slam() { | 87 | instance_slam() { |
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc new file mode 100644 index 000000000000..f4d1ff785d67 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc | |||
@@ -0,0 +1,21 @@ | |||
1 | #!/bin/sh | ||
2 | # description: Register/unregister many kprobe events | ||
3 | |||
4 | # ftrace fentry skip size depends on the machine architecture. | ||
5 | # Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc | ||
6 | case `uname -m` in | ||
7 | x86_64|i[3456]86) OFFS=5;; | ||
8 | ppc*) OFFS=4;; | ||
9 | *) OFFS=0;; | ||
10 | esac | ||
11 | |||
12 | echo "Setup up to 256 kprobes" | ||
13 | grep t /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \ | ||
14 | head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||: | ||
15 | |||
16 | echo 1 > events/kprobes/enable | ||
17 | echo 0 > events/kprobes/enable | ||
18 | echo > kprobe_events | ||
19 | echo "Waiting for unoptimizing & freeing" | ||
20 | sleep 5 | ||
21 | echo "Done" | ||
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore index 427621792229..2f1f7b013293 100644 --- a/tools/testing/selftests/powerpc/tm/.gitignore +++ b/tools/testing/selftests/powerpc/tm/.gitignore | |||
@@ -11,3 +11,4 @@ tm-signal-context-chk-fpu | |||
11 | tm-signal-context-chk-gpr | 11 | tm-signal-context-chk-gpr |
12 | tm-signal-context-chk-vmx | 12 | tm-signal-context-chk-vmx |
13 | tm-signal-context-chk-vsx | 13 | tm-signal-context-chk-vsx |
14 | tm-vmx-unavail | ||
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index 5576ee6a51f2..958c11c14acd 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile | |||
@@ -2,7 +2,8 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu | |||
2 | tm-signal-context-chk-vmx tm-signal-context-chk-vsx | 2 | tm-signal-context-chk-vmx tm-signal-context-chk-vsx |
3 | 3 | ||
4 | TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ | 4 | TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ |
5 | tm-vmxcopy tm-fork tm-tar tm-tmspr $(SIGNAL_CONTEXT_CHK_TESTS) | 5 | tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail \ |
6 | $(SIGNAL_CONTEXT_CHK_TESTS) | ||
6 | 7 | ||
7 | include ../../lib.mk | 8 | include ../../lib.mk |
8 | 9 | ||
@@ -13,6 +14,7 @@ CFLAGS += -mhtm | |||
13 | $(OUTPUT)/tm-syscall: tm-syscall-asm.S | 14 | $(OUTPUT)/tm-syscall: tm-syscall-asm.S |
14 | $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include | 15 | $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include |
15 | $(OUTPUT)/tm-tmspr: CFLAGS += -pthread | 16 | $(OUTPUT)/tm-tmspr: CFLAGS += -pthread |
17 | $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64 | ||
16 | 18 | ||
17 | SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS)) | 19 | SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS)) |
18 | $(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S | 20 | $(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S |
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c index d9c49f41515e..e79ccd6aada1 100644 --- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c +++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c | |||
@@ -42,12 +42,12 @@ int test_body(void) | |||
42 | printf("Check DSCR TM context switch: "); | 42 | printf("Check DSCR TM context switch: "); |
43 | fflush(stdout); | 43 | fflush(stdout); |
44 | for (;;) { | 44 | for (;;) { |
45 | rv = 1; | ||
46 | asm __volatile__ ( | 45 | asm __volatile__ ( |
47 | /* set a known value into the DSCR */ | 46 | /* set a known value into the DSCR */ |
48 | "ld 3, %[dscr1];" | 47 | "ld 3, %[dscr1];" |
49 | "mtspr %[sprn_dscr], 3;" | 48 | "mtspr %[sprn_dscr], 3;" |
50 | 49 | ||
50 | "li %[rv], 1;" | ||
51 | /* start and suspend a transaction */ | 51 | /* start and suspend a transaction */ |
52 | "tbegin.;" | 52 | "tbegin.;" |
53 | "beq 1f;" | 53 | "beq 1f;" |
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c new file mode 100644 index 000000000000..137185ba4937 --- /dev/null +++ b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * Copyright 2017, Michael Neuling, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | * Original: Breno Leitao <brenohl@br.ibm.com> & | ||
5 | * Gustavo Bueno Romero <gromero@br.ibm.com> | ||
6 | * Edited: Michael Neuling | ||
7 | * | ||
8 | * Force VMX unavailable during a transaction and see if it corrupts | ||
9 | * the checkpointed VMX register state after the abort. | ||
10 | */ | ||
11 | |||
12 | #include <inttypes.h> | ||
13 | #include <htmintrin.h> | ||
14 | #include <string.h> | ||
15 | #include <stdlib.h> | ||
16 | #include <stdio.h> | ||
17 | #include <pthread.h> | ||
18 | #include <sys/mman.h> | ||
19 | #include <unistd.h> | ||
20 | #include <pthread.h> | ||
21 | |||
22 | #include "tm.h" | ||
23 | #include "utils.h" | ||
24 | |||
25 | int passed; | ||
26 | |||
27 | void *worker(void *unused) | ||
28 | { | ||
29 | __int128 vmx0; | ||
30 | uint64_t texasr; | ||
31 | |||
32 | asm goto ( | ||
33 | "li 3, 1;" /* Stick non-zero value in VMX0 */ | ||
34 | "std 3, 0(%[vmx0_ptr]);" | ||
35 | "lvx 0, 0, %[vmx0_ptr];" | ||
36 | |||
37 | /* Wait here a bit so we get scheduled out 255 times */ | ||
38 | "lis 3, 0x3fff;" | ||
39 | "1: ;" | ||
40 | "addi 3, 3, -1;" | ||
41 | "cmpdi 3, 0;" | ||
42 | "bne 1b;" | ||
43 | |||
44 | /* Kernel will hopefully turn VMX off now */ | ||
45 | |||
46 | "tbegin. ;" | ||
47 | "beq failure;" | ||
48 | |||
49 | /* Cause VMX unavail. Any VMX instruction */ | ||
50 | "vaddcuw 0,0,0;" | ||
51 | |||
52 | "tend. ;" | ||
53 | "b %l[success];" | ||
54 | |||
55 | /* Check VMX0 sanity after abort */ | ||
56 | "failure: ;" | ||
57 | "lvx 1, 0, %[vmx0_ptr];" | ||
58 | "vcmpequb. 2, 0, 1;" | ||
59 | "bc 4, 24, %l[value_mismatch];" | ||
60 | "b %l[value_match];" | ||
61 | : | ||
62 | : [vmx0_ptr] "r"(&vmx0) | ||
63 | : "r3" | ||
64 | : success, value_match, value_mismatch | ||
65 | ); | ||
66 | |||
67 | /* HTM aborted and VMX0 is corrupted */ | ||
68 | value_mismatch: | ||
69 | texasr = __builtin_get_texasr(); | ||
70 | |||
71 | printf("\n\n==============\n\n"); | ||
72 | printf("Failure with error: %lx\n", _TEXASR_FAILURE_CODE(texasr)); | ||
73 | printf("Summary error : %lx\n", _TEXASR_FAILURE_SUMMARY(texasr)); | ||
74 | printf("TFIAR exact : %lx\n\n", _TEXASR_TFIAR_EXACT(texasr)); | ||
75 | |||
76 | passed = 0; | ||
77 | return NULL; | ||
78 | |||
79 | /* HTM aborted but VMX0 is correct */ | ||
80 | value_match: | ||
81 | // printf("!"); | ||
82 | return NULL; | ||
83 | |||
84 | success: | ||
85 | // printf("."); | ||
86 | return NULL; | ||
87 | } | ||
88 | |||
89 | int tm_vmx_unavail_test() | ||
90 | { | ||
91 | int threads; | ||
92 | pthread_t *thread; | ||
93 | |||
94 | SKIP_IF(!have_htm()); | ||
95 | |||
96 | passed = 1; | ||
97 | |||
98 | threads = sysconf(_SC_NPROCESSORS_ONLN) * 4; | ||
99 | thread = malloc(sizeof(pthread_t)*threads); | ||
100 | if (!thread) | ||
101 | return EXIT_FAILURE; | ||
102 | |||
103 | for (uint64_t i = 0; i < threads; i++) | ||
104 | pthread_create(&thread[i], NULL, &worker, NULL); | ||
105 | |||
106 | for (uint64_t i = 0; i < threads; i++) | ||
107 | pthread_join(thread[i], NULL); | ||
108 | |||
109 | free(thread); | ||
110 | |||
111 | return passed ? EXIT_SUCCESS : EXIT_FAILURE; | ||
112 | } | ||
113 | |||
114 | |||
115 | int main(int argc, char **argv) | ||
116 | { | ||
117 | return test_harness(tm_vmx_unavail_test, "tm_vmx_unavail_test"); | ||
118 | } | ||
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c index bce6037cf01d..32c3295929b0 100644 --- a/virt/kvm/arm/hyp/vgic-v3-sr.c +++ b/virt/kvm/arm/hyp/vgic-v3-sr.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <asm/kvm_hyp.h> | 22 | #include <asm/kvm_hyp.h> |
23 | 23 | ||
24 | #define vtr_to_max_lr_idx(v) ((v) & 0xf) | 24 | #define vtr_to_max_lr_idx(v) ((v) & 0xf) |
25 | #define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) | 25 | #define vtr_to_nr_pre_bits(v) (((u32)(v) >> 26) + 1) |
26 | 26 | ||
27 | static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) | 27 | static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) |
28 | { | 28 | { |
@@ -135,13 +135,13 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
135 | 135 | ||
136 | if (used_lrs) { | 136 | if (used_lrs) { |
137 | int i; | 137 | int i; |
138 | u32 nr_pri_bits; | 138 | u32 nr_pre_bits; |
139 | 139 | ||
140 | cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); | 140 | cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); |
141 | 141 | ||
142 | write_gicreg(0, ICH_HCR_EL2); | 142 | write_gicreg(0, ICH_HCR_EL2); |
143 | val = read_gicreg(ICH_VTR_EL2); | 143 | val = read_gicreg(ICH_VTR_EL2); |
144 | nr_pri_bits = vtr_to_nr_pri_bits(val); | 144 | nr_pre_bits = vtr_to_nr_pre_bits(val); |
145 | 145 | ||
146 | for (i = 0; i < used_lrs; i++) { | 146 | for (i = 0; i < used_lrs; i++) { |
147 | if (cpu_if->vgic_elrsr & (1 << i)) | 147 | if (cpu_if->vgic_elrsr & (1 << i)) |
@@ -152,7 +152,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
152 | __gic_v3_set_lr(0, i); | 152 | __gic_v3_set_lr(0, i); |
153 | } | 153 | } |
154 | 154 | ||
155 | switch (nr_pri_bits) { | 155 | switch (nr_pre_bits) { |
156 | case 7: | 156 | case 7: |
157 | cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); | 157 | cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); |
158 | cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); | 158 | cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); |
@@ -162,7 +162,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
162 | cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); | 162 | cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); |
163 | } | 163 | } |
164 | 164 | ||
165 | switch (nr_pri_bits) { | 165 | switch (nr_pre_bits) { |
166 | case 7: | 166 | case 7: |
167 | cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2); | 167 | cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2); |
168 | cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); | 168 | cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); |
@@ -198,7 +198,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | |||
198 | struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; | 198 | struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; |
199 | u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; | 199 | u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; |
200 | u64 val; | 200 | u64 val; |
201 | u32 nr_pri_bits; | 201 | u32 nr_pre_bits; |
202 | int i; | 202 | int i; |
203 | 203 | ||
204 | /* | 204 | /* |
@@ -217,12 +217,12 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | |||
217 | } | 217 | } |
218 | 218 | ||
219 | val = read_gicreg(ICH_VTR_EL2); | 219 | val = read_gicreg(ICH_VTR_EL2); |
220 | nr_pri_bits = vtr_to_nr_pri_bits(val); | 220 | nr_pre_bits = vtr_to_nr_pre_bits(val); |
221 | 221 | ||
222 | if (used_lrs) { | 222 | if (used_lrs) { |
223 | write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); | 223 | write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); |
224 | 224 | ||
225 | switch (nr_pri_bits) { | 225 | switch (nr_pre_bits) { |
226 | case 7: | 226 | case 7: |
227 | write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); | 227 | write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); |
228 | write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); | 228 | write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); |
@@ -232,7 +232,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | |||
232 | write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); | 232 | write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); |
233 | } | 233 | } |
234 | 234 | ||
235 | switch (nr_pri_bits) { | 235 | switch (nr_pre_bits) { |
236 | case 7: | 236 | case 7: |
237 | write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); | 237 | write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); |
238 | write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); | 238 | write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); |
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 313ee646480f..a2d63247d1bb 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c | |||
@@ -295,6 +295,13 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | |||
295 | assert_spin_locked(&kvm->mmu_lock); | 295 | assert_spin_locked(&kvm->mmu_lock); |
296 | pgd = kvm->arch.pgd + stage2_pgd_index(addr); | 296 | pgd = kvm->arch.pgd + stage2_pgd_index(addr); |
297 | do { | 297 | do { |
298 | /* | ||
299 | * Make sure the page table is still active, as another thread | ||
300 | * could have possibly freed the page table, while we released | ||
301 | * the lock. | ||
302 | */ | ||
303 | if (!READ_ONCE(kvm->arch.pgd)) | ||
304 | break; | ||
298 | next = stage2_pgd_addr_end(addr, end); | 305 | next = stage2_pgd_addr_end(addr, end); |
299 | if (!stage2_pgd_none(*pgd)) | 306 | if (!stage2_pgd_none(*pgd)) |
300 | unmap_stage2_puds(kvm, pgd, addr, next); | 307 | unmap_stage2_puds(kvm, pgd, addr, next); |
@@ -829,22 +836,22 @@ void stage2_unmap_vm(struct kvm *kvm) | |||
829 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all | 836 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all |
830 | * underlying level-2 and level-3 tables before freeing the actual level-1 table | 837 | * underlying level-2 and level-3 tables before freeing the actual level-1 table |
831 | * and setting the struct pointer to NULL. | 838 | * and setting the struct pointer to NULL. |
832 | * | ||
833 | * Note we don't need locking here as this is only called when the VM is | ||
834 | * destroyed, which can only be done once. | ||
835 | */ | 839 | */ |
836 | void kvm_free_stage2_pgd(struct kvm *kvm) | 840 | void kvm_free_stage2_pgd(struct kvm *kvm) |
837 | { | 841 | { |
838 | if (kvm->arch.pgd == NULL) | 842 | void *pgd = NULL; |
839 | return; | ||
840 | 843 | ||
841 | spin_lock(&kvm->mmu_lock); | 844 | spin_lock(&kvm->mmu_lock); |
842 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); | 845 | if (kvm->arch.pgd) { |
846 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); | ||
847 | pgd = READ_ONCE(kvm->arch.pgd); | ||
848 | kvm->arch.pgd = NULL; | ||
849 | } | ||
843 | spin_unlock(&kvm->mmu_lock); | 850 | spin_unlock(&kvm->mmu_lock); |
844 | 851 | ||
845 | /* Free the HW pgd, one page at a time */ | 852 | /* Free the HW pgd, one page at a time */ |
846 | free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE); | 853 | if (pgd) |
847 | kvm->arch.pgd = NULL; | 854 | free_pages_exact(pgd, S2_PGD_SIZE); |
848 | } | 855 | } |
849 | 856 | ||
850 | static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | 857 | static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
@@ -1170,11 +1177,13 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) | |||
1170 | * large. Otherwise, we may see kernel panics with | 1177 | * large. Otherwise, we may see kernel panics with |
1171 | * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, | 1178 | * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, |
1172 | * CONFIG_LOCKDEP. Additionally, holding the lock too long | 1179 | * CONFIG_LOCKDEP. Additionally, holding the lock too long |
1173 | * will also starve other vCPUs. | 1180 | * will also starve other vCPUs. We have to also make sure |
1181 | * that the page tables are not freed while we released | ||
1182 | * the lock. | ||
1174 | */ | 1183 | */ |
1175 | if (need_resched() || spin_needbreak(&kvm->mmu_lock)) | 1184 | cond_resched_lock(&kvm->mmu_lock); |
1176 | cond_resched_lock(&kvm->mmu_lock); | 1185 | if (!READ_ONCE(kvm->arch.pgd)) |
1177 | 1186 | break; | |
1178 | next = stage2_pgd_addr_end(addr, end); | 1187 | next = stage2_pgd_addr_end(addr, end); |
1179 | if (stage2_pgd_present(*pgd)) | 1188 | if (stage2_pgd_present(*pgd)) |
1180 | stage2_wp_puds(pgd, addr, next); | 1189 | stage2_wp_puds(pgd, addr, next); |
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index dc68e2e424ab..3a0b8999f011 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c | |||
@@ -242,8 +242,11 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
242 | * If we are creating a VCPU with a GICv3 we must also register the | 242 | * If we are creating a VCPU with a GICv3 we must also register the |
243 | * KVM io device for the redistributor that belongs to this VCPU. | 243 | * KVM io device for the redistributor that belongs to this VCPU. |
244 | */ | 244 | */ |
245 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) | 245 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { |
246 | mutex_lock(&vcpu->kvm->lock); | ||
246 | ret = vgic_register_redist_iodev(vcpu); | 247 | ret = vgic_register_redist_iodev(vcpu); |
248 | mutex_unlock(&vcpu->kvm->lock); | ||
249 | } | ||
247 | return ret; | 250 | return ret; |
248 | } | 251 | } |
249 | 252 | ||
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 99da1a207c19..201d5e2e973d 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c | |||
@@ -586,7 +586,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) | |||
586 | if (!vgic_v3_check_base(kvm)) | 586 | if (!vgic_v3_check_base(kvm)) |
587 | return -EINVAL; | 587 | return -EINVAL; |
588 | 588 | ||
589 | rd_base = vgic->vgic_redist_base + kvm_vcpu_get_idx(vcpu) * SZ_64K * 2; | 589 | rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset; |
590 | sgi_base = rd_base + SZ_64K; | 590 | sgi_base = rd_base + SZ_64K; |
591 | 591 | ||
592 | kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops); | 592 | kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops); |
@@ -614,11 +614,15 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) | |||
614 | mutex_lock(&kvm->slots_lock); | 614 | mutex_lock(&kvm->slots_lock); |
615 | ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base, | 615 | ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base, |
616 | SZ_64K, &sgi_dev->dev); | 616 | SZ_64K, &sgi_dev->dev); |
617 | mutex_unlock(&kvm->slots_lock); | 617 | if (ret) { |
618 | if (ret) | ||
619 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, | 618 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, |
620 | &rd_dev->dev); | 619 | &rd_dev->dev); |
620 | goto out; | ||
621 | } | ||
621 | 622 | ||
623 | vgic->vgic_redist_free_offset += 2 * SZ_64K; | ||
624 | out: | ||
625 | mutex_unlock(&kvm->slots_lock); | ||
622 | return ret; | 626 | return ret; |
623 | } | 627 | } |
624 | 628 | ||
@@ -644,10 +648,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm) | |||
644 | 648 | ||
645 | if (ret) { | 649 | if (ret) { |
646 | /* The current c failed, so we start with the previous one. */ | 650 | /* The current c failed, so we start with the previous one. */ |
651 | mutex_lock(&kvm->slots_lock); | ||
647 | for (c--; c >= 0; c--) { | 652 | for (c--; c >= 0; c--) { |
648 | vcpu = kvm_get_vcpu(kvm, c); | 653 | vcpu = kvm_get_vcpu(kvm, c); |
649 | vgic_unregister_redist_iodev(vcpu); | 654 | vgic_unregister_redist_iodev(vcpu); |
650 | } | 655 | } |
656 | mutex_unlock(&kvm->slots_lock); | ||
651 | } | 657 | } |
652 | 658 | ||
653 | return ret; | 659 | return ret; |
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index a65757aab6d3..504b4bd0d651 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -149,6 +149,13 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) | |||
149 | if (irq->hw) { | 149 | if (irq->hw) { |
150 | val |= GICH_LR_HW; | 150 | val |= GICH_LR_HW; |
151 | val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT; | 151 | val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT; |
152 | /* | ||
153 | * Never set pending+active on a HW interrupt, as the | ||
154 | * pending state is kept at the physical distributor | ||
155 | * level. | ||
156 | */ | ||
157 | if (irq->active && irq_is_pending(irq)) | ||
158 | val &= ~GICH_LR_PENDING_BIT; | ||
152 | } else { | 159 | } else { |
153 | if (irq->config == VGIC_CONFIG_LEVEL) | 160 | if (irq->config == VGIC_CONFIG_LEVEL) |
154 | val |= GICH_LR_EOI; | 161 | val |= GICH_LR_EOI; |
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 8fa737edde6f..6fe3f003636a 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
@@ -127,6 +127,13 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) | |||
127 | if (irq->hw) { | 127 | if (irq->hw) { |
128 | val |= ICH_LR_HW; | 128 | val |= ICH_LR_HW; |
129 | val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT; | 129 | val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT; |
130 | /* | ||
131 | * Never set pending+active on a HW interrupt, as the | ||
132 | * pending state is kept at the physical distributor | ||
133 | * level. | ||
134 | */ | ||
135 | if (irq->active && irq_is_pending(irq)) | ||
136 | val &= ~ICH_LR_PENDING_BIT; | ||
130 | } else { | 137 | } else { |
131 | if (irq->config == VGIC_CONFIG_LEVEL) | 138 | if (irq->config == VGIC_CONFIG_LEVEL) |
132 | val |= ICH_LR_EOI; | 139 | val |= ICH_LR_EOI; |