diff options
465 files changed, 6721 insertions, 3365 deletions
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml index eee6f0f4aa43..3a626d1b8f2e 100644 --- a/Documentation/DocBook/media/v4l/compat.xml +++ b/Documentation/DocBook/media/v4l/compat.xml | |||
@@ -2545,6 +2545,30 @@ fields changed from _s32 to _u32. | |||
2545 | </orderedlist> | 2545 | </orderedlist> |
2546 | </section> | 2546 | </section> |
2547 | 2547 | ||
2548 | <section> | ||
2549 | <title>V4L2 in Linux 3.16</title> | ||
2550 | <orderedlist> | ||
2551 | <listitem> | ||
2552 | <para>Added event V4L2_EVENT_SOURCE_CHANGE. | ||
2553 | </para> | ||
2554 | </listitem> | ||
2555 | </orderedlist> | ||
2556 | </section> | ||
2557 | |||
2558 | <section> | ||
2559 | <title>V4L2 in Linux 3.17</title> | ||
2560 | <orderedlist> | ||
2561 | <listitem> | ||
2562 | <para>Extended &v4l2-pix-format;. Added format flags. | ||
2563 | </para> | ||
2564 | </listitem> | ||
2565 | <listitem> | ||
2566 | <para>Added compound control types and &VIDIOC-QUERY-EXT-CTRL;. | ||
2567 | </para> | ||
2568 | </listitem> | ||
2569 | </orderedlist> | ||
2570 | </section> | ||
2571 | |||
2548 | <section id="other"> | 2572 | <section id="other"> |
2549 | <title>Relation of V4L2 to other Linux multimedia APIs</title> | 2573 | <title>Relation of V4L2 to other Linux multimedia APIs</title> |
2550 | 2574 | ||
diff --git a/Documentation/DocBook/media/v4l/func-poll.xml b/Documentation/DocBook/media/v4l/func-poll.xml index 85cad8bff5ba..4c73f115219b 100644 --- a/Documentation/DocBook/media/v4l/func-poll.xml +++ b/Documentation/DocBook/media/v4l/func-poll.xml | |||
@@ -29,9 +29,12 @@ can suspend execution until the driver has captured data or is ready | |||
29 | to accept data for output.</para> | 29 | to accept data for output.</para> |
30 | 30 | ||
31 | <para>When streaming I/O has been negotiated this function waits | 31 | <para>When streaming I/O has been negotiated this function waits |
32 | until a buffer has been filled or displayed and can be dequeued with | 32 | until a buffer has been filled by the capture device and can be dequeued |
33 | the &VIDIOC-DQBUF; ioctl. When buffers are already in the outgoing | 33 | with the &VIDIOC-DQBUF; ioctl. For output devices this function waits |
34 | queue of the driver the function returns immediately.</para> | 34 | until the device is ready to accept a new buffer to be queued up with |
35 | the &VIDIOC-QBUF; ioctl for display. When buffers are already in the outgoing | ||
36 | queue of the driver (capture) or the incoming queue isn't full (display) | ||
37 | the function returns immediately.</para> | ||
35 | 38 | ||
36 | <para>On success <function>poll()</function> returns the number of | 39 | <para>On success <function>poll()</function> returns the number of |
37 | file descriptors that have been selected (that is, file descriptors | 40 | file descriptors that have been selected (that is, file descriptors |
@@ -44,10 +47,22 @@ Capture devices set the <constant>POLLIN</constant> and | |||
44 | flags. When the function timed out it returns a value of zero, on | 47 | flags. When the function timed out it returns a value of zero, on |
45 | failure it returns <returnvalue>-1</returnvalue> and the | 48 | failure it returns <returnvalue>-1</returnvalue> and the |
46 | <varname>errno</varname> variable is set appropriately. When the | 49 | <varname>errno</varname> variable is set appropriately. When the |
47 | application did not call &VIDIOC-QBUF; or &VIDIOC-STREAMON; yet the | 50 | application did not call &VIDIOC-STREAMON; the |
48 | <function>poll()</function> function succeeds, but sets the | 51 | <function>poll()</function> function succeeds, but sets the |
49 | <constant>POLLERR</constant> flag in the | 52 | <constant>POLLERR</constant> flag in the |
50 | <structfield>revents</structfield> field.</para> | 53 | <structfield>revents</structfield> field. When the |
54 | application has called &VIDIOC-STREAMON; for a capture device but hasn't | ||
55 | yet called &VIDIOC-QBUF;, the <function>poll()</function> function | ||
56 | succeeds and sets the <constant>POLLERR</constant> flag in the | ||
57 | <structfield>revents</structfield> field. For output devices this | ||
58 | same situation will cause <function>poll()</function> to succeed | ||
59 | as well, but it sets the <constant>POLLOUT</constant> and | ||
60 | <constant>POLLWRNORM</constant> flags in the <structfield>revents</structfield> | ||
61 | field.</para> | ||
62 | |||
63 | <para>If an event occurred (see &VIDIOC-DQEVENT;) then | ||
64 | <constant>POLLPRI</constant> will be set in the <structfield>revents</structfield> | ||
65 | field and <function>poll()</function> will return.</para> | ||
51 | 66 | ||
52 | <para>When use of the <function>read()</function> function has | 67 | <para>When use of the <function>read()</function> function has |
53 | been negotiated and the driver does not capture yet, the | 68 | been negotiated and the driver does not capture yet, the |
@@ -58,10 +73,18 @@ continuously (as opposed to, for example, still images) the function | |||
58 | may return immediately.</para> | 73 | may return immediately.</para> |
59 | 74 | ||
60 | <para>When use of the <function>write()</function> function has | 75 | <para>When use of the <function>write()</function> function has |
61 | been negotiated the <function>poll</function> function just waits | 76 | been negotiated and the driver does not stream yet, the |
77 | <function>poll</function> function starts streaming. When that fails | ||
78 | it returns a <constant>POLLERR</constant> as above. Otherwise it waits | ||
62 | until the driver is ready for a non-blocking | 79 | until the driver is ready for a non-blocking |
63 | <function>write()</function> call.</para> | 80 | <function>write()</function> call.</para> |
64 | 81 | ||
82 | <para>If the caller is only interested in events (just | ||
83 | <constant>POLLPRI</constant> is set in the <structfield>events</structfield> | ||
84 | field), then <function>poll()</function> will <emphasis>not</emphasis> | ||
85 | start streaming if the driver does not stream yet. This makes it | ||
86 | possible to just poll for events and not for buffers.</para> | ||
87 | |||
65 | <para>All drivers implementing the <function>read()</function> or | 88 | <para>All drivers implementing the <function>read()</function> or |
66 | <function>write()</function> function or streaming I/O must also | 89 | <function>write()</function> function or streaming I/O must also |
67 | support the <function>poll()</function> function.</para> | 90 | support the <function>poll()</function> function.</para> |
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml index f2f81f06a17b..7cfe618f754d 100644 --- a/Documentation/DocBook/media/v4l/v4l2.xml +++ b/Documentation/DocBook/media/v4l/v4l2.xml | |||
@@ -152,10 +152,11 @@ structs, ioctls) must be noted in more detail in the history chapter | |||
152 | applications. --> | 152 | applications. --> |
153 | 153 | ||
154 | <revision> | 154 | <revision> |
155 | <revnumber>3.16</revnumber> | 155 | <revnumber>3.17</revnumber> |
156 | <date>2014-05-27</date> | 156 | <date>2014-08-04</date> |
157 | <authorinitials>lp</authorinitials> | 157 | <authorinitials>lp, hv</authorinitials> |
158 | <revremark>Extended &v4l2-pix-format;. Added format flags. | 158 | <revremark>Extended &v4l2-pix-format;. Added format flags. Added compound control types |
159 | and VIDIOC_QUERY_EXT_CTRL. | ||
159 | </revremark> | 160 | </revremark> |
160 | </revision> | 161 | </revision> |
161 | 162 | ||
@@ -538,7 +539,7 @@ and discussions on the V4L mailing list.</revremark> | |||
538 | </partinfo> | 539 | </partinfo> |
539 | 540 | ||
540 | <title>Video for Linux Two API Specification</title> | 541 | <title>Video for Linux Two API Specification</title> |
541 | <subtitle>Revision 3.14</subtitle> | 542 | <subtitle>Revision 3.17</subtitle> |
542 | 543 | ||
543 | <chapter id="common"> | 544 | <chapter id="common"> |
544 | &sub-common; | 545 | &sub-common; |
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml index 1ba9e999af3f..c62a7360719b 100644 --- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml +++ b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml | |||
@@ -119,7 +119,7 @@ | |||
119 | </row> | 119 | </row> |
120 | <row> | 120 | <row> |
121 | <entry>&v4l2-rect;</entry> | 121 | <entry>&v4l2-rect;</entry> |
122 | <entry><structfield>rect</structfield></entry> | 122 | <entry><structfield>r</structfield></entry> |
123 | <entry>Selection rectangle, in pixels.</entry> | 123 | <entry>Selection rectangle, in pixels.</entry> |
124 | </row> | 124 | </row> |
125 | <row> | 125 | <row> |
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt index 7740038d82bc..3c94ff3f9693 100644 --- a/Documentation/cgroups/cpusets.txt +++ b/Documentation/cgroups/cpusets.txt | |||
@@ -345,14 +345,14 @@ the named feature on. | |||
345 | The implementation is simple. | 345 | The implementation is simple. |
346 | 346 | ||
347 | Setting the flag 'cpuset.memory_spread_page' turns on a per-process flag | 347 | Setting the flag 'cpuset.memory_spread_page' turns on a per-process flag |
348 | PF_SPREAD_PAGE for each task that is in that cpuset or subsequently | 348 | PFA_SPREAD_PAGE for each task that is in that cpuset or subsequently |
349 | joins that cpuset. The page allocation calls for the page cache | 349 | joins that cpuset. The page allocation calls for the page cache |
350 | is modified to perform an inline check for this PF_SPREAD_PAGE task | 350 | is modified to perform an inline check for this PFA_SPREAD_PAGE task |
351 | flag, and if set, a call to a new routine cpuset_mem_spread_node() | 351 | flag, and if set, a call to a new routine cpuset_mem_spread_node() |
352 | returns the node to prefer for the allocation. | 352 | returns the node to prefer for the allocation. |
353 | 353 | ||
354 | Similarly, setting 'cpuset.memory_spread_slab' turns on the flag | 354 | Similarly, setting 'cpuset.memory_spread_slab' turns on the flag |
355 | PF_SPREAD_SLAB, and appropriately marked slab caches will allocate | 355 | PFA_SPREAD_SLAB, and appropriately marked slab caches will allocate |
356 | pages from the node returned by cpuset_mem_spread_node(). | 356 | pages from the node returned by cpuset_mem_spread_node(). |
357 | 357 | ||
358 | The cpuset_mem_spread_node() routine is also simple. It uses the | 358 | The cpuset_mem_spread_node() routine is also simple. It uses the |
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt index 298e2f6b33c6..6fd0f15e899a 100644 --- a/Documentation/devicetree/bindings/arm/cpus.txt +++ b/Documentation/devicetree/bindings/arm/cpus.txt | |||
@@ -219,6 +219,12 @@ nodes to be present and contain the properties described below. | |||
219 | Value type: <phandle> | 219 | Value type: <phandle> |
220 | Definition: Specifies the ACC[2] node associated with this CPU. | 220 | Definition: Specifies the ACC[2] node associated with this CPU. |
221 | 221 | ||
222 | - cpu-idle-states | ||
223 | Usage: Optional | ||
224 | Value type: <prop-encoded-array> | ||
225 | Definition: | ||
226 | # List of phandles to idle state nodes supported | ||
227 | by this cpu [3]. | ||
222 | 228 | ||
223 | Example 1 (dual-cluster big.LITTLE system 32-bit): | 229 | Example 1 (dual-cluster big.LITTLE system 32-bit): |
224 | 230 | ||
@@ -415,3 +421,5 @@ cpus { | |||
415 | -- | 421 | -- |
416 | [1] arm/msm/qcom,saw2.txt | 422 | [1] arm/msm/qcom,saw2.txt |
417 | [2] arm/msm/qcom,kpss-acc.txt | 423 | [2] arm/msm/qcom,kpss-acc.txt |
424 | [3] ARM Linux kernel documentation - idle states bindings | ||
425 | Documentation/devicetree/bindings/arm/idle-states.txt | ||
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt index 8b4f7b7fe88b..abde1ea8a119 100644 --- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt +++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt | |||
@@ -8,6 +8,8 @@ Required Properties: | |||
8 | * samsung,exynos4210-pd - for exynos4210 type power domain. | 8 | * samsung,exynos4210-pd - for exynos4210 type power domain. |
9 | - reg: physical base address of the controller and length of memory mapped | 9 | - reg: physical base address of the controller and length of memory mapped |
10 | region. | 10 | region. |
11 | - #power-domain-cells: number of cells in power domain specifier; | ||
12 | must be 0. | ||
11 | 13 | ||
12 | Optional Properties: | 14 | Optional Properties: |
13 | - clocks: List of clock handles. The parent clocks of the input clocks to the | 15 | - clocks: List of clock handles. The parent clocks of the input clocks to the |
@@ -29,6 +31,7 @@ Example: | |||
29 | lcd0: power-domain-lcd0 { | 31 | lcd0: power-domain-lcd0 { |
30 | compatible = "samsung,exynos4210-pd"; | 32 | compatible = "samsung,exynos4210-pd"; |
31 | reg = <0x10023C00 0x10>; | 33 | reg = <0x10023C00 0x10>; |
34 | #power-domain-cells = <0>; | ||
32 | }; | 35 | }; |
33 | 36 | ||
34 | mfc_pd: power-domain@10044060 { | 37 | mfc_pd: power-domain@10044060 { |
@@ -37,12 +40,8 @@ Example: | |||
37 | clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>, | 40 | clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>, |
38 | <&clock CLK_MOUT_USER_ACLK333>; | 41 | <&clock CLK_MOUT_USER_ACLK333>; |
39 | clock-names = "oscclk", "pclk0", "clk0"; | 42 | clock-names = "oscclk", "pclk0", "clk0"; |
43 | #power-domain-cells = <0>; | ||
40 | }; | 44 | }; |
41 | 45 | ||
42 | Example of the node using power domain: | 46 | See Documentation/devicetree/bindings/power/power_domain.txt for description |
43 | 47 | of consumer-side bindings. | |
44 | node { | ||
45 | /* ... */ | ||
46 | samsung,power-domain = <&lcd0>; | ||
47 | /* ... */ | ||
48 | }; | ||
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt new file mode 100644 index 000000000000..37375c7f3ccc --- /dev/null +++ b/Documentation/devicetree/bindings/arm/idle-states.txt | |||
@@ -0,0 +1,679 @@ | |||
1 | ========================================== | ||
2 | ARM idle states binding description | ||
3 | ========================================== | ||
4 | |||
5 | ========================================== | ||
6 | 1 - Introduction | ||
7 | ========================================== | ||
8 | |||
9 | ARM systems contain HW capable of managing power consumption dynamically, | ||
10 | where cores can be put in different low-power states (ranging from simple | ||
11 | wfi to power gating) according to OS PM policies. The CPU states representing | ||
12 | the range of dynamic idle states that a processor can enter at run-time, can be | ||
13 | specified through device tree bindings representing the parameters required | ||
14 | to enter/exit specific idle states on a given processor. | ||
15 | |||
16 | According to the Server Base System Architecture document (SBSA, [3]), the | ||
17 | power states an ARM CPU can be put into are identified by the following list: | ||
18 | |||
19 | - Running | ||
20 | - Idle_standby | ||
21 | - Idle_retention | ||
22 | - Sleep | ||
23 | - Off | ||
24 | |||
25 | The power states described in the SBSA document define the basic CPU states on | ||
26 | top of which ARM platforms implement power management schemes that allow an OS | ||
27 | PM implementation to put the processor in different idle states (which include | ||
28 | states listed above; "off" state is not an idle state since it does not have | ||
29 | wake-up capabilities, hence it is not considered in this document). | ||
30 | |||
31 | Idle state parameters (eg entry latency) are platform specific and need to be | ||
32 | characterized with bindings that provide the required information to OS PM | ||
33 | code so that it can build the required tables and use them at runtime. | ||
34 | |||
35 | The device tree binding definition for ARM idle states is the subject of this | ||
36 | document. | ||
37 | |||
38 | =========================================== | ||
39 | 2 - idle-states definitions | ||
40 | =========================================== | ||
41 | |||
42 | Idle states are characterized for a specific system through a set of | ||
43 | timing and energy related properties, that underline the HW behaviour | ||
44 | triggered upon idle states entry and exit. | ||
45 | |||
46 | The following diagram depicts the CPU execution phases and related timing | ||
47 | properties required to enter and exit an idle state: | ||
48 | |||
49 | ..__[EXEC]__|__[PREP]__|__[ENTRY]__|__[IDLE]__|__[EXIT]__|__[EXEC]__.. | ||
50 | | | | | | | ||
51 | |||
52 | |<------ entry ------->| | ||
53 | | latency | | ||
54 | |<- exit ->| | ||
55 | | latency | | ||
56 | |<-------- min-residency -------->| | ||
57 | |<------- wakeup-latency ------->| | ||
58 | |||
59 | Diagram 1: CPU idle state execution phases | ||
60 | |||
61 | EXEC: Normal CPU execution. | ||
62 | |||
63 | PREP: Preparation phase before committing the hardware to idle mode | ||
64 | like cache flushing. This is abortable on pending wake-up | ||
65 | event conditions. The abort latency is assumed to be negligible | ||
66 | (i.e. less than the ENTRY + EXIT duration). If aborted, CPU | ||
67 | goes back to EXEC. This phase is optional. If not abortable, | ||
68 | this should be included in the ENTRY phase instead. | ||
69 | |||
70 | ENTRY: The hardware is committed to idle mode. This period must run | ||
71 | to completion up to IDLE before anything else can happen. | ||
72 | |||
73 | IDLE: This is the actual energy-saving idle period. This may last | ||
74 | between 0 and infinite time, until a wake-up event occurs. | ||
75 | |||
76 | EXIT: Period during which the CPU is brought back to operational | ||
77 | mode (EXEC). | ||
78 | |||
79 | entry-latency: Worst case latency required to enter the idle state. The | ||
80 | exit-latency may be guaranteed only after entry-latency has passed. | ||
81 | |||
82 | min-residency: Minimum period, including preparation and entry, for a given | ||
83 | idle state to be worthwhile energywise. | ||
84 | |||
85 | wakeup-latency: Maximum delay between the signaling of a wake-up event and the | ||
86 | CPU being able to execute normal code again. If not specified, this is assumed | ||
87 | to be entry-latency + exit-latency. | ||
88 | |||
89 | These timing parameters can be used by an OS in different circumstances. | ||
90 | |||
91 | An idle CPU requires the expected min-residency time to select the most | ||
92 | appropriate idle state based on the expected expiry time of the next IRQ | ||
93 | (ie wake-up) that causes the CPU to return to the EXEC phase. | ||
94 | |||
95 | An operating system scheduler may need to compute the shortest wake-up delay | ||
96 | for CPUs in the system by detecting how long it will take to get a CPU out | ||
97 | of an idle state, eg: | ||
98 | |||
99 | wakeup-delay = exit-latency + max(entry-latency - (now - entry-timestamp), 0) | ||
100 | |||
101 | In other words, the scheduler can make its scheduling decision by selecting | ||
102 | (eg waking-up) the CPU with the shortest wake-up latency. | ||
103 | The wake-up latency must take into account the entry latency if that period | ||
104 | has not expired. The abortable nature of the PREP period can be ignored | ||
105 | if it cannot be relied upon (e.g. the PREP deadline may occur much sooner than | ||
106 | the worst case since it depends on the CPU operating conditions, ie caches | ||
107 | state). | ||
108 | |||
109 | An OS has to reliably probe the wakeup-latency since some devices can enforce | ||
110 | latency constraints guarantees to work properly, so the OS has to detect the | ||
111 | worst case wake-up latency it can incur if a CPU is allowed to enter an | ||
112 | idle state, and possibly to prevent that to guarantee reliable device | ||
113 | functioning. | ||
114 | |||
115 | The min-residency time parameter deserves further explanation since it is | ||
116 | expressed in time units but must factor in energy consumption coefficients. | ||
117 | |||
118 | The energy consumption of a cpu when it enters a power state can be roughly | ||
119 | characterised by the following graph: | ||
120 | |||
121 | | | ||
122 | | | ||
123 | | | ||
124 | e | | ||
125 | n | /--- | ||
126 | e | /------ | ||
127 | r | /------ | ||
128 | g | /----- | ||
129 | y | /------ | ||
130 | | ---- | ||
131 | | /| | ||
132 | | / | | ||
133 | | / | | ||
134 | | / | | ||
135 | | / | | ||
136 | | / | | ||
137 | |/ | | ||
138 | -----|-------+---------------------------------- | ||
139 | 0| 1 time(ms) | ||
140 | |||
141 | Graph 1: Energy vs time example | ||
142 | |||
143 | The graph is split in two parts delimited by time 1ms on the X-axis. | ||
144 | The graph curve with X-axis values = { x | 0 < x < 1ms } has a steep slope | ||
145 | and denotes the energy costs incurred whilst entering and leaving the idle | ||
146 | state. | ||
147 | The graph curve in the area delimited by X-axis values = {x | x > 1ms } has | ||
148 | shallower slope and essentially represents the energy consumption of the idle | ||
149 | state. | ||
150 | |||
151 | min-residency is defined for a given idle state as the minimum expected | ||
152 | residency time for a state (inclusive of preparation and entry) after | ||
153 | which choosing that state becomes the most energy efficient option. A good | ||
154 | way to visualise this, is by taking the same graph above and comparing some | ||
155 | states energy consumptions plots. | ||
156 | |||
157 | For sake of simplicity, let's consider a system with two idle states IDLE1, | ||
158 | and IDLE2: | ||
159 | |||
160 | | | ||
161 | | | ||
162 | | | ||
163 | | /-- IDLE1 | ||
164 | e | /--- | ||
165 | n | /---- | ||
166 | e | /--- | ||
167 | r | /-----/--------- IDLE2 | ||
168 | g | /-------/--------- | ||
169 | y | ------------ /---| | ||
170 | | / /---- | | ||
171 | | / /--- | | ||
172 | | / /---- | | ||
173 | | / /--- | | ||
174 | | --- | | ||
175 | | / | | ||
176 | | / | | ||
177 | |/ | time | ||
178 | ---/----------------------------+------------------------ | ||
179 | |IDLE1-energy < IDLE2-energy | IDLE2-energy < IDLE1-energy | ||
180 | | | ||
181 | IDLE2-min-residency | ||
182 | |||
183 | Graph 2: idle states min-residency example | ||
184 | |||
185 | In graph 2 above, that takes into account idle states entry/exit energy | ||
186 | costs, it is clear that if the idle state residency time (ie time till next | ||
187 | wake-up IRQ) is less than IDLE2-min-residency, IDLE1 is the better idle state | ||
188 | choice energywise. | ||
189 | |||
190 | This is mainly down to the fact that IDLE1 entry/exit energy costs are lower | ||
191 | than IDLE2. | ||
192 | |||
193 | However, the lower power consumption (ie shallower energy curve slope) of idle | ||
194 | state IDLE2 implies that after a suitable time, IDLE2 becomes more energy | ||
195 | efficient. | ||
196 | |||
197 | The time at which IDLE2 becomes more energy efficient than IDLE1 (and other | ||
198 | shallower states in a system with multiple idle states) is defined as | ||
199 | IDLE2-min-residency and corresponds to the time when energy consumption of | ||
200 | IDLE1 and IDLE2 states breaks even. | ||
201 | |||
202 | The definitions provided in this section underpin the idle states | ||
203 | properties specification that is the subject of the following sections. | ||
204 | |||
205 | =========================================== | ||
206 | 3 - idle-states node | ||
207 | =========================================== | ||
208 | |||
209 | ARM processor idle states are defined within the idle-states node, which is | ||
210 | a direct child of the cpus node [1] and provides a container where the | ||
211 | processor idle states, defined as device tree nodes, are listed. | ||
212 | |||
213 | - idle-states node | ||
214 | |||
215 | Usage: Optional - On ARM systems, it is a container of processor idle | ||
216 | states nodes. If the system does not provide CPU | ||
217 | power management capabilities or the processor just | ||
218 | supports idle_standby an idle-states node is not | ||
219 | required. | ||
220 | |||
221 | Description: idle-states node is a container node, where its | ||
222 | subnodes describe the CPU idle states. | ||
223 | |||
224 | Node name must be "idle-states". | ||
225 | |||
226 | The idle-states node's parent node must be the cpus node. | ||
227 | |||
228 | The idle-states node's child nodes can be: | ||
229 | |||
230 | - one or more state nodes | ||
231 | |||
232 | Any other configuration is considered invalid. | ||
233 | |||
234 | An idle-states node defines the following properties: | ||
235 | |||
236 | - entry-method | ||
237 | Value type: <stringlist> | ||
238 | Usage and definition depend on ARM architecture version. | ||
239 | # On ARM v8 64-bit this property is required and must | ||
240 | be one of: | ||
241 | - "psci" (see bindings in [2]) | ||
242 | # On ARM 32-bit systems this property is optional | ||
243 | |||
244 | The nodes describing the idle states (state) can only be defined within the | ||
245 | idle-states node, any other configuration is considered invalid and therefore | ||
246 | must be ignored. | ||
247 | |||
248 | =========================================== | ||
249 | 4 - state node | ||
250 | =========================================== | ||
251 | |||
252 | A state node represents an idle state description and must be defined as | ||
253 | follows: | ||
254 | |||
255 | - state node | ||
256 | |||
257 | Description: must be child of the idle-states node | ||
258 | |||
259 | The state node name shall follow standard device tree naming | ||
260 | rules ([5], 2.2.1 "Node names"), in particular state nodes which | ||
261 | are siblings within a single common parent must be given a unique name. | ||
262 | |||
263 | The idle state entered by executing the wfi instruction (idle_standby | ||
264 | SBSA,[3][4]) is considered standard on all ARM platforms and therefore | ||
265 | must not be listed. | ||
266 | |||
267 | With the definitions provided above, the following list represents | ||
268 | the valid properties for a state node: | ||
269 | |||
270 | - compatible | ||
271 | Usage: Required | ||
272 | Value type: <stringlist> | ||
273 | Definition: Must be "arm,idle-state". | ||
274 | |||
275 | - local-timer-stop | ||
276 | Usage: See definition | ||
277 | Value type: <none> | ||
278 | Definition: if present the CPU local timer control logic is | ||
279 | lost on state entry, otherwise it is retained. | ||
280 | |||
281 | - entry-latency-us | ||
282 | Usage: Required | ||
283 | Value type: <prop-encoded-array> | ||
284 | Definition: u32 value representing worst case latency in | ||
285 | microseconds required to enter the idle state. | ||
286 | The exit-latency-us duration may be guaranteed | ||
287 | only after entry-latency-us has passed. | ||
288 | |||
289 | - exit-latency-us | ||
290 | Usage: Required | ||
291 | Value type: <prop-encoded-array> | ||
292 | Definition: u32 value representing worst case latency | ||
293 | in microseconds required to exit the idle state. | ||
294 | |||
295 | - min-residency-us | ||
296 | Usage: Required | ||
297 | Value type: <prop-encoded-array> | ||
298 | Definition: u32 value representing minimum residency duration | ||
299 | in microseconds, inclusive of preparation and | ||
300 | entry, for this idle state to be considered | ||
301 | worthwhile energy wise (refer to section 2 of | ||
302 | this document for a complete description). | ||
303 | |||
304 | - wakeup-latency-us: | ||
305 | Usage: Optional | ||
306 | Value type: <prop-encoded-array> | ||
307 | Definition: u32 value representing maximum delay between the | ||
308 | signaling of a wake-up event and the CPU being | ||
309 | able to execute normal code again. If omitted, | ||
310 | this is assumed to be equal to: | ||
311 | |||
312 | entry-latency-us + exit-latency-us | ||
313 | |||
314 | It is important to supply this value on systems | ||
315 | where the duration of PREP phase (see diagram 1, | ||
316 | section 2) is non-negligible. | ||
317 | In such systems entry-latency-us + exit-latency-us | ||
318 | will exceed wakeup-latency-us by this duration. | ||
319 | |||
320 | In addition to the properties listed above, a state node may require | ||
321 | additional properties specific to the entry-method defined in the | ||
322 | idle-states node, please refer to the entry-method bindings | ||
323 | documentation for properties definitions. | ||
324 | |||
325 | =========================================== | ||
326 | 4 - Examples | ||
327 | =========================================== | ||
328 | |||
329 | Example 1 (ARM 64-bit, 16-cpu system, PSCI enable-method): | ||
330 | |||
331 | cpus { | ||
332 | #size-cells = <0>; | ||
333 | #address-cells = <2>; | ||
334 | |||
335 | CPU0: cpu@0 { | ||
336 | device_type = "cpu"; | ||
337 | compatible = "arm,cortex-a57"; | ||
338 | reg = <0x0 0x0>; | ||
339 | enable-method = "psci"; | ||
340 | cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 | ||
341 | &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; | ||
342 | }; | ||
343 | |||
344 | CPU1: cpu@1 { | ||
345 | device_type = "cpu"; | ||
346 | compatible = "arm,cortex-a57"; | ||
347 | reg = <0x0 0x1>; | ||
348 | enable-method = "psci"; | ||
349 | cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 | ||
350 | &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; | ||
351 | }; | ||
352 | |||
353 | CPU2: cpu@100 { | ||
354 | device_type = "cpu"; | ||
355 | compatible = "arm,cortex-a57"; | ||
356 | reg = <0x0 0x100>; | ||
357 | enable-method = "psci"; | ||
358 | cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 | ||
359 | &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; | ||
360 | }; | ||
361 | |||
362 | CPU3: cpu@101 { | ||
363 | device_type = "cpu"; | ||
364 | compatible = "arm,cortex-a57"; | ||
365 | reg = <0x0 0x101>; | ||
366 | enable-method = "psci"; | ||
367 | cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 | ||
368 | &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; | ||
369 | }; | ||
370 | |||
371 | CPU4: cpu@10000 { | ||
372 | device_type = "cpu"; | ||
373 | compatible = "arm,cortex-a57"; | ||
374 | reg = <0x0 0x10000>; | ||
375 | enable-method = "psci"; | ||
376 | cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 | ||
377 | &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; | ||
378 | }; | ||
379 | |||
380 | CPU5: cpu@10001 { | ||
381 | device_type = "cpu"; | ||
382 | compatible = "arm,cortex-a57"; | ||
383 | reg = <0x0 0x10001>; | ||
384 | enable-method = "psci"; | ||
385 | cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 | ||
386 | &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; | ||
387 | }; | ||
388 | |||
389 | CPU6: cpu@10100 { | ||
390 | device_type = "cpu"; | ||
391 | compatible = "arm,cortex-a57"; | ||
392 | reg = <0x0 0x10100>; | ||
393 | enable-method = "psci"; | ||
394 | cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 | ||
395 | &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; | ||
396 | }; | ||
397 | |||
398 | CPU7: cpu@10101 { | ||
399 | device_type = "cpu"; | ||
400 | compatible = "arm,cortex-a57"; | ||
401 | reg = <0x0 0x10101>; | ||
402 | enable-method = "psci"; | ||
403 | cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 | ||
404 | &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; | ||
405 | }; | ||
406 | |||
407 | CPU8: cpu@100000000 { | ||
408 | device_type = "cpu"; | ||
409 | compatible = "arm,cortex-a53"; | ||
410 | reg = <0x1 0x0>; | ||
411 | enable-method = "psci"; | ||
412 | cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 | ||
413 | &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; | ||
414 | }; | ||
415 | |||
416 | CPU9: cpu@100000001 { | ||
417 | device_type = "cpu"; | ||
418 | compatible = "arm,cortex-a53"; | ||
419 | reg = <0x1 0x1>; | ||
420 | enable-method = "psci"; | ||
421 | cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 | ||
422 | &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; | ||
423 | }; | ||
424 | |||
425 | CPU10: cpu@100000100 { | ||
426 | device_type = "cpu"; | ||
427 | compatible = "arm,cortex-a53"; | ||
428 | reg = <0x1 0x100>; | ||
429 | enable-method = "psci"; | ||
430 | cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 | ||
431 | &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; | ||
432 | }; | ||
433 | |||
434 | CPU11: cpu@100000101 { | ||
435 | device_type = "cpu"; | ||
436 | compatible = "arm,cortex-a53"; | ||
437 | reg = <0x1 0x101>; | ||
438 | enable-method = "psci"; | ||
439 | cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 | ||
440 | &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; | ||
441 | }; | ||
442 | |||
443 | CPU12: cpu@100010000 { | ||
444 | device_type = "cpu"; | ||
445 | compatible = "arm,cortex-a53"; | ||
446 | reg = <0x1 0x10000>; | ||
447 | enable-method = "psci"; | ||
448 | cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 | ||
449 | &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; | ||
450 | }; | ||
451 | |||
452 | CPU13: cpu@100010001 { | ||
453 | device_type = "cpu"; | ||
454 | compatible = "arm,cortex-a53"; | ||
455 | reg = <0x1 0x10001>; | ||
456 | enable-method = "psci"; | ||
457 | cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 | ||
458 | &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; | ||
459 | }; | ||
460 | |||
461 | CPU14: cpu@100010100 { | ||
462 | device_type = "cpu"; | ||
463 | compatible = "arm,cortex-a53"; | ||
464 | reg = <0x1 0x10100>; | ||
465 | enable-method = "psci"; | ||
466 | cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 | ||
467 | &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; | ||
468 | }; | ||
469 | |||
470 | CPU15: cpu@100010101 { | ||
471 | device_type = "cpu"; | ||
472 | compatible = "arm,cortex-a53"; | ||
473 | reg = <0x1 0x10101>; | ||
474 | enable-method = "psci"; | ||
475 | cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 | ||
476 | &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; | ||
477 | }; | ||
478 | |||
479 | idle-states { | ||
480 | entry-method = "arm,psci"; | ||
481 | |||
482 | CPU_RETENTION_0_0: cpu-retention-0-0 { | ||
483 | compatible = "arm,idle-state"; | ||
484 | arm,psci-suspend-param = <0x0010000>; | ||
485 | entry-latency-us = <20>; | ||
486 | exit-latency-us = <40>; | ||
487 | min-residency-us = <80>; | ||
488 | }; | ||
489 | |||
490 | CLUSTER_RETENTION_0: cluster-retention-0 { | ||
491 | compatible = "arm,idle-state"; | ||
492 | local-timer-stop; | ||
493 | arm,psci-suspend-param = <0x1010000>; | ||
494 | entry-latency-us = <50>; | ||
495 | exit-latency-us = <100>; | ||
496 | min-residency-us = <250>; | ||
497 | wakeup-latency-us = <130>; | ||
498 | }; | ||
499 | |||
500 | CPU_SLEEP_0_0: cpu-sleep-0-0 { | ||
501 | compatible = "arm,idle-state"; | ||
502 | local-timer-stop; | ||
503 | arm,psci-suspend-param = <0x0010000>; | ||
504 | entry-latency-us = <250>; | ||
505 | exit-latency-us = <500>; | ||
506 | min-residency-us = <950>; | ||
507 | }; | ||
508 | |||
509 | CLUSTER_SLEEP_0: cluster-sleep-0 { | ||
510 | compatible = "arm,idle-state"; | ||
511 | local-timer-stop; | ||
512 | arm,psci-suspend-param = <0x1010000>; | ||
513 | entry-latency-us = <600>; | ||
514 | exit-latency-us = <1100>; | ||
515 | min-residency-us = <2700>; | ||
516 | wakeup-latency-us = <1500>; | ||
517 | }; | ||
518 | |||
519 | CPU_RETENTION_1_0: cpu-retention-1-0 { | ||
520 | compatible = "arm,idle-state"; | ||
521 | arm,psci-suspend-param = <0x0010000>; | ||
522 | entry-latency-us = <20>; | ||
523 | exit-latency-us = <40>; | ||
524 | min-residency-us = <90>; | ||
525 | }; | ||
526 | |||
527 | CLUSTER_RETENTION_1: cluster-retention-1 { | ||
528 | compatible = "arm,idle-state"; | ||
529 | local-timer-stop; | ||
530 | arm,psci-suspend-param = <0x1010000>; | ||
531 | entry-latency-us = <50>; | ||
532 | exit-latency-us = <100>; | ||
533 | min-residency-us = <270>; | ||
534 | wakeup-latency-us = <100>; | ||
535 | }; | ||
536 | |||
537 | CPU_SLEEP_1_0: cpu-sleep-1-0 { | ||
538 | compatible = "arm,idle-state"; | ||
539 | local-timer-stop; | ||
540 | arm,psci-suspend-param = <0x0010000>; | ||
541 | entry-latency-us = <70>; | ||
542 | exit-latency-us = <100>; | ||
543 | min-residency-us = <300>; | ||
544 | wakeup-latency-us = <150>; | ||
545 | }; | ||
546 | |||
547 | CLUSTER_SLEEP_1: cluster-sleep-1 { | ||
548 | compatible = "arm,idle-state"; | ||
549 | local-timer-stop; | ||
550 | arm,psci-suspend-param = <0x1010000>; | ||
551 | entry-latency-us = <500>; | ||
552 | exit-latency-us = <1200>; | ||
553 | min-residency-us = <3500>; | ||
554 | wakeup-latency-us = <1300>; | ||
555 | }; | ||
556 | }; | ||
557 | |||
558 | }; | ||
559 | |||
560 | Example 2 (ARM 32-bit, 8-cpu system, two clusters): | ||
561 | |||
562 | cpus { | ||
563 | #size-cells = <0>; | ||
564 | #address-cells = <1>; | ||
565 | |||
566 | CPU0: cpu@0 { | ||
567 | device_type = "cpu"; | ||
568 | compatible = "arm,cortex-a15"; | ||
569 | reg = <0x0>; | ||
570 | cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>; | ||
571 | }; | ||
572 | |||
573 | CPU1: cpu@1 { | ||
574 | device_type = "cpu"; | ||
575 | compatible = "arm,cortex-a15"; | ||
576 | reg = <0x1>; | ||
577 | cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>; | ||
578 | }; | ||
579 | |||
580 | CPU2: cpu@2 { | ||
581 | device_type = "cpu"; | ||
582 | compatible = "arm,cortex-a15"; | ||
583 | reg = <0x2>; | ||
584 | cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>; | ||
585 | }; | ||
586 | |||
587 | CPU3: cpu@3 { | ||
588 | device_type = "cpu"; | ||
589 | compatible = "arm,cortex-a15"; | ||
590 | reg = <0x3>; | ||
591 | cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>; | ||
592 | }; | ||
593 | |||
594 | CPU4: cpu@100 { | ||
595 | device_type = "cpu"; | ||
596 | compatible = "arm,cortex-a7"; | ||
597 | reg = <0x100>; | ||
598 | cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>; | ||
599 | }; | ||
600 | |||
601 | CPU5: cpu@101 { | ||
602 | device_type = "cpu"; | ||
603 | compatible = "arm,cortex-a7"; | ||
604 | reg = <0x101>; | ||
605 | cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>; | ||
606 | }; | ||
607 | |||
608 | CPU6: cpu@102 { | ||
609 | device_type = "cpu"; | ||
610 | compatible = "arm,cortex-a7"; | ||
611 | reg = <0x102>; | ||
612 | cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>; | ||
613 | }; | ||
614 | |||
615 | CPU7: cpu@103 { | ||
616 | device_type = "cpu"; | ||
617 | compatible = "arm,cortex-a7"; | ||
618 | reg = <0x103>; | ||
619 | cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>; | ||
620 | }; | ||
621 | |||
622 | idle-states { | ||
623 | CPU_SLEEP_0_0: cpu-sleep-0-0 { | ||
624 | compatible = "arm,idle-state"; | ||
625 | local-timer-stop; | ||
626 | entry-latency-us = <200>; | ||
627 | exit-latency-us = <100>; | ||
628 | min-residency-us = <400>; | ||
629 | wakeup-latency-us = <250>; | ||
630 | }; | ||
631 | |||
632 | CLUSTER_SLEEP_0: cluster-sleep-0 { | ||
633 | compatible = "arm,idle-state"; | ||
634 | local-timer-stop; | ||
635 | entry-latency-us = <500>; | ||
636 | exit-latency-us = <1500>; | ||
637 | min-residency-us = <2500>; | ||
638 | wakeup-latency-us = <1700>; | ||
639 | }; | ||
640 | |||
641 | CPU_SLEEP_1_0: cpu-sleep-1-0 { | ||
642 | compatible = "arm,idle-state"; | ||
643 | local-timer-stop; | ||
644 | entry-latency-us = <300>; | ||
645 | exit-latency-us = <500>; | ||
646 | min-residency-us = <900>; | ||
647 | wakeup-latency-us = <600>; | ||
648 | }; | ||
649 | |||
650 | CLUSTER_SLEEP_1: cluster-sleep-1 { | ||
651 | compatible = "arm,idle-state"; | ||
652 | local-timer-stop; | ||
653 | entry-latency-us = <800>; | ||
654 | exit-latency-us = <2000>; | ||
655 | min-residency-us = <6500>; | ||
656 | wakeup-latency-us = <2300>; | ||
657 | }; | ||
658 | }; | ||
659 | |||
660 | }; | ||
661 | |||
662 | =========================================== | ||
663 | 5 - References | ||
664 | =========================================== | ||
665 | |||
666 | [1] ARM Linux Kernel documentation - CPUs bindings | ||
667 | Documentation/devicetree/bindings/arm/cpus.txt | ||
668 | |||
669 | [2] ARM Linux Kernel documentation - PSCI bindings | ||
670 | Documentation/devicetree/bindings/arm/psci.txt | ||
671 | |||
672 | [3] ARM Server Base System Architecture (SBSA) | ||
673 | http://infocenter.arm.com/help/index.jsp | ||
674 | |||
675 | [4] ARM Architecture Reference Manuals | ||
676 | http://infocenter.arm.com/help/index.jsp | ||
677 | |||
678 | [5] ePAPR standard | ||
679 | https://www.power.org/documentation/epapr-version-1-1/ | ||
diff --git a/Documentation/devicetree/bindings/arm/psci.txt b/Documentation/devicetree/bindings/arm/psci.txt index b4a58f39223c..5aa40ede0e99 100644 --- a/Documentation/devicetree/bindings/arm/psci.txt +++ b/Documentation/devicetree/bindings/arm/psci.txt | |||
@@ -50,6 +50,16 @@ Main node optional properties: | |||
50 | 50 | ||
51 | - migrate : Function ID for MIGRATE operation | 51 | - migrate : Function ID for MIGRATE operation |
52 | 52 | ||
53 | Device tree nodes that require usage of PSCI CPU_SUSPEND function (ie idle | ||
54 | state nodes, as per bindings in [1]) must specify the following properties: | ||
55 | |||
56 | - arm,psci-suspend-param | ||
57 | Usage: Required for state nodes[1] if the corresponding | ||
58 | idle-states node entry-method property is set | ||
59 | to "psci". | ||
60 | Value type: <u32> | ||
61 | Definition: power_state parameter to pass to the PSCI | ||
62 | suspend call. | ||
53 | 63 | ||
54 | Example: | 64 | Example: |
55 | 65 | ||
@@ -64,7 +74,6 @@ Case 1: PSCI v0.1 only. | |||
64 | migrate = <0x95c10003>; | 74 | migrate = <0x95c10003>; |
65 | }; | 75 | }; |
66 | 76 | ||
67 | |||
68 | Case 2: PSCI v0.2 only | 77 | Case 2: PSCI v0.2 only |
69 | 78 | ||
70 | psci { | 79 | psci { |
@@ -88,3 +97,6 @@ Case 3: PSCI v0.2 and PSCI v0.1. | |||
88 | 97 | ||
89 | ... | 98 | ... |
90 | }; | 99 | }; |
100 | |||
101 | [1] Kernel documentation - ARM idle states bindings | ||
102 | Documentation/devicetree/bindings/arm/idle-states.txt | ||
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt index 366690cb86a3..e41c98ffbccb 100644 --- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt | |||
@@ -1,8 +1,8 @@ | |||
1 | Generic CPU0 cpufreq driver | 1 | Generic cpufreq driver |
2 | 2 | ||
3 | It is a generic cpufreq driver for CPU0 frequency management. It | 3 | It is a generic DT based cpufreq driver for frequency management. It supports |
4 | supports both uniprocessor (UP) and symmetric multiprocessor (SMP) | 4 | both uniprocessor (UP) and symmetric multiprocessor (SMP) systems which share |
5 | systems which share clock and voltage across all CPUs. | 5 | clock and voltage across all CPUs. |
6 | 6 | ||
7 | Both required and optional properties listed below must be defined | 7 | Both required and optional properties listed below must be defined |
8 | under node /cpus/cpu@0. | 8 | under node /cpus/cpu@0. |
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt new file mode 100644 index 000000000000..98c16672ab5f --- /dev/null +++ b/Documentation/devicetree/bindings/power/power_domain.txt | |||
@@ -0,0 +1,49 @@ | |||
1 | * Generic PM domains | ||
2 | |||
3 | System on chip designs are often divided into multiple PM domains that can be | ||
4 | used for power gating of selected IP blocks for power saving by reduced leakage | ||
5 | current. | ||
6 | |||
7 | This device tree binding can be used to bind PM domain consumer devices with | ||
8 | their PM domains provided by PM domain providers. A PM domain provider can be | ||
9 | represented by any node in the device tree and can provide one or more PM | ||
10 | domains. A consumer node can refer to the provider by a phandle and a set of | ||
11 | phandle arguments (so called PM domain specifiers) of length specified by the | ||
12 | #power-domain-cells property in the PM domain provider node. | ||
13 | |||
14 | ==PM domain providers== | ||
15 | |||
16 | Required properties: | ||
17 | - #power-domain-cells : Number of cells in a PM domain specifier; | ||
18 | Typically 0 for nodes representing a single PM domain and 1 for nodes | ||
19 | providing multiple PM domains (e.g. power controllers), but can be any value | ||
20 | as specified by device tree binding documentation of particular provider. | ||
21 | |||
22 | Example: | ||
23 | |||
24 | power: power-controller@12340000 { | ||
25 | compatible = "foo,power-controller"; | ||
26 | reg = <0x12340000 0x1000>; | ||
27 | #power-domain-cells = <1>; | ||
28 | }; | ||
29 | |||
30 | The node above defines a power controller that is a PM domain provider and | ||
31 | expects one cell as its phandle argument. | ||
32 | |||
33 | ==PM domain consumers== | ||
34 | |||
35 | Required properties: | ||
36 | - power-domains : A phandle and PM domain specifier as defined by bindings of | ||
37 | the power controller specified by phandle. | ||
38 | |||
39 | Example: | ||
40 | |||
41 | leaky-device@12350000 { | ||
42 | compatible = "foo,i-leak-current"; | ||
43 | reg = <0x12350000 0x1000>; | ||
44 | power-domains = <&power 0>; | ||
45 | }; | ||
46 | |||
47 | The node above defines a typical PM domain consumer device, which is located | ||
48 | inside a PM domain with index 0 of a power controller represented by a node | ||
49 | with the label "power". | ||
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt b/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt index 578a1fca366e..443bcb6134d5 100644 --- a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt +++ b/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt | |||
@@ -56,6 +56,9 @@ Required properties: | |||
56 | - fsl,data-width : should be <18> or <24> | 56 | - fsl,data-width : should be <18> or <24> |
57 | - port: A port node with endpoint definitions as defined in | 57 | - port: A port node with endpoint definitions as defined in |
58 | Documentation/devicetree/bindings/media/video-interfaces.txt. | 58 | Documentation/devicetree/bindings/media/video-interfaces.txt. |
59 | On i.MX5, the internal two-input-multiplexer is used. | ||
60 | Due to hardware limitations, only one port (port@[0,1]) | ||
61 | can be used for each channel (lvds-channel@[0,1], respectively) | ||
59 | On i.MX6, there should be four ports (port@[0-3]) that correspond | 62 | On i.MX6, there should be four ports (port@[0-3]) that correspond |
60 | to the four LVDS multiplexer inputs. | 63 | to the four LVDS multiplexer inputs. |
61 | 64 | ||
@@ -78,6 +81,8 @@ ldb: ldb@53fa8008 { | |||
78 | "di0", "di1"; | 81 | "di0", "di1"; |
79 | 82 | ||
80 | lvds-channel@0 { | 83 | lvds-channel@0 { |
84 | #address-cells = <1>; | ||
85 | #size-cells = <0>; | ||
81 | reg = <0>; | 86 | reg = <0>; |
82 | fsl,data-mapping = "spwg"; | 87 | fsl,data-mapping = "spwg"; |
83 | fsl,data-width = <24>; | 88 | fsl,data-width = <24>; |
@@ -86,7 +91,9 @@ ldb: ldb@53fa8008 { | |||
86 | /* ... */ | 91 | /* ... */ |
87 | }; | 92 | }; |
88 | 93 | ||
89 | port { | 94 | port@0 { |
95 | reg = <0>; | ||
96 | |||
90 | lvds0_in: endpoint { | 97 | lvds0_in: endpoint { |
91 | remote-endpoint = <&ipu_di0_lvds0>; | 98 | remote-endpoint = <&ipu_di0_lvds0>; |
92 | }; | 99 | }; |
@@ -94,6 +101,8 @@ ldb: ldb@53fa8008 { | |||
94 | }; | 101 | }; |
95 | 102 | ||
96 | lvds-channel@1 { | 103 | lvds-channel@1 { |
104 | #address-cells = <1>; | ||
105 | #size-cells = <0>; | ||
97 | reg = <1>; | 106 | reg = <1>; |
98 | fsl,data-mapping = "spwg"; | 107 | fsl,data-mapping = "spwg"; |
99 | fsl,data-width = <24>; | 108 | fsl,data-width = <24>; |
@@ -102,7 +111,9 @@ ldb: ldb@53fa8008 { | |||
102 | /* ... */ | 111 | /* ... */ |
103 | }; | 112 | }; |
104 | 113 | ||
105 | port { | 114 | port@1 { |
115 | reg = <1>; | ||
116 | |||
106 | lvds1_in: endpoint { | 117 | lvds1_in: endpoint { |
107 | remote-endpoint = <&ipu_di1_lvds1>; | 118 | remote-endpoint = <&ipu_di1_lvds1>; |
108 | }; | 119 | }; |
diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt new file mode 100644 index 000000000000..3a2f54d07fc5 --- /dev/null +++ b/Documentation/devicetree/of_selftest.txt | |||
@@ -0,0 +1,211 @@ | |||
1 | Open Firmware Device Tree Selftest | ||
2 | ---------------------------------- | ||
3 | |||
4 | Author: Gaurav Minocha <gaurav.minocha.os@gmail.com> | ||
5 | |||
6 | 1. Introduction | ||
7 | |||
8 | This document explains how the test data required for executing OF selftest | ||
9 | is attached to the live tree dynamically, independent of the machine's | ||
10 | architecture. | ||
11 | |||
12 | It is recommended to read the following documents before moving ahead. | ||
13 | |||
14 | [1] Documentation/devicetree/usage-model.txt | ||
15 | [2] http://www.devicetree.org/Device_Tree_Usage | ||
16 | |||
17 | OF Selftest has been designed to test the interface (include/linux/of.h) | ||
18 | provided to device driver developers to fetch the device information, etc. | ||
19 | from the unflattened device tree data structure. This interface is used by | ||
20 | most of the device drivers in various use cases. | ||
21 | |||
22 | |||
23 | 2. Test-data | ||
24 | |||
25 | The Device Tree Source file (drivers/of/testcase-data/testcases.dts) contains | ||
26 | the test data required for executing the unit tests automated in | ||
27 | drivers/of/selftests.c. Currently, following Device Tree Source Include files | ||
28 | (.dtsi) are included in testcase.dts: | ||
29 | |||
30 | drivers/of/testcase-data/tests-interrupts.dtsi | ||
31 | drivers/of/testcase-data/tests-platform.dtsi | ||
32 | drivers/of/testcase-data/tests-phandle.dtsi | ||
33 | drivers/of/testcase-data/tests-match.dtsi | ||
34 | |||
35 | When the kernel is built with OF_SELFTEST enabled, then the following make rule | ||
36 | |||
37 | $(obj)/%.dtb: $(src)/%.dts FORCE | ||
38 | $(call if_changed_dep, dtc) | ||
39 | |||
40 | is used to compile the DT source file (testcase.dts) into a binary blob | ||
41 | (testcase.dtb), also referred as flattened DT. | ||
42 | |||
43 | After that, using the following rule the binary blob above is wrapped as an | ||
44 | assembly file (testcase.dtb.S). | ||
45 | |||
46 | $(obj)/%.dtb.S: $(obj)/%.dtb | ||
47 | $(call cmd, dt_S_dtb) | ||
48 | |||
49 | The assembly file is compiled into an object file (testcase.dtb.o), and is | ||
50 | linked into the kernel image. | ||
51 | |||
52 | |||
53 | 2.1. Adding the test data | ||
54 | |||
55 | Un-flattened device tree structure: | ||
56 | |||
57 | Un-flattened device tree consists of connected device_node(s) in form of a tree | ||
58 | structure described below. | ||
59 | |||
60 | // following struct members are used to construct the tree | ||
61 | struct device_node { | ||
62 | ... | ||
63 | struct device_node *parent; | ||
64 | struct device_node *child; | ||
65 | struct device_node *sibling; | ||
66 | struct device_node *allnext; /* next in list of all nodes */ | ||
67 | ... | ||
68 | }; | ||
69 | |||
70 | Figure 1, describes a generic structure of machine’s un-flattened device tree | ||
71 | considering only child and sibling pointers. There exists another pointer, | ||
72 | *parent, that is used to traverse the tree in the reverse direction. So, at | ||
73 | a particular level the child node and all the sibling nodes will have a parent | ||
74 | pointer pointing to a common node (e.g. child1, sibling2, sibling3, sibling4’s | ||
75 | parent points to root node) | ||
76 | |||
77 | root (‘/’) | ||
78 | | | ||
79 | child1 -> sibling2 -> sibling3 -> sibling4 -> null | ||
80 | | | | | | ||
81 | | | | null | ||
82 | | | | | ||
83 | | | child31 -> sibling32 -> null | ||
84 | | | | | | ||
85 | | | null null | ||
86 | | | | ||
87 | | child21 -> sibling22 -> sibling23 -> null | ||
88 | | | | | | ||
89 | | null null null | ||
90 | | | ||
91 | child11 -> sibling12 -> sibling13 -> sibling14 -> null | ||
92 | | | | | | ||
93 | | | | null | ||
94 | | | | | ||
95 | null null child131 -> null | ||
96 | | | ||
97 | null | ||
98 | |||
99 | Figure 1: Generic structure of un-flattened device tree | ||
100 | |||
101 | |||
102 | *allnext: it is used to link all the nodes of DT into a list. So, for the | ||
103 | above tree the list would be as follows: | ||
104 | |||
105 | root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2-> | ||
106 | child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null | ||
107 | |||
108 | Before executing OF selftest, it is required to attach the test data to | ||
109 | machine's device tree (if present). So, when selftest_data_add() is called, | ||
110 | at first it reads the flattened device tree data linked into the kernel image | ||
111 | via the following kernel symbols: | ||
112 | |||
113 | __dtb_testcases_begin - address marking the start of test data blob | ||
114 | __dtb_testcases_end - address marking the end of test data blob | ||
115 | |||
116 | Secondly, it calls of_fdt_unflatten_device_tree() to unflatten the flattened | ||
117 | blob. And finally, if the machine’s device tree (i.e live tree) is present, | ||
118 | then it attaches the unflattened test data tree to the live tree, else it | ||
119 | attaches itself as a live device tree. | ||
120 | |||
121 | attach_node_and_children() uses of_attach_node() to attach the nodes into the | ||
122 | live tree as explained below. To explain the same, the test data tree described | ||
123 | in Figure 2 is attached to the live tree described in Figure 1. | ||
124 | |||
125 | root (‘/’) | ||
126 | | | ||
127 | testcase-data | ||
128 | | | ||
129 | test-child0 -> test-sibling1 -> test-sibling2 -> test-sibling3 -> null | ||
130 | | | | | | ||
131 | test-child01 null null null | ||
132 | |||
133 | |||
134 | allnext list: | ||
135 | |||
136 | root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2 | ||
137 | ->test-sibling3->null | ||
138 | |||
139 | Figure 2: Example test data tree to be attached to live tree. | ||
140 | |||
141 | According to the scenario above, the live tree is already present so it isn’t | ||
142 | required to attach the root(‘/’) node. All other nodes are attached by calling | ||
143 | of_attach_node() on each node. | ||
144 | |||
145 | In the function of_attach_node(), the new node is attached as the child of the | ||
146 | given parent in live tree. But, if parent already has a child then the new node | ||
147 | replaces the current child and turns it into its sibling. So, when the testcase | ||
148 | data node is attached to the live tree above (Figure 1), the final structure is | ||
149 | as shown in Figure 3. | ||
150 | |||
151 | root (‘/’) | ||
152 | | | ||
153 | testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null | ||
154 | | | | | | | ||
155 | (...) | | | null | ||
156 | | | child31 -> sibling32 -> null | ||
157 | | | | | | ||
158 | | | null null | ||
159 | | | | ||
160 | | child21 -> sibling22 -> sibling23 -> null | ||
161 | | | | | | ||
162 | | null null null | ||
163 | | | ||
164 | child11 -> sibling12 -> sibling13 -> sibling14 -> null | ||
165 | | | | | | ||
166 | null null | null | ||
167 | | | ||
168 | child131 -> null | ||
169 | | | ||
170 | null | ||
171 | ----------------------------------------------------------------------- | ||
172 | |||
173 | root (‘/’) | ||
174 | | | ||
175 | testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null | ||
176 | | | | | | | ||
177 | | (...) (...) (...) null | ||
178 | | | ||
179 | test-sibling3 -> test-sibling2 -> test-sibling1 -> test-child0 -> null | ||
180 | | | | | | ||
181 | null null null test-child01 | ||
182 | |||
183 | |||
184 | Figure 3: Live device tree structure after attaching the testcase-data. | ||
185 | |||
186 | |||
187 | Astute readers would have noticed that test-child0 node becomes the last | ||
188 | sibling compared to the earlier structure (Figure 2). After attaching first | ||
189 | test-child0 the test-sibling1 is attached that pushes the child node | ||
190 | (i.e. test-child0) to become a sibling and makes itself a child node, | ||
191 | as mentioned above. | ||
192 | |||
193 | If a duplicate node is found (i.e. if a node with same full_name property is | ||
194 | already present in the live tree), then the node isn’t attached rather its | ||
195 | properties are updated to the live tree’s node by calling the function | ||
196 | update_node_properties(). | ||
197 | |||
198 | |||
199 | 2.2. Removing the test data | ||
200 | |||
201 | Once the test case execution is complete, selftest_data_remove is called in | ||
202 | order to remove the device nodes attached initially (first the leaf nodes are | ||
203 | detached and then moving up the parent nodes are removed, and eventually the | ||
204 | whole tree). selftest_data_remove() calls detach_node_and_children() that uses | ||
205 | of_detach_node() to detach the nodes from the live device tree. | ||
206 | |||
207 | To detach a node, of_detach_node() first updates all_next linked list, by | ||
208 | attaching the previous node’s allnext to current node’s allnext pointer. And | ||
209 | then, it either updates the child pointer of given node’s parent to its | ||
210 | sibling or attaches the previous sibling to the given node’s sibling, as | ||
211 | appropriate. That is it :) | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 10d51c2f10d7..ccbab5653bb8 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -3303,11 +3303,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
3303 | 3303 | ||
3304 | tdfx= [HW,DRM] | 3304 | tdfx= [HW,DRM] |
3305 | 3305 | ||
3306 | test_suspend= [SUSPEND] | 3306 | test_suspend= [SUSPEND][,N] |
3307 | Specify "mem" (for Suspend-to-RAM) or "standby" (for | 3307 | Specify "mem" (for Suspend-to-RAM) or "standby" (for |
3308 | standby suspend) as the system sleep state to briefly | 3308 | standby suspend) or "freeze" (for suspend type freeze) |
3309 | enter during system startup. The system is woken from | 3309 | as the system sleep state during system startup with |
3310 | this state using a wakeup-capable RTC alarm. | 3310 | the optional capability to repeat N number of times. |
3311 | The system is woken from this state using a | ||
3312 | wakeup-capable RTC alarm. | ||
3311 | 3313 | ||
3312 | thash_entries= [KNL,NET] | 3314 | thash_entries= [KNL,NET] |
3313 | Set number of hash buckets for TCP connection | 3315 | Set number of hash buckets for TCP connection |
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index c48a9704bda8..d16f424c5e8d 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt | |||
@@ -462,9 +462,9 @@ JIT compiler | |||
462 | ------------ | 462 | ------------ |
463 | 463 | ||
464 | The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC, | 464 | The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC, |
465 | ARM and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is | 465 | ARM, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler |
466 | transparently invoked for each attached filter from user space or for internal | 466 | is transparently invoked for each attached filter from user space or for |
467 | kernel users if it has been previously enabled by root: | 467 | internal kernel users if it has been previously enabled by root: |
468 | 468 | ||
469 | echo 1 > /proc/sys/net/core/bpf_jit_enable | 469 | echo 1 > /proc/sys/net/core/bpf_jit_enable |
470 | 470 | ||
diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt new file mode 100644 index 000000000000..69663640dea5 --- /dev/null +++ b/Documentation/power/suspend-and-interrupts.txt | |||
@@ -0,0 +1,123 @@ | |||
1 | System Suspend and Device Interrupts | ||
2 | |||
3 | Copyright (C) 2014 Intel Corp. | ||
4 | Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
5 | |||
6 | |||
7 | Suspending and Resuming Device IRQs | ||
8 | ----------------------------------- | ||
9 | |||
10 | Device interrupt request lines (IRQs) are generally disabled during system | ||
11 | suspend after the "late" phase of suspending devices (that is, after all of the | ||
12 | ->prepare, ->suspend and ->suspend_late callbacks have been executed for all | ||
13 | devices). That is done by suspend_device_irqs(). | ||
14 | |||
15 | The rationale for doing so is that after the "late" phase of device suspend | ||
16 | there is no legitimate reason why any interrupts from suspended devices should | ||
17 | trigger and if any devices have not been suspended properly yet, it is better to | ||
18 | block interrupts from them anyway. Also, in the past we had problems with | ||
19 | interrupt handlers for shared IRQs that device drivers implementing them were | ||
20 | not prepared for interrupts triggering after their devices had been suspended. | ||
21 | In some cases they would attempt to access, for example, memory address spaces | ||
22 | of suspended devices and cause unpredictable behavior to ensue as a result. | ||
23 | Unfortunately, such problems are very difficult to debug and the introduction | ||
24 | of suspend_device_irqs(), along with the "noirq" phase of device suspend and | ||
25 | resume, was the only practical way to mitigate them. | ||
26 | |||
27 | Device IRQs are re-enabled during system resume, right before the "early" phase | ||
28 | of resuming devices (that is, before starting to execute ->resume_early | ||
29 | callbacks for devices). The function doing that is resume_device_irqs(). | ||
30 | |||
31 | |||
32 | The IRQF_NO_SUSPEND Flag | ||
33 | ------------------------ | ||
34 | |||
35 | There are interrupts that can legitimately trigger during the entire system | ||
36 | suspend-resume cycle, including the "noirq" phases of suspending and resuming | ||
37 | devices as well as during the time when nonboot CPUs are taken offline and | ||
38 | brought back online. That applies to timer interrupts in the first place, | ||
39 | but also to IPIs and to some other special-purpose interrupts. | ||
40 | |||
41 | The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when | ||
42 | requesting a special-purpose interrupt. It causes suspend_device_irqs() to | ||
43 | leave the corresponding IRQ enabled so as to allow the interrupt to work all | ||
44 | the time as expected. | ||
45 | |||
46 | Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one | ||
47 | user of it. Thus, if the IRQ is shared, all of the interrupt handlers installed | ||
48 | for it will be executed as usual after suspend_device_irqs(), even if the | ||
49 | IRQF_NO_SUSPEND flag was not passed to request_irq() (or equivalent) by some of | ||
50 | the IRQ's users. For this reason, using IRQF_NO_SUSPEND and IRQF_SHARED at the | ||
51 | same time should be avoided. | ||
52 | |||
53 | |||
54 | System Wakeup Interrupts, enable_irq_wake() and disable_irq_wake() | ||
55 | ------------------------------------------------------------------ | ||
56 | |||
57 | System wakeup interrupts generally need to be configured to wake up the system | ||
58 | from sleep states, especially if they are used for different purposes (e.g. as | ||
59 | I/O interrupts) in the working state. | ||
60 | |||
61 | That may involve turning on a special signal handling logic within the platform | ||
62 | (such as an SoC) so that signals from a given line are routed in a different way | ||
63 | during system sleep so as to trigger a system wakeup when needed. For example, | ||
64 | the platform may include a dedicated interrupt controller used specifically for | ||
65 | handling system wakeup events. Then, if a given interrupt line is supposed to | ||
66 | wake up the system from sleep states, the corresponding input of that interrupt | ||
67 | controller needs to be enabled to receive signals from the line in question. | ||
68 | After wakeup, it generally is better to disable that input to prevent the | ||
69 | dedicated controller from triggering interrupts unnecessarily. | ||
70 | |||
71 | The IRQ subsystem provides two helper functions to be used by device drivers for | ||
72 | those purposes. Namely, enable_irq_wake() turns on the platform's logic for | ||
73 | handling the given IRQ as a system wakeup interrupt line and disable_irq_wake() | ||
74 | turns that logic off. | ||
75 | |||
76 | Calling enable_irq_wake() causes suspend_device_irqs() to treat the given IRQ | ||
77 | in a special way. Namely, the IRQ remains enabled, but on the first interrupt | ||
78 | it will be disabled, marked as pending and "suspended" so that it will be | ||
79 | re-enabled by resume_device_irqs() during the subsequent system resume. Also | ||
80 | the PM core is notified about the event which causes the system suspend in | ||
81 | progress to be aborted (that doesn't have to happen immediately, but at one | ||
82 | of the points where the suspend thread looks for pending wakeup events). | ||
83 | |||
84 | This way every interrupt from a wakeup interrupt source will either cause the | ||
85 | system suspend currently in progress to be aborted or wake up the system if | ||
86 | already suspended. However, after suspend_device_irqs() interrupt handlers are | ||
87 | not executed for system wakeup IRQs. They are only executed for IRQF_NO_SUSPEND | ||
88 | IRQs at that time, but those IRQs should not be configured for system wakeup | ||
89 | using enable_irq_wake(). | ||
90 | |||
91 | |||
92 | Interrupts and Suspend-to-Idle | ||
93 | ------------------------------ | ||
94 | |||
95 | Suspend-to-idle (also known as the "freeze" sleep state) is a relatively new | ||
96 | system sleep state that works by idling all of the processors and waiting for | ||
97 | interrupts right after the "noirq" phase of suspending devices. | ||
98 | |||
99 | Of course, this means that all of the interrupts with the IRQF_NO_SUSPEND flag | ||
100 | set will bring CPUs out of idle while in that state, but they will not cause the | ||
101 | IRQ subsystem to trigger a system wakeup. | ||
102 | |||
103 | System wakeup interrupts, in turn, will trigger wakeup from suspend-to-idle in | ||
104 | analogy with what they do in the full system suspend case. The only difference | ||
105 | is that the wakeup from suspend-to-idle is signaled using the usual working | ||
106 | state interrupt delivery mechanisms and doesn't require the platform to use | ||
107 | any special interrupt handling logic for it to work. | ||
108 | |||
109 | |||
110 | IRQF_NO_SUSPEND and enable_irq_wake() | ||
111 | ------------------------------------- | ||
112 | |||
113 | There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND | ||
114 | flag on the same IRQ. | ||
115 | |||
116 | First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND | ||
117 | interrupts (interrupt handlers are invoked after suspend_device_irqs()) are | ||
118 | directly at odds with the rules for handling system wakeup interrupts (interrupt | ||
119 | handlers are not invoked after suspend_device_irqs()). | ||
120 | |||
121 | Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not | ||
122 | to individual interrupt handlers, so sharing an IRQ between a system wakeup | ||
123 | interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index d40c922ec136..fbafc94ac9ae 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1665,6 +1665,12 @@ M: Nicolas Ferre <nicolas.ferre@atmel.com> | |||
1665 | S: Supported | 1665 | S: Supported |
1666 | F: drivers/tty/serial/atmel_serial.c | 1666 | F: drivers/tty/serial/atmel_serial.c |
1667 | 1667 | ||
1668 | ATMEL Audio ALSA driver | ||
1669 | M: Bo Shen <voice.shen@atmel.com> | ||
1670 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | ||
1671 | S: Supported | ||
1672 | F: sound/soc/atmel | ||
1673 | |||
1668 | ATMEL DMA DRIVER | 1674 | ATMEL DMA DRIVER |
1669 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 1675 | M: Nicolas Ferre <nicolas.ferre@atmel.com> |
1670 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1676 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -2098,7 +2104,7 @@ S: Supported | |||
2098 | F: drivers/scsi/bfa/ | 2104 | F: drivers/scsi/bfa/ |
2099 | 2105 | ||
2100 | BROCADE BNA 10 GIGABIT ETHERNET DRIVER | 2106 | BROCADE BNA 10 GIGABIT ETHERNET DRIVER |
2101 | M: Rasesh Mody <rmody@brocade.com> | 2107 | M: Rasesh Mody <rasesh.mody@qlogic.com> |
2102 | L: netdev@vger.kernel.org | 2108 | L: netdev@vger.kernel.org |
2103 | S: Supported | 2109 | S: Supported |
2104 | F: drivers/net/ethernet/brocade/bna/ | 2110 | F: drivers/net/ethernet/brocade/bna/ |
@@ -3012,9 +3018,8 @@ S: Supported | |||
3012 | F: drivers/acpi/dock.c | 3018 | F: drivers/acpi/dock.c |
3013 | 3019 | ||
3014 | DOCUMENTATION | 3020 | DOCUMENTATION |
3015 | M: Randy Dunlap <rdunlap@infradead.org> | 3021 | M: Jiri Kosina <jkosina@suse.cz> |
3016 | L: linux-doc@vger.kernel.org | 3022 | L: linux-doc@vger.kernel.org |
3017 | T: quilt http://www.infradead.org/~rdunlap/Doc/patches/ | ||
3018 | S: Maintained | 3023 | S: Maintained |
3019 | F: Documentation/ | 3024 | F: Documentation/ |
3020 | X: Documentation/ABI/ | 3025 | X: Documentation/ABI/ |
@@ -4477,7 +4482,6 @@ M: Mika Westerberg <mika.westerberg@linux.intel.com> | |||
4477 | L: linux-i2c@vger.kernel.org | 4482 | L: linux-i2c@vger.kernel.org |
4478 | L: linux-acpi@vger.kernel.org | 4483 | L: linux-acpi@vger.kernel.org |
4479 | S: Maintained | 4484 | S: Maintained |
4480 | F: drivers/i2c/i2c-acpi.c | ||
4481 | 4485 | ||
4482 | I2C-TAOS-EVM DRIVER | 4486 | I2C-TAOS-EVM DRIVER |
4483 | M: Jean Delvare <jdelvare@suse.de> | 4487 | M: Jean Delvare <jdelvare@suse.de> |
@@ -5480,7 +5484,7 @@ F: drivers/macintosh/ | |||
5480 | LINUX FOR POWERPC EMBEDDED MPC5XXX | 5484 | LINUX FOR POWERPC EMBEDDED MPC5XXX |
5481 | M: Anatolij Gustschin <agust@denx.de> | 5485 | M: Anatolij Gustschin <agust@denx.de> |
5482 | L: linuxppc-dev@lists.ozlabs.org | 5486 | L: linuxppc-dev@lists.ozlabs.org |
5483 | T: git git://git.denx.de/linux-2.6-agust.git | 5487 | T: git git://git.denx.de/linux-denx-agust.git |
5484 | S: Maintained | 5488 | S: Maintained |
5485 | F: arch/powerpc/platforms/512x/ | 5489 | F: arch/powerpc/platforms/512x/ |
5486 | F: arch/powerpc/platforms/52xx/ | 5490 | F: arch/powerpc/platforms/52xx/ |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 17 | 2 | PATCHLEVEL = 17 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc6 | 4 | EXTRAVERSION = |
5 | NAME = Shuffling Zombie Juror | 5 | NAME = Shuffling Zombie Juror |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index e03fbf3c6889..b40cdadb1f87 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts | |||
@@ -447,22 +447,19 @@ | |||
447 | gpmc,device-width = <2>; | 447 | gpmc,device-width = <2>; |
448 | gpmc,sync-clk-ps = <0>; | 448 | gpmc,sync-clk-ps = <0>; |
449 | gpmc,cs-on-ns = <0>; | 449 | gpmc,cs-on-ns = <0>; |
450 | gpmc,cs-rd-off-ns = <40>; | 450 | gpmc,cs-rd-off-ns = <80>; |
451 | gpmc,cs-wr-off-ns = <40>; | 451 | gpmc,cs-wr-off-ns = <80>; |
452 | gpmc,adv-on-ns = <0>; | 452 | gpmc,adv-on-ns = <0>; |
453 | gpmc,adv-rd-off-ns = <30>; | 453 | gpmc,adv-rd-off-ns = <60>; |
454 | gpmc,adv-wr-off-ns = <30>; | 454 | gpmc,adv-wr-off-ns = <60>; |
455 | gpmc,we-on-ns = <5>; | 455 | gpmc,we-on-ns = <10>; |
456 | gpmc,we-off-ns = <25>; | 456 | gpmc,we-off-ns = <50>; |
457 | gpmc,oe-on-ns = <2>; | 457 | gpmc,oe-on-ns = <4>; |
458 | gpmc,oe-off-ns = <20>; | 458 | gpmc,oe-off-ns = <40>; |
459 | gpmc,access-ns = <20>; | 459 | gpmc,access-ns = <40>; |
460 | gpmc,wr-access-ns = <40>; | 460 | gpmc,wr-access-ns = <80>; |
461 | gpmc,rd-cycle-ns = <40>; | 461 | gpmc,rd-cycle-ns = <80>; |
462 | gpmc,wr-cycle-ns = <40>; | 462 | gpmc,wr-cycle-ns = <80>; |
463 | gpmc,wait-pin = <0>; | ||
464 | gpmc,wait-on-read; | ||
465 | gpmc,wait-on-write; | ||
466 | gpmc,bus-turnaround-ns = <0>; | 463 | gpmc,bus-turnaround-ns = <0>; |
467 | gpmc,cycle2cycle-delay-ns = <0>; | 464 | gpmc,cycle2cycle-delay-ns = <0>; |
468 | gpmc,clk-activation-ns = <0>; | 465 | gpmc,clk-activation-ns = <0>; |
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index c6c58c1c00e3..6b675a02066f 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi | |||
@@ -423,10 +423,14 @@ | |||
423 | status = "disabled"; | 423 | status = "disabled"; |
424 | 424 | ||
425 | lvds-channel@0 { | 425 | lvds-channel@0 { |
426 | #address-cells = <1>; | ||
427 | #size-cells = <0>; | ||
426 | reg = <0>; | 428 | reg = <0>; |
427 | status = "disabled"; | 429 | status = "disabled"; |
428 | 430 | ||
429 | port { | 431 | port@0 { |
432 | reg = <0>; | ||
433 | |||
430 | lvds0_in: endpoint { | 434 | lvds0_in: endpoint { |
431 | remote-endpoint = <&ipu_di0_lvds0>; | 435 | remote-endpoint = <&ipu_di0_lvds0>; |
432 | }; | 436 | }; |
@@ -434,10 +438,14 @@ | |||
434 | }; | 438 | }; |
435 | 439 | ||
436 | lvds-channel@1 { | 440 | lvds-channel@1 { |
441 | #address-cells = <1>; | ||
442 | #size-cells = <0>; | ||
437 | reg = <1>; | 443 | reg = <1>; |
438 | status = "disabled"; | 444 | status = "disabled"; |
439 | 445 | ||
440 | port { | 446 | port@1 { |
447 | reg = <1>; | ||
448 | |||
441 | lvds1_in: endpoint { | 449 | lvds1_in: endpoint { |
442 | remote-endpoint = <&ipu_di1_lvds1>; | 450 | remote-endpoint = <&ipu_di1_lvds1>; |
443 | }; | 451 | }; |
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi index 598afe91c676..4773d6af66a0 100644 --- a/arch/arm/boot/dts/k2e-clocks.dtsi +++ b/arch/arm/boot/dts/k2e-clocks.dtsi | |||
@@ -40,7 +40,7 @@ clocks { | |||
40 | #clock-cells = <0>; | 40 | #clock-cells = <0>; |
41 | compatible = "ti,keystone,psc-clock"; | 41 | compatible = "ti,keystone,psc-clock"; |
42 | clocks = <&chipclk16>; | 42 | clocks = <&chipclk16>; |
43 | clock-output-names = "usb"; | 43 | clock-output-names = "usb1"; |
44 | reg = <0x02350004 0xb00>, <0x02350000 0x400>; | 44 | reg = <0x02350004 0xb00>, <0x02350000 0x400>; |
45 | reg-names = "control", "domain"; | 45 | reg-names = "control", "domain"; |
46 | domain-id = <0>; | 46 | domain-id = <0>; |
@@ -60,8 +60,8 @@ clocks { | |||
60 | #clock-cells = <0>; | 60 | #clock-cells = <0>; |
61 | compatible = "ti,keystone,psc-clock"; | 61 | compatible = "ti,keystone,psc-clock"; |
62 | clocks = <&chipclk12>; | 62 | clocks = <&chipclk12>; |
63 | clock-output-names = "pcie"; | 63 | clock-output-names = "pcie1"; |
64 | reg = <0x0235006c 0xb00>, <0x02350000 0x400>; | 64 | reg = <0x0235006c 0xb00>, <0x02350048 0x400>; |
65 | reg-names = "control", "domain"; | 65 | reg-names = "control", "domain"; |
66 | domain-id = <18>; | 66 | domain-id = <18>; |
67 | }; | 67 | }; |
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index b8698ca68647..429471aa7a1f 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts | |||
@@ -353,13 +353,12 @@ | |||
353 | }; | 353 | }; |
354 | 354 | ||
355 | ldo8_reg: ldo8 { | 355 | ldo8_reg: ldo8 { |
356 | /* VDD_3v0: Does not go anywhere */ | 356 | /* VDD_3V_GP: act led/serial console */ |
357 | regulator-name = "ldo8"; | 357 | regulator-name = "ldo8"; |
358 | regulator-min-microvolt = <3000000>; | 358 | regulator-min-microvolt = <3000000>; |
359 | regulator-max-microvolt = <3000000>; | 359 | regulator-max-microvolt = <3000000>; |
360 | regulator-always-on; | ||
360 | regulator-boot-on; | 361 | regulator-boot-on; |
361 | /* Unused */ | ||
362 | status = "disabled"; | ||
363 | }; | 362 | }; |
364 | 363 | ||
365 | ldo9_reg: ldo9 { | 364 | ldo9_reg: ldo9 { |
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index a25c262326dc..322fd1519b09 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | |||
@@ -38,6 +38,7 @@ | |||
38 | compatible = "arm,cortex-a15"; | 38 | compatible = "arm,cortex-a15"; |
39 | reg = <0>; | 39 | reg = <0>; |
40 | cci-control-port = <&cci_control1>; | 40 | cci-control-port = <&cci_control1>; |
41 | cpu-idle-states = <&CLUSTER_SLEEP_BIG>; | ||
41 | }; | 42 | }; |
42 | 43 | ||
43 | cpu1: cpu@1 { | 44 | cpu1: cpu@1 { |
@@ -45,6 +46,7 @@ | |||
45 | compatible = "arm,cortex-a15"; | 46 | compatible = "arm,cortex-a15"; |
46 | reg = <1>; | 47 | reg = <1>; |
47 | cci-control-port = <&cci_control1>; | 48 | cci-control-port = <&cci_control1>; |
49 | cpu-idle-states = <&CLUSTER_SLEEP_BIG>; | ||
48 | }; | 50 | }; |
49 | 51 | ||
50 | cpu2: cpu@2 { | 52 | cpu2: cpu@2 { |
@@ -52,6 +54,7 @@ | |||
52 | compatible = "arm,cortex-a7"; | 54 | compatible = "arm,cortex-a7"; |
53 | reg = <0x100>; | 55 | reg = <0x100>; |
54 | cci-control-port = <&cci_control2>; | 56 | cci-control-port = <&cci_control2>; |
57 | cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; | ||
55 | }; | 58 | }; |
56 | 59 | ||
57 | cpu3: cpu@3 { | 60 | cpu3: cpu@3 { |
@@ -59,6 +62,7 @@ | |||
59 | compatible = "arm,cortex-a7"; | 62 | compatible = "arm,cortex-a7"; |
60 | reg = <0x101>; | 63 | reg = <0x101>; |
61 | cci-control-port = <&cci_control2>; | 64 | cci-control-port = <&cci_control2>; |
65 | cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; | ||
62 | }; | 66 | }; |
63 | 67 | ||
64 | cpu4: cpu@4 { | 68 | cpu4: cpu@4 { |
@@ -66,6 +70,25 @@ | |||
66 | compatible = "arm,cortex-a7"; | 70 | compatible = "arm,cortex-a7"; |
67 | reg = <0x102>; | 71 | reg = <0x102>; |
68 | cci-control-port = <&cci_control2>; | 72 | cci-control-port = <&cci_control2>; |
73 | cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; | ||
74 | }; | ||
75 | |||
76 | idle-states { | ||
77 | CLUSTER_SLEEP_BIG: cluster-sleep-big { | ||
78 | compatible = "arm,idle-state"; | ||
79 | local-timer-stop; | ||
80 | entry-latency-us = <1000>; | ||
81 | exit-latency-us = <700>; | ||
82 | min-residency-us = <2000>; | ||
83 | }; | ||
84 | |||
85 | CLUSTER_SLEEP_LITTLE: cluster-sleep-little { | ||
86 | compatible = "arm,idle-state"; | ||
87 | local-timer-stop; | ||
88 | entry-latency-us = <1000>; | ||
89 | exit-latency-us = <500>; | ||
90 | min-residency-us = <2500>; | ||
91 | }; | ||
69 | }; | 92 | }; |
70 | }; | 93 | }; |
71 | 94 | ||
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig index fdfda1fa9521..7309988b0f1f 100644 --- a/arch/arm/configs/mvebu_v7_defconfig +++ b/arch/arm/configs/mvebu_v7_defconfig | |||
@@ -32,7 +32,7 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y | |||
32 | CONFIG_CPU_IDLE=y | 32 | CONFIG_CPU_IDLE=y |
33 | CONFIG_ARM_MVEBU_V7_CPUIDLE=y | 33 | CONFIG_ARM_MVEBU_V7_CPUIDLE=y |
34 | CONFIG_CPU_FREQ=y | 34 | CONFIG_CPU_FREQ=y |
35 | CONFIG_CPUFREQ_GENERIC=y | 35 | CONFIG_CPUFREQ_DT=y |
36 | CONFIG_VFP=y | 36 | CONFIG_VFP=y |
37 | CONFIG_NET=y | 37 | CONFIG_NET=y |
38 | CONFIG_INET=y | 38 | CONFIG_INET=y |
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 79ecb4f34ffb..10e78d00a0bb 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h | |||
@@ -466,6 +466,7 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size) | |||
466 | */ | 466 | */ |
467 | #define v7_exit_coherency_flush(level) \ | 467 | #define v7_exit_coherency_flush(level) \ |
468 | asm volatile( \ | 468 | asm volatile( \ |
469 | ".arch armv7-a \n\t" \ | ||
469 | "stmfd sp!, {fp, ip} \n\t" \ | 470 | "stmfd sp!, {fp, ip} \n\t" \ |
470 | "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \ | 471 | "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \ |
471 | "bic r0, r0, #"__stringify(CR_C)" \n\t" \ | 472 | "bic r0, r0, #"__stringify(CR_C)" \n\t" \ |
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 36172adda9d0..5f833f7adba1 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h | |||
@@ -81,6 +81,7 @@ static inline void set_tls(unsigned long val) | |||
81 | asm("mcr p15, 0, %0, c13, c0, 3" | 81 | asm("mcr p15, 0, %0, c13, c0, 3" |
82 | : : "r" (val)); | 82 | : : "r" (val)); |
83 | } else { | 83 | } else { |
84 | #ifdef CONFIG_KUSER_HELPERS | ||
84 | /* | 85 | /* |
85 | * User space must never try to access this | 86 | * User space must never try to access this |
86 | * directly. Expect your app to break | 87 | * directly. Expect your app to break |
@@ -89,6 +90,7 @@ static inline void set_tls(unsigned long val) | |||
89 | * entry-armv.S for details) | 90 | * entry-armv.S for details) |
90 | */ | 91 | */ |
91 | *((unsigned int *)0xffff0ff0) = val; | 92 | *((unsigned int *)0xffff0ff0) = val; |
93 | #endif | ||
92 | } | 94 | } |
93 | 95 | ||
94 | } | 96 | } |
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c index 08d731294bcd..b206d7790c77 100644 --- a/arch/arm/kernel/kprobes-test.c +++ b/arch/arm/kernel/kprobes-test.c | |||
@@ -110,10 +110,13 @@ | |||
110 | * | 110 | * |
111 | * @ TESTCASE_START | 111 | * @ TESTCASE_START |
112 | * bl __kprobes_test_case_start | 112 | * bl __kprobes_test_case_start |
113 | * @ start of inline data... | 113 | * .pushsection .rodata |
114 | * "10: | ||
114 | * .ascii "mov r0, r7" @ text title for test case | 115 | * .ascii "mov r0, r7" @ text title for test case |
115 | * .byte 0 | 116 | * .byte 0 |
116 | * .align 2, 0 | 117 | * .popsection |
118 | * @ start of inline data... | ||
119 | * .word 10b @ pointer to title in .rodata section | ||
117 | * | 120 | * |
118 | * @ TEST_ARG_REG | 121 | * @ TEST_ARG_REG |
119 | * .byte ARG_TYPE_REG | 122 | * .byte ARG_TYPE_REG |
@@ -971,7 +974,7 @@ void __naked __kprobes_test_case_start(void) | |||
971 | __asm__ __volatile__ ( | 974 | __asm__ __volatile__ ( |
972 | "stmdb sp!, {r4-r11} \n\t" | 975 | "stmdb sp!, {r4-r11} \n\t" |
973 | "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" | 976 | "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" |
974 | "bic r0, lr, #1 @ r0 = inline title string \n\t" | 977 | "bic r0, lr, #1 @ r0 = inline data \n\t" |
975 | "mov r1, sp \n\t" | 978 | "mov r1, sp \n\t" |
976 | "bl kprobes_test_case_start \n\t" | 979 | "bl kprobes_test_case_start \n\t" |
977 | "bx r0 \n\t" | 980 | "bx r0 \n\t" |
@@ -1349,15 +1352,14 @@ static unsigned long next_instruction(unsigned long pc) | |||
1349 | return pc + 4; | 1352 | return pc + 4; |
1350 | } | 1353 | } |
1351 | 1354 | ||
1352 | static uintptr_t __used kprobes_test_case_start(const char *title, void *stack) | 1355 | static uintptr_t __used kprobes_test_case_start(const char **title, void *stack) |
1353 | { | 1356 | { |
1354 | struct test_arg *args; | 1357 | struct test_arg *args; |
1355 | struct test_arg_end *end_arg; | 1358 | struct test_arg_end *end_arg; |
1356 | unsigned long test_code; | 1359 | unsigned long test_code; |
1357 | 1360 | ||
1358 | args = (struct test_arg *)PTR_ALIGN(title + strlen(title) + 1, 4); | 1361 | current_title = *title++; |
1359 | 1362 | args = (struct test_arg *)title; | |
1360 | current_title = title; | ||
1361 | current_args = args; | 1363 | current_args = args; |
1362 | current_stack = stack; | 1364 | current_stack = stack; |
1363 | 1365 | ||
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h index eecc90a0fd91..4430990e90e7 100644 --- a/arch/arm/kernel/kprobes-test.h +++ b/arch/arm/kernel/kprobes-test.h | |||
@@ -111,11 +111,14 @@ struct test_arg_end { | |||
111 | #define TESTCASE_START(title) \ | 111 | #define TESTCASE_START(title) \ |
112 | __asm__ __volatile__ ( \ | 112 | __asm__ __volatile__ ( \ |
113 | "bl __kprobes_test_case_start \n\t" \ | 113 | "bl __kprobes_test_case_start \n\t" \ |
114 | ".pushsection .rodata \n\t" \ | ||
115 | "10: \n\t" \ | ||
114 | /* don't use .asciz here as 'title' may be */ \ | 116 | /* don't use .asciz here as 'title' may be */ \ |
115 | /* multiple strings to be concatenated. */ \ | 117 | /* multiple strings to be concatenated. */ \ |
116 | ".ascii "#title" \n\t" \ | 118 | ".ascii "#title" \n\t" \ |
117 | ".byte 0 \n\t" \ | 119 | ".byte 0 \n\t" \ |
118 | ".align 2, 0 \n\t" | 120 | ".popsection \n\t" \ |
121 | ".word 10b \n\t" | ||
119 | 122 | ||
120 | #define TEST_ARG_REG(reg, val) \ | 123 | #define TEST_ARG_REG(reg, val) \ |
121 | ".byte "__stringify(ARG_TYPE_REG)" \n\t" \ | 124 | ".byte "__stringify(ARG_TYPE_REG)" \n\t" \ |
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index 6a24e111d6e1..b89e5f35db84 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c | |||
@@ -193,7 +193,6 @@ static void __init exynos_init_late(void) | |||
193 | /* to be supported later */ | 193 | /* to be supported later */ |
194 | return; | 194 | return; |
195 | 195 | ||
196 | pm_genpd_poweroff_unused(); | ||
197 | exynos_pm_init(); | 196 | exynos_pm_init(); |
198 | } | 197 | } |
199 | 198 | ||
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index fd76e1b5a471..20f267121b3e 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c | |||
@@ -105,78 +105,6 @@ static int exynos_pd_power_off(struct generic_pm_domain *domain) | |||
105 | return exynos_pd_power(domain, false); | 105 | return exynos_pd_power(domain, false); |
106 | } | 106 | } |
107 | 107 | ||
108 | static void exynos_add_device_to_domain(struct exynos_pm_domain *pd, | ||
109 | struct device *dev) | ||
110 | { | ||
111 | int ret; | ||
112 | |||
113 | dev_dbg(dev, "adding to power domain %s\n", pd->pd.name); | ||
114 | |||
115 | while (1) { | ||
116 | ret = pm_genpd_add_device(&pd->pd, dev); | ||
117 | if (ret != -EAGAIN) | ||
118 | break; | ||
119 | cond_resched(); | ||
120 | } | ||
121 | |||
122 | pm_genpd_dev_need_restore(dev, true); | ||
123 | } | ||
124 | |||
125 | static void exynos_remove_device_from_domain(struct device *dev) | ||
126 | { | ||
127 | struct generic_pm_domain *genpd = dev_to_genpd(dev); | ||
128 | int ret; | ||
129 | |||
130 | dev_dbg(dev, "removing from power domain %s\n", genpd->name); | ||
131 | |||
132 | while (1) { | ||
133 | ret = pm_genpd_remove_device(genpd, dev); | ||
134 | if (ret != -EAGAIN) | ||
135 | break; | ||
136 | cond_resched(); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | static void exynos_read_domain_from_dt(struct device *dev) | ||
141 | { | ||
142 | struct platform_device *pd_pdev; | ||
143 | struct exynos_pm_domain *pd; | ||
144 | struct device_node *node; | ||
145 | |||
146 | node = of_parse_phandle(dev->of_node, "samsung,power-domain", 0); | ||
147 | if (!node) | ||
148 | return; | ||
149 | pd_pdev = of_find_device_by_node(node); | ||
150 | if (!pd_pdev) | ||
151 | return; | ||
152 | pd = platform_get_drvdata(pd_pdev); | ||
153 | exynos_add_device_to_domain(pd, dev); | ||
154 | } | ||
155 | |||
156 | static int exynos_pm_notifier_call(struct notifier_block *nb, | ||
157 | unsigned long event, void *data) | ||
158 | { | ||
159 | struct device *dev = data; | ||
160 | |||
161 | switch (event) { | ||
162 | case BUS_NOTIFY_BIND_DRIVER: | ||
163 | if (dev->of_node) | ||
164 | exynos_read_domain_from_dt(dev); | ||
165 | |||
166 | break; | ||
167 | |||
168 | case BUS_NOTIFY_UNBOUND_DRIVER: | ||
169 | exynos_remove_device_from_domain(dev); | ||
170 | |||
171 | break; | ||
172 | } | ||
173 | return NOTIFY_DONE; | ||
174 | } | ||
175 | |||
176 | static struct notifier_block platform_nb = { | ||
177 | .notifier_call = exynos_pm_notifier_call, | ||
178 | }; | ||
179 | |||
180 | static __init int exynos4_pm_init_power_domain(void) | 108 | static __init int exynos4_pm_init_power_domain(void) |
181 | { | 109 | { |
182 | struct platform_device *pdev; | 110 | struct platform_device *pdev; |
@@ -202,7 +130,6 @@ static __init int exynos4_pm_init_power_domain(void) | |||
202 | pd->base = of_iomap(np, 0); | 130 | pd->base = of_iomap(np, 0); |
203 | pd->pd.power_off = exynos_pd_power_off; | 131 | pd->pd.power_off = exynos_pd_power_off; |
204 | pd->pd.power_on = exynos_pd_power_on; | 132 | pd->pd.power_on = exynos_pd_power_on; |
205 | pd->pd.of_node = np; | ||
206 | 133 | ||
207 | pd->oscclk = clk_get(dev, "oscclk"); | 134 | pd->oscclk = clk_get(dev, "oscclk"); |
208 | if (IS_ERR(pd->oscclk)) | 135 | if (IS_ERR(pd->oscclk)) |
@@ -228,15 +155,12 @@ static __init int exynos4_pm_init_power_domain(void) | |||
228 | clk_put(pd->oscclk); | 155 | clk_put(pd->oscclk); |
229 | 156 | ||
230 | no_clk: | 157 | no_clk: |
231 | platform_set_drvdata(pdev, pd); | ||
232 | |||
233 | on = __raw_readl(pd->base + 0x4) & INT_LOCAL_PWR_EN; | 158 | on = __raw_readl(pd->base + 0x4) & INT_LOCAL_PWR_EN; |
234 | 159 | ||
235 | pm_genpd_init(&pd->pd, NULL, !on); | 160 | pm_genpd_init(&pd->pd, NULL, !on); |
161 | of_genpd_add_provider_simple(np, &pd->pd); | ||
236 | } | 162 | } |
237 | 163 | ||
238 | bus_register_notifier(&platform_bus_type, &platform_nb); | ||
239 | |||
240 | return 0; | 164 | return 0; |
241 | } | 165 | } |
242 | arch_initcall(exynos4_pm_init_power_domain); | 166 | arch_initcall(exynos4_pm_init_power_domain); |
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c index 84acdfd1d715..5a75cdc81891 100644 --- a/arch/arm/mach-imx/clk-gate2.c +++ b/arch/arm/mach-imx/clk-gate2.c | |||
@@ -97,7 +97,7 @@ static int clk_gate2_is_enabled(struct clk_hw *hw) | |||
97 | struct clk_gate2 *gate = to_clk_gate2(hw); | 97 | struct clk_gate2 *gate = to_clk_gate2(hw); |
98 | 98 | ||
99 | if (gate->share_count) | 99 | if (gate->share_count) |
100 | return !!(*gate->share_count); | 100 | return !!__clk_get_enable_count(hw->clk); |
101 | else | 101 | else |
102 | return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx); | 102 | return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx); |
103 | } | 103 | } |
@@ -127,10 +127,6 @@ struct clk *clk_register_gate2(struct device *dev, const char *name, | |||
127 | gate->bit_idx = bit_idx; | 127 | gate->bit_idx = bit_idx; |
128 | gate->flags = clk_gate2_flags; | 128 | gate->flags = clk_gate2_flags; |
129 | gate->lock = lock; | 129 | gate->lock = lock; |
130 | |||
131 | /* Initialize share_count per hardware state */ | ||
132 | if (share_count) | ||
133 | *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0; | ||
134 | gate->share_count = share_count; | 130 | gate->share_count = share_count; |
135 | 131 | ||
136 | init.name = name; | 132 | init.name = name; |
diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c index 080e66c6a1d0..dc8f1a6f45f2 100644 --- a/arch/arm/mach-imx/imx27-dt.c +++ b/arch/arm/mach-imx/imx27-dt.c | |||
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | static void __init imx27_dt_init(void) | 21 | static void __init imx27_dt_init(void) |
22 | { | 22 | { |
23 | struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; | 23 | struct platform_device_info devinfo = { .name = "cpufreq-dt", }; |
24 | 24 | ||
25 | mxc_arch_reset_init_dt(); | 25 | mxc_arch_reset_init_dt(); |
26 | 26 | ||
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c index c77deb3f0893..2c5fcaf8675b 100644 --- a/arch/arm/mach-imx/mach-imx51.c +++ b/arch/arm/mach-imx/mach-imx51.c | |||
@@ -51,7 +51,7 @@ static void __init imx51_ipu_mipi_setup(void) | |||
51 | 51 | ||
52 | static void __init imx51_dt_init(void) | 52 | static void __init imx51_dt_init(void) |
53 | { | 53 | { |
54 | struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; | 54 | struct platform_device_info devinfo = { .name = "cpufreq-dt", }; |
55 | 55 | ||
56 | mxc_arch_reset_init_dt(); | 56 | mxc_arch_reset_init_dt(); |
57 | imx51_ipu_mipi_setup(); | 57 | imx51_ipu_mipi_setup(); |
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c index 8a70a51533fd..bbd8664d1bac 100644 --- a/arch/arm/mach-mvebu/pmsu.c +++ b/arch/arm/mach-mvebu/pmsu.c | |||
@@ -644,7 +644,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void) | |||
644 | } | 644 | } |
645 | } | 645 | } |
646 | 646 | ||
647 | platform_device_register_simple("cpufreq-generic", -1, NULL, 0); | 647 | platform_device_register_simple("cpufreq-dt", -1, NULL, 0); |
648 | return 0; | 648 | return 0; |
649 | } | 649 | } |
650 | 650 | ||
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index e7189dcc9309..08d4167cc7c5 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -1,9 +1,6 @@ | |||
1 | menu "TI OMAP/AM/DM/DRA Family" | 1 | menu "TI OMAP/AM/DM/DRA Family" |
2 | depends on ARCH_MULTI_V6 || ARCH_MULTI_V7 | 2 | depends on ARCH_MULTI_V6 || ARCH_MULTI_V7 |
3 | 3 | ||
4 | config ARCH_OMAP | ||
5 | bool | ||
6 | |||
7 | config ARCH_OMAP2 | 4 | config ARCH_OMAP2 |
8 | bool "TI OMAP2" | 5 | bool "TI OMAP2" |
9 | depends on ARCH_MULTI_V6 | 6 | depends on ARCH_MULTI_V6 |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 8fd87a3055bf..9e91a4e7519a 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -2065,7 +2065,7 @@ static void _reconfigure_io_chain(void) | |||
2065 | 2065 | ||
2066 | spin_lock_irqsave(&io_chain_lock, flags); | 2066 | spin_lock_irqsave(&io_chain_lock, flags); |
2067 | 2067 | ||
2068 | if (cpu_is_omap34xx() && omap3_has_io_chain_ctrl()) | 2068 | if (cpu_is_omap34xx()) |
2069 | omap3xxx_prm_reconfigure_io_chain(); | 2069 | omap3xxx_prm_reconfigure_io_chain(); |
2070 | else if (cpu_is_omap44xx()) | 2070 | else if (cpu_is_omap44xx()) |
2071 | omap44xx_prm_reconfigure_io_chain(); | 2071 | omap44xx_prm_reconfigure_io_chain(); |
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 828aee9ea6a8..58920bc8807b 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c | |||
@@ -282,7 +282,7 @@ static inline void omap_init_cpufreq(void) | |||
282 | if (!of_have_populated_dt()) | 282 | if (!of_have_populated_dt()) |
283 | devinfo.name = "omap-cpufreq"; | 283 | devinfo.name = "omap-cpufreq"; |
284 | else | 284 | else |
285 | devinfo.name = "cpufreq-cpu0"; | 285 | devinfo.name = "cpufreq-dt"; |
286 | platform_device_register_full(&devinfo); | 286 | platform_device_register_full(&devinfo); |
287 | } | 287 | } |
288 | 288 | ||
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index 2458be6fc67b..372de3edf4a5 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c | |||
@@ -45,7 +45,7 @@ static struct omap_prcm_irq_setup omap3_prcm_irq_setup = { | |||
45 | .ocp_barrier = &omap3xxx_prm_ocp_barrier, | 45 | .ocp_barrier = &omap3xxx_prm_ocp_barrier, |
46 | .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen, | 46 | .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen, |
47 | .restore_irqen = &omap3xxx_prm_restore_irqen, | 47 | .restore_irqen = &omap3xxx_prm_restore_irqen, |
48 | .reconfigure_io_chain = &omap3xxx_prm_reconfigure_io_chain, | 48 | .reconfigure_io_chain = NULL, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | /* | 51 | /* |
@@ -369,15 +369,30 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva) | |||
369 | } | 369 | } |
370 | 370 | ||
371 | /** | 371 | /** |
372 | * omap3xxx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain | 372 | * omap3430_pre_es3_1_reconfigure_io_chain - restart wake-up daisy chain |
373 | * | ||
374 | * The ST_IO_CHAIN bit does not exist in 3430 before es3.1. The only | ||
375 | * thing we can do is toggle EN_IO bit for earlier omaps. | ||
376 | */ | ||
377 | void omap3430_pre_es3_1_reconfigure_io_chain(void) | ||
378 | { | ||
379 | omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, | ||
380 | PM_WKEN); | ||
381 | omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, | ||
382 | PM_WKEN); | ||
383 | omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN); | ||
384 | } | ||
385 | |||
386 | /** | ||
387 | * omap3_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain | ||
373 | * | 388 | * |
374 | * Clear any previously-latched I/O wakeup events and ensure that the | 389 | * Clear any previously-latched I/O wakeup events and ensure that the |
375 | * I/O wakeup gates are aligned with the current mux settings. Works | 390 | * I/O wakeup gates are aligned with the current mux settings. Works |
376 | * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then | 391 | * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then |
377 | * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit. No | 392 | * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit. No |
378 | * return value. | 393 | * return value. These registers are only available in 3430 es3.1 and later. |
379 | */ | 394 | */ |
380 | void omap3xxx_prm_reconfigure_io_chain(void) | 395 | void omap3_prm_reconfigure_io_chain(void) |
381 | { | 396 | { |
382 | int i = 0; | 397 | int i = 0; |
383 | 398 | ||
@@ -400,6 +415,15 @@ void omap3xxx_prm_reconfigure_io_chain(void) | |||
400 | } | 415 | } |
401 | 416 | ||
402 | /** | 417 | /** |
418 | * omap3xxx_prm_reconfigure_io_chain - reconfigure I/O chain | ||
419 | */ | ||
420 | void omap3xxx_prm_reconfigure_io_chain(void) | ||
421 | { | ||
422 | if (omap3_prcm_irq_setup.reconfigure_io_chain) | ||
423 | omap3_prcm_irq_setup.reconfigure_io_chain(); | ||
424 | } | ||
425 | |||
426 | /** | ||
403 | * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches | 427 | * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches |
404 | * | 428 | * |
405 | * Activates the I/O wakeup event latches and allows events logged by | 429 | * Activates the I/O wakeup event latches and allows events logged by |
@@ -656,6 +680,13 @@ static int omap3xxx_prm_late_init(void) | |||
656 | if (!(prm_features & PRM_HAS_IO_WAKEUP)) | 680 | if (!(prm_features & PRM_HAS_IO_WAKEUP)) |
657 | return 0; | 681 | return 0; |
658 | 682 | ||
683 | if (omap3_has_io_chain_ctrl()) | ||
684 | omap3_prcm_irq_setup.reconfigure_io_chain = | ||
685 | omap3_prm_reconfigure_io_chain; | ||
686 | else | ||
687 | omap3_prcm_irq_setup.reconfigure_io_chain = | ||
688 | omap3430_pre_es3_1_reconfigure_io_chain; | ||
689 | |||
659 | omap3xxx_prm_enable_io_wakeup(); | 690 | omap3xxx_prm_enable_io_wakeup(); |
660 | ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); | 691 | ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); |
661 | if (!ret) | 692 | if (!ret) |
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index 630fa916bbc6..04b013fbc98f 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c | |||
@@ -61,7 +61,7 @@ EXPORT_SYMBOL(get_clock_tick_rate); | |||
61 | /* | 61 | /* |
62 | * For non device-tree builds, keep legacy timer init | 62 | * For non device-tree builds, keep legacy timer init |
63 | */ | 63 | */ |
64 | void pxa_timer_init(void) | 64 | void __init pxa_timer_init(void) |
65 | { | 65 | { |
66 | pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000), | 66 | pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000), |
67 | get_clock_tick_rate()); | 67 | get_clock_tick_rate()); |
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index 5c45aae675b6..16547f2641a3 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c | |||
@@ -440,8 +440,3 @@ void s3c64xx_restart(enum reboot_mode mode, const char *cmd) | |||
440 | /* if all else fails, or mode was for soft, jump to 0 */ | 440 | /* if all else fails, or mode was for soft, jump to 0 */ |
441 | soft_restart(0); | 441 | soft_restart(0); |
442 | } | 442 | } |
443 | |||
444 | void __init s3c64xx_init_late(void) | ||
445 | { | ||
446 | s3c64xx_pm_late_initcall(); | ||
447 | } | ||
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h index 7043e7a3a67e..9eb864412911 100644 --- a/arch/arm/mach-s3c64xx/common.h +++ b/arch/arm/mach-s3c64xx/common.h | |||
@@ -23,7 +23,6 @@ void s3c64xx_init_irq(u32 vic0, u32 vic1); | |||
23 | void s3c64xx_init_io(struct map_desc *mach_desc, int size); | 23 | void s3c64xx_init_io(struct map_desc *mach_desc, int size); |
24 | 24 | ||
25 | void s3c64xx_restart(enum reboot_mode mode, const char *cmd); | 25 | void s3c64xx_restart(enum reboot_mode mode, const char *cmd); |
26 | void s3c64xx_init_late(void); | ||
27 | 26 | ||
28 | void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, | 27 | void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, |
29 | unsigned long xusbxti_f, bool is_s3c6400, void __iomem *reg_base); | 28 | unsigned long xusbxti_f, bool is_s3c6400, void __iomem *reg_base); |
@@ -52,12 +51,6 @@ extern void s3c6410_map_io(void); | |||
52 | #define s3c6410_init NULL | 51 | #define s3c6410_init NULL |
53 | #endif | 52 | #endif |
54 | 53 | ||
55 | #ifdef CONFIG_PM | ||
56 | int __init s3c64xx_pm_late_initcall(void); | ||
57 | #else | ||
58 | static inline int s3c64xx_pm_late_initcall(void) { return 0; } | ||
59 | #endif | ||
60 | |||
61 | #ifdef CONFIG_S3C64XX_PL080 | 54 | #ifdef CONFIG_S3C64XX_PL080 |
62 | extern struct pl08x_platform_data s3c64xx_dma0_plat_data; | 55 | extern struct pl08x_platform_data s3c64xx_dma0_plat_data; |
63 | extern struct pl08x_platform_data s3c64xx_dma1_plat_data; | 56 | extern struct pl08x_platform_data s3c64xx_dma1_plat_data; |
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c index 60576dfbea8d..6224c67f5061 100644 --- a/arch/arm/mach-s3c64xx/mach-anw6410.c +++ b/arch/arm/mach-s3c64xx/mach-anw6410.c | |||
@@ -233,7 +233,6 @@ MACHINE_START(ANW6410, "A&W6410") | |||
233 | .init_irq = s3c6410_init_irq, | 233 | .init_irq = s3c6410_init_irq, |
234 | .map_io = anw6410_map_io, | 234 | .map_io = anw6410_map_io, |
235 | .init_machine = anw6410_machine_init, | 235 | .init_machine = anw6410_machine_init, |
236 | .init_late = s3c64xx_init_late, | ||
237 | .init_time = samsung_timer_init, | 236 | .init_time = samsung_timer_init, |
238 | .restart = s3c64xx_restart, | 237 | .restart = s3c64xx_restart, |
239 | MACHINE_END | 238 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c index fe116334afda..10b913baab28 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410.c | |||
@@ -857,7 +857,6 @@ MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410") | |||
857 | .init_irq = s3c6410_init_irq, | 857 | .init_irq = s3c6410_init_irq, |
858 | .map_io = crag6410_map_io, | 858 | .map_io = crag6410_map_io, |
859 | .init_machine = crag6410_machine_init, | 859 | .init_machine = crag6410_machine_init, |
860 | .init_late = s3c64xx_init_late, | ||
861 | .init_time = samsung_timer_init, | 860 | .init_time = samsung_timer_init, |
862 | .restart = s3c64xx_restart, | 861 | .restart = s3c64xx_restart, |
863 | MACHINE_END | 862 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c index 19e8feb908fd..e4b087c58ee6 100644 --- a/arch/arm/mach-s3c64xx/mach-hmt.c +++ b/arch/arm/mach-s3c64xx/mach-hmt.c | |||
@@ -277,7 +277,6 @@ MACHINE_START(HMT, "Airgoo-HMT") | |||
277 | .init_irq = s3c6410_init_irq, | 277 | .init_irq = s3c6410_init_irq, |
278 | .map_io = hmt_map_io, | 278 | .map_io = hmt_map_io, |
279 | .init_machine = hmt_machine_init, | 279 | .init_machine = hmt_machine_init, |
280 | .init_late = s3c64xx_init_late, | ||
281 | .init_time = samsung_timer_init, | 280 | .init_time = samsung_timer_init, |
282 | .restart = s3c64xx_restart, | 281 | .restart = s3c64xx_restart, |
283 | MACHINE_END | 282 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c index 9cbc07602ef3..ab61af50bfb9 100644 --- a/arch/arm/mach-s3c64xx/mach-mini6410.c +++ b/arch/arm/mach-s3c64xx/mach-mini6410.c | |||
@@ -366,7 +366,6 @@ MACHINE_START(MINI6410, "MINI6410") | |||
366 | .init_irq = s3c6410_init_irq, | 366 | .init_irq = s3c6410_init_irq, |
367 | .map_io = mini6410_map_io, | 367 | .map_io = mini6410_map_io, |
368 | .init_machine = mini6410_machine_init, | 368 | .init_machine = mini6410_machine_init, |
369 | .init_late = s3c64xx_init_late, | ||
370 | .init_time = samsung_timer_init, | 369 | .init_time = samsung_timer_init, |
371 | .restart = s3c64xx_restart, | 370 | .restart = s3c64xx_restart, |
372 | MACHINE_END | 371 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c index 4bae7dc49eea..80cb1446f69f 100644 --- a/arch/arm/mach-s3c64xx/mach-ncp.c +++ b/arch/arm/mach-s3c64xx/mach-ncp.c | |||
@@ -103,7 +103,6 @@ MACHINE_START(NCP, "NCP") | |||
103 | .init_irq = s3c6410_init_irq, | 103 | .init_irq = s3c6410_init_irq, |
104 | .map_io = ncp_map_io, | 104 | .map_io = ncp_map_io, |
105 | .init_machine = ncp_machine_init, | 105 | .init_machine = ncp_machine_init, |
106 | .init_late = s3c64xx_init_late, | ||
107 | .init_time = samsung_timer_init, | 106 | .init_time = samsung_timer_init, |
108 | .restart = s3c64xx_restart, | 107 | .restart = s3c64xx_restart, |
109 | MACHINE_END | 108 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c index fbad2af1ef16..85fa9598b980 100644 --- a/arch/arm/mach-s3c64xx/mach-real6410.c +++ b/arch/arm/mach-s3c64xx/mach-real6410.c | |||
@@ -335,7 +335,6 @@ MACHINE_START(REAL6410, "REAL6410") | |||
335 | .init_irq = s3c6410_init_irq, | 335 | .init_irq = s3c6410_init_irq, |
336 | .map_io = real6410_map_io, | 336 | .map_io = real6410_map_io, |
337 | .init_machine = real6410_machine_init, | 337 | .init_machine = real6410_machine_init, |
338 | .init_late = s3c64xx_init_late, | ||
339 | .init_time = samsung_timer_init, | 338 | .init_time = samsung_timer_init, |
340 | .restart = s3c64xx_restart, | 339 | .restart = s3c64xx_restart, |
341 | MACHINE_END | 340 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c index dec4c08e834f..33224ab36fac 100644 --- a/arch/arm/mach-s3c64xx/mach-smartq5.c +++ b/arch/arm/mach-s3c64xx/mach-smartq5.c | |||
@@ -156,7 +156,6 @@ MACHINE_START(SMARTQ5, "SmartQ 5") | |||
156 | .init_irq = s3c6410_init_irq, | 156 | .init_irq = s3c6410_init_irq, |
157 | .map_io = smartq_map_io, | 157 | .map_io = smartq_map_io, |
158 | .init_machine = smartq5_machine_init, | 158 | .init_machine = smartq5_machine_init, |
159 | .init_late = s3c64xx_init_late, | ||
160 | .init_time = samsung_timer_init, | 159 | .init_time = samsung_timer_init, |
161 | .restart = s3c64xx_restart, | 160 | .restart = s3c64xx_restart, |
162 | MACHINE_END | 161 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c index 27b322069c7d..fc7fece22fb0 100644 --- a/arch/arm/mach-s3c64xx/mach-smartq7.c +++ b/arch/arm/mach-s3c64xx/mach-smartq7.c | |||
@@ -172,7 +172,6 @@ MACHINE_START(SMARTQ7, "SmartQ 7") | |||
172 | .init_irq = s3c6410_init_irq, | 172 | .init_irq = s3c6410_init_irq, |
173 | .map_io = smartq_map_io, | 173 | .map_io = smartq_map_io, |
174 | .init_machine = smartq7_machine_init, | 174 | .init_machine = smartq7_machine_init, |
175 | .init_late = s3c64xx_init_late, | ||
176 | .init_time = samsung_timer_init, | 175 | .init_time = samsung_timer_init, |
177 | .restart = s3c64xx_restart, | 176 | .restart = s3c64xx_restart, |
178 | MACHINE_END | 177 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c index 910749768340..6f425126a735 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6400.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c | |||
@@ -92,7 +92,6 @@ MACHINE_START(SMDK6400, "SMDK6400") | |||
92 | .init_irq = s3c6400_init_irq, | 92 | .init_irq = s3c6400_init_irq, |
93 | .map_io = smdk6400_map_io, | 93 | .map_io = smdk6400_map_io, |
94 | .init_machine = smdk6400_machine_init, | 94 | .init_machine = smdk6400_machine_init, |
95 | .init_late = s3c64xx_init_late, | ||
96 | .init_time = samsung_timer_init, | 95 | .init_time = samsung_timer_init, |
97 | .restart = s3c64xx_restart, | 96 | .restart = s3c64xx_restart, |
98 | MACHINE_END | 97 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index 1dc86d76b530..661eb662d051 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c | |||
@@ -705,7 +705,6 @@ MACHINE_START(SMDK6410, "SMDK6410") | |||
705 | .init_irq = s3c6410_init_irq, | 705 | .init_irq = s3c6410_init_irq, |
706 | .map_io = smdk6410_map_io, | 706 | .map_io = smdk6410_map_io, |
707 | .init_machine = smdk6410_machine_init, | 707 | .init_machine = smdk6410_machine_init, |
708 | .init_late = s3c64xx_init_late, | ||
709 | .init_time = samsung_timer_init, | 708 | .init_time = samsung_timer_init, |
710 | .restart = s3c64xx_restart, | 709 | .restart = s3c64xx_restart, |
711 | MACHINE_END | 710 | MACHINE_END |
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c index 6b37694fa335..aaf7bea4032f 100644 --- a/arch/arm/mach-s3c64xx/pm.c +++ b/arch/arm/mach-s3c64xx/pm.c | |||
@@ -347,10 +347,3 @@ static __init int s3c64xx_pm_initcall(void) | |||
347 | return 0; | 347 | return 0; |
348 | } | 348 | } |
349 | arch_initcall(s3c64xx_pm_initcall); | 349 | arch_initcall(s3c64xx_pm_initcall); |
350 | |||
351 | int __init s3c64xx_pm_late_initcall(void) | ||
352 | { | ||
353 | pm_genpd_poweroff_unused(); | ||
354 | |||
355 | return 0; | ||
356 | } | ||
diff --git a/arch/arm/mach-shmobile/board-ape6evm-reference.c b/arch/arm/mach-shmobile/board-ape6evm-reference.c index 2f7723e5fe91..0110751da511 100644 --- a/arch/arm/mach-shmobile/board-ape6evm-reference.c +++ b/arch/arm/mach-shmobile/board-ape6evm-reference.c | |||
@@ -50,7 +50,7 @@ static void __init ape6evm_add_standard_devices(void) | |||
50 | 50 | ||
51 | r8a73a4_add_dt_devices(); | 51 | r8a73a4_add_dt_devices(); |
52 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 52 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
53 | platform_device_register_simple("cpufreq-cpu0", -1, NULL, 0); | 53 | platform_device_register_simple("cpufreq-dt", -1, NULL, 0); |
54 | } | 54 | } |
55 | 55 | ||
56 | static const char *ape6evm_boards_compat_dt[] __initdata = { | 56 | static const char *ape6evm_boards_compat_dt[] __initdata = { |
diff --git a/arch/arm/mach-shmobile/cpufreq.c b/arch/arm/mach-shmobile/cpufreq.c index 8a24b2be46ae..57fbff024dcd 100644 --- a/arch/arm/mach-shmobile/cpufreq.c +++ b/arch/arm/mach-shmobile/cpufreq.c | |||
@@ -12,6 +12,6 @@ | |||
12 | 12 | ||
13 | int __init shmobile_cpufreq_init(void) | 13 | int __init shmobile_cpufreq_init(void) |
14 | { | 14 | { |
15 | platform_device_register_simple("cpufreq-cpu0", -1, NULL, 0); | 15 | platform_device_register_simple("cpufreq-dt", -1, NULL, 0); |
16 | return 0; | 16 | return 0; |
17 | } | 17 | } |
diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c index 69f70b7f7fb2..82fe3d7f9662 100644 --- a/arch/arm/mach-shmobile/pm-r8a7779.c +++ b/arch/arm/mach-shmobile/pm-r8a7779.c | |||
@@ -87,7 +87,6 @@ static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd) | |||
87 | genpd->dev_ops.stop = pm_clk_suspend; | 87 | genpd->dev_ops.stop = pm_clk_suspend; |
88 | genpd->dev_ops.start = pm_clk_resume; | 88 | genpd->dev_ops.start = pm_clk_resume; |
89 | genpd->dev_ops.active_wakeup = pd_active_wakeup; | 89 | genpd->dev_ops.active_wakeup = pd_active_wakeup; |
90 | genpd->dev_irq_safe = true; | ||
91 | genpd->power_off = pd_power_down; | 90 | genpd->power_off = pd_power_down; |
92 | genpd->power_on = pd_power_up; | 91 | genpd->power_on = pd_power_up; |
93 | 92 | ||
diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index ebdd16e94a84..818de2fddfd4 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c | |||
@@ -111,7 +111,6 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) | |||
111 | genpd->dev_ops.stop = pm_clk_suspend; | 111 | genpd->dev_ops.stop = pm_clk_suspend; |
112 | genpd->dev_ops.start = pm_clk_resume; | 112 | genpd->dev_ops.start = pm_clk_resume; |
113 | genpd->dev_ops.active_wakeup = rmobile_pd_active_wakeup; | 113 | genpd->dev_ops.active_wakeup = rmobile_pd_active_wakeup; |
114 | genpd->dev_irq_safe = true; | ||
115 | genpd->power_off = rmobile_pd_power_down; | 114 | genpd->power_off = rmobile_pd_power_down; |
116 | genpd->power_on = rmobile_pd_power_up; | 115 | genpd->power_on = rmobile_pd_power_up; |
117 | __rmobile_pd_power_up(rmobile_pd, false); | 116 | __rmobile_pd_power_up(rmobile_pd, false); |
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index 2c802ae9b241..15b990cd8c70 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c | |||
@@ -775,7 +775,7 @@ void __init sh73a0_add_early_devices(void) | |||
775 | 775 | ||
776 | void __init sh73a0_add_standard_devices_dt(void) | 776 | void __init sh73a0_add_standard_devices_dt(void) |
777 | { | 777 | { |
778 | struct platform_device_info devinfo = { .name = "cpufreq-cpu0", .id = -1, }; | 778 | struct platform_device_info devinfo = { .name = "cpufreq-dt", .id = -1, }; |
779 | 779 | ||
780 | /* clocks are setup late during boot in the case of DT */ | 780 | /* clocks are setup late during boot in the case of DT */ |
781 | sh73a0_clock_init(); | 781 | sh73a0_clock_init(); |
@@ -784,7 +784,7 @@ void __init sh73a0_add_standard_devices_dt(void) | |||
784 | ARRAY_SIZE(sh73a0_devices_dt)); | 784 | ARRAY_SIZE(sh73a0_devices_dt)); |
785 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 785 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
786 | 786 | ||
787 | /* Instantiate cpufreq-cpu0 */ | 787 | /* Instantiate cpufreq-dt */ |
788 | platform_device_register_full(&devinfo); | 788 | platform_device_register_full(&devinfo); |
789 | } | 789 | } |
790 | 790 | ||
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 31a6fa40ba37..ec03ec40e9c6 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c | |||
@@ -104,7 +104,7 @@ static int __init zynq_get_revision(void) | |||
104 | */ | 104 | */ |
105 | static void __init zynq_init_machine(void) | 105 | static void __init zynq_init_machine(void) |
106 | { | 106 | { |
107 | struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; | 107 | struct platform_device_info devinfo = { .name = "cpufreq-dt", }; |
108 | struct soc_device_attribute *soc_dev_attr; | 108 | struct soc_device_attribute *soc_dev_attr; |
109 | struct soc_device *soc_dev; | 109 | struct soc_device *soc_dev; |
110 | struct device *parent = NULL; | 110 | struct device *parent = NULL; |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 0c1ab49e5f7b..83792f4324ea 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -41,6 +41,7 @@ | |||
41 | * This code is not portable to processors with late data abort handling. | 41 | * This code is not portable to processors with late data abort handling. |
42 | */ | 42 | */ |
43 | #define CODING_BITS(i) (i & 0x0e000000) | 43 | #define CODING_BITS(i) (i & 0x0e000000) |
44 | #define COND_BITS(i) (i & 0xf0000000) | ||
44 | 45 | ||
45 | #define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */ | 46 | #define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */ |
46 | #define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */ | 47 | #define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */ |
@@ -821,6 +822,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
821 | break; | 822 | break; |
822 | 823 | ||
823 | case 0x04000000: /* ldr or str immediate */ | 824 | case 0x04000000: /* ldr or str immediate */ |
825 | if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */ | ||
826 | goto bad; | ||
824 | offset.un = OFFSET_BITS(instr); | 827 | offset.un = OFFSET_BITS(instr); |
825 | handler = do_alignment_ldrstr; | 828 | handler = do_alignment_ldrstr; |
826 | break; | 829 | break; |
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index b64e67c7f176..d3daed0ae0ad 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -157,9 +157,9 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
157 | * TFR EV X F IHD LR S | 157 | * TFR EV X F IHD LR S |
158 | * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM | 158 | * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM |
159 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced | 159 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced |
160 | * 11 0 110 1 0011 1100 .111 1101 < we want | 160 | * 11 0 110 0 0011 1100 .111 1101 < we want |
161 | */ | 161 | */ |
162 | .align 2 | 162 | .align 2 |
163 | .type v7_crval, #object | 163 | .type v7_crval, #object |
164 | v7_crval: | 164 | v7_crval: |
165 | crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c | 165 | crval clear=0x0122c302, mmuset=0x30c03c7d, ucset=0x00c01c7c |
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index 02fc10d2d63b..d055db32ffcb 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig | |||
@@ -1,3 +1,6 @@ | |||
1 | config ARCH_OMAP | ||
2 | bool | ||
3 | |||
1 | if ARCH_OMAP | 4 | if ARCH_OMAP |
2 | 5 | ||
3 | menu "TI OMAP Common Features" | 6 | menu "TI OMAP Common Features" |
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index d7b4b38a8e86..47dfa31ad71a 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h | |||
@@ -28,6 +28,8 @@ struct device_node; | |||
28 | * enable-method property. | 28 | * enable-method property. |
29 | * @cpu_init: Reads any data necessary for a specific enable-method from the | 29 | * @cpu_init: Reads any data necessary for a specific enable-method from the |
30 | * devicetree, for a given cpu node and proposed logical id. | 30 | * devicetree, for a given cpu node and proposed logical id. |
31 | * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from | ||
32 | * devicetree, for a given cpu node and proposed logical id. | ||
31 | * @cpu_prepare: Early one-time preparation step for a cpu. If there is a | 33 | * @cpu_prepare: Early one-time preparation step for a cpu. If there is a |
32 | * mechanism for doing so, tests whether it is possible to boot | 34 | * mechanism for doing so, tests whether it is possible to boot |
33 | * the given CPU. | 35 | * the given CPU. |
@@ -47,6 +49,7 @@ struct device_node; | |||
47 | struct cpu_operations { | 49 | struct cpu_operations { |
48 | const char *name; | 50 | const char *name; |
49 | int (*cpu_init)(struct device_node *, unsigned int); | 51 | int (*cpu_init)(struct device_node *, unsigned int); |
52 | int (*cpu_init_idle)(struct device_node *, unsigned int); | ||
50 | int (*cpu_prepare)(unsigned int); | 53 | int (*cpu_prepare)(unsigned int); |
51 | int (*cpu_boot)(unsigned int); | 54 | int (*cpu_boot)(unsigned int); |
52 | void (*cpu_postboot)(void); | 55 | void (*cpu_postboot)(void); |
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h new file mode 100644 index 000000000000..b52a9932e2b1 --- /dev/null +++ b/arch/arm64/include/asm/cpuidle.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_CPUIDLE_H | ||
2 | #define __ASM_CPUIDLE_H | ||
3 | |||
4 | #ifdef CONFIG_CPU_IDLE | ||
5 | extern int cpu_init_idle(unsigned int cpu); | ||
6 | #else | ||
7 | static inline int cpu_init_idle(unsigned int cpu) | ||
8 | { | ||
9 | return -EOPNOTSUPP; | ||
10 | } | ||
11 | #endif | ||
12 | |||
13 | #endif | ||
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index e9c149c042e0..456d67c1f0fa 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h | |||
@@ -21,6 +21,7 @@ struct sleep_save_sp { | |||
21 | phys_addr_t save_ptr_stash_phys; | 21 | phys_addr_t save_ptr_stash_phys; |
22 | }; | 22 | }; |
23 | 23 | ||
24 | extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); | ||
24 | extern void cpu_resume(void); | 25 | extern void cpu_resume(void); |
25 | extern int cpu_suspend(unsigned long); | 26 | extern int cpu_suspend(unsigned long); |
26 | 27 | ||
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index df7ef8768fc2..6e9538c2d28a 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile | |||
@@ -26,6 +26,7 @@ arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o | |||
26 | arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o | 26 | arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o |
27 | arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | 27 | arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o |
28 | arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o | 28 | arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o |
29 | arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o | ||
29 | arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o | 30 | arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o |
30 | arm64-obj-$(CONFIG_KGDB) += kgdb.o | 31 | arm64-obj-$(CONFIG_KGDB) += kgdb.o |
31 | arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o | 32 | arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o |
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c new file mode 100644 index 000000000000..19d17f51db37 --- /dev/null +++ b/arch/arm64/kernel/cpuidle.c | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * ARM64 CPU idle arch support | ||
3 | * | ||
4 | * Copyright (C) 2014 ARM Ltd. | ||
5 | * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/of.h> | ||
13 | #include <linux/of_device.h> | ||
14 | |||
15 | #include <asm/cpuidle.h> | ||
16 | #include <asm/cpu_ops.h> | ||
17 | |||
18 | int cpu_init_idle(unsigned int cpu) | ||
19 | { | ||
20 | int ret = -EOPNOTSUPP; | ||
21 | struct device_node *cpu_node = of_cpu_device_node_get(cpu); | ||
22 | |||
23 | if (!cpu_node) | ||
24 | return -ENODEV; | ||
25 | |||
26 | if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle) | ||
27 | ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu); | ||
28 | |||
29 | of_node_put(cpu_node); | ||
30 | return ret; | ||
31 | } | ||
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 553954771a67..866c1c821860 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/reboot.h> | 21 | #include <linux/reboot.h> |
22 | #include <linux/pm.h> | 22 | #include <linux/pm.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/slab.h> | ||
24 | #include <uapi/linux/psci.h> | 25 | #include <uapi/linux/psci.h> |
25 | 26 | ||
26 | #include <asm/compiler.h> | 27 | #include <asm/compiler.h> |
@@ -28,6 +29,7 @@ | |||
28 | #include <asm/errno.h> | 29 | #include <asm/errno.h> |
29 | #include <asm/psci.h> | 30 | #include <asm/psci.h> |
30 | #include <asm/smp_plat.h> | 31 | #include <asm/smp_plat.h> |
32 | #include <asm/suspend.h> | ||
31 | #include <asm/system_misc.h> | 33 | #include <asm/system_misc.h> |
32 | 34 | ||
33 | #define PSCI_POWER_STATE_TYPE_STANDBY 0 | 35 | #define PSCI_POWER_STATE_TYPE_STANDBY 0 |
@@ -65,6 +67,8 @@ enum psci_function { | |||
65 | PSCI_FN_MAX, | 67 | PSCI_FN_MAX, |
66 | }; | 68 | }; |
67 | 69 | ||
70 | static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state); | ||
71 | |||
68 | static u32 psci_function_id[PSCI_FN_MAX]; | 72 | static u32 psci_function_id[PSCI_FN_MAX]; |
69 | 73 | ||
70 | static int psci_to_linux_errno(int errno) | 74 | static int psci_to_linux_errno(int errno) |
@@ -93,6 +97,18 @@ static u32 psci_power_state_pack(struct psci_power_state state) | |||
93 | & PSCI_0_2_POWER_STATE_AFFL_MASK); | 97 | & PSCI_0_2_POWER_STATE_AFFL_MASK); |
94 | } | 98 | } |
95 | 99 | ||
100 | static void psci_power_state_unpack(u32 power_state, | ||
101 | struct psci_power_state *state) | ||
102 | { | ||
103 | state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >> | ||
104 | PSCI_0_2_POWER_STATE_ID_SHIFT; | ||
105 | state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >> | ||
106 | PSCI_0_2_POWER_STATE_TYPE_SHIFT; | ||
107 | state->affinity_level = | ||
108 | (power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >> | ||
109 | PSCI_0_2_POWER_STATE_AFFL_SHIFT; | ||
110 | } | ||
111 | |||
96 | /* | 112 | /* |
97 | * The following two functions are invoked via the invoke_psci_fn pointer | 113 | * The following two functions are invoked via the invoke_psci_fn pointer |
98 | * and will not be inlined, allowing us to piggyback on the AAPCS. | 114 | * and will not be inlined, allowing us to piggyback on the AAPCS. |
@@ -199,6 +215,63 @@ static int psci_migrate_info_type(void) | |||
199 | return err; | 215 | return err; |
200 | } | 216 | } |
201 | 217 | ||
218 | static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node, | ||
219 | unsigned int cpu) | ||
220 | { | ||
221 | int i, ret, count = 0; | ||
222 | struct psci_power_state *psci_states; | ||
223 | struct device_node *state_node; | ||
224 | |||
225 | /* | ||
226 | * If the PSCI cpu_suspend function hook has not been initialized | ||
227 | * idle states must not be enabled, so bail out | ||
228 | */ | ||
229 | if (!psci_ops.cpu_suspend) | ||
230 | return -EOPNOTSUPP; | ||
231 | |||
232 | /* Count idle states */ | ||
233 | while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", | ||
234 | count))) { | ||
235 | count++; | ||
236 | of_node_put(state_node); | ||
237 | } | ||
238 | |||
239 | if (!count) | ||
240 | return -ENODEV; | ||
241 | |||
242 | psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); | ||
243 | if (!psci_states) | ||
244 | return -ENOMEM; | ||
245 | |||
246 | for (i = 0; i < count; i++) { | ||
247 | u32 psci_power_state; | ||
248 | |||
249 | state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); | ||
250 | |||
251 | ret = of_property_read_u32(state_node, | ||
252 | "arm,psci-suspend-param", | ||
253 | &psci_power_state); | ||
254 | if (ret) { | ||
255 | pr_warn(" * %s missing arm,psci-suspend-param property\n", | ||
256 | state_node->full_name); | ||
257 | of_node_put(state_node); | ||
258 | goto free_mem; | ||
259 | } | ||
260 | |||
261 | of_node_put(state_node); | ||
262 | pr_debug("psci-power-state %#x index %d\n", psci_power_state, | ||
263 | i); | ||
264 | psci_power_state_unpack(psci_power_state, &psci_states[i]); | ||
265 | } | ||
266 | /* Idle states parsed correctly, initialize per-cpu pointer */ | ||
267 | per_cpu(psci_power_state, cpu) = psci_states; | ||
268 | return 0; | ||
269 | |||
270 | free_mem: | ||
271 | kfree(psci_states); | ||
272 | return ret; | ||
273 | } | ||
274 | |||
202 | static int get_set_conduit_method(struct device_node *np) | 275 | static int get_set_conduit_method(struct device_node *np) |
203 | { | 276 | { |
204 | const char *method; | 277 | const char *method; |
@@ -436,8 +509,39 @@ static int cpu_psci_cpu_kill(unsigned int cpu) | |||
436 | #endif | 509 | #endif |
437 | #endif | 510 | #endif |
438 | 511 | ||
512 | static int psci_suspend_finisher(unsigned long index) | ||
513 | { | ||
514 | struct psci_power_state *state = __get_cpu_var(psci_power_state); | ||
515 | |||
516 | return psci_ops.cpu_suspend(state[index - 1], | ||
517 | virt_to_phys(cpu_resume)); | ||
518 | } | ||
519 | |||
520 | static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) | ||
521 | { | ||
522 | int ret; | ||
523 | struct psci_power_state *state = __get_cpu_var(psci_power_state); | ||
524 | /* | ||
525 | * idle state index 0 corresponds to wfi, should never be called | ||
526 | * from the cpu_suspend operations | ||
527 | */ | ||
528 | if (WARN_ON_ONCE(!index)) | ||
529 | return -EINVAL; | ||
530 | |||
531 | if (state->type == PSCI_POWER_STATE_TYPE_STANDBY) | ||
532 | ret = psci_ops.cpu_suspend(state[index - 1], 0); | ||
533 | else | ||
534 | ret = __cpu_suspend(index, psci_suspend_finisher); | ||
535 | |||
536 | return ret; | ||
537 | } | ||
538 | |||
439 | const struct cpu_operations cpu_psci_ops = { | 539 | const struct cpu_operations cpu_psci_ops = { |
440 | .name = "psci", | 540 | .name = "psci", |
541 | #ifdef CONFIG_CPU_IDLE | ||
542 | .cpu_init_idle = cpu_psci_cpu_init_idle, | ||
543 | .cpu_suspend = cpu_psci_cpu_suspend, | ||
544 | #endif | ||
441 | #ifdef CONFIG_SMP | 545 | #ifdef CONFIG_SMP |
442 | .cpu_init = cpu_psci_cpu_init, | 546 | .cpu_init = cpu_psci_cpu_init, |
443 | .cpu_prepare = cpu_psci_cpu_prepare, | 547 | .cpu_prepare = cpu_psci_cpu_prepare, |
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index b1925729c692..a564b440416a 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S | |||
@@ -49,28 +49,39 @@ | |||
49 | orr \dst, \dst, \mask // dst|=(aff3>>rs3) | 49 | orr \dst, \dst, \mask // dst|=(aff3>>rs3) |
50 | .endm | 50 | .endm |
51 | /* | 51 | /* |
52 | * Save CPU state for a suspend. This saves callee registers, and allocates | 52 | * Save CPU state for a suspend and execute the suspend finisher. |
53 | * space on the kernel stack to save the CPU specific registers + some | 53 | * On success it will return 0 through cpu_resume - ie through a CPU |
54 | * other data for resume. | 54 | * soft/hard reboot from the reset vector. |
55 | * On failure it returns the suspend finisher return value or force | ||
56 | * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher | ||
57 | * is not allowed to return, if it does this must be considered failure). | ||
58 | * It saves callee registers, and allocates space on the kernel stack | ||
59 | * to save the CPU specific registers + some other data for resume. | ||
55 | * | 60 | * |
56 | * x0 = suspend finisher argument | 61 | * x0 = suspend finisher argument |
62 | * x1 = suspend finisher function pointer | ||
57 | */ | 63 | */ |
58 | ENTRY(__cpu_suspend) | 64 | ENTRY(__cpu_suspend_enter) |
59 | stp x29, lr, [sp, #-96]! | 65 | stp x29, lr, [sp, #-96]! |
60 | stp x19, x20, [sp,#16] | 66 | stp x19, x20, [sp,#16] |
61 | stp x21, x22, [sp,#32] | 67 | stp x21, x22, [sp,#32] |
62 | stp x23, x24, [sp,#48] | 68 | stp x23, x24, [sp,#48] |
63 | stp x25, x26, [sp,#64] | 69 | stp x25, x26, [sp,#64] |
64 | stp x27, x28, [sp,#80] | 70 | stp x27, x28, [sp,#80] |
71 | /* | ||
72 | * Stash suspend finisher and its argument in x20 and x19 | ||
73 | */ | ||
74 | mov x19, x0 | ||
75 | mov x20, x1 | ||
65 | mov x2, sp | 76 | mov x2, sp |
66 | sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx | 77 | sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx |
67 | mov x1, sp | 78 | mov x0, sp |
68 | /* | 79 | /* |
69 | * x1 now points to struct cpu_suspend_ctx allocated on the stack | 80 | * x0 now points to struct cpu_suspend_ctx allocated on the stack |
70 | */ | 81 | */ |
71 | str x2, [x1, #CPU_CTX_SP] | 82 | str x2, [x0, #CPU_CTX_SP] |
72 | ldr x2, =sleep_save_sp | 83 | ldr x1, =sleep_save_sp |
73 | ldr x2, [x2, #SLEEP_SAVE_SP_VIRT] | 84 | ldr x1, [x1, #SLEEP_SAVE_SP_VIRT] |
74 | #ifdef CONFIG_SMP | 85 | #ifdef CONFIG_SMP |
75 | mrs x7, mpidr_el1 | 86 | mrs x7, mpidr_el1 |
76 | ldr x9, =mpidr_hash | 87 | ldr x9, =mpidr_hash |
@@ -82,11 +93,21 @@ ENTRY(__cpu_suspend) | |||
82 | ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] | 93 | ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] |
83 | ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] | 94 | ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] |
84 | compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 | 95 | compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 |
85 | add x2, x2, x8, lsl #3 | 96 | add x1, x1, x8, lsl #3 |
86 | #endif | 97 | #endif |
87 | bl __cpu_suspend_finisher | 98 | bl __cpu_suspend_save |
99 | /* | ||
100 | * Grab suspend finisher in x20 and its argument in x19 | ||
101 | */ | ||
102 | mov x0, x19 | ||
103 | mov x1, x20 | ||
104 | /* | ||
105 | * We are ready for power down, fire off the suspend finisher | ||
106 | * in x1, with argument in x0 | ||
107 | */ | ||
108 | blr x1 | ||
88 | /* | 109 | /* |
89 | * Never gets here, unless suspend fails. | 110 | * Never gets here, unless suspend finisher fails. |
90 | * Successful cpu_suspend should return from cpu_resume, returning | 111 | * Successful cpu_suspend should return from cpu_resume, returning |
91 | * through this code path is considered an error | 112 | * through this code path is considered an error |
92 | * If the return value is set to 0 force x0 = -EOPNOTSUPP | 113 | * If the return value is set to 0 force x0 = -EOPNOTSUPP |
@@ -103,7 +124,7 @@ ENTRY(__cpu_suspend) | |||
103 | ldp x27, x28, [sp, #80] | 124 | ldp x27, x28, [sp, #80] |
104 | ldp x29, lr, [sp], #96 | 125 | ldp x29, lr, [sp], #96 |
105 | ret | 126 | ret |
106 | ENDPROC(__cpu_suspend) | 127 | ENDPROC(__cpu_suspend_enter) |
107 | .ltorg | 128 | .ltorg |
108 | 129 | ||
109 | /* | 130 | /* |
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 55a99b9a97e0..13ad4dbb1615 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c | |||
@@ -9,22 +9,19 @@ | |||
9 | #include <asm/suspend.h> | 9 | #include <asm/suspend.h> |
10 | #include <asm/tlbflush.h> | 10 | #include <asm/tlbflush.h> |
11 | 11 | ||
12 | extern int __cpu_suspend(unsigned long); | 12 | extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long)); |
13 | /* | 13 | /* |
14 | * This is called by __cpu_suspend() to save the state, and do whatever | 14 | * This is called by __cpu_suspend_enter() to save the state, and do whatever |
15 | * flushing is required to ensure that when the CPU goes to sleep we have | 15 | * flushing is required to ensure that when the CPU goes to sleep we have |
16 | * the necessary data available when the caches are not searched. | 16 | * the necessary data available when the caches are not searched. |
17 | * | 17 | * |
18 | * @arg: Argument to pass to suspend operations | 18 | * ptr: CPU context virtual address |
19 | * @ptr: CPU context virtual address | 19 | * save_ptr: address of the location where the context physical address |
20 | * @save_ptr: address of the location where the context physical address | 20 | * must be saved |
21 | * must be saved | ||
22 | */ | 21 | */ |
23 | int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, | 22 | void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr, |
24 | phys_addr_t *save_ptr) | 23 | phys_addr_t *save_ptr) |
25 | { | 24 | { |
26 | int cpu = smp_processor_id(); | ||
27 | |||
28 | *save_ptr = virt_to_phys(ptr); | 25 | *save_ptr = virt_to_phys(ptr); |
29 | 26 | ||
30 | cpu_do_suspend(ptr); | 27 | cpu_do_suspend(ptr); |
@@ -35,8 +32,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, | |||
35 | */ | 32 | */ |
36 | __flush_dcache_area(ptr, sizeof(*ptr)); | 33 | __flush_dcache_area(ptr, sizeof(*ptr)); |
37 | __flush_dcache_area(save_ptr, sizeof(*save_ptr)); | 34 | __flush_dcache_area(save_ptr, sizeof(*save_ptr)); |
38 | |||
39 | return cpu_ops[cpu]->cpu_suspend(arg); | ||
40 | } | 35 | } |
41 | 36 | ||
42 | /* | 37 | /* |
@@ -56,15 +51,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) | |||
56 | } | 51 | } |
57 | 52 | ||
58 | /** | 53 | /** |
59 | * cpu_suspend | 54 | * cpu_suspend() - function to enter a low-power state |
55 | * @arg: argument to pass to CPU suspend operations | ||
60 | * | 56 | * |
61 | * @arg: argument to pass to the finisher function | 57 | * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU |
58 | * operations back-end error code otherwise. | ||
62 | */ | 59 | */ |
63 | int cpu_suspend(unsigned long arg) | 60 | int cpu_suspend(unsigned long arg) |
64 | { | 61 | { |
65 | struct mm_struct *mm = current->active_mm; | 62 | int cpu = smp_processor_id(); |
66 | int ret, cpu = smp_processor_id(); | ||
67 | unsigned long flags; | ||
68 | 63 | ||
69 | /* | 64 | /* |
70 | * If cpu_ops have not been registered or suspend | 65 | * If cpu_ops have not been registered or suspend |
@@ -72,6 +67,21 @@ int cpu_suspend(unsigned long arg) | |||
72 | */ | 67 | */ |
73 | if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) | 68 | if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) |
74 | return -EOPNOTSUPP; | 69 | return -EOPNOTSUPP; |
70 | return cpu_ops[cpu]->cpu_suspend(arg); | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * __cpu_suspend | ||
75 | * | ||
76 | * arg: argument to pass to the finisher function | ||
77 | * fn: finisher function pointer | ||
78 | * | ||
79 | */ | ||
80 | int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
81 | { | ||
82 | struct mm_struct *mm = current->active_mm; | ||
83 | int ret; | ||
84 | unsigned long flags; | ||
75 | 85 | ||
76 | /* | 86 | /* |
77 | * From this point debug exceptions are disabled to prevent | 87 | * From this point debug exceptions are disabled to prevent |
@@ -86,7 +96,7 @@ int cpu_suspend(unsigned long arg) | |||
86 | * page tables, so that the thread address space is properly | 96 | * page tables, so that the thread address space is properly |
87 | * set-up on function return. | 97 | * set-up on function return. |
88 | */ | 98 | */ |
89 | ret = __cpu_suspend(arg); | 99 | ret = __cpu_suspend_enter(arg, fn); |
90 | if (ret == 0) { | 100 | if (ret == 0) { |
91 | cpu_switch_mm(mm->pgd, mm); | 101 | cpu_switch_mm(mm->pgd, mm); |
92 | flush_tlb_all(); | 102 | flush_tlb_all(); |
@@ -95,7 +105,7 @@ int cpu_suspend(unsigned long arg) | |||
95 | * Restore per-cpu offset before any kernel | 105 | * Restore per-cpu offset before any kernel |
96 | * subsystem relying on it has a chance to run. | 106 | * subsystem relying on it has a chance to run. |
97 | */ | 107 | */ |
98 | set_my_cpu_offset(per_cpu_offset(cpu)); | 108 | set_my_cpu_offset(per_cpu_offset(smp_processor_id())); |
99 | 109 | ||
100 | /* | 110 | /* |
101 | * Restore HW breakpoint registers to sane values | 111 | * Restore HW breakpoint registers to sane values |
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig index 4c4ac163c600..b6bda1838629 100644 --- a/arch/ia64/configs/bigsur_defconfig +++ b/arch/ia64/configs/bigsur_defconfig | |||
@@ -1,4 +1,3 @@ | |||
1 | CONFIG_EXPERIMENTAL=y | ||
2 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
3 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
4 | CONFIG_LOG_BUF_SHIFT=16 | 3 | CONFIG_LOG_BUF_SHIFT=16 |
@@ -6,6 +5,8 @@ CONFIG_PROFILING=y | |||
6 | CONFIG_OPROFILE=y | 5 | CONFIG_OPROFILE=y |
7 | CONFIG_MODULES=y | 6 | CONFIG_MODULES=y |
8 | CONFIG_MODULE_UNLOAD=y | 7 | CONFIG_MODULE_UNLOAD=y |
8 | CONFIG_PARTITION_ADVANCED=y | ||
9 | CONFIG_SGI_PARTITION=y | ||
9 | CONFIG_IA64_DIG=y | 10 | CONFIG_IA64_DIG=y |
10 | CONFIG_SMP=y | 11 | CONFIG_SMP=y |
11 | CONFIG_NR_CPUS=2 | 12 | CONFIG_NR_CPUS=2 |
@@ -51,9 +52,6 @@ CONFIG_DM_MIRROR=m | |||
51 | CONFIG_DM_ZERO=m | 52 | CONFIG_DM_ZERO=m |
52 | CONFIG_NETDEVICES=y | 53 | CONFIG_NETDEVICES=y |
53 | CONFIG_DUMMY=y | 54 | CONFIG_DUMMY=y |
54 | CONFIG_NET_ETHERNET=y | ||
55 | CONFIG_MII=y | ||
56 | CONFIG_NET_PCI=y | ||
57 | CONFIG_INPUT_EVDEV=y | 55 | CONFIG_INPUT_EVDEV=y |
58 | CONFIG_SERIAL_8250=y | 56 | CONFIG_SERIAL_8250=y |
59 | CONFIG_SERIAL_8250_CONSOLE=y | 57 | CONFIG_SERIAL_8250_CONSOLE=y |
@@ -85,7 +83,6 @@ CONFIG_EXT3_FS=y | |||
85 | CONFIG_XFS_FS=y | 83 | CONFIG_XFS_FS=y |
86 | CONFIG_XFS_QUOTA=y | 84 | CONFIG_XFS_QUOTA=y |
87 | CONFIG_XFS_POSIX_ACL=y | 85 | CONFIG_XFS_POSIX_ACL=y |
88 | CONFIG_AUTOFS_FS=m | ||
89 | CONFIG_AUTOFS4_FS=m | 86 | CONFIG_AUTOFS4_FS=m |
90 | CONFIG_ISO9660_FS=m | 87 | CONFIG_ISO9660_FS=m |
91 | CONFIG_JOLIET=y | 88 | CONFIG_JOLIET=y |
@@ -95,17 +92,13 @@ CONFIG_PROC_KCORE=y | |||
95 | CONFIG_TMPFS=y | 92 | CONFIG_TMPFS=y |
96 | CONFIG_HUGETLBFS=y | 93 | CONFIG_HUGETLBFS=y |
97 | CONFIG_NFS_FS=m | 94 | CONFIG_NFS_FS=m |
98 | CONFIG_NFS_V3=y | 95 | CONFIG_NFS_V4=m |
99 | CONFIG_NFS_V4=y | ||
100 | CONFIG_NFSD=m | 96 | CONFIG_NFSD=m |
101 | CONFIG_NFSD_V4=y | 97 | CONFIG_NFSD_V4=y |
102 | CONFIG_CIFS=m | 98 | CONFIG_CIFS=m |
103 | CONFIG_CIFS_STATS=y | 99 | CONFIG_CIFS_STATS=y |
104 | CONFIG_CIFS_XATTR=y | 100 | CONFIG_CIFS_XATTR=y |
105 | CONFIG_CIFS_POSIX=y | 101 | CONFIG_CIFS_POSIX=y |
106 | CONFIG_PARTITION_ADVANCED=y | ||
107 | CONFIG_SGI_PARTITION=y | ||
108 | CONFIG_EFI_PARTITION=y | ||
109 | CONFIG_NLS_CODEPAGE_437=y | 102 | CONFIG_NLS_CODEPAGE_437=y |
110 | CONFIG_NLS_ISO8859_1=y | 103 | CONFIG_NLS_ISO8859_1=y |
111 | CONFIG_NLS_UTF8=m | 104 | CONFIG_NLS_UTF8=m |
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index e8ed3ae70aae..81f686dee53c 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig | |||
@@ -1,4 +1,3 @@ | |||
1 | CONFIG_EXPERIMENTAL=y | ||
2 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
3 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
4 | CONFIG_IKCONFIG=y | 3 | CONFIG_IKCONFIG=y |
@@ -6,13 +5,13 @@ CONFIG_IKCONFIG_PROC=y | |||
6 | CONFIG_LOG_BUF_SHIFT=20 | 5 | CONFIG_LOG_BUF_SHIFT=20 |
7 | CONFIG_CGROUPS=y | 6 | CONFIG_CGROUPS=y |
8 | CONFIG_CPUSETS=y | 7 | CONFIG_CPUSETS=y |
9 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
10 | CONFIG_BLK_DEV_INITRD=y | 8 | CONFIG_BLK_DEV_INITRD=y |
11 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
12 | CONFIG_MODULES=y | 10 | CONFIG_MODULES=y |
13 | CONFIG_MODULE_UNLOAD=y | 11 | CONFIG_MODULE_UNLOAD=y |
14 | CONFIG_MODVERSIONS=y | 12 | CONFIG_MODVERSIONS=y |
15 | # CONFIG_BLK_DEV_BSG is not set | 13 | CONFIG_PARTITION_ADVANCED=y |
14 | CONFIG_SGI_PARTITION=y | ||
16 | CONFIG_MCKINLEY=y | 15 | CONFIG_MCKINLEY=y |
17 | CONFIG_IA64_PAGE_SIZE_64KB=y | 16 | CONFIG_IA64_PAGE_SIZE_64KB=y |
18 | CONFIG_IA64_CYCLONE=y | 17 | CONFIG_IA64_CYCLONE=y |
@@ -29,14 +28,13 @@ CONFIG_ACPI_BUTTON=m | |||
29 | CONFIG_ACPI_FAN=m | 28 | CONFIG_ACPI_FAN=m |
30 | CONFIG_ACPI_DOCK=y | 29 | CONFIG_ACPI_DOCK=y |
31 | CONFIG_ACPI_PROCESSOR=m | 30 | CONFIG_ACPI_PROCESSOR=m |
32 | CONFIG_ACPI_CONTAINER=y | ||
33 | CONFIG_HOTPLUG_PCI=y | 31 | CONFIG_HOTPLUG_PCI=y |
34 | CONFIG_HOTPLUG_PCI_ACPI=y | 32 | CONFIG_HOTPLUG_PCI_ACPI=y |
33 | CONFIG_NET=y | ||
35 | CONFIG_PACKET=y | 34 | CONFIG_PACKET=y |
36 | CONFIG_UNIX=y | 35 | CONFIG_UNIX=y |
37 | CONFIG_INET=y | 36 | CONFIG_INET=y |
38 | CONFIG_IP_MULTICAST=y | 37 | CONFIG_IP_MULTICAST=y |
39 | CONFIG_ARPD=y | ||
40 | CONFIG_SYN_COOKIES=y | 38 | CONFIG_SYN_COOKIES=y |
41 | # CONFIG_IPV6 is not set | 39 | # CONFIG_IPV6 is not set |
42 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 40 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
@@ -82,16 +80,13 @@ CONFIG_FUSION_FC=m | |||
82 | CONFIG_FUSION_SAS=y | 80 | CONFIG_FUSION_SAS=y |
83 | CONFIG_NETDEVICES=y | 81 | CONFIG_NETDEVICES=y |
84 | CONFIG_DUMMY=m | 82 | CONFIG_DUMMY=m |
85 | CONFIG_NET_ETHERNET=y | 83 | CONFIG_NETCONSOLE=y |
84 | CONFIG_TIGON3=y | ||
86 | CONFIG_NET_TULIP=y | 85 | CONFIG_NET_TULIP=y |
87 | CONFIG_TULIP=m | 86 | CONFIG_TULIP=m |
88 | CONFIG_NET_PCI=y | ||
89 | CONFIG_NET_VENDOR_INTEL=y | ||
90 | CONFIG_E100=m | 87 | CONFIG_E100=m |
91 | CONFIG_E1000=y | 88 | CONFIG_E1000=y |
92 | CONFIG_IGB=y | 89 | CONFIG_IGB=y |
93 | CONFIG_TIGON3=y | ||
94 | CONFIG_NETCONSOLE=y | ||
95 | # CONFIG_SERIO_SERPORT is not set | 90 | # CONFIG_SERIO_SERPORT is not set |
96 | CONFIG_GAMEPORT=m | 91 | CONFIG_GAMEPORT=m |
97 | CONFIG_SERIAL_NONSTANDARD=y | 92 | CONFIG_SERIAL_NONSTANDARD=y |
@@ -151,6 +146,7 @@ CONFIG_USB_STORAGE=m | |||
151 | CONFIG_INFINIBAND=m | 146 | CONFIG_INFINIBAND=m |
152 | CONFIG_INFINIBAND_MTHCA=m | 147 | CONFIG_INFINIBAND_MTHCA=m |
153 | CONFIG_INFINIBAND_IPOIB=m | 148 | CONFIG_INFINIBAND_IPOIB=m |
149 | CONFIG_INTEL_IOMMU=y | ||
154 | CONFIG_MSPEC=m | 150 | CONFIG_MSPEC=m |
155 | CONFIG_EXT2_FS=y | 151 | CONFIG_EXT2_FS=y |
156 | CONFIG_EXT2_FS_XATTR=y | 152 | CONFIG_EXT2_FS_XATTR=y |
@@ -164,7 +160,6 @@ CONFIG_REISERFS_FS_XATTR=y | |||
164 | CONFIG_REISERFS_FS_POSIX_ACL=y | 160 | CONFIG_REISERFS_FS_POSIX_ACL=y |
165 | CONFIG_REISERFS_FS_SECURITY=y | 161 | CONFIG_REISERFS_FS_SECURITY=y |
166 | CONFIG_XFS_FS=y | 162 | CONFIG_XFS_FS=y |
167 | CONFIG_AUTOFS_FS=m | ||
168 | CONFIG_AUTOFS4_FS=m | 163 | CONFIG_AUTOFS4_FS=m |
169 | CONFIG_ISO9660_FS=m | 164 | CONFIG_ISO9660_FS=m |
170 | CONFIG_JOLIET=y | 165 | CONFIG_JOLIET=y |
@@ -175,16 +170,10 @@ CONFIG_PROC_KCORE=y | |||
175 | CONFIG_TMPFS=y | 170 | CONFIG_TMPFS=y |
176 | CONFIG_HUGETLBFS=y | 171 | CONFIG_HUGETLBFS=y |
177 | CONFIG_NFS_FS=m | 172 | CONFIG_NFS_FS=m |
178 | CONFIG_NFS_V3=y | 173 | CONFIG_NFS_V4=m |
179 | CONFIG_NFS_V4=y | ||
180 | CONFIG_NFSD=m | 174 | CONFIG_NFSD=m |
181 | CONFIG_NFSD_V4=y | 175 | CONFIG_NFSD_V4=y |
182 | CONFIG_SMB_FS=m | ||
183 | CONFIG_SMB_NLS_DEFAULT=y | ||
184 | CONFIG_CIFS=m | 176 | CONFIG_CIFS=m |
185 | CONFIG_PARTITION_ADVANCED=y | ||
186 | CONFIG_SGI_PARTITION=y | ||
187 | CONFIG_EFI_PARTITION=y | ||
188 | CONFIG_NLS_CODEPAGE_437=y | 177 | CONFIG_NLS_CODEPAGE_437=y |
189 | CONFIG_NLS_CODEPAGE_737=m | 178 | CONFIG_NLS_CODEPAGE_737=m |
190 | CONFIG_NLS_CODEPAGE_775=m | 179 | CONFIG_NLS_CODEPAGE_775=m |
@@ -225,11 +214,7 @@ CONFIG_NLS_UTF8=m | |||
225 | CONFIG_MAGIC_SYSRQ=y | 214 | CONFIG_MAGIC_SYSRQ=y |
226 | CONFIG_DEBUG_KERNEL=y | 215 | CONFIG_DEBUG_KERNEL=y |
227 | CONFIG_DEBUG_MUTEXES=y | 216 | CONFIG_DEBUG_MUTEXES=y |
228 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
229 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
230 | CONFIG_CRYPTO_ECB=m | ||
231 | CONFIG_CRYPTO_PCBC=m | 217 | CONFIG_CRYPTO_PCBC=m |
232 | CONFIG_CRYPTO_MD5=y | 218 | CONFIG_CRYPTO_MD5=y |
233 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 219 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
234 | CONFIG_CRC_T10DIF=y | 220 | CONFIG_CRC_T10DIF=y |
235 | CONFIG_INTEL_IOMMU=y | ||
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index d663efd1e4db..5b4fcdd51457 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig | |||
@@ -1,4 +1,3 @@ | |||
1 | CONFIG_EXPERIMENTAL=y | ||
2 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
3 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
4 | CONFIG_IKCONFIG=y | 3 | CONFIG_IKCONFIG=y |
@@ -9,6 +8,8 @@ CONFIG_KALLSYMS_ALL=y | |||
9 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
11 | CONFIG_MODVERSIONS=y | 10 | CONFIG_MODVERSIONS=y |
11 | CONFIG_PARTITION_ADVANCED=y | ||
12 | CONFIG_SGI_PARTITION=y | ||
12 | CONFIG_MCKINLEY=y | 13 | CONFIG_MCKINLEY=y |
13 | CONFIG_IA64_CYCLONE=y | 14 | CONFIG_IA64_CYCLONE=y |
14 | CONFIG_SMP=y | 15 | CONFIG_SMP=y |
@@ -24,14 +25,12 @@ CONFIG_BINFMT_MISC=m | |||
24 | CONFIG_ACPI_BUTTON=m | 25 | CONFIG_ACPI_BUTTON=m |
25 | CONFIG_ACPI_FAN=m | 26 | CONFIG_ACPI_FAN=m |
26 | CONFIG_ACPI_PROCESSOR=m | 27 | CONFIG_ACPI_PROCESSOR=m |
27 | CONFIG_ACPI_CONTAINER=m | ||
28 | CONFIG_HOTPLUG_PCI=y | 28 | CONFIG_HOTPLUG_PCI=y |
29 | CONFIG_HOTPLUG_PCI_ACPI=m | 29 | CONFIG_NET=y |
30 | CONFIG_PACKET=y | 30 | CONFIG_PACKET=y |
31 | CONFIG_UNIX=y | 31 | CONFIG_UNIX=y |
32 | CONFIG_INET=y | 32 | CONFIG_INET=y |
33 | CONFIG_IP_MULTICAST=y | 33 | CONFIG_IP_MULTICAST=y |
34 | CONFIG_ARPD=y | ||
35 | CONFIG_SYN_COOKIES=y | 34 | CONFIG_SYN_COOKIES=y |
36 | # CONFIG_IPV6 is not set | 35 | # CONFIG_IPV6 is not set |
37 | CONFIG_BLK_DEV_LOOP=m | 36 | CONFIG_BLK_DEV_LOOP=m |
@@ -71,15 +70,12 @@ CONFIG_FUSION_SPI=y | |||
71 | CONFIG_FUSION_FC=m | 70 | CONFIG_FUSION_FC=m |
72 | CONFIG_NETDEVICES=y | 71 | CONFIG_NETDEVICES=y |
73 | CONFIG_DUMMY=m | 72 | CONFIG_DUMMY=m |
74 | CONFIG_NET_ETHERNET=y | 73 | CONFIG_NETCONSOLE=y |
74 | CONFIG_TIGON3=y | ||
75 | CONFIG_NET_TULIP=y | 75 | CONFIG_NET_TULIP=y |
76 | CONFIG_TULIP=m | 76 | CONFIG_TULIP=m |
77 | CONFIG_NET_PCI=y | ||
78 | CONFIG_NET_VENDOR_INTEL=y | ||
79 | CONFIG_E100=m | 77 | CONFIG_E100=m |
80 | CONFIG_E1000=y | 78 | CONFIG_E1000=y |
81 | CONFIG_TIGON3=y | ||
82 | CONFIG_NETCONSOLE=y | ||
83 | # CONFIG_SERIO_SERPORT is not set | 79 | # CONFIG_SERIO_SERPORT is not set |
84 | CONFIG_GAMEPORT=m | 80 | CONFIG_GAMEPORT=m |
85 | CONFIG_SERIAL_NONSTANDARD=y | 81 | CONFIG_SERIAL_NONSTANDARD=y |
@@ -146,7 +142,6 @@ CONFIG_REISERFS_FS_XATTR=y | |||
146 | CONFIG_REISERFS_FS_POSIX_ACL=y | 142 | CONFIG_REISERFS_FS_POSIX_ACL=y |
147 | CONFIG_REISERFS_FS_SECURITY=y | 143 | CONFIG_REISERFS_FS_SECURITY=y |
148 | CONFIG_XFS_FS=y | 144 | CONFIG_XFS_FS=y |
149 | CONFIG_AUTOFS_FS=y | ||
150 | CONFIG_AUTOFS4_FS=y | 145 | CONFIG_AUTOFS4_FS=y |
151 | CONFIG_ISO9660_FS=m | 146 | CONFIG_ISO9660_FS=m |
152 | CONFIG_JOLIET=y | 147 | CONFIG_JOLIET=y |
@@ -157,16 +152,10 @@ CONFIG_PROC_KCORE=y | |||
157 | CONFIG_TMPFS=y | 152 | CONFIG_TMPFS=y |
158 | CONFIG_HUGETLBFS=y | 153 | CONFIG_HUGETLBFS=y |
159 | CONFIG_NFS_FS=m | 154 | CONFIG_NFS_FS=m |
160 | CONFIG_NFS_V3=y | 155 | CONFIG_NFS_V4=m |
161 | CONFIG_NFS_V4=y | ||
162 | CONFIG_NFSD=m | 156 | CONFIG_NFSD=m |
163 | CONFIG_NFSD_V4=y | 157 | CONFIG_NFSD_V4=y |
164 | CONFIG_SMB_FS=m | ||
165 | CONFIG_SMB_NLS_DEFAULT=y | ||
166 | CONFIG_CIFS=m | 158 | CONFIG_CIFS=m |
167 | CONFIG_PARTITION_ADVANCED=y | ||
168 | CONFIG_SGI_PARTITION=y | ||
169 | CONFIG_EFI_PARTITION=y | ||
170 | CONFIG_NLS_CODEPAGE_437=y | 159 | CONFIG_NLS_CODEPAGE_437=y |
171 | CONFIG_NLS_CODEPAGE_737=m | 160 | CONFIG_NLS_CODEPAGE_737=m |
172 | CONFIG_NLS_CODEPAGE_775=m | 161 | CONFIG_NLS_CODEPAGE_775=m |
diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig index b4548a3e82d5..f0f69fdbddae 100644 --- a/arch/ia64/configs/sim_defconfig +++ b/arch/ia64/configs/sim_defconfig | |||
@@ -1,13 +1,12 @@ | |||
1 | CONFIG_EXPERIMENTAL=y | ||
2 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
3 | CONFIG_IKCONFIG=y | 2 | CONFIG_IKCONFIG=y |
4 | CONFIG_IKCONFIG_PROC=y | 3 | CONFIG_IKCONFIG_PROC=y |
5 | CONFIG_LOG_BUF_SHIFT=16 | 4 | CONFIG_LOG_BUF_SHIFT=16 |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
7 | CONFIG_MODULES=y | 5 | CONFIG_MODULES=y |
8 | CONFIG_MODULE_UNLOAD=y | 6 | CONFIG_MODULE_UNLOAD=y |
9 | CONFIG_MODULE_FORCE_UNLOAD=y | 7 | CONFIG_MODULE_FORCE_UNLOAD=y |
10 | CONFIG_MODVERSIONS=y | 8 | CONFIG_MODVERSIONS=y |
9 | CONFIG_PARTITION_ADVANCED=y | ||
11 | CONFIG_IA64_HP_SIM=y | 10 | CONFIG_IA64_HP_SIM=y |
12 | CONFIG_MCKINLEY=y | 11 | CONFIG_MCKINLEY=y |
13 | CONFIG_IA64_PAGE_SIZE_64KB=y | 12 | CONFIG_IA64_PAGE_SIZE_64KB=y |
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y | |||
27 | CONFIG_BLK_DEV_RAM=y | 26 | CONFIG_BLK_DEV_RAM=y |
28 | CONFIG_SCSI=y | 27 | CONFIG_SCSI=y |
29 | CONFIG_BLK_DEV_SD=y | 28 | CONFIG_BLK_DEV_SD=y |
30 | CONFIG_SCSI_MULTI_LUN=y | ||
31 | CONFIG_SCSI_CONSTANTS=y | 29 | CONFIG_SCSI_CONSTANTS=y |
32 | CONFIG_SCSI_LOGGING=y | 30 | CONFIG_SCSI_LOGGING=y |
33 | CONFIG_SCSI_SPI_ATTRS=y | 31 | CONFIG_SCSI_SPI_ATTRS=y |
@@ -49,8 +47,6 @@ CONFIG_HUGETLBFS=y | |||
49 | CONFIG_NFS_FS=y | 47 | CONFIG_NFS_FS=y |
50 | CONFIG_NFSD=y | 48 | CONFIG_NFSD=y |
51 | CONFIG_NFSD_V3=y | 49 | CONFIG_NFSD_V3=y |
52 | CONFIG_PARTITION_ADVANCED=y | 50 | CONFIG_DEBUG_INFO=y |
53 | CONFIG_EFI_PARTITION=y | ||
54 | CONFIG_DEBUG_KERNEL=y | 51 | CONFIG_DEBUG_KERNEL=y |
55 | CONFIG_DEBUG_MUTEXES=y | 52 | CONFIG_DEBUG_MUTEXES=y |
56 | CONFIG_DEBUG_INFO=y | ||
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index c8a3f40e77f6..192ed157c9ce 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig | |||
@@ -1,4 +1,3 @@ | |||
1 | CONFIG_EXPERIMENTAL=y | ||
2 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
3 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
4 | CONFIG_IKCONFIG=y | 3 | CONFIG_IKCONFIG=y |
@@ -11,6 +10,8 @@ CONFIG_MODULE_UNLOAD=y | |||
11 | CONFIG_MODVERSIONS=y | 10 | CONFIG_MODVERSIONS=y |
12 | CONFIG_MODULE_SRCVERSION_ALL=y | 11 | CONFIG_MODULE_SRCVERSION_ALL=y |
13 | # CONFIG_BLK_DEV_BSG is not set | 12 | # CONFIG_BLK_DEV_BSG is not set |
13 | CONFIG_PARTITION_ADVANCED=y | ||
14 | CONFIG_SGI_PARTITION=y | ||
14 | CONFIG_IA64_DIG=y | 15 | CONFIG_IA64_DIG=y |
15 | CONFIG_MCKINLEY=y | 16 | CONFIG_MCKINLEY=y |
16 | CONFIG_IA64_PAGE_SIZE_64KB=y | 17 | CONFIG_IA64_PAGE_SIZE_64KB=y |
@@ -29,14 +30,12 @@ CONFIG_BINFMT_MISC=m | |||
29 | CONFIG_ACPI_BUTTON=m | 30 | CONFIG_ACPI_BUTTON=m |
30 | CONFIG_ACPI_FAN=m | 31 | CONFIG_ACPI_FAN=m |
31 | CONFIG_ACPI_PROCESSOR=m | 32 | CONFIG_ACPI_PROCESSOR=m |
32 | CONFIG_ACPI_CONTAINER=m | ||
33 | CONFIG_HOTPLUG_PCI=y | 33 | CONFIG_HOTPLUG_PCI=y |
34 | CONFIG_HOTPLUG_PCI_ACPI=m | 34 | CONFIG_NET=y |
35 | CONFIG_PACKET=y | 35 | CONFIG_PACKET=y |
36 | CONFIG_UNIX=y | 36 | CONFIG_UNIX=y |
37 | CONFIG_INET=y | 37 | CONFIG_INET=y |
38 | CONFIG_IP_MULTICAST=y | 38 | CONFIG_IP_MULTICAST=y |
39 | CONFIG_ARPD=y | ||
40 | CONFIG_SYN_COOKIES=y | 39 | CONFIG_SYN_COOKIES=y |
41 | # CONFIG_IPV6 is not set | 40 | # CONFIG_IPV6 is not set |
42 | CONFIG_BLK_DEV_LOOP=m | 41 | CONFIG_BLK_DEV_LOOP=m |
@@ -53,6 +52,7 @@ CONFIG_BLK_DEV_SD=y | |||
53 | CONFIG_CHR_DEV_ST=m | 52 | CONFIG_CHR_DEV_ST=m |
54 | CONFIG_BLK_DEV_SR=m | 53 | CONFIG_BLK_DEV_SR=m |
55 | CONFIG_CHR_DEV_SG=m | 54 | CONFIG_CHR_DEV_SG=m |
55 | CONFIG_SCSI_FC_ATTRS=y | ||
56 | CONFIG_SCSI_SYM53C8XX_2=y | 56 | CONFIG_SCSI_SYM53C8XX_2=y |
57 | CONFIG_SCSI_QLOGIC_1280=y | 57 | CONFIG_SCSI_QLOGIC_1280=y |
58 | CONFIG_MD=y | 58 | CONFIG_MD=y |
@@ -72,15 +72,12 @@ CONFIG_FUSION_FC=y | |||
72 | CONFIG_FUSION_CTL=y | 72 | CONFIG_FUSION_CTL=y |
73 | CONFIG_NETDEVICES=y | 73 | CONFIG_NETDEVICES=y |
74 | CONFIG_DUMMY=m | 74 | CONFIG_DUMMY=m |
75 | CONFIG_NET_ETHERNET=y | 75 | CONFIG_NETCONSOLE=y |
76 | CONFIG_TIGON3=y | ||
76 | CONFIG_NET_TULIP=y | 77 | CONFIG_NET_TULIP=y |
77 | CONFIG_TULIP=m | 78 | CONFIG_TULIP=m |
78 | CONFIG_NET_PCI=y | ||
79 | CONFIG_NET_VENDOR_INTEL=y | ||
80 | CONFIG_E100=m | 79 | CONFIG_E100=m |
81 | CONFIG_E1000=y | 80 | CONFIG_E1000=y |
82 | CONFIG_TIGON3=y | ||
83 | CONFIG_NETCONSOLE=y | ||
84 | # CONFIG_SERIO_SERPORT is not set | 81 | # CONFIG_SERIO_SERPORT is not set |
85 | CONFIG_GAMEPORT=m | 82 | CONFIG_GAMEPORT=m |
86 | CONFIG_SERIAL_NONSTANDARD=y | 83 | CONFIG_SERIAL_NONSTANDARD=y |
@@ -118,7 +115,6 @@ CONFIG_REISERFS_FS_XATTR=y | |||
118 | CONFIG_REISERFS_FS_POSIX_ACL=y | 115 | CONFIG_REISERFS_FS_POSIX_ACL=y |
119 | CONFIG_REISERFS_FS_SECURITY=y | 116 | CONFIG_REISERFS_FS_SECURITY=y |
120 | CONFIG_XFS_FS=y | 117 | CONFIG_XFS_FS=y |
121 | CONFIG_AUTOFS_FS=y | ||
122 | CONFIG_AUTOFS4_FS=y | 118 | CONFIG_AUTOFS4_FS=y |
123 | CONFIG_ISO9660_FS=m | 119 | CONFIG_ISO9660_FS=m |
124 | CONFIG_JOLIET=y | 120 | CONFIG_JOLIET=y |
@@ -129,16 +125,10 @@ CONFIG_PROC_KCORE=y | |||
129 | CONFIG_TMPFS=y | 125 | CONFIG_TMPFS=y |
130 | CONFIG_HUGETLBFS=y | 126 | CONFIG_HUGETLBFS=y |
131 | CONFIG_NFS_FS=m | 127 | CONFIG_NFS_FS=m |
132 | CONFIG_NFS_V3=y | 128 | CONFIG_NFS_V4=m |
133 | CONFIG_NFS_V4=y | ||
134 | CONFIG_NFSD=m | 129 | CONFIG_NFSD=m |
135 | CONFIG_NFSD_V4=y | 130 | CONFIG_NFSD_V4=y |
136 | CONFIG_SMB_FS=m | ||
137 | CONFIG_SMB_NLS_DEFAULT=y | ||
138 | CONFIG_CIFS=m | 131 | CONFIG_CIFS=m |
139 | CONFIG_PARTITION_ADVANCED=y | ||
140 | CONFIG_SGI_PARTITION=y | ||
141 | CONFIG_EFI_PARTITION=y | ||
142 | CONFIG_NLS_CODEPAGE_437=y | 132 | CONFIG_NLS_CODEPAGE_437=y |
143 | CONFIG_NLS_CODEPAGE_737=m | 133 | CONFIG_NLS_CODEPAGE_737=m |
144 | CONFIG_NLS_CODEPAGE_775=m | 134 | CONFIG_NLS_CODEPAGE_775=m |
@@ -180,6 +170,5 @@ CONFIG_MAGIC_SYSRQ=y | |||
180 | CONFIG_DEBUG_KERNEL=y | 170 | CONFIG_DEBUG_KERNEL=y |
181 | CONFIG_DEBUG_MUTEXES=y | 171 | CONFIG_DEBUG_MUTEXES=y |
182 | CONFIG_IA64_GRANULE_16MB=y | 172 | CONFIG_IA64_GRANULE_16MB=y |
183 | CONFIG_CRYPTO_ECB=m | ||
184 | CONFIG_CRYPTO_PCBC=m | 173 | CONFIG_CRYPTO_PCBC=m |
185 | CONFIG_CRYPTO_MD5=y | 174 | CONFIG_CRYPTO_MD5=y |
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 54bc72eda30d..b504c8e2fd52 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig | |||
@@ -1,9 +1,9 @@ | |||
1 | CONFIG_EXPERIMENTAL=y | ||
2 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
3 | CONFIG_BSD_PROCESS_ACCT=y | 2 | CONFIG_BSD_PROCESS_ACCT=y |
4 | CONFIG_BLK_DEV_INITRD=y | 3 | CONFIG_BLK_DEV_INITRD=y |
5 | CONFIG_KPROBES=y | 4 | CONFIG_KPROBES=y |
6 | CONFIG_MODULES=y | 5 | CONFIG_MODULES=y |
6 | CONFIG_PARTITION_ADVANCED=y | ||
7 | CONFIG_IA64_HP_ZX1=y | 7 | CONFIG_IA64_HP_ZX1=y |
8 | CONFIG_MCKINLEY=y | 8 | CONFIG_MCKINLEY=y |
9 | CONFIG_SMP=y | 9 | CONFIG_SMP=y |
@@ -18,6 +18,7 @@ CONFIG_EFI_VARS=y | |||
18 | CONFIG_BINFMT_MISC=y | 18 | CONFIG_BINFMT_MISC=y |
19 | CONFIG_HOTPLUG_PCI=y | 19 | CONFIG_HOTPLUG_PCI=y |
20 | CONFIG_HOTPLUG_PCI_ACPI=y | 20 | CONFIG_HOTPLUG_PCI_ACPI=y |
21 | CONFIG_NET=y | ||
21 | CONFIG_PACKET=y | 22 | CONFIG_PACKET=y |
22 | CONFIG_UNIX=y | 23 | CONFIG_UNIX=y |
23 | CONFIG_INET=y | 24 | CONFIG_INET=y |
@@ -37,9 +38,9 @@ CONFIG_CHR_DEV_OSST=y | |||
37 | CONFIG_BLK_DEV_SR=y | 38 | CONFIG_BLK_DEV_SR=y |
38 | CONFIG_BLK_DEV_SR_VENDOR=y | 39 | CONFIG_BLK_DEV_SR_VENDOR=y |
39 | CONFIG_CHR_DEV_SG=y | 40 | CONFIG_CHR_DEV_SG=y |
40 | CONFIG_SCSI_MULTI_LUN=y | ||
41 | CONFIG_SCSI_CONSTANTS=y | 41 | CONFIG_SCSI_CONSTANTS=y |
42 | CONFIG_SCSI_LOGGING=y | 42 | CONFIG_SCSI_LOGGING=y |
43 | CONFIG_SCSI_FC_ATTRS=y | ||
43 | CONFIG_SCSI_SYM53C8XX_2=y | 44 | CONFIG_SCSI_SYM53C8XX_2=y |
44 | CONFIG_SCSI_QLOGIC_1280=y | 45 | CONFIG_SCSI_QLOGIC_1280=y |
45 | CONFIG_FUSION=y | 46 | CONFIG_FUSION=y |
@@ -48,18 +49,15 @@ CONFIG_FUSION_FC=y | |||
48 | CONFIG_FUSION_CTL=m | 49 | CONFIG_FUSION_CTL=m |
49 | CONFIG_NETDEVICES=y | 50 | CONFIG_NETDEVICES=y |
50 | CONFIG_DUMMY=y | 51 | CONFIG_DUMMY=y |
51 | CONFIG_NET_ETHERNET=y | 52 | CONFIG_TIGON3=y |
52 | CONFIG_NET_TULIP=y | 53 | CONFIG_NET_TULIP=y |
53 | CONFIG_TULIP=y | 54 | CONFIG_TULIP=y |
54 | CONFIG_TULIP_MWI=y | 55 | CONFIG_TULIP_MWI=y |
55 | CONFIG_TULIP_MMIO=y | 56 | CONFIG_TULIP_MMIO=y |
56 | CONFIG_TULIP_NAPI=y | 57 | CONFIG_TULIP_NAPI=y |
57 | CONFIG_TULIP_NAPI_HW_MITIGATION=y | 58 | CONFIG_TULIP_NAPI_HW_MITIGATION=y |
58 | CONFIG_NET_PCI=y | ||
59 | CONFIG_NET_VENDOR_INTEL=y | ||
60 | CONFIG_E100=y | 59 | CONFIG_E100=y |
61 | CONFIG_E1000=y | 60 | CONFIG_E1000=y |
62 | CONFIG_TIGON3=y | ||
63 | CONFIG_INPUT_JOYDEV=y | 61 | CONFIG_INPUT_JOYDEV=y |
64 | CONFIG_INPUT_EVDEV=y | 62 | CONFIG_INPUT_EVDEV=y |
65 | # CONFIG_INPUT_KEYBOARD is not set | 63 | # CONFIG_INPUT_KEYBOARD is not set |
@@ -100,7 +98,6 @@ CONFIG_USB_STORAGE=y | |||
100 | CONFIG_EXT2_FS=y | 98 | CONFIG_EXT2_FS=y |
101 | CONFIG_EXT2_FS_XATTR=y | 99 | CONFIG_EXT2_FS_XATTR=y |
102 | CONFIG_EXT3_FS=y | 100 | CONFIG_EXT3_FS=y |
103 | CONFIG_AUTOFS_FS=y | ||
104 | CONFIG_ISO9660_FS=y | 101 | CONFIG_ISO9660_FS=y |
105 | CONFIG_JOLIET=y | 102 | CONFIG_JOLIET=y |
106 | CONFIG_UDF_FS=y | 103 | CONFIG_UDF_FS=y |
@@ -110,12 +107,9 @@ CONFIG_PROC_KCORE=y | |||
110 | CONFIG_TMPFS=y | 107 | CONFIG_TMPFS=y |
111 | CONFIG_HUGETLBFS=y | 108 | CONFIG_HUGETLBFS=y |
112 | CONFIG_NFS_FS=y | 109 | CONFIG_NFS_FS=y |
113 | CONFIG_NFS_V3=y | ||
114 | CONFIG_NFS_V4=y | 110 | CONFIG_NFS_V4=y |
115 | CONFIG_NFSD=y | 111 | CONFIG_NFSD=y |
116 | CONFIG_NFSD_V3=y | 112 | CONFIG_NFSD_V3=y |
117 | CONFIG_PARTITION_ADVANCED=y | ||
118 | CONFIG_EFI_PARTITION=y | ||
119 | CONFIG_NLS_CODEPAGE_437=y | 113 | CONFIG_NLS_CODEPAGE_437=y |
120 | CONFIG_NLS_CODEPAGE_737=y | 114 | CONFIG_NLS_CODEPAGE_737=y |
121 | CONFIG_NLS_CODEPAGE_775=y | 115 | CONFIG_NLS_CODEPAGE_775=y |
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 8f219dac9598..e24feb0633aa 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig | |||
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y | |||
19 | # CONFIG_BLK_DEV_BSG is not set | 19 | # CONFIG_BLK_DEV_BSG is not set |
20 | CONFIG_PCI=y | 20 | CONFIG_PCI=y |
21 | CONFIG_BINFMT_MISC=m | 21 | CONFIG_BINFMT_MISC=m |
22 | CONFIG_NET=y | ||
22 | CONFIG_PACKET=y | 23 | CONFIG_PACKET=y |
23 | CONFIG_UNIX=y | 24 | CONFIG_UNIX=y |
24 | CONFIG_INET=y | 25 | CONFIG_INET=y |
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index cc0756021398..48e16d98b2cc 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig | |||
@@ -28,6 +28,7 @@ CONFIG_MIPS32_COMPAT=y | |||
28 | CONFIG_MIPS32_O32=y | 28 | CONFIG_MIPS32_O32=y |
29 | CONFIG_MIPS32_N32=y | 29 | CONFIG_MIPS32_N32=y |
30 | CONFIG_PM=y | 30 | CONFIG_PM=y |
31 | CONFIG_NET=y | ||
31 | CONFIG_PACKET=y | 32 | CONFIG_PACKET=y |
32 | CONFIG_UNIX=y | 33 | CONFIG_UNIX=y |
33 | CONFIG_XFRM_USER=m | 34 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig index 2575302aa2be..4f37a5985459 100644 --- a/arch/mips/configs/jazz_defconfig +++ b/arch/mips/configs/jazz_defconfig | |||
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y | |||
18 | CONFIG_MODVERSIONS=y | 18 | CONFIG_MODVERSIONS=y |
19 | CONFIG_BINFMT_MISC=m | 19 | CONFIG_BINFMT_MISC=m |
20 | CONFIG_PM=y | 20 | CONFIG_PM=y |
21 | CONFIG_NET=y | ||
21 | CONFIG_PACKET=m | 22 | CONFIG_PACKET=m |
22 | CONFIG_UNIX=y | 23 | CONFIG_UNIX=y |
23 | CONFIG_NET_KEY=m | 24 | CONFIG_NET_KEY=m |
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 4cb787ff273e..1c6191ebd583 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig | |||
@@ -59,6 +59,7 @@ CONFIG_MIPS32_COMPAT=y | |||
59 | CONFIG_MIPS32_O32=y | 59 | CONFIG_MIPS32_O32=y |
60 | CONFIG_MIPS32_N32=y | 60 | CONFIG_MIPS32_N32=y |
61 | CONFIG_PM_RUNTIME=y | 61 | CONFIG_PM_RUNTIME=y |
62 | CONFIG_NET=y | ||
62 | CONFIG_PACKET=y | 63 | CONFIG_PACKET=y |
63 | CONFIG_UNIX=y | 64 | CONFIG_UNIX=y |
64 | CONFIG_XFRM_USER=y | 65 | CONFIG_XFRM_USER=y |
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index e18741ea1771..f57b96dcf7df 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig | |||
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y | |||
19 | CONFIG_MODVERSIONS=y | 19 | CONFIG_MODVERSIONS=y |
20 | CONFIG_MODULE_SRCVERSION_ALL=y | 20 | CONFIG_MODULE_SRCVERSION_ALL=y |
21 | CONFIG_PCI=y | 21 | CONFIG_PCI=y |
22 | CONFIG_NET=y | ||
22 | CONFIG_PACKET=y | 23 | CONFIG_PACKET=y |
23 | CONFIG_UNIX=y | 24 | CONFIG_UNIX=y |
24 | CONFIG_XFRM_USER=m | 25 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index cf0e01f814e1..d41742dd26c8 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig | |||
@@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y | |||
20 | CONFIG_MODVERSIONS=y | 20 | CONFIG_MODVERSIONS=y |
21 | CONFIG_MODULE_SRCVERSION_ALL=y | 21 | CONFIG_MODULE_SRCVERSION_ALL=y |
22 | CONFIG_PCI=y | 22 | CONFIG_PCI=y |
23 | CONFIG_NET=y | ||
23 | CONFIG_PACKET=y | 24 | CONFIG_PACKET=y |
24 | CONFIG_UNIX=y | 25 | CONFIG_UNIX=y |
25 | CONFIG_XFRM_USER=m | 26 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig index edd9ec9cb678..a7806e83ea0f 100644 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ b/arch/mips/configs/malta_kvm_guest_defconfig | |||
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y | |||
19 | CONFIG_MODVERSIONS=y | 19 | CONFIG_MODVERSIONS=y |
20 | CONFIG_MODULE_SRCVERSION_ALL=y | 20 | CONFIG_MODULE_SRCVERSION_ALL=y |
21 | CONFIG_PCI=y | 21 | CONFIG_PCI=y |
22 | CONFIG_NET=y | ||
22 | CONFIG_PACKET=y | 23 | CONFIG_PACKET=y |
23 | CONFIG_UNIX=y | 24 | CONFIG_UNIX=y |
24 | CONFIG_XFRM_USER=m | 25 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index d269a5326a30..9b6926d6bb32 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig | |||
@@ -27,6 +27,7 @@ CONFIG_PD6729=m | |||
27 | CONFIG_I82092=m | 27 | CONFIG_I82092=m |
28 | CONFIG_BINFMT_MISC=m | 28 | CONFIG_BINFMT_MISC=m |
29 | CONFIG_PM=y | 29 | CONFIG_PM=y |
30 | CONFIG_NET=y | ||
30 | CONFIG_PACKET=m | 31 | CONFIG_PACKET=m |
31 | CONFIG_UNIX=y | 32 | CONFIG_UNIX=y |
32 | CONFIG_XFRM_USER=m | 33 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig index 2f660e9a0da6..70509a48df82 100644 --- a/arch/mips/configs/nlm_xlp_defconfig +++ b/arch/mips/configs/nlm_xlp_defconfig | |||
@@ -63,6 +63,7 @@ CONFIG_MIPS32_O32=y | |||
63 | CONFIG_MIPS32_N32=y | 63 | CONFIG_MIPS32_N32=y |
64 | CONFIG_PM_RUNTIME=y | 64 | CONFIG_PM_RUNTIME=y |
65 | CONFIG_PM_DEBUG=y | 65 | CONFIG_PM_DEBUG=y |
66 | CONFIG_NET=y | ||
66 | CONFIG_PACKET=y | 67 | CONFIG_PACKET=y |
67 | CONFIG_UNIX=y | 68 | CONFIG_UNIX=y |
68 | CONFIG_XFRM_USER=m | 69 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig index c6f84655c98a..82207e8079f3 100644 --- a/arch/mips/configs/nlm_xlr_defconfig +++ b/arch/mips/configs/nlm_xlr_defconfig | |||
@@ -43,6 +43,7 @@ CONFIG_PCI_DEBUG=y | |||
43 | CONFIG_BINFMT_MISC=m | 43 | CONFIG_BINFMT_MISC=m |
44 | CONFIG_PM_RUNTIME=y | 44 | CONFIG_PM_RUNTIME=y |
45 | CONFIG_PM_DEBUG=y | 45 | CONFIG_PM_DEBUG=y |
46 | CONFIG_NET=y | ||
46 | CONFIG_PACKET=y | 47 | CONFIG_PACKET=y |
47 | CONFIG_UNIX=y | 48 | CONFIG_UNIX=y |
48 | CONFIG_XFRM_USER=m | 49 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index 29d79ae8a823..db029f4ff759 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig | |||
@@ -20,6 +20,7 @@ CONFIG_MODVERSIONS=y | |||
20 | CONFIG_PCI=y | 20 | CONFIG_PCI=y |
21 | CONFIG_BINFMT_MISC=m | 21 | CONFIG_BINFMT_MISC=m |
22 | CONFIG_PM=y | 22 | CONFIG_PM=y |
23 | CONFIG_NET=y | ||
23 | CONFIG_PACKET=m | 24 | CONFIG_PACKET=m |
24 | CONFIG_UNIX=y | 25 | CONFIG_UNIX=y |
25 | CONFIG_NET_KEY=m | 26 | CONFIG_NET_KEY=m |
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 5d25462de8a6..2f7c734771f4 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S | |||
@@ -129,7 +129,11 @@ NESTED(_mcount, PT_SIZE, ra) | |||
129 | nop | 129 | nop |
130 | #endif | 130 | #endif |
131 | b ftrace_stub | 131 | b ftrace_stub |
132 | #ifdef CONFIG_32BIT | ||
133 | addiu sp, sp, 8 | ||
134 | #else | ||
132 | nop | 135 | nop |
136 | #endif | ||
133 | 137 | ||
134 | static_trace: | 138 | static_trace: |
135 | MCOUNT_SAVE_REGS | 139 | MCOUNT_SAVE_REGS |
@@ -139,6 +143,9 @@ static_trace: | |||
139 | move a1, AT /* arg2: parent's return address */ | 143 | move a1, AT /* arg2: parent's return address */ |
140 | 144 | ||
141 | MCOUNT_RESTORE_REGS | 145 | MCOUNT_RESTORE_REGS |
146 | #ifdef CONFIG_32BIT | ||
147 | addiu sp, sp, 8 | ||
148 | #endif | ||
142 | .globl ftrace_stub | 149 | .globl ftrace_stub |
143 | ftrace_stub: | 150 | ftrace_stub: |
144 | RETURN_BACK | 151 | RETURN_BACK |
@@ -183,6 +190,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra) | |||
183 | jal prepare_ftrace_return | 190 | jal prepare_ftrace_return |
184 | nop | 191 | nop |
185 | MCOUNT_RESTORE_REGS | 192 | MCOUNT_RESTORE_REGS |
193 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
194 | #ifdef CONFIG_32BIT | ||
195 | addiu sp, sp, 8 | ||
196 | #endif | ||
197 | #endif | ||
186 | RETURN_BACK | 198 | RETURN_BACK |
187 | END(ftrace_graph_caller) | 199 | END(ftrace_graph_caller) |
188 | 200 | ||
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index bf0fc6b16ad9..7a4727795a70 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -650,9 +650,9 @@ static inline int cop1_64bit(struct pt_regs *xcp) | |||
650 | #define SIFROMREG(si, x) \ | 650 | #define SIFROMREG(si, x) \ |
651 | do { \ | 651 | do { \ |
652 | if (cop1_64bit(xcp)) \ | 652 | if (cop1_64bit(xcp)) \ |
653 | (si) = get_fpr32(&ctx->fpr[x], 0); \ | 653 | (si) = (int)get_fpr32(&ctx->fpr[x], 0); \ |
654 | else \ | 654 | else \ |
655 | (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ | 655 | (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ |
656 | } while (0) | 656 | } while (0) |
657 | 657 | ||
658 | #define SITOREG(si, x) \ | 658 | #define SITOREG(si, x) \ |
@@ -667,7 +667,7 @@ do { \ | |||
667 | } \ | 667 | } \ |
668 | } while (0) | 668 | } while (0) |
669 | 669 | ||
670 | #define SIFROMHREG(si, x) ((si) = get_fpr32(&ctx->fpr[x], 1)) | 670 | #define SIFROMHREG(si, x) ((si) = (int)get_fpr32(&ctx->fpr[x], 1)) |
671 | 671 | ||
672 | #define SITOHREG(si, x) \ | 672 | #define SITOHREG(si, x) \ |
673 | do { \ | 673 | do { \ |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 571aab064936..f42e35e42790 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -53,6 +53,7 @@ | |||
53 | */ | 53 | */ |
54 | unsigned long empty_zero_page, zero_page_mask; | 54 | unsigned long empty_zero_page, zero_page_mask; |
55 | EXPORT_SYMBOL_GPL(empty_zero_page); | 55 | EXPORT_SYMBOL_GPL(empty_zero_page); |
56 | EXPORT_SYMBOL(zero_page_mask); | ||
56 | 57 | ||
57 | /* | 58 | /* |
58 | * Not static inline because used by IP27 special magic initialization code | 59 | * Not static inline because used by IP27 special magic initialization code |
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 7187664034c3..5db8882f732c 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
@@ -48,7 +48,12 @@ cflags-y := -pipe | |||
48 | 48 | ||
49 | # These flags should be implied by an hppa-linux configuration, but they | 49 | # These flags should be implied by an hppa-linux configuration, but they |
50 | # are not in gcc 3.2. | 50 | # are not in gcc 3.2. |
51 | cflags-y += -mno-space-regs -mfast-indirect-calls | 51 | cflags-y += -mno-space-regs |
52 | |||
53 | # -mfast-indirect-calls is only relevant for 32-bit kernels. | ||
54 | ifndef CONFIG_64BIT | ||
55 | cflags-y += -mfast-indirect-calls | ||
56 | endif | ||
52 | 57 | ||
53 | # Currently we save and restore fpregs on all kernel entry/interruption paths. | 58 | # Currently we save and restore fpregs on all kernel entry/interruption paths. |
54 | # If that gets optimized, we might need to disable the use of fpregs in the | 59 | # If that gets optimized, we might need to disable the use of fpregs in the |
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig index 90025322b75e..0490199d7b15 100644 --- a/arch/parisc/configs/a500_defconfig +++ b/arch/parisc/configs/a500_defconfig | |||
@@ -31,6 +31,7 @@ CONFIG_PD6729=m | |||
31 | CONFIG_I82092=m | 31 | CONFIG_I82092=m |
32 | # CONFIG_SUPERIO is not set | 32 | # CONFIG_SUPERIO is not set |
33 | # CONFIG_CHASSIS_LCD_LED is not set | 33 | # CONFIG_CHASSIS_LCD_LED is not set |
34 | CONFIG_NET=y | ||
34 | CONFIG_PACKET=y | 35 | CONFIG_PACKET=y |
35 | CONFIG_UNIX=y | 36 | CONFIG_UNIX=y |
36 | CONFIG_XFRM_USER=m | 37 | CONFIG_XFRM_USER=m |
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig index 8249ac9d9cfc..269c23d23fcb 100644 --- a/arch/parisc/configs/c8000_defconfig +++ b/arch/parisc/configs/c8000_defconfig | |||
@@ -33,6 +33,7 @@ CONFIG_PCI_LBA=y | |||
33 | # CONFIG_PDC_CHASSIS_WARN is not set | 33 | # CONFIG_PDC_CHASSIS_WARN is not set |
34 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 34 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
35 | CONFIG_BINFMT_MISC=m | 35 | CONFIG_BINFMT_MISC=m |
36 | CONFIG_NET=y | ||
36 | CONFIG_PACKET=y | 37 | CONFIG_PACKET=y |
37 | CONFIG_UNIX=y | 38 | CONFIG_UNIX=y |
38 | CONFIG_XFRM_USER=m | 39 | CONFIG_XFRM_USER=m |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 3bab72462ab5..92438c21d453 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/user.h> | 17 | #include <linux/user.h> |
18 | #include <linux/personality.h> | 18 | #include <linux/personality.h> |
19 | #include <linux/security.h> | 19 | #include <linux/security.h> |
20 | #include <linux/seccomp.h> | ||
20 | #include <linux/compat.h> | 21 | #include <linux/compat.h> |
21 | #include <linux/signal.h> | 22 | #include <linux/signal.h> |
22 | #include <linux/audit.h> | 23 | #include <linux/audit.h> |
@@ -271,10 +272,7 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
271 | long ret = 0; | 272 | long ret = 0; |
272 | 273 | ||
273 | /* Do the secure computing check first. */ | 274 | /* Do the secure computing check first. */ |
274 | if (secure_computing(regs->gr[20])) { | 275 | secure_computing_strict(regs->gr[20]); |
275 | /* seccomp failures shouldn't expose any additional code. */ | ||
276 | return -1; | ||
277 | } | ||
278 | 276 | ||
279 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 277 | if (test_thread_flag(TIF_SYSCALL_TRACE) && |
280 | tracehook_report_syscall_entry(regs)) | 278 | tracehook_report_syscall_entry(regs)) |
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig index 5e2aa43562b5..59734916986a 100644 --- a/arch/powerpc/configs/c2k_defconfig +++ b/arch/powerpc/configs/c2k_defconfig | |||
@@ -29,6 +29,7 @@ CONFIG_PM=y | |||
29 | CONFIG_PCI_MSI=y | 29 | CONFIG_PCI_MSI=y |
30 | CONFIG_HOTPLUG_PCI=y | 30 | CONFIG_HOTPLUG_PCI=y |
31 | CONFIG_HOTPLUG_PCI_SHPC=m | 31 | CONFIG_HOTPLUG_PCI_SHPC=m |
32 | CONFIG_NET=y | ||
32 | CONFIG_PACKET=y | 33 | CONFIG_PACKET=y |
33 | CONFIG_UNIX=y | 34 | CONFIG_UNIX=y |
34 | CONFIG_XFRM_USER=y | 35 | CONFIG_XFRM_USER=y |
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 553e66278010..0351b5ffdfef 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig | |||
@@ -31,6 +31,7 @@ CONFIG_HIBERNATION=y | |||
31 | CONFIG_APM_EMULATION=y | 31 | CONFIG_APM_EMULATION=y |
32 | CONFIG_PCCARD=m | 32 | CONFIG_PCCARD=m |
33 | CONFIG_YENTA=m | 33 | CONFIG_YENTA=m |
34 | CONFIG_NET=y | ||
34 | CONFIG_PACKET=y | 35 | CONFIG_PACKET=y |
35 | CONFIG_UNIX=y | 36 | CONFIG_UNIX=y |
36 | CONFIG_XFRM_USER=y | 37 | CONFIG_XFRM_USER=y |
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index f6c02f8cdc62..36518870e6b2 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
@@ -58,6 +58,7 @@ CONFIG_ELECTRA_CF=y | |||
58 | CONFIG_HOTPLUG_PCI=y | 58 | CONFIG_HOTPLUG_PCI=y |
59 | CONFIG_HOTPLUG_PCI_RPA=m | 59 | CONFIG_HOTPLUG_PCI_RPA=m |
60 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m | 60 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m |
61 | CONFIG_NET=y | ||
61 | CONFIG_PACKET=y | 62 | CONFIG_PACKET=y |
62 | CONFIG_UNIX=y | 63 | CONFIG_UNIX=y |
63 | CONFIG_XFRM_USER=m | 64 | CONFIG_XFRM_USER=m |
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 587f5514f9b1..c3a3269b0865 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig | |||
@@ -33,6 +33,7 @@ CONFIG_SPARSEMEM_MANUAL=y | |||
33 | CONFIG_PCI_MSI=y | 33 | CONFIG_PCI_MSI=y |
34 | CONFIG_PCCARD=y | 34 | CONFIG_PCCARD=y |
35 | CONFIG_HOTPLUG_PCI=y | 35 | CONFIG_HOTPLUG_PCI=y |
36 | CONFIG_NET=y | ||
36 | CONFIG_PACKET=y | 37 | CONFIG_PACKET=y |
37 | CONFIG_UNIX=y | 38 | CONFIG_UNIX=y |
38 | CONFIG_XFRM_USER=m | 39 | CONFIG_XFRM_USER=m |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 50375f1f59e7..dd2a9cab4b50 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
@@ -53,6 +53,7 @@ CONFIG_SCHED_SMT=y | |||
53 | CONFIG_HOTPLUG_PCI=y | 53 | CONFIG_HOTPLUG_PCI=y |
54 | CONFIG_HOTPLUG_PCI_RPA=m | 54 | CONFIG_HOTPLUG_PCI_RPA=m |
55 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m | 55 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m |
56 | CONFIG_NET=y | ||
56 | CONFIG_PACKET=y | 57 | CONFIG_PACKET=y |
57 | CONFIG_UNIX=y | 58 | CONFIG_UNIX=y |
58 | CONFIG_XFRM_USER=m | 59 | CONFIG_XFRM_USER=m |
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig index 4428ee428f4e..63392f4b29a4 100644 --- a/arch/powerpc/configs/pseries_le_defconfig +++ b/arch/powerpc/configs/pseries_le_defconfig | |||
@@ -55,6 +55,7 @@ CONFIG_SCHED_SMT=y | |||
55 | CONFIG_HOTPLUG_PCI=y | 55 | CONFIG_HOTPLUG_PCI=y |
56 | CONFIG_HOTPLUG_PCI_RPA=m | 56 | CONFIG_HOTPLUG_PCI_RPA=m |
57 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m | 57 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m |
58 | CONFIG_NET=y | ||
58 | CONFIG_PACKET=y | 59 | CONFIG_PACKET=y |
59 | CONFIG_UNIX=y | 60 | CONFIG_UNIX=y |
60 | CONFIG_XFRM_USER=m | 61 | CONFIG_XFRM_USER=m |
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 3ca1894ade09..9d94fdd9f525 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig | |||
@@ -63,6 +63,7 @@ CONFIG_CRASH_DUMP=y | |||
63 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 63 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
64 | CONFIG_BINFMT_MISC=m | 64 | CONFIG_BINFMT_MISC=m |
65 | CONFIG_HIBERNATION=y | 65 | CONFIG_HIBERNATION=y |
66 | CONFIG_NET=y | ||
66 | CONFIG_PACKET=y | 67 | CONFIG_PACKET=y |
67 | CONFIG_PACKET_DIAG=m | 68 | CONFIG_PACKET_DIAG=m |
68 | CONFIG_UNIX=y | 69 | CONFIG_UNIX=y |
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 4830aa6e6f53..90f514baa37d 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig | |||
@@ -61,6 +61,7 @@ CONFIG_CRASH_DUMP=y | |||
61 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 61 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
62 | CONFIG_BINFMT_MISC=m | 62 | CONFIG_BINFMT_MISC=m |
63 | CONFIG_HIBERNATION=y | 63 | CONFIG_HIBERNATION=y |
64 | CONFIG_NET=y | ||
64 | CONFIG_PACKET=y | 65 | CONFIG_PACKET=y |
65 | CONFIG_PACKET_DIAG=m | 66 | CONFIG_PACKET_DIAG=m |
66 | CONFIG_UNIX=y | 67 | CONFIG_UNIX=y |
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 61db449bf309..13559d32af69 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
@@ -59,6 +59,7 @@ CONFIG_CRASH_DUMP=y | |||
59 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 59 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
60 | CONFIG_BINFMT_MISC=m | 60 | CONFIG_BINFMT_MISC=m |
61 | CONFIG_HIBERNATION=y | 61 | CONFIG_HIBERNATION=y |
62 | CONFIG_NET=y | ||
62 | CONFIG_PACKET=y | 63 | CONFIG_PACKET=y |
63 | CONFIG_PACKET_DIAG=m | 64 | CONFIG_PACKET_DIAG=m |
64 | CONFIG_UNIX=y | 65 | CONFIG_UNIX=y |
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 948e0e057a23..e376789f2d8d 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig | |||
@@ -23,6 +23,7 @@ CONFIG_CRASH_DUMP=y | |||
23 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 23 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
24 | # CONFIG_SECCOMP is not set | 24 | # CONFIG_SECCOMP is not set |
25 | # CONFIG_IUCV is not set | 25 | # CONFIG_IUCV is not set |
26 | CONFIG_NET=y | ||
26 | CONFIG_ATM=y | 27 | CONFIG_ATM=y |
27 | CONFIG_ATM_LANE=y | 28 | CONFIG_ATM_LANE=y |
28 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 29 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 2e56498a40df..fab35a8efa4f 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -50,6 +50,7 @@ CONFIG_CMA=y | |||
50 | CONFIG_CRASH_DUMP=y | 50 | CONFIG_CRASH_DUMP=y |
51 | CONFIG_BINFMT_MISC=m | 51 | CONFIG_BINFMT_MISC=m |
52 | CONFIG_HIBERNATION=y | 52 | CONFIG_HIBERNATION=y |
53 | CONFIG_NET=y | ||
53 | CONFIG_PACKET=y | 54 | CONFIG_PACKET=y |
54 | CONFIG_UNIX=y | 55 | CONFIG_UNIX=y |
55 | CONFIG_NET_KEY=y | 56 | CONFIG_NET_KEY=y |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 0c1073ed1e84..c7235e01fd67 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -43,6 +43,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); | |||
43 | 43 | ||
44 | unsigned long empty_zero_page, zero_page_mask; | 44 | unsigned long empty_zero_page, zero_page_mask; |
45 | EXPORT_SYMBOL(empty_zero_page); | 45 | EXPORT_SYMBOL(empty_zero_page); |
46 | EXPORT_SYMBOL(zero_page_mask); | ||
46 | 47 | ||
47 | static void __init setup_zero_pages(void) | 48 | static void __init setup_zero_pages(void) |
48 | { | 49 | { |
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig index 6a96b9a2f7a5..bbd4c2298708 100644 --- a/arch/sh/configs/sdk7780_defconfig +++ b/arch/sh/configs/sdk7780_defconfig | |||
@@ -30,6 +30,7 @@ CONFIG_PCI_DEBUG=y | |||
30 | CONFIG_PCCARD=y | 30 | CONFIG_PCCARD=y |
31 | CONFIG_YENTA=y | 31 | CONFIG_YENTA=y |
32 | CONFIG_HOTPLUG_PCI=y | 32 | CONFIG_HOTPLUG_PCI=y |
33 | CONFIG_NET=y | ||
33 | CONFIG_PACKET=y | 34 | CONFIG_PACKET=y |
34 | CONFIG_UNIX=y | 35 | CONFIG_UNIX=y |
35 | CONFIG_INET=y | 36 | CONFIG_INET=y |
diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index e741b1e36acd..df25ae774ee0 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig | |||
@@ -25,6 +25,7 @@ CONFIG_CMDLINE_OVERWRITE=y | |||
25 | CONFIG_CMDLINE="console=ttySC1,115200 ip=dhcp root=/dev/nfs rw nfsroot=/nfs/rootfs,rsize=1024,wsize=1024 earlyprintk=sh-sci.1" | 25 | CONFIG_CMDLINE="console=ttySC1,115200 ip=dhcp root=/dev/nfs rw nfsroot=/nfs/rootfs,rsize=1024,wsize=1024 earlyprintk=sh-sci.1" |
26 | CONFIG_PCCARD=y | 26 | CONFIG_PCCARD=y |
27 | CONFIG_BINFMT_MISC=y | 27 | CONFIG_BINFMT_MISC=y |
28 | CONFIG_NET=y | ||
28 | CONFIG_PACKET=y | 29 | CONFIG_PACKET=y |
29 | CONFIG_UNIX=y | 30 | CONFIG_UNIX=y |
30 | CONFIG_XFRM_USER=y | 31 | CONFIG_XFRM_USER=y |
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 9d8521b8c854..6b68f12f29db 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig | |||
@@ -29,6 +29,7 @@ CONFIG_PCI=y | |||
29 | CONFIG_PCI_MSI=y | 29 | CONFIG_PCI_MSI=y |
30 | CONFIG_SUN_OPENPROMFS=m | 30 | CONFIG_SUN_OPENPROMFS=m |
31 | CONFIG_BINFMT_MISC=m | 31 | CONFIG_BINFMT_MISC=m |
32 | CONFIG_NET=y | ||
32 | CONFIG_PACKET=y | 33 | CONFIG_PACKET=y |
33 | CONFIG_UNIX=y | 34 | CONFIG_UNIX=y |
34 | CONFIG_XFRM_USER=m | 35 | CONFIG_XFRM_USER=m |
diff --git a/arch/sparc/net/bpf_jit_asm.S b/arch/sparc/net/bpf_jit_asm.S index 9d016c7017f7..8c83f4b8eb15 100644 --- a/arch/sparc/net/bpf_jit_asm.S +++ b/arch/sparc/net/bpf_jit_asm.S | |||
@@ -6,10 +6,12 @@ | |||
6 | #define SAVE_SZ 176 | 6 | #define SAVE_SZ 176 |
7 | #define SCRATCH_OFF STACK_BIAS + 128 | 7 | #define SCRATCH_OFF STACK_BIAS + 128 |
8 | #define BE_PTR(label) be,pn %xcc, label | 8 | #define BE_PTR(label) be,pn %xcc, label |
9 | #define SIGN_EXTEND(reg) sra reg, 0, reg | ||
9 | #else | 10 | #else |
10 | #define SAVE_SZ 96 | 11 | #define SAVE_SZ 96 |
11 | #define SCRATCH_OFF 72 | 12 | #define SCRATCH_OFF 72 |
12 | #define BE_PTR(label) be label | 13 | #define BE_PTR(label) be label |
14 | #define SIGN_EXTEND(reg) | ||
13 | #endif | 15 | #endif |
14 | 16 | ||
15 | #define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ | 17 | #define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ |
@@ -135,6 +137,7 @@ bpf_slow_path_byte_msh: | |||
135 | save %sp, -SAVE_SZ, %sp; \ | 137 | save %sp, -SAVE_SZ, %sp; \ |
136 | mov %i0, %o0; \ | 138 | mov %i0, %o0; \ |
137 | mov r_OFF, %o1; \ | 139 | mov r_OFF, %o1; \ |
140 | SIGN_EXTEND(%o1); \ | ||
138 | call bpf_internal_load_pointer_neg_helper; \ | 141 | call bpf_internal_load_pointer_neg_helper; \ |
139 | mov (LEN), %o2; \ | 142 | mov (LEN), %o2; \ |
140 | mov %o0, r_TMP; \ | 143 | mov %o0, r_TMP; \ |
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 1f76c22a6a75..ece4af0575e9 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c | |||
@@ -184,7 +184,7 @@ do { \ | |||
184 | */ | 184 | */ |
185 | #define emit_alu_K(OPCODE, K) \ | 185 | #define emit_alu_K(OPCODE, K) \ |
186 | do { \ | 186 | do { \ |
187 | if (K) { \ | 187 | if (K || OPCODE == AND || OPCODE == MUL) { \ |
188 | unsigned int _insn = OPCODE; \ | 188 | unsigned int _insn = OPCODE; \ |
189 | _insn |= RS1(r_A) | RD(r_A); \ | 189 | _insn |= RS1(r_A) | RD(r_A); \ |
190 | if (is_simm13(K)) { \ | 190 | if (is_simm13(K)) { \ |
@@ -234,12 +234,18 @@ do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \ | |||
234 | __emit_load8(BASE, STRUCT, FIELD, DEST); \ | 234 | __emit_load8(BASE, STRUCT, FIELD, DEST); \ |
235 | } while (0) | 235 | } while (0) |
236 | 236 | ||
237 | #define emit_ldmem(OFF, DEST) \ | 237 | #ifdef CONFIG_SPARC64 |
238 | do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST); \ | 238 | #define BIAS (STACK_BIAS - 4) |
239 | #else | ||
240 | #define BIAS (-4) | ||
241 | #endif | ||
242 | |||
243 | #define emit_ldmem(OFF, DEST) \ | ||
244 | do { *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \ | ||
239 | } while (0) | 245 | } while (0) |
240 | 246 | ||
241 | #define emit_stmem(OFF, SRC) \ | 247 | #define emit_stmem(OFF, SRC) \ |
242 | do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC); \ | 248 | do { *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \ |
243 | } while (0) | 249 | } while (0) |
244 | 250 | ||
245 | #ifdef CONFIG_SMP | 251 | #ifdef CONFIG_SMP |
@@ -615,10 +621,11 @@ void bpf_jit_compile(struct bpf_prog *fp) | |||
615 | case BPF_ANC | SKF_AD_VLAN_TAG: | 621 | case BPF_ANC | SKF_AD_VLAN_TAG: |
616 | case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: | 622 | case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: |
617 | emit_skb_load16(vlan_tci, r_A); | 623 | emit_skb_load16(vlan_tci, r_A); |
618 | if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { | 624 | if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) { |
619 | emit_andi(r_A, VLAN_VID_MASK, r_A); | 625 | emit_alu_K(SRL, 12); |
626 | emit_andi(r_A, 1, r_A); | ||
620 | } else { | 627 | } else { |
621 | emit_loadimm(VLAN_TAG_PRESENT, r_TMP); | 628 | emit_loadimm(~VLAN_TAG_PRESENT, r_TMP); |
622 | emit_and(r_A, r_TMP, r_A); | 629 | emit_and(r_A, r_TMP, r_A); |
623 | } | 630 | } |
624 | break; | 631 | break; |
@@ -630,15 +637,19 @@ void bpf_jit_compile(struct bpf_prog *fp) | |||
630 | emit_loadimm(K, r_X); | 637 | emit_loadimm(K, r_X); |
631 | break; | 638 | break; |
632 | case BPF_LD | BPF_MEM: | 639 | case BPF_LD | BPF_MEM: |
640 | seen |= SEEN_MEM; | ||
633 | emit_ldmem(K * 4, r_A); | 641 | emit_ldmem(K * 4, r_A); |
634 | break; | 642 | break; |
635 | case BPF_LDX | BPF_MEM: | 643 | case BPF_LDX | BPF_MEM: |
644 | seen |= SEEN_MEM | SEEN_XREG; | ||
636 | emit_ldmem(K * 4, r_X); | 645 | emit_ldmem(K * 4, r_X); |
637 | break; | 646 | break; |
638 | case BPF_ST: | 647 | case BPF_ST: |
648 | seen |= SEEN_MEM; | ||
639 | emit_stmem(K * 4, r_A); | 649 | emit_stmem(K * 4, r_A); |
640 | break; | 650 | break; |
641 | case BPF_STX: | 651 | case BPF_STX: |
652 | seen |= SEEN_MEM | SEEN_XREG; | ||
642 | emit_stmem(K * 4, r_X); | 653 | emit_stmem(K * 4, r_X); |
643 | break; | 654 | break; |
644 | 655 | ||
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 7a801a310e37..0fcd9133790c 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -33,8 +33,7 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ | |||
33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone | 33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone |
34 | 34 | ||
35 | ifeq ($(CONFIG_EFI_STUB), y) | 35 | ifeq ($(CONFIG_EFI_STUB), y) |
36 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ | 36 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o |
37 | $(objtree)/drivers/firmware/efi/libstub/lib.a | ||
38 | endif | 37 | endif |
39 | 38 | ||
40 | $(obj)/vmlinux: $(VMLINUX_OBJS) FORCE | 39 | $(obj)/vmlinux: $(VMLINUX_OBJS) FORCE |
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index fc6091abedb7..d39189ba7f8e 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c | |||
@@ -183,12 +183,27 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size, | |||
183 | static bool mem_avoid_overlap(struct mem_vector *img) | 183 | static bool mem_avoid_overlap(struct mem_vector *img) |
184 | { | 184 | { |
185 | int i; | 185 | int i; |
186 | struct setup_data *ptr; | ||
186 | 187 | ||
187 | for (i = 0; i < MEM_AVOID_MAX; i++) { | 188 | for (i = 0; i < MEM_AVOID_MAX; i++) { |
188 | if (mem_overlaps(img, &mem_avoid[i])) | 189 | if (mem_overlaps(img, &mem_avoid[i])) |
189 | return true; | 190 | return true; |
190 | } | 191 | } |
191 | 192 | ||
193 | /* Avoid all entries in the setup_data linked list. */ | ||
194 | ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data; | ||
195 | while (ptr) { | ||
196 | struct mem_vector avoid; | ||
197 | |||
198 | avoid.start = (u64)ptr; | ||
199 | avoid.size = sizeof(*ptr) + ptr->len; | ||
200 | |||
201 | if (mem_overlaps(img, &avoid)) | ||
202 | return true; | ||
203 | |||
204 | ptr = (struct setup_data *)(unsigned long)ptr->next; | ||
205 | } | ||
206 | |||
192 | return false; | 207 | return false; |
193 | } | 208 | } |
194 | 209 | ||
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index dca9842d8f91..de8eebd6f67c 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -19,7 +19,10 @@ | |||
19 | 19 | ||
20 | static efi_system_table_t *sys_table; | 20 | static efi_system_table_t *sys_table; |
21 | 21 | ||
22 | struct efi_config *efi_early; | 22 | static struct efi_config *efi_early; |
23 | |||
24 | #define efi_call_early(f, ...) \ | ||
25 | efi_early->call(efi_early->f, __VA_ARGS__); | ||
23 | 26 | ||
24 | #define BOOT_SERVICES(bits) \ | 27 | #define BOOT_SERVICES(bits) \ |
25 | static void setup_boot_services##bits(struct efi_config *c) \ | 28 | static void setup_boot_services##bits(struct efi_config *c) \ |
@@ -265,21 +268,25 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) | |||
265 | 268 | ||
266 | offset = offsetof(typeof(*out), output_string); | 269 | offset = offsetof(typeof(*out), output_string); |
267 | output_string = efi_early->text_output + offset; | 270 | output_string = efi_early->text_output + offset; |
271 | out = (typeof(out))(unsigned long)efi_early->text_output; | ||
268 | func = (u64 *)output_string; | 272 | func = (u64 *)output_string; |
269 | 273 | ||
270 | efi_early->call(*func, efi_early->text_output, str); | 274 | efi_early->call(*func, out, str); |
271 | } else { | 275 | } else { |
272 | struct efi_simple_text_output_protocol_32 *out; | 276 | struct efi_simple_text_output_protocol_32 *out; |
273 | u32 *func; | 277 | u32 *func; |
274 | 278 | ||
275 | offset = offsetof(typeof(*out), output_string); | 279 | offset = offsetof(typeof(*out), output_string); |
276 | output_string = efi_early->text_output + offset; | 280 | output_string = efi_early->text_output + offset; |
281 | out = (typeof(out))(unsigned long)efi_early->text_output; | ||
277 | func = (u32 *)output_string; | 282 | func = (u32 *)output_string; |
278 | 283 | ||
279 | efi_early->call(*func, efi_early->text_output, str); | 284 | efi_early->call(*func, out, str); |
280 | } | 285 | } |
281 | } | 286 | } |
282 | 287 | ||
288 | #include "../../../../drivers/firmware/efi/libstub/efi-stub-helper.c" | ||
289 | |||
283 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) | 290 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) |
284 | { | 291 | { |
285 | u8 first, len; | 292 | u8 first, len; |
@@ -360,7 +367,7 @@ free_struct: | |||
360 | return status; | 367 | return status; |
361 | } | 368 | } |
362 | 369 | ||
363 | static efi_status_t | 370 | static void |
364 | setup_efi_pci32(struct boot_params *params, void **pci_handle, | 371 | setup_efi_pci32(struct boot_params *params, void **pci_handle, |
365 | unsigned long size) | 372 | unsigned long size) |
366 | { | 373 | { |
@@ -403,8 +410,6 @@ setup_efi_pci32(struct boot_params *params, void **pci_handle, | |||
403 | data = (struct setup_data *)rom; | 410 | data = (struct setup_data *)rom; |
404 | 411 | ||
405 | } | 412 | } |
406 | |||
407 | return status; | ||
408 | } | 413 | } |
409 | 414 | ||
410 | static efi_status_t | 415 | static efi_status_t |
@@ -463,7 +468,7 @@ free_struct: | |||
463 | 468 | ||
464 | } | 469 | } |
465 | 470 | ||
466 | static efi_status_t | 471 | static void |
467 | setup_efi_pci64(struct boot_params *params, void **pci_handle, | 472 | setup_efi_pci64(struct boot_params *params, void **pci_handle, |
468 | unsigned long size) | 473 | unsigned long size) |
469 | { | 474 | { |
@@ -506,11 +511,18 @@ setup_efi_pci64(struct boot_params *params, void **pci_handle, | |||
506 | data = (struct setup_data *)rom; | 511 | data = (struct setup_data *)rom; |
507 | 512 | ||
508 | } | 513 | } |
509 | |||
510 | return status; | ||
511 | } | 514 | } |
512 | 515 | ||
513 | static efi_status_t setup_efi_pci(struct boot_params *params) | 516 | /* |
517 | * There's no way to return an informative status from this function, | ||
518 | * because any analysis (and printing of error messages) needs to be | ||
519 | * done directly at the EFI function call-site. | ||
520 | * | ||
521 | * For example, EFI_INVALID_PARAMETER could indicate a bug or maybe we | ||
522 | * just didn't find any PCI devices, but there's no way to tell outside | ||
523 | * the context of the call. | ||
524 | */ | ||
525 | static void setup_efi_pci(struct boot_params *params) | ||
514 | { | 526 | { |
515 | efi_status_t status; | 527 | efi_status_t status; |
516 | void **pci_handle = NULL; | 528 | void **pci_handle = NULL; |
@@ -527,7 +539,7 @@ static efi_status_t setup_efi_pci(struct boot_params *params) | |||
527 | size, (void **)&pci_handle); | 539 | size, (void **)&pci_handle); |
528 | 540 | ||
529 | if (status != EFI_SUCCESS) | 541 | if (status != EFI_SUCCESS) |
530 | return status; | 542 | return; |
531 | 543 | ||
532 | status = efi_call_early(locate_handle, | 544 | status = efi_call_early(locate_handle, |
533 | EFI_LOCATE_BY_PROTOCOL, &pci_proto, | 545 | EFI_LOCATE_BY_PROTOCOL, &pci_proto, |
@@ -538,13 +550,12 @@ static efi_status_t setup_efi_pci(struct boot_params *params) | |||
538 | goto free_handle; | 550 | goto free_handle; |
539 | 551 | ||
540 | if (efi_early->is64) | 552 | if (efi_early->is64) |
541 | status = setup_efi_pci64(params, pci_handle, size); | 553 | setup_efi_pci64(params, pci_handle, size); |
542 | else | 554 | else |
543 | status = setup_efi_pci32(params, pci_handle, size); | 555 | setup_efi_pci32(params, pci_handle, size); |
544 | 556 | ||
545 | free_handle: | 557 | free_handle: |
546 | efi_call_early(free_pool, pci_handle); | 558 | efi_call_early(free_pool, pci_handle); |
547 | return status; | ||
548 | } | 559 | } |
549 | 560 | ||
550 | static void | 561 | static void |
@@ -1380,10 +1391,7 @@ struct boot_params *efi_main(struct efi_config *c, | |||
1380 | 1391 | ||
1381 | setup_graphics(boot_params); | 1392 | setup_graphics(boot_params); |
1382 | 1393 | ||
1383 | status = setup_efi_pci(boot_params); | 1394 | setup_efi_pci(boot_params); |
1384 | if (status != EFI_SUCCESS) { | ||
1385 | efi_printk(sys_table, "setup_efi_pci() failed!\n"); | ||
1386 | } | ||
1387 | 1395 | ||
1388 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, | 1396 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, |
1389 | sizeof(*gdt), (void **)&gdt); | 1397 | sizeof(*gdt), (void **)&gdt); |
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h index d487e727f1ec..c88c31ecad12 100644 --- a/arch/x86/boot/compressed/eboot.h +++ b/arch/x86/boot/compressed/eboot.h | |||
@@ -103,4 +103,20 @@ struct efi_uga_draw_protocol { | |||
103 | void *blt; | 103 | void *blt; |
104 | }; | 104 | }; |
105 | 105 | ||
106 | struct efi_config { | ||
107 | u64 image_handle; | ||
108 | u64 table; | ||
109 | u64 allocate_pool; | ||
110 | u64 allocate_pages; | ||
111 | u64 get_memory_map; | ||
112 | u64 free_pool; | ||
113 | u64 free_pages; | ||
114 | u64 locate_handle; | ||
115 | u64 handle_protocol; | ||
116 | u64 exit_boot_services; | ||
117 | u64 text_output; | ||
118 | efi_status_t (*call)(unsigned long, ...); | ||
119 | bool is64; | ||
120 | } __packed; | ||
121 | |||
106 | #endif /* BOOT_COMPRESSED_EBOOT_H */ | 122 | #endif /* BOOT_COMPRESSED_EBOOT_H */ |
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index d6b8aa4c986c..cbed1407a5cd 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S | |||
@@ -30,33 +30,6 @@ | |||
30 | #include <asm/boot.h> | 30 | #include <asm/boot.h> |
31 | #include <asm/asm-offsets.h> | 31 | #include <asm/asm-offsets.h> |
32 | 32 | ||
33 | /* | ||
34 | * Adjust our own GOT | ||
35 | * | ||
36 | * The relocation base must be in %ebx | ||
37 | * | ||
38 | * It is safe to call this macro more than once, because in some of the | ||
39 | * code paths multiple invocations are inevitable, e.g. via the efi* | ||
40 | * entry points. | ||
41 | * | ||
42 | * Relocation is only performed the first time. | ||
43 | */ | ||
44 | .macro FIXUP_GOT | ||
45 | cmpb $1, got_fixed(%ebx) | ||
46 | je 2f | ||
47 | |||
48 | leal _got(%ebx), %edx | ||
49 | leal _egot(%ebx), %ecx | ||
50 | 1: | ||
51 | cmpl %ecx, %edx | ||
52 | jae 2f | ||
53 | addl %ebx, (%edx) | ||
54 | addl $4, %edx | ||
55 | jmp 1b | ||
56 | 2: | ||
57 | movb $1, got_fixed(%ebx) | ||
58 | .endm | ||
59 | |||
60 | __HEAD | 33 | __HEAD |
61 | ENTRY(startup_32) | 34 | ENTRY(startup_32) |
62 | #ifdef CONFIG_EFI_STUB | 35 | #ifdef CONFIG_EFI_STUB |
@@ -83,9 +56,6 @@ ENTRY(efi_pe_entry) | |||
83 | add %esi, 88(%eax) | 56 | add %esi, 88(%eax) |
84 | pushl %eax | 57 | pushl %eax |
85 | 58 | ||
86 | movl %esi, %ebx | ||
87 | FIXUP_GOT | ||
88 | |||
89 | call make_boot_params | 59 | call make_boot_params |
90 | cmpl $0, %eax | 60 | cmpl $0, %eax |
91 | je fail | 61 | je fail |
@@ -111,10 +81,6 @@ ENTRY(efi32_stub_entry) | |||
111 | leal efi32_config(%esi), %eax | 81 | leal efi32_config(%esi), %eax |
112 | add %esi, 88(%eax) | 82 | add %esi, 88(%eax) |
113 | pushl %eax | 83 | pushl %eax |
114 | |||
115 | movl %esi, %ebx | ||
116 | FIXUP_GOT | ||
117 | |||
118 | 2: | 84 | 2: |
119 | call efi_main | 85 | call efi_main |
120 | cmpl $0, %eax | 86 | cmpl $0, %eax |
@@ -224,7 +190,19 @@ relocated: | |||
224 | shrl $2, %ecx | 190 | shrl $2, %ecx |
225 | rep stosl | 191 | rep stosl |
226 | 192 | ||
227 | FIXUP_GOT | 193 | /* |
194 | * Adjust our own GOT | ||
195 | */ | ||
196 | leal _got(%ebx), %edx | ||
197 | leal _egot(%ebx), %ecx | ||
198 | 1: | ||
199 | cmpl %ecx, %edx | ||
200 | jae 2f | ||
201 | addl %ebx, (%edx) | ||
202 | addl $4, %edx | ||
203 | jmp 1b | ||
204 | 2: | ||
205 | |||
228 | /* | 206 | /* |
229 | * Do the decompression, and jump to the new kernel.. | 207 | * Do the decompression, and jump to the new kernel.. |
230 | */ | 208 | */ |
@@ -247,12 +225,8 @@ relocated: | |||
247 | xorl %ebx, %ebx | 225 | xorl %ebx, %ebx |
248 | jmp *%eax | 226 | jmp *%eax |
249 | 227 | ||
250 | .data | ||
251 | /* Have we relocated the GOT? */ | ||
252 | got_fixed: | ||
253 | .byte 0 | ||
254 | |||
255 | #ifdef CONFIG_EFI_STUB | 228 | #ifdef CONFIG_EFI_STUB |
229 | .data | ||
256 | efi32_config: | 230 | efi32_config: |
257 | .fill 11,8,0 | 231 | .fill 11,8,0 |
258 | .long efi_call_phys | 232 | .long efi_call_phys |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 50f69c7eaaf4..2884e0c3e8a5 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
@@ -32,33 +32,6 @@ | |||
32 | #include <asm/processor-flags.h> | 32 | #include <asm/processor-flags.h> |
33 | #include <asm/asm-offsets.h> | 33 | #include <asm/asm-offsets.h> |
34 | 34 | ||
35 | /* | ||
36 | * Adjust our own GOT | ||
37 | * | ||
38 | * The relocation base must be in %rbx | ||
39 | * | ||
40 | * It is safe to call this macro more than once, because in some of the | ||
41 | * code paths multiple invocations are inevitable, e.g. via the efi* | ||
42 | * entry points. | ||
43 | * | ||
44 | * Relocation is only performed the first time. | ||
45 | */ | ||
46 | .macro FIXUP_GOT | ||
47 | cmpb $1, got_fixed(%rip) | ||
48 | je 2f | ||
49 | |||
50 | leaq _got(%rip), %rdx | ||
51 | leaq _egot(%rip), %rcx | ||
52 | 1: | ||
53 | cmpq %rcx, %rdx | ||
54 | jae 2f | ||
55 | addq %rbx, (%rdx) | ||
56 | addq $8, %rdx | ||
57 | jmp 1b | ||
58 | 2: | ||
59 | movb $1, got_fixed(%rip) | ||
60 | .endm | ||
61 | |||
62 | __HEAD | 35 | __HEAD |
63 | .code32 | 36 | .code32 |
64 | ENTRY(startup_32) | 37 | ENTRY(startup_32) |
@@ -279,13 +252,10 @@ ENTRY(efi_pe_entry) | |||
279 | subq $1b, %rbp | 252 | subq $1b, %rbp |
280 | 253 | ||
281 | /* | 254 | /* |
282 | * Relocate efi_config->call() and the GOT entries. | 255 | * Relocate efi_config->call(). |
283 | */ | 256 | */ |
284 | addq %rbp, efi64_config+88(%rip) | 257 | addq %rbp, efi64_config+88(%rip) |
285 | 258 | ||
286 | movq %rbp, %rbx | ||
287 | FIXUP_GOT | ||
288 | |||
289 | movq %rax, %rdi | 259 | movq %rax, %rdi |
290 | call make_boot_params | 260 | call make_boot_params |
291 | cmpq $0,%rax | 261 | cmpq $0,%rax |
@@ -301,13 +271,10 @@ handover_entry: | |||
301 | subq $1b, %rbp | 271 | subq $1b, %rbp |
302 | 272 | ||
303 | /* | 273 | /* |
304 | * Relocate efi_config->call() and the GOT entries. | 274 | * Relocate efi_config->call(). |
305 | */ | 275 | */ |
306 | movq efi_config(%rip), %rax | 276 | movq efi_config(%rip), %rax |
307 | addq %rbp, 88(%rax) | 277 | addq %rbp, 88(%rax) |
308 | |||
309 | movq %rbp, %rbx | ||
310 | FIXUP_GOT | ||
311 | 2: | 278 | 2: |
312 | movq efi_config(%rip), %rdi | 279 | movq efi_config(%rip), %rdi |
313 | call efi_main | 280 | call efi_main |
@@ -418,8 +385,19 @@ relocated: | |||
418 | shrq $3, %rcx | 385 | shrq $3, %rcx |
419 | rep stosq | 386 | rep stosq |
420 | 387 | ||
421 | FIXUP_GOT | 388 | /* |
422 | 389 | * Adjust our own GOT | |
390 | */ | ||
391 | leaq _got(%rip), %rdx | ||
392 | leaq _egot(%rip), %rcx | ||
393 | 1: | ||
394 | cmpq %rcx, %rdx | ||
395 | jae 2f | ||
396 | addq %rbx, (%rdx) | ||
397 | addq $8, %rdx | ||
398 | jmp 1b | ||
399 | 2: | ||
400 | |||
423 | /* | 401 | /* |
424 | * Do the decompression, and jump to the new kernel.. | 402 | * Do the decompression, and jump to the new kernel.. |
425 | */ | 403 | */ |
@@ -459,10 +437,6 @@ gdt: | |||
459 | .quad 0x0000000000000000 /* TS continued */ | 437 | .quad 0x0000000000000000 /* TS continued */ |
460 | gdt_end: | 438 | gdt_end: |
461 | 439 | ||
462 | /* Have we relocated the GOT? */ | ||
463 | got_fixed: | ||
464 | .byte 0 | ||
465 | |||
466 | #ifdef CONFIG_EFI_STUB | 440 | #ifdef CONFIG_EFI_STUB |
467 | efi_config: | 441 | efi_config: |
468 | .quad 0 | 442 | .quad 0 |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 888950f29fd9..a7ccd57f19e4 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -481,7 +481,7 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx, | |||
481 | crypto_inc(ctrblk, AES_BLOCK_SIZE); | 481 | crypto_inc(ctrblk, AES_BLOCK_SIZE); |
482 | } | 482 | } |
483 | 483 | ||
484 | #ifdef CONFIG_AS_AVX | 484 | #if 0 /* temporary disabled due to failing crypto tests */ |
485 | static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, | 485 | static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, |
486 | const u8 *in, unsigned int len, u8 *iv) | 486 | const u8 *in, unsigned int len, u8 *iv) |
487 | { | 487 | { |
@@ -1522,7 +1522,7 @@ static int __init aesni_init(void) | |||
1522 | aesni_gcm_dec_tfm = aesni_gcm_dec; | 1522 | aesni_gcm_dec_tfm = aesni_gcm_dec; |
1523 | } | 1523 | } |
1524 | aesni_ctr_enc_tfm = aesni_ctr_enc; | 1524 | aesni_ctr_enc_tfm = aesni_ctr_enc; |
1525 | #ifdef CONFIG_AS_AVX | 1525 | #if 0 /* temporary disabled due to failing crypto tests */ |
1526 | if (cpu_has_avx) { | 1526 | if (cpu_has_avx) { |
1527 | /* optimize performance of ctr mode encryption transform */ | 1527 | /* optimize performance of ctr mode encryption transform */ |
1528 | aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm; | 1528 | aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm; |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 044a2fd3c5fe..0ec241ede5a2 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -159,30 +159,6 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( | |||
159 | } | 159 | } |
160 | #endif /* CONFIG_EFI_MIXED */ | 160 | #endif /* CONFIG_EFI_MIXED */ |
161 | 161 | ||
162 | |||
163 | /* arch specific definitions used by the stub code */ | ||
164 | |||
165 | struct efi_config { | ||
166 | u64 image_handle; | ||
167 | u64 table; | ||
168 | u64 allocate_pool; | ||
169 | u64 allocate_pages; | ||
170 | u64 get_memory_map; | ||
171 | u64 free_pool; | ||
172 | u64 free_pages; | ||
173 | u64 locate_handle; | ||
174 | u64 handle_protocol; | ||
175 | u64 exit_boot_services; | ||
176 | u64 text_output; | ||
177 | efi_status_t (*call)(unsigned long, ...); | ||
178 | bool is64; | ||
179 | } __packed; | ||
180 | |||
181 | extern struct efi_config *efi_early; | ||
182 | |||
183 | #define efi_call_early(f, ...) \ | ||
184 | efi_early->call(efi_early->f, __VA_ARGS__); | ||
185 | |||
186 | extern bool efi_reboot_required(void); | 162 | extern bool efi_reboot_required(void); |
187 | 163 | ||
188 | #else | 164 | #else |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index b0910f97a3ea..ffb1733ac91f 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -106,14 +106,14 @@ enum fixed_addresses { | |||
106 | __end_of_permanent_fixed_addresses, | 106 | __end_of_permanent_fixed_addresses, |
107 | 107 | ||
108 | /* | 108 | /* |
109 | * 256 temporary boot-time mappings, used by early_ioremap(), | 109 | * 512 temporary boot-time mappings, used by early_ioremap(), |
110 | * before ioremap() is functional. | 110 | * before ioremap() is functional. |
111 | * | 111 | * |
112 | * If necessary we round it up to the next 256 pages boundary so | 112 | * If necessary we round it up to the next 512 pages boundary so |
113 | * that we can have a single pgd entry and a single pte table: | 113 | * that we can have a single pgd entry and a single pte table: |
114 | */ | 114 | */ |
115 | #define NR_FIX_BTMAPS 64 | 115 | #define NR_FIX_BTMAPS 64 |
116 | #define FIX_BTMAPS_SLOTS 4 | 116 | #define FIX_BTMAPS_SLOTS 8 |
117 | #define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) | 117 | #define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) |
118 | FIX_BTMAP_END = | 118 | FIX_BTMAP_END = |
119 | (__end_of_permanent_fixed_addresses ^ | 119 | (__end_of_permanent_fixed_addresses ^ |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 337ce5a9b15c..1183d545da1e 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -2623,6 +2623,7 @@ static struct irq_chip ioapic_chip __read_mostly = { | |||
2623 | .irq_eoi = ack_apic_level, | 2623 | .irq_eoi = ack_apic_level, |
2624 | .irq_set_affinity = native_ioapic_set_affinity, | 2624 | .irq_set_affinity = native_ioapic_set_affinity, |
2625 | .irq_retrigger = ioapic_retrigger_irq, | 2625 | .irq_retrigger = ioapic_retrigger_irq, |
2626 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
2626 | }; | 2627 | }; |
2627 | 2628 | ||
2628 | static inline void init_IO_APIC_traps(void) | 2629 | static inline void init_IO_APIC_traps(void) |
@@ -3173,6 +3174,7 @@ static struct irq_chip msi_chip = { | |||
3173 | .irq_ack = ack_apic_edge, | 3174 | .irq_ack = ack_apic_edge, |
3174 | .irq_set_affinity = msi_set_affinity, | 3175 | .irq_set_affinity = msi_set_affinity, |
3175 | .irq_retrigger = ioapic_retrigger_irq, | 3176 | .irq_retrigger = ioapic_retrigger_irq, |
3177 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
3176 | }; | 3178 | }; |
3177 | 3179 | ||
3178 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, | 3180 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
@@ -3271,6 +3273,7 @@ static struct irq_chip dmar_msi_type = { | |||
3271 | .irq_ack = ack_apic_edge, | 3273 | .irq_ack = ack_apic_edge, |
3272 | .irq_set_affinity = dmar_msi_set_affinity, | 3274 | .irq_set_affinity = dmar_msi_set_affinity, |
3273 | .irq_retrigger = ioapic_retrigger_irq, | 3275 | .irq_retrigger = ioapic_retrigger_irq, |
3276 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
3274 | }; | 3277 | }; |
3275 | 3278 | ||
3276 | int arch_setup_dmar_msi(unsigned int irq) | 3279 | int arch_setup_dmar_msi(unsigned int irq) |
@@ -3321,6 +3324,7 @@ static struct irq_chip hpet_msi_type = { | |||
3321 | .irq_ack = ack_apic_edge, | 3324 | .irq_ack = ack_apic_edge, |
3322 | .irq_set_affinity = hpet_msi_set_affinity, | 3325 | .irq_set_affinity = hpet_msi_set_affinity, |
3323 | .irq_retrigger = ioapic_retrigger_irq, | 3326 | .irq_retrigger = ioapic_retrigger_irq, |
3327 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
3324 | }; | 3328 | }; |
3325 | 3329 | ||
3326 | int default_setup_hpet_msi(unsigned int irq, unsigned int id) | 3330 | int default_setup_hpet_msi(unsigned int irq, unsigned int id) |
@@ -3384,6 +3388,7 @@ static struct irq_chip ht_irq_chip = { | |||
3384 | .irq_ack = ack_apic_edge, | 3388 | .irq_ack = ack_apic_edge, |
3385 | .irq_set_affinity = ht_set_affinity, | 3389 | .irq_set_affinity = ht_set_affinity, |
3386 | .irq_retrigger = ioapic_retrigger_irq, | 3390 | .irq_retrigger = ioapic_retrigger_irq, |
3391 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
3387 | }; | 3392 | }; |
3388 | 3393 | ||
3389 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | 3394 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2d872e08fab9..42a2dca984b3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1284,6 +1284,9 @@ static void remove_siblinginfo(int cpu) | |||
1284 | 1284 | ||
1285 | for_each_cpu(sibling, cpu_sibling_mask(cpu)) | 1285 | for_each_cpu(sibling, cpu_sibling_mask(cpu)) |
1286 | cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); | 1286 | cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); |
1287 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) | ||
1288 | cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); | ||
1289 | cpumask_clear(cpu_llc_shared_mask(cpu)); | ||
1287 | cpumask_clear(cpu_sibling_mask(cpu)); | 1290 | cpumask_clear(cpu_sibling_mask(cpu)); |
1288 | cpumask_clear(cpu_core_mask(cpu)); | 1291 | cpumask_clear(cpu_core_mask(cpu)); |
1289 | c->phys_proc_id = 0; | 1292 | c->phys_proc_id = 0; |
diff --git a/block/blk-exec.c b/block/blk-exec.c index f4d27b12c90b..9924725fa50d 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c | |||
@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
56 | bool is_pm_resume; | 56 | bool is_pm_resume; |
57 | 57 | ||
58 | WARN_ON(irqs_disabled()); | 58 | WARN_ON(irqs_disabled()); |
59 | WARN_ON(rq->cmd_type == REQ_TYPE_FS); | ||
59 | 60 | ||
60 | rq->rq_disk = bd_disk; | 61 | rq->rq_disk = bd_disk; |
61 | rq->end_io = done; | 62 | rq->end_io = done; |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 383ea0cb1f0a..df8e1e09dd17 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -119,7 +119,16 @@ void blk_mq_freeze_queue(struct request_queue *q) | |||
119 | spin_unlock_irq(q->queue_lock); | 119 | spin_unlock_irq(q->queue_lock); |
120 | 120 | ||
121 | if (freeze) { | 121 | if (freeze) { |
122 | percpu_ref_kill(&q->mq_usage_counter); | 122 | /* |
123 | * XXX: Temporary kludge to work around SCSI blk-mq stall. | ||
124 | * SCSI synchronously creates and destroys many queues | ||
125 | * back-to-back during probe leading to lengthy stalls. | ||
126 | * This will be fixed by keeping ->mq_usage_counter in | ||
127 | * atomic mode until genhd registration, but, for now, | ||
128 | * let's work around using expedited synchronization. | ||
129 | */ | ||
130 | __percpu_ref_kill_expedited(&q->mq_usage_counter); | ||
131 | |||
123 | blk_mq_run_queues(q, false); | 132 | blk_mq_run_queues(q, false); |
124 | } | 133 | } |
125 | wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); | 134 | wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); |
@@ -203,7 +212,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw) | |||
203 | if (tag != BLK_MQ_TAG_FAIL) { | 212 | if (tag != BLK_MQ_TAG_FAIL) { |
204 | rq = data->hctx->tags->rqs[tag]; | 213 | rq = data->hctx->tags->rqs[tag]; |
205 | 214 | ||
206 | rq->cmd_flags = 0; | ||
207 | if (blk_mq_tag_busy(data->hctx)) { | 215 | if (blk_mq_tag_busy(data->hctx)) { |
208 | rq->cmd_flags = REQ_MQ_INFLIGHT; | 216 | rq->cmd_flags = REQ_MQ_INFLIGHT; |
209 | atomic_inc(&data->hctx->nr_active); | 217 | atomic_inc(&data->hctx->nr_active); |
@@ -258,6 +266,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, | |||
258 | 266 | ||
259 | if (rq->cmd_flags & REQ_MQ_INFLIGHT) | 267 | if (rq->cmd_flags & REQ_MQ_INFLIGHT) |
260 | atomic_dec(&hctx->nr_active); | 268 | atomic_dec(&hctx->nr_active); |
269 | rq->cmd_flags = 0; | ||
261 | 270 | ||
262 | clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); | 271 | clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); |
263 | blk_mq_put_tag(hctx, tag, &ctx->last_tag); | 272 | blk_mq_put_tag(hctx, tag, &ctx->last_tag); |
@@ -393,6 +402,12 @@ static void blk_mq_start_request(struct request *rq, bool last) | |||
393 | blk_add_timer(rq); | 402 | blk_add_timer(rq); |
394 | 403 | ||
395 | /* | 404 | /* |
405 | * Ensure that ->deadline is visible before set the started | ||
406 | * flag and clear the completed flag. | ||
407 | */ | ||
408 | smp_mb__before_atomic(); | ||
409 | |||
410 | /* | ||
396 | * Mark us as started and clear complete. Complete might have been | 411 | * Mark us as started and clear complete. Complete might have been |
397 | * set if requeue raced with timeout, which then marked it as | 412 | * set if requeue raced with timeout, which then marked it as |
398 | * complete. So be sure to clear complete again when we start | 413 | * complete. So be sure to clear complete again when we start |
@@ -473,7 +488,11 @@ static void blk_mq_requeue_work(struct work_struct *work) | |||
473 | blk_mq_insert_request(rq, false, false, false); | 488 | blk_mq_insert_request(rq, false, false, false); |
474 | } | 489 | } |
475 | 490 | ||
476 | blk_mq_run_queues(q, false); | 491 | /* |
492 | * Use the start variant of queue running here, so that running | ||
493 | * the requeue work will kick stopped queues. | ||
494 | */ | ||
495 | blk_mq_start_hw_queues(q); | ||
477 | } | 496 | } |
478 | 497 | ||
479 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) | 498 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) |
@@ -957,14 +976,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, | |||
957 | 976 | ||
958 | hctx = q->mq_ops->map_queue(q, ctx->cpu); | 977 | hctx = q->mq_ops->map_queue(q, ctx->cpu); |
959 | 978 | ||
960 | if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) && | 979 | spin_lock(&ctx->lock); |
961 | !(rq->cmd_flags & (REQ_FLUSH_SEQ))) { | 980 | __blk_mq_insert_request(hctx, rq, at_head); |
962 | blk_insert_flush(rq); | 981 | spin_unlock(&ctx->lock); |
963 | } else { | ||
964 | spin_lock(&ctx->lock); | ||
965 | __blk_mq_insert_request(hctx, rq, at_head); | ||
966 | spin_unlock(&ctx->lock); | ||
967 | } | ||
968 | 982 | ||
969 | if (run_queue) | 983 | if (run_queue) |
970 | blk_mq_run_hw_queue(hctx, async); | 984 | blk_mq_run_hw_queue(hctx, async); |
@@ -1404,6 +1418,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, | |||
1404 | left -= to_do * rq_size; | 1418 | left -= to_do * rq_size; |
1405 | for (j = 0; j < to_do; j++) { | 1419 | for (j = 0; j < to_do; j++) { |
1406 | tags->rqs[i] = p; | 1420 | tags->rqs[i] = p; |
1421 | tags->rqs[i]->atomic_flags = 0; | ||
1422 | tags->rqs[i]->cmd_flags = 0; | ||
1407 | if (set->ops->init_request) { | 1423 | if (set->ops->init_request) { |
1408 | if (set->ops->init_request(set->driver_data, | 1424 | if (set->ops->init_request(set->driver_data, |
1409 | tags->rqs[i], hctx_idx, i, | 1425 | tags->rqs[i], hctx_idx, i, |
@@ -1956,7 +1972,6 @@ out_unwind: | |||
1956 | while (--i >= 0) | 1972 | while (--i >= 0) |
1957 | blk_mq_free_rq_map(set, set->tags[i], i); | 1973 | blk_mq_free_rq_map(set, set->tags[i], i); |
1958 | 1974 | ||
1959 | set->tags = NULL; | ||
1960 | return -ENOMEM; | 1975 | return -ENOMEM; |
1961 | } | 1976 | } |
1962 | 1977 | ||
diff --git a/block/genhd.c b/block/genhd.c index 09da5e4a8e03..e6723bd4d7a1 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -445,8 +445,6 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) | |||
445 | */ | 445 | */ |
446 | void blk_free_devt(dev_t devt) | 446 | void blk_free_devt(dev_t devt) |
447 | { | 447 | { |
448 | might_sleep(); | ||
449 | |||
450 | if (devt == MKDEV(0, 0)) | 448 | if (devt == MKDEV(0, 0)) |
451 | return; | 449 | return; |
452 | 450 | ||
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index fddc1e86f9d0..93d160661f4c 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -54,55 +54,58 @@ ACPI_MODULE_NAME("acpi_lpss"); | |||
54 | 54 | ||
55 | #define LPSS_PRV_REG_COUNT 9 | 55 | #define LPSS_PRV_REG_COUNT 9 |
56 | 56 | ||
57 | struct lpss_shared_clock { | 57 | /* LPSS Flags */ |
58 | const char *name; | 58 | #define LPSS_CLK BIT(0) |
59 | unsigned long rate; | 59 | #define LPSS_CLK_GATE BIT(1) |
60 | struct clk *clk; | 60 | #define LPSS_CLK_DIVIDER BIT(2) |
61 | }; | 61 | #define LPSS_LTR BIT(3) |
62 | #define LPSS_SAVE_CTX BIT(4) | ||
62 | 63 | ||
63 | struct lpss_private_data; | 64 | struct lpss_private_data; |
64 | 65 | ||
65 | struct lpss_device_desc { | 66 | struct lpss_device_desc { |
66 | bool clk_required; | 67 | unsigned int flags; |
67 | const char *clkdev_name; | ||
68 | bool ltr_required; | ||
69 | unsigned int prv_offset; | 68 | unsigned int prv_offset; |
70 | size_t prv_size_override; | 69 | size_t prv_size_override; |
71 | bool clk_divider; | ||
72 | bool clk_gate; | ||
73 | bool save_ctx; | ||
74 | struct lpss_shared_clock *shared_clock; | ||
75 | void (*setup)(struct lpss_private_data *pdata); | 70 | void (*setup)(struct lpss_private_data *pdata); |
76 | }; | 71 | }; |
77 | 72 | ||
78 | static struct lpss_device_desc lpss_dma_desc = { | 73 | static struct lpss_device_desc lpss_dma_desc = { |
79 | .clk_required = true, | 74 | .flags = LPSS_CLK, |
80 | .clkdev_name = "hclk", | ||
81 | }; | 75 | }; |
82 | 76 | ||
83 | struct lpss_private_data { | 77 | struct lpss_private_data { |
84 | void __iomem *mmio_base; | 78 | void __iomem *mmio_base; |
85 | resource_size_t mmio_size; | 79 | resource_size_t mmio_size; |
80 | unsigned int fixed_clk_rate; | ||
86 | struct clk *clk; | 81 | struct clk *clk; |
87 | const struct lpss_device_desc *dev_desc; | 82 | const struct lpss_device_desc *dev_desc; |
88 | u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; | 83 | u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; |
89 | }; | 84 | }; |
90 | 85 | ||
86 | /* UART Component Parameter Register */ | ||
87 | #define LPSS_UART_CPR 0xF4 | ||
88 | #define LPSS_UART_CPR_AFCE BIT(4) | ||
89 | |||
91 | static void lpss_uart_setup(struct lpss_private_data *pdata) | 90 | static void lpss_uart_setup(struct lpss_private_data *pdata) |
92 | { | 91 | { |
93 | unsigned int offset; | 92 | unsigned int offset; |
94 | u32 reg; | 93 | u32 val; |
95 | 94 | ||
96 | offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; | 95 | offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; |
97 | reg = readl(pdata->mmio_base + offset); | 96 | val = readl(pdata->mmio_base + offset); |
98 | writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset); | 97 | writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset); |
99 | 98 | ||
100 | offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; | 99 | val = readl(pdata->mmio_base + LPSS_UART_CPR); |
101 | reg = readl(pdata->mmio_base + offset); | 100 | if (!(val & LPSS_UART_CPR_AFCE)) { |
102 | writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset); | 101 | offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; |
102 | val = readl(pdata->mmio_base + offset); | ||
103 | val |= LPSS_GENERAL_UART_RTS_OVRD; | ||
104 | writel(val, pdata->mmio_base + offset); | ||
105 | } | ||
103 | } | 106 | } |
104 | 107 | ||
105 | static void lpss_i2c_setup(struct lpss_private_data *pdata) | 108 | static void byt_i2c_setup(struct lpss_private_data *pdata) |
106 | { | 109 | { |
107 | unsigned int offset; | 110 | unsigned int offset; |
108 | u32 val; | 111 | u32 val; |
@@ -111,100 +114,56 @@ static void lpss_i2c_setup(struct lpss_private_data *pdata) | |||
111 | val = readl(pdata->mmio_base + offset); | 114 | val = readl(pdata->mmio_base + offset); |
112 | val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; | 115 | val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; |
113 | writel(val, pdata->mmio_base + offset); | 116 | writel(val, pdata->mmio_base + offset); |
114 | } | ||
115 | 117 | ||
116 | static struct lpss_device_desc wpt_dev_desc = { | 118 | if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset)) |
117 | .clk_required = true, | 119 | pdata->fixed_clk_rate = 133000000; |
118 | .prv_offset = 0x800, | 120 | } |
119 | .ltr_required = true, | ||
120 | .clk_divider = true, | ||
121 | .clk_gate = true, | ||
122 | }; | ||
123 | 121 | ||
124 | static struct lpss_device_desc lpt_dev_desc = { | 122 | static struct lpss_device_desc lpt_dev_desc = { |
125 | .clk_required = true, | 123 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, |
126 | .prv_offset = 0x800, | 124 | .prv_offset = 0x800, |
127 | .ltr_required = true, | ||
128 | .clk_divider = true, | ||
129 | .clk_gate = true, | ||
130 | }; | 125 | }; |
131 | 126 | ||
132 | static struct lpss_device_desc lpt_i2c_dev_desc = { | 127 | static struct lpss_device_desc lpt_i2c_dev_desc = { |
133 | .clk_required = true, | 128 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, |
134 | .prv_offset = 0x800, | 129 | .prv_offset = 0x800, |
135 | .ltr_required = true, | ||
136 | .clk_gate = true, | ||
137 | }; | 130 | }; |
138 | 131 | ||
139 | static struct lpss_device_desc lpt_uart_dev_desc = { | 132 | static struct lpss_device_desc lpt_uart_dev_desc = { |
140 | .clk_required = true, | 133 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, |
141 | .prv_offset = 0x800, | 134 | .prv_offset = 0x800, |
142 | .ltr_required = true, | ||
143 | .clk_divider = true, | ||
144 | .clk_gate = true, | ||
145 | .setup = lpss_uart_setup, | 135 | .setup = lpss_uart_setup, |
146 | }; | 136 | }; |
147 | 137 | ||
148 | static struct lpss_device_desc lpt_sdio_dev_desc = { | 138 | static struct lpss_device_desc lpt_sdio_dev_desc = { |
139 | .flags = LPSS_LTR, | ||
149 | .prv_offset = 0x1000, | 140 | .prv_offset = 0x1000, |
150 | .prv_size_override = 0x1018, | 141 | .prv_size_override = 0x1018, |
151 | .ltr_required = true, | ||
152 | }; | ||
153 | |||
154 | static struct lpss_shared_clock pwm_clock = { | ||
155 | .name = "pwm_clk", | ||
156 | .rate = 25000000, | ||
157 | }; | 142 | }; |
158 | 143 | ||
159 | static struct lpss_device_desc byt_pwm_dev_desc = { | 144 | static struct lpss_device_desc byt_pwm_dev_desc = { |
160 | .clk_required = true, | 145 | .flags = LPSS_SAVE_CTX, |
161 | .save_ctx = true, | ||
162 | .shared_clock = &pwm_clock, | ||
163 | }; | 146 | }; |
164 | 147 | ||
165 | static struct lpss_device_desc byt_uart_dev_desc = { | 148 | static struct lpss_device_desc byt_uart_dev_desc = { |
166 | .clk_required = true, | 149 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, |
167 | .prv_offset = 0x800, | 150 | .prv_offset = 0x800, |
168 | .clk_divider = true, | ||
169 | .clk_gate = true, | ||
170 | .save_ctx = true, | ||
171 | .setup = lpss_uart_setup, | 151 | .setup = lpss_uart_setup, |
172 | }; | 152 | }; |
173 | 153 | ||
174 | static struct lpss_device_desc byt_spi_dev_desc = { | 154 | static struct lpss_device_desc byt_spi_dev_desc = { |
175 | .clk_required = true, | 155 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, |
176 | .prv_offset = 0x400, | 156 | .prv_offset = 0x400, |
177 | .clk_divider = true, | ||
178 | .clk_gate = true, | ||
179 | .save_ctx = true, | ||
180 | }; | 157 | }; |
181 | 158 | ||
182 | static struct lpss_device_desc byt_sdio_dev_desc = { | 159 | static struct lpss_device_desc byt_sdio_dev_desc = { |
183 | .clk_required = true, | 160 | .flags = LPSS_CLK, |
184 | }; | ||
185 | |||
186 | static struct lpss_shared_clock i2c_clock = { | ||
187 | .name = "i2c_clk", | ||
188 | .rate = 100000000, | ||
189 | }; | 161 | }; |
190 | 162 | ||
191 | static struct lpss_device_desc byt_i2c_dev_desc = { | 163 | static struct lpss_device_desc byt_i2c_dev_desc = { |
192 | .clk_required = true, | 164 | .flags = LPSS_CLK | LPSS_SAVE_CTX, |
193 | .prv_offset = 0x800, | 165 | .prv_offset = 0x800, |
194 | .save_ctx = true, | 166 | .setup = byt_i2c_setup, |
195 | .shared_clock = &i2c_clock, | ||
196 | .setup = lpss_i2c_setup, | ||
197 | }; | ||
198 | |||
199 | static struct lpss_shared_clock bsw_pwm_clock = { | ||
200 | .name = "pwm_clk", | ||
201 | .rate = 19200000, | ||
202 | }; | ||
203 | |||
204 | static struct lpss_device_desc bsw_pwm_dev_desc = { | ||
205 | .clk_required = true, | ||
206 | .save_ctx = true, | ||
207 | .shared_clock = &bsw_pwm_clock, | ||
208 | }; | 167 | }; |
209 | 168 | ||
210 | #else | 169 | #else |
@@ -237,7 +196,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { | |||
237 | { "INT33FC", }, | 196 | { "INT33FC", }, |
238 | 197 | ||
239 | /* Braswell LPSS devices */ | 198 | /* Braswell LPSS devices */ |
240 | { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, | 199 | { "80862288", LPSS_ADDR(byt_pwm_dev_desc) }, |
241 | { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, | 200 | { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, |
242 | { "8086228E", LPSS_ADDR(byt_spi_dev_desc) }, | 201 | { "8086228E", LPSS_ADDR(byt_spi_dev_desc) }, |
243 | { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, | 202 | { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, |
@@ -251,7 +210,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { | |||
251 | { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, | 210 | { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, |
252 | { "INT3437", }, | 211 | { "INT3437", }, |
253 | 212 | ||
254 | { "INT3438", LPSS_ADDR(wpt_dev_desc) }, | 213 | /* Wildcat Point LPSS devices */ |
214 | { "INT3438", LPSS_ADDR(lpt_dev_desc) }, | ||
255 | 215 | ||
256 | { } | 216 | { } |
257 | }; | 217 | }; |
@@ -276,7 +236,6 @@ static int register_device_clock(struct acpi_device *adev, | |||
276 | struct lpss_private_data *pdata) | 236 | struct lpss_private_data *pdata) |
277 | { | 237 | { |
278 | const struct lpss_device_desc *dev_desc = pdata->dev_desc; | 238 | const struct lpss_device_desc *dev_desc = pdata->dev_desc; |
279 | struct lpss_shared_clock *shared_clock = dev_desc->shared_clock; | ||
280 | const char *devname = dev_name(&adev->dev); | 239 | const char *devname = dev_name(&adev->dev); |
281 | struct clk *clk = ERR_PTR(-ENODEV); | 240 | struct clk *clk = ERR_PTR(-ENODEV); |
282 | struct lpss_clk_data *clk_data; | 241 | struct lpss_clk_data *clk_data; |
@@ -289,12 +248,7 @@ static int register_device_clock(struct acpi_device *adev, | |||
289 | clk_data = platform_get_drvdata(lpss_clk_dev); | 248 | clk_data = platform_get_drvdata(lpss_clk_dev); |
290 | if (!clk_data) | 249 | if (!clk_data) |
291 | return -ENODEV; | 250 | return -ENODEV; |
292 | 251 | clk = clk_data->clk; | |
293 | if (dev_desc->clkdev_name) { | ||
294 | clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name, | ||
295 | devname); | ||
296 | return 0; | ||
297 | } | ||
298 | 252 | ||
299 | if (!pdata->mmio_base | 253 | if (!pdata->mmio_base |
300 | || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) | 254 | || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) |
@@ -303,24 +257,19 @@ static int register_device_clock(struct acpi_device *adev, | |||
303 | parent = clk_data->name; | 257 | parent = clk_data->name; |
304 | prv_base = pdata->mmio_base + dev_desc->prv_offset; | 258 | prv_base = pdata->mmio_base + dev_desc->prv_offset; |
305 | 259 | ||
306 | if (shared_clock) { | 260 | if (pdata->fixed_clk_rate) { |
307 | clk = shared_clock->clk; | 261 | clk = clk_register_fixed_rate(NULL, devname, parent, 0, |
308 | if (!clk) { | 262 | pdata->fixed_clk_rate); |
309 | clk = clk_register_fixed_rate(NULL, shared_clock->name, | 263 | goto out; |
310 | "lpss_clk", 0, | ||
311 | shared_clock->rate); | ||
312 | shared_clock->clk = clk; | ||
313 | } | ||
314 | parent = shared_clock->name; | ||
315 | } | 264 | } |
316 | 265 | ||
317 | if (dev_desc->clk_gate) { | 266 | if (dev_desc->flags & LPSS_CLK_GATE) { |
318 | clk = clk_register_gate(NULL, devname, parent, 0, | 267 | clk = clk_register_gate(NULL, devname, parent, 0, |
319 | prv_base, 0, 0, NULL); | 268 | prv_base, 0, 0, NULL); |
320 | parent = devname; | 269 | parent = devname; |
321 | } | 270 | } |
322 | 271 | ||
323 | if (dev_desc->clk_divider) { | 272 | if (dev_desc->flags & LPSS_CLK_DIVIDER) { |
324 | /* Prevent division by zero */ | 273 | /* Prevent division by zero */ |
325 | if (!readl(prv_base)) | 274 | if (!readl(prv_base)) |
326 | writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base); | 275 | writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base); |
@@ -344,7 +293,7 @@ static int register_device_clock(struct acpi_device *adev, | |||
344 | kfree(parent); | 293 | kfree(parent); |
345 | kfree(clk_name); | 294 | kfree(clk_name); |
346 | } | 295 | } |
347 | 296 | out: | |
348 | if (IS_ERR(clk)) | 297 | if (IS_ERR(clk)) |
349 | return PTR_ERR(clk); | 298 | return PTR_ERR(clk); |
350 | 299 | ||
@@ -392,7 +341,10 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
392 | 341 | ||
393 | pdata->dev_desc = dev_desc; | 342 | pdata->dev_desc = dev_desc; |
394 | 343 | ||
395 | if (dev_desc->clk_required) { | 344 | if (dev_desc->setup) |
345 | dev_desc->setup(pdata); | ||
346 | |||
347 | if (dev_desc->flags & LPSS_CLK) { | ||
396 | ret = register_device_clock(adev, pdata); | 348 | ret = register_device_clock(adev, pdata); |
397 | if (ret) { | 349 | if (ret) { |
398 | /* Skip the device, but continue the namespace scan. */ | 350 | /* Skip the device, but continue the namespace scan. */ |
@@ -413,13 +365,9 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
413 | goto err_out; | 365 | goto err_out; |
414 | } | 366 | } |
415 | 367 | ||
416 | if (dev_desc->setup) | ||
417 | dev_desc->setup(pdata); | ||
418 | |||
419 | adev->driver_data = pdata; | 368 | adev->driver_data = pdata; |
420 | pdev = acpi_create_platform_device(adev); | 369 | pdev = acpi_create_platform_device(adev); |
421 | if (!IS_ERR_OR_NULL(pdev)) { | 370 | if (!IS_ERR_OR_NULL(pdev)) { |
422 | device_enable_async_suspend(&pdev->dev); | ||
423 | return 1; | 371 | return 1; |
424 | } | 372 | } |
425 | 373 | ||
@@ -693,19 +641,19 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, | |||
693 | 641 | ||
694 | switch (action) { | 642 | switch (action) { |
695 | case BUS_NOTIFY_BOUND_DRIVER: | 643 | case BUS_NOTIFY_BOUND_DRIVER: |
696 | if (pdata->dev_desc->save_ctx) | 644 | if (pdata->dev_desc->flags & LPSS_SAVE_CTX) |
697 | pdev->dev.pm_domain = &acpi_lpss_pm_domain; | 645 | pdev->dev.pm_domain = &acpi_lpss_pm_domain; |
698 | break; | 646 | break; |
699 | case BUS_NOTIFY_UNBOUND_DRIVER: | 647 | case BUS_NOTIFY_UNBOUND_DRIVER: |
700 | if (pdata->dev_desc->save_ctx) | 648 | if (pdata->dev_desc->flags & LPSS_SAVE_CTX) |
701 | pdev->dev.pm_domain = NULL; | 649 | pdev->dev.pm_domain = NULL; |
702 | break; | 650 | break; |
703 | case BUS_NOTIFY_ADD_DEVICE: | 651 | case BUS_NOTIFY_ADD_DEVICE: |
704 | if (pdata->dev_desc->ltr_required) | 652 | if (pdata->dev_desc->flags & LPSS_LTR) |
705 | return sysfs_create_group(&pdev->dev.kobj, | 653 | return sysfs_create_group(&pdev->dev.kobj, |
706 | &lpss_attr_group); | 654 | &lpss_attr_group); |
707 | case BUS_NOTIFY_DEL_DEVICE: | 655 | case BUS_NOTIFY_DEL_DEVICE: |
708 | if (pdata->dev_desc->ltr_required) | 656 | if (pdata->dev_desc->flags & LPSS_LTR) |
709 | sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); | 657 | sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); |
710 | default: | 658 | default: |
711 | break; | 659 | break; |
@@ -722,7 +670,7 @@ static void acpi_lpss_bind(struct device *dev) | |||
722 | { | 670 | { |
723 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); | 671 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
724 | 672 | ||
725 | if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required) | 673 | if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR)) |
726 | return; | 674 | return; |
727 | 675 | ||
728 | if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) | 676 | if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) |
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 996fa1959eea..f30c40796856 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c | |||
@@ -132,10 +132,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { | |||
132 | {"PNP0401"}, /* ECP Printer Port */ | 132 | {"PNP0401"}, /* ECP Printer Port */ |
133 | /* apple-gmux */ | 133 | /* apple-gmux */ |
134 | {"APP000B"}, | 134 | {"APP000B"}, |
135 | /* fujitsu-laptop.c */ | ||
136 | {"FUJ02bf"}, | ||
137 | {"FUJ02B1"}, | ||
138 | {"FUJ02E3"}, | ||
139 | /* system */ | 135 | /* system */ |
140 | {"PNP0c02"}, /* General ID for reserving resources */ | 136 | {"PNP0c02"}, /* General ID for reserving resources */ |
141 | {"PNP0c01"}, /* memory controller */ | 137 | {"PNP0c01"}, /* memory controller */ |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 1f9aba5fb81f..2747279fbe3c 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -254,6 +254,7 @@ struct acpi_create_field_info { | |||
254 | u32 field_bit_position; | 254 | u32 field_bit_position; |
255 | u32 field_bit_length; | 255 | u32 field_bit_length; |
256 | u16 resource_length; | 256 | u16 resource_length; |
257 | u16 pin_number_index; | ||
257 | u8 field_flags; | 258 | u8 field_flags; |
258 | u8 attribute; | 259 | u8 attribute; |
259 | u8 field_type; | 260 | u8 field_type; |
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 22fb6449d3d6..8abb393dafab 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h | |||
@@ -264,6 +264,7 @@ struct acpi_object_region_field { | |||
264 | ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length; | 264 | ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length; |
265 | union acpi_operand_object *region_obj; /* Containing op_region object */ | 265 | union acpi_operand_object *region_obj; /* Containing op_region object */ |
266 | u8 *resource_buffer; /* resource_template for serial regions/fields */ | 266 | u8 *resource_buffer; /* resource_template for serial regions/fields */ |
267 | u16 pin_number_index; /* Index relative to previous Connection/Template */ | ||
267 | }; | 268 | }; |
268 | 269 | ||
269 | struct acpi_object_bank_field { | 270 | struct acpi_object_bank_field { |
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 3661c8e90540..c57666196672 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c | |||
@@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, | |||
360 | */ | 360 | */ |
361 | info->resource_buffer = NULL; | 361 | info->resource_buffer = NULL; |
362 | info->connection_node = NULL; | 362 | info->connection_node = NULL; |
363 | info->pin_number_index = 0; | ||
363 | 364 | ||
364 | /* | 365 | /* |
365 | * A Connection() is either an actual resource descriptor (buffer) | 366 | * A Connection() is either an actual resource descriptor (buffer) |
@@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, | |||
437 | } | 438 | } |
438 | 439 | ||
439 | info->field_bit_position += info->field_bit_length; | 440 | info->field_bit_position += info->field_bit_length; |
441 | info->pin_number_index++; /* Index relative to previous Connection() */ | ||
440 | break; | 442 | break; |
441 | 443 | ||
442 | default: | 444 | default: |
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 9957297d1580..8eb8575e8c16 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c | |||
@@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
142 | union acpi_operand_object *region_obj2; | 142 | union acpi_operand_object *region_obj2; |
143 | void *region_context = NULL; | 143 | void *region_context = NULL; |
144 | struct acpi_connection_info *context; | 144 | struct acpi_connection_info *context; |
145 | acpi_physical_address address; | ||
145 | 146 | ||
146 | ACPI_FUNCTION_TRACE(ev_address_space_dispatch); | 147 | ACPI_FUNCTION_TRACE(ev_address_space_dispatch); |
147 | 148 | ||
@@ -231,25 +232,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
231 | /* We have everything we need, we can invoke the address space handler */ | 232 | /* We have everything we need, we can invoke the address space handler */ |
232 | 233 | ||
233 | handler = handler_desc->address_space.handler; | 234 | handler = handler_desc->address_space.handler; |
234 | 235 | address = (region_obj->region.address + region_offset); | |
235 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
236 | "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", | ||
237 | ®ion_obj->region.handler->address_space, handler, | ||
238 | ACPI_FORMAT_NATIVE_UINT(region_obj->region.address + | ||
239 | region_offset), | ||
240 | acpi_ut_get_region_name(region_obj->region. | ||
241 | space_id))); | ||
242 | 236 | ||
243 | /* | 237 | /* |
244 | * Special handling for generic_serial_bus and general_purpose_io: | 238 | * Special handling for generic_serial_bus and general_purpose_io: |
245 | * There are three extra parameters that must be passed to the | 239 | * There are three extra parameters that must be passed to the |
246 | * handler via the context: | 240 | * handler via the context: |
247 | * 1) Connection buffer, a resource template from Connection() op. | 241 | * 1) Connection buffer, a resource template from Connection() op |
248 | * 2) Length of the above buffer. | 242 | * 2) Length of the above buffer |
249 | * 3) Actual access length from the access_as() op. | 243 | * 3) Actual access length from the access_as() op |
244 | * | ||
245 | * In addition, for general_purpose_io, the Address and bit_width fields | ||
246 | * are defined as follows: | ||
247 | * 1) Address is the pin number index of the field (bit offset from | ||
248 | * the previous Connection) | ||
249 | * 2) bit_width is the actual bit length of the field (number of pins) | ||
250 | */ | 250 | */ |
251 | if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) || | 251 | if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) && |
252 | (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) && | ||
253 | context && field_obj) { | 252 | context && field_obj) { |
254 | 253 | ||
255 | /* Get the Connection (resource_template) buffer */ | 254 | /* Get the Connection (resource_template) buffer */ |
@@ -258,6 +257,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
258 | context->length = field_obj->field.resource_length; | 257 | context->length = field_obj->field.resource_length; |
259 | context->access_length = field_obj->field.access_length; | 258 | context->access_length = field_obj->field.access_length; |
260 | } | 259 | } |
260 | if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) && | ||
261 | context && field_obj) { | ||
262 | |||
263 | /* Get the Connection (resource_template) buffer */ | ||
264 | |||
265 | context->connection = field_obj->field.resource_buffer; | ||
266 | context->length = field_obj->field.resource_length; | ||
267 | context->access_length = field_obj->field.access_length; | ||
268 | address = field_obj->field.pin_number_index; | ||
269 | bit_width = field_obj->field.bit_length; | ||
270 | } | ||
271 | |||
272 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
273 | "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", | ||
274 | ®ion_obj->region.handler->address_space, handler, | ||
275 | ACPI_FORMAT_NATIVE_UINT(address), | ||
276 | acpi_ut_get_region_name(region_obj->region. | ||
277 | space_id))); | ||
261 | 278 | ||
262 | if (!(handler_desc->address_space.handler_flags & | 279 | if (!(handler_desc->address_space.handler_flags & |
263 | ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { | 280 | ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { |
@@ -271,9 +288,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
271 | 288 | ||
272 | /* Call the handler */ | 289 | /* Call the handler */ |
273 | 290 | ||
274 | status = handler(function, | 291 | status = handler(function, address, bit_width, value, context, |
275 | (region_obj->region.address + region_offset), | ||
276 | bit_width, value, context, | ||
277 | region_obj2->extra.region_context); | 292 | region_obj2->extra.region_context); |
278 | 293 | ||
279 | if (ACPI_FAILURE(status)) { | 294 | if (ACPI_FAILURE(status)) { |
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 0cf159cc6e6d..56710a03c9b0 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -596,6 +596,38 @@ acpi_status acpi_enable_all_runtime_gpes(void) | |||
596 | 596 | ||
597 | ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) | 597 | ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) |
598 | 598 | ||
599 | /****************************************************************************** | ||
600 | * | ||
601 | * FUNCTION: acpi_enable_all_wakeup_gpes | ||
602 | * | ||
603 | * PARAMETERS: None | ||
604 | * | ||
605 | * RETURN: Status | ||
606 | * | ||
607 | * DESCRIPTION: Enable all "wakeup" GPEs and disable all of the other GPEs, in | ||
608 | * all GPE blocks. | ||
609 | * | ||
610 | ******************************************************************************/ | ||
611 | |||
612 | acpi_status acpi_enable_all_wakeup_gpes(void) | ||
613 | { | ||
614 | acpi_status status; | ||
615 | |||
616 | ACPI_FUNCTION_TRACE(acpi_enable_all_wakeup_gpes); | ||
617 | |||
618 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
619 | if (ACPI_FAILURE(status)) { | ||
620 | return_ACPI_STATUS(status); | ||
621 | } | ||
622 | |||
623 | status = acpi_hw_enable_all_wakeup_gpes(); | ||
624 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
625 | |||
626 | return_ACPI_STATUS(status); | ||
627 | } | ||
628 | |||
629 | ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes) | ||
630 | |||
599 | /******************************************************************************* | 631 | /******************************************************************************* |
600 | * | 632 | * |
601 | * FUNCTION: acpi_install_gpe_block | 633 | * FUNCTION: acpi_install_gpe_block |
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index 6907ce0c704c..b994845ed359 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c | |||
@@ -253,6 +253,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state, | |||
253 | buffer = &buffer_desc->integer.value; | 253 | buffer = &buffer_desc->integer.value; |
254 | } | 254 | } |
255 | 255 | ||
256 | if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && | ||
257 | (obj_desc->field.region_obj->region.space_id == | ||
258 | ACPI_ADR_SPACE_GPIO)) { | ||
259 | /* | ||
260 | * For GPIO (general_purpose_io), the Address will be the bit offset | ||
261 | * from the previous Connection() operator, making it effectively a | ||
262 | * pin number index. The bit_length is the length of the field, which | ||
263 | * is thus the number of pins. | ||
264 | */ | ||
265 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | ||
266 | "GPIO FieldRead [FROM]: Pin %u Bits %u\n", | ||
267 | obj_desc->field.pin_number_index, | ||
268 | obj_desc->field.bit_length)); | ||
269 | |||
270 | /* Lock entire transaction if requested */ | ||
271 | |||
272 | acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags); | ||
273 | |||
274 | /* Perform the write */ | ||
275 | |||
276 | status = acpi_ex_access_region(obj_desc, 0, | ||
277 | (u64 *)buffer, ACPI_READ); | ||
278 | acpi_ex_release_global_lock(obj_desc->common_field.field_flags); | ||
279 | if (ACPI_FAILURE(status)) { | ||
280 | acpi_ut_remove_reference(buffer_desc); | ||
281 | } else { | ||
282 | *ret_buffer_desc = buffer_desc; | ||
283 | } | ||
284 | return_ACPI_STATUS(status); | ||
285 | } | ||
286 | |||
256 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | 287 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, |
257 | "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n", | 288 | "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n", |
258 | obj_desc, obj_desc->common.type, buffer, | 289 | obj_desc, obj_desc->common.type, buffer, |
@@ -413,6 +444,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, | |||
413 | 444 | ||
414 | *result_desc = buffer_desc; | 445 | *result_desc = buffer_desc; |
415 | return_ACPI_STATUS(status); | 446 | return_ACPI_STATUS(status); |
447 | } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && | ||
448 | (obj_desc->field.region_obj->region.space_id == | ||
449 | ACPI_ADR_SPACE_GPIO)) { | ||
450 | /* | ||
451 | * For GPIO (general_purpose_io), we will bypass the entire field | ||
452 | * mechanism and handoff the bit address and bit width directly to | ||
453 | * the handler. The Address will be the bit offset | ||
454 | * from the previous Connection() operator, making it effectively a | ||
455 | * pin number index. The bit_length is the length of the field, which | ||
456 | * is thus the number of pins. | ||
457 | */ | ||
458 | if (source_desc->common.type != ACPI_TYPE_INTEGER) { | ||
459 | return_ACPI_STATUS(AE_AML_OPERAND_TYPE); | ||
460 | } | ||
461 | |||
462 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | ||
463 | "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X [TO]: Pin %u Bits %u\n", | ||
464 | acpi_ut_get_type_name(source_desc->common. | ||
465 | type), | ||
466 | source_desc->common.type, | ||
467 | (u32)source_desc->integer.value, | ||
468 | obj_desc->field.pin_number_index, | ||
469 | obj_desc->field.bit_length)); | ||
470 | |||
471 | buffer = &source_desc->integer.value; | ||
472 | |||
473 | /* Lock entire transaction if requested */ | ||
474 | |||
475 | acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags); | ||
476 | |||
477 | /* Perform the write */ | ||
478 | |||
479 | status = acpi_ex_access_region(obj_desc, 0, | ||
480 | (u64 *)buffer, ACPI_WRITE); | ||
481 | acpi_ex_release_global_lock(obj_desc->common_field.field_flags); | ||
482 | return_ACPI_STATUS(status); | ||
416 | } | 483 | } |
417 | 484 | ||
418 | /* Get a pointer to the data to be written */ | 485 | /* Get a pointer to the data to be written */ |
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index ee3f872870bc..118e942005e5 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c | |||
@@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) | |||
484 | obj_desc->field.resource_length = info->resource_length; | 484 | obj_desc->field.resource_length = info->resource_length; |
485 | } | 485 | } |
486 | 486 | ||
487 | obj_desc->field.pin_number_index = info->pin_number_index; | ||
488 | |||
487 | /* Allow full data read from EC address space */ | 489 | /* Allow full data read from EC address space */ |
488 | 490 | ||
489 | if ((obj_desc->field.region_obj->region.space_id == | 491 | if ((obj_desc->field.region_obj->region.space_id == |
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 2e6caabba07a..ea62d40fd161 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c | |||
@@ -396,11 +396,11 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
396 | /* Examine each GPE Register within the block */ | 396 | /* Examine each GPE Register within the block */ |
397 | 397 | ||
398 | for (i = 0; i < gpe_block->register_count; i++) { | 398 | for (i = 0; i < gpe_block->register_count; i++) { |
399 | if (!gpe_block->register_info[i].enable_for_wake) { | ||
400 | continue; | ||
401 | } | ||
402 | 399 | ||
403 | /* Enable all "wake" GPEs in this register */ | 400 | /* |
401 | * Enable all "wake" GPEs in this register and disable the | ||
402 | * remaining ones. | ||
403 | */ | ||
404 | 404 | ||
405 | status = | 405 | status = |
406 | acpi_hw_write(gpe_block->register_info[i].enable_for_wake, | 406 | acpi_hw_write(gpe_block->register_info[i].enable_for_wake, |
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index 14cb6c0c8be2..5cd017c7ac0e 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c | |||
@@ -87,7 +87,9 @@ const char *acpi_gbl_io_decode[] = { | |||
87 | 87 | ||
88 | const char *acpi_gbl_ll_decode[] = { | 88 | const char *acpi_gbl_ll_decode[] = { |
89 | "ActiveHigh", | 89 | "ActiveHigh", |
90 | "ActiveLow" | 90 | "ActiveLow", |
91 | "ActiveBoth", | ||
92 | "Reserved" | ||
91 | }; | 93 | }; |
92 | 94 | ||
93 | const char *acpi_gbl_max_decode[] = { | 95 | const char *acpi_gbl_max_decode[] = { |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 5fdfe65fe165..8ec8a89a20ab 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -695,7 +695,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery) | |||
695 | if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { | 695 | if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { |
696 | const char *s; | 696 | const char *s; |
697 | s = dmi_get_system_info(DMI_PRODUCT_VERSION); | 697 | s = dmi_get_system_info(DMI_PRODUCT_VERSION); |
698 | if (s && !strnicmp(s, "ThinkPad", 8)) { | 698 | if (s && !strncasecmp(s, "ThinkPad", 8)) { |
699 | dmi_walk(find_battery, battery); | 699 | dmi_walk(find_battery, battery); |
700 | if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, | 700 | if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, |
701 | &battery->flags) && | 701 | &battery->flags) && |
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 36eb42e3b0bb..ed122e17636e 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c | |||
@@ -247,8 +247,8 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
247 | }, | 247 | }, |
248 | 248 | ||
249 | /* | 249 | /* |
250 | * These machines will power on immediately after shutdown when | 250 | * The wireless hotkey does not work on those machines when |
251 | * reporting the Windows 2012 OSI. | 251 | * returning true for _OSI("Windows 2012") |
252 | */ | 252 | */ |
253 | { | 253 | { |
254 | .callback = dmi_disable_osi_win8, | 254 | .callback = dmi_disable_osi_win8, |
@@ -258,6 +258,38 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
258 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), | 258 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), |
259 | }, | 259 | }, |
260 | }, | 260 | }, |
261 | { | ||
262 | .callback = dmi_disable_osi_win8, | ||
263 | .ident = "Dell Inspiron 7537", | ||
264 | .matches = { | ||
265 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
266 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"), | ||
267 | }, | ||
268 | }, | ||
269 | { | ||
270 | .callback = dmi_disable_osi_win8, | ||
271 | .ident = "Dell Inspiron 5437", | ||
272 | .matches = { | ||
273 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
274 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"), | ||
275 | }, | ||
276 | }, | ||
277 | { | ||
278 | .callback = dmi_disable_osi_win8, | ||
279 | .ident = "Dell Inspiron 3437", | ||
280 | .matches = { | ||
281 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
282 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"), | ||
283 | }, | ||
284 | }, | ||
285 | { | ||
286 | .callback = dmi_disable_osi_win8, | ||
287 | .ident = "Dell Vostro 3446", | ||
288 | .matches = { | ||
289 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
290 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"), | ||
291 | }, | ||
292 | }, | ||
261 | 293 | ||
262 | /* | 294 | /* |
263 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. | 295 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. |
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 76f7cff64594..c8ead9f97375 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c | |||
@@ -99,6 +99,13 @@ static void container_device_detach(struct acpi_device *adev) | |||
99 | device_unregister(dev); | 99 | device_unregister(dev); |
100 | } | 100 | } |
101 | 101 | ||
102 | static void container_device_online(struct acpi_device *adev) | ||
103 | { | ||
104 | struct device *dev = acpi_driver_data(adev); | ||
105 | |||
106 | kobject_uevent(&dev->kobj, KOBJ_ONLINE); | ||
107 | } | ||
108 | |||
102 | static struct acpi_scan_handler container_handler = { | 109 | static struct acpi_scan_handler container_handler = { |
103 | .ids = container_device_ids, | 110 | .ids = container_device_ids, |
104 | .attach = container_device_attach, | 111 | .attach = container_device_attach, |
@@ -106,6 +113,7 @@ static struct acpi_scan_handler container_handler = { | |||
106 | .hotplug = { | 113 | .hotplug = { |
107 | .enabled = true, | 114 | .enabled = true, |
108 | .demand_offline = true, | 115 | .demand_offline = true, |
116 | .notify_online = container_device_online, | ||
109 | }, | 117 | }, |
110 | }; | 118 | }; |
111 | 119 | ||
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 67075f800e34..bea6896be122 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c | |||
@@ -1041,6 +1041,40 @@ static struct dev_pm_domain acpi_general_pm_domain = { | |||
1041 | }; | 1041 | }; |
1042 | 1042 | ||
1043 | /** | 1043 | /** |
1044 | * acpi_dev_pm_detach - Remove ACPI power management from the device. | ||
1045 | * @dev: Device to take care of. | ||
1046 | * @power_off: Whether or not to try to remove power from the device. | ||
1047 | * | ||
1048 | * Remove the device from the general ACPI PM domain and remove its wakeup | ||
1049 | * notifier. If @power_off is set, additionally remove power from the device if | ||
1050 | * possible. | ||
1051 | * | ||
1052 | * Callers must ensure proper synchronization of this function with power | ||
1053 | * management callbacks. | ||
1054 | */ | ||
1055 | static void acpi_dev_pm_detach(struct device *dev, bool power_off) | ||
1056 | { | ||
1057 | struct acpi_device *adev = ACPI_COMPANION(dev); | ||
1058 | |||
1059 | if (adev && dev->pm_domain == &acpi_general_pm_domain) { | ||
1060 | dev->pm_domain = NULL; | ||
1061 | acpi_remove_pm_notifier(adev); | ||
1062 | if (power_off) { | ||
1063 | /* | ||
1064 | * If the device's PM QoS resume latency limit or flags | ||
1065 | * have been exposed to user space, they have to be | ||
1066 | * hidden at this point, so that they don't affect the | ||
1067 | * choice of the low-power state to put the device into. | ||
1068 | */ | ||
1069 | dev_pm_qos_hide_latency_limit(dev); | ||
1070 | dev_pm_qos_hide_flags(dev); | ||
1071 | acpi_device_wakeup(adev, ACPI_STATE_S0, false); | ||
1072 | acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); | ||
1073 | } | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | /** | ||
1044 | * acpi_dev_pm_attach - Prepare device for ACPI power management. | 1078 | * acpi_dev_pm_attach - Prepare device for ACPI power management. |
1045 | * @dev: Device to prepare. | 1079 | * @dev: Device to prepare. |
1046 | * @power_on: Whether or not to power on the device. | 1080 | * @power_on: Whether or not to power on the device. |
@@ -1072,42 +1106,9 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) | |||
1072 | acpi_dev_pm_full_power(adev); | 1106 | acpi_dev_pm_full_power(adev); |
1073 | acpi_device_wakeup(adev, ACPI_STATE_S0, false); | 1107 | acpi_device_wakeup(adev, ACPI_STATE_S0, false); |
1074 | } | 1108 | } |
1109 | |||
1110 | dev->pm_domain->detach = acpi_dev_pm_detach; | ||
1075 | return 0; | 1111 | return 0; |
1076 | } | 1112 | } |
1077 | EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); | 1113 | EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); |
1078 | |||
1079 | /** | ||
1080 | * acpi_dev_pm_detach - Remove ACPI power management from the device. | ||
1081 | * @dev: Device to take care of. | ||
1082 | * @power_off: Whether or not to try to remove power from the device. | ||
1083 | * | ||
1084 | * Remove the device from the general ACPI PM domain and remove its wakeup | ||
1085 | * notifier. If @power_off is set, additionally remove power from the device if | ||
1086 | * possible. | ||
1087 | * | ||
1088 | * Callers must ensure proper synchronization of this function with power | ||
1089 | * management callbacks. | ||
1090 | */ | ||
1091 | void acpi_dev_pm_detach(struct device *dev, bool power_off) | ||
1092 | { | ||
1093 | struct acpi_device *adev = ACPI_COMPANION(dev); | ||
1094 | |||
1095 | if (adev && dev->pm_domain == &acpi_general_pm_domain) { | ||
1096 | dev->pm_domain = NULL; | ||
1097 | acpi_remove_pm_notifier(adev); | ||
1098 | if (power_off) { | ||
1099 | /* | ||
1100 | * If the device's PM QoS resume latency limit or flags | ||
1101 | * have been exposed to user space, they have to be | ||
1102 | * hidden at this point, so that they don't affect the | ||
1103 | * choice of the low-power state to put the device into. | ||
1104 | */ | ||
1105 | dev_pm_qos_hide_latency_limit(dev); | ||
1106 | dev_pm_qos_hide_flags(dev); | ||
1107 | acpi_device_wakeup(adev, ACPI_STATE_S0, false); | ||
1108 | acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); | ||
1109 | } | ||
1110 | } | ||
1111 | } | ||
1112 | EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); | ||
1113 | #endif /* CONFIG_PM */ | 1114 | #endif /* CONFIG_PM */ |
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 8acf53e62966..5328b1090e08 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c | |||
@@ -27,12 +27,10 @@ | |||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/types.h> | 29 | #include <linux/types.h> |
30 | #include <asm/uaccess.h> | 30 | #include <linux/uaccess.h> |
31 | #include <linux/thermal.h> | 31 | #include <linux/thermal.h> |
32 | #include <linux/acpi.h> | 32 | #include <linux/acpi.h> |
33 | 33 | ||
34 | #define PREFIX "ACPI: " | ||
35 | |||
36 | #define ACPI_FAN_CLASS "fan" | 34 | #define ACPI_FAN_CLASS "fan" |
37 | #define ACPI_FAN_FILE_STATE "state" | 35 | #define ACPI_FAN_FILE_STATE "state" |
38 | 36 | ||
@@ -127,8 +125,9 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = { | |||
127 | }; | 125 | }; |
128 | 126 | ||
129 | /* -------------------------------------------------------------------------- | 127 | /* -------------------------------------------------------------------------- |
130 | Driver Interface | 128 | * Driver Interface |
131 | -------------------------------------------------------------------------- */ | 129 | * -------------------------------------------------------------------------- |
130 | */ | ||
132 | 131 | ||
133 | static int acpi_fan_add(struct acpi_device *device) | 132 | static int acpi_fan_add(struct acpi_device *device) |
134 | { | 133 | { |
@@ -143,7 +142,7 @@ static int acpi_fan_add(struct acpi_device *device) | |||
143 | 142 | ||
144 | result = acpi_bus_update_power(device->handle, NULL); | 143 | result = acpi_bus_update_power(device->handle, NULL); |
145 | if (result) { | 144 | if (result) { |
146 | printk(KERN_ERR PREFIX "Setting initial power state\n"); | 145 | dev_err(&device->dev, "Setting initial power state\n"); |
147 | goto end; | 146 | goto end; |
148 | } | 147 | } |
149 | 148 | ||
@@ -168,10 +167,9 @@ static int acpi_fan_add(struct acpi_device *device) | |||
168 | &device->dev.kobj, | 167 | &device->dev.kobj, |
169 | "device"); | 168 | "device"); |
170 | if (result) | 169 | if (result) |
171 | dev_err(&device->dev, "Failed to create sysfs link " | 170 | dev_err(&device->dev, "Failed to create sysfs link 'device'\n"); |
172 | "'device'\n"); | ||
173 | 171 | ||
174 | printk(KERN_INFO PREFIX "%s [%s] (%s)\n", | 172 | dev_info(&device->dev, "ACPI: %s [%s] (%s)\n", |
175 | acpi_device_name(device), acpi_device_bid(device), | 173 | acpi_device_name(device), acpi_device_bid(device), |
176 | !device->power.state ? "on" : "off"); | 174 | !device->power.state ? "on" : "off"); |
177 | 175 | ||
@@ -217,7 +215,7 @@ static int acpi_fan_resume(struct device *dev) | |||
217 | 215 | ||
218 | result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL); | 216 | result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL); |
219 | if (result) | 217 | if (result) |
220 | printk(KERN_ERR PREFIX "Error updating fan power state\n"); | 218 | dev_err(dev, "Error updating fan power state\n"); |
221 | 219 | ||
222 | return result; | 220 | return result; |
223 | } | 221 | } |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 3abe9b223ba7..9964f70be98d 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -152,6 +152,16 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported) | |||
152 | osi_linux.dmi ? " via DMI" : ""); | 152 | osi_linux.dmi ? " via DMI" : ""); |
153 | } | 153 | } |
154 | 154 | ||
155 | if (!strcmp("Darwin", interface)) { | ||
156 | /* | ||
157 | * Apple firmware will behave poorly if it receives positive | ||
158 | * answers to "Darwin" and any other OS. Respond positively | ||
159 | * to Darwin and then disable all other vendor strings. | ||
160 | */ | ||
161 | acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS); | ||
162 | supported = ACPI_UINT32_MAX; | ||
163 | } | ||
164 | |||
155 | return supported; | 165 | return supported; |
156 | } | 166 | } |
157 | 167 | ||
@@ -825,7 +835,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, | |||
825 | 835 | ||
826 | acpi_irq_handler = handler; | 836 | acpi_irq_handler = handler; |
827 | acpi_irq_context = context; | 837 | acpi_irq_context = context; |
828 | if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) { | 838 | if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { |
829 | printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); | 839 | printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); |
830 | acpi_irq_handler = NULL; | 840 | acpi_irq_handler = NULL; |
831 | return AE_NOT_ACQUIRED; | 841 | return AE_NOT_ACQUIRED; |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index e6ae603ed1a1..cd4de7e038ea 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/pci-aspm.h> | 35 | #include <linux/pci-aspm.h> |
36 | #include <linux/acpi.h> | 36 | #include <linux/acpi.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/dmi.h> | ||
38 | #include <acpi/apei.h> /* for acpi_hest_init() */ | 39 | #include <acpi/apei.h> /* for acpi_hest_init() */ |
39 | 40 | ||
40 | #include "internal.h" | 41 | #include "internal.h" |
@@ -430,6 +431,19 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, | |||
430 | acpi_handle handle = device->handle; | 431 | acpi_handle handle = device->handle; |
431 | 432 | ||
432 | /* | 433 | /* |
434 | * Apple always return failure on _OSC calls when _OSI("Darwin") has | ||
435 | * been called successfully. We know the feature set supported by the | ||
436 | * platform, so avoid calling _OSC at all | ||
437 | */ | ||
438 | |||
439 | if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) { | ||
440 | root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL; | ||
441 | decode_osc_control(root, "OS assumes control of", | ||
442 | root->osc_control_set); | ||
443 | return; | ||
444 | } | ||
445 | |||
446 | /* | ||
433 | * All supported architectures that use ACPI have support for | 447 | * All supported architectures that use ACPI have support for |
434 | * PCI domains, so we indicate this in _OSC support capabilities. | 448 | * PCI domains, so we indicate this in _OSC support capabilities. |
435 | */ | 449 | */ |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index e32321ce9a5c..ef58f46c8442 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -16,7 +16,7 @@ static int map_lapic_id(struct acpi_subtable_header *entry, | |||
16 | u32 acpi_id, int *apic_id) | 16 | u32 acpi_id, int *apic_id) |
17 | { | 17 | { |
18 | struct acpi_madt_local_apic *lapic = | 18 | struct acpi_madt_local_apic *lapic = |
19 | (struct acpi_madt_local_apic *)entry; | 19 | container_of(entry, struct acpi_madt_local_apic, header); |
20 | 20 | ||
21 | if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) | 21 | if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) |
22 | return -ENODEV; | 22 | return -ENODEV; |
@@ -32,7 +32,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry, | |||
32 | int device_declaration, u32 acpi_id, int *apic_id) | 32 | int device_declaration, u32 acpi_id, int *apic_id) |
33 | { | 33 | { |
34 | struct acpi_madt_local_x2apic *apic = | 34 | struct acpi_madt_local_x2apic *apic = |
35 | (struct acpi_madt_local_x2apic *)entry; | 35 | container_of(entry, struct acpi_madt_local_x2apic, header); |
36 | 36 | ||
37 | if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) | 37 | if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) |
38 | return -ENODEV; | 38 | return -ENODEV; |
@@ -49,7 +49,7 @@ static int map_lsapic_id(struct acpi_subtable_header *entry, | |||
49 | int device_declaration, u32 acpi_id, int *apic_id) | 49 | int device_declaration, u32 acpi_id, int *apic_id) |
50 | { | 50 | { |
51 | struct acpi_madt_local_sapic *lsapic = | 51 | struct acpi_madt_local_sapic *lsapic = |
52 | (struct acpi_madt_local_sapic *)entry; | 52 | container_of(entry, struct acpi_madt_local_sapic, header); |
53 | 53 | ||
54 | if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) | 54 | if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) |
55 | return -ENODEV; | 55 | return -ENODEV; |
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 366ca40a6f70..a7a3edd28beb 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/jiffies.h> | 35 | #include <linux/jiffies.h> |
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/power_supply.h> | 37 | #include <linux/power_supply.h> |
38 | #include <linux/dmi.h> | ||
38 | 39 | ||
39 | #include "sbshc.h" | 40 | #include "sbshc.h" |
40 | #include "battery.h" | 41 | #include "battery.h" |
@@ -61,6 +62,8 @@ static unsigned int cache_time = 1000; | |||
61 | module_param(cache_time, uint, 0644); | 62 | module_param(cache_time, uint, 0644); |
62 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); | 63 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); |
63 | 64 | ||
65 | static bool sbs_manager_broken; | ||
66 | |||
64 | #define MAX_SBS_BAT 4 | 67 | #define MAX_SBS_BAT 4 |
65 | #define ACPI_SBS_BLOCK_MAX 32 | 68 | #define ACPI_SBS_BLOCK_MAX 32 |
66 | 69 | ||
@@ -109,6 +112,7 @@ struct acpi_sbs { | |||
109 | u8 batteries_supported:4; | 112 | u8 batteries_supported:4; |
110 | u8 manager_present:1; | 113 | u8 manager_present:1; |
111 | u8 charger_present:1; | 114 | u8 charger_present:1; |
115 | u8 charger_exists:1; | ||
112 | }; | 116 | }; |
113 | 117 | ||
114 | #define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) | 118 | #define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) |
@@ -429,9 +433,19 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs) | |||
429 | 433 | ||
430 | result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER, | 434 | result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER, |
431 | 0x13, (u8 *) & status); | 435 | 0x13, (u8 *) & status); |
432 | if (!result) | 436 | |
433 | sbs->charger_present = (status >> 15) & 0x1; | 437 | if (result) |
434 | return result; | 438 | return result; |
439 | |||
440 | /* | ||
441 | * The spec requires that bit 4 always be 1. If it's not set, assume | ||
442 | * that the implementation doesn't support an SBS charger | ||
443 | */ | ||
444 | if (!((status >> 4) & 0x1)) | ||
445 | return -ENODEV; | ||
446 | |||
447 | sbs->charger_present = (status >> 15) & 0x1; | ||
448 | return 0; | ||
435 | } | 449 | } |
436 | 450 | ||
437 | static ssize_t acpi_battery_alarm_show(struct device *dev, | 451 | static ssize_t acpi_battery_alarm_show(struct device *dev, |
@@ -483,16 +497,21 @@ static int acpi_battery_read(struct acpi_battery *battery) | |||
483 | ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2); | 497 | ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2); |
484 | } else if (battery->id == 0) | 498 | } else if (battery->id == 0) |
485 | battery->present = 1; | 499 | battery->present = 1; |
500 | |||
486 | if (result || !battery->present) | 501 | if (result || !battery->present) |
487 | return result; | 502 | return result; |
488 | 503 | ||
489 | if (saved_present != battery->present) { | 504 | if (saved_present != battery->present) { |
490 | battery->update_time = 0; | 505 | battery->update_time = 0; |
491 | result = acpi_battery_get_info(battery); | 506 | result = acpi_battery_get_info(battery); |
492 | if (result) | 507 | if (result) { |
508 | battery->present = 0; | ||
493 | return result; | 509 | return result; |
510 | } | ||
494 | } | 511 | } |
495 | result = acpi_battery_get_state(battery); | 512 | result = acpi_battery_get_state(battery); |
513 | if (result) | ||
514 | battery->present = 0; | ||
496 | return result; | 515 | return result; |
497 | } | 516 | } |
498 | 517 | ||
@@ -524,6 +543,7 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) | |||
524 | result = power_supply_register(&sbs->device->dev, &battery->bat); | 543 | result = power_supply_register(&sbs->device->dev, &battery->bat); |
525 | if (result) | 544 | if (result) |
526 | goto end; | 545 | goto end; |
546 | |||
527 | result = device_create_file(battery->bat.dev, &alarm_attr); | 547 | result = device_create_file(battery->bat.dev, &alarm_attr); |
528 | if (result) | 548 | if (result) |
529 | goto end; | 549 | goto end; |
@@ -554,6 +574,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs) | |||
554 | if (result) | 574 | if (result) |
555 | goto end; | 575 | goto end; |
556 | 576 | ||
577 | sbs->charger_exists = 1; | ||
557 | sbs->charger.name = "sbs-charger"; | 578 | sbs->charger.name = "sbs-charger"; |
558 | sbs->charger.type = POWER_SUPPLY_TYPE_MAINS; | 579 | sbs->charger.type = POWER_SUPPLY_TYPE_MAINS; |
559 | sbs->charger.properties = sbs_ac_props; | 580 | sbs->charger.properties = sbs_ac_props; |
@@ -580,9 +601,12 @@ static void acpi_sbs_callback(void *context) | |||
580 | struct acpi_battery *bat; | 601 | struct acpi_battery *bat; |
581 | u8 saved_charger_state = sbs->charger_present; | 602 | u8 saved_charger_state = sbs->charger_present; |
582 | u8 saved_battery_state; | 603 | u8 saved_battery_state; |
583 | acpi_ac_get_present(sbs); | 604 | |
584 | if (sbs->charger_present != saved_charger_state) | 605 | if (sbs->charger_exists) { |
585 | kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE); | 606 | acpi_ac_get_present(sbs); |
607 | if (sbs->charger_present != saved_charger_state) | ||
608 | kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE); | ||
609 | } | ||
586 | 610 | ||
587 | if (sbs->manager_present) { | 611 | if (sbs->manager_present) { |
588 | for (id = 0; id < MAX_SBS_BAT; ++id) { | 612 | for (id = 0; id < MAX_SBS_BAT; ++id) { |
@@ -598,12 +622,31 @@ static void acpi_sbs_callback(void *context) | |||
598 | } | 622 | } |
599 | } | 623 | } |
600 | 624 | ||
625 | static int disable_sbs_manager(const struct dmi_system_id *d) | ||
626 | { | ||
627 | sbs_manager_broken = true; | ||
628 | return 0; | ||
629 | } | ||
630 | |||
631 | static struct dmi_system_id acpi_sbs_dmi_table[] = { | ||
632 | { | ||
633 | .callback = disable_sbs_manager, | ||
634 | .ident = "Apple", | ||
635 | .matches = { | ||
636 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.") | ||
637 | }, | ||
638 | }, | ||
639 | { }, | ||
640 | }; | ||
641 | |||
601 | static int acpi_sbs_add(struct acpi_device *device) | 642 | static int acpi_sbs_add(struct acpi_device *device) |
602 | { | 643 | { |
603 | struct acpi_sbs *sbs; | 644 | struct acpi_sbs *sbs; |
604 | int result = 0; | 645 | int result = 0; |
605 | int id; | 646 | int id; |
606 | 647 | ||
648 | dmi_check_system(acpi_sbs_dmi_table); | ||
649 | |||
607 | sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL); | 650 | sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL); |
608 | if (!sbs) { | 651 | if (!sbs) { |
609 | result = -ENOMEM; | 652 | result = -ENOMEM; |
@@ -619,17 +662,24 @@ static int acpi_sbs_add(struct acpi_device *device) | |||
619 | device->driver_data = sbs; | 662 | device->driver_data = sbs; |
620 | 663 | ||
621 | result = acpi_charger_add(sbs); | 664 | result = acpi_charger_add(sbs); |
622 | if (result) | 665 | if (result && result != -ENODEV) |
623 | goto end; | 666 | goto end; |
624 | 667 | ||
625 | result = acpi_manager_get_info(sbs); | 668 | result = 0; |
626 | if (!result) { | 669 | |
627 | sbs->manager_present = 1; | 670 | if (!sbs_manager_broken) { |
628 | for (id = 0; id < MAX_SBS_BAT; ++id) | 671 | result = acpi_manager_get_info(sbs); |
629 | if ((sbs->batteries_supported & (1 << id))) | 672 | if (!result) { |
630 | acpi_battery_add(sbs, id); | 673 | sbs->manager_present = 0; |
631 | } else | 674 | for (id = 0; id < MAX_SBS_BAT; ++id) |
675 | if ((sbs->batteries_supported & (1 << id))) | ||
676 | acpi_battery_add(sbs, id); | ||
677 | } | ||
678 | } | ||
679 | |||
680 | if (!sbs->manager_present) | ||
632 | acpi_battery_add(sbs, 0); | 681 | acpi_battery_add(sbs, 0); |
682 | |||
633 | acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs); | 683 | acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs); |
634 | end: | 684 | end: |
635 | if (result) | 685 | if (result) |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 3bf7764659a4..ae44d8654c82 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -130,7 +130,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias, | |||
130 | list_for_each_entry(id, &acpi_dev->pnp.ids, list) { | 130 | list_for_each_entry(id, &acpi_dev->pnp.ids, list) { |
131 | count = snprintf(&modalias[len], size, "%s:", id->id); | 131 | count = snprintf(&modalias[len], size, "%s:", id->id); |
132 | if (count < 0) | 132 | if (count < 0) |
133 | return EINVAL; | 133 | return -EINVAL; |
134 | if (count >= size) | 134 | if (count >= size) |
135 | return -ENOMEM; | 135 | return -ENOMEM; |
136 | len += count; | 136 | len += count; |
@@ -2189,6 +2189,9 @@ static void acpi_bus_attach(struct acpi_device *device) | |||
2189 | ok: | 2189 | ok: |
2190 | list_for_each_entry(child, &device->children, node) | 2190 | list_for_each_entry(child, &device->children, node) |
2191 | acpi_bus_attach(child); | 2191 | acpi_bus_attach(child); |
2192 | |||
2193 | if (device->handler && device->handler->hotplug.notify_online) | ||
2194 | device->handler->hotplug.notify_online(device); | ||
2192 | } | 2195 | } |
2193 | 2196 | ||
2194 | /** | 2197 | /** |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 54da4a3fe65e..05a31b573fc3 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/irq.h> | 14 | #include <linux/irq.h> |
15 | #include <linux/dmi.h> | 15 | #include <linux/dmi.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/interrupt.h> | ||
17 | #include <linux/suspend.h> | 18 | #include <linux/suspend.h> |
18 | #include <linux/reboot.h> | 19 | #include <linux/reboot.h> |
19 | #include <linux/acpi.h> | 20 | #include <linux/acpi.h> |
@@ -626,6 +627,19 @@ static int acpi_freeze_begin(void) | |||
626 | return 0; | 627 | return 0; |
627 | } | 628 | } |
628 | 629 | ||
630 | static int acpi_freeze_prepare(void) | ||
631 | { | ||
632 | acpi_enable_all_wakeup_gpes(); | ||
633 | enable_irq_wake(acpi_gbl_FADT.sci_interrupt); | ||
634 | return 0; | ||
635 | } | ||
636 | |||
637 | static void acpi_freeze_restore(void) | ||
638 | { | ||
639 | disable_irq_wake(acpi_gbl_FADT.sci_interrupt); | ||
640 | acpi_enable_all_runtime_gpes(); | ||
641 | } | ||
642 | |||
629 | static void acpi_freeze_end(void) | 643 | static void acpi_freeze_end(void) |
630 | { | 644 | { |
631 | acpi_scan_lock_release(); | 645 | acpi_scan_lock_release(); |
@@ -633,6 +647,8 @@ static void acpi_freeze_end(void) | |||
633 | 647 | ||
634 | static const struct platform_freeze_ops acpi_freeze_ops = { | 648 | static const struct platform_freeze_ops acpi_freeze_ops = { |
635 | .begin = acpi_freeze_begin, | 649 | .begin = acpi_freeze_begin, |
650 | .prepare = acpi_freeze_prepare, | ||
651 | .restore = acpi_freeze_restore, | ||
636 | .end = acpi_freeze_end, | 652 | .end = acpi_freeze_end, |
637 | }; | 653 | }; |
638 | 654 | ||
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 07c8c5a5ee95..834f35c4bf8d 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c | |||
@@ -661,7 +661,6 @@ EXPORT_SYMBOL(acpi_evaluate_dsm); | |||
661 | * @uuid: UUID of requested functions, should be 16 bytes at least | 661 | * @uuid: UUID of requested functions, should be 16 bytes at least |
662 | * @rev: revision number of requested functions | 662 | * @rev: revision number of requested functions |
663 | * @funcs: bitmap of requested functions | 663 | * @funcs: bitmap of requested functions |
664 | * @exclude: excluding special value, used to support i915 and nouveau | ||
665 | * | 664 | * |
666 | * Evaluate device's _DSM method to check whether it supports requested | 665 | * Evaluate device's _DSM method to check whether it supports requested |
667 | * functions. Currently only support 64 functions at maximum, should be | 666 | * functions. Currently only support 64 functions at maximum, should be |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index fcbda105616e..807a88a0f394 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -411,12 +411,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) | |||
411 | return 0; | 411 | return 0; |
412 | } | 412 | } |
413 | 413 | ||
414 | static int __init video_set_use_native_backlight(const struct dmi_system_id *d) | ||
415 | { | ||
416 | use_native_backlight_dmi = true; | ||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | static int __init video_disable_native_backlight(const struct dmi_system_id *d) | 414 | static int __init video_disable_native_backlight(const struct dmi_system_id *d) |
421 | { | 415 | { |
422 | use_native_backlight_dmi = false; | 416 | use_native_backlight_dmi = false; |
@@ -467,265 +461,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
467 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), | 461 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), |
468 | }, | 462 | }, |
469 | }, | 463 | }, |
470 | { | ||
471 | .callback = video_set_use_native_backlight, | ||
472 | .ident = "ThinkPad X230", | ||
473 | .matches = { | ||
474 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
475 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"), | ||
476 | }, | ||
477 | }, | ||
478 | { | ||
479 | .callback = video_set_use_native_backlight, | ||
480 | .ident = "ThinkPad T430 and T430s", | ||
481 | .matches = { | ||
482 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
483 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"), | ||
484 | }, | ||
485 | }, | ||
486 | { | ||
487 | .callback = video_set_use_native_backlight, | ||
488 | .ident = "ThinkPad T430", | ||
489 | .matches = { | ||
490 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
491 | DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), | ||
492 | }, | ||
493 | }, | ||
494 | { | ||
495 | .callback = video_set_use_native_backlight, | ||
496 | .ident = "ThinkPad T431s", | ||
497 | .matches = { | ||
498 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
499 | DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"), | ||
500 | }, | ||
501 | }, | ||
502 | { | ||
503 | .callback = video_set_use_native_backlight, | ||
504 | .ident = "ThinkPad Edge E530", | ||
505 | .matches = { | ||
506 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
507 | DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"), | ||
508 | }, | ||
509 | }, | ||
510 | { | ||
511 | .callback = video_set_use_native_backlight, | ||
512 | .ident = "ThinkPad Edge E530", | ||
513 | .matches = { | ||
514 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
515 | DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"), | ||
516 | }, | ||
517 | }, | ||
518 | { | ||
519 | .callback = video_set_use_native_backlight, | ||
520 | .ident = "ThinkPad Edge E530", | ||
521 | .matches = { | ||
522 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
523 | DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"), | ||
524 | }, | ||
525 | }, | ||
526 | { | ||
527 | .callback = video_set_use_native_backlight, | ||
528 | .ident = "ThinkPad W530", | ||
529 | .matches = { | ||
530 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
531 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"), | ||
532 | }, | ||
533 | }, | ||
534 | { | ||
535 | .callback = video_set_use_native_backlight, | ||
536 | .ident = "ThinkPad X1 Carbon", | ||
537 | .matches = { | ||
538 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
539 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"), | ||
540 | }, | ||
541 | }, | ||
542 | { | ||
543 | .callback = video_set_use_native_backlight, | ||
544 | .ident = "Lenovo Yoga 13", | ||
545 | .matches = { | ||
546 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
547 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"), | ||
548 | }, | ||
549 | }, | ||
550 | { | ||
551 | .callback = video_set_use_native_backlight, | ||
552 | .ident = "Lenovo Yoga 2 11", | ||
553 | .matches = { | ||
554 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
555 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2 11"), | ||
556 | }, | ||
557 | }, | ||
558 | { | ||
559 | .callback = video_set_use_native_backlight, | ||
560 | .ident = "Thinkpad Helix", | ||
561 | .matches = { | ||
562 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
563 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"), | ||
564 | }, | ||
565 | }, | ||
566 | { | ||
567 | .callback = video_set_use_native_backlight, | ||
568 | .ident = "Dell Inspiron 7520", | ||
569 | .matches = { | ||
570 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
571 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), | ||
572 | }, | ||
573 | }, | ||
574 | { | ||
575 | .callback = video_set_use_native_backlight, | ||
576 | .ident = "Acer Aspire 5733Z", | ||
577 | .matches = { | ||
578 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
579 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"), | ||
580 | }, | ||
581 | }, | ||
582 | { | ||
583 | .callback = video_set_use_native_backlight, | ||
584 | .ident = "Acer Aspire 5742G", | ||
585 | .matches = { | ||
586 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
587 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"), | ||
588 | }, | ||
589 | }, | ||
590 | { | ||
591 | .callback = video_set_use_native_backlight, | ||
592 | .ident = "Acer Aspire V5-171", | ||
593 | .matches = { | ||
594 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
595 | DMI_MATCH(DMI_PRODUCT_NAME, "V5-171"), | ||
596 | }, | ||
597 | }, | ||
598 | { | ||
599 | .callback = video_set_use_native_backlight, | ||
600 | .ident = "Acer Aspire V5-431", | ||
601 | .matches = { | ||
602 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
603 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"), | ||
604 | }, | ||
605 | }, | ||
606 | { | ||
607 | .callback = video_set_use_native_backlight, | ||
608 | .ident = "Acer Aspire V5-471G", | ||
609 | .matches = { | ||
610 | DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), | ||
611 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-471G"), | ||
612 | }, | ||
613 | }, | ||
614 | { | ||
615 | .callback = video_set_use_native_backlight, | ||
616 | .ident = "Acer TravelMate B113", | ||
617 | .matches = { | ||
618 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
619 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"), | ||
620 | }, | ||
621 | }, | ||
622 | { | ||
623 | .callback = video_set_use_native_backlight, | ||
624 | .ident = "Acer Aspire V5-572G", | ||
625 | .matches = { | ||
626 | DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"), | ||
627 | DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"), | ||
628 | }, | ||
629 | }, | ||
630 | { | ||
631 | .callback = video_set_use_native_backlight, | ||
632 | .ident = "Acer Aspire V5-573G", | ||
633 | .matches = { | ||
634 | DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"), | ||
635 | DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"), | ||
636 | }, | ||
637 | }, | ||
638 | { | ||
639 | .callback = video_set_use_native_backlight, | ||
640 | .ident = "ASUS Zenbook Prime UX31A", | ||
641 | .matches = { | ||
642 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
643 | DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"), | ||
644 | }, | ||
645 | }, | ||
646 | { | ||
647 | .callback = video_set_use_native_backlight, | ||
648 | .ident = "HP ProBook 4340s", | ||
649 | .matches = { | ||
650 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
651 | DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"), | ||
652 | }, | ||
653 | }, | ||
654 | { | ||
655 | .callback = video_set_use_native_backlight, | ||
656 | .ident = "HP ProBook 4540s", | ||
657 | .matches = { | ||
658 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
659 | DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"), | ||
660 | }, | ||
661 | }, | ||
662 | { | ||
663 | .callback = video_set_use_native_backlight, | ||
664 | .ident = "HP ProBook 2013 models", | ||
665 | .matches = { | ||
666 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
667 | DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "), | ||
668 | DMI_MATCH(DMI_PRODUCT_NAME, " G1"), | ||
669 | }, | ||
670 | }, | ||
671 | { | ||
672 | .callback = video_set_use_native_backlight, | ||
673 | .ident = "HP EliteBook 2013 models", | ||
674 | .matches = { | ||
675 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
676 | DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "), | ||
677 | DMI_MATCH(DMI_PRODUCT_NAME, " G1"), | ||
678 | }, | ||
679 | }, | ||
680 | { | ||
681 | .callback = video_set_use_native_backlight, | ||
682 | .ident = "HP EliteBook 2014 models", | ||
683 | .matches = { | ||
684 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
685 | DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "), | ||
686 | DMI_MATCH(DMI_PRODUCT_NAME, " G2"), | ||
687 | }, | ||
688 | }, | ||
689 | { | ||
690 | .callback = video_set_use_native_backlight, | ||
691 | .ident = "HP ZBook 14", | ||
692 | .matches = { | ||
693 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
694 | DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"), | ||
695 | }, | ||
696 | }, | ||
697 | { | ||
698 | .callback = video_set_use_native_backlight, | ||
699 | .ident = "HP ZBook 15", | ||
700 | .matches = { | ||
701 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
702 | DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"), | ||
703 | }, | ||
704 | }, | ||
705 | { | ||
706 | .callback = video_set_use_native_backlight, | ||
707 | .ident = "HP ZBook 17", | ||
708 | .matches = { | ||
709 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
710 | DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"), | ||
711 | }, | ||
712 | }, | ||
713 | { | ||
714 | .callback = video_set_use_native_backlight, | ||
715 | .ident = "HP EliteBook 8470p", | ||
716 | .matches = { | ||
717 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
718 | DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8470p"), | ||
719 | }, | ||
720 | }, | ||
721 | { | ||
722 | .callback = video_set_use_native_backlight, | ||
723 | .ident = "HP EliteBook 8780w", | ||
724 | .matches = { | ||
725 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
726 | DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), | ||
727 | }, | ||
728 | }, | ||
729 | 464 | ||
730 | /* | 465 | /* |
731 | * These models have a working acpi_video backlight control, and using | 466 | * These models have a working acpi_video backlight control, and using |
@@ -750,6 +485,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
750 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), | 485 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), |
751 | }, | 486 | }, |
752 | }, | 487 | }, |
488 | { | ||
489 | .callback = video_disable_native_backlight, | ||
490 | .ident = "ThinkPad X201s", | ||
491 | .matches = { | ||
492 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
493 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), | ||
494 | }, | ||
495 | }, | ||
753 | 496 | ||
754 | /* The native backlight controls do not work on some older machines */ | 497 | /* The native backlight controls do not work on some older machines */ |
755 | { | 498 | { |
@@ -1411,6 +1154,23 @@ acpi_video_device_bind(struct acpi_video_bus *video, | |||
1411 | } | 1154 | } |
1412 | } | 1155 | } |
1413 | 1156 | ||
1157 | static bool acpi_video_device_in_dod(struct acpi_video_device *device) | ||
1158 | { | ||
1159 | struct acpi_video_bus *video = device->video; | ||
1160 | int i; | ||
1161 | |||
1162 | /* If we have a broken _DOD, no need to test */ | ||
1163 | if (!video->attached_count) | ||
1164 | return true; | ||
1165 | |||
1166 | for (i = 0; i < video->attached_count; i++) { | ||
1167 | if (video->attached_array[i].bind_info == device) | ||
1168 | return true; | ||
1169 | } | ||
1170 | |||
1171 | return false; | ||
1172 | } | ||
1173 | |||
1414 | /* | 1174 | /* |
1415 | * Arg: | 1175 | * Arg: |
1416 | * video : video bus device | 1176 | * video : video bus device |
@@ -1850,6 +1610,15 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) | |||
1850 | static int count; | 1610 | static int count; |
1851 | char *name; | 1611 | char *name; |
1852 | 1612 | ||
1613 | /* | ||
1614 | * Do not create backlight device for video output | ||
1615 | * device that is not in the enumerated list. | ||
1616 | */ | ||
1617 | if (!acpi_video_device_in_dod(device)) { | ||
1618 | dev_dbg(&device->dev->dev, "not in _DOD list, ignore\n"); | ||
1619 | return; | ||
1620 | } | ||
1621 | |||
1853 | result = acpi_video_init_brightness(device); | 1622 | result = acpi_video_init_brightness(device); |
1854 | if (result) | 1623 | if (result) |
1855 | return; | 1624 | return; |
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index c42feb2bacd0..27c43499977a 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -174,6 +174,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { | |||
174 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), | 174 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), |
175 | }, | 175 | }, |
176 | }, | 176 | }, |
177 | { | ||
178 | .callback = video_detect_force_vendor, | ||
179 | .ident = "Lenovo IdeaPad Z570", | ||
180 | .matches = { | ||
181 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
182 | DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"), | ||
183 | }, | ||
184 | }, | ||
177 | { }, | 185 | { }, |
178 | }; | 186 | }; |
179 | 187 | ||
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 3cf61a127ee5..47bbdc1b5be3 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/pm.h> | 16 | #include <linux/pm.h> |
17 | #include <linux/pm_runtime.h> | 17 | #include <linux/pm_runtime.h> |
18 | #include <linux/pm_domain.h> | ||
18 | #include <linux/amba/bus.h> | 19 | #include <linux/amba/bus.h> |
19 | #include <linux/sizes.h> | 20 | #include <linux/sizes.h> |
20 | 21 | ||
@@ -182,9 +183,15 @@ static int amba_probe(struct device *dev) | |||
182 | int ret; | 183 | int ret; |
183 | 184 | ||
184 | do { | 185 | do { |
186 | ret = dev_pm_domain_attach(dev, true); | ||
187 | if (ret == -EPROBE_DEFER) | ||
188 | break; | ||
189 | |||
185 | ret = amba_get_enable_pclk(pcdev); | 190 | ret = amba_get_enable_pclk(pcdev); |
186 | if (ret) | 191 | if (ret) { |
192 | dev_pm_domain_detach(dev, true); | ||
187 | break; | 193 | break; |
194 | } | ||
188 | 195 | ||
189 | pm_runtime_get_noresume(dev); | 196 | pm_runtime_get_noresume(dev); |
190 | pm_runtime_set_active(dev); | 197 | pm_runtime_set_active(dev); |
@@ -199,6 +206,7 @@ static int amba_probe(struct device *dev) | |||
199 | pm_runtime_put_noidle(dev); | 206 | pm_runtime_put_noidle(dev); |
200 | 207 | ||
201 | amba_put_disable_pclk(pcdev); | 208 | amba_put_disable_pclk(pcdev); |
209 | dev_pm_domain_detach(dev, true); | ||
202 | } while (0); | 210 | } while (0); |
203 | 211 | ||
204 | return ret; | 212 | return ret; |
@@ -220,6 +228,7 @@ static int amba_remove(struct device *dev) | |||
220 | pm_runtime_put_noidle(dev); | 228 | pm_runtime_put_noidle(dev); |
221 | 229 | ||
222 | amba_put_disable_pclk(pcdev); | 230 | amba_put_disable_pclk(pcdev); |
231 | dev_pm_domain_detach(dev, true); | ||
223 | 232 | ||
224 | return ret; | 233 | return ret; |
225 | } | 234 | } |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index ab4f4ce02722..b2afc29403f9 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/pm_runtime.h> | 23 | #include <linux/pm_runtime.h> |
24 | #include <linux/pm_domain.h> | ||
24 | #include <linux/idr.h> | 25 | #include <linux/idr.h> |
25 | #include <linux/acpi.h> | 26 | #include <linux/acpi.h> |
26 | #include <linux/clk/clk-conf.h> | 27 | #include <linux/clk/clk-conf.h> |
@@ -506,11 +507,12 @@ static int platform_drv_probe(struct device *_dev) | |||
506 | if (ret < 0) | 507 | if (ret < 0) |
507 | return ret; | 508 | return ret; |
508 | 509 | ||
509 | acpi_dev_pm_attach(_dev, true); | 510 | ret = dev_pm_domain_attach(_dev, true); |
510 | 511 | if (ret != -EPROBE_DEFER) { | |
511 | ret = drv->probe(dev); | 512 | ret = drv->probe(dev); |
512 | if (ret) | 513 | if (ret) |
513 | acpi_dev_pm_detach(_dev, true); | 514 | dev_pm_domain_detach(_dev, true); |
515 | } | ||
514 | 516 | ||
515 | if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { | 517 | if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { |
516 | dev_warn(_dev, "probe deferral not supported\n"); | 518 | dev_warn(_dev, "probe deferral not supported\n"); |
@@ -532,7 +534,7 @@ static int platform_drv_remove(struct device *_dev) | |||
532 | int ret; | 534 | int ret; |
533 | 535 | ||
534 | ret = drv->remove(dev); | 536 | ret = drv->remove(dev); |
535 | acpi_dev_pm_detach(_dev, true); | 537 | dev_pm_domain_detach(_dev, true); |
536 | 538 | ||
537 | return ret; | 539 | return ret; |
538 | } | 540 | } |
@@ -543,7 +545,7 @@ static void platform_drv_shutdown(struct device *_dev) | |||
543 | struct platform_device *dev = to_platform_device(_dev); | 545 | struct platform_device *dev = to_platform_device(_dev); |
544 | 546 | ||
545 | drv->shutdown(dev); | 547 | drv->shutdown(dev); |
546 | acpi_dev_pm_detach(_dev, true); | 548 | dev_pm_domain_detach(_dev, true); |
547 | } | 549 | } |
548 | 550 | ||
549 | /** | 551 | /** |
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index df2e5eeaeb05..b0f138806bbc 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/export.h> | 11 | #include <linux/export.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/pm_clock.h> | 13 | #include <linux/pm_clock.h> |
14 | #include <linux/acpi.h> | ||
15 | #include <linux/pm_domain.h> | ||
14 | 16 | ||
15 | /** | 17 | /** |
16 | * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. | 18 | * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. |
@@ -82,3 +84,53 @@ int dev_pm_put_subsys_data(struct device *dev) | |||
82 | return ret; | 84 | return ret; |
83 | } | 85 | } |
84 | EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); | 86 | EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); |
87 | |||
88 | /** | ||
89 | * dev_pm_domain_attach - Attach a device to its PM domain. | ||
90 | * @dev: Device to attach. | ||
91 | * @power_on: Used to indicate whether we should power on the device. | ||
92 | * | ||
93 | * The @dev may only be attached to a single PM domain. By iterating through | ||
94 | * the available alternatives we try to find a valid PM domain for the device. | ||
95 | * As attachment succeeds, the ->detach() callback in the struct dev_pm_domain | ||
96 | * should be assigned by the corresponding attach function. | ||
97 | * | ||
98 | * This function should typically be invoked from subsystem level code during | ||
99 | * the probe phase. Especially for those that holds devices which requires | ||
100 | * power management through PM domains. | ||
101 | * | ||
102 | * Callers must ensure proper synchronization of this function with power | ||
103 | * management callbacks. | ||
104 | * | ||
105 | * Returns 0 on successfully attached PM domain or negative error code. | ||
106 | */ | ||
107 | int dev_pm_domain_attach(struct device *dev, bool power_on) | ||
108 | { | ||
109 | int ret; | ||
110 | |||
111 | ret = acpi_dev_pm_attach(dev, power_on); | ||
112 | if (ret) | ||
113 | ret = genpd_dev_pm_attach(dev); | ||
114 | |||
115 | return ret; | ||
116 | } | ||
117 | EXPORT_SYMBOL_GPL(dev_pm_domain_attach); | ||
118 | |||
119 | /** | ||
120 | * dev_pm_domain_detach - Detach a device from its PM domain. | ||
121 | * @dev: Device to attach. | ||
122 | * @power_off: Used to indicate whether we should power off the device. | ||
123 | * | ||
124 | * This functions will reverse the actions from dev_pm_domain_attach() and thus | ||
125 | * try to detach the @dev from its PM domain. Typically it should be invoked | ||
126 | * from subsystem level code during the remove phase. | ||
127 | * | ||
128 | * Callers must ensure proper synchronization of this function with power | ||
129 | * management callbacks. | ||
130 | */ | ||
131 | void dev_pm_domain_detach(struct device *dev, bool power_off) | ||
132 | { | ||
133 | if (dev->pm_domain && dev->pm_domain->detach) | ||
134 | dev->pm_domain->detach(dev, power_off); | ||
135 | } | ||
136 | EXPORT_SYMBOL_GPL(dev_pm_domain_detach); | ||
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index eee55c1e5fde..40bc2f4072cc 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/io.h> | 10 | #include <linux/io.h> |
11 | #include <linux/platform_device.h> | ||
11 | #include <linux/pm_runtime.h> | 12 | #include <linux/pm_runtime.h> |
12 | #include <linux/pm_domain.h> | 13 | #include <linux/pm_domain.h> |
13 | #include <linux/pm_qos.h> | 14 | #include <linux/pm_qos.h> |
@@ -25,10 +26,6 @@ | |||
25 | __routine = genpd->dev_ops.callback; \ | 26 | __routine = genpd->dev_ops.callback; \ |
26 | if (__routine) { \ | 27 | if (__routine) { \ |
27 | __ret = __routine(dev); \ | 28 | __ret = __routine(dev); \ |
28 | } else { \ | ||
29 | __routine = dev_gpd_data(dev)->ops.callback; \ | ||
30 | if (__routine) \ | ||
31 | __ret = __routine(dev); \ | ||
32 | } \ | 29 | } \ |
33 | __ret; \ | 30 | __ret; \ |
34 | }) | 31 | }) |
@@ -70,8 +67,6 @@ static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) | |||
70 | return genpd; | 67 | return genpd; |
71 | } | 68 | } |
72 | 69 | ||
73 | #ifdef CONFIG_PM | ||
74 | |||
75 | struct generic_pm_domain *dev_to_genpd(struct device *dev) | 70 | struct generic_pm_domain *dev_to_genpd(struct device *dev) |
76 | { | 71 | { |
77 | if (IS_ERR_OR_NULL(dev->pm_domain)) | 72 | if (IS_ERR_OR_NULL(dev->pm_domain)) |
@@ -147,13 +142,13 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) | |||
147 | { | 142 | { |
148 | s64 usecs64; | 143 | s64 usecs64; |
149 | 144 | ||
150 | if (!genpd->cpu_data) | 145 | if (!genpd->cpuidle_data) |
151 | return; | 146 | return; |
152 | 147 | ||
153 | usecs64 = genpd->power_on_latency_ns; | 148 | usecs64 = genpd->power_on_latency_ns; |
154 | do_div(usecs64, NSEC_PER_USEC); | 149 | do_div(usecs64, NSEC_PER_USEC); |
155 | usecs64 += genpd->cpu_data->saved_exit_latency; | 150 | usecs64 += genpd->cpuidle_data->saved_exit_latency; |
156 | genpd->cpu_data->idle_state->exit_latency = usecs64; | 151 | genpd->cpuidle_data->idle_state->exit_latency = usecs64; |
157 | } | 152 | } |
158 | 153 | ||
159 | /** | 154 | /** |
@@ -193,9 +188,9 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) | |||
193 | return 0; | 188 | return 0; |
194 | } | 189 | } |
195 | 190 | ||
196 | if (genpd->cpu_data) { | 191 | if (genpd->cpuidle_data) { |
197 | cpuidle_pause_and_lock(); | 192 | cpuidle_pause_and_lock(); |
198 | genpd->cpu_data->idle_state->disabled = true; | 193 | genpd->cpuidle_data->idle_state->disabled = true; |
199 | cpuidle_resume_and_unlock(); | 194 | cpuidle_resume_and_unlock(); |
200 | goto out; | 195 | goto out; |
201 | } | 196 | } |
@@ -285,8 +280,6 @@ int pm_genpd_name_poweron(const char *domain_name) | |||
285 | return genpd ? pm_genpd_poweron(genpd) : -EINVAL; | 280 | return genpd ? pm_genpd_poweron(genpd) : -EINVAL; |
286 | } | 281 | } |
287 | 282 | ||
288 | #endif /* CONFIG_PM */ | ||
289 | |||
290 | #ifdef CONFIG_PM_RUNTIME | 283 | #ifdef CONFIG_PM_RUNTIME |
291 | 284 | ||
292 | static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, | 285 | static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, |
@@ -430,7 +423,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) | |||
430 | * Queue up the execution of pm_genpd_poweroff() unless it's already been done | 423 | * Queue up the execution of pm_genpd_poweroff() unless it's already been done |
431 | * before. | 424 | * before. |
432 | */ | 425 | */ |
433 | void genpd_queue_power_off_work(struct generic_pm_domain *genpd) | 426 | static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) |
434 | { | 427 | { |
435 | queue_work(pm_wq, &genpd->power_off_work); | 428 | queue_work(pm_wq, &genpd->power_off_work); |
436 | } | 429 | } |
@@ -520,17 +513,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
520 | } | 513 | } |
521 | } | 514 | } |
522 | 515 | ||
523 | if (genpd->cpu_data) { | 516 | if (genpd->cpuidle_data) { |
524 | /* | 517 | /* |
525 | * If cpu_data is set, cpuidle should turn the domain off when | 518 | * If cpuidle_data is set, cpuidle should turn the domain off |
526 | * the CPU in it is idle. In that case we don't decrement the | 519 | * when the CPU in it is idle. In that case we don't decrement |
527 | * subdomain counts of the master domains, so that power is not | 520 | * the subdomain counts of the master domains, so that power is |
528 | * removed from the current domain prematurely as a result of | 521 | * not removed from the current domain prematurely as a result |
529 | * cutting off the masters' power. | 522 | * of cutting off the masters' power. |
530 | */ | 523 | */ |
531 | genpd->status = GPD_STATE_POWER_OFF; | 524 | genpd->status = GPD_STATE_POWER_OFF; |
532 | cpuidle_pause_and_lock(); | 525 | cpuidle_pause_and_lock(); |
533 | genpd->cpu_data->idle_state->disabled = false; | 526 | genpd->cpuidle_data->idle_state->disabled = false; |
534 | cpuidle_resume_and_unlock(); | 527 | cpuidle_resume_and_unlock(); |
535 | goto out; | 528 | goto out; |
536 | } | 529 | } |
@@ -619,8 +612,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
619 | if (IS_ERR(genpd)) | 612 | if (IS_ERR(genpd)) |
620 | return -EINVAL; | 613 | return -EINVAL; |
621 | 614 | ||
622 | might_sleep_if(!genpd->dev_irq_safe); | ||
623 | |||
624 | stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; | 615 | stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; |
625 | if (stop_ok && !stop_ok(dev)) | 616 | if (stop_ok && !stop_ok(dev)) |
626 | return -EBUSY; | 617 | return -EBUSY; |
@@ -665,8 +656,6 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
665 | if (IS_ERR(genpd)) | 656 | if (IS_ERR(genpd)) |
666 | return -EINVAL; | 657 | return -EINVAL; |
667 | 658 | ||
668 | might_sleep_if(!genpd->dev_irq_safe); | ||
669 | |||
670 | /* If power.irq_safe, the PM domain is never powered off. */ | 659 | /* If power.irq_safe, the PM domain is never powered off. */ |
671 | if (dev->power.irq_safe) | 660 | if (dev->power.irq_safe) |
672 | return genpd_start_dev_no_timing(genpd, dev); | 661 | return genpd_start_dev_no_timing(genpd, dev); |
@@ -733,6 +722,13 @@ void pm_genpd_poweroff_unused(void) | |||
733 | mutex_unlock(&gpd_list_lock); | 722 | mutex_unlock(&gpd_list_lock); |
734 | } | 723 | } |
735 | 724 | ||
725 | static int __init genpd_poweroff_unused(void) | ||
726 | { | ||
727 | pm_genpd_poweroff_unused(); | ||
728 | return 0; | ||
729 | } | ||
730 | late_initcall(genpd_poweroff_unused); | ||
731 | |||
736 | #else | 732 | #else |
737 | 733 | ||
738 | static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, | 734 | static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, |
@@ -741,6 +737,9 @@ static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, | |||
741 | return NOTIFY_DONE; | 737 | return NOTIFY_DONE; |
742 | } | 738 | } |
743 | 739 | ||
740 | static inline void | ||
741 | genpd_queue_power_off_work(struct generic_pm_domain *genpd) {} | ||
742 | |||
744 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} | 743 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} |
745 | 744 | ||
746 | #define pm_genpd_runtime_suspend NULL | 745 | #define pm_genpd_runtime_suspend NULL |
@@ -774,46 +773,6 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, | |||
774 | return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); | 773 | return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); |
775 | } | 774 | } |
776 | 775 | ||
777 | static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
778 | { | ||
779 | return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); | ||
780 | } | ||
781 | |||
782 | static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) | ||
783 | { | ||
784 | return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); | ||
785 | } | ||
786 | |||
787 | static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) | ||
788 | { | ||
789 | return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); | ||
790 | } | ||
791 | |||
792 | static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
793 | { | ||
794 | return GENPD_DEV_CALLBACK(genpd, int, resume, dev); | ||
795 | } | ||
796 | |||
797 | static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
798 | { | ||
799 | return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); | ||
800 | } | ||
801 | |||
802 | static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) | ||
803 | { | ||
804 | return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); | ||
805 | } | ||
806 | |||
807 | static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) | ||
808 | { | ||
809 | return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); | ||
810 | } | ||
811 | |||
812 | static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
813 | { | ||
814 | return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); | ||
815 | } | ||
816 | |||
817 | /** | 776 | /** |
818 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. | 777 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. |
819 | * @genpd: PM domain to power off, if possible. | 778 | * @genpd: PM domain to power off, if possible. |
@@ -995,7 +954,7 @@ static int pm_genpd_suspend(struct device *dev) | |||
995 | if (IS_ERR(genpd)) | 954 | if (IS_ERR(genpd)) |
996 | return -EINVAL; | 955 | return -EINVAL; |
997 | 956 | ||
998 | return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); | 957 | return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); |
999 | } | 958 | } |
1000 | 959 | ||
1001 | /** | 960 | /** |
@@ -1016,7 +975,7 @@ static int pm_genpd_suspend_late(struct device *dev) | |||
1016 | if (IS_ERR(genpd)) | 975 | if (IS_ERR(genpd)) |
1017 | return -EINVAL; | 976 | return -EINVAL; |
1018 | 977 | ||
1019 | return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev); | 978 | return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev); |
1020 | } | 979 | } |
1021 | 980 | ||
1022 | /** | 981 | /** |
@@ -1103,7 +1062,7 @@ static int pm_genpd_resume_early(struct device *dev) | |||
1103 | if (IS_ERR(genpd)) | 1062 | if (IS_ERR(genpd)) |
1104 | return -EINVAL; | 1063 | return -EINVAL; |
1105 | 1064 | ||
1106 | return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev); | 1065 | return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev); |
1107 | } | 1066 | } |
1108 | 1067 | ||
1109 | /** | 1068 | /** |
@@ -1124,7 +1083,7 @@ static int pm_genpd_resume(struct device *dev) | |||
1124 | if (IS_ERR(genpd)) | 1083 | if (IS_ERR(genpd)) |
1125 | return -EINVAL; | 1084 | return -EINVAL; |
1126 | 1085 | ||
1127 | return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); | 1086 | return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); |
1128 | } | 1087 | } |
1129 | 1088 | ||
1130 | /** | 1089 | /** |
@@ -1145,7 +1104,7 @@ static int pm_genpd_freeze(struct device *dev) | |||
1145 | if (IS_ERR(genpd)) | 1104 | if (IS_ERR(genpd)) |
1146 | return -EINVAL; | 1105 | return -EINVAL; |
1147 | 1106 | ||
1148 | return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); | 1107 | return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); |
1149 | } | 1108 | } |
1150 | 1109 | ||
1151 | /** | 1110 | /** |
@@ -1167,7 +1126,7 @@ static int pm_genpd_freeze_late(struct device *dev) | |||
1167 | if (IS_ERR(genpd)) | 1126 | if (IS_ERR(genpd)) |
1168 | return -EINVAL; | 1127 | return -EINVAL; |
1169 | 1128 | ||
1170 | return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev); | 1129 | return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev); |
1171 | } | 1130 | } |
1172 | 1131 | ||
1173 | /** | 1132 | /** |
@@ -1231,7 +1190,7 @@ static int pm_genpd_thaw_early(struct device *dev) | |||
1231 | if (IS_ERR(genpd)) | 1190 | if (IS_ERR(genpd)) |
1232 | return -EINVAL; | 1191 | return -EINVAL; |
1233 | 1192 | ||
1234 | return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev); | 1193 | return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev); |
1235 | } | 1194 | } |
1236 | 1195 | ||
1237 | /** | 1196 | /** |
@@ -1252,7 +1211,7 @@ static int pm_genpd_thaw(struct device *dev) | |||
1252 | if (IS_ERR(genpd)) | 1211 | if (IS_ERR(genpd)) |
1253 | return -EINVAL; | 1212 | return -EINVAL; |
1254 | 1213 | ||
1255 | return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); | 1214 | return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); |
1256 | } | 1215 | } |
1257 | 1216 | ||
1258 | /** | 1217 | /** |
@@ -1344,13 +1303,13 @@ static void pm_genpd_complete(struct device *dev) | |||
1344 | } | 1303 | } |
1345 | 1304 | ||
1346 | /** | 1305 | /** |
1347 | * pm_genpd_syscore_switch - Switch power during system core suspend or resume. | 1306 | * genpd_syscore_switch - Switch power during system core suspend or resume. |
1348 | * @dev: Device that normally is marked as "always on" to switch power for. | 1307 | * @dev: Device that normally is marked as "always on" to switch power for. |
1349 | * | 1308 | * |
1350 | * This routine may only be called during the system core (syscore) suspend or | 1309 | * This routine may only be called during the system core (syscore) suspend or |
1351 | * resume phase for devices whose "always on" flags are set. | 1310 | * resume phase for devices whose "always on" flags are set. |
1352 | */ | 1311 | */ |
1353 | void pm_genpd_syscore_switch(struct device *dev, bool suspend) | 1312 | static void genpd_syscore_switch(struct device *dev, bool suspend) |
1354 | { | 1313 | { |
1355 | struct generic_pm_domain *genpd; | 1314 | struct generic_pm_domain *genpd; |
1356 | 1315 | ||
@@ -1366,7 +1325,18 @@ void pm_genpd_syscore_switch(struct device *dev, bool suspend) | |||
1366 | genpd->suspended_count--; | 1325 | genpd->suspended_count--; |
1367 | } | 1326 | } |
1368 | } | 1327 | } |
1369 | EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch); | 1328 | |
1329 | void pm_genpd_syscore_poweroff(struct device *dev) | ||
1330 | { | ||
1331 | genpd_syscore_switch(dev, true); | ||
1332 | } | ||
1333 | EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff); | ||
1334 | |||
1335 | void pm_genpd_syscore_poweron(struct device *dev) | ||
1336 | { | ||
1337 | genpd_syscore_switch(dev, false); | ||
1338 | } | ||
1339 | EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); | ||
1370 | 1340 | ||
1371 | #else | 1341 | #else |
1372 | 1342 | ||
@@ -1466,6 +1436,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | |||
1466 | 1436 | ||
1467 | spin_unlock_irq(&dev->power.lock); | 1437 | spin_unlock_irq(&dev->power.lock); |
1468 | 1438 | ||
1439 | if (genpd->attach_dev) | ||
1440 | genpd->attach_dev(dev); | ||
1441 | |||
1469 | mutex_lock(&gpd_data->lock); | 1442 | mutex_lock(&gpd_data->lock); |
1470 | gpd_data->base.dev = dev; | 1443 | gpd_data->base.dev = dev; |
1471 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); | 1444 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); |
@@ -1484,39 +1457,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | |||
1484 | } | 1457 | } |
1485 | 1458 | ||
1486 | /** | 1459 | /** |
1487 | * __pm_genpd_of_add_device - Add a device to an I/O PM domain. | ||
1488 | * @genpd_node: Device tree node pointer representing a PM domain to which the | ||
1489 | * the device is added to. | ||
1490 | * @dev: Device to be added. | ||
1491 | * @td: Set of PM QoS timing parameters to attach to the device. | ||
1492 | */ | ||
1493 | int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev, | ||
1494 | struct gpd_timing_data *td) | ||
1495 | { | ||
1496 | struct generic_pm_domain *genpd = NULL, *gpd; | ||
1497 | |||
1498 | dev_dbg(dev, "%s()\n", __func__); | ||
1499 | |||
1500 | if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev)) | ||
1501 | return -EINVAL; | ||
1502 | |||
1503 | mutex_lock(&gpd_list_lock); | ||
1504 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | ||
1505 | if (gpd->of_node == genpd_node) { | ||
1506 | genpd = gpd; | ||
1507 | break; | ||
1508 | } | ||
1509 | } | ||
1510 | mutex_unlock(&gpd_list_lock); | ||
1511 | |||
1512 | if (!genpd) | ||
1513 | return -EINVAL; | ||
1514 | |||
1515 | return __pm_genpd_add_device(genpd, dev, td); | ||
1516 | } | ||
1517 | |||
1518 | |||
1519 | /** | ||
1520 | * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. | 1460 | * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. |
1521 | * @domain_name: Name of the PM domain to add the device to. | 1461 | * @domain_name: Name of the PM domain to add the device to. |
1522 | * @dev: Device to be added. | 1462 | * @dev: Device to be added. |
@@ -1558,6 +1498,9 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1558 | genpd->device_count--; | 1498 | genpd->device_count--; |
1559 | genpd->max_off_time_changed = true; | 1499 | genpd->max_off_time_changed = true; |
1560 | 1500 | ||
1501 | if (genpd->detach_dev) | ||
1502 | genpd->detach_dev(dev); | ||
1503 | |||
1561 | spin_lock_irq(&dev->power.lock); | 1504 | spin_lock_irq(&dev->power.lock); |
1562 | 1505 | ||
1563 | dev->pm_domain = NULL; | 1506 | dev->pm_domain = NULL; |
@@ -1744,112 +1687,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
1744 | } | 1687 | } |
1745 | 1688 | ||
1746 | /** | 1689 | /** |
1747 | * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. | ||
1748 | * @dev: Device to add the callbacks to. | ||
1749 | * @ops: Set of callbacks to add. | ||
1750 | * @td: Timing data to add to the device along with the callbacks (optional). | ||
1751 | * | ||
1752 | * Every call to this routine should be balanced with a call to | ||
1753 | * __pm_genpd_remove_callbacks() and they must not be nested. | ||
1754 | */ | ||
1755 | int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, | ||
1756 | struct gpd_timing_data *td) | ||
1757 | { | ||
1758 | struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; | ||
1759 | int ret = 0; | ||
1760 | |||
1761 | if (!(dev && ops)) | ||
1762 | return -EINVAL; | ||
1763 | |||
1764 | gpd_data_new = __pm_genpd_alloc_dev_data(dev); | ||
1765 | if (!gpd_data_new) | ||
1766 | return -ENOMEM; | ||
1767 | |||
1768 | pm_runtime_disable(dev); | ||
1769 | device_pm_lock(); | ||
1770 | |||
1771 | ret = dev_pm_get_subsys_data(dev); | ||
1772 | if (ret) | ||
1773 | goto out; | ||
1774 | |||
1775 | spin_lock_irq(&dev->power.lock); | ||
1776 | |||
1777 | if (dev->power.subsys_data->domain_data) { | ||
1778 | gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); | ||
1779 | } else { | ||
1780 | gpd_data = gpd_data_new; | ||
1781 | dev->power.subsys_data->domain_data = &gpd_data->base; | ||
1782 | } | ||
1783 | gpd_data->refcount++; | ||
1784 | gpd_data->ops = *ops; | ||
1785 | if (td) | ||
1786 | gpd_data->td = *td; | ||
1787 | |||
1788 | spin_unlock_irq(&dev->power.lock); | ||
1789 | |||
1790 | out: | ||
1791 | device_pm_unlock(); | ||
1792 | pm_runtime_enable(dev); | ||
1793 | |||
1794 | if (gpd_data != gpd_data_new) | ||
1795 | __pm_genpd_free_dev_data(dev, gpd_data_new); | ||
1796 | |||
1797 | return ret; | ||
1798 | } | ||
1799 | EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); | ||
1800 | |||
1801 | /** | ||
1802 | * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. | ||
1803 | * @dev: Device to remove the callbacks from. | ||
1804 | * @clear_td: If set, clear the device's timing data too. | ||
1805 | * | ||
1806 | * This routine can only be called after pm_genpd_add_callbacks(). | ||
1807 | */ | ||
1808 | int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) | ||
1809 | { | ||
1810 | struct generic_pm_domain_data *gpd_data = NULL; | ||
1811 | bool remove = false; | ||
1812 | int ret = 0; | ||
1813 | |||
1814 | if (!(dev && dev->power.subsys_data)) | ||
1815 | return -EINVAL; | ||
1816 | |||
1817 | pm_runtime_disable(dev); | ||
1818 | device_pm_lock(); | ||
1819 | |||
1820 | spin_lock_irq(&dev->power.lock); | ||
1821 | |||
1822 | if (dev->power.subsys_data->domain_data) { | ||
1823 | gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); | ||
1824 | gpd_data->ops = (struct gpd_dev_ops){ NULL }; | ||
1825 | if (clear_td) | ||
1826 | gpd_data->td = (struct gpd_timing_data){ 0 }; | ||
1827 | |||
1828 | if (--gpd_data->refcount == 0) { | ||
1829 | dev->power.subsys_data->domain_data = NULL; | ||
1830 | remove = true; | ||
1831 | } | ||
1832 | } else { | ||
1833 | ret = -EINVAL; | ||
1834 | } | ||
1835 | |||
1836 | spin_unlock_irq(&dev->power.lock); | ||
1837 | |||
1838 | device_pm_unlock(); | ||
1839 | pm_runtime_enable(dev); | ||
1840 | |||
1841 | if (ret) | ||
1842 | return ret; | ||
1843 | |||
1844 | dev_pm_put_subsys_data(dev); | ||
1845 | if (remove) | ||
1846 | __pm_genpd_free_dev_data(dev, gpd_data); | ||
1847 | |||
1848 | return 0; | ||
1849 | } | ||
1850 | EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); | ||
1851 | |||
1852 | /** | ||
1853 | * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. | 1690 | * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. |
1854 | * @genpd: PM domain to be connected with cpuidle. | 1691 | * @genpd: PM domain to be connected with cpuidle. |
1855 | * @state: cpuidle state this domain can disable/enable. | 1692 | * @state: cpuidle state this domain can disable/enable. |
@@ -1861,7 +1698,7 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); | |||
1861 | int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) | 1698 | int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) |
1862 | { | 1699 | { |
1863 | struct cpuidle_driver *cpuidle_drv; | 1700 | struct cpuidle_driver *cpuidle_drv; |
1864 | struct gpd_cpu_data *cpu_data; | 1701 | struct gpd_cpuidle_data *cpuidle_data; |
1865 | struct cpuidle_state *idle_state; | 1702 | struct cpuidle_state *idle_state; |
1866 | int ret = 0; | 1703 | int ret = 0; |
1867 | 1704 | ||
@@ -1870,12 +1707,12 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) | |||
1870 | 1707 | ||
1871 | genpd_acquire_lock(genpd); | 1708 | genpd_acquire_lock(genpd); |
1872 | 1709 | ||
1873 | if (genpd->cpu_data) { | 1710 | if (genpd->cpuidle_data) { |
1874 | ret = -EEXIST; | 1711 | ret = -EEXIST; |
1875 | goto out; | 1712 | goto out; |
1876 | } | 1713 | } |
1877 | cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL); | 1714 | cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL); |
1878 | if (!cpu_data) { | 1715 | if (!cpuidle_data) { |
1879 | ret = -ENOMEM; | 1716 | ret = -ENOMEM; |
1880 | goto out; | 1717 | goto out; |
1881 | } | 1718 | } |
@@ -1893,9 +1730,9 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) | |||
1893 | ret = -EAGAIN; | 1730 | ret = -EAGAIN; |
1894 | goto err; | 1731 | goto err; |
1895 | } | 1732 | } |
1896 | cpu_data->idle_state = idle_state; | 1733 | cpuidle_data->idle_state = idle_state; |
1897 | cpu_data->saved_exit_latency = idle_state->exit_latency; | 1734 | cpuidle_data->saved_exit_latency = idle_state->exit_latency; |
1898 | genpd->cpu_data = cpu_data; | 1735 | genpd->cpuidle_data = cpuidle_data; |
1899 | genpd_recalc_cpu_exit_latency(genpd); | 1736 | genpd_recalc_cpu_exit_latency(genpd); |
1900 | 1737 | ||
1901 | out: | 1738 | out: |
@@ -1906,7 +1743,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) | |||
1906 | cpuidle_driver_unref(); | 1743 | cpuidle_driver_unref(); |
1907 | 1744 | ||
1908 | err_drv: | 1745 | err_drv: |
1909 | kfree(cpu_data); | 1746 | kfree(cpuidle_data); |
1910 | goto out; | 1747 | goto out; |
1911 | } | 1748 | } |
1912 | 1749 | ||
@@ -1929,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state) | |||
1929 | */ | 1766 | */ |
1930 | int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) | 1767 | int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) |
1931 | { | 1768 | { |
1932 | struct gpd_cpu_data *cpu_data; | 1769 | struct gpd_cpuidle_data *cpuidle_data; |
1933 | struct cpuidle_state *idle_state; | 1770 | struct cpuidle_state *idle_state; |
1934 | int ret = 0; | 1771 | int ret = 0; |
1935 | 1772 | ||
@@ -1938,20 +1775,20 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) | |||
1938 | 1775 | ||
1939 | genpd_acquire_lock(genpd); | 1776 | genpd_acquire_lock(genpd); |
1940 | 1777 | ||
1941 | cpu_data = genpd->cpu_data; | 1778 | cpuidle_data = genpd->cpuidle_data; |
1942 | if (!cpu_data) { | 1779 | if (!cpuidle_data) { |
1943 | ret = -ENODEV; | 1780 | ret = -ENODEV; |
1944 | goto out; | 1781 | goto out; |
1945 | } | 1782 | } |
1946 | idle_state = cpu_data->idle_state; | 1783 | idle_state = cpuidle_data->idle_state; |
1947 | if (!idle_state->disabled) { | 1784 | if (!idle_state->disabled) { |
1948 | ret = -EAGAIN; | 1785 | ret = -EAGAIN; |
1949 | goto out; | 1786 | goto out; |
1950 | } | 1787 | } |
1951 | idle_state->exit_latency = cpu_data->saved_exit_latency; | 1788 | idle_state->exit_latency = cpuidle_data->saved_exit_latency; |
1952 | cpuidle_driver_unref(); | 1789 | cpuidle_driver_unref(); |
1953 | genpd->cpu_data = NULL; | 1790 | genpd->cpuidle_data = NULL; |
1954 | kfree(cpu_data); | 1791 | kfree(cpuidle_data); |
1955 | 1792 | ||
1956 | out: | 1793 | out: |
1957 | genpd_release_lock(genpd); | 1794 | genpd_release_lock(genpd); |
@@ -1970,17 +1807,13 @@ int pm_genpd_name_detach_cpuidle(const char *name) | |||
1970 | /* Default device callbacks for generic PM domains. */ | 1807 | /* Default device callbacks for generic PM domains. */ |
1971 | 1808 | ||
1972 | /** | 1809 | /** |
1973 | * pm_genpd_default_save_state - Default "save device state" for PM domians. | 1810 | * pm_genpd_default_save_state - Default "save device state" for PM domains. |
1974 | * @dev: Device to handle. | 1811 | * @dev: Device to handle. |
1975 | */ | 1812 | */ |
1976 | static int pm_genpd_default_save_state(struct device *dev) | 1813 | static int pm_genpd_default_save_state(struct device *dev) |
1977 | { | 1814 | { |
1978 | int (*cb)(struct device *__dev); | 1815 | int (*cb)(struct device *__dev); |
1979 | 1816 | ||
1980 | cb = dev_gpd_data(dev)->ops.save_state; | ||
1981 | if (cb) | ||
1982 | return cb(dev); | ||
1983 | |||
1984 | if (dev->type && dev->type->pm) | 1817 | if (dev->type && dev->type->pm) |
1985 | cb = dev->type->pm->runtime_suspend; | 1818 | cb = dev->type->pm->runtime_suspend; |
1986 | else if (dev->class && dev->class->pm) | 1819 | else if (dev->class && dev->class->pm) |
@@ -1997,17 +1830,13 @@ static int pm_genpd_default_save_state(struct device *dev) | |||
1997 | } | 1830 | } |
1998 | 1831 | ||
1999 | /** | 1832 | /** |
2000 | * pm_genpd_default_restore_state - Default PM domians "restore device state". | 1833 | * pm_genpd_default_restore_state - Default PM domains "restore device state". |
2001 | * @dev: Device to handle. | 1834 | * @dev: Device to handle. |
2002 | */ | 1835 | */ |
2003 | static int pm_genpd_default_restore_state(struct device *dev) | 1836 | static int pm_genpd_default_restore_state(struct device *dev) |
2004 | { | 1837 | { |
2005 | int (*cb)(struct device *__dev); | 1838 | int (*cb)(struct device *__dev); |
2006 | 1839 | ||
2007 | cb = dev_gpd_data(dev)->ops.restore_state; | ||
2008 | if (cb) | ||
2009 | return cb(dev); | ||
2010 | |||
2011 | if (dev->type && dev->type->pm) | 1840 | if (dev->type && dev->type->pm) |
2012 | cb = dev->type->pm->runtime_resume; | 1841 | cb = dev->type->pm->runtime_resume; |
2013 | else if (dev->class && dev->class->pm) | 1842 | else if (dev->class && dev->class->pm) |
@@ -2023,109 +1852,6 @@ static int pm_genpd_default_restore_state(struct device *dev) | |||
2023 | return cb ? cb(dev) : 0; | 1852 | return cb ? cb(dev) : 0; |
2024 | } | 1853 | } |
2025 | 1854 | ||
2026 | #ifdef CONFIG_PM_SLEEP | ||
2027 | |||
2028 | /** | ||
2029 | * pm_genpd_default_suspend - Default "device suspend" for PM domians. | ||
2030 | * @dev: Device to handle. | ||
2031 | */ | ||
2032 | static int pm_genpd_default_suspend(struct device *dev) | ||
2033 | { | ||
2034 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; | ||
2035 | |||
2036 | return cb ? cb(dev) : pm_generic_suspend(dev); | ||
2037 | } | ||
2038 | |||
2039 | /** | ||
2040 | * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians. | ||
2041 | * @dev: Device to handle. | ||
2042 | */ | ||
2043 | static int pm_genpd_default_suspend_late(struct device *dev) | ||
2044 | { | ||
2045 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; | ||
2046 | |||
2047 | return cb ? cb(dev) : pm_generic_suspend_late(dev); | ||
2048 | } | ||
2049 | |||
2050 | /** | ||
2051 | * pm_genpd_default_resume_early - Default "early device resume" for PM domians. | ||
2052 | * @dev: Device to handle. | ||
2053 | */ | ||
2054 | static int pm_genpd_default_resume_early(struct device *dev) | ||
2055 | { | ||
2056 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; | ||
2057 | |||
2058 | return cb ? cb(dev) : pm_generic_resume_early(dev); | ||
2059 | } | ||
2060 | |||
2061 | /** | ||
2062 | * pm_genpd_default_resume - Default "device resume" for PM domians. | ||
2063 | * @dev: Device to handle. | ||
2064 | */ | ||
2065 | static int pm_genpd_default_resume(struct device *dev) | ||
2066 | { | ||
2067 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; | ||
2068 | |||
2069 | return cb ? cb(dev) : pm_generic_resume(dev); | ||
2070 | } | ||
2071 | |||
2072 | /** | ||
2073 | * pm_genpd_default_freeze - Default "device freeze" for PM domians. | ||
2074 | * @dev: Device to handle. | ||
2075 | */ | ||
2076 | static int pm_genpd_default_freeze(struct device *dev) | ||
2077 | { | ||
2078 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; | ||
2079 | |||
2080 | return cb ? cb(dev) : pm_generic_freeze(dev); | ||
2081 | } | ||
2082 | |||
2083 | /** | ||
2084 | * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians. | ||
2085 | * @dev: Device to handle. | ||
2086 | */ | ||
2087 | static int pm_genpd_default_freeze_late(struct device *dev) | ||
2088 | { | ||
2089 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; | ||
2090 | |||
2091 | return cb ? cb(dev) : pm_generic_freeze_late(dev); | ||
2092 | } | ||
2093 | |||
2094 | /** | ||
2095 | * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians. | ||
2096 | * @dev: Device to handle. | ||
2097 | */ | ||
2098 | static int pm_genpd_default_thaw_early(struct device *dev) | ||
2099 | { | ||
2100 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; | ||
2101 | |||
2102 | return cb ? cb(dev) : pm_generic_thaw_early(dev); | ||
2103 | } | ||
2104 | |||
2105 | /** | ||
2106 | * pm_genpd_default_thaw - Default "device thaw" for PM domians. | ||
2107 | * @dev: Device to handle. | ||
2108 | */ | ||
2109 | static int pm_genpd_default_thaw(struct device *dev) | ||
2110 | { | ||
2111 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; | ||
2112 | |||
2113 | return cb ? cb(dev) : pm_generic_thaw(dev); | ||
2114 | } | ||
2115 | |||
2116 | #else /* !CONFIG_PM_SLEEP */ | ||
2117 | |||
2118 | #define pm_genpd_default_suspend NULL | ||
2119 | #define pm_genpd_default_suspend_late NULL | ||
2120 | #define pm_genpd_default_resume_early NULL | ||
2121 | #define pm_genpd_default_resume NULL | ||
2122 | #define pm_genpd_default_freeze NULL | ||
2123 | #define pm_genpd_default_freeze_late NULL | ||
2124 | #define pm_genpd_default_thaw_early NULL | ||
2125 | #define pm_genpd_default_thaw NULL | ||
2126 | |||
2127 | #endif /* !CONFIG_PM_SLEEP */ | ||
2128 | |||
2129 | /** | 1855 | /** |
2130 | * pm_genpd_init - Initialize a generic I/O PM domain object. | 1856 | * pm_genpd_init - Initialize a generic I/O PM domain object. |
2131 | * @genpd: PM domain object to initialize. | 1857 | * @genpd: PM domain object to initialize. |
@@ -2177,15 +1903,452 @@ void pm_genpd_init(struct generic_pm_domain *genpd, | |||
2177 | genpd->domain.ops.complete = pm_genpd_complete; | 1903 | genpd->domain.ops.complete = pm_genpd_complete; |
2178 | genpd->dev_ops.save_state = pm_genpd_default_save_state; | 1904 | genpd->dev_ops.save_state = pm_genpd_default_save_state; |
2179 | genpd->dev_ops.restore_state = pm_genpd_default_restore_state; | 1905 | genpd->dev_ops.restore_state = pm_genpd_default_restore_state; |
2180 | genpd->dev_ops.suspend = pm_genpd_default_suspend; | ||
2181 | genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; | ||
2182 | genpd->dev_ops.resume_early = pm_genpd_default_resume_early; | ||
2183 | genpd->dev_ops.resume = pm_genpd_default_resume; | ||
2184 | genpd->dev_ops.freeze = pm_genpd_default_freeze; | ||
2185 | genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; | ||
2186 | genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; | ||
2187 | genpd->dev_ops.thaw = pm_genpd_default_thaw; | ||
2188 | mutex_lock(&gpd_list_lock); | 1906 | mutex_lock(&gpd_list_lock); |
2189 | list_add(&genpd->gpd_list_node, &gpd_list); | 1907 | list_add(&genpd->gpd_list_node, &gpd_list); |
2190 | mutex_unlock(&gpd_list_lock); | 1908 | mutex_unlock(&gpd_list_lock); |
2191 | } | 1909 | } |
1910 | |||
1911 | #ifdef CONFIG_PM_GENERIC_DOMAINS_OF | ||
1912 | /* | ||
1913 | * Device Tree based PM domain providers. | ||
1914 | * | ||
1915 | * The code below implements generic device tree based PM domain providers that | ||
1916 | * bind device tree nodes with generic PM domains registered in the system. | ||
1917 | * | ||
1918 | * Any driver that registers generic PM domains and needs to support binding of | ||
1919 | * devices to these domains is supposed to register a PM domain provider, which | ||
1920 | * maps a PM domain specifier retrieved from the device tree to a PM domain. | ||
1921 | * | ||
1922 | * Two simple mapping functions have been provided for convenience: | ||
1923 | * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. | ||
1924 | * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by | ||
1925 | * index. | ||
1926 | */ | ||
1927 | |||
1928 | /** | ||
1929 | * struct of_genpd_provider - PM domain provider registration structure | ||
1930 | * @link: Entry in global list of PM domain providers | ||
1931 | * @node: Pointer to device tree node of PM domain provider | ||
1932 | * @xlate: Provider-specific xlate callback mapping a set of specifier cells | ||
1933 | * into a PM domain. | ||
1934 | * @data: context pointer to be passed into @xlate callback | ||
1935 | */ | ||
1936 | struct of_genpd_provider { | ||
1937 | struct list_head link; | ||
1938 | struct device_node *node; | ||
1939 | genpd_xlate_t xlate; | ||
1940 | void *data; | ||
1941 | }; | ||
1942 | |||
1943 | /* List of registered PM domain providers. */ | ||
1944 | static LIST_HEAD(of_genpd_providers); | ||
1945 | /* Mutex to protect the list above. */ | ||
1946 | static DEFINE_MUTEX(of_genpd_mutex); | ||
1947 | |||
1948 | /** | ||
1949 | * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping | ||
1950 | * @genpdspec: OF phandle args to map into a PM domain | ||
1951 | * @data: xlate function private data - pointer to struct generic_pm_domain | ||
1952 | * | ||
1953 | * This is a generic xlate function that can be used to model PM domains that | ||
1954 | * have their own device tree nodes. The private data of xlate function needs | ||
1955 | * to be a valid pointer to struct generic_pm_domain. | ||
1956 | */ | ||
1957 | struct generic_pm_domain *__of_genpd_xlate_simple( | ||
1958 | struct of_phandle_args *genpdspec, | ||
1959 | void *data) | ||
1960 | { | ||
1961 | if (genpdspec->args_count != 0) | ||
1962 | return ERR_PTR(-EINVAL); | ||
1963 | return data; | ||
1964 | } | ||
1965 | EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple); | ||
1966 | |||
1967 | /** | ||
1968 | * __of_genpd_xlate_onecell() - Xlate function using a single index. | ||
1969 | * @genpdspec: OF phandle args to map into a PM domain | ||
1970 | * @data: xlate function private data - pointer to struct genpd_onecell_data | ||
1971 | * | ||
1972 | * This is a generic xlate function that can be used to model simple PM domain | ||
1973 | * controllers that have one device tree node and provide multiple PM domains. | ||
1974 | * A single cell is used as an index into an array of PM domains specified in | ||
1975 | * the genpd_onecell_data struct when registering the provider. | ||
1976 | */ | ||
1977 | struct generic_pm_domain *__of_genpd_xlate_onecell( | ||
1978 | struct of_phandle_args *genpdspec, | ||
1979 | void *data) | ||
1980 | { | ||
1981 | struct genpd_onecell_data *genpd_data = data; | ||
1982 | unsigned int idx = genpdspec->args[0]; | ||
1983 | |||
1984 | if (genpdspec->args_count != 1) | ||
1985 | return ERR_PTR(-EINVAL); | ||
1986 | |||
1987 | if (idx >= genpd_data->num_domains) { | ||
1988 | pr_err("%s: invalid domain index %u\n", __func__, idx); | ||
1989 | return ERR_PTR(-EINVAL); | ||
1990 | } | ||
1991 | |||
1992 | if (!genpd_data->domains[idx]) | ||
1993 | return ERR_PTR(-ENOENT); | ||
1994 | |||
1995 | return genpd_data->domains[idx]; | ||
1996 | } | ||
1997 | EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell); | ||
1998 | |||
1999 | /** | ||
2000 | * __of_genpd_add_provider() - Register a PM domain provider for a node | ||
2001 | * @np: Device node pointer associated with the PM domain provider. | ||
2002 | * @xlate: Callback for decoding PM domain from phandle arguments. | ||
2003 | * @data: Context pointer for @xlate callback. | ||
2004 | */ | ||
2005 | int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, | ||
2006 | void *data) | ||
2007 | { | ||
2008 | struct of_genpd_provider *cp; | ||
2009 | |||
2010 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | ||
2011 | if (!cp) | ||
2012 | return -ENOMEM; | ||
2013 | |||
2014 | cp->node = of_node_get(np); | ||
2015 | cp->data = data; | ||
2016 | cp->xlate = xlate; | ||
2017 | |||
2018 | mutex_lock(&of_genpd_mutex); | ||
2019 | list_add(&cp->link, &of_genpd_providers); | ||
2020 | mutex_unlock(&of_genpd_mutex); | ||
2021 | pr_debug("Added domain provider from %s\n", np->full_name); | ||
2022 | |||
2023 | return 0; | ||
2024 | } | ||
2025 | EXPORT_SYMBOL_GPL(__of_genpd_add_provider); | ||
2026 | |||
2027 | /** | ||
2028 | * of_genpd_del_provider() - Remove a previously registered PM domain provider | ||
2029 | * @np: Device node pointer associated with the PM domain provider | ||
2030 | */ | ||
2031 | void of_genpd_del_provider(struct device_node *np) | ||
2032 | { | ||
2033 | struct of_genpd_provider *cp; | ||
2034 | |||
2035 | mutex_lock(&of_genpd_mutex); | ||
2036 | list_for_each_entry(cp, &of_genpd_providers, link) { | ||
2037 | if (cp->node == np) { | ||
2038 | list_del(&cp->link); | ||
2039 | of_node_put(cp->node); | ||
2040 | kfree(cp); | ||
2041 | break; | ||
2042 | } | ||
2043 | } | ||
2044 | mutex_unlock(&of_genpd_mutex); | ||
2045 | } | ||
2046 | EXPORT_SYMBOL_GPL(of_genpd_del_provider); | ||
2047 | |||
2048 | /** | ||
2049 | * of_genpd_get_from_provider() - Look-up PM domain | ||
2050 | * @genpdspec: OF phandle args to use for look-up | ||
2051 | * | ||
2052 | * Looks for a PM domain provider under the node specified by @genpdspec and if | ||
2053 | * found, uses xlate function of the provider to map phandle args to a PM | ||
2054 | * domain. | ||
2055 | * | ||
2056 | * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() | ||
2057 | * on failure. | ||
2058 | */ | ||
2059 | static struct generic_pm_domain *of_genpd_get_from_provider( | ||
2060 | struct of_phandle_args *genpdspec) | ||
2061 | { | ||
2062 | struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); | ||
2063 | struct of_genpd_provider *provider; | ||
2064 | |||
2065 | mutex_lock(&of_genpd_mutex); | ||
2066 | |||
2067 | /* Check if we have such a provider in our array */ | ||
2068 | list_for_each_entry(provider, &of_genpd_providers, link) { | ||
2069 | if (provider->node == genpdspec->np) | ||
2070 | genpd = provider->xlate(genpdspec, provider->data); | ||
2071 | if (!IS_ERR(genpd)) | ||
2072 | break; | ||
2073 | } | ||
2074 | |||
2075 | mutex_unlock(&of_genpd_mutex); | ||
2076 | |||
2077 | return genpd; | ||
2078 | } | ||
2079 | |||
2080 | /** | ||
2081 | * genpd_dev_pm_detach - Detach a device from its PM domain. | ||
2082 | * @dev: Device to attach. | ||
2083 | * @power_off: Currently not used | ||
2084 | * | ||
2085 | * Try to locate a corresponding generic PM domain, which the device was | ||
2086 | * attached to previously. If such is found, the device is detached from it. | ||
2087 | */ | ||
2088 | static void genpd_dev_pm_detach(struct device *dev, bool power_off) | ||
2089 | { | ||
2090 | struct generic_pm_domain *pd = NULL, *gpd; | ||
2091 | int ret = 0; | ||
2092 | |||
2093 | if (!dev->pm_domain) | ||
2094 | return; | ||
2095 | |||
2096 | mutex_lock(&gpd_list_lock); | ||
2097 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | ||
2098 | if (&gpd->domain == dev->pm_domain) { | ||
2099 | pd = gpd; | ||
2100 | break; | ||
2101 | } | ||
2102 | } | ||
2103 | mutex_unlock(&gpd_list_lock); | ||
2104 | |||
2105 | if (!pd) | ||
2106 | return; | ||
2107 | |||
2108 | dev_dbg(dev, "removing from PM domain %s\n", pd->name); | ||
2109 | |||
2110 | while (1) { | ||
2111 | ret = pm_genpd_remove_device(pd, dev); | ||
2112 | if (ret != -EAGAIN) | ||
2113 | break; | ||
2114 | cond_resched(); | ||
2115 | } | ||
2116 | |||
2117 | if (ret < 0) { | ||
2118 | dev_err(dev, "failed to remove from PM domain %s: %d", | ||
2119 | pd->name, ret); | ||
2120 | return; | ||
2121 | } | ||
2122 | |||
2123 | /* Check if PM domain can be powered off after removing this device. */ | ||
2124 | genpd_queue_power_off_work(pd); | ||
2125 | } | ||
2126 | |||
2127 | /** | ||
2128 | * genpd_dev_pm_attach - Attach a device to its PM domain using DT. | ||
2129 | * @dev: Device to attach. | ||
2130 | * | ||
2131 | * Parse device's OF node to find a PM domain specifier. If such is found, | ||
2132 | * attaches the device to retrieved pm_domain ops. | ||
2133 | * | ||
2134 | * Both generic and legacy Samsung-specific DT bindings are supported to keep | ||
2135 | * backwards compatibility with existing DTBs. | ||
2136 | * | ||
2137 | * Returns 0 on successfully attached PM domain or negative error code. | ||
2138 | */ | ||
2139 | int genpd_dev_pm_attach(struct device *dev) | ||
2140 | { | ||
2141 | struct of_phandle_args pd_args; | ||
2142 | struct generic_pm_domain *pd; | ||
2143 | int ret; | ||
2144 | |||
2145 | if (!dev->of_node) | ||
2146 | return -ENODEV; | ||
2147 | |||
2148 | if (dev->pm_domain) | ||
2149 | return -EEXIST; | ||
2150 | |||
2151 | ret = of_parse_phandle_with_args(dev->of_node, "power-domains", | ||
2152 | "#power-domain-cells", 0, &pd_args); | ||
2153 | if (ret < 0) { | ||
2154 | if (ret != -ENOENT) | ||
2155 | return ret; | ||
2156 | |||
2157 | /* | ||
2158 | * Try legacy Samsung-specific bindings | ||
2159 | * (for backwards compatibility of DT ABI) | ||
2160 | */ | ||
2161 | pd_args.args_count = 0; | ||
2162 | pd_args.np = of_parse_phandle(dev->of_node, | ||
2163 | "samsung,power-domain", 0); | ||
2164 | if (!pd_args.np) | ||
2165 | return -ENOENT; | ||
2166 | } | ||
2167 | |||
2168 | pd = of_genpd_get_from_provider(&pd_args); | ||
2169 | if (IS_ERR(pd)) { | ||
2170 | dev_dbg(dev, "%s() failed to find PM domain: %ld\n", | ||
2171 | __func__, PTR_ERR(pd)); | ||
2172 | of_node_put(dev->of_node); | ||
2173 | return PTR_ERR(pd); | ||
2174 | } | ||
2175 | |||
2176 | dev_dbg(dev, "adding to PM domain %s\n", pd->name); | ||
2177 | |||
2178 | while (1) { | ||
2179 | ret = pm_genpd_add_device(pd, dev); | ||
2180 | if (ret != -EAGAIN) | ||
2181 | break; | ||
2182 | cond_resched(); | ||
2183 | } | ||
2184 | |||
2185 | if (ret < 0) { | ||
2186 | dev_err(dev, "failed to add to PM domain %s: %d", | ||
2187 | pd->name, ret); | ||
2188 | of_node_put(dev->of_node); | ||
2189 | return ret; | ||
2190 | } | ||
2191 | |||
2192 | dev->pm_domain->detach = genpd_dev_pm_detach; | ||
2193 | |||
2194 | return 0; | ||
2195 | } | ||
2196 | EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); | ||
2197 | #endif | ||
2198 | |||
2199 | |||
2200 | /*** debugfs support ***/ | ||
2201 | |||
2202 | #ifdef CONFIG_PM_ADVANCED_DEBUG | ||
2203 | #include <linux/pm.h> | ||
2204 | #include <linux/device.h> | ||
2205 | #include <linux/debugfs.h> | ||
2206 | #include <linux/seq_file.h> | ||
2207 | #include <linux/init.h> | ||
2208 | #include <linux/kobject.h> | ||
2209 | static struct dentry *pm_genpd_debugfs_dir; | ||
2210 | |||
2211 | /* | ||
2212 | * TODO: This function is a slightly modified version of rtpm_status_show | ||
2213 | * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME | ||
2214 | * are too loose to generalize it. | ||
2215 | */ | ||
2216 | #ifdef CONFIG_PM_RUNTIME | ||
2217 | static void rtpm_status_str(struct seq_file *s, struct device *dev) | ||
2218 | { | ||
2219 | static const char * const status_lookup[] = { | ||
2220 | [RPM_ACTIVE] = "active", | ||
2221 | [RPM_RESUMING] = "resuming", | ||
2222 | [RPM_SUSPENDED] = "suspended", | ||
2223 | [RPM_SUSPENDING] = "suspending" | ||
2224 | }; | ||
2225 | const char *p = ""; | ||
2226 | |||
2227 | if (dev->power.runtime_error) | ||
2228 | p = "error"; | ||
2229 | else if (dev->power.disable_depth) | ||
2230 | p = "unsupported"; | ||
2231 | else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) | ||
2232 | p = status_lookup[dev->power.runtime_status]; | ||
2233 | else | ||
2234 | WARN_ON(1); | ||
2235 | |||
2236 | seq_puts(s, p); | ||
2237 | } | ||
2238 | #else | ||
2239 | static void rtpm_status_str(struct seq_file *s, struct device *dev) | ||
2240 | { | ||
2241 | seq_puts(s, "active"); | ||
2242 | } | ||
2243 | #endif | ||
2244 | |||
2245 | static int pm_genpd_summary_one(struct seq_file *s, | ||
2246 | struct generic_pm_domain *gpd) | ||
2247 | { | ||
2248 | static const char * const status_lookup[] = { | ||
2249 | [GPD_STATE_ACTIVE] = "on", | ||
2250 | [GPD_STATE_WAIT_MASTER] = "wait-master", | ||
2251 | [GPD_STATE_BUSY] = "busy", | ||
2252 | [GPD_STATE_REPEAT] = "off-in-progress", | ||
2253 | [GPD_STATE_POWER_OFF] = "off" | ||
2254 | }; | ||
2255 | struct pm_domain_data *pm_data; | ||
2256 | const char *kobj_path; | ||
2257 | struct gpd_link *link; | ||
2258 | int ret; | ||
2259 | |||
2260 | ret = mutex_lock_interruptible(&gpd->lock); | ||
2261 | if (ret) | ||
2262 | return -ERESTARTSYS; | ||
2263 | |||
2264 | if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup))) | ||
2265 | goto exit; | ||
2266 | seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]); | ||
2267 | |||
2268 | /* | ||
2269 | * Modifications on the list require holding locks on both | ||
2270 | * master and slave, so we are safe. | ||
2271 | * Also gpd->name is immutable. | ||
2272 | */ | ||
2273 | list_for_each_entry(link, &gpd->master_links, master_node) { | ||
2274 | seq_printf(s, "%s", link->slave->name); | ||
2275 | if (!list_is_last(&link->master_node, &gpd->master_links)) | ||
2276 | seq_puts(s, ", "); | ||
2277 | } | ||
2278 | |||
2279 | list_for_each_entry(pm_data, &gpd->dev_list, list_node) { | ||
2280 | kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL); | ||
2281 | if (kobj_path == NULL) | ||
2282 | continue; | ||
2283 | |||
2284 | seq_printf(s, "\n %-50s ", kobj_path); | ||
2285 | rtpm_status_str(s, pm_data->dev); | ||
2286 | kfree(kobj_path); | ||
2287 | } | ||
2288 | |||
2289 | seq_puts(s, "\n"); | ||
2290 | exit: | ||
2291 | mutex_unlock(&gpd->lock); | ||
2292 | |||
2293 | return 0; | ||
2294 | } | ||
2295 | |||
2296 | static int pm_genpd_summary_show(struct seq_file *s, void *data) | ||
2297 | { | ||
2298 | struct generic_pm_domain *gpd; | ||
2299 | int ret = 0; | ||
2300 | |||
2301 | seq_puts(s, " domain status slaves\n"); | ||
2302 | seq_puts(s, " /device runtime status\n"); | ||
2303 | seq_puts(s, "----------------------------------------------------------------------\n"); | ||
2304 | |||
2305 | ret = mutex_lock_interruptible(&gpd_list_lock); | ||
2306 | if (ret) | ||
2307 | return -ERESTARTSYS; | ||
2308 | |||
2309 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | ||
2310 | ret = pm_genpd_summary_one(s, gpd); | ||
2311 | if (ret) | ||
2312 | break; | ||
2313 | } | ||
2314 | mutex_unlock(&gpd_list_lock); | ||
2315 | |||
2316 | return ret; | ||
2317 | } | ||
2318 | |||
2319 | static int pm_genpd_summary_open(struct inode *inode, struct file *file) | ||
2320 | { | ||
2321 | return single_open(file, pm_genpd_summary_show, NULL); | ||
2322 | } | ||
2323 | |||
2324 | static const struct file_operations pm_genpd_summary_fops = { | ||
2325 | .open = pm_genpd_summary_open, | ||
2326 | .read = seq_read, | ||
2327 | .llseek = seq_lseek, | ||
2328 | .release = single_release, | ||
2329 | }; | ||
2330 | |||
2331 | static int __init pm_genpd_debug_init(void) | ||
2332 | { | ||
2333 | struct dentry *d; | ||
2334 | |||
2335 | pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); | ||
2336 | |||
2337 | if (!pm_genpd_debugfs_dir) | ||
2338 | return -ENOMEM; | ||
2339 | |||
2340 | d = debugfs_create_file("pm_genpd_summary", S_IRUGO, | ||
2341 | pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops); | ||
2342 | if (!d) | ||
2343 | return -ENOMEM; | ||
2344 | |||
2345 | return 0; | ||
2346 | } | ||
2347 | late_initcall(pm_genpd_debug_init); | ||
2348 | |||
2349 | static void __exit pm_genpd_debug_exit(void) | ||
2350 | { | ||
2351 | debugfs_remove_recursive(pm_genpd_debugfs_dir); | ||
2352 | } | ||
2353 | __exitcall(pm_genpd_debug_exit); | ||
2354 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ | ||
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index a089e3bcdfbc..d88a62e104d4 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c | |||
@@ -42,7 +42,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data) | |||
42 | * default_stop_ok - Default PM domain governor routine for stopping devices. | 42 | * default_stop_ok - Default PM domain governor routine for stopping devices. |
43 | * @dev: Device to check. | 43 | * @dev: Device to check. |
44 | */ | 44 | */ |
45 | bool default_stop_ok(struct device *dev) | 45 | static bool default_stop_ok(struct device *dev) |
46 | { | 46 | { |
47 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; | 47 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; |
48 | unsigned long flags; | 48 | unsigned long flags; |
@@ -229,10 +229,7 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain) | |||
229 | 229 | ||
230 | #else /* !CONFIG_PM_RUNTIME */ | 230 | #else /* !CONFIG_PM_RUNTIME */ |
231 | 231 | ||
232 | bool default_stop_ok(struct device *dev) | 232 | static inline bool default_stop_ok(struct device *dev) { return false; } |
233 | { | ||
234 | return false; | ||
235 | } | ||
236 | 233 | ||
237 | #define default_power_down_ok NULL | 234 | #define default_power_down_ok NULL |
238 | #define always_on_power_down_ok NULL | 235 | #define always_on_power_down_ok NULL |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index b67d9aef9fe4..44973196d3fd 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -540,7 +540,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie) | |||
540 | * Call the "noirq" resume handlers for all devices in dpm_noirq_list and | 540 | * Call the "noirq" resume handlers for all devices in dpm_noirq_list and |
541 | * enable device drivers to receive interrupts. | 541 | * enable device drivers to receive interrupts. |
542 | */ | 542 | */ |
543 | static void dpm_resume_noirq(pm_message_t state) | 543 | void dpm_resume_noirq(pm_message_t state) |
544 | { | 544 | { |
545 | struct device *dev; | 545 | struct device *dev; |
546 | ktime_t starttime = ktime_get(); | 546 | ktime_t starttime = ktime_get(); |
@@ -662,7 +662,7 @@ static void async_resume_early(void *data, async_cookie_t cookie) | |||
662 | * dpm_resume_early - Execute "early resume" callbacks for all devices. | 662 | * dpm_resume_early - Execute "early resume" callbacks for all devices. |
663 | * @state: PM transition of the system being carried out. | 663 | * @state: PM transition of the system being carried out. |
664 | */ | 664 | */ |
665 | static void dpm_resume_early(pm_message_t state) | 665 | void dpm_resume_early(pm_message_t state) |
666 | { | 666 | { |
667 | struct device *dev; | 667 | struct device *dev; |
668 | ktime_t starttime = ktime_get(); | 668 | ktime_t starttime = ktime_get(); |
@@ -1093,7 +1093,7 @@ static int device_suspend_noirq(struct device *dev) | |||
1093 | * Prevent device drivers from receiving interrupts and call the "noirq" suspend | 1093 | * Prevent device drivers from receiving interrupts and call the "noirq" suspend |
1094 | * handlers for all non-sysdev devices. | 1094 | * handlers for all non-sysdev devices. |
1095 | */ | 1095 | */ |
1096 | static int dpm_suspend_noirq(pm_message_t state) | 1096 | int dpm_suspend_noirq(pm_message_t state) |
1097 | { | 1097 | { |
1098 | ktime_t starttime = ktime_get(); | 1098 | ktime_t starttime = ktime_get(); |
1099 | int error = 0; | 1099 | int error = 0; |
@@ -1232,7 +1232,7 @@ static int device_suspend_late(struct device *dev) | |||
1232 | * dpm_suspend_late - Execute "late suspend" callbacks for all devices. | 1232 | * dpm_suspend_late - Execute "late suspend" callbacks for all devices. |
1233 | * @state: PM transition of the system being carried out. | 1233 | * @state: PM transition of the system being carried out. |
1234 | */ | 1234 | */ |
1235 | static int dpm_suspend_late(pm_message_t state) | 1235 | int dpm_suspend_late(pm_message_t state) |
1236 | { | 1236 | { |
1237 | ktime_t starttime = ktime_get(); | 1237 | ktime_t starttime = ktime_get(); |
1238 | int error = 0; | 1238 | int error = 0; |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 95b181d1ca6d..a9d26ed11bf4 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -92,9 +92,6 @@ | |||
92 | * wakeup_count - Report the number of wakeup events related to the device | 92 | * wakeup_count - Report the number of wakeup events related to the device |
93 | */ | 93 | */ |
94 | 94 | ||
95 | static const char enabled[] = "enabled"; | ||
96 | static const char disabled[] = "disabled"; | ||
97 | |||
98 | const char power_group_name[] = "power"; | 95 | const char power_group_name[] = "power"; |
99 | EXPORT_SYMBOL_GPL(power_group_name); | 96 | EXPORT_SYMBOL_GPL(power_group_name); |
100 | 97 | ||
@@ -336,11 +333,14 @@ static DEVICE_ATTR(pm_qos_remote_wakeup, 0644, | |||
336 | #endif /* CONFIG_PM_RUNTIME */ | 333 | #endif /* CONFIG_PM_RUNTIME */ |
337 | 334 | ||
338 | #ifdef CONFIG_PM_SLEEP | 335 | #ifdef CONFIG_PM_SLEEP |
336 | static const char _enabled[] = "enabled"; | ||
337 | static const char _disabled[] = "disabled"; | ||
338 | |||
339 | static ssize_t | 339 | static ssize_t |
340 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) | 340 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) |
341 | { | 341 | { |
342 | return sprintf(buf, "%s\n", device_can_wakeup(dev) | 342 | return sprintf(buf, "%s\n", device_can_wakeup(dev) |
343 | ? (device_may_wakeup(dev) ? enabled : disabled) | 343 | ? (device_may_wakeup(dev) ? _enabled : _disabled) |
344 | : ""); | 344 | : ""); |
345 | } | 345 | } |
346 | 346 | ||
@@ -357,11 +357,11 @@ wake_store(struct device * dev, struct device_attribute *attr, | |||
357 | cp = memchr(buf, '\n', n); | 357 | cp = memchr(buf, '\n', n); |
358 | if (cp) | 358 | if (cp) |
359 | len = cp - buf; | 359 | len = cp - buf; |
360 | if (len == sizeof enabled - 1 | 360 | if (len == sizeof _enabled - 1 |
361 | && strncmp(buf, enabled, sizeof enabled - 1) == 0) | 361 | && strncmp(buf, _enabled, sizeof _enabled - 1) == 0) |
362 | device_set_wakeup_enable(dev, 1); | 362 | device_set_wakeup_enable(dev, 1); |
363 | else if (len == sizeof disabled - 1 | 363 | else if (len == sizeof _disabled - 1 |
364 | && strncmp(buf, disabled, sizeof disabled - 1) == 0) | 364 | && strncmp(buf, _disabled, sizeof _disabled - 1) == 0) |
365 | device_set_wakeup_enable(dev, 0); | 365 | device_set_wakeup_enable(dev, 0); |
366 | else | 366 | else |
367 | return -EINVAL; | 367 | return -EINVAL; |
@@ -570,7 +570,8 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr, | |||
570 | char *buf) | 570 | char *buf) |
571 | { | 571 | { |
572 | return sprintf(buf, "%s\n", | 572 | return sprintf(buf, "%s\n", |
573 | device_async_suspend_enabled(dev) ? enabled : disabled); | 573 | device_async_suspend_enabled(dev) ? |
574 | _enabled : _disabled); | ||
574 | } | 575 | } |
575 | 576 | ||
576 | static ssize_t async_store(struct device *dev, struct device_attribute *attr, | 577 | static ssize_t async_store(struct device *dev, struct device_attribute *attr, |
@@ -582,9 +583,10 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr, | |||
582 | cp = memchr(buf, '\n', n); | 583 | cp = memchr(buf, '\n', n); |
583 | if (cp) | 584 | if (cp) |
584 | len = cp - buf; | 585 | len = cp - buf; |
585 | if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0) | 586 | if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0) |
586 | device_enable_async_suspend(dev); | 587 | device_enable_async_suspend(dev); |
587 | else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0) | 588 | else if (len == sizeof _disabled - 1 && |
589 | strncmp(buf, _disabled, len) == 0) | ||
588 | device_disable_async_suspend(dev); | 590 | device_disable_async_suspend(dev); |
589 | else | 591 | else |
590 | return -EINVAL; | 592 | return -EINVAL; |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index eb1bd2ecad8b..c2744b30d5d9 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -24,6 +24,9 @@ | |||
24 | */ | 24 | */ |
25 | bool events_check_enabled __read_mostly; | 25 | bool events_check_enabled __read_mostly; |
26 | 26 | ||
27 | /* If set and the system is suspending, terminate the suspend. */ | ||
28 | static bool pm_abort_suspend __read_mostly; | ||
29 | |||
27 | /* | 30 | /* |
28 | * Combined counters of registered wakeup events and wakeup events in progress. | 31 | * Combined counters of registered wakeup events and wakeup events in progress. |
29 | * They need to be modified together atomically, so it's better to use one | 32 | * They need to be modified together atomically, so it's better to use one |
@@ -719,7 +722,18 @@ bool pm_wakeup_pending(void) | |||
719 | pm_print_active_wakeup_sources(); | 722 | pm_print_active_wakeup_sources(); |
720 | } | 723 | } |
721 | 724 | ||
722 | return ret; | 725 | return ret || pm_abort_suspend; |
726 | } | ||
727 | |||
728 | void pm_system_wakeup(void) | ||
729 | { | ||
730 | pm_abort_suspend = true; | ||
731 | freeze_wake(); | ||
732 | } | ||
733 | |||
734 | void pm_wakeup_clear(void) | ||
735 | { | ||
736 | pm_abort_suspend = false; | ||
723 | } | 737 | } |
724 | 738 | ||
725 | /** | 739 | /** |
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c index dbb8350ea8dc..8d98a329f6ea 100644 --- a/drivers/base/syscore.c +++ b/drivers/base/syscore.c | |||
@@ -9,7 +9,7 @@ | |||
9 | #include <linux/syscore_ops.h> | 9 | #include <linux/syscore_ops.h> |
10 | #include <linux/mutex.h> | 10 | #include <linux/mutex.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/suspend.h> |
13 | #include <trace/events/power.h> | 13 | #include <trace/events/power.h> |
14 | 14 | ||
15 | static LIST_HEAD(syscore_ops_list); | 15 | static LIST_HEAD(syscore_ops_list); |
@@ -54,9 +54,8 @@ int syscore_suspend(void) | |||
54 | pr_debug("Checking wakeup interrupts\n"); | 54 | pr_debug("Checking wakeup interrupts\n"); |
55 | 55 | ||
56 | /* Return error code if there are any wakeup interrupts pending. */ | 56 | /* Return error code if there are any wakeup interrupts pending. */ |
57 | ret = check_wakeup_irqs(); | 57 | if (pm_wakeup_pending()) |
58 | if (ret) | 58 | return -EBUSY; |
59 | return ret; | ||
60 | 59 | ||
61 | WARN_ONCE(!irqs_disabled(), | 60 | WARN_ONCE(!irqs_disabled(), |
62 | "Interrupts enabled before system core suspend.\n"); | 61 | "Interrupts enabled before system core suspend.\n"); |
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h index 551e01061434..95254585db86 100644 --- a/drivers/bus/omap_l3_noc.h +++ b/drivers/bus/omap_l3_noc.h | |||
@@ -188,31 +188,31 @@ static struct l3_flagmux_data omap_l3_flagmux_clk3 = { | |||
188 | }; | 188 | }; |
189 | 189 | ||
190 | static struct l3_masters_data omap_l3_masters[] = { | 190 | static struct l3_masters_data omap_l3_masters[] = { |
191 | { 0x0 , "MPU"}, | 191 | { 0x00, "MPU"}, |
192 | { 0x10, "CS_ADP"}, | 192 | { 0x04, "CS_ADP"}, |
193 | { 0x14, "xxx"}, | 193 | { 0x05, "xxx"}, |
194 | { 0x20, "DSP"}, | 194 | { 0x08, "DSP"}, |
195 | { 0x30, "IVAHD"}, | 195 | { 0x0C, "IVAHD"}, |
196 | { 0x40, "ISS"}, | 196 | { 0x10, "ISS"}, |
197 | { 0x44, "DucatiM3"}, | 197 | { 0x11, "DucatiM3"}, |
198 | { 0x48, "FaceDetect"}, | 198 | { 0x12, "FaceDetect"}, |
199 | { 0x50, "SDMA_Rd"}, | 199 | { 0x14, "SDMA_Rd"}, |
200 | { 0x54, "SDMA_Wr"}, | 200 | { 0x15, "SDMA_Wr"}, |
201 | { 0x58, "xxx"}, | 201 | { 0x16, "xxx"}, |
202 | { 0x5C, "xxx"}, | 202 | { 0x17, "xxx"}, |
203 | { 0x60, "SGX"}, | 203 | { 0x18, "SGX"}, |
204 | { 0x70, "DSS"}, | 204 | { 0x1C, "DSS"}, |
205 | { 0x80, "C2C"}, | 205 | { 0x20, "C2C"}, |
206 | { 0x88, "xxx"}, | 206 | { 0x22, "xxx"}, |
207 | { 0x8C, "xxx"}, | 207 | { 0x23, "xxx"}, |
208 | { 0x90, "HSI"}, | 208 | { 0x24, "HSI"}, |
209 | { 0xA0, "MMC1"}, | 209 | { 0x28, "MMC1"}, |
210 | { 0xA4, "MMC2"}, | 210 | { 0x29, "MMC2"}, |
211 | { 0xA8, "MMC6"}, | 211 | { 0x2A, "MMC6"}, |
212 | { 0xB0, "UNIPRO1"}, | 212 | { 0x2C, "UNIPRO1"}, |
213 | { 0xC0, "USBHOSTHS"}, | 213 | { 0x30, "USBHOSTHS"}, |
214 | { 0xC4, "USBOTGHS"}, | 214 | { 0x31, "USBOTGHS"}, |
215 | { 0xC8, "USBHOSTFS"} | 215 | { 0x32, "USBHOSTFS"} |
216 | }; | 216 | }; |
217 | 217 | ||
218 | static struct l3_flagmux_data *omap_l3_flagmux[] = { | 218 | static struct l3_flagmux_data *omap_l3_flagmux[] = { |
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c index 0300c46ee247..32f7c1b36204 100644 --- a/drivers/clk/at91/clk-slow.c +++ b/drivers/clk/at91/clk-slow.c | |||
@@ -447,7 +447,7 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np, | |||
447 | int i; | 447 | int i; |
448 | 448 | ||
449 | num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); | 449 | num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); |
450 | if (num_parents <= 0 || num_parents > 1) | 450 | if (num_parents != 2) |
451 | return; | 451 | return; |
452 | 452 | ||
453 | for (i = 0; i < num_parents; ++i) { | 453 | for (i = 0; i < num_parents; ++i) { |
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c index bac2ddf49d02..73a8d0ff530c 100644 --- a/drivers/clk/clk-efm32gg.c +++ b/drivers/clk/clk-efm32gg.c | |||
@@ -22,7 +22,7 @@ static struct clk_onecell_data clk_data = { | |||
22 | .clk_num = ARRAY_SIZE(clk), | 22 | .clk_num = ARRAY_SIZE(clk), |
23 | }; | 23 | }; |
24 | 24 | ||
25 | static int __init efm32gg_cmu_init(struct device_node *np) | 25 | static void __init efm32gg_cmu_init(struct device_node *np) |
26 | { | 26 | { |
27 | int i; | 27 | int i; |
28 | void __iomem *base; | 28 | void __iomem *base; |
@@ -33,7 +33,7 @@ static int __init efm32gg_cmu_init(struct device_node *np) | |||
33 | base = of_iomap(np, 0); | 33 | base = of_iomap(np, 0); |
34 | if (!base) { | 34 | if (!base) { |
35 | pr_warn("Failed to map address range for efm32gg,cmu node\n"); | 35 | pr_warn("Failed to map address range for efm32gg,cmu node\n"); |
36 | return -EADDRNOTAVAIL; | 36 | return; |
37 | } | 37 | } |
38 | 38 | ||
39 | clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL, | 39 | clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL, |
@@ -76,6 +76,6 @@ static int __init efm32gg_cmu_init(struct device_node *np) | |||
76 | clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0", | 76 | clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0", |
77 | "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); | 77 | "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); |
78 | 78 | ||
79 | return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); | 79 | of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); |
80 | } | 80 | } |
81 | CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); | 81 | CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); |
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b76fa69b44cb..bacc06ff939b 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -1467,6 +1467,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even | |||
1467 | static void clk_change_rate(struct clk *clk) | 1467 | static void clk_change_rate(struct clk *clk) |
1468 | { | 1468 | { |
1469 | struct clk *child; | 1469 | struct clk *child; |
1470 | struct hlist_node *tmp; | ||
1470 | unsigned long old_rate; | 1471 | unsigned long old_rate; |
1471 | unsigned long best_parent_rate = 0; | 1472 | unsigned long best_parent_rate = 0; |
1472 | bool skip_set_rate = false; | 1473 | bool skip_set_rate = false; |
@@ -1502,7 +1503,11 @@ static void clk_change_rate(struct clk *clk) | |||
1502 | if (clk->notifier_count && old_rate != clk->rate) | 1503 | if (clk->notifier_count && old_rate != clk->rate) |
1503 | __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); | 1504 | __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); |
1504 | 1505 | ||
1505 | hlist_for_each_entry(child, &clk->children, child_node) { | 1506 | /* |
1507 | * Use safe iteration, as change_rate can actually swap parents | ||
1508 | * for certain clock types. | ||
1509 | */ | ||
1510 | hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) { | ||
1506 | /* Skip children who will be reparented to another clock */ | 1511 | /* Skip children who will be reparented to another clock */ |
1507 | if (child->new_parent && child->new_parent != clk) | 1512 | if (child->new_parent && child->new_parent != clk) |
1508 | continue; | 1513 | continue; |
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index 4032e510d9aa..3b83b7dd78c7 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c | |||
@@ -1095,7 +1095,7 @@ static struct clk_branch prng_clk = { | |||
1095 | }; | 1095 | }; |
1096 | 1096 | ||
1097 | static const struct freq_tbl clk_tbl_sdc[] = { | 1097 | static const struct freq_tbl clk_tbl_sdc[] = { |
1098 | { 144000, P_PXO, 5, 18,625 }, | 1098 | { 200000, P_PXO, 2, 2, 125 }, |
1099 | { 400000, P_PLL8, 4, 1, 240 }, | 1099 | { 400000, P_PLL8, 4, 1, 240 }, |
1100 | { 16000000, P_PLL8, 4, 1, 6 }, | 1100 | { 16000000, P_PLL8, 4, 1, 6 }, |
1101 | { 17070000, P_PLL8, 1, 2, 45 }, | 1101 | { 17070000, P_PLL8, 1, 2, 45 }, |
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 0d8c6c59a75e..b22a2d2f21e9 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c | |||
@@ -545,7 +545,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { | |||
545 | GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS), | 545 | GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS), |
546 | GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS), | 546 | GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS), |
547 | GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS), | 547 | GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS), |
548 | GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS), | 548 | GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS), |
549 | GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS), | 549 | GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS), |
550 | GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS), | 550 | GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS), |
551 | GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS), | 551 | GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS), |
@@ -603,7 +603,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { | |||
603 | GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS), | 603 | GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS), |
604 | GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS), | 604 | GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS), |
605 | GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS), | 605 | GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS), |
606 | GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS), | 606 | GATE(PCLK_I2C1, "pclk_i2c1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS), |
607 | GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS), | 607 | GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS), |
608 | GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS), | 608 | GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS), |
609 | GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS), | 609 | GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS), |
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 4a65b410e4d5..af29359677da 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c | |||
@@ -139,9 +139,13 @@ static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate, | |||
139 | static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate, | 139 | static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate, |
140 | unsigned long parent_rate) | 140 | unsigned long parent_rate) |
141 | { | 141 | { |
142 | struct dra7_atl_desc *cdesc = to_atl_desc(hw); | 142 | struct dra7_atl_desc *cdesc; |
143 | u32 divider; | 143 | u32 divider; |
144 | 144 | ||
145 | if (!hw || !rate) | ||
146 | return -EINVAL; | ||
147 | |||
148 | cdesc = to_atl_desc(hw); | ||
145 | divider = ((parent_rate + rate / 2) / rate) - 1; | 149 | divider = ((parent_rate + rate / 2) / rate) - 1; |
146 | if (divider > DRA7_ATL_DIVIDER_MASK) | 150 | if (divider > DRA7_ATL_DIVIDER_MASK) |
147 | divider = DRA7_ATL_DIVIDER_MASK; | 151 | divider = DRA7_ATL_DIVIDER_MASK; |
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index e6aa10db7bba..a837f703be65 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c | |||
@@ -211,11 +211,16 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, | |||
211 | static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, | 211 | static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, |
212 | unsigned long parent_rate) | 212 | unsigned long parent_rate) |
213 | { | 213 | { |
214 | struct clk_divider *divider = to_clk_divider(hw); | 214 | struct clk_divider *divider; |
215 | unsigned int div, value; | 215 | unsigned int div, value; |
216 | unsigned long flags = 0; | 216 | unsigned long flags = 0; |
217 | u32 val; | 217 | u32 val; |
218 | 218 | ||
219 | if (!hw || !rate) | ||
220 | return -EINVAL; | ||
221 | |||
222 | divider = to_clk_divider(hw); | ||
223 | |||
219 | div = DIV_ROUND_UP(parent_rate, rate); | 224 | div = DIV_ROUND_UP(parent_rate, rate); |
220 | value = _get_val(divider, div); | 225 | value = _get_val(divider, div); |
221 | 226 | ||
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index ffe350f86bca..3489f8f5fada 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
@@ -183,14 +183,14 @@ config CPU_FREQ_GOV_CONSERVATIVE | |||
183 | 183 | ||
184 | If in doubt, say N. | 184 | If in doubt, say N. |
185 | 185 | ||
186 | config GENERIC_CPUFREQ_CPU0 | 186 | config CPUFREQ_DT |
187 | tristate "Generic CPU0 cpufreq driver" | 187 | tristate "Generic DT based cpufreq driver" |
188 | depends on HAVE_CLK && OF | 188 | depends on HAVE_CLK && OF |
189 | # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y: | 189 | # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y: |
190 | depends on !CPU_THERMAL || THERMAL | 190 | depends on !CPU_THERMAL || THERMAL |
191 | select PM_OPP | 191 | select PM_OPP |
192 | help | 192 | help |
193 | This adds a generic cpufreq driver for CPU0 frequency management. | 193 | This adds a generic DT based cpufreq driver for frequency management. |
194 | It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) | 194 | It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) |
195 | systems which share clock and voltage across all CPUs. | 195 | systems which share clock and voltage across all CPUs. |
196 | 196 | ||
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 7364a538e056..48ed28b789f7 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -92,7 +92,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW | |||
92 | 92 | ||
93 | config ARM_HIGHBANK_CPUFREQ | 93 | config ARM_HIGHBANK_CPUFREQ |
94 | tristate "Calxeda Highbank-based" | 94 | tristate "Calxeda Highbank-based" |
95 | depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR | 95 | depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR |
96 | default m | 96 | default m |
97 | help | 97 | help |
98 | This adds the CPUFreq driver for Calxeda Highbank SoC | 98 | This adds the CPUFreq driver for Calxeda Highbank SoC |
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index db6d9a2fea4d..40c53dc1937e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
@@ -13,7 +13,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o | |||
13 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o | 13 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o |
14 | obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o | 14 | obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o |
15 | 15 | ||
16 | obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o | 16 | obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o |
17 | 17 | ||
18 | ################################################################################## | 18 | ################################################################################## |
19 | # x86 drivers. | 19 | # x86 drivers. |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c deleted file mode 100644 index 0d2172b07765..000000000000 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ /dev/null | |||
@@ -1,248 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * The OPP code in function cpu0_set_target() is reused from | ||
5 | * drivers/cpufreq/omap-cpufreq.c | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
14 | #include <linux/clk.h> | ||
15 | #include <linux/cpu.h> | ||
16 | #include <linux/cpu_cooling.h> | ||
17 | #include <linux/cpufreq.h> | ||
18 | #include <linux/cpumask.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/pm_opp.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/regulator/consumer.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/thermal.h> | ||
27 | |||
28 | static unsigned int transition_latency; | ||
29 | static unsigned int voltage_tolerance; /* in percentage */ | ||
30 | |||
31 | static struct device *cpu_dev; | ||
32 | static struct clk *cpu_clk; | ||
33 | static struct regulator *cpu_reg; | ||
34 | static struct cpufreq_frequency_table *freq_table; | ||
35 | static struct thermal_cooling_device *cdev; | ||
36 | |||
37 | static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index) | ||
38 | { | ||
39 | struct dev_pm_opp *opp; | ||
40 | unsigned long volt = 0, volt_old = 0, tol = 0; | ||
41 | unsigned int old_freq, new_freq; | ||
42 | long freq_Hz, freq_exact; | ||
43 | int ret; | ||
44 | |||
45 | freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); | ||
46 | if (freq_Hz <= 0) | ||
47 | freq_Hz = freq_table[index].frequency * 1000; | ||
48 | |||
49 | freq_exact = freq_Hz; | ||
50 | new_freq = freq_Hz / 1000; | ||
51 | old_freq = clk_get_rate(cpu_clk) / 1000; | ||
52 | |||
53 | if (!IS_ERR(cpu_reg)) { | ||
54 | rcu_read_lock(); | ||
55 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); | ||
56 | if (IS_ERR(opp)) { | ||
57 | rcu_read_unlock(); | ||
58 | pr_err("failed to find OPP for %ld\n", freq_Hz); | ||
59 | return PTR_ERR(opp); | ||
60 | } | ||
61 | volt = dev_pm_opp_get_voltage(opp); | ||
62 | rcu_read_unlock(); | ||
63 | tol = volt * voltage_tolerance / 100; | ||
64 | volt_old = regulator_get_voltage(cpu_reg); | ||
65 | } | ||
66 | |||
67 | pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n", | ||
68 | old_freq / 1000, volt_old ? volt_old / 1000 : -1, | ||
69 | new_freq / 1000, volt ? volt / 1000 : -1); | ||
70 | |||
71 | /* scaling up? scale voltage before frequency */ | ||
72 | if (!IS_ERR(cpu_reg) && new_freq > old_freq) { | ||
73 | ret = regulator_set_voltage_tol(cpu_reg, volt, tol); | ||
74 | if (ret) { | ||
75 | pr_err("failed to scale voltage up: %d\n", ret); | ||
76 | return ret; | ||
77 | } | ||
78 | } | ||
79 | |||
80 | ret = clk_set_rate(cpu_clk, freq_exact); | ||
81 | if (ret) { | ||
82 | pr_err("failed to set clock rate: %d\n", ret); | ||
83 | if (!IS_ERR(cpu_reg)) | ||
84 | regulator_set_voltage_tol(cpu_reg, volt_old, tol); | ||
85 | return ret; | ||
86 | } | ||
87 | |||
88 | /* scaling down? scale voltage after frequency */ | ||
89 | if (!IS_ERR(cpu_reg) && new_freq < old_freq) { | ||
90 | ret = regulator_set_voltage_tol(cpu_reg, volt, tol); | ||
91 | if (ret) { | ||
92 | pr_err("failed to scale voltage down: %d\n", ret); | ||
93 | clk_set_rate(cpu_clk, old_freq * 1000); | ||
94 | } | ||
95 | } | ||
96 | |||
97 | return ret; | ||
98 | } | ||
99 | |||
100 | static int cpu0_cpufreq_init(struct cpufreq_policy *policy) | ||
101 | { | ||
102 | policy->clk = cpu_clk; | ||
103 | return cpufreq_generic_init(policy, freq_table, transition_latency); | ||
104 | } | ||
105 | |||
106 | static struct cpufreq_driver cpu0_cpufreq_driver = { | ||
107 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, | ||
108 | .verify = cpufreq_generic_frequency_table_verify, | ||
109 | .target_index = cpu0_set_target, | ||
110 | .get = cpufreq_generic_get, | ||
111 | .init = cpu0_cpufreq_init, | ||
112 | .name = "generic_cpu0", | ||
113 | .attr = cpufreq_generic_attr, | ||
114 | }; | ||
115 | |||
116 | static int cpu0_cpufreq_probe(struct platform_device *pdev) | ||
117 | { | ||
118 | struct device_node *np; | ||
119 | int ret; | ||
120 | |||
121 | cpu_dev = get_cpu_device(0); | ||
122 | if (!cpu_dev) { | ||
123 | pr_err("failed to get cpu0 device\n"); | ||
124 | return -ENODEV; | ||
125 | } | ||
126 | |||
127 | np = of_node_get(cpu_dev->of_node); | ||
128 | if (!np) { | ||
129 | pr_err("failed to find cpu0 node\n"); | ||
130 | return -ENOENT; | ||
131 | } | ||
132 | |||
133 | cpu_reg = regulator_get_optional(cpu_dev, "cpu0"); | ||
134 | if (IS_ERR(cpu_reg)) { | ||
135 | /* | ||
136 | * If cpu0 regulator supply node is present, but regulator is | ||
137 | * not yet registered, we should try defering probe. | ||
138 | */ | ||
139 | if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { | ||
140 | dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n"); | ||
141 | ret = -EPROBE_DEFER; | ||
142 | goto out_put_node; | ||
143 | } | ||
144 | pr_warn("failed to get cpu0 regulator: %ld\n", | ||
145 | PTR_ERR(cpu_reg)); | ||
146 | } | ||
147 | |||
148 | cpu_clk = clk_get(cpu_dev, NULL); | ||
149 | if (IS_ERR(cpu_clk)) { | ||
150 | ret = PTR_ERR(cpu_clk); | ||
151 | pr_err("failed to get cpu0 clock: %d\n", ret); | ||
152 | goto out_put_reg; | ||
153 | } | ||
154 | |||
155 | /* OPPs might be populated at runtime, don't check for error here */ | ||
156 | of_init_opp_table(cpu_dev); | ||
157 | |||
158 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); | ||
159 | if (ret) { | ||
160 | pr_err("failed to init cpufreq table: %d\n", ret); | ||
161 | goto out_put_clk; | ||
162 | } | ||
163 | |||
164 | of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); | ||
165 | |||
166 | if (of_property_read_u32(np, "clock-latency", &transition_latency)) | ||
167 | transition_latency = CPUFREQ_ETERNAL; | ||
168 | |||
169 | if (!IS_ERR(cpu_reg)) { | ||
170 | struct dev_pm_opp *opp; | ||
171 | unsigned long min_uV, max_uV; | ||
172 | int i; | ||
173 | |||
174 | /* | ||
175 | * OPP is maintained in order of increasing frequency, and | ||
176 | * freq_table initialised from OPP is therefore sorted in the | ||
177 | * same order. | ||
178 | */ | ||
179 | for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) | ||
180 | ; | ||
181 | rcu_read_lock(); | ||
182 | opp = dev_pm_opp_find_freq_exact(cpu_dev, | ||
183 | freq_table[0].frequency * 1000, true); | ||
184 | min_uV = dev_pm_opp_get_voltage(opp); | ||
185 | opp = dev_pm_opp_find_freq_exact(cpu_dev, | ||
186 | freq_table[i-1].frequency * 1000, true); | ||
187 | max_uV = dev_pm_opp_get_voltage(opp); | ||
188 | rcu_read_unlock(); | ||
189 | ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); | ||
190 | if (ret > 0) | ||
191 | transition_latency += ret * 1000; | ||
192 | } | ||
193 | |||
194 | ret = cpufreq_register_driver(&cpu0_cpufreq_driver); | ||
195 | if (ret) { | ||
196 | pr_err("failed register driver: %d\n", ret); | ||
197 | goto out_free_table; | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * For now, just loading the cooling device; | ||
202 | * thermal DT code takes care of matching them. | ||
203 | */ | ||
204 | if (of_find_property(np, "#cooling-cells", NULL)) { | ||
205 | cdev = of_cpufreq_cooling_register(np, cpu_present_mask); | ||
206 | if (IS_ERR(cdev)) | ||
207 | pr_err("running cpufreq without cooling device: %ld\n", | ||
208 | PTR_ERR(cdev)); | ||
209 | } | ||
210 | |||
211 | of_node_put(np); | ||
212 | return 0; | ||
213 | |||
214 | out_free_table: | ||
215 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); | ||
216 | out_put_clk: | ||
217 | if (!IS_ERR(cpu_clk)) | ||
218 | clk_put(cpu_clk); | ||
219 | out_put_reg: | ||
220 | if (!IS_ERR(cpu_reg)) | ||
221 | regulator_put(cpu_reg); | ||
222 | out_put_node: | ||
223 | of_node_put(np); | ||
224 | return ret; | ||
225 | } | ||
226 | |||
227 | static int cpu0_cpufreq_remove(struct platform_device *pdev) | ||
228 | { | ||
229 | cpufreq_cooling_unregister(cdev); | ||
230 | cpufreq_unregister_driver(&cpu0_cpufreq_driver); | ||
231 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | static struct platform_driver cpu0_cpufreq_platdrv = { | ||
237 | .driver = { | ||
238 | .name = "cpufreq-cpu0", | ||
239 | .owner = THIS_MODULE, | ||
240 | }, | ||
241 | .probe = cpu0_cpufreq_probe, | ||
242 | .remove = cpu0_cpufreq_remove, | ||
243 | }; | ||
244 | module_platform_driver(cpu0_cpufreq_platdrv); | ||
245 | |||
246 | MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); | ||
247 | MODULE_DESCRIPTION("Generic CPU0 cpufreq driver"); | ||
248 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c new file mode 100644 index 000000000000..6bbb8b913446 --- /dev/null +++ b/drivers/cpufreq/cpufreq-dt.c | |||
@@ -0,0 +1,364 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * Copyright (C) 2014 Linaro. | ||
5 | * Viresh Kumar <viresh.kumar@linaro.org> | ||
6 | * | ||
7 | * The OPP code in function set_target() is reused from | ||
8 | * drivers/cpufreq/omap-cpufreq.c | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
16 | |||
17 | #include <linux/clk.h> | ||
18 | #include <linux/cpu.h> | ||
19 | #include <linux/cpu_cooling.h> | ||
20 | #include <linux/cpufreq.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/of.h> | ||
25 | #include <linux/pm_opp.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/regulator/consumer.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/thermal.h> | ||
30 | |||
31 | struct private_data { | ||
32 | struct device *cpu_dev; | ||
33 | struct regulator *cpu_reg; | ||
34 | struct thermal_cooling_device *cdev; | ||
35 | unsigned int voltage_tolerance; /* in percentage */ | ||
36 | }; | ||
37 | |||
38 | static int set_target(struct cpufreq_policy *policy, unsigned int index) | ||
39 | { | ||
40 | struct dev_pm_opp *opp; | ||
41 | struct cpufreq_frequency_table *freq_table = policy->freq_table; | ||
42 | struct clk *cpu_clk = policy->clk; | ||
43 | struct private_data *priv = policy->driver_data; | ||
44 | struct device *cpu_dev = priv->cpu_dev; | ||
45 | struct regulator *cpu_reg = priv->cpu_reg; | ||
46 | unsigned long volt = 0, volt_old = 0, tol = 0; | ||
47 | unsigned int old_freq, new_freq; | ||
48 | long freq_Hz, freq_exact; | ||
49 | int ret; | ||
50 | |||
51 | freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); | ||
52 | if (freq_Hz <= 0) | ||
53 | freq_Hz = freq_table[index].frequency * 1000; | ||
54 | |||
55 | freq_exact = freq_Hz; | ||
56 | new_freq = freq_Hz / 1000; | ||
57 | old_freq = clk_get_rate(cpu_clk) / 1000; | ||
58 | |||
59 | if (!IS_ERR(cpu_reg)) { | ||
60 | rcu_read_lock(); | ||
61 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); | ||
62 | if (IS_ERR(opp)) { | ||
63 | rcu_read_unlock(); | ||
64 | dev_err(cpu_dev, "failed to find OPP for %ld\n", | ||
65 | freq_Hz); | ||
66 | return PTR_ERR(opp); | ||
67 | } | ||
68 | volt = dev_pm_opp_get_voltage(opp); | ||
69 | rcu_read_unlock(); | ||
70 | tol = volt * priv->voltage_tolerance / 100; | ||
71 | volt_old = regulator_get_voltage(cpu_reg); | ||
72 | } | ||
73 | |||
74 | dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", | ||
75 | old_freq / 1000, volt_old ? volt_old / 1000 : -1, | ||
76 | new_freq / 1000, volt ? volt / 1000 : -1); | ||
77 | |||
78 | /* scaling up? scale voltage before frequency */ | ||
79 | if (!IS_ERR(cpu_reg) && new_freq > old_freq) { | ||
80 | ret = regulator_set_voltage_tol(cpu_reg, volt, tol); | ||
81 | if (ret) { | ||
82 | dev_err(cpu_dev, "failed to scale voltage up: %d\n", | ||
83 | ret); | ||
84 | return ret; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | ret = clk_set_rate(cpu_clk, freq_exact); | ||
89 | if (ret) { | ||
90 | dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); | ||
91 | if (!IS_ERR(cpu_reg)) | ||
92 | regulator_set_voltage_tol(cpu_reg, volt_old, tol); | ||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | /* scaling down? scale voltage after frequency */ | ||
97 | if (!IS_ERR(cpu_reg) && new_freq < old_freq) { | ||
98 | ret = regulator_set_voltage_tol(cpu_reg, volt, tol); | ||
99 | if (ret) { | ||
100 | dev_err(cpu_dev, "failed to scale voltage down: %d\n", | ||
101 | ret); | ||
102 | clk_set_rate(cpu_clk, old_freq * 1000); | ||
103 | } | ||
104 | } | ||
105 | |||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | static int allocate_resources(int cpu, struct device **cdev, | ||
110 | struct regulator **creg, struct clk **cclk) | ||
111 | { | ||
112 | struct device *cpu_dev; | ||
113 | struct regulator *cpu_reg; | ||
114 | struct clk *cpu_clk; | ||
115 | int ret = 0; | ||
116 | char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg; | ||
117 | |||
118 | cpu_dev = get_cpu_device(cpu); | ||
119 | if (!cpu_dev) { | ||
120 | pr_err("failed to get cpu%d device\n", cpu); | ||
121 | return -ENODEV; | ||
122 | } | ||
123 | |||
124 | /* Try "cpu0" for older DTs */ | ||
125 | if (!cpu) | ||
126 | reg = reg_cpu0; | ||
127 | else | ||
128 | reg = reg_cpu; | ||
129 | |||
130 | try_again: | ||
131 | cpu_reg = regulator_get_optional(cpu_dev, reg); | ||
132 | if (IS_ERR(cpu_reg)) { | ||
133 | /* | ||
134 | * If cpu's regulator supply node is present, but regulator is | ||
135 | * not yet registered, we should try defering probe. | ||
136 | */ | ||
137 | if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { | ||
138 | dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n", | ||
139 | cpu); | ||
140 | return -EPROBE_DEFER; | ||
141 | } | ||
142 | |||
143 | /* Try with "cpu-supply" */ | ||
144 | if (reg == reg_cpu0) { | ||
145 | reg = reg_cpu; | ||
146 | goto try_again; | ||
147 | } | ||
148 | |||
149 | dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n", | ||
150 | cpu, PTR_ERR(cpu_reg)); | ||
151 | } | ||
152 | |||
153 | cpu_clk = clk_get(cpu_dev, NULL); | ||
154 | if (IS_ERR(cpu_clk)) { | ||
155 | /* put regulator */ | ||
156 | if (!IS_ERR(cpu_reg)) | ||
157 | regulator_put(cpu_reg); | ||
158 | |||
159 | ret = PTR_ERR(cpu_clk); | ||
160 | |||
161 | /* | ||
162 | * If cpu's clk node is present, but clock is not yet | ||
163 | * registered, we should try defering probe. | ||
164 | */ | ||
165 | if (ret == -EPROBE_DEFER) | ||
166 | dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu); | ||
167 | else | ||
168 | dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret, | ||
169 | cpu); | ||
170 | } else { | ||
171 | *cdev = cpu_dev; | ||
172 | *creg = cpu_reg; | ||
173 | *cclk = cpu_clk; | ||
174 | } | ||
175 | |||
176 | return ret; | ||
177 | } | ||
178 | |||
179 | static int cpufreq_init(struct cpufreq_policy *policy) | ||
180 | { | ||
181 | struct cpufreq_frequency_table *freq_table; | ||
182 | struct thermal_cooling_device *cdev; | ||
183 | struct device_node *np; | ||
184 | struct private_data *priv; | ||
185 | struct device *cpu_dev; | ||
186 | struct regulator *cpu_reg; | ||
187 | struct clk *cpu_clk; | ||
188 | unsigned int transition_latency; | ||
189 | int ret; | ||
190 | |||
191 | ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); | ||
192 | if (ret) { | ||
193 | pr_err("%s: Failed to allocate resources\n: %d", __func__, ret); | ||
194 | return ret; | ||
195 | } | ||
196 | |||
197 | np = of_node_get(cpu_dev->of_node); | ||
198 | if (!np) { | ||
199 | dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu); | ||
200 | ret = -ENOENT; | ||
201 | goto out_put_reg_clk; | ||
202 | } | ||
203 | |||
204 | /* OPPs might be populated at runtime, don't check for error here */ | ||
205 | of_init_opp_table(cpu_dev); | ||
206 | |||
207 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); | ||
208 | if (ret) { | ||
209 | dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); | ||
210 | goto out_put_node; | ||
211 | } | ||
212 | |||
213 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
214 | if (!priv) { | ||
215 | ret = -ENOMEM; | ||
216 | goto out_free_table; | ||
217 | } | ||
218 | |||
219 | of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); | ||
220 | |||
221 | if (of_property_read_u32(np, "clock-latency", &transition_latency)) | ||
222 | transition_latency = CPUFREQ_ETERNAL; | ||
223 | |||
224 | if (!IS_ERR(cpu_reg)) { | ||
225 | struct dev_pm_opp *opp; | ||
226 | unsigned long min_uV, max_uV; | ||
227 | int i; | ||
228 | |||
229 | /* | ||
230 | * OPP is maintained in order of increasing frequency, and | ||
231 | * freq_table initialised from OPP is therefore sorted in the | ||
232 | * same order. | ||
233 | */ | ||
234 | for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) | ||
235 | ; | ||
236 | rcu_read_lock(); | ||
237 | opp = dev_pm_opp_find_freq_exact(cpu_dev, | ||
238 | freq_table[0].frequency * 1000, true); | ||
239 | min_uV = dev_pm_opp_get_voltage(opp); | ||
240 | opp = dev_pm_opp_find_freq_exact(cpu_dev, | ||
241 | freq_table[i-1].frequency * 1000, true); | ||
242 | max_uV = dev_pm_opp_get_voltage(opp); | ||
243 | rcu_read_unlock(); | ||
244 | ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); | ||
245 | if (ret > 0) | ||
246 | transition_latency += ret * 1000; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * For now, just loading the cooling device; | ||
251 | * thermal DT code takes care of matching them. | ||
252 | */ | ||
253 | if (of_find_property(np, "#cooling-cells", NULL)) { | ||
254 | cdev = of_cpufreq_cooling_register(np, cpu_present_mask); | ||
255 | if (IS_ERR(cdev)) | ||
256 | dev_err(cpu_dev, | ||
257 | "running cpufreq without cooling device: %ld\n", | ||
258 | PTR_ERR(cdev)); | ||
259 | else | ||
260 | priv->cdev = cdev; | ||
261 | } | ||
262 | |||
263 | priv->cpu_dev = cpu_dev; | ||
264 | priv->cpu_reg = cpu_reg; | ||
265 | policy->driver_data = priv; | ||
266 | |||
267 | policy->clk = cpu_clk; | ||
268 | ret = cpufreq_generic_init(policy, freq_table, transition_latency); | ||
269 | if (ret) | ||
270 | goto out_cooling_unregister; | ||
271 | |||
272 | of_node_put(np); | ||
273 | |||
274 | return 0; | ||
275 | |||
276 | out_cooling_unregister: | ||
277 | cpufreq_cooling_unregister(priv->cdev); | ||
278 | kfree(priv); | ||
279 | out_free_table: | ||
280 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); | ||
281 | out_put_node: | ||
282 | of_node_put(np); | ||
283 | out_put_reg_clk: | ||
284 | clk_put(cpu_clk); | ||
285 | if (!IS_ERR(cpu_reg)) | ||
286 | regulator_put(cpu_reg); | ||
287 | |||
288 | return ret; | ||
289 | } | ||
290 | |||
291 | static int cpufreq_exit(struct cpufreq_policy *policy) | ||
292 | { | ||
293 | struct private_data *priv = policy->driver_data; | ||
294 | |||
295 | cpufreq_cooling_unregister(priv->cdev); | ||
296 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); | ||
297 | clk_put(policy->clk); | ||
298 | if (!IS_ERR(priv->cpu_reg)) | ||
299 | regulator_put(priv->cpu_reg); | ||
300 | kfree(priv); | ||
301 | |||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | static struct cpufreq_driver dt_cpufreq_driver = { | ||
306 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, | ||
307 | .verify = cpufreq_generic_frequency_table_verify, | ||
308 | .target_index = set_target, | ||
309 | .get = cpufreq_generic_get, | ||
310 | .init = cpufreq_init, | ||
311 | .exit = cpufreq_exit, | ||
312 | .name = "cpufreq-dt", | ||
313 | .attr = cpufreq_generic_attr, | ||
314 | }; | ||
315 | |||
316 | static int dt_cpufreq_probe(struct platform_device *pdev) | ||
317 | { | ||
318 | struct device *cpu_dev; | ||
319 | struct regulator *cpu_reg; | ||
320 | struct clk *cpu_clk; | ||
321 | int ret; | ||
322 | |||
323 | /* | ||
324 | * All per-cluster (CPUs sharing clock/voltages) initialization is done | ||
325 | * from ->init(). In probe(), we just need to make sure that clk and | ||
326 | * regulators are available. Else defer probe and retry. | ||
327 | * | ||
328 | * FIXME: Is checking this only for CPU0 sufficient ? | ||
329 | */ | ||
330 | ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk); | ||
331 | if (ret) | ||
332 | return ret; | ||
333 | |||
334 | clk_put(cpu_clk); | ||
335 | if (!IS_ERR(cpu_reg)) | ||
336 | regulator_put(cpu_reg); | ||
337 | |||
338 | ret = cpufreq_register_driver(&dt_cpufreq_driver); | ||
339 | if (ret) | ||
340 | dev_err(cpu_dev, "failed register driver: %d\n", ret); | ||
341 | |||
342 | return ret; | ||
343 | } | ||
344 | |||
345 | static int dt_cpufreq_remove(struct platform_device *pdev) | ||
346 | { | ||
347 | cpufreq_unregister_driver(&dt_cpufreq_driver); | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | static struct platform_driver dt_cpufreq_platdrv = { | ||
352 | .driver = { | ||
353 | .name = "cpufreq-dt", | ||
354 | .owner = THIS_MODULE, | ||
355 | }, | ||
356 | .probe = dt_cpufreq_probe, | ||
357 | .remove = dt_cpufreq_remove, | ||
358 | }; | ||
359 | module_platform_driver(dt_cpufreq_platdrv); | ||
360 | |||
361 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); | ||
362 | MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); | ||
363 | MODULE_DESCRIPTION("Generic cpufreq driver"); | ||
364 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index d9fdeddcef96..24bf76fba141 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -437,7 +437,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor) | |||
437 | struct cpufreq_governor *t; | 437 | struct cpufreq_governor *t; |
438 | 438 | ||
439 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) | 439 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) |
440 | if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN)) | 440 | if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) |
441 | return t; | 441 | return t; |
442 | 442 | ||
443 | return NULL; | 443 | return NULL; |
@@ -455,10 +455,10 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, | |||
455 | goto out; | 455 | goto out; |
456 | 456 | ||
457 | if (cpufreq_driver->setpolicy) { | 457 | if (cpufreq_driver->setpolicy) { |
458 | if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { | 458 | if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { |
459 | *policy = CPUFREQ_POLICY_PERFORMANCE; | 459 | *policy = CPUFREQ_POLICY_PERFORMANCE; |
460 | err = 0; | 460 | err = 0; |
461 | } else if (!strnicmp(str_governor, "powersave", | 461 | } else if (!strncasecmp(str_governor, "powersave", |
462 | CPUFREQ_NAME_LEN)) { | 462 | CPUFREQ_NAME_LEN)) { |
463 | *policy = CPUFREQ_POLICY_POWERSAVE; | 463 | *policy = CPUFREQ_POLICY_POWERSAVE; |
464 | err = 0; | 464 | err = 0; |
@@ -1289,6 +1289,8 @@ err_get_freq: | |||
1289 | per_cpu(cpufreq_cpu_data, j) = NULL; | 1289 | per_cpu(cpufreq_cpu_data, j) = NULL; |
1290 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1290 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1291 | 1291 | ||
1292 | up_write(&policy->rwsem); | ||
1293 | |||
1292 | if (cpufreq_driver->exit) | 1294 | if (cpufreq_driver->exit) |
1293 | cpufreq_driver->exit(policy); | 1295 | cpufreq_driver->exit(policy); |
1294 | err_set_policy_cpu: | 1296 | err_set_policy_cpu: |
@@ -1380,7 +1382,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, | |||
1380 | if (!cpufreq_suspended) | 1382 | if (!cpufreq_suspended) |
1381 | pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", | 1383 | pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", |
1382 | __func__, new_cpu, cpu); | 1384 | __func__, new_cpu, cpu); |
1383 | } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) { | 1385 | } else if (cpufreq_driver->stop_cpu) { |
1384 | cpufreq_driver->stop_cpu(policy); | 1386 | cpufreq_driver->stop_cpu(policy); |
1385 | } | 1387 | } |
1386 | 1388 | ||
@@ -1657,7 +1659,7 @@ void cpufreq_suspend(void) | |||
1657 | return; | 1659 | return; |
1658 | 1660 | ||
1659 | if (!has_target()) | 1661 | if (!has_target()) |
1660 | return; | 1662 | goto suspend; |
1661 | 1663 | ||
1662 | pr_debug("%s: Suspending Governors\n", __func__); | 1664 | pr_debug("%s: Suspending Governors\n", __func__); |
1663 | 1665 | ||
@@ -1671,6 +1673,7 @@ void cpufreq_suspend(void) | |||
1671 | policy); | 1673 | policy); |
1672 | } | 1674 | } |
1673 | 1675 | ||
1676 | suspend: | ||
1674 | cpufreq_suspended = true; | 1677 | cpufreq_suspended = true; |
1675 | } | 1678 | } |
1676 | 1679 | ||
@@ -1687,13 +1690,13 @@ void cpufreq_resume(void) | |||
1687 | if (!cpufreq_driver) | 1690 | if (!cpufreq_driver) |
1688 | return; | 1691 | return; |
1689 | 1692 | ||
1693 | cpufreq_suspended = false; | ||
1694 | |||
1690 | if (!has_target()) | 1695 | if (!has_target()) |
1691 | return; | 1696 | return; |
1692 | 1697 | ||
1693 | pr_debug("%s: Resuming Governors\n", __func__); | 1698 | pr_debug("%s: Resuming Governors\n", __func__); |
1694 | 1699 | ||
1695 | cpufreq_suspended = false; | ||
1696 | |||
1697 | list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { | 1700 | list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { |
1698 | if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) | 1701 | if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) |
1699 | pr_err("%s: Failed to resume driver: %p\n", __func__, | 1702 | pr_err("%s: Failed to resume driver: %p\n", __func__, |
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index 61a54310a1b9..843ec824fd91 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c | |||
@@ -127,7 +127,7 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) | |||
127 | * dependencies on platform headers. It is necessary to enable | 127 | * dependencies on platform headers. It is necessary to enable |
128 | * Exynos multi-platform support and will be removed together with | 128 | * Exynos multi-platform support and will be removed together with |
129 | * this whole driver as soon as Exynos gets migrated to use | 129 | * this whole driver as soon as Exynos gets migrated to use |
130 | * cpufreq-cpu0 driver. | 130 | * cpufreq-dt driver. |
131 | */ | 131 | */ |
132 | np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock"); | 132 | np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock"); |
133 | if (!np) { | 133 | if (!np) { |
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c index 351a2074cfea..9e78a850e29f 100644 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ b/drivers/cpufreq/exynos4x12-cpufreq.c | |||
@@ -174,7 +174,7 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) | |||
174 | * dependencies on platform headers. It is necessary to enable | 174 | * dependencies on platform headers. It is necessary to enable |
175 | * Exynos multi-platform support and will be removed together with | 175 | * Exynos multi-platform support and will be removed together with |
176 | * this whole driver as soon as Exynos gets migrated to use | 176 | * this whole driver as soon as Exynos gets migrated to use |
177 | * cpufreq-cpu0 driver. | 177 | * cpufreq-dt driver. |
178 | */ | 178 | */ |
179 | np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock"); | 179 | np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock"); |
180 | if (!np) { | 180 | if (!np) { |
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c index c91ce69dc631..3eafdc7ba787 100644 --- a/drivers/cpufreq/exynos5250-cpufreq.c +++ b/drivers/cpufreq/exynos5250-cpufreq.c | |||
@@ -153,7 +153,7 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) | |||
153 | * dependencies on platform headers. It is necessary to enable | 153 | * dependencies on platform headers. It is necessary to enable |
154 | * Exynos multi-platform support and will be removed together with | 154 | * Exynos multi-platform support and will be removed together with |
155 | * this whole driver as soon as Exynos gets migrated to use | 155 | * this whole driver as soon as Exynos gets migrated to use |
156 | * cpufreq-cpu0 driver. | 156 | * cpufreq-dt driver. |
157 | */ | 157 | */ |
158 | np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock"); | 158 | np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock"); |
159 | if (!np) { | 159 | if (!np) { |
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index bf8902a0866d..ec399ad2f059 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This driver provides the clk notifier callbacks that are used when | 8 | * This driver provides the clk notifier callbacks that are used when |
9 | * the cpufreq-cpu0 driver changes to frequency to alert the highbank | 9 | * the cpufreq-dt driver changes to frequency to alert the highbank |
10 | * EnergyCore Management Engine (ECME) about the need to change | 10 | * EnergyCore Management Engine (ECME) about the need to change |
11 | * voltage. The ECME interfaces with the actual voltage regulators. | 11 | * voltage. The ECME interfaces with the actual voltage regulators. |
12 | */ | 12 | */ |
@@ -60,7 +60,7 @@ static struct notifier_block hb_cpufreq_clk_nb = { | |||
60 | 60 | ||
61 | static int hb_cpufreq_driver_init(void) | 61 | static int hb_cpufreq_driver_init(void) |
62 | { | 62 | { |
63 | struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; | 63 | struct platform_device_info devinfo = { .name = "cpufreq-dt", }; |
64 | struct device *cpu_dev; | 64 | struct device *cpu_dev; |
65 | struct clk *cpu_clk; | 65 | struct clk *cpu_clk; |
66 | struct device_node *np; | 66 | struct device_node *np; |
@@ -95,7 +95,7 @@ static int hb_cpufreq_driver_init(void) | |||
95 | goto out_put_node; | 95 | goto out_put_node; |
96 | } | 96 | } |
97 | 97 | ||
98 | /* Instantiate cpufreq-cpu0 */ | 98 | /* Instantiate cpufreq-dt */ |
99 | platform_device_register_full(&devinfo); | 99 | platform_device_register_full(&devinfo); |
100 | 100 | ||
101 | out_put_node: | 101 | out_put_node: |
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c index c1320528b9d0..6bd69adc3c5e 100644 --- a/drivers/cpufreq/integrator-cpufreq.c +++ b/drivers/cpufreq/integrator-cpufreq.c | |||
@@ -213,9 +213,9 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev) | |||
213 | return cpufreq_register_driver(&integrator_driver); | 213 | return cpufreq_register_driver(&integrator_driver); |
214 | } | 214 | } |
215 | 215 | ||
216 | static void __exit integrator_cpufreq_remove(struct platform_device *pdev) | 216 | static int __exit integrator_cpufreq_remove(struct platform_device *pdev) |
217 | { | 217 | { |
218 | cpufreq_unregister_driver(&integrator_driver); | 218 | return cpufreq_unregister_driver(&integrator_driver); |
219 | } | 219 | } |
220 | 220 | ||
221 | static const struct of_device_id integrator_cpufreq_match[] = { | 221 | static const struct of_device_id integrator_cpufreq_match[] = { |
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 728a2d879499..4d2c8e861089 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c | |||
@@ -204,7 +204,6 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, | |||
204 | u32 input_buffer; | 204 | u32 input_buffer; |
205 | int cpu; | 205 | int cpu; |
206 | 206 | ||
207 | spin_lock(&pcc_lock); | ||
208 | cpu = policy->cpu; | 207 | cpu = policy->cpu; |
209 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); | 208 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); |
210 | 209 | ||
@@ -216,6 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, | |||
216 | freqs.old = policy->cur; | 215 | freqs.old = policy->cur; |
217 | freqs.new = target_freq; | 216 | freqs.new = target_freq; |
218 | cpufreq_freq_transition_begin(policy, &freqs); | 217 | cpufreq_freq_transition_begin(policy, &freqs); |
218 | spin_lock(&pcc_lock); | ||
219 | 219 | ||
220 | input_buffer = 0x1 | (((target_freq * 100) | 220 | input_buffer = 0x1 | (((target_freq * 100) |
221 | / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); | 221 | / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); |
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 379c0837f5a9..2dfd4fdb5a52 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/cpufreq.h> | 26 | #include <linux/cpufreq.h> |
27 | #include <linux/smp.h> | 27 | #include <linux/smp.h> |
28 | #include <linux/of.h> | 28 | #include <linux/of.h> |
29 | #include <linux/reboot.h> | ||
29 | 30 | ||
30 | #include <asm/cputhreads.h> | 31 | #include <asm/cputhreads.h> |
31 | #include <asm/firmware.h> | 32 | #include <asm/firmware.h> |
@@ -35,6 +36,7 @@ | |||
35 | #define POWERNV_MAX_PSTATES 256 | 36 | #define POWERNV_MAX_PSTATES 256 |
36 | 37 | ||
37 | static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; | 38 | static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; |
39 | static bool rebooting; | ||
38 | 40 | ||
39 | /* | 41 | /* |
40 | * Note: The set of pstates consists of contiguous integers, the | 42 | * Note: The set of pstates consists of contiguous integers, the |
@@ -284,6 +286,15 @@ static void set_pstate(void *freq_data) | |||
284 | } | 286 | } |
285 | 287 | ||
286 | /* | 288 | /* |
289 | * get_nominal_index: Returns the index corresponding to the nominal | ||
290 | * pstate in the cpufreq table | ||
291 | */ | ||
292 | static inline unsigned int get_nominal_index(void) | ||
293 | { | ||
294 | return powernv_pstate_info.max - powernv_pstate_info.nominal; | ||
295 | } | ||
296 | |||
297 | /* | ||
287 | * powernv_cpufreq_target_index: Sets the frequency corresponding to | 298 | * powernv_cpufreq_target_index: Sets the frequency corresponding to |
288 | * the cpufreq table entry indexed by new_index on the cpus in the | 299 | * the cpufreq table entry indexed by new_index on the cpus in the |
289 | * mask policy->cpus | 300 | * mask policy->cpus |
@@ -293,6 +304,9 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, | |||
293 | { | 304 | { |
294 | struct powernv_smp_call_data freq_data; | 305 | struct powernv_smp_call_data freq_data; |
295 | 306 | ||
307 | if (unlikely(rebooting) && new_index != get_nominal_index()) | ||
308 | return 0; | ||
309 | |||
296 | freq_data.pstate_id = powernv_freqs[new_index].driver_data; | 310 | freq_data.pstate_id = powernv_freqs[new_index].driver_data; |
297 | 311 | ||
298 | /* | 312 | /* |
@@ -317,6 +331,33 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
317 | return cpufreq_table_validate_and_show(policy, powernv_freqs); | 331 | return cpufreq_table_validate_and_show(policy, powernv_freqs); |
318 | } | 332 | } |
319 | 333 | ||
334 | static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb, | ||
335 | unsigned long action, void *unused) | ||
336 | { | ||
337 | int cpu; | ||
338 | struct cpufreq_policy cpu_policy; | ||
339 | |||
340 | rebooting = true; | ||
341 | for_each_online_cpu(cpu) { | ||
342 | cpufreq_get_policy(&cpu_policy, cpu); | ||
343 | powernv_cpufreq_target_index(&cpu_policy, get_nominal_index()); | ||
344 | } | ||
345 | |||
346 | return NOTIFY_DONE; | ||
347 | } | ||
348 | |||
349 | static struct notifier_block powernv_cpufreq_reboot_nb = { | ||
350 | .notifier_call = powernv_cpufreq_reboot_notifier, | ||
351 | }; | ||
352 | |||
353 | static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy) | ||
354 | { | ||
355 | struct powernv_smp_call_data freq_data; | ||
356 | |||
357 | freq_data.pstate_id = powernv_pstate_info.min; | ||
358 | smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1); | ||
359 | } | ||
360 | |||
320 | static struct cpufreq_driver powernv_cpufreq_driver = { | 361 | static struct cpufreq_driver powernv_cpufreq_driver = { |
321 | .name = "powernv-cpufreq", | 362 | .name = "powernv-cpufreq", |
322 | .flags = CPUFREQ_CONST_LOOPS, | 363 | .flags = CPUFREQ_CONST_LOOPS, |
@@ -324,6 +365,7 @@ static struct cpufreq_driver powernv_cpufreq_driver = { | |||
324 | .verify = cpufreq_generic_frequency_table_verify, | 365 | .verify = cpufreq_generic_frequency_table_verify, |
325 | .target_index = powernv_cpufreq_target_index, | 366 | .target_index = powernv_cpufreq_target_index, |
326 | .get = powernv_cpufreq_get, | 367 | .get = powernv_cpufreq_get, |
368 | .stop_cpu = powernv_cpufreq_stop_cpu, | ||
327 | .attr = powernv_cpu_freq_attr, | 369 | .attr = powernv_cpu_freq_attr, |
328 | }; | 370 | }; |
329 | 371 | ||
@@ -342,12 +384,14 @@ static int __init powernv_cpufreq_init(void) | |||
342 | return rc; | 384 | return rc; |
343 | } | 385 | } |
344 | 386 | ||
387 | register_reboot_notifier(&powernv_cpufreq_reboot_nb); | ||
345 | return cpufreq_register_driver(&powernv_cpufreq_driver); | 388 | return cpufreq_register_driver(&powernv_cpufreq_driver); |
346 | } | 389 | } |
347 | module_init(powernv_cpufreq_init); | 390 | module_init(powernv_cpufreq_init); |
348 | 391 | ||
349 | static void __exit powernv_cpufreq_exit(void) | 392 | static void __exit powernv_cpufreq_exit(void) |
350 | { | 393 | { |
394 | unregister_reboot_notifier(&powernv_cpufreq_reboot_nb); | ||
351 | cpufreq_unregister_driver(&powernv_cpufreq_driver); | 395 | cpufreq_unregister_driver(&powernv_cpufreq_driver); |
352 | } | 396 | } |
353 | module_exit(powernv_cpufreq_exit); | 397 | module_exit(powernv_cpufreq_exit); |
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index 3607070797af..bee5df7794d3 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c | |||
@@ -199,7 +199,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
199 | } | 199 | } |
200 | 200 | ||
201 | data->table = table; | 201 | data->table = table; |
202 | per_cpu(cpu_data, cpu) = data; | ||
203 | 202 | ||
204 | /* update ->cpus if we have cluster, no harm if not */ | 203 | /* update ->cpus if we have cluster, no harm if not */ |
205 | cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu)); | 204 | cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu)); |
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 3f9791f07b8e..567caa6313ff 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c | |||
@@ -597,7 +597,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) | |||
597 | * and dependencies on platform headers. It is necessary to enable | 597 | * and dependencies on platform headers. It is necessary to enable |
598 | * S5PV210 multi-platform support and will be removed together with | 598 | * S5PV210 multi-platform support and will be removed together with |
599 | * this whole driver as soon as S5PV210 gets migrated to use | 599 | * this whole driver as soon as S5PV210 gets migrated to use |
600 | * cpufreq-cpu0 driver. | 600 | * cpufreq-dt driver. |
601 | */ | 601 | */ |
602 | np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock"); | 602 | np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock"); |
603 | if (!np) { | 603 | if (!np) { |
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index 32748c36c477..c5029c1209b4 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig | |||
@@ -25,11 +25,19 @@ config CPU_IDLE_GOV_MENU | |||
25 | bool "Menu governor (for tickless system)" | 25 | bool "Menu governor (for tickless system)" |
26 | default y | 26 | default y |
27 | 27 | ||
28 | config DT_IDLE_STATES | ||
29 | bool | ||
30 | |||
28 | menu "ARM CPU Idle Drivers" | 31 | menu "ARM CPU Idle Drivers" |
29 | depends on ARM | 32 | depends on ARM |
30 | source "drivers/cpuidle/Kconfig.arm" | 33 | source "drivers/cpuidle/Kconfig.arm" |
31 | endmenu | 34 | endmenu |
32 | 35 | ||
36 | menu "ARM64 CPU Idle Drivers" | ||
37 | depends on ARM64 | ||
38 | source "drivers/cpuidle/Kconfig.arm64" | ||
39 | endmenu | ||
40 | |||
33 | menu "MIPS CPU Idle Drivers" | 41 | menu "MIPS CPU Idle Drivers" |
34 | depends on MIPS | 42 | depends on MIPS |
35 | source "drivers/cpuidle/Kconfig.mips" | 43 | source "drivers/cpuidle/Kconfig.mips" |
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 38cff69ffe06..e339c7f2c2b7 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm | |||
@@ -7,6 +7,7 @@ config ARM_BIG_LITTLE_CPUIDLE | |||
7 | depends on MCPM | 7 | depends on MCPM |
8 | select ARM_CPU_SUSPEND | 8 | select ARM_CPU_SUSPEND |
9 | select CPU_IDLE_MULTIPLE_DRIVERS | 9 | select CPU_IDLE_MULTIPLE_DRIVERS |
10 | select DT_IDLE_STATES | ||
10 | help | 11 | help |
11 | Select this option to enable CPU idle driver for big.LITTLE based | 12 | Select this option to enable CPU idle driver for big.LITTLE based |
12 | ARM systems. Driver manages CPUs coordination through MCPM and | 13 | ARM systems. Driver manages CPUs coordination through MCPM and |
diff --git a/drivers/cpuidle/Kconfig.arm64 b/drivers/cpuidle/Kconfig.arm64 new file mode 100644 index 000000000000..d0a08ed1b2ee --- /dev/null +++ b/drivers/cpuidle/Kconfig.arm64 | |||
@@ -0,0 +1,14 @@ | |||
1 | # | ||
2 | # ARM64 CPU Idle drivers | ||
3 | # | ||
4 | |||
5 | config ARM64_CPUIDLE | ||
6 | bool "Generic ARM64 CPU idle Driver" | ||
7 | select ARM64_CPU_SUSPEND | ||
8 | select DT_IDLE_STATES | ||
9 | help | ||
10 | Select this to enable generic cpuidle driver for ARM64. | ||
11 | It provides a generic idle driver whose idle states are configured | ||
12 | at run-time through DT nodes. The CPUidle suspend backend is | ||
13 | initialized by calling the CPU operations init idle hook | ||
14 | provided by architecture code. | ||
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 11edb31c55e9..4d177b916f75 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ | 5 | obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ |
6 | obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o | 6 | obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o |
7 | obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o | ||
7 | 8 | ||
8 | ################################################################################## | 9 | ################################################################################## |
9 | # ARM SoC drivers | 10 | # ARM SoC drivers |
@@ -22,6 +23,10 @@ obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o | |||
22 | obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o | 23 | obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o |
23 | 24 | ||
24 | ############################################################################### | 25 | ############################################################################### |
26 | # ARM64 drivers | ||
27 | obj-$(CONFIG_ARM64_CPUIDLE) += cpuidle-arm64.o | ||
28 | |||
29 | ############################################################################### | ||
25 | # POWERPC drivers | 30 | # POWERPC drivers |
26 | obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o | 31 | obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o |
27 | obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o | 32 | obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o |
diff --git a/drivers/cpuidle/cpuidle-arm64.c b/drivers/cpuidle/cpuidle-arm64.c new file mode 100644 index 000000000000..50997ea942fc --- /dev/null +++ b/drivers/cpuidle/cpuidle-arm64.c | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * ARM64 generic CPU idle driver. | ||
3 | * | ||
4 | * Copyright (C) 2014 ARM Ltd. | ||
5 | * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #define pr_fmt(fmt) "CPUidle arm64: " fmt | ||
13 | |||
14 | #include <linux/cpuidle.h> | ||
15 | #include <linux/cpumask.h> | ||
16 | #include <linux/cpu_pm.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/of.h> | ||
20 | |||
21 | #include <asm/cpuidle.h> | ||
22 | #include <asm/suspend.h> | ||
23 | |||
24 | #include "dt_idle_states.h" | ||
25 | |||
26 | /* | ||
27 | * arm64_enter_idle_state - Programs CPU to enter the specified state | ||
28 | * | ||
29 | * dev: cpuidle device | ||
30 | * drv: cpuidle driver | ||
31 | * idx: state index | ||
32 | * | ||
33 | * Called from the CPUidle framework to program the device to the | ||
34 | * specified target state selected by the governor. | ||
35 | */ | ||
36 | static int arm64_enter_idle_state(struct cpuidle_device *dev, | ||
37 | struct cpuidle_driver *drv, int idx) | ||
38 | { | ||
39 | int ret; | ||
40 | |||
41 | if (!idx) { | ||
42 | cpu_do_idle(); | ||
43 | return idx; | ||
44 | } | ||
45 | |||
46 | ret = cpu_pm_enter(); | ||
47 | if (!ret) { | ||
48 | /* | ||
49 | * Pass idle state index to cpu_suspend which in turn will | ||
50 | * call the CPU ops suspend protocol with idle index as a | ||
51 | * parameter. | ||
52 | */ | ||
53 | ret = cpu_suspend(idx); | ||
54 | |||
55 | cpu_pm_exit(); | ||
56 | } | ||
57 | |||
58 | return ret ? -1 : idx; | ||
59 | } | ||
60 | |||
61 | static struct cpuidle_driver arm64_idle_driver = { | ||
62 | .name = "arm64_idle", | ||
63 | .owner = THIS_MODULE, | ||
64 | /* | ||
65 | * State at index 0 is standby wfi and considered standard | ||
66 | * on all ARM platforms. If in some platforms simple wfi | ||
67 | * can't be used as "state 0", DT bindings must be implemented | ||
68 | * to work around this issue and allow installing a special | ||
69 | * handler for idle state index 0. | ||
70 | */ | ||
71 | .states[0] = { | ||
72 | .enter = arm64_enter_idle_state, | ||
73 | .exit_latency = 1, | ||
74 | .target_residency = 1, | ||
75 | .power_usage = UINT_MAX, | ||
76 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
77 | .name = "WFI", | ||
78 | .desc = "ARM64 WFI", | ||
79 | } | ||
80 | }; | ||
81 | |||
82 | static const struct of_device_id arm64_idle_state_match[] __initconst = { | ||
83 | { .compatible = "arm,idle-state", | ||
84 | .data = arm64_enter_idle_state }, | ||
85 | { }, | ||
86 | }; | ||
87 | |||
88 | /* | ||
89 | * arm64_idle_init | ||
90 | * | ||
91 | * Registers the arm64 specific cpuidle driver with the cpuidle | ||
92 | * framework. It relies on core code to parse the idle states | ||
93 | * and initialize them using driver data structures accordingly. | ||
94 | */ | ||
95 | static int __init arm64_idle_init(void) | ||
96 | { | ||
97 | int cpu, ret; | ||
98 | struct cpuidle_driver *drv = &arm64_idle_driver; | ||
99 | |||
100 | /* | ||
101 | * Initialize idle states data, starting at index 1. | ||
102 | * This driver is DT only, if no DT idle states are detected (ret == 0) | ||
103 | * let the driver initialization fail accordingly since there is no | ||
104 | * reason to initialize the idle driver if only wfi is supported. | ||
105 | */ | ||
106 | ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1); | ||
107 | if (ret <= 0) { | ||
108 | if (ret) | ||
109 | pr_err("failed to initialize idle states\n"); | ||
110 | return ret ? : -ENODEV; | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Call arch CPU operations in order to initialize | ||
115 | * idle states suspend back-end specific data | ||
116 | */ | ||
117 | for_each_possible_cpu(cpu) { | ||
118 | ret = cpu_init_idle(cpu); | ||
119 | if (ret) { | ||
120 | pr_err("CPU %d failed to init idle CPU ops\n", cpu); | ||
121 | return ret; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | ret = cpuidle_register(drv, NULL); | ||
126 | if (ret) { | ||
127 | pr_err("failed to register cpuidle driver\n"); | ||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | device_initcall(arm64_idle_init); | ||
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index ef94c3b81f18..fbc00a1d3c48 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c | |||
@@ -24,6 +24,8 @@ | |||
24 | #include <asm/smp_plat.h> | 24 | #include <asm/smp_plat.h> |
25 | #include <asm/suspend.h> | 25 | #include <asm/suspend.h> |
26 | 26 | ||
27 | #include "dt_idle_states.h" | ||
28 | |||
27 | static int bl_enter_powerdown(struct cpuidle_device *dev, | 29 | static int bl_enter_powerdown(struct cpuidle_device *dev, |
28 | struct cpuidle_driver *drv, int idx); | 30 | struct cpuidle_driver *drv, int idx); |
29 | 31 | ||
@@ -73,6 +75,12 @@ static struct cpuidle_driver bl_idle_little_driver = { | |||
73 | .state_count = 2, | 75 | .state_count = 2, |
74 | }; | 76 | }; |
75 | 77 | ||
78 | static const struct of_device_id bl_idle_state_match[] __initconst = { | ||
79 | { .compatible = "arm,idle-state", | ||
80 | .data = bl_enter_powerdown }, | ||
81 | { }, | ||
82 | }; | ||
83 | |||
76 | static struct cpuidle_driver bl_idle_big_driver = { | 84 | static struct cpuidle_driver bl_idle_big_driver = { |
77 | .name = "big_idle", | 85 | .name = "big_idle", |
78 | .owner = THIS_MODULE, | 86 | .owner = THIS_MODULE, |
@@ -159,6 +167,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id) | |||
159 | static const struct of_device_id compatible_machine_match[] = { | 167 | static const struct of_device_id compatible_machine_match[] = { |
160 | { .compatible = "arm,vexpress,v2p-ca15_a7" }, | 168 | { .compatible = "arm,vexpress,v2p-ca15_a7" }, |
161 | { .compatible = "samsung,exynos5420" }, | 169 | { .compatible = "samsung,exynos5420" }, |
170 | { .compatible = "samsung,exynos5800" }, | ||
162 | {}, | 171 | {}, |
163 | }; | 172 | }; |
164 | 173 | ||
@@ -190,6 +199,17 @@ static int __init bl_idle_init(void) | |||
190 | if (ret) | 199 | if (ret) |
191 | goto out_uninit_little; | 200 | goto out_uninit_little; |
192 | 201 | ||
202 | /* Start at index 1, index 0 standard WFI */ | ||
203 | ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1); | ||
204 | if (ret < 0) | ||
205 | goto out_uninit_big; | ||
206 | |||
207 | /* Start at index 1, index 0 standard WFI */ | ||
208 | ret = dt_init_idle_driver(&bl_idle_little_driver, | ||
209 | bl_idle_state_match, 1); | ||
210 | if (ret < 0) | ||
211 | goto out_uninit_big; | ||
212 | |||
193 | ret = cpuidle_register(&bl_idle_little_driver, NULL); | 213 | ret = cpuidle_register(&bl_idle_little_driver, NULL); |
194 | if (ret) | 214 | if (ret) |
195 | goto out_uninit_big; | 215 | goto out_uninit_big; |
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c new file mode 100644 index 000000000000..52f4d11bbf3f --- /dev/null +++ b/drivers/cpuidle/dt_idle_states.c | |||
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * DT idle states parsing code. | ||
3 | * | ||
4 | * Copyright (C) 2014 ARM Ltd. | ||
5 | * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #define pr_fmt(fmt) "DT idle-states: " fmt | ||
13 | |||
14 | #include <linux/cpuidle.h> | ||
15 | #include <linux/cpumask.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/of.h> | ||
20 | #include <linux/of_device.h> | ||
21 | |||
22 | #include "dt_idle_states.h" | ||
23 | |||
24 | static int init_state_node(struct cpuidle_state *idle_state, | ||
25 | const struct of_device_id *matches, | ||
26 | struct device_node *state_node) | ||
27 | { | ||
28 | int err; | ||
29 | const struct of_device_id *match_id; | ||
30 | |||
31 | match_id = of_match_node(matches, state_node); | ||
32 | if (!match_id) | ||
33 | return -ENODEV; | ||
34 | /* | ||
35 | * CPUidle drivers are expected to initialize the const void *data | ||
36 | * pointer of the passed in struct of_device_id array to the idle | ||
37 | * state enter function. | ||
38 | */ | ||
39 | idle_state->enter = match_id->data; | ||
40 | |||
41 | err = of_property_read_u32(state_node, "wakeup-latency-us", | ||
42 | &idle_state->exit_latency); | ||
43 | if (err) { | ||
44 | u32 entry_latency, exit_latency; | ||
45 | |||
46 | err = of_property_read_u32(state_node, "entry-latency-us", | ||
47 | &entry_latency); | ||
48 | if (err) { | ||
49 | pr_debug(" * %s missing entry-latency-us property\n", | ||
50 | state_node->full_name); | ||
51 | return -EINVAL; | ||
52 | } | ||
53 | |||
54 | err = of_property_read_u32(state_node, "exit-latency-us", | ||
55 | &exit_latency); | ||
56 | if (err) { | ||
57 | pr_debug(" * %s missing exit-latency-us property\n", | ||
58 | state_node->full_name); | ||
59 | return -EINVAL; | ||
60 | } | ||
61 | /* | ||
62 | * If wakeup-latency-us is missing, default to entry+exit | ||
63 | * latencies as defined in idle states bindings | ||
64 | */ | ||
65 | idle_state->exit_latency = entry_latency + exit_latency; | ||
66 | } | ||
67 | |||
68 | err = of_property_read_u32(state_node, "min-residency-us", | ||
69 | &idle_state->target_residency); | ||
70 | if (err) { | ||
71 | pr_debug(" * %s missing min-residency-us property\n", | ||
72 | state_node->full_name); | ||
73 | return -EINVAL; | ||
74 | } | ||
75 | |||
76 | idle_state->flags = CPUIDLE_FLAG_TIME_VALID; | ||
77 | if (of_property_read_bool(state_node, "local-timer-stop")) | ||
78 | idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP; | ||
79 | /* | ||
80 | * TODO: | ||
81 | * replace with kstrdup and pointer assignment when name | ||
82 | * and desc become string pointers | ||
83 | */ | ||
84 | strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1); | ||
85 | strncpy(idle_state->desc, state_node->name, CPUIDLE_DESC_LEN - 1); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Check that the idle state is uniform across all CPUs in the CPUidle driver | ||
91 | * cpumask | ||
92 | */ | ||
93 | static bool idle_state_valid(struct device_node *state_node, unsigned int idx, | ||
94 | const cpumask_t *cpumask) | ||
95 | { | ||
96 | int cpu; | ||
97 | struct device_node *cpu_node, *curr_state_node; | ||
98 | bool valid = true; | ||
99 | |||
100 | /* | ||
101 | * Compare idle state phandles for index idx on all CPUs in the | ||
102 | * CPUidle driver cpumask. Start from next logical cpu following | ||
103 | * cpumask_first(cpumask) since that's the CPU state_node was | ||
104 | * retrieved from. If a mismatch is found bail out straight | ||
105 | * away since we certainly hit a firmware misconfiguration. | ||
106 | */ | ||
107 | for (cpu = cpumask_next(cpumask_first(cpumask), cpumask); | ||
108 | cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { | ||
109 | cpu_node = of_cpu_device_node_get(cpu); | ||
110 | curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states", | ||
111 | idx); | ||
112 | if (state_node != curr_state_node) | ||
113 | valid = false; | ||
114 | |||
115 | of_node_put(curr_state_node); | ||
116 | of_node_put(cpu_node); | ||
117 | if (!valid) | ||
118 | break; | ||
119 | } | ||
120 | |||
121 | return valid; | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * dt_init_idle_driver() - Parse the DT idle states and initialize the | ||
126 | * idle driver states array | ||
127 | * @drv: Pointer to CPU idle driver to be initialized | ||
128 | * @matches: Array of of_device_id match structures to search in for | ||
129 | * compatible idle state nodes. The data pointer for each valid | ||
130 | * struct of_device_id entry in the matches array must point to | ||
131 | * a function with the following signature, that corresponds to | ||
132 | * the CPUidle state enter function signature: | ||
133 | * | ||
134 | * int (*)(struct cpuidle_device *dev, | ||
135 | * struct cpuidle_driver *drv, | ||
136 | * int index); | ||
137 | * | ||
138 | * @start_idx: First idle state index to be initialized | ||
139 | * | ||
140 | * If DT idle states are detected and are valid the state count and states | ||
141 | * array entries in the cpuidle driver are initialized accordingly starting | ||
142 | * from index start_idx. | ||
143 | * | ||
144 | * Return: number of valid DT idle states parsed, <0 on failure | ||
145 | */ | ||
146 | int dt_init_idle_driver(struct cpuidle_driver *drv, | ||
147 | const struct of_device_id *matches, | ||
148 | unsigned int start_idx) | ||
149 | { | ||
150 | struct cpuidle_state *idle_state; | ||
151 | struct device_node *state_node, *cpu_node; | ||
152 | int i, err = 0; | ||
153 | const cpumask_t *cpumask; | ||
154 | unsigned int state_idx = start_idx; | ||
155 | |||
156 | if (state_idx >= CPUIDLE_STATE_MAX) | ||
157 | return -EINVAL; | ||
158 | /* | ||
159 | * We get the idle states for the first logical cpu in the | ||
160 | * driver mask (or cpu_possible_mask if the driver cpumask is not set) | ||
161 | * and we check through idle_state_valid() if they are uniform | ||
162 | * across CPUs, otherwise we hit a firmware misconfiguration. | ||
163 | */ | ||
164 | cpumask = drv->cpumask ? : cpu_possible_mask; | ||
165 | cpu_node = of_cpu_device_node_get(cpumask_first(cpumask)); | ||
166 | |||
167 | for (i = 0; ; i++) { | ||
168 | state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); | ||
169 | if (!state_node) | ||
170 | break; | ||
171 | |||
172 | if (!idle_state_valid(state_node, i, cpumask)) { | ||
173 | pr_warn("%s idle state not valid, bailing out\n", | ||
174 | state_node->full_name); | ||
175 | err = -EINVAL; | ||
176 | break; | ||
177 | } | ||
178 | |||
179 | if (state_idx == CPUIDLE_STATE_MAX) { | ||
180 | pr_warn("State index reached static CPU idle driver states array size\n"); | ||
181 | break; | ||
182 | } | ||
183 | |||
184 | idle_state = &drv->states[state_idx++]; | ||
185 | err = init_state_node(idle_state, matches, state_node); | ||
186 | if (err) { | ||
187 | pr_err("Parsing idle state node %s failed with err %d\n", | ||
188 | state_node->full_name, err); | ||
189 | err = -EINVAL; | ||
190 | break; | ||
191 | } | ||
192 | of_node_put(state_node); | ||
193 | } | ||
194 | |||
195 | of_node_put(state_node); | ||
196 | of_node_put(cpu_node); | ||
197 | if (err) | ||
198 | return err; | ||
199 | /* | ||
200 | * Update the driver state count only if some valid DT idle states | ||
201 | * were detected | ||
202 | */ | ||
203 | if (i) | ||
204 | drv->state_count = state_idx; | ||
205 | |||
206 | /* | ||
207 | * Return the number of present and valid DT idle states, which can | ||
208 | * also be 0 on platforms with missing DT idle states or legacy DT | ||
209 | * configuration predating the DT idle states bindings. | ||
210 | */ | ||
211 | return i; | ||
212 | } | ||
213 | EXPORT_SYMBOL_GPL(dt_init_idle_driver); | ||
diff --git a/drivers/cpuidle/dt_idle_states.h b/drivers/cpuidle/dt_idle_states.h new file mode 100644 index 000000000000..4818134bc65b --- /dev/null +++ b/drivers/cpuidle/dt_idle_states.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef __DT_IDLE_STATES | ||
2 | #define __DT_IDLE_STATES | ||
3 | |||
4 | int dt_init_idle_driver(struct cpuidle_driver *drv, | ||
5 | const struct of_device_id *matches, | ||
6 | unsigned int start_idx); | ||
7 | #endif | ||
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index ca89412f5122..fb9f511cca23 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c | |||
@@ -28,7 +28,7 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str) | |||
28 | struct cpuidle_governor *gov; | 28 | struct cpuidle_governor *gov; |
29 | 29 | ||
30 | list_for_each_entry(gov, &cpuidle_governors, governor_list) | 30 | list_for_each_entry(gov, &cpuidle_governors, governor_list) |
31 | if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN)) | 31 | if (!strncasecmp(str, gov->name, CPUIDLE_NAME_LEN)) |
32 | return gov; | 32 | return gov; |
33 | 33 | ||
34 | return NULL; | 34 | return NULL; |
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 20dc848481e7..4d4e016d755b 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c | |||
@@ -367,6 +367,10 @@ static int ccp_crypto_init(void) | |||
367 | { | 367 | { |
368 | int ret; | 368 | int ret; |
369 | 369 | ||
370 | ret = ccp_present(); | ||
371 | if (ret) | ||
372 | return ret; | ||
373 | |||
370 | spin_lock_init(&req_queue_lock); | 374 | spin_lock_init(&req_queue_lock); |
371 | INIT_LIST_HEAD(&req_queue.cmds); | 375 | INIT_LIST_HEAD(&req_queue.cmds); |
372 | req_queue.backlog = &req_queue.cmds; | 376 | req_queue.backlog = &req_queue.cmds; |
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index a7d110652a74..c6e6171eb6d3 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
@@ -55,6 +55,20 @@ static inline void ccp_del_device(struct ccp_device *ccp) | |||
55 | } | 55 | } |
56 | 56 | ||
57 | /** | 57 | /** |
58 | * ccp_present - check if a CCP device is present | ||
59 | * | ||
60 | * Returns zero if a CCP device is present, -ENODEV otherwise. | ||
61 | */ | ||
62 | int ccp_present(void) | ||
63 | { | ||
64 | if (ccp_get_device()) | ||
65 | return 0; | ||
66 | |||
67 | return -ENODEV; | ||
68 | } | ||
69 | EXPORT_SYMBOL_GPL(ccp_present); | ||
70 | |||
71 | /** | ||
58 | * ccp_enqueue_cmd - queue an operation for processing by the CCP | 72 | * ccp_enqueue_cmd - queue an operation for processing by the CCP |
59 | * | 73 | * |
60 | * @cmd: ccp_cmd struct to be processed | 74 | * @cmd: ccp_cmd struct to be processed |
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index b707f292b377..65dd1ff93d3b 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | |||
@@ -66,7 +66,7 @@ | |||
66 | #define ADF_DH895XCC_ETR_MAX_BANKS 32 | 66 | #define ADF_DH895XCC_ETR_MAX_BANKS 32 |
67 | #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) | 67 | #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) |
68 | #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) | 68 | #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) |
69 | #define ADF_DH895XCC_SMIA0_MASK 0xFFFF | 69 | #define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF |
70 | #define ADF_DH895XCC_SMIA1_MASK 0x1 | 70 | #define ADF_DH895XCC_SMIA1_MASK 0x1 |
71 | /* Error detection and correction */ | 71 | /* Error detection and correction */ |
72 | #define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) | 72 | #define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) |
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 4cf7d9a950d7..bbea8243f9e8 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -1017,6 +1017,11 @@ static int omap_dma_resume(struct omap_chan *c) | |||
1017 | return -EINVAL; | 1017 | return -EINVAL; |
1018 | 1018 | ||
1019 | if (c->paused) { | 1019 | if (c->paused) { |
1020 | mb(); | ||
1021 | |||
1022 | /* Restore channel link register */ | ||
1023 | omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl); | ||
1024 | |||
1020 | omap_dma_start(c, c->desc); | 1025 | omap_dma_start(c, c->desc); |
1021 | c->paused = false; | 1026 | c->paused = false; |
1022 | } | 1027 | } |
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index d8be608a9f3b..aef6a95adef5 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile | |||
@@ -7,4 +7,4 @@ obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o | |||
7 | obj-$(CONFIG_UEFI_CPER) += cper.o | 7 | obj-$(CONFIG_UEFI_CPER) += cper.o |
8 | obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o | 8 | obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o |
9 | obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o | 9 | obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o |
10 | obj-$(CONFIG_EFI_STUB) += libstub/ | 10 | obj-$(CONFIG_EFI_ARM_STUB) += libstub/ |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d62eaaa75397..687476fb39e3 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
@@ -377,8 +377,10 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, | |||
377 | struct gpio_chip *chip = achip->chip; | 377 | struct gpio_chip *chip = achip->chip; |
378 | struct acpi_resource_gpio *agpio; | 378 | struct acpi_resource_gpio *agpio; |
379 | struct acpi_resource *ares; | 379 | struct acpi_resource *ares; |
380 | int pin_index = (int)address; | ||
380 | acpi_status status; | 381 | acpi_status status; |
381 | bool pull_up; | 382 | bool pull_up; |
383 | int length; | ||
382 | int i; | 384 | int i; |
383 | 385 | ||
384 | status = acpi_buffer_to_resource(achip->conn_info.connection, | 386 | status = acpi_buffer_to_resource(achip->conn_info.connection, |
@@ -400,7 +402,8 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, | |||
400 | return AE_BAD_PARAMETER; | 402 | return AE_BAD_PARAMETER; |
401 | } | 403 | } |
402 | 404 | ||
403 | for (i = 0; i < agpio->pin_table_length; i++) { | 405 | length = min(agpio->pin_table_length, (u16)(pin_index + bits)); |
406 | for (i = pin_index; i < length; ++i) { | ||
404 | unsigned pin = agpio->pin_table[i]; | 407 | unsigned pin = agpio->pin_table[i]; |
405 | struct acpi_gpio_connection *conn; | 408 | struct acpi_gpio_connection *conn; |
406 | struct gpio_desc *desc; | 409 | struct gpio_desc *desc; |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 15cc0bb65dda..c68d037de656 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -413,12 +413,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, | |||
413 | return; | 413 | return; |
414 | } | 414 | } |
415 | 415 | ||
416 | irq_set_chained_handler(parent_irq, parent_handler); | ||
417 | /* | 416 | /* |
418 | * The parent irqchip is already using the chip_data for this | 417 | * The parent irqchip is already using the chip_data for this |
419 | * irqchip, so our callbacks simply use the handler_data. | 418 | * irqchip, so our callbacks simply use the handler_data. |
420 | */ | 419 | */ |
421 | irq_set_handler_data(parent_irq, gpiochip); | 420 | irq_set_handler_data(parent_irq, gpiochip); |
421 | irq_set_chained_handler(parent_irq, parent_handler); | ||
422 | } | 422 | } |
423 | EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip); | 423 | EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip); |
424 | 424 | ||
@@ -1674,7 +1674,7 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, | |||
1674 | set_bit(FLAG_OPEN_SOURCE, &desc->flags); | 1674 | set_bit(FLAG_OPEN_SOURCE, &desc->flags); |
1675 | 1675 | ||
1676 | /* No particular flag request, return here... */ | 1676 | /* No particular flag request, return here... */ |
1677 | if (flags & GPIOD_FLAGS_BIT_DIR_SET) | 1677 | if (!(flags & GPIOD_FLAGS_BIT_DIR_SET)) |
1678 | return desc; | 1678 | return desc; |
1679 | 1679 | ||
1680 | /* Process flags */ | 1680 | /* Process flags */ |
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index dea99d92fb4a..4b7ed5289217 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c | |||
@@ -709,11 +709,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring) | |||
709 | BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count)); | 709 | BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count)); |
710 | BUG_ON(!validate_regs_sorted(ring)); | 710 | BUG_ON(!validate_regs_sorted(ring)); |
711 | 711 | ||
712 | ret = init_hash_table(ring, cmd_tables, cmd_table_count); | 712 | if (hash_empty(ring->cmd_hash)) { |
713 | if (ret) { | 713 | ret = init_hash_table(ring, cmd_tables, cmd_table_count); |
714 | DRM_ERROR("CMD: cmd_parser_init failed!\n"); | 714 | if (ret) { |
715 | fini_hash_table(ring); | 715 | DRM_ERROR("CMD: cmd_parser_init failed!\n"); |
716 | return ret; | 716 | fini_hash_table(ring); |
717 | return ret; | ||
718 | } | ||
717 | } | 719 | } |
718 | 720 | ||
719 | ring->needs_cmd_parser = true; | 721 | ring->needs_cmd_parser = true; |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1411613f2174..e42925f76b4b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -1310,6 +1310,16 @@ void i915_check_and_clear_faults(struct drm_device *dev) | |||
1310 | POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); | 1310 | POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); |
1311 | } | 1311 | } |
1312 | 1312 | ||
1313 | static void i915_ggtt_flush(struct drm_i915_private *dev_priv) | ||
1314 | { | ||
1315 | if (INTEL_INFO(dev_priv->dev)->gen < 6) { | ||
1316 | intel_gtt_chipset_flush(); | ||
1317 | } else { | ||
1318 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | ||
1319 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | ||
1320 | } | ||
1321 | } | ||
1322 | |||
1313 | void i915_gem_suspend_gtt_mappings(struct drm_device *dev) | 1323 | void i915_gem_suspend_gtt_mappings(struct drm_device *dev) |
1314 | { | 1324 | { |
1315 | struct drm_i915_private *dev_priv = dev->dev_private; | 1325 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1326,6 +1336,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) | |||
1326 | dev_priv->gtt.base.start, | 1336 | dev_priv->gtt.base.start, |
1327 | dev_priv->gtt.base.total, | 1337 | dev_priv->gtt.base.total, |
1328 | true); | 1338 | true); |
1339 | |||
1340 | i915_ggtt_flush(dev_priv); | ||
1329 | } | 1341 | } |
1330 | 1342 | ||
1331 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | 1343 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
@@ -1378,7 +1390,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) | |||
1378 | gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); | 1390 | gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); |
1379 | } | 1391 | } |
1380 | 1392 | ||
1381 | i915_gem_chipset_flush(dev); | 1393 | i915_ggtt_flush(dev_priv); |
1382 | } | 1394 | } |
1383 | 1395 | ||
1384 | int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) | 1396 | int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ca34de7f6a7b..5a9de21637b7 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -732,7 +732,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, | |||
732 | if (tmp & HDMI_MODE_SELECT_HDMI) | 732 | if (tmp & HDMI_MODE_SELECT_HDMI) |
733 | pipe_config->has_hdmi_sink = true; | 733 | pipe_config->has_hdmi_sink = true; |
734 | 734 | ||
735 | if (tmp & HDMI_MODE_SELECT_HDMI) | 735 | if (tmp & SDVO_AUDIO_ENABLE) |
736 | pipe_config->has_audio = true; | 736 | pipe_config->has_audio = true; |
737 | 737 | ||
738 | if (!HAS_PCH_SPLIT(dev) && | 738 | if (!HAS_PCH_SPLIT(dev) && |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index ca52ad2ae7d1..d8de1d5140a7 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -396,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) | |||
396 | return -EINVAL; | 396 | return -EINVAL; |
397 | } | 397 | } |
398 | 398 | ||
399 | /* | ||
400 | * If the vendor backlight interface is not in use and ACPI backlight interface | ||
401 | * is broken, do not bother processing backlight change requests from firmware. | ||
402 | */ | ||
403 | static bool should_ignore_backlight_request(void) | ||
404 | { | ||
405 | return acpi_video_backlight_support() && | ||
406 | !acpi_video_verify_backlight_support(); | ||
407 | } | ||
408 | |||
399 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | 409 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) |
400 | { | 410 | { |
401 | struct drm_i915_private *dev_priv = dev->dev_private; | 411 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -404,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
404 | 414 | ||
405 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); | 415 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); |
406 | 416 | ||
407 | /* | 417 | if (should_ignore_backlight_request()) { |
408 | * If the acpi_video interface is not supposed to be used, don't | ||
409 | * bother processing backlight level change requests from firmware. | ||
410 | */ | ||
411 | if (!acpi_video_verify_backlight_support()) { | ||
412 | DRM_DEBUG_KMS("opregion backlight request ignored\n"); | 418 | DRM_DEBUG_KMS("opregion backlight request ignored\n"); |
413 | return 0; | 419 | return 0; |
414 | } | 420 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 4b5bb5d58a54..f8cbb512132f 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | |||
@@ -1763,9 +1763,10 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp | |||
1763 | const int or = ffs(outp->or) - 1; | 1763 | const int or = ffs(outp->or) - 1; |
1764 | const u32 loff = (or * 0x800) + (link * 0x80); | 1764 | const u32 loff = (or * 0x800) + (link * 0x80); |
1765 | const u16 mask = (outp->sorconf.link << 6) | outp->or; | 1765 | const u16 mask = (outp->sorconf.link << 6) | outp->or; |
1766 | struct dcb_output match; | ||
1766 | u8 ver, hdr; | 1767 | u8 ver, hdr; |
1767 | 1768 | ||
1768 | if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp)) | 1769 | if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match)) |
1769 | nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); | 1770 | nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); |
1770 | } | 1771 | } |
1771 | 1772 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 99cd9e4a2aa6..3440fc999f2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c | |||
@@ -285,6 +285,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) | |||
285 | struct nouveau_software_chan *swch; | 285 | struct nouveau_software_chan *swch; |
286 | struct nv_dma_v0 args = {}; | 286 | struct nv_dma_v0 args = {}; |
287 | int ret, i; | 287 | int ret, i; |
288 | bool save; | ||
288 | 289 | ||
289 | nvif_object_map(chan->object); | 290 | nvif_object_map(chan->object); |
290 | 291 | ||
@@ -386,7 +387,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) | |||
386 | } | 387 | } |
387 | 388 | ||
388 | /* initialise synchronisation */ | 389 | /* initialise synchronisation */ |
389 | return nouveau_fence(chan->drm)->context_new(chan); | 390 | save = cli->base.super; |
391 | cli->base.super = true; /* hack until fencenv50 fixed */ | ||
392 | ret = nouveau_fence(chan->drm)->context_new(chan); | ||
393 | cli->base.super = save; | ||
394 | return ret; | ||
390 | } | 395 | } |
391 | 396 | ||
392 | int | 397 | int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 65b4fd53dd4e..4a21b2b06ce2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -550,14 +550,12 @@ nouveau_display_destroy(struct drm_device *dev) | |||
550 | } | 550 | } |
551 | 551 | ||
552 | int | 552 | int |
553 | nouveau_display_suspend(struct drm_device *dev) | 553 | nouveau_display_suspend(struct drm_device *dev, bool runtime) |
554 | { | 554 | { |
555 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
556 | struct drm_crtc *crtc; | 555 | struct drm_crtc *crtc; |
557 | 556 | ||
558 | nouveau_display_fini(dev); | 557 | nouveau_display_fini(dev); |
559 | 558 | ||
560 | NV_INFO(drm, "unpinning framebuffer(s)...\n"); | ||
561 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 559 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
562 | struct nouveau_framebuffer *nouveau_fb; | 560 | struct nouveau_framebuffer *nouveau_fb; |
563 | 561 | ||
@@ -579,12 +577,13 @@ nouveau_display_suspend(struct drm_device *dev) | |||
579 | } | 577 | } |
580 | 578 | ||
581 | void | 579 | void |
582 | nouveau_display_repin(struct drm_device *dev) | 580 | nouveau_display_resume(struct drm_device *dev, bool runtime) |
583 | { | 581 | { |
584 | struct nouveau_drm *drm = nouveau_drm(dev); | 582 | struct nouveau_drm *drm = nouveau_drm(dev); |
585 | struct drm_crtc *crtc; | 583 | struct drm_crtc *crtc; |
586 | int ret; | 584 | int ret, head; |
587 | 585 | ||
586 | /* re-pin fb/cursors */ | ||
588 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 587 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
589 | struct nouveau_framebuffer *nouveau_fb; | 588 | struct nouveau_framebuffer *nouveau_fb; |
590 | 589 | ||
@@ -606,13 +605,6 @@ nouveau_display_repin(struct drm_device *dev) | |||
606 | if (ret) | 605 | if (ret) |
607 | NV_ERROR(drm, "Could not pin/map cursor.\n"); | 606 | NV_ERROR(drm, "Could not pin/map cursor.\n"); |
608 | } | 607 | } |
609 | } | ||
610 | |||
611 | void | ||
612 | nouveau_display_resume(struct drm_device *dev) | ||
613 | { | ||
614 | struct drm_crtc *crtc; | ||
615 | int head; | ||
616 | 608 | ||
617 | nouveau_display_init(dev); | 609 | nouveau_display_init(dev); |
618 | 610 | ||
@@ -627,6 +619,13 @@ nouveau_display_resume(struct drm_device *dev) | |||
627 | for (head = 0; head < dev->mode_config.num_crtc; head++) | 619 | for (head = 0; head < dev->mode_config.num_crtc; head++) |
628 | drm_vblank_on(dev, head); | 620 | drm_vblank_on(dev, head); |
629 | 621 | ||
622 | /* This should ensure we don't hit a locking problem when someone | ||
623 | * wakes us up via a connector. We should never go into suspend | ||
624 | * while the display is on anyways. | ||
625 | */ | ||
626 | if (runtime) | ||
627 | return; | ||
628 | |||
630 | drm_helper_resume_force_mode(dev); | 629 | drm_helper_resume_force_mode(dev); |
631 | 630 | ||
632 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 631 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 88ca177cb1c7..be3d5947c6be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h | |||
@@ -63,9 +63,8 @@ int nouveau_display_create(struct drm_device *dev); | |||
63 | void nouveau_display_destroy(struct drm_device *dev); | 63 | void nouveau_display_destroy(struct drm_device *dev); |
64 | int nouveau_display_init(struct drm_device *dev); | 64 | int nouveau_display_init(struct drm_device *dev); |
65 | void nouveau_display_fini(struct drm_device *dev); | 65 | void nouveau_display_fini(struct drm_device *dev); |
66 | int nouveau_display_suspend(struct drm_device *dev); | 66 | int nouveau_display_suspend(struct drm_device *dev, bool runtime); |
67 | void nouveau_display_repin(struct drm_device *dev); | 67 | void nouveau_display_resume(struct drm_device *dev, bool runtime); |
68 | void nouveau_display_resume(struct drm_device *dev); | ||
69 | int nouveau_display_vblank_enable(struct drm_device *, int); | 68 | int nouveau_display_vblank_enable(struct drm_device *, int); |
70 | void nouveau_display_vblank_disable(struct drm_device *, int); | 69 | void nouveau_display_vblank_disable(struct drm_device *, int); |
71 | int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int, | 70 | int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 9c3af96a7153..3ed32dd90303 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -547,9 +547,11 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) | |||
547 | struct nouveau_cli *cli; | 547 | struct nouveau_cli *cli; |
548 | int ret; | 548 | int ret; |
549 | 549 | ||
550 | if (dev->mode_config.num_crtc && !runtime) { | 550 | if (dev->mode_config.num_crtc) { |
551 | NV_INFO(drm, "suspending console...\n"); | ||
552 | nouveau_fbcon_set_suspend(dev, 1); | ||
551 | NV_INFO(drm, "suspending display...\n"); | 553 | NV_INFO(drm, "suspending display...\n"); |
552 | ret = nouveau_display_suspend(dev); | 554 | ret = nouveau_display_suspend(dev, runtime); |
553 | if (ret) | 555 | if (ret) |
554 | return ret; | 556 | return ret; |
555 | } | 557 | } |
@@ -603,7 +605,7 @@ fail_client: | |||
603 | fail_display: | 605 | fail_display: |
604 | if (dev->mode_config.num_crtc) { | 606 | if (dev->mode_config.num_crtc) { |
605 | NV_INFO(drm, "resuming display...\n"); | 607 | NV_INFO(drm, "resuming display...\n"); |
606 | nouveau_display_resume(dev); | 608 | nouveau_display_resume(dev, runtime); |
607 | } | 609 | } |
608 | return ret; | 610 | return ret; |
609 | } | 611 | } |
@@ -618,9 +620,6 @@ int nouveau_pmops_suspend(struct device *dev) | |||
618 | drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) | 620 | drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) |
619 | return 0; | 621 | return 0; |
620 | 622 | ||
621 | if (drm_dev->mode_config.num_crtc) | ||
622 | nouveau_fbcon_set_suspend(drm_dev, 1); | ||
623 | |||
624 | ret = nouveau_do_suspend(drm_dev, false); | 623 | ret = nouveau_do_suspend(drm_dev, false); |
625 | if (ret) | 624 | if (ret) |
626 | return ret; | 625 | return ret; |
@@ -633,7 +632,7 @@ int nouveau_pmops_suspend(struct device *dev) | |||
633 | } | 632 | } |
634 | 633 | ||
635 | static int | 634 | static int |
636 | nouveau_do_resume(struct drm_device *dev) | 635 | nouveau_do_resume(struct drm_device *dev, bool runtime) |
637 | { | 636 | { |
638 | struct nouveau_drm *drm = nouveau_drm(dev); | 637 | struct nouveau_drm *drm = nouveau_drm(dev); |
639 | struct nouveau_cli *cli; | 638 | struct nouveau_cli *cli; |
@@ -658,7 +657,9 @@ nouveau_do_resume(struct drm_device *dev) | |||
658 | 657 | ||
659 | if (dev->mode_config.num_crtc) { | 658 | if (dev->mode_config.num_crtc) { |
660 | NV_INFO(drm, "resuming display...\n"); | 659 | NV_INFO(drm, "resuming display...\n"); |
661 | nouveau_display_repin(dev); | 660 | nouveau_display_resume(dev, runtime); |
661 | NV_INFO(drm, "resuming console...\n"); | ||
662 | nouveau_fbcon_set_suspend(dev, 0); | ||
662 | } | 663 | } |
663 | 664 | ||
664 | return 0; | 665 | return 0; |
@@ -681,47 +682,21 @@ int nouveau_pmops_resume(struct device *dev) | |||
681 | return ret; | 682 | return ret; |
682 | pci_set_master(pdev); | 683 | pci_set_master(pdev); |
683 | 684 | ||
684 | ret = nouveau_do_resume(drm_dev); | 685 | return nouveau_do_resume(drm_dev, false); |
685 | if (ret) | ||
686 | return ret; | ||
687 | |||
688 | if (drm_dev->mode_config.num_crtc) { | ||
689 | nouveau_display_resume(drm_dev); | ||
690 | nouveau_fbcon_set_suspend(drm_dev, 0); | ||
691 | } | ||
692 | |||
693 | return 0; | ||
694 | } | 686 | } |
695 | 687 | ||
696 | static int nouveau_pmops_freeze(struct device *dev) | 688 | static int nouveau_pmops_freeze(struct device *dev) |
697 | { | 689 | { |
698 | struct pci_dev *pdev = to_pci_dev(dev); | 690 | struct pci_dev *pdev = to_pci_dev(dev); |
699 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 691 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
700 | int ret; | 692 | return nouveau_do_suspend(drm_dev, false); |
701 | |||
702 | if (drm_dev->mode_config.num_crtc) | ||
703 | nouveau_fbcon_set_suspend(drm_dev, 1); | ||
704 | |||
705 | ret = nouveau_do_suspend(drm_dev, false); | ||
706 | return ret; | ||
707 | } | 693 | } |
708 | 694 | ||
709 | static int nouveau_pmops_thaw(struct device *dev) | 695 | static int nouveau_pmops_thaw(struct device *dev) |
710 | { | 696 | { |
711 | struct pci_dev *pdev = to_pci_dev(dev); | 697 | struct pci_dev *pdev = to_pci_dev(dev); |
712 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 698 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
713 | int ret; | 699 | return nouveau_do_resume(drm_dev, false); |
714 | |||
715 | ret = nouveau_do_resume(drm_dev); | ||
716 | if (ret) | ||
717 | return ret; | ||
718 | |||
719 | if (drm_dev->mode_config.num_crtc) { | ||
720 | nouveau_display_resume(drm_dev); | ||
721 | nouveau_fbcon_set_suspend(drm_dev, 0); | ||
722 | } | ||
723 | |||
724 | return 0; | ||
725 | } | 700 | } |
726 | 701 | ||
727 | 702 | ||
@@ -977,7 +952,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev) | |||
977 | return ret; | 952 | return ret; |
978 | pci_set_master(pdev); | 953 | pci_set_master(pdev); |
979 | 954 | ||
980 | ret = nouveau_do_resume(drm_dev); | 955 | ret = nouveau_do_resume(drm_dev, true); |
981 | drm_kms_helper_poll_enable(drm_dev); | 956 | drm_kms_helper_poll_enable(drm_dev); |
982 | /* do magic */ | 957 | /* do magic */ |
983 | nvif_mask(device, 0x88488, (1 << 25), (1 << 25)); | 958 | nvif_mask(device, 0x88488, (1 << 25), (1 << 25)); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 8bdd27091db8..49fe6075cc7c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -486,6 +486,16 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { | |||
486 | .fb_probe = nouveau_fbcon_create, | 486 | .fb_probe = nouveau_fbcon_create, |
487 | }; | 487 | }; |
488 | 488 | ||
489 | static void | ||
490 | nouveau_fbcon_set_suspend_work(struct work_struct *work) | ||
491 | { | ||
492 | struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work); | ||
493 | console_lock(); | ||
494 | nouveau_fbcon_accel_restore(fbcon->dev); | ||
495 | nouveau_fbcon_zfill(fbcon->dev, fbcon); | ||
496 | fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING); | ||
497 | console_unlock(); | ||
498 | } | ||
489 | 499 | ||
490 | int | 500 | int |
491 | nouveau_fbcon_init(struct drm_device *dev) | 501 | nouveau_fbcon_init(struct drm_device *dev) |
@@ -503,6 +513,7 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
503 | if (!fbcon) | 513 | if (!fbcon) |
504 | return -ENOMEM; | 514 | return -ENOMEM; |
505 | 515 | ||
516 | INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work); | ||
506 | fbcon->dev = dev; | 517 | fbcon->dev = dev; |
507 | drm->fbcon = fbcon; | 518 | drm->fbcon = fbcon; |
508 | 519 | ||
@@ -551,14 +562,14 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state) | |||
551 | { | 562 | { |
552 | struct nouveau_drm *drm = nouveau_drm(dev); | 563 | struct nouveau_drm *drm = nouveau_drm(dev); |
553 | if (drm->fbcon) { | 564 | if (drm->fbcon) { |
554 | console_lock(); | 565 | if (state == FBINFO_STATE_RUNNING) { |
555 | if (state == 0) { | 566 | schedule_work(&drm->fbcon->work); |
556 | nouveau_fbcon_accel_restore(dev); | 567 | return; |
557 | nouveau_fbcon_zfill(dev, drm->fbcon); | ||
558 | } | 568 | } |
569 | flush_work(&drm->fbcon->work); | ||
570 | console_lock(); | ||
559 | fb_set_suspend(drm->fbcon->helper.fbdev, state); | 571 | fb_set_suspend(drm->fbcon->helper.fbdev, state); |
560 | if (state == 1) | 572 | nouveau_fbcon_accel_save_disable(dev); |
561 | nouveau_fbcon_accel_save_disable(dev); | ||
562 | console_unlock(); | 573 | console_unlock(); |
563 | } | 574 | } |
564 | } | 575 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 34658cfa8f5d..0b465c7d3907 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h | |||
@@ -36,6 +36,7 @@ struct nouveau_fbdev { | |||
36 | struct nouveau_framebuffer nouveau_fb; | 36 | struct nouveau_framebuffer nouveau_fb; |
37 | struct list_head fbdev_list; | 37 | struct list_head fbdev_list; |
38 | struct drm_device *dev; | 38 | struct drm_device *dev; |
39 | struct work_struct work; | ||
39 | unsigned int saved_flags; | 40 | unsigned int saved_flags; |
40 | struct nvif_object surf2d; | 41 | struct nvif_object surf2d; |
41 | struct nvif_object clip; | 42 | struct nvif_object clip; |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index fa9565957f9d..3d546c606b43 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -4803,7 +4803,7 @@ struct bonaire_mqd | |||
4803 | */ | 4803 | */ |
4804 | static int cik_cp_compute_resume(struct radeon_device *rdev) | 4804 | static int cik_cp_compute_resume(struct radeon_device *rdev) |
4805 | { | 4805 | { |
4806 | int r, i, idx; | 4806 | int r, i, j, idx; |
4807 | u32 tmp; | 4807 | u32 tmp; |
4808 | bool use_doorbell = true; | 4808 | bool use_doorbell = true; |
4809 | u64 hqd_gpu_addr; | 4809 | u64 hqd_gpu_addr; |
@@ -4922,7 +4922,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
4922 | mqd->queue_state.cp_hqd_pq_wptr= 0; | 4922 | mqd->queue_state.cp_hqd_pq_wptr= 0; |
4923 | if (RREG32(CP_HQD_ACTIVE) & 1) { | 4923 | if (RREG32(CP_HQD_ACTIVE) & 1) { |
4924 | WREG32(CP_HQD_DEQUEUE_REQUEST, 1); | 4924 | WREG32(CP_HQD_DEQUEUE_REQUEST, 1); |
4925 | for (i = 0; i < rdev->usec_timeout; i++) { | 4925 | for (j = 0; j < rdev->usec_timeout; j++) { |
4926 | if (!(RREG32(CP_HQD_ACTIVE) & 1)) | 4926 | if (!(RREG32(CP_HQD_ACTIVE) & 1)) |
4927 | break; | 4927 | break; |
4928 | udelay(1); | 4928 | udelay(1); |
@@ -7751,17 +7751,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev) | |||
7751 | wptr = RREG32(IH_RB_WPTR); | 7751 | wptr = RREG32(IH_RB_WPTR); |
7752 | 7752 | ||
7753 | if (wptr & RB_OVERFLOW) { | 7753 | if (wptr & RB_OVERFLOW) { |
7754 | wptr &= ~RB_OVERFLOW; | ||
7754 | /* When a ring buffer overflow happen start parsing interrupt | 7755 | /* When a ring buffer overflow happen start parsing interrupt |
7755 | * from the last not overwritten vector (wptr + 16). Hopefully | 7756 | * from the last not overwritten vector (wptr + 16). Hopefully |
7756 | * this should allow us to catchup. | 7757 | * this should allow us to catchup. |
7757 | */ | 7758 | */ |
7758 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | 7759 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", |
7759 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | 7760 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); |
7760 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | 7761 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; |
7761 | tmp = RREG32(IH_RB_CNTL); | 7762 | tmp = RREG32(IH_RB_CNTL); |
7762 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 7763 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
7763 | WREG32(IH_RB_CNTL, tmp); | 7764 | WREG32(IH_RB_CNTL, tmp); |
7764 | wptr &= ~RB_OVERFLOW; | ||
7765 | } | 7765 | } |
7766 | return (wptr & rdev->ih.ptr_mask); | 7766 | return (wptr & rdev->ih.ptr_mask); |
7767 | } | 7767 | } |
@@ -8251,6 +8251,7 @@ restart_ih: | |||
8251 | /* wptr/rptr are in bytes! */ | 8251 | /* wptr/rptr are in bytes! */ |
8252 | rptr += 16; | 8252 | rptr += 16; |
8253 | rptr &= rdev->ih.ptr_mask; | 8253 | rptr &= rdev->ih.ptr_mask; |
8254 | WREG32(IH_RB_RPTR, rptr); | ||
8254 | } | 8255 | } |
8255 | if (queue_hotplug) | 8256 | if (queue_hotplug) |
8256 | schedule_work(&rdev->hotplug_work); | 8257 | schedule_work(&rdev->hotplug_work); |
@@ -8259,7 +8260,6 @@ restart_ih: | |||
8259 | if (queue_thermal) | 8260 | if (queue_thermal) |
8260 | schedule_work(&rdev->pm.dpm.thermal.work); | 8261 | schedule_work(&rdev->pm.dpm.thermal.work); |
8261 | rdev->ih.rptr = rptr; | 8262 | rdev->ih.rptr = rptr; |
8262 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
8263 | atomic_set(&rdev->ih.lock, 0); | 8263 | atomic_set(&rdev->ih.lock, 0); |
8264 | 8264 | ||
8265 | /* make sure wptr hasn't changed while processing */ | 8265 | /* make sure wptr hasn't changed while processing */ |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index dbca60c7d097..e50807c29f69 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -4749,17 +4749,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | |||
4749 | wptr = RREG32(IH_RB_WPTR); | 4749 | wptr = RREG32(IH_RB_WPTR); |
4750 | 4750 | ||
4751 | if (wptr & RB_OVERFLOW) { | 4751 | if (wptr & RB_OVERFLOW) { |
4752 | wptr &= ~RB_OVERFLOW; | ||
4752 | /* When a ring buffer overflow happen start parsing interrupt | 4753 | /* When a ring buffer overflow happen start parsing interrupt |
4753 | * from the last not overwritten vector (wptr + 16). Hopefully | 4754 | * from the last not overwritten vector (wptr + 16). Hopefully |
4754 | * this should allow us to catchup. | 4755 | * this should allow us to catchup. |
4755 | */ | 4756 | */ |
4756 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | 4757 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", |
4757 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | 4758 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); |
4758 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | 4759 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; |
4759 | tmp = RREG32(IH_RB_CNTL); | 4760 | tmp = RREG32(IH_RB_CNTL); |
4760 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 4761 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
4761 | WREG32(IH_RB_CNTL, tmp); | 4762 | WREG32(IH_RB_CNTL, tmp); |
4762 | wptr &= ~RB_OVERFLOW; | ||
4763 | } | 4763 | } |
4764 | return (wptr & rdev->ih.ptr_mask); | 4764 | return (wptr & rdev->ih.ptr_mask); |
4765 | } | 4765 | } |
@@ -5137,6 +5137,7 @@ restart_ih: | |||
5137 | /* wptr/rptr are in bytes! */ | 5137 | /* wptr/rptr are in bytes! */ |
5138 | rptr += 16; | 5138 | rptr += 16; |
5139 | rptr &= rdev->ih.ptr_mask; | 5139 | rptr &= rdev->ih.ptr_mask; |
5140 | WREG32(IH_RB_RPTR, rptr); | ||
5140 | } | 5141 | } |
5141 | if (queue_hotplug) | 5142 | if (queue_hotplug) |
5142 | schedule_work(&rdev->hotplug_work); | 5143 | schedule_work(&rdev->hotplug_work); |
@@ -5145,7 +5146,6 @@ restart_ih: | |||
5145 | if (queue_thermal && rdev->pm.dpm_enabled) | 5146 | if (queue_thermal && rdev->pm.dpm_enabled) |
5146 | schedule_work(&rdev->pm.dpm.thermal.work); | 5147 | schedule_work(&rdev->pm.dpm.thermal.work); |
5147 | rdev->ih.rptr = rptr; | 5148 | rdev->ih.rptr = rptr; |
5148 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
5149 | atomic_set(&rdev->ih.lock, 0); | 5149 | atomic_set(&rdev->ih.lock, 0); |
5150 | 5150 | ||
5151 | /* make sure wptr hasn't changed while processing */ | 5151 | /* make sure wptr hasn't changed while processing */ |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 3cfb50056f7a..ea5c9af722ef 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -3792,17 +3792,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev) | |||
3792 | wptr = RREG32(IH_RB_WPTR); | 3792 | wptr = RREG32(IH_RB_WPTR); |
3793 | 3793 | ||
3794 | if (wptr & RB_OVERFLOW) { | 3794 | if (wptr & RB_OVERFLOW) { |
3795 | wptr &= ~RB_OVERFLOW; | ||
3795 | /* When a ring buffer overflow happen start parsing interrupt | 3796 | /* When a ring buffer overflow happen start parsing interrupt |
3796 | * from the last not overwritten vector (wptr + 16). Hopefully | 3797 | * from the last not overwritten vector (wptr + 16). Hopefully |
3797 | * this should allow us to catchup. | 3798 | * this should allow us to catchup. |
3798 | */ | 3799 | */ |
3799 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | 3800 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", |
3800 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | 3801 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); |
3801 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | 3802 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; |
3802 | tmp = RREG32(IH_RB_CNTL); | 3803 | tmp = RREG32(IH_RB_CNTL); |
3803 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 3804 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
3804 | WREG32(IH_RB_CNTL, tmp); | 3805 | WREG32(IH_RB_CNTL, tmp); |
3805 | wptr &= ~RB_OVERFLOW; | ||
3806 | } | 3806 | } |
3807 | return (wptr & rdev->ih.ptr_mask); | 3807 | return (wptr & rdev->ih.ptr_mask); |
3808 | } | 3808 | } |
@@ -4048,6 +4048,7 @@ restart_ih: | |||
4048 | /* wptr/rptr are in bytes! */ | 4048 | /* wptr/rptr are in bytes! */ |
4049 | rptr += 16; | 4049 | rptr += 16; |
4050 | rptr &= rdev->ih.ptr_mask; | 4050 | rptr &= rdev->ih.ptr_mask; |
4051 | WREG32(IH_RB_RPTR, rptr); | ||
4051 | } | 4052 | } |
4052 | if (queue_hotplug) | 4053 | if (queue_hotplug) |
4053 | schedule_work(&rdev->hotplug_work); | 4054 | schedule_work(&rdev->hotplug_work); |
@@ -4056,7 +4057,6 @@ restart_ih: | |||
4056 | if (queue_thermal && rdev->pm.dpm_enabled) | 4057 | if (queue_thermal && rdev->pm.dpm_enabled) |
4057 | schedule_work(&rdev->pm.dpm.thermal.work); | 4058 | schedule_work(&rdev->pm.dpm.thermal.work); |
4058 | rdev->ih.rptr = rptr; | 4059 | rdev->ih.rptr = rptr; |
4059 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
4060 | atomic_set(&rdev->ih.lock, 0); | 4060 | atomic_set(&rdev->ih.lock, 0); |
4061 | 4061 | ||
4062 | /* make sure wptr hasn't changed while processing */ | 4062 | /* make sure wptr hasn't changed while processing */ |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5f05b4c84338..3247bfd14410 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -106,6 +106,7 @@ extern int radeon_vm_block_size; | |||
106 | extern int radeon_deep_color; | 106 | extern int radeon_deep_color; |
107 | extern int radeon_use_pflipirq; | 107 | extern int radeon_use_pflipirq; |
108 | extern int radeon_bapm; | 108 | extern int radeon_bapm; |
109 | extern int radeon_backlight; | ||
109 | 110 | ||
110 | /* | 111 | /* |
111 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 112 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 75223dd3a8a3..12c8329644c4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -123,6 +123,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { | |||
123 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 | 123 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 |
124 | */ | 124 | */ |
125 | { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX }, | 125 | { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX }, |
126 | /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU | ||
127 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 | ||
128 | */ | ||
129 | { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, | ||
126 | /* macbook pro 8.2 */ | 130 | /* macbook pro 8.2 */ |
127 | { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, | 131 | { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, |
128 | { 0, 0, 0, 0, 0 }, | 132 | { 0, 0, 0, 0, 0 }, |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 4126fd0937a2..f9d17b29b343 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -181,6 +181,7 @@ int radeon_vm_block_size = -1; | |||
181 | int radeon_deep_color = 0; | 181 | int radeon_deep_color = 0; |
182 | int radeon_use_pflipirq = 2; | 182 | int radeon_use_pflipirq = 2; |
183 | int radeon_bapm = -1; | 183 | int radeon_bapm = -1; |
184 | int radeon_backlight = -1; | ||
184 | 185 | ||
185 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 186 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
186 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 187 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
@@ -263,6 +264,9 @@ module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444); | |||
263 | MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)"); | 264 | MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)"); |
264 | module_param_named(bapm, radeon_bapm, int, 0444); | 265 | module_param_named(bapm, radeon_bapm, int, 0444); |
265 | 266 | ||
267 | MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)"); | ||
268 | module_param_named(backlight, radeon_backlight, int, 0444); | ||
269 | |||
266 | static struct pci_device_id pciidlist[] = { | 270 | static struct pci_device_id pciidlist[] = { |
267 | radeon_PCI_IDS | 271 | radeon_PCI_IDS |
268 | }; | 272 | }; |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 3c2094c25b53..15edf23b465c 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -158,10 +158,43 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8 | |||
158 | return ret; | 158 | return ret; |
159 | } | 159 | } |
160 | 160 | ||
161 | static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, | ||
162 | struct drm_connector *connector) | ||
163 | { | ||
164 | struct drm_device *dev = radeon_encoder->base.dev; | ||
165 | struct radeon_device *rdev = dev->dev_private; | ||
166 | bool use_bl = false; | ||
167 | |||
168 | if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))) | ||
169 | return; | ||
170 | |||
171 | if (radeon_backlight == 0) { | ||
172 | return; | ||
173 | } else if (radeon_backlight == 1) { | ||
174 | use_bl = true; | ||
175 | } else if (radeon_backlight == -1) { | ||
176 | /* Quirks */ | ||
177 | /* Amilo Xi 2550 only works with acpi bl */ | ||
178 | if ((rdev->pdev->device == 0x9583) && | ||
179 | (rdev->pdev->subsystem_vendor == 0x1734) && | ||
180 | (rdev->pdev->subsystem_device == 0x1107)) | ||
181 | use_bl = false; | ||
182 | else | ||
183 | use_bl = true; | ||
184 | } | ||
185 | |||
186 | if (use_bl) { | ||
187 | if (rdev->is_atom_bios) | ||
188 | radeon_atom_backlight_init(radeon_encoder, connector); | ||
189 | else | ||
190 | radeon_legacy_backlight_init(radeon_encoder, connector); | ||
191 | rdev->mode_info.bl_encoder = radeon_encoder; | ||
192 | } | ||
193 | } | ||
194 | |||
161 | void | 195 | void |
162 | radeon_link_encoder_connector(struct drm_device *dev) | 196 | radeon_link_encoder_connector(struct drm_device *dev) |
163 | { | 197 | { |
164 | struct radeon_device *rdev = dev->dev_private; | ||
165 | struct drm_connector *connector; | 198 | struct drm_connector *connector; |
166 | struct radeon_connector *radeon_connector; | 199 | struct radeon_connector *radeon_connector; |
167 | struct drm_encoder *encoder; | 200 | struct drm_encoder *encoder; |
@@ -174,13 +207,8 @@ radeon_link_encoder_connector(struct drm_device *dev) | |||
174 | radeon_encoder = to_radeon_encoder(encoder); | 207 | radeon_encoder = to_radeon_encoder(encoder); |
175 | if (radeon_encoder->devices & radeon_connector->devices) { | 208 | if (radeon_encoder->devices & radeon_connector->devices) { |
176 | drm_mode_connector_attach_encoder(connector, encoder); | 209 | drm_mode_connector_attach_encoder(connector, encoder); |
177 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 210 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
178 | if (rdev->is_atom_bios) | 211 | radeon_encoder_add_backlight(radeon_encoder, connector); |
179 | radeon_atom_backlight_init(radeon_encoder, connector); | ||
180 | else | ||
181 | radeon_legacy_backlight_init(radeon_encoder, connector); | ||
182 | rdev->mode_info.bl_encoder = radeon_encoder; | ||
183 | } | ||
184 | } | 212 | } |
185 | } | 213 | } |
186 | } | 214 | } |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 6bce40847753..3a0b973e8a96 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -6316,17 +6316,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev) | |||
6316 | wptr = RREG32(IH_RB_WPTR); | 6316 | wptr = RREG32(IH_RB_WPTR); |
6317 | 6317 | ||
6318 | if (wptr & RB_OVERFLOW) { | 6318 | if (wptr & RB_OVERFLOW) { |
6319 | wptr &= ~RB_OVERFLOW; | ||
6319 | /* When a ring buffer overflow happen start parsing interrupt | 6320 | /* When a ring buffer overflow happen start parsing interrupt |
6320 | * from the last not overwritten vector (wptr + 16). Hopefully | 6321 | * from the last not overwritten vector (wptr + 16). Hopefully |
6321 | * this should allow us to catchup. | 6322 | * this should allow us to catchup. |
6322 | */ | 6323 | */ |
6323 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | 6324 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", |
6324 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | 6325 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); |
6325 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | 6326 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; |
6326 | tmp = RREG32(IH_RB_CNTL); | 6327 | tmp = RREG32(IH_RB_CNTL); |
6327 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 6328 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
6328 | WREG32(IH_RB_CNTL, tmp); | 6329 | WREG32(IH_RB_CNTL, tmp); |
6329 | wptr &= ~RB_OVERFLOW; | ||
6330 | } | 6330 | } |
6331 | return (wptr & rdev->ih.ptr_mask); | 6331 | return (wptr & rdev->ih.ptr_mask); |
6332 | } | 6332 | } |
@@ -6664,13 +6664,13 @@ restart_ih: | |||
6664 | /* wptr/rptr are in bytes! */ | 6664 | /* wptr/rptr are in bytes! */ |
6665 | rptr += 16; | 6665 | rptr += 16; |
6666 | rptr &= rdev->ih.ptr_mask; | 6666 | rptr &= rdev->ih.ptr_mask; |
6667 | WREG32(IH_RB_RPTR, rptr); | ||
6667 | } | 6668 | } |
6668 | if (queue_hotplug) | 6669 | if (queue_hotplug) |
6669 | schedule_work(&rdev->hotplug_work); | 6670 | schedule_work(&rdev->hotplug_work); |
6670 | if (queue_thermal && rdev->pm.dpm_enabled) | 6671 | if (queue_thermal && rdev->pm.dpm_enabled) |
6671 | schedule_work(&rdev->pm.dpm.thermal.work); | 6672 | schedule_work(&rdev->pm.dpm.thermal.work); |
6672 | rdev->ih.rptr = rptr; | 6673 | rdev->ih.rptr = rptr; |
6673 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
6674 | atomic_set(&rdev->ih.lock, 0); | 6674 | atomic_set(&rdev->ih.lock, 0); |
6675 | 6675 | ||
6676 | /* make sure wptr hasn't changed while processing */ | 6676 | /* make sure wptr hasn't changed while processing */ |
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index 4a7cbfad1d74..fcdbde4ec692 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c | |||
@@ -93,13 +93,29 @@ static ssize_t show_power_crit(struct device *dev, | |||
93 | } | 93 | } |
94 | static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL); | 94 | static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL); |
95 | 95 | ||
96 | static umode_t fam15h_power_is_visible(struct kobject *kobj, | ||
97 | struct attribute *attr, | ||
98 | int index) | ||
99 | { | ||
100 | /* power1_input is only reported for Fam15h, Models 00h-0fh */ | ||
101 | if (attr == &dev_attr_power1_input.attr && | ||
102 | (boot_cpu_data.x86 != 0x15 || boot_cpu_data.x86_model > 0xf)) | ||
103 | return 0; | ||
104 | |||
105 | return attr->mode; | ||
106 | } | ||
107 | |||
96 | static struct attribute *fam15h_power_attrs[] = { | 108 | static struct attribute *fam15h_power_attrs[] = { |
97 | &dev_attr_power1_input.attr, | 109 | &dev_attr_power1_input.attr, |
98 | &dev_attr_power1_crit.attr, | 110 | &dev_attr_power1_crit.attr, |
99 | NULL | 111 | NULL |
100 | }; | 112 | }; |
101 | 113 | ||
102 | ATTRIBUTE_GROUPS(fam15h_power); | 114 | static const struct attribute_group fam15h_power_group = { |
115 | .attrs = fam15h_power_attrs, | ||
116 | .is_visible = fam15h_power_is_visible, | ||
117 | }; | ||
118 | __ATTRIBUTE_GROUPS(fam15h_power); | ||
103 | 119 | ||
104 | static bool fam15h_power_is_internal_node0(struct pci_dev *f4) | 120 | static bool fam15h_power_is_internal_node0(struct pci_dev *f4) |
105 | { | 121 | { |
@@ -216,7 +232,9 @@ static int fam15h_power_probe(struct pci_dev *pdev, | |||
216 | 232 | ||
217 | static const struct pci_device_id fam15h_power_id_table[] = { | 233 | static const struct pci_device_id fam15h_power_id_table[] = { |
218 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, | 234 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, |
235 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) }, | ||
219 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, | 236 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, |
237 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, | ||
220 | {} | 238 | {} |
221 | }; | 239 | }; |
222 | MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); | 240 | MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); |
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c index e42964f07f67..ad571ec795a3 100644 --- a/drivers/hwmon/tmp103.c +++ b/drivers/hwmon/tmp103.c | |||
@@ -145,7 +145,7 @@ static int tmp103_probe(struct i2c_client *client, | |||
145 | } | 145 | } |
146 | 146 | ||
147 | i2c_set_clientdata(client, regmap); | 147 | i2c_set_clientdata(client, regmap); |
148 | hwmon_dev = hwmon_device_register_with_groups(dev, client->name, | 148 | hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, |
149 | regmap, tmp103_groups); | 149 | regmap, tmp103_groups); |
150 | return PTR_ERR_OR_ZERO(hwmon_dev); | 150 | return PTR_ERR_OR_ZERO(hwmon_dev); |
151 | } | 151 | } |
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile index e0228b228256..1722f50f2473 100644 --- a/drivers/i2c/Makefile +++ b/drivers/i2c/Makefile | |||
@@ -2,11 +2,8 @@ | |||
2 | # Makefile for the i2c core. | 2 | # Makefile for the i2c core. |
3 | # | 3 | # |
4 | 4 | ||
5 | i2ccore-y := i2c-core.o | ||
6 | i2ccore-$(CONFIG_ACPI) += i2c-acpi.o | ||
7 | |||
8 | obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o | 5 | obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o |
9 | obj-$(CONFIG_I2C) += i2ccore.o | 6 | obj-$(CONFIG_I2C) += i2c-core.o |
10 | obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o | 7 | obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o |
11 | obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o | 8 | obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o |
12 | obj-$(CONFIG_I2C_MUX) += i2c-mux.o | 9 | obj-$(CONFIG_I2C_MUX) += i2c-mux.o |
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 984492553e95..d9ee43c80cde 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
@@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, | |||
497 | desc->wr_len_cmd = dma_size; | 497 | desc->wr_len_cmd = dma_size; |
498 | desc->control |= ISMT_DESC_BLK; | 498 | desc->control |= ISMT_DESC_BLK; |
499 | priv->dma_buffer[0] = command; | 499 | priv->dma_buffer[0] = command; |
500 | memcpy(&priv->dma_buffer[1], &data->block[1], dma_size); | 500 | memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1); |
501 | } else { | 501 | } else { |
502 | /* Block Read */ | 502 | /* Block Read */ |
503 | dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n"); | 503 | dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n"); |
@@ -525,7 +525,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, | |||
525 | desc->wr_len_cmd = dma_size; | 525 | desc->wr_len_cmd = dma_size; |
526 | desc->control |= ISMT_DESC_I2C; | 526 | desc->control |= ISMT_DESC_I2C; |
527 | priv->dma_buffer[0] = command; | 527 | priv->dma_buffer[0] = command; |
528 | memcpy(&priv->dma_buffer[1], &data->block[1], dma_size); | 528 | memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1); |
529 | } else { | 529 | } else { |
530 | /* i2c Block Read */ | 530 | /* i2c Block Read */ |
531 | dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n"); | 531 | dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n"); |
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 7170fc892829..65a21fed08b5 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c | |||
@@ -429,7 +429,7 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap, | |||
429 | ret = mxs_i2c_pio_wait_xfer_end(i2c); | 429 | ret = mxs_i2c_pio_wait_xfer_end(i2c); |
430 | if (ret) { | 430 | if (ret) { |
431 | dev_err(i2c->dev, | 431 | dev_err(i2c->dev, |
432 | "PIO: Failed to send SELECT command!\n"); | 432 | "PIO: Failed to send READ command!\n"); |
433 | goto cleanup; | 433 | goto cleanup; |
434 | } | 434 | } |
435 | 435 | ||
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 3a4d64e1dfb1..092d89bd3224 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c | |||
@@ -674,16 +674,20 @@ static int qup_i2c_probe(struct platform_device *pdev) | |||
674 | qup->adap.dev.of_node = pdev->dev.of_node; | 674 | qup->adap.dev.of_node = pdev->dev.of_node; |
675 | strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name)); | 675 | strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name)); |
676 | 676 | ||
677 | ret = i2c_add_adapter(&qup->adap); | ||
678 | if (ret) | ||
679 | goto fail; | ||
680 | |||
681 | pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC); | 677 | pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC); |
682 | pm_runtime_use_autosuspend(qup->dev); | 678 | pm_runtime_use_autosuspend(qup->dev); |
683 | pm_runtime_set_active(qup->dev); | 679 | pm_runtime_set_active(qup->dev); |
684 | pm_runtime_enable(qup->dev); | 680 | pm_runtime_enable(qup->dev); |
681 | |||
682 | ret = i2c_add_adapter(&qup->adap); | ||
683 | if (ret) | ||
684 | goto fail_runtime; | ||
685 | |||
685 | return 0; | 686 | return 0; |
686 | 687 | ||
688 | fail_runtime: | ||
689 | pm_runtime_disable(qup->dev); | ||
690 | pm_runtime_set_suspended(qup->dev); | ||
687 | fail: | 691 | fail: |
688 | qup_i2c_disable_clocks(qup); | 692 | qup_i2c_disable_clocks(qup); |
689 | return ret; | 693 | return ret; |
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 1cc146cfc1f3..e506fcd3ca04 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
@@ -76,8 +76,8 @@ | |||
76 | #define RCAR_IRQ_RECV (MNR | MAL | MST | MAT | MDR) | 76 | #define RCAR_IRQ_RECV (MNR | MAL | MST | MAT | MDR) |
77 | #define RCAR_IRQ_STOP (MST) | 77 | #define RCAR_IRQ_STOP (MST) |
78 | 78 | ||
79 | #define RCAR_IRQ_ACK_SEND (~(MAT | MDE)) | 79 | #define RCAR_IRQ_ACK_SEND (~(MAT | MDE) & 0xFF) |
80 | #define RCAR_IRQ_ACK_RECV (~(MAT | MDR)) | 80 | #define RCAR_IRQ_ACK_RECV (~(MAT | MDR) & 0xFF) |
81 | 81 | ||
82 | #define ID_LAST_MSG (1 << 0) | 82 | #define ID_LAST_MSG (1 << 0) |
83 | #define ID_IOERROR (1 << 1) | 83 | #define ID_IOERROR (1 << 1) |
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index e637c32ae517..b38b0529946a 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c | |||
@@ -238,7 +238,7 @@ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c) | |||
238 | for (i = 0; i < 8; ++i) { | 238 | for (i = 0; i < 8; ++i) { |
239 | val = 0; | 239 | val = 0; |
240 | for (j = 0; j < 4; ++j) { | 240 | for (j = 0; j < 4; ++j) { |
241 | if (i2c->processed == i2c->msg->len) | 241 | if ((i2c->processed == i2c->msg->len) && (cnt != 0)) |
242 | break; | 242 | break; |
243 | 243 | ||
244 | if (i2c->processed == 0 && cnt == 0) | 244 | if (i2c->processed == 0 && cnt == 0) |
@@ -433,12 +433,11 @@ static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate) | |||
433 | unsigned long i2c_rate = clk_get_rate(i2c->clk); | 433 | unsigned long i2c_rate = clk_get_rate(i2c->clk); |
434 | unsigned int div; | 434 | unsigned int div; |
435 | 435 | ||
436 | /* SCL rate = (clk rate) / (8 * DIV) */ | 436 | /* set DIV = DIVH = DIVL |
437 | div = DIV_ROUND_UP(i2c_rate, scl_rate * 8); | 437 | * SCL rate = (clk rate) / (8 * (DIVH + 1 + DIVL + 1)) |
438 | 438 | * = (clk rate) / (16 * (DIV + 1)) | |
439 | /* The lower and upper half of the CLKDIV reg describe the length of | 439 | */ |
440 | * SCL low & high periods. */ | 440 | div = DIV_ROUND_UP(i2c_rate, scl_rate * 16) - 1; |
441 | div = DIV_ROUND_UP(div, 2); | ||
442 | 441 | ||
443 | i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV); | 442 | i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV); |
444 | } | 443 | } |
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 87d0371cebb7..efba1ebe16ba 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c | |||
@@ -380,34 +380,33 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev) | |||
380 | { | 380 | { |
381 | int ret; | 381 | int ret; |
382 | if (!i2c_dev->hw->has_single_clk_source) { | 382 | if (!i2c_dev->hw->has_single_clk_source) { |
383 | ret = clk_prepare_enable(i2c_dev->fast_clk); | 383 | ret = clk_enable(i2c_dev->fast_clk); |
384 | if (ret < 0) { | 384 | if (ret < 0) { |
385 | dev_err(i2c_dev->dev, | 385 | dev_err(i2c_dev->dev, |
386 | "Enabling fast clk failed, err %d\n", ret); | 386 | "Enabling fast clk failed, err %d\n", ret); |
387 | return ret; | 387 | return ret; |
388 | } | 388 | } |
389 | } | 389 | } |
390 | ret = clk_prepare_enable(i2c_dev->div_clk); | 390 | ret = clk_enable(i2c_dev->div_clk); |
391 | if (ret < 0) { | 391 | if (ret < 0) { |
392 | dev_err(i2c_dev->dev, | 392 | dev_err(i2c_dev->dev, |
393 | "Enabling div clk failed, err %d\n", ret); | 393 | "Enabling div clk failed, err %d\n", ret); |
394 | clk_disable_unprepare(i2c_dev->fast_clk); | 394 | clk_disable(i2c_dev->fast_clk); |
395 | } | 395 | } |
396 | return ret; | 396 | return ret; |
397 | } | 397 | } |
398 | 398 | ||
399 | static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev) | 399 | static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev) |
400 | { | 400 | { |
401 | clk_disable_unprepare(i2c_dev->div_clk); | 401 | clk_disable(i2c_dev->div_clk); |
402 | if (!i2c_dev->hw->has_single_clk_source) | 402 | if (!i2c_dev->hw->has_single_clk_source) |
403 | clk_disable_unprepare(i2c_dev->fast_clk); | 403 | clk_disable(i2c_dev->fast_clk); |
404 | } | 404 | } |
405 | 405 | ||
406 | static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) | 406 | static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) |
407 | { | 407 | { |
408 | u32 val; | 408 | u32 val; |
409 | int err = 0; | 409 | int err = 0; |
410 | int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; | ||
411 | u32 clk_divisor; | 410 | u32 clk_divisor; |
412 | 411 | ||
413 | err = tegra_i2c_clock_enable(i2c_dev); | 412 | err = tegra_i2c_clock_enable(i2c_dev); |
@@ -428,9 +427,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) | |||
428 | i2c_writel(i2c_dev, val, I2C_CNFG); | 427 | i2c_writel(i2c_dev, val, I2C_CNFG); |
429 | i2c_writel(i2c_dev, 0, I2C_INT_MASK); | 428 | i2c_writel(i2c_dev, 0, I2C_INT_MASK); |
430 | 429 | ||
431 | clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1); | ||
432 | clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * clk_multiplier); | ||
433 | |||
434 | /* Make sure clock divisor programmed correctly */ | 430 | /* Make sure clock divisor programmed correctly */ |
435 | clk_divisor = i2c_dev->hw->clk_divisor_hs_mode; | 431 | clk_divisor = i2c_dev->hw->clk_divisor_hs_mode; |
436 | clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode << | 432 | clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode << |
@@ -712,6 +708,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) | |||
712 | void __iomem *base; | 708 | void __iomem *base; |
713 | int irq; | 709 | int irq; |
714 | int ret = 0; | 710 | int ret = 0; |
711 | int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; | ||
715 | 712 | ||
716 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 713 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
717 | base = devm_ioremap_resource(&pdev->dev, res); | 714 | base = devm_ioremap_resource(&pdev->dev, res); |
@@ -777,17 +774,39 @@ static int tegra_i2c_probe(struct platform_device *pdev) | |||
777 | 774 | ||
778 | platform_set_drvdata(pdev, i2c_dev); | 775 | platform_set_drvdata(pdev, i2c_dev); |
779 | 776 | ||
777 | if (!i2c_dev->hw->has_single_clk_source) { | ||
778 | ret = clk_prepare(i2c_dev->fast_clk); | ||
779 | if (ret < 0) { | ||
780 | dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret); | ||
781 | return ret; | ||
782 | } | ||
783 | } | ||
784 | |||
785 | clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1); | ||
786 | ret = clk_set_rate(i2c_dev->div_clk, | ||
787 | i2c_dev->bus_clk_rate * clk_multiplier); | ||
788 | if (ret) { | ||
789 | dev_err(i2c_dev->dev, "Clock rate change failed %d\n", ret); | ||
790 | goto unprepare_fast_clk; | ||
791 | } | ||
792 | |||
793 | ret = clk_prepare(i2c_dev->div_clk); | ||
794 | if (ret < 0) { | ||
795 | dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret); | ||
796 | goto unprepare_fast_clk; | ||
797 | } | ||
798 | |||
780 | ret = tegra_i2c_init(i2c_dev); | 799 | ret = tegra_i2c_init(i2c_dev); |
781 | if (ret) { | 800 | if (ret) { |
782 | dev_err(&pdev->dev, "Failed to initialize i2c controller"); | 801 | dev_err(&pdev->dev, "Failed to initialize i2c controller"); |
783 | return ret; | 802 | goto unprepare_div_clk; |
784 | } | 803 | } |
785 | 804 | ||
786 | ret = devm_request_irq(&pdev->dev, i2c_dev->irq, | 805 | ret = devm_request_irq(&pdev->dev, i2c_dev->irq, |
787 | tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); | 806 | tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); |
788 | if (ret) { | 807 | if (ret) { |
789 | dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); | 808 | dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); |
790 | return ret; | 809 | goto unprepare_div_clk; |
791 | } | 810 | } |
792 | 811 | ||
793 | i2c_set_adapdata(&i2c_dev->adapter, i2c_dev); | 812 | i2c_set_adapdata(&i2c_dev->adapter, i2c_dev); |
@@ -803,16 +822,30 @@ static int tegra_i2c_probe(struct platform_device *pdev) | |||
803 | ret = i2c_add_numbered_adapter(&i2c_dev->adapter); | 822 | ret = i2c_add_numbered_adapter(&i2c_dev->adapter); |
804 | if (ret) { | 823 | if (ret) { |
805 | dev_err(&pdev->dev, "Failed to add I2C adapter\n"); | 824 | dev_err(&pdev->dev, "Failed to add I2C adapter\n"); |
806 | return ret; | 825 | goto unprepare_div_clk; |
807 | } | 826 | } |
808 | 827 | ||
809 | return 0; | 828 | return 0; |
829 | |||
830 | unprepare_div_clk: | ||
831 | clk_unprepare(i2c_dev->div_clk); | ||
832 | |||
833 | unprepare_fast_clk: | ||
834 | if (!i2c_dev->hw->has_single_clk_source) | ||
835 | clk_unprepare(i2c_dev->fast_clk); | ||
836 | |||
837 | return ret; | ||
810 | } | 838 | } |
811 | 839 | ||
812 | static int tegra_i2c_remove(struct platform_device *pdev) | 840 | static int tegra_i2c_remove(struct platform_device *pdev) |
813 | { | 841 | { |
814 | struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); | 842 | struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); |
815 | i2c_del_adapter(&i2c_dev->adapter); | 843 | i2c_del_adapter(&i2c_dev->adapter); |
844 | |||
845 | clk_unprepare(i2c_dev->div_clk); | ||
846 | if (!i2c_dev->hw->has_single_clk_source) | ||
847 | clk_unprepare(i2c_dev->fast_clk); | ||
848 | |||
816 | return 0; | 849 | return 0; |
817 | } | 850 | } |
818 | 851 | ||
diff --git a/drivers/i2c/i2c-acpi.c b/drivers/i2c/i2c-acpi.c deleted file mode 100644 index 0dbc18c15c43..000000000000 --- a/drivers/i2c/i2c-acpi.c +++ /dev/null | |||
@@ -1,364 +0,0 @@ | |||
1 | /* | ||
2 | * I2C ACPI code | ||
3 | * | ||
4 | * Copyright (C) 2014 Intel Corp | ||
5 | * | ||
6 | * Author: Lan Tianyu <tianyu.lan@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
14 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
15 | * for more details. | ||
16 | */ | ||
17 | #define pr_fmt(fmt) "I2C/ACPI : " fmt | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/err.h> | ||
22 | #include <linux/i2c.h> | ||
23 | #include <linux/acpi.h> | ||
24 | |||
25 | struct acpi_i2c_handler_data { | ||
26 | struct acpi_connection_info info; | ||
27 | struct i2c_adapter *adapter; | ||
28 | }; | ||
29 | |||
30 | struct gsb_buffer { | ||
31 | u8 status; | ||
32 | u8 len; | ||
33 | union { | ||
34 | u16 wdata; | ||
35 | u8 bdata; | ||
36 | u8 data[0]; | ||
37 | }; | ||
38 | } __packed; | ||
39 | |||
40 | static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) | ||
41 | { | ||
42 | struct i2c_board_info *info = data; | ||
43 | |||
44 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
45 | struct acpi_resource_i2c_serialbus *sb; | ||
46 | |||
47 | sb = &ares->data.i2c_serial_bus; | ||
48 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
49 | info->addr = sb->slave_address; | ||
50 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
51 | info->flags |= I2C_CLIENT_TEN; | ||
52 | } | ||
53 | } else if (info->irq < 0) { | ||
54 | struct resource r; | ||
55 | |||
56 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
57 | info->irq = r.start; | ||
58 | } | ||
59 | |||
60 | /* Tell the ACPI core to skip this resource */ | ||
61 | return 1; | ||
62 | } | ||
63 | |||
64 | static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | ||
65 | void *data, void **return_value) | ||
66 | { | ||
67 | struct i2c_adapter *adapter = data; | ||
68 | struct list_head resource_list; | ||
69 | struct i2c_board_info info; | ||
70 | struct acpi_device *adev; | ||
71 | int ret; | ||
72 | |||
73 | if (acpi_bus_get_device(handle, &adev)) | ||
74 | return AE_OK; | ||
75 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
76 | return AE_OK; | ||
77 | |||
78 | memset(&info, 0, sizeof(info)); | ||
79 | info.acpi_node.companion = adev; | ||
80 | info.irq = -1; | ||
81 | |||
82 | INIT_LIST_HEAD(&resource_list); | ||
83 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
84 | acpi_i2c_add_resource, &info); | ||
85 | acpi_dev_free_resource_list(&resource_list); | ||
86 | |||
87 | if (ret < 0 || !info.addr) | ||
88 | return AE_OK; | ||
89 | |||
90 | adev->power.flags.ignore_parent = true; | ||
91 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); | ||
92 | if (!i2c_new_device(adapter, &info)) { | ||
93 | adev->power.flags.ignore_parent = false; | ||
94 | dev_err(&adapter->dev, | ||
95 | "failed to add I2C device %s from ACPI\n", | ||
96 | dev_name(&adev->dev)); | ||
97 | } | ||
98 | |||
99 | return AE_OK; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter | ||
104 | * @adap: pointer to adapter | ||
105 | * | ||
106 | * Enumerate all I2C slave devices behind this adapter by walking the ACPI | ||
107 | * namespace. When a device is found it will be added to the Linux device | ||
108 | * model and bound to the corresponding ACPI handle. | ||
109 | */ | ||
110 | void acpi_i2c_register_devices(struct i2c_adapter *adap) | ||
111 | { | ||
112 | acpi_handle handle; | ||
113 | acpi_status status; | ||
114 | |||
115 | if (!adap->dev.parent) | ||
116 | return; | ||
117 | |||
118 | handle = ACPI_HANDLE(adap->dev.parent); | ||
119 | if (!handle) | ||
120 | return; | ||
121 | |||
122 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
123 | acpi_i2c_add_device, NULL, | ||
124 | adap, NULL); | ||
125 | if (ACPI_FAILURE(status)) | ||
126 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); | ||
127 | } | ||
128 | |||
129 | #ifdef CONFIG_ACPI_I2C_OPREGION | ||
130 | static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, | ||
131 | u8 cmd, u8 *data, u8 data_len) | ||
132 | { | ||
133 | |||
134 | struct i2c_msg msgs[2]; | ||
135 | int ret; | ||
136 | u8 *buffer; | ||
137 | |||
138 | buffer = kzalloc(data_len, GFP_KERNEL); | ||
139 | if (!buffer) | ||
140 | return AE_NO_MEMORY; | ||
141 | |||
142 | msgs[0].addr = client->addr; | ||
143 | msgs[0].flags = client->flags; | ||
144 | msgs[0].len = 1; | ||
145 | msgs[0].buf = &cmd; | ||
146 | |||
147 | msgs[1].addr = client->addr; | ||
148 | msgs[1].flags = client->flags | I2C_M_RD; | ||
149 | msgs[1].len = data_len; | ||
150 | msgs[1].buf = buffer; | ||
151 | |||
152 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
153 | if (ret < 0) | ||
154 | dev_err(&client->adapter->dev, "i2c read failed\n"); | ||
155 | else | ||
156 | memcpy(data, buffer, data_len); | ||
157 | |||
158 | kfree(buffer); | ||
159 | return ret; | ||
160 | } | ||
161 | |||
162 | static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, | ||
163 | u8 cmd, u8 *data, u8 data_len) | ||
164 | { | ||
165 | |||
166 | struct i2c_msg msgs[1]; | ||
167 | u8 *buffer; | ||
168 | int ret = AE_OK; | ||
169 | |||
170 | buffer = kzalloc(data_len + 1, GFP_KERNEL); | ||
171 | if (!buffer) | ||
172 | return AE_NO_MEMORY; | ||
173 | |||
174 | buffer[0] = cmd; | ||
175 | memcpy(buffer + 1, data, data_len); | ||
176 | |||
177 | msgs[0].addr = client->addr; | ||
178 | msgs[0].flags = client->flags; | ||
179 | msgs[0].len = data_len + 1; | ||
180 | msgs[0].buf = buffer; | ||
181 | |||
182 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
183 | if (ret < 0) | ||
184 | dev_err(&client->adapter->dev, "i2c write failed\n"); | ||
185 | |||
186 | kfree(buffer); | ||
187 | return ret; | ||
188 | } | ||
189 | |||
190 | static acpi_status | ||
191 | acpi_i2c_space_handler(u32 function, acpi_physical_address command, | ||
192 | u32 bits, u64 *value64, | ||
193 | void *handler_context, void *region_context) | ||
194 | { | ||
195 | struct gsb_buffer *gsb = (struct gsb_buffer *)value64; | ||
196 | struct acpi_i2c_handler_data *data = handler_context; | ||
197 | struct acpi_connection_info *info = &data->info; | ||
198 | struct acpi_resource_i2c_serialbus *sb; | ||
199 | struct i2c_adapter *adapter = data->adapter; | ||
200 | struct i2c_client client; | ||
201 | struct acpi_resource *ares; | ||
202 | u32 accessor_type = function >> 16; | ||
203 | u8 action = function & ACPI_IO_MASK; | ||
204 | acpi_status ret = AE_OK; | ||
205 | int status; | ||
206 | |||
207 | ret = acpi_buffer_to_resource(info->connection, info->length, &ares); | ||
208 | if (ACPI_FAILURE(ret)) | ||
209 | return ret; | ||
210 | |||
211 | if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
212 | ret = AE_BAD_PARAMETER; | ||
213 | goto err; | ||
214 | } | ||
215 | |||
216 | sb = &ares->data.i2c_serial_bus; | ||
217 | if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
218 | ret = AE_BAD_PARAMETER; | ||
219 | goto err; | ||
220 | } | ||
221 | |||
222 | memset(&client, 0, sizeof(client)); | ||
223 | client.adapter = adapter; | ||
224 | client.addr = sb->slave_address; | ||
225 | client.flags = 0; | ||
226 | |||
227 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
228 | client.flags |= I2C_CLIENT_TEN; | ||
229 | |||
230 | switch (accessor_type) { | ||
231 | case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: | ||
232 | if (action == ACPI_READ) { | ||
233 | status = i2c_smbus_read_byte(&client); | ||
234 | if (status >= 0) { | ||
235 | gsb->bdata = status; | ||
236 | status = 0; | ||
237 | } | ||
238 | } else { | ||
239 | status = i2c_smbus_write_byte(&client, gsb->bdata); | ||
240 | } | ||
241 | break; | ||
242 | |||
243 | case ACPI_GSB_ACCESS_ATTRIB_BYTE: | ||
244 | if (action == ACPI_READ) { | ||
245 | status = i2c_smbus_read_byte_data(&client, command); | ||
246 | if (status >= 0) { | ||
247 | gsb->bdata = status; | ||
248 | status = 0; | ||
249 | } | ||
250 | } else { | ||
251 | status = i2c_smbus_write_byte_data(&client, command, | ||
252 | gsb->bdata); | ||
253 | } | ||
254 | break; | ||
255 | |||
256 | case ACPI_GSB_ACCESS_ATTRIB_WORD: | ||
257 | if (action == ACPI_READ) { | ||
258 | status = i2c_smbus_read_word_data(&client, command); | ||
259 | if (status >= 0) { | ||
260 | gsb->wdata = status; | ||
261 | status = 0; | ||
262 | } | ||
263 | } else { | ||
264 | status = i2c_smbus_write_word_data(&client, command, | ||
265 | gsb->wdata); | ||
266 | } | ||
267 | break; | ||
268 | |||
269 | case ACPI_GSB_ACCESS_ATTRIB_BLOCK: | ||
270 | if (action == ACPI_READ) { | ||
271 | status = i2c_smbus_read_block_data(&client, command, | ||
272 | gsb->data); | ||
273 | if (status >= 0) { | ||
274 | gsb->len = status; | ||
275 | status = 0; | ||
276 | } | ||
277 | } else { | ||
278 | status = i2c_smbus_write_block_data(&client, command, | ||
279 | gsb->len, gsb->data); | ||
280 | } | ||
281 | break; | ||
282 | |||
283 | case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: | ||
284 | if (action == ACPI_READ) { | ||
285 | status = acpi_gsb_i2c_read_bytes(&client, command, | ||
286 | gsb->data, info->access_length); | ||
287 | if (status > 0) | ||
288 | status = 0; | ||
289 | } else { | ||
290 | status = acpi_gsb_i2c_write_bytes(&client, command, | ||
291 | gsb->data, info->access_length); | ||
292 | } | ||
293 | break; | ||
294 | |||
295 | default: | ||
296 | pr_info("protocol(0x%02x) is not supported.\n", accessor_type); | ||
297 | ret = AE_BAD_PARAMETER; | ||
298 | goto err; | ||
299 | } | ||
300 | |||
301 | gsb->status = status; | ||
302 | |||
303 | err: | ||
304 | ACPI_FREE(ares); | ||
305 | return ret; | ||
306 | } | ||
307 | |||
308 | |||
309 | int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
310 | { | ||
311 | acpi_handle handle = ACPI_HANDLE(adapter->dev.parent); | ||
312 | struct acpi_i2c_handler_data *data; | ||
313 | acpi_status status; | ||
314 | |||
315 | if (!handle) | ||
316 | return -ENODEV; | ||
317 | |||
318 | data = kzalloc(sizeof(struct acpi_i2c_handler_data), | ||
319 | GFP_KERNEL); | ||
320 | if (!data) | ||
321 | return -ENOMEM; | ||
322 | |||
323 | data->adapter = adapter; | ||
324 | status = acpi_bus_attach_private_data(handle, (void *)data); | ||
325 | if (ACPI_FAILURE(status)) { | ||
326 | kfree(data); | ||
327 | return -ENOMEM; | ||
328 | } | ||
329 | |||
330 | status = acpi_install_address_space_handler(handle, | ||
331 | ACPI_ADR_SPACE_GSBUS, | ||
332 | &acpi_i2c_space_handler, | ||
333 | NULL, | ||
334 | data); | ||
335 | if (ACPI_FAILURE(status)) { | ||
336 | dev_err(&adapter->dev, "Error installing i2c space handler\n"); | ||
337 | acpi_bus_detach_private_data(handle); | ||
338 | kfree(data); | ||
339 | return -ENOMEM; | ||
340 | } | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
346 | { | ||
347 | acpi_handle handle = ACPI_HANDLE(adapter->dev.parent); | ||
348 | struct acpi_i2c_handler_data *data; | ||
349 | acpi_status status; | ||
350 | |||
351 | if (!handle) | ||
352 | return; | ||
353 | |||
354 | acpi_remove_address_space_handler(handle, | ||
355 | ACPI_ADR_SPACE_GSBUS, | ||
356 | &acpi_i2c_space_handler); | ||
357 | |||
358 | status = acpi_bus_get_private_data(handle, (void **)&data); | ||
359 | if (ACPI_SUCCESS(status)) | ||
360 | kfree(data); | ||
361 | |||
362 | acpi_bus_detach_private_data(handle); | ||
363 | } | ||
364 | #endif | ||
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 632057a44615..2f90ac6a7f79 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -27,6 +27,8 @@ | |||
27 | OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de> | 27 | OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de> |
28 | (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and | 28 | (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and |
29 | (c) 2013 Wolfram Sang <wsa@the-dreams.de> | 29 | (c) 2013 Wolfram Sang <wsa@the-dreams.de> |
30 | I2C ACPI code Copyright (C) 2014 Intel Corp | ||
31 | Author: Lan Tianyu <tianyu.lan@intel.com> | ||
30 | */ | 32 | */ |
31 | 33 | ||
32 | #include <linux/module.h> | 34 | #include <linux/module.h> |
@@ -48,6 +50,7 @@ | |||
48 | #include <linux/irqflags.h> | 50 | #include <linux/irqflags.h> |
49 | #include <linux/rwsem.h> | 51 | #include <linux/rwsem.h> |
50 | #include <linux/pm_runtime.h> | 52 | #include <linux/pm_runtime.h> |
53 | #include <linux/pm_domain.h> | ||
51 | #include <linux/acpi.h> | 54 | #include <linux/acpi.h> |
52 | #include <linux/jump_label.h> | 55 | #include <linux/jump_label.h> |
53 | #include <asm/uaccess.h> | 56 | #include <asm/uaccess.h> |
@@ -78,6 +81,368 @@ void i2c_transfer_trace_unreg(void) | |||
78 | static_key_slow_dec(&i2c_trace_msg); | 81 | static_key_slow_dec(&i2c_trace_msg); |
79 | } | 82 | } |
80 | 83 | ||
84 | #if defined(CONFIG_ACPI) | ||
85 | struct acpi_i2c_handler_data { | ||
86 | struct acpi_connection_info info; | ||
87 | struct i2c_adapter *adapter; | ||
88 | }; | ||
89 | |||
90 | struct gsb_buffer { | ||
91 | u8 status; | ||
92 | u8 len; | ||
93 | union { | ||
94 | u16 wdata; | ||
95 | u8 bdata; | ||
96 | u8 data[0]; | ||
97 | }; | ||
98 | } __packed; | ||
99 | |||
100 | static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) | ||
101 | { | ||
102 | struct i2c_board_info *info = data; | ||
103 | |||
104 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
105 | struct acpi_resource_i2c_serialbus *sb; | ||
106 | |||
107 | sb = &ares->data.i2c_serial_bus; | ||
108 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
109 | info->addr = sb->slave_address; | ||
110 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
111 | info->flags |= I2C_CLIENT_TEN; | ||
112 | } | ||
113 | } else if (info->irq < 0) { | ||
114 | struct resource r; | ||
115 | |||
116 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
117 | info->irq = r.start; | ||
118 | } | ||
119 | |||
120 | /* Tell the ACPI core to skip this resource */ | ||
121 | return 1; | ||
122 | } | ||
123 | |||
124 | static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | ||
125 | void *data, void **return_value) | ||
126 | { | ||
127 | struct i2c_adapter *adapter = data; | ||
128 | struct list_head resource_list; | ||
129 | struct i2c_board_info info; | ||
130 | struct acpi_device *adev; | ||
131 | int ret; | ||
132 | |||
133 | if (acpi_bus_get_device(handle, &adev)) | ||
134 | return AE_OK; | ||
135 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
136 | return AE_OK; | ||
137 | |||
138 | memset(&info, 0, sizeof(info)); | ||
139 | info.acpi_node.companion = adev; | ||
140 | info.irq = -1; | ||
141 | |||
142 | INIT_LIST_HEAD(&resource_list); | ||
143 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
144 | acpi_i2c_add_resource, &info); | ||
145 | acpi_dev_free_resource_list(&resource_list); | ||
146 | |||
147 | if (ret < 0 || !info.addr) | ||
148 | return AE_OK; | ||
149 | |||
150 | adev->power.flags.ignore_parent = true; | ||
151 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); | ||
152 | if (!i2c_new_device(adapter, &info)) { | ||
153 | adev->power.flags.ignore_parent = false; | ||
154 | dev_err(&adapter->dev, | ||
155 | "failed to add I2C device %s from ACPI\n", | ||
156 | dev_name(&adev->dev)); | ||
157 | } | ||
158 | |||
159 | return AE_OK; | ||
160 | } | ||
161 | |||
162 | /** | ||
163 | * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter | ||
164 | * @adap: pointer to adapter | ||
165 | * | ||
166 | * Enumerate all I2C slave devices behind this adapter by walking the ACPI | ||
167 | * namespace. When a device is found it will be added to the Linux device | ||
168 | * model and bound to the corresponding ACPI handle. | ||
169 | */ | ||
170 | static void acpi_i2c_register_devices(struct i2c_adapter *adap) | ||
171 | { | ||
172 | acpi_handle handle; | ||
173 | acpi_status status; | ||
174 | |||
175 | if (!adap->dev.parent) | ||
176 | return; | ||
177 | |||
178 | handle = ACPI_HANDLE(adap->dev.parent); | ||
179 | if (!handle) | ||
180 | return; | ||
181 | |||
182 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
183 | acpi_i2c_add_device, NULL, | ||
184 | adap, NULL); | ||
185 | if (ACPI_FAILURE(status)) | ||
186 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); | ||
187 | } | ||
188 | |||
189 | #else /* CONFIG_ACPI */ | ||
190 | static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { } | ||
191 | #endif /* CONFIG_ACPI */ | ||
192 | |||
193 | #ifdef CONFIG_ACPI_I2C_OPREGION | ||
194 | static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, | ||
195 | u8 cmd, u8 *data, u8 data_len) | ||
196 | { | ||
197 | |||
198 | struct i2c_msg msgs[2]; | ||
199 | int ret; | ||
200 | u8 *buffer; | ||
201 | |||
202 | buffer = kzalloc(data_len, GFP_KERNEL); | ||
203 | if (!buffer) | ||
204 | return AE_NO_MEMORY; | ||
205 | |||
206 | msgs[0].addr = client->addr; | ||
207 | msgs[0].flags = client->flags; | ||
208 | msgs[0].len = 1; | ||
209 | msgs[0].buf = &cmd; | ||
210 | |||
211 | msgs[1].addr = client->addr; | ||
212 | msgs[1].flags = client->flags | I2C_M_RD; | ||
213 | msgs[1].len = data_len; | ||
214 | msgs[1].buf = buffer; | ||
215 | |||
216 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
217 | if (ret < 0) | ||
218 | dev_err(&client->adapter->dev, "i2c read failed\n"); | ||
219 | else | ||
220 | memcpy(data, buffer, data_len); | ||
221 | |||
222 | kfree(buffer); | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, | ||
227 | u8 cmd, u8 *data, u8 data_len) | ||
228 | { | ||
229 | |||
230 | struct i2c_msg msgs[1]; | ||
231 | u8 *buffer; | ||
232 | int ret = AE_OK; | ||
233 | |||
234 | buffer = kzalloc(data_len + 1, GFP_KERNEL); | ||
235 | if (!buffer) | ||
236 | return AE_NO_MEMORY; | ||
237 | |||
238 | buffer[0] = cmd; | ||
239 | memcpy(buffer + 1, data, data_len); | ||
240 | |||
241 | msgs[0].addr = client->addr; | ||
242 | msgs[0].flags = client->flags; | ||
243 | msgs[0].len = data_len + 1; | ||
244 | msgs[0].buf = buffer; | ||
245 | |||
246 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
247 | if (ret < 0) | ||
248 | dev_err(&client->adapter->dev, "i2c write failed\n"); | ||
249 | |||
250 | kfree(buffer); | ||
251 | return ret; | ||
252 | } | ||
253 | |||
254 | static acpi_status | ||
255 | acpi_i2c_space_handler(u32 function, acpi_physical_address command, | ||
256 | u32 bits, u64 *value64, | ||
257 | void *handler_context, void *region_context) | ||
258 | { | ||
259 | struct gsb_buffer *gsb = (struct gsb_buffer *)value64; | ||
260 | struct acpi_i2c_handler_data *data = handler_context; | ||
261 | struct acpi_connection_info *info = &data->info; | ||
262 | struct acpi_resource_i2c_serialbus *sb; | ||
263 | struct i2c_adapter *adapter = data->adapter; | ||
264 | struct i2c_client client; | ||
265 | struct acpi_resource *ares; | ||
266 | u32 accessor_type = function >> 16; | ||
267 | u8 action = function & ACPI_IO_MASK; | ||
268 | acpi_status ret = AE_OK; | ||
269 | int status; | ||
270 | |||
271 | ret = acpi_buffer_to_resource(info->connection, info->length, &ares); | ||
272 | if (ACPI_FAILURE(ret)) | ||
273 | return ret; | ||
274 | |||
275 | if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
276 | ret = AE_BAD_PARAMETER; | ||
277 | goto err; | ||
278 | } | ||
279 | |||
280 | sb = &ares->data.i2c_serial_bus; | ||
281 | if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
282 | ret = AE_BAD_PARAMETER; | ||
283 | goto err; | ||
284 | } | ||
285 | |||
286 | memset(&client, 0, sizeof(client)); | ||
287 | client.adapter = adapter; | ||
288 | client.addr = sb->slave_address; | ||
289 | client.flags = 0; | ||
290 | |||
291 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
292 | client.flags |= I2C_CLIENT_TEN; | ||
293 | |||
294 | switch (accessor_type) { | ||
295 | case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: | ||
296 | if (action == ACPI_READ) { | ||
297 | status = i2c_smbus_read_byte(&client); | ||
298 | if (status >= 0) { | ||
299 | gsb->bdata = status; | ||
300 | status = 0; | ||
301 | } | ||
302 | } else { | ||
303 | status = i2c_smbus_write_byte(&client, gsb->bdata); | ||
304 | } | ||
305 | break; | ||
306 | |||
307 | case ACPI_GSB_ACCESS_ATTRIB_BYTE: | ||
308 | if (action == ACPI_READ) { | ||
309 | status = i2c_smbus_read_byte_data(&client, command); | ||
310 | if (status >= 0) { | ||
311 | gsb->bdata = status; | ||
312 | status = 0; | ||
313 | } | ||
314 | } else { | ||
315 | status = i2c_smbus_write_byte_data(&client, command, | ||
316 | gsb->bdata); | ||
317 | } | ||
318 | break; | ||
319 | |||
320 | case ACPI_GSB_ACCESS_ATTRIB_WORD: | ||
321 | if (action == ACPI_READ) { | ||
322 | status = i2c_smbus_read_word_data(&client, command); | ||
323 | if (status >= 0) { | ||
324 | gsb->wdata = status; | ||
325 | status = 0; | ||
326 | } | ||
327 | } else { | ||
328 | status = i2c_smbus_write_word_data(&client, command, | ||
329 | gsb->wdata); | ||
330 | } | ||
331 | break; | ||
332 | |||
333 | case ACPI_GSB_ACCESS_ATTRIB_BLOCK: | ||
334 | if (action == ACPI_READ) { | ||
335 | status = i2c_smbus_read_block_data(&client, command, | ||
336 | gsb->data); | ||
337 | if (status >= 0) { | ||
338 | gsb->len = status; | ||
339 | status = 0; | ||
340 | } | ||
341 | } else { | ||
342 | status = i2c_smbus_write_block_data(&client, command, | ||
343 | gsb->len, gsb->data); | ||
344 | } | ||
345 | break; | ||
346 | |||
347 | case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: | ||
348 | if (action == ACPI_READ) { | ||
349 | status = acpi_gsb_i2c_read_bytes(&client, command, | ||
350 | gsb->data, info->access_length); | ||
351 | if (status > 0) | ||
352 | status = 0; | ||
353 | } else { | ||
354 | status = acpi_gsb_i2c_write_bytes(&client, command, | ||
355 | gsb->data, info->access_length); | ||
356 | } | ||
357 | break; | ||
358 | |||
359 | default: | ||
360 | pr_info("protocol(0x%02x) is not supported.\n", accessor_type); | ||
361 | ret = AE_BAD_PARAMETER; | ||
362 | goto err; | ||
363 | } | ||
364 | |||
365 | gsb->status = status; | ||
366 | |||
367 | err: | ||
368 | ACPI_FREE(ares); | ||
369 | return ret; | ||
370 | } | ||
371 | |||
372 | |||
373 | static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
374 | { | ||
375 | acpi_handle handle; | ||
376 | struct acpi_i2c_handler_data *data; | ||
377 | acpi_status status; | ||
378 | |||
379 | if (!adapter->dev.parent) | ||
380 | return -ENODEV; | ||
381 | |||
382 | handle = ACPI_HANDLE(adapter->dev.parent); | ||
383 | |||
384 | if (!handle) | ||
385 | return -ENODEV; | ||
386 | |||
387 | data = kzalloc(sizeof(struct acpi_i2c_handler_data), | ||
388 | GFP_KERNEL); | ||
389 | if (!data) | ||
390 | return -ENOMEM; | ||
391 | |||
392 | data->adapter = adapter; | ||
393 | status = acpi_bus_attach_private_data(handle, (void *)data); | ||
394 | if (ACPI_FAILURE(status)) { | ||
395 | kfree(data); | ||
396 | return -ENOMEM; | ||
397 | } | ||
398 | |||
399 | status = acpi_install_address_space_handler(handle, | ||
400 | ACPI_ADR_SPACE_GSBUS, | ||
401 | &acpi_i2c_space_handler, | ||
402 | NULL, | ||
403 | data); | ||
404 | if (ACPI_FAILURE(status)) { | ||
405 | dev_err(&adapter->dev, "Error installing i2c space handler\n"); | ||
406 | acpi_bus_detach_private_data(handle); | ||
407 | kfree(data); | ||
408 | return -ENOMEM; | ||
409 | } | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
415 | { | ||
416 | acpi_handle handle; | ||
417 | struct acpi_i2c_handler_data *data; | ||
418 | acpi_status status; | ||
419 | |||
420 | if (!adapter->dev.parent) | ||
421 | return; | ||
422 | |||
423 | handle = ACPI_HANDLE(adapter->dev.parent); | ||
424 | |||
425 | if (!handle) | ||
426 | return; | ||
427 | |||
428 | acpi_remove_address_space_handler(handle, | ||
429 | ACPI_ADR_SPACE_GSBUS, | ||
430 | &acpi_i2c_space_handler); | ||
431 | |||
432 | status = acpi_bus_get_private_data(handle, (void **)&data); | ||
433 | if (ACPI_SUCCESS(status)) | ||
434 | kfree(data); | ||
435 | |||
436 | acpi_bus_detach_private_data(handle); | ||
437 | } | ||
438 | #else /* CONFIG_ACPI_I2C_OPREGION */ | ||
439 | static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
440 | { } | ||
441 | |||
442 | static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
443 | { return 0; } | ||
444 | #endif /* CONFIG_ACPI_I2C_OPREGION */ | ||
445 | |||
81 | /* ------------------------------------------------------------------------- */ | 446 | /* ------------------------------------------------------------------------- */ |
82 | 447 | ||
83 | static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, | 448 | static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, |
@@ -279,10 +644,13 @@ static int i2c_device_probe(struct device *dev) | |||
279 | if (status < 0) | 644 | if (status < 0) |
280 | return status; | 645 | return status; |
281 | 646 | ||
282 | acpi_dev_pm_attach(&client->dev, true); | 647 | status = dev_pm_domain_attach(&client->dev, true); |
283 | status = driver->probe(client, i2c_match_id(driver->id_table, client)); | 648 | if (status != -EPROBE_DEFER) { |
284 | if (status) | 649 | status = driver->probe(client, i2c_match_id(driver->id_table, |
285 | acpi_dev_pm_detach(&client->dev, true); | 650 | client)); |
651 | if (status) | ||
652 | dev_pm_domain_detach(&client->dev, true); | ||
653 | } | ||
286 | 654 | ||
287 | return status; | 655 | return status; |
288 | } | 656 | } |
@@ -302,7 +670,7 @@ static int i2c_device_remove(struct device *dev) | |||
302 | status = driver->remove(client); | 670 | status = driver->remove(client); |
303 | } | 671 | } |
304 | 672 | ||
305 | acpi_dev_pm_detach(&client->dev, true); | 673 | dev_pm_domain_detach(&client->dev, true); |
306 | return status; | 674 | return status; |
307 | } | 675 | } |
308 | 676 | ||
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index a3a2e9c1639b..df0c4f605a21 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -105,6 +105,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
105 | umem->length = size; | 105 | umem->length = size; |
106 | umem->offset = addr & ~PAGE_MASK; | 106 | umem->offset = addr & ~PAGE_MASK; |
107 | umem->page_size = PAGE_SIZE; | 107 | umem->page_size = PAGE_SIZE; |
108 | umem->pid = get_task_pid(current, PIDTYPE_PID); | ||
108 | /* | 109 | /* |
109 | * We ask for writable memory if any access flags other than | 110 | * We ask for writable memory if any access flags other than |
110 | * "remote read" are set. "Local write" and "remote write" | 111 | * "remote read" are set. "Local write" and "remote write" |
@@ -198,6 +199,7 @@ out: | |||
198 | if (ret < 0) { | 199 | if (ret < 0) { |
199 | if (need_release) | 200 | if (need_release) |
200 | __ib_umem_release(context->device, umem, 0); | 201 | __ib_umem_release(context->device, umem, 0); |
202 | put_pid(umem->pid); | ||
201 | kfree(umem); | 203 | kfree(umem); |
202 | } else | 204 | } else |
203 | current->mm->pinned_vm = locked; | 205 | current->mm->pinned_vm = locked; |
@@ -230,15 +232,19 @@ void ib_umem_release(struct ib_umem *umem) | |||
230 | { | 232 | { |
231 | struct ib_ucontext *context = umem->context; | 233 | struct ib_ucontext *context = umem->context; |
232 | struct mm_struct *mm; | 234 | struct mm_struct *mm; |
235 | struct task_struct *task; | ||
233 | unsigned long diff; | 236 | unsigned long diff; |
234 | 237 | ||
235 | __ib_umem_release(umem->context->device, umem, 1); | 238 | __ib_umem_release(umem->context->device, umem, 1); |
236 | 239 | ||
237 | mm = get_task_mm(current); | 240 | task = get_pid_task(umem->pid, PIDTYPE_PID); |
238 | if (!mm) { | 241 | put_pid(umem->pid); |
239 | kfree(umem); | 242 | if (!task) |
240 | return; | 243 | goto out; |
241 | } | 244 | mm = get_task_mm(task); |
245 | put_task_struct(task); | ||
246 | if (!mm) | ||
247 | goto out; | ||
242 | 248 | ||
243 | diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; | 249 | diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; |
244 | 250 | ||
@@ -262,9 +268,10 @@ void ib_umem_release(struct ib_umem *umem) | |||
262 | } else | 268 | } else |
263 | down_write(&mm->mmap_sem); | 269 | down_write(&mm->mmap_sem); |
264 | 270 | ||
265 | current->mm->pinned_vm -= diff; | 271 | mm->pinned_vm -= diff; |
266 | up_write(&mm->mmap_sem); | 272 | up_write(&mm->mmap_sem); |
267 | mmput(mm); | 273 | mmput(mm); |
274 | out: | ||
268 | kfree(umem); | 275 | kfree(umem); |
269 | } | 276 | } |
270 | EXPORT_SYMBOL(ib_umem_release); | 277 | EXPORT_SYMBOL(ib_umem_release); |
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index e7bee46868d1..abd97247443e 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c | |||
@@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst, | |||
140 | dst->packet_life_time = src->packet_life_time; | 140 | dst->packet_life_time = src->packet_life_time; |
141 | dst->preference = src->preference; | 141 | dst->preference = src->preference; |
142 | dst->packet_life_time_selector = src->packet_life_time_selector; | 142 | dst->packet_life_time_selector = src->packet_life_time_selector; |
143 | |||
144 | memset(dst->smac, 0, sizeof(dst->smac)); | ||
145 | memset(dst->dmac, 0, sizeof(dst->dmac)); | ||
146 | dst->vlan_id = 0xffff; | ||
143 | } | 147 | } |
144 | EXPORT_SYMBOL(ib_copy_path_rec_from_user); | 148 | EXPORT_SYMBOL(ib_copy_path_rec_from_user); |
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index dc66c4506916..1da1252dcdb3 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c | |||
@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages, | |||
54 | 54 | ||
55 | /* call with current->mm->mmap_sem held */ | 55 | /* call with current->mm->mmap_sem held */ |
56 | static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, | 56 | static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, |
57 | struct page **p, struct vm_area_struct **vma) | 57 | struct page **p) |
58 | { | 58 | { |
59 | unsigned long lock_limit; | 59 | unsigned long lock_limit; |
60 | size_t got; | 60 | size_t got; |
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, | |||
74 | ret = get_user_pages(current, current->mm, | 74 | ret = get_user_pages(current, current->mm, |
75 | start_page + got * PAGE_SIZE, | 75 | start_page + got * PAGE_SIZE, |
76 | num_pages - got, 1, 1, | 76 | num_pages - got, 1, 1, |
77 | p + got, vma); | 77 | p + got, NULL); |
78 | if (ret < 0) | 78 | if (ret < 0) |
79 | goto bail_release; | 79 | goto bail_release; |
80 | } | 80 | } |
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages, | |||
165 | 165 | ||
166 | down_write(¤t->mm->mmap_sem); | 166 | down_write(¤t->mm->mmap_sem); |
167 | 167 | ||
168 | ret = __ipath_get_user_pages(start_page, num_pages, p, NULL); | 168 | ret = __ipath_get_user_pages(start_page, num_pages, p); |
169 | 169 | ||
170 | up_write(¤t->mm->mmap_sem); | 170 | up_write(¤t->mm->mmap_sem); |
171 | 171 | ||
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index af8256353c7d..bda5994ceb68 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -59,6 +59,7 @@ | |||
59 | 59 | ||
60 | #define MLX4_IB_FLOW_MAX_PRIO 0xFFF | 60 | #define MLX4_IB_FLOW_MAX_PRIO 0xFFF |
61 | #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF | 61 | #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF |
62 | #define MLX4_IB_CARD_REV_A0 0xA0 | ||
62 | 63 | ||
63 | MODULE_AUTHOR("Roland Dreier"); | 64 | MODULE_AUTHOR("Roland Dreier"); |
64 | MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); | 65 | MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); |
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev) | |||
119 | return dmfs; | 120 | return dmfs; |
120 | } | 121 | } |
121 | 122 | ||
123 | static int num_ib_ports(struct mlx4_dev *dev) | ||
124 | { | ||
125 | int ib_ports = 0; | ||
126 | int i; | ||
127 | |||
128 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | ||
129 | ib_ports++; | ||
130 | |||
131 | return ib_ports; | ||
132 | } | ||
133 | |||
122 | static int mlx4_ib_query_device(struct ib_device *ibdev, | 134 | static int mlx4_ib_query_device(struct ib_device *ibdev, |
123 | struct ib_device_attr *props) | 135 | struct ib_device_attr *props) |
124 | { | 136 | { |
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
126 | struct ib_smp *in_mad = NULL; | 138 | struct ib_smp *in_mad = NULL; |
127 | struct ib_smp *out_mad = NULL; | 139 | struct ib_smp *out_mad = NULL; |
128 | int err = -ENOMEM; | 140 | int err = -ENOMEM; |
141 | int have_ib_ports; | ||
129 | 142 | ||
130 | in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); | 143 | in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); |
131 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); | 144 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); |
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
142 | 155 | ||
143 | memset(props, 0, sizeof *props); | 156 | memset(props, 0, sizeof *props); |
144 | 157 | ||
158 | have_ib_ports = num_ib_ports(dev->dev); | ||
159 | |||
145 | props->fw_ver = dev->dev->caps.fw_ver; | 160 | props->fw_ver = dev->dev->caps.fw_ver; |
146 | props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | | 161 | props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | |
147 | IB_DEVICE_PORT_ACTIVE_EVENT | | 162 | IB_DEVICE_PORT_ACTIVE_EVENT | |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
152 | props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; | 167 | props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; |
153 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) | 168 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) |
154 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; | 169 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; |
155 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM) | 170 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports) |
156 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; | 171 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; |
157 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) | 172 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) |
158 | props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; | 173 | props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; |
159 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) | 174 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) |
160 | props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; | 175 | props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; |
161 | if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH) | 176 | if (dev->dev->caps.max_gso_sz && |
177 | (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) && | ||
178 | (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)) | ||
162 | props->device_cap_flags |= IB_DEVICE_UD_TSO; | 179 | props->device_cap_flags |= IB_DEVICE_UD_TSO; |
163 | if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) | 180 | if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) |
164 | props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; | 181 | props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; |
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, | |||
357 | props->state = IB_PORT_DOWN; | 374 | props->state = IB_PORT_DOWN; |
358 | props->phys_state = state_to_phys_state(props->state); | 375 | props->phys_state = state_to_phys_state(props->state); |
359 | props->active_mtu = IB_MTU_256; | 376 | props->active_mtu = IB_MTU_256; |
360 | spin_lock(&iboe->lock); | 377 | spin_lock_bh(&iboe->lock); |
361 | ndev = iboe->netdevs[port - 1]; | 378 | ndev = iboe->netdevs[port - 1]; |
362 | if (!ndev) | 379 | if (!ndev) |
363 | goto out_unlock; | 380 | goto out_unlock; |
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, | |||
369 | IB_PORT_ACTIVE : IB_PORT_DOWN; | 386 | IB_PORT_ACTIVE : IB_PORT_DOWN; |
370 | props->phys_state = state_to_phys_state(props->state); | 387 | props->phys_state = state_to_phys_state(props->state); |
371 | out_unlock: | 388 | out_unlock: |
372 | spin_unlock(&iboe->lock); | 389 | spin_unlock_bh(&iboe->lock); |
373 | out: | 390 | out: |
374 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); | 391 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); |
375 | return err; | 392 | return err; |
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, | |||
811 | if (!mqp->port) | 828 | if (!mqp->port) |
812 | return 0; | 829 | return 0; |
813 | 830 | ||
814 | spin_lock(&mdev->iboe.lock); | 831 | spin_lock_bh(&mdev->iboe.lock); |
815 | ndev = mdev->iboe.netdevs[mqp->port - 1]; | 832 | ndev = mdev->iboe.netdevs[mqp->port - 1]; |
816 | if (ndev) | 833 | if (ndev) |
817 | dev_hold(ndev); | 834 | dev_hold(ndev); |
818 | spin_unlock(&mdev->iboe.lock); | 835 | spin_unlock_bh(&mdev->iboe.lock); |
819 | 836 | ||
820 | if (ndev) { | 837 | if (ndev) { |
821 | ret = 1; | 838 | ret = 1; |
@@ -1292,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |||
1292 | mutex_lock(&mqp->mutex); | 1309 | mutex_lock(&mqp->mutex); |
1293 | ge = find_gid_entry(mqp, gid->raw); | 1310 | ge = find_gid_entry(mqp, gid->raw); |
1294 | if (ge) { | 1311 | if (ge) { |
1295 | spin_lock(&mdev->iboe.lock); | 1312 | spin_lock_bh(&mdev->iboe.lock); |
1296 | ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL; | 1313 | ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL; |
1297 | if (ndev) | 1314 | if (ndev) |
1298 | dev_hold(ndev); | 1315 | dev_hold(ndev); |
1299 | spin_unlock(&mdev->iboe.lock); | 1316 | spin_unlock_bh(&mdev->iboe.lock); |
1300 | if (ndev) | 1317 | if (ndev) |
1301 | dev_put(ndev); | 1318 | dev_put(ndev); |
1302 | list_del(&ge->list); | 1319 | list_del(&ge->list); |
@@ -1417,6 +1434,9 @@ static void update_gids_task(struct work_struct *work) | |||
1417 | int err; | 1434 | int err; |
1418 | struct mlx4_dev *dev = gw->dev->dev; | 1435 | struct mlx4_dev *dev = gw->dev->dev; |
1419 | 1436 | ||
1437 | if (!gw->dev->ib_active) | ||
1438 | return; | ||
1439 | |||
1420 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 1440 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
1421 | if (IS_ERR(mailbox)) { | 1441 | if (IS_ERR(mailbox)) { |
1422 | pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox)); | 1442 | pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox)); |
@@ -1447,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work) | |||
1447 | int err; | 1467 | int err; |
1448 | struct mlx4_dev *dev = gw->dev->dev; | 1468 | struct mlx4_dev *dev = gw->dev->dev; |
1449 | 1469 | ||
1470 | if (!gw->dev->ib_active) | ||
1471 | return; | ||
1472 | |||
1450 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 1473 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
1451 | if (IS_ERR(mailbox)) { | 1474 | if (IS_ERR(mailbox)) { |
1452 | pr_warn("reset gid table failed\n"); | 1475 | pr_warn("reset gid table failed\n"); |
@@ -1581,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, | |||
1581 | return 0; | 1604 | return 0; |
1582 | 1605 | ||
1583 | iboe = &ibdev->iboe; | 1606 | iboe = &ibdev->iboe; |
1584 | spin_lock(&iboe->lock); | 1607 | spin_lock_bh(&iboe->lock); |
1585 | 1608 | ||
1586 | for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) | 1609 | for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) |
1587 | if ((netif_is_bond_master(real_dev) && | 1610 | if ((netif_is_bond_master(real_dev) && |
@@ -1591,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, | |||
1591 | update_gid_table(ibdev, port, gid, | 1614 | update_gid_table(ibdev, port, gid, |
1592 | event == NETDEV_DOWN, 0); | 1615 | event == NETDEV_DOWN, 0); |
1593 | 1616 | ||
1594 | spin_unlock(&iboe->lock); | 1617 | spin_unlock_bh(&iboe->lock); |
1595 | return 0; | 1618 | return 0; |
1596 | 1619 | ||
1597 | } | 1620 | } |
@@ -1664,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, | |||
1664 | new_smac = mlx4_mac_to_u64(dev->dev_addr); | 1687 | new_smac = mlx4_mac_to_u64(dev->dev_addr); |
1665 | read_unlock(&dev_base_lock); | 1688 | read_unlock(&dev_base_lock); |
1666 | 1689 | ||
1690 | atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); | ||
1691 | |||
1692 | /* no need for update QP1 and mac registration in non-SRIOV */ | ||
1693 | if (!mlx4_is_mfunc(ibdev->dev)) | ||
1694 | return; | ||
1695 | |||
1667 | mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); | 1696 | mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); |
1668 | qp = ibdev->qp1_proxy[port - 1]; | 1697 | qp = ibdev->qp1_proxy[port - 1]; |
1669 | if (qp) { | 1698 | if (qp) { |
1670 | int new_smac_index; | 1699 | int new_smac_index; |
1671 | u64 old_smac = qp->pri.smac; | 1700 | u64 old_smac; |
1672 | struct mlx4_update_qp_params update_params; | 1701 | struct mlx4_update_qp_params update_params; |
1673 | 1702 | ||
1703 | mutex_lock(&qp->mutex); | ||
1704 | old_smac = qp->pri.smac; | ||
1674 | if (new_smac == old_smac) | 1705 | if (new_smac == old_smac) |
1675 | goto unlock; | 1706 | goto unlock; |
1676 | 1707 | ||
@@ -1680,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, | |||
1680 | goto unlock; | 1711 | goto unlock; |
1681 | 1712 | ||
1682 | update_params.smac_index = new_smac_index; | 1713 | update_params.smac_index = new_smac_index; |
1683 | if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC, | 1714 | if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, |
1684 | &update_params)) { | 1715 | &update_params)) { |
1685 | release_mac = new_smac; | 1716 | release_mac = new_smac; |
1686 | goto unlock; | 1717 | goto unlock; |
1687 | } | 1718 | } |
1688 | 1719 | /* if old port was zero, no mac was yet registered for this QP */ | |
1720 | if (qp->pri.smac_port) | ||
1721 | release_mac = old_smac; | ||
1689 | qp->pri.smac = new_smac; | 1722 | qp->pri.smac = new_smac; |
1723 | qp->pri.smac_port = port; | ||
1690 | qp->pri.smac_index = new_smac_index; | 1724 | qp->pri.smac_index = new_smac_index; |
1691 | |||
1692 | release_mac = old_smac; | ||
1693 | } | 1725 | } |
1694 | 1726 | ||
1695 | unlock: | 1727 | unlock: |
1696 | mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); | ||
1697 | if (release_mac != MLX4_IB_INVALID_MAC) | 1728 | if (release_mac != MLX4_IB_INVALID_MAC) |
1698 | mlx4_unregister_mac(ibdev->dev, port, release_mac); | 1729 | mlx4_unregister_mac(ibdev->dev, port, release_mac); |
1730 | if (qp) | ||
1731 | mutex_unlock(&qp->mutex); | ||
1732 | mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); | ||
1699 | } | 1733 | } |
1700 | 1734 | ||
1701 | static void mlx4_ib_get_dev_addr(struct net_device *dev, | 1735 | static void mlx4_ib_get_dev_addr(struct net_device *dev, |
@@ -1706,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
1706 | struct inet6_dev *in6_dev; | 1740 | struct inet6_dev *in6_dev; |
1707 | union ib_gid *pgid; | 1741 | union ib_gid *pgid; |
1708 | struct inet6_ifaddr *ifp; | 1742 | struct inet6_ifaddr *ifp; |
1743 | union ib_gid default_gid; | ||
1709 | #endif | 1744 | #endif |
1710 | union ib_gid gid; | 1745 | union ib_gid gid; |
1711 | 1746 | ||
@@ -1726,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
1726 | in_dev_put(in_dev); | 1761 | in_dev_put(in_dev); |
1727 | } | 1762 | } |
1728 | #if IS_ENABLED(CONFIG_IPV6) | 1763 | #if IS_ENABLED(CONFIG_IPV6) |
1764 | mlx4_make_default_gid(dev, &default_gid); | ||
1729 | /* IPv6 gids */ | 1765 | /* IPv6 gids */ |
1730 | in6_dev = in6_dev_get(dev); | 1766 | in6_dev = in6_dev_get(dev); |
1731 | if (in6_dev) { | 1767 | if (in6_dev) { |
1732 | read_lock_bh(&in6_dev->lock); | 1768 | read_lock_bh(&in6_dev->lock); |
1733 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { | 1769 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { |
1734 | pgid = (union ib_gid *)&ifp->addr; | 1770 | pgid = (union ib_gid *)&ifp->addr; |
1771 | if (!memcmp(pgid, &default_gid, sizeof(*pgid))) | ||
1772 | continue; | ||
1735 | update_gid_table(ibdev, port, pgid, 0, 0); | 1773 | update_gid_table(ibdev, port, pgid, 0, 0); |
1736 | } | 1774 | } |
1737 | read_unlock_bh(&in6_dev->lock); | 1775 | read_unlock_bh(&in6_dev->lock); |
@@ -1753,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) | |||
1753 | struct net_device *dev; | 1791 | struct net_device *dev; |
1754 | struct mlx4_ib_iboe *iboe = &ibdev->iboe; | 1792 | struct mlx4_ib_iboe *iboe = &ibdev->iboe; |
1755 | int i; | 1793 | int i; |
1794 | int err = 0; | ||
1756 | 1795 | ||
1757 | for (i = 1; i <= ibdev->num_ports; ++i) | 1796 | for (i = 1; i <= ibdev->num_ports; ++i) { |
1758 | if (reset_gid_table(ibdev, i)) | 1797 | if (rdma_port_get_link_layer(&ibdev->ib_dev, i) == |
1759 | return -1; | 1798 | IB_LINK_LAYER_ETHERNET) { |
1799 | err = reset_gid_table(ibdev, i); | ||
1800 | if (err) | ||
1801 | goto out; | ||
1802 | } | ||
1803 | } | ||
1760 | 1804 | ||
1761 | read_lock(&dev_base_lock); | 1805 | read_lock(&dev_base_lock); |
1762 | spin_lock(&iboe->lock); | 1806 | spin_lock_bh(&iboe->lock); |
1763 | 1807 | ||
1764 | for_each_netdev(&init_net, dev) { | 1808 | for_each_netdev(&init_net, dev) { |
1765 | u8 port = mlx4_ib_get_dev_port(dev, ibdev); | 1809 | u8 port = mlx4_ib_get_dev_port(dev, ibdev); |
1766 | if (port) | 1810 | /* port will be non-zero only for ETH ports */ |
1811 | if (port) { | ||
1812 | mlx4_ib_set_default_gid(ibdev, dev, port); | ||
1767 | mlx4_ib_get_dev_addr(dev, ibdev, port); | 1813 | mlx4_ib_get_dev_addr(dev, ibdev, port); |
1814 | } | ||
1768 | } | 1815 | } |
1769 | 1816 | ||
1770 | spin_unlock(&iboe->lock); | 1817 | spin_unlock_bh(&iboe->lock); |
1771 | read_unlock(&dev_base_lock); | 1818 | read_unlock(&dev_base_lock); |
1772 | 1819 | out: | |
1773 | return 0; | 1820 | return err; |
1774 | } | 1821 | } |
1775 | 1822 | ||
1776 | static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, | 1823 | static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, |
@@ -1784,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, | |||
1784 | 1831 | ||
1785 | iboe = &ibdev->iboe; | 1832 | iboe = &ibdev->iboe; |
1786 | 1833 | ||
1787 | spin_lock(&iboe->lock); | 1834 | spin_lock_bh(&iboe->lock); |
1788 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { | 1835 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { |
1789 | enum ib_port_state port_state = IB_PORT_NOP; | 1836 | enum ib_port_state port_state = IB_PORT_NOP; |
1790 | struct net_device *old_master = iboe->masters[port - 1]; | 1837 | struct net_device *old_master = iboe->masters[port - 1]; |
@@ -1816,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, | |||
1816 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? | 1863 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? |
1817 | IB_PORT_ACTIVE : IB_PORT_DOWN; | 1864 | IB_PORT_ACTIVE : IB_PORT_DOWN; |
1818 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | 1865 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); |
1819 | } else { | 1866 | if (curr_master) { |
1820 | reset_gid_table(ibdev, port); | 1867 | /* if using bonding/team and a slave port is down, we |
1821 | } | 1868 | * don't want the bond IP based gids in the table since |
1822 | /* if using bonding/team and a slave port is down, we don't the bond IP | 1869 | * flows that select port by gid may get the down port. |
1823 | * based gids in the table since flows that select port by gid may get | 1870 | */ |
1824 | * the down port. | 1871 | if (port_state == IB_PORT_DOWN) { |
1825 | */ | 1872 | reset_gid_table(ibdev, port); |
1826 | if (curr_master && (port_state == IB_PORT_DOWN)) { | 1873 | mlx4_ib_set_default_gid(ibdev, |
1827 | reset_gid_table(ibdev, port); | 1874 | curr_netdev, |
1828 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | 1875 | port); |
1829 | } | 1876 | } else { |
1830 | /* if bonding is used it is possible that we add it to masters | 1877 | /* gids from the upper dev (bond/team) |
1831 | * only after IP address is assigned to the net bonding | 1878 | * should appear in port's gid table |
1832 | * interface. | 1879 | */ |
1833 | */ | 1880 | mlx4_ib_get_dev_addr(curr_master, |
1834 | if (curr_master && (old_master != curr_master)) { | 1881 | ibdev, port); |
1835 | reset_gid_table(ibdev, port); | 1882 | } |
1836 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | 1883 | } |
1837 | mlx4_ib_get_dev_addr(curr_master, ibdev, port); | 1884 | /* if bonding is used it is possible that we add it to |
1838 | } | 1885 | * masters only after IP address is assigned to the |
1886 | * net bonding interface. | ||
1887 | */ | ||
1888 | if (curr_master && (old_master != curr_master)) { | ||
1889 | reset_gid_table(ibdev, port); | ||
1890 | mlx4_ib_set_default_gid(ibdev, | ||
1891 | curr_netdev, port); | ||
1892 | mlx4_ib_get_dev_addr(curr_master, ibdev, port); | ||
1893 | } | ||
1839 | 1894 | ||
1840 | if (!curr_master && (old_master != curr_master)) { | 1895 | if (!curr_master && (old_master != curr_master)) { |
1896 | reset_gid_table(ibdev, port); | ||
1897 | mlx4_ib_set_default_gid(ibdev, | ||
1898 | curr_netdev, port); | ||
1899 | mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); | ||
1900 | } | ||
1901 | } else { | ||
1841 | reset_gid_table(ibdev, port); | 1902 | reset_gid_table(ibdev, port); |
1842 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
1843 | mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); | ||
1844 | } | 1903 | } |
1845 | } | 1904 | } |
1846 | 1905 | ||
1847 | spin_unlock(&iboe->lock); | 1906 | spin_unlock_bh(&iboe->lock); |
1848 | 1907 | ||
1849 | if (update_qps_port > 0) | 1908 | if (update_qps_port > 0) |
1850 | mlx4_ib_update_qps(ibdev, dev, update_qps_port); | 1909 | mlx4_ib_update_qps(ibdev, dev, update_qps_port); |
@@ -2186,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2186 | goto err_steer_free_bitmap; | 2245 | goto err_steer_free_bitmap; |
2187 | } | 2246 | } |
2188 | 2247 | ||
2248 | for (j = 1; j <= ibdev->dev->caps.num_ports; j++) | ||
2249 | atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); | ||
2250 | |||
2189 | if (ib_register_device(&ibdev->ib_dev, NULL)) | 2251 | if (ib_register_device(&ibdev->ib_dev, NULL)) |
2190 | goto err_steer_free_bitmap; | 2252 | goto err_steer_free_bitmap; |
2191 | 2253 | ||
@@ -2222,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2222 | } | 2284 | } |
2223 | } | 2285 | } |
2224 | #endif | 2286 | #endif |
2225 | for (i = 1 ; i <= ibdev->num_ports ; ++i) | 2287 | if (mlx4_ib_init_gid_table(ibdev)) |
2226 | reset_gid_table(ibdev, i); | 2288 | goto err_notif; |
2227 | rtnl_lock(); | ||
2228 | mlx4_ib_scan_netdevs(ibdev, NULL, 0); | ||
2229 | rtnl_unlock(); | ||
2230 | mlx4_ib_init_gid_table(ibdev); | ||
2231 | } | 2289 | } |
2232 | 2290 | ||
2233 | for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { | 2291 | for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { |
@@ -2375,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) | |||
2375 | struct mlx4_ib_dev *ibdev = ibdev_ptr; | 2433 | struct mlx4_ib_dev *ibdev = ibdev_ptr; |
2376 | int p; | 2434 | int p; |
2377 | 2435 | ||
2436 | ibdev->ib_active = false; | ||
2437 | flush_workqueue(wq); | ||
2438 | |||
2378 | mlx4_ib_close_sriov(ibdev); | 2439 | mlx4_ib_close_sriov(ibdev); |
2379 | mlx4_ib_mad_cleanup(ibdev); | 2440 | mlx4_ib_mad_cleanup(ibdev); |
2380 | ib_unregister_device(&ibdev->ib_dev); | 2441 | ib_unregister_device(&ibdev->ib_dev); |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index e8cad3926bfc..6eb743f65f6f 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -451,6 +451,7 @@ struct mlx4_ib_iboe { | |||
451 | spinlock_t lock; | 451 | spinlock_t lock; |
452 | struct net_device *netdevs[MLX4_MAX_PORTS]; | 452 | struct net_device *netdevs[MLX4_MAX_PORTS]; |
453 | struct net_device *masters[MLX4_MAX_PORTS]; | 453 | struct net_device *masters[MLX4_MAX_PORTS]; |
454 | atomic64_t mac[MLX4_MAX_PORTS]; | ||
454 | struct notifier_block nb; | 455 | struct notifier_block nb; |
455 | struct notifier_block nb_inet; | 456 | struct notifier_block nb_inet; |
456 | struct notifier_block nb_inet6; | 457 | struct notifier_block nb_inet6; |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 9b0e80e59b08..8f9325cfc85d 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
@@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, | |||
234 | 0); | 234 | 0); |
235 | if (IS_ERR(mmr->umem)) { | 235 | if (IS_ERR(mmr->umem)) { |
236 | err = PTR_ERR(mmr->umem); | 236 | err = PTR_ERR(mmr->umem); |
237 | /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */ | ||
237 | mmr->umem = NULL; | 238 | mmr->umem = NULL; |
238 | goto release_mpt_entry; | 239 | goto release_mpt_entry; |
239 | } | 240 | } |
240 | n = ib_umem_page_count(mmr->umem); | 241 | n = ib_umem_page_count(mmr->umem); |
241 | shift = ilog2(mmr->umem->page_size); | 242 | shift = ilog2(mmr->umem->page_size); |
242 | 243 | ||
243 | mmr->mmr.iova = virt_addr; | ||
244 | mmr->mmr.size = length; | ||
245 | err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr, | 244 | err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr, |
246 | virt_addr, length, n, shift, | 245 | virt_addr, length, n, shift, |
247 | *pmpt_entry); | 246 | *pmpt_entry); |
@@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, | |||
249 | ib_umem_release(mmr->umem); | 248 | ib_umem_release(mmr->umem); |
250 | goto release_mpt_entry; | 249 | goto release_mpt_entry; |
251 | } | 250 | } |
251 | mmr->mmr.iova = virt_addr; | ||
252 | mmr->mmr.size = length; | ||
252 | 253 | ||
253 | err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); | 254 | err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); |
254 | if (err) { | 255 | if (err) { |
@@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, | |||
262 | * return a failure. But dereg_mr will free the resources. | 263 | * return a failure. But dereg_mr will free the resources. |
263 | */ | 264 | */ |
264 | err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry); | 265 | err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry); |
266 | if (!err && flags & IB_MR_REREG_ACCESS) | ||
267 | mmr->mmr.access = mr_access_flags; | ||
265 | 268 | ||
266 | release_mpt_entry: | 269 | release_mpt_entry: |
267 | mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry); | 270 | mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry); |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index efb9eff8906c..9c5150c3cb31 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
964 | MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) | 964 | MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) |
965 | pr_warn("modify QP %06x to RESET failed.\n", | 965 | pr_warn("modify QP %06x to RESET failed.\n", |
966 | qp->mqp.qpn); | 966 | qp->mqp.qpn); |
967 | if (qp->pri.smac) { | 967 | if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { |
968 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); | 968 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); |
969 | qp->pri.smac = 0; | 969 | qp->pri.smac = 0; |
970 | qp->pri.smac_port = 0; | ||
970 | } | 971 | } |
971 | if (qp->alt.smac) { | 972 | if (qp->alt.smac) { |
972 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); | 973 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); |
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, | |||
1325 | * If one was already assigned, but the new mac differs, | 1326 | * If one was already assigned, but the new mac differs, |
1326 | * unregister the old one and register the new one. | 1327 | * unregister the old one and register the new one. |
1327 | */ | 1328 | */ |
1328 | if (!smac_info->smac || smac_info->smac != smac) { | 1329 | if ((!smac_info->smac && !smac_info->smac_port) || |
1330 | smac_info->smac != smac) { | ||
1329 | /* register candidate now, unreg if needed, after success */ | 1331 | /* register candidate now, unreg if needed, after success */ |
1330 | smac_index = mlx4_register_mac(dev->dev, port, smac); | 1332 | smac_index = mlx4_register_mac(dev->dev, port, smac); |
1331 | if (smac_index >= 0) { | 1333 | if (smac_index >= 0) { |
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) | |||
1390 | static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, | 1392 | static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, |
1391 | struct mlx4_qp_context *context) | 1393 | struct mlx4_qp_context *context) |
1392 | { | 1394 | { |
1393 | struct net_device *ndev; | ||
1394 | u64 u64_mac; | 1395 | u64 u64_mac; |
1395 | int smac_index; | 1396 | int smac_index; |
1396 | 1397 | ||
1397 | 1398 | u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); | |
1398 | ndev = dev->iboe.netdevs[qp->port - 1]; | ||
1399 | if (ndev) { | ||
1400 | smac = ndev->dev_addr; | ||
1401 | u64_mac = mlx4_mac_to_u64(smac); | ||
1402 | } else { | ||
1403 | u64_mac = dev->dev->caps.def_mac[qp->port]; | ||
1404 | } | ||
1405 | 1399 | ||
1406 | context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); | 1400 | context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); |
1407 | if (!qp->pri.smac) { | 1401 | if (!qp->pri.smac && !qp->pri.smac_port) { |
1408 | smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); | 1402 | smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); |
1409 | if (smac_index >= 0) { | 1403 | if (smac_index >= 0) { |
1410 | qp->pri.candidate_smac_index = smac_index; | 1404 | qp->pri.candidate_smac_index = smac_index; |
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1432 | int steer_qp = 0; | 1426 | int steer_qp = 0; |
1433 | int err = -EINVAL; | 1427 | int err = -EINVAL; |
1434 | 1428 | ||
1429 | /* APM is not supported under RoCE */ | ||
1430 | if (attr_mask & IB_QP_ALT_PATH && | ||
1431 | rdma_port_get_link_layer(&dev->ib_dev, qp->port) == | ||
1432 | IB_LINK_LAYER_ETHERNET) | ||
1433 | return -ENOTSUPP; | ||
1434 | |||
1435 | context = kzalloc(sizeof *context, GFP_KERNEL); | 1435 | context = kzalloc(sizeof *context, GFP_KERNEL); |
1436 | if (!context) | 1436 | if (!context) |
1437 | return -ENOMEM; | 1437 | return -ENOMEM; |
@@ -1682,7 +1682,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1682 | MLX4_IB_LINK_TYPE_ETH; | 1682 | MLX4_IB_LINK_TYPE_ETH; |
1683 | if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { | 1683 | if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { |
1684 | /* set QP to receive both tunneled & non-tunneled packets */ | 1684 | /* set QP to receive both tunneled & non-tunneled packets */ |
1685 | if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET))) | 1685 | if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET))) |
1686 | context->srqn = cpu_to_be32(7 << 28); | 1686 | context->srqn = cpu_to_be32(7 << 28); |
1687 | } | 1687 | } |
1688 | } | 1688 | } |
@@ -1786,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1786 | if (qp->flags & MLX4_IB_QP_NETIF) | 1786 | if (qp->flags & MLX4_IB_QP_NETIF) |
1787 | mlx4_ib_steer_qp_reg(dev, qp, 0); | 1787 | mlx4_ib_steer_qp_reg(dev, qp, 0); |
1788 | } | 1788 | } |
1789 | if (qp->pri.smac) { | 1789 | if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { |
1790 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); | 1790 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); |
1791 | qp->pri.smac = 0; | 1791 | qp->pri.smac = 0; |
1792 | qp->pri.smac_port = 0; | ||
1792 | } | 1793 | } |
1793 | if (qp->alt.smac) { | 1794 | if (qp->alt.smac) { |
1794 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); | 1795 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); |
@@ -1812,11 +1813,12 @@ out: | |||
1812 | if (err && steer_qp) | 1813 | if (err && steer_qp) |
1813 | mlx4_ib_steer_qp_reg(dev, qp, 0); | 1814 | mlx4_ib_steer_qp_reg(dev, qp, 0); |
1814 | kfree(context); | 1815 | kfree(context); |
1815 | if (qp->pri.candidate_smac) { | 1816 | if (qp->pri.candidate_smac || |
1817 | (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { | ||
1816 | if (err) { | 1818 | if (err) { |
1817 | mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); | 1819 | mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); |
1818 | } else { | 1820 | } else { |
1819 | if (qp->pri.smac) | 1821 | if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) |
1820 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); | 1822 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); |
1821 | qp->pri.smac = qp->pri.candidate_smac; | 1823 | qp->pri.smac = qp->pri.candidate_smac; |
1822 | qp->pri.smac_index = qp->pri.candidate_smac_index; | 1824 | qp->pri.smac_index = qp->pri.candidate_smac_index; |
@@ -2089,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, | |||
2089 | return 0; | 2091 | return 0; |
2090 | } | 2092 | } |
2091 | 2093 | ||
2094 | static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac) | ||
2095 | { | ||
2096 | int i; | ||
2097 | |||
2098 | for (i = ETH_ALEN; i; i--) { | ||
2099 | dst_mac[i - 1] = src_mac & 0xff; | ||
2100 | src_mac >>= 8; | ||
2101 | } | ||
2102 | } | ||
2103 | |||
2092 | static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | 2104 | static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, |
2093 | void *wqe, unsigned *mlx_seg_len) | 2105 | void *wqe, unsigned *mlx_seg_len) |
2094 | { | 2106 | { |
@@ -2203,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
2203 | } | 2215 | } |
2204 | 2216 | ||
2205 | if (is_eth) { | 2217 | if (is_eth) { |
2206 | u8 *smac; | ||
2207 | struct in6_addr in6; | 2218 | struct in6_addr in6; |
2208 | 2219 | ||
2209 | u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; | 2220 | u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; |
@@ -2216,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
2216 | memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); | 2227 | memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); |
2217 | memcpy(&in6, sgid.raw, sizeof(in6)); | 2228 | memcpy(&in6, sgid.raw, sizeof(in6)); |
2218 | 2229 | ||
2219 | if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) | 2230 | if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { |
2220 | smac = to_mdev(sqp->qp.ibqp.device)-> | 2231 | u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]); |
2221 | iboe.netdevs[sqp->qp.port - 1]->dev_addr; | 2232 | u8 smac[ETH_ALEN]; |
2222 | else /* use the src mac of the tunnel */ | 2233 | |
2223 | smac = ah->av.eth.s_mac; | 2234 | mlx4_u64_to_smac(smac, mac); |
2224 | memcpy(sqp->ud_header.eth.smac_h, smac, 6); | 2235 | memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN); |
2236 | } else { | ||
2237 | /* use the src mac of the tunnel */ | ||
2238 | memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN); | ||
2239 | } | ||
2240 | |||
2225 | if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) | 2241 | if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) |
2226 | mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); | 2242 | mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); |
2227 | if (!is_vlan) { | 2243 | if (!is_vlan) { |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 40f8536c10b0..ac02ce4e8040 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #define OCRDMA_VID_PCP_SHIFT 0xD | 38 | #define OCRDMA_VID_PCP_SHIFT 0xD |
39 | 39 | ||
40 | static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | 40 | static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, |
41 | struct ib_ah_attr *attr, int pdid) | 41 | struct ib_ah_attr *attr, union ib_gid *sgid, int pdid) |
42 | { | 42 | { |
43 | int status = 0; | 43 | int status = 0; |
44 | u16 vlan_tag; bool vlan_enabled = false; | 44 | u16 vlan_tag; bool vlan_enabled = false; |
@@ -49,8 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | |||
49 | memset(ð, 0, sizeof(eth)); | 49 | memset(ð, 0, sizeof(eth)); |
50 | memset(&grh, 0, sizeof(grh)); | 50 | memset(&grh, 0, sizeof(grh)); |
51 | 51 | ||
52 | ah->sgid_index = attr->grh.sgid_index; | 52 | /* VLAN */ |
53 | |||
54 | vlan_tag = attr->vlan_id; | 53 | vlan_tag = attr->vlan_id; |
55 | if (!vlan_tag || (vlan_tag > 0xFFF)) | 54 | if (!vlan_tag || (vlan_tag > 0xFFF)) |
56 | vlan_tag = dev->pvid; | 55 | vlan_tag = dev->pvid; |
@@ -65,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | |||
65 | eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); | 64 | eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); |
66 | eth_sz = sizeof(struct ocrdma_eth_basic); | 65 | eth_sz = sizeof(struct ocrdma_eth_basic); |
67 | } | 66 | } |
67 | /* MAC */ | ||
68 | memcpy(ð.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); | 68 | memcpy(ð.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); |
69 | memcpy(ð.dmac[0], attr->dmac, ETH_ALEN); | ||
70 | status = ocrdma_resolve_dmac(dev, attr, ð.dmac[0]); | 69 | status = ocrdma_resolve_dmac(dev, attr, ð.dmac[0]); |
71 | if (status) | 70 | if (status) |
72 | return status; | 71 | return status; |
73 | status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, | 72 | ah->sgid_index = attr->grh.sgid_index; |
74 | (union ib_gid *)&grh.sgid[0]); | 73 | memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid)); |
75 | if (status) | 74 | memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw)); |
76 | return status; | ||
77 | 75 | ||
78 | grh.tclass_flow = cpu_to_be32((6 << 28) | | 76 | grh.tclass_flow = cpu_to_be32((6 << 28) | |
79 | (attr->grh.traffic_class << 24) | | 77 | (attr->grh.traffic_class << 24) | |
@@ -81,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | |||
81 | /* 0x1b is next header value in GRH */ | 79 | /* 0x1b is next header value in GRH */ |
82 | grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | | 80 | grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | |
83 | (0x1b << 8) | attr->grh.hop_limit); | 81 | (0x1b << 8) | attr->grh.hop_limit); |
84 | 82 | /* Eth HDR */ | |
85 | memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw)); | ||
86 | memcpy(&ah->av->eth_hdr, ð, eth_sz); | 83 | memcpy(&ah->av->eth_hdr, ð, eth_sz); |
87 | memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); | 84 | memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); |
88 | if (vlan_enabled) | 85 | if (vlan_enabled) |
@@ -98,6 +95,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | |||
98 | struct ocrdma_ah *ah; | 95 | struct ocrdma_ah *ah; |
99 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | 96 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); |
100 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); | 97 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); |
98 | union ib_gid sgid; | ||
99 | u8 zmac[ETH_ALEN]; | ||
101 | 100 | ||
102 | if (!(attr->ah_flags & IB_AH_GRH)) | 101 | if (!(attr->ah_flags & IB_AH_GRH)) |
103 | return ERR_PTR(-EINVAL); | 102 | return ERR_PTR(-EINVAL); |
@@ -111,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | |||
111 | status = ocrdma_alloc_av(dev, ah); | 110 | status = ocrdma_alloc_av(dev, ah); |
112 | if (status) | 111 | if (status) |
113 | goto av_err; | 112 | goto av_err; |
114 | status = set_av_attr(dev, ah, attr, pd->id); | 113 | |
114 | status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid); | ||
115 | if (status) { | ||
116 | pr_err("%s(): Failed to query sgid, status = %d\n", | ||
117 | __func__, status); | ||
118 | goto av_conf_err; | ||
119 | } | ||
120 | |||
121 | memset(&zmac, 0, ETH_ALEN); | ||
122 | if (pd->uctx && | ||
123 | memcmp(attr->dmac, &zmac, ETH_ALEN)) { | ||
124 | status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid, | ||
125 | attr->dmac, &attr->vlan_id); | ||
126 | if (status) { | ||
127 | pr_err("%s(): Failed to resolve dmac from gid." | ||
128 | "status = %d\n", __func__, status); | ||
129 | goto av_conf_err; | ||
130 | } | ||
131 | } | ||
132 | |||
133 | status = set_av_attr(dev, ah, attr, &sgid, pd->id); | ||
115 | if (status) | 134 | if (status) |
116 | goto av_conf_err; | 135 | goto av_conf_err; |
117 | 136 | ||
@@ -145,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) | |||
145 | struct ocrdma_av *av = ah->av; | 164 | struct ocrdma_av *av = ah->av; |
146 | struct ocrdma_grh *grh; | 165 | struct ocrdma_grh *grh; |
147 | attr->ah_flags |= IB_AH_GRH; | 166 | attr->ah_flags |= IB_AH_GRH; |
148 | if (ah->av->valid & Bit(1)) { | 167 | if (ah->av->valid & OCRDMA_AV_VALID) { |
149 | grh = (struct ocrdma_grh *)((u8 *)ah->av + | 168 | grh = (struct ocrdma_grh *)((u8 *)ah->av + |
150 | sizeof(struct ocrdma_eth_vlan)); | 169 | sizeof(struct ocrdma_eth_vlan)); |
151 | attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13; | 170 | attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index acb434d16903..8f5f2577f288 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) | |||
101 | attr->max_srq_sge = dev->attr.max_srq_sge; | 101 | attr->max_srq_sge = dev->attr.max_srq_sge; |
102 | attr->max_srq_wr = dev->attr.max_rqe; | 102 | attr->max_srq_wr = dev->attr.max_rqe; |
103 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; | 103 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; |
104 | attr->max_fast_reg_page_list_len = 0; | 104 | attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr; |
105 | attr->max_pkeys = 1; | 105 | attr->max_pkeys = 1; |
106 | return 0; | 106 | return 0; |
107 | } | 107 | } |
@@ -2846,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) | |||
2846 | if (cq->first_arm) { | 2846 | if (cq->first_arm) { |
2847 | ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); | 2847 | ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); |
2848 | cq->first_arm = false; | 2848 | cq->first_arm = false; |
2849 | goto skip_defer; | ||
2850 | } | 2849 | } |
2851 | cq->deferred_arm = true; | ||
2852 | 2850 | ||
2853 | skip_defer: | 2851 | cq->deferred_arm = true; |
2854 | cq->deferred_sol = sol_needed; | 2852 | cq->deferred_sol = sol_needed; |
2855 | spin_unlock_irqrestore(&cq->cq_lock, flags); | 2853 | spin_unlock_irqrestore(&cq->cq_lock, flags); |
2856 | 2854 | ||
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c index 799a0c3bffc4..6abd3ed3cd51 100644 --- a/drivers/infiniband/hw/qib/qib_debugfs.c +++ b/drivers/infiniband/hw/qib/qib_debugfs.c | |||
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) | |||
193 | struct qib_qp_iter *iter; | 193 | struct qib_qp_iter *iter; |
194 | loff_t n = *pos; | 194 | loff_t n = *pos; |
195 | 195 | ||
196 | rcu_read_lock(); | ||
196 | iter = qib_qp_iter_init(s->private); | 197 | iter = qib_qp_iter_init(s->private); |
197 | if (!iter) | 198 | if (!iter) |
198 | return NULL; | 199 | return NULL; |
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | |||
224 | 225 | ||
225 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) | 226 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) |
226 | { | 227 | { |
227 | /* nothing for now */ | 228 | rcu_read_unlock(); |
228 | } | 229 | } |
229 | 230 | ||
230 | static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) | 231 | static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) |
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 7fcc150d603c..6ddc0264aad2 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter) | |||
1325 | struct qib_qp *pqp = iter->qp; | 1325 | struct qib_qp *pqp = iter->qp; |
1326 | struct qib_qp *qp; | 1326 | struct qib_qp *qp; |
1327 | 1327 | ||
1328 | rcu_read_lock(); | ||
1329 | for (; n < dev->qp_table_size; n++) { | 1328 | for (; n < dev->qp_table_size; n++) { |
1330 | if (pqp) | 1329 | if (pqp) |
1331 | qp = rcu_dereference(pqp->next); | 1330 | qp = rcu_dereference(pqp->next); |
@@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter) | |||
1333 | qp = rcu_dereference(dev->qp_table[n]); | 1332 | qp = rcu_dereference(dev->qp_table[n]); |
1334 | pqp = qp; | 1333 | pqp = qp; |
1335 | if (qp) { | 1334 | if (qp) { |
1336 | if (iter->qp) | ||
1337 | atomic_dec(&iter->qp->refcount); | ||
1338 | atomic_inc(&qp->refcount); | ||
1339 | rcu_read_unlock(); | ||
1340 | iter->qp = qp; | 1335 | iter->qp = qp; |
1341 | iter->n = n; | 1336 | iter->n = n; |
1342 | return 0; | 1337 | return 0; |
1343 | } | 1338 | } |
1344 | } | 1339 | } |
1345 | rcu_read_unlock(); | ||
1346 | if (iter->qp) | ||
1347 | atomic_dec(&iter->qp->refcount); | ||
1348 | return ret; | 1340 | return ret; |
1349 | } | 1341 | } |
1350 | 1342 | ||
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 2bc1d2b96298..74f90b2619f6 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c | |||
@@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages, | |||
52 | * Call with current->mm->mmap_sem held. | 52 | * Call with current->mm->mmap_sem held. |
53 | */ | 53 | */ |
54 | static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, | 54 | static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, |
55 | struct page **p, struct vm_area_struct **vma) | 55 | struct page **p) |
56 | { | 56 | { |
57 | unsigned long lock_limit; | 57 | unsigned long lock_limit; |
58 | size_t got; | 58 | size_t got; |
@@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, | |||
69 | ret = get_user_pages(current, current->mm, | 69 | ret = get_user_pages(current, current->mm, |
70 | start_page + got * PAGE_SIZE, | 70 | start_page + got * PAGE_SIZE, |
71 | num_pages - got, 1, 1, | 71 | num_pages - got, 1, 1, |
72 | p + got, vma); | 72 | p + got, NULL); |
73 | if (ret < 0) | 73 | if (ret < 0) |
74 | goto bail_release; | 74 | goto bail_release; |
75 | } | 75 | } |
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, | |||
136 | 136 | ||
137 | down_write(¤t->mm->mmap_sem); | 137 | down_write(¤t->mm->mmap_sem); |
138 | 138 | ||
139 | ret = __qib_get_user_pages(start_page, num_pages, p, NULL); | 139 | ret = __qib_get_user_pages(start_page, num_pages, p); |
140 | 140 | ||
141 | up_write(¤t->mm->mmap_sem); | 141 | up_write(¤t->mm->mmap_sem); |
142 | 142 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 3edce617c31b..d7562beb5423 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -131,6 +131,12 @@ struct ipoib_cb { | |||
131 | u8 hwaddr[INFINIBAND_ALEN]; | 131 | u8 hwaddr[INFINIBAND_ALEN]; |
132 | }; | 132 | }; |
133 | 133 | ||
134 | static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb) | ||
135 | { | ||
136 | BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb)); | ||
137 | return (struct ipoib_cb *)skb->cb; | ||
138 | } | ||
139 | |||
134 | /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ | 140 | /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ |
135 | struct ipoib_mcast { | 141 | struct ipoib_mcast { |
136 | struct ib_sa_mcmember_rec mcmember; | 142 | struct ib_sa_mcmember_rec mcmember; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 1310acf6bf92..13e6e0431592 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -716,7 +716,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
716 | { | 716 | { |
717 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 717 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
718 | struct ipoib_neigh *neigh; | 718 | struct ipoib_neigh *neigh; |
719 | struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; | 719 | struct ipoib_cb *cb = ipoib_skb_cb(skb); |
720 | struct ipoib_header *header; | 720 | struct ipoib_header *header; |
721 | unsigned long flags; | 721 | unsigned long flags; |
722 | 722 | ||
@@ -813,7 +813,7 @@ static int ipoib_hard_header(struct sk_buff *skb, | |||
813 | const void *daddr, const void *saddr, unsigned len) | 813 | const void *daddr, const void *saddr, unsigned len) |
814 | { | 814 | { |
815 | struct ipoib_header *header; | 815 | struct ipoib_header *header; |
816 | struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; | 816 | struct ipoib_cb *cb = ipoib_skb_cb(skb); |
817 | 817 | ||
818 | header = (struct ipoib_header *) skb_push(skb, sizeof *header); | 818 | header = (struct ipoib_header *) skb_push(skb, sizeof *header); |
819 | 819 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index d4e005720d01..ffb83b5f7e80 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -529,21 +529,13 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
529 | port_attr.state); | 529 | port_attr.state); |
530 | return; | 530 | return; |
531 | } | 531 | } |
532 | priv->local_lid = port_attr.lid; | ||
532 | 533 | ||
533 | if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid)) | 534 | if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid)) |
534 | ipoib_warn(priv, "ib_query_gid() failed\n"); | 535 | ipoib_warn(priv, "ib_query_gid() failed\n"); |
535 | else | 536 | else |
536 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); | 537 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); |
537 | 538 | ||
538 | { | ||
539 | struct ib_port_attr attr; | ||
540 | |||
541 | if (!ib_query_port(priv->ca, priv->port, &attr)) | ||
542 | priv->local_lid = attr.lid; | ||
543 | else | ||
544 | ipoib_warn(priv, "ib_query_port failed\n"); | ||
545 | } | ||
546 | |||
547 | if (!priv->broadcast) { | 539 | if (!priv->broadcast) { |
548 | struct ipoib_mcast *broadcast; | 540 | struct ipoib_mcast *broadcast; |
549 | 541 | ||
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 61ee91d88380..93ce62fe1594 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -344,7 +344,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
344 | int is_leading) | 344 | int is_leading) |
345 | { | 345 | { |
346 | struct iscsi_conn *conn = cls_conn->dd_data; | 346 | struct iscsi_conn *conn = cls_conn->dd_data; |
347 | struct iscsi_session *session; | ||
348 | struct iser_conn *ib_conn; | 347 | struct iser_conn *ib_conn; |
349 | struct iscsi_endpoint *ep; | 348 | struct iscsi_endpoint *ep; |
350 | int error; | 349 | int error; |
@@ -363,9 +362,17 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
363 | } | 362 | } |
364 | ib_conn = ep->dd_data; | 363 | ib_conn = ep->dd_data; |
365 | 364 | ||
366 | session = conn->session; | 365 | mutex_lock(&ib_conn->state_mutex); |
367 | if (iser_alloc_rx_descriptors(ib_conn, session)) | 366 | if (ib_conn->state != ISER_CONN_UP) { |
368 | return -ENOMEM; | 367 | error = -EINVAL; |
368 | iser_err("iser_conn %p state is %d, teardown started\n", | ||
369 | ib_conn, ib_conn->state); | ||
370 | goto out; | ||
371 | } | ||
372 | |||
373 | error = iser_alloc_rx_descriptors(ib_conn, conn->session); | ||
374 | if (error) | ||
375 | goto out; | ||
369 | 376 | ||
370 | /* binds the iSER connection retrieved from the previously | 377 | /* binds the iSER connection retrieved from the previously |
371 | * connected ep_handle to the iSCSI layer connection. exchanges | 378 | * connected ep_handle to the iSCSI layer connection. exchanges |
@@ -375,7 +382,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
375 | conn->dd_data = ib_conn; | 382 | conn->dd_data = ib_conn; |
376 | ib_conn->iscsi_conn = conn; | 383 | ib_conn->iscsi_conn = conn; |
377 | 384 | ||
378 | return 0; | 385 | out: |
386 | mutex_unlock(&ib_conn->state_mutex); | ||
387 | return error; | ||
379 | } | 388 | } |
380 | 389 | ||
381 | static int | 390 | static int |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index c877dad381cb..9f0e0e34d6ca 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -69,7 +69,7 @@ | |||
69 | 69 | ||
70 | #define DRV_NAME "iser" | 70 | #define DRV_NAME "iser" |
71 | #define PFX DRV_NAME ": " | 71 | #define PFX DRV_NAME ": " |
72 | #define DRV_VER "1.4" | 72 | #define DRV_VER "1.4.1" |
73 | 73 | ||
74 | #define iser_dbg(fmt, arg...) \ | 74 | #define iser_dbg(fmt, arg...) \ |
75 | do { \ | 75 | do { \ |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 3ef167f97d6f..3bfec4bbda52 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -73,7 +73,7 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
73 | { | 73 | { |
74 | struct iser_cq_desc *cq_desc; | 74 | struct iser_cq_desc *cq_desc; |
75 | struct ib_device_attr *dev_attr = &device->dev_attr; | 75 | struct ib_device_attr *dev_attr = &device->dev_attr; |
76 | int ret, i, j; | 76 | int ret, i; |
77 | 77 | ||
78 | ret = ib_query_device(device->ib_device, dev_attr); | 78 | ret = ib_query_device(device->ib_device, dev_attr); |
79 | if (ret) { | 79 | if (ret) { |
@@ -125,16 +125,20 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
125 | iser_cq_event_callback, | 125 | iser_cq_event_callback, |
126 | (void *)&cq_desc[i], | 126 | (void *)&cq_desc[i], |
127 | ISER_MAX_RX_CQ_LEN, i); | 127 | ISER_MAX_RX_CQ_LEN, i); |
128 | if (IS_ERR(device->rx_cq[i])) | 128 | if (IS_ERR(device->rx_cq[i])) { |
129 | device->rx_cq[i] = NULL; | ||
129 | goto cq_err; | 130 | goto cq_err; |
131 | } | ||
130 | 132 | ||
131 | device->tx_cq[i] = ib_create_cq(device->ib_device, | 133 | device->tx_cq[i] = ib_create_cq(device->ib_device, |
132 | NULL, iser_cq_event_callback, | 134 | NULL, iser_cq_event_callback, |
133 | (void *)&cq_desc[i], | 135 | (void *)&cq_desc[i], |
134 | ISER_MAX_TX_CQ_LEN, i); | 136 | ISER_MAX_TX_CQ_LEN, i); |
135 | 137 | ||
136 | if (IS_ERR(device->tx_cq[i])) | 138 | if (IS_ERR(device->tx_cq[i])) { |
139 | device->tx_cq[i] = NULL; | ||
137 | goto cq_err; | 140 | goto cq_err; |
141 | } | ||
138 | 142 | ||
139 | if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP)) | 143 | if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP)) |
140 | goto cq_err; | 144 | goto cq_err; |
@@ -160,14 +164,14 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
160 | handler_err: | 164 | handler_err: |
161 | ib_dereg_mr(device->mr); | 165 | ib_dereg_mr(device->mr); |
162 | dma_mr_err: | 166 | dma_mr_err: |
163 | for (j = 0; j < device->cqs_used; j++) | 167 | for (i = 0; i < device->cqs_used; i++) |
164 | tasklet_kill(&device->cq_tasklet[j]); | 168 | tasklet_kill(&device->cq_tasklet[i]); |
165 | cq_err: | 169 | cq_err: |
166 | for (j = 0; j < i; j++) { | 170 | for (i = 0; i < device->cqs_used; i++) { |
167 | if (device->tx_cq[j]) | 171 | if (device->tx_cq[i]) |
168 | ib_destroy_cq(device->tx_cq[j]); | 172 | ib_destroy_cq(device->tx_cq[i]); |
169 | if (device->rx_cq[j]) | 173 | if (device->rx_cq[i]) |
170 | ib_destroy_cq(device->rx_cq[j]); | 174 | ib_destroy_cq(device->rx_cq[i]); |
171 | } | 175 | } |
172 | ib_dealloc_pd(device->pd); | 176 | ib_dealloc_pd(device->pd); |
173 | pd_err: | 177 | pd_err: |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 713e3ddb43bd..40b7d6c0ff17 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -466,6 +466,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | |||
466 | }, | 466 | }, |
467 | }, | 467 | }, |
468 | { | 468 | { |
469 | /* Asus X450LCP */ | ||
470 | .matches = { | ||
471 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
472 | DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), | ||
473 | }, | ||
474 | }, | ||
475 | { | ||
469 | /* Avatar AVIU-145A6 */ | 476 | /* Avatar AVIU-145A6 */ |
470 | .matches = { | 477 | .matches = { |
471 | DMI_MATCH(DMI_SYS_VENDOR, "Intel"), | 478 | DMI_MATCH(DMI_SYS_VENDOR, "Intel"), |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d7690f86fdb9..55de4f6f7eaf 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect | |||
540 | has_nonrot_disk = 0; | 540 | has_nonrot_disk = 0; |
541 | choose_next_idle = 0; | 541 | choose_next_idle = 0; |
542 | 542 | ||
543 | if (conf->mddev->recovery_cp < MaxSector && | 543 | choose_first = (conf->mddev->recovery_cp < this_sector + sectors); |
544 | (this_sector + sectors >= conf->next_resync)) | ||
545 | choose_first = 1; | ||
546 | else | ||
547 | choose_first = 0; | ||
548 | 544 | ||
549 | for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { | 545 | for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { |
550 | sector_t dist; | 546 | sector_t dist; |
@@ -831,7 +827,7 @@ static void flush_pending_writes(struct r1conf *conf) | |||
831 | * there is no normal IO happeing. It must arrange to call | 827 | * there is no normal IO happeing. It must arrange to call |
832 | * lower_barrier when the particular background IO completes. | 828 | * lower_barrier when the particular background IO completes. |
833 | */ | 829 | */ |
834 | static void raise_barrier(struct r1conf *conf) | 830 | static void raise_barrier(struct r1conf *conf, sector_t sector_nr) |
835 | { | 831 | { |
836 | spin_lock_irq(&conf->resync_lock); | 832 | spin_lock_irq(&conf->resync_lock); |
837 | 833 | ||
@@ -841,6 +837,7 @@ static void raise_barrier(struct r1conf *conf) | |||
841 | 837 | ||
842 | /* block any new IO from starting */ | 838 | /* block any new IO from starting */ |
843 | conf->barrier++; | 839 | conf->barrier++; |
840 | conf->next_resync = sector_nr; | ||
844 | 841 | ||
845 | /* For these conditions we must wait: | 842 | /* For these conditions we must wait: |
846 | * A: while the array is in frozen state | 843 | * A: while the array is in frozen state |
@@ -849,14 +846,17 @@ static void raise_barrier(struct r1conf *conf) | |||
849 | * C: next_resync + RESYNC_SECTORS > start_next_window, meaning | 846 | * C: next_resync + RESYNC_SECTORS > start_next_window, meaning |
850 | * next resync will reach to the window which normal bios are | 847 | * next resync will reach to the window which normal bios are |
851 | * handling. | 848 | * handling. |
849 | * D: while there are any active requests in the current window. | ||
852 | */ | 850 | */ |
853 | wait_event_lock_irq(conf->wait_barrier, | 851 | wait_event_lock_irq(conf->wait_barrier, |
854 | !conf->array_frozen && | 852 | !conf->array_frozen && |
855 | conf->barrier < RESYNC_DEPTH && | 853 | conf->barrier < RESYNC_DEPTH && |
854 | conf->current_window_requests == 0 && | ||
856 | (conf->start_next_window >= | 855 | (conf->start_next_window >= |
857 | conf->next_resync + RESYNC_SECTORS), | 856 | conf->next_resync + RESYNC_SECTORS), |
858 | conf->resync_lock); | 857 | conf->resync_lock); |
859 | 858 | ||
859 | conf->nr_pending++; | ||
860 | spin_unlock_irq(&conf->resync_lock); | 860 | spin_unlock_irq(&conf->resync_lock); |
861 | } | 861 | } |
862 | 862 | ||
@@ -866,6 +866,7 @@ static void lower_barrier(struct r1conf *conf) | |||
866 | BUG_ON(conf->barrier <= 0); | 866 | BUG_ON(conf->barrier <= 0); |
867 | spin_lock_irqsave(&conf->resync_lock, flags); | 867 | spin_lock_irqsave(&conf->resync_lock, flags); |
868 | conf->barrier--; | 868 | conf->barrier--; |
869 | conf->nr_pending--; | ||
869 | spin_unlock_irqrestore(&conf->resync_lock, flags); | 870 | spin_unlock_irqrestore(&conf->resync_lock, flags); |
870 | wake_up(&conf->wait_barrier); | 871 | wake_up(&conf->wait_barrier); |
871 | } | 872 | } |
@@ -877,12 +878,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) | |||
877 | if (conf->array_frozen || !bio) | 878 | if (conf->array_frozen || !bio) |
878 | wait = true; | 879 | wait = true; |
879 | else if (conf->barrier && bio_data_dir(bio) == WRITE) { | 880 | else if (conf->barrier && bio_data_dir(bio) == WRITE) { |
880 | if (conf->next_resync < RESYNC_WINDOW_SECTORS) | 881 | if ((conf->mddev->curr_resync_completed |
881 | wait = true; | 882 | >= bio_end_sector(bio)) || |
882 | else if ((conf->next_resync - RESYNC_WINDOW_SECTORS | 883 | (conf->next_resync + NEXT_NORMALIO_DISTANCE |
883 | >= bio_end_sector(bio)) || | 884 | <= bio->bi_iter.bi_sector)) |
884 | (conf->next_resync + NEXT_NORMALIO_DISTANCE | ||
885 | <= bio->bi_iter.bi_sector)) | ||
886 | wait = false; | 885 | wait = false; |
887 | else | 886 | else |
888 | wait = true; | 887 | wait = true; |
@@ -919,8 +918,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) | |||
919 | } | 918 | } |
920 | 919 | ||
921 | if (bio && bio_data_dir(bio) == WRITE) { | 920 | if (bio && bio_data_dir(bio) == WRITE) { |
922 | if (conf->next_resync + NEXT_NORMALIO_DISTANCE | 921 | if (bio->bi_iter.bi_sector >= |
923 | <= bio->bi_iter.bi_sector) { | 922 | conf->mddev->curr_resync_completed) { |
924 | if (conf->start_next_window == MaxSector) | 923 | if (conf->start_next_window == MaxSector) |
925 | conf->start_next_window = | 924 | conf->start_next_window = |
926 | conf->next_resync + | 925 | conf->next_resync + |
@@ -1186,6 +1185,7 @@ read_again: | |||
1186 | atomic_read(&bitmap->behind_writes) == 0); | 1185 | atomic_read(&bitmap->behind_writes) == 0); |
1187 | } | 1186 | } |
1188 | r1_bio->read_disk = rdisk; | 1187 | r1_bio->read_disk = rdisk; |
1188 | r1_bio->start_next_window = 0; | ||
1189 | 1189 | ||
1190 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1190 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
1191 | bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, | 1191 | bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, |
@@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *conf) | |||
1548 | mempool_destroy(conf->r1buf_pool); | 1548 | mempool_destroy(conf->r1buf_pool); |
1549 | conf->r1buf_pool = NULL; | 1549 | conf->r1buf_pool = NULL; |
1550 | 1550 | ||
1551 | spin_lock_irq(&conf->resync_lock); | ||
1551 | conf->next_resync = 0; | 1552 | conf->next_resync = 0; |
1552 | conf->start_next_window = MaxSector; | 1553 | conf->start_next_window = MaxSector; |
1554 | conf->current_window_requests += | ||
1555 | conf->next_window_requests; | ||
1556 | conf->next_window_requests = 0; | ||
1557 | spin_unlock_irq(&conf->resync_lock); | ||
1553 | } | 1558 | } |
1554 | 1559 | ||
1555 | static int raid1_spare_active(struct mddev *mddev) | 1560 | static int raid1_spare_active(struct mddev *mddev) |
@@ -2150,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, | |||
2150 | d--; | 2155 | d--; |
2151 | rdev = conf->mirrors[d].rdev; | 2156 | rdev = conf->mirrors[d].rdev; |
2152 | if (rdev && | 2157 | if (rdev && |
2153 | test_bit(In_sync, &rdev->flags)) | 2158 | !test_bit(Faulty, &rdev->flags)) |
2154 | r1_sync_page_io(rdev, sect, s, | 2159 | r1_sync_page_io(rdev, sect, s, |
2155 | conf->tmppage, WRITE); | 2160 | conf->tmppage, WRITE); |
2156 | } | 2161 | } |
@@ -2162,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, | |||
2162 | d--; | 2167 | d--; |
2163 | rdev = conf->mirrors[d].rdev; | 2168 | rdev = conf->mirrors[d].rdev; |
2164 | if (rdev && | 2169 | if (rdev && |
2165 | test_bit(In_sync, &rdev->flags)) { | 2170 | !test_bit(Faulty, &rdev->flags)) { |
2166 | if (r1_sync_page_io(rdev, sect, s, | 2171 | if (r1_sync_page_io(rdev, sect, s, |
2167 | conf->tmppage, READ)) { | 2172 | conf->tmppage, READ)) { |
2168 | atomic_add(s, &rdev->corrected_errors); | 2173 | atomic_add(s, &rdev->corrected_errors); |
@@ -2541,9 +2546,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp | |||
2541 | 2546 | ||
2542 | bitmap_cond_end_sync(mddev->bitmap, sector_nr); | 2547 | bitmap_cond_end_sync(mddev->bitmap, sector_nr); |
2543 | r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); | 2548 | r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); |
2544 | raise_barrier(conf); | ||
2545 | 2549 | ||
2546 | conf->next_resync = sector_nr; | 2550 | raise_barrier(conf, sector_nr); |
2547 | 2551 | ||
2548 | rcu_read_lock(); | 2552 | rcu_read_lock(); |
2549 | /* | 2553 | /* |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 183588b11fc1..9f0fbecd1eb5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -64,6 +64,10 @@ | |||
64 | #define cpu_to_group(cpu) cpu_to_node(cpu) | 64 | #define cpu_to_group(cpu) cpu_to_node(cpu) |
65 | #define ANY_GROUP NUMA_NO_NODE | 65 | #define ANY_GROUP NUMA_NO_NODE |
66 | 66 | ||
67 | static bool devices_handle_discard_safely = false; | ||
68 | module_param(devices_handle_discard_safely, bool, 0644); | ||
69 | MODULE_PARM_DESC(devices_handle_discard_safely, | ||
70 | "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); | ||
67 | static struct workqueue_struct *raid5_wq; | 71 | static struct workqueue_struct *raid5_wq; |
68 | /* | 72 | /* |
69 | * Stripe cache | 73 | * Stripe cache |
@@ -6208,7 +6212,7 @@ static int run(struct mddev *mddev) | |||
6208 | mddev->queue->limits.discard_granularity = stripe; | 6212 | mddev->queue->limits.discard_granularity = stripe; |
6209 | /* | 6213 | /* |
6210 | * unaligned part of discard request will be ignored, so can't | 6214 | * unaligned part of discard request will be ignored, so can't |
6211 | * guarantee discard_zerors_data | 6215 | * guarantee discard_zeroes_data |
6212 | */ | 6216 | */ |
6213 | mddev->queue->limits.discard_zeroes_data = 0; | 6217 | mddev->queue->limits.discard_zeroes_data = 0; |
6214 | 6218 | ||
@@ -6233,6 +6237,18 @@ static int run(struct mddev *mddev) | |||
6233 | !bdev_get_queue(rdev->bdev)-> | 6237 | !bdev_get_queue(rdev->bdev)-> |
6234 | limits.discard_zeroes_data) | 6238 | limits.discard_zeroes_data) |
6235 | discard_supported = false; | 6239 | discard_supported = false; |
6240 | /* Unfortunately, discard_zeroes_data is not currently | ||
6241 | * a guarantee - just a hint. So we only allow DISCARD | ||
6242 | * if the sysadmin has confirmed that only safe devices | ||
6243 | * are in use by setting a module parameter. | ||
6244 | */ | ||
6245 | if (!devices_handle_discard_safely) { | ||
6246 | if (discard_supported) { | ||
6247 | pr_info("md/raid456: discard support disabled due to uncertainty.\n"); | ||
6248 | pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); | ||
6249 | } | ||
6250 | discard_supported = false; | ||
6251 | } | ||
6236 | } | 6252 | } |
6237 | 6253 | ||
6238 | if (discard_supported && | 6254 | if (discard_supported && |
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c index 103ef6bad2e2..be763150b8aa 100644 --- a/drivers/media/common/cx2341x.c +++ b/drivers/media/common/cx2341x.c | |||
@@ -1490,6 +1490,7 @@ static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, | |||
1490 | { | 1490 | { |
1491 | struct v4l2_ctrl_config cfg; | 1491 | struct v4l2_ctrl_config cfg; |
1492 | 1492 | ||
1493 | memset(&cfg, 0, sizeof(cfg)); | ||
1493 | cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags); | 1494 | cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags); |
1494 | cfg.ops = &cx2341x_ops; | 1495 | cfg.ops = &cx2341x_ops; |
1495 | cfg.id = id; | 1496 | cfg.id = id; |
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c index 72fb5838cae0..7975c6608e20 100644 --- a/drivers/media/dvb-frontends/cx24123.c +++ b/drivers/media/dvb-frontends/cx24123.c | |||
@@ -1095,6 +1095,7 @@ struct dvb_frontend *cx24123_attach(const struct cx24123_config *config, | |||
1095 | sizeof(state->tuner_i2c_adapter.name)); | 1095 | sizeof(state->tuner_i2c_adapter.name)); |
1096 | state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo; | 1096 | state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo; |
1097 | state->tuner_i2c_adapter.algo_data = NULL; | 1097 | state->tuner_i2c_adapter.algo_data = NULL; |
1098 | state->tuner_i2c_adapter.dev.parent = i2c->dev.parent; | ||
1098 | i2c_set_adapdata(&state->tuner_i2c_adapter, state); | 1099 | i2c_set_adapdata(&state->tuner_i2c_adapter, state); |
1099 | if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) { | 1100 | if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) { |
1100 | err("tuner i2c bus could not be initialized\n"); | 1101 | err("tuner i2c bus could not be initialized\n"); |
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index d4fa213ba74a..de88b980a837 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c | |||
@@ -2325,7 +2325,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd) | |||
2325 | v4l2_info(sd, "HDCP keys read: %s%s\n", | 2325 | v4l2_info(sd, "HDCP keys read: %s%s\n", |
2326 | (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no", | 2326 | (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no", |
2327 | (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : ""); | 2327 | (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : ""); |
2328 | if (!is_hdmi(sd)) { | 2328 | if (is_hdmi(sd)) { |
2329 | bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01; | 2329 | bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01; |
2330 | bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01; | 2330 | bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01; |
2331 | bool audio_mute = io_read(sd, 0x65) & 0x40; | 2331 | bool audio_mute = io_read(sd, 0x65) & 0x40; |
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c index 998919e97dfe..7b35e633118d 100644 --- a/drivers/media/radio/radio-miropcm20.c +++ b/drivers/media/radio/radio-miropcm20.c | |||
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/io.h> | ||
30 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
31 | #include <linux/videodev2.h> | 32 | #include <linux/videodev2.h> |
32 | #include <linux/kthread.h> | 33 | #include <linux/kthread.h> |
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index a7e24848f6c8..9da812b8a786 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c | |||
@@ -3524,6 +3524,7 @@ static struct usb_driver em28xx_usb_driver = { | |||
3524 | .disconnect = em28xx_usb_disconnect, | 3524 | .disconnect = em28xx_usb_disconnect, |
3525 | .suspend = em28xx_usb_suspend, | 3525 | .suspend = em28xx_usb_suspend, |
3526 | .resume = em28xx_usb_resume, | 3526 | .resume = em28xx_usb_resume, |
3527 | .reset_resume = em28xx_usb_resume, | ||
3527 | .id_table = em28xx_id_table, | 3528 | .id_table = em28xx_id_table, |
3528 | }; | 3529 | }; |
3529 | 3530 | ||
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index 90dec2955f1c..29abc379551e 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c | |||
@@ -1342,7 +1342,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, | |||
1342 | struct em28xx *dev = video_drvdata(file); | 1342 | struct em28xx *dev = video_drvdata(file); |
1343 | struct em28xx_v4l2 *v4l2 = dev->v4l2; | 1343 | struct em28xx_v4l2 *v4l2 = dev->v4l2; |
1344 | 1344 | ||
1345 | if (v4l2->streaming_users > 0) | 1345 | if (vb2_is_busy(&v4l2->vb_vidq)) |
1346 | return -EBUSY; | 1346 | return -EBUSY; |
1347 | 1347 | ||
1348 | vidioc_try_fmt_vid_cap(file, priv, f); | 1348 | vidioc_try_fmt_vid_cap(file, priv, f); |
@@ -1883,8 +1883,9 @@ static int em28xx_v4l2_open(struct file *filp) | |||
1883 | return -EINVAL; | 1883 | return -EINVAL; |
1884 | } | 1884 | } |
1885 | 1885 | ||
1886 | em28xx_videodbg("open dev=%s type=%s\n", | 1886 | em28xx_videodbg("open dev=%s type=%s users=%d\n", |
1887 | video_device_node_name(vdev), v4l2_type_names[fh_type]); | 1887 | video_device_node_name(vdev), v4l2_type_names[fh_type], |
1888 | v4l2->users); | ||
1888 | 1889 | ||
1889 | if (mutex_lock_interruptible(&dev->lock)) | 1890 | if (mutex_lock_interruptible(&dev->lock)) |
1890 | return -ERESTARTSYS; | 1891 | return -ERESTARTSYS; |
@@ -1897,9 +1898,7 @@ static int em28xx_v4l2_open(struct file *filp) | |||
1897 | return ret; | 1898 | return ret; |
1898 | } | 1899 | } |
1899 | 1900 | ||
1900 | if (v4l2_fh_is_singular_file(filp)) { | 1901 | if (v4l2->users == 0) { |
1901 | em28xx_videodbg("first opened filehandle, initializing device\n"); | ||
1902 | |||
1903 | em28xx_set_mode(dev, EM28XX_ANALOG_MODE); | 1902 | em28xx_set_mode(dev, EM28XX_ANALOG_MODE); |
1904 | 1903 | ||
1905 | if (vdev->vfl_type != VFL_TYPE_RADIO) | 1904 | if (vdev->vfl_type != VFL_TYPE_RADIO) |
@@ -1910,8 +1909,6 @@ static int em28xx_v4l2_open(struct file *filp) | |||
1910 | * of some i2c devices | 1909 | * of some i2c devices |
1911 | */ | 1910 | */ |
1912 | em28xx_wake_i2c(dev); | 1911 | em28xx_wake_i2c(dev); |
1913 | } else { | ||
1914 | em28xx_videodbg("further filehandles are already opened\n"); | ||
1915 | } | 1912 | } |
1916 | 1913 | ||
1917 | if (vdev->vfl_type == VFL_TYPE_RADIO) { | 1914 | if (vdev->vfl_type == VFL_TYPE_RADIO) { |
@@ -1921,6 +1918,7 @@ static int em28xx_v4l2_open(struct file *filp) | |||
1921 | 1918 | ||
1922 | kref_get(&dev->ref); | 1919 | kref_get(&dev->ref); |
1923 | kref_get(&v4l2->ref); | 1920 | kref_get(&v4l2->ref); |
1921 | v4l2->users++; | ||
1924 | 1922 | ||
1925 | mutex_unlock(&dev->lock); | 1923 | mutex_unlock(&dev->lock); |
1926 | 1924 | ||
@@ -2027,11 +2025,12 @@ static int em28xx_v4l2_close(struct file *filp) | |||
2027 | struct em28xx_v4l2 *v4l2 = dev->v4l2; | 2025 | struct em28xx_v4l2 *v4l2 = dev->v4l2; |
2028 | int errCode; | 2026 | int errCode; |
2029 | 2027 | ||
2030 | mutex_lock(&dev->lock); | 2028 | em28xx_videodbg("users=%d\n", v4l2->users); |
2031 | 2029 | ||
2032 | if (v4l2_fh_is_singular_file(filp)) { | 2030 | vb2_fop_release(filp); |
2033 | em28xx_videodbg("last opened filehandle, shutting down device\n"); | 2031 | mutex_lock(&dev->lock); |
2034 | 2032 | ||
2033 | if (v4l2->users == 1) { | ||
2035 | /* No sense to try to write to the device */ | 2034 | /* No sense to try to write to the device */ |
2036 | if (dev->disconnected) | 2035 | if (dev->disconnected) |
2037 | goto exit; | 2036 | goto exit; |
@@ -2050,12 +2049,10 @@ static int em28xx_v4l2_close(struct file *filp) | |||
2050 | em28xx_errdev("cannot change alternate number to " | 2049 | em28xx_errdev("cannot change alternate number to " |
2051 | "0 (error=%i)\n", errCode); | 2050 | "0 (error=%i)\n", errCode); |
2052 | } | 2051 | } |
2053 | } else { | ||
2054 | em28xx_videodbg("further opened filehandles left\n"); | ||
2055 | } | 2052 | } |
2056 | 2053 | ||
2057 | exit: | 2054 | exit: |
2058 | vb2_fop_release(filp); | 2055 | v4l2->users--; |
2059 | kref_put(&v4l2->ref, em28xx_free_v4l2); | 2056 | kref_put(&v4l2->ref, em28xx_free_v4l2); |
2060 | mutex_unlock(&dev->lock); | 2057 | mutex_unlock(&dev->lock); |
2061 | kref_put(&dev->ref, em28xx_free_device); | 2058 | kref_put(&dev->ref, em28xx_free_device); |
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 84ef8efdb148..4360338e7b31 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h | |||
@@ -524,6 +524,7 @@ struct em28xx_v4l2 { | |||
524 | int sensor_yres; | 524 | int sensor_yres; |
525 | int sensor_xtal; | 525 | int sensor_xtal; |
526 | 526 | ||
527 | int users; /* user count for exclusive use */ | ||
527 | int streaming_users; /* number of actively streaming users */ | 528 | int streaming_users; /* number of actively streaming users */ |
528 | 529 | ||
529 | u32 frequency; /* selected tuner frequency */ | 530 | u32 frequency; /* selected tuner frequency */ |
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index c359006074a8..25d3ae2188cb 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
@@ -971,6 +971,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) | |||
971 | * to the userspace. | 971 | * to the userspace. |
972 | */ | 972 | */ |
973 | req->count = allocated_buffers; | 973 | req->count = allocated_buffers; |
974 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); | ||
974 | 975 | ||
975 | return 0; | 976 | return 0; |
976 | } | 977 | } |
@@ -1018,6 +1019,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create | |||
1018 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); | 1019 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); |
1019 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); | 1020 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); |
1020 | q->memory = create->memory; | 1021 | q->memory = create->memory; |
1022 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); | ||
1021 | } | 1023 | } |
1022 | 1024 | ||
1023 | num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); | 1025 | num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); |
@@ -1130,7 +1132,7 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr); | |||
1130 | */ | 1132 | */ |
1131 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) | 1133 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) |
1132 | { | 1134 | { |
1133 | if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) | 1135 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) |
1134 | return NULL; | 1136 | return NULL; |
1135 | 1137 | ||
1136 | return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv); | 1138 | return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv); |
@@ -1165,13 +1167,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) | |||
1165 | if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) | 1167 | if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) |
1166 | return; | 1168 | return; |
1167 | 1169 | ||
1168 | if (!q->start_streaming_called) { | 1170 | if (WARN_ON(state != VB2_BUF_STATE_DONE && |
1169 | if (WARN_ON(state != VB2_BUF_STATE_QUEUED)) | 1171 | state != VB2_BUF_STATE_ERROR && |
1170 | state = VB2_BUF_STATE_QUEUED; | 1172 | state != VB2_BUF_STATE_QUEUED)) |
1171 | } else if (WARN_ON(state != VB2_BUF_STATE_DONE && | 1173 | state = VB2_BUF_STATE_ERROR; |
1172 | state != VB2_BUF_STATE_ERROR)) { | ||
1173 | state = VB2_BUF_STATE_ERROR; | ||
1174 | } | ||
1175 | 1174 | ||
1176 | #ifdef CONFIG_VIDEO_ADV_DEBUG | 1175 | #ifdef CONFIG_VIDEO_ADV_DEBUG |
1177 | /* | 1176 | /* |
@@ -1762,6 +1761,12 @@ static int vb2_start_streaming(struct vb2_queue *q) | |||
1762 | q->start_streaming_called = 0; | 1761 | q->start_streaming_called = 0; |
1763 | 1762 | ||
1764 | dprintk(1, "driver refused to start streaming\n"); | 1763 | dprintk(1, "driver refused to start streaming\n"); |
1764 | /* | ||
1765 | * If you see this warning, then the driver isn't cleaning up properly | ||
1766 | * after a failed start_streaming(). See the start_streaming() | ||
1767 | * documentation in videobuf2-core.h for more information how buffers | ||
1768 | * should be returned to vb2 in start_streaming(). | ||
1769 | */ | ||
1765 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { | 1770 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { |
1766 | unsigned i; | 1771 | unsigned i; |
1767 | 1772 | ||
@@ -1777,6 +1782,12 @@ static int vb2_start_streaming(struct vb2_queue *q) | |||
1777 | /* Must be zero now */ | 1782 | /* Must be zero now */ |
1778 | WARN_ON(atomic_read(&q->owned_by_drv_count)); | 1783 | WARN_ON(atomic_read(&q->owned_by_drv_count)); |
1779 | } | 1784 | } |
1785 | /* | ||
1786 | * If done_list is not empty, then start_streaming() didn't call | ||
1787 | * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or | ||
1788 | * STATE_DONE. | ||
1789 | */ | ||
1790 | WARN_ON(!list_empty(&q->done_list)); | ||
1780 | return ret; | 1791 | return ret; |
1781 | } | 1792 | } |
1782 | 1793 | ||
@@ -1812,6 +1823,7 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) | |||
1812 | */ | 1823 | */ |
1813 | list_add_tail(&vb->queued_entry, &q->queued_list); | 1824 | list_add_tail(&vb->queued_entry, &q->queued_list); |
1814 | q->queued_count++; | 1825 | q->queued_count++; |
1826 | q->waiting_for_buffers = false; | ||
1815 | vb->state = VB2_BUF_STATE_QUEUED; | 1827 | vb->state = VB2_BUF_STATE_QUEUED; |
1816 | if (V4L2_TYPE_IS_OUTPUT(q->type)) { | 1828 | if (V4L2_TYPE_IS_OUTPUT(q->type)) { |
1817 | /* | 1829 | /* |
@@ -2123,6 +2135,12 @@ static void __vb2_queue_cancel(struct vb2_queue *q) | |||
2123 | if (q->start_streaming_called) | 2135 | if (q->start_streaming_called) |
2124 | call_void_qop(q, stop_streaming, q); | 2136 | call_void_qop(q, stop_streaming, q); |
2125 | 2137 | ||
2138 | /* | ||
2139 | * If you see this warning, then the driver isn't cleaning up properly | ||
2140 | * in stop_streaming(). See the stop_streaming() documentation in | ||
2141 | * videobuf2-core.h for more information how buffers should be returned | ||
2142 | * to vb2 in stop_streaming(). | ||
2143 | */ | ||
2126 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { | 2144 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { |
2127 | for (i = 0; i < q->num_buffers; ++i) | 2145 | for (i = 0; i < q->num_buffers; ++i) |
2128 | if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) | 2146 | if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) |
@@ -2272,6 +2290,7 @@ static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) | |||
2272 | * their normal dequeued state. | 2290 | * their normal dequeued state. |
2273 | */ | 2291 | */ |
2274 | __vb2_queue_cancel(q); | 2292 | __vb2_queue_cancel(q); |
2293 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); | ||
2275 | 2294 | ||
2276 | dprintk(3, "successful\n"); | 2295 | dprintk(3, "successful\n"); |
2277 | return 0; | 2296 | return 0; |
@@ -2590,10 +2609,17 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) | |||
2590 | } | 2609 | } |
2591 | 2610 | ||
2592 | /* | 2611 | /* |
2593 | * There is nothing to wait for if no buffer has been queued and the | 2612 | * There is nothing to wait for if the queue isn't streaming, or if the |
2594 | * queue isn't streaming, or if the error flag is set. | 2613 | * error flag is set. |
2614 | */ | ||
2615 | if (!vb2_is_streaming(q) || q->error) | ||
2616 | return res | POLLERR; | ||
2617 | /* | ||
2618 | * For compatibility with vb1: if QBUF hasn't been called yet, then | ||
2619 | * return POLLERR as well. This only affects capture queues, output | ||
2620 | * queues will always initialize waiting_for_buffers to false. | ||
2595 | */ | 2621 | */ |
2596 | if ((list_empty(&q->queued_list) && !vb2_is_streaming(q)) || q->error) | 2622 | if (q->waiting_for_buffers) |
2597 | return res | POLLERR; | 2623 | return res | POLLERR; |
2598 | 2624 | ||
2599 | /* | 2625 | /* |
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index adefc31bb853..9b163a440f89 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c | |||
@@ -113,7 +113,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla | |||
113 | goto fail_pages_alloc; | 113 | goto fail_pages_alloc; |
114 | 114 | ||
115 | ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, | 115 | ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, |
116 | buf->num_pages, 0, size, gfp_flags); | 116 | buf->num_pages, 0, size, GFP_KERNEL); |
117 | if (ret) | 117 | if (ret) |
118 | goto fail_table_alloc; | 118 | goto fail_table_alloc; |
119 | 119 | ||
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig index a34a11d2fef2..63ca9841db10 100644 --- a/drivers/message/fusion/Kconfig +++ b/drivers/message/fusion/Kconfig | |||
@@ -29,7 +29,7 @@ config FUSION_SPI | |||
29 | config FUSION_FC | 29 | config FUSION_FC |
30 | tristate "Fusion MPT ScsiHost drivers for FC" | 30 | tristate "Fusion MPT ScsiHost drivers for FC" |
31 | depends on PCI && SCSI | 31 | depends on PCI && SCSI |
32 | select SCSI_FC_ATTRS | 32 | depends on SCSI_FC_ATTRS |
33 | ---help--- | 33 | ---help--- |
34 | SCSI HOST support for a Fiber Channel host adapters. | 34 | SCSI HOST support for a Fiber Channel host adapters. |
35 | 35 | ||
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 4fa8fef9147f..65cf7a7e05ea 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/export.h> | 16 | #include <linux/export.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/pm_runtime.h> | 18 | #include <linux/pm_runtime.h> |
19 | #include <linux/pm_domain.h> | ||
19 | #include <linux/acpi.h> | 20 | #include <linux/acpi.h> |
20 | 21 | ||
21 | #include <linux/mmc/card.h> | 22 | #include <linux/mmc/card.h> |
@@ -315,7 +316,7 @@ int sdio_add_func(struct sdio_func *func) | |||
315 | ret = device_add(&func->dev); | 316 | ret = device_add(&func->dev); |
316 | if (ret == 0) { | 317 | if (ret == 0) { |
317 | sdio_func_set_present(func); | 318 | sdio_func_set_present(func); |
318 | acpi_dev_pm_attach(&func->dev, false); | 319 | dev_pm_domain_attach(&func->dev, false); |
319 | } | 320 | } |
320 | 321 | ||
321 | return ret; | 322 | return ret; |
@@ -332,7 +333,7 @@ void sdio_remove_func(struct sdio_func *func) | |||
332 | if (!sdio_func_present(func)) | 333 | if (!sdio_func_present(func)) |
333 | return; | 334 | return; |
334 | 335 | ||
335 | acpi_dev_pm_detach(&func->dev, false); | 336 | dev_pm_domain_detach(&func->dev, false); |
336 | device_del(&func->dev); | 337 | device_del(&func->dev); |
337 | put_device(&func->dev); | 338 | put_device(&func->dev); |
338 | } | 339 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f0f5eab0fab1..798ae69fb63c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " | |||
175 | "the same MAC; 0 for none (default), " | 175 | "the same MAC; 0 for none (default), " |
176 | "1 for active, 2 for follow"); | 176 | "1 for active, 2 for follow"); |
177 | module_param(all_slaves_active, int, 0); | 177 | module_param(all_slaves_active, int, 0); |
178 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" | 178 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface " |
179 | "by setting active flag for all slaves; " | 179 | "by setting active flag for all slaves; " |
180 | "0 for never (default), 1 for always."); | 180 | "0 for never (default), 1 for always."); |
181 | module_param(resend_igmp, int, 0); | 181 | module_param(resend_igmp, int, 0); |
@@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
3659 | else | 3659 | else |
3660 | bond_xmit_slave_id(bond, skb, 0); | 3660 | bond_xmit_slave_id(bond, skb, 0); |
3661 | } else { | 3661 | } else { |
3662 | slave_id = bond_rr_gen_slave_id(bond); | 3662 | int slave_cnt = ACCESS_ONCE(bond->slave_cnt); |
3663 | bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt); | 3663 | |
3664 | if (likely(slave_cnt)) { | ||
3665 | slave_id = bond_rr_gen_slave_id(bond); | ||
3666 | bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); | ||
3667 | } else { | ||
3668 | dev_kfree_skb_any(skb); | ||
3669 | } | ||
3664 | } | 3670 | } |
3665 | 3671 | ||
3666 | return NETDEV_TX_OK; | 3672 | return NETDEV_TX_OK; |
@@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d | |||
3691 | static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) | 3697 | static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) |
3692 | { | 3698 | { |
3693 | struct bonding *bond = netdev_priv(bond_dev); | 3699 | struct bonding *bond = netdev_priv(bond_dev); |
3700 | int slave_cnt = ACCESS_ONCE(bond->slave_cnt); | ||
3694 | 3701 | ||
3695 | bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt); | 3702 | if (likely(slave_cnt)) |
3703 | bond_xmit_slave_id(bond, skb, | ||
3704 | bond_xmit_hash(bond, skb) % slave_cnt); | ||
3705 | else | ||
3706 | dev_kfree_skb_any(skb); | ||
3696 | 3707 | ||
3697 | return NETDEV_TX_OK; | 3708 | return NETDEV_TX_OK; |
3698 | } | 3709 | } |
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f07fa89b5fd5..05e1aa090add 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c | |||
@@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev) | |||
1123 | struct at91_priv *priv = netdev_priv(dev); | 1123 | struct at91_priv *priv = netdev_priv(dev); |
1124 | int err; | 1124 | int err; |
1125 | 1125 | ||
1126 | clk_enable(priv->clk); | 1126 | err = clk_prepare_enable(priv->clk); |
1127 | if (err) | ||
1128 | return err; | ||
1127 | 1129 | ||
1128 | /* check or determine and set bittime */ | 1130 | /* check or determine and set bittime */ |
1129 | err = open_candev(dev); | 1131 | err = open_candev(dev); |
@@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev) | |||
1149 | out_close: | 1151 | out_close: |
1150 | close_candev(dev); | 1152 | close_candev(dev); |
1151 | out: | 1153 | out: |
1152 | clk_disable(priv->clk); | 1154 | clk_disable_unprepare(priv->clk); |
1153 | 1155 | ||
1154 | return err; | 1156 | return err; |
1155 | } | 1157 | } |
@@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev) | |||
1166 | at91_chip_stop(dev, CAN_STATE_STOPPED); | 1168 | at91_chip_stop(dev, CAN_STATE_STOPPED); |
1167 | 1169 | ||
1168 | free_irq(dev->irq, dev); | 1170 | free_irq(dev->irq, dev); |
1169 | clk_disable(priv->clk); | 1171 | clk_disable_unprepare(priv->clk); |
1170 | 1172 | ||
1171 | close_candev(dev); | 1173 | close_candev(dev); |
1172 | 1174 | ||
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 109cb44291f5..fb279d6ae484 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c | |||
@@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable) | |||
97 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); | 97 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); |
98 | writel(ctrl, priv->raminit_ctrlreg); | 98 | writel(ctrl, priv->raminit_ctrlreg); |
99 | ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); | 99 | ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); |
100 | c_can_hw_raminit_wait_ti(priv, ctrl, mask); | 100 | c_can_hw_raminit_wait_ti(priv, mask, ctrl); |
101 | 101 | ||
102 | if (enable) { | 102 | if (enable) { |
103 | /* Set start bit and wait for the done bit. */ | 103 | /* Set start bit and wait for the done bit. */ |
104 | ctrl |= CAN_RAMINIT_START_MASK(priv->instance); | 104 | ctrl |= CAN_RAMINIT_START_MASK(priv->instance); |
105 | writel(ctrl, priv->raminit_ctrlreg); | 105 | writel(ctrl, priv->raminit_ctrlreg); |
106 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); | 106 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); |
107 | c_can_hw_raminit_wait_ti(priv, ctrl, mask); | 107 | c_can_hw_raminit_wait_ti(priv, mask, ctrl); |
108 | } | 108 | } |
109 | spin_unlock(&raminit_lock); | 109 | spin_unlock(&raminit_lock); |
110 | } | 110 | } |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 944aa5d3af6e..6586309329e6 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -62,7 +62,7 @@ | |||
62 | #define FLEXCAN_MCR_BCC BIT(16) | 62 | #define FLEXCAN_MCR_BCC BIT(16) |
63 | #define FLEXCAN_MCR_LPRIO_EN BIT(13) | 63 | #define FLEXCAN_MCR_LPRIO_EN BIT(13) |
64 | #define FLEXCAN_MCR_AEN BIT(12) | 64 | #define FLEXCAN_MCR_AEN BIT(12) |
65 | #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f) | 65 | #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) |
66 | #define FLEXCAN_MCR_IDAM_A (0 << 8) | 66 | #define FLEXCAN_MCR_IDAM_A (0 << 8) |
67 | #define FLEXCAN_MCR_IDAM_B (1 << 8) | 67 | #define FLEXCAN_MCR_IDAM_B (1 << 8) |
68 | #define FLEXCAN_MCR_IDAM_C (2 << 8) | 68 | #define FLEXCAN_MCR_IDAM_C (2 << 8) |
@@ -125,7 +125,9 @@ | |||
125 | FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) | 125 | FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) |
126 | 126 | ||
127 | /* FLEXCAN interrupt flag register (IFLAG) bits */ | 127 | /* FLEXCAN interrupt flag register (IFLAG) bits */ |
128 | #define FLEXCAN_TX_BUF_ID 8 | 128 | /* Errata ERR005829 step7: Reserve first valid MB */ |
129 | #define FLEXCAN_TX_BUF_RESERVED 8 | ||
130 | #define FLEXCAN_TX_BUF_ID 9 | ||
129 | #define FLEXCAN_IFLAG_BUF(x) BIT(x) | 131 | #define FLEXCAN_IFLAG_BUF(x) BIT(x) |
130 | #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) | 132 | #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) |
131 | #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) | 133 | #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) |
@@ -136,6 +138,17 @@ | |||
136 | 138 | ||
137 | /* FLEXCAN message buffers */ | 139 | /* FLEXCAN message buffers */ |
138 | #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) | 140 | #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) |
141 | #define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) | ||
142 | #define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) | ||
143 | #define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) | ||
144 | #define FLEXCAN_MB_CODE_RX_OVERRRUN (0x6 << 24) | ||
145 | #define FLEXCAN_MB_CODE_RX_RANSWER (0xa << 24) | ||
146 | |||
147 | #define FLEXCAN_MB_CODE_TX_INACTIVE (0x8 << 24) | ||
148 | #define FLEXCAN_MB_CODE_TX_ABORT (0x9 << 24) | ||
149 | #define FLEXCAN_MB_CODE_TX_DATA (0xc << 24) | ||
150 | #define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24) | ||
151 | |||
139 | #define FLEXCAN_MB_CNT_SRR BIT(22) | 152 | #define FLEXCAN_MB_CNT_SRR BIT(22) |
140 | #define FLEXCAN_MB_CNT_IDE BIT(21) | 153 | #define FLEXCAN_MB_CNT_IDE BIT(21) |
141 | #define FLEXCAN_MB_CNT_RTR BIT(20) | 154 | #define FLEXCAN_MB_CNT_RTR BIT(20) |
@@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv) | |||
298 | flexcan_write(reg, ®s->mcr); | 311 | flexcan_write(reg, ®s->mcr); |
299 | 312 | ||
300 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) | 313 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) |
301 | usleep_range(10, 20); | 314 | udelay(10); |
302 | 315 | ||
303 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) | 316 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) |
304 | return -ETIMEDOUT; | 317 | return -ETIMEDOUT; |
@@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) | |||
317 | flexcan_write(reg, ®s->mcr); | 330 | flexcan_write(reg, ®s->mcr); |
318 | 331 | ||
319 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) | 332 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) |
320 | usleep_range(10, 20); | 333 | udelay(10); |
321 | 334 | ||
322 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) | 335 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) |
323 | return -ETIMEDOUT; | 336 | return -ETIMEDOUT; |
@@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv) | |||
336 | flexcan_write(reg, ®s->mcr); | 349 | flexcan_write(reg, ®s->mcr); |
337 | 350 | ||
338 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) | 351 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) |
339 | usleep_range(100, 200); | 352 | udelay(100); |
340 | 353 | ||
341 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) | 354 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) |
342 | return -ETIMEDOUT; | 355 | return -ETIMEDOUT; |
@@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv) | |||
355 | flexcan_write(reg, ®s->mcr); | 368 | flexcan_write(reg, ®s->mcr); |
356 | 369 | ||
357 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) | 370 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) |
358 | usleep_range(10, 20); | 371 | udelay(10); |
359 | 372 | ||
360 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK) | 373 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK) |
361 | return -ETIMEDOUT; | 374 | return -ETIMEDOUT; |
@@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv) | |||
370 | 383 | ||
371 | flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); | 384 | flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); |
372 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST)) | 385 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST)) |
373 | usleep_range(10, 20); | 386 | udelay(10); |
374 | 387 | ||
375 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST) | 388 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST) |
376 | return -ETIMEDOUT; | 389 | return -ETIMEDOUT; |
@@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
428 | flexcan_write(can_id, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_id); | 441 | flexcan_write(can_id, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_id); |
429 | flexcan_write(ctrl, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | 442 | flexcan_write(ctrl, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); |
430 | 443 | ||
444 | /* Errata ERR005829 step8: | ||
445 | * Write twice INACTIVE(0x8) code to first MB. | ||
446 | */ | ||
447 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
448 | ®s->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); | ||
449 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
450 | ®s->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); | ||
451 | |||
431 | return NETDEV_TX_OK; | 452 | return NETDEV_TX_OK; |
432 | } | 453 | } |
433 | 454 | ||
@@ -744,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) | |||
744 | stats->tx_bytes += can_get_echo_skb(dev, 0); | 765 | stats->tx_bytes += can_get_echo_skb(dev, 0); |
745 | stats->tx_packets++; | 766 | stats->tx_packets++; |
746 | can_led_event(dev, CAN_LED_EVENT_TX); | 767 | can_led_event(dev, CAN_LED_EVENT_TX); |
768 | /* after sending a RTR frame mailbox is in RX mode */ | ||
769 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
770 | ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | ||
747 | flexcan_write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1); | 771 | flexcan_write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1); |
748 | netif_wake_queue(dev); | 772 | netif_wake_queue(dev); |
749 | } | 773 | } |
@@ -801,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev) | |||
801 | struct flexcan_regs __iomem *regs = priv->base; | 825 | struct flexcan_regs __iomem *regs = priv->base; |
802 | int err; | 826 | int err; |
803 | u32 reg_mcr, reg_ctrl; | 827 | u32 reg_mcr, reg_ctrl; |
828 | int i; | ||
804 | 829 | ||
805 | /* enable module */ | 830 | /* enable module */ |
806 | err = flexcan_chip_enable(priv); | 831 | err = flexcan_chip_enable(priv); |
@@ -867,8 +892,18 @@ static int flexcan_chip_start(struct net_device *dev) | |||
867 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); | 892 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); |
868 | flexcan_write(reg_ctrl, ®s->ctrl); | 893 | flexcan_write(reg_ctrl, ®s->ctrl); |
869 | 894 | ||
870 | /* Abort any pending TX, mark Mailbox as INACTIVE */ | 895 | /* clear and invalidate all mailboxes first */ |
871 | flexcan_write(FLEXCAN_MB_CNT_CODE(0x4), | 896 | for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) { |
897 | flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, | ||
898 | ®s->cantxfg[i].can_ctrl); | ||
899 | } | ||
900 | |||
901 | /* Errata ERR005829: mark first TX mailbox as INACTIVE */ | ||
902 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
903 | ®s->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); | ||
904 | |||
905 | /* mark TX mailbox as INACTIVE */ | ||
906 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
872 | ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | 907 | ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); |
873 | 908 | ||
874 | /* acceptance mask/acceptance code (accept everything) */ | 909 | /* acceptance mask/acceptance code (accept everything) */ |
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 7a85590fefb9..e5fac368068a 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c | |||
@@ -70,6 +70,8 @@ struct peak_pci_chan { | |||
70 | #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ | 70 | #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ |
71 | #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ | 71 | #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ |
72 | #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ | 72 | #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ |
73 | #define PEAK_PCIE_OEM_ID 0x0009 /* PCAN-PCI Express OEM */ | ||
74 | #define PEAK_PCIEC34_DEVICE_ID 0x000A /* PCAN-PCI Express 34 (one channel) */ | ||
73 | 75 | ||
74 | #define PEAK_PCI_CHAN_MAX 4 | 76 | #define PEAK_PCI_CHAN_MAX 4 |
75 | 77 | ||
@@ -87,6 +89,7 @@ static const struct pci_device_id peak_pci_tbl[] = { | |||
87 | {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, | 89 | {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
88 | #ifdef CONFIG_CAN_PEAK_PCIEC | 90 | #ifdef CONFIG_CAN_PEAK_PCIEC |
89 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, | 91 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
92 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, | ||
90 | #endif | 93 | #endif |
91 | {0,} | 94 | {0,} |
92 | }; | 95 | }; |
@@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
653 | * This must be done *before* register_sja1000dev() but | 656 | * This must be done *before* register_sja1000dev() but |
654 | * *after* devices linkage | 657 | * *after* devices linkage |
655 | */ | 658 | */ |
656 | if (pdev->device == PEAK_PCIEC_DEVICE_ID) { | 659 | if (pdev->device == PEAK_PCIEC_DEVICE_ID || |
660 | pdev->device == PEAK_PCIEC34_DEVICE_ID) { | ||
657 | err = peak_pciec_probe(pdev, dev); | 661 | err = peak_pciec_probe(pdev, dev); |
658 | if (err) { | 662 | if (err) { |
659 | dev_err(&pdev->dev, | 663 | dev_err(&pdev->dev, |
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 3fe45c705933..8ca49f04acec 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c | |||
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2129 | int entry = vp->cur_tx % TX_RING_SIZE; | 2129 | int entry = vp->cur_tx % TX_RING_SIZE; |
2130 | struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; | 2130 | struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; |
2131 | unsigned long flags; | 2131 | unsigned long flags; |
2132 | dma_addr_t dma_addr; | ||
2132 | 2133 | ||
2133 | if (vortex_debug > 6) { | 2134 | if (vortex_debug > 6) { |
2134 | pr_debug("boomerang_start_xmit()\n"); | 2135 | pr_debug("boomerang_start_xmit()\n"); |
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2163 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); | 2164 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); |
2164 | 2165 | ||
2165 | if (!skb_shinfo(skb)->nr_frags) { | 2166 | if (!skb_shinfo(skb)->nr_frags) { |
2166 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | 2167 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, |
2167 | skb->len, PCI_DMA_TODEVICE)); | 2168 | PCI_DMA_TODEVICE); |
2169 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
2170 | goto out_dma_err; | ||
2171 | |||
2172 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | ||
2168 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); | 2173 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); |
2169 | } else { | 2174 | } else { |
2170 | int i; | 2175 | int i; |
2171 | 2176 | ||
2172 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | 2177 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, |
2173 | skb_headlen(skb), PCI_DMA_TODEVICE)); | 2178 | skb_headlen(skb), PCI_DMA_TODEVICE); |
2179 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
2180 | goto out_dma_err; | ||
2181 | |||
2182 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | ||
2174 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); | 2183 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); |
2175 | 2184 | ||
2176 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 2185 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
2177 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2186 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2178 | 2187 | ||
2188 | dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, | ||
2189 | 0, | ||
2190 | frag->size, | ||
2191 | DMA_TO_DEVICE); | ||
2192 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { | ||
2193 | for(i = i-1; i >= 0; i--) | ||
2194 | dma_unmap_page(&VORTEX_PCI(vp)->dev, | ||
2195 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), | ||
2196 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), | ||
2197 | DMA_TO_DEVICE); | ||
2198 | |||
2199 | pci_unmap_single(VORTEX_PCI(vp), | ||
2200 | le32_to_cpu(vp->tx_ring[entry].frag[0].addr), | ||
2201 | le32_to_cpu(vp->tx_ring[entry].frag[0].length), | ||
2202 | PCI_DMA_TODEVICE); | ||
2203 | |||
2204 | goto out_dma_err; | ||
2205 | } | ||
2206 | |||
2179 | vp->tx_ring[entry].frag[i+1].addr = | 2207 | vp->tx_ring[entry].frag[i+1].addr = |
2180 | cpu_to_le32(skb_frag_dma_map( | 2208 | cpu_to_le32(dma_addr); |
2181 | &VORTEX_PCI(vp)->dev, | ||
2182 | frag, | ||
2183 | frag->page_offset, frag->size, DMA_TO_DEVICE)); | ||
2184 | 2209 | ||
2185 | if (i == skb_shinfo(skb)->nr_frags-1) | 2210 | if (i == skb_shinfo(skb)->nr_frags-1) |
2186 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); | 2211 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); |
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2189 | } | 2214 | } |
2190 | } | 2215 | } |
2191 | #else | 2216 | #else |
2192 | vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); | 2217 | dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); |
2218 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
2219 | goto out_dma_err; | ||
2220 | vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); | ||
2193 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); | 2221 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); |
2194 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); | 2222 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); |
2195 | #endif | 2223 | #endif |
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2217 | skb_tx_timestamp(skb); | 2245 | skb_tx_timestamp(skb); |
2218 | iowrite16(DownUnstall, ioaddr + EL3_CMD); | 2246 | iowrite16(DownUnstall, ioaddr + EL3_CMD); |
2219 | spin_unlock_irqrestore(&vp->lock, flags); | 2247 | spin_unlock_irqrestore(&vp->lock, flags); |
2248 | out: | ||
2220 | return NETDEV_TX_OK; | 2249 | return NETDEV_TX_OK; |
2250 | out_dma_err: | ||
2251 | dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); | ||
2252 | goto out; | ||
2221 | } | 2253 | } |
2222 | 2254 | ||
2223 | /* The interrupt handler does all of the Rx thread work and cleans up | 2255 | /* The interrupt handler does all of the Rx thread work and cleans up |
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index fe5cfeace6e3..5919394d9f58 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -30,6 +30,17 @@ | |||
30 | #define DRV_VERSION "1.0" | 30 | #define DRV_VERSION "1.0" |
31 | 31 | ||
32 | /** | 32 | /** |
33 | * arc_emac_tx_avail - Return the number of available slots in the tx ring. | ||
34 | * @priv: Pointer to ARC EMAC private data structure. | ||
35 | * | ||
36 | * returns: the number of slots available for transmission in tx the ring. | ||
37 | */ | ||
38 | static inline int arc_emac_tx_avail(struct arc_emac_priv *priv) | ||
39 | { | ||
40 | return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM; | ||
41 | } | ||
42 | |||
43 | /** | ||
33 | * arc_emac_adjust_link - Adjust the PHY link duplex. | 44 | * arc_emac_adjust_link - Adjust the PHY link duplex. |
34 | * @ndev: Pointer to the net_device structure. | 45 | * @ndev: Pointer to the net_device structure. |
35 | * | 46 | * |
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev) | |||
180 | txbd->info = 0; | 191 | txbd->info = 0; |
181 | 192 | ||
182 | *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; | 193 | *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; |
183 | |||
184 | if (netif_queue_stopped(ndev)) | ||
185 | netif_wake_queue(ndev); | ||
186 | } | 194 | } |
195 | |||
196 | /* Ensure that txbd_dirty is visible to tx() before checking | ||
197 | * for queue stopped. | ||
198 | */ | ||
199 | smp_mb(); | ||
200 | |||
201 | if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv)) | ||
202 | netif_wake_queue(ndev); | ||
187 | } | 203 | } |
188 | 204 | ||
189 | /** | 205 | /** |
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget) | |||
298 | work_done = arc_emac_rx(ndev, budget); | 314 | work_done = arc_emac_rx(ndev, budget); |
299 | if (work_done < budget) { | 315 | if (work_done < budget) { |
300 | napi_complete(napi); | 316 | napi_complete(napi); |
301 | arc_reg_or(priv, R_ENABLE, RXINT_MASK); | 317 | arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); |
302 | } | 318 | } |
303 | 319 | ||
304 | return work_done; | 320 | return work_done; |
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance) | |||
327 | /* Reset all flags except "MDIO complete" */ | 343 | /* Reset all flags except "MDIO complete" */ |
328 | arc_reg_set(priv, R_STATUS, status); | 344 | arc_reg_set(priv, R_STATUS, status); |
329 | 345 | ||
330 | if (status & RXINT_MASK) { | 346 | if (status & (RXINT_MASK | TXINT_MASK)) { |
331 | if (likely(napi_schedule_prep(&priv->napi))) { | 347 | if (likely(napi_schedule_prep(&priv->napi))) { |
332 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK); | 348 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); |
333 | __napi_schedule(&priv->napi); | 349 | __napi_schedule(&priv->napi); |
334 | } | 350 | } |
335 | } | 351 | } |
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev) | |||
440 | arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); | 456 | arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); |
441 | 457 | ||
442 | /* Enable interrupts */ | 458 | /* Enable interrupts */ |
443 | arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK); | 459 | arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); |
444 | 460 | ||
445 | /* Set CONTROL */ | 461 | /* Set CONTROL */ |
446 | arc_reg_set(priv, R_CTRL, | 462 | arc_reg_set(priv, R_CTRL, |
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev) | |||
511 | netif_stop_queue(ndev); | 527 | netif_stop_queue(ndev); |
512 | 528 | ||
513 | /* Disable interrupts */ | 529 | /* Disable interrupts */ |
514 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK); | 530 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); |
515 | 531 | ||
516 | /* Disable EMAC */ | 532 | /* Disable EMAC */ |
517 | arc_reg_clr(priv, R_CTRL, EN_MASK); | 533 | arc_reg_clr(priv, R_CTRL, EN_MASK); |
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
574 | 590 | ||
575 | len = max_t(unsigned int, ETH_ZLEN, skb->len); | 591 | len = max_t(unsigned int, ETH_ZLEN, skb->len); |
576 | 592 | ||
577 | /* EMAC still holds this buffer in its possession. | 593 | if (unlikely(!arc_emac_tx_avail(priv))) { |
578 | * CPU must not modify this buffer descriptor | ||
579 | */ | ||
580 | if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) { | ||
581 | netif_stop_queue(ndev); | 594 | netif_stop_queue(ndev); |
595 | netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n"); | ||
582 | return NETDEV_TX_BUSY; | 596 | return NETDEV_TX_BUSY; |
583 | } | 597 | } |
584 | 598 | ||
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
607 | /* Increment index to point to the next BD */ | 621 | /* Increment index to point to the next BD */ |
608 | *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; | 622 | *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; |
609 | 623 | ||
610 | /* Get "info" of the next BD */ | 624 | /* Ensure that tx_clean() sees the new txbd_curr before |
611 | info = &priv->txbd[*txbd_curr].info; | 625 | * checking the queue status. This prevents an unneeded wake |
626 | * of the queue in tx_clean(). | ||
627 | */ | ||
628 | smp_mb(); | ||
612 | 629 | ||
613 | /* Check if if Tx BD ring is full - next BD is still owned by EMAC */ | 630 | if (!arc_emac_tx_avail(priv)) { |
614 | if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) | ||
615 | netif_stop_queue(ndev); | 631 | netif_stop_queue(ndev); |
632 | /* Refresh tx_dirty */ | ||
633 | smp_mb(); | ||
634 | if (arc_emac_tx_avail(priv)) | ||
635 | netif_start_queue(ndev); | ||
636 | } | ||
616 | 637 | ||
617 | arc_reg_set(priv, R_STATUS, TXPL_MASK); | 638 | arc_reg_set(priv, R_STATUS, TXPL_MASK); |
618 | 639 | ||
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 4a7028d65912..d588136b23b9 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, | |||
1697 | hwstat->tx_underruns + | 1697 | hwstat->tx_underruns + |
1698 | hwstat->tx_excessive_cols + | 1698 | hwstat->tx_excessive_cols + |
1699 | hwstat->tx_late_cols); | 1699 | hwstat->tx_late_cols); |
1700 | nstat->multicast = hwstat->tx_multicast_pkts; | 1700 | nstat->multicast = hwstat->rx_multicast_pkts; |
1701 | nstat->collisions = hwstat->tx_total_cols; | 1701 | nstat->collisions = hwstat->tx_total_cols; |
1702 | 1702 | ||
1703 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + | 1703 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 6f4e18644bd4..d9b9170ed2fc 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, | |||
534 | while ((processed < to_process) && (processed < budget)) { | 534 | while ((processed < to_process) && (processed < budget)) { |
535 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | 535 | cb = &priv->rx_cbs[priv->rx_read_ptr]; |
536 | skb = cb->skb; | 536 | skb = cb->skb; |
537 | |||
538 | processed++; | ||
539 | priv->rx_read_ptr++; | ||
540 | |||
541 | if (priv->rx_read_ptr == priv->num_rx_bds) | ||
542 | priv->rx_read_ptr = 0; | ||
543 | |||
544 | /* We do not have a backing SKB, so we do not have a corresponding | ||
545 | * DMA mapping for this incoming packet since | ||
546 | * bcm_sysport_rx_refill always either has both skb and mapping | ||
547 | * or none. | ||
548 | */ | ||
549 | if (unlikely(!skb)) { | ||
550 | netif_err(priv, rx_err, ndev, "out of memory!\n"); | ||
551 | ndev->stats.rx_dropped++; | ||
552 | ndev->stats.rx_errors++; | ||
553 | goto refill; | ||
554 | } | ||
555 | |||
537 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), | 556 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), |
538 | RX_BUF_LENGTH, DMA_FROM_DEVICE); | 557 | RX_BUF_LENGTH, DMA_FROM_DEVICE); |
539 | 558 | ||
@@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, | |||
543 | status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & | 562 | status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & |
544 | DESC_STATUS_MASK; | 563 | DESC_STATUS_MASK; |
545 | 564 | ||
546 | processed++; | ||
547 | priv->rx_read_ptr++; | ||
548 | if (priv->rx_read_ptr == priv->num_rx_bds) | ||
549 | priv->rx_read_ptr = 0; | ||
550 | |||
551 | netif_dbg(priv, rx_status, ndev, | 565 | netif_dbg(priv, rx_status, ndev, |
552 | "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", | 566 | "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", |
553 | p_index, priv->rx_c_index, priv->rx_read_ptr, | 567 | p_index, priv->rx_c_index, priv->rx_read_ptr, |
554 | len, status); | 568 | len, status); |
555 | 569 | ||
556 | if (unlikely(!skb)) { | ||
557 | netif_err(priv, rx_err, ndev, "out of memory!\n"); | ||
558 | ndev->stats.rx_dropped++; | ||
559 | ndev->stats.rx_errors++; | ||
560 | goto refill; | ||
561 | } | ||
562 | |||
563 | if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { | 570 | if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { |
564 | netif_err(priv, rx_status, ndev, "fragmented packet!\n"); | 571 | netif_err(priv, rx_status, ndev, "fragmented packet!\n"); |
565 | ndev->stats.rx_dropped++; | 572 | ndev->stats.rx_dropped++; |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 2fee73b878c2..823d01c5684c 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
@@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) | |||
3236 | 3236 | ||
3237 | skb->protocol = eth_type_trans(skb, bp->dev); | 3237 | skb->protocol = eth_type_trans(skb, bp->dev); |
3238 | 3238 | ||
3239 | if ((len > (bp->dev->mtu + ETH_HLEN)) && | 3239 | if (len > (bp->dev->mtu + ETH_HLEN) && |
3240 | (ntohs(skb->protocol) != 0x8100)) { | 3240 | skb->protocol != htons(0x8100) && |
3241 | skb->protocol != htons(ETH_P_8021AD)) { | ||
3241 | 3242 | ||
3242 | dev_kfree_skb(skb); | 3243 | dev_kfree_skb(skb); |
3243 | goto next_rx; | 3244 | goto next_rx; |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 3f9d4de8173c..5cc9cae21ed5 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
875 | int last_tx_cn, last_c_index, num_tx_bds; | 875 | int last_tx_cn, last_c_index, num_tx_bds; |
876 | struct enet_cb *tx_cb_ptr; | 876 | struct enet_cb *tx_cb_ptr; |
877 | struct netdev_queue *txq; | 877 | struct netdev_queue *txq; |
878 | unsigned int bds_compl; | ||
878 | unsigned int c_index; | 879 | unsigned int c_index; |
879 | 880 | ||
880 | /* Compute how many buffers are transmitted since last xmit call */ | 881 | /* Compute how many buffers are transmitted since last xmit call */ |
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
899 | /* Reclaim transmitted buffers */ | 900 | /* Reclaim transmitted buffers */ |
900 | while (last_tx_cn-- > 0) { | 901 | while (last_tx_cn-- > 0) { |
901 | tx_cb_ptr = ring->cbs + last_c_index; | 902 | tx_cb_ptr = ring->cbs + last_c_index; |
903 | bds_compl = 0; | ||
902 | if (tx_cb_ptr->skb) { | 904 | if (tx_cb_ptr->skb) { |
905 | bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; | ||
903 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; | 906 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; |
904 | dma_unmap_single(&dev->dev, | 907 | dma_unmap_single(&dev->dev, |
905 | dma_unmap_addr(tx_cb_ptr, dma_addr), | 908 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
916 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); | 919 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); |
917 | } | 920 | } |
918 | dev->stats.tx_packets++; | 921 | dev->stats.tx_packets++; |
919 | ring->free_bds += 1; | 922 | ring->free_bds += bds_compl; |
920 | 923 | ||
921 | last_c_index++; | 924 | last_c_index++; |
922 | last_c_index &= (num_tx_bds - 1); | 925 | last_c_index &= (num_tx_bds - 1); |
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
1274 | 1277 | ||
1275 | while ((rxpktprocessed < rxpkttoprocess) && | 1278 | while ((rxpktprocessed < rxpkttoprocess) && |
1276 | (rxpktprocessed < budget)) { | 1279 | (rxpktprocessed < budget)) { |
1280 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | ||
1281 | skb = cb->skb; | ||
1282 | |||
1283 | rxpktprocessed++; | ||
1284 | |||
1285 | priv->rx_read_ptr++; | ||
1286 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | ||
1287 | |||
1288 | /* We do not have a backing SKB, so we do not have a | ||
1289 | * corresponding DMA mapping for this incoming packet since | ||
1290 | * bcmgenet_rx_refill always either has both skb and mapping or | ||
1291 | * none. | ||
1292 | */ | ||
1293 | if (unlikely(!skb)) { | ||
1294 | dev->stats.rx_dropped++; | ||
1295 | dev->stats.rx_errors++; | ||
1296 | goto refill; | ||
1297 | } | ||
1298 | |||
1277 | /* Unmap the packet contents such that we can use the | 1299 | /* Unmap the packet contents such that we can use the |
1278 | * RSV from the 64 bytes descriptor when enabled and save | 1300 | * RSV from the 64 bytes descriptor when enabled and save |
1279 | * a 32-bits register read | 1301 | * a 32-bits register read |
1280 | */ | 1302 | */ |
1281 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | ||
1282 | skb = cb->skb; | ||
1283 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), | 1303 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), |
1284 | priv->rx_buf_len, DMA_FROM_DEVICE); | 1304 | priv->rx_buf_len, DMA_FROM_DEVICE); |
1285 | 1305 | ||
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
1307 | __func__, p_index, priv->rx_c_index, | 1327 | __func__, p_index, priv->rx_c_index, |
1308 | priv->rx_read_ptr, dma_length_status); | 1328 | priv->rx_read_ptr, dma_length_status); |
1309 | 1329 | ||
1310 | rxpktprocessed++; | ||
1311 | |||
1312 | priv->rx_read_ptr++; | ||
1313 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | ||
1314 | |||
1315 | /* out of memory, just drop packets at the hardware level */ | ||
1316 | if (unlikely(!skb)) { | ||
1317 | dev->stats.rx_dropped++; | ||
1318 | dev->stats.rx_errors++; | ||
1319 | goto refill; | ||
1320 | } | ||
1321 | |||
1322 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { | 1330 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { |
1323 | netif_err(priv, rx_status, dev, | 1331 | netif_err(priv, rx_status, dev, |
1324 | "dropping fragmented packet!\n"); | 1332 | "dropping fragmented packet!\n"); |
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev) | |||
1736 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | 1744 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); |
1737 | } | 1745 | } |
1738 | 1746 | ||
1747 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | ||
1748 | { | ||
1749 | int ret = 0; | ||
1750 | int timeout = 0; | ||
1751 | u32 reg; | ||
1752 | |||
1753 | /* Disable TDMA to stop adding more frames in TX DMA */ | ||
1754 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
1755 | reg &= ~DMA_EN; | ||
1756 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
1757 | |||
1758 | /* Check TDMA status register to confirm TDMA is disabled */ | ||
1759 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
1760 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | ||
1761 | if (reg & DMA_DISABLED) | ||
1762 | break; | ||
1763 | |||
1764 | udelay(1); | ||
1765 | } | ||
1766 | |||
1767 | if (timeout == DMA_TIMEOUT_VAL) { | ||
1768 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | ||
1769 | ret = -ETIMEDOUT; | ||
1770 | } | ||
1771 | |||
1772 | /* Wait 10ms for packet drain in both tx and rx dma */ | ||
1773 | usleep_range(10000, 20000); | ||
1774 | |||
1775 | /* Disable RDMA */ | ||
1776 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
1777 | reg &= ~DMA_EN; | ||
1778 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
1779 | |||
1780 | timeout = 0; | ||
1781 | /* Check RDMA status register to confirm RDMA is disabled */ | ||
1782 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
1783 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | ||
1784 | if (reg & DMA_DISABLED) | ||
1785 | break; | ||
1786 | |||
1787 | udelay(1); | ||
1788 | } | ||
1789 | |||
1790 | if (timeout == DMA_TIMEOUT_VAL) { | ||
1791 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | ||
1792 | ret = -ETIMEDOUT; | ||
1793 | } | ||
1794 | |||
1795 | return ret; | ||
1796 | } | ||
1797 | |||
1739 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | 1798 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
1740 | { | 1799 | { |
1741 | int i; | 1800 | int i; |
1742 | 1801 | ||
1743 | /* disable DMA */ | 1802 | /* disable DMA */ |
1744 | bcmgenet_rdma_writel(priv, 0, DMA_CTRL); | 1803 | bcmgenet_dma_teardown(priv); |
1745 | bcmgenet_tdma_writel(priv, 0, DMA_CTRL); | ||
1746 | 1804 | ||
1747 | for (i = 0; i < priv->num_tx_bds; i++) { | 1805 | for (i = 0; i < priv->num_tx_bds; i++) { |
1748 | if (priv->tx_cbs[i].skb != NULL) { | 1806 | if (priv->tx_cbs[i].skb != NULL) { |
@@ -2101,57 +2159,6 @@ err_clk_disable: | |||
2101 | return ret; | 2159 | return ret; |
2102 | } | 2160 | } |
2103 | 2161 | ||
2104 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | ||
2105 | { | ||
2106 | int ret = 0; | ||
2107 | int timeout = 0; | ||
2108 | u32 reg; | ||
2109 | |||
2110 | /* Disable TDMA to stop adding more frames in TX DMA */ | ||
2111 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
2112 | reg &= ~DMA_EN; | ||
2113 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
2114 | |||
2115 | /* Check TDMA status register to confirm TDMA is disabled */ | ||
2116 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
2117 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | ||
2118 | if (reg & DMA_DISABLED) | ||
2119 | break; | ||
2120 | |||
2121 | udelay(1); | ||
2122 | } | ||
2123 | |||
2124 | if (timeout == DMA_TIMEOUT_VAL) { | ||
2125 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | ||
2126 | ret = -ETIMEDOUT; | ||
2127 | } | ||
2128 | |||
2129 | /* Wait 10ms for packet drain in both tx and rx dma */ | ||
2130 | usleep_range(10000, 20000); | ||
2131 | |||
2132 | /* Disable RDMA */ | ||
2133 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
2134 | reg &= ~DMA_EN; | ||
2135 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
2136 | |||
2137 | timeout = 0; | ||
2138 | /* Check RDMA status register to confirm RDMA is disabled */ | ||
2139 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
2140 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | ||
2141 | if (reg & DMA_DISABLED) | ||
2142 | break; | ||
2143 | |||
2144 | udelay(1); | ||
2145 | } | ||
2146 | |||
2147 | if (timeout == DMA_TIMEOUT_VAL) { | ||
2148 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | ||
2149 | ret = -ETIMEDOUT; | ||
2150 | } | ||
2151 | |||
2152 | return ret; | ||
2153 | } | ||
2154 | |||
2155 | static void bcmgenet_netif_stop(struct net_device *dev) | 2162 | static void bcmgenet_netif_stop(struct net_device *dev) |
2156 | { | 2163 | { |
2157 | struct bcmgenet_priv *priv = netdev_priv(dev); | 2164 | struct bcmgenet_priv *priv = netdev_priv(dev); |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index cb77ae93d89a..ba499489969a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
6918 | skb->protocol = eth_type_trans(skb, tp->dev); | 6918 | skb->protocol = eth_type_trans(skb, tp->dev); |
6919 | 6919 | ||
6920 | if (len > (tp->dev->mtu + ETH_HLEN) && | 6920 | if (len > (tp->dev->mtu + ETH_HLEN) && |
6921 | skb->protocol != htons(ETH_P_8021Q)) { | 6921 | skb->protocol != htons(ETH_P_8021Q) && |
6922 | skb->protocol != htons(ETH_P_8021AD)) { | ||
6922 | dev_kfree_skb_any(skb); | 6923 | dev_kfree_skb_any(skb); |
6923 | goto drop_it_no_recycle; | 6924 | goto drop_it_no_recycle; |
6924 | } | 6925 | } |
@@ -7914,8 +7915,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
7914 | 7915 | ||
7915 | entry = tnapi->tx_prod; | 7916 | entry = tnapi->tx_prod; |
7916 | base_flags = 0; | 7917 | base_flags = 0; |
7917 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
7918 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
7919 | 7918 | ||
7920 | mss = skb_shinfo(skb)->gso_size; | 7919 | mss = skb_shinfo(skb)->gso_size; |
7921 | if (mss) { | 7920 | if (mss) { |
@@ -7929,6 +7928,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
7929 | 7928 | ||
7930 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; | 7929 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; |
7931 | 7930 | ||
7931 | /* HW/FW can not correctly segment packets that have been | ||
7932 | * vlan encapsulated. | ||
7933 | */ | ||
7934 | if (skb->protocol == htons(ETH_P_8021Q) || | ||
7935 | skb->protocol == htons(ETH_P_8021AD)) | ||
7936 | return tg3_tso_bug(tp, tnapi, txq, skb); | ||
7937 | |||
7932 | if (!skb_is_gso_v6(skb)) { | 7938 | if (!skb_is_gso_v6(skb)) { |
7933 | if (unlikely((ETH_HLEN + hdr_len) > 80) && | 7939 | if (unlikely((ETH_HLEN + hdr_len) > 80) && |
7934 | tg3_flag(tp, TSO_BUG)) | 7940 | tg3_flag(tp, TSO_BUG)) |
@@ -7979,6 +7985,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
7979 | base_flags |= tsflags << 12; | 7985 | base_flags |= tsflags << 12; |
7980 | } | 7986 | } |
7981 | } | 7987 | } |
7988 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
7989 | /* HW/FW can not correctly checksum packets that have been | ||
7990 | * vlan encapsulated. | ||
7991 | */ | ||
7992 | if (skb->protocol == htons(ETH_P_8021Q) || | ||
7993 | skb->protocol == htons(ETH_P_8021AD)) { | ||
7994 | if (skb_checksum_help(skb)) | ||
7995 | goto drop; | ||
7996 | } else { | ||
7997 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
7998 | } | ||
7982 | } | 7999 | } |
7983 | 8000 | ||
7984 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && | 8001 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ca5d7798b265..e1e02fba4fcc 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/of_device.h> | 30 | #include <linux/of_device.h> |
31 | #include <linux/of_mdio.h> | 31 | #include <linux/of_mdio.h> |
32 | #include <linux/of_net.h> | 32 | #include <linux/of_net.h> |
33 | #include <linux/pinctrl/consumer.h> | ||
34 | 33 | ||
35 | #include "macb.h" | 34 | #include "macb.h" |
36 | 35 | ||
@@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev) | |||
2071 | struct phy_device *phydev; | 2070 | struct phy_device *phydev; |
2072 | u32 config; | 2071 | u32 config; |
2073 | int err = -ENXIO; | 2072 | int err = -ENXIO; |
2074 | struct pinctrl *pinctrl; | ||
2075 | const char *mac; | 2073 | const char *mac; |
2076 | 2074 | ||
2077 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2075 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev) | |||
2080 | goto err_out; | 2078 | goto err_out; |
2081 | } | 2079 | } |
2082 | 2080 | ||
2083 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); | ||
2084 | if (IS_ERR(pinctrl)) { | ||
2085 | err = PTR_ERR(pinctrl); | ||
2086 | if (err == -EPROBE_DEFER) | ||
2087 | goto err_out; | ||
2088 | |||
2089 | dev_warn(&pdev->dev, "No pinctrl provided\n"); | ||
2090 | } | ||
2091 | |||
2092 | err = -ENOMEM; | 2081 | err = -ENOMEM; |
2093 | dev = alloc_etherdev(sizeof(*bp)); | 2082 | dev = alloc_etherdev(sizeof(*bp)); |
2094 | if (!dev) | 2083 | if (!dev) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8c34811a1128..e5be511a3c38 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -6478,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6478 | struct port_info *pi; | 6478 | struct port_info *pi; |
6479 | bool highdma = false; | 6479 | bool highdma = false; |
6480 | struct adapter *adapter = NULL; | 6480 | struct adapter *adapter = NULL; |
6481 | void __iomem *regs; | ||
6481 | 6482 | ||
6482 | printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); | 6483 | printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); |
6483 | 6484 | ||
@@ -6494,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6494 | goto out_release_regions; | 6495 | goto out_release_regions; |
6495 | } | 6496 | } |
6496 | 6497 | ||
6498 | regs = pci_ioremap_bar(pdev, 0); | ||
6499 | if (!regs) { | ||
6500 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
6501 | err = -ENOMEM; | ||
6502 | goto out_disable_device; | ||
6503 | } | ||
6504 | |||
6505 | /* We control everything through one PF */ | ||
6506 | func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); | ||
6507 | if (func != ent->driver_data) { | ||
6508 | iounmap(regs); | ||
6509 | pci_disable_device(pdev); | ||
6510 | pci_save_state(pdev); /* to restore SR-IOV later */ | ||
6511 | goto sriov; | ||
6512 | } | ||
6513 | |||
6497 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 6514 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
6498 | highdma = true; | 6515 | highdma = true; |
6499 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 6516 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
6500 | if (err) { | 6517 | if (err) { |
6501 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " | 6518 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " |
6502 | "coherent allocations\n"); | 6519 | "coherent allocations\n"); |
6503 | goto out_disable_device; | 6520 | goto out_unmap_bar0; |
6504 | } | 6521 | } |
6505 | } else { | 6522 | } else { |
6506 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 6523 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
6507 | if (err) { | 6524 | if (err) { |
6508 | dev_err(&pdev->dev, "no usable DMA configuration\n"); | 6525 | dev_err(&pdev->dev, "no usable DMA configuration\n"); |
6509 | goto out_disable_device; | 6526 | goto out_unmap_bar0; |
6510 | } | 6527 | } |
6511 | } | 6528 | } |
6512 | 6529 | ||
@@ -6518,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6518 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | 6535 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); |
6519 | if (!adapter) { | 6536 | if (!adapter) { |
6520 | err = -ENOMEM; | 6537 | err = -ENOMEM; |
6521 | goto out_disable_device; | 6538 | goto out_unmap_bar0; |
6522 | } | 6539 | } |
6523 | 6540 | ||
6524 | adapter->workq = create_singlethread_workqueue("cxgb4"); | 6541 | adapter->workq = create_singlethread_workqueue("cxgb4"); |
@@ -6530,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6530 | /* PCI device has been enabled */ | 6547 | /* PCI device has been enabled */ |
6531 | adapter->flags |= DEV_ENABLED; | 6548 | adapter->flags |= DEV_ENABLED; |
6532 | 6549 | ||
6533 | adapter->regs = pci_ioremap_bar(pdev, 0); | 6550 | adapter->regs = regs; |
6534 | if (!adapter->regs) { | ||
6535 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
6536 | err = -ENOMEM; | ||
6537 | goto out_free_adapter; | ||
6538 | } | ||
6539 | |||
6540 | /* We control everything through one PF */ | ||
6541 | func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI)); | ||
6542 | if (func != ent->driver_data) { | ||
6543 | pci_save_state(pdev); /* to restore SR-IOV later */ | ||
6544 | goto sriov; | ||
6545 | } | ||
6546 | |||
6547 | adapter->pdev = pdev; | 6551 | adapter->pdev = pdev; |
6548 | adapter->pdev_dev = &pdev->dev; | 6552 | adapter->pdev_dev = &pdev->dev; |
6549 | adapter->mbox = func; | 6553 | adapter->mbox = func; |
@@ -6560,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6560 | 6564 | ||
6561 | err = t4_prep_adapter(adapter); | 6565 | err = t4_prep_adapter(adapter); |
6562 | if (err) | 6566 | if (err) |
6563 | goto out_unmap_bar0; | 6567 | goto out_free_adapter; |
6568 | |||
6564 | 6569 | ||
6565 | if (!is_t4(adapter->params.chip)) { | 6570 | if (!is_t4(adapter->params.chip)) { |
6566 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; | 6571 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; |
@@ -6577,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6577 | dev_err(&pdev->dev, | 6582 | dev_err(&pdev->dev, |
6578 | "Incorrect number of egress queues per page\n"); | 6583 | "Incorrect number of egress queues per page\n"); |
6579 | err = -EINVAL; | 6584 | err = -EINVAL; |
6580 | goto out_unmap_bar0; | 6585 | goto out_free_adapter; |
6581 | } | 6586 | } |
6582 | adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), | 6587 | adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), |
6583 | pci_resource_len(pdev, 2)); | 6588 | pci_resource_len(pdev, 2)); |
6584 | if (!adapter->bar2) { | 6589 | if (!adapter->bar2) { |
6585 | dev_err(&pdev->dev, "cannot map device bar2 region\n"); | 6590 | dev_err(&pdev->dev, "cannot map device bar2 region\n"); |
6586 | err = -ENOMEM; | 6591 | err = -ENOMEM; |
6587 | goto out_unmap_bar0; | 6592 | goto out_free_adapter; |
6588 | } | 6593 | } |
6589 | } | 6594 | } |
6590 | 6595 | ||
@@ -6722,13 +6727,13 @@ sriov: | |||
6722 | out_unmap_bar: | 6727 | out_unmap_bar: |
6723 | if (!is_t4(adapter->params.chip)) | 6728 | if (!is_t4(adapter->params.chip)) |
6724 | iounmap(adapter->bar2); | 6729 | iounmap(adapter->bar2); |
6725 | out_unmap_bar0: | ||
6726 | iounmap(adapter->regs); | ||
6727 | out_free_adapter: | 6730 | out_free_adapter: |
6728 | if (adapter->workq) | 6731 | if (adapter->workq) |
6729 | destroy_workqueue(adapter->workq); | 6732 | destroy_workqueue(adapter->workq); |
6730 | 6733 | ||
6731 | kfree(adapter); | 6734 | kfree(adapter); |
6735 | out_unmap_bar0: | ||
6736 | iounmap(regs); | ||
6732 | out_disable_device: | 6737 | out_disable_device: |
6733 | pci_disable_pcie_error_reporting(pdev); | 6738 | pci_disable_pcie_error_reporting(pdev); |
6734 | pci_disable_device(pdev); | 6739 | pci_disable_device(pdev); |
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 9b33057a9477..70089c29d307 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) | |||
1399 | const void *mac_addr; | 1399 | const void *mac_addr; |
1400 | 1400 | ||
1401 | if (!IS_ENABLED(CONFIG_OF) || !np) | 1401 | if (!IS_ENABLED(CONFIG_OF) || !np) |
1402 | return NULL; | 1402 | return ERR_PTR(-ENXIO); |
1403 | 1403 | ||
1404 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | 1404 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); |
1405 | if (!pdata) | 1405 | if (!pdata) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 65a4a0f88ea0..02a2e90d581a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( | |||
2389 | } | 2389 | } |
2390 | EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); | 2390 | EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); |
2391 | 2391 | ||
2392 | static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port) | ||
2393 | { | ||
2394 | struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); | ||
2395 | int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports) | ||
2396 | + 1; | ||
2397 | int max_port = min_port + | ||
2398 | bitmap_weight(actv_ports.ports, dev->caps.num_ports); | ||
2399 | |||
2400 | if (port < min_port) | ||
2401 | port = min_port; | ||
2402 | else if (port >= max_port) | ||
2403 | port = max_port - 1; | ||
2404 | |||
2405 | return port; | ||
2406 | } | ||
2407 | |||
2392 | int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) | 2408 | int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) |
2393 | { | 2409 | { |
2394 | struct mlx4_priv *priv = mlx4_priv(dev); | 2410 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) | |||
2402 | if (slave < 0) | 2418 | if (slave < 0) |
2403 | return -EINVAL; | 2419 | return -EINVAL; |
2404 | 2420 | ||
2421 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2405 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; | 2422 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; |
2406 | s_info->mac = mac; | 2423 | s_info->mac = mac; |
2407 | mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", | 2424 | mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", |
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) | |||
2428 | if (slave < 0) | 2445 | if (slave < 0) |
2429 | return -EINVAL; | 2446 | return -EINVAL; |
2430 | 2447 | ||
2448 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2431 | vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; | 2449 | vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; |
2432 | 2450 | ||
2433 | if ((0 == vlan) && (0 == qos)) | 2451 | if ((0 == vlan) && (0 == qos)) |
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, | |||
2455 | struct mlx4_priv *priv; | 2473 | struct mlx4_priv *priv; |
2456 | 2474 | ||
2457 | priv = mlx4_priv(dev); | 2475 | priv = mlx4_priv(dev); |
2476 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2458 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; | 2477 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; |
2459 | 2478 | ||
2460 | if (MLX4_VGT != vp_oper->state.default_vlan) { | 2479 | if (MLX4_VGT != vp_oper->state.default_vlan) { |
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) | |||
2482 | if (slave < 0) | 2501 | if (slave < 0) |
2483 | return -EINVAL; | 2502 | return -EINVAL; |
2484 | 2503 | ||
2504 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2485 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; | 2505 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; |
2486 | s_info->spoofchk = setting; | 2506 | s_info->spoofchk = setting; |
2487 | 2507 | ||
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat | |||
2535 | if (slave < 0) | 2555 | if (slave < 0) |
2536 | return -EINVAL; | 2556 | return -EINVAL; |
2537 | 2557 | ||
2558 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2538 | switch (link_state) { | 2559 | switch (link_state) { |
2539 | case IFLA_VF_LINK_STATE_AUTO: | 2560 | case IFLA_VF_LINK_STATE_AUTO: |
2540 | /* get current link state */ | 2561 | /* get current link state */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index e22f24f784fc..35ff2925110a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, | |||
487 | struct mlx4_en_dev *mdev = priv->mdev; | 487 | struct mlx4_en_dev *mdev = priv->mdev; |
488 | int err; | 488 | int err; |
489 | 489 | ||
490 | if (pause->autoneg) | ||
491 | return -EINVAL; | ||
492 | |||
490 | priv->prof->tx_pause = pause->tx_pause != 0; | 493 | priv->prof->tx_pause = pause->tx_pause != 0; |
491 | priv->prof->rx_pause = pause->rx_pause != 0; | 494 | priv->prof->rx_pause = pause->rx_pause != 0; |
492 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | 495 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7e2d5d57c598..871e3a5bda38 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); | |||
78 | #endif /* CONFIG_PCI_MSI */ | 78 | #endif /* CONFIG_PCI_MSI */ |
79 | 79 | ||
80 | static uint8_t num_vfs[3] = {0, 0, 0}; | 80 | static uint8_t num_vfs[3] = {0, 0, 0}; |
81 | static int num_vfs_argc = 3; | 81 | static int num_vfs_argc; |
82 | module_param_array(num_vfs, byte , &num_vfs_argc, 0444); | 82 | module_param_array(num_vfs, byte , &num_vfs_argc, 0444); |
83 | MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" | 83 | MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" |
84 | "num_vfs=port1,port2,port1+2"); | 84 | "num_vfs=port1,port2,port1+2"); |
85 | 85 | ||
86 | static uint8_t probe_vf[3] = {0, 0, 0}; | 86 | static uint8_t probe_vf[3] = {0, 0, 0}; |
87 | static int probe_vfs_argc = 3; | 87 | static int probe_vfs_argc; |
88 | module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); | 88 | module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); |
89 | MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" | 89 | MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" |
90 | "probe_vf=port1,port2,port1+2"); | 90 | "probe_vf=port1,port2,port1+2"); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 7d717eccb7b0..193a6adb5d04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
298 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); | 298 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); |
299 | } | 299 | } |
300 | 300 | ||
301 | /* Must protect against concurrent access */ | ||
301 | int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | 302 | int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, |
302 | struct mlx4_mpt_entry ***mpt_entry) | 303 | struct mlx4_mpt_entry ***mpt_entry) |
303 | { | 304 | { |
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | |||
305 | int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); | 306 | int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); |
306 | struct mlx4_cmd_mailbox *mailbox = NULL; | 307 | struct mlx4_cmd_mailbox *mailbox = NULL; |
307 | 308 | ||
308 | /* Make sure that at this point we have single-threaded access only */ | ||
309 | |||
310 | if (mmr->enabled != MLX4_MPT_EN_HW) | 309 | if (mmr->enabled != MLX4_MPT_EN_HW) |
311 | return -EINVAL; | 310 | return -EINVAL; |
312 | 311 | ||
313 | err = mlx4_HW2SW_MPT(dev, NULL, key); | 312 | err = mlx4_HW2SW_MPT(dev, NULL, key); |
314 | |||
315 | if (err) { | 313 | if (err) { |
316 | mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); | 314 | mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); |
317 | mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); | 315 | mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); |
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | |||
333 | 0, MLX4_CMD_QUERY_MPT, | 331 | 0, MLX4_CMD_QUERY_MPT, |
334 | MLX4_CMD_TIME_CLASS_B, | 332 | MLX4_CMD_TIME_CLASS_B, |
335 | MLX4_CMD_WRAPPED); | 333 | MLX4_CMD_WRAPPED); |
336 | |||
337 | if (err) | 334 | if (err) |
338 | goto free_mailbox; | 335 | goto free_mailbox; |
339 | 336 | ||
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | |||
378 | err = mlx4_SW2HW_MPT(dev, mailbox, key); | 375 | err = mlx4_SW2HW_MPT(dev, mailbox, key); |
379 | } | 376 | } |
380 | 377 | ||
381 | mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; | 378 | if (!err) { |
382 | if (!err) | 379 | mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; |
383 | mmr->enabled = MLX4_MPT_EN_HW; | 380 | mmr->enabled = MLX4_MPT_EN_HW; |
381 | } | ||
384 | return err; | 382 | return err; |
385 | } | 383 | } |
386 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); | 384 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); |
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt); | |||
400 | int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, | 398 | int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, |
401 | u32 pdn) | 399 | u32 pdn) |
402 | { | 400 | { |
403 | u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags); | 401 | u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK; |
404 | /* The wrapper function will put the slave's id here */ | 402 | /* The wrapper function will put the slave's id here */ |
405 | if (mlx4_is_mfunc(dev)) | 403 | if (mlx4_is_mfunc(dev)) |
406 | pd_flags &= ~MLX4_MPT_PD_VF_MASK; | 404 | pd_flags &= ~MLX4_MPT_PD_VF_MASK; |
407 | mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) | | 405 | |
406 | mpt_entry->pd_flags = cpu_to_be32(pd_flags | | ||
408 | (pdn & MLX4_MPT_PD_MASK) | 407 | (pdn & MLX4_MPT_PD_MASK) |
409 | | MLX4_MPT_PD_FLAG_EN_INV); | 408 | | MLX4_MPT_PD_FLAG_EN_INV); |
410 | return 0; | 409 | return 0; |
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
600 | { | 599 | { |
601 | int err; | 600 | int err; |
602 | 601 | ||
603 | mpt_entry->start = cpu_to_be64(mr->iova); | 602 | mpt_entry->start = cpu_to_be64(iova); |
604 | mpt_entry->length = cpu_to_be64(mr->size); | 603 | mpt_entry->length = cpu_to_be64(size); |
605 | mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); | 604 | mpt_entry->entity_size = cpu_to_be32(page_shift); |
606 | 605 | ||
607 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); | 606 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); |
608 | if (err) | 607 | if (err) |
609 | return err; | 608 | return err; |
610 | 609 | ||
610 | mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | | ||
611 | MLX4_MPT_PD_FLAG_EN_INV); | ||
612 | mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | | ||
613 | MLX4_MPT_FLAG_SW_OWNS); | ||
611 | if (mr->mtt.order < 0) { | 614 | if (mr->mtt.order < 0) { |
612 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); | 615 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); |
613 | mpt_entry->mtt_addr = 0; | 616 | mpt_entry->mtt_addr = 0; |
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
617 | if (mr->mtt.page_shift == 0) | 620 | if (mr->mtt.page_shift == 0) |
618 | mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); | 621 | mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); |
619 | } | 622 | } |
623 | if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { | ||
624 | /* fast register MR in free state */ | ||
625 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); | ||
626 | mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | | ||
627 | MLX4_MPT_PD_FLAG_RAE); | ||
628 | } else { | ||
629 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); | ||
630 | } | ||
620 | mr->enabled = MLX4_MPT_EN_SW; | 631 | mr->enabled = MLX4_MPT_EN_SW; |
621 | 632 | ||
622 | return 0; | 633 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 9ba0c1ca10d5..94eeb2c7d7e4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev, | |||
103 | int i; | 103 | int i; |
104 | 104 | ||
105 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | 105 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { |
106 | if ((mac & MLX4_MAC_MASK) == | 106 | if (table->refs[i] && |
107 | (MLX4_MAC_MASK & mac) == | ||
107 | (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) | 108 | (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) |
108 | return i; | 109 | return i; |
109 | } | 110 | } |
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) | |||
165 | 166 | ||
166 | mutex_lock(&table->mutex); | 167 | mutex_lock(&table->mutex); |
167 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | 168 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { |
168 | if (free < 0 && !table->entries[i]) { | 169 | if (!table->refs[i]) { |
169 | free = i; | 170 | if (free < 0) |
171 | free = i; | ||
170 | continue; | 172 | continue; |
171 | } | 173 | } |
172 | 174 | ||
173 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | 175 | if ((MLX4_MAC_MASK & mac) == |
176 | (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | ||
174 | /* MAC already registered, increment ref count */ | 177 | /* MAC already registered, increment ref count */ |
175 | err = i; | 178 | err = i; |
176 | ++table->refs[i]; | 179 | ++table->refs[i]; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 0dc31d85fc3b..2301365c79c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
@@ -390,13 +390,14 @@ err_icm: | |||
390 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); | 390 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); |
391 | 391 | ||
392 | #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC | 392 | #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC |
393 | int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, | 393 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
394 | enum mlx4_update_qp_attr attr, | 394 | enum mlx4_update_qp_attr attr, |
395 | struct mlx4_update_qp_params *params) | 395 | struct mlx4_update_qp_params *params) |
396 | { | 396 | { |
397 | struct mlx4_cmd_mailbox *mailbox; | 397 | struct mlx4_cmd_mailbox *mailbox; |
398 | struct mlx4_update_qp_context *cmd; | 398 | struct mlx4_update_qp_context *cmd; |
399 | u64 pri_addr_path_mask = 0; | 399 | u64 pri_addr_path_mask = 0; |
400 | u64 qp_mask = 0; | ||
400 | int err = 0; | 401 | int err = 0; |
401 | 402 | ||
402 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 403 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, | |||
413 | cmd->qp_context.pri_path.grh_mylmc = params->smac_index; | 414 | cmd->qp_context.pri_path.grh_mylmc = params->smac_index; |
414 | } | 415 | } |
415 | 416 | ||
417 | if (attr & MLX4_UPDATE_QP_VSD) { | ||
418 | qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD; | ||
419 | if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE) | ||
420 | cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN); | ||
421 | } | ||
422 | |||
416 | cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); | 423 | cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); |
424 | cmd->qp_mask = cpu_to_be64(qp_mask); | ||
417 | 425 | ||
418 | err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, | 426 | err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0, |
419 | MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, | 427 | MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, |
420 | MLX4_CMD_NATIVE); | 428 | MLX4_CMD_NATIVE); |
421 | 429 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1089367fed22..5d2498dcf536 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
702 | struct mlx4_qp_context *qpc = inbox->buf + 8; | 702 | struct mlx4_qp_context *qpc = inbox->buf + 8; |
703 | struct mlx4_vport_oper_state *vp_oper; | 703 | struct mlx4_vport_oper_state *vp_oper; |
704 | struct mlx4_priv *priv; | 704 | struct mlx4_priv *priv; |
705 | u32 qp_type; | ||
705 | int port; | 706 | int port; |
706 | 707 | ||
707 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; | 708 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; |
708 | priv = mlx4_priv(dev); | 709 | priv = mlx4_priv(dev); |
709 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; | 710 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; |
711 | qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; | ||
710 | 712 | ||
711 | if (MLX4_VGT != vp_oper->state.default_vlan) { | 713 | if (MLX4_VGT != vp_oper->state.default_vlan) { |
712 | /* the reserved QPs (special, proxy, tunnel) | 714 | /* the reserved QPs (special, proxy, tunnel) |
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
715 | if (mlx4_is_qp_reserved(dev, qpn)) | 717 | if (mlx4_is_qp_reserved(dev, qpn)) |
716 | return 0; | 718 | return 0; |
717 | 719 | ||
718 | /* force strip vlan by clear vsd */ | 720 | /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */ |
719 | qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); | 721 | if (qp_type == MLX4_QP_ST_UD || |
722 | (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) { | ||
723 | if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) { | ||
724 | *(__be32 *)inbox->buf = | ||
725 | cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | | ||
726 | MLX4_QP_OPTPAR_VLAN_STRIPPING); | ||
727 | qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); | ||
728 | } else { | ||
729 | struct mlx4_update_qp_params params = {.flags = 0}; | ||
730 | |||
731 | mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); | ||
732 | } | ||
733 | } | ||
720 | 734 | ||
721 | if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && | 735 | if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && |
722 | dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { | 736 | dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { |
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
3998 | } | 4012 | } |
3999 | 4013 | ||
4000 | port = (rqp->sched_queue >> 6 & 1) + 1; | 4014 | port = (rqp->sched_queue >> 6 & 1) + 1; |
4001 | smac_index = cmd->qp_context.pri_path.grh_mylmc; | 4015 | |
4002 | err = mac_find_smac_ix_in_slave(dev, slave, port, | 4016 | if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) { |
4003 | smac_index, &mac); | 4017 | smac_index = cmd->qp_context.pri_path.grh_mylmc; |
4004 | if (err) { | 4018 | err = mac_find_smac_ix_in_slave(dev, slave, port, |
4005 | mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", | 4019 | smac_index, &mac); |
4006 | qpn, smac_index); | 4020 | |
4007 | goto err_mac; | 4021 | if (err) { |
4022 | mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", | ||
4023 | qpn, smac_index); | ||
4024 | goto err_mac; | ||
4025 | } | ||
4008 | } | 4026 | } |
4009 | 4027 | ||
4010 | err = mlx4_cmd(dev, inbox->dma, | 4028 | err = mlx4_cmd(dev, inbox->dma, |
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) | |||
4818 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; | 4836 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; |
4819 | 4837 | ||
4820 | upd_context = mailbox->buf; | 4838 | upd_context = mailbox->buf; |
4821 | upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD); | 4839 | upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); |
4822 | 4840 | ||
4823 | spin_lock_irq(mlx4_tlock(dev)); | 4841 | spin_lock_irq(mlx4_tlock(dev)); |
4824 | list_for_each_entry_safe(qp, tmp, qp_list, com.list) { | 4842 | list_for_each_entry_safe(qp, tmp, qp_list, com.list) { |
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 979c6980639f..a42293092ea4 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c | |||
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) | |||
290 | /* Read the hardware TX timestamp if one was recorded */ | 290 | /* Read the hardware TX timestamp if one was recorded */ |
291 | if (unlikely(re.s.tstamp)) { | 291 | if (unlikely(re.s.tstamp)) { |
292 | struct skb_shared_hwtstamps ts; | 292 | struct skb_shared_hwtstamps ts; |
293 | u64 ns; | ||
294 | |||
293 | memset(&ts, 0, sizeof(ts)); | 295 | memset(&ts, 0, sizeof(ts)); |
294 | /* Read the timestamp */ | 296 | /* Read the timestamp */ |
295 | u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); | 297 | ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); |
296 | /* Remove the timestamp from the FIFO */ | 298 | /* Remove the timestamp from the FIFO */ |
297 | cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); | 299 | cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); |
298 | /* Tell the kernel about the timestamp */ | 300 | /* Tell the kernel about the timestamp */ |
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index 44c8be1c6805..5f7a35212796 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | |||
@@ -7,6 +7,7 @@ config PCH_GBE | |||
7 | depends on PCI && (X86_32 || COMPILE_TEST) | 7 | depends on PCI && (X86_32 || COMPILE_TEST) |
8 | select MII | 8 | select MII |
9 | select PTP_1588_CLOCK_PCH | 9 | select PTP_1588_CLOCK_PCH |
10 | select NET_PTP_CLASSIFY | ||
10 | ---help--- | 11 | ---help--- |
11 | This is a gigabit ethernet driver for EG20T PCH. | 12 | This is a gigabit ethernet driver for EG20T PCH. |
12 | EG20T PCH is the platform controller hub that is used in Intel's | 13 | EG20T PCH is the platform controller hub that is used in Intel's |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 32058614151a..5c4068353f66 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | |||
@@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
135 | int i, j; | 135 | int i, j; |
136 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; | 136 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; |
137 | 137 | ||
138 | spin_lock(&adapter->tx_clean_lock); | ||
138 | cmd_buf = tx_ring->cmd_buf_arr; | 139 | cmd_buf = tx_ring->cmd_buf_arr; |
139 | for (i = 0; i < tx_ring->num_desc; i++) { | 140 | for (i = 0; i < tx_ring->num_desc; i++) { |
140 | buffrag = cmd_buf->frag_array; | 141 | buffrag = cmd_buf->frag_array; |
@@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
158 | } | 159 | } |
159 | cmd_buf++; | 160 | cmd_buf++; |
160 | } | 161 | } |
162 | spin_unlock(&adapter->tx_clean_lock); | ||
161 | } | 163 | } |
162 | 164 | ||
163 | void netxen_free_sw_resources(struct netxen_adapter *adapter) | 165 | void netxen_free_sw_resources(struct netxen_adapter *adapter) |
@@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) | |||
1792 | break; | 1794 | break; |
1793 | } | 1795 | } |
1794 | 1796 | ||
1795 | if (count && netif_running(netdev)) { | 1797 | tx_ring->sw_consumer = sw_consumer; |
1796 | tx_ring->sw_consumer = sw_consumer; | ||
1797 | 1798 | ||
1799 | if (count && netif_running(netdev)) { | ||
1798 | smp_mb(); | 1800 | smp_mb(); |
1799 | 1801 | ||
1800 | if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) | 1802 | if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 1159031f885b..5ec5a2b0e989 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
@@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) | |||
1186 | return; | 1186 | return; |
1187 | 1187 | ||
1188 | smp_mb(); | 1188 | smp_mb(); |
1189 | spin_lock(&adapter->tx_clean_lock); | ||
1190 | netif_carrier_off(netdev); | 1189 | netif_carrier_off(netdev); |
1191 | netif_tx_disable(netdev); | 1190 | netif_tx_disable(netdev); |
1192 | 1191 | ||
@@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) | |||
1204 | netxen_napi_disable(adapter); | 1203 | netxen_napi_disable(adapter); |
1205 | 1204 | ||
1206 | netxen_release_tx_buffers(adapter); | 1205 | netxen_release_tx_buffers(adapter); |
1207 | spin_unlock(&adapter->tx_clean_lock); | ||
1208 | } | 1206 | } |
1209 | 1207 | ||
1210 | /* Usage: During suspend and firmware recovery module */ | 1208 | /* Usage: During suspend and firmware recovery module */ |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 86783e1afcf7..3172cdf591fe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
@@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) | |||
1177 | { | 1177 | { |
1178 | u32 idc_params, val; | 1178 | u32 idc_params, val; |
1179 | 1179 | ||
1180 | if (qlcnic_83xx_lockless_flash_read32(adapter, | 1180 | if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR, |
1181 | QLC_83XX_IDC_FLASH_PARAM_ADDR, | 1181 | (u8 *)&idc_params, 1)) { |
1182 | (u8 *)&idc_params, 1)) { | ||
1183 | dev_info(&adapter->pdev->dev, | 1182 | dev_info(&adapter->pdev->dev, |
1184 | "%s:failed to get IDC params from flash\n", __func__); | 1183 | "%s:failed to get IDC params from flash\n", __func__); |
1185 | adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; | 1184 | adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 141f116eb868..494e8105adee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
@@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, | |||
1333 | struct qlcnic_host_tx_ring *tx_ring; | 1333 | struct qlcnic_host_tx_ring *tx_ring; |
1334 | struct qlcnic_esw_statistics port_stats; | 1334 | struct qlcnic_esw_statistics port_stats; |
1335 | struct qlcnic_mac_statistics mac_stats; | 1335 | struct qlcnic_mac_statistics mac_stats; |
1336 | int index, ret, length, size, tx_size, ring; | 1336 | int index, ret, length, size, ring; |
1337 | char *p; | 1337 | char *p; |
1338 | 1338 | ||
1339 | tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN; | 1339 | memset(data, 0, stats->n_stats * sizeof(u64)); |
1340 | 1340 | ||
1341 | memset(data, 0, tx_size * sizeof(u64)); | ||
1342 | for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { | 1341 | for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { |
1343 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { | 1342 | if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { |
1344 | tx_ring = &adapter->tx_ring[ring]; | 1343 | tx_ring = &adapter->tx_ring[ring]; |
1345 | data = qlcnic_fill_tx_queue_stats(data, tx_ring); | 1344 | data = qlcnic_fill_tx_queue_stats(data, tx_ring); |
1346 | qlcnic_update_stats(adapter); | 1345 | qlcnic_update_stats(adapter); |
1346 | } else { | ||
1347 | data += QLCNIC_TX_STATS_LEN; | ||
1347 | } | 1348 | } |
1348 | } | 1349 | } |
1349 | 1350 | ||
1350 | memset(data, 0, stats->n_stats * sizeof(u64)); | ||
1351 | length = QLCNIC_STATS_LEN; | 1351 | length = QLCNIC_STATS_LEN; |
1352 | for (index = 0; index < length; index++) { | 1352 | for (index = 0; index < length; index++) { |
1353 | p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; | 1353 | p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 91652e7235e4..0921302553c6 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev, | |||
1783 | netdev_features_t features) | 1783 | netdev_features_t features) |
1784 | { | 1784 | { |
1785 | struct rtl8169_private *tp = netdev_priv(dev); | 1785 | struct rtl8169_private *tp = netdev_priv(dev); |
1786 | netdev_features_t changed = features ^ dev->features; | ||
1787 | void __iomem *ioaddr = tp->mmio_addr; | 1786 | void __iomem *ioaddr = tp->mmio_addr; |
1787 | u32 rx_config; | ||
1788 | 1788 | ||
1789 | if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | | 1789 | rx_config = RTL_R32(RxConfig); |
1790 | NETIF_F_HW_VLAN_CTAG_RX))) | 1790 | if (features & NETIF_F_RXALL) |
1791 | return; | 1791 | rx_config |= (AcceptErr | AcceptRunt); |
1792 | else | ||
1793 | rx_config &= ~(AcceptErr | AcceptRunt); | ||
1792 | 1794 | ||
1793 | if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) { | 1795 | RTL_W32(RxConfig, rx_config); |
1794 | if (features & NETIF_F_RXCSUM) | ||
1795 | tp->cp_cmd |= RxChkSum; | ||
1796 | else | ||
1797 | tp->cp_cmd &= ~RxChkSum; | ||
1798 | 1796 | ||
1799 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) | 1797 | if (features & NETIF_F_RXCSUM) |
1800 | tp->cp_cmd |= RxVlan; | 1798 | tp->cp_cmd |= RxChkSum; |
1801 | else | 1799 | else |
1802 | tp->cp_cmd &= ~RxVlan; | 1800 | tp->cp_cmd &= ~RxChkSum; |
1803 | 1801 | ||
1804 | RTL_W16(CPlusCmd, tp->cp_cmd); | 1802 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
1805 | RTL_R16(CPlusCmd); | 1803 | tp->cp_cmd |= RxVlan; |
1806 | } | 1804 | else |
1807 | if (changed & NETIF_F_RXALL) { | 1805 | tp->cp_cmd &= ~RxVlan; |
1808 | int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt)); | 1806 | |
1809 | if (features & NETIF_F_RXALL) | 1807 | tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum); |
1810 | tmp |= (AcceptErr | AcceptRunt); | 1808 | |
1811 | RTL_W32(RxConfig, tmp); | 1809 | RTL_W16(CPlusCmd, tp->cp_cmd); |
1812 | } | 1810 | RTL_R16(CPlusCmd); |
1813 | } | 1811 | } |
1814 | 1812 | ||
1815 | static int rtl8169_set_features(struct net_device *dev, | 1813 | static int rtl8169_set_features(struct net_device *dev, |
@@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev, | |||
1817 | { | 1815 | { |
1818 | struct rtl8169_private *tp = netdev_priv(dev); | 1816 | struct rtl8169_private *tp = netdev_priv(dev); |
1819 | 1817 | ||
1818 | features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; | ||
1819 | |||
1820 | rtl_lock_work(tp); | 1820 | rtl_lock_work(tp); |
1821 | __rtl8169_set_features(dev, features); | 1821 | if (features ^ dev->features) |
1822 | __rtl8169_set_features(dev, features); | ||
1822 | rtl_unlock_work(tp); | 1823 | rtl_unlock_work(tp); |
1823 | 1824 | ||
1824 | return 0; | 1825 | return 0; |
@@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) | |||
7118 | } | 7119 | } |
7119 | } | 7120 | } |
7120 | 7121 | ||
7121 | static int | 7122 | static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
7122 | rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
7123 | { | 7123 | { |
7124 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; | 7124 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; |
7125 | const unsigned int region = cfg->region; | 7125 | const unsigned int region = cfg->region; |
@@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7194 | goto err_out_mwi_2; | 7194 | goto err_out_mwi_2; |
7195 | } | 7195 | } |
7196 | 7196 | ||
7197 | tp->cp_cmd = RxChkSum; | 7197 | tp->cp_cmd = 0; |
7198 | 7198 | ||
7199 | if ((sizeof(dma_addr_t) > 4) && | 7199 | if ((sizeof(dma_addr_t) > 4) && |
7200 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { | 7200 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { |
@@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7235 | 7235 | ||
7236 | pci_set_master(pdev); | 7236 | pci_set_master(pdev); |
7237 | 7237 | ||
7238 | /* | ||
7239 | * Pretend we are using VLANs; This bypasses a nasty bug where | ||
7240 | * Interrupts stop flowing on high load on 8110SCd controllers. | ||
7241 | */ | ||
7242 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) | ||
7243 | tp->cp_cmd |= RxVlan; | ||
7244 | |||
7245 | rtl_init_mdio_ops(tp); | 7238 | rtl_init_mdio_ops(tp); |
7246 | rtl_init_pll_power_ops(tp); | 7239 | rtl_init_pll_power_ops(tp); |
7247 | rtl_init_jumbo_ops(tp); | 7240 | rtl_init_jumbo_ops(tp); |
@@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7302 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | | 7295 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | |
7303 | NETIF_F_HIGHDMA; | 7296 | NETIF_F_HIGHDMA; |
7304 | 7297 | ||
7298 | tp->cp_cmd |= RxChkSum | RxVlan; | ||
7299 | |||
7300 | /* | ||
7301 | * Pretend we are using VLANs; This bypasses a nasty bug where | ||
7302 | * Interrupts stop flowing on high load on 8110SCd controllers. | ||
7303 | */ | ||
7305 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) | 7304 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) |
7306 | /* 8110SCd requires hardware Rx VLAN - disallow toggling */ | 7305 | /* Disallow toggling */ |
7307 | dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; | 7306 | dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; |
7308 | 7307 | ||
7309 | if (tp->txd_version == RTL_TD_0) | 7308 | if (tp->txd_version == RTL_TD_0) |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 0537381cd2f6..6859437b59fb 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) | |||
2933 | u32 crc; | 2933 | u32 crc; |
2934 | int bit; | 2934 | int bit; |
2935 | 2935 | ||
2936 | if (!efx_dev_registered(efx)) | ||
2937 | return; | ||
2938 | |||
2936 | netif_addr_lock_bh(net_dev); | 2939 | netif_addr_lock_bh(net_dev); |
2937 | 2940 | ||
2938 | efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); | 2941 | efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6e6ee226de04..b0c1521e08a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -2786,8 +2786,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, | |||
2786 | if (IS_ERR(priv->stmmac_clk)) { | 2786 | if (IS_ERR(priv->stmmac_clk)) { |
2787 | dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", | 2787 | dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", |
2788 | __func__); | 2788 | __func__); |
2789 | ret = PTR_ERR(priv->stmmac_clk); | 2789 | /* If failed to obtain stmmac_clk and specific clk_csr value |
2790 | goto error_clk_get; | 2790 | * is NOT passed from the platform, probe fail. |
2791 | */ | ||
2792 | if (!priv->plat->clk_csr) { | ||
2793 | ret = PTR_ERR(priv->stmmac_clk); | ||
2794 | goto error_clk_get; | ||
2795 | } else { | ||
2796 | priv->stmmac_clk = NULL; | ||
2797 | } | ||
2791 | } | 2798 | } |
2792 | clk_prepare_enable(priv->stmmac_clk); | 2799 | clk_prepare_enable(priv->stmmac_clk); |
2793 | 2800 | ||
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 23c89ab5a6ad..f67539650c38 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port, | |||
350 | if (IS_ERR(desc)) | 350 | if (IS_ERR(desc)) |
351 | return PTR_ERR(desc); | 351 | return PTR_ERR(desc); |
352 | 352 | ||
353 | if (desc->hdr.state != VIO_DESC_READY) | ||
354 | return 1; | ||
355 | |||
356 | rmb(); | ||
357 | |||
353 | viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", | 358 | viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", |
354 | desc->hdr.state, desc->hdr.ack, | 359 | desc->hdr.state, desc->hdr.ack, |
355 | desc->size, desc->ncookies, | 360 | desc->size, desc->ncookies, |
356 | desc->cookies[0].cookie_addr, | 361 | desc->cookies[0].cookie_addr, |
357 | desc->cookies[0].cookie_size); | 362 | desc->cookies[0].cookie_size); |
358 | 363 | ||
359 | if (desc->hdr.state != VIO_DESC_READY) | ||
360 | return 1; | ||
361 | err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); | 364 | err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); |
362 | if (err == -ECONNRESET) | 365 | if (err == -ECONNRESET) |
363 | return err; | 366 | return err; |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 999fb72688d2..e2a00287f8eb 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
699 | cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); | 699 | cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); |
700 | 700 | ||
701 | if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { | 701 | if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { |
702 | bool ndev_status = false; | ||
703 | struct cpsw_slave *slave = priv->slaves; | ||
704 | int n; | ||
705 | |||
706 | if (priv->data.dual_emac) { | ||
707 | /* In dual emac mode check for all interfaces */ | ||
708 | for (n = priv->data.slaves; n; n--, slave++) | ||
709 | if (netif_running(slave->ndev)) | ||
710 | ndev_status = true; | ||
711 | } | ||
712 | |||
713 | if (ndev_status && (status >= 0)) { | ||
714 | /* The packet received is for the interface which | ||
715 | * is already down and the other interface is up | ||
716 | * and running, intead of freeing which results | ||
717 | * in reducing of the number of rx descriptor in | ||
718 | * DMA engine, requeue skb back to cpdma. | ||
719 | */ | ||
720 | new_skb = skb; | ||
721 | goto requeue; | ||
722 | } | ||
723 | |||
702 | /* the interface is going down, skbs are purged */ | 724 | /* the interface is going down, skbs are purged */ |
703 | dev_kfree_skb_any(skb); | 725 | dev_kfree_skb_any(skb); |
704 | return; | 726 | return; |
@@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
717 | new_skb = skb; | 739 | new_skb = skb; |
718 | } | 740 | } |
719 | 741 | ||
742 | requeue: | ||
720 | ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, | 743 | ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, |
721 | skb_tailroom(new_skb), 0); | 744 | skb_tailroom(new_skb), 0); |
722 | if (WARN_ON(ret < 0)) | 745 | if (WARN_ON(ret < 0)) |
@@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev) | |||
2311 | struct net_device *ndev = platform_get_drvdata(pdev); | 2334 | struct net_device *ndev = platform_get_drvdata(pdev); |
2312 | struct cpsw_priv *priv = netdev_priv(ndev); | 2335 | struct cpsw_priv *priv = netdev_priv(ndev); |
2313 | 2336 | ||
2314 | if (netif_running(ndev)) | 2337 | if (priv->data.dual_emac) { |
2315 | cpsw_ndo_stop(ndev); | 2338 | int i; |
2316 | 2339 | ||
2317 | for_each_slave(priv, soft_reset_slave); | 2340 | for (i = 0; i < priv->data.slaves; i++) { |
2341 | if (netif_running(priv->slaves[i].ndev)) | ||
2342 | cpsw_ndo_stop(priv->slaves[i].ndev); | ||
2343 | soft_reset_slave(priv->slaves + i); | ||
2344 | } | ||
2345 | } else { | ||
2346 | if (netif_running(ndev)) | ||
2347 | cpsw_ndo_stop(ndev); | ||
2348 | for_each_slave(priv, soft_reset_slave); | ||
2349 | } | ||
2318 | 2350 | ||
2319 | pm_runtime_put_sync(&pdev->dev); | 2351 | pm_runtime_put_sync(&pdev->dev); |
2320 | 2352 | ||
@@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev) | |||
2328 | { | 2360 | { |
2329 | struct platform_device *pdev = to_platform_device(dev); | 2361 | struct platform_device *pdev = to_platform_device(dev); |
2330 | struct net_device *ndev = platform_get_drvdata(pdev); | 2362 | struct net_device *ndev = platform_get_drvdata(pdev); |
2363 | struct cpsw_priv *priv = netdev_priv(ndev); | ||
2331 | 2364 | ||
2332 | pm_runtime_get_sync(&pdev->dev); | 2365 | pm_runtime_get_sync(&pdev->dev); |
2333 | 2366 | ||
2334 | /* Select default pin state */ | 2367 | /* Select default pin state */ |
2335 | pinctrl_pm_select_default_state(&pdev->dev); | 2368 | pinctrl_pm_select_default_state(&pdev->dev); |
2336 | 2369 | ||
2337 | if (netif_running(ndev)) | 2370 | if (priv->data.dual_emac) { |
2338 | cpsw_ndo_open(ndev); | 2371 | int i; |
2372 | |||
2373 | for (i = 0; i < priv->data.slaves; i++) { | ||
2374 | if (netif_running(priv->slaves[i].ndev)) | ||
2375 | cpsw_ndo_open(priv->slaves[i].ndev); | ||
2376 | } | ||
2377 | } else { | ||
2378 | if (netif_running(ndev)) | ||
2379 | cpsw_ndo_open(ndev); | ||
2380 | } | ||
2339 | return 0; | 2381 | return 0; |
2340 | } | 2382 | } |
2341 | 2383 | ||
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a9c5eaadc426..0fcb5e7eb073 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
387 | int hdr_offset; | 387 | int hdr_offset; |
388 | u32 net_trans_info; | 388 | u32 net_trans_info; |
389 | u32 hash; | 389 | u32 hash; |
390 | u32 skb_length = skb->len; | ||
390 | 391 | ||
391 | 392 | ||
392 | /* We will atmost need two pages to describe the rndis | 393 | /* We will atmost need two pages to describe the rndis |
@@ -562,7 +563,7 @@ do_send: | |||
562 | 563 | ||
563 | drop: | 564 | drop: |
564 | if (ret == 0) { | 565 | if (ret == 0) { |
565 | net->stats.tx_bytes += skb->len; | 566 | net->stats.tx_bytes += skb_length; |
566 | net->stats.tx_packets++; | 567 | net->stats.tx_packets++; |
567 | } else { | 568 | } else { |
568 | kfree(packet); | 569 | kfree(packet); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a96955597755..726edabff26b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/netpoll.h> | 36 | #include <linux/netpoll.h> |
37 | 37 | ||
38 | #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) | 38 | #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) |
39 | #define MACVLAN_BC_QUEUE_LEN 1000 | ||
39 | 40 | ||
40 | struct macvlan_port { | 41 | struct macvlan_port { |
41 | struct net_device *dev; | 42 | struct net_device *dev; |
@@ -248,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, | |||
248 | goto err; | 249 | goto err; |
249 | 250 | ||
250 | spin_lock(&port->bc_queue.lock); | 251 | spin_lock(&port->bc_queue.lock); |
251 | if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) { | 252 | if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { |
252 | __skb_queue_tail(&port->bc_queue, nskb); | 253 | __skb_queue_tail(&port->bc_queue, nskb); |
253 | err = 0; | 254 | err = 0; |
254 | } | 255 | } |
@@ -806,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, | |||
806 | features, | 807 | features, |
807 | mask); | 808 | mask); |
808 | features |= ALWAYS_ON_FEATURES; | 809 | features |= ALWAYS_ON_FEATURES; |
810 | features &= ~NETIF_F_NETNS_LOCAL; | ||
809 | 811 | ||
810 | return features; | 812 | return features; |
811 | } | 813 | } |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 3381c4f91a8c..0c6adaaf898c 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -112,17 +112,15 @@ out: | |||
112 | return err; | 112 | return err; |
113 | } | 113 | } |
114 | 114 | ||
115 | /* Requires RTNL */ | ||
115 | static int macvtap_set_queue(struct net_device *dev, struct file *file, | 116 | static int macvtap_set_queue(struct net_device *dev, struct file *file, |
116 | struct macvtap_queue *q) | 117 | struct macvtap_queue *q) |
117 | { | 118 | { |
118 | struct macvlan_dev *vlan = netdev_priv(dev); | 119 | struct macvlan_dev *vlan = netdev_priv(dev); |
119 | int err = -EBUSY; | ||
120 | 120 | ||
121 | rtnl_lock(); | ||
122 | if (vlan->numqueues == MAX_MACVTAP_QUEUES) | 121 | if (vlan->numqueues == MAX_MACVTAP_QUEUES) |
123 | goto out; | 122 | return -EBUSY; |
124 | 123 | ||
125 | err = 0; | ||
126 | rcu_assign_pointer(q->vlan, vlan); | 124 | rcu_assign_pointer(q->vlan, vlan); |
127 | rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); | 125 | rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); |
128 | sock_hold(&q->sk); | 126 | sock_hold(&q->sk); |
@@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file, | |||
136 | vlan->numvtaps++; | 134 | vlan->numvtaps++; |
137 | vlan->numqueues++; | 135 | vlan->numqueues++; |
138 | 136 | ||
139 | out: | 137 | return 0; |
140 | rtnl_unlock(); | ||
141 | return err; | ||
142 | } | 138 | } |
143 | 139 | ||
144 | static int macvtap_disable_queue(struct macvtap_queue *q) | 140 | static int macvtap_disable_queue(struct macvtap_queue *q) |
@@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk) | |||
454 | static int macvtap_open(struct inode *inode, struct file *file) | 450 | static int macvtap_open(struct inode *inode, struct file *file) |
455 | { | 451 | { |
456 | struct net *net = current->nsproxy->net_ns; | 452 | struct net *net = current->nsproxy->net_ns; |
457 | struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode)); | 453 | struct net_device *dev; |
458 | struct macvtap_queue *q; | 454 | struct macvtap_queue *q; |
459 | int err; | 455 | int err = -ENODEV; |
460 | 456 | ||
461 | err = -ENODEV; | 457 | rtnl_lock(); |
458 | dev = dev_get_by_macvtap_minor(iminor(inode)); | ||
462 | if (!dev) | 459 | if (!dev) |
463 | goto out; | 460 | goto out; |
464 | 461 | ||
@@ -498,6 +495,7 @@ out: | |||
498 | if (dev) | 495 | if (dev) |
499 | dev_put(dev); | 496 | dev_put(dev); |
500 | 497 | ||
498 | rtnl_unlock(); | ||
501 | return err; | 499 | return err; |
502 | } | 500 | } |
503 | 501 | ||
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fd0ea7c50ee6..011dbda2b2f1 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = { | |||
592 | .phy_id = PHY_ID_KSZ9031, | 592 | .phy_id = PHY_ID_KSZ9031, |
593 | .phy_id_mask = 0x00fffff0, | 593 | .phy_id_mask = 0x00fffff0, |
594 | .name = "Micrel KSZ9031 Gigabit PHY", | 594 | .name = "Micrel KSZ9031 Gigabit PHY", |
595 | .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause | 595 | .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), |
596 | | SUPPORTED_Asym_Pause), | ||
597 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | 596 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, |
598 | .config_init = ksz9031_config_init, | 597 | .config_init = ksz9031_config_init, |
599 | .config_aneg = genphy_config_aneg, | 598 | .config_aneg = genphy_config_aneg, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 87f710476217..604ef210a4de 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <net/ip6_checksum.h> | 24 | #include <net/ip6_checksum.h> |
25 | 25 | ||
26 | /* Version Information */ | 26 | /* Version Information */ |
27 | #define DRIVER_VERSION "v1.06.0 (2014/03/03)" | 27 | #define DRIVER_VERSION "v1.06.1 (2014/10/01)" |
28 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" | 28 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" |
29 | #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" | 29 | #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" |
30 | #define MODULENAME "r8152" | 30 | #define MODULENAME "r8152" |
@@ -1949,10 +1949,34 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable) | |||
1949 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); | 1949 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); |
1950 | } | 1950 | } |
1951 | 1951 | ||
1952 | static int rtl_start_rx(struct r8152 *tp) | ||
1953 | { | ||
1954 | int i, ret = 0; | ||
1955 | |||
1956 | INIT_LIST_HEAD(&tp->rx_done); | ||
1957 | for (i = 0; i < RTL8152_MAX_RX; i++) { | ||
1958 | INIT_LIST_HEAD(&tp->rx_info[i].list); | ||
1959 | ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); | ||
1960 | if (ret) | ||
1961 | break; | ||
1962 | } | ||
1963 | |||
1964 | return ret; | ||
1965 | } | ||
1966 | |||
1967 | static int rtl_stop_rx(struct r8152 *tp) | ||
1968 | { | ||
1969 | int i; | ||
1970 | |||
1971 | for (i = 0; i < RTL8152_MAX_RX; i++) | ||
1972 | usb_kill_urb(tp->rx_info[i].urb); | ||
1973 | |||
1974 | return 0; | ||
1975 | } | ||
1976 | |||
1952 | static int rtl_enable(struct r8152 *tp) | 1977 | static int rtl_enable(struct r8152 *tp) |
1953 | { | 1978 | { |
1954 | u32 ocp_data; | 1979 | u32 ocp_data; |
1955 | int i, ret; | ||
1956 | 1980 | ||
1957 | r8152b_reset_packet_filter(tp); | 1981 | r8152b_reset_packet_filter(tp); |
1958 | 1982 | ||
@@ -1962,14 +1986,7 @@ static int rtl_enable(struct r8152 *tp) | |||
1962 | 1986 | ||
1963 | rxdy_gated_en(tp, false); | 1987 | rxdy_gated_en(tp, false); |
1964 | 1988 | ||
1965 | INIT_LIST_HEAD(&tp->rx_done); | 1989 | return rtl_start_rx(tp); |
1966 | ret = 0; | ||
1967 | for (i = 0; i < RTL8152_MAX_RX; i++) { | ||
1968 | INIT_LIST_HEAD(&tp->rx_info[i].list); | ||
1969 | ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); | ||
1970 | } | ||
1971 | |||
1972 | return ret; | ||
1973 | } | 1990 | } |
1974 | 1991 | ||
1975 | static int rtl8152_enable(struct r8152 *tp) | 1992 | static int rtl8152_enable(struct r8152 *tp) |
@@ -2019,7 +2036,7 @@ static int rtl8153_enable(struct r8152 *tp) | |||
2019 | return rtl_enable(tp); | 2036 | return rtl_enable(tp); |
2020 | } | 2037 | } |
2021 | 2038 | ||
2022 | static void rtl8152_disable(struct r8152 *tp) | 2039 | static void rtl_disable(struct r8152 *tp) |
2023 | { | 2040 | { |
2024 | u32 ocp_data; | 2041 | u32 ocp_data; |
2025 | int i; | 2042 | int i; |
@@ -2053,8 +2070,7 @@ static void rtl8152_disable(struct r8152 *tp) | |||
2053 | mdelay(1); | 2070 | mdelay(1); |
2054 | } | 2071 | } |
2055 | 2072 | ||
2056 | for (i = 0; i < RTL8152_MAX_RX; i++) | 2073 | rtl_stop_rx(tp); |
2057 | usb_kill_urb(tp->rx_info[i].urb); | ||
2058 | 2074 | ||
2059 | rtl8152_nic_reset(tp); | 2075 | rtl8152_nic_reset(tp); |
2060 | } | 2076 | } |
@@ -2185,28 +2201,6 @@ static void rtl_phy_reset(struct r8152 *tp) | |||
2185 | } | 2201 | } |
2186 | } | 2202 | } |
2187 | 2203 | ||
2188 | static void rtl_clear_bp(struct r8152 *tp) | ||
2189 | { | ||
2190 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0); | ||
2191 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0); | ||
2192 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0); | ||
2193 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0); | ||
2194 | ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0); | ||
2195 | ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0); | ||
2196 | ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0); | ||
2197 | ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0); | ||
2198 | mdelay(3); | ||
2199 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0); | ||
2200 | ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0); | ||
2201 | } | ||
2202 | |||
2203 | static void r8153_clear_bp(struct r8152 *tp) | ||
2204 | { | ||
2205 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0); | ||
2206 | ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0); | ||
2207 | rtl_clear_bp(tp); | ||
2208 | } | ||
2209 | |||
2210 | static void r8153_teredo_off(struct r8152 *tp) | 2204 | static void r8153_teredo_off(struct r8152 *tp) |
2211 | { | 2205 | { |
2212 | u32 ocp_data; | 2206 | u32 ocp_data; |
@@ -2232,6 +2226,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp) | |||
2232 | LINKENA | DIS_SDSAVE); | 2226 | LINKENA | DIS_SDSAVE); |
2233 | } | 2227 | } |
2234 | 2228 | ||
2229 | static void rtl8152_disable(struct r8152 *tp) | ||
2230 | { | ||
2231 | r8152b_disable_aldps(tp); | ||
2232 | rtl_disable(tp); | ||
2233 | r8152b_enable_aldps(tp); | ||
2234 | } | ||
2235 | |||
2235 | static void r8152b_hw_phy_cfg(struct r8152 *tp) | 2236 | static void r8152b_hw_phy_cfg(struct r8152 *tp) |
2236 | { | 2237 | { |
2237 | u16 data; | 2238 | u16 data; |
@@ -2242,11 +2243,6 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp) | |||
2242 | r8152_mdio_write(tp, MII_BMCR, data); | 2243 | r8152_mdio_write(tp, MII_BMCR, data); |
2243 | } | 2244 | } |
2244 | 2245 | ||
2245 | r8152b_disable_aldps(tp); | ||
2246 | |||
2247 | rtl_clear_bp(tp); | ||
2248 | |||
2249 | r8152b_enable_aldps(tp); | ||
2250 | set_bit(PHY_RESET, &tp->flags); | 2246 | set_bit(PHY_RESET, &tp->flags); |
2251 | } | 2247 | } |
2252 | 2248 | ||
@@ -2255,9 +2251,6 @@ static void r8152b_exit_oob(struct r8152 *tp) | |||
2255 | u32 ocp_data; | 2251 | u32 ocp_data; |
2256 | int i; | 2252 | int i; |
2257 | 2253 | ||
2258 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
2259 | return; | ||
2260 | |||
2261 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); | 2254 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); |
2262 | ocp_data &= ~RCR_ACPT_ALL; | 2255 | ocp_data &= ~RCR_ACPT_ALL; |
2263 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); | 2256 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); |
@@ -2347,7 +2340,7 @@ static void r8152b_enter_oob(struct r8152 *tp) | |||
2347 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); | 2340 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); |
2348 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); | 2341 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); |
2349 | 2342 | ||
2350 | rtl8152_disable(tp); | 2343 | rtl_disable(tp); |
2351 | 2344 | ||
2352 | for (i = 0; i < 1000; i++) { | 2345 | for (i = 0; i < 1000; i++) { |
2353 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); | 2346 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); |
@@ -2400,8 +2393,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) | |||
2400 | r8152_mdio_write(tp, MII_BMCR, data); | 2393 | r8152_mdio_write(tp, MII_BMCR, data); |
2401 | } | 2394 | } |
2402 | 2395 | ||
2403 | r8153_clear_bp(tp); | ||
2404 | |||
2405 | if (tp->version == RTL_VER_03) { | 2396 | if (tp->version == RTL_VER_03) { |
2406 | data = ocp_reg_read(tp, OCP_EEE_CFG); | 2397 | data = ocp_reg_read(tp, OCP_EEE_CFG); |
2407 | data &= ~CTAP_SHORT_EN; | 2398 | data &= ~CTAP_SHORT_EN; |
@@ -2485,9 +2476,6 @@ static void r8153_first_init(struct r8152 *tp) | |||
2485 | u32 ocp_data; | 2476 | u32 ocp_data; |
2486 | int i; | 2477 | int i; |
2487 | 2478 | ||
2488 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
2489 | return; | ||
2490 | |||
2491 | rxdy_gated_en(tp, true); | 2479 | rxdy_gated_en(tp, true); |
2492 | r8153_teredo_off(tp); | 2480 | r8153_teredo_off(tp); |
2493 | 2481 | ||
@@ -2560,7 +2548,7 @@ static void r8153_enter_oob(struct r8152 *tp) | |||
2560 | ocp_data &= ~NOW_IS_OOB; | 2548 | ocp_data &= ~NOW_IS_OOB; |
2561 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); | 2549 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); |
2562 | 2550 | ||
2563 | rtl8152_disable(tp); | 2551 | rtl_disable(tp); |
2564 | 2552 | ||
2565 | for (i = 0; i < 1000; i++) { | 2553 | for (i = 0; i < 1000; i++) { |
2566 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); | 2554 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); |
@@ -2624,6 +2612,13 @@ static void r8153_enable_aldps(struct r8152 *tp) | |||
2624 | ocp_reg_write(tp, OCP_POWER_CFG, data); | 2612 | ocp_reg_write(tp, OCP_POWER_CFG, data); |
2625 | } | 2613 | } |
2626 | 2614 | ||
2615 | static void rtl8153_disable(struct r8152 *tp) | ||
2616 | { | ||
2617 | r8153_disable_aldps(tp); | ||
2618 | rtl_disable(tp); | ||
2619 | r8153_enable_aldps(tp); | ||
2620 | } | ||
2621 | |||
2627 | static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) | 2622 | static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) |
2628 | { | 2623 | { |
2629 | u16 bmcr, anar, gbcr; | 2624 | u16 bmcr, anar, gbcr; |
@@ -2714,6 +2709,16 @@ out: | |||
2714 | return ret; | 2709 | return ret; |
2715 | } | 2710 | } |
2716 | 2711 | ||
2712 | static void rtl8152_up(struct r8152 *tp) | ||
2713 | { | ||
2714 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
2715 | return; | ||
2716 | |||
2717 | r8152b_disable_aldps(tp); | ||
2718 | r8152b_exit_oob(tp); | ||
2719 | r8152b_enable_aldps(tp); | ||
2720 | } | ||
2721 | |||
2717 | static void rtl8152_down(struct r8152 *tp) | 2722 | static void rtl8152_down(struct r8152 *tp) |
2718 | { | 2723 | { |
2719 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { | 2724 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { |
@@ -2727,6 +2732,16 @@ static void rtl8152_down(struct r8152 *tp) | |||
2727 | r8152b_enable_aldps(tp); | 2732 | r8152b_enable_aldps(tp); |
2728 | } | 2733 | } |
2729 | 2734 | ||
2735 | static void rtl8153_up(struct r8152 *tp) | ||
2736 | { | ||
2737 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
2738 | return; | ||
2739 | |||
2740 | r8153_disable_aldps(tp); | ||
2741 | r8153_first_init(tp); | ||
2742 | r8153_enable_aldps(tp); | ||
2743 | } | ||
2744 | |||
2730 | static void rtl8153_down(struct r8152 *tp) | 2745 | static void rtl8153_down(struct r8152 *tp) |
2731 | { | 2746 | { |
2732 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { | 2747 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { |
@@ -2946,6 +2961,8 @@ static void r8152b_init(struct r8152 *tp) | |||
2946 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 2961 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
2947 | return; | 2962 | return; |
2948 | 2963 | ||
2964 | r8152b_disable_aldps(tp); | ||
2965 | |||
2949 | if (tp->version == RTL_VER_01) { | 2966 | if (tp->version == RTL_VER_01) { |
2950 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); | 2967 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); |
2951 | ocp_data &= ~LED_MODE_MASK; | 2968 | ocp_data &= ~LED_MODE_MASK; |
@@ -2984,6 +3001,7 @@ static void r8153_init(struct r8152 *tp) | |||
2984 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 3001 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
2985 | return; | 3002 | return; |
2986 | 3003 | ||
3004 | r8153_disable_aldps(tp); | ||
2987 | r8153_u1u2en(tp, false); | 3005 | r8153_u1u2en(tp, false); |
2988 | 3006 | ||
2989 | for (i = 0; i < 500; i++) { | 3007 | for (i = 0; i < 500; i++) { |
@@ -3055,13 +3073,14 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) | |||
3055 | clear_bit(WORK_ENABLE, &tp->flags); | 3073 | clear_bit(WORK_ENABLE, &tp->flags); |
3056 | usb_kill_urb(tp->intr_urb); | 3074 | usb_kill_urb(tp->intr_urb); |
3057 | cancel_delayed_work_sync(&tp->schedule); | 3075 | cancel_delayed_work_sync(&tp->schedule); |
3076 | tasklet_disable(&tp->tl); | ||
3058 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { | 3077 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { |
3078 | rtl_stop_rx(tp); | ||
3059 | rtl_runtime_suspend_enable(tp, true); | 3079 | rtl_runtime_suspend_enable(tp, true); |
3060 | } else { | 3080 | } else { |
3061 | tasklet_disable(&tp->tl); | ||
3062 | tp->rtl_ops.down(tp); | 3081 | tp->rtl_ops.down(tp); |
3063 | tasklet_enable(&tp->tl); | ||
3064 | } | 3082 | } |
3083 | tasklet_enable(&tp->tl); | ||
3065 | } | 3084 | } |
3066 | 3085 | ||
3067 | return 0; | 3086 | return 0; |
@@ -3080,17 +3099,18 @@ static int rtl8152_resume(struct usb_interface *intf) | |||
3080 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { | 3099 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { |
3081 | rtl_runtime_suspend_enable(tp, false); | 3100 | rtl_runtime_suspend_enable(tp, false); |
3082 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); | 3101 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); |
3102 | set_bit(WORK_ENABLE, &tp->flags); | ||
3083 | if (tp->speed & LINK_STATUS) | 3103 | if (tp->speed & LINK_STATUS) |
3084 | tp->rtl_ops.disable(tp); | 3104 | rtl_start_rx(tp); |
3085 | } else { | 3105 | } else { |
3086 | tp->rtl_ops.up(tp); | 3106 | tp->rtl_ops.up(tp); |
3087 | rtl8152_set_speed(tp, AUTONEG_ENABLE, | 3107 | rtl8152_set_speed(tp, AUTONEG_ENABLE, |
3088 | tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, | 3108 | tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, |
3089 | DUPLEX_FULL); | 3109 | DUPLEX_FULL); |
3110 | tp->speed = 0; | ||
3111 | netif_carrier_off(tp->netdev); | ||
3112 | set_bit(WORK_ENABLE, &tp->flags); | ||
3090 | } | 3113 | } |
3091 | tp->speed = 0; | ||
3092 | netif_carrier_off(tp->netdev); | ||
3093 | set_bit(WORK_ENABLE, &tp->flags); | ||
3094 | usb_submit_urb(tp->intr_urb, GFP_KERNEL); | 3114 | usb_submit_urb(tp->intr_urb, GFP_KERNEL); |
3095 | } | 3115 | } |
3096 | 3116 | ||
@@ -3377,7 +3397,7 @@ static void rtl8153_unload(struct r8152 *tp) | |||
3377 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 3397 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
3378 | return; | 3398 | return; |
3379 | 3399 | ||
3380 | r8153_power_cut_en(tp, true); | 3400 | r8153_power_cut_en(tp, false); |
3381 | } | 3401 | } |
3382 | 3402 | ||
3383 | static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | 3403 | static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) |
@@ -3392,7 +3412,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | |||
3392 | ops->init = r8152b_init; | 3412 | ops->init = r8152b_init; |
3393 | ops->enable = rtl8152_enable; | 3413 | ops->enable = rtl8152_enable; |
3394 | ops->disable = rtl8152_disable; | 3414 | ops->disable = rtl8152_disable; |
3395 | ops->up = r8152b_exit_oob; | 3415 | ops->up = rtl8152_up; |
3396 | ops->down = rtl8152_down; | 3416 | ops->down = rtl8152_down; |
3397 | ops->unload = rtl8152_unload; | 3417 | ops->unload = rtl8152_unload; |
3398 | ret = 0; | 3418 | ret = 0; |
@@ -3400,8 +3420,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | |||
3400 | case PRODUCT_ID_RTL8153: | 3420 | case PRODUCT_ID_RTL8153: |
3401 | ops->init = r8153_init; | 3421 | ops->init = r8153_init; |
3402 | ops->enable = rtl8153_enable; | 3422 | ops->enable = rtl8153_enable; |
3403 | ops->disable = rtl8152_disable; | 3423 | ops->disable = rtl8153_disable; |
3404 | ops->up = r8153_first_init; | 3424 | ops->up = rtl8153_up; |
3405 | ops->down = rtl8153_down; | 3425 | ops->down = rtl8153_down; |
3406 | ops->unload = rtl8153_unload; | 3426 | ops->unload = rtl8153_unload; |
3407 | ret = 0; | 3427 | ret = 0; |
@@ -3416,8 +3436,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | |||
3416 | case PRODUCT_ID_SAMSUNG: | 3436 | case PRODUCT_ID_SAMSUNG: |
3417 | ops->init = r8153_init; | 3437 | ops->init = r8153_init; |
3418 | ops->enable = rtl8153_enable; | 3438 | ops->enable = rtl8153_enable; |
3419 | ops->disable = rtl8152_disable; | 3439 | ops->disable = rtl8153_disable; |
3420 | ops->up = r8153_first_init; | 3440 | ops->up = rtl8153_up; |
3421 | ops->down = rtl8153_down; | 3441 | ops->down = rtl8153_down; |
3422 | ops->unload = rtl8153_unload; | 3442 | ops->unload = rtl8153_unload; |
3423 | ret = 0; | 3443 | ret = 0; |
@@ -3530,7 +3550,11 @@ static void rtl8152_disconnect(struct usb_interface *intf) | |||
3530 | 3550 | ||
3531 | usb_set_intfdata(intf, NULL); | 3551 | usb_set_intfdata(intf, NULL); |
3532 | if (tp) { | 3552 | if (tp) { |
3533 | set_bit(RTL8152_UNPLUG, &tp->flags); | 3553 | struct usb_device *udev = tp->udev; |
3554 | |||
3555 | if (udev->state == USB_STATE_NOTATTACHED) | ||
3556 | set_bit(RTL8152_UNPLUG, &tp->flags); | ||
3557 | |||
3534 | tasklet_kill(&tp->tl); | 3558 | tasklet_kill(&tp->tl); |
3535 | unregister_netdev(tp->netdev); | 3559 | unregister_netdev(tp->netdev); |
3536 | tp->rtl_ops.unload(tp); | 3560 | tp->rtl_ops.unload(tp); |
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c index 733be5178481..6ad44470d0f2 100644 --- a/drivers/net/wireless/ath/ath9k/common-beacon.c +++ b/drivers/net/wireless/ath/ath9k/common-beacon.c | |||
@@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, | |||
57 | struct ath9k_beacon_state *bs) | 57 | struct ath9k_beacon_state *bs) |
58 | { | 58 | { |
59 | struct ath_common *common = ath9k_hw_common(ah); | 59 | struct ath_common *common = ath9k_hw_common(ah); |
60 | int dtim_intval, sleepduration; | 60 | int dtim_intval; |
61 | u64 tsf; | 61 | u64 tsf; |
62 | 62 | ||
63 | /* No need to configure beacon if we are not associated */ | 63 | /* No need to configure beacon if we are not associated */ |
@@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, | |||
75 | * last beacon we received (which may be none). | 75 | * last beacon we received (which may be none). |
76 | */ | 76 | */ |
77 | dtim_intval = conf->intval * conf->dtim_period; | 77 | dtim_intval = conf->intval * conf->dtim_period; |
78 | sleepduration = ah->hw->conf.listen_interval * conf->intval; | ||
79 | 78 | ||
80 | /* | 79 | /* |
81 | * Pull nexttbtt forward to reflect the current | 80 | * Pull nexttbtt forward to reflect the current |
@@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, | |||
113 | */ | 112 | */ |
114 | 113 | ||
115 | bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100), | 114 | bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100), |
116 | sleepduration)); | 115 | conf->intval)); |
117 | if (bs->bs_sleepduration > bs->bs_dtimperiod) | 116 | if (bs->bs_sleepduration > bs->bs_dtimperiod) |
118 | bs->bs_sleepduration = bs->bs_dtimperiod; | 117 | bs->bs_sleepduration = bs->bs_dtimperiod; |
119 | 118 | ||
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index bb86eb2ffc95..f0484b1b617e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
@@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, | |||
978 | struct ath_hw *ah = common->ah; | 978 | struct ath_hw *ah = common->ah; |
979 | struct ath_htc_rx_status *rxstatus; | 979 | struct ath_htc_rx_status *rxstatus; |
980 | struct ath_rx_status rx_stats; | 980 | struct ath_rx_status rx_stats; |
981 | bool decrypt_error; | 981 | bool decrypt_error = false; |
982 | 982 | ||
983 | if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { | 983 | if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { |
984 | ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", | 984 | ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index e6ac8d2e610c..4b148bbb2bf6 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -513,7 +513,7 @@ irqreturn_t ath_isr(int irq, void *dev) | |||
513 | * touch anything. Note this can happen early | 513 | * touch anything. Note this can happen early |
514 | * on if the IRQ is shared. | 514 | * on if the IRQ is shared. |
515 | */ | 515 | */ |
516 | if (test_bit(ATH_OP_INVALID, &common->op_flags)) | 516 | if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) |
517 | return IRQ_NONE; | 517 | return IRQ_NONE; |
518 | 518 | ||
519 | /* shared irq, not for us */ | 519 | /* shared irq, not for us */ |
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index b8e2561ea645..fe3dc126b149 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig | |||
@@ -27,10 +27,17 @@ config BRCMFMAC | |||
27 | one of the bus interface support. If you choose to build a module, | 27 | one of the bus interface support. If you choose to build a module, |
28 | it'll be called brcmfmac.ko. | 28 | it'll be called brcmfmac.ko. |
29 | 29 | ||
30 | config BRCMFMAC_PROTO_BCDC | ||
31 | bool | ||
32 | |||
33 | config BRCMFMAC_PROTO_MSGBUF | ||
34 | bool | ||
35 | |||
30 | config BRCMFMAC_SDIO | 36 | config BRCMFMAC_SDIO |
31 | bool "SDIO bus interface support for FullMAC driver" | 37 | bool "SDIO bus interface support for FullMAC driver" |
32 | depends on (MMC = y || MMC = BRCMFMAC) | 38 | depends on (MMC = y || MMC = BRCMFMAC) |
33 | depends on BRCMFMAC | 39 | depends on BRCMFMAC |
40 | select BRCMFMAC_PROTO_BCDC | ||
34 | select FW_LOADER | 41 | select FW_LOADER |
35 | default y | 42 | default y |
36 | ---help--- | 43 | ---help--- |
@@ -42,6 +49,7 @@ config BRCMFMAC_USB | |||
42 | bool "USB bus interface support for FullMAC driver" | 49 | bool "USB bus interface support for FullMAC driver" |
43 | depends on (USB = y || USB = BRCMFMAC) | 50 | depends on (USB = y || USB = BRCMFMAC) |
44 | depends on BRCMFMAC | 51 | depends on BRCMFMAC |
52 | select BRCMFMAC_PROTO_BCDC | ||
45 | select FW_LOADER | 53 | select FW_LOADER |
46 | ---help--- | 54 | ---help--- |
47 | This option enables the USB bus interface support for Broadcom | 55 | This option enables the USB bus interface support for Broadcom |
@@ -52,6 +60,8 @@ config BRCMFMAC_PCIE | |||
52 | bool "PCIE bus interface support for FullMAC driver" | 60 | bool "PCIE bus interface support for FullMAC driver" |
53 | depends on BRCMFMAC | 61 | depends on BRCMFMAC |
54 | depends on PCI | 62 | depends on PCI |
63 | depends on HAS_DMA | ||
64 | select BRCMFMAC_PROTO_MSGBUF | ||
55 | select FW_LOADER | 65 | select FW_LOADER |
56 | ---help--- | 66 | ---help--- |
57 | This option enables the PCIE bus interface support for Broadcom | 67 | This option enables the PCIE bus interface support for Broadcom |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile index c35adf4bc70b..90a977fe9a64 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile | |||
@@ -30,16 +30,18 @@ brcmfmac-objs += \ | |||
30 | fwsignal.o \ | 30 | fwsignal.o \ |
31 | p2p.o \ | 31 | p2p.o \ |
32 | proto.o \ | 32 | proto.o \ |
33 | bcdc.o \ | ||
34 | commonring.o \ | ||
35 | flowring.o \ | ||
36 | msgbuf.o \ | ||
37 | dhd_common.o \ | 33 | dhd_common.o \ |
38 | dhd_linux.o \ | 34 | dhd_linux.o \ |
39 | firmware.o \ | 35 | firmware.o \ |
40 | feature.o \ | 36 | feature.o \ |
41 | btcoex.o \ | 37 | btcoex.o \ |
42 | vendor.o | 38 | vendor.o |
39 | brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \ | ||
40 | bcdc.o | ||
41 | brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \ | ||
42 | commonring.o \ | ||
43 | flowring.o \ | ||
44 | msgbuf.o | ||
43 | brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ | 45 | brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ |
44 | dhd_sdio.o \ | 46 | dhd_sdio.o \ |
45 | bcmsdh.o | 47 | bcmsdh.o |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h index 17e8c039ff32..6003179c0ceb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h | |||
@@ -16,9 +16,12 @@ | |||
16 | #ifndef BRCMFMAC_BCDC_H | 16 | #ifndef BRCMFMAC_BCDC_H |
17 | #define BRCMFMAC_BCDC_H | 17 | #define BRCMFMAC_BCDC_H |
18 | 18 | ||
19 | 19 | #ifdef CONFIG_BRCMFMAC_PROTO_BCDC | |
20 | int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); | 20 | int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); |
21 | void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); | 21 | void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); |
22 | 22 | #else | |
23 | static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } | ||
24 | static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} | ||
25 | #endif | ||
23 | 26 | ||
24 | #endif /* BRCMFMAC_BCDC_H */ | 27 | #endif /* BRCMFMAC_BCDC_H */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 4f1daabc551b..44fc85f68f7a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c | |||
@@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, | |||
185 | ifevent->action, ifevent->ifidx, ifevent->bssidx, | 185 | ifevent->action, ifevent->ifidx, ifevent->bssidx, |
186 | ifevent->flags, ifevent->role); | 186 | ifevent->flags, ifevent->role); |
187 | 187 | ||
188 | if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) { | 188 | /* The P2P Device interface event must not be ignored |
189 | * contrary to what firmware tells us. The only way to | ||
190 | * distinguish the P2P Device is by looking at the ifidx | ||
191 | * and bssidx received. | ||
192 | */ | ||
193 | if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) && | ||
194 | (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) { | ||
189 | brcmf_dbg(EVENT, "event can be ignored\n"); | 195 | brcmf_dbg(EVENT, "event can be ignored\n"); |
190 | return; | 196 | return; |
191 | } | 197 | } |
@@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, | |||
210 | return; | 216 | return; |
211 | } | 217 | } |
212 | 218 | ||
213 | if (ifevent->action == BRCMF_E_IF_CHANGE) | 219 | if (ifp && ifevent->action == BRCMF_E_IF_CHANGE) |
214 | brcmf_fws_reset_interface(ifp); | 220 | brcmf_fws_reset_interface(ifp); |
215 | 221 | ||
216 | err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); | 222 | err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); |
217 | 223 | ||
218 | if (ifevent->action == BRCMF_E_IF_DEL) { | 224 | if (ifp && ifevent->action == BRCMF_E_IF_DEL) { |
219 | brcmf_fws_del_interface(ifp); | 225 | brcmf_fws_del_interface(ifp); |
220 | brcmf_del_if(drvr, ifevent->bssidx); | 226 | brcmf_del_if(drvr, ifevent->bssidx); |
221 | } | 227 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h index dd20b1862d44..cbf033f59109 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h | |||
@@ -172,6 +172,8 @@ enum brcmf_fweh_event_code { | |||
172 | #define BRCMF_E_IF_ROLE_STA 0 | 172 | #define BRCMF_E_IF_ROLE_STA 0 |
173 | #define BRCMF_E_IF_ROLE_AP 1 | 173 | #define BRCMF_E_IF_ROLE_AP 1 |
174 | #define BRCMF_E_IF_ROLE_WDS 2 | 174 | #define BRCMF_E_IF_ROLE_WDS 2 |
175 | #define BRCMF_E_IF_ROLE_P2P_GO 3 | ||
176 | #define BRCMF_E_IF_ROLE_P2P_CLIENT 4 | ||
175 | 177 | ||
176 | /** | 178 | /** |
177 | * definitions for event packet validation. | 179 | * definitions for event packet validation. |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h index f901ae52bf2b..77a51b8c1e12 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #ifndef BRCMFMAC_MSGBUF_H | 15 | #ifndef BRCMFMAC_MSGBUF_H |
16 | #define BRCMFMAC_MSGBUF_H | 16 | #define BRCMFMAC_MSGBUF_H |
17 | 17 | ||
18 | #ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF | ||
18 | 19 | ||
19 | #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 | 20 | #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 |
20 | #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 | 21 | #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 |
@@ -32,9 +33,15 @@ | |||
32 | 33 | ||
33 | 34 | ||
34 | int brcmf_proto_msgbuf_rx_trigger(struct device *dev); | 35 | int brcmf_proto_msgbuf_rx_trigger(struct device *dev); |
36 | void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); | ||
35 | int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); | 37 | int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); |
36 | void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); | 38 | void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); |
37 | void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); | 39 | #else |
38 | 40 | static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) | |
41 | { | ||
42 | return 0; | ||
43 | } | ||
44 | static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {} | ||
45 | #endif | ||
39 | 46 | ||
40 | #endif /* BRCMFMAC_MSGBUF_H */ | 47 | #endif /* BRCMFMAC_MSGBUF_H */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 02fe706fc9ec..16a246bfc343 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
@@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable) | |||
497 | static void | 497 | static void |
498 | brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) | 498 | brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) |
499 | { | 499 | { |
500 | struct net_device *ndev = wdev->netdev; | 500 | struct brcmf_cfg80211_vif *vif; |
501 | struct brcmf_if *ifp = netdev_priv(ndev); | 501 | struct brcmf_if *ifp; |
502 | |||
503 | vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); | ||
504 | ifp = vif->ifp; | ||
502 | 505 | ||
503 | if ((wdev->iftype == NL80211_IFTYPE_ADHOC) || | 506 | if ((wdev->iftype == NL80211_IFTYPE_ADHOC) || |
504 | (wdev->iftype == NL80211_IFTYPE_AP) || | 507 | (wdev->iftype == NL80211_IFTYPE_AP) || |
@@ -4918,7 +4921,7 @@ static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg, | |||
4918 | struct brcmu_chan ch; | 4921 | struct brcmu_chan ch; |
4919 | int i; | 4922 | int i; |
4920 | 4923 | ||
4921 | for (i = 0; i <= total; i++) { | 4924 | for (i = 0; i < total; i++) { |
4922 | ch.chspec = (u16)le32_to_cpu(chlist->element[i]); | 4925 | ch.chspec = (u16)le32_to_cpu(chlist->element[i]); |
4923 | cfg->d11inf.decchspec(&ch); | 4926 | cfg->d11inf.decchspec(&ch); |
4924 | 4927 | ||
@@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) | |||
5143 | 5146 | ||
5144 | ch.band = BRCMU_CHAN_BAND_2G; | 5147 | ch.band = BRCMU_CHAN_BAND_2G; |
5145 | ch.bw = BRCMU_CHAN_BW_40; | 5148 | ch.bw = BRCMU_CHAN_BW_40; |
5149 | ch.sb = BRCMU_CHAN_SB_NONE; | ||
5146 | ch.chnum = 0; | 5150 | ch.chnum = 0; |
5147 | cfg->d11inf.encchspec(&ch); | 5151 | cfg->d11inf.encchspec(&ch); |
5148 | 5152 | ||
@@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) | |||
5176 | 5180 | ||
5177 | brcmf_update_bw40_channel_flag(&band->channels[j], &ch); | 5181 | brcmf_update_bw40_channel_flag(&band->channels[j], &ch); |
5178 | } | 5182 | } |
5183 | kfree(pbuf); | ||
5179 | } | 5184 | } |
5180 | return err; | 5185 | return err; |
5181 | } | 5186 | } |
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c index 760c45c34ef3..1513dbc79c14 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/iwlwifi/dvm/power.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include "commands.h" | 40 | #include "commands.h" |
41 | #include "power.h" | 41 | #include "power.h" |
42 | 42 | ||
43 | static bool force_cam; | 43 | static bool force_cam = true; |
44 | module_param(force_cam, bool, 0644); | 44 | module_param(force_cam, bool, 0644); |
45 | MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); | 45 | MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); |
46 | 46 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index d67a37a786aa..d53adc245497 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c | |||
@@ -83,6 +83,8 @@ | |||
83 | #define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ | 83 | #define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ |
84 | #define IWL3160_NVM_VERSION 0x709 | 84 | #define IWL3160_NVM_VERSION 0x709 |
85 | #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ | 85 | #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ |
86 | #define IWL3165_NVM_VERSION 0x709 | ||
87 | #define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ | ||
86 | #define IWL7265_NVM_VERSION 0x0a1d | 88 | #define IWL7265_NVM_VERSION 0x0a1d |
87 | #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ | 89 | #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ |
88 | 90 | ||
@@ -92,6 +94,9 @@ | |||
92 | #define IWL3160_FW_PRE "iwlwifi-3160-" | 94 | #define IWL3160_FW_PRE "iwlwifi-3160-" |
93 | #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" | 95 | #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" |
94 | 96 | ||
97 | #define IWL3165_FW_PRE "iwlwifi-3165-" | ||
98 | #define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode" | ||
99 | |||
95 | #define IWL7265_FW_PRE "iwlwifi-7265-" | 100 | #define IWL7265_FW_PRE "iwlwifi-7265-" |
96 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" | 101 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" |
97 | 102 | ||
@@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { | |||
213 | {0}, | 218 | {0}, |
214 | }; | 219 | }; |
215 | 220 | ||
221 | const struct iwl_cfg iwl3165_2ac_cfg = { | ||
222 | .name = "Intel(R) Dual Band Wireless AC 3165", | ||
223 | .fw_name_pre = IWL3165_FW_PRE, | ||
224 | IWL_DEVICE_7000, | ||
225 | .ht_params = &iwl7000_ht_params, | ||
226 | .nvm_ver = IWL3165_NVM_VERSION, | ||
227 | .nvm_calib_ver = IWL3165_TX_POWER_VERSION, | ||
228 | .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, | ||
229 | }; | ||
230 | |||
216 | const struct iwl_cfg iwl7265_2ac_cfg = { | 231 | const struct iwl_cfg iwl7265_2ac_cfg = { |
217 | .name = "Intel(R) Dual Band Wireless AC 7265", | 232 | .name = "Intel(R) Dual Band Wireless AC 7265", |
218 | .fw_name_pre = IWL7265_FW_PRE, | 233 | .fw_name_pre = IWL7265_FW_PRE, |
@@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = { | |||
245 | 260 | ||
246 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 261 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
247 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); | 262 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); |
263 | MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); | ||
248 | MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 264 | MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 8da596db9abe..3d7cc37420ae 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h | |||
@@ -120,6 +120,8 @@ enum iwl_led_mode { | |||
120 | #define IWL_LONG_WD_TIMEOUT 10000 | 120 | #define IWL_LONG_WD_TIMEOUT 10000 |
121 | #define IWL_MAX_WD_TIMEOUT 120000 | 121 | #define IWL_MAX_WD_TIMEOUT 120000 |
122 | 122 | ||
123 | #define IWL_DEFAULT_MAX_TX_POWER 22 | ||
124 | |||
123 | /* Antenna presence definitions */ | 125 | /* Antenna presence definitions */ |
124 | #define ANT_NONE 0x0 | 126 | #define ANT_NONE 0x0 |
125 | #define ANT_A BIT(0) | 127 | #define ANT_A BIT(0) |
@@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg; | |||
335 | extern const struct iwl_cfg iwl3160_2ac_cfg; | 337 | extern const struct iwl_cfg iwl3160_2ac_cfg; |
336 | extern const struct iwl_cfg iwl3160_2n_cfg; | 338 | extern const struct iwl_cfg iwl3160_2n_cfg; |
337 | extern const struct iwl_cfg iwl3160_n_cfg; | 339 | extern const struct iwl_cfg iwl3160_n_cfg; |
340 | extern const struct iwl_cfg iwl3165_2ac_cfg; | ||
338 | extern const struct iwl_cfg iwl7265_2ac_cfg; | 341 | extern const struct iwl_cfg iwl7265_2ac_cfg; |
339 | extern const struct iwl_cfg iwl7265_2n_cfg; | 342 | extern const struct iwl_cfg iwl7265_2n_cfg; |
340 | extern const struct iwl_cfg iwl7265_n_cfg; | 343 | extern const struct iwl_cfg iwl7265_n_cfg; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 018af2957d3b..354255f08754 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | |||
@@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = { | |||
146 | #define LAST_2GHZ_HT_PLUS 9 | 146 | #define LAST_2GHZ_HT_PLUS 9 |
147 | #define LAST_5GHZ_HT 161 | 147 | #define LAST_5GHZ_HT 161 |
148 | 148 | ||
149 | #define DEFAULT_MAX_TX_POWER 16 | ||
150 | |||
151 | /* rate data (static) */ | 149 | /* rate data (static) */ |
152 | static struct ieee80211_rate iwl_cfg80211_rates[] = { | 150 | static struct ieee80211_rate iwl_cfg80211_rates[] = { |
153 | { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, | 151 | { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, |
@@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, | |||
295 | * Default value - highest tx power value. max_power | 293 | * Default value - highest tx power value. max_power |
296 | * is not used in mvm, and is used for backwards compatibility | 294 | * is not used in mvm, and is used for backwards compatibility |
297 | */ | 295 | */ |
298 | channel->max_power = DEFAULT_MAX_TX_POWER; | 296 | channel->max_power = IWL_DEFAULT_MAX_TX_POWER; |
299 | is_5ghz = channel->band == IEEE80211_BAND_5GHZ; | 297 | is_5ghz = channel->band == IEEE80211_BAND_5GHZ; |
300 | IWL_DEBUG_EEPROM(dev, | 298 | IWL_DEBUG_EEPROM(dev, |
301 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", | 299 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", |
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 2291bbcaaeab..ce71625f497f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c | |||
@@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) | |||
585 | lockdep_assert_held(&mvm->mutex); | 585 | lockdep_assert_held(&mvm->mutex); |
586 | 586 | ||
587 | if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { | 587 | if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { |
588 | u32 mode; | ||
589 | |||
590 | switch (mvm->bt_force_ant_mode) { | 588 | switch (mvm->bt_force_ant_mode) { |
591 | case BT_FORCE_ANT_BT: | 589 | case BT_FORCE_ANT_BT: |
592 | mode = BT_COEX_BT; | 590 | mode = BT_COEX_BT; |
@@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
756 | struct iwl_bt_iterator_data *data = _data; | 754 | struct iwl_bt_iterator_data *data = _data; |
757 | struct iwl_mvm *mvm = data->mvm; | 755 | struct iwl_mvm *mvm = data->mvm; |
758 | struct ieee80211_chanctx_conf *chanctx_conf; | 756 | struct ieee80211_chanctx_conf *chanctx_conf; |
759 | enum ieee80211_smps_mode smps_mode; | 757 | /* default smps_mode is AUTOMATIC - only used for client modes */ |
758 | enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; | ||
760 | u32 bt_activity_grading; | 759 | u32 bt_activity_grading; |
761 | int ave_rssi; | 760 | int ave_rssi; |
762 | 761 | ||
@@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
764 | 763 | ||
765 | switch (vif->type) { | 764 | switch (vif->type) { |
766 | case NL80211_IFTYPE_STATION: | 765 | case NL80211_IFTYPE_STATION: |
767 | /* default smps_mode for BSS / P2P client is AUTOMATIC */ | ||
768 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | ||
769 | break; | 766 | break; |
770 | case NL80211_IFTYPE_AP: | 767 | case NL80211_IFTYPE_AP: |
771 | if (!mvmvif->ap_ibss_active) | 768 | if (!mvmvif->ap_ibss_active) |
@@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
797 | else if (bt_activity_grading >= BT_LOW_TRAFFIC) | 794 | else if (bt_activity_grading >= BT_LOW_TRAFFIC) |
798 | smps_mode = IEEE80211_SMPS_DYNAMIC; | 795 | smps_mode = IEEE80211_SMPS_DYNAMIC; |
799 | 796 | ||
800 | /* relax SMPS contraints for next association */ | 797 | /* relax SMPS constraints for next association */ |
801 | if (!vif->bss_conf.assoc) | 798 | if (!vif->bss_conf.assoc) |
802 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 799 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
803 | 800 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 2e90ff795c13..87e517bffedc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | |||
@@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, | |||
74 | 74 | ||
75 | switch (param) { | 75 | switch (param) { |
76 | case MVM_DEBUGFS_PM_KEEP_ALIVE: { | 76 | case MVM_DEBUGFS_PM_KEEP_ALIVE: { |
77 | struct ieee80211_hw *hw = mvm->hw; | 77 | int dtimper = vif->bss_conf.dtim_period ?: 1; |
78 | int dtimper = hw->conf.ps_dtim_period ?: 1; | ||
79 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; | 78 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; |
80 | 79 | ||
81 | IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); | 80 | IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 95f5b3274efb..9a922f3bd16b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h | |||
@@ -1563,14 +1563,14 @@ enum iwl_sf_scenario { | |||
1563 | 1563 | ||
1564 | /** | 1564 | /** |
1565 | * Smart Fifo configuration command. | 1565 | * Smart Fifo configuration command. |
1566 | * @state: smart fifo state, types listed in iwl_sf_sate. | 1566 | * @state: smart fifo state, types listed in enum %iwl_sf_sate. |
1567 | * @watermark: Minimum allowed availabe free space in RXF for transient state. | 1567 | * @watermark: Minimum allowed availabe free space in RXF for transient state. |
1568 | * @long_delay_timeouts: aging and idle timer values for each scenario | 1568 | * @long_delay_timeouts: aging and idle timer values for each scenario |
1569 | * in long delay state. | 1569 | * in long delay state. |
1570 | * @full_on_timeouts: timer values for each scenario in full on state. | 1570 | * @full_on_timeouts: timer values for each scenario in full on state. |
1571 | */ | 1571 | */ |
1572 | struct iwl_sf_cfg_cmd { | 1572 | struct iwl_sf_cfg_cmd { |
1573 | enum iwl_sf_state state; | 1573 | __le32 state; |
1574 | __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; | 1574 | __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; |
1575 | __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; | 1575 | __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; |
1576 | __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; | 1576 | __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 0e523e28cabf..8242e689ddb1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | |||
@@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, | |||
721 | !force_assoc_off) { | 721 | !force_assoc_off) { |
722 | u32 dtim_offs; | 722 | u32 dtim_offs; |
723 | 723 | ||
724 | /* Allow beacons to pass through as long as we are not | ||
725 | * associated, or we do not have dtim period information. | ||
726 | */ | ||
727 | cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); | ||
728 | |||
729 | /* | 724 | /* |
730 | * The DTIM count counts down, so when it is N that means N | 725 | * The DTIM count counts down, so when it is N that means N |
731 | * more beacon intervals happen until the DTIM TBTT. Therefore | 726 | * more beacon intervals happen until the DTIM TBTT. Therefore |
@@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, | |||
759 | ctxt_sta->is_assoc = cpu_to_le32(1); | 754 | ctxt_sta->is_assoc = cpu_to_le32(1); |
760 | } else { | 755 | } else { |
761 | ctxt_sta->is_assoc = cpu_to_le32(0); | 756 | ctxt_sta->is_assoc = cpu_to_le32(0); |
757 | |||
758 | /* Allow beacons to pass through as long as we are not | ||
759 | * associated, or we do not have dtim period information. | ||
760 | */ | ||
761 | cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); | ||
762 | } | 762 | } |
763 | 763 | ||
764 | ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); | 764 | ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 7c8796584c25..cdc272d776e7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -396,12 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) | |||
396 | else | 396 | else |
397 | hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; | 397 | hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; |
398 | 398 | ||
399 | /* TODO: enable that only for firmwares that don't crash */ | 399 | if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) { |
400 | /* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */ | 400 | hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; |
401 | hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; | 401 | hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; |
402 | hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; | 402 | hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; |
403 | /* we create the 802.11 header and zero length SSID IE. */ | 403 | /* we create the 802.11 header and zero length SSID IE. */ |
404 | hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; | 404 | hw->wiphy->max_sched_scan_ie_len = |
405 | SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; | ||
406 | } | ||
405 | 407 | ||
406 | hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | | 408 | hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | |
407 | NL80211_FEATURE_LOW_PRIORITY_SCAN | | 409 | NL80211_FEATURE_LOW_PRIORITY_SCAN | |
@@ -1524,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, | |||
1524 | */ | 1526 | */ |
1525 | iwl_mvm_remove_time_event(mvm, mvmvif, | 1527 | iwl_mvm_remove_time_event(mvm, mvmvif, |
1526 | &mvmvif->time_event_data); | 1528 | &mvmvif->time_event_data); |
1527 | } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | | ||
1528 | BSS_CHANGED_QOS)) { | ||
1529 | ret = iwl_mvm_power_update_mac(mvm); | ||
1530 | if (ret) | ||
1531 | IWL_ERR(mvm, "failed to update power mode\n"); | ||
1532 | } | 1529 | } |
1533 | 1530 | ||
1534 | if (changes & BSS_CHANGED_BEACON_INFO) { | 1531 | if (changes & BSS_CHANGED_BEACON_INFO) { |
@@ -1536,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, | |||
1536 | WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); | 1533 | WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); |
1537 | } | 1534 | } |
1538 | 1535 | ||
1536 | if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { | ||
1537 | ret = iwl_mvm_power_update_mac(mvm); | ||
1538 | if (ret) | ||
1539 | IWL_ERR(mvm, "failed to update power mode\n"); | ||
1540 | } | ||
1541 | |||
1539 | if (changes & BSS_CHANGED_TXPOWER) { | 1542 | if (changes & BSS_CHANGED_TXPOWER) { |
1540 | IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", | 1543 | IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", |
1541 | bss_conf->txpower); | 1544 | bss_conf->txpower); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 2b2d10800a55..d9769a23c68b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c | |||
@@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, | |||
281 | struct ieee80211_vif *vif, | 281 | struct ieee80211_vif *vif, |
282 | struct iwl_mac_power_cmd *cmd) | 282 | struct iwl_mac_power_cmd *cmd) |
283 | { | 283 | { |
284 | struct ieee80211_hw *hw = mvm->hw; | ||
285 | struct ieee80211_chanctx_conf *chanctx_conf; | 284 | struct ieee80211_chanctx_conf *chanctx_conf; |
286 | struct ieee80211_channel *chan; | 285 | struct ieee80211_channel *chan; |
287 | int dtimper, dtimper_msec; | 286 | int dtimper, dtimper_msec; |
@@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, | |||
292 | 291 | ||
293 | cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, | 292 | cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, |
294 | mvmvif->color)); | 293 | mvmvif->color)); |
295 | dtimper = hw->conf.ps_dtim_period ?: 1; | 294 | dtimper = vif->bss_conf.dtim_period; |
296 | 295 | ||
297 | /* | 296 | /* |
298 | * Regardless of power management state the driver must set | 297 | * Regardless of power management state the driver must set |
@@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, | |||
885 | iwl_mvm_power_build_cmd(mvm, vif, &cmd); | 884 | iwl_mvm_power_build_cmd(mvm, vif, &cmd); |
886 | if (enable) { | 885 | if (enable) { |
887 | /* configure skip over dtim up to 300 msec */ | 886 | /* configure skip over dtim up to 300 msec */ |
888 | int dtimper = mvm->hw->conf.ps_dtim_period ?: 1; | 887 | int dtimper = vif->bss_conf.dtim_period ?: 1; |
889 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; | 888 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; |
890 | 889 | ||
891 | if (WARN_ON(!dtimper_msec)) | 890 | if (WARN_ON(!dtimper_msec)) |
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 4b98987fc413..bf5cd8c8b0f7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c | |||
@@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, | |||
149 | le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); | 149 | le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); |
150 | energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> | 150 | energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> |
151 | IWL_RX_INFO_ENERGY_ANT_A_POS; | 151 | IWL_RX_INFO_ENERGY_ANT_A_POS; |
152 | energy_a = energy_a ? -energy_a : -256; | 152 | energy_a = energy_a ? -energy_a : S8_MIN; |
153 | energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> | 153 | energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> |
154 | IWL_RX_INFO_ENERGY_ANT_B_POS; | 154 | IWL_RX_INFO_ENERGY_ANT_B_POS; |
155 | energy_b = energy_b ? -energy_b : -256; | 155 | energy_b = energy_b ? -energy_b : S8_MIN; |
156 | energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> | 156 | energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> |
157 | IWL_RX_INFO_ENERGY_ANT_C_POS; | 157 | IWL_RX_INFO_ENERGY_ANT_C_POS; |
158 | energy_c = energy_c ? -energy_c : -256; | 158 | energy_c = energy_c ? -energy_c : S8_MIN; |
159 | max_energy = max(energy_a, energy_b); | 159 | max_energy = max(energy_a, energy_b); |
160 | max_energy = max(max_energy, energy_c); | 160 | max_energy = max(max_energy, energy_c); |
161 | 161 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c index 7edfd15efc9d..e843b67f2201 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c | |||
@@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, | |||
172 | enum iwl_sf_state new_state) | 172 | enum iwl_sf_state new_state) |
173 | { | 173 | { |
174 | struct iwl_sf_cfg_cmd sf_cmd = { | 174 | struct iwl_sf_cfg_cmd sf_cmd = { |
175 | .state = new_state, | 175 | .state = cpu_to_le32(new_state), |
176 | }; | 176 | }; |
177 | struct ieee80211_sta *sta; | 177 | struct ieee80211_sta *sta; |
178 | int ret = 0; | 178 | int ret = 0; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index dbc870713882..9ee410bf6da2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, | |||
168 | 168 | ||
169 | /* | 169 | /* |
170 | * for data packets, rate info comes from the table inside the fw. This | 170 | * for data packets, rate info comes from the table inside the fw. This |
171 | * table is controlled by LINK_QUALITY commands | 171 | * table is controlled by LINK_QUALITY commands. Exclude ctrl port |
172 | * frames like EAPOLs which should be treated as mgmt frames. This | ||
173 | * avoids them being sent initially in high rates which increases the | ||
174 | * chances for completion of the 4-Way handshake. | ||
172 | */ | 175 | */ |
173 | 176 | ||
174 | if (ieee80211_is_data(fc) && sta) { | 177 | if (ieee80211_is_data(fc) && sta && |
178 | !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) { | ||
175 | tx_cmd->initial_rate_index = 0; | 179 | tx_cmd->initial_rate_index = 0; |
176 | tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); | 180 | tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); |
177 | return; | 181 | return; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index f0e722ced080..073a68b97a72 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -352,11 +352,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
352 | {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, | 352 | {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, |
353 | {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, | 353 | {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, |
354 | {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, | 354 | {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, |
355 | {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, | ||
356 | {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, | ||
355 | {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, | 357 | {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, |
356 | {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, | 358 | {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, |
357 | {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, | 359 | {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, |
358 | {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, | 360 | {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, |
359 | 361 | ||
362 | /* 3165 Series */ | ||
363 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, | ||
364 | {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, | ||
365 | |||
360 | /* 7265 Series */ | 366 | /* 7265 Series */ |
361 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 367 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
362 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, | 368 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, |
@@ -378,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
378 | {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, | 384 | {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, |
379 | {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, | 385 | {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, |
380 | {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, | 386 | {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, |
387 | {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, | ||
381 | {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, | 388 | {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, |
382 | {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, | 389 | {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, |
383 | {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, | 390 | {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, |
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c index f868333271aa..963a4a5dc88e 100644 --- a/drivers/nfc/microread/microread.c +++ b/drivers/nfc/microread/microread.c | |||
@@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, | |||
501 | targets->sens_res = | 501 | targets->sens_res = |
502 | be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]); | 502 | be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]); |
503 | targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK]; | 503 | targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK]; |
504 | memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], | ||
505 | skb->data[MICROREAD_EMCF_A_LEN]); | ||
506 | targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN]; | 504 | targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN]; |
505 | if (targets->nfcid1_len > sizeof(targets->nfcid1)) { | ||
506 | r = -EINVAL; | ||
507 | goto exit_free; | ||
508 | } | ||
509 | memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], | ||
510 | targets->nfcid1_len); | ||
507 | break; | 511 | break; |
508 | case MICROREAD_GATE_ID_MREAD_ISO_A_3: | 512 | case MICROREAD_GATE_ID_MREAD_ISO_A_3: |
509 | targets->supported_protocols = | 513 | targets->supported_protocols = |
@@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, | |||
511 | targets->sens_res = | 515 | targets->sens_res = |
512 | be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]); | 516 | be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]); |
513 | targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK]; | 517 | targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK]; |
514 | memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], | ||
515 | skb->data[MICROREAD_EMCF_A3_LEN]); | ||
516 | targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN]; | 518 | targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN]; |
519 | if (targets->nfcid1_len > sizeof(targets->nfcid1)) { | ||
520 | r = -EINVAL; | ||
521 | goto exit_free; | ||
522 | } | ||
523 | memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], | ||
524 | targets->nfcid1_len); | ||
517 | break; | 525 | break; |
518 | case MICROREAD_GATE_ID_MREAD_ISO_B: | 526 | case MICROREAD_GATE_ID_MREAD_ISO_B: |
519 | targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK; | 527 | targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK; |
diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile index db7a38ae05f7..7d688f97aa27 100644 --- a/drivers/nfc/st21nfca/Makefile +++ b/drivers/nfc/st21nfca/Makefile | |||
@@ -2,7 +2,8 @@ | |||
2 | # Makefile for ST21NFCA HCI based NFC driver | 2 | # Makefile for ST21NFCA HCI based NFC driver |
3 | # | 3 | # |
4 | 4 | ||
5 | st21nfca_i2c-objs = i2c.o | 5 | st21nfca_hci-objs = st21nfca.o st21nfca_dep.o |
6 | obj-$(CONFIG_NFC_ST21NFCA) += st21nfca_hci.o | ||
6 | 7 | ||
7 | obj-$(CONFIG_NFC_ST21NFCA) += st21nfca.o st21nfca_dep.o | 8 | st21nfca_i2c-objs = i2c.o |
8 | obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o | 9 | obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o |
diff --git a/drivers/nfc/st21nfcb/Makefile b/drivers/nfc/st21nfcb/Makefile index 13d9f03b2fea..f4d835dd15f2 100644 --- a/drivers/nfc/st21nfcb/Makefile +++ b/drivers/nfc/st21nfcb/Makefile | |||
@@ -2,7 +2,8 @@ | |||
2 | # Makefile for ST21NFCB NCI based NFC driver | 2 | # Makefile for ST21NFCB NCI based NFC driver |
3 | # | 3 | # |
4 | 4 | ||
5 | st21nfcb_i2c-objs = i2c.o | 5 | st21nfcb_nci-objs = ndlc.o st21nfcb.o |
6 | obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb_nci.o | ||
6 | 7 | ||
7 | obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb.o ndlc.o | 8 | st21nfcb_i2c-objs = i2c.o |
8 | obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o | 9 | obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o |
diff --git a/drivers/of/base.c b/drivers/of/base.c index d8574adf0d62..293ed4b687ba 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -138,6 +138,9 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp) | |||
138 | /* Important: Don't leak passwords */ | 138 | /* Important: Don't leak passwords */ |
139 | bool secure = strncmp(pp->name, "security-", 9) == 0; | 139 | bool secure = strncmp(pp->name, "security-", 9) == 0; |
140 | 140 | ||
141 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
142 | return 0; | ||
143 | |||
141 | if (!of_kset || !of_node_is_attached(np)) | 144 | if (!of_kset || !of_node_is_attached(np)) |
142 | return 0; | 145 | return 0; |
143 | 146 | ||
@@ -158,6 +161,9 @@ int __of_attach_node_sysfs(struct device_node *np) | |||
158 | struct property *pp; | 161 | struct property *pp; |
159 | int rc; | 162 | int rc; |
160 | 163 | ||
164 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
165 | return 0; | ||
166 | |||
161 | if (!of_kset) | 167 | if (!of_kset) |
162 | return 0; | 168 | return 0; |
163 | 169 | ||
@@ -1713,6 +1719,9 @@ int __of_remove_property(struct device_node *np, struct property *prop) | |||
1713 | 1719 | ||
1714 | void __of_remove_property_sysfs(struct device_node *np, struct property *prop) | 1720 | void __of_remove_property_sysfs(struct device_node *np, struct property *prop) |
1715 | { | 1721 | { |
1722 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
1723 | return; | ||
1724 | |||
1716 | /* at early boot, bail here and defer setup to of_init() */ | 1725 | /* at early boot, bail here and defer setup to of_init() */ |
1717 | if (of_kset && of_node_is_attached(np)) | 1726 | if (of_kset && of_node_is_attached(np)) |
1718 | sysfs_remove_bin_file(&np->kobj, &prop->attr); | 1727 | sysfs_remove_bin_file(&np->kobj, &prop->attr); |
@@ -1777,6 +1786,9 @@ int __of_update_property(struct device_node *np, struct property *newprop, | |||
1777 | void __of_update_property_sysfs(struct device_node *np, struct property *newprop, | 1786 | void __of_update_property_sysfs(struct device_node *np, struct property *newprop, |
1778 | struct property *oldprop) | 1787 | struct property *oldprop) |
1779 | { | 1788 | { |
1789 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
1790 | return; | ||
1791 | |||
1780 | /* At early boot, bail out and defer setup to of_init() */ | 1792 | /* At early boot, bail out and defer setup to of_init() */ |
1781 | if (!of_kset) | 1793 | if (!of_kset) |
1782 | return; | 1794 | return; |
@@ -1847,6 +1859,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) | |||
1847 | { | 1859 | { |
1848 | struct property *pp; | 1860 | struct property *pp; |
1849 | 1861 | ||
1862 | of_aliases = of_find_node_by_path("/aliases"); | ||
1850 | of_chosen = of_find_node_by_path("/chosen"); | 1863 | of_chosen = of_find_node_by_path("/chosen"); |
1851 | if (of_chosen == NULL) | 1864 | if (of_chosen == NULL) |
1852 | of_chosen = of_find_node_by_path("/chosen@0"); | 1865 | of_chosen = of_find_node_by_path("/chosen@0"); |
@@ -1862,7 +1875,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) | |||
1862 | of_stdout = of_find_node_by_path(name); | 1875 | of_stdout = of_find_node_by_path(name); |
1863 | } | 1876 | } |
1864 | 1877 | ||
1865 | of_aliases = of_find_node_by_path("/aliases"); | ||
1866 | if (!of_aliases) | 1878 | if (!of_aliases) |
1867 | return; | 1879 | return; |
1868 | 1880 | ||
@@ -1986,7 +1998,7 @@ bool of_console_check(struct device_node *dn, char *name, int index) | |||
1986 | { | 1998 | { |
1987 | if (!dn || dn != of_stdout || console_set_on_cmdline) | 1999 | if (!dn || dn != of_stdout || console_set_on_cmdline) |
1988 | return false; | 2000 | return false; |
1989 | return add_preferred_console(name, index, NULL); | 2001 | return !add_preferred_console(name, index, NULL); |
1990 | } | 2002 | } |
1991 | EXPORT_SYMBOL_GPL(of_console_check); | 2003 | EXPORT_SYMBOL_GPL(of_console_check); |
1992 | 2004 | ||
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 54fecc49a1fe..f297891d8529 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c | |||
@@ -45,6 +45,9 @@ void __of_detach_node_sysfs(struct device_node *np) | |||
45 | { | 45 | { |
46 | struct property *pp; | 46 | struct property *pp; |
47 | 47 | ||
48 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
49 | return; | ||
50 | |||
48 | BUG_ON(!of_node_is_initialized(np)); | 51 | BUG_ON(!of_node_is_initialized(np)); |
49 | if (!of_kset) | 52 | if (!of_kset) |
50 | return; | 53 | return; |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 79cb8313c7d8..d1ffca8b34ea 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -928,7 +928,11 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, | |||
928 | void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) | 928 | void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) |
929 | { | 929 | { |
930 | const u64 phys_offset = __pa(PAGE_OFFSET); | 930 | const u64 phys_offset = __pa(PAGE_OFFSET); |
931 | base &= PAGE_MASK; | 931 | |
932 | if (!PAGE_ALIGNED(base)) { | ||
933 | size -= PAGE_SIZE - (base & ~PAGE_MASK); | ||
934 | base = PAGE_ALIGN(base); | ||
935 | } | ||
932 | size &= PAGE_MASK; | 936 | size &= PAGE_MASK; |
933 | 937 | ||
934 | if (base > MAX_PHYS_ADDR) { | 938 | if (base > MAX_PHYS_ADDR) { |
@@ -937,10 +941,10 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) | |||
937 | return; | 941 | return; |
938 | } | 942 | } |
939 | 943 | ||
940 | if (base + size > MAX_PHYS_ADDR) { | 944 | if (base + size - 1 > MAX_PHYS_ADDR) { |
941 | pr_warning("Ignoring memory range 0x%lx - 0x%llx\n", | 945 | pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", |
942 | ULONG_MAX, base + size); | 946 | ((u64)MAX_PHYS_ADDR) + 1, base + size); |
943 | size = MAX_PHYS_ADDR - base; | 947 | size = MAX_PHYS_ADDR - base + 1; |
944 | } | 948 | } |
945 | 949 | ||
946 | if (base + size < phys_offset) { | 950 | if (base + size < phys_offset) { |
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index 0f54ab6260df..3651c3871d5b 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c | |||
@@ -278,7 +278,7 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun | |||
278 | { | 278 | { |
279 | struct hardware_path hwpath; | 279 | struct hardware_path hwpath; |
280 | unsigned short i; | 280 | unsigned short i; |
281 | char in[count+1], *temp; | 281 | char in[64], *temp; |
282 | struct device *dev; | 282 | struct device *dev; |
283 | int ret; | 283 | int ret; |
284 | 284 | ||
@@ -286,8 +286,9 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun | |||
286 | return -EINVAL; | 286 | return -EINVAL; |
287 | 287 | ||
288 | /* We'll use a local copy of buf */ | 288 | /* We'll use a local copy of buf */ |
289 | memset(in, 0, count+1); | 289 | count = min_t(size_t, count, sizeof(in)-1); |
290 | strncpy(in, buf, count); | 290 | strncpy(in, buf, count); |
291 | in[count] = '\0'; | ||
291 | 292 | ||
292 | /* Let's clean up the target. 0xff is a blank pattern */ | 293 | /* Let's clean up the target. 0xff is a blank pattern */ |
293 | memset(&hwpath, 0xff, sizeof(hwpath)); | 294 | memset(&hwpath, 0xff, sizeof(hwpath)); |
@@ -393,14 +394,15 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count | |||
393 | { | 394 | { |
394 | unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */ | 395 | unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */ |
395 | unsigned short i; | 396 | unsigned short i; |
396 | char in[count+1], *temp; | 397 | char in[64], *temp; |
397 | 398 | ||
398 | if (!entry || !buf || !count) | 399 | if (!entry || !buf || !count) |
399 | return -EINVAL; | 400 | return -EINVAL; |
400 | 401 | ||
401 | /* We'll use a local copy of buf */ | 402 | /* We'll use a local copy of buf */ |
402 | memset(in, 0, count+1); | 403 | count = min_t(size_t, count, sizeof(in)-1); |
403 | strncpy(in, buf, count); | 404 | strncpy(in, buf, count); |
405 | in[count] = '\0'; | ||
404 | 406 | ||
405 | /* Let's clean up the target. 0 is a blank pattern */ | 407 | /* Let's clean up the target. 0 is a blank pattern */ |
406 | memset(&layers, 0, sizeof(layers)); | 408 | memset(&layers, 0, sizeof(layers)); |
@@ -755,7 +757,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj, | |||
755 | { | 757 | { |
756 | struct pdcspath_entry *pathentry; | 758 | struct pdcspath_entry *pathentry; |
757 | unsigned char flags; | 759 | unsigned char flags; |
758 | char in[count+1], *temp; | 760 | char in[8], *temp; |
759 | char c; | 761 | char c; |
760 | 762 | ||
761 | if (!capable(CAP_SYS_ADMIN)) | 763 | if (!capable(CAP_SYS_ADMIN)) |
@@ -765,8 +767,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj, | |||
765 | return -EINVAL; | 767 | return -EINVAL; |
766 | 768 | ||
767 | /* We'll use a local copy of buf */ | 769 | /* We'll use a local copy of buf */ |
768 | memset(in, 0, count+1); | 770 | count = min_t(size_t, count, sizeof(in)-1); |
769 | strncpy(in, buf, count); | 771 | strncpy(in, buf, count); |
772 | in[count] = '\0'; | ||
770 | 773 | ||
771 | /* Current flags are stored in primary boot path entry */ | 774 | /* Current flags are stored in primary boot path entry */ |
772 | pathentry = &pdcspath_entry_primary; | 775 | pathentry = &pdcspath_entry_primary; |
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index a042d065a0c7..8be2096c8423 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c | |||
@@ -395,7 +395,8 @@ static void __init superio_serial_init(void) | |||
395 | serial_port.iotype = UPIO_PORT; | 395 | serial_port.iotype = UPIO_PORT; |
396 | serial_port.type = PORT_16550A; | 396 | serial_port.type = PORT_16550A; |
397 | serial_port.uartclk = 115200*16; | 397 | serial_port.uartclk = 115200*16; |
398 | serial_port.fifosize = 16; | 398 | serial_port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | |
399 | UPF_BOOT_AUTOCONF; | ||
399 | 400 | ||
400 | /* serial port #1 */ | 401 | /* serial port #1 */ |
401 | serial_port.iobase = sio_dev.sp1_base; | 402 | serial_port.iobase = sio_dev.sp1_base; |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 5e01ae39ec46..2a412fa3b338 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -160,7 +160,7 @@ static void pcie_wait_cmd(struct controller *ctrl) | |||
160 | ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE) | 160 | ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE) |
161 | rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); | 161 | rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); |
162 | else | 162 | else |
163 | rc = pcie_poll_cmd(ctrl, timeout); | 163 | rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout)); |
164 | 164 | ||
165 | /* | 165 | /* |
166 | * Controllers with errata like Intel CF118 don't generate | 166 | * Controllers with errata like Intel CF118 don't generate |
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 82e06a86cd77..a9f9c46e5022 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c | |||
@@ -41,11 +41,17 @@ static int __init pcie_pme_setup(char *str) | |||
41 | } | 41 | } |
42 | __setup("pcie_pme=", pcie_pme_setup); | 42 | __setup("pcie_pme=", pcie_pme_setup); |
43 | 43 | ||
44 | enum pme_suspend_level { | ||
45 | PME_SUSPEND_NONE = 0, | ||
46 | PME_SUSPEND_WAKEUP, | ||
47 | PME_SUSPEND_NOIRQ, | ||
48 | }; | ||
49 | |||
44 | struct pcie_pme_service_data { | 50 | struct pcie_pme_service_data { |
45 | spinlock_t lock; | 51 | spinlock_t lock; |
46 | struct pcie_device *srv; | 52 | struct pcie_device *srv; |
47 | struct work_struct work; | 53 | struct work_struct work; |
48 | bool noirq; /* Don't enable the PME interrupt used by this service. */ | 54 | enum pme_suspend_level suspend_level; |
49 | }; | 55 | }; |
50 | 56 | ||
51 | /** | 57 | /** |
@@ -223,7 +229,7 @@ static void pcie_pme_work_fn(struct work_struct *work) | |||
223 | spin_lock_irq(&data->lock); | 229 | spin_lock_irq(&data->lock); |
224 | 230 | ||
225 | for (;;) { | 231 | for (;;) { |
226 | if (data->noirq) | 232 | if (data->suspend_level != PME_SUSPEND_NONE) |
227 | break; | 233 | break; |
228 | 234 | ||
229 | pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); | 235 | pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); |
@@ -250,7 +256,7 @@ static void pcie_pme_work_fn(struct work_struct *work) | |||
250 | spin_lock_irq(&data->lock); | 256 | spin_lock_irq(&data->lock); |
251 | } | 257 | } |
252 | 258 | ||
253 | if (!data->noirq) | 259 | if (data->suspend_level == PME_SUSPEND_NONE) |
254 | pcie_pme_interrupt_enable(port, true); | 260 | pcie_pme_interrupt_enable(port, true); |
255 | 261 | ||
256 | spin_unlock_irq(&data->lock); | 262 | spin_unlock_irq(&data->lock); |
@@ -367,6 +373,21 @@ static int pcie_pme_probe(struct pcie_device *srv) | |||
367 | return ret; | 373 | return ret; |
368 | } | 374 | } |
369 | 375 | ||
376 | static bool pcie_pme_check_wakeup(struct pci_bus *bus) | ||
377 | { | ||
378 | struct pci_dev *dev; | ||
379 | |||
380 | if (!bus) | ||
381 | return false; | ||
382 | |||
383 | list_for_each_entry(dev, &bus->devices, bus_list) | ||
384 | if (device_may_wakeup(&dev->dev) | ||
385 | || pcie_pme_check_wakeup(dev->subordinate)) | ||
386 | return true; | ||
387 | |||
388 | return false; | ||
389 | } | ||
390 | |||
370 | /** | 391 | /** |
371 | * pcie_pme_suspend - Suspend PCIe PME service device. | 392 | * pcie_pme_suspend - Suspend PCIe PME service device. |
372 | * @srv: PCIe service device to suspend. | 393 | * @srv: PCIe service device to suspend. |
@@ -375,11 +396,26 @@ static int pcie_pme_suspend(struct pcie_device *srv) | |||
375 | { | 396 | { |
376 | struct pcie_pme_service_data *data = get_service_data(srv); | 397 | struct pcie_pme_service_data *data = get_service_data(srv); |
377 | struct pci_dev *port = srv->port; | 398 | struct pci_dev *port = srv->port; |
399 | bool wakeup; | ||
378 | 400 | ||
401 | if (device_may_wakeup(&port->dev)) { | ||
402 | wakeup = true; | ||
403 | } else { | ||
404 | down_read(&pci_bus_sem); | ||
405 | wakeup = pcie_pme_check_wakeup(port->subordinate); | ||
406 | up_read(&pci_bus_sem); | ||
407 | } | ||
379 | spin_lock_irq(&data->lock); | 408 | spin_lock_irq(&data->lock); |
380 | pcie_pme_interrupt_enable(port, false); | 409 | if (wakeup) { |
381 | pcie_clear_root_pme_status(port); | 410 | enable_irq_wake(srv->irq); |
382 | data->noirq = true; | 411 | data->suspend_level = PME_SUSPEND_WAKEUP; |
412 | } else { | ||
413 | struct pci_dev *port = srv->port; | ||
414 | |||
415 | pcie_pme_interrupt_enable(port, false); | ||
416 | pcie_clear_root_pme_status(port); | ||
417 | data->suspend_level = PME_SUSPEND_NOIRQ; | ||
418 | } | ||
383 | spin_unlock_irq(&data->lock); | 419 | spin_unlock_irq(&data->lock); |
384 | 420 | ||
385 | synchronize_irq(srv->irq); | 421 | synchronize_irq(srv->irq); |
@@ -394,12 +430,17 @@ static int pcie_pme_suspend(struct pcie_device *srv) | |||
394 | static int pcie_pme_resume(struct pcie_device *srv) | 430 | static int pcie_pme_resume(struct pcie_device *srv) |
395 | { | 431 | { |
396 | struct pcie_pme_service_data *data = get_service_data(srv); | 432 | struct pcie_pme_service_data *data = get_service_data(srv); |
397 | struct pci_dev *port = srv->port; | ||
398 | 433 | ||
399 | spin_lock_irq(&data->lock); | 434 | spin_lock_irq(&data->lock); |
400 | data->noirq = false; | 435 | if (data->suspend_level == PME_SUSPEND_NOIRQ) { |
401 | pcie_clear_root_pme_status(port); | 436 | struct pci_dev *port = srv->port; |
402 | pcie_pme_interrupt_enable(port, true); | 437 | |
438 | pcie_clear_root_pme_status(port); | ||
439 | pcie_pme_interrupt_enable(port, true); | ||
440 | } else { | ||
441 | disable_irq_wake(srv->irq); | ||
442 | } | ||
443 | data->suspend_level = PME_SUSPEND_NONE; | ||
403 | spin_unlock_irq(&data->lock); | 444 | spin_unlock_irq(&data->lock); |
404 | 445 | ||
405 | return 0; | 446 | return 0; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e3cf8a2e6292..4170113cde61 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -775,7 +775,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) | |||
775 | /* Check if setup is sensible at all */ | 775 | /* Check if setup is sensible at all */ |
776 | if (!pass && | 776 | if (!pass && |
777 | (primary != bus->number || secondary <= bus->number || | 777 | (primary != bus->number || secondary <= bus->number || |
778 | secondary > subordinate || subordinate > bus->busn_res.end)) { | 778 | secondary > subordinate)) { |
779 | dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", | 779 | dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", |
780 | secondary, subordinate); | 780 | secondary, subordinate); |
781 | broken = 1; | 781 | broken = 1; |
@@ -838,23 +838,18 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) | |||
838 | goto out; | 838 | goto out; |
839 | } | 839 | } |
840 | 840 | ||
841 | if (max >= bus->busn_res.end) { | ||
842 | dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n", | ||
843 | max, &bus->busn_res); | ||
844 | goto out; | ||
845 | } | ||
846 | |||
847 | /* Clear errors */ | 841 | /* Clear errors */ |
848 | pci_write_config_word(dev, PCI_STATUS, 0xffff); | 842 | pci_write_config_word(dev, PCI_STATUS, 0xffff); |
849 | 843 | ||
850 | /* The bus will already exist if we are rescanning */ | 844 | /* Prevent assigning a bus number that already exists. |
845 | * This can happen when a bridge is hot-plugged, so in | ||
846 | * this case we only re-scan this bus. */ | ||
851 | child = pci_find_bus(pci_domain_nr(bus), max+1); | 847 | child = pci_find_bus(pci_domain_nr(bus), max+1); |
852 | if (!child) { | 848 | if (!child) { |
853 | child = pci_add_new_bus(bus, dev, max+1); | 849 | child = pci_add_new_bus(bus, dev, max+1); |
854 | if (!child) | 850 | if (!child) |
855 | goto out; | 851 | goto out; |
856 | pci_bus_insert_busn_res(child, max+1, | 852 | pci_bus_insert_busn_res(child, max+1, 0xff); |
857 | bus->busn_res.end); | ||
858 | } | 853 | } |
859 | max++; | 854 | max++; |
860 | buses = (buses & 0xff000000) | 855 | buses = (buses & 0xff000000) |
@@ -913,11 +908,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) | |||
913 | /* | 908 | /* |
914 | * Set the subordinate bus number to its real value. | 909 | * Set the subordinate bus number to its real value. |
915 | */ | 910 | */ |
916 | if (max > bus->busn_res.end) { | ||
917 | dev_warn(&dev->dev, "max busn %02x is outside %pR\n", | ||
918 | max, &bus->busn_res); | ||
919 | max = bus->busn_res.end; | ||
920 | } | ||
921 | pci_bus_update_busn_res_end(child, max); | 911 | pci_bus_update_busn_res_end(child, max); |
922 | pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); | 912 | pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); |
923 | } | 913 | } |
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 87aa28c4280f..2655d4a988f3 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c | |||
@@ -1050,6 +1050,13 @@ static struct acpi_driver acpi_fujitsu_hotkey_driver = { | |||
1050 | }, | 1050 | }, |
1051 | }; | 1051 | }; |
1052 | 1052 | ||
1053 | static const struct acpi_device_id fujitsu_ids[] __used = { | ||
1054 | {ACPI_FUJITSU_HID, 0}, | ||
1055 | {ACPI_FUJITSU_HOTKEY_HID, 0}, | ||
1056 | {"", 0} | ||
1057 | }; | ||
1058 | MODULE_DEVICE_TABLE(acpi, fujitsu_ids); | ||
1059 | |||
1053 | static int __init fujitsu_init(void) | 1060 | static int __init fujitsu_init(void) |
1054 | { | 1061 | { |
1055 | int ret, result, max_brightness; | 1062 | int ret, result, max_brightness; |
@@ -1208,12 +1215,3 @@ MODULE_LICENSE("GPL"); | |||
1208 | MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); | 1215 | MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); |
1209 | MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*"); | 1216 | MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*"); |
1210 | MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); | 1217 | MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); |
1211 | |||
1212 | static struct pnp_device_id pnp_ids[] __used = { | ||
1213 | {.id = "FUJ02bf"}, | ||
1214 | {.id = "FUJ02B1"}, | ||
1215 | {.id = "FUJ02E3"}, | ||
1216 | {.id = ""} | ||
1217 | }; | ||
1218 | |||
1219 | MODULE_DEVICE_TABLE(pnp, pnp_ids); | ||
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c index 8225b89de810..c384fec6d173 100644 --- a/drivers/rtc/rtc-efi.c +++ b/drivers/rtc/rtc-efi.c | |||
@@ -232,6 +232,7 @@ static struct platform_driver efi_rtc_driver = { | |||
232 | 232 | ||
233 | module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe); | 233 | module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe); |
234 | 234 | ||
235 | MODULE_ALIAS("platform:rtc-efi"); | ||
235 | MODULE_AUTHOR("dann frazier <dannf@hp.com>"); | 236 | MODULE_AUTHOR("dann frazier <dannf@hp.com>"); |
236 | MODULE_LICENSE("GPL"); | 237 | MODULE_LICENSE("GPL"); |
237 | MODULE_DESCRIPTION("EFI RTC driver"); | 238 | MODULE_DESCRIPTION("EFI RTC driver"); |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 18a3358eb1d4..bd85fb4978e0 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -43,7 +43,7 @@ config SCSI_DMA | |||
43 | config SCSI_NETLINK | 43 | config SCSI_NETLINK |
44 | bool | 44 | bool |
45 | default n | 45 | default n |
46 | select NET | 46 | depends on NET |
47 | 47 | ||
48 | config SCSI_PROC_FS | 48 | config SCSI_PROC_FS |
49 | bool "legacy /proc/scsi/ support" | 49 | bool "legacy /proc/scsi/ support" |
@@ -257,7 +257,7 @@ config SCSI_SPI_ATTRS | |||
257 | 257 | ||
258 | config SCSI_FC_ATTRS | 258 | config SCSI_FC_ATTRS |
259 | tristate "FiberChannel Transport Attributes" | 259 | tristate "FiberChannel Transport Attributes" |
260 | depends on SCSI | 260 | depends on SCSI && NET |
261 | select SCSI_NETLINK | 261 | select SCSI_NETLINK |
262 | help | 262 | help |
263 | If you wish to export transport-specific information about | 263 | If you wish to export transport-specific information about |
@@ -585,28 +585,28 @@ config HYPERV_STORAGE | |||
585 | 585 | ||
586 | config LIBFC | 586 | config LIBFC |
587 | tristate "LibFC module" | 587 | tristate "LibFC module" |
588 | select SCSI_FC_ATTRS | 588 | depends on SCSI_FC_ATTRS |
589 | select CRC32 | 589 | select CRC32 |
590 | ---help--- | 590 | ---help--- |
591 | Fibre Channel library module | 591 | Fibre Channel library module |
592 | 592 | ||
593 | config LIBFCOE | 593 | config LIBFCOE |
594 | tristate "LibFCoE module" | 594 | tristate "LibFCoE module" |
595 | select LIBFC | 595 | depends on LIBFC |
596 | ---help--- | 596 | ---help--- |
597 | Library for Fibre Channel over Ethernet module | 597 | Library for Fibre Channel over Ethernet module |
598 | 598 | ||
599 | config FCOE | 599 | config FCOE |
600 | tristate "FCoE module" | 600 | tristate "FCoE module" |
601 | depends on PCI | 601 | depends on PCI |
602 | select LIBFCOE | 602 | depends on LIBFCOE |
603 | ---help--- | 603 | ---help--- |
604 | Fibre Channel over Ethernet module | 604 | Fibre Channel over Ethernet module |
605 | 605 | ||
606 | config FCOE_FNIC | 606 | config FCOE_FNIC |
607 | tristate "Cisco FNIC Driver" | 607 | tristate "Cisco FNIC Driver" |
608 | depends on PCI && X86 | 608 | depends on PCI && X86 |
609 | select LIBFCOE | 609 | depends on LIBFCOE |
610 | help | 610 | help |
611 | This is support for the Cisco PCI-Express FCoE HBA. | 611 | This is support for the Cisco PCI-Express FCoE HBA. |
612 | 612 | ||
@@ -816,7 +816,7 @@ config SCSI_IBMVSCSI | |||
816 | config SCSI_IBMVFC | 816 | config SCSI_IBMVFC |
817 | tristate "IBM Virtual FC support" | 817 | tristate "IBM Virtual FC support" |
818 | depends on PPC_PSERIES && SCSI | 818 | depends on PPC_PSERIES && SCSI |
819 | select SCSI_FC_ATTRS | 819 | depends on SCSI_FC_ATTRS |
820 | help | 820 | help |
821 | This is the IBM POWER Virtual FC Client | 821 | This is the IBM POWER Virtual FC Client |
822 | 822 | ||
@@ -1266,7 +1266,7 @@ source "drivers/scsi/qla4xxx/Kconfig" | |||
1266 | config SCSI_LPFC | 1266 | config SCSI_LPFC |
1267 | tristate "Emulex LightPulse Fibre Channel Support" | 1267 | tristate "Emulex LightPulse Fibre Channel Support" |
1268 | depends on PCI && SCSI | 1268 | depends on PCI && SCSI |
1269 | select SCSI_FC_ATTRS | 1269 | depends on SCSI_FC_ATTRS |
1270 | select CRC_T10DIF | 1270 | select CRC_T10DIF |
1271 | help | 1271 | help |
1272 | This lpfc driver supports the Emulex LightPulse | 1272 | This lpfc driver supports the Emulex LightPulse |
@@ -1676,7 +1676,7 @@ config SCSI_SUNESP | |||
1676 | config ZFCP | 1676 | config ZFCP |
1677 | tristate "FCP host bus adapter driver for IBM eServer zSeries" | 1677 | tristate "FCP host bus adapter driver for IBM eServer zSeries" |
1678 | depends on S390 && QDIO && SCSI | 1678 | depends on S390 && QDIO && SCSI |
1679 | select SCSI_FC_ATTRS | 1679 | depends on SCSI_FC_ATTRS |
1680 | help | 1680 | help |
1681 | If you want to access SCSI devices attached to your IBM eServer | 1681 | If you want to access SCSI devices attached to your IBM eServer |
1682 | zSeries by means of Fibre Channel interfaces say Y. | 1682 | zSeries by means of Fibre Channel interfaces say Y. |
@@ -1704,7 +1704,7 @@ config SCSI_PM8001 | |||
1704 | config SCSI_BFA_FC | 1704 | config SCSI_BFA_FC |
1705 | tristate "Brocade BFA Fibre Channel Support" | 1705 | tristate "Brocade BFA Fibre Channel Support" |
1706 | depends on PCI && SCSI | 1706 | depends on PCI && SCSI |
1707 | select SCSI_FC_ATTRS | 1707 | depends on SCSI_FC_ATTRS |
1708 | help | 1708 | help |
1709 | This bfa driver supports all Brocade PCIe FC/FCOE host adapters. | 1709 | This bfa driver supports all Brocade PCIe FC/FCOE host adapters. |
1710 | 1710 | ||
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig index f245d543d7b1..097882882649 100644 --- a/drivers/scsi/bnx2fc/Kconfig +++ b/drivers/scsi/bnx2fc/Kconfig | |||
@@ -1,11 +1,12 @@ | |||
1 | config SCSI_BNX2X_FCOE | 1 | config SCSI_BNX2X_FCOE |
2 | tristate "QLogic NetXtreme II FCoE support" | 2 | tristate "QLogic NetXtreme II FCoE support" |
3 | depends on PCI | 3 | depends on PCI |
4 | depends on (IPV6 || IPV6=n) | ||
5 | depends on LIBFC | ||
6 | depends on LIBFCOE | ||
4 | select NETDEVICES | 7 | select NETDEVICES |
5 | select ETHERNET | 8 | select ETHERNET |
6 | select NET_VENDOR_BROADCOM | 9 | select NET_VENDOR_BROADCOM |
7 | select LIBFC | ||
8 | select LIBFCOE | ||
9 | select CNIC | 10 | select CNIC |
10 | ---help--- | 11 | ---help--- |
11 | This driver supports FCoE offload for the QLogic NetXtreme II | 12 | This driver supports FCoE offload for the QLogic NetXtreme II |
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig index 44ce54e536e5..ba30ff86d581 100644 --- a/drivers/scsi/bnx2i/Kconfig +++ b/drivers/scsi/bnx2i/Kconfig | |||
@@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI | |||
2 | tristate "QLogic NetXtreme II iSCSI support" | 2 | tristate "QLogic NetXtreme II iSCSI support" |
3 | depends on NET | 3 | depends on NET |
4 | depends on PCI | 4 | depends on PCI |
5 | depends on (IPV6 || IPV6=n) | ||
5 | select SCSI_ISCSI_ATTRS | 6 | select SCSI_ISCSI_ATTRS |
6 | select NETDEVICES | 7 | select NETDEVICES |
7 | select ETHERNET | 8 | select ETHERNET |
diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig index 4d03b032aa10..7c7e5085968b 100644 --- a/drivers/scsi/csiostor/Kconfig +++ b/drivers/scsi/csiostor/Kconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | config SCSI_CHELSIO_FCOE | 1 | config SCSI_CHELSIO_FCOE |
2 | tristate "Chelsio Communications FCoE support" | 2 | tristate "Chelsio Communications FCoE support" |
3 | depends on PCI && SCSI | 3 | depends on PCI && SCSI |
4 | select SCSI_FC_ATTRS | 4 | depends on SCSI_FC_ATTRS |
5 | select FW_LOADER | 5 | select FW_LOADER |
6 | help | 6 | help |
7 | This driver supports FCoE Offload functionality over | 7 | This driver supports FCoE Offload functionality over |
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 79788a12712d..02e69e7ee4a3 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
@@ -1647,7 +1647,7 @@ static int cxgbi_inet6addr_handler(struct notifier_block *this, | |||
1647 | if (event_dev->priv_flags & IFF_802_1Q_VLAN) | 1647 | if (event_dev->priv_flags & IFF_802_1Q_VLAN) |
1648 | event_dev = vlan_dev_real_dev(event_dev); | 1648 | event_dev = vlan_dev_real_dev(event_dev); |
1649 | 1649 | ||
1650 | cdev = cxgbi_device_find_by_netdev(event_dev, NULL); | 1650 | cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL); |
1651 | 1651 | ||
1652 | if (!cdev) | 1652 | if (!cdev) |
1653 | return ret; | 1653 | return ret; |
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d65df6dc106f..addd1dddce14 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
@@ -57,6 +57,9 @@ MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); | |||
57 | static LIST_HEAD(cdev_list); | 57 | static LIST_HEAD(cdev_list); |
58 | static DEFINE_MUTEX(cdev_mutex); | 58 | static DEFINE_MUTEX(cdev_mutex); |
59 | 59 | ||
60 | static LIST_HEAD(cdev_rcu_list); | ||
61 | static DEFINE_SPINLOCK(cdev_rcu_lock); | ||
62 | |||
60 | int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, | 63 | int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, |
61 | unsigned int max_conn) | 64 | unsigned int max_conn) |
62 | { | 65 | { |
@@ -142,6 +145,10 @@ struct cxgbi_device *cxgbi_device_register(unsigned int extra, | |||
142 | list_add_tail(&cdev->list_head, &cdev_list); | 145 | list_add_tail(&cdev->list_head, &cdev_list); |
143 | mutex_unlock(&cdev_mutex); | 146 | mutex_unlock(&cdev_mutex); |
144 | 147 | ||
148 | spin_lock(&cdev_rcu_lock); | ||
149 | list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list); | ||
150 | spin_unlock(&cdev_rcu_lock); | ||
151 | |||
145 | log_debug(1 << CXGBI_DBG_DEV, | 152 | log_debug(1 << CXGBI_DBG_DEV, |
146 | "cdev 0x%p, p# %u.\n", cdev, nports); | 153 | "cdev 0x%p, p# %u.\n", cdev, nports); |
147 | return cdev; | 154 | return cdev; |
@@ -153,9 +160,16 @@ void cxgbi_device_unregister(struct cxgbi_device *cdev) | |||
153 | log_debug(1 << CXGBI_DBG_DEV, | 160 | log_debug(1 << CXGBI_DBG_DEV, |
154 | "cdev 0x%p, p# %u,%s.\n", | 161 | "cdev 0x%p, p# %u,%s.\n", |
155 | cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); | 162 | cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); |
163 | |||
156 | mutex_lock(&cdev_mutex); | 164 | mutex_lock(&cdev_mutex); |
157 | list_del(&cdev->list_head); | 165 | list_del(&cdev->list_head); |
158 | mutex_unlock(&cdev_mutex); | 166 | mutex_unlock(&cdev_mutex); |
167 | |||
168 | spin_lock(&cdev_rcu_lock); | ||
169 | list_del_rcu(&cdev->rcu_node); | ||
170 | spin_unlock(&cdev_rcu_lock); | ||
171 | synchronize_rcu(); | ||
172 | |||
159 | cxgbi_device_destroy(cdev); | 173 | cxgbi_device_destroy(cdev); |
160 | } | 174 | } |
161 | EXPORT_SYMBOL_GPL(cxgbi_device_unregister); | 175 | EXPORT_SYMBOL_GPL(cxgbi_device_unregister); |
@@ -167,12 +181,9 @@ void cxgbi_device_unregister_all(unsigned int flag) | |||
167 | mutex_lock(&cdev_mutex); | 181 | mutex_lock(&cdev_mutex); |
168 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { | 182 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { |
169 | if ((cdev->flags & flag) == flag) { | 183 | if ((cdev->flags & flag) == flag) { |
170 | log_debug(1 << CXGBI_DBG_DEV, | 184 | mutex_unlock(&cdev_mutex); |
171 | "cdev 0x%p, p# %u,%s.\n", | 185 | cxgbi_device_unregister(cdev); |
172 | cdev, cdev->nports, cdev->nports ? | 186 | mutex_lock(&cdev_mutex); |
173 | cdev->ports[0]->name : ""); | ||
174 | list_del(&cdev->list_head); | ||
175 | cxgbi_device_destroy(cdev); | ||
176 | } | 187 | } |
177 | } | 188 | } |
178 | mutex_unlock(&cdev_mutex); | 189 | mutex_unlock(&cdev_mutex); |
@@ -191,6 +202,7 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) | |||
191 | } | 202 | } |
192 | } | 203 | } |
193 | mutex_unlock(&cdev_mutex); | 204 | mutex_unlock(&cdev_mutex); |
205 | |||
194 | log_debug(1 << CXGBI_DBG_DEV, | 206 | log_debug(1 << CXGBI_DBG_DEV, |
195 | "lldev 0x%p, NO match found.\n", lldev); | 207 | "lldev 0x%p, NO match found.\n", lldev); |
196 | return NULL; | 208 | return NULL; |
@@ -230,6 +242,39 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, | |||
230 | } | 242 | } |
231 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); | 243 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); |
232 | 244 | ||
245 | struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev, | ||
246 | int *port) | ||
247 | { | ||
248 | struct net_device *vdev = NULL; | ||
249 | struct cxgbi_device *cdev; | ||
250 | int i; | ||
251 | |||
252 | if (ndev->priv_flags & IFF_802_1Q_VLAN) { | ||
253 | vdev = ndev; | ||
254 | ndev = vlan_dev_real_dev(ndev); | ||
255 | pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); | ||
256 | } | ||
257 | |||
258 | rcu_read_lock(); | ||
259 | list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) { | ||
260 | for (i = 0; i < cdev->nports; i++) { | ||
261 | if (ndev == cdev->ports[i]) { | ||
262 | cdev->hbas[i]->vdev = vdev; | ||
263 | rcu_read_unlock(); | ||
264 | if (port) | ||
265 | *port = i; | ||
266 | return cdev; | ||
267 | } | ||
268 | } | ||
269 | } | ||
270 | rcu_read_unlock(); | ||
271 | |||
272 | log_debug(1 << CXGBI_DBG_DEV, | ||
273 | "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); | ||
274 | return NULL; | ||
275 | } | ||
276 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu); | ||
277 | |||
233 | static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, | 278 | static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, |
234 | int *port) | 279 | int *port) |
235 | { | 280 | { |
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index b3e6e7541cc5..1d98fad6a0ab 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h | |||
@@ -527,6 +527,7 @@ struct cxgbi_ports_map { | |||
527 | #define CXGBI_FLAG_IPV4_SET 0x10 | 527 | #define CXGBI_FLAG_IPV4_SET 0x10 |
528 | struct cxgbi_device { | 528 | struct cxgbi_device { |
529 | struct list_head list_head; | 529 | struct list_head list_head; |
530 | struct list_head rcu_node; | ||
530 | unsigned int flags; | 531 | unsigned int flags; |
531 | struct net_device **ports; | 532 | struct net_device **ports; |
532 | void *lldev; | 533 | void *lldev; |
@@ -709,6 +710,8 @@ void cxgbi_device_unregister(struct cxgbi_device *); | |||
709 | void cxgbi_device_unregister_all(unsigned int flag); | 710 | void cxgbi_device_unregister_all(unsigned int flag); |
710 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *); | 711 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *); |
711 | struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *); | 712 | struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *); |
713 | struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *, | ||
714 | int *); | ||
712 | int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int, | 715 | int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int, |
713 | struct scsi_host_template *, | 716 | struct scsi_host_template *, |
714 | struct scsi_transport_template *); | 717 | struct scsi_transport_template *); |
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 23d607218ae8..113e6c9826a1 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | config SCSI_QLA_FC | 1 | config SCSI_QLA_FC |
2 | tristate "QLogic QLA2XXX Fibre Channel Support" | 2 | tristate "QLogic QLA2XXX Fibre Channel Support" |
3 | depends on PCI && SCSI | 3 | depends on PCI && SCSI |
4 | select SCSI_FC_ATTRS | 4 | depends on SCSI_FC_ATTRS |
5 | select FW_LOADER | 5 | select FW_LOADER |
6 | ---help--- | 6 | ---help--- |
7 | This qla2xxx driver supports all QLogic Fibre Channel | 7 | This qla2xxx driver supports all QLogic Fibre Channel |
@@ -31,7 +31,7 @@ config SCSI_QLA_FC | |||
31 | config TCM_QLA2XXX | 31 | config TCM_QLA2XXX |
32 | tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" | 32 | tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" |
33 | depends on SCSI_QLA_FC && TARGET_CORE | 33 | depends on SCSI_QLA_FC && TARGET_CORE |
34 | select LIBFC | 34 | depends on LIBFC |
35 | select BTREE | 35 | select BTREE |
36 | default n | 36 | default n |
37 | ---help--- | 37 | ---help--- |
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index 72f63817a1a0..fe2c2d595f59 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c | |||
@@ -75,8 +75,6 @@ static struct pm_clk_notifier_block platform_bus_notifier = { | |||
75 | .con_ids = { NULL, }, | 75 | .con_ids = { NULL, }, |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static bool default_pm_on; | ||
79 | |||
80 | static int __init sh_pm_runtime_init(void) | 78 | static int __init sh_pm_runtime_init(void) |
81 | { | 79 | { |
82 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { | 80 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { |
@@ -96,16 +94,7 @@ static int __init sh_pm_runtime_init(void) | |||
96 | return 0; | 94 | return 0; |
97 | } | 95 | } |
98 | 96 | ||
99 | default_pm_on = true; | ||
100 | pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); | 97 | pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); |
101 | return 0; | 98 | return 0; |
102 | } | 99 | } |
103 | core_initcall(sh_pm_runtime_init); | 100 | core_initcall(sh_pm_runtime_init); |
104 | |||
105 | static int __init sh_pm_runtime_late_init(void) | ||
106 | { | ||
107 | if (default_pm_on) | ||
108 | pm_genpd_poweroff_unused(); | ||
109 | return 0; | ||
110 | } | ||
111 | late_initcall(sh_pm_runtime_late_init); | ||
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c index 447458e696a9..7e1f120f2b32 100644 --- a/drivers/soc/qcom/qcom_gsbi.c +++ b/drivers/soc/qcom/qcom_gsbi.c | |||
@@ -22,44 +22,63 @@ | |||
22 | #define GSBI_CTRL_REG 0x0000 | 22 | #define GSBI_CTRL_REG 0x0000 |
23 | #define GSBI_PROTOCOL_SHIFT 4 | 23 | #define GSBI_PROTOCOL_SHIFT 4 |
24 | 24 | ||
25 | struct gsbi_info { | ||
26 | struct clk *hclk; | ||
27 | u32 mode; | ||
28 | u32 crci; | ||
29 | }; | ||
30 | |||
25 | static int gsbi_probe(struct platform_device *pdev) | 31 | static int gsbi_probe(struct platform_device *pdev) |
26 | { | 32 | { |
27 | struct device_node *node = pdev->dev.of_node; | 33 | struct device_node *node = pdev->dev.of_node; |
28 | struct resource *res; | 34 | struct resource *res; |
29 | void __iomem *base; | 35 | void __iomem *base; |
30 | struct clk *hclk; | 36 | struct gsbi_info *gsbi; |
31 | u32 mode, crci = 0; | 37 | |
38 | gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL); | ||
39 | |||
40 | if (!gsbi) | ||
41 | return -ENOMEM; | ||
32 | 42 | ||
33 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 43 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
34 | base = devm_ioremap_resource(&pdev->dev, res); | 44 | base = devm_ioremap_resource(&pdev->dev, res); |
35 | if (IS_ERR(base)) | 45 | if (IS_ERR(base)) |
36 | return PTR_ERR(base); | 46 | return PTR_ERR(base); |
37 | 47 | ||
38 | if (of_property_read_u32(node, "qcom,mode", &mode)) { | 48 | if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) { |
39 | dev_err(&pdev->dev, "missing mode configuration\n"); | 49 | dev_err(&pdev->dev, "missing mode configuration\n"); |
40 | return -EINVAL; | 50 | return -EINVAL; |
41 | } | 51 | } |
42 | 52 | ||
43 | /* not required, so default to 0 if not present */ | 53 | /* not required, so default to 0 if not present */ |
44 | of_property_read_u32(node, "qcom,crci", &crci); | 54 | of_property_read_u32(node, "qcom,crci", &gsbi->crci); |
45 | 55 | ||
46 | dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci); | 56 | dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", |
57 | gsbi->mode, gsbi->crci); | ||
58 | gsbi->hclk = devm_clk_get(&pdev->dev, "iface"); | ||
59 | if (IS_ERR(gsbi->hclk)) | ||
60 | return PTR_ERR(gsbi->hclk); | ||
47 | 61 | ||
48 | hclk = devm_clk_get(&pdev->dev, "iface"); | 62 | clk_prepare_enable(gsbi->hclk); |
49 | if (IS_ERR(hclk)) | ||
50 | return PTR_ERR(hclk); | ||
51 | 63 | ||
52 | clk_prepare_enable(hclk); | 64 | writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci, |
53 | |||
54 | writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci, | ||
55 | base + GSBI_CTRL_REG); | 65 | base + GSBI_CTRL_REG); |
56 | 66 | ||
57 | /* make sure the gsbi control write is not reordered */ | 67 | /* make sure the gsbi control write is not reordered */ |
58 | wmb(); | 68 | wmb(); |
59 | 69 | ||
60 | clk_disable_unprepare(hclk); | 70 | platform_set_drvdata(pdev, gsbi); |
71 | |||
72 | return of_platform_populate(node, NULL, NULL, &pdev->dev); | ||
73 | } | ||
74 | |||
75 | static int gsbi_remove(struct platform_device *pdev) | ||
76 | { | ||
77 | struct gsbi_info *gsbi = platform_get_drvdata(pdev); | ||
78 | |||
79 | clk_disable_unprepare(gsbi->hclk); | ||
61 | 80 | ||
62 | return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); | 81 | return 0; |
63 | } | 82 | } |
64 | 83 | ||
65 | static const struct of_device_id gsbi_dt_match[] = { | 84 | static const struct of_device_id gsbi_dt_match[] = { |
@@ -76,6 +95,7 @@ static struct platform_driver gsbi_driver = { | |||
76 | .of_match_table = gsbi_dt_match, | 95 | .of_match_table = gsbi_dt_match, |
77 | }, | 96 | }, |
78 | .probe = gsbi_probe, | 97 | .probe = gsbi_probe, |
98 | .remove = gsbi_remove, | ||
79 | }; | 99 | }; |
80 | 100 | ||
81 | module_platform_driver(gsbi_driver); | 101 | module_platform_driver(gsbi_driver); |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ca935df80c88..3907f1493e7d 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/spi/spi.h> | 35 | #include <linux/spi/spi.h> |
36 | #include <linux/of_gpio.h> | 36 | #include <linux/of_gpio.h> |
37 | #include <linux/pm_runtime.h> | 37 | #include <linux/pm_runtime.h> |
38 | #include <linux/pm_domain.h> | ||
38 | #include <linux/export.h> | 39 | #include <linux/export.h> |
39 | #include <linux/sched/rt.h> | 40 | #include <linux/sched/rt.h> |
40 | #include <linux/delay.h> | 41 | #include <linux/delay.h> |
@@ -264,10 +265,12 @@ static int spi_drv_probe(struct device *dev) | |||
264 | if (ret) | 265 | if (ret) |
265 | return ret; | 266 | return ret; |
266 | 267 | ||
267 | acpi_dev_pm_attach(dev, true); | 268 | ret = dev_pm_domain_attach(dev, true); |
268 | ret = sdrv->probe(to_spi_device(dev)); | 269 | if (ret != -EPROBE_DEFER) { |
269 | if (ret) | 270 | ret = sdrv->probe(to_spi_device(dev)); |
270 | acpi_dev_pm_detach(dev, true); | 271 | if (ret) |
272 | dev_pm_domain_detach(dev, true); | ||
273 | } | ||
271 | 274 | ||
272 | return ret; | 275 | return ret; |
273 | } | 276 | } |
@@ -278,7 +281,7 @@ static int spi_drv_remove(struct device *dev) | |||
278 | int ret; | 281 | int ret; |
279 | 282 | ||
280 | ret = sdrv->remove(to_spi_device(dev)); | 283 | ret = sdrv->remove(to_spi_device(dev)); |
281 | acpi_dev_pm_detach(dev, true); | 284 | dev_pm_domain_detach(dev, true); |
282 | 285 | ||
283 | return ret; | 286 | return ret; |
284 | } | 287 | } |
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 3f42785f653c..9bfa7252f7f9 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
@@ -970,6 +970,13 @@ static struct scsi_host_template uas_host_template = { | |||
970 | .cmd_per_lun = 1, /* until we override it */ | 970 | .cmd_per_lun = 1, /* until we override it */ |
971 | .skip_settle_delay = 1, | 971 | .skip_settle_delay = 1, |
972 | .ordered_tag = 1, | 972 | .ordered_tag = 1, |
973 | |||
974 | /* | ||
975 | * The uas drivers expects tags not to be bigger than the maximum | ||
976 | * per-device queue depth, which is not true with the blk-mq tag | ||
977 | * allocator. | ||
978 | */ | ||
979 | .disable_blk_mq = true, | ||
973 | }; | 980 | }; |
974 | 981 | ||
975 | #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ | 982 | #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ |
diff --git a/fs/buffer.c b/fs/buffer.c index 8f05111bbb8b..3588a80854b2 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -1022,7 +1022,8 @@ grow_dev_page(struct block_device *bdev, sector_t block, | |||
1022 | bh = page_buffers(page); | 1022 | bh = page_buffers(page); |
1023 | if (bh->b_size == size) { | 1023 | if (bh->b_size == size) { |
1024 | end_block = init_page_buffers(page, bdev, | 1024 | end_block = init_page_buffers(page, bdev, |
1025 | index << sizebits, size); | 1025 | (sector_t)index << sizebits, |
1026 | size); | ||
1026 | goto done; | 1027 | goto done; |
1027 | } | 1028 | } |
1028 | if (!try_to_free_buffers(page)) | 1029 | if (!try_to_free_buffers(page)) |
@@ -1043,7 +1044,8 @@ grow_dev_page(struct block_device *bdev, sector_t block, | |||
1043 | */ | 1044 | */ |
1044 | spin_lock(&inode->i_mapping->private_lock); | 1045 | spin_lock(&inode->i_mapping->private_lock); |
1045 | link_dev_buffers(page, bh); | 1046 | link_dev_buffers(page, bh); |
1046 | end_block = init_page_buffers(page, bdev, index << sizebits, size); | 1047 | end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits, |
1048 | size); | ||
1047 | spin_unlock(&inode->i_mapping->private_lock); | 1049 | spin_unlock(&inode->i_mapping->private_lock); |
1048 | done: | 1050 | done: |
1049 | ret = (block < end_block) ? 1 : -ENXIO; | 1051 | ret = (block < end_block) ? 1 : -ENXIO; |
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c index d749731dc0ee..fbb08e97438d 100644 --- a/fs/cachefiles/bind.c +++ b/fs/cachefiles/bind.c | |||
@@ -50,18 +50,18 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) | |||
50 | cache->brun_percent < 100); | 50 | cache->brun_percent < 100); |
51 | 51 | ||
52 | if (*args) { | 52 | if (*args) { |
53 | pr_err("'bind' command doesn't take an argument"); | 53 | pr_err("'bind' command doesn't take an argument\n"); |
54 | return -EINVAL; | 54 | return -EINVAL; |
55 | } | 55 | } |
56 | 56 | ||
57 | if (!cache->rootdirname) { | 57 | if (!cache->rootdirname) { |
58 | pr_err("No cache directory specified"); | 58 | pr_err("No cache directory specified\n"); |
59 | return -EINVAL; | 59 | return -EINVAL; |
60 | } | 60 | } |
61 | 61 | ||
62 | /* don't permit already bound caches to be re-bound */ | 62 | /* don't permit already bound caches to be re-bound */ |
63 | if (test_bit(CACHEFILES_READY, &cache->flags)) { | 63 | if (test_bit(CACHEFILES_READY, &cache->flags)) { |
64 | pr_err("Cache already bound"); | 64 | pr_err("Cache already bound\n"); |
65 | return -EBUSY; | 65 | return -EBUSY; |
66 | } | 66 | } |
67 | 67 | ||
@@ -248,7 +248,7 @@ error_open_root: | |||
248 | kmem_cache_free(cachefiles_object_jar, fsdef); | 248 | kmem_cache_free(cachefiles_object_jar, fsdef); |
249 | error_root_object: | 249 | error_root_object: |
250 | cachefiles_end_secure(cache, saved_cred); | 250 | cachefiles_end_secure(cache, saved_cred); |
251 | pr_err("Failed to register: %d", ret); | 251 | pr_err("Failed to register: %d\n", ret); |
252 | return ret; | 252 | return ret; |
253 | } | 253 | } |
254 | 254 | ||
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index b078d3081d6c..ce1b115dcc28 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c | |||
@@ -315,7 +315,7 @@ static unsigned int cachefiles_daemon_poll(struct file *file, | |||
315 | static int cachefiles_daemon_range_error(struct cachefiles_cache *cache, | 315 | static int cachefiles_daemon_range_error(struct cachefiles_cache *cache, |
316 | char *args) | 316 | char *args) |
317 | { | 317 | { |
318 | pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%"); | 318 | pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n"); |
319 | 319 | ||
320 | return -EINVAL; | 320 | return -EINVAL; |
321 | } | 321 | } |
@@ -475,12 +475,12 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args) | |||
475 | _enter(",%s", args); | 475 | _enter(",%s", args); |
476 | 476 | ||
477 | if (!*args) { | 477 | if (!*args) { |
478 | pr_err("Empty directory specified"); | 478 | pr_err("Empty directory specified\n"); |
479 | return -EINVAL; | 479 | return -EINVAL; |
480 | } | 480 | } |
481 | 481 | ||
482 | if (cache->rootdirname) { | 482 | if (cache->rootdirname) { |
483 | pr_err("Second cache directory specified"); | 483 | pr_err("Second cache directory specified\n"); |
484 | return -EEXIST; | 484 | return -EEXIST; |
485 | } | 485 | } |
486 | 486 | ||
@@ -503,12 +503,12 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args) | |||
503 | _enter(",%s", args); | 503 | _enter(",%s", args); |
504 | 504 | ||
505 | if (!*args) { | 505 | if (!*args) { |
506 | pr_err("Empty security context specified"); | 506 | pr_err("Empty security context specified\n"); |
507 | return -EINVAL; | 507 | return -EINVAL; |
508 | } | 508 | } |
509 | 509 | ||
510 | if (cache->secctx) { | 510 | if (cache->secctx) { |
511 | pr_err("Second security context specified"); | 511 | pr_err("Second security context specified\n"); |
512 | return -EINVAL; | 512 | return -EINVAL; |
513 | } | 513 | } |
514 | 514 | ||
@@ -531,7 +531,7 @@ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args) | |||
531 | _enter(",%s", args); | 531 | _enter(",%s", args); |
532 | 532 | ||
533 | if (!*args) { | 533 | if (!*args) { |
534 | pr_err("Empty tag specified"); | 534 | pr_err("Empty tag specified\n"); |
535 | return -EINVAL; | 535 | return -EINVAL; |
536 | } | 536 | } |
537 | 537 | ||
@@ -562,12 +562,12 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) | |||
562 | goto inval; | 562 | goto inval; |
563 | 563 | ||
564 | if (!test_bit(CACHEFILES_READY, &cache->flags)) { | 564 | if (!test_bit(CACHEFILES_READY, &cache->flags)) { |
565 | pr_err("cull applied to unready cache"); | 565 | pr_err("cull applied to unready cache\n"); |
566 | return -EIO; | 566 | return -EIO; |
567 | } | 567 | } |
568 | 568 | ||
569 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { | 569 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { |
570 | pr_err("cull applied to dead cache"); | 570 | pr_err("cull applied to dead cache\n"); |
571 | return -EIO; | 571 | return -EIO; |
572 | } | 572 | } |
573 | 573 | ||
@@ -587,11 +587,11 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) | |||
587 | 587 | ||
588 | notdir: | 588 | notdir: |
589 | path_put(&path); | 589 | path_put(&path); |
590 | pr_err("cull command requires dirfd to be a directory"); | 590 | pr_err("cull command requires dirfd to be a directory\n"); |
591 | return -ENOTDIR; | 591 | return -ENOTDIR; |
592 | 592 | ||
593 | inval: | 593 | inval: |
594 | pr_err("cull command requires dirfd and filename"); | 594 | pr_err("cull command requires dirfd and filename\n"); |
595 | return -EINVAL; | 595 | return -EINVAL; |
596 | } | 596 | } |
597 | 597 | ||
@@ -614,7 +614,7 @@ static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args) | |||
614 | return 0; | 614 | return 0; |
615 | 615 | ||
616 | inval: | 616 | inval: |
617 | pr_err("debug command requires mask"); | 617 | pr_err("debug command requires mask\n"); |
618 | return -EINVAL; | 618 | return -EINVAL; |
619 | } | 619 | } |
620 | 620 | ||
@@ -634,12 +634,12 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) | |||
634 | goto inval; | 634 | goto inval; |
635 | 635 | ||
636 | if (!test_bit(CACHEFILES_READY, &cache->flags)) { | 636 | if (!test_bit(CACHEFILES_READY, &cache->flags)) { |
637 | pr_err("inuse applied to unready cache"); | 637 | pr_err("inuse applied to unready cache\n"); |
638 | return -EIO; | 638 | return -EIO; |
639 | } | 639 | } |
640 | 640 | ||
641 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { | 641 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { |
642 | pr_err("inuse applied to dead cache"); | 642 | pr_err("inuse applied to dead cache\n"); |
643 | return -EIO; | 643 | return -EIO; |
644 | } | 644 | } |
645 | 645 | ||
@@ -659,11 +659,11 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) | |||
659 | 659 | ||
660 | notdir: | 660 | notdir: |
661 | path_put(&path); | 661 | path_put(&path); |
662 | pr_err("inuse command requires dirfd to be a directory"); | 662 | pr_err("inuse command requires dirfd to be a directory\n"); |
663 | return -ENOTDIR; | 663 | return -ENOTDIR; |
664 | 664 | ||
665 | inval: | 665 | inval: |
666 | pr_err("inuse command requires dirfd and filename"); | 666 | pr_err("inuse command requires dirfd and filename\n"); |
667 | return -EINVAL; | 667 | return -EINVAL; |
668 | } | 668 | } |
669 | 669 | ||
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 3d50998abf57..8c52472d2efa 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h | |||
@@ -255,7 +255,7 @@ extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache, | |||
255 | 255 | ||
256 | #define cachefiles_io_error(___cache, FMT, ...) \ | 256 | #define cachefiles_io_error(___cache, FMT, ...) \ |
257 | do { \ | 257 | do { \ |
258 | pr_err("I/O Error: " FMT, ##__VA_ARGS__); \ | 258 | pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \ |
259 | fscache_io_error(&(___cache)->cache); \ | 259 | fscache_io_error(&(___cache)->cache); \ |
260 | set_bit(CACHEFILES_DEAD, &(___cache)->flags); \ | 260 | set_bit(CACHEFILES_DEAD, &(___cache)->flags); \ |
261 | } while (0) | 261 | } while (0) |
diff --git a/fs/cachefiles/main.c b/fs/cachefiles/main.c index 180edfb45f66..711f13d8c2de 100644 --- a/fs/cachefiles/main.c +++ b/fs/cachefiles/main.c | |||
@@ -84,7 +84,7 @@ error_proc: | |||
84 | error_object_jar: | 84 | error_object_jar: |
85 | misc_deregister(&cachefiles_dev); | 85 | misc_deregister(&cachefiles_dev); |
86 | error_dev: | 86 | error_dev: |
87 | pr_err("failed to register: %d", ret); | 87 | pr_err("failed to register: %d\n", ret); |
88 | return ret; | 88 | return ret; |
89 | } | 89 | } |
90 | 90 | ||
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 5bf2b41e66d3..dad7d9542a24 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c | |||
@@ -543,7 +543,7 @@ lookup_again: | |||
543 | next, next->d_inode, next->d_inode->i_ino); | 543 | next, next->d_inode, next->d_inode->i_ino); |
544 | 544 | ||
545 | } else if (!S_ISDIR(next->d_inode->i_mode)) { | 545 | } else if (!S_ISDIR(next->d_inode->i_mode)) { |
546 | pr_err("inode %lu is not a directory", | 546 | pr_err("inode %lu is not a directory\n", |
547 | next->d_inode->i_ino); | 547 | next->d_inode->i_ino); |
548 | ret = -ENOBUFS; | 548 | ret = -ENOBUFS; |
549 | goto error; | 549 | goto error; |
@@ -574,7 +574,7 @@ lookup_again: | |||
574 | } else if (!S_ISDIR(next->d_inode->i_mode) && | 574 | } else if (!S_ISDIR(next->d_inode->i_mode) && |
575 | !S_ISREG(next->d_inode->i_mode) | 575 | !S_ISREG(next->d_inode->i_mode) |
576 | ) { | 576 | ) { |
577 | pr_err("inode %lu is not a file or directory", | 577 | pr_err("inode %lu is not a file or directory\n", |
578 | next->d_inode->i_ino); | 578 | next->d_inode->i_ino); |
579 | ret = -ENOBUFS; | 579 | ret = -ENOBUFS; |
580 | goto error; | 580 | goto error; |
@@ -768,7 +768,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, | |||
768 | ASSERT(subdir->d_inode); | 768 | ASSERT(subdir->d_inode); |
769 | 769 | ||
770 | if (!S_ISDIR(subdir->d_inode->i_mode)) { | 770 | if (!S_ISDIR(subdir->d_inode->i_mode)) { |
771 | pr_err("%s is not a directory", dirname); | 771 | pr_err("%s is not a directory\n", dirname); |
772 | ret = -EIO; | 772 | ret = -EIO; |
773 | goto check_error; | 773 | goto check_error; |
774 | } | 774 | } |
@@ -779,7 +779,8 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, | |||
779 | !subdir->d_inode->i_op->lookup || | 779 | !subdir->d_inode->i_op->lookup || |
780 | !subdir->d_inode->i_op->mkdir || | 780 | !subdir->d_inode->i_op->mkdir || |
781 | !subdir->d_inode->i_op->create || | 781 | !subdir->d_inode->i_op->create || |
782 | !subdir->d_inode->i_op->rename || | 782 | (!subdir->d_inode->i_op->rename && |
783 | !subdir->d_inode->i_op->rename2) || | ||
783 | !subdir->d_inode->i_op->rmdir || | 784 | !subdir->d_inode->i_op->rmdir || |
784 | !subdir->d_inode->i_op->unlink) | 785 | !subdir->d_inode->i_op->unlink) |
785 | goto check_error; | 786 | goto check_error; |
@@ -795,13 +796,13 @@ check_error: | |||
795 | mkdir_error: | 796 | mkdir_error: |
796 | mutex_unlock(&dir->d_inode->i_mutex); | 797 | mutex_unlock(&dir->d_inode->i_mutex); |
797 | dput(subdir); | 798 | dput(subdir); |
798 | pr_err("mkdir %s failed with error %d", dirname, ret); | 799 | pr_err("mkdir %s failed with error %d\n", dirname, ret); |
799 | return ERR_PTR(ret); | 800 | return ERR_PTR(ret); |
800 | 801 | ||
801 | lookup_error: | 802 | lookup_error: |
802 | mutex_unlock(&dir->d_inode->i_mutex); | 803 | mutex_unlock(&dir->d_inode->i_mutex); |
803 | ret = PTR_ERR(subdir); | 804 | ret = PTR_ERR(subdir); |
804 | pr_err("Lookup %s failed with error %d", dirname, ret); | 805 | pr_err("Lookup %s failed with error %d\n", dirname, ret); |
805 | return ERR_PTR(ret); | 806 | return ERR_PTR(ret); |
806 | 807 | ||
807 | nomem_d_alloc: | 808 | nomem_d_alloc: |
@@ -891,7 +892,7 @@ lookup_error: | |||
891 | if (ret == -EIO) { | 892 | if (ret == -EIO) { |
892 | cachefiles_io_error(cache, "Lookup failed"); | 893 | cachefiles_io_error(cache, "Lookup failed"); |
893 | } else if (ret != -ENOMEM) { | 894 | } else if (ret != -ENOMEM) { |
894 | pr_err("Internal error: %d", ret); | 895 | pr_err("Internal error: %d\n", ret); |
895 | ret = -EIO; | 896 | ret = -EIO; |
896 | } | 897 | } |
897 | 898 | ||
@@ -950,7 +951,7 @@ error: | |||
950 | } | 951 | } |
951 | 952 | ||
952 | if (ret != -ENOMEM) { | 953 | if (ret != -ENOMEM) { |
953 | pr_err("Internal error: %d", ret); | 954 | pr_err("Internal error: %d\n", ret); |
954 | ret = -EIO; | 955 | ret = -EIO; |
955 | } | 956 | } |
956 | 957 | ||
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 4b1fb5ca65b8..25e745b8eb1b 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c | |||
@@ -151,7 +151,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op) | |||
151 | struct cachefiles_one_read *monitor; | 151 | struct cachefiles_one_read *monitor; |
152 | struct cachefiles_object *object; | 152 | struct cachefiles_object *object; |
153 | struct fscache_retrieval *op; | 153 | struct fscache_retrieval *op; |
154 | struct pagevec pagevec; | ||
155 | int error, max; | 154 | int error, max; |
156 | 155 | ||
157 | op = container_of(_op, struct fscache_retrieval, op); | 156 | op = container_of(_op, struct fscache_retrieval, op); |
@@ -160,8 +159,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op) | |||
160 | 159 | ||
161 | _enter("{ino=%lu}", object->backer->d_inode->i_ino); | 160 | _enter("{ino=%lu}", object->backer->d_inode->i_ino); |
162 | 161 | ||
163 | pagevec_init(&pagevec, 0); | ||
164 | |||
165 | max = 8; | 162 | max = 8; |
166 | spin_lock_irq(&object->work_lock); | 163 | spin_lock_irq(&object->work_lock); |
167 | 164 | ||
@@ -396,7 +393,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, | |||
396 | { | 393 | { |
397 | struct cachefiles_object *object; | 394 | struct cachefiles_object *object; |
398 | struct cachefiles_cache *cache; | 395 | struct cachefiles_cache *cache; |
399 | struct pagevec pagevec; | ||
400 | struct inode *inode; | 396 | struct inode *inode; |
401 | sector_t block0, block; | 397 | sector_t block0, block; |
402 | unsigned shift; | 398 | unsigned shift; |
@@ -427,8 +423,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, | |||
427 | op->op.flags |= FSCACHE_OP_ASYNC; | 423 | op->op.flags |= FSCACHE_OP_ASYNC; |
428 | op->op.processor = cachefiles_read_copier; | 424 | op->op.processor = cachefiles_read_copier; |
429 | 425 | ||
430 | pagevec_init(&pagevec, 0); | ||
431 | |||
432 | /* we assume the absence or presence of the first block is a good | 426 | /* we assume the absence or presence of the first block is a good |
433 | * enough indication for the page as a whole | 427 | * enough indication for the page as a whole |
434 | * - TODO: don't use bmap() for this as it is _not_ actually good | 428 | * - TODO: don't use bmap() for this as it is _not_ actually good |
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c index 1ad51ffbb275..acbc1f094fb1 100644 --- a/fs/cachefiles/xattr.c +++ b/fs/cachefiles/xattr.c | |||
@@ -51,7 +51,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object) | |||
51 | } | 51 | } |
52 | 52 | ||
53 | if (ret != -EEXIST) { | 53 | if (ret != -EEXIST) { |
54 | pr_err("Can't set xattr on %*.*s [%lu] (err %d)", | 54 | pr_err("Can't set xattr on %*.*s [%lu] (err %d)\n", |
55 | dentry->d_name.len, dentry->d_name.len, | 55 | dentry->d_name.len, dentry->d_name.len, |
56 | dentry->d_name.name, dentry->d_inode->i_ino, | 56 | dentry->d_name.name, dentry->d_inode->i_ino, |
57 | -ret); | 57 | -ret); |
@@ -64,7 +64,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object) | |||
64 | if (ret == -ERANGE) | 64 | if (ret == -ERANGE) |
65 | goto bad_type_length; | 65 | goto bad_type_length; |
66 | 66 | ||
67 | pr_err("Can't read xattr on %*.*s [%lu] (err %d)", | 67 | pr_err("Can't read xattr on %*.*s [%lu] (err %d)\n", |
68 | dentry->d_name.len, dentry->d_name.len, | 68 | dentry->d_name.len, dentry->d_name.len, |
69 | dentry->d_name.name, dentry->d_inode->i_ino, | 69 | dentry->d_name.name, dentry->d_inode->i_ino, |
70 | -ret); | 70 | -ret); |
@@ -85,14 +85,14 @@ error: | |||
85 | return ret; | 85 | return ret; |
86 | 86 | ||
87 | bad_type_length: | 87 | bad_type_length: |
88 | pr_err("Cache object %lu type xattr length incorrect", | 88 | pr_err("Cache object %lu type xattr length incorrect\n", |
89 | dentry->d_inode->i_ino); | 89 | dentry->d_inode->i_ino); |
90 | ret = -EIO; | 90 | ret = -EIO; |
91 | goto error; | 91 | goto error; |
92 | 92 | ||
93 | bad_type: | 93 | bad_type: |
94 | xtype[2] = 0; | 94 | xtype[2] = 0; |
95 | pr_err("Cache object %*.*s [%lu] type %s not %s", | 95 | pr_err("Cache object %*.*s [%lu] type %s not %s\n", |
96 | dentry->d_name.len, dentry->d_name.len, | 96 | dentry->d_name.len, dentry->d_name.len, |
97 | dentry->d_name.name, dentry->d_inode->i_ino, | 97 | dentry->d_name.name, dentry->d_inode->i_ino, |
98 | xtype, type); | 98 | xtype, type); |
@@ -293,7 +293,7 @@ error: | |||
293 | return ret; | 293 | return ret; |
294 | 294 | ||
295 | bad_type_length: | 295 | bad_type_length: |
296 | pr_err("Cache object %lu xattr length incorrect", | 296 | pr_err("Cache object %lu xattr length incorrect\n", |
297 | dentry->d_inode->i_ino); | 297 | dentry->d_inode->i_ino); |
298 | ret = -EIO; | 298 | ret = -EIO; |
299 | goto error; | 299 | goto error; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 7c018a1c52f7..5f29354b072a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -3568,15 +3568,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
3568 | lru_cache_add_file(page); | 3568 | lru_cache_add_file(page); |
3569 | unlock_page(page); | 3569 | unlock_page(page); |
3570 | page_cache_release(page); | 3570 | page_cache_release(page); |
3571 | if (rc == -EAGAIN) | ||
3572 | list_add_tail(&page->lru, &tmplist); | ||
3573 | } | 3571 | } |
3572 | /* Fallback to the readpage in error/reconnect cases */ | ||
3574 | kref_put(&rdata->refcount, cifs_readdata_release); | 3573 | kref_put(&rdata->refcount, cifs_readdata_release); |
3575 | if (rc == -EAGAIN) { | ||
3576 | /* Re-add pages to the page_list and retry */ | ||
3577 | list_splice(&tmplist, page_list); | ||
3578 | continue; | ||
3579 | } | ||
3580 | break; | 3574 | break; |
3581 | } | 3575 | } |
3582 | 3576 | ||
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 1a6df4b03f67..52131d8cb4d5 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -586,7 +586,7 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
586 | tmprc = CIFS_open(xid, &oparms, &oplock, NULL); | 586 | tmprc = CIFS_open(xid, &oparms, &oplock, NULL); |
587 | if (tmprc == -EOPNOTSUPP) | 587 | if (tmprc == -EOPNOTSUPP) |
588 | *symlink = true; | 588 | *symlink = true; |
589 | else | 589 | else if (tmprc == 0) |
590 | CIFSSMBClose(xid, tcon, fid.netfid); | 590 | CIFSSMBClose(xid, tcon, fid.netfid); |
591 | } | 591 | } |
592 | 592 | ||
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c index af59d03db492..8257a5a97cc0 100644 --- a/fs/cifs/smb2maperror.c +++ b/fs/cifs/smb2maperror.c | |||
@@ -256,6 +256,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = { | |||
256 | {STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO, | 256 | {STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO, |
257 | "STATUS_DLL_MIGHT_BE_INCOMPATIBLE"}, | 257 | "STATUS_DLL_MIGHT_BE_INCOMPATIBLE"}, |
258 | {STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"}, | 258 | {STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"}, |
259 | {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EOPNOTSUPP, | ||
260 | "STATUS_REPARSE_NOT_HANDLED"}, | ||
259 | {STATUS_DEVICE_REQUIRES_CLEANING, -EIO, | 261 | {STATUS_DEVICE_REQUIRES_CLEANING, -EIO, |
260 | "STATUS_DEVICE_REQUIRES_CLEANING"}, | 262 | "STATUS_DEVICE_REQUIRES_CLEANING"}, |
261 | {STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"}, | 263 | {STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"}, |
diff --git a/fs/dcache.c b/fs/dcache.c index 7a5b51440afa..cb25a1a5e307 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -2372,7 +2372,8 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name) | |||
2372 | } | 2372 | } |
2373 | EXPORT_SYMBOL(dentry_update_name_case); | 2373 | EXPORT_SYMBOL(dentry_update_name_case); |
2374 | 2374 | ||
2375 | static void switch_names(struct dentry *dentry, struct dentry *target) | 2375 | static void switch_names(struct dentry *dentry, struct dentry *target, |
2376 | bool exchange) | ||
2376 | { | 2377 | { |
2377 | if (dname_external(target)) { | 2378 | if (dname_external(target)) { |
2378 | if (dname_external(dentry)) { | 2379 | if (dname_external(dentry)) { |
@@ -2406,13 +2407,19 @@ static void switch_names(struct dentry *dentry, struct dentry *target) | |||
2406 | */ | 2407 | */ |
2407 | unsigned int i; | 2408 | unsigned int i; |
2408 | BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); | 2409 | BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); |
2410 | if (!exchange) { | ||
2411 | memcpy(dentry->d_iname, target->d_name.name, | ||
2412 | target->d_name.len + 1); | ||
2413 | dentry->d_name.hash_len = target->d_name.hash_len; | ||
2414 | return; | ||
2415 | } | ||
2409 | for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { | 2416 | for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { |
2410 | swap(((long *) &dentry->d_iname)[i], | 2417 | swap(((long *) &dentry->d_iname)[i], |
2411 | ((long *) &target->d_iname)[i]); | 2418 | ((long *) &target->d_iname)[i]); |
2412 | } | 2419 | } |
2413 | } | 2420 | } |
2414 | } | 2421 | } |
2415 | swap(dentry->d_name.len, target->d_name.len); | 2422 | swap(dentry->d_name.hash_len, target->d_name.hash_len); |
2416 | } | 2423 | } |
2417 | 2424 | ||
2418 | static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) | 2425 | static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) |
@@ -2442,25 +2449,29 @@ static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) | |||
2442 | } | 2449 | } |
2443 | } | 2450 | } |
2444 | 2451 | ||
2445 | static void dentry_unlock_parents_for_move(struct dentry *dentry, | 2452 | static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target) |
2446 | struct dentry *target) | ||
2447 | { | 2453 | { |
2448 | if (target->d_parent != dentry->d_parent) | 2454 | if (target->d_parent != dentry->d_parent) |
2449 | spin_unlock(&dentry->d_parent->d_lock); | 2455 | spin_unlock(&dentry->d_parent->d_lock); |
2450 | if (target->d_parent != target) | 2456 | if (target->d_parent != target) |
2451 | spin_unlock(&target->d_parent->d_lock); | 2457 | spin_unlock(&target->d_parent->d_lock); |
2458 | spin_unlock(&target->d_lock); | ||
2459 | spin_unlock(&dentry->d_lock); | ||
2452 | } | 2460 | } |
2453 | 2461 | ||
2454 | /* | 2462 | /* |
2455 | * When switching names, the actual string doesn't strictly have to | 2463 | * When switching names, the actual string doesn't strictly have to |
2456 | * be preserved in the target - because we're dropping the target | 2464 | * be preserved in the target - because we're dropping the target |
2457 | * anyway. As such, we can just do a simple memcpy() to copy over | 2465 | * anyway. As such, we can just do a simple memcpy() to copy over |
2458 | * the new name before we switch. | 2466 | * the new name before we switch, unless we are going to rehash |
2459 | * | 2467 | * it. Note that if we *do* unhash the target, we are not allowed |
2460 | * Note that we have to be a lot more careful about getting the hash | 2468 | * to rehash it without giving it a new name/hash key - whether |
2461 | * switched - we have to switch the hash value properly even if it | 2469 | * we swap or overwrite the names here, resulting name won't match |
2462 | * then no longer matches the actual (corrupted) string of the target. | 2470 | * the reality in filesystem; it's only there for d_path() purposes. |
2463 | * The hash value has to match the hash queue that the dentry is on.. | 2471 | * Note that all of this is happening under rename_lock, so the |
2472 | * any hash lookup seeing it in the middle of manipulations will | ||
2473 | * be discarded anyway. So we do not care what happens to the hash | ||
2474 | * key in that case. | ||
2464 | */ | 2475 | */ |
2465 | /* | 2476 | /* |
2466 | * __d_move - move a dentry | 2477 | * __d_move - move a dentry |
@@ -2506,36 +2517,30 @@ static void __d_move(struct dentry *dentry, struct dentry *target, | |||
2506 | d_hash(dentry->d_parent, dentry->d_name.hash)); | 2517 | d_hash(dentry->d_parent, dentry->d_name.hash)); |
2507 | } | 2518 | } |
2508 | 2519 | ||
2509 | list_del(&dentry->d_u.d_child); | ||
2510 | list_del(&target->d_u.d_child); | ||
2511 | |||
2512 | /* Switch the names.. */ | 2520 | /* Switch the names.. */ |
2513 | switch_names(dentry, target); | 2521 | switch_names(dentry, target, exchange); |
2514 | swap(dentry->d_name.hash, target->d_name.hash); | ||
2515 | 2522 | ||
2516 | /* ... and switch the parents */ | 2523 | /* ... and switch them in the tree */ |
2517 | if (IS_ROOT(dentry)) { | 2524 | if (IS_ROOT(dentry)) { |
2525 | /* splicing a tree */ | ||
2518 | dentry->d_parent = target->d_parent; | 2526 | dentry->d_parent = target->d_parent; |
2519 | target->d_parent = target; | 2527 | target->d_parent = target; |
2520 | INIT_LIST_HEAD(&target->d_u.d_child); | 2528 | list_del_init(&target->d_u.d_child); |
2529 | list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); | ||
2521 | } else { | 2530 | } else { |
2531 | /* swapping two dentries */ | ||
2522 | swap(dentry->d_parent, target->d_parent); | 2532 | swap(dentry->d_parent, target->d_parent); |
2523 | 2533 | list_move(&target->d_u.d_child, &target->d_parent->d_subdirs); | |
2524 | /* And add them back to the (new) parent lists */ | 2534 | list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); |
2525 | list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); | 2535 | if (exchange) |
2536 | fsnotify_d_move(target); | ||
2537 | fsnotify_d_move(dentry); | ||
2526 | } | 2538 | } |
2527 | 2539 | ||
2528 | list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); | ||
2529 | |||
2530 | write_seqcount_end(&target->d_seq); | 2540 | write_seqcount_end(&target->d_seq); |
2531 | write_seqcount_end(&dentry->d_seq); | 2541 | write_seqcount_end(&dentry->d_seq); |
2532 | 2542 | ||
2533 | dentry_unlock_parents_for_move(dentry, target); | 2543 | dentry_unlock_for_move(dentry, target); |
2534 | if (exchange) | ||
2535 | fsnotify_d_move(target); | ||
2536 | spin_unlock(&target->d_lock); | ||
2537 | fsnotify_d_move(dentry); | ||
2538 | spin_unlock(&dentry->d_lock); | ||
2539 | } | 2544 | } |
2540 | 2545 | ||
2541 | /* | 2546 | /* |
@@ -2633,45 +2638,6 @@ out_err: | |||
2633 | return ret; | 2638 | return ret; |
2634 | } | 2639 | } |
2635 | 2640 | ||
2636 | /* | ||
2637 | * Prepare an anonymous dentry for life in the superblock's dentry tree as a | ||
2638 | * named dentry in place of the dentry to be replaced. | ||
2639 | * returns with anon->d_lock held! | ||
2640 | */ | ||
2641 | static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) | ||
2642 | { | ||
2643 | struct dentry *dparent; | ||
2644 | |||
2645 | dentry_lock_for_move(anon, dentry); | ||
2646 | |||
2647 | write_seqcount_begin(&dentry->d_seq); | ||
2648 | write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED); | ||
2649 | |||
2650 | dparent = dentry->d_parent; | ||
2651 | |||
2652 | switch_names(dentry, anon); | ||
2653 | swap(dentry->d_name.hash, anon->d_name.hash); | ||
2654 | |||
2655 | dentry->d_parent = dentry; | ||
2656 | list_del_init(&dentry->d_u.d_child); | ||
2657 | anon->d_parent = dparent; | ||
2658 | if (likely(!d_unhashed(anon))) { | ||
2659 | hlist_bl_lock(&anon->d_sb->s_anon); | ||
2660 | __hlist_bl_del(&anon->d_hash); | ||
2661 | anon->d_hash.pprev = NULL; | ||
2662 | hlist_bl_unlock(&anon->d_sb->s_anon); | ||
2663 | } | ||
2664 | list_move(&anon->d_u.d_child, &dparent->d_subdirs); | ||
2665 | |||
2666 | write_seqcount_end(&dentry->d_seq); | ||
2667 | write_seqcount_end(&anon->d_seq); | ||
2668 | |||
2669 | dentry_unlock_parents_for_move(anon, dentry); | ||
2670 | spin_unlock(&dentry->d_lock); | ||
2671 | |||
2672 | /* anon->d_lock still locked, returns locked */ | ||
2673 | } | ||
2674 | |||
2675 | /** | 2641 | /** |
2676 | * d_splice_alias - splice a disconnected dentry into the tree if one exists | 2642 | * d_splice_alias - splice a disconnected dentry into the tree if one exists |
2677 | * @inode: the inode which may have a disconnected dentry | 2643 | * @inode: the inode which may have a disconnected dentry |
@@ -2717,10 +2683,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) | |||
2717 | return ERR_PTR(-EIO); | 2683 | return ERR_PTR(-EIO); |
2718 | } | 2684 | } |
2719 | write_seqlock(&rename_lock); | 2685 | write_seqlock(&rename_lock); |
2720 | __d_materialise_dentry(dentry, new); | 2686 | __d_move(new, dentry, false); |
2721 | write_sequnlock(&rename_lock); | 2687 | write_sequnlock(&rename_lock); |
2722 | _d_rehash(new); | ||
2723 | spin_unlock(&new->d_lock); | ||
2724 | spin_unlock(&inode->i_lock); | 2688 | spin_unlock(&inode->i_lock); |
2725 | security_d_instantiate(new, inode); | 2689 | security_d_instantiate(new, inode); |
2726 | iput(inode); | 2690 | iput(inode); |
@@ -2780,7 +2744,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
2780 | } else if (IS_ROOT(alias)) { | 2744 | } else if (IS_ROOT(alias)) { |
2781 | /* Is this an anonymous mountpoint that we | 2745 | /* Is this an anonymous mountpoint that we |
2782 | * could splice into our tree? */ | 2746 | * could splice into our tree? */ |
2783 | __d_materialise_dentry(dentry, alias); | 2747 | __d_move(alias, dentry, false); |
2784 | write_sequnlock(&rename_lock); | 2748 | write_sequnlock(&rename_lock); |
2785 | goto found; | 2749 | goto found; |
2786 | } else { | 2750 | } else { |
@@ -2807,13 +2771,9 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
2807 | actual = __d_instantiate_unique(dentry, inode); | 2771 | actual = __d_instantiate_unique(dentry, inode); |
2808 | if (!actual) | 2772 | if (!actual) |
2809 | actual = dentry; | 2773 | actual = dentry; |
2810 | else | ||
2811 | BUG_ON(!d_unhashed(actual)); | ||
2812 | 2774 | ||
2813 | spin_lock(&actual->d_lock); | 2775 | d_rehash(actual); |
2814 | found: | 2776 | found: |
2815 | _d_rehash(actual); | ||
2816 | spin_unlock(&actual->d_lock); | ||
2817 | spin_unlock(&inode->i_lock); | 2777 | spin_unlock(&inode->i_lock); |
2818 | out_nolock: | 2778 | out_nolock: |
2819 | if (actual == dentry) { | 2779 | if (actual == dentry) { |
diff --git a/fs/direct-io.c b/fs/direct-io.c index c3116404ab49..e181b6b2e297 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -158,7 +158,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) | |||
158 | { | 158 | { |
159 | ssize_t ret; | 159 | ssize_t ret; |
160 | 160 | ||
161 | ret = iov_iter_get_pages(sdio->iter, dio->pages, DIO_PAGES, | 161 | ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, |
162 | &sdio->from); | 162 | &sdio->from); |
163 | 163 | ||
164 | if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) { | 164 | if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) { |
diff --git a/fs/fscache/object.c b/fs/fscache/object.c index d3b4539f1651..da032daf0e0d 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c | |||
@@ -982,6 +982,7 @@ nomem: | |||
982 | submit_op_failed: | 982 | submit_op_failed: |
983 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | 983 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); |
984 | spin_unlock(&cookie->lock); | 984 | spin_unlock(&cookie->lock); |
985 | fscache_unuse_cookie(object); | ||
985 | kfree(op); | 986 | kfree(op); |
986 | _leave(" [EIO]"); | 987 | _leave(" [EIO]"); |
987 | return transit_to(KILL_OBJECT); | 988 | return transit_to(KILL_OBJECT); |
diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 85332b9d19d1..de33b3fccca6 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c | |||
@@ -44,6 +44,19 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa | |||
44 | EXPORT_SYMBOL(__fscache_wait_on_page_write); | 44 | EXPORT_SYMBOL(__fscache_wait_on_page_write); |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * wait for a page to finish being written to the cache. Put a timeout here | ||
48 | * since we might be called recursively via parent fs. | ||
49 | */ | ||
50 | static | ||
51 | bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page) | ||
52 | { | ||
53 | wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0); | ||
54 | |||
55 | return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page), | ||
56 | HZ); | ||
57 | } | ||
58 | |||
59 | /* | ||
47 | * decide whether a page can be released, possibly by cancelling a store to it | 60 | * decide whether a page can be released, possibly by cancelling a store to it |
48 | * - we're allowed to sleep if __GFP_WAIT is flagged | 61 | * - we're allowed to sleep if __GFP_WAIT is flagged |
49 | */ | 62 | */ |
@@ -115,7 +128,10 @@ page_busy: | |||
115 | } | 128 | } |
116 | 129 | ||
117 | fscache_stat(&fscache_n_store_vmscan_wait); | 130 | fscache_stat(&fscache_n_store_vmscan_wait); |
118 | __fscache_wait_on_page_write(cookie, page); | 131 | if (!release_page_wait_timeout(cookie, page)) |
132 | _debug("fscache writeout timeout page: %p{%lx}", | ||
133 | page, page->index); | ||
134 | |||
119 | gfp &= ~__GFP_WAIT; | 135 | gfp &= ~__GFP_WAIT; |
120 | goto try_again; | 136 | goto try_again; |
121 | } | 137 | } |
@@ -182,7 +198,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) | |||
182 | { | 198 | { |
183 | struct fscache_operation *op; | 199 | struct fscache_operation *op; |
184 | struct fscache_object *object; | 200 | struct fscache_object *object; |
185 | bool wake_cookie; | 201 | bool wake_cookie = false; |
186 | 202 | ||
187 | _enter("%p", cookie); | 203 | _enter("%p", cookie); |
188 | 204 | ||
@@ -212,15 +228,16 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) | |||
212 | 228 | ||
213 | __fscache_use_cookie(cookie); | 229 | __fscache_use_cookie(cookie); |
214 | if (fscache_submit_exclusive_op(object, op) < 0) | 230 | if (fscache_submit_exclusive_op(object, op) < 0) |
215 | goto nobufs; | 231 | goto nobufs_dec; |
216 | spin_unlock(&cookie->lock); | 232 | spin_unlock(&cookie->lock); |
217 | fscache_stat(&fscache_n_attr_changed_ok); | 233 | fscache_stat(&fscache_n_attr_changed_ok); |
218 | fscache_put_operation(op); | 234 | fscache_put_operation(op); |
219 | _leave(" = 0"); | 235 | _leave(" = 0"); |
220 | return 0; | 236 | return 0; |
221 | 237 | ||
222 | nobufs: | 238 | nobufs_dec: |
223 | wake_cookie = __fscache_unuse_cookie(cookie); | 239 | wake_cookie = __fscache_unuse_cookie(cookie); |
240 | nobufs: | ||
224 | spin_unlock(&cookie->lock); | 241 | spin_unlock(&cookie->lock); |
225 | kfree(op); | 242 | kfree(op); |
226 | if (wake_cookie) | 243 | if (wake_cookie) |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 912061ac4baf..caa8d95b24e8 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1305,6 +1305,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, | |||
1305 | size_t start; | 1305 | size_t start; |
1306 | ssize_t ret = iov_iter_get_pages(ii, | 1306 | ssize_t ret = iov_iter_get_pages(ii, |
1307 | &req->pages[req->num_pages], | 1307 | &req->pages[req->num_pages], |
1308 | *nbytesp - nbytes, | ||
1308 | req->max_pages - req->num_pages, | 1309 | req->max_pages - req->num_pages, |
1309 | &start); | 1310 | &start); |
1310 | if (ret < 0) | 1311 | if (ret < 0) |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index e94457c33ad6..b01f6e100ee8 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -3104,7 +3104,8 @@ static __be32 nfsd4_encode_splice_read( | |||
3104 | 3104 | ||
3105 | buf->page_len = maxcount; | 3105 | buf->page_len = maxcount; |
3106 | buf->len += maxcount; | 3106 | buf->len += maxcount; |
3107 | xdr->page_ptr += (maxcount + PAGE_SIZE - 1) / PAGE_SIZE; | 3107 | xdr->page_ptr += (buf->page_base + maxcount + PAGE_SIZE - 1) |
3108 | / PAGE_SIZE; | ||
3108 | 3109 | ||
3109 | /* Use rest of head for padding and remaining ops: */ | 3110 | /* Use rest of head for padding and remaining ops: */ |
3110 | buf->tail[0].iov_base = xdr->p; | 3111 | buf->tail[0].iov_base = xdr->p; |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 6252b173a465..d071e7f23de2 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/buffer_head.h> | 24 | #include <linux/buffer_head.h> |
25 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
26 | #include <linux/mpage.h> | 26 | #include <linux/mpage.h> |
27 | #include <linux/pagemap.h> | ||
27 | #include <linux/writeback.h> | 28 | #include <linux/writeback.h> |
28 | #include <linux/aio.h> | 29 | #include <linux/aio.h> |
29 | #include "nilfs.h" | 30 | #include "nilfs.h" |
@@ -219,10 +220,10 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc) | |||
219 | 220 | ||
220 | static int nilfs_set_page_dirty(struct page *page) | 221 | static int nilfs_set_page_dirty(struct page *page) |
221 | { | 222 | { |
223 | struct inode *inode = page->mapping->host; | ||
222 | int ret = __set_page_dirty_nobuffers(page); | 224 | int ret = __set_page_dirty_nobuffers(page); |
223 | 225 | ||
224 | if (page_has_buffers(page)) { | 226 | if (page_has_buffers(page)) { |
225 | struct inode *inode = page->mapping->host; | ||
226 | unsigned nr_dirty = 0; | 227 | unsigned nr_dirty = 0; |
227 | struct buffer_head *bh, *head; | 228 | struct buffer_head *bh, *head; |
228 | 229 | ||
@@ -245,6 +246,10 @@ static int nilfs_set_page_dirty(struct page *page) | |||
245 | 246 | ||
246 | if (nr_dirty) | 247 | if (nr_dirty) |
247 | nilfs_set_file_dirty(inode, nr_dirty); | 248 | nilfs_set_file_dirty(inode, nr_dirty); |
249 | } else if (ret) { | ||
250 | unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); | ||
251 | |||
252 | nilfs_set_file_dirty(inode, nr_dirty); | ||
248 | } | 253 | } |
249 | return ret; | 254 | return ret; |
250 | } | 255 | } |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 3ec906ef5d9a..12ba682fc53c 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -655,12 +655,9 @@ void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, | |||
655 | clear_bit(bit, res->refmap); | 655 | clear_bit(bit, res->refmap); |
656 | } | 656 | } |
657 | 657 | ||
658 | 658 | static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | |
659 | void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | ||
660 | struct dlm_lock_resource *res) | 659 | struct dlm_lock_resource *res) |
661 | { | 660 | { |
662 | assert_spin_locked(&res->spinlock); | ||
663 | |||
664 | res->inflight_locks++; | 661 | res->inflight_locks++; |
665 | 662 | ||
666 | mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, | 663 | mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, |
@@ -668,6 +665,13 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | |||
668 | __builtin_return_address(0)); | 665 | __builtin_return_address(0)); |
669 | } | 666 | } |
670 | 667 | ||
668 | void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | ||
669 | struct dlm_lock_resource *res) | ||
670 | { | ||
671 | assert_spin_locked(&res->spinlock); | ||
672 | __dlm_lockres_grab_inflight_ref(dlm, res); | ||
673 | } | ||
674 | |||
671 | void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, | 675 | void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, |
672 | struct dlm_lock_resource *res) | 676 | struct dlm_lock_resource *res) |
673 | { | 677 | { |
@@ -894,10 +898,8 @@ lookup: | |||
894 | /* finally add the lockres to its hash bucket */ | 898 | /* finally add the lockres to its hash bucket */ |
895 | __dlm_insert_lockres(dlm, res); | 899 | __dlm_insert_lockres(dlm, res); |
896 | 900 | ||
897 | /* Grab inflight ref to pin the resource */ | 901 | /* since this lockres is new it doesn't not require the spinlock */ |
898 | spin_lock(&res->spinlock); | 902 | __dlm_lockres_grab_inflight_ref(dlm, res); |
899 | dlm_lockres_grab_inflight_ref(dlm, res); | ||
900 | spin_unlock(&res->spinlock); | ||
901 | 903 | ||
902 | /* get an extra ref on the mle in case this is a BLOCK | 904 | /* get an extra ref on the mle in case this is a BLOCK |
903 | * if so, the creator of the BLOCK may try to put the last | 905 | * if so, the creator of the BLOCK may try to put the last |
@@ -2037,6 +2039,10 @@ kill: | |||
2037 | "and killing the other node now! This node is OK and can continue.\n"); | 2039 | "and killing the other node now! This node is OK and can continue.\n"); |
2038 | __dlm_print_one_lock_resource(res); | 2040 | __dlm_print_one_lock_resource(res); |
2039 | spin_unlock(&res->spinlock); | 2041 | spin_unlock(&res->spinlock); |
2042 | spin_lock(&dlm->master_lock); | ||
2043 | if (mle) | ||
2044 | __dlm_put_mle(mle); | ||
2045 | spin_unlock(&dlm->master_lock); | ||
2040 | spin_unlock(&dlm->spinlock); | 2046 | spin_unlock(&dlm->spinlock); |
2041 | *ret_data = (void *)res; | 2047 | *ret_data = (void *)res; |
2042 | dlm_put(dlm); | 2048 | dlm_put(dlm); |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index ddb662b32447..4142546aedae 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -2532,6 +2532,7 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb) | |||
2532 | kfree(osb->journal); | 2532 | kfree(osb->journal); |
2533 | kfree(osb->local_alloc_copy); | 2533 | kfree(osb->local_alloc_copy); |
2534 | kfree(osb->uuid_str); | 2534 | kfree(osb->uuid_str); |
2535 | kfree(osb->vol_label); | ||
2535 | ocfs2_put_dlm_debug(osb->osb_dlm_debug); | 2536 | ocfs2_put_dlm_debug(osb->osb_dlm_debug); |
2536 | memset(osb, 0, sizeof(struct ocfs2_super)); | 2537 | memset(osb, 0, sizeof(struct ocfs2_super)); |
2537 | } | 2538 | } |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index dfc791c42d64..c34156888d70 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -931,23 +931,32 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, | |||
931 | while (addr < end) { | 931 | while (addr < end) { |
932 | struct vm_area_struct *vma = find_vma(walk->mm, addr); | 932 | struct vm_area_struct *vma = find_vma(walk->mm, addr); |
933 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); | 933 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); |
934 | unsigned long vm_end; | 934 | /* End of address space hole, which we mark as non-present. */ |
935 | unsigned long hole_end; | ||
935 | 936 | ||
936 | if (!vma) { | 937 | if (vma) |
937 | vm_end = end; | 938 | hole_end = min(end, vma->vm_start); |
938 | } else { | 939 | else |
939 | vm_end = min(end, vma->vm_end); | 940 | hole_end = end; |
940 | if (vma->vm_flags & VM_SOFTDIRTY) | 941 | |
941 | pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY); | 942 | for (; addr < hole_end; addr += PAGE_SIZE) { |
943 | err = add_to_pagemap(addr, &pme, pm); | ||
944 | if (err) | ||
945 | goto out; | ||
942 | } | 946 | } |
943 | 947 | ||
944 | for (; addr < vm_end; addr += PAGE_SIZE) { | 948 | if (!vma) |
949 | break; | ||
950 | |||
951 | /* Addresses in the VMA. */ | ||
952 | if (vma->vm_flags & VM_SOFTDIRTY) | ||
953 | pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY); | ||
954 | for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { | ||
945 | err = add_to_pagemap(addr, &pme, pm); | 955 | err = add_to_pagemap(addr, &pme, pm); |
946 | if (err) | 956 | if (err) |
947 | goto out; | 957 | goto out; |
948 | } | 958 | } |
949 | } | 959 | } |
950 | |||
951 | out: | 960 | out: |
952 | return err; | 961 | return err; |
953 | } | 962 | } |
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index a9cc75ffa925..7caa01652888 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c | |||
@@ -298,7 +298,10 @@ cg_found: | |||
298 | ufsi->i_oeftflag = 0; | 298 | ufsi->i_oeftflag = 0; |
299 | ufsi->i_dir_start_lookup = 0; | 299 | ufsi->i_dir_start_lookup = 0; |
300 | memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1)); | 300 | memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1)); |
301 | insert_inode_hash(inode); | 301 | if (insert_inode_locked(inode) < 0) { |
302 | err = -EIO; | ||
303 | goto failed; | ||
304 | } | ||
302 | mark_inode_dirty(inode); | 305 | mark_inode_dirty(inode); |
303 | 306 | ||
304 | if (uspi->fs_magic == UFS2_MAGIC) { | 307 | if (uspi->fs_magic == UFS2_MAGIC) { |
@@ -337,6 +340,7 @@ cg_found: | |||
337 | fail_remove_inode: | 340 | fail_remove_inode: |
338 | unlock_ufs(sb); | 341 | unlock_ufs(sb); |
339 | clear_nlink(inode); | 342 | clear_nlink(inode); |
343 | unlock_new_inode(inode); | ||
340 | iput(inode); | 344 | iput(inode); |
341 | UFSD("EXIT (FAILED): err %d\n", err); | 345 | UFSD("EXIT (FAILED): err %d\n", err); |
342 | return ERR_PTR(err); | 346 | return ERR_PTR(err); |
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 2df62a73f20c..fd65deb4b5f0 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
@@ -38,10 +38,12 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) | |||
38 | { | 38 | { |
39 | int err = ufs_add_link(dentry, inode); | 39 | int err = ufs_add_link(dentry, inode); |
40 | if (!err) { | 40 | if (!err) { |
41 | unlock_new_inode(inode); | ||
41 | d_instantiate(dentry, inode); | 42 | d_instantiate(dentry, inode); |
42 | return 0; | 43 | return 0; |
43 | } | 44 | } |
44 | inode_dec_link_count(inode); | 45 | inode_dec_link_count(inode); |
46 | unlock_new_inode(inode); | ||
45 | iput(inode); | 47 | iput(inode); |
46 | return err; | 48 | return err; |
47 | } | 49 | } |
@@ -155,6 +157,7 @@ out_notlocked: | |||
155 | 157 | ||
156 | out_fail: | 158 | out_fail: |
157 | inode_dec_link_count(inode); | 159 | inode_dec_link_count(inode); |
160 | unlock_new_inode(inode); | ||
158 | iput(inode); | 161 | iput(inode); |
159 | goto out; | 162 | goto out; |
160 | } | 163 | } |
@@ -210,6 +213,7 @@ out: | |||
210 | out_fail: | 213 | out_fail: |
211 | inode_dec_link_count(inode); | 214 | inode_dec_link_count(inode); |
212 | inode_dec_link_count(inode); | 215 | inode_dec_link_count(inode); |
216 | unlock_new_inode(inode); | ||
213 | iput (inode); | 217 | iput (inode); |
214 | inode_dec_link_count(dir); | 218 | inode_dec_link_count(dir); |
215 | unlock_ufs(dir->i_sb); | 219 | unlock_ufs(dir->i_sb); |
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index c728113374f5..f97804bdf1ff 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h | |||
@@ -59,6 +59,10 @@ | |||
59 | #define METHOD_NAME__PRS "_PRS" | 59 | #define METHOD_NAME__PRS "_PRS" |
60 | #define METHOD_NAME__PRT "_PRT" | 60 | #define METHOD_NAME__PRT "_PRT" |
61 | #define METHOD_NAME__PRW "_PRW" | 61 | #define METHOD_NAME__PRW "_PRW" |
62 | #define METHOD_NAME__PS0 "_PS0" | ||
63 | #define METHOD_NAME__PS1 "_PS1" | ||
64 | #define METHOD_NAME__PS2 "_PS2" | ||
65 | #define METHOD_NAME__PS3 "_PS3" | ||
62 | #define METHOD_NAME__REG "_REG" | 66 | #define METHOD_NAME__REG "_REG" |
63 | #define METHOD_NAME__SB_ "_SB_" | 67 | #define METHOD_NAME__SB_ "_SB_" |
64 | #define METHOD_NAME__SEG "_SEG" | 68 | #define METHOD_NAME__SEG "_SEG" |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index d91e59b79f0d..57ee0528aacb 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -118,6 +118,7 @@ struct acpi_device; | |||
118 | struct acpi_hotplug_profile { | 118 | struct acpi_hotplug_profile { |
119 | struct kobject kobj; | 119 | struct kobject kobj; |
120 | int (*scan_dependent)(struct acpi_device *adev); | 120 | int (*scan_dependent)(struct acpi_device *adev); |
121 | void (*notify_online)(struct acpi_device *adev); | ||
121 | bool enabled:1; | 122 | bool enabled:1; |
122 | bool demand_offline:1; | 123 | bool demand_offline:1; |
123 | }; | 124 | }; |
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index b7c89d47efbe..9fc1d71c82bc 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h | |||
@@ -46,7 +46,7 @@ | |||
46 | 46 | ||
47 | /* Current ACPICA subsystem version in YYYYMMDD format */ | 47 | /* Current ACPICA subsystem version in YYYYMMDD format */ |
48 | 48 | ||
49 | #define ACPI_CA_VERSION 0x20140724 | 49 | #define ACPI_CA_VERSION 0x20140828 |
50 | 50 | ||
51 | #include <acpi/acconfig.h> | 51 | #include <acpi/acconfig.h> |
52 | #include <acpi/actypes.h> | 52 | #include <acpi/actypes.h> |
@@ -692,6 +692,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status | |||
692 | *event_status)) | 692 | *event_status)) |
693 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) | 693 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) |
694 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) | 694 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) |
695 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) | ||
695 | 696 | ||
696 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status | 697 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status |
697 | acpi_get_gpe_device(u32 gpe_index, | 698 | acpi_get_gpe_device(u32 gpe_index, |
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 7626bfeac2cb..29e79370641d 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h | |||
@@ -952,7 +952,8 @@ enum acpi_srat_type { | |||
952 | ACPI_SRAT_TYPE_CPU_AFFINITY = 0, | 952 | ACPI_SRAT_TYPE_CPU_AFFINITY = 0, |
953 | ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, | 953 | ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, |
954 | ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, | 954 | ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, |
955 | ACPI_SRAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */ | 955 | ACPI_SRAT_TYPE_GICC_AFFINITY = 3, |
956 | ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */ | ||
956 | }; | 957 | }; |
957 | 958 | ||
958 | /* | 959 | /* |
@@ -968,7 +969,7 @@ struct acpi_srat_cpu_affinity { | |||
968 | u32 flags; | 969 | u32 flags; |
969 | u8 local_sapic_eid; | 970 | u8 local_sapic_eid; |
970 | u8 proximity_domain_hi[3]; | 971 | u8 proximity_domain_hi[3]; |
971 | u32 reserved; /* Reserved, must be zero */ | 972 | u32 clock_domain; |
972 | }; | 973 | }; |
973 | 974 | ||
974 | /* Flags */ | 975 | /* Flags */ |
@@ -1010,6 +1011,20 @@ struct acpi_srat_x2apic_cpu_affinity { | |||
1010 | 1011 | ||
1011 | #define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ | 1012 | #define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ |
1012 | 1013 | ||
1014 | /* 3: GICC Affinity (ACPI 5.1) */ | ||
1015 | |||
1016 | struct acpi_srat_gicc_affinity { | ||
1017 | struct acpi_subtable_header header; | ||
1018 | u32 proximity_domain; | ||
1019 | u32 acpi_processor_uid; | ||
1020 | u32 flags; | ||
1021 | u32 clock_domain; | ||
1022 | }; | ||
1023 | |||
1024 | /* Flags for struct acpi_srat_gicc_affinity */ | ||
1025 | |||
1026 | #define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */ | ||
1027 | |||
1013 | /* Reset to default packing */ | 1028 | /* Reset to default packing */ |
1014 | 1029 | ||
1015 | #pragma pack() | 1030 | #pragma pack() |
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index 787bcc814463..5480cb2236bf 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h | |||
@@ -310,10 +310,15 @@ struct acpi_gtdt_timer_entry { | |||
310 | u32 common_flags; | 310 | u32 common_flags; |
311 | }; | 311 | }; |
312 | 312 | ||
313 | /* Flag Definitions: timer_flags and virtual_timer_flags above */ | ||
314 | |||
315 | #define ACPI_GTDT_GT_IRQ_MODE (1) | ||
316 | #define ACPI_GTDT_GT_IRQ_POLARITY (1<<1) | ||
317 | |||
313 | /* Flag Definitions: common_flags above */ | 318 | /* Flag Definitions: common_flags above */ |
314 | 319 | ||
315 | #define ACPI_GTDT_GT_IS_SECURE_TIMER (1) | 320 | #define ACPI_GTDT_GT_IS_SECURE_TIMER (1) |
316 | #define ACPI_GTDT_GT_ALWAYS_ON (1<<1) | 321 | #define ACPI_GTDT_GT_ALWAYS_ON (1<<1) |
317 | 322 | ||
318 | /* 1: SBSA Generic Watchdog Structure */ | 323 | /* 1: SBSA Generic Watchdog Structure */ |
319 | 324 | ||
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 807cbc46d73e..b7926bb9b444 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -587,7 +587,6 @@ static inline int acpi_subsys_freeze(struct device *dev) { return 0; } | |||
587 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) | 587 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) |
588 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev); | 588 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev); |
589 | int acpi_dev_pm_attach(struct device *dev, bool power_on); | 589 | int acpi_dev_pm_attach(struct device *dev, bool power_on); |
590 | void acpi_dev_pm_detach(struct device *dev, bool power_off); | ||
591 | #else | 590 | #else |
592 | static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) | 591 | static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) |
593 | { | 592 | { |
@@ -597,7 +596,6 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) | |||
597 | { | 596 | { |
598 | return -ENODEV; | 597 | return -ENODEV; |
599 | } | 598 | } |
600 | static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {} | ||
601 | #endif | 599 | #endif |
602 | 600 | ||
603 | #ifdef CONFIG_ACPI | 601 | #ifdef CONFIG_ACPI |
diff --git a/include/linux/ccp.h b/include/linux/ccp.h index ebcc9d146219..7f437036baa4 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h | |||
@@ -27,6 +27,13 @@ struct ccp_cmd; | |||
27 | defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE) | 27 | defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE) |
28 | 28 | ||
29 | /** | 29 | /** |
30 | * ccp_present - check if a CCP device is present | ||
31 | * | ||
32 | * Returns zero if a CCP device is present, -ENODEV otherwise. | ||
33 | */ | ||
34 | int ccp_present(void); | ||
35 | |||
36 | /** | ||
30 | * ccp_enqueue_cmd - queue an operation for processing by the CCP | 37 | * ccp_enqueue_cmd - queue an operation for processing by the CCP |
31 | * | 38 | * |
32 | * @cmd: ccp_cmd struct to be processed | 39 | * @cmd: ccp_cmd struct to be processed |
@@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd); | |||
53 | 60 | ||
54 | #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */ | 61 | #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */ |
55 | 62 | ||
63 | static inline int ccp_present(void) | ||
64 | { | ||
65 | return -ENODEV; | ||
66 | } | ||
67 | |||
56 | static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) | 68 | static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) |
57 | { | 69 | { |
58 | return -ENODEV; | 70 | return -ENODEV; |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 7d1955afa62c..138336b6bb04 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -112,6 +112,9 @@ struct cpufreq_policy { | |||
112 | spinlock_t transition_lock; | 112 | spinlock_t transition_lock; |
113 | wait_queue_head_t transition_wait; | 113 | wait_queue_head_t transition_wait; |
114 | struct task_struct *transition_task; /* Task which is doing the transition */ | 114 | struct task_struct *transition_task; /* Task which is doing the transition */ |
115 | |||
116 | /* For cpufreq driver's internal use */ | ||
117 | void *driver_data; | ||
115 | }; | 118 | }; |
116 | 119 | ||
117 | /* Only for ACPI */ | 120 | /* Only for ACPI */ |
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index ade2390ffe92..6e39c9bb0dae 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
@@ -93,12 +93,12 @@ extern int cpuset_slab_spread_node(void); | |||
93 | 93 | ||
94 | static inline int cpuset_do_page_mem_spread(void) | 94 | static inline int cpuset_do_page_mem_spread(void) |
95 | { | 95 | { |
96 | return current->flags & PF_SPREAD_PAGE; | 96 | return task_spread_page(current); |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline int cpuset_do_slab_mem_spread(void) | 99 | static inline int cpuset_do_slab_mem_spread(void) |
100 | { | 100 | { |
101 | return current->flags & PF_SPREAD_SLAB; | 101 | return task_spread_slab(current); |
102 | } | 102 | } |
103 | 103 | ||
104 | extern int current_cpuset_is_being_rebound(void); | 104 | extern int current_cpuset_is_being_rebound(void); |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a95efeb53a8b..b556e0ab946f 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -577,20 +577,4 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node | |||
577 | } | 577 | } |
578 | #endif /* CONFIG_OF */ | 578 | #endif /* CONFIG_OF */ |
579 | 579 | ||
580 | #ifdef CONFIG_ACPI | ||
581 | void acpi_i2c_register_devices(struct i2c_adapter *adap); | ||
582 | #else | ||
583 | static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { } | ||
584 | #endif /* CONFIG_ACPI */ | ||
585 | |||
586 | #ifdef CONFIG_ACPI_I2C_OPREGION | ||
587 | int acpi_i2c_install_space_handler(struct i2c_adapter *adapter); | ||
588 | void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter); | ||
589 | #else | ||
590 | static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
591 | { } | ||
592 | static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
593 | { return 0; } | ||
594 | #endif /* CONFIG_ACPI_I2C_OPREGION */ | ||
595 | |||
596 | #endif /* _LINUX_I2C_H */ | 580 | #endif /* _LINUX_I2C_H */ |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 698ad053d064..69517a24bc50 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id); | |||
193 | /* The following three functions are for the core kernel use only. */ | 193 | /* The following three functions are for the core kernel use only. */ |
194 | extern void suspend_device_irqs(void); | 194 | extern void suspend_device_irqs(void); |
195 | extern void resume_device_irqs(void); | 195 | extern void resume_device_irqs(void); |
196 | #ifdef CONFIG_PM_SLEEP | ||
197 | extern int check_wakeup_irqs(void); | ||
198 | #else | ||
199 | static inline int check_wakeup_irqs(void) { return 0; } | ||
200 | #endif | ||
201 | 196 | ||
202 | /** | 197 | /** |
203 | * struct irq_affinity_notify - context for notification of IRQ affinity changes | 198 | * struct irq_affinity_notify - context for notification of IRQ affinity changes |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 62af59242ddc..03f48d936f66 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -173,6 +173,7 @@ struct irq_data { | |||
173 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt | 173 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt |
174 | * IRQD_IRQ_MASKED - Masked state of the interrupt | 174 | * IRQD_IRQ_MASKED - Masked state of the interrupt |
175 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt | 175 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt |
176 | * IRQD_WAKEUP_ARMED - Wakeup mode armed | ||
176 | */ | 177 | */ |
177 | enum { | 178 | enum { |
178 | IRQD_TRIGGER_MASK = 0xf, | 179 | IRQD_TRIGGER_MASK = 0xf, |
@@ -186,6 +187,7 @@ enum { | |||
186 | IRQD_IRQ_DISABLED = (1 << 16), | 187 | IRQD_IRQ_DISABLED = (1 << 16), |
187 | IRQD_IRQ_MASKED = (1 << 17), | 188 | IRQD_IRQ_MASKED = (1 << 17), |
188 | IRQD_IRQ_INPROGRESS = (1 << 18), | 189 | IRQD_IRQ_INPROGRESS = (1 << 18), |
190 | IRQD_WAKEUP_ARMED = (1 << 19), | ||
189 | }; | 191 | }; |
190 | 192 | ||
191 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) | 193 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) |
@@ -257,6 +259,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d) | |||
257 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; | 259 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; |
258 | } | 260 | } |
259 | 261 | ||
262 | static inline bool irqd_is_wakeup_armed(struct irq_data *d) | ||
263 | { | ||
264 | return d->state_use_accessors & IRQD_WAKEUP_ARMED; | ||
265 | } | ||
266 | |||
267 | |||
260 | /* | 268 | /* |
261 | * Functions for chained handlers which can be enabled/disabled by the | 269 | * Functions for chained handlers which can be enabled/disabled by the |
262 | * standard disable_irq/enable_irq calls. Must be called with | 270 | * standard disable_irq/enable_irq calls. Must be called with |
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 472c021a2d4f..cb1a31e448ae 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
@@ -36,6 +36,11 @@ struct irq_desc; | |||
36 | * @threads_oneshot: bitfield to handle shared oneshot threads | 36 | * @threads_oneshot: bitfield to handle shared oneshot threads |
37 | * @threads_active: number of irqaction threads currently running | 37 | * @threads_active: number of irqaction threads currently running |
38 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | 38 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers |
39 | * @nr_actions: number of installed actions on this descriptor | ||
40 | * @no_suspend_depth: number of irqactions on a irq descriptor with | ||
41 | * IRQF_NO_SUSPEND set | ||
42 | * @force_resume_depth: number of irqactions on a irq descriptor with | ||
43 | * IRQF_FORCE_RESUME set | ||
39 | * @dir: /proc/irq/ procfs entry | 44 | * @dir: /proc/irq/ procfs entry |
40 | * @name: flow handler name for /proc/interrupts output | 45 | * @name: flow handler name for /proc/interrupts output |
41 | */ | 46 | */ |
@@ -68,6 +73,11 @@ struct irq_desc { | |||
68 | unsigned long threads_oneshot; | 73 | unsigned long threads_oneshot; |
69 | atomic_t threads_active; | 74 | atomic_t threads_active; |
70 | wait_queue_head_t wait_for_threads; | 75 | wait_queue_head_t wait_for_threads; |
76 | #ifdef CONFIG_PM_SLEEP | ||
77 | unsigned int nr_actions; | ||
78 | unsigned int no_suspend_depth; | ||
79 | unsigned int force_resume_depth; | ||
80 | #endif | ||
71 | #ifdef CONFIG_PROC_FS | 81 | #ifdef CONFIG_PROC_FS |
72 | struct proc_dir_entry *dir; | 82 | struct proc_dir_entry *dir; |
73 | #endif | 83 | #endif |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 511c6e0d21a9..a5b7d7cfcedf 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -209,6 +209,7 @@ enum { | |||
209 | MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, | 209 | MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, |
210 | MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, | 210 | MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, |
211 | MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, | 211 | MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, |
212 | MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28, | ||
212 | }; | 213 | }; |
213 | 214 | ||
214 | enum mlx4_event { | 215 | enum mlx4_event { |
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 7040dc98ff8b..5f4e36cf0091 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -56,7 +56,8 @@ enum mlx4_qp_optpar { | |||
56 | MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, | 56 | MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, |
57 | MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, | 57 | MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, |
58 | MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16, | 58 | MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16, |
59 | MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20 | 59 | MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20, |
60 | MLX4_QP_OPTPAR_VLAN_STRIPPING = 1 << 21, | ||
60 | }; | 61 | }; |
61 | 62 | ||
62 | enum mlx4_qp_state { | 63 | enum mlx4_qp_state { |
@@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg { | |||
423 | 424 | ||
424 | enum mlx4_update_qp_attr { | 425 | enum mlx4_update_qp_attr { |
425 | MLX4_UPDATE_QP_SMAC = 1 << 0, | 426 | MLX4_UPDATE_QP_SMAC = 1 << 0, |
427 | MLX4_UPDATE_QP_VSD = 1 << 2, | ||
428 | MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 | ||
429 | }; | ||
430 | |||
431 | enum mlx4_update_qp_params_flags { | ||
432 | MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0, | ||
426 | }; | 433 | }; |
427 | 434 | ||
428 | struct mlx4_update_qp_params { | 435 | struct mlx4_update_qp_params { |
429 | u8 smac_index; | 436 | u8 smac_index; |
437 | u32 flags; | ||
430 | }; | 438 | }; |
431 | 439 | ||
432 | int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, | 440 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
433 | enum mlx4_update_qp_attr attr, | 441 | enum mlx4_update_qp_attr attr, |
434 | struct mlx4_update_qp_params *params); | 442 | struct mlx4_update_qp_params *params); |
435 | int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 443 | int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 3dfbf237cd8f..ef5894ca8e50 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
@@ -71,6 +71,7 @@ void percpu_ref_reinit(struct percpu_ref *ref); | |||
71 | void percpu_ref_exit(struct percpu_ref *ref); | 71 | void percpu_ref_exit(struct percpu_ref *ref); |
72 | void percpu_ref_kill_and_confirm(struct percpu_ref *ref, | 72 | void percpu_ref_kill_and_confirm(struct percpu_ref *ref, |
73 | percpu_ref_func_t *confirm_kill); | 73 | percpu_ref_func_t *confirm_kill); |
74 | void __percpu_ref_kill_expedited(struct percpu_ref *ref); | ||
74 | 75 | ||
75 | /** | 76 | /** |
76 | * percpu_ref_kill - drop the initial ref | 77 | * percpu_ref_kill - drop the initial ref |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 72c0fe098a27..383fd68aaee1 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -619,6 +619,7 @@ extern int dev_pm_put_subsys_data(struct device *dev); | |||
619 | */ | 619 | */ |
620 | struct dev_pm_domain { | 620 | struct dev_pm_domain { |
621 | struct dev_pm_ops ops; | 621 | struct dev_pm_ops ops; |
622 | void (*detach)(struct device *dev, bool power_off); | ||
622 | }; | 623 | }; |
623 | 624 | ||
624 | /* | 625 | /* |
@@ -679,12 +680,16 @@ struct dev_pm_domain { | |||
679 | extern void device_pm_lock(void); | 680 | extern void device_pm_lock(void); |
680 | extern void dpm_resume_start(pm_message_t state); | 681 | extern void dpm_resume_start(pm_message_t state); |
681 | extern void dpm_resume_end(pm_message_t state); | 682 | extern void dpm_resume_end(pm_message_t state); |
683 | extern void dpm_resume_noirq(pm_message_t state); | ||
684 | extern void dpm_resume_early(pm_message_t state); | ||
682 | extern void dpm_resume(pm_message_t state); | 685 | extern void dpm_resume(pm_message_t state); |
683 | extern void dpm_complete(pm_message_t state); | 686 | extern void dpm_complete(pm_message_t state); |
684 | 687 | ||
685 | extern void device_pm_unlock(void); | 688 | extern void device_pm_unlock(void); |
686 | extern int dpm_suspend_end(pm_message_t state); | 689 | extern int dpm_suspend_end(pm_message_t state); |
687 | extern int dpm_suspend_start(pm_message_t state); | 690 | extern int dpm_suspend_start(pm_message_t state); |
691 | extern int dpm_suspend_noirq(pm_message_t state); | ||
692 | extern int dpm_suspend_late(pm_message_t state); | ||
688 | extern int dpm_suspend(pm_message_t state); | 693 | extern int dpm_suspend(pm_message_t state); |
689 | extern int dpm_prepare(pm_message_t state); | 694 | extern int dpm_prepare(pm_message_t state); |
690 | 695 | ||
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index ebc4c76ffb73..73e938b7e937 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
@@ -35,18 +35,10 @@ struct gpd_dev_ops { | |||
35 | int (*stop)(struct device *dev); | 35 | int (*stop)(struct device *dev); |
36 | int (*save_state)(struct device *dev); | 36 | int (*save_state)(struct device *dev); |
37 | int (*restore_state)(struct device *dev); | 37 | int (*restore_state)(struct device *dev); |
38 | int (*suspend)(struct device *dev); | ||
39 | int (*suspend_late)(struct device *dev); | ||
40 | int (*resume_early)(struct device *dev); | ||
41 | int (*resume)(struct device *dev); | ||
42 | int (*freeze)(struct device *dev); | ||
43 | int (*freeze_late)(struct device *dev); | ||
44 | int (*thaw_early)(struct device *dev); | ||
45 | int (*thaw)(struct device *dev); | ||
46 | bool (*active_wakeup)(struct device *dev); | 38 | bool (*active_wakeup)(struct device *dev); |
47 | }; | 39 | }; |
48 | 40 | ||
49 | struct gpd_cpu_data { | 41 | struct gpd_cpuidle_data { |
50 | unsigned int saved_exit_latency; | 42 | unsigned int saved_exit_latency; |
51 | struct cpuidle_state *idle_state; | 43 | struct cpuidle_state *idle_state; |
52 | }; | 44 | }; |
@@ -71,7 +63,6 @@ struct generic_pm_domain { | |||
71 | unsigned int suspended_count; /* System suspend device counter */ | 63 | unsigned int suspended_count; /* System suspend device counter */ |
72 | unsigned int prepared_count; /* Suspend counter of prepared devices */ | 64 | unsigned int prepared_count; /* Suspend counter of prepared devices */ |
73 | bool suspend_power_off; /* Power status before system suspend */ | 65 | bool suspend_power_off; /* Power status before system suspend */ |
74 | bool dev_irq_safe; /* Device callbacks are IRQ-safe */ | ||
75 | int (*power_off)(struct generic_pm_domain *domain); | 66 | int (*power_off)(struct generic_pm_domain *domain); |
76 | s64 power_off_latency_ns; | 67 | s64 power_off_latency_ns; |
77 | int (*power_on)(struct generic_pm_domain *domain); | 68 | int (*power_on)(struct generic_pm_domain *domain); |
@@ -80,8 +71,9 @@ struct generic_pm_domain { | |||
80 | s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ | 71 | s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ |
81 | bool max_off_time_changed; | 72 | bool max_off_time_changed; |
82 | bool cached_power_down_ok; | 73 | bool cached_power_down_ok; |
83 | struct device_node *of_node; /* Node in device tree */ | 74 | struct gpd_cpuidle_data *cpuidle_data; |
84 | struct gpd_cpu_data *cpu_data; | 75 | void (*attach_dev)(struct device *dev); |
76 | void (*detach_dev)(struct device *dev); | ||
85 | }; | 77 | }; |
86 | 78 | ||
87 | static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) | 79 | static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) |
@@ -108,7 +100,6 @@ struct gpd_timing_data { | |||
108 | 100 | ||
109 | struct generic_pm_domain_data { | 101 | struct generic_pm_domain_data { |
110 | struct pm_domain_data base; | 102 | struct pm_domain_data base; |
111 | struct gpd_dev_ops ops; | ||
112 | struct gpd_timing_data td; | 103 | struct gpd_timing_data td; |
113 | struct notifier_block nb; | 104 | struct notifier_block nb; |
114 | struct mutex lock; | 105 | struct mutex lock; |
@@ -127,17 +118,11 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) | |||
127 | return to_gpd_data(dev->power.subsys_data->domain_data); | 118 | return to_gpd_data(dev->power.subsys_data->domain_data); |
128 | } | 119 | } |
129 | 120 | ||
130 | extern struct dev_power_governor simple_qos_governor; | ||
131 | |||
132 | extern struct generic_pm_domain *dev_to_genpd(struct device *dev); | 121 | extern struct generic_pm_domain *dev_to_genpd(struct device *dev); |
133 | extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, | 122 | extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, |
134 | struct device *dev, | 123 | struct device *dev, |
135 | struct gpd_timing_data *td); | 124 | struct gpd_timing_data *td); |
136 | 125 | ||
137 | extern int __pm_genpd_of_add_device(struct device_node *genpd_node, | ||
138 | struct device *dev, | ||
139 | struct gpd_timing_data *td); | ||
140 | |||
141 | extern int __pm_genpd_name_add_device(const char *domain_name, | 126 | extern int __pm_genpd_name_add_device(const char *domain_name, |
142 | struct device *dev, | 127 | struct device *dev, |
143 | struct gpd_timing_data *td); | 128 | struct gpd_timing_data *td); |
@@ -151,10 +136,6 @@ extern int pm_genpd_add_subdomain_names(const char *master_name, | |||
151 | const char *subdomain_name); | 136 | const char *subdomain_name); |
152 | extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 137 | extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
153 | struct generic_pm_domain *target); | 138 | struct generic_pm_domain *target); |
154 | extern int pm_genpd_add_callbacks(struct device *dev, | ||
155 | struct gpd_dev_ops *ops, | ||
156 | struct gpd_timing_data *td); | ||
157 | extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td); | ||
158 | extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); | 139 | extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); |
159 | extern int pm_genpd_name_attach_cpuidle(const char *name, int state); | 140 | extern int pm_genpd_name_attach_cpuidle(const char *name, int state); |
160 | extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); | 141 | extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); |
@@ -165,8 +146,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, | |||
165 | extern int pm_genpd_poweron(struct generic_pm_domain *genpd); | 146 | extern int pm_genpd_poweron(struct generic_pm_domain *genpd); |
166 | extern int pm_genpd_name_poweron(const char *domain_name); | 147 | extern int pm_genpd_name_poweron(const char *domain_name); |
167 | 148 | ||
168 | extern bool default_stop_ok(struct device *dev); | 149 | extern struct dev_power_governor simple_qos_governor; |
169 | |||
170 | extern struct dev_power_governor pm_domain_always_on_gov; | 150 | extern struct dev_power_governor pm_domain_always_on_gov; |
171 | #else | 151 | #else |
172 | 152 | ||
@@ -184,12 +164,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, | |||
184 | { | 164 | { |
185 | return -ENOSYS; | 165 | return -ENOSYS; |
186 | } | 166 | } |
187 | static inline int __pm_genpd_of_add_device(struct device_node *genpd_node, | ||
188 | struct device *dev, | ||
189 | struct gpd_timing_data *td) | ||
190 | { | ||
191 | return -ENOSYS; | ||
192 | } | ||
193 | static inline int __pm_genpd_name_add_device(const char *domain_name, | 167 | static inline int __pm_genpd_name_add_device(const char *domain_name, |
194 | struct device *dev, | 168 | struct device *dev, |
195 | struct gpd_timing_data *td) | 169 | struct gpd_timing_data *td) |
@@ -217,16 +191,6 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
217 | { | 191 | { |
218 | return -ENOSYS; | 192 | return -ENOSYS; |
219 | } | 193 | } |
220 | static inline int pm_genpd_add_callbacks(struct device *dev, | ||
221 | struct gpd_dev_ops *ops, | ||
222 | struct gpd_timing_data *td) | ||
223 | { | ||
224 | return -ENOSYS; | ||
225 | } | ||
226 | static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) | ||
227 | { | ||
228 | return -ENOSYS; | ||
229 | } | ||
230 | static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) | 194 | static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) |
231 | { | 195 | { |
232 | return -ENOSYS; | 196 | return -ENOSYS; |
@@ -255,10 +219,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name) | |||
255 | { | 219 | { |
256 | return -ENOSYS; | 220 | return -ENOSYS; |
257 | } | 221 | } |
258 | static inline bool default_stop_ok(struct device *dev) | ||
259 | { | ||
260 | return false; | ||
261 | } | ||
262 | #define simple_qos_governor NULL | 222 | #define simple_qos_governor NULL |
263 | #define pm_domain_always_on_gov NULL | 223 | #define pm_domain_always_on_gov NULL |
264 | #endif | 224 | #endif |
@@ -269,45 +229,87 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, | |||
269 | return __pm_genpd_add_device(genpd, dev, NULL); | 229 | return __pm_genpd_add_device(genpd, dev, NULL); |
270 | } | 230 | } |
271 | 231 | ||
272 | static inline int pm_genpd_of_add_device(struct device_node *genpd_node, | ||
273 | struct device *dev) | ||
274 | { | ||
275 | return __pm_genpd_of_add_device(genpd_node, dev, NULL); | ||
276 | } | ||
277 | |||
278 | static inline int pm_genpd_name_add_device(const char *domain_name, | 232 | static inline int pm_genpd_name_add_device(const char *domain_name, |
279 | struct device *dev) | 233 | struct device *dev) |
280 | { | 234 | { |
281 | return __pm_genpd_name_add_device(domain_name, dev, NULL); | 235 | return __pm_genpd_name_add_device(domain_name, dev, NULL); |
282 | } | 236 | } |
283 | 237 | ||
284 | static inline int pm_genpd_remove_callbacks(struct device *dev) | ||
285 | { | ||
286 | return __pm_genpd_remove_callbacks(dev, true); | ||
287 | } | ||
288 | |||
289 | #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME | 238 | #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME |
290 | extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); | ||
291 | extern void pm_genpd_poweroff_unused(void); | 239 | extern void pm_genpd_poweroff_unused(void); |
292 | #else | 240 | #else |
293 | static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} | ||
294 | static inline void pm_genpd_poweroff_unused(void) {} | 241 | static inline void pm_genpd_poweroff_unused(void) {} |
295 | #endif | 242 | #endif |
296 | 243 | ||
297 | #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP | 244 | #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP |
298 | extern void pm_genpd_syscore_switch(struct device *dev, bool suspend); | 245 | extern void pm_genpd_syscore_poweroff(struct device *dev); |
246 | extern void pm_genpd_syscore_poweron(struct device *dev); | ||
299 | #else | 247 | #else |
300 | static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {} | 248 | static inline void pm_genpd_syscore_poweroff(struct device *dev) {} |
249 | static inline void pm_genpd_syscore_poweron(struct device *dev) {} | ||
301 | #endif | 250 | #endif |
302 | 251 | ||
303 | static inline void pm_genpd_syscore_poweroff(struct device *dev) | 252 | /* OF PM domain providers */ |
253 | struct of_device_id; | ||
254 | |||
255 | struct genpd_onecell_data { | ||
256 | struct generic_pm_domain **domains; | ||
257 | unsigned int num_domains; | ||
258 | }; | ||
259 | |||
260 | typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args, | ||
261 | void *data); | ||
262 | |||
263 | #ifdef CONFIG_PM_GENERIC_DOMAINS_OF | ||
264 | int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, | ||
265 | void *data); | ||
266 | void of_genpd_del_provider(struct device_node *np); | ||
267 | |||
268 | struct generic_pm_domain *__of_genpd_xlate_simple( | ||
269 | struct of_phandle_args *genpdspec, | ||
270 | void *data); | ||
271 | struct generic_pm_domain *__of_genpd_xlate_onecell( | ||
272 | struct of_phandle_args *genpdspec, | ||
273 | void *data); | ||
274 | |||
275 | int genpd_dev_pm_attach(struct device *dev); | ||
276 | #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ | ||
277 | static inline int __of_genpd_add_provider(struct device_node *np, | ||
278 | genpd_xlate_t xlate, void *data) | ||
279 | { | ||
280 | return 0; | ||
281 | } | ||
282 | static inline void of_genpd_del_provider(struct device_node *np) {} | ||
283 | |||
284 | #define __of_genpd_xlate_simple NULL | ||
285 | #define __of_genpd_xlate_onecell NULL | ||
286 | |||
287 | static inline int genpd_dev_pm_attach(struct device *dev) | ||
288 | { | ||
289 | return -ENODEV; | ||
290 | } | ||
291 | #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ | ||
292 | |||
293 | static inline int of_genpd_add_provider_simple(struct device_node *np, | ||
294 | struct generic_pm_domain *genpd) | ||
295 | { | ||
296 | return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd); | ||
297 | } | ||
298 | static inline int of_genpd_add_provider_onecell(struct device_node *np, | ||
299 | struct genpd_onecell_data *data) | ||
304 | { | 300 | { |
305 | pm_genpd_syscore_switch(dev, true); | 301 | return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data); |
306 | } | 302 | } |
307 | 303 | ||
308 | static inline void pm_genpd_syscore_poweron(struct device *dev) | 304 | #ifdef CONFIG_PM |
305 | extern int dev_pm_domain_attach(struct device *dev, bool power_on); | ||
306 | extern void dev_pm_domain_detach(struct device *dev, bool power_off); | ||
307 | #else | ||
308 | static inline int dev_pm_domain_attach(struct device *dev, bool power_on) | ||
309 | { | 309 | { |
310 | pm_genpd_syscore_switch(dev, false); | 310 | return -ENODEV; |
311 | } | 311 | } |
312 | static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {} | ||
313 | #endif | ||
312 | 314 | ||
313 | #endif /* _LINUX_PM_DOMAIN_H */ | 315 | #endif /* _LINUX_PM_DOMAIN_H */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 5c2c885ee52b..b867a4dab38a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1903,8 +1903,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, | |||
1903 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ | 1903 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
1904 | #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ | 1904 | #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ |
1905 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ | 1905 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
1906 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ | ||
1907 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ | ||
1908 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ | 1906 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
1909 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1907 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
1910 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1908 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
@@ -1957,17 +1955,31 @@ static inline void memalloc_noio_restore(unsigned int flags) | |||
1957 | } | 1955 | } |
1958 | 1956 | ||
1959 | /* Per-process atomic flags. */ | 1957 | /* Per-process atomic flags. */ |
1960 | #define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */ | 1958 | #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ |
1959 | #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ | ||
1960 | #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ | ||
1961 | 1961 | ||
1962 | static inline bool task_no_new_privs(struct task_struct *p) | ||
1963 | { | ||
1964 | return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); | ||
1965 | } | ||
1966 | 1962 | ||
1967 | static inline void task_set_no_new_privs(struct task_struct *p) | 1963 | #define TASK_PFA_TEST(name, func) \ |
1968 | { | 1964 | static inline bool task_##func(struct task_struct *p) \ |
1969 | set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); | 1965 | { return test_bit(PFA_##name, &p->atomic_flags); } |
1970 | } | 1966 | #define TASK_PFA_SET(name, func) \ |
1967 | static inline void task_set_##func(struct task_struct *p) \ | ||
1968 | { set_bit(PFA_##name, &p->atomic_flags); } | ||
1969 | #define TASK_PFA_CLEAR(name, func) \ | ||
1970 | static inline void task_clear_##func(struct task_struct *p) \ | ||
1971 | { clear_bit(PFA_##name, &p->atomic_flags); } | ||
1972 | |||
1973 | TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) | ||
1974 | TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) | ||
1975 | |||
1976 | TASK_PFA_TEST(SPREAD_PAGE, spread_page) | ||
1977 | TASK_PFA_SET(SPREAD_PAGE, spread_page) | ||
1978 | TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) | ||
1979 | |||
1980 | TASK_PFA_TEST(SPREAD_SLAB, spread_slab) | ||
1981 | TASK_PFA_SET(SPREAD_SLAB, spread_slab) | ||
1982 | TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) | ||
1971 | 1983 | ||
1972 | /* | 1984 | /* |
1973 | * task->jobctl flags | 1985 | * task->jobctl flags |
@@ -2608,9 +2620,22 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct | |||
2608 | task_thread_info(p)->task = p; | 2620 | task_thread_info(p)->task = p; |
2609 | } | 2621 | } |
2610 | 2622 | ||
2623 | /* | ||
2624 | * Return the address of the last usable long on the stack. | ||
2625 | * | ||
2626 | * When the stack grows down, this is just above the thread | ||
2627 | * info struct. Going any lower will corrupt the threadinfo. | ||
2628 | * | ||
2629 | * When the stack grows up, this is the highest address. | ||
2630 | * Beyond that position, we corrupt data on the next page. | ||
2631 | */ | ||
2611 | static inline unsigned long *end_of_stack(struct task_struct *p) | 2632 | static inline unsigned long *end_of_stack(struct task_struct *p) |
2612 | { | 2633 | { |
2634 | #ifdef CONFIG_STACK_GROWSUP | ||
2635 | return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; | ||
2636 | #else | ||
2613 | return (unsigned long *)(task_thread_info(p) + 1); | 2637 | return (unsigned long *)(task_thread_info(p) + 1); |
2638 | #endif | ||
2614 | } | 2639 | } |
2615 | 2640 | ||
2616 | #endif | 2641 | #endif |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 519064e0c943..3388c1b6f7d8 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -189,6 +189,8 @@ struct platform_suspend_ops { | |||
189 | 189 | ||
190 | struct platform_freeze_ops { | 190 | struct platform_freeze_ops { |
191 | int (*begin)(void); | 191 | int (*begin)(void); |
192 | int (*prepare)(void); | ||
193 | void (*restore)(void); | ||
192 | void (*end)(void); | 194 | void (*end)(void); |
193 | }; | 195 | }; |
194 | 196 | ||
@@ -371,6 +373,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb); | |||
371 | extern bool events_check_enabled; | 373 | extern bool events_check_enabled; |
372 | 374 | ||
373 | extern bool pm_wakeup_pending(void); | 375 | extern bool pm_wakeup_pending(void); |
376 | extern void pm_system_wakeup(void); | ||
377 | extern void pm_wakeup_clear(void); | ||
374 | extern bool pm_get_wakeup_count(unsigned int *count, bool block); | 378 | extern bool pm_get_wakeup_count(unsigned int *count, bool block); |
375 | extern bool pm_save_wakeup_count(unsigned int count); | 379 | extern bool pm_save_wakeup_count(unsigned int count); |
376 | extern void pm_wakep_autosleep_enabled(bool set); | 380 | extern void pm_wakep_autosleep_enabled(bool set); |
@@ -418,6 +422,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) | |||
418 | #define pm_notifier(fn, pri) do { (void)(fn); } while (0) | 422 | #define pm_notifier(fn, pri) do { (void)(fn); } while (0) |
419 | 423 | ||
420 | static inline bool pm_wakeup_pending(void) { return false; } | 424 | static inline bool pm_wakeup_pending(void) { return false; } |
425 | static inline void pm_system_wakeup(void) {} | ||
426 | static inline void pm_wakeup_clear(void) {} | ||
421 | 427 | ||
422 | static inline void lock_system_sleep(void) {} | 428 | static inline void lock_system_sleep(void) {} |
423 | static inline void unlock_system_sleep(void) {} | 429 | static inline void unlock_system_sleep(void) {} |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 48d64e6ab292..290fbf0b6b8a 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
@@ -84,7 +84,7 @@ unsigned long iov_iter_alignment(const struct iov_iter *i); | |||
84 | void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, | 84 | void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, |
85 | unsigned long nr_segs, size_t count); | 85 | unsigned long nr_segs, size_t count); |
86 | ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, | 86 | ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, |
87 | unsigned maxpages, size_t *start); | 87 | size_t maxsize, unsigned maxpages, size_t *start); |
88 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, | 88 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, |
89 | size_t maxsize, size_t *start); | 89 | size_t maxsize, size_t *start); |
90 | int iov_iter_npages(const struct iov_iter *i, int maxpages); | 90 | int iov_iter_npages(const struct iov_iter *i, int maxpages); |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index a0cc2e95ed1b..b996e6cde6bb 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -419,7 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, | |||
419 | alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ | 419 | alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ |
420 | 1, (name)) | 420 | 1, (name)) |
421 | #define create_singlethread_workqueue(name) \ | 421 | #define create_singlethread_workqueue(name) \ |
422 | alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name)) | 422 | alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) |
423 | 423 | ||
424 | extern void destroy_workqueue(struct workqueue_struct *wq); | 424 | extern void destroy_workqueue(struct workqueue_struct *wq); |
425 | 425 | ||
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index fc910a622451..2fefcf491aa8 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h | |||
@@ -295,7 +295,7 @@ struct vb2_buffer { | |||
295 | * can return an error if hardware fails, in that case all | 295 | * can return an error if hardware fails, in that case all |
296 | * buffers that have been already given by the @buf_queue | 296 | * buffers that have been already given by the @buf_queue |
297 | * callback are to be returned by the driver by calling | 297 | * callback are to be returned by the driver by calling |
298 | * @vb2_buffer_done(VB2_BUF_STATE_DEQUEUED). | 298 | * @vb2_buffer_done(VB2_BUF_STATE_QUEUED). |
299 | * If you need a minimum number of buffers before you can | 299 | * If you need a minimum number of buffers before you can |
300 | * start streaming, then set @min_buffers_needed in the | 300 | * start streaming, then set @min_buffers_needed in the |
301 | * vb2_queue structure. If that is non-zero then | 301 | * vb2_queue structure. If that is non-zero then |
@@ -380,6 +380,9 @@ struct v4l2_fh; | |||
380 | * @start_streaming_called: start_streaming() was called successfully and we | 380 | * @start_streaming_called: start_streaming() was called successfully and we |
381 | * started streaming. | 381 | * started streaming. |
382 | * @error: a fatal error occurred on the queue | 382 | * @error: a fatal error occurred on the queue |
383 | * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for | ||
384 | * buffers. Only set for capture queues if qbuf has not yet been | ||
385 | * called since poll() needs to return POLLERR in that situation. | ||
383 | * @fileio: file io emulator internal data, used only if emulator is active | 386 | * @fileio: file io emulator internal data, used only if emulator is active |
384 | * @threadio: thread io internal data, used only if thread is active | 387 | * @threadio: thread io internal data, used only if thread is active |
385 | */ | 388 | */ |
@@ -417,6 +420,7 @@ struct vb2_queue { | |||
417 | unsigned int streaming:1; | 420 | unsigned int streaming:1; |
418 | unsigned int start_streaming_called:1; | 421 | unsigned int start_streaming_called:1; |
419 | unsigned int error:1; | 422 | unsigned int error:1; |
423 | unsigned int waiting_for_buffers:1; | ||
420 | 424 | ||
421 | struct vb2_fileio_data *fileio; | 425 | struct vb2_fileio_data *fileio; |
422 | struct vb2_threadio_data *threadio; | 426 | struct vb2_threadio_data *threadio; |
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index f679877bb601..ec51e673b4b6 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h | |||
@@ -204,6 +204,7 @@ void ipv6_sock_ac_close(struct sock *sk); | |||
204 | 204 | ||
205 | int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); | 205 | int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); |
206 | int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); | 206 | int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); |
207 | void ipv6_ac_destroy_dev(struct inet6_dev *idev); | ||
207 | bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, | 208 | bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, |
208 | const struct in6_addr *addr); | 209 | const struct in6_addr *addr); |
209 | bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev, | 210 | bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev, |
diff --git a/include/net/dst.h b/include/net/dst.h index 71c60f42be48..a8ae4e760778 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -480,6 +480,7 @@ void dst_init(void); | |||
480 | /* Flags for xfrm_lookup flags argument. */ | 480 | /* Flags for xfrm_lookup flags argument. */ |
481 | enum { | 481 | enum { |
482 | XFRM_LOOKUP_ICMP = 1 << 0, | 482 | XFRM_LOOKUP_ICMP = 1 << 0, |
483 | XFRM_LOOKUP_QUEUE = 1 << 1, | ||
483 | }; | 484 | }; |
484 | 485 | ||
485 | struct flowi; | 486 | struct flowi; |
@@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net, | |||
490 | int flags) | 491 | int flags) |
491 | { | 492 | { |
492 | return dst_orig; | 493 | return dst_orig; |
493 | } | 494 | } |
495 | |||
496 | static inline struct dst_entry *xfrm_lookup_route(struct net *net, | ||
497 | struct dst_entry *dst_orig, | ||
498 | const struct flowi *fl, | ||
499 | struct sock *sk, | ||
500 | int flags) | ||
501 | { | ||
502 | return dst_orig; | ||
503 | } | ||
494 | 504 | ||
495 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) | 505 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) |
496 | { | 506 | { |
@@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
502 | const struct flowi *fl, struct sock *sk, | 512 | const struct flowi *fl, struct sock *sk, |
503 | int flags); | 513 | int flags); |
504 | 514 | ||
515 | struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, | ||
516 | const struct flowi *fl, struct sock *sk, | ||
517 | int flags); | ||
518 | |||
505 | /* skb attached with this dst needs transformation if dst->xfrm is valid */ | 519 | /* skb attached with this dst needs transformation if dst->xfrm is valid */ |
506 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) | 520 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) |
507 | { | 521 | { |
diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 93695f0e22a5..af10c2cf8a1d 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h | |||
@@ -394,4 +394,12 @@ static inline int genl_set_err(struct genl_family *family, struct net *net, | |||
394 | return netlink_set_err(net->genl_sock, portid, group, code); | 394 | return netlink_set_err(net->genl_sock, portid, group, code); |
395 | } | 395 | } |
396 | 396 | ||
397 | static inline int genl_has_listeners(struct genl_family *family, | ||
398 | struct sock *sk, unsigned int group) | ||
399 | { | ||
400 | if (WARN_ON_ONCE(group >= family->n_mcgrps)) | ||
401 | return -EINVAL; | ||
402 | group = family->mcgrp_offset + group; | ||
403 | return netlink_has_listeners(sk, group); | ||
404 | } | ||
397 | #endif /* __NET_GENERIC_NETLINK_H */ | 405 | #endif /* __NET_GENERIC_NETLINK_H */ |
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 9bcb220bd4ad..cf485f9aa563 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h | |||
@@ -114,16 +114,13 @@ struct rt6_info { | |||
114 | u32 rt6i_flags; | 114 | u32 rt6i_flags; |
115 | struct rt6key rt6i_src; | 115 | struct rt6key rt6i_src; |
116 | struct rt6key rt6i_prefsrc; | 116 | struct rt6key rt6i_prefsrc; |
117 | u32 rt6i_metric; | ||
118 | 117 | ||
119 | struct inet6_dev *rt6i_idev; | 118 | struct inet6_dev *rt6i_idev; |
120 | unsigned long _rt6i_peer; | 119 | unsigned long _rt6i_peer; |
121 | 120 | ||
122 | u32 rt6i_genid; | 121 | u32 rt6i_metric; |
123 | |||
124 | /* more non-fragment space at head required */ | 122 | /* more non-fragment space at head required */ |
125 | unsigned short rt6i_nfheader_len; | 123 | unsigned short rt6i_nfheader_len; |
126 | |||
127 | u8 rt6i_protocol; | 124 | u8 rt6i_protocol; |
128 | }; | 125 | }; |
129 | 126 | ||
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 361d26077196..e0d64667a4b3 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -352,26 +352,12 @@ static inline void rt_genid_bump_ipv4(struct net *net) | |||
352 | atomic_inc(&net->ipv4.rt_genid); | 352 | atomic_inc(&net->ipv4.rt_genid); |
353 | } | 353 | } |
354 | 354 | ||
355 | #if IS_ENABLED(CONFIG_IPV6) | 355 | extern void (*__fib6_flush_trees)(struct net *net); |
356 | static inline int rt_genid_ipv6(struct net *net) | ||
357 | { | ||
358 | return atomic_read(&net->ipv6.rt_genid); | ||
359 | } | ||
360 | |||
361 | static inline void rt_genid_bump_ipv6(struct net *net) | ||
362 | { | ||
363 | atomic_inc(&net->ipv6.rt_genid); | ||
364 | } | ||
365 | #else | ||
366 | static inline int rt_genid_ipv6(struct net *net) | ||
367 | { | ||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static inline void rt_genid_bump_ipv6(struct net *net) | 356 | static inline void rt_genid_bump_ipv6(struct net *net) |
372 | { | 357 | { |
358 | if (__fib6_flush_trees) | ||
359 | __fib6_flush_trees(net); | ||
373 | } | 360 | } |
374 | #endif | ||
375 | 361 | ||
376 | #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) | 362 | #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) |
377 | static inline struct netns_ieee802154_lowpan * | 363 | static inline struct netns_ieee802154_lowpan * |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a3cfb8ebeb53..620e086c0cbe 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -231,7 +231,8 @@ struct qdisc_skb_cb { | |||
231 | unsigned int pkt_len; | 231 | unsigned int pkt_len; |
232 | u16 slave_dev_queue_mapping; | 232 | u16 slave_dev_queue_mapping; |
233 | u16 _pad; | 233 | u16 _pad; |
234 | unsigned char data[24]; | 234 | #define QDISC_CB_PRIV_LEN 20 |
235 | unsigned char data[QDISC_CB_PRIV_LEN]; | ||
235 | }; | 236 | }; |
236 | 237 | ||
237 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) | 238 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) |
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 1ea0b65c4cfb..a2bf41e0bde9 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h | |||
@@ -47,6 +47,7 @@ struct ib_umem { | |||
47 | int writable; | 47 | int writable; |
48 | int hugetlb; | 48 | int hugetlb; |
49 | struct work_struct work; | 49 | struct work_struct work; |
50 | struct pid *pid; | ||
50 | struct mm_struct *mm; | 51 | struct mm_struct *mm; |
51 | unsigned long diff; | 52 | unsigned long diff; |
52 | struct sg_table sg_head; | 53 | struct sg_table sg_head; |
diff --git a/init/Kconfig b/init/Kconfig index e84c6423a2e5..80a6907f91c5 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -811,6 +811,7 @@ config LOG_BUF_SHIFT | |||
811 | int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" | 811 | int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" |
812 | range 12 21 | 812 | range 12 21 |
813 | default 17 | 813 | default 17 |
814 | depends on PRINTK | ||
814 | help | 815 | help |
815 | Select the minimal kernel log buffer size as a power of 2. | 816 | Select the minimal kernel log buffer size as a power of 2. |
816 | The final size is affected by LOG_CPU_MAX_BUF_SHIFT config | 817 | The final size is affected by LOG_CPU_MAX_BUF_SHIFT config |
@@ -830,6 +831,7 @@ config LOG_CPU_MAX_BUF_SHIFT | |||
830 | range 0 21 | 831 | range 0 21 |
831 | default 12 if !BASE_SMALL | 832 | default 12 if !BASE_SMALL |
832 | default 0 if BASE_SMALL | 833 | default 0 if BASE_SMALL |
834 | depends on PRINTK | ||
833 | help | 835 | help |
834 | This option allows to increase the default ring buffer size | 836 | This option allows to increase the default ring buffer size |
835 | according to the number of CPUs. The value defines the contribution | 837 | according to the number of CPUs. The value defines the contribution |
@@ -1475,6 +1477,7 @@ config FUTEX | |||
1475 | 1477 | ||
1476 | config HAVE_FUTEX_CMPXCHG | 1478 | config HAVE_FUTEX_CMPXCHG |
1477 | bool | 1479 | bool |
1480 | depends on FUTEX | ||
1478 | help | 1481 | help |
1479 | Architectures should select this if futex_atomic_cmpxchg_inatomic() | 1482 | Architectures should select this if futex_atomic_cmpxchg_inatomic() |
1480 | is implemented and always working. This removes a couple of runtime | 1483 | is implemented and always working. This removes a couple of runtime |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 940aced4ed00..3a73f995a81e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -3985,7 +3985,6 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, | |||
3985 | 3985 | ||
3986 | l = cgroup_pidlist_find_create(cgrp, type); | 3986 | l = cgroup_pidlist_find_create(cgrp, type); |
3987 | if (!l) { | 3987 | if (!l) { |
3988 | mutex_unlock(&cgrp->pidlist_mutex); | ||
3989 | pidlist_free(array); | 3988 | pidlist_free(array); |
3990 | return -ENOMEM; | 3989 | return -ENOMEM; |
3991 | } | 3990 | } |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 22874d7cf2c0..52cb04c993b7 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -365,13 +365,14 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs, | |||
365 | struct task_struct *tsk) | 365 | struct task_struct *tsk) |
366 | { | 366 | { |
367 | if (is_spread_page(cs)) | 367 | if (is_spread_page(cs)) |
368 | tsk->flags |= PF_SPREAD_PAGE; | 368 | task_set_spread_page(tsk); |
369 | else | 369 | else |
370 | tsk->flags &= ~PF_SPREAD_PAGE; | 370 | task_clear_spread_page(tsk); |
371 | |||
371 | if (is_spread_slab(cs)) | 372 | if (is_spread_slab(cs)) |
372 | tsk->flags |= PF_SPREAD_SLAB; | 373 | task_set_spread_slab(tsk); |
373 | else | 374 | else |
374 | tsk->flags &= ~PF_SPREAD_SLAB; | 375 | task_clear_spread_slab(tsk); |
375 | } | 376 | } |
376 | 377 | ||
377 | /* | 378 | /* |
diff --git a/kernel/events/core.c b/kernel/events/core.c index d640a8b4dcbc..963bf139e2b2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -7948,8 +7948,10 @@ int perf_event_init_task(struct task_struct *child) | |||
7948 | 7948 | ||
7949 | for_each_task_context_nr(ctxn) { | 7949 | for_each_task_context_nr(ctxn) { |
7950 | ret = perf_event_init_context(child, ctxn); | 7950 | ret = perf_event_init_context(child, ctxn); |
7951 | if (ret) | 7951 | if (ret) { |
7952 | perf_event_free_task(child); | ||
7952 | return ret; | 7953 | return ret; |
7954 | } | ||
7953 | } | 7955 | } |
7954 | 7956 | ||
7955 | return 0; | 7957 | return 0; |
diff --git a/kernel/fork.c b/kernel/fork.c index 0cf9cdb6e491..a91e47d86de2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1360,7 +1360,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1360 | goto bad_fork_cleanup_policy; | 1360 | goto bad_fork_cleanup_policy; |
1361 | retval = audit_alloc(p); | 1361 | retval = audit_alloc(p); |
1362 | if (retval) | 1362 | if (retval) |
1363 | goto bad_fork_cleanup_policy; | 1363 | goto bad_fork_cleanup_perf; |
1364 | /* copy all the process information */ | 1364 | /* copy all the process information */ |
1365 | shm_init_task(p); | 1365 | shm_init_task(p); |
1366 | retval = copy_semundo(clone_flags, p); | 1366 | retval = copy_semundo(clone_flags, p); |
@@ -1566,8 +1566,9 @@ bad_fork_cleanup_semundo: | |||
1566 | exit_sem(p); | 1566 | exit_sem(p); |
1567 | bad_fork_cleanup_audit: | 1567 | bad_fork_cleanup_audit: |
1568 | audit_free(p); | 1568 | audit_free(p); |
1569 | bad_fork_cleanup_policy: | 1569 | bad_fork_cleanup_perf: |
1570 | perf_event_free_task(p); | 1570 | perf_event_free_task(p); |
1571 | bad_fork_cleanup_policy: | ||
1571 | #ifdef CONFIG_NUMA | 1572 | #ifdef CONFIG_NUMA |
1572 | mpol_put(p->mempolicy); | 1573 | mpol_put(p->mempolicy); |
1573 | bad_fork_cleanup_threadgroup_lock: | 1574 | bad_fork_cleanup_threadgroup_lock: |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 6223fab9a9d2..8fb52e9bddc1 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -342,6 +342,31 @@ static bool irq_check_poll(struct irq_desc *desc) | |||
342 | return irq_wait_for_poll(desc); | 342 | return irq_wait_for_poll(desc); |
343 | } | 343 | } |
344 | 344 | ||
345 | static bool irq_may_run(struct irq_desc *desc) | ||
346 | { | ||
347 | unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED; | ||
348 | |||
349 | /* | ||
350 | * If the interrupt is not in progress and is not an armed | ||
351 | * wakeup interrupt, proceed. | ||
352 | */ | ||
353 | if (!irqd_has_set(&desc->irq_data, mask)) | ||
354 | return true; | ||
355 | |||
356 | /* | ||
357 | * If the interrupt is an armed wakeup source, mark it pending | ||
358 | * and suspended, disable it and notify the pm core about the | ||
359 | * event. | ||
360 | */ | ||
361 | if (irq_pm_check_wakeup(desc)) | ||
362 | return false; | ||
363 | |||
364 | /* | ||
365 | * Handle a potential concurrent poll on a different core. | ||
366 | */ | ||
367 | return irq_check_poll(desc); | ||
368 | } | ||
369 | |||
345 | /** | 370 | /** |
346 | * handle_simple_irq - Simple and software-decoded IRQs. | 371 | * handle_simple_irq - Simple and software-decoded IRQs. |
347 | * @irq: the interrupt number | 372 | * @irq: the interrupt number |
@@ -359,9 +384,8 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
359 | { | 384 | { |
360 | raw_spin_lock(&desc->lock); | 385 | raw_spin_lock(&desc->lock); |
361 | 386 | ||
362 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 387 | if (!irq_may_run(desc)) |
363 | if (!irq_check_poll(desc)) | 388 | goto out_unlock; |
364 | goto out_unlock; | ||
365 | 389 | ||
366 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 390 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
367 | kstat_incr_irqs_this_cpu(irq, desc); | 391 | kstat_incr_irqs_this_cpu(irq, desc); |
@@ -412,9 +436,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
412 | raw_spin_lock(&desc->lock); | 436 | raw_spin_lock(&desc->lock); |
413 | mask_ack_irq(desc); | 437 | mask_ack_irq(desc); |
414 | 438 | ||
415 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 439 | if (!irq_may_run(desc)) |
416 | if (!irq_check_poll(desc)) | 440 | goto out_unlock; |
417 | goto out_unlock; | ||
418 | 441 | ||
419 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 442 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
420 | kstat_incr_irqs_this_cpu(irq, desc); | 443 | kstat_incr_irqs_this_cpu(irq, desc); |
@@ -485,9 +508,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
485 | 508 | ||
486 | raw_spin_lock(&desc->lock); | 509 | raw_spin_lock(&desc->lock); |
487 | 510 | ||
488 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 511 | if (!irq_may_run(desc)) |
489 | if (!irq_check_poll(desc)) | 512 | goto out; |
490 | goto out; | ||
491 | 513 | ||
492 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 514 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
493 | kstat_incr_irqs_this_cpu(irq, desc); | 515 | kstat_incr_irqs_this_cpu(irq, desc); |
@@ -541,19 +563,23 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
541 | raw_spin_lock(&desc->lock); | 563 | raw_spin_lock(&desc->lock); |
542 | 564 | ||
543 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 565 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
566 | |||
567 | if (!irq_may_run(desc)) { | ||
568 | desc->istate |= IRQS_PENDING; | ||
569 | mask_ack_irq(desc); | ||
570 | goto out_unlock; | ||
571 | } | ||
572 | |||
544 | /* | 573 | /* |
545 | * If we're currently running this IRQ, or its disabled, | 574 | * If its disabled or no action available then mask it and get |
546 | * we shouldn't process the IRQ. Mark it pending, handle | 575 | * out of here. |
547 | * the necessary masking and go out | ||
548 | */ | 576 | */ |
549 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || | 577 | if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { |
550 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { | 578 | desc->istate |= IRQS_PENDING; |
551 | if (!irq_check_poll(desc)) { | 579 | mask_ack_irq(desc); |
552 | desc->istate |= IRQS_PENDING; | 580 | goto out_unlock; |
553 | mask_ack_irq(desc); | ||
554 | goto out_unlock; | ||
555 | } | ||
556 | } | 581 | } |
582 | |||
557 | kstat_incr_irqs_this_cpu(irq, desc); | 583 | kstat_incr_irqs_this_cpu(irq, desc); |
558 | 584 | ||
559 | /* Start handling the irq */ | 585 | /* Start handling the irq */ |
@@ -602,18 +628,21 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) | |||
602 | raw_spin_lock(&desc->lock); | 628 | raw_spin_lock(&desc->lock); |
603 | 629 | ||
604 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 630 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
631 | |||
632 | if (!irq_may_run(desc)) { | ||
633 | desc->istate |= IRQS_PENDING; | ||
634 | goto out_eoi; | ||
635 | } | ||
636 | |||
605 | /* | 637 | /* |
606 | * If we're currently running this IRQ, or its disabled, | 638 | * If its disabled or no action available then mask it and get |
607 | * we shouldn't process the IRQ. Mark it pending, handle | 639 | * out of here. |
608 | * the necessary masking and go out | ||
609 | */ | 640 | */ |
610 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || | 641 | if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { |
611 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { | 642 | desc->istate |= IRQS_PENDING; |
612 | if (!irq_check_poll(desc)) { | 643 | goto out_eoi; |
613 | desc->istate |= IRQS_PENDING; | ||
614 | goto out_eoi; | ||
615 | } | ||
616 | } | 644 | } |
645 | |||
617 | kstat_incr_irqs_this_cpu(irq, desc); | 646 | kstat_incr_irqs_this_cpu(irq, desc); |
618 | 647 | ||
619 | do { | 648 | do { |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 099ea2e0eb88..4332d766619d 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -63,8 +63,8 @@ enum { | |||
63 | 63 | ||
64 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 64 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
65 | unsigned long flags); | 65 | unsigned long flags); |
66 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); | 66 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq); |
67 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); | 67 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq); |
68 | 68 | ||
69 | extern int irq_startup(struct irq_desc *desc, bool resend); | 69 | extern int irq_startup(struct irq_desc *desc, bool resend); |
70 | extern void irq_shutdown(struct irq_desc *desc); | 70 | extern void irq_shutdown(struct irq_desc *desc); |
@@ -194,3 +194,15 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *d | |||
194 | __this_cpu_inc(*desc->kstat_irqs); | 194 | __this_cpu_inc(*desc->kstat_irqs); |
195 | __this_cpu_inc(kstat.irqs_sum); | 195 | __this_cpu_inc(kstat.irqs_sum); |
196 | } | 196 | } |
197 | |||
198 | #ifdef CONFIG_PM_SLEEP | ||
199 | bool irq_pm_check_wakeup(struct irq_desc *desc); | ||
200 | void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action); | ||
201 | void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action); | ||
202 | #else | ||
203 | static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; } | ||
204 | static inline void | ||
205 | irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { } | ||
206 | static inline void | ||
207 | irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { } | ||
208 | #endif | ||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 3dc6a61bf06a..0a9104b4608b 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -382,14 +382,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | |||
382 | } | 382 | } |
383 | #endif | 383 | #endif |
384 | 384 | ||
385 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | 385 | void __disable_irq(struct irq_desc *desc, unsigned int irq) |
386 | { | 386 | { |
387 | if (suspend) { | ||
388 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) | ||
389 | return; | ||
390 | desc->istate |= IRQS_SUSPENDED; | ||
391 | } | ||
392 | |||
393 | if (!desc->depth++) | 387 | if (!desc->depth++) |
394 | irq_disable(desc); | 388 | irq_disable(desc); |
395 | } | 389 | } |
@@ -401,7 +395,7 @@ static int __disable_irq_nosync(unsigned int irq) | |||
401 | 395 | ||
402 | if (!desc) | 396 | if (!desc) |
403 | return -EINVAL; | 397 | return -EINVAL; |
404 | __disable_irq(desc, irq, false); | 398 | __disable_irq(desc, irq); |
405 | irq_put_desc_busunlock(desc, flags); | 399 | irq_put_desc_busunlock(desc, flags); |
406 | return 0; | 400 | return 0; |
407 | } | 401 | } |
@@ -442,20 +436,8 @@ void disable_irq(unsigned int irq) | |||
442 | } | 436 | } |
443 | EXPORT_SYMBOL(disable_irq); | 437 | EXPORT_SYMBOL(disable_irq); |
444 | 438 | ||
445 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 439 | void __enable_irq(struct irq_desc *desc, unsigned int irq) |
446 | { | 440 | { |
447 | if (resume) { | ||
448 | if (!(desc->istate & IRQS_SUSPENDED)) { | ||
449 | if (!desc->action) | ||
450 | return; | ||
451 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | ||
452 | return; | ||
453 | /* Pretend that it got disabled ! */ | ||
454 | desc->depth++; | ||
455 | } | ||
456 | desc->istate &= ~IRQS_SUSPENDED; | ||
457 | } | ||
458 | |||
459 | switch (desc->depth) { | 441 | switch (desc->depth) { |
460 | case 0: | 442 | case 0: |
461 | err_out: | 443 | err_out: |
@@ -497,7 +479,7 @@ void enable_irq(unsigned int irq) | |||
497 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | 479 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) |
498 | goto out; | 480 | goto out; |
499 | 481 | ||
500 | __enable_irq(desc, irq, false); | 482 | __enable_irq(desc, irq); |
501 | out: | 483 | out: |
502 | irq_put_desc_busunlock(desc, flags); | 484 | irq_put_desc_busunlock(desc, flags); |
503 | } | 485 | } |
@@ -1218,6 +1200,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1218 | new->irq = irq; | 1200 | new->irq = irq; |
1219 | *old_ptr = new; | 1201 | *old_ptr = new; |
1220 | 1202 | ||
1203 | irq_pm_install_action(desc, new); | ||
1204 | |||
1221 | /* Reset broken irq detection when installing new handler */ | 1205 | /* Reset broken irq detection when installing new handler */ |
1222 | desc->irq_count = 0; | 1206 | desc->irq_count = 0; |
1223 | desc->irqs_unhandled = 0; | 1207 | desc->irqs_unhandled = 0; |
@@ -1228,7 +1212,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1228 | */ | 1212 | */ |
1229 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { | 1213 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
1230 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; | 1214 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; |
1231 | __enable_irq(desc, irq, false); | 1215 | __enable_irq(desc, irq); |
1232 | } | 1216 | } |
1233 | 1217 | ||
1234 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1218 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
@@ -1336,6 +1320,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
1336 | /* Found it - now remove it from the list of entries: */ | 1320 | /* Found it - now remove it from the list of entries: */ |
1337 | *action_ptr = action->next; | 1321 | *action_ptr = action->next; |
1338 | 1322 | ||
1323 | irq_pm_remove_action(desc, action); | ||
1324 | |||
1339 | /* If this was the last handler, shut down the IRQ line: */ | 1325 | /* If this was the last handler, shut down the IRQ line: */ |
1340 | if (!desc->action) { | 1326 | if (!desc->action) { |
1341 | irq_shutdown(desc); | 1327 | irq_shutdown(desc); |
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index abcd6ca86cb7..3ca532592704 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
@@ -9,17 +9,105 @@ | |||
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/suspend.h> | ||
12 | #include <linux/syscore_ops.h> | 13 | #include <linux/syscore_ops.h> |
13 | 14 | ||
14 | #include "internals.h" | 15 | #include "internals.h" |
15 | 16 | ||
17 | bool irq_pm_check_wakeup(struct irq_desc *desc) | ||
18 | { | ||
19 | if (irqd_is_wakeup_armed(&desc->irq_data)) { | ||
20 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED); | ||
21 | desc->istate |= IRQS_SUSPENDED | IRQS_PENDING; | ||
22 | desc->depth++; | ||
23 | irq_disable(desc); | ||
24 | pm_system_wakeup(); | ||
25 | return true; | ||
26 | } | ||
27 | return false; | ||
28 | } | ||
29 | |||
30 | /* | ||
31 | * Called from __setup_irq() with desc->lock held after @action has | ||
32 | * been installed in the action chain. | ||
33 | */ | ||
34 | void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) | ||
35 | { | ||
36 | desc->nr_actions++; | ||
37 | |||
38 | if (action->flags & IRQF_FORCE_RESUME) | ||
39 | desc->force_resume_depth++; | ||
40 | |||
41 | WARN_ON_ONCE(desc->force_resume_depth && | ||
42 | desc->force_resume_depth != desc->nr_actions); | ||
43 | |||
44 | if (action->flags & IRQF_NO_SUSPEND) | ||
45 | desc->no_suspend_depth++; | ||
46 | |||
47 | WARN_ON_ONCE(desc->no_suspend_depth && | ||
48 | desc->no_suspend_depth != desc->nr_actions); | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Called from __free_irq() with desc->lock held after @action has | ||
53 | * been removed from the action chain. | ||
54 | */ | ||
55 | void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) | ||
56 | { | ||
57 | desc->nr_actions--; | ||
58 | |||
59 | if (action->flags & IRQF_FORCE_RESUME) | ||
60 | desc->force_resume_depth--; | ||
61 | |||
62 | if (action->flags & IRQF_NO_SUSPEND) | ||
63 | desc->no_suspend_depth--; | ||
64 | } | ||
65 | |||
66 | static bool suspend_device_irq(struct irq_desc *desc, int irq) | ||
67 | { | ||
68 | if (!desc->action || desc->no_suspend_depth) | ||
69 | return false; | ||
70 | |||
71 | if (irqd_is_wakeup_set(&desc->irq_data)) { | ||
72 | irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); | ||
73 | /* | ||
74 | * We return true here to force the caller to issue | ||
75 | * synchronize_irq(). We need to make sure that the | ||
76 | * IRQD_WAKEUP_ARMED is visible before we return from | ||
77 | * suspend_device_irqs(). | ||
78 | */ | ||
79 | return true; | ||
80 | } | ||
81 | |||
82 | desc->istate |= IRQS_SUSPENDED; | ||
83 | __disable_irq(desc, irq); | ||
84 | |||
85 | /* | ||
86 | * Hardware which has no wakeup source configuration facility | ||
87 | * requires that the non wakeup interrupts are masked at the | ||
88 | * chip level. The chip implementation indicates that with | ||
89 | * IRQCHIP_MASK_ON_SUSPEND. | ||
90 | */ | ||
91 | if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND) | ||
92 | mask_irq(desc); | ||
93 | return true; | ||
94 | } | ||
95 | |||
16 | /** | 96 | /** |
17 | * suspend_device_irqs - disable all currently enabled interrupt lines | 97 | * suspend_device_irqs - disable all currently enabled interrupt lines |
18 | * | 98 | * |
19 | * During system-wide suspend or hibernation device drivers need to be prevented | 99 | * During system-wide suspend or hibernation device drivers need to be |
20 | * from receiving interrupts and this function is provided for this purpose. | 100 | * prevented from receiving interrupts and this function is provided |
21 | * It marks all interrupt lines in use, except for the timer ones, as disabled | 101 | * for this purpose. |
22 | * and sets the IRQS_SUSPENDED flag for each of them. | 102 | * |
103 | * So we disable all interrupts and mark them IRQS_SUSPENDED except | ||
104 | * for those which are unused, those which are marked as not | ||
105 | * suspendable via an interrupt request with the flag IRQF_NO_SUSPEND | ||
106 | * set and those which are marked as active wakeup sources. | ||
107 | * | ||
108 | * The active wakeup sources are handled by the flow handler entry | ||
109 | * code which checks for the IRQD_WAKEUP_ARMED flag, suspends the | ||
110 | * interrupt and notifies the pm core about the wakeup. | ||
23 | */ | 111 | */ |
24 | void suspend_device_irqs(void) | 112 | void suspend_device_irqs(void) |
25 | { | 113 | { |
@@ -28,18 +116,36 @@ void suspend_device_irqs(void) | |||
28 | 116 | ||
29 | for_each_irq_desc(irq, desc) { | 117 | for_each_irq_desc(irq, desc) { |
30 | unsigned long flags; | 118 | unsigned long flags; |
119 | bool sync; | ||
31 | 120 | ||
32 | raw_spin_lock_irqsave(&desc->lock, flags); | 121 | raw_spin_lock_irqsave(&desc->lock, flags); |
33 | __disable_irq(desc, irq, true); | 122 | sync = suspend_device_irq(desc, irq); |
34 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 123 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
35 | } | ||
36 | 124 | ||
37 | for_each_irq_desc(irq, desc) | 125 | if (sync) |
38 | if (desc->istate & IRQS_SUSPENDED) | ||
39 | synchronize_irq(irq); | 126 | synchronize_irq(irq); |
127 | } | ||
40 | } | 128 | } |
41 | EXPORT_SYMBOL_GPL(suspend_device_irqs); | 129 | EXPORT_SYMBOL_GPL(suspend_device_irqs); |
42 | 130 | ||
131 | static void resume_irq(struct irq_desc *desc, int irq) | ||
132 | { | ||
133 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED); | ||
134 | |||
135 | if (desc->istate & IRQS_SUSPENDED) | ||
136 | goto resume; | ||
137 | |||
138 | /* Force resume the interrupt? */ | ||
139 | if (!desc->force_resume_depth) | ||
140 | return; | ||
141 | |||
142 | /* Pretend that it got disabled ! */ | ||
143 | desc->depth++; | ||
144 | resume: | ||
145 | desc->istate &= ~IRQS_SUSPENDED; | ||
146 | __enable_irq(desc, irq); | ||
147 | } | ||
148 | |||
43 | static void resume_irqs(bool want_early) | 149 | static void resume_irqs(bool want_early) |
44 | { | 150 | { |
45 | struct irq_desc *desc; | 151 | struct irq_desc *desc; |
@@ -54,7 +160,7 @@ static void resume_irqs(bool want_early) | |||
54 | continue; | 160 | continue; |
55 | 161 | ||
56 | raw_spin_lock_irqsave(&desc->lock, flags); | 162 | raw_spin_lock_irqsave(&desc->lock, flags); |
57 | __enable_irq(desc, irq, true); | 163 | resume_irq(desc, irq); |
58 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 164 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
59 | } | 165 | } |
60 | } | 166 | } |
@@ -93,38 +199,3 @@ void resume_device_irqs(void) | |||
93 | resume_irqs(false); | 199 | resume_irqs(false); |
94 | } | 200 | } |
95 | EXPORT_SYMBOL_GPL(resume_device_irqs); | 201 | EXPORT_SYMBOL_GPL(resume_device_irqs); |
96 | |||
97 | /** | ||
98 | * check_wakeup_irqs - check if any wake-up interrupts are pending | ||
99 | */ | ||
100 | int check_wakeup_irqs(void) | ||
101 | { | ||
102 | struct irq_desc *desc; | ||
103 | int irq; | ||
104 | |||
105 | for_each_irq_desc(irq, desc) { | ||
106 | /* | ||
107 | * Only interrupts which are marked as wakeup source | ||
108 | * and have not been disabled before the suspend check | ||
109 | * can abort suspend. | ||
110 | */ | ||
111 | if (irqd_is_wakeup_set(&desc->irq_data)) { | ||
112 | if (desc->depth == 1 && desc->istate & IRQS_PENDING) | ||
113 | return -EBUSY; | ||
114 | continue; | ||
115 | } | ||
116 | /* | ||
117 | * Check the non wakeup interrupts whether they need | ||
118 | * to be masked before finally going into suspend | ||
119 | * state. That's for hardware which has no wakeup | ||
120 | * source configuration facility. The chip | ||
121 | * implementation indicates that with | ||
122 | * IRQCHIP_MASK_ON_SUSPEND. | ||
123 | */ | ||
124 | if (desc->istate & IRQS_SUSPENDED && | ||
125 | irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND) | ||
126 | mask_irq(desc); | ||
127 | } | ||
128 | |||
129 | return 0; | ||
130 | } | ||
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index e4e4121fa327..bbef57f5bdfd 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -302,6 +302,10 @@ config PM_GENERIC_DOMAINS_RUNTIME | |||
302 | def_bool y | 302 | def_bool y |
303 | depends on PM_RUNTIME && PM_GENERIC_DOMAINS | 303 | depends on PM_RUNTIME && PM_GENERIC_DOMAINS |
304 | 304 | ||
305 | config PM_GENERIC_DOMAINS_OF | ||
306 | def_bool y | ||
307 | depends on PM_GENERIC_DOMAINS && OF | ||
308 | |||
305 | config CPU_PM | 309 | config CPU_PM |
306 | bool | 310 | bool |
307 | depends on SUSPEND || CPU_IDLE | 311 | depends on SUSPEND || CPU_IDLE |
diff --git a/kernel/power/process.c b/kernel/power/process.c index 4ee194eb524b..7b323221b9ee 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -129,6 +129,7 @@ int freeze_processes(void) | |||
129 | if (!pm_freezing) | 129 | if (!pm_freezing) |
130 | atomic_inc(&system_freezing_cnt); | 130 | atomic_inc(&system_freezing_cnt); |
131 | 131 | ||
132 | pm_wakeup_clear(); | ||
132 | printk("Freezing user space processes ... "); | 133 | printk("Freezing user space processes ... "); |
133 | pm_freezing = true; | 134 | pm_freezing = true; |
134 | error = try_to_freeze_tasks(true); | 135 | error = try_to_freeze_tasks(true); |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index c4b8093c80b3..791a61892bb5 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -1343,6 +1343,9 @@ void swsusp_free(void) | |||
1343 | { | 1343 | { |
1344 | unsigned long fb_pfn, fr_pfn; | 1344 | unsigned long fb_pfn, fr_pfn; |
1345 | 1345 | ||
1346 | if (!forbidden_pages_map || !free_pages_map) | ||
1347 | goto out; | ||
1348 | |||
1346 | memory_bm_position_reset(forbidden_pages_map); | 1349 | memory_bm_position_reset(forbidden_pages_map); |
1347 | memory_bm_position_reset(free_pages_map); | 1350 | memory_bm_position_reset(free_pages_map); |
1348 | 1351 | ||
@@ -1370,6 +1373,7 @@ loop: | |||
1370 | goto loop; | 1373 | goto loop; |
1371 | } | 1374 | } |
1372 | 1375 | ||
1376 | out: | ||
1373 | nr_copy_pages = 0; | 1377 | nr_copy_pages = 0; |
1374 | nr_meta_pages = 0; | 1378 | nr_meta_pages = 0; |
1375 | restore_pblist = NULL; | 1379 | restore_pblist = NULL; |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 18c62195660f..4ca9a33ff620 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -146,17 +146,29 @@ static int platform_suspend_prepare(suspend_state_t state) | |||
146 | 146 | ||
147 | static int platform_suspend_prepare_late(suspend_state_t state) | 147 | static int platform_suspend_prepare_late(suspend_state_t state) |
148 | { | 148 | { |
149 | return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ? | ||
150 | freeze_ops->prepare() : 0; | ||
151 | } | ||
152 | |||
153 | static int platform_suspend_prepare_noirq(suspend_state_t state) | ||
154 | { | ||
149 | return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ? | 155 | return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ? |
150 | suspend_ops->prepare_late() : 0; | 156 | suspend_ops->prepare_late() : 0; |
151 | } | 157 | } |
152 | 158 | ||
153 | static void platform_suspend_wake(suspend_state_t state) | 159 | static void platform_resume_noirq(suspend_state_t state) |
154 | { | 160 | { |
155 | if (state != PM_SUSPEND_FREEZE && suspend_ops->wake) | 161 | if (state != PM_SUSPEND_FREEZE && suspend_ops->wake) |
156 | suspend_ops->wake(); | 162 | suspend_ops->wake(); |
157 | } | 163 | } |
158 | 164 | ||
159 | static void platform_suspend_finish(suspend_state_t state) | 165 | static void platform_resume_early(suspend_state_t state) |
166 | { | ||
167 | if (state == PM_SUSPEND_FREEZE && freeze_ops->restore) | ||
168 | freeze_ops->restore(); | ||
169 | } | ||
170 | |||
171 | static void platform_resume_finish(suspend_state_t state) | ||
160 | { | 172 | { |
161 | if (state != PM_SUSPEND_FREEZE && suspend_ops->finish) | 173 | if (state != PM_SUSPEND_FREEZE && suspend_ops->finish) |
162 | suspend_ops->finish(); | 174 | suspend_ops->finish(); |
@@ -172,7 +184,7 @@ static int platform_suspend_begin(suspend_state_t state) | |||
172 | return 0; | 184 | return 0; |
173 | } | 185 | } |
174 | 186 | ||
175 | static void platform_suspend_end(suspend_state_t state) | 187 | static void platform_resume_end(suspend_state_t state) |
176 | { | 188 | { |
177 | if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end) | 189 | if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end) |
178 | freeze_ops->end(); | 190 | freeze_ops->end(); |
@@ -180,7 +192,7 @@ static void platform_suspend_end(suspend_state_t state) | |||
180 | suspend_ops->end(); | 192 | suspend_ops->end(); |
181 | } | 193 | } |
182 | 194 | ||
183 | static void platform_suspend_recover(suspend_state_t state) | 195 | static void platform_recover(suspend_state_t state) |
184 | { | 196 | { |
185 | if (state != PM_SUSPEND_FREEZE && suspend_ops->recover) | 197 | if (state != PM_SUSPEND_FREEZE && suspend_ops->recover) |
186 | suspend_ops->recover(); | 198 | suspend_ops->recover(); |
@@ -265,13 +277,22 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
265 | if (error) | 277 | if (error) |
266 | goto Platform_finish; | 278 | goto Platform_finish; |
267 | 279 | ||
268 | error = dpm_suspend_end(PMSG_SUSPEND); | 280 | error = dpm_suspend_late(PMSG_SUSPEND); |
269 | if (error) { | 281 | if (error) { |
270 | printk(KERN_ERR "PM: Some devices failed to power down\n"); | 282 | printk(KERN_ERR "PM: late suspend of devices failed\n"); |
271 | goto Platform_finish; | 283 | goto Platform_finish; |
272 | } | 284 | } |
273 | error = platform_suspend_prepare_late(state); | 285 | error = platform_suspend_prepare_late(state); |
274 | if (error) | 286 | if (error) |
287 | goto Devices_early_resume; | ||
288 | |||
289 | error = dpm_suspend_noirq(PMSG_SUSPEND); | ||
290 | if (error) { | ||
291 | printk(KERN_ERR "PM: noirq suspend of devices failed\n"); | ||
292 | goto Platform_early_resume; | ||
293 | } | ||
294 | error = platform_suspend_prepare_noirq(state); | ||
295 | if (error) | ||
275 | goto Platform_wake; | 296 | goto Platform_wake; |
276 | 297 | ||
277 | if (suspend_test(TEST_PLATFORM)) | 298 | if (suspend_test(TEST_PLATFORM)) |
@@ -318,11 +339,17 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
318 | enable_nonboot_cpus(); | 339 | enable_nonboot_cpus(); |
319 | 340 | ||
320 | Platform_wake: | 341 | Platform_wake: |
321 | platform_suspend_wake(state); | 342 | platform_resume_noirq(state); |
322 | dpm_resume_start(PMSG_RESUME); | 343 | dpm_resume_noirq(PMSG_RESUME); |
344 | |||
345 | Platform_early_resume: | ||
346 | platform_resume_early(state); | ||
347 | |||
348 | Devices_early_resume: | ||
349 | dpm_resume_early(PMSG_RESUME); | ||
323 | 350 | ||
324 | Platform_finish: | 351 | Platform_finish: |
325 | platform_suspend_finish(state); | 352 | platform_resume_finish(state); |
326 | return error; | 353 | return error; |
327 | } | 354 | } |
328 | 355 | ||
@@ -361,14 +388,16 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
361 | suspend_test_start(); | 388 | suspend_test_start(); |
362 | dpm_resume_end(PMSG_RESUME); | 389 | dpm_resume_end(PMSG_RESUME); |
363 | suspend_test_finish("resume devices"); | 390 | suspend_test_finish("resume devices"); |
391 | trace_suspend_resume(TPS("resume_console"), state, true); | ||
364 | resume_console(); | 392 | resume_console(); |
393 | trace_suspend_resume(TPS("resume_console"), state, false); | ||
365 | 394 | ||
366 | Close: | 395 | Close: |
367 | platform_suspend_end(state); | 396 | platform_resume_end(state); |
368 | return error; | 397 | return error; |
369 | 398 | ||
370 | Recover_platform: | 399 | Recover_platform: |
371 | platform_suspend_recover(state); | 400 | platform_recover(state); |
372 | goto Resume_devices; | 401 | goto Resume_devices; |
373 | } | 402 | } |
374 | 403 | ||
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c index bd91bc177c93..084452e34a12 100644 --- a/kernel/power/suspend_test.c +++ b/kernel/power/suspend_test.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #define TEST_SUSPEND_SECONDS 10 | 22 | #define TEST_SUSPEND_SECONDS 10 |
23 | 23 | ||
24 | static unsigned long suspend_test_start_time; | 24 | static unsigned long suspend_test_start_time; |
25 | static u32 test_repeat_count_max = 1; | ||
26 | static u32 test_repeat_count_current; | ||
25 | 27 | ||
26 | void suspend_test_start(void) | 28 | void suspend_test_start(void) |
27 | { | 29 | { |
@@ -74,6 +76,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) | |||
74 | int status; | 76 | int status; |
75 | 77 | ||
76 | /* this may fail if the RTC hasn't been initialized */ | 78 | /* this may fail if the RTC hasn't been initialized */ |
79 | repeat: | ||
77 | status = rtc_read_time(rtc, &alm.time); | 80 | status = rtc_read_time(rtc, &alm.time); |
78 | if (status < 0) { | 81 | if (status < 0) { |
79 | printk(err_readtime, dev_name(&rtc->dev), status); | 82 | printk(err_readtime, dev_name(&rtc->dev), status); |
@@ -100,10 +103,21 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) | |||
100 | if (state == PM_SUSPEND_STANDBY) { | 103 | if (state == PM_SUSPEND_STANDBY) { |
101 | printk(info_test, pm_states[state]); | 104 | printk(info_test, pm_states[state]); |
102 | status = pm_suspend(state); | 105 | status = pm_suspend(state); |
106 | if (status < 0) | ||
107 | state = PM_SUSPEND_FREEZE; | ||
103 | } | 108 | } |
109 | if (state == PM_SUSPEND_FREEZE) { | ||
110 | printk(info_test, pm_states[state]); | ||
111 | status = pm_suspend(state); | ||
112 | } | ||
113 | |||
104 | if (status < 0) | 114 | if (status < 0) |
105 | printk(err_suspend, status); | 115 | printk(err_suspend, status); |
106 | 116 | ||
117 | test_repeat_count_current++; | ||
118 | if (test_repeat_count_current < test_repeat_count_max) | ||
119 | goto repeat; | ||
120 | |||
107 | /* Some platforms can't detect that the alarm triggered the | 121 | /* Some platforms can't detect that the alarm triggered the |
108 | * wakeup, or (accordingly) disable it after it afterwards. | 122 | * wakeup, or (accordingly) disable it after it afterwards. |
109 | * It's supposed to give oneshot behavior; cope. | 123 | * It's supposed to give oneshot behavior; cope. |
@@ -137,16 +151,28 @@ static char warn_bad_state[] __initdata = | |||
137 | static int __init setup_test_suspend(char *value) | 151 | static int __init setup_test_suspend(char *value) |
138 | { | 152 | { |
139 | int i; | 153 | int i; |
154 | char *repeat; | ||
155 | char *suspend_type; | ||
140 | 156 | ||
141 | /* "=mem" ==> "mem" */ | 157 | /* example : "=mem[,N]" ==> "mem[,N]" */ |
142 | value++; | 158 | value++; |
159 | suspend_type = strsep(&value, ","); | ||
160 | if (!suspend_type) | ||
161 | return 0; | ||
162 | |||
163 | repeat = strsep(&value, ","); | ||
164 | if (repeat) { | ||
165 | if (kstrtou32(repeat, 0, &test_repeat_count_max)) | ||
166 | return 0; | ||
167 | } | ||
168 | |||
143 | for (i = 0; pm_labels[i]; i++) | 169 | for (i = 0; pm_labels[i]; i++) |
144 | if (!strcmp(pm_labels[i], value)) { | 170 | if (!strcmp(pm_labels[i], suspend_type)) { |
145 | test_state_label = pm_labels[i]; | 171 | test_state_label = pm_labels[i]; |
146 | return 0; | 172 | return 0; |
147 | } | 173 | } |
148 | 174 | ||
149 | printk(warn_bad_state, value); | 175 | printk(warn_bad_state, suspend_type); |
150 | return 0; | 176 | return 0; |
151 | } | 177 | } |
152 | __setup("test_suspend", setup_test_suspend); | 178 | __setup("test_suspend", setup_test_suspend); |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index b38fb2b9e237..2d75c94ae87d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -3359,7 +3359,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter) | |||
3359 | iter->head = cpu_buffer->reader_page->read; | 3359 | iter->head = cpu_buffer->reader_page->read; |
3360 | 3360 | ||
3361 | iter->cache_reader_page = iter->head_page; | 3361 | iter->cache_reader_page = iter->head_page; |
3362 | iter->cache_read = iter->head; | 3362 | iter->cache_read = cpu_buffer->read; |
3363 | 3363 | ||
3364 | if (iter->head) | 3364 | if (iter->head) |
3365 | iter->read_stamp = cpu_buffer->read_stamp; | 3365 | iter->read_stamp = cpu_buffer->read_stamp; |
diff --git a/lib/genalloc.c b/lib/genalloc.c index bdb9a456bcbb..38d2db82228c 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
@@ -588,6 +588,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np, | |||
588 | if (!np_pool) | 588 | if (!np_pool) |
589 | return NULL; | 589 | return NULL; |
590 | pdev = of_find_device_by_node(np_pool); | 590 | pdev = of_find_device_by_node(np_pool); |
591 | of_node_put(np_pool); | ||
591 | if (!pdev) | 592 | if (!pdev) |
592 | return NULL; | 593 | return NULL; |
593 | return dev_get_gen_pool(&pdev->dev); | 594 | return dev_get_gen_pool(&pdev->dev); |
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index fe5a3342e960..a89cf09a8268 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c | |||
@@ -184,3 +184,19 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref, | |||
184 | call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu); | 184 | call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu); |
185 | } | 185 | } |
186 | EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm); | 186 | EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm); |
187 | |||
188 | /* | ||
189 | * XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by | ||
190 | * block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during v3.18 | ||
191 | * devel cycle. Do not use anywhere else. | ||
192 | */ | ||
193 | void __percpu_ref_kill_expedited(struct percpu_ref *ref) | ||
194 | { | ||
195 | WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD, | ||
196 | "percpu_ref_kill() called more than once on %pf!", | ||
197 | ref->release); | ||
198 | |||
199 | ref->pcpu_count_ptr |= PCPU_REF_DEAD; | ||
200 | synchronize_sched_expedited(); | ||
201 | percpu_ref_kill_rcu(&ref->rcu); | ||
202 | } | ||
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index a2c78810ebc1..16d02639d334 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/hash.h> | 23 | #include <linux/hash.h> |
24 | #include <linux/random.h> | 24 | #include <linux/random.h> |
25 | #include <linux/rhashtable.h> | 25 | #include <linux/rhashtable.h> |
26 | #include <linux/log2.h> | ||
27 | 26 | ||
28 | #define HASH_DEFAULT_SIZE 64UL | 27 | #define HASH_DEFAULT_SIZE 64UL |
29 | #define HASH_MIN_SIZE 4UL | 28 | #define HASH_MIN_SIZE 4UL |
@@ -589,13 +588,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init); | |||
589 | * rhashtable_destroy - destroy hash table | 588 | * rhashtable_destroy - destroy hash table |
590 | * @ht: the hash table to destroy | 589 | * @ht: the hash table to destroy |
591 | * | 590 | * |
592 | * Frees the bucket array. | 591 | * Frees the bucket array. This function is not rcu safe, therefore the caller |
592 | * has to make sure that no resizing may happen by unpublishing the hashtable | ||
593 | * and waiting for the quiescent cycle before releasing the bucket array. | ||
593 | */ | 594 | */ |
594 | void rhashtable_destroy(const struct rhashtable *ht) | 595 | void rhashtable_destroy(const struct rhashtable *ht) |
595 | { | 596 | { |
596 | const struct bucket_table *tbl = rht_dereference(ht->tbl, ht); | 597 | bucket_table_free(ht->tbl); |
597 | |||
598 | bucket_table_free(tbl); | ||
599 | } | 598 | } |
600 | EXPORT_SYMBOL_GPL(rhashtable_destroy); | 599 | EXPORT_SYMBOL_GPL(rhashtable_destroy); |
601 | 600 | ||
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d9a21d06b862..f8ffd9412ec5 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1795,14 +1795,17 @@ static int __split_huge_page_map(struct page *page, | |||
1795 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { | 1795 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
1796 | pte_t *pte, entry; | 1796 | pte_t *pte, entry; |
1797 | BUG_ON(PageCompound(page+i)); | 1797 | BUG_ON(PageCompound(page+i)); |
1798 | /* | ||
1799 | * Note that pmd_numa is not transferred deliberately | ||
1800 | * to avoid any possibility that pte_numa leaks to | ||
1801 | * a PROT_NONE VMA by accident. | ||
1802 | */ | ||
1798 | entry = mk_pte(page + i, vma->vm_page_prot); | 1803 | entry = mk_pte(page + i, vma->vm_page_prot); |
1799 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | 1804 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
1800 | if (!pmd_write(*pmd)) | 1805 | if (!pmd_write(*pmd)) |
1801 | entry = pte_wrprotect(entry); | 1806 | entry = pte_wrprotect(entry); |
1802 | if (!pmd_young(*pmd)) | 1807 | if (!pmd_young(*pmd)) |
1803 | entry = pte_mkold(entry); | 1808 | entry = pte_mkold(entry); |
1804 | if (pmd_numa(*pmd)) | ||
1805 | entry = pte_mknuma(entry); | ||
1806 | pte = pte_offset_map(&_pmd, haddr); | 1809 | pte = pte_offset_map(&_pmd, haddr); |
1807 | BUG_ON(!pte_none(*pte)); | 1810 | BUG_ON(!pte_none(*pte)); |
1808 | set_pte_at(mm, haddr, pte, entry); | 1811 | set_pte_at(mm, haddr, pte, entry); |
diff --git a/mm/iov_iter.c b/mm/iov_iter.c index ab88dc0ea1d3..9a09f2034fcc 100644 --- a/mm/iov_iter.c +++ b/mm/iov_iter.c | |||
@@ -310,7 +310,7 @@ void iov_iter_init(struct iov_iter *i, int direction, | |||
310 | EXPORT_SYMBOL(iov_iter_init); | 310 | EXPORT_SYMBOL(iov_iter_init); |
311 | 311 | ||
312 | static ssize_t get_pages_iovec(struct iov_iter *i, | 312 | static ssize_t get_pages_iovec(struct iov_iter *i, |
313 | struct page **pages, unsigned maxpages, | 313 | struct page **pages, size_t maxsize, unsigned maxpages, |
314 | size_t *start) | 314 | size_t *start) |
315 | { | 315 | { |
316 | size_t offset = i->iov_offset; | 316 | size_t offset = i->iov_offset; |
@@ -323,6 +323,8 @@ static ssize_t get_pages_iovec(struct iov_iter *i, | |||
323 | len = iov->iov_len - offset; | 323 | len = iov->iov_len - offset; |
324 | if (len > i->count) | 324 | if (len > i->count) |
325 | len = i->count; | 325 | len = i->count; |
326 | if (len > maxsize) | ||
327 | len = maxsize; | ||
326 | addr = (unsigned long)iov->iov_base + offset; | 328 | addr = (unsigned long)iov->iov_base + offset; |
327 | len += *start = addr & (PAGE_SIZE - 1); | 329 | len += *start = addr & (PAGE_SIZE - 1); |
328 | if (len > maxpages * PAGE_SIZE) | 330 | if (len > maxpages * PAGE_SIZE) |
@@ -588,13 +590,15 @@ static unsigned long alignment_bvec(const struct iov_iter *i) | |||
588 | } | 590 | } |
589 | 591 | ||
590 | static ssize_t get_pages_bvec(struct iov_iter *i, | 592 | static ssize_t get_pages_bvec(struct iov_iter *i, |
591 | struct page **pages, unsigned maxpages, | 593 | struct page **pages, size_t maxsize, unsigned maxpages, |
592 | size_t *start) | 594 | size_t *start) |
593 | { | 595 | { |
594 | const struct bio_vec *bvec = i->bvec; | 596 | const struct bio_vec *bvec = i->bvec; |
595 | size_t len = bvec->bv_len - i->iov_offset; | 597 | size_t len = bvec->bv_len - i->iov_offset; |
596 | if (len > i->count) | 598 | if (len > i->count) |
597 | len = i->count; | 599 | len = i->count; |
600 | if (len > maxsize) | ||
601 | len = maxsize; | ||
598 | /* can't be more than PAGE_SIZE */ | 602 | /* can't be more than PAGE_SIZE */ |
599 | *start = bvec->bv_offset + i->iov_offset; | 603 | *start = bvec->bv_offset + i->iov_offset; |
600 | 604 | ||
@@ -711,13 +715,13 @@ unsigned long iov_iter_alignment(const struct iov_iter *i) | |||
711 | EXPORT_SYMBOL(iov_iter_alignment); | 715 | EXPORT_SYMBOL(iov_iter_alignment); |
712 | 716 | ||
713 | ssize_t iov_iter_get_pages(struct iov_iter *i, | 717 | ssize_t iov_iter_get_pages(struct iov_iter *i, |
714 | struct page **pages, unsigned maxpages, | 718 | struct page **pages, size_t maxsize, unsigned maxpages, |
715 | size_t *start) | 719 | size_t *start) |
716 | { | 720 | { |
717 | if (i->type & ITER_BVEC) | 721 | if (i->type & ITER_BVEC) |
718 | return get_pages_bvec(i, pages, maxpages, start); | 722 | return get_pages_bvec(i, pages, maxsize, maxpages, start); |
719 | else | 723 | else |
720 | return get_pages_iovec(i, pages, maxpages, start); | 724 | return get_pages_iovec(i, pages, maxsize, maxpages, start); |
721 | } | 725 | } |
722 | EXPORT_SYMBOL(iov_iter_get_pages); | 726 | EXPORT_SYMBOL(iov_iter_get_pages); |
723 | 727 | ||
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 085dc6d2f876..28928ce9b07f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -292,6 +292,9 @@ struct mem_cgroup { | |||
292 | /* vmpressure notifications */ | 292 | /* vmpressure notifications */ |
293 | struct vmpressure vmpressure; | 293 | struct vmpressure vmpressure; |
294 | 294 | ||
295 | /* css_online() has been completed */ | ||
296 | int initialized; | ||
297 | |||
295 | /* | 298 | /* |
296 | * the counter to account for mem+swap usage. | 299 | * the counter to account for mem+swap usage. |
297 | */ | 300 | */ |
@@ -1099,10 +1102,21 @@ skip_node: | |||
1099 | * skipping css reference should be safe. | 1102 | * skipping css reference should be safe. |
1100 | */ | 1103 | */ |
1101 | if (next_css) { | 1104 | if (next_css) { |
1102 | if ((next_css == &root->css) || | 1105 | struct mem_cgroup *memcg = mem_cgroup_from_css(next_css); |
1103 | ((next_css->flags & CSS_ONLINE) && | 1106 | |
1104 | css_tryget_online(next_css))) | 1107 | if (next_css == &root->css) |
1105 | return mem_cgroup_from_css(next_css); | 1108 | return memcg; |
1109 | |||
1110 | if (css_tryget_online(next_css)) { | ||
1111 | /* | ||
1112 | * Make sure the memcg is initialized: | ||
1113 | * mem_cgroup_css_online() orders the the | ||
1114 | * initialization against setting the flag. | ||
1115 | */ | ||
1116 | if (smp_load_acquire(&memcg->initialized)) | ||
1117 | return memcg; | ||
1118 | css_put(next_css); | ||
1119 | } | ||
1106 | 1120 | ||
1107 | prev_css = next_css; | 1121 | prev_css = next_css; |
1108 | goto skip_node; | 1122 | goto skip_node; |
@@ -5549,6 +5563,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) | |||
5549 | { | 5563 | { |
5550 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 5564 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); |
5551 | struct mem_cgroup *parent = mem_cgroup_from_css(css->parent); | 5565 | struct mem_cgroup *parent = mem_cgroup_from_css(css->parent); |
5566 | int ret; | ||
5552 | 5567 | ||
5553 | if (css->id > MEM_CGROUP_ID_MAX) | 5568 | if (css->id > MEM_CGROUP_ID_MAX) |
5554 | return -ENOSPC; | 5569 | return -ENOSPC; |
@@ -5585,7 +5600,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) | |||
5585 | } | 5600 | } |
5586 | mutex_unlock(&memcg_create_mutex); | 5601 | mutex_unlock(&memcg_create_mutex); |
5587 | 5602 | ||
5588 | return memcg_init_kmem(memcg, &memory_cgrp_subsys); | 5603 | ret = memcg_init_kmem(memcg, &memory_cgrp_subsys); |
5604 | if (ret) | ||
5605 | return ret; | ||
5606 | |||
5607 | /* | ||
5608 | * Make sure the memcg is initialized: mem_cgroup_iter() | ||
5609 | * orders reading memcg->initialized against its callers | ||
5610 | * reading the memcg members. | ||
5611 | */ | ||
5612 | smp_store_release(&memcg->initialized, 1); | ||
5613 | |||
5614 | return 0; | ||
5589 | } | 5615 | } |
5590 | 5616 | ||
5591 | /* | 5617 | /* |
diff --git a/mm/memory.c b/mm/memory.c index adeac306610f..e229970e4223 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -118,6 +118,8 @@ __setup("norandmaps", disable_randmaps); | |||
118 | unsigned long zero_pfn __read_mostly; | 118 | unsigned long zero_pfn __read_mostly; |
119 | unsigned long highest_memmap_pfn __read_mostly; | 119 | unsigned long highest_memmap_pfn __read_mostly; |
120 | 120 | ||
121 | EXPORT_SYMBOL(zero_pfn); | ||
122 | |||
121 | /* | 123 | /* |
122 | * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() | 124 | * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() |
123 | */ | 125 | */ |
@@ -1125,7 +1127,7 @@ again: | |||
1125 | addr) != page->index) { | 1127 | addr) != page->index) { |
1126 | pte_t ptfile = pgoff_to_pte(page->index); | 1128 | pte_t ptfile = pgoff_to_pte(page->index); |
1127 | if (pte_soft_dirty(ptent)) | 1129 | if (pte_soft_dirty(ptent)) |
1128 | pte_file_mksoft_dirty(ptfile); | 1130 | ptfile = pte_file_mksoft_dirty(ptfile); |
1129 | set_pte_at(mm, addr, pte, ptfile); | 1131 | set_pte_at(mm, addr, pte, ptfile); |
1130 | } | 1132 | } |
1131 | if (PageAnon(page)) | 1133 | if (PageAnon(page)) |
diff --git a/mm/migrate.c b/mm/migrate.c index f78ec9bd454d..2740360cd216 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -146,8 +146,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, | |||
146 | pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); | 146 | pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); |
147 | if (pte_swp_soft_dirty(*ptep)) | 147 | if (pte_swp_soft_dirty(*ptep)) |
148 | pte = pte_mksoft_dirty(pte); | 148 | pte = pte_mksoft_dirty(pte); |
149 | |||
150 | /* Recheck VMA as permissions can change since migration started */ | ||
149 | if (is_write_migration_entry(entry)) | 151 | if (is_write_migration_entry(entry)) |
150 | pte = pte_mkwrite(pte); | 152 | pte = maybe_mkwrite(pte, vma); |
153 | |||
151 | #ifdef CONFIG_HUGETLB_PAGE | 154 | #ifdef CONFIG_HUGETLB_PAGE |
152 | if (PageHuge(new)) { | 155 | if (PageHuge(new)) { |
153 | pte = pte_mkhuge(pte); | 156 | pte = pte_mkhuge(pte); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 18cee0d4c8a2..eee961958021 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1612,7 +1612,7 @@ again: | |||
1612 | } | 1612 | } |
1613 | 1613 | ||
1614 | __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); | 1614 | __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); |
1615 | if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 && | 1615 | if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && |
1616 | !zone_is_fair_depleted(zone)) | 1616 | !zone_is_fair_depleted(zone)) |
1617 | zone_set_flag(zone, ZONE_FAIR_DEPLETED); | 1617 | zone_set_flag(zone, ZONE_FAIR_DEPLETED); |
1618 | 1618 | ||
@@ -5701,9 +5701,8 @@ static void __setup_per_zone_wmarks(void) | |||
5701 | zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); | 5701 | zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); |
5702 | 5702 | ||
5703 | __mod_zone_page_state(zone, NR_ALLOC_BATCH, | 5703 | __mod_zone_page_state(zone, NR_ALLOC_BATCH, |
5704 | high_wmark_pages(zone) - | 5704 | high_wmark_pages(zone) - low_wmark_pages(zone) - |
5705 | low_wmark_pages(zone) - | 5705 | atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); |
5706 | zone_page_state(zone, NR_ALLOC_BATCH)); | ||
5707 | 5706 | ||
5708 | setup_zone_migrate_reserve(zone); | 5707 | setup_zone_migrate_reserve(zone); |
5709 | spin_unlock_irqrestore(&zone->lock, flags); | 5708 | spin_unlock_irqrestore(&zone->lock, flags); |
diff --git a/mm/shmem.c b/mm/shmem.c index 0e5fb225007c..469f90d56051 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -2367,8 +2367,10 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc | |||
2367 | 2367 | ||
2368 | if (new_dentry->d_inode) { | 2368 | if (new_dentry->d_inode) { |
2369 | (void) shmem_unlink(new_dir, new_dentry); | 2369 | (void) shmem_unlink(new_dir, new_dentry); |
2370 | if (they_are_dirs) | 2370 | if (they_are_dirs) { |
2371 | drop_nlink(new_dentry->d_inode); | ||
2371 | drop_nlink(old_dir); | 2372 | drop_nlink(old_dir); |
2373 | } | ||
2372 | } else if (they_are_dirs) { | 2374 | } else if (they_are_dirs) { |
2373 | drop_nlink(old_dir); | 2375 | drop_nlink(old_dir); |
2374 | inc_nlink(new_dir); | 2376 | inc_nlink(new_dir); |
@@ -2124,7 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2124 | int | 2124 | int |
2125 | __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | 2125 | __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) |
2126 | { | 2126 | { |
2127 | size_t left_over, freelist_size, ralign; | 2127 | size_t left_over, freelist_size; |
2128 | size_t ralign = BYTES_PER_WORD; | ||
2128 | gfp_t gfp; | 2129 | gfp_t gfp; |
2129 | int err; | 2130 | int err; |
2130 | size_t size = cachep->size; | 2131 | size_t size = cachep->size; |
@@ -2157,14 +2158,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
2157 | size &= ~(BYTES_PER_WORD - 1); | 2158 | size &= ~(BYTES_PER_WORD - 1); |
2158 | } | 2159 | } |
2159 | 2160 | ||
2160 | /* | ||
2161 | * Redzoning and user store require word alignment or possibly larger. | ||
2162 | * Note this will be overridden by architecture or caller mandated | ||
2163 | * alignment if either is greater than BYTES_PER_WORD. | ||
2164 | */ | ||
2165 | if (flags & SLAB_STORE_USER) | ||
2166 | ralign = BYTES_PER_WORD; | ||
2167 | |||
2168 | if (flags & SLAB_RED_ZONE) { | 2161 | if (flags & SLAB_RED_ZONE) { |
2169 | ralign = REDZONE_ALIGN; | 2162 | ralign = REDZONE_ALIGN; |
2170 | /* If redzoning, ensure that the second redzone is suitably | 2163 | /* If redzoning, ensure that the second redzone is suitably |
@@ -2994,7 +2987,7 @@ out: | |||
2994 | 2987 | ||
2995 | #ifdef CONFIG_NUMA | 2988 | #ifdef CONFIG_NUMA |
2996 | /* | 2989 | /* |
2997 | * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set. | 2990 | * Try allocating on another node if PFA_SPREAD_SLAB is a mempolicy is set. |
2998 | * | 2991 | * |
2999 | * If we are in_interrupt, then process context, including cpusets and | 2992 | * If we are in_interrupt, then process context, including cpusets and |
3000 | * mempolicy, may not apply and should not be used for allocation policy. | 2993 | * mempolicy, may not apply and should not be used for allocation policy. |
@@ -3226,7 +3219,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) | |||
3226 | { | 3219 | { |
3227 | void *objp; | 3220 | void *objp; |
3228 | 3221 | ||
3229 | if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) { | 3222 | if (current->mempolicy || cpuset_do_slab_mem_spread()) { |
3230 | objp = alternate_node_alloc(cache, flags); | 3223 | objp = alternate_node_alloc(cache, flags); |
3231 | if (objp) | 3224 | if (objp) |
3232 | goto out; | 3225 | goto out; |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 62a7fa2e3569..b6c04cbcfdc5 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -309,6 +309,9 @@ struct br_input_skb_cb { | |||
309 | int igmp; | 309 | int igmp; |
310 | int mrouters_only; | 310 | int mrouters_only; |
311 | #endif | 311 | #endif |
312 | #ifdef CONFIG_BRIDGE_VLAN_FILTERING | ||
313 | bool vlan_filtered; | ||
314 | #endif | ||
312 | }; | 315 | }; |
313 | 316 | ||
314 | #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) | 317 | #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) |
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index e1bcd653899b..3ba57fcdcd13 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c | |||
@@ -27,9 +27,13 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags) | |||
27 | { | 27 | { |
28 | if (flags & BRIDGE_VLAN_INFO_PVID) | 28 | if (flags & BRIDGE_VLAN_INFO_PVID) |
29 | __vlan_add_pvid(v, vid); | 29 | __vlan_add_pvid(v, vid); |
30 | else | ||
31 | __vlan_delete_pvid(v, vid); | ||
30 | 32 | ||
31 | if (flags & BRIDGE_VLAN_INFO_UNTAGGED) | 33 | if (flags & BRIDGE_VLAN_INFO_UNTAGGED) |
32 | set_bit(vid, v->untagged_bitmap); | 34 | set_bit(vid, v->untagged_bitmap); |
35 | else | ||
36 | clear_bit(vid, v->untagged_bitmap); | ||
33 | } | 37 | } |
34 | 38 | ||
35 | static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) | 39 | static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) |
@@ -125,7 +129,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, | |||
125 | { | 129 | { |
126 | u16 vid; | 130 | u16 vid; |
127 | 131 | ||
128 | if (!br->vlan_enabled) | 132 | /* If this packet was not filtered at input, let it pass */ |
133 | if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) | ||
129 | goto out; | 134 | goto out; |
130 | 135 | ||
131 | /* Vlan filter table must be configured at this point. The | 136 | /* Vlan filter table must be configured at this point. The |
@@ -164,8 +169,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
164 | /* If VLAN filtering is disabled on the bridge, all packets are | 169 | /* If VLAN filtering is disabled on the bridge, all packets are |
165 | * permitted. | 170 | * permitted. |
166 | */ | 171 | */ |
167 | if (!br->vlan_enabled) | 172 | if (!br->vlan_enabled) { |
173 | BR_INPUT_SKB_CB(skb)->vlan_filtered = false; | ||
168 | return true; | 174 | return true; |
175 | } | ||
169 | 176 | ||
170 | /* If there are no vlan in the permitted list, all packets are | 177 | /* If there are no vlan in the permitted list, all packets are |
171 | * rejected. | 178 | * rejected. |
@@ -173,6 +180,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
173 | if (!v) | 180 | if (!v) |
174 | goto drop; | 181 | goto drop; |
175 | 182 | ||
183 | BR_INPUT_SKB_CB(skb)->vlan_filtered = true; | ||
176 | proto = br->vlan_proto; | 184 | proto = br->vlan_proto; |
177 | 185 | ||
178 | /* If vlan tx offload is disabled on bridge device and frame was | 186 | /* If vlan tx offload is disabled on bridge device and frame was |
@@ -251,7 +259,8 @@ bool br_allowed_egress(struct net_bridge *br, | |||
251 | { | 259 | { |
252 | u16 vid; | 260 | u16 vid; |
253 | 261 | ||
254 | if (!br->vlan_enabled) | 262 | /* If this packet was not filtered at input, let it pass */ |
263 | if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) | ||
255 | return true; | 264 | return true; |
256 | 265 | ||
257 | if (!v) | 266 | if (!v) |
@@ -270,6 +279,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) | |||
270 | struct net_bridge *br = p->br; | 279 | struct net_bridge *br = p->br; |
271 | struct net_port_vlans *v; | 280 | struct net_port_vlans *v; |
272 | 281 | ||
282 | /* If filtering was disabled at input, let it pass. */ | ||
273 | if (!br->vlan_enabled) | 283 | if (!br->vlan_enabled) |
274 | return true; | 284 | return true; |
275 | 285 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index ab9a16530c36..cf8a95f48cff 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -4809,9 +4809,14 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev, | |||
4809 | sysfs_remove_link(&(dev->dev.kobj), linkname); | 4809 | sysfs_remove_link(&(dev->dev.kobj), linkname); |
4810 | } | 4810 | } |
4811 | 4811 | ||
4812 | #define netdev_adjacent_is_neigh_list(dev, dev_list) \ | 4812 | static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, |
4813 | (dev_list == &dev->adj_list.upper || \ | 4813 | struct net_device *adj_dev, |
4814 | dev_list == &dev->adj_list.lower) | 4814 | struct list_head *dev_list) |
4815 | { | ||
4816 | return (dev_list == &dev->adj_list.upper || | ||
4817 | dev_list == &dev->adj_list.lower) && | ||
4818 | net_eq(dev_net(dev), dev_net(adj_dev)); | ||
4819 | } | ||
4815 | 4820 | ||
4816 | static int __netdev_adjacent_dev_insert(struct net_device *dev, | 4821 | static int __netdev_adjacent_dev_insert(struct net_device *dev, |
4817 | struct net_device *adj_dev, | 4822 | struct net_device *adj_dev, |
@@ -4841,7 +4846,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, | |||
4841 | pr_debug("dev_hold for %s, because of link added from %s to %s\n", | 4846 | pr_debug("dev_hold for %s, because of link added from %s to %s\n", |
4842 | adj_dev->name, dev->name, adj_dev->name); | 4847 | adj_dev->name, dev->name, adj_dev->name); |
4843 | 4848 | ||
4844 | if (netdev_adjacent_is_neigh_list(dev, dev_list)) { | 4849 | if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { |
4845 | ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); | 4850 | ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); |
4846 | if (ret) | 4851 | if (ret) |
4847 | goto free_adj; | 4852 | goto free_adj; |
@@ -4862,7 +4867,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, | |||
4862 | return 0; | 4867 | return 0; |
4863 | 4868 | ||
4864 | remove_symlinks: | 4869 | remove_symlinks: |
4865 | if (netdev_adjacent_is_neigh_list(dev, dev_list)) | 4870 | if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) |
4866 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); | 4871 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); |
4867 | free_adj: | 4872 | free_adj: |
4868 | kfree(adj); | 4873 | kfree(adj); |
@@ -4895,8 +4900,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev, | |||
4895 | if (adj->master) | 4900 | if (adj->master) |
4896 | sysfs_remove_link(&(dev->dev.kobj), "master"); | 4901 | sysfs_remove_link(&(dev->dev.kobj), "master"); |
4897 | 4902 | ||
4898 | if (netdev_adjacent_is_neigh_list(dev, dev_list) && | 4903 | if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) |
4899 | net_eq(dev_net(dev),dev_net(adj_dev))) | ||
4900 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); | 4904 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); |
4901 | 4905 | ||
4902 | list_del_rcu(&adj->list); | 4906 | list_del_rcu(&adj->list); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index da1378a3e2c7..8d289697cc7a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3152,6 +3152,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
3152 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; | 3152 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; |
3153 | goto done; | 3153 | goto done; |
3154 | } | 3154 | } |
3155 | /* switch back to head shinfo */ | ||
3156 | pinfo = skb_shinfo(p); | ||
3157 | |||
3155 | if (pinfo->frag_list) | 3158 | if (pinfo->frag_list) |
3156 | goto merge; | 3159 | goto merge; |
3157 | if (skb_gro_len(p) != pinfo->gso_size) | 3160 | if (skb_gro_len(p) != pinfo->gso_size) |
diff --git a/net/core/sock.c b/net/core/sock.c index d372b4bd3f99..9c3f823e76a9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1866,7 +1866,7 @@ EXPORT_SYMBOL(sock_alloc_send_skb); | |||
1866 | * skb_page_frag_refill - check that a page_frag contains enough room | 1866 | * skb_page_frag_refill - check that a page_frag contains enough room |
1867 | * @sz: minimum size of the fragment we want to get | 1867 | * @sz: minimum size of the fragment we want to get |
1868 | * @pfrag: pointer to page_frag | 1868 | * @pfrag: pointer to page_frag |
1869 | * @prio: priority for memory allocation | 1869 | * @gfp: priority for memory allocation |
1870 | * | 1870 | * |
1871 | * Note: While this allocator tries to use high order pages, there is | 1871 | * Note: While this allocator tries to use high order pages, there is |
1872 | * no guarantee that allocations succeed. Therefore, @sz MUST be | 1872 | * no guarantee that allocations succeed. Therefore, @sz MUST be |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index afed1aac2638..bda4bb8ae260 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -79,10 +79,10 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst, | |||
79 | idst->saddr = saddr; | 79 | idst->saddr = saddr; |
80 | } | 80 | } |
81 | 81 | ||
82 | static void tunnel_dst_set(struct ip_tunnel *t, | 82 | static noinline void tunnel_dst_set(struct ip_tunnel *t, |
83 | struct dst_entry *dst, __be32 saddr) | 83 | struct dst_entry *dst, __be32 saddr) |
84 | { | 84 | { |
85 | __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr); | 85 | __tunnel_dst_set(raw_cpu_ptr(t->dst_cache), dst, saddr); |
86 | } | 86 | } |
87 | 87 | ||
88 | static void tunnel_dst_reset(struct ip_tunnel *t) | 88 | static void tunnel_dst_reset(struct ip_tunnel *t) |
@@ -106,7 +106,7 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, | |||
106 | struct dst_entry *dst; | 106 | struct dst_entry *dst; |
107 | 107 | ||
108 | rcu_read_lock(); | 108 | rcu_read_lock(); |
109 | idst = this_cpu_ptr(t->dst_cache); | 109 | idst = raw_cpu_ptr(t->dst_cache); |
110 | dst = rcu_dereference(idst->dst); | 110 | dst = rcu_dereference(idst->dst); |
111 | if (dst && !atomic_inc_not_zero(&dst->__refcnt)) | 111 | if (dst && !atomic_inc_not_zero(&dst->__refcnt)) |
112 | dst = NULL; | 112 | dst = NULL; |
@@ -764,9 +764,14 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) | |||
764 | 764 | ||
765 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); | 765 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); |
766 | 766 | ||
767 | if (!t && (cmd == SIOCADDTUNNEL)) { | 767 | if (cmd == SIOCADDTUNNEL) { |
768 | t = ip_tunnel_create(net, itn, p); | 768 | if (!t) { |
769 | err = PTR_ERR_OR_ZERO(t); | 769 | t = ip_tunnel_create(net, itn, p); |
770 | err = PTR_ERR_OR_ZERO(t); | ||
771 | break; | ||
772 | } | ||
773 | |||
774 | err = -EEXIST; | ||
770 | break; | 775 | break; |
771 | } | 776 | } |
772 | if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { | 777 | if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index eaa4b000c7b4..cbadb942c332 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -746,7 +746,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow | |||
746 | } | 746 | } |
747 | 747 | ||
748 | n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); | 748 | n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); |
749 | if (n) { | 749 | if (!IS_ERR(n)) { |
750 | if (!(n->nud_state & NUD_VALID)) { | 750 | if (!(n->nud_state & NUD_VALID)) { |
751 | neigh_event_send(n, NULL); | 751 | neigh_event_send(n, NULL); |
752 | } else { | 752 | } else { |
@@ -2265,9 +2265,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, | |||
2265 | return rt; | 2265 | return rt; |
2266 | 2266 | ||
2267 | if (flp4->flowi4_proto) | 2267 | if (flp4->flowi4_proto) |
2268 | rt = (struct rtable *) xfrm_lookup(net, &rt->dst, | 2268 | rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, |
2269 | flowi4_to_flowi(flp4), | 2269 | flowi4_to_flowi(flp4), |
2270 | sk, 0); | 2270 | sk, 0); |
2271 | 2271 | ||
2272 | return rt; | 2272 | return rt; |
2273 | } | 2273 | } |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index fc1fac2a0528..3e118dfddd02 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3094,11 +3094,13 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
3094 | 3094 | ||
3095 | write_unlock_bh(&idev->lock); | 3095 | write_unlock_bh(&idev->lock); |
3096 | 3096 | ||
3097 | /* Step 5: Discard multicast list */ | 3097 | /* Step 5: Discard anycast and multicast list */ |
3098 | if (how) | 3098 | if (how) { |
3099 | ipv6_ac_destroy_dev(idev); | ||
3099 | ipv6_mc_destroy_dev(idev); | 3100 | ipv6_mc_destroy_dev(idev); |
3100 | else | 3101 | } else { |
3101 | ipv6_mc_down(idev); | 3102 | ipv6_mc_down(idev); |
3103 | } | ||
3102 | 3104 | ||
3103 | idev->tstamp = jiffies; | 3105 | idev->tstamp = jiffies; |
3104 | 3106 | ||
@@ -4778,10 +4780,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
4778 | 4780 | ||
4779 | if (ip6_del_rt(ifp->rt)) | 4781 | if (ip6_del_rt(ifp->rt)) |
4780 | dst_free(&ifp->rt->dst); | 4782 | dst_free(&ifp->rt->dst); |
4783 | |||
4784 | rt_genid_bump_ipv6(net); | ||
4781 | break; | 4785 | break; |
4782 | } | 4786 | } |
4783 | atomic_inc(&net->ipv6.dev_addr_genid); | 4787 | atomic_inc(&net->ipv6.dev_addr_genid); |
4784 | rt_genid_bump_ipv6(net); | ||
4785 | } | 4788 | } |
4786 | 4789 | ||
4787 | static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | 4790 | static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) |
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index e6960457f625..98cc4cd570e2 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c | |||
@@ -8,6 +8,13 @@ | |||
8 | #include <net/addrconf.h> | 8 | #include <net/addrconf.h> |
9 | #include <net/ip.h> | 9 | #include <net/ip.h> |
10 | 10 | ||
11 | /* if ipv6 module registers this function is used by xfrm to force all | ||
12 | * sockets to relookup their nodes - this is fairly expensive, be | ||
13 | * careful | ||
14 | */ | ||
15 | void (*__fib6_flush_trees)(struct net *); | ||
16 | EXPORT_SYMBOL(__fib6_flush_trees); | ||
17 | |||
11 | #define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) | 18 | #define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) |
12 | 19 | ||
13 | static inline unsigned int ipv6_addr_scope2type(unsigned int scope) | 20 | static inline unsigned int ipv6_addr_scope2type(unsigned int scope) |
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index ff2de7d9d8e6..9a386842fd62 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
@@ -351,6 +351,27 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr) | |||
351 | return __ipv6_dev_ac_dec(idev, addr); | 351 | return __ipv6_dev_ac_dec(idev, addr); |
352 | } | 352 | } |
353 | 353 | ||
354 | void ipv6_ac_destroy_dev(struct inet6_dev *idev) | ||
355 | { | ||
356 | struct ifacaddr6 *aca; | ||
357 | |||
358 | write_lock_bh(&idev->lock); | ||
359 | while ((aca = idev->ac_list) != NULL) { | ||
360 | idev->ac_list = aca->aca_next; | ||
361 | write_unlock_bh(&idev->lock); | ||
362 | |||
363 | addrconf_leave_solict(idev, &aca->aca_addr); | ||
364 | |||
365 | dst_hold(&aca->aca_rt->dst); | ||
366 | ip6_del_rt(aca->aca_rt); | ||
367 | |||
368 | aca_put(aca); | ||
369 | |||
370 | write_lock_bh(&idev->lock); | ||
371 | } | ||
372 | write_unlock_bh(&idev->lock); | ||
373 | } | ||
374 | |||
354 | /* | 375 | /* |
355 | * check if the interface has this anycast address | 376 | * check if the interface has this anycast address |
356 | * called with rcu_read_lock() | 377 | * called with rcu_read_lock() |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 76b7f5ee8f4c..97b9fa8de377 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -1605,6 +1605,24 @@ static void fib6_prune_clones(struct net *net, struct fib6_node *fn) | |||
1605 | fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL); | 1605 | fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL); |
1606 | } | 1606 | } |
1607 | 1607 | ||
1608 | static int fib6_update_sernum(struct rt6_info *rt, void *arg) | ||
1609 | { | ||
1610 | __u32 sernum = *(__u32 *)arg; | ||
1611 | |||
1612 | if (rt->rt6i_node && | ||
1613 | rt->rt6i_node->fn_sernum != sernum) | ||
1614 | rt->rt6i_node->fn_sernum = sernum; | ||
1615 | |||
1616 | return 0; | ||
1617 | } | ||
1618 | |||
1619 | static void fib6_flush_trees(struct net *net) | ||
1620 | { | ||
1621 | __u32 new_sernum = fib6_new_sernum(); | ||
1622 | |||
1623 | fib6_clean_all(net, fib6_update_sernum, &new_sernum); | ||
1624 | } | ||
1625 | |||
1608 | /* | 1626 | /* |
1609 | * Garbage collection | 1627 | * Garbage collection |
1610 | */ | 1628 | */ |
@@ -1788,6 +1806,8 @@ int __init fib6_init(void) | |||
1788 | NULL); | 1806 | NULL); |
1789 | if (ret) | 1807 | if (ret) |
1790 | goto out_unregister_subsys; | 1808 | goto out_unregister_subsys; |
1809 | |||
1810 | __fib6_flush_trees = fib6_flush_trees; | ||
1791 | out: | 1811 | out: |
1792 | return ret; | 1812 | return ret; |
1793 | 1813 | ||
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 5f19dfbc4c6a..f304471477dc 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -314,6 +314,8 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, | |||
314 | struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); | 314 | struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); |
315 | 315 | ||
316 | t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE); | 316 | t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE); |
317 | if (t && create) | ||
318 | return NULL; | ||
317 | if (t || !create) | 319 | if (t || !create) |
318 | return t; | 320 | return t; |
319 | 321 | ||
@@ -1724,4 +1726,5 @@ MODULE_LICENSE("GPL"); | |||
1724 | MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)"); | 1726 | MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)"); |
1725 | MODULE_DESCRIPTION("GRE over IPv6 tunneling device"); | 1727 | MODULE_DESCRIPTION("GRE over IPv6 tunneling device"); |
1726 | MODULE_ALIAS_RTNL_LINK("ip6gre"); | 1728 | MODULE_ALIAS_RTNL_LINK("ip6gre"); |
1729 | MODULE_ALIAS_RTNL_LINK("ip6gretap"); | ||
1727 | MODULE_ALIAS_NETDEV("ip6gre0"); | 1730 | MODULE_ALIAS_NETDEV("ip6gre0"); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 315a55d66079..0a3448b2888f 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1009,7 +1009,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | |||
1009 | if (final_dst) | 1009 | if (final_dst) |
1010 | fl6->daddr = *final_dst; | 1010 | fl6->daddr = *final_dst; |
1011 | 1011 | ||
1012 | return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); | 1012 | return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
1013 | } | 1013 | } |
1014 | EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); | 1014 | EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); |
1015 | 1015 | ||
@@ -1041,7 +1041,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | |||
1041 | if (final_dst) | 1041 | if (final_dst) |
1042 | fl6->daddr = *final_dst; | 1042 | fl6->daddr = *final_dst; |
1043 | 1043 | ||
1044 | return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); | 1044 | return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
1045 | } | 1045 | } |
1046 | EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); | 1046 | EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); |
1047 | 1047 | ||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index f9de5a695072..69a84b464009 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -364,8 +364,12 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net, | |||
364 | (t = rtnl_dereference(*tp)) != NULL; | 364 | (t = rtnl_dereference(*tp)) != NULL; |
365 | tp = &t->next) { | 365 | tp = &t->next) { |
366 | if (ipv6_addr_equal(local, &t->parms.laddr) && | 366 | if (ipv6_addr_equal(local, &t->parms.laddr) && |
367 | ipv6_addr_equal(remote, &t->parms.raddr)) | 367 | ipv6_addr_equal(remote, &t->parms.raddr)) { |
368 | if (create) | ||
369 | return NULL; | ||
370 | |||
368 | return t; | 371 | return t; |
372 | } | ||
369 | } | 373 | } |
370 | if (!create) | 374 | if (!create) |
371 | return NULL; | 375 | return NULL; |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 7f52fd9fa7b0..5833a2244467 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -253,8 +253,12 @@ static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p, | |||
253 | (t = rtnl_dereference(*tp)) != NULL; | 253 | (t = rtnl_dereference(*tp)) != NULL; |
254 | tp = &t->next) { | 254 | tp = &t->next) { |
255 | if (ipv6_addr_equal(local, &t->parms.laddr) && | 255 | if (ipv6_addr_equal(local, &t->parms.laddr) && |
256 | ipv6_addr_equal(remote, &t->parms.raddr)) | 256 | ipv6_addr_equal(remote, &t->parms.raddr)) { |
257 | if (create) | ||
258 | return NULL; | ||
259 | |||
257 | return t; | 260 | return t; |
261 | } | ||
258 | } | 262 | } |
259 | if (!create) | 263 | if (!create) |
260 | return NULL; | 264 | return NULL; |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f23fbd28a501..bafde82324c5 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -314,7 +314,6 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net, | |||
314 | 314 | ||
315 | memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); | 315 | memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); |
316 | rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); | 316 | rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); |
317 | rt->rt6i_genid = rt_genid_ipv6(net); | ||
318 | INIT_LIST_HEAD(&rt->rt6i_siblings); | 317 | INIT_LIST_HEAD(&rt->rt6i_siblings); |
319 | } | 318 | } |
320 | return rt; | 319 | return rt; |
@@ -1098,9 +1097,6 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) | |||
1098 | * DST_OBSOLETE_FORCE_CHK which forces validation calls down | 1097 | * DST_OBSOLETE_FORCE_CHK which forces validation calls down |
1099 | * into this function always. | 1098 | * into this function always. |
1100 | */ | 1099 | */ |
1101 | if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev))) | ||
1102 | return NULL; | ||
1103 | |||
1104 | if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie)) | 1100 | if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie)) |
1105 | return NULL; | 1101 | return NULL; |
1106 | 1102 | ||
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 441875f03750..a1e433b88c66 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -1822,7 +1822,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | |||
1822 | sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; | 1822 | sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; |
1823 | if (sdata->vif.bss_conf.use_short_slot) | 1823 | if (sdata->vif.bss_conf.use_short_slot) |
1824 | sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; | 1824 | sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; |
1825 | sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period; | 1825 | sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period; |
1826 | sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; | 1826 | sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; |
1827 | 1827 | ||
1828 | sinfo->sta_flags.set = 0; | 1828 | sinfo->sta_flags.set = 0; |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index b5c1d3aadb41..6d77cce481d5 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -847,6 +847,7 @@ config NETFILTER_XT_TARGET_TPROXY | |||
847 | tristate '"TPROXY" target transparent proxying support' | 847 | tristate '"TPROXY" target transparent proxying support' |
848 | depends on NETFILTER_XTABLES | 848 | depends on NETFILTER_XTABLES |
849 | depends on NETFILTER_ADVANCED | 849 | depends on NETFILTER_ADVANCED |
850 | depends on (IPV6 || IPV6=n) | ||
850 | depends on IP_NF_MANGLE | 851 | depends on IP_NF_MANGLE |
851 | select NF_DEFRAG_IPV4 | 852 | select NF_DEFRAG_IPV4 |
852 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES | 853 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index c138b8fbe280..f37f0716a9fc 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -222,6 +222,51 @@ replay: | |||
222 | } | 222 | } |
223 | } | 223 | } |
224 | 224 | ||
225 | struct nfnl_err { | ||
226 | struct list_head head; | ||
227 | struct nlmsghdr *nlh; | ||
228 | int err; | ||
229 | }; | ||
230 | |||
231 | static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err) | ||
232 | { | ||
233 | struct nfnl_err *nfnl_err; | ||
234 | |||
235 | nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL); | ||
236 | if (nfnl_err == NULL) | ||
237 | return -ENOMEM; | ||
238 | |||
239 | nfnl_err->nlh = nlh; | ||
240 | nfnl_err->err = err; | ||
241 | list_add_tail(&nfnl_err->head, list); | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | static void nfnl_err_del(struct nfnl_err *nfnl_err) | ||
247 | { | ||
248 | list_del(&nfnl_err->head); | ||
249 | kfree(nfnl_err); | ||
250 | } | ||
251 | |||
252 | static void nfnl_err_reset(struct list_head *err_list) | ||
253 | { | ||
254 | struct nfnl_err *nfnl_err, *next; | ||
255 | |||
256 | list_for_each_entry_safe(nfnl_err, next, err_list, head) | ||
257 | nfnl_err_del(nfnl_err); | ||
258 | } | ||
259 | |||
260 | static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb) | ||
261 | { | ||
262 | struct nfnl_err *nfnl_err, *next; | ||
263 | |||
264 | list_for_each_entry_safe(nfnl_err, next, err_list, head) { | ||
265 | netlink_ack(skb, nfnl_err->nlh, nfnl_err->err); | ||
266 | nfnl_err_del(nfnl_err); | ||
267 | } | ||
268 | } | ||
269 | |||
225 | static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, | 270 | static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, |
226 | u_int16_t subsys_id) | 271 | u_int16_t subsys_id) |
227 | { | 272 | { |
@@ -230,6 +275,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
230 | const struct nfnetlink_subsystem *ss; | 275 | const struct nfnetlink_subsystem *ss; |
231 | const struct nfnl_callback *nc; | 276 | const struct nfnl_callback *nc; |
232 | bool success = true, done = false; | 277 | bool success = true, done = false; |
278 | static LIST_HEAD(err_list); | ||
233 | int err; | 279 | int err; |
234 | 280 | ||
235 | if (subsys_id >= NFNL_SUBSYS_COUNT) | 281 | if (subsys_id >= NFNL_SUBSYS_COUNT) |
@@ -287,6 +333,7 @@ replay: | |||
287 | type = nlh->nlmsg_type; | 333 | type = nlh->nlmsg_type; |
288 | if (type == NFNL_MSG_BATCH_BEGIN) { | 334 | if (type == NFNL_MSG_BATCH_BEGIN) { |
289 | /* Malformed: Batch begin twice */ | 335 | /* Malformed: Batch begin twice */ |
336 | nfnl_err_reset(&err_list); | ||
290 | success = false; | 337 | success = false; |
291 | goto done; | 338 | goto done; |
292 | } else if (type == NFNL_MSG_BATCH_END) { | 339 | } else if (type == NFNL_MSG_BATCH_END) { |
@@ -333,6 +380,7 @@ replay: | |||
333 | * original skb. | 380 | * original skb. |
334 | */ | 381 | */ |
335 | if (err == -EAGAIN) { | 382 | if (err == -EAGAIN) { |
383 | nfnl_err_reset(&err_list); | ||
336 | ss->abort(skb); | 384 | ss->abort(skb); |
337 | nfnl_unlock(subsys_id); | 385 | nfnl_unlock(subsys_id); |
338 | kfree_skb(nskb); | 386 | kfree_skb(nskb); |
@@ -341,11 +389,24 @@ replay: | |||
341 | } | 389 | } |
342 | ack: | 390 | ack: |
343 | if (nlh->nlmsg_flags & NLM_F_ACK || err) { | 391 | if (nlh->nlmsg_flags & NLM_F_ACK || err) { |
392 | /* Errors are delivered once the full batch has been | ||
393 | * processed, this avoids that the same error is | ||
394 | * reported several times when replaying the batch. | ||
395 | */ | ||
396 | if (nfnl_err_add(&err_list, nlh, err) < 0) { | ||
397 | /* We failed to enqueue an error, reset the | ||
398 | * list of errors and send OOM to userspace | ||
399 | * pointing to the batch header. | ||
400 | */ | ||
401 | nfnl_err_reset(&err_list); | ||
402 | netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); | ||
403 | success = false; | ||
404 | goto done; | ||
405 | } | ||
344 | /* We don't stop processing the batch on errors, thus, | 406 | /* We don't stop processing the batch on errors, thus, |
345 | * userspace gets all the errors that the batch | 407 | * userspace gets all the errors that the batch |
346 | * triggers. | 408 | * triggers. |
347 | */ | 409 | */ |
348 | netlink_ack(skb, nlh, err); | ||
349 | if (err) | 410 | if (err) |
350 | success = false; | 411 | success = false; |
351 | } | 412 | } |
@@ -361,6 +422,7 @@ done: | |||
361 | else | 422 | else |
362 | ss->abort(skb); | 423 | ss->abort(skb); |
363 | 424 | ||
425 | nfnl_err_deliver(&err_list, oskb); | ||
364 | nfnl_unlock(subsys_id); | 426 | nfnl_unlock(subsys_id); |
365 | kfree_skb(nskb); | 427 | kfree_skb(nskb); |
366 | } | 428 | } |
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 28fb8f38e6ba..8892b7b6184a 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c | |||
@@ -180,15 +180,17 @@ static int nft_hash_init(const struct nft_set *set, | |||
180 | static void nft_hash_destroy(const struct nft_set *set) | 180 | static void nft_hash_destroy(const struct nft_set *set) |
181 | { | 181 | { |
182 | const struct rhashtable *priv = nft_set_priv(set); | 182 | const struct rhashtable *priv = nft_set_priv(set); |
183 | const struct bucket_table *tbl; | 183 | const struct bucket_table *tbl = priv->tbl; |
184 | struct nft_hash_elem *he, *next; | 184 | struct nft_hash_elem *he, *next; |
185 | unsigned int i; | 185 | unsigned int i; |
186 | 186 | ||
187 | tbl = rht_dereference(priv->tbl, priv); | 187 | for (i = 0; i < tbl->size; i++) { |
188 | for (i = 0; i < tbl->size; i++) | 188 | for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node); |
189 | rht_for_each_entry_safe(he, next, tbl->buckets[i], priv, node) | 189 | he != NULL; he = next) { |
190 | next = rht_entry(he->node.next, struct nft_hash_elem, node); | ||
190 | nft_hash_elem_destroy(set, he); | 191 | nft_hash_elem_destroy(set, he); |
191 | 192 | } | |
193 | } | ||
192 | rhashtable_destroy(priv); | 194 | rhashtable_destroy(priv); |
193 | } | 195 | } |
194 | 196 | ||
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c index e1836ff88199..46214f245665 100644 --- a/net/netfilter/nft_rbtree.c +++ b/net/netfilter/nft_rbtree.c | |||
@@ -234,13 +234,11 @@ static void nft_rbtree_destroy(const struct nft_set *set) | |||
234 | struct nft_rbtree_elem *rbe; | 234 | struct nft_rbtree_elem *rbe; |
235 | struct rb_node *node; | 235 | struct rb_node *node; |
236 | 236 | ||
237 | spin_lock_bh(&nft_rbtree_lock); | ||
238 | while ((node = priv->root.rb_node) != NULL) { | 237 | while ((node = priv->root.rb_node) != NULL) { |
239 | rb_erase(node, &priv->root); | 238 | rb_erase(node, &priv->root); |
240 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | 239 | rbe = rb_entry(node, struct nft_rbtree_elem, node); |
241 | nft_rbtree_elem_destroy(set, rbe); | 240 | nft_rbtree_elem_destroy(set, rbe); |
242 | } | 241 | } |
243 | spin_unlock_bh(&nft_rbtree_lock); | ||
244 | } | 242 | } |
245 | 243 | ||
246 | static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features, | 244 | static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features, |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 91d66b7e64ac..64dc864a417f 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -78,11 +78,12 @@ static const struct genl_multicast_group ovs_dp_vport_multicast_group = { | |||
78 | 78 | ||
79 | /* Check if need to build a reply message. | 79 | /* Check if need to build a reply message. |
80 | * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */ | 80 | * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */ |
81 | static bool ovs_must_notify(struct genl_info *info, | 81 | static bool ovs_must_notify(struct genl_family *family, struct genl_info *info, |
82 | const struct genl_multicast_group *grp) | 82 | unsigned int group) |
83 | { | 83 | { |
84 | return info->nlhdr->nlmsg_flags & NLM_F_ECHO || | 84 | return info->nlhdr->nlmsg_flags & NLM_F_ECHO || |
85 | netlink_has_listeners(genl_info_net(info)->genl_sock, 0); | 85 | genl_has_listeners(family, genl_info_net(info)->genl_sock, |
86 | group); | ||
86 | } | 87 | } |
87 | 88 | ||
88 | static void ovs_notify(struct genl_family *family, | 89 | static void ovs_notify(struct genl_family *family, |
@@ -763,7 +764,7 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act | |||
763 | { | 764 | { |
764 | struct sk_buff *skb; | 765 | struct sk_buff *skb; |
765 | 766 | ||
766 | if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group)) | 767 | if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0)) |
767 | return NULL; | 768 | return NULL; |
768 | 769 | ||
769 | skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL); | 770 | skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL); |
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 02a86a27fd84..0f62326c0f5e 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
@@ -54,7 +54,7 @@ static int rfkill_gpio_set_power(void *data, bool blocked) | |||
54 | if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled) | 54 | if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled) |
55 | clk_disable(rfkill->clk); | 55 | clk_disable(rfkill->clk); |
56 | 56 | ||
57 | rfkill->clk_enabled = blocked; | 57 | rfkill->clk_enabled = !blocked; |
58 | 58 | ||
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
@@ -163,6 +163,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = { | |||
163 | { "LNV4752", RFKILL_TYPE_GPS }, | 163 | { "LNV4752", RFKILL_TYPE_GPS }, |
164 | { }, | 164 | { }, |
165 | }; | 165 | }; |
166 | MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match); | ||
166 | #endif | 167 | #endif |
167 | 168 | ||
168 | static struct platform_driver rfkill_gpio_driver = { | 169 | static struct platform_driver rfkill_gpio_driver = { |
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index b45d080e64a7..1b24191167f1 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
@@ -1143,7 +1143,7 @@ static long rxrpc_read(const struct key *key, | |||
1143 | if (copy_to_user(xdr, (s), _l) != 0) \ | 1143 | if (copy_to_user(xdr, (s), _l) != 0) \ |
1144 | goto fault; \ | 1144 | goto fault; \ |
1145 | if (_l & 3 && \ | 1145 | if (_l & 3 && \ |
1146 | copy_to_user((u8 *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ | 1146 | copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ |
1147 | goto fault; \ | 1147 | goto fault; \ |
1148 | xdr += (_l + 3) >> 2; \ | 1148 | xdr += (_l + 3) >> 2; \ |
1149 | } while(0) | 1149 | } while(0) |
diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 3a633debb6df..ad57f4444b9c 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c | |||
@@ -526,9 +526,11 @@ pop_stack: | |||
526 | match_idx = stack[--stackp]; | 526 | match_idx = stack[--stackp]; |
527 | cur_match = tcf_em_get_match(tree, match_idx); | 527 | cur_match = tcf_em_get_match(tree, match_idx); |
528 | 528 | ||
529 | if (tcf_em_early_end(cur_match, res)) | 529 | if (tcf_em_early_end(cur_match, res)) { |
530 | if (tcf_em_is_inverted(cur_match)) | ||
531 | res = !res; | ||
530 | goto pop_stack; | 532 | goto pop_stack; |
531 | else { | 533 | } else { |
532 | match_idx++; | 534 | match_idx++; |
533 | goto proceed; | 535 | goto proceed; |
534 | } | 536 | } |
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index ed30e436128b..fb666d1e4de3 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c | |||
@@ -133,10 +133,16 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) | |||
133 | --sch->q.qlen; | 133 | --sch->q.qlen; |
134 | } | 134 | } |
135 | 135 | ||
136 | /* private part of skb->cb[] that a qdisc is allowed to use | ||
137 | * is limited to QDISC_CB_PRIV_LEN bytes. | ||
138 | * As a flow key might be too large, we store a part of it only. | ||
139 | */ | ||
140 | #define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3) | ||
141 | |||
136 | struct choke_skb_cb { | 142 | struct choke_skb_cb { |
137 | u16 classid; | 143 | u16 classid; |
138 | u8 keys_valid; | 144 | u8 keys_valid; |
139 | struct flow_keys keys; | 145 | u8 keys[QDISC_CB_PRIV_LEN - 3]; |
140 | }; | 146 | }; |
141 | 147 | ||
142 | static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) | 148 | static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) |
@@ -163,22 +169,26 @@ static u16 choke_get_classid(const struct sk_buff *skb) | |||
163 | static bool choke_match_flow(struct sk_buff *skb1, | 169 | static bool choke_match_flow(struct sk_buff *skb1, |
164 | struct sk_buff *skb2) | 170 | struct sk_buff *skb2) |
165 | { | 171 | { |
172 | struct flow_keys temp; | ||
173 | |||
166 | if (skb1->protocol != skb2->protocol) | 174 | if (skb1->protocol != skb2->protocol) |
167 | return false; | 175 | return false; |
168 | 176 | ||
169 | if (!choke_skb_cb(skb1)->keys_valid) { | 177 | if (!choke_skb_cb(skb1)->keys_valid) { |
170 | choke_skb_cb(skb1)->keys_valid = 1; | 178 | choke_skb_cb(skb1)->keys_valid = 1; |
171 | skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys); | 179 | skb_flow_dissect(skb1, &temp); |
180 | memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN); | ||
172 | } | 181 | } |
173 | 182 | ||
174 | if (!choke_skb_cb(skb2)->keys_valid) { | 183 | if (!choke_skb_cb(skb2)->keys_valid) { |
175 | choke_skb_cb(skb2)->keys_valid = 1; | 184 | choke_skb_cb(skb2)->keys_valid = 1; |
176 | skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys); | 185 | skb_flow_dissect(skb2, &temp); |
186 | memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN); | ||
177 | } | 187 | } |
178 | 188 | ||
179 | return !memcmp(&choke_skb_cb(skb1)->keys, | 189 | return !memcmp(&choke_skb_cb(skb1)->keys, |
180 | &choke_skb_cb(skb2)->keys, | 190 | &choke_skb_cb(skb2)->keys, |
181 | sizeof(struct flow_keys)); | 191 | CHOKE_K_LEN); |
182 | } | 192 | } |
183 | 193 | ||
184 | /* | 194 | /* |
diff --git a/net/socket.c b/net/socket.c index 2e2586e2dee1..4cdbc107606f 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1996,6 +1996,9 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, | |||
1996 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) | 1996 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) |
1997 | return -EFAULT; | 1997 | return -EFAULT; |
1998 | 1998 | ||
1999 | if (kmsg->msg_name == NULL) | ||
2000 | kmsg->msg_namelen = 0; | ||
2001 | |||
1999 | if (kmsg->msg_namelen < 0) | 2002 | if (kmsg->msg_namelen < 0) |
2000 | return -EINVAL; | 2003 | return -EINVAL; |
2001 | 2004 | ||
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index df7b1332a1ec..7257164af91b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -6969,6 +6969,9 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp) | |||
6969 | struct nlattr *data = ((void **)skb->cb)[2]; | 6969 | struct nlattr *data = ((void **)skb->cb)[2]; |
6970 | enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE; | 6970 | enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE; |
6971 | 6971 | ||
6972 | /* clear CB data for netlink core to own from now on */ | ||
6973 | memset(skb->cb, 0, sizeof(skb->cb)); | ||
6974 | |||
6972 | nla_nest_end(skb, data); | 6975 | nla_nest_end(skb, data); |
6973 | genlmsg_end(skb, hdr); | 6976 | genlmsg_end(skb, hdr); |
6974 | 6977 | ||
@@ -9294,6 +9297,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb) | |||
9294 | void *hdr = ((void **)skb->cb)[1]; | 9297 | void *hdr = ((void **)skb->cb)[1]; |
9295 | struct nlattr *data = ((void **)skb->cb)[2]; | 9298 | struct nlattr *data = ((void **)skb->cb)[2]; |
9296 | 9299 | ||
9300 | /* clear CB data for netlink core to own from now on */ | ||
9301 | memset(skb->cb, 0, sizeof(skb->cb)); | ||
9302 | |||
9297 | if (WARN_ON(!rdev->cur_cmd_info)) { | 9303 | if (WARN_ON(!rdev->cur_cmd_info)) { |
9298 | kfree_skb(skb); | 9304 | kfree_skb(skb); |
9299 | return -EINVAL; | 9305 | return -EINVAL; |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index beeed602aeb3..fdde51f4271a 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -39,6 +39,11 @@ | |||
39 | #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ)) | 39 | #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ)) |
40 | #define XFRM_MAX_QUEUE_LEN 100 | 40 | #define XFRM_MAX_QUEUE_LEN 100 |
41 | 41 | ||
42 | struct xfrm_flo { | ||
43 | struct dst_entry *dst_orig; | ||
44 | u8 flags; | ||
45 | }; | ||
46 | |||
42 | static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); | 47 | static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); |
43 | static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] | 48 | static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] |
44 | __read_mostly; | 49 | __read_mostly; |
@@ -1877,13 +1882,14 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb) | |||
1877 | } | 1882 | } |
1878 | 1883 | ||
1879 | static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, | 1884 | static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, |
1880 | struct dst_entry *dst, | 1885 | struct xfrm_flo *xflo, |
1881 | const struct flowi *fl, | 1886 | const struct flowi *fl, |
1882 | int num_xfrms, | 1887 | int num_xfrms, |
1883 | u16 family) | 1888 | u16 family) |
1884 | { | 1889 | { |
1885 | int err; | 1890 | int err; |
1886 | struct net_device *dev; | 1891 | struct net_device *dev; |
1892 | struct dst_entry *dst; | ||
1887 | struct dst_entry *dst1; | 1893 | struct dst_entry *dst1; |
1888 | struct xfrm_dst *xdst; | 1894 | struct xfrm_dst *xdst; |
1889 | 1895 | ||
@@ -1891,9 +1897,12 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, | |||
1891 | if (IS_ERR(xdst)) | 1897 | if (IS_ERR(xdst)) |
1892 | return xdst; | 1898 | return xdst; |
1893 | 1899 | ||
1894 | if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0) | 1900 | if (!(xflo->flags & XFRM_LOOKUP_QUEUE) || |
1901 | net->xfrm.sysctl_larval_drop || | ||
1902 | num_xfrms <= 0) | ||
1895 | return xdst; | 1903 | return xdst; |
1896 | 1904 | ||
1905 | dst = xflo->dst_orig; | ||
1897 | dst1 = &xdst->u.dst; | 1906 | dst1 = &xdst->u.dst; |
1898 | dst_hold(dst); | 1907 | dst_hold(dst); |
1899 | xdst->route = dst; | 1908 | xdst->route = dst; |
@@ -1935,7 +1944,7 @@ static struct flow_cache_object * | |||
1935 | xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, | 1944 | xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, |
1936 | struct flow_cache_object *oldflo, void *ctx) | 1945 | struct flow_cache_object *oldflo, void *ctx) |
1937 | { | 1946 | { |
1938 | struct dst_entry *dst_orig = (struct dst_entry *)ctx; | 1947 | struct xfrm_flo *xflo = (struct xfrm_flo *)ctx; |
1939 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; | 1948 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; |
1940 | struct xfrm_dst *xdst, *new_xdst; | 1949 | struct xfrm_dst *xdst, *new_xdst; |
1941 | int num_pols = 0, num_xfrms = 0, i, err, pol_dead; | 1950 | int num_pols = 0, num_xfrms = 0, i, err, pol_dead; |
@@ -1976,7 +1985,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, | |||
1976 | goto make_dummy_bundle; | 1985 | goto make_dummy_bundle; |
1977 | } | 1986 | } |
1978 | 1987 | ||
1979 | new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); | 1988 | new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, |
1989 | xflo->dst_orig); | ||
1980 | if (IS_ERR(new_xdst)) { | 1990 | if (IS_ERR(new_xdst)) { |
1981 | err = PTR_ERR(new_xdst); | 1991 | err = PTR_ERR(new_xdst); |
1982 | if (err != -EAGAIN) | 1992 | if (err != -EAGAIN) |
@@ -2010,7 +2020,7 @@ make_dummy_bundle: | |||
2010 | /* We found policies, but there's no bundles to instantiate: | 2020 | /* We found policies, but there's no bundles to instantiate: |
2011 | * either because the policy blocks, has no transformations or | 2021 | * either because the policy blocks, has no transformations or |
2012 | * we could not build template (no xfrm_states).*/ | 2022 | * we could not build template (no xfrm_states).*/ |
2013 | xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family); | 2023 | xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family); |
2014 | if (IS_ERR(xdst)) { | 2024 | if (IS_ERR(xdst)) { |
2015 | xfrm_pols_put(pols, num_pols); | 2025 | xfrm_pols_put(pols, num_pols); |
2016 | return ERR_CAST(xdst); | 2026 | return ERR_CAST(xdst); |
@@ -2104,13 +2114,18 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
2104 | } | 2114 | } |
2105 | 2115 | ||
2106 | if (xdst == NULL) { | 2116 | if (xdst == NULL) { |
2117 | struct xfrm_flo xflo; | ||
2118 | |||
2119 | xflo.dst_orig = dst_orig; | ||
2120 | xflo.flags = flags; | ||
2121 | |||
2107 | /* To accelerate a bit... */ | 2122 | /* To accelerate a bit... */ |
2108 | if ((dst_orig->flags & DST_NOXFRM) || | 2123 | if ((dst_orig->flags & DST_NOXFRM) || |
2109 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) | 2124 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) |
2110 | goto nopol; | 2125 | goto nopol; |
2111 | 2126 | ||
2112 | flo = flow_cache_lookup(net, fl, family, dir, | 2127 | flo = flow_cache_lookup(net, fl, family, dir, |
2113 | xfrm_bundle_lookup, dst_orig); | 2128 | xfrm_bundle_lookup, &xflo); |
2114 | if (flo == NULL) | 2129 | if (flo == NULL) |
2115 | goto nopol; | 2130 | goto nopol; |
2116 | if (IS_ERR(flo)) { | 2131 | if (IS_ERR(flo)) { |
@@ -2138,7 +2153,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
2138 | xfrm_pols_put(pols, drop_pols); | 2153 | xfrm_pols_put(pols, drop_pols); |
2139 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | 2154 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); |
2140 | 2155 | ||
2141 | return make_blackhole(net, family, dst_orig); | 2156 | return ERR_PTR(-EREMOTE); |
2142 | } | 2157 | } |
2143 | 2158 | ||
2144 | err = -EAGAIN; | 2159 | err = -EAGAIN; |
@@ -2195,6 +2210,23 @@ dropdst: | |||
2195 | } | 2210 | } |
2196 | EXPORT_SYMBOL(xfrm_lookup); | 2211 | EXPORT_SYMBOL(xfrm_lookup); |
2197 | 2212 | ||
2213 | /* Callers of xfrm_lookup_route() must ensure a call to dst_output(). | ||
2214 | * Otherwise we may send out blackholed packets. | ||
2215 | */ | ||
2216 | struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, | ||
2217 | const struct flowi *fl, | ||
2218 | struct sock *sk, int flags) | ||
2219 | { | ||
2220 | struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, | ||
2221 | flags | XFRM_LOOKUP_QUEUE); | ||
2222 | |||
2223 | if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) | ||
2224 | return make_blackhole(net, dst_orig->ops->family, dst_orig); | ||
2225 | |||
2226 | return dst; | ||
2227 | } | ||
2228 | EXPORT_SYMBOL(xfrm_lookup_route); | ||
2229 | |||
2198 | static inline int | 2230 | static inline int |
2199 | xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) | 2231 | xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) |
2200 | { | 2232 | { |
@@ -2460,7 +2492,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) | |||
2460 | 2492 | ||
2461 | skb_dst_force(skb); | 2493 | skb_dst_force(skb); |
2462 | 2494 | ||
2463 | dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0); | 2495 | dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE); |
2464 | if (IS_ERR(dst)) { | 2496 | if (IS_ERR(dst)) { |
2465 | res = 0; | 2497 | res = 0; |
2466 | dst = NULL; | 2498 | dst = NULL; |
diff --git a/scripts/tags.sh b/scripts/tags.sh index cbfd269a6011..293828bfd4ac 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh | |||
@@ -197,6 +197,9 @@ exuberant() | |||
197 | --regex-c++='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/' \ | 197 | --regex-c++='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/' \ |
198 | --regex-c++='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/' \ | 198 | --regex-c++='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/' \ |
199 | --regex-c++='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \ | 199 | --regex-c++='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \ |
200 | --regex-c++='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/' \ | ||
201 | --regex-c++='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/' \ | ||
202 | --regex-c++='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/'\ | ||
200 | --regex-c='/PCI_OP_READ\((\w*).*[1-4]\)/pci_bus_read_config_\1/' \ | 203 | --regex-c='/PCI_OP_READ\((\w*).*[1-4]\)/pci_bus_read_config_\1/' \ |
201 | --regex-c='/PCI_OP_WRITE\((\w*).*[1-4]\)/pci_bus_write_config_\1/' \ | 204 | --regex-c='/PCI_OP_WRITE\((\w*).*[1-4]\)/pci_bus_write_config_\1/' \ |
202 | --regex-c='/DEFINE_(MUTEX|SEMAPHORE|SPINLOCK)\((\w*)/\2/v/' \ | 205 | --regex-c='/DEFINE_(MUTEX|SEMAPHORE|SPINLOCK)\((\w*)/\2/v/' \ |
@@ -260,6 +263,9 @@ emacs() | |||
260 | --regex='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/' \ | 263 | --regex='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/' \ |
261 | --regex='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/' \ | 264 | --regex='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/' \ |
262 | --regex='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \ | 265 | --regex='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \ |
266 | --regex='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/' \ | ||
267 | --regex='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/' \ | ||
268 | --regex='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/' \ | ||
263 | --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/' \ | 269 | --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/' \ |
264 | --regex='/PCI_OP_READ(\([a-z]*[a-z]\).*[1-4])/pci_bus_read_config_\1/' \ | 270 | --regex='/PCI_OP_READ(\([a-z]*[a-z]\).*[1-4])/pci_bus_read_config_\1/' \ |
265 | --regex='/PCI_OP_WRITE(\([a-z]*[a-z]\).*[1-4])/pci_bus_write_config_\1/'\ | 271 | --regex='/PCI_OP_WRITE(\([a-z]*[a-z]\).*[1-4])/pci_bus_write_config_\1/'\ |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 9acc77eae487..0032278567ad 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -1782,14 +1782,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream, | |||
1782 | { | 1782 | { |
1783 | struct snd_pcm_hw_params *params = arg; | 1783 | struct snd_pcm_hw_params *params = arg; |
1784 | snd_pcm_format_t format; | 1784 | snd_pcm_format_t format; |
1785 | int channels, width; | 1785 | int channels; |
1786 | ssize_t frame_size; | ||
1786 | 1787 | ||
1787 | params->fifo_size = substream->runtime->hw.fifo_size; | 1788 | params->fifo_size = substream->runtime->hw.fifo_size; |
1788 | if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) { | 1789 | if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) { |
1789 | format = params_format(params); | 1790 | format = params_format(params); |
1790 | channels = params_channels(params); | 1791 | channels = params_channels(params); |
1791 | width = snd_pcm_format_physical_width(format); | 1792 | frame_size = snd_pcm_format_size(format, channels); |
1792 | params->fifo_size /= width * channels; | 1793 | if (frame_size > 0) |
1794 | params->fifo_size /= (unsigned)frame_size; | ||
1793 | } | 1795 | } |
1794 | return 0; | 1796 | return 0; |
1795 | } | 1797 | } |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 6e5d0cb4e3d7..47ccb8f44adb 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -777,6 +777,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { | |||
777 | { .id = CXT_PINCFG_LENOVO_TP410, .name = "tp410" }, | 777 | { .id = CXT_PINCFG_LENOVO_TP410, .name = "tp410" }, |
778 | { .id = CXT_FIXUP_THINKPAD_ACPI, .name = "thinkpad" }, | 778 | { .id = CXT_FIXUP_THINKPAD_ACPI, .name = "thinkpad" }, |
779 | { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" }, | 779 | { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" }, |
780 | { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, | ||
780 | { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, | 781 | { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, |
781 | {} | 782 | {} |
782 | }; | 783 | }; |
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index e4f6102efc1a..b86b426f159d 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c | |||
@@ -51,7 +51,7 @@ static struct reg_default rt286_index_def[] = { | |||
51 | { 0x04, 0xaf01 }, | 51 | { 0x04, 0xaf01 }, |
52 | { 0x08, 0x000d }, | 52 | { 0x08, 0x000d }, |
53 | { 0x09, 0xd810 }, | 53 | { 0x09, 0xd810 }, |
54 | { 0x0a, 0x0060 }, | 54 | { 0x0a, 0x0120 }, |
55 | { 0x0b, 0x0000 }, | 55 | { 0x0b, 0x0000 }, |
56 | { 0x0d, 0x2800 }, | 56 | { 0x0d, 0x2800 }, |
57 | { 0x0f, 0x0000 }, | 57 | { 0x0f, 0x0000 }, |
@@ -60,7 +60,7 @@ static struct reg_default rt286_index_def[] = { | |||
60 | { 0x33, 0x0208 }, | 60 | { 0x33, 0x0208 }, |
61 | { 0x49, 0x0004 }, | 61 | { 0x49, 0x0004 }, |
62 | { 0x4f, 0x50e9 }, | 62 | { 0x4f, 0x50e9 }, |
63 | { 0x50, 0x2c00 }, | 63 | { 0x50, 0x2000 }, |
64 | { 0x63, 0x2902 }, | 64 | { 0x63, 0x2902 }, |
65 | { 0x67, 0x1111 }, | 65 | { 0x67, 0x1111 }, |
66 | { 0x68, 0x1016 }, | 66 | { 0x68, 0x1016 }, |
@@ -104,7 +104,6 @@ static const struct reg_default rt286_reg[] = { | |||
104 | { 0x02170700, 0x00000000 }, | 104 | { 0x02170700, 0x00000000 }, |
105 | { 0x02270100, 0x00000000 }, | 105 | { 0x02270100, 0x00000000 }, |
106 | { 0x02370100, 0x00000000 }, | 106 | { 0x02370100, 0x00000000 }, |
107 | { 0x02040000, 0x00004002 }, | ||
108 | { 0x01870700, 0x00000020 }, | 107 | { 0x01870700, 0x00000020 }, |
109 | { 0x00830000, 0x000000c3 }, | 108 | { 0x00830000, 0x000000c3 }, |
110 | { 0x00930000, 0x000000c3 }, | 109 | { 0x00930000, 0x000000c3 }, |
@@ -192,7 +191,6 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value) | |||
192 | /*handle index registers*/ | 191 | /*handle index registers*/ |
193 | if (reg <= 0xff) { | 192 | if (reg <= 0xff) { |
194 | rt286_hw_write(client, RT286_COEF_INDEX, reg); | 193 | rt286_hw_write(client, RT286_COEF_INDEX, reg); |
195 | reg = RT286_PROC_COEF; | ||
196 | for (i = 0; i < INDEX_CACHE_SIZE; i++) { | 194 | for (i = 0; i < INDEX_CACHE_SIZE; i++) { |
197 | if (reg == rt286->index_cache[i].reg) { | 195 | if (reg == rt286->index_cache[i].reg) { |
198 | rt286->index_cache[i].def = value; | 196 | rt286->index_cache[i].def = value; |
@@ -200,6 +198,7 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value) | |||
200 | } | 198 | } |
201 | 199 | ||
202 | } | 200 | } |
201 | reg = RT286_PROC_COEF; | ||
203 | } | 202 | } |
204 | 203 | ||
205 | data[0] = (reg >> 24) & 0xff; | 204 | data[0] = (reg >> 24) & 0xff; |
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c index 484b3bbe8624..4021cd435740 100644 --- a/sound/soc/codecs/ssm2602.c +++ b/sound/soc/codecs/ssm2602.c | |||
@@ -647,7 +647,7 @@ int ssm2602_probe(struct device *dev, enum ssm2602_type type, | |||
647 | return -ENOMEM; | 647 | return -ENOMEM; |
648 | 648 | ||
649 | dev_set_drvdata(dev, ssm2602); | 649 | dev_set_drvdata(dev, ssm2602); |
650 | ssm2602->type = SSM2602; | 650 | ssm2602->type = type; |
651 | ssm2602->regmap = regmap; | 651 | ssm2602->regmap = regmap; |
652 | 652 | ||
653 | return snd_soc_register_codec(dev, &soc_codec_dev_ssm2602, | 653 | return snd_soc_register_codec(dev, &soc_codec_dev_ssm2602, |
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 87eb5776a39b..de6ab06f58a5 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c | |||
@@ -748,8 +748,9 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream, | |||
748 | return 0; | 748 | return 0; |
749 | } | 749 | } |
750 | 750 | ||
751 | static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private, | 751 | static int _fsl_ssi_set_dai_fmt(struct device *dev, |
752 | unsigned int fmt) | 752 | struct fsl_ssi_private *ssi_private, |
753 | unsigned int fmt) | ||
753 | { | 754 | { |
754 | struct regmap *regs = ssi_private->regs; | 755 | struct regmap *regs = ssi_private->regs; |
755 | u32 strcr = 0, stcr, srcr, scr, mask; | 756 | u32 strcr = 0, stcr, srcr, scr, mask; |
@@ -758,7 +759,7 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private, | |||
758 | ssi_private->dai_fmt = fmt; | 759 | ssi_private->dai_fmt = fmt; |
759 | 760 | ||
760 | if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) { | 761 | if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) { |
761 | dev_err(&ssi_private->pdev->dev, "baudclk is missing which is necessary for master mode\n"); | 762 | dev_err(dev, "baudclk is missing which is necessary for master mode\n"); |
762 | return -EINVAL; | 763 | return -EINVAL; |
763 | } | 764 | } |
764 | 765 | ||
@@ -913,7 +914,7 @@ static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) | |||
913 | { | 914 | { |
914 | struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); | 915 | struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); |
915 | 916 | ||
916 | return _fsl_ssi_set_dai_fmt(ssi_private, fmt); | 917 | return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt); |
917 | } | 918 | } |
918 | 919 | ||
919 | /** | 920 | /** |
@@ -1387,7 +1388,8 @@ static int fsl_ssi_probe(struct platform_device *pdev) | |||
1387 | 1388 | ||
1388 | done: | 1389 | done: |
1389 | if (ssi_private->dai_fmt) | 1390 | if (ssi_private->dai_fmt) |
1390 | _fsl_ssi_set_dai_fmt(ssi_private, ssi_private->dai_fmt); | 1391 | _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private, |
1392 | ssi_private->dai_fmt); | ||
1391 | 1393 | ||
1392 | return 0; | 1394 | return 0; |
1393 | 1395 | ||
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 3092b58fede6..cecfab3cc948 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c | |||
@@ -102,13 +102,11 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream) | |||
102 | fe->dpcm[stream].runtime = fe_substream->runtime; | 102 | fe->dpcm[stream].runtime = fe_substream->runtime; |
103 | 103 | ||
104 | ret = dpcm_path_get(fe, stream, &list); | 104 | ret = dpcm_path_get(fe, stream, &list); |
105 | if (ret < 0) { | 105 | if (ret < 0) |
106 | mutex_unlock(&fe->card->mutex); | ||
107 | goto fe_err; | 106 | goto fe_err; |
108 | } else if (ret == 0) { | 107 | else if (ret == 0) |
109 | dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", | 108 | dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", |
110 | fe->dai_link->name, stream ? "capture" : "playback"); | 109 | fe->dai_link->name, stream ? "capture" : "playback"); |
111 | } | ||
112 | 110 | ||
113 | /* calculate valid and active FE <-> BE dpcms */ | 111 | /* calculate valid and active FE <-> BE dpcms */ |
114 | dpcm_process_paths(fe, stream, &list, 1); | 112 | dpcm_process_paths(fe, stream, &list, 1); |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 889f4e3d35dc..d074aa91b023 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -3203,7 +3203,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, | |||
3203 | unsigned int val, mask; | 3203 | unsigned int val, mask; |
3204 | void *data; | 3204 | void *data; |
3205 | 3205 | ||
3206 | if (!component->regmap) | 3206 | if (!component->regmap || !params->num_regs) |
3207 | return -EINVAL; | 3207 | return -EINVAL; |
3208 | 3208 | ||
3209 | len = params->num_regs * component->val_bytes; | 3209 | len = params->num_regs * component->val_bytes; |
diff --git a/sound/usb/caiaq/control.c b/sound/usb/caiaq/control.c index f65fc0987cfb..b7a7c805d63f 100644 --- a/sound/usb/caiaq/control.c +++ b/sound/usb/caiaq/control.c | |||
@@ -100,15 +100,19 @@ static int control_put(struct snd_kcontrol *kcontrol, | |||
100 | struct snd_usb_caiaqdev *cdev = caiaqdev(chip->card); | 100 | struct snd_usb_caiaqdev *cdev = caiaqdev(chip->card); |
101 | int pos = kcontrol->private_value; | 101 | int pos = kcontrol->private_value; |
102 | int v = ucontrol->value.integer.value[0]; | 102 | int v = ucontrol->value.integer.value[0]; |
103 | unsigned char cmd = EP1_CMD_WRITE_IO; | 103 | unsigned char cmd; |
104 | 104 | ||
105 | if (cdev->chip.usb_id == | 105 | switch (cdev->chip.usb_id) { |
106 | USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1)) | 106 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER): |
107 | cmd = EP1_CMD_DIMM_LEDS; | 107 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1): |
108 | 108 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2): | |
109 | if (cdev->chip.usb_id == | 109 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER): |
110 | USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER)) | ||
111 | cmd = EP1_CMD_DIMM_LEDS; | 110 | cmd = EP1_CMD_DIMM_LEDS; |
111 | break; | ||
112 | default: | ||
113 | cmd = EP1_CMD_WRITE_IO; | ||
114 | break; | ||
115 | } | ||
112 | 116 | ||
113 | if (pos & CNT_INTVAL) { | 117 | if (pos & CNT_INTVAL) { |
114 | int i = pos & ~CNT_INTVAL; | 118 | int i = pos & ~CNT_INTVAL; |
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c index 01124ef3690a..416baedfc89f 100644 --- a/virt/kvm/arm/vgic-v2.c +++ b/virt/kvm/arm/vgic-v2.c | |||
@@ -71,7 +71,7 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, | |||
71 | struct vgic_lr lr_desc) | 71 | struct vgic_lr lr_desc) |
72 | { | 72 | { |
73 | if (!(lr_desc.state & LR_STATE_MASK)) | 73 | if (!(lr_desc.state & LR_STATE_MASK)) |
74 | set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr); | 74 | __set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr); |
75 | } | 75 | } |
76 | 76 | ||
77 | static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) | 77 | static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 33712fb26eb1..95519bc959ed 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -110,7 +110,7 @@ static bool largepages_enabled = true; | |||
110 | bool kvm_is_mmio_pfn(pfn_t pfn) | 110 | bool kvm_is_mmio_pfn(pfn_t pfn) |
111 | { | 111 | { |
112 | if (pfn_valid(pfn)) | 112 | if (pfn_valid(pfn)) |
113 | return PageReserved(pfn_to_page(pfn)); | 113 | return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)); |
114 | 114 | ||
115 | return true; | 115 | return true; |
116 | } | 116 | } |
@@ -1725,7 +1725,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target) | |||
1725 | rcu_read_lock(); | 1725 | rcu_read_lock(); |
1726 | pid = rcu_dereference(target->pid); | 1726 | pid = rcu_dereference(target->pid); |
1727 | if (pid) | 1727 | if (pid) |
1728 | task = get_pid_task(target->pid, PIDTYPE_PID); | 1728 | task = get_pid_task(pid, PIDTYPE_PID); |
1729 | rcu_read_unlock(); | 1729 | rcu_read_unlock(); |
1730 | if (!task) | 1730 | if (!task) |
1731 | return ret; | 1731 | return ret; |