309 files changed, 3195 insertions, 1910 deletions
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index eee6f0f4aa43..3a626d1b8f2e 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2545,6 +2545,30 @@ fields changed from _s32 to _u32.
 </orderedlist>
 </section>
 
+<section>
+<title>V4L2 in Linux 3.16</title>
+<orderedlist>
+<listitem>
+<para>Added event V4L2_EVENT_SOURCE_CHANGE.
+</para>
+</listitem>
+</orderedlist>
+</section>
+
+<section>
+<title>V4L2 in Linux 3.17</title>
+<orderedlist>
+<listitem>
+<para>Extended &v4l2-pix-format;. Added format flags.
+</para>
+</listitem>
+<listitem>
+<para>Added compound control types and &VIDIOC-QUERY-EXT-CTRL;.
+</para>
+</listitem>
+</orderedlist>
+</section>
+
 <section id="other">
 <title>Relation of V4L2 to other Linux multimedia APIs</title>
 
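
For reference, a minimal userspace sketch (not part of the patch above) of subscribing to the V4L2_EVENT_SOURCE_CHANGE event that the 3.16 entry lists; error handling is omitted:

    /* Subscribe to source-change events on an already-open V4L2 fd. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int subscribe_source_change(int fd)
    {
            struct v4l2_event_subscription sub;

            memset(&sub, 0, sizeof(sub));
            sub.type = V4L2_EVENT_SOURCE_CHANGE;
            sub.id = 0;     /* pad/input index to watch */
            return ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
    }

Once subscribed, the event is reported by poll() via POLLPRI and read with VIDIOC_DQEVENT.
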
diff --git a/Documentation/DocBook/media/v4l/func-poll.xml b/Documentation/DocBook/media/v4l/func-poll.xml
index 85cad8bff5ba..4c73f115219b 100644
--- a/Documentation/DocBook/media/v4l/func-poll.xml
+++ b/Documentation/DocBook/media/v4l/func-poll.xml
@@ -29,9 +29,12 @@ can suspend execution until the driver has captured data or is ready
 to accept data for output.</para>
 
 <para>When streaming I/O has been negotiated this function waits
-until a buffer has been filled or displayed and can be dequeued with
-the &VIDIOC-DQBUF; ioctl. When buffers are already in the outgoing
-queue of the driver the function returns immediately.</para>
+until a buffer has been filled by the capture device and can be dequeued
+with the &VIDIOC-DQBUF; ioctl. For output devices this function waits
+until the device is ready to accept a new buffer to be queued up with
+the &VIDIOC-QBUF; ioctl for display. When buffers are already in the outgoing
+queue of the driver (capture) or the incoming queue isn't full (display)
+the function returns immediately.</para>
 
 <para>On success <function>poll()</function> returns the number of
 file descriptors that have been selected (that is, file descriptors
@@ -44,10 +47,22 @@ Capture devices set the <constant>POLLIN</constant> and
 flags. When the function timed out it returns a value of zero, on
 failure it returns <returnvalue>-1</returnvalue> and the
 <varname>errno</varname> variable is set appropriately. When the
-application did not call &VIDIOC-QBUF; or &VIDIOC-STREAMON; yet the
+application did not call &VIDIOC-STREAMON; the
 <function>poll()</function> function succeeds, but sets the
 <constant>POLLERR</constant> flag in the
-<structfield>revents</structfield> field.</para>
+<structfield>revents</structfield> field. When the
+application has called &VIDIOC-STREAMON; for a capture device but hasn't
+yet called &VIDIOC-QBUF;, the <function>poll()</function> function
+succeeds and sets the <constant>POLLERR</constant> flag in the
+<structfield>revents</structfield> field. For output devices this
+same situation will cause <function>poll()</function> to succeed
+as well, but it sets the <constant>POLLOUT</constant> and
+<constant>POLLWRNORM</constant> flags in the <structfield>revents</structfield>
+field.</para>
+
+<para>If an event occurred (see &VIDIOC-DQEVENT;) then
+<constant>POLLPRI</constant> will be set in the <structfield>revents</structfield>
+field and <function>poll()</function> will return.</para>
 
 <para>When use of the <function>read()</function> function has
 been negotiated and the driver does not capture yet, the
@@ -58,10 +73,18 @@ continuously (as opposed to, for example, still images) the function
 may return immediately.</para>
 
 <para>When use of the <function>write()</function> function has
-been negotiated the <function>poll</function> function just waits
+been negotiated and the driver does not stream yet, the
+<function>poll</function> function starts streaming. When that fails
+it returns a <constant>POLLERR</constant> as above. Otherwise it waits
 until the driver is ready for a non-blocking
 <function>write()</function> call.</para>
 
+<para>If the caller is only interested in events (just
+<constant>POLLPRI</constant> is set in the <structfield>events</structfield>
+field), then <function>poll()</function> will <emphasis>not</emphasis>
+start streaming if the driver does not stream yet. This makes it
+possible to just poll for events and not for buffers.</para>
+
 <para>All drivers implementing the <function>read()</function> or
 <function>write()</function> function or streaming I/O must also
 support the <function>poll()</function> function.</para>
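
The poll() semantics documented above can be exercised with a short sketch (illustrative only, not part of the patch): wait on a streaming capture fd for either a filled buffer or a pending event.

    #include <poll.h>

    /* Returns 1 when a buffer can be dequeued with VIDIOC_DQBUF,
     * 2 when an event is pending for VIDIOC_DQEVENT, 0 on timeout,
     * -1 on error or POLLERR (e.g. streaming not started). */
    static int wait_capture(int fd)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
            int ret = poll(&pfd, 1, 2000);  /* 2 second timeout */

            if (ret <= 0)
                    return ret;
            if (pfd.revents & POLLERR)
                    return -1;
            if (pfd.revents & POLLPRI)
                    return 2;
            return 1;
    }
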
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index f2f81f06a17b..7cfe618f754d 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -152,10 +152,11 @@ structs, ioctls) must be noted in more detail in the history chapter
 applications. -->
 
 <revision>
-<revnumber>3.16</revnumber>
-<date>2014-05-27</date>
-<authorinitials>lp</authorinitials>
-<revremark>Extended &v4l2-pix-format;. Added format flags.
+<revnumber>3.17</revnumber>
+<date>2014-08-04</date>
+<authorinitials>lp, hv</authorinitials>
+<revremark>Extended &v4l2-pix-format;. Added format flags. Added compound control types
+and VIDIOC_QUERY_EXT_CTRL.
 </revremark>
 </revision>
 
@@ -538,7 +539,7 @@ and discussions on the V4L mailing list.</revremark>
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
-<subtitle>Revision 3.14</subtitle>
+<subtitle>Revision 3.17</subtitle>
 
 <chapter id="common">
 &sub-common;
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
index 1ba9e999af3f..c62a7360719b 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
@@ -119,7 +119,7 @@
 </row>
 <row>
 <entry>&v4l2-rect;</entry>
-<entry><structfield>rect</structfield></entry>
+<entry><structfield>r</structfield></entry>
 <entry>Selection rectangle, in pixels.</entry>
 </row>
 <row>
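
A hedged userspace sketch (not part of the patch) showing the corrected member name in use with VIDIOC_SUBDEV_G_SELECTION:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>
    #include <linux/v4l2-subdev.h>

    static int get_active_crop(int subdev_fd, struct v4l2_rect *out)
    {
            struct v4l2_subdev_selection sel;

            memset(&sel, 0, sizeof(sel));
            sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
            sel.pad = 0;
            sel.target = V4L2_SEL_TGT_CROP;
            if (ioctl(subdev_fd, VIDIOC_SUBDEV_G_SELECTION, &sel) < 0)
                    return -1;
            *out = sel.r;   /* the member is "r", as the corrected table says */
            return 0;
    }
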
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 7740038d82bc..3c94ff3f9693 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -345,14 +345,14 @@ the named feature on.
 The implementation is simple.
 
 Setting the flag 'cpuset.memory_spread_page' turns on a per-process flag
-PF_SPREAD_PAGE for each task that is in that cpuset or subsequently
+PFA_SPREAD_PAGE for each task that is in that cpuset or subsequently
 joins that cpuset. The page allocation calls for the page cache
-is modified to perform an inline check for this PF_SPREAD_PAGE task
+is modified to perform an inline check for this PFA_SPREAD_PAGE task
 flag, and if set, a call to a new routine cpuset_mem_spread_node()
 returns the node to prefer for the allocation.
 
 Similarly, setting 'cpuset.memory_spread_slab' turns on the flag
-PF_SPREAD_SLAB, and appropriately marked slab caches will allocate
+PFA_SPREAD_SLAB, and appropriately marked slab caches will allocate
 pages from the node returned by cpuset_mem_spread_node().
 
 The cpuset_mem_spread_node() routine is also simple. It uses the
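
As a rough illustration of the allocation path described above, a simplified sketch (not the exact mainline code) of the PFA_SPREAD_PAGE check:

    /* Sketch: prefer the cpuset-chosen node when page spreading is enabled
     * for the current task; otherwise fall back to the normal policy. */
    static struct page *page_cache_alloc_sketch(gfp_t gfp)
    {
            if (cpuset_do_page_mem_spread()) {
                    int nid = cpuset_mem_spread_node();

                    return alloc_pages_exact_node(nid, gfp, 0);
            }
            return alloc_pages(gfp, 0);
    }
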
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt b/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt
index 578a1fca366e..443bcb6134d5 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt
+++ b/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt
@@ -56,6 +56,9 @@ Required properties:
 - fsl,data-width : should be <18> or <24>
 - port: A port node with endpoint definitions as defined in
 Documentation/devicetree/bindings/media/video-interfaces.txt.
+On i.MX5, the internal two-input-multiplexer is used.
+Due to hardware limitations, only one port (port@[0,1])
+can be used for each channel (lvds-channel@[0,1], respectively)
 On i.MX6, there should be four ports (port@[0-3]) that correspond
 to the four LVDS multiplexer inputs.
 
@@ -78,6 +81,8 @@ ldb: ldb@53fa8008 {
 "di0", "di1";
 
 lvds-channel@0 {
+#address-cells = <1>;
+#size-cells = <0>;
 reg = <0>;
 fsl,data-mapping = "spwg";
 fsl,data-width = <24>;
@@ -86,7 +91,9 @@ ldb: ldb@53fa8008 {
 /* ... */
 };
 
-port {
+port@0 {
+reg = <0>;
+
 lvds0_in: endpoint {
 remote-endpoint = <&ipu_di0_lvds0>;
 };
@@ -94,6 +101,8 @@ ldb: ldb@53fa8008 {
 };
 
 lvds-channel@1 {
+#address-cells = <1>;
+#size-cells = <0>;
 reg = <1>;
 fsl,data-mapping = "spwg";
 fsl,data-width = <24>;
@@ -102,7 +111,9 @@ ldb: ldb@53fa8008 {
 /* ... */
 };
 
-port {
+port@1 {
+reg = <1>;
+
 lvds1_in: endpoint {
 remote-endpoint = <&ipu_di1_lvds1>;
 };
diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt
new file mode 100644
index 000000000000..3a2f54d07fc5
--- /dev/null
+++ b/Documentation/devicetree/of_selftest.txt
@@ -0,0 +1,211 @@
+Open Firmware Device Tree Selftest
+----------------------------------
+
+Author: Gaurav Minocha <gaurav.minocha.os@gmail.com>
+
+1. Introduction
+
+This document explains how the test data required for executing OF selftest
+is attached to the live tree dynamically, independent of the machine's
+architecture.
+
+It is recommended to read the following documents before moving ahead.
+
+[1] Documentation/devicetree/usage-model.txt
+[2] http://www.devicetree.org/Device_Tree_Usage
+
+OF Selftest has been designed to test the interface (include/linux/of.h)
+provided to device driver developers to fetch the device information..etc.
+from the unflattened device tree data structure. This interface is used by
+most of the device drivers in various use cases.
+
+
+2. Test-data
+
+The Device Tree Source file (drivers/of/testcase-data/testcases.dts) contains
+the test data required for executing the unit tests automated in
+drivers/of/selftests.c. Currently, following Device Tree Source Include files
+(.dtsi) are included in testcase.dts:
+
+drivers/of/testcase-data/tests-interrupts.dtsi
+drivers/of/testcase-data/tests-platform.dtsi
+drivers/of/testcase-data/tests-phandle.dtsi
+drivers/of/testcase-data/tests-match.dtsi
+
+When the kernel is build with OF_SELFTEST enabled, then the following make rule
+
+$(obj)/%.dtb: $(src)/%.dts FORCE
+        $(call if_changed_dep, dtc)
+
+is used to compile the DT source file (testcase.dts) into a binary blob
+(testcase.dtb), also referred as flattened DT.
+
+After that, using the following rule the binary blob above is wrapped as an
+assembly file (testcase.dtb.S).
+
+$(obj)/%.dtb.S: $(obj)/%.dtb
+        $(call cmd, dt_S_dtb)
+
+The assembly file is compiled into an object file (testcase.dtb.o), and is
+linked into the kernel image.
+
+
+2.1. Adding the test data
+
+Un-flattened device tree structure:
+
+Un-flattened device tree consists of connected device_node(s) in form of a tree
+structure described below.
+
+// following struct members are used to construct the tree
+struct device_node {
+    ...
+    struct device_node *parent;
+    struct device_node *child;
+    struct device_node *sibling;
+    struct device_node *allnext;    /* next in list of all nodes */
+    ...
+};
+
+Figure 1, describes a generic structure of machine’s un-flattened device tree
+considering only child and sibling pointers. There exists another pointer,
+*parent, that is used to traverse the tree in the reverse direction. So, at
+a particular level the child node and all the sibling nodes will have a parent
+pointer pointing to a common node (e.g. child1, sibling2, sibling3, sibling4’s
+parent points to root node)
+
+root (‘/’)
+  |
+child1 -> sibling2 -> sibling3 -> sibling4 -> null
+  |          |           |           |
+  |          |           |         null
+  |          |           |
+  |          |        child31 -> sibling32 -> null
+  |          |           |           |
+  |          |          null        null
+  |          |
+  |       child21 -> sibling22 -> sibling23 -> null
+  |          |           |            |
+  |         null        null         null
+  |
+child11 -> sibling12 -> sibling13 -> sibling14 -> null
+  |           |            |            |
+  |           |            |           null
+  |           |            |
+ null        null       child131 -> null
+                            |
+                           null
+
+Figure 1: Generic structure of un-flattened device tree
+
+
+*allnext: it is used to link all the nodes of DT into a list. So, for the
+above tree the list would be as follows:
+
+root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2->
+child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null
+
+Before executing OF selftest, it is required to attach the test data to
+machine's device tree (if present). So, when selftest_data_add() is called,
+at first it reads the flattened device tree data linked into the kernel image
+via the following kernel symbols:
+
+__dtb_testcases_begin - address marking the start of test data blob
+__dtb_testcases_end - address marking the end of test data blob
+
+Secondly, it calls of_fdt_unflatten_device_tree() to unflatten the flattened
+blob. And finally, if the machine’s device tree (i.e live tree) is present,
+then it attaches the unflattened test data tree to the live tree, else it
+attaches itself as a live device tree.
+
+attach_node_and_children() uses of_attach_node() to attach the nodes into the
+live tree as explained below. To explain the same, the test data tree described
+in Figure 2 is attached to the live tree described in Figure 1.
+
+root (‘/’)
+  |
+testcase-data
+  |
+test-child0 -> test-sibling1 -> test-sibling2 -> test-sibling3 -> null
+  |                 |                |                |
+test-child01       null             null             null
+
+
+allnext list:
+
+root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2
+->test-sibling3->null
+
+Figure 2: Example test data tree to be attached to live tree.
+
+According to the scenario above, the live tree is already present so it isn’t
+required to attach the root(‘/’) node. All other nodes are attached by calling
+of_attach_node() on each node.
+
+In the function of_attach_node(), the new node is attached as the child of the
+given parent in live tree. But, if parent already has a child then the new node
+replaces the current child and turns it into its sibling. So, when the testcase
+data node is attached to the live tree above (Figure 1), the final structure is
+as shown in Figure 3.
+
+root (‘/’)
+  |
+testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
+  |                |          |           |           |
+(...)              |          |           |         null
+                   |          |        child31 -> sibling32 -> null
+                   |          |           |           |
+                   |          |         null        null
+                   |          |
+                   |       child21 -> sibling22 -> sibling23 -> null
+                   |          |           |            |
+                   |        null        null         null
+                   |
+                 child11 -> sibling12 -> sibling13 -> sibling14 -> null
+                   |           |            |            |
+                  null        null          |           null
+                                            |
+                                         child131 -> null
+                                            |
+                                           null
+-----------------------------------------------------------------------
+
+root (‘/’)
+  |
+testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
+  |                |          |           |           |
+  |              (...)      (...)       (...)       null
+  |
+test-sibling3 -> test-sibling2 -> test-sibling1 -> test-child0 -> null
+  |                    |                |               |
+ null                 null             null         test-child01
+
+
+Figure 3: Live device tree structure after attaching the testcase-data.
+
+
+Astute readers would have noticed that test-child0 node becomes the last
+sibling compared to the earlier structure (Figure 2). After attaching first
+test-child0 the test-sibling1 is attached that pushes the child node
+(i.e. test-child0) to become a sibling and makes itself a child node,
+as mentioned above.
+
+If a duplicate node is found (i.e. if a node with same full_name property is
+already present in the live tree), then the node isn’t attached rather its
+properties are updated to the live tree’s node by calling the function
+update_node_properties().
+
+
+2.2. Removing the test data
+
+Once the test case execution is complete, selftest_data_remove is called in
+order to remove the device nodes attached initially (first the leaf nodes are
+detached and then moving up the parent nodes are removed, and eventually the
+whole tree). selftest_data_remove() calls detach_node_and_children() that uses
+of_detach_node() to detach the nodes from the live device tree.
+
+To detach a node, of_detach_node() first updates all_next linked list, by
+attaching the previous node's allnext to current node's allnext pointer. And
+then, it either updates the child pointer of given node's parent to its
+sibling or attaches the previous sibling to the given node's sibling, as
+appropriate. That is it :)
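
The attach rule described in section 2.1 of the new document (the new node becomes the parent's first child and the previous child becomes its sibling) boils down to the following simplified sketch; this is an illustration only, not the mainline of_attach_node(), which also updates the allnext list and takes the devtree lock:

    static void attach_node_sketch(struct device_node *np,
                                   struct device_node *parent)
    {
            np->parent = parent;
            np->sibling = parent->child;    /* old first child becomes a sibling */
            parent->child = np;             /* new node becomes the first child */
    }
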
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index c48a9704bda8..d16f424c5e8d 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -462,9 +462,9 @@ JIT compiler
 ------------
 
 The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
-ARM and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is
-transparently invoked for each attached filter from user space or for internal
-kernel users if it has been previously enabled by root:
+ARM, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler
+is transparently invoked for each attached filter from user space or for
+internal kernel users if it has been previously enabled by root:
 
 echo 1 > /proc/sys/net/core/bpf_jit_enable
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 670b3dcce2de..37054306dc9f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3012,9 +3012,8 @@ S: Supported
 F: drivers/acpi/dock.c
 
 DOCUMENTATION
-M: Randy Dunlap <rdunlap@infradead.org>
+M: Jiri Kosina <jkosina@suse.cz>
 L: linux-doc@vger.kernel.org
-T: quilt http://www.infradead.org/~rdunlap/Doc/patches/
 S: Maintained
 F: Documentation/
 X: Documentation/ABI/
@@ -4477,7 +4476,6 @@ M: Mika Westerberg <mika.westerberg@linux.intel.com>
 L: linux-i2c@vger.kernel.org
 L: linux-acpi@vger.kernel.org
 S: Maintained
-F: drivers/i2c/i2c-acpi.c
 
 I2C-TAOS-EVM DRIVER
 M: Jean Delvare <jdelvare@suse.de>
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index e03fbf3c6889..b40cdadb1f87 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -447,22 +447,19 @@
 gpmc,device-width = <2>;
 gpmc,sync-clk-ps = <0>;
 gpmc,cs-on-ns = <0>;
-gpmc,cs-rd-off-ns = <40>;
-gpmc,cs-wr-off-ns = <40>;
+gpmc,cs-rd-off-ns = <80>;
+gpmc,cs-wr-off-ns = <80>;
 gpmc,adv-on-ns = <0>;
-gpmc,adv-rd-off-ns = <30>;
-gpmc,adv-wr-off-ns = <30>;
-gpmc,we-on-ns = <5>;
-gpmc,we-off-ns = <25>;
-gpmc,oe-on-ns = <2>;
-gpmc,oe-off-ns = <20>;
-gpmc,access-ns = <20>;
-gpmc,wr-access-ns = <40>;
-gpmc,rd-cycle-ns = <40>;
-gpmc,wr-cycle-ns = <40>;
-gpmc,wait-pin = <0>;
-gpmc,wait-on-read;
-gpmc,wait-on-write;
+gpmc,adv-rd-off-ns = <60>;
+gpmc,adv-wr-off-ns = <60>;
+gpmc,we-on-ns = <10>;
+gpmc,we-off-ns = <50>;
+gpmc,oe-on-ns = <4>;
+gpmc,oe-off-ns = <40>;
+gpmc,access-ns = <40>;
+gpmc,wr-access-ns = <80>;
+gpmc,rd-cycle-ns = <80>;
+gpmc,wr-cycle-ns = <80>;
 gpmc,bus-turnaround-ns = <0>;
 gpmc,cycle2cycle-delay-ns = <0>;
 gpmc,clk-activation-ns = <0>;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index c6c58c1c00e3..6b675a02066f 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -423,10 +423,14 @@
 status = "disabled";
 
 lvds-channel@0 {
+#address-cells = <1>;
+#size-cells = <0>;
 reg = <0>;
 status = "disabled";
 
-port {
+port@0 {
+reg = <0>;
+
 lvds0_in: endpoint {
 remote-endpoint = <&ipu_di0_lvds0>;
 };
@@ -434,10 +438,14 @@
 };
 
 lvds-channel@1 {
+#address-cells = <1>;
+#size-cells = <0>;
 reg = <1>;
 status = "disabled";
 
-port {
+port@1 {
+reg = <1>;
+
 lvds1_in: endpoint {
 remote-endpoint = <&ipu_di1_lvds1>;
 };
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 598afe91c676..4773d6af66a0 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -40,7 +40,7 @@ clocks {
 #clock-cells = <0>;
 compatible = "ti,keystone,psc-clock";
 clocks = <&chipclk16>;
-clock-output-names = "usb";
+clock-output-names = "usb1";
 reg = <0x02350004 0xb00>, <0x02350000 0x400>;
 reg-names = "control", "domain";
 domain-id = <0>;
@@ -60,8 +60,8 @@ clocks {
 #clock-cells = <0>;
 compatible = "ti,keystone,psc-clock";
 clocks = <&chipclk12>;
-clock-output-names = "pcie";
-reg = <0x0235006c 0xb00>, <0x02350000 0x400>;
+clock-output-names = "pcie1";
+reg = <0x0235006c 0xb00>, <0x02350048 0x400>;
 reg-names = "control", "domain";
 domain-id = <18>;
 };
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index b8698ca68647..429471aa7a1f 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -353,13 +353,12 @@
 };
 
 ldo8_reg: ldo8 {
-/* VDD_3v0: Does not go anywhere */
+/* VDD_3V_GP: act led/serial console */
 regulator-name = "ldo8";
 regulator-min-microvolt = <3000000>;
 regulator-max-microvolt = <3000000>;
+regulator-always-on;
 regulator-boot-on;
-/* Unused */
-status = "disabled";
 };
 
 ldo9_reg: ldo9 {
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c
index 84acdfd1d715..5a75cdc81891 100644
--- a/arch/arm/mach-imx/clk-gate2.c
+++ b/arch/arm/mach-imx/clk-gate2.c
@@ -97,7 +97,7 @@ static int clk_gate2_is_enabled(struct clk_hw *hw)
 struct clk_gate2 *gate = to_clk_gate2(hw);
 
 if (gate->share_count)
-return !!(*gate->share_count);
+return !!__clk_get_enable_count(hw->clk);
 else
 return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
 }
@@ -127,10 +127,6 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
 gate->bit_idx = bit_idx;
 gate->flags = clk_gate2_flags;
 gate->lock = lock;
-
-/* Initialize share_count per hardware state */
-if (share_count)
-*share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0;
 gate->share_count = share_count;
 
 init.name = name;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index e7189dcc9309..08d4167cc7c5 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -1,9 +1,6 @@
 menu "TI OMAP/AM/DM/DRA Family"
 depends on ARCH_MULTI_V6 || ARCH_MULTI_V7
 
-config ARCH_OMAP
-bool
-
 config ARCH_OMAP2
 bool "TI OMAP2"
 depends on ARCH_MULTI_V6
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 8fd87a3055bf..9e91a4e7519a 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2065,7 +2065,7 @@ static void _reconfigure_io_chain(void)
 
 spin_lock_irqsave(&io_chain_lock, flags);
 
-if (cpu_is_omap34xx() && omap3_has_io_chain_ctrl())
+if (cpu_is_omap34xx())
 omap3xxx_prm_reconfigure_io_chain();
 else if (cpu_is_omap44xx())
 omap44xx_prm_reconfigure_io_chain();
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 2458be6fc67b..372de3edf4a5 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -45,7 +45,7 @@ static struct omap_prcm_irq_setup omap3_prcm_irq_setup = {
 .ocp_barrier = &omap3xxx_prm_ocp_barrier,
 .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen,
 .restore_irqen = &omap3xxx_prm_restore_irqen,
-.reconfigure_io_chain = &omap3xxx_prm_reconfigure_io_chain,
+.reconfigure_io_chain = NULL,
 };
 
 /*
@@ -369,15 +369,30 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
 }
 
 /**
- * omap3xxx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
+ * omap3430_pre_es3_1_reconfigure_io_chain - restart wake-up daisy chain
+ *
+ * The ST_IO_CHAIN bit does not exist in 3430 before es3.1. The only
+ * thing we can do is toggle EN_IO bit for earlier omaps.
+ */
+void omap3430_pre_es3_1_reconfigure_io_chain(void)
+{
+omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+PM_WKEN);
+omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+PM_WKEN);
+omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
+}
+
+/**
+ * omap3_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
  *
 * Clear any previously-latched I/O wakeup events and ensure that the
 * I/O wakeup gates are aligned with the current mux settings. Works
 * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then
 * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit. No
-* return value.
+* return value. These registers are only available in 3430 es3.1 and later.
 */
-void omap3xxx_prm_reconfigure_io_chain(void)
+void omap3_prm_reconfigure_io_chain(void)
 {
 int i = 0;
 
@@ -400,6 +415,15 @@ void omap3xxx_prm_reconfigure_io_chain(void)
 }
 
 /**
+ * omap3xxx_prm_reconfigure_io_chain - reconfigure I/O chain
+ */
+void omap3xxx_prm_reconfigure_io_chain(void)
+{
+if (omap3_prcm_irq_setup.reconfigure_io_chain)
+omap3_prcm_irq_setup.reconfigure_io_chain();
+}
+
+/**
 * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
 *
 * Activates the I/O wakeup event latches and allows events logged by
@@ -656,6 +680,13 @@ static int omap3xxx_prm_late_init(void)
 if (!(prm_features & PRM_HAS_IO_WAKEUP))
 return 0;
 
+if (omap3_has_io_chain_ctrl())
+omap3_prcm_irq_setup.reconfigure_io_chain =
+omap3_prm_reconfigure_io_chain;
+else
+omap3_prcm_irq_setup.reconfigure_io_chain =
+omap3430_pre_es3_1_reconfigure_io_chain;
+
 omap3xxx_prm_enable_io_wakeup();
 ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
 if (!ret)
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index 630fa916bbc6..04b013fbc98f 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL(get_clock_tick_rate);
 /*
 * For non device-tree builds, keep legacy timer init
 */
-void pxa_timer_init(void)
+void __init pxa_timer_init(void)
 {
 pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000),
 get_clock_tick_rate());
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 02fc10d2d63b..d055db32ffcb 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -1,3 +1,6 @@
+config ARCH_OMAP
+bool
+
 if ARCH_OMAP
 
 menu "TI OMAP Common Features"
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index 4c4ac163c600..b6bda1838629 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -6,6 +5,8 @@ CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_IA64_DIG=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
@@ -51,9 +52,6 @@ CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_NET_PCI=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
@@ -85,7 +83,6 @@ CONFIG_EXT3_FS=y
 CONFIG_XFS_FS=y
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -95,17 +92,13 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
 CONFIG_CIFS=m
 CONFIG_CIFS_STATS=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=m
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index e8ed3ae70aae..81f686dee53c 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -6,13 +5,13 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=20
 CONFIG_CGROUPS=y
 CONFIG_CPUSETS=y
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
 CONFIG_IA64_CYCLONE=y
@@ -29,14 +28,13 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -82,16 +80,13 @@ CONFIG_FUSION_FC=m
 CONFIG_FUSION_SAS=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
 CONFIG_IGB=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -151,6 +146,7 @@ CONFIG_USB_STORAGE=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_MTHCA=m
 CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INTEL_IOMMU=y
 CONFIG_MSPEC=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
@@ -164,7 +160,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -175,16 +170,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
@@ -225,11 +214,7 @@ CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_T10DIF=y
-CONFIG_INTEL_IOMMU=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index d663efd1e4db..5b4fcdd51457 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -9,6 +8,8 @@ CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_CYCLONE=y
 CONFIG_SMP=y
@@ -24,14 +25,12 @@ CONFIG_BINFMT_MISC=m
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
 CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_BLK_DEV_LOOP=m
@@ -71,15 +70,12 @@ CONFIG_FUSION_SPI=y
 CONFIG_FUSION_FC=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -146,7 +142,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -157,16 +152,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig
index b4548a3e82d5..f0f69fdbddae 100644
--- a/arch/ia64/configs/sim_defconfig
+++ b/arch/ia64/configs/sim_defconfig
@@ -1,13 +1,12 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_IA64_HP_SIM=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SPI_ATTRS=y
@@ -49,8 +47,6 @@ CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_INFO=y
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index c8a3f40e77f6..192ed157c9ce 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -11,6 +10,8 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_IA64_DIG=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
@@ -29,14 +30,12 @@ CONFIG_BINFMT_MISC=m
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
 CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_BLK_DEV_LOOP=m
@@ -53,6 +52,7 @@ CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
 CONFIG_MD=y
@@ -72,15 +72,12 @@ CONFIG_FUSION_FC=y
 CONFIG_FUSION_CTL=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -118,7 +115,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -129,16 +125,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
@@ -180,6 +170,5 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_IA64_GRANULE_16MB=y
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 54bc72eda30d..b504c8e2fd52 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig | |||
| @@ -1,9 +1,9 @@ | |||
| 1 | CONFIG_EXPERIMENTAL=y | ||
| 2 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
| 3 | CONFIG_BSD_PROCESS_ACCT=y | 2 | CONFIG_BSD_PROCESS_ACCT=y |
| 4 | CONFIG_BLK_DEV_INITRD=y | 3 | CONFIG_BLK_DEV_INITRD=y |
| 5 | CONFIG_KPROBES=y | 4 | CONFIG_KPROBES=y |
| 6 | CONFIG_MODULES=y | 5 | CONFIG_MODULES=y |
| 6 | CONFIG_PARTITION_ADVANCED=y | ||
| 7 | CONFIG_IA64_HP_ZX1=y | 7 | CONFIG_IA64_HP_ZX1=y |
| 8 | CONFIG_MCKINLEY=y | 8 | CONFIG_MCKINLEY=y |
| 9 | CONFIG_SMP=y | 9 | CONFIG_SMP=y |
| @@ -18,6 +18,7 @@ CONFIG_EFI_VARS=y | |||
| 18 | CONFIG_BINFMT_MISC=y | 18 | CONFIG_BINFMT_MISC=y |
| 19 | CONFIG_HOTPLUG_PCI=y | 19 | CONFIG_HOTPLUG_PCI=y |
| 20 | CONFIG_HOTPLUG_PCI_ACPI=y | 20 | CONFIG_HOTPLUG_PCI_ACPI=y |
| 21 | CONFIG_NET=y | ||
| 21 | CONFIG_PACKET=y | 22 | CONFIG_PACKET=y |
| 22 | CONFIG_UNIX=y | 23 | CONFIG_UNIX=y |
| 23 | CONFIG_INET=y | 24 | CONFIG_INET=y |
| @@ -37,9 +38,9 @@ CONFIG_CHR_DEV_OSST=y | |||
| 37 | CONFIG_BLK_DEV_SR=y | 38 | CONFIG_BLK_DEV_SR=y |
| 38 | CONFIG_BLK_DEV_SR_VENDOR=y | 39 | CONFIG_BLK_DEV_SR_VENDOR=y |
| 39 | CONFIG_CHR_DEV_SG=y | 40 | CONFIG_CHR_DEV_SG=y |
| 40 | CONFIG_SCSI_MULTI_LUN=y | ||
| 41 | CONFIG_SCSI_CONSTANTS=y | 41 | CONFIG_SCSI_CONSTANTS=y |
| 42 | CONFIG_SCSI_LOGGING=y | 42 | CONFIG_SCSI_LOGGING=y |
| 43 | CONFIG_SCSI_FC_ATTRS=y | ||
| 43 | CONFIG_SCSI_SYM53C8XX_2=y | 44 | CONFIG_SCSI_SYM53C8XX_2=y |
| 44 | CONFIG_SCSI_QLOGIC_1280=y | 45 | CONFIG_SCSI_QLOGIC_1280=y |
| 45 | CONFIG_FUSION=y | 46 | CONFIG_FUSION=y |
| @@ -48,18 +49,15 @@ CONFIG_FUSION_FC=y | |||
| 48 | CONFIG_FUSION_CTL=m | 49 | CONFIG_FUSION_CTL=m |
| 49 | CONFIG_NETDEVICES=y | 50 | CONFIG_NETDEVICES=y |
| 50 | CONFIG_DUMMY=y | 51 | CONFIG_DUMMY=y |
| 51 | CONFIG_NET_ETHERNET=y | 52 | CONFIG_TIGON3=y |
| 52 | CONFIG_NET_TULIP=y | 53 | CONFIG_NET_TULIP=y |
| 53 | CONFIG_TULIP=y | 54 | CONFIG_TULIP=y |
| 54 | CONFIG_TULIP_MWI=y | 55 | CONFIG_TULIP_MWI=y |
| 55 | CONFIG_TULIP_MMIO=y | 56 | CONFIG_TULIP_MMIO=y |
| 56 | CONFIG_TULIP_NAPI=y | 57 | CONFIG_TULIP_NAPI=y |
| 57 | CONFIG_TULIP_NAPI_HW_MITIGATION=y | 58 | CONFIG_TULIP_NAPI_HW_MITIGATION=y |
| 58 | CONFIG_NET_PCI=y | ||
| 59 | CONFIG_NET_VENDOR_INTEL=y | ||
| 60 | CONFIG_E100=y | 59 | CONFIG_E100=y |
| 61 | CONFIG_E1000=y | 60 | CONFIG_E1000=y |
| 62 | CONFIG_TIGON3=y | ||
| 63 | CONFIG_INPUT_JOYDEV=y | 61 | CONFIG_INPUT_JOYDEV=y |
| 64 | CONFIG_INPUT_EVDEV=y | 62 | CONFIG_INPUT_EVDEV=y |
| 65 | # CONFIG_INPUT_KEYBOARD is not set | 63 | # CONFIG_INPUT_KEYBOARD is not set |
| @@ -100,7 +98,6 @@ CONFIG_USB_STORAGE=y | |||
| 100 | CONFIG_EXT2_FS=y | 98 | CONFIG_EXT2_FS=y |
| 101 | CONFIG_EXT2_FS_XATTR=y | 99 | CONFIG_EXT2_FS_XATTR=y |
| 102 | CONFIG_EXT3_FS=y | 100 | CONFIG_EXT3_FS=y |
| 103 | CONFIG_AUTOFS_FS=y | ||
| 104 | CONFIG_ISO9660_FS=y | 101 | CONFIG_ISO9660_FS=y |
| 105 | CONFIG_JOLIET=y | 102 | CONFIG_JOLIET=y |
| 106 | CONFIG_UDF_FS=y | 103 | CONFIG_UDF_FS=y |
| @@ -110,12 +107,9 @@ CONFIG_PROC_KCORE=y | |||
| 110 | CONFIG_TMPFS=y | 107 | CONFIG_TMPFS=y |
| 111 | CONFIG_HUGETLBFS=y | 108 | CONFIG_HUGETLBFS=y |
| 112 | CONFIG_NFS_FS=y | 109 | CONFIG_NFS_FS=y |
| 113 | CONFIG_NFS_V3=y | ||
| 114 | CONFIG_NFS_V4=y | 110 | CONFIG_NFS_V4=y |
| 115 | CONFIG_NFSD=y | 111 | CONFIG_NFSD=y |
| 116 | CONFIG_NFSD_V3=y | 112 | CONFIG_NFSD_V3=y |
| 117 | CONFIG_PARTITION_ADVANCED=y | ||
| 118 | CONFIG_EFI_PARTITION=y | ||
| 119 | CONFIG_NLS_CODEPAGE_437=y | 113 | CONFIG_NLS_CODEPAGE_437=y |
| 120 | CONFIG_NLS_CODEPAGE_737=y | 114 | CONFIG_NLS_CODEPAGE_737=y |
| 121 | CONFIG_NLS_CODEPAGE_775=y | 115 | CONFIG_NLS_CODEPAGE_775=y |
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 8f219dac9598..e24feb0633aa 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig | |||
| @@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y | |||
| 19 | # CONFIG_BLK_DEV_BSG is not set | 19 | # CONFIG_BLK_DEV_BSG is not set |
| 20 | CONFIG_PCI=y | 20 | CONFIG_PCI=y |
| 21 | CONFIG_BINFMT_MISC=m | 21 | CONFIG_BINFMT_MISC=m |
| 22 | CONFIG_NET=y | ||
| 22 | CONFIG_PACKET=y | 23 | CONFIG_PACKET=y |
| 23 | CONFIG_UNIX=y | 24 | CONFIG_UNIX=y |
| 24 | CONFIG_INET=y | 25 | CONFIG_INET=y |
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index cc0756021398..48e16d98b2cc 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig | |||
| @@ -28,6 +28,7 @@ CONFIG_MIPS32_COMPAT=y | |||
| 28 | CONFIG_MIPS32_O32=y | 28 | CONFIG_MIPS32_O32=y |
| 29 | CONFIG_MIPS32_N32=y | 29 | CONFIG_MIPS32_N32=y |
| 30 | CONFIG_PM=y | 30 | CONFIG_PM=y |
| 31 | CONFIG_NET=y | ||
| 31 | CONFIG_PACKET=y | 32 | CONFIG_PACKET=y |
| 32 | CONFIG_UNIX=y | 33 | CONFIG_UNIX=y |
| 33 | CONFIG_XFRM_USER=m | 34 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig index 2575302aa2be..4f37a5985459 100644 --- a/arch/mips/configs/jazz_defconfig +++ b/arch/mips/configs/jazz_defconfig | |||
| @@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y | |||
| 18 | CONFIG_MODVERSIONS=y | 18 | CONFIG_MODVERSIONS=y |
| 19 | CONFIG_BINFMT_MISC=m | 19 | CONFIG_BINFMT_MISC=m |
| 20 | CONFIG_PM=y | 20 | CONFIG_PM=y |
| 21 | CONFIG_NET=y | ||
| 21 | CONFIG_PACKET=m | 22 | CONFIG_PACKET=m |
| 22 | CONFIG_UNIX=y | 23 | CONFIG_UNIX=y |
| 23 | CONFIG_NET_KEY=m | 24 | CONFIG_NET_KEY=m |
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 4cb787ff273e..1c6191ebd583 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig | |||
| @@ -59,6 +59,7 @@ CONFIG_MIPS32_COMPAT=y | |||
| 59 | CONFIG_MIPS32_O32=y | 59 | CONFIG_MIPS32_O32=y |
| 60 | CONFIG_MIPS32_N32=y | 60 | CONFIG_MIPS32_N32=y |
| 61 | CONFIG_PM_RUNTIME=y | 61 | CONFIG_PM_RUNTIME=y |
| 62 | CONFIG_NET=y | ||
| 62 | CONFIG_PACKET=y | 63 | CONFIG_PACKET=y |
| 63 | CONFIG_UNIX=y | 64 | CONFIG_UNIX=y |
| 64 | CONFIG_XFRM_USER=y | 65 | CONFIG_XFRM_USER=y |
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index e18741ea1771..f57b96dcf7df 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig | |||
| @@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y | |||
| 19 | CONFIG_MODVERSIONS=y | 19 | CONFIG_MODVERSIONS=y |
| 20 | CONFIG_MODULE_SRCVERSION_ALL=y | 20 | CONFIG_MODULE_SRCVERSION_ALL=y |
| 21 | CONFIG_PCI=y | 21 | CONFIG_PCI=y |
| 22 | CONFIG_NET=y | ||
| 22 | CONFIG_PACKET=y | 23 | CONFIG_PACKET=y |
| 23 | CONFIG_UNIX=y | 24 | CONFIG_UNIX=y |
| 24 | CONFIG_XFRM_USER=m | 25 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index cf0e01f814e1..d41742dd26c8 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig | |||
| @@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y | |||
| 20 | CONFIG_MODVERSIONS=y | 20 | CONFIG_MODVERSIONS=y |
| 21 | CONFIG_MODULE_SRCVERSION_ALL=y | 21 | CONFIG_MODULE_SRCVERSION_ALL=y |
| 22 | CONFIG_PCI=y | 22 | CONFIG_PCI=y |
| 23 | CONFIG_NET=y | ||
| 23 | CONFIG_PACKET=y | 24 | CONFIG_PACKET=y |
| 24 | CONFIG_UNIX=y | 25 | CONFIG_UNIX=y |
| 25 | CONFIG_XFRM_USER=m | 26 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig index edd9ec9cb678..a7806e83ea0f 100644 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ b/arch/mips/configs/malta_kvm_guest_defconfig | |||
| @@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y | |||
| 19 | CONFIG_MODVERSIONS=y | 19 | CONFIG_MODVERSIONS=y |
| 20 | CONFIG_MODULE_SRCVERSION_ALL=y | 20 | CONFIG_MODULE_SRCVERSION_ALL=y |
| 21 | CONFIG_PCI=y | 21 | CONFIG_PCI=y |
| 22 | CONFIG_NET=y | ||
| 22 | CONFIG_PACKET=y | 23 | CONFIG_PACKET=y |
| 23 | CONFIG_UNIX=y | 24 | CONFIG_UNIX=y |
| 24 | CONFIG_XFRM_USER=m | 25 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index d269a5326a30..9b6926d6bb32 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig | |||
| @@ -27,6 +27,7 @@ CONFIG_PD6729=m | |||
| 27 | CONFIG_I82092=m | 27 | CONFIG_I82092=m |
| 28 | CONFIG_BINFMT_MISC=m | 28 | CONFIG_BINFMT_MISC=m |
| 29 | CONFIG_PM=y | 29 | CONFIG_PM=y |
| 30 | CONFIG_NET=y | ||
| 30 | CONFIG_PACKET=m | 31 | CONFIG_PACKET=m |
| 31 | CONFIG_UNIX=y | 32 | CONFIG_UNIX=y |
| 32 | CONFIG_XFRM_USER=m | 33 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig index 2f660e9a0da6..70509a48df82 100644 --- a/arch/mips/configs/nlm_xlp_defconfig +++ b/arch/mips/configs/nlm_xlp_defconfig | |||
| @@ -63,6 +63,7 @@ CONFIG_MIPS32_O32=y | |||
| 63 | CONFIG_MIPS32_N32=y | 63 | CONFIG_MIPS32_N32=y |
| 64 | CONFIG_PM_RUNTIME=y | 64 | CONFIG_PM_RUNTIME=y |
| 65 | CONFIG_PM_DEBUG=y | 65 | CONFIG_PM_DEBUG=y |
| 66 | CONFIG_NET=y | ||
| 66 | CONFIG_PACKET=y | 67 | CONFIG_PACKET=y |
| 67 | CONFIG_UNIX=y | 68 | CONFIG_UNIX=y |
| 68 | CONFIG_XFRM_USER=m | 69 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig index c6f84655c98a..82207e8079f3 100644 --- a/arch/mips/configs/nlm_xlr_defconfig +++ b/arch/mips/configs/nlm_xlr_defconfig | |||
| @@ -43,6 +43,7 @@ CONFIG_PCI_DEBUG=y | |||
| 43 | CONFIG_BINFMT_MISC=m | 43 | CONFIG_BINFMT_MISC=m |
| 44 | CONFIG_PM_RUNTIME=y | 44 | CONFIG_PM_RUNTIME=y |
| 45 | CONFIG_PM_DEBUG=y | 45 | CONFIG_PM_DEBUG=y |
| 46 | CONFIG_NET=y | ||
| 46 | CONFIG_PACKET=y | 47 | CONFIG_PACKET=y |
| 47 | CONFIG_UNIX=y | 48 | CONFIG_UNIX=y |
| 48 | CONFIG_XFRM_USER=m | 49 | CONFIG_XFRM_USER=m |
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index 29d79ae8a823..db029f4ff759 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig | |||
| @@ -20,6 +20,7 @@ CONFIG_MODVERSIONS=y | |||
| 20 | CONFIG_PCI=y | 20 | CONFIG_PCI=y |
| 21 | CONFIG_BINFMT_MISC=m | 21 | CONFIG_BINFMT_MISC=m |
| 22 | CONFIG_PM=y | 22 | CONFIG_PM=y |
| 23 | CONFIG_NET=y | ||
| 23 | CONFIG_PACKET=m | 24 | CONFIG_PACKET=m |
| 24 | CONFIG_UNIX=y | 25 | CONFIG_UNIX=y |
| 25 | CONFIG_NET_KEY=m | 26 | CONFIG_NET_KEY=m |
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 5d25462de8a6..2f7c734771f4 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S | |||
| @@ -129,7 +129,11 @@ NESTED(_mcount, PT_SIZE, ra) | |||
| 129 | nop | 129 | nop |
| 130 | #endif | 130 | #endif |
| 131 | b ftrace_stub | 131 | b ftrace_stub |
| 132 | #ifdef CONFIG_32BIT | ||
| 133 | addiu sp, sp, 8 | ||
| 134 | #else | ||
| 132 | nop | 135 | nop |
| 136 | #endif | ||
| 133 | 137 | ||
| 134 | static_trace: | 138 | static_trace: |
| 135 | MCOUNT_SAVE_REGS | 139 | MCOUNT_SAVE_REGS |
| @@ -139,6 +143,9 @@ static_trace: | |||
| 139 | move a1, AT /* arg2: parent's return address */ | 143 | move a1, AT /* arg2: parent's return address */ |
| 140 | 144 | ||
| 141 | MCOUNT_RESTORE_REGS | 145 | MCOUNT_RESTORE_REGS |
| 146 | #ifdef CONFIG_32BIT | ||
| 147 | addiu sp, sp, 8 | ||
| 148 | #endif | ||
| 142 | .globl ftrace_stub | 149 | .globl ftrace_stub |
| 143 | ftrace_stub: | 150 | ftrace_stub: |
| 144 | RETURN_BACK | 151 | RETURN_BACK |
| @@ -183,6 +190,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra) | |||
| 183 | jal prepare_ftrace_return | 190 | jal prepare_ftrace_return |
| 184 | nop | 191 | nop |
| 185 | MCOUNT_RESTORE_REGS | 192 | MCOUNT_RESTORE_REGS |
| 193 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
| 194 | #ifdef CONFIG_32BIT | ||
| 195 | addiu sp, sp, 8 | ||
| 196 | #endif | ||
| 197 | #endif | ||
| 186 | RETURN_BACK | 198 | RETURN_BACK |
| 187 | END(ftrace_graph_caller) | 199 | END(ftrace_graph_caller) |
| 188 | 200 | ||
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index bf0fc6b16ad9..7a4727795a70 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
| @@ -650,9 +650,9 @@ static inline int cop1_64bit(struct pt_regs *xcp) | |||
| 650 | #define SIFROMREG(si, x) \ | 650 | #define SIFROMREG(si, x) \ |
| 651 | do { \ | 651 | do { \ |
| 652 | if (cop1_64bit(xcp)) \ | 652 | if (cop1_64bit(xcp)) \ |
| 653 | (si) = get_fpr32(&ctx->fpr[x], 0); \ | 653 | (si) = (int)get_fpr32(&ctx->fpr[x], 0); \ |
| 654 | else \ | 654 | else \ |
| 655 | (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ | 655 | (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ |
| 656 | } while (0) | 656 | } while (0) |
| 657 | 657 | ||
| 658 | #define SITOREG(si, x) \ | 658 | #define SITOREG(si, x) \ |
| @@ -667,7 +667,7 @@ do { \ | |||
| 667 | } \ | 667 | } \ |
| 668 | } while (0) | 668 | } while (0) |
| 669 | 669 | ||
| 670 | #define SIFROMHREG(si, x) ((si) = get_fpr32(&ctx->fpr[x], 1)) | 670 | #define SIFROMHREG(si, x) ((si) = (int)get_fpr32(&ctx->fpr[x], 1)) |
| 671 | 671 | ||
| 672 | #define SITOHREG(si, x) \ | 672 | #define SITOHREG(si, x) \ |
| 673 | do { \ | 673 | do { \ |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 571aab064936..f42e35e42790 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
| @@ -53,6 +53,7 @@ | |||
| 53 | */ | 53 | */ |
| 54 | unsigned long empty_zero_page, zero_page_mask; | 54 | unsigned long empty_zero_page, zero_page_mask; |
| 55 | EXPORT_SYMBOL_GPL(empty_zero_page); | 55 | EXPORT_SYMBOL_GPL(empty_zero_page); |
| 56 | EXPORT_SYMBOL(zero_page_mask); | ||
| 56 | 57 | ||
| 57 | /* | 58 | /* |
| 58 | * Not static inline because used by IP27 special magic initialization code | 59 | * Not static inline because used by IP27 special magic initialization code |
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 7187664034c3..5db8882f732c 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
| @@ -48,7 +48,12 @@ cflags-y := -pipe | |||
| 48 | 48 | ||
| 49 | # These flags should be implied by an hppa-linux configuration, but they | 49 | # These flags should be implied by an hppa-linux configuration, but they |
| 50 | # are not in gcc 3.2. | 50 | # are not in gcc 3.2. |
| 51 | cflags-y += -mno-space-regs -mfast-indirect-calls | 51 | cflags-y += -mno-space-regs |
| 52 | |||
| 53 | # -mfast-indirect-calls is only relevant for 32-bit kernels. | ||
| 54 | ifndef CONFIG_64BIT | ||
| 55 | cflags-y += -mfast-indirect-calls | ||
| 56 | endif | ||
| 52 | 57 | ||
| 53 | # Currently we save and restore fpregs on all kernel entry/interruption paths. | 58 | # Currently we save and restore fpregs on all kernel entry/interruption paths. |
| 54 | # If that gets optimized, we might need to disable the use of fpregs in the | 59 | # If that gets optimized, we might need to disable the use of fpregs in the |
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig index 90025322b75e..0490199d7b15 100644 --- a/arch/parisc/configs/a500_defconfig +++ b/arch/parisc/configs/a500_defconfig | |||
| @@ -31,6 +31,7 @@ CONFIG_PD6729=m | |||
| 31 | CONFIG_I82092=m | 31 | CONFIG_I82092=m |
| 32 | # CONFIG_SUPERIO is not set | 32 | # CONFIG_SUPERIO is not set |
| 33 | # CONFIG_CHASSIS_LCD_LED is not set | 33 | # CONFIG_CHASSIS_LCD_LED is not set |
| 34 | CONFIG_NET=y | ||
| 34 | CONFIG_PACKET=y | 35 | CONFIG_PACKET=y |
| 35 | CONFIG_UNIX=y | 36 | CONFIG_UNIX=y |
| 36 | CONFIG_XFRM_USER=m | 37 | CONFIG_XFRM_USER=m |
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig index 8249ac9d9cfc..269c23d23fcb 100644 --- a/arch/parisc/configs/c8000_defconfig +++ b/arch/parisc/configs/c8000_defconfig | |||
| @@ -33,6 +33,7 @@ CONFIG_PCI_LBA=y | |||
| 33 | # CONFIG_PDC_CHASSIS_WARN is not set | 33 | # CONFIG_PDC_CHASSIS_WARN is not set |
| 34 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 34 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
| 35 | CONFIG_BINFMT_MISC=m | 35 | CONFIG_BINFMT_MISC=m |
| 36 | CONFIG_NET=y | ||
| 36 | CONFIG_PACKET=y | 37 | CONFIG_PACKET=y |
| 37 | CONFIG_UNIX=y | 38 | CONFIG_UNIX=y |
| 38 | CONFIG_XFRM_USER=m | 39 | CONFIG_XFRM_USER=m |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 3bab72462ab5..92438c21d453 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/user.h> | 17 | #include <linux/user.h> |
| 18 | #include <linux/personality.h> | 18 | #include <linux/personality.h> |
| 19 | #include <linux/security.h> | 19 | #include <linux/security.h> |
| 20 | #include <linux/seccomp.h> | ||
| 20 | #include <linux/compat.h> | 21 | #include <linux/compat.h> |
| 21 | #include <linux/signal.h> | 22 | #include <linux/signal.h> |
| 22 | #include <linux/audit.h> | 23 | #include <linux/audit.h> |
| @@ -271,10 +272,7 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
| 271 | long ret = 0; | 272 | long ret = 0; |
| 272 | 273 | ||
| 273 | /* Do the secure computing check first. */ | 274 | /* Do the secure computing check first. */ |
| 274 | if (secure_computing(regs->gr[20])) { | 275 | secure_computing_strict(regs->gr[20]); |
| 275 | /* seccomp failures shouldn't expose any additional code. */ | ||
| 276 | return -1; | ||
| 277 | } | ||
| 278 | 276 | ||
| 279 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 277 | if (test_thread_flag(TIF_SYSCALL_TRACE) && |
| 280 | tracehook_report_syscall_entry(regs)) | 278 | tracehook_report_syscall_entry(regs)) |
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig index 5e2aa43562b5..59734916986a 100644 --- a/arch/powerpc/configs/c2k_defconfig +++ b/arch/powerpc/configs/c2k_defconfig | |||
| @@ -29,6 +29,7 @@ CONFIG_PM=y | |||
| 29 | CONFIG_PCI_MSI=y | 29 | CONFIG_PCI_MSI=y |
| 30 | CONFIG_HOTPLUG_PCI=y | 30 | CONFIG_HOTPLUG_PCI=y |
| 31 | CONFIG_HOTPLUG_PCI_SHPC=m | 31 | CONFIG_HOTPLUG_PCI_SHPC=m |
| 32 | CONFIG_NET=y | ||
| 32 | CONFIG_PACKET=y | 33 | CONFIG_PACKET=y |
| 33 | CONFIG_UNIX=y | 34 | CONFIG_UNIX=y |
| 34 | CONFIG_XFRM_USER=y | 35 | CONFIG_XFRM_USER=y |
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 553e66278010..0351b5ffdfef 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig | |||
| @@ -31,6 +31,7 @@ CONFIG_HIBERNATION=y | |||
| 31 | CONFIG_APM_EMULATION=y | 31 | CONFIG_APM_EMULATION=y |
| 32 | CONFIG_PCCARD=m | 32 | CONFIG_PCCARD=m |
| 33 | CONFIG_YENTA=m | 33 | CONFIG_YENTA=m |
| 34 | CONFIG_NET=y | ||
| 34 | CONFIG_PACKET=y | 35 | CONFIG_PACKET=y |
| 35 | CONFIG_UNIX=y | 36 | CONFIG_UNIX=y |
| 36 | CONFIG_XFRM_USER=y | 37 | CONFIG_XFRM_USER=y |
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index f6c02f8cdc62..36518870e6b2 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
| @@ -58,6 +58,7 @@ CONFIG_ELECTRA_CF=y | |||
| 58 | CONFIG_HOTPLUG_PCI=y | 58 | CONFIG_HOTPLUG_PCI=y |
| 59 | CONFIG_HOTPLUG_PCI_RPA=m | 59 | CONFIG_HOTPLUG_PCI_RPA=m |
| 60 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m | 60 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m |
| 61 | CONFIG_NET=y | ||
| 61 | CONFIG_PACKET=y | 62 | CONFIG_PACKET=y |
| 62 | CONFIG_UNIX=y | 63 | CONFIG_UNIX=y |
| 63 | CONFIG_XFRM_USER=m | 64 | CONFIG_XFRM_USER=m |
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 587f5514f9b1..c3a3269b0865 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig | |||
| @@ -33,6 +33,7 @@ CONFIG_SPARSEMEM_MANUAL=y | |||
| 33 | CONFIG_PCI_MSI=y | 33 | CONFIG_PCI_MSI=y |
| 34 | CONFIG_PCCARD=y | 34 | CONFIG_PCCARD=y |
| 35 | CONFIG_HOTPLUG_PCI=y | 35 | CONFIG_HOTPLUG_PCI=y |
| 36 | CONFIG_NET=y | ||
| 36 | CONFIG_PACKET=y | 37 | CONFIG_PACKET=y |
| 37 | CONFIG_UNIX=y | 38 | CONFIG_UNIX=y |
| 38 | CONFIG_XFRM_USER=m | 39 | CONFIG_XFRM_USER=m |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 50375f1f59e7..dd2a9cab4b50 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
| @@ -53,6 +53,7 @@ CONFIG_SCHED_SMT=y | |||
| 53 | CONFIG_HOTPLUG_PCI=y | 53 | CONFIG_HOTPLUG_PCI=y |
| 54 | CONFIG_HOTPLUG_PCI_RPA=m | 54 | CONFIG_HOTPLUG_PCI_RPA=m |
| 55 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m | 55 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m |
| 56 | CONFIG_NET=y | ||
| 56 | CONFIG_PACKET=y | 57 | CONFIG_PACKET=y |
| 57 | CONFIG_UNIX=y | 58 | CONFIG_UNIX=y |
| 58 | CONFIG_XFRM_USER=m | 59 | CONFIG_XFRM_USER=m |
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig index 4428ee428f4e..63392f4b29a4 100644 --- a/arch/powerpc/configs/pseries_le_defconfig +++ b/arch/powerpc/configs/pseries_le_defconfig | |||
| @@ -55,6 +55,7 @@ CONFIG_SCHED_SMT=y | |||
| 55 | CONFIG_HOTPLUG_PCI=y | 55 | CONFIG_HOTPLUG_PCI=y |
| 56 | CONFIG_HOTPLUG_PCI_RPA=m | 56 | CONFIG_HOTPLUG_PCI_RPA=m |
| 57 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m | 57 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m |
| 58 | CONFIG_NET=y | ||
| 58 | CONFIG_PACKET=y | 59 | CONFIG_PACKET=y |
| 59 | CONFIG_UNIX=y | 60 | CONFIG_UNIX=y |
| 60 | CONFIG_XFRM_USER=m | 61 | CONFIG_XFRM_USER=m |
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 3ca1894ade09..9d94fdd9f525 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig | |||
| @@ -63,6 +63,7 @@ CONFIG_CRASH_DUMP=y | |||
| 63 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 63 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
| 64 | CONFIG_BINFMT_MISC=m | 64 | CONFIG_BINFMT_MISC=m |
| 65 | CONFIG_HIBERNATION=y | 65 | CONFIG_HIBERNATION=y |
| 66 | CONFIG_NET=y | ||
| 66 | CONFIG_PACKET=y | 67 | CONFIG_PACKET=y |
| 67 | CONFIG_PACKET_DIAG=m | 68 | CONFIG_PACKET_DIAG=m |
| 68 | CONFIG_UNIX=y | 69 | CONFIG_UNIX=y |
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 4830aa6e6f53..90f514baa37d 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig | |||
| @@ -61,6 +61,7 @@ CONFIG_CRASH_DUMP=y | |||
| 61 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 61 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
| 62 | CONFIG_BINFMT_MISC=m | 62 | CONFIG_BINFMT_MISC=m |
| 63 | CONFIG_HIBERNATION=y | 63 | CONFIG_HIBERNATION=y |
| 64 | CONFIG_NET=y | ||
| 64 | CONFIG_PACKET=y | 65 | CONFIG_PACKET=y |
| 65 | CONFIG_PACKET_DIAG=m | 66 | CONFIG_PACKET_DIAG=m |
| 66 | CONFIG_UNIX=y | 67 | CONFIG_UNIX=y |
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 61db449bf309..13559d32af69 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
| @@ -59,6 +59,7 @@ CONFIG_CRASH_DUMP=y | |||
| 59 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 59 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
| 60 | CONFIG_BINFMT_MISC=m | 60 | CONFIG_BINFMT_MISC=m |
| 61 | CONFIG_HIBERNATION=y | 61 | CONFIG_HIBERNATION=y |
| 62 | CONFIG_NET=y | ||
| 62 | CONFIG_PACKET=y | 63 | CONFIG_PACKET=y |
| 63 | CONFIG_PACKET_DIAG=m | 64 | CONFIG_PACKET_DIAG=m |
| 64 | CONFIG_UNIX=y | 65 | CONFIG_UNIX=y |
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 948e0e057a23..e376789f2d8d 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig | |||
| @@ -23,6 +23,7 @@ CONFIG_CRASH_DUMP=y | |||
| 23 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 23 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
| 24 | # CONFIG_SECCOMP is not set | 24 | # CONFIG_SECCOMP is not set |
| 25 | # CONFIG_IUCV is not set | 25 | # CONFIG_IUCV is not set |
| 26 | CONFIG_NET=y | ||
| 26 | CONFIG_ATM=y | 27 | CONFIG_ATM=y |
| 27 | CONFIG_ATM_LANE=y | 28 | CONFIG_ATM_LANE=y |
| 28 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 29 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 2e56498a40df..fab35a8efa4f 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
| @@ -50,6 +50,7 @@ CONFIG_CMA=y | |||
| 50 | CONFIG_CRASH_DUMP=y | 50 | CONFIG_CRASH_DUMP=y |
| 51 | CONFIG_BINFMT_MISC=m | 51 | CONFIG_BINFMT_MISC=m |
| 52 | CONFIG_HIBERNATION=y | 52 | CONFIG_HIBERNATION=y |
| 53 | CONFIG_NET=y | ||
| 53 | CONFIG_PACKET=y | 54 | CONFIG_PACKET=y |
| 54 | CONFIG_UNIX=y | 55 | CONFIG_UNIX=y |
| 55 | CONFIG_NET_KEY=y | 56 | CONFIG_NET_KEY=y |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 0c1073ed1e84..c7235e01fd67 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
| @@ -43,6 +43,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); | |||
| 43 | 43 | ||
| 44 | unsigned long empty_zero_page, zero_page_mask; | 44 | unsigned long empty_zero_page, zero_page_mask; |
| 45 | EXPORT_SYMBOL(empty_zero_page); | 45 | EXPORT_SYMBOL(empty_zero_page); |
| 46 | EXPORT_SYMBOL(zero_page_mask); | ||
| 46 | 47 | ||
| 47 | static void __init setup_zero_pages(void) | 48 | static void __init setup_zero_pages(void) |
| 48 | { | 49 | { |
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig index 6a96b9a2f7a5..bbd4c2298708 100644 --- a/arch/sh/configs/sdk7780_defconfig +++ b/arch/sh/configs/sdk7780_defconfig | |||
| @@ -30,6 +30,7 @@ CONFIG_PCI_DEBUG=y | |||
| 30 | CONFIG_PCCARD=y | 30 | CONFIG_PCCARD=y |
| 31 | CONFIG_YENTA=y | 31 | CONFIG_YENTA=y |
| 32 | CONFIG_HOTPLUG_PCI=y | 32 | CONFIG_HOTPLUG_PCI=y |
| 33 | CONFIG_NET=y | ||
| 33 | CONFIG_PACKET=y | 34 | CONFIG_PACKET=y |
| 34 | CONFIG_UNIX=y | 35 | CONFIG_UNIX=y |
| 35 | CONFIG_INET=y | 36 | CONFIG_INET=y |
diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index e741b1e36acd..df25ae774ee0 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig | |||
| @@ -25,6 +25,7 @@ CONFIG_CMDLINE_OVERWRITE=y | |||
| 25 | CONFIG_CMDLINE="console=ttySC1,115200 ip=dhcp root=/dev/nfs rw nfsroot=/nfs/rootfs,rsize=1024,wsize=1024 earlyprintk=sh-sci.1" | 25 | CONFIG_CMDLINE="console=ttySC1,115200 ip=dhcp root=/dev/nfs rw nfsroot=/nfs/rootfs,rsize=1024,wsize=1024 earlyprintk=sh-sci.1" |
| 26 | CONFIG_PCCARD=y | 26 | CONFIG_PCCARD=y |
| 27 | CONFIG_BINFMT_MISC=y | 27 | CONFIG_BINFMT_MISC=y |
| 28 | CONFIG_NET=y | ||
| 28 | CONFIG_PACKET=y | 29 | CONFIG_PACKET=y |
| 29 | CONFIG_UNIX=y | 30 | CONFIG_UNIX=y |
| 30 | CONFIG_XFRM_USER=y | 31 | CONFIG_XFRM_USER=y |
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 9d8521b8c854..6b68f12f29db 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig | |||
| @@ -29,6 +29,7 @@ CONFIG_PCI=y | |||
| 29 | CONFIG_PCI_MSI=y | 29 | CONFIG_PCI_MSI=y |
| 30 | CONFIG_SUN_OPENPROMFS=m | 30 | CONFIG_SUN_OPENPROMFS=m |
| 31 | CONFIG_BINFMT_MISC=m | 31 | CONFIG_BINFMT_MISC=m |
| 32 | CONFIG_NET=y | ||
| 32 | CONFIG_PACKET=y | 33 | CONFIG_PACKET=y |
| 33 | CONFIG_UNIX=y | 34 | CONFIG_UNIX=y |
| 34 | CONFIG_XFRM_USER=m | 35 | CONFIG_XFRM_USER=m |
diff --git a/arch/sparc/net/bpf_jit_asm.S b/arch/sparc/net/bpf_jit_asm.S index 9d016c7017f7..8c83f4b8eb15 100644 --- a/arch/sparc/net/bpf_jit_asm.S +++ b/arch/sparc/net/bpf_jit_asm.S | |||
| @@ -6,10 +6,12 @@ | |||
| 6 | #define SAVE_SZ 176 | 6 | #define SAVE_SZ 176 |
| 7 | #define SCRATCH_OFF STACK_BIAS + 128 | 7 | #define SCRATCH_OFF STACK_BIAS + 128 |
| 8 | #define BE_PTR(label) be,pn %xcc, label | 8 | #define BE_PTR(label) be,pn %xcc, label |
| 9 | #define SIGN_EXTEND(reg) sra reg, 0, reg | ||
| 9 | #else | 10 | #else |
| 10 | #define SAVE_SZ 96 | 11 | #define SAVE_SZ 96 |
| 11 | #define SCRATCH_OFF 72 | 12 | #define SCRATCH_OFF 72 |
| 12 | #define BE_PTR(label) be label | 13 | #define BE_PTR(label) be label |
| 14 | #define SIGN_EXTEND(reg) | ||
| 13 | #endif | 15 | #endif |
| 14 | 16 | ||
| 15 | #define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ | 17 | #define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ |
| @@ -135,6 +137,7 @@ bpf_slow_path_byte_msh: | |||
| 135 | save %sp, -SAVE_SZ, %sp; \ | 137 | save %sp, -SAVE_SZ, %sp; \ |
| 136 | mov %i0, %o0; \ | 138 | mov %i0, %o0; \ |
| 137 | mov r_OFF, %o1; \ | 139 | mov r_OFF, %o1; \ |
| 140 | SIGN_EXTEND(%o1); \ | ||
| 138 | call bpf_internal_load_pointer_neg_helper; \ | 141 | call bpf_internal_load_pointer_neg_helper; \ |
| 139 | mov (LEN), %o2; \ | 142 | mov (LEN), %o2; \ |
| 140 | mov %o0, r_TMP; \ | 143 | mov %o0, r_TMP; \ |
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 1f76c22a6a75..ece4af0575e9 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c | |||
| @@ -184,7 +184,7 @@ do { \ | |||
| 184 | */ | 184 | */ |
| 185 | #define emit_alu_K(OPCODE, K) \ | 185 | #define emit_alu_K(OPCODE, K) \ |
| 186 | do { \ | 186 | do { \ |
| 187 | if (K) { \ | 187 | if (K || OPCODE == AND || OPCODE == MUL) { \ |
| 188 | unsigned int _insn = OPCODE; \ | 188 | unsigned int _insn = OPCODE; \ |
| 189 | _insn |= RS1(r_A) | RD(r_A); \ | 189 | _insn |= RS1(r_A) | RD(r_A); \ |
| 190 | if (is_simm13(K)) { \ | 190 | if (is_simm13(K)) { \ |
| @@ -234,12 +234,18 @@ do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \ | |||
| 234 | __emit_load8(BASE, STRUCT, FIELD, DEST); \ | 234 | __emit_load8(BASE, STRUCT, FIELD, DEST); \ |
| 235 | } while (0) | 235 | } while (0) |
| 236 | 236 | ||
| 237 | #define emit_ldmem(OFF, DEST) \ | 237 | #ifdef CONFIG_SPARC64 |
| 238 | do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST); \ | 238 | #define BIAS (STACK_BIAS - 4) |
| 239 | #else | ||
| 240 | #define BIAS (-4) | ||
| 241 | #endif | ||
| 242 | |||
| 243 | #define emit_ldmem(OFF, DEST) \ | ||
| 244 | do { *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \ | ||
| 239 | } while (0) | 245 | } while (0) |
| 240 | 246 | ||
| 241 | #define emit_stmem(OFF, SRC) \ | 247 | #define emit_stmem(OFF, SRC) \ |
| 242 | do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC); \ | 248 | do { *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \ |
| 243 | } while (0) | 249 | } while (0) |
| 244 | 250 | ||
| 245 | #ifdef CONFIG_SMP | 251 | #ifdef CONFIG_SMP |
| @@ -615,10 +621,11 @@ void bpf_jit_compile(struct bpf_prog *fp) | |||
| 615 | case BPF_ANC | SKF_AD_VLAN_TAG: | 621 | case BPF_ANC | SKF_AD_VLAN_TAG: |
| 616 | case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: | 622 | case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: |
| 617 | emit_skb_load16(vlan_tci, r_A); | 623 | emit_skb_load16(vlan_tci, r_A); |
| 618 | if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { | 624 | if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) { |
| 619 | emit_andi(r_A, VLAN_VID_MASK, r_A); | 625 | emit_alu_K(SRL, 12); |
| 626 | emit_andi(r_A, 1, r_A); | ||
| 620 | } else { | 627 | } else { |
| 621 | emit_loadimm(VLAN_TAG_PRESENT, r_TMP); | 628 | emit_loadimm(~VLAN_TAG_PRESENT, r_TMP); |
| 622 | emit_and(r_A, r_TMP, r_A); | 629 | emit_and(r_A, r_TMP, r_A); |
| 623 | } | 630 | } |
| 624 | break; | 631 | break; |
| @@ -630,15 +637,19 @@ void bpf_jit_compile(struct bpf_prog *fp) | |||
| 630 | emit_loadimm(K, r_X); | 637 | emit_loadimm(K, r_X); |
| 631 | break; | 638 | break; |
| 632 | case BPF_LD | BPF_MEM: | 639 | case BPF_LD | BPF_MEM: |
| 640 | seen |= SEEN_MEM; | ||
| 633 | emit_ldmem(K * 4, r_A); | 641 | emit_ldmem(K * 4, r_A); |
| 634 | break; | 642 | break; |
| 635 | case BPF_LDX | BPF_MEM: | 643 | case BPF_LDX | BPF_MEM: |
| 644 | seen |= SEEN_MEM | SEEN_XREG; | ||
| 636 | emit_ldmem(K * 4, r_X); | 645 | emit_ldmem(K * 4, r_X); |
| 637 | break; | 646 | break; |
| 638 | case BPF_ST: | 647 | case BPF_ST: |
| 648 | seen |= SEEN_MEM; | ||
| 639 | emit_stmem(K * 4, r_A); | 649 | emit_stmem(K * 4, r_A); |
| 640 | break; | 650 | break; |
| 641 | case BPF_STX: | 651 | case BPF_STX: |
| 652 | seen |= SEEN_MEM | SEEN_XREG; | ||
| 642 | emit_stmem(K * 4, r_X); | 653 | emit_stmem(K * 4, r_X); |
| 643 | break; | 654 | break; |
| 644 | 655 | ||
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 7a801a310e37..0fcd9133790c 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
| @@ -33,8 +33,7 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ | |||
| 33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone | 33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone |
| 34 | 34 | ||
| 35 | ifeq ($(CONFIG_EFI_STUB), y) | 35 | ifeq ($(CONFIG_EFI_STUB), y) |
| 36 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ | 36 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o |
| 37 | $(objtree)/drivers/firmware/efi/libstub/lib.a | ||
| 38 | endif | 37 | endif |
| 39 | 38 | ||
| 40 | $(obj)/vmlinux: $(VMLINUX_OBJS) FORCE | 39 | $(obj)/vmlinux: $(VMLINUX_OBJS) FORCE |
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index fc6091abedb7..d39189ba7f8e 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c | |||
| @@ -183,12 +183,27 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size, | |||
| 183 | static bool mem_avoid_overlap(struct mem_vector *img) | 183 | static bool mem_avoid_overlap(struct mem_vector *img) |
| 184 | { | 184 | { |
| 185 | int i; | 185 | int i; |
| 186 | struct setup_data *ptr; | ||
| 186 | 187 | ||
| 187 | for (i = 0; i < MEM_AVOID_MAX; i++) { | 188 | for (i = 0; i < MEM_AVOID_MAX; i++) { |
| 188 | if (mem_overlaps(img, &mem_avoid[i])) | 189 | if (mem_overlaps(img, &mem_avoid[i])) |
| 189 | return true; | 190 | return true; |
| 190 | } | 191 | } |
| 191 | 192 | ||
| 193 | /* Avoid all entries in the setup_data linked list. */ | ||
| 194 | ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data; | ||
| 195 | while (ptr) { | ||
| 196 | struct mem_vector avoid; | ||
| 197 | |||
| 198 | avoid.start = (u64)ptr; | ||
| 199 | avoid.size = sizeof(*ptr) + ptr->len; | ||
| 200 | |||
| 201 | if (mem_overlaps(img, &avoid)) | ||
| 202 | return true; | ||
| 203 | |||
| 204 | ptr = (struct setup_data *)(unsigned long)ptr->next; | ||
| 205 | } | ||
| 206 | |||
| 192 | return false; | 207 | return false; |
| 193 | } | 208 | } |
| 194 | 209 | ||
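The mem_avoid_overlap() change above adds a second pass that walks the boot-time setup_data singly-linked list and rejects any candidate kernel placement that would land on one of its nodes. The following is a minimal, standalone C sketch of that pattern; the names (region, node, hits_any_node) and the plain `next` pointer are illustrative stand-ins, not the kernel's own types, which chain nodes by physical address.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the kernel's mem_vector and setup_data. */
    struct region { uint64_t start, size; };
    struct node   { struct node *next; uint32_t len; /* payload follows */ };

    /* Do two [start, start + size) regions intersect? */
    static bool overlaps(const struct region *a, const struct region *b)
    {
    	return a->start < b->start + b->size &&
    	       b->start < a->start + a->size;
    }

    /* Walk the list and reject any candidate image placement that would
     * clobber a node header or its trailing payload. */
    static bool hits_any_node(const struct region *img, const struct node *head)
    {
    	for (const struct node *n = head; n; n = n->next) {
    		struct region avoid = {
    			.start = (uint64_t)(uintptr_t)n,
    			.size  = sizeof(*n) + n->len,
    		};
    		if (overlaps(img, &avoid))
    			return true;
    	}
    	return false;
    }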
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index dca9842d8f91..de8eebd6f67c 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
| @@ -19,7 +19,10 @@ | |||
| 19 | 19 | ||
| 20 | static efi_system_table_t *sys_table; | 20 | static efi_system_table_t *sys_table; |
| 21 | 21 | ||
| 22 | struct efi_config *efi_early; | 22 | static struct efi_config *efi_early; |
| 23 | |||
| 24 | #define efi_call_early(f, ...) \ | ||
| 25 | efi_early->call(efi_early->f, __VA_ARGS__); | ||
| 23 | 26 | ||
| 24 | #define BOOT_SERVICES(bits) \ | 27 | #define BOOT_SERVICES(bits) \ |
| 25 | static void setup_boot_services##bits(struct efi_config *c) \ | 28 | static void setup_boot_services##bits(struct efi_config *c) \ |
| @@ -265,21 +268,25 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) | |||
| 265 | 268 | ||
| 266 | offset = offsetof(typeof(*out), output_string); | 269 | offset = offsetof(typeof(*out), output_string); |
| 267 | output_string = efi_early->text_output + offset; | 270 | output_string = efi_early->text_output + offset; |
| 271 | out = (typeof(out))(unsigned long)efi_early->text_output; | ||
| 268 | func = (u64 *)output_string; | 272 | func = (u64 *)output_string; |
| 269 | 273 | ||
| 270 | efi_early->call(*func, efi_early->text_output, str); | 274 | efi_early->call(*func, out, str); |
| 271 | } else { | 275 | } else { |
| 272 | struct efi_simple_text_output_protocol_32 *out; | 276 | struct efi_simple_text_output_protocol_32 *out; |
| 273 | u32 *func; | 277 | u32 *func; |
| 274 | 278 | ||
| 275 | offset = offsetof(typeof(*out), output_string); | 279 | offset = offsetof(typeof(*out), output_string); |
| 276 | output_string = efi_early->text_output + offset; | 280 | output_string = efi_early->text_output + offset; |
| 281 | out = (typeof(out))(unsigned long)efi_early->text_output; | ||
| 277 | func = (u32 *)output_string; | 282 | func = (u32 *)output_string; |
| 278 | 283 | ||
| 279 | efi_early->call(*func, efi_early->text_output, str); | 284 | efi_early->call(*func, out, str); |
| 280 | } | 285 | } |
| 281 | } | 286 | } |
| 282 | 287 | ||
| 288 | #include "../../../../drivers/firmware/efi/libstub/efi-stub-helper.c" | ||
| 289 | |||
| 283 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) | 290 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) |
| 284 | { | 291 | { |
| 285 | u8 first, len; | 292 | u8 first, len; |
| @@ -360,7 +367,7 @@ free_struct: | |||
| 360 | return status; | 367 | return status; |
| 361 | } | 368 | } |
| 362 | 369 | ||
| 363 | static efi_status_t | 370 | static void |
| 364 | setup_efi_pci32(struct boot_params *params, void **pci_handle, | 371 | setup_efi_pci32(struct boot_params *params, void **pci_handle, |
| 365 | unsigned long size) | 372 | unsigned long size) |
| 366 | { | 373 | { |
| @@ -403,8 +410,6 @@ setup_efi_pci32(struct boot_params *params, void **pci_handle, | |||
| 403 | data = (struct setup_data *)rom; | 410 | data = (struct setup_data *)rom; |
| 404 | 411 | ||
| 405 | } | 412 | } |
| 406 | |||
| 407 | return status; | ||
| 408 | } | 413 | } |
| 409 | 414 | ||
| 410 | static efi_status_t | 415 | static efi_status_t |
| @@ -463,7 +468,7 @@ free_struct: | |||
| 463 | 468 | ||
| 464 | } | 469 | } |
| 465 | 470 | ||
| 466 | static efi_status_t | 471 | static void |
| 467 | setup_efi_pci64(struct boot_params *params, void **pci_handle, | 472 | setup_efi_pci64(struct boot_params *params, void **pci_handle, |
| 468 | unsigned long size) | 473 | unsigned long size) |
| 469 | { | 474 | { |
| @@ -506,11 +511,18 @@ setup_efi_pci64(struct boot_params *params, void **pci_handle, | |||
| 506 | data = (struct setup_data *)rom; | 511 | data = (struct setup_data *)rom; |
| 507 | 512 | ||
| 508 | } | 513 | } |
| 509 | |||
| 510 | return status; | ||
| 511 | } | 514 | } |
| 512 | 515 | ||
| 513 | static efi_status_t setup_efi_pci(struct boot_params *params) | 516 | /* |
| 517 | * There's no way to return an informative status from this function, | ||
| 518 | * because any analysis (and printing of error messages) needs to be | ||
| 519 | * done directly at the EFI function call-site. | ||
| 520 | * | ||
| 521 | * For example, EFI_INVALID_PARAMETER could indicate a bug or maybe we | ||
| 522 | * just didn't find any PCI devices, but there's no way to tell outside | ||
| 523 | * the context of the call. | ||
| 524 | */ | ||
| 525 | static void setup_efi_pci(struct boot_params *params) | ||
| 514 | { | 526 | { |
| 515 | efi_status_t status; | 527 | efi_status_t status; |
| 516 | void **pci_handle = NULL; | 528 | void **pci_handle = NULL; |
| @@ -527,7 +539,7 @@ static efi_status_t setup_efi_pci(struct boot_params *params) | |||
| 527 | size, (void **)&pci_handle); | 539 | size, (void **)&pci_handle); |
| 528 | 540 | ||
| 529 | if (status != EFI_SUCCESS) | 541 | if (status != EFI_SUCCESS) |
| 530 | return status; | 542 | return; |
| 531 | 543 | ||
| 532 | status = efi_call_early(locate_handle, | 544 | status = efi_call_early(locate_handle, |
| 533 | EFI_LOCATE_BY_PROTOCOL, &pci_proto, | 545 | EFI_LOCATE_BY_PROTOCOL, &pci_proto, |
| @@ -538,13 +550,12 @@ static efi_status_t setup_efi_pci(struct boot_params *params) | |||
| 538 | goto free_handle; | 550 | goto free_handle; |
| 539 | 551 | ||
| 540 | if (efi_early->is64) | 552 | if (efi_early->is64) |
| 541 | status = setup_efi_pci64(params, pci_handle, size); | 553 | setup_efi_pci64(params, pci_handle, size); |
| 542 | else | 554 | else |
| 543 | status = setup_efi_pci32(params, pci_handle, size); | 555 | setup_efi_pci32(params, pci_handle, size); |
| 544 | 556 | ||
| 545 | free_handle: | 557 | free_handle: |
| 546 | efi_call_early(free_pool, pci_handle); | 558 | efi_call_early(free_pool, pci_handle); |
| 547 | return status; | ||
| 548 | } | 559 | } |
| 549 | 560 | ||
| 550 | static void | 561 | static void |
| @@ -1380,10 +1391,7 @@ struct boot_params *efi_main(struct efi_config *c, | |||
| 1380 | 1391 | ||
| 1381 | setup_graphics(boot_params); | 1392 | setup_graphics(boot_params); |
| 1382 | 1393 | ||
| 1383 | status = setup_efi_pci(boot_params); | 1394 | setup_efi_pci(boot_params); |
| 1384 | if (status != EFI_SUCCESS) { | ||
| 1385 | efi_printk(sys_table, "setup_efi_pci() failed!\n"); | ||
| 1386 | } | ||
| 1387 | 1395 | ||
| 1388 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, | 1396 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, |
| 1389 | sizeof(*gdt), (void **)&gdt); | 1397 | sizeof(*gdt), (void **)&gdt); |
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h index d487e727f1ec..c88c31ecad12 100644 --- a/arch/x86/boot/compressed/eboot.h +++ b/arch/x86/boot/compressed/eboot.h | |||
| @@ -103,4 +103,20 @@ struct efi_uga_draw_protocol { | |||
| 103 | void *blt; | 103 | void *blt; |
| 104 | }; | 104 | }; |
| 105 | 105 | ||
| 106 | struct efi_config { | ||
| 107 | u64 image_handle; | ||
| 108 | u64 table; | ||
| 109 | u64 allocate_pool; | ||
| 110 | u64 allocate_pages; | ||
| 111 | u64 get_memory_map; | ||
| 112 | u64 free_pool; | ||
| 113 | u64 free_pages; | ||
| 114 | u64 locate_handle; | ||
| 115 | u64 handle_protocol; | ||
| 116 | u64 exit_boot_services; | ||
| 117 | u64 text_output; | ||
| 118 | efi_status_t (*call)(unsigned long, ...); | ||
| 119 | bool is64; | ||
| 120 | } __packed; | ||
| 121 | |||
| 106 | #endif /* BOOT_COMPRESSED_EBOOT_H */ | 122 | #endif /* BOOT_COMPRESSED_EBOOT_H */ |
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index d6b8aa4c986c..cbed1407a5cd 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S | |||
| @@ -30,33 +30,6 @@ | |||
| 30 | #include <asm/boot.h> | 30 | #include <asm/boot.h> |
| 31 | #include <asm/asm-offsets.h> | 31 | #include <asm/asm-offsets.h> |
| 32 | 32 | ||
| 33 | /* | ||
| 34 | * Adjust our own GOT | ||
| 35 | * | ||
| 36 | * The relocation base must be in %ebx | ||
| 37 | * | ||
| 38 | * It is safe to call this macro more than once, because in some of the | ||
| 39 | * code paths multiple invocations are inevitable, e.g. via the efi* | ||
| 40 | * entry points. | ||
| 41 | * | ||
| 42 | * Relocation is only performed the first time. | ||
| 43 | */ | ||
| 44 | .macro FIXUP_GOT | ||
| 45 | cmpb $1, got_fixed(%ebx) | ||
| 46 | je 2f | ||
| 47 | |||
| 48 | leal _got(%ebx), %edx | ||
| 49 | leal _egot(%ebx), %ecx | ||
| 50 | 1: | ||
| 51 | cmpl %ecx, %edx | ||
| 52 | jae 2f | ||
| 53 | addl %ebx, (%edx) | ||
| 54 | addl $4, %edx | ||
| 55 | jmp 1b | ||
| 56 | 2: | ||
| 57 | movb $1, got_fixed(%ebx) | ||
| 58 | .endm | ||
| 59 | |||
| 60 | __HEAD | 33 | __HEAD |
| 61 | ENTRY(startup_32) | 34 | ENTRY(startup_32) |
| 62 | #ifdef CONFIG_EFI_STUB | 35 | #ifdef CONFIG_EFI_STUB |
| @@ -83,9 +56,6 @@ ENTRY(efi_pe_entry) | |||
| 83 | add %esi, 88(%eax) | 56 | add %esi, 88(%eax) |
| 84 | pushl %eax | 57 | pushl %eax |
| 85 | 58 | ||
| 86 | movl %esi, %ebx | ||
| 87 | FIXUP_GOT | ||
| 88 | |||
| 89 | call make_boot_params | 59 | call make_boot_params |
| 90 | cmpl $0, %eax | 60 | cmpl $0, %eax |
| 91 | je fail | 61 | je fail |
| @@ -111,10 +81,6 @@ ENTRY(efi32_stub_entry) | |||
| 111 | leal efi32_config(%esi), %eax | 81 | leal efi32_config(%esi), %eax |
| 112 | add %esi, 88(%eax) | 82 | add %esi, 88(%eax) |
| 113 | pushl %eax | 83 | pushl %eax |
| 114 | |||
| 115 | movl %esi, %ebx | ||
| 116 | FIXUP_GOT | ||
| 117 | |||
| 118 | 2: | 84 | 2: |
| 119 | call efi_main | 85 | call efi_main |
| 120 | cmpl $0, %eax | 86 | cmpl $0, %eax |
| @@ -224,7 +190,19 @@ relocated: | |||
| 224 | shrl $2, %ecx | 190 | shrl $2, %ecx |
| 225 | rep stosl | 191 | rep stosl |
| 226 | 192 | ||
| 227 | FIXUP_GOT | 193 | /* |
| 194 | * Adjust our own GOT | ||
| 195 | */ | ||
| 196 | leal _got(%ebx), %edx | ||
| 197 | leal _egot(%ebx), %ecx | ||
| 198 | 1: | ||
| 199 | cmpl %ecx, %edx | ||
| 200 | jae 2f | ||
| 201 | addl %ebx, (%edx) | ||
| 202 | addl $4, %edx | ||
| 203 | jmp 1b | ||
| 204 | 2: | ||
| 205 | |||
| 228 | /* | 206 | /* |
| 229 | * Do the decompression, and jump to the new kernel.. | 207 | * Do the decompression, and jump to the new kernel.. |
| 230 | */ | 208 | */ |
| @@ -247,12 +225,8 @@ relocated: | |||
| 247 | xorl %ebx, %ebx | 225 | xorl %ebx, %ebx |
| 248 | jmp *%eax | 226 | jmp *%eax |
| 249 | 227 | ||
| 250 | .data | ||
| 251 | /* Have we relocated the GOT? */ | ||
| 252 | got_fixed: | ||
| 253 | .byte 0 | ||
| 254 | |||
| 255 | #ifdef CONFIG_EFI_STUB | 228 | #ifdef CONFIG_EFI_STUB |
| 229 | .data | ||
| 256 | efi32_config: | 230 | efi32_config: |
| 257 | .fill 11,8,0 | 231 | .fill 11,8,0 |
| 258 | .long efi_call_phys | 232 | .long efi_call_phys |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 50f69c7eaaf4..2884e0c3e8a5 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
| @@ -32,33 +32,6 @@ | |||
| 32 | #include <asm/processor-flags.h> | 32 | #include <asm/processor-flags.h> |
| 33 | #include <asm/asm-offsets.h> | 33 | #include <asm/asm-offsets.h> |
| 34 | 34 | ||
| 35 | /* | ||
| 36 | * Adjust our own GOT | ||
| 37 | * | ||
| 38 | * The relocation base must be in %rbx | ||
| 39 | * | ||
| 40 | * It is safe to call this macro more than once, because in some of the | ||
| 41 | * code paths multiple invocations are inevitable, e.g. via the efi* | ||
| 42 | * entry points. | ||
| 43 | * | ||
| 44 | * Relocation is only performed the first time. | ||
| 45 | */ | ||
| 46 | .macro FIXUP_GOT | ||
| 47 | cmpb $1, got_fixed(%rip) | ||
| 48 | je 2f | ||
| 49 | |||
| 50 | leaq _got(%rip), %rdx | ||
| 51 | leaq _egot(%rip), %rcx | ||
| 52 | 1: | ||
| 53 | cmpq %rcx, %rdx | ||
| 54 | jae 2f | ||
| 55 | addq %rbx, (%rdx) | ||
| 56 | addq $8, %rdx | ||
| 57 | jmp 1b | ||
| 58 | 2: | ||
| 59 | movb $1, got_fixed(%rip) | ||
| 60 | .endm | ||
| 61 | |||
| 62 | __HEAD | 35 | __HEAD |
| 63 | .code32 | 36 | .code32 |
| 64 | ENTRY(startup_32) | 37 | ENTRY(startup_32) |
| @@ -279,13 +252,10 @@ ENTRY(efi_pe_entry) | |||
| 279 | subq $1b, %rbp | 252 | subq $1b, %rbp |
| 280 | 253 | ||
| 281 | /* | 254 | /* |
| 282 | * Relocate efi_config->call() and the GOT entries. | 255 | * Relocate efi_config->call(). |
| 283 | */ | 256 | */ |
| 284 | addq %rbp, efi64_config+88(%rip) | 257 | addq %rbp, efi64_config+88(%rip) |
| 285 | 258 | ||
| 286 | movq %rbp, %rbx | ||
| 287 | FIXUP_GOT | ||
| 288 | |||
| 289 | movq %rax, %rdi | 259 | movq %rax, %rdi |
| 290 | call make_boot_params | 260 | call make_boot_params |
| 291 | cmpq $0,%rax | 261 | cmpq $0,%rax |
| @@ -301,13 +271,10 @@ handover_entry: | |||
| 301 | subq $1b, %rbp | 271 | subq $1b, %rbp |
| 302 | 272 | ||
| 303 | /* | 273 | /* |
| 304 | * Relocate efi_config->call() and the GOT entries. | 274 | * Relocate efi_config->call(). |
| 305 | */ | 275 | */ |
| 306 | movq efi_config(%rip), %rax | 276 | movq efi_config(%rip), %rax |
| 307 | addq %rbp, 88(%rax) | 277 | addq %rbp, 88(%rax) |
| 308 | |||
| 309 | movq %rbp, %rbx | ||
| 310 | FIXUP_GOT | ||
| 311 | 2: | 278 | 2: |
| 312 | movq efi_config(%rip), %rdi | 279 | movq efi_config(%rip), %rdi |
| 313 | call efi_main | 280 | call efi_main |
| @@ -418,8 +385,19 @@ relocated: | |||
| 418 | shrq $3, %rcx | 385 | shrq $3, %rcx |
| 419 | rep stosq | 386 | rep stosq |
| 420 | 387 | ||
| 421 | FIXUP_GOT | 388 | /* |
| 422 | 389 | * Adjust our own GOT | |
| 390 | */ | ||
| 391 | leaq _got(%rip), %rdx | ||
| 392 | leaq _egot(%rip), %rcx | ||
| 393 | 1: | ||
| 394 | cmpq %rcx, %rdx | ||
| 395 | jae 2f | ||
| 396 | addq %rbx, (%rdx) | ||
| 397 | addq $8, %rdx | ||
| 398 | jmp 1b | ||
| 399 | 2: | ||
| 400 | |||
| 423 | /* | 401 | /* |
| 424 | * Do the decompression, and jump to the new kernel.. | 402 | * Do the decompression, and jump to the new kernel.. |
| 425 | */ | 403 | */ |
| @@ -459,10 +437,6 @@ gdt: | |||
| 459 | .quad 0x0000000000000000 /* TS continued */ | 437 | .quad 0x0000000000000000 /* TS continued */ |
| 460 | gdt_end: | 438 | gdt_end: |
| 461 | 439 | ||
| 462 | /* Have we relocated the GOT? */ | ||
| 463 | got_fixed: | ||
| 464 | .byte 0 | ||
| 465 | |||
| 466 | #ifdef CONFIG_EFI_STUB | 440 | #ifdef CONFIG_EFI_STUB |
| 467 | efi_config: | 441 | efi_config: |
| 468 | .quad 0 | 442 | .quad 0 |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 888950f29fd9..a7ccd57f19e4 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
| @@ -481,7 +481,7 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx, | |||
| 481 | crypto_inc(ctrblk, AES_BLOCK_SIZE); | 481 | crypto_inc(ctrblk, AES_BLOCK_SIZE); |
| 482 | } | 482 | } |
| 483 | 483 | ||
| 484 | #ifdef CONFIG_AS_AVX | 484 | #if 0 /* temporarily disabled due to failing crypto tests */ |
| 485 | static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, | 485 | static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, |
| 486 | const u8 *in, unsigned int len, u8 *iv) | 486 | const u8 *in, unsigned int len, u8 *iv) |
| 487 | { | 487 | { |
| @@ -1522,7 +1522,7 @@ static int __init aesni_init(void) | |||
| 1522 | aesni_gcm_dec_tfm = aesni_gcm_dec; | 1522 | aesni_gcm_dec_tfm = aesni_gcm_dec; |
| 1523 | } | 1523 | } |
| 1524 | aesni_ctr_enc_tfm = aesni_ctr_enc; | 1524 | aesni_ctr_enc_tfm = aesni_ctr_enc; |
| 1525 | #ifdef CONFIG_AS_AVX | 1525 | #if 0 /* temporarily disabled due to failing crypto tests */ |
| 1526 | if (cpu_has_avx) { | 1526 | if (cpu_has_avx) { |
| 1527 | /* optimize performance of ctr mode encryption transform */ | 1527 | /* optimize performance of ctr mode encryption transform */ |
| 1528 | aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm; | 1528 | aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm; |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 044a2fd3c5fe..0ec241ede5a2 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
| @@ -159,30 +159,6 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( | |||
| 159 | } | 159 | } |
| 160 | #endif /* CONFIG_EFI_MIXED */ | 160 | #endif /* CONFIG_EFI_MIXED */ |
| 161 | 161 | ||
| 162 | |||
| 163 | /* arch specific definitions used by the stub code */ | ||
| 164 | |||
| 165 | struct efi_config { | ||
| 166 | u64 image_handle; | ||
| 167 | u64 table; | ||
| 168 | u64 allocate_pool; | ||
| 169 | u64 allocate_pages; | ||
| 170 | u64 get_memory_map; | ||
| 171 | u64 free_pool; | ||
| 172 | u64 free_pages; | ||
| 173 | u64 locate_handle; | ||
| 174 | u64 handle_protocol; | ||
| 175 | u64 exit_boot_services; | ||
| 176 | u64 text_output; | ||
| 177 | efi_status_t (*call)(unsigned long, ...); | ||
| 178 | bool is64; | ||
| 179 | } __packed; | ||
| 180 | |||
| 181 | extern struct efi_config *efi_early; | ||
| 182 | |||
| 183 | #define efi_call_early(f, ...) \ | ||
| 184 | efi_early->call(efi_early->f, __VA_ARGS__); | ||
| 185 | |||
| 186 | extern bool efi_reboot_required(void); | 162 | extern bool efi_reboot_required(void); |
| 187 | 163 | ||
| 188 | #else | 164 | #else |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index b0910f97a3ea..ffb1733ac91f 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
| @@ -106,14 +106,14 @@ enum fixed_addresses { | |||
| 106 | __end_of_permanent_fixed_addresses, | 106 | __end_of_permanent_fixed_addresses, |
| 107 | 107 | ||
| 108 | /* | 108 | /* |
| 109 | * 256 temporary boot-time mappings, used by early_ioremap(), | 109 | * 512 temporary boot-time mappings, used by early_ioremap(), |
| 110 | * before ioremap() is functional. | 110 | * before ioremap() is functional. |
| 111 | * | 111 | * |
| 112 | * If necessary we round it up to the next 256 pages boundary so | 112 | * If necessary we round it up to the next 512 pages boundary so |
| 113 | * that we can have a single pgd entry and a single pte table: | 113 | * that we can have a single pgd entry and a single pte table: |
| 114 | */ | 114 | */ |
| 115 | #define NR_FIX_BTMAPS 64 | 115 | #define NR_FIX_BTMAPS 64 |
| 116 | #define FIX_BTMAPS_SLOTS 4 | 116 | #define FIX_BTMAPS_SLOTS 8 |
| 117 | #define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) | 117 | #define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) |
| 118 | FIX_BTMAP_END = | 118 | FIX_BTMAP_END = |
| 119 | (__end_of_permanent_fixed_addresses ^ | 119 | (__end_of_permanent_fixed_addresses ^ |
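For reference, the figure of 512 temporary boot-time mappings quoted in the updated comment follows directly from the constants in the hunk: TOTAL_FIX_BTMAPS is NR_FIX_BTMAPS times FIX_BTMAPS_SLOTS, i.e. 64 * 8. A trivial standalone C11 sketch (not kernel code) makes the arithmetic explicit:

    #define NR_FIX_BTMAPS      64
    #define FIX_BTMAPS_SLOTS    8
    #define TOTAL_FIX_BTMAPS   (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)   /* 64 * 8 = 512 */

    /* Compile-time check of the figure quoted in the updated comment. */
    _Static_assert(TOTAL_FIX_BTMAPS == 512,
                   "512 temporary boot-time mappings in one pgd entry / pte table");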
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2d872e08fab9..42a2dca984b3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -1284,6 +1284,9 @@ static void remove_siblinginfo(int cpu) | |||
| 1284 | 1284 | ||
| 1285 | for_each_cpu(sibling, cpu_sibling_mask(cpu)) | 1285 | for_each_cpu(sibling, cpu_sibling_mask(cpu)) |
| 1286 | cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); | 1286 | cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); |
| 1287 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) | ||
| 1288 | cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); | ||
| 1289 | cpumask_clear(cpu_llc_shared_mask(cpu)); | ||
| 1287 | cpumask_clear(cpu_sibling_mask(cpu)); | 1290 | cpumask_clear(cpu_sibling_mask(cpu)); |
| 1288 | cpumask_clear(cpu_core_mask(cpu)); | 1291 | cpumask_clear(cpu_core_mask(cpu)); |
| 1289 | c->phys_proc_id = 0; | 1292 | c->phys_proc_id = 0; |
diff --git a/block/blk-exec.c b/block/blk-exec.c index f4d27b12c90b..9924725fa50d 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c | |||
| @@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
| 56 | bool is_pm_resume; | 56 | bool is_pm_resume; |
| 57 | 57 | ||
| 58 | WARN_ON(irqs_disabled()); | 58 | WARN_ON(irqs_disabled()); |
| 59 | WARN_ON(rq->cmd_type == REQ_TYPE_FS); | ||
| 59 | 60 | ||
| 60 | rq->rq_disk = bd_disk; | 61 | rq->rq_disk = bd_disk; |
| 61 | rq->end_io = done; | 62 | rq->end_io = done; |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 383ea0cb1f0a..df8e1e09dd17 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -119,7 +119,16 @@ void blk_mq_freeze_queue(struct request_queue *q) | |||
| 119 | spin_unlock_irq(q->queue_lock); | 119 | spin_unlock_irq(q->queue_lock); |
| 120 | 120 | ||
| 121 | if (freeze) { | 121 | if (freeze) { |
| 122 | percpu_ref_kill(&q->mq_usage_counter); | 122 | /* |
| 123 | * XXX: Temporary kludge to work around SCSI blk-mq stall. | ||
| 124 | * SCSI synchronously creates and destroys many queues | ||
| 125 | * back-to-back during probe leading to lengthy stalls. | ||
| 126 | * This will be fixed by keeping ->mq_usage_counter in | ||
| 127 | * atomic mode until genhd registration, but, for now, | ||
| 128 | * let's work around using expedited synchronization. | ||
| 129 | */ | ||
| 130 | __percpu_ref_kill_expedited(&q->mq_usage_counter); | ||
| 131 | |||
| 123 | blk_mq_run_queues(q, false); | 132 | blk_mq_run_queues(q, false); |
| 124 | } | 133 | } |
| 125 | wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); | 134 | wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); |
| @@ -203,7 +212,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw) | |||
| 203 | if (tag != BLK_MQ_TAG_FAIL) { | 212 | if (tag != BLK_MQ_TAG_FAIL) { |
| 204 | rq = data->hctx->tags->rqs[tag]; | 213 | rq = data->hctx->tags->rqs[tag]; |
| 205 | 214 | ||
| 206 | rq->cmd_flags = 0; | ||
| 207 | if (blk_mq_tag_busy(data->hctx)) { | 215 | if (blk_mq_tag_busy(data->hctx)) { |
| 208 | rq->cmd_flags = REQ_MQ_INFLIGHT; | 216 | rq->cmd_flags = REQ_MQ_INFLIGHT; |
| 209 | atomic_inc(&data->hctx->nr_active); | 217 | atomic_inc(&data->hctx->nr_active); |
| @@ -258,6 +266,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, | |||
| 258 | 266 | ||
| 259 | if (rq->cmd_flags & REQ_MQ_INFLIGHT) | 267 | if (rq->cmd_flags & REQ_MQ_INFLIGHT) |
| 260 | atomic_dec(&hctx->nr_active); | 268 | atomic_dec(&hctx->nr_active); |
| 269 | rq->cmd_flags = 0; | ||
| 261 | 270 | ||
| 262 | clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); | 271 | clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); |
| 263 | blk_mq_put_tag(hctx, tag, &ctx->last_tag); | 272 | blk_mq_put_tag(hctx, tag, &ctx->last_tag); |
| @@ -393,6 +402,12 @@ static void blk_mq_start_request(struct request *rq, bool last) | |||
| 393 | blk_add_timer(rq); | 402 | blk_add_timer(rq); |
| 394 | 403 | ||
| 395 | /* | 404 | /* |
| 405 | * Ensure that ->deadline is visible before set the started | ||
| 406 | * flag and clear the completed flag. | ||
| 407 | */ | ||
| 408 | smp_mb__before_atomic(); | ||
| 409 | |||
| 410 | /* | ||
| 396 | * Mark us as started and clear complete. Complete might have been | 411 | * Mark us as started and clear complete. Complete might have been |
| 397 | * set if requeue raced with timeout, which then marked it as | 412 | * set if requeue raced with timeout, which then marked it as |
| 398 | * complete. So be sure to clear complete again when we start | 413 | * complete. So be sure to clear complete again when we start |
| @@ -473,7 +488,11 @@ static void blk_mq_requeue_work(struct work_struct *work) | |||
| 473 | blk_mq_insert_request(rq, false, false, false); | 488 | blk_mq_insert_request(rq, false, false, false); |
| 474 | } | 489 | } |
| 475 | 490 | ||
| 476 | blk_mq_run_queues(q, false); | 491 | /* |
| 492 | * Use the start variant of queue running here, so that running | ||
| 493 | * the requeue work will kick stopped queues. | ||
| 494 | */ | ||
| 495 | blk_mq_start_hw_queues(q); | ||
| 477 | } | 496 | } |
| 478 | 497 | ||
| 479 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) | 498 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) |
| @@ -957,14 +976,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, | |||
| 957 | 976 | ||
| 958 | hctx = q->mq_ops->map_queue(q, ctx->cpu); | 977 | hctx = q->mq_ops->map_queue(q, ctx->cpu); |
| 959 | 978 | ||
| 960 | if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) && | 979 | spin_lock(&ctx->lock); |
| 961 | !(rq->cmd_flags & (REQ_FLUSH_SEQ))) { | 980 | __blk_mq_insert_request(hctx, rq, at_head); |
| 962 | blk_insert_flush(rq); | 981 | spin_unlock(&ctx->lock); |
| 963 | } else { | ||
| 964 | spin_lock(&ctx->lock); | ||
| 965 | __blk_mq_insert_request(hctx, rq, at_head); | ||
| 966 | spin_unlock(&ctx->lock); | ||
| 967 | } | ||
| 968 | 982 | ||
| 969 | if (run_queue) | 983 | if (run_queue) |
| 970 | blk_mq_run_hw_queue(hctx, async); | 984 | blk_mq_run_hw_queue(hctx, async); |
| @@ -1404,6 +1418,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, | |||
| 1404 | left -= to_do * rq_size; | 1418 | left -= to_do * rq_size; |
| 1405 | for (j = 0; j < to_do; j++) { | 1419 | for (j = 0; j < to_do; j++) { |
| 1406 | tags->rqs[i] = p; | 1420 | tags->rqs[i] = p; |
| 1421 | tags->rqs[i]->atomic_flags = 0; | ||
| 1422 | tags->rqs[i]->cmd_flags = 0; | ||
| 1407 | if (set->ops->init_request) { | 1423 | if (set->ops->init_request) { |
| 1408 | if (set->ops->init_request(set->driver_data, | 1424 | if (set->ops->init_request(set->driver_data, |
| 1409 | tags->rqs[i], hctx_idx, i, | 1425 | tags->rqs[i], hctx_idx, i, |
| @@ -1956,7 +1972,6 @@ out_unwind: | |||
| 1956 | while (--i >= 0) | 1972 | while (--i >= 0) |
| 1957 | blk_mq_free_rq_map(set, set->tags[i], i); | 1973 | blk_mq_free_rq_map(set, set->tags[i], i); |
| 1958 | 1974 | ||
| 1959 | set->tags = NULL; | ||
| 1960 | return -ENOMEM; | 1975 | return -ENOMEM; |
| 1961 | } | 1976 | } |
| 1962 | 1977 | ||
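[Editor's sketch] Two details in the blk-mq changes are easy to miss: request flags are now reset on free (and when the rq map is initialized) rather than on allocate, and blk_mq_start_request() gains an explicit barrier so that ->deadline is published before the STARTED/COMPLETE bits are touched. A minimal sketch of that publish-then-flag idiom, with a hypothetical struct and flag names (kernel-style, assuming the usual bitops headers):

#include <linux/atomic.h>
#include <linux/bitops.h>

struct demo_req {
        unsigned long deadline;
        unsigned long atomic_flags;
};

#define DEMO_ATOM_STARTED       0
#define DEMO_ATOM_COMPLETE      1

static void demo_start_request(struct demo_req *rq, unsigned long deadline)
{
        rq->deadline = deadline;        /* write the payload first */

        /* Order the payload store before the following bitops, so a
         * reader that observes STARTED also observes the new deadline. */
        smp_mb__before_atomic();

        set_bit(DEMO_ATOM_STARTED, &rq->atomic_flags);
        clear_bit(DEMO_ATOM_COMPLETE, &rq->atomic_flags);
}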
diff --git a/block/genhd.c b/block/genhd.c index 09da5e4a8e03..e6723bd4d7a1 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
| @@ -445,8 +445,6 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) | |||
| 445 | */ | 445 | */ |
| 446 | void blk_free_devt(dev_t devt) | 446 | void blk_free_devt(dev_t devt) |
| 447 | { | 447 | { |
| 448 | might_sleep(); | ||
| 449 | |||
| 450 | if (devt == MKDEV(0, 0)) | 448 | if (devt == MKDEV(0, 0)) |
| 451 | return; | 449 | return; |
| 452 | 450 | ||
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index fddc1e86f9d0..b0ea767c8696 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
| @@ -419,7 +419,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
| 419 | adev->driver_data = pdata; | 419 | adev->driver_data = pdata; |
| 420 | pdev = acpi_create_platform_device(adev); | 420 | pdev = acpi_create_platform_device(adev); |
| 421 | if (!IS_ERR_OR_NULL(pdev)) { | 421 | if (!IS_ERR_OR_NULL(pdev)) { |
| 422 | device_enable_async_suspend(&pdev->dev); | ||
| 423 | return 1; | 422 | return 1; |
| 424 | } | 423 | } |
| 425 | 424 | ||
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 1f9aba5fb81f..2747279fbe3c 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
| @@ -254,6 +254,7 @@ struct acpi_create_field_info { | |||
| 254 | u32 field_bit_position; | 254 | u32 field_bit_position; |
| 255 | u32 field_bit_length; | 255 | u32 field_bit_length; |
| 256 | u16 resource_length; | 256 | u16 resource_length; |
| 257 | u16 pin_number_index; | ||
| 257 | u8 field_flags; | 258 | u8 field_flags; |
| 258 | u8 attribute; | 259 | u8 attribute; |
| 259 | u8 field_type; | 260 | u8 field_type; |
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 22fb6449d3d6..8abb393dafab 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h | |||
| @@ -264,6 +264,7 @@ struct acpi_object_region_field { | |||
| 264 | ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length; | 264 | ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length; |
| 265 | union acpi_operand_object *region_obj; /* Containing op_region object */ | 265 | union acpi_operand_object *region_obj; /* Containing op_region object */ |
| 266 | u8 *resource_buffer; /* resource_template for serial regions/fields */ | 266 | u8 *resource_buffer; /* resource_template for serial regions/fields */ |
| 267 | u16 pin_number_index; /* Index relative to previous Connection/Template */ | ||
| 267 | }; | 268 | }; |
| 268 | 269 | ||
| 269 | struct acpi_object_bank_field { | 270 | struct acpi_object_bank_field { |
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 3661c8e90540..c57666196672 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c | |||
| @@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, | |||
| 360 | */ | 360 | */ |
| 361 | info->resource_buffer = NULL; | 361 | info->resource_buffer = NULL; |
| 362 | info->connection_node = NULL; | 362 | info->connection_node = NULL; |
| 363 | info->pin_number_index = 0; | ||
| 363 | 364 | ||
| 364 | /* | 365 | /* |
| 365 | * A Connection() is either an actual resource descriptor (buffer) | 366 | * A Connection() is either an actual resource descriptor (buffer) |
| @@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, | |||
| 437 | } | 438 | } |
| 438 | 439 | ||
| 439 | info->field_bit_position += info->field_bit_length; | 440 | info->field_bit_position += info->field_bit_length; |
| 441 | info->pin_number_index++; /* Index relative to previous Connection() */ | ||
| 440 | break; | 442 | break; |
| 441 | 443 | ||
| 442 | default: | 444 | default: |
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 9957297d1580..8eb8575e8c16 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c | |||
| @@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
| 142 | union acpi_operand_object *region_obj2; | 142 | union acpi_operand_object *region_obj2; |
| 143 | void *region_context = NULL; | 143 | void *region_context = NULL; |
| 144 | struct acpi_connection_info *context; | 144 | struct acpi_connection_info *context; |
| 145 | acpi_physical_address address; | ||
| 145 | 146 | ||
| 146 | ACPI_FUNCTION_TRACE(ev_address_space_dispatch); | 147 | ACPI_FUNCTION_TRACE(ev_address_space_dispatch); |
| 147 | 148 | ||
| @@ -231,25 +232,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
| 231 | /* We have everything we need, we can invoke the address space handler */ | 232 | /* We have everything we need, we can invoke the address space handler */ |
| 232 | 233 | ||
| 233 | handler = handler_desc->address_space.handler; | 234 | handler = handler_desc->address_space.handler; |
| 234 | 235 | address = (region_obj->region.address + region_offset); | |
| 235 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
| 236 | "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", | ||
| 237 | ®ion_obj->region.handler->address_space, handler, | ||
| 238 | ACPI_FORMAT_NATIVE_UINT(region_obj->region.address + | ||
| 239 | region_offset), | ||
| 240 | acpi_ut_get_region_name(region_obj->region. | ||
| 241 | space_id))); | ||
| 242 | 236 | ||
| 243 | /* | 237 | /* |
| 244 | * Special handling for generic_serial_bus and general_purpose_io: | 238 | * Special handling for generic_serial_bus and general_purpose_io: |
| 245 | * There are three extra parameters that must be passed to the | 239 | * There are three extra parameters that must be passed to the |
| 246 | * handler via the context: | 240 | * handler via the context: |
| 247 | * 1) Connection buffer, a resource template from Connection() op. | 241 | * 1) Connection buffer, a resource template from Connection() op |
| 248 | * 2) Length of the above buffer. | 242 | * 2) Length of the above buffer |
| 249 | * 3) Actual access length from the access_as() op. | 243 | * 3) Actual access length from the access_as() op |
| 244 | * | ||
| 245 | * In addition, for general_purpose_io, the Address and bit_width fields | ||
| 246 | * are defined as follows: | ||
| 247 | * 1) Address is the pin number index of the field (bit offset from | ||
| 248 | * the previous Connection) | ||
| 249 | * 2) bit_width is the actual bit length of the field (number of pins) | ||
| 250 | */ | 250 | */ |
| 251 | if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) || | 251 | if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) && |
| 252 | (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) && | ||
| 253 | context && field_obj) { | 252 | context && field_obj) { |
| 254 | 253 | ||
| 255 | /* Get the Connection (resource_template) buffer */ | 254 | /* Get the Connection (resource_template) buffer */ |
| @@ -258,6 +257,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
| 258 | context->length = field_obj->field.resource_length; | 257 | context->length = field_obj->field.resource_length; |
| 259 | context->access_length = field_obj->field.access_length; | 258 | context->access_length = field_obj->field.access_length; |
| 260 | } | 259 | } |
| 260 | if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) && | ||
| 261 | context && field_obj) { | ||
| 262 | |||
| 263 | /* Get the Connection (resource_template) buffer */ | ||
| 264 | |||
| 265 | context->connection = field_obj->field.resource_buffer; | ||
| 266 | context->length = field_obj->field.resource_length; | ||
| 267 | context->access_length = field_obj->field.access_length; | ||
| 268 | address = field_obj->field.pin_number_index; | ||
| 269 | bit_width = field_obj->field.bit_length; | ||
| 270 | } | ||
| 271 | |||
| 272 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
| 273 | "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", | ||
| 274 | ®ion_obj->region.handler->address_space, handler, | ||
| 275 | ACPI_FORMAT_NATIVE_UINT(address), | ||
| 276 | acpi_ut_get_region_name(region_obj->region. | ||
| 277 | space_id))); | ||
| 261 | 278 | ||
| 262 | if (!(handler_desc->address_space.handler_flags & | 279 | if (!(handler_desc->address_space.handler_flags & |
| 263 | ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { | 280 | ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { |
| @@ -271,9 +288,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
| 271 | 288 | ||
| 272 | /* Call the handler */ | 289 | /* Call the handler */ |
| 273 | 290 | ||
| 274 | status = handler(function, | 291 | status = handler(function, address, bit_width, value, context, |
| 275 | (region_obj->region.address + region_offset), | ||
| 276 | bit_width, value, context, | ||
| 277 | region_obj2->extra.region_context); | 292 | region_obj2->extra.region_context); |
| 278 | 293 | ||
| 279 | if (ACPI_FAILURE(status)) { | 294 | if (ACPI_FAILURE(status)) { |
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index 6907ce0c704c..b994845ed359 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c | |||
| @@ -253,6 +253,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state, | |||
| 253 | buffer = &buffer_desc->integer.value; | 253 | buffer = &buffer_desc->integer.value; |
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && | ||
| 257 | (obj_desc->field.region_obj->region.space_id == | ||
| 258 | ACPI_ADR_SPACE_GPIO)) { | ||
| 259 | /* | ||
| 260 | * For GPIO (general_purpose_io), the Address will be the bit offset | ||
| 261 | * from the previous Connection() operator, making it effectively a | ||
| 262 | * pin number index. The bit_length is the length of the field, which | ||
| 263 | * is thus the number of pins. | ||
| 264 | */ | ||
| 265 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | ||
| 266 | "GPIO FieldRead [FROM]: Pin %u Bits %u\n", | ||
| 267 | obj_desc->field.pin_number_index, | ||
| 268 | obj_desc->field.bit_length)); | ||
| 269 | |||
| 270 | /* Lock entire transaction if requested */ | ||
| 271 | |||
| 272 | acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags); | ||
| 273 | |||
| 274 | /* Perform the write */ | ||
| 275 | |||
| 276 | status = acpi_ex_access_region(obj_desc, 0, | ||
| 277 | (u64 *)buffer, ACPI_READ); | ||
| 278 | acpi_ex_release_global_lock(obj_desc->common_field.field_flags); | ||
| 279 | if (ACPI_FAILURE(status)) { | ||
| 280 | acpi_ut_remove_reference(buffer_desc); | ||
| 281 | } else { | ||
| 282 | *ret_buffer_desc = buffer_desc; | ||
| 283 | } | ||
| 284 | return_ACPI_STATUS(status); | ||
| 285 | } | ||
| 286 | |||
| 256 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | 287 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, |
| 257 | "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n", | 288 | "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n", |
| 258 | obj_desc, obj_desc->common.type, buffer, | 289 | obj_desc, obj_desc->common.type, buffer, |
| @@ -413,6 +444,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, | |||
| 413 | 444 | ||
| 414 | *result_desc = buffer_desc; | 445 | *result_desc = buffer_desc; |
| 415 | return_ACPI_STATUS(status); | 446 | return_ACPI_STATUS(status); |
| 447 | } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && | ||
| 448 | (obj_desc->field.region_obj->region.space_id == | ||
| 449 | ACPI_ADR_SPACE_GPIO)) { | ||
| 450 | /* | ||
| 451 | * For GPIO (general_purpose_io), we will bypass the entire field | ||
| 452 | * mechanism and handoff the bit address and bit width directly to | ||
| 453 | * the handler. The Address will be the bit offset | ||
| 454 | * from the previous Connection() operator, making it effectively a | ||
| 455 | * pin number index. The bit_length is the length of the field, which | ||
| 456 | * is thus the number of pins. | ||
| 457 | */ | ||
| 458 | if (source_desc->common.type != ACPI_TYPE_INTEGER) { | ||
| 459 | return_ACPI_STATUS(AE_AML_OPERAND_TYPE); | ||
| 460 | } | ||
| 461 | |||
| 462 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | ||
| 463 | "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X [TO]: Pin %u Bits %u\n", | ||
| 464 | acpi_ut_get_type_name(source_desc->common. | ||
| 465 | type), | ||
| 466 | source_desc->common.type, | ||
| 467 | (u32)source_desc->integer.value, | ||
| 468 | obj_desc->field.pin_number_index, | ||
| 469 | obj_desc->field.bit_length)); | ||
| 470 | |||
| 471 | buffer = &source_desc->integer.value; | ||
| 472 | |||
| 473 | /* Lock entire transaction if requested */ | ||
| 474 | |||
| 475 | acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags); | ||
| 476 | |||
| 477 | /* Perform the write */ | ||
| 478 | |||
| 479 | status = acpi_ex_access_region(obj_desc, 0, | ||
| 480 | (u64 *)buffer, ACPI_WRITE); | ||
| 481 | acpi_ex_release_global_lock(obj_desc->common_field.field_flags); | ||
| 482 | return_ACPI_STATUS(status); | ||
| 416 | } | 483 | } |
| 417 | 484 | ||
| 418 | /* Get a pointer to the data to be written */ | 485 | /* Get a pointer to the data to be written */ |
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index ee3f872870bc..118e942005e5 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c | |||
| @@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) | |||
| 484 | obj_desc->field.resource_length = info->resource_length; | 484 | obj_desc->field.resource_length = info->resource_length; |
| 485 | } | 485 | } |
| 486 | 486 | ||
| 487 | obj_desc->field.pin_number_index = info->pin_number_index; | ||
| 488 | |||
| 487 | /* Allow full data read from EC address space */ | 489 | /* Allow full data read from EC address space */ |
| 488 | 490 | ||
| 489 | if ((obj_desc->field.region_obj->region.space_id == | 491 | if ((obj_desc->field.region_obj->region.space_id == |
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 76f7cff64594..c8ead9f97375 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c | |||
| @@ -99,6 +99,13 @@ static void container_device_detach(struct acpi_device *adev) | |||
| 99 | device_unregister(dev); | 99 | device_unregister(dev); |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | static void container_device_online(struct acpi_device *adev) | ||
| 103 | { | ||
| 104 | struct device *dev = acpi_driver_data(adev); | ||
| 105 | |||
| 106 | kobject_uevent(&dev->kobj, KOBJ_ONLINE); | ||
| 107 | } | ||
| 108 | |||
| 102 | static struct acpi_scan_handler container_handler = { | 109 | static struct acpi_scan_handler container_handler = { |
| 103 | .ids = container_device_ids, | 110 | .ids = container_device_ids, |
| 104 | .attach = container_device_attach, | 111 | .attach = container_device_attach, |
| @@ -106,6 +113,7 @@ static struct acpi_scan_handler container_handler = { | |||
| 106 | .hotplug = { | 113 | .hotplug = { |
| 107 | .enabled = true, | 114 | .enabled = true, |
| 108 | .demand_offline = true, | 115 | .demand_offline = true, |
| 116 | .notify_online = container_device_online, | ||
| 109 | }, | 117 | }, |
| 110 | }; | 118 | }; |
| 111 | 119 | ||
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 3bf7764659a4..ae44d8654c82 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
| @@ -130,7 +130,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias, | |||
| 130 | list_for_each_entry(id, &acpi_dev->pnp.ids, list) { | 130 | list_for_each_entry(id, &acpi_dev->pnp.ids, list) { |
| 131 | count = snprintf(&modalias[len], size, "%s:", id->id); | 131 | count = snprintf(&modalias[len], size, "%s:", id->id); |
| 132 | if (count < 0) | 132 | if (count < 0) |
| 133 | return EINVAL; | 133 | return -EINVAL; |
| 134 | if (count >= size) | 134 | if (count >= size) |
| 135 | return -ENOMEM; | 135 | return -ENOMEM; |
| 136 | len += count; | 136 | len += count; |
| @@ -2189,6 +2189,9 @@ static void acpi_bus_attach(struct acpi_device *device) | |||
| 2189 | ok: | 2189 | ok: |
| 2190 | list_for_each_entry(child, &device->children, node) | 2190 | list_for_each_entry(child, &device->children, node) |
| 2191 | acpi_bus_attach(child); | 2191 | acpi_bus_attach(child); |
| 2192 | |||
| 2193 | if (device->handler && device->handler->hotplug.notify_online) | ||
| 2194 | device->handler->hotplug.notify_online(device); | ||
| 2192 | } | 2195 | } |
| 2193 | 2196 | ||
| 2194 | /** | 2197 | /** |
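[Editor's sketch] The one-character scan.c fix above (EINVAL -> -EINVAL) is worth calling out: kernel code reports failures as negative errno values, and a bare positive EINVAL would look like a byte count to callers that only test for ret < 0. A small sketch of the corrected pattern, with illustrative names:

#include <linux/kernel.h>
#include <linux/errno.h>

static int demo_append_id(char *buf, int size, const char *id)
{
        int count = snprintf(buf, size, "%s:", id);

        if (count < 0)
                return -EINVAL;         /* negative errno, never bare EINVAL */
        if (count >= size)
                return -ENOMEM;         /* output was truncated */

        return count;                   /* >= 0: number of bytes written */
}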
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index fcbda105616e..8e7e18567ae6 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
| @@ -750,6 +750,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
| 750 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), | 750 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), |
| 751 | }, | 751 | }, |
| 752 | }, | 752 | }, |
| 753 | { | ||
| 754 | .callback = video_disable_native_backlight, | ||
| 755 | .ident = "ThinkPad X201s", | ||
| 756 | .matches = { | ||
| 757 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
| 758 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), | ||
| 759 | }, | ||
| 760 | }, | ||
| 753 | 761 | ||
| 754 | /* The native backlight controls do not work on some older machines */ | 762 | /* The native backlight controls do not work on some older machines */ |
| 755 | { | 763 | { |
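[Editor's sketch] The video.c hunk adds one more DMI-matched machine to the quirk list. For reference, a self-contained sketch of the same pattern — a dmi_system_id table with a callback, checked once at init — using made-up identifiers; the real table and the video_disable_native_backlight callback live in drivers/acpi/video.c:

#include <linux/dmi.h>
#include <linux/printk.h>

static int demo_backlight_quirk(const struct dmi_system_id *d)
{
        pr_info("applying backlight quirk for %s\n", d->ident);
        return 0;
}

static const struct dmi_system_id demo_quirk_table[] = {
        {
                .callback = demo_backlight_quirk,
                .ident = "Example Laptop 1234",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Example 1234"),
                },
        },
        { }     /* terminating empty entry */
};

/* Typically called once during driver init: */
static void demo_check_quirks(void)
{
        dmi_check_system(demo_quirk_table);
}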
diff --git a/drivers/base/core.c b/drivers/base/core.c index 20da3ad1696b..7b270a2e6ed5 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
| @@ -1211,6 +1211,9 @@ void device_del(struct device *dev) | |||
| 1211 | */ | 1211 | */ |
| 1212 | if (platform_notify_remove) | 1212 | if (platform_notify_remove) |
| 1213 | platform_notify_remove(dev); | 1213 | platform_notify_remove(dev); |
| 1214 | if (dev->bus) | ||
| 1215 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | ||
| 1216 | BUS_NOTIFY_REMOVED_DEVICE, dev); | ||
| 1214 | kobject_uevent(&dev->kobj, KOBJ_REMOVE); | 1217 | kobject_uevent(&dev->kobj, KOBJ_REMOVE); |
| 1215 | cleanup_device_parent(dev); | 1218 | cleanup_device_parent(dev); |
| 1216 | kobject_del(&dev->kobj); | 1219 | kobject_del(&dev->kobj); |
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h index 551e01061434..95254585db86 100644 --- a/drivers/bus/omap_l3_noc.h +++ b/drivers/bus/omap_l3_noc.h | |||
| @@ -188,31 +188,31 @@ static struct l3_flagmux_data omap_l3_flagmux_clk3 = { | |||
| 188 | }; | 188 | }; |
| 189 | 189 | ||
| 190 | static struct l3_masters_data omap_l3_masters[] = { | 190 | static struct l3_masters_data omap_l3_masters[] = { |
| 191 | { 0x0 , "MPU"}, | 191 | { 0x00, "MPU"}, |
| 192 | { 0x10, "CS_ADP"}, | 192 | { 0x04, "CS_ADP"}, |
| 193 | { 0x14, "xxx"}, | 193 | { 0x05, "xxx"}, |
| 194 | { 0x20, "DSP"}, | 194 | { 0x08, "DSP"}, |
| 195 | { 0x30, "IVAHD"}, | 195 | { 0x0C, "IVAHD"}, |
| 196 | { 0x40, "ISS"}, | 196 | { 0x10, "ISS"}, |
| 197 | { 0x44, "DucatiM3"}, | 197 | { 0x11, "DucatiM3"}, |
| 198 | { 0x48, "FaceDetect"}, | 198 | { 0x12, "FaceDetect"}, |
| 199 | { 0x50, "SDMA_Rd"}, | 199 | { 0x14, "SDMA_Rd"}, |
| 200 | { 0x54, "SDMA_Wr"}, | 200 | { 0x15, "SDMA_Wr"}, |
| 201 | { 0x58, "xxx"}, | 201 | { 0x16, "xxx"}, |
| 202 | { 0x5C, "xxx"}, | 202 | { 0x17, "xxx"}, |
| 203 | { 0x60, "SGX"}, | 203 | { 0x18, "SGX"}, |
| 204 | { 0x70, "DSS"}, | 204 | { 0x1C, "DSS"}, |
| 205 | { 0x80, "C2C"}, | 205 | { 0x20, "C2C"}, |
| 206 | { 0x88, "xxx"}, | 206 | { 0x22, "xxx"}, |
| 207 | { 0x8C, "xxx"}, | 207 | { 0x23, "xxx"}, |
| 208 | { 0x90, "HSI"}, | 208 | { 0x24, "HSI"}, |
| 209 | { 0xA0, "MMC1"}, | 209 | { 0x28, "MMC1"}, |
| 210 | { 0xA4, "MMC2"}, | 210 | { 0x29, "MMC2"}, |
| 211 | { 0xA8, "MMC6"}, | 211 | { 0x2A, "MMC6"}, |
| 212 | { 0xB0, "UNIPRO1"}, | 212 | { 0x2C, "UNIPRO1"}, |
| 213 | { 0xC0, "USBHOSTHS"}, | 213 | { 0x30, "USBHOSTHS"}, |
| 214 | { 0xC4, "USBOTGHS"}, | 214 | { 0x31, "USBOTGHS"}, |
| 215 | { 0xC8, "USBHOSTFS"} | 215 | { 0x32, "USBHOSTFS"} |
| 216 | }; | 216 | }; |
| 217 | 217 | ||
| 218 | static struct l3_flagmux_data *omap_l3_flagmux[] = { | 218 | static struct l3_flagmux_data *omap_l3_flagmux[] = { |
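[Editor's note] A pattern worth noting in the omap_l3_noc.h table above: every new master code is the old one shifted right by two (0x10 -> 0x04, 0x44 -> 0x11, 0xC8 -> 0x32, and so on), i.e. the table presumably now stores the raw master ID rather than the value pre-shifted into its register position. A quick, illustrative check of that relation in plain C:

#include <stdio.h>

int main(void)
{
        const unsigned int old_codes[] = { 0x10, 0x44, 0x90, 0xC8 };
        const unsigned int new_codes[] = { 0x04, 0x11, 0x24, 0x32 };
        int i;

        for (i = 0; i < 4; i++)
                printf("0x%02X >> 2 = 0x%02X (table now has 0x%02X)\n",
                       old_codes[i], old_codes[i] >> 2, new_codes[i]);
        return 0;
}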
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c index 0300c46ee247..32f7c1b36204 100644 --- a/drivers/clk/at91/clk-slow.c +++ b/drivers/clk/at91/clk-slow.c | |||
| @@ -447,7 +447,7 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np, | |||
| 447 | int i; | 447 | int i; |
| 448 | 448 | ||
| 449 | num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); | 449 | num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); |
| 450 | if (num_parents <= 0 || num_parents > 1) | 450 | if (num_parents != 2) |
| 451 | return; | 451 | return; |
| 452 | 452 | ||
| 453 | for (i = 0; i < num_parents; ++i) { | 453 | for (i = 0; i < num_parents; ++i) { |
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c index bac2ddf49d02..73a8d0ff530c 100644 --- a/drivers/clk/clk-efm32gg.c +++ b/drivers/clk/clk-efm32gg.c | |||
| @@ -22,7 +22,7 @@ static struct clk_onecell_data clk_data = { | |||
| 22 | .clk_num = ARRAY_SIZE(clk), | 22 | .clk_num = ARRAY_SIZE(clk), |
| 23 | }; | 23 | }; |
| 24 | 24 | ||
| 25 | static int __init efm32gg_cmu_init(struct device_node *np) | 25 | static void __init efm32gg_cmu_init(struct device_node *np) |
| 26 | { | 26 | { |
| 27 | int i; | 27 | int i; |
| 28 | void __iomem *base; | 28 | void __iomem *base; |
| @@ -33,7 +33,7 @@ static int __init efm32gg_cmu_init(struct device_node *np) | |||
| 33 | base = of_iomap(np, 0); | 33 | base = of_iomap(np, 0); |
| 34 | if (!base) { | 34 | if (!base) { |
| 35 | pr_warn("Failed to map address range for efm32gg,cmu node\n"); | 35 | pr_warn("Failed to map address range for efm32gg,cmu node\n"); |
| 36 | return -EADDRNOTAVAIL; | 36 | return; |
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL, | 39 | clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL, |
| @@ -76,6 +76,6 @@ static int __init efm32gg_cmu_init(struct device_node *np) | |||
| 76 | clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0", | 76 | clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0", |
| 77 | "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); | 77 | "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); |
| 78 | 78 | ||
| 79 | return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); | 79 | of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); |
| 80 | } | 80 | } |
| 81 | CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); | 81 | CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); |
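[Editor's sketch] The efm32gg change above follows from the CLK_OF_DECLARE() contract: the registered hook has the prototype void (*)(struct device_node *), so an int return value is silently ignored and errors can only be logged. A minimal sketch of a conforming init hook, with made-up names and a made-up compatible string:

#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/printk.h>

static void __init demo_cmu_init(struct device_node *np)
{
        void __iomem *base = of_iomap(np, 0);

        if (!base) {
                pr_warn("demo-cmu: failed to map registers\n");
                return;         /* nothing to return the error to anyway */
        }

        /* ... register clocks against 'base', then expose them, e.g.
         * of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); ... */
}
CLK_OF_DECLARE(demo_cmu, "vendor,demo-cmu", demo_cmu_init);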
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b76fa69b44cb..bacc06ff939b 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
| @@ -1467,6 +1467,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even | |||
| 1467 | static void clk_change_rate(struct clk *clk) | 1467 | static void clk_change_rate(struct clk *clk) |
| 1468 | { | 1468 | { |
| 1469 | struct clk *child; | 1469 | struct clk *child; |
| 1470 | struct hlist_node *tmp; | ||
| 1470 | unsigned long old_rate; | 1471 | unsigned long old_rate; |
| 1471 | unsigned long best_parent_rate = 0; | 1472 | unsigned long best_parent_rate = 0; |
| 1472 | bool skip_set_rate = false; | 1473 | bool skip_set_rate = false; |
| @@ -1502,7 +1503,11 @@ static void clk_change_rate(struct clk *clk) | |||
| 1502 | if (clk->notifier_count && old_rate != clk->rate) | 1503 | if (clk->notifier_count && old_rate != clk->rate) |
| 1503 | __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); | 1504 | __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); |
| 1504 | 1505 | ||
| 1505 | hlist_for_each_entry(child, &clk->children, child_node) { | 1506 | /* |
| 1507 | * Use safe iteration, as change_rate can actually swap parents | ||
| 1508 | * for certain clock types. | ||
| 1509 | */ | ||
| 1510 | hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) { | ||
| 1506 | /* Skip children who will be reparented to another clock */ | 1511 | /* Skip children who will be reparented to another clock */ |
| 1507 | if (child->new_parent && child->new_parent != clk) | 1512 | if (child->new_parent && child->new_parent != clk) |
| 1508 | continue; | 1513 | continue; |
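[Editor's sketch] The clk.c hunk switches the child walk to the _safe iterator because clk_change_rate() can re-parent a child mid-walk. A minimal sketch of why the extra cursor matters, with illustrative names:

#include <linux/list.h>

struct demo_clk {
        struct hlist_head children;
        struct hlist_node child_node;
};

static void demo_walk_children(struct demo_clk *clk)
{
        struct demo_clk *child;
        struct hlist_node *tmp;

        /* hlist_for_each_entry_safe() caches the next node in 'tmp'
         * before the body runs, so the body may unlink or re-parent
         * 'child' without breaking the traversal.  The plain
         * hlist_for_each_entry() would follow a pointer the body may
         * have just moved onto another list. */
        hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
                /* ... e.g. move 'child' under a different parent ... */
        }
}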
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index 4032e510d9aa..3b83b7dd78c7 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c | |||
| @@ -1095,7 +1095,7 @@ static struct clk_branch prng_clk = { | |||
| 1095 | }; | 1095 | }; |
| 1096 | 1096 | ||
| 1097 | static const struct freq_tbl clk_tbl_sdc[] = { | 1097 | static const struct freq_tbl clk_tbl_sdc[] = { |
| 1098 | { 144000, P_PXO, 5, 18,625 }, | 1098 | { 200000, P_PXO, 2, 2, 125 }, |
| 1099 | { 400000, P_PLL8, 4, 1, 240 }, | 1099 | { 400000, P_PLL8, 4, 1, 240 }, |
| 1100 | { 16000000, P_PLL8, 4, 1, 6 }, | 1100 | { 16000000, P_PLL8, 4, 1, 6 }, |
| 1101 | { 17070000, P_PLL8, 1, 2, 45 }, | 1101 | { 17070000, P_PLL8, 1, 2, 45 }, |
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 0d8c6c59a75e..b22a2d2f21e9 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c | |||
| @@ -545,7 +545,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { | |||
| 545 | GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS), | 545 | GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS), |
| 546 | GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS), | 546 | GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS), |
| 547 | GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS), | 547 | GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS), |
| 548 | GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS), | 548 | GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS), |
| 549 | GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS), | 549 | GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS), |
| 550 | GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS), | 550 | GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS), |
| 551 | GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS), | 551 | GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS), |
| @@ -603,7 +603,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { | |||
| 603 | GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS), | 603 | GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS), |
| 604 | GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS), | 604 | GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS), |
| 605 | GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS), | 605 | GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS), |
| 606 | GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS), | 606 | GATE(PCLK_I2C1, "pclk_i2c1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS), |
| 607 | GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS), | 607 | GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS), |
| 608 | GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS), | 608 | GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS), |
| 609 | GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS), | 609 | GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS), |
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 4a65b410e4d5..af29359677da 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c | |||
| @@ -139,9 +139,13 @@ static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate, | |||
| 139 | static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate, | 139 | static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate, |
| 140 | unsigned long parent_rate) | 140 | unsigned long parent_rate) |
| 141 | { | 141 | { |
| 142 | struct dra7_atl_desc *cdesc = to_atl_desc(hw); | 142 | struct dra7_atl_desc *cdesc; |
| 143 | u32 divider; | 143 | u32 divider; |
| 144 | 144 | ||
| 145 | if (!hw || !rate) | ||
| 146 | return -EINVAL; | ||
| 147 | |||
| 148 | cdesc = to_atl_desc(hw); | ||
| 145 | divider = ((parent_rate + rate / 2) / rate) - 1; | 149 | divider = ((parent_rate + rate / 2) / rate) - 1; |
| 146 | if (divider > DRA7_ATL_DIVIDER_MASK) | 150 | if (divider > DRA7_ATL_DIVIDER_MASK) |
| 147 | divider = DRA7_ATL_DIVIDER_MASK; | 151 | divider = DRA7_ATL_DIVIDER_MASK; |
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index e6aa10db7bba..a837f703be65 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c | |||
| @@ -211,11 +211,16 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, | |||
| 211 | static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, | 211 | static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, |
| 212 | unsigned long parent_rate) | 212 | unsigned long parent_rate) |
| 213 | { | 213 | { |
| 214 | struct clk_divider *divider = to_clk_divider(hw); | 214 | struct clk_divider *divider; |
| 215 | unsigned int div, value; | 215 | unsigned int div, value; |
| 216 | unsigned long flags = 0; | 216 | unsigned long flags = 0; |
| 217 | u32 val; | 217 | u32 val; |
| 218 | 218 | ||
| 219 | if (!hw || !rate) | ||
| 220 | return -EINVAL; | ||
| 221 | |||
| 222 | divider = to_clk_divider(hw); | ||
| 223 | |||
| 219 | div = DIV_ROUND_UP(parent_rate, rate); | 224 | div = DIV_ROUND_UP(parent_rate, rate); |
| 220 | value = _get_val(divider, div); | 225 | value = _get_val(divider, div); |
| 221 | 226 | ||
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index d9fdeddcef96..6e93e7f98358 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -1289,6 +1289,8 @@ err_get_freq: | |||
| 1289 | per_cpu(cpufreq_cpu_data, j) = NULL; | 1289 | per_cpu(cpufreq_cpu_data, j) = NULL; |
| 1290 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1290 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
| 1291 | 1291 | ||
| 1292 | up_write(&policy->rwsem); | ||
| 1293 | |||
| 1292 | if (cpufreq_driver->exit) | 1294 | if (cpufreq_driver->exit) |
| 1293 | cpufreq_driver->exit(policy); | 1295 | cpufreq_driver->exit(policy); |
| 1294 | err_set_policy_cpu: | 1296 | err_set_policy_cpu: |
| @@ -1656,6 +1658,8 @@ void cpufreq_suspend(void) | |||
| 1656 | if (!cpufreq_driver) | 1658 | if (!cpufreq_driver) |
| 1657 | return; | 1659 | return; |
| 1658 | 1660 | ||
| 1661 | cpufreq_suspended = true; | ||
| 1662 | |||
| 1659 | if (!has_target()) | 1663 | if (!has_target()) |
| 1660 | return; | 1664 | return; |
| 1661 | 1665 | ||
| @@ -1670,8 +1674,6 @@ void cpufreq_suspend(void) | |||
| 1670 | pr_err("%s: Failed to suspend driver: %p\n", __func__, | 1674 | pr_err("%s: Failed to suspend driver: %p\n", __func__, |
| 1671 | policy); | 1675 | policy); |
| 1672 | } | 1676 | } |
| 1673 | |||
| 1674 | cpufreq_suspended = true; | ||
| 1675 | } | 1677 | } |
| 1676 | 1678 | ||
| 1677 | /** | 1679 | /** |
| @@ -1687,13 +1689,13 @@ void cpufreq_resume(void) | |||
| 1687 | if (!cpufreq_driver) | 1689 | if (!cpufreq_driver) |
| 1688 | return; | 1690 | return; |
| 1689 | 1691 | ||
| 1692 | cpufreq_suspended = false; | ||
| 1693 | |||
| 1690 | if (!has_target()) | 1694 | if (!has_target()) |
| 1691 | return; | 1695 | return; |
| 1692 | 1696 | ||
| 1693 | pr_debug("%s: Resuming Governors\n", __func__); | 1697 | pr_debug("%s: Resuming Governors\n", __func__); |
| 1694 | 1698 | ||
| 1695 | cpufreq_suspended = false; | ||
| 1696 | |||
| 1697 | list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { | 1699 | list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { |
| 1698 | if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) | 1700 | if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) |
| 1699 | pr_err("%s: Failed to resume driver: %p\n", __func__, | 1701 | pr_err("%s: Failed to resume driver: %p\n", __func__, |
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 20dc848481e7..4d4e016d755b 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c | |||
| @@ -367,6 +367,10 @@ static int ccp_crypto_init(void) | |||
| 367 | { | 367 | { |
| 368 | int ret; | 368 | int ret; |
| 369 | 369 | ||
| 370 | ret = ccp_present(); | ||
| 371 | if (ret) | ||
| 372 | return ret; | ||
| 373 | |||
| 370 | spin_lock_init(&req_queue_lock); | 374 | spin_lock_init(&req_queue_lock); |
| 371 | INIT_LIST_HEAD(&req_queue.cmds); | 375 | INIT_LIST_HEAD(&req_queue.cmds); |
| 372 | req_queue.backlog = &req_queue.cmds; | 376 | req_queue.backlog = &req_queue.cmds; |
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index a7d110652a74..c6e6171eb6d3 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
| @@ -55,6 +55,20 @@ static inline void ccp_del_device(struct ccp_device *ccp) | |||
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | /** | 57 | /** |
| 58 | * ccp_present - check if a CCP device is present | ||
| 59 | * | ||
| 60 | * Returns zero if a CCP device is present, -ENODEV otherwise. | ||
| 61 | */ | ||
| 62 | int ccp_present(void) | ||
| 63 | { | ||
| 64 | if (ccp_get_device()) | ||
| 65 | return 0; | ||
| 66 | |||
| 67 | return -ENODEV; | ||
| 68 | } | ||
| 69 | EXPORT_SYMBOL_GPL(ccp_present); | ||
| 70 | |||
| 71 | /** | ||
| 58 | * ccp_enqueue_cmd - queue an operation for processing by the CCP | 72 | * ccp_enqueue_cmd - queue an operation for processing by the CCP |
| 59 | * | 73 | * |
| 60 | * @cmd: ccp_cmd struct to be processed | 74 | * @cmd: ccp_cmd struct to be processed |
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index b707f292b377..65dd1ff93d3b 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | |||
| @@ -66,7 +66,7 @@ | |||
| 66 | #define ADF_DH895XCC_ETR_MAX_BANKS 32 | 66 | #define ADF_DH895XCC_ETR_MAX_BANKS 32 |
| 67 | #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) | 67 | #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) |
| 68 | #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) | 68 | #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) |
| 69 | #define ADF_DH895XCC_SMIA0_MASK 0xFFFF | 69 | #define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF |
| 70 | #define ADF_DH895XCC_SMIA1_MASK 0x1 | 70 | #define ADF_DH895XCC_SMIA1_MASK 0x1 |
| 71 | /* Error detection and correction */ | 71 | /* Error detection and correction */ |
| 72 | #define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) | 72 | #define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) |
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 4cf7d9a950d7..bbea8243f9e8 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
| @@ -1017,6 +1017,11 @@ static int omap_dma_resume(struct omap_chan *c) | |||
| 1017 | return -EINVAL; | 1017 | return -EINVAL; |
| 1018 | 1018 | ||
| 1019 | if (c->paused) { | 1019 | if (c->paused) { |
| 1020 | mb(); | ||
| 1021 | |||
| 1022 | /* Restore channel link register */ | ||
| 1023 | omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl); | ||
| 1024 | |||
| 1020 | omap_dma_start(c, c->desc); | 1025 | omap_dma_start(c, c->desc); |
| 1021 | c->paused = false; | 1026 | c->paused = false; |
| 1022 | } | 1027 | } |
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index d8be608a9f3b..aef6a95adef5 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile | |||
| @@ -7,4 +7,4 @@ obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o | |||
| 7 | obj-$(CONFIG_UEFI_CPER) += cper.o | 7 | obj-$(CONFIG_UEFI_CPER) += cper.o |
| 8 | obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o | 8 | obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o |
| 9 | obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o | 9 | obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o |
| 10 | obj-$(CONFIG_EFI_STUB) += libstub/ | 10 | obj-$(CONFIG_EFI_ARM_STUB) += libstub/ |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d62eaaa75397..687476fb39e3 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
| @@ -377,8 +377,10 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, | |||
| 377 | struct gpio_chip *chip = achip->chip; | 377 | struct gpio_chip *chip = achip->chip; |
| 378 | struct acpi_resource_gpio *agpio; | 378 | struct acpi_resource_gpio *agpio; |
| 379 | struct acpi_resource *ares; | 379 | struct acpi_resource *ares; |
| 380 | int pin_index = (int)address; | ||
| 380 | acpi_status status; | 381 | acpi_status status; |
| 381 | bool pull_up; | 382 | bool pull_up; |
| 383 | int length; | ||
| 382 | int i; | 384 | int i; |
| 383 | 385 | ||
| 384 | status = acpi_buffer_to_resource(achip->conn_info.connection, | 386 | status = acpi_buffer_to_resource(achip->conn_info.connection, |
| @@ -400,7 +402,8 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, | |||
| 400 | return AE_BAD_PARAMETER; | 402 | return AE_BAD_PARAMETER; |
| 401 | } | 403 | } |
| 402 | 404 | ||
| 403 | for (i = 0; i < agpio->pin_table_length; i++) { | 405 | length = min(agpio->pin_table_length, (u16)(pin_index + bits)); |
| 406 | for (i = pin_index; i < length; ++i) { | ||
| 404 | unsigned pin = agpio->pin_table[i]; | 407 | unsigned pin = agpio->pin_table[i]; |
| 405 | struct acpi_gpio_connection *conn; | 408 | struct acpi_gpio_connection *conn; |
| 406 | struct gpio_desc *desc; | 409 | struct gpio_desc *desc; |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 15cc0bb65dda..c68d037de656 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -413,12 +413,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, | |||
| 413 | return; | 413 | return; |
| 414 | } | 414 | } |
| 415 | 415 | ||
| 416 | irq_set_chained_handler(parent_irq, parent_handler); | ||
| 417 | /* | 416 | /* |
| 418 | * The parent irqchip is already using the chip_data for this | 417 | * The parent irqchip is already using the chip_data for this |
| 419 | * irqchip, so our callbacks simply use the handler_data. | 418 | * irqchip, so our callbacks simply use the handler_data. |
| 420 | */ | 419 | */ |
| 421 | irq_set_handler_data(parent_irq, gpiochip); | 420 | irq_set_handler_data(parent_irq, gpiochip); |
| 421 | irq_set_chained_handler(parent_irq, parent_handler); | ||
| 422 | } | 422 | } |
| 423 | EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip); | 423 | EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip); |
| 424 | 424 | ||
| @@ -1674,7 +1674,7 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, | |||
| 1674 | set_bit(FLAG_OPEN_SOURCE, &desc->flags); | 1674 | set_bit(FLAG_OPEN_SOURCE, &desc->flags); |
| 1675 | 1675 | ||
| 1676 | /* No particular flag request, return here... */ | 1676 | /* No particular flag request, return here... */ |
| 1677 | if (flags & GPIOD_FLAGS_BIT_DIR_SET) | 1677 | if (!(flags & GPIOD_FLAGS_BIT_DIR_SET)) |
| 1678 | return desc; | 1678 | return desc; |
| 1679 | 1679 | ||
| 1680 | /* Process flags */ | 1680 | /* Process flags */ |
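[Editor's sketch] Two small but important gpiolib details above: the handler data is now installed before the chained handler (so an interrupt firing in between cannot observe unset data), and the early-return test was inverted — the comment says "no particular flag request, return here", so the return must fire when the direction bit is absent. A tiny sketch of the corrected test, with a hypothetical flag name:

#define DEMO_FLAGS_BIT_DIR_SET  (1UL << 0)

static int demo_request(unsigned long flags)
{
        /* Return early only when NO direction was requested... */
        if (!(flags & DEMO_FLAGS_BIT_DIR_SET))
                return 0;

        /* ...otherwise fall through and configure the direction. */
        return 1;
}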
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index dea99d92fb4a..4b7ed5289217 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c | |||
| @@ -709,11 +709,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring) | |||
| 709 | BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count)); | 709 | BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count)); |
| 710 | BUG_ON(!validate_regs_sorted(ring)); | 710 | BUG_ON(!validate_regs_sorted(ring)); |
| 711 | 711 | ||
| 712 | ret = init_hash_table(ring, cmd_tables, cmd_table_count); | 712 | if (hash_empty(ring->cmd_hash)) { |
| 713 | if (ret) { | 713 | ret = init_hash_table(ring, cmd_tables, cmd_table_count); |
| 714 | DRM_ERROR("CMD: cmd_parser_init failed!\n"); | 714 | if (ret) { |
| 715 | fini_hash_table(ring); | 715 | DRM_ERROR("CMD: cmd_parser_init failed!\n"); |
| 716 | return ret; | 716 | fini_hash_table(ring); |
| 717 | return ret; | ||
| 718 | } | ||
| 717 | } | 719 | } |
| 718 | 720 | ||
| 719 | ring->needs_cmd_parser = true; | 721 | ring->needs_cmd_parser = true; |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ca34de7f6a7b..5a9de21637b7 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -732,7 +732,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, | |||
| 732 | if (tmp & HDMI_MODE_SELECT_HDMI) | 732 | if (tmp & HDMI_MODE_SELECT_HDMI) |
| 733 | pipe_config->has_hdmi_sink = true; | 733 | pipe_config->has_hdmi_sink = true; |
| 734 | 734 | ||
| 735 | if (tmp & HDMI_MODE_SELECT_HDMI) | 735 | if (tmp & SDVO_AUDIO_ENABLE) |
| 736 | pipe_config->has_audio = true; | 736 | pipe_config->has_audio = true; |
| 737 | 737 | ||
| 738 | if (!HAS_PCH_SPLIT(dev) && | 738 | if (!HAS_PCH_SPLIT(dev) && |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index fa9565957f9d..3d546c606b43 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -4803,7 +4803,7 @@ struct bonaire_mqd | |||
| 4803 | */ | 4803 | */ |
| 4804 | static int cik_cp_compute_resume(struct radeon_device *rdev) | 4804 | static int cik_cp_compute_resume(struct radeon_device *rdev) |
| 4805 | { | 4805 | { |
| 4806 | int r, i, idx; | 4806 | int r, i, j, idx; |
| 4807 | u32 tmp; | 4807 | u32 tmp; |
| 4808 | bool use_doorbell = true; | 4808 | bool use_doorbell = true; |
| 4809 | u64 hqd_gpu_addr; | 4809 | u64 hqd_gpu_addr; |
| @@ -4922,7 +4922,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
| 4922 | mqd->queue_state.cp_hqd_pq_wptr= 0; | 4922 | mqd->queue_state.cp_hqd_pq_wptr= 0; |
| 4923 | if (RREG32(CP_HQD_ACTIVE) & 1) { | 4923 | if (RREG32(CP_HQD_ACTIVE) & 1) { |
| 4924 | WREG32(CP_HQD_DEQUEUE_REQUEST, 1); | 4924 | WREG32(CP_HQD_DEQUEUE_REQUEST, 1); |
| 4925 | for (i = 0; i < rdev->usec_timeout; i++) { | 4925 | for (j = 0; j < rdev->usec_timeout; j++) { |
| 4926 | if (!(RREG32(CP_HQD_ACTIVE) & 1)) | 4926 | if (!(RREG32(CP_HQD_ACTIVE) & 1)) |
| 4927 | break; | 4927 | break; |
| 4928 | udelay(1); | 4928 | udelay(1); |
| @@ -7751,17 +7751,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev) | |||
| 7751 | wptr = RREG32(IH_RB_WPTR); | 7751 | wptr = RREG32(IH_RB_WPTR); |
| 7752 | 7752 | ||
| 7753 | if (wptr & RB_OVERFLOW) { | 7753 | if (wptr & RB_OVERFLOW) { |
| 7754 | wptr &= ~RB_OVERFLOW; | ||
| 7754 | /* When a ring buffer overflow happen start parsing interrupt | 7755 | /* When a ring buffer overflow happen start parsing interrupt |
| 7755 | * from the last not overwritten vector (wptr + 16). Hopefully | 7756 | * from the last not overwritten vector (wptr + 16). Hopefully |
| 7756 | * this should allow us to catchup. | 7757 | * this should allow us to catchup. |
| 7757 | */ | 7758 | */ |
| 7758 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | 7759 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", |
| 7759 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | 7760 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); |
| 7760 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | 7761 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; |
| 7761 | tmp = RREG32(IH_RB_CNTL); | 7762 | tmp = RREG32(IH_RB_CNTL); |
| 7762 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 7763 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
| 7763 | WREG32(IH_RB_CNTL, tmp); | 7764 | WREG32(IH_RB_CNTL, tmp); |
| 7764 | wptr &= ~RB_OVERFLOW; | ||
| 7765 | } | 7765 | } |
| 7766 | return (wptr & rdev->ih.ptr_mask); | 7766 | return (wptr & rdev->ih.ptr_mask); |
| 7767 | } | 7767 | } |
| @@ -8251,6 +8251,7 @@ restart_ih: | |||
| 8251 | /* wptr/rptr are in bytes! */ | 8251 | /* wptr/rptr are in bytes! */ |
| 8252 | rptr += 16; | 8252 | rptr += 16; |
| 8253 | rptr &= rdev->ih.ptr_mask; | 8253 | rptr &= rdev->ih.ptr_mask; |
| 8254 | WREG32(IH_RB_RPTR, rptr); | ||
| 8254 | } | 8255 | } |
| 8255 | if (queue_hotplug) | 8256 | if (queue_hotplug) |
| 8256 | schedule_work(&rdev->hotplug_work); | 8257 | schedule_work(&rdev->hotplug_work); |
| @@ -8259,7 +8260,6 @@ restart_ih: | |||
| 8259 | if (queue_thermal) | 8260 | if (queue_thermal) |
| 8260 | schedule_work(&rdev->pm.dpm.thermal.work); | 8261 | schedule_work(&rdev->pm.dpm.thermal.work); |
| 8261 | rdev->ih.rptr = rptr; | 8262 | rdev->ih.rptr = rptr; |
| 8262 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
| 8263 | atomic_set(&rdev->ih.lock, 0); | 8263 | atomic_set(&rdev->ih.lock, 0); |
| 8264 | 8264 | ||
| 8265 | /* make sure wptr hasn't changed while processing */ | 8265 | /* make sure wptr hasn't changed while processing */ |
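[Editor's sketch] The radeon interrupt-ring changes (repeated for Evergreen and R600 in the next two files) do three related things: clear the RB_OVERFLOW bit before the write pointer is used in any arithmetic, print the recovery pointers masked and in hex, and write IH_RB_RPTR back on every loop iteration instead of once at the end. A toy model of the pointer handling, with hypothetical constants:

#include <stdint.h>

#define RING_SIZE       4096u                   /* bytes, power of two */
#define PTR_MASK        (RING_SIZE - 1)
#define RB_OVERFLOW     (1u << 31)              /* hypothetical overflow flag bit */

/* Each interrupt vector occupies 16 bytes, so advancing is +16 & mask. */
static uint32_t demo_next_rptr(uint32_t rptr)
{
        return (rptr + 16) & PTR_MASK;
}

/* On overflow, clear the flag bit *before* deriving a new read pointer
 * from wptr, otherwise the stale flag leaks into the offset math. */
static uint32_t demo_sanitize_wptr(uint32_t wptr, uint32_t *rptr)
{
        if (wptr & RB_OVERFLOW) {
                wptr &= ~RB_OVERFLOW;
                *rptr = (wptr + 16) & PTR_MASK; /* resume past the clobbered slot */
        }
        return wptr & PTR_MASK;
}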
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index dbca60c7d097..e50807c29f69 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -4749,17 +4749,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | |||
| 4749 | wptr = RREG32(IH_RB_WPTR); | 4749 | wptr = RREG32(IH_RB_WPTR); |
| 4750 | 4750 | ||
| 4751 | if (wptr & RB_OVERFLOW) { | 4751 | if (wptr & RB_OVERFLOW) { |
| 4752 | wptr &= ~RB_OVERFLOW; | ||
| 4752 | /* When a ring buffer overflow happen start parsing interrupt | 4753 | /* When a ring buffer overflow happen start parsing interrupt |
| 4753 | * from the last not overwritten vector (wptr + 16). Hopefully | 4754 | * from the last not overwritten vector (wptr + 16). Hopefully |
| 4754 | * this should allow us to catchup. | 4755 | * this should allow us to catchup. |
| 4755 | */ | 4756 | */ |
| 4756 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | 4757 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", |
| 4757 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | 4758 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); |
| 4758 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | 4759 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; |
| 4759 | tmp = RREG32(IH_RB_CNTL); | 4760 | tmp = RREG32(IH_RB_CNTL); |
| 4760 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 4761 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
| 4761 | WREG32(IH_RB_CNTL, tmp); | 4762 | WREG32(IH_RB_CNTL, tmp); |
| 4762 | wptr &= ~RB_OVERFLOW; | ||
| 4763 | } | 4763 | } |
| 4764 | return (wptr & rdev->ih.ptr_mask); | 4764 | return (wptr & rdev->ih.ptr_mask); |
| 4765 | } | 4765 | } |
| @@ -5137,6 +5137,7 @@ restart_ih: | |||
| 5137 | /* wptr/rptr are in bytes! */ | 5137 | /* wptr/rptr are in bytes! */ |
| 5138 | rptr += 16; | 5138 | rptr += 16; |
| 5139 | rptr &= rdev->ih.ptr_mask; | 5139 | rptr &= rdev->ih.ptr_mask; |
| 5140 | WREG32(IH_RB_RPTR, rptr); | ||
| 5140 | } | 5141 | } |
| 5141 | if (queue_hotplug) | 5142 | if (queue_hotplug) |
| 5142 | schedule_work(&rdev->hotplug_work); | 5143 | schedule_work(&rdev->hotplug_work); |
| @@ -5145,7 +5146,6 @@ restart_ih: | |||
| 5145 | if (queue_thermal && rdev->pm.dpm_enabled) | 5146 | if (queue_thermal && rdev->pm.dpm_enabled) |
| 5146 | schedule_work(&rdev->pm.dpm.thermal.work); | 5147 | schedule_work(&rdev->pm.dpm.thermal.work); |
| 5147 | rdev->ih.rptr = rptr; | 5148 | rdev->ih.rptr = rptr; |
| 5148 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
| 5149 | atomic_set(&rdev->ih.lock, 0); | 5149 | atomic_set(&rdev->ih.lock, 0); |
| 5150 | 5150 | ||
| 5151 | /* make sure wptr hasn't changed while processing */ | 5151 | /* make sure wptr hasn't changed while processing */ |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 3cfb50056f7a..ea5c9af722ef 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -3792,17 +3792,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev) | |||
| 3792 | wptr = RREG32(IH_RB_WPTR); | 3792 | wptr = RREG32(IH_RB_WPTR); |
| 3793 | 3793 | ||
| 3794 | if (wptr & RB_OVERFLOW) { | 3794 | if (wptr & RB_OVERFLOW) { |
| 3795 | wptr &= ~RB_OVERFLOW; | ||
| 3795 | /* When a ring buffer overflow happen start parsing interrupt | 3796 | /* When a ring buffer overflow happen start parsing interrupt |
| 3796 | * from the last not overwritten vector (wptr + 16). Hopefully | 3797 | * from the last not overwritten vector (wptr + 16). Hopefully |
| 3797 | * this should allow us to catchup. | 3798 | * this should allow us to catchup. |
| 3798 | */ | 3799 | */ |
| 3799 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | 3800 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", |
| 3800 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | 3801 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); |
| 3801 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | 3802 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; |
| 3802 | tmp = RREG32(IH_RB_CNTL); | 3803 | tmp = RREG32(IH_RB_CNTL); |
| 3803 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 3804 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
| 3804 | WREG32(IH_RB_CNTL, tmp); | 3805 | WREG32(IH_RB_CNTL, tmp); |
| 3805 | wptr &= ~RB_OVERFLOW; | ||
| 3806 | } | 3806 | } |
| 3807 | return (wptr & rdev->ih.ptr_mask); | 3807 | return (wptr & rdev->ih.ptr_mask); |
| 3808 | } | 3808 | } |
| @@ -4048,6 +4048,7 @@ restart_ih: | |||
| 4048 | /* wptr/rptr are in bytes! */ | 4048 | /* wptr/rptr are in bytes! */ |
| 4049 | rptr += 16; | 4049 | rptr += 16; |
| 4050 | rptr &= rdev->ih.ptr_mask; | 4050 | rptr &= rdev->ih.ptr_mask; |
| 4051 | WREG32(IH_RB_RPTR, rptr); | ||
| 4051 | } | 4052 | } |
| 4052 | if (queue_hotplug) | 4053 | if (queue_hotplug) |
| 4053 | schedule_work(&rdev->hotplug_work); | 4054 | schedule_work(&rdev->hotplug_work); |
| @@ -4056,7 +4057,6 @@ restart_ih: | |||
| 4056 | if (queue_thermal && rdev->pm.dpm_enabled) | 4057 | if (queue_thermal && rdev->pm.dpm_enabled) |
| 4057 | schedule_work(&rdev->pm.dpm.thermal.work); | 4058 | schedule_work(&rdev->pm.dpm.thermal.work); |
| 4058 | rdev->ih.rptr = rptr; | 4059 | rdev->ih.rptr = rptr; |
| 4059 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
| 4060 | atomic_set(&rdev->ih.lock, 0); | 4060 | atomic_set(&rdev->ih.lock, 0); |
| 4061 | 4061 | ||
| 4062 | /* make sure wptr hasn't changed while processing */ | 4062 | /* make sure wptr hasn't changed while processing */ |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5f05b4c84338..3247bfd14410 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -106,6 +106,7 @@ extern int radeon_vm_block_size; | |||
| 106 | extern int radeon_deep_color; | 106 | extern int radeon_deep_color; |
| 107 | extern int radeon_use_pflipirq; | 107 | extern int radeon_use_pflipirq; |
| 108 | extern int radeon_bapm; | 108 | extern int radeon_bapm; |
| 109 | extern int radeon_backlight; | ||
| 109 | 110 | ||
| 110 | /* | 111 | /* |
| 111 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 112 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 75223dd3a8a3..12c8329644c4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -123,6 +123,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { | |||
| 123 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 | 123 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 |
| 124 | */ | 124 | */ |
| 125 | { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX }, | 125 | { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX }, |
| 126 | /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU | ||
| 127 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 | ||
| 128 | */ | ||
| 129 | { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, | ||
| 126 | /* macbook pro 8.2 */ | 130 | /* macbook pro 8.2 */ |
| 127 | { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, | 131 | { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, |
| 128 | { 0, 0, 0, 0, 0 }, | 132 | { 0, 0, 0, 0, 0 }, |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 4126fd0937a2..f9d17b29b343 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -181,6 +181,7 @@ int radeon_vm_block_size = -1; | |||
| 181 | int radeon_deep_color = 0; | 181 | int radeon_deep_color = 0; |
| 182 | int radeon_use_pflipirq = 2; | 182 | int radeon_use_pflipirq = 2; |
| 183 | int radeon_bapm = -1; | 183 | int radeon_bapm = -1; |
| 184 | int radeon_backlight = -1; | ||
| 184 | 185 | ||
| 185 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 186 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
| 186 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 187 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
| @@ -263,6 +264,9 @@ module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444); | |||
| 263 | MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)"); | 264 | MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)"); |
| 264 | module_param_named(bapm, radeon_bapm, int, 0444); | 265 | module_param_named(bapm, radeon_bapm, int, 0444); |
| 265 | 266 | ||
| 267 | MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)"); | ||
| 268 | module_param_named(backlight, radeon_backlight, int, 0444); | ||
| 269 | |||
| 266 | static struct pci_device_id pciidlist[] = { | 270 | static struct pci_device_id pciidlist[] = { |
| 267 | radeon_PCI_IDS | 271 | radeon_PCI_IDS |
| 268 | }; | 272 | }; |
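The new radeon_backlight parameter mirrors the existing bapm knob: -1 (the default) means auto, 0 forces the native backlight interface off and 1 forces it on. With permissions 0444 it is read-only at runtime, so it is set at load time (for example radeon.backlight=0 on the kernel command line, or backlight=0 passed to modprobe) and can be inspected under /sys/module/radeon/parameters/backlight. How the value is consumed is shown in the radeon_encoders.c hunk below.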
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 3c2094c25b53..15edf23b465c 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -158,10 +158,43 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8 | |||
| 158 | return ret; | 158 | return ret; |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, | ||
| 162 | struct drm_connector *connector) | ||
| 163 | { | ||
| 164 | struct drm_device *dev = radeon_encoder->base.dev; | ||
| 165 | struct radeon_device *rdev = dev->dev_private; | ||
| 166 | bool use_bl = false; | ||
| 167 | |||
| 168 | if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))) | ||
| 169 | return; | ||
| 170 | |||
| 171 | if (radeon_backlight == 0) { | ||
| 172 | return; | ||
| 173 | } else if (radeon_backlight == 1) { | ||
| 174 | use_bl = true; | ||
| 175 | } else if (radeon_backlight == -1) { | ||
| 176 | /* Quirks */ | ||
| 177 | /* Amilo Xi 2550 only works with acpi bl */ | ||
| 178 | if ((rdev->pdev->device == 0x9583) && | ||
| 179 | (rdev->pdev->subsystem_vendor == 0x1734) && | ||
| 180 | (rdev->pdev->subsystem_device == 0x1107)) | ||
| 181 | use_bl = false; | ||
| 182 | else | ||
| 183 | use_bl = true; | ||
| 184 | } | ||
| 185 | |||
| 186 | if (use_bl) { | ||
| 187 | if (rdev->is_atom_bios) | ||
| 188 | radeon_atom_backlight_init(radeon_encoder, connector); | ||
| 189 | else | ||
| 190 | radeon_legacy_backlight_init(radeon_encoder, connector); | ||
| 191 | rdev->mode_info.bl_encoder = radeon_encoder; | ||
| 192 | } | ||
| 193 | } | ||
| 194 | |||
| 161 | void | 195 | void |
| 162 | radeon_link_encoder_connector(struct drm_device *dev) | 196 | radeon_link_encoder_connector(struct drm_device *dev) |
| 163 | { | 197 | { |
| 164 | struct radeon_device *rdev = dev->dev_private; | ||
| 165 | struct drm_connector *connector; | 198 | struct drm_connector *connector; |
| 166 | struct radeon_connector *radeon_connector; | 199 | struct radeon_connector *radeon_connector; |
| 167 | struct drm_encoder *encoder; | 200 | struct drm_encoder *encoder; |
| @@ -174,13 +207,8 @@ radeon_link_encoder_connector(struct drm_device *dev) | |||
| 174 | radeon_encoder = to_radeon_encoder(encoder); | 207 | radeon_encoder = to_radeon_encoder(encoder); |
| 175 | if (radeon_encoder->devices & radeon_connector->devices) { | 208 | if (radeon_encoder->devices & radeon_connector->devices) { |
| 176 | drm_mode_connector_attach_encoder(connector, encoder); | 209 | drm_mode_connector_attach_encoder(connector, encoder); |
| 177 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 210 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
| 178 | if (rdev->is_atom_bios) | 211 | radeon_encoder_add_backlight(radeon_encoder, connector); |
| 179 | radeon_atom_backlight_init(radeon_encoder, connector); | ||
| 180 | else | ||
| 181 | radeon_legacy_backlight_init(radeon_encoder, connector); | ||
| 182 | rdev->mode_info.bl_encoder = radeon_encoder; | ||
| 183 | } | ||
| 184 | } | 212 | } |
| 185 | } | 213 | } |
| 186 | } | 214 | } |
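The extracted radeon_encoder_add_backlight() keeps the old behaviour for the common case: with the parameter at -1 the driver still registers its own ATOM or legacy backlight device, except on the one quirked system (the Amilo Xi 2550, device 0x9583 with Fujitsu subsystem 0x1734:0x1107), which the in-code comment says only works with the ACPI backlight interface and therefore skips the native one. Setting the parameter to 0 gives every machine that behaviour, which is the usual workaround when a panel's native backlight control misbehaves. Note that the ATOM_DEVICE_LCD_SUPPORT test is kept both at the call site and inside the helper, so the helper stays safe to call from other sites.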
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 6bce40847753..3a0b973e8a96 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -6316,17 +6316,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev) | |||
| 6316 | wptr = RREG32(IH_RB_WPTR); | 6316 | wptr = RREG32(IH_RB_WPTR); |
| 6317 | 6317 | ||
| 6318 | if (wptr & RB_OVERFLOW) { | 6318 | if (wptr & RB_OVERFLOW) { |
| 6319 | wptr &= ~RB_OVERFLOW; | ||
| 6319 | /* When a ring buffer overflow happen start parsing interrupt | 6320 | /* When a ring buffer overflow happen start parsing interrupt |
| 6320 | * from the last not overwritten vector (wptr + 16). Hopefully | 6321 | * from the last not overwritten vector (wptr + 16). Hopefully |
| 6321 | * this should allow us to catchup. | 6322 | * this should allow us to catchup. |
| 6322 | */ | 6323 | */ |
| 6323 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | 6324 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", |
| 6324 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | 6325 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); |
| 6325 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | 6326 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; |
| 6326 | tmp = RREG32(IH_RB_CNTL); | 6327 | tmp = RREG32(IH_RB_CNTL); |
| 6327 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 6328 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
| 6328 | WREG32(IH_RB_CNTL, tmp); | 6329 | WREG32(IH_RB_CNTL, tmp); |
| 6329 | wptr &= ~RB_OVERFLOW; | ||
| 6330 | } | 6330 | } |
| 6331 | return (wptr & rdev->ih.ptr_mask); | 6331 | return (wptr & rdev->ih.ptr_mask); |
| 6332 | } | 6332 | } |
| @@ -6664,13 +6664,13 @@ restart_ih: | |||
| 6664 | /* wptr/rptr are in bytes! */ | 6664 | /* wptr/rptr are in bytes! */ |
| 6665 | rptr += 16; | 6665 | rptr += 16; |
| 6666 | rptr &= rdev->ih.ptr_mask; | 6666 | rptr &= rdev->ih.ptr_mask; |
| 6667 | WREG32(IH_RB_RPTR, rptr); | ||
| 6667 | } | 6668 | } |
| 6668 | if (queue_hotplug) | 6669 | if (queue_hotplug) |
| 6669 | schedule_work(&rdev->hotplug_work); | 6670 | schedule_work(&rdev->hotplug_work); |
| 6670 | if (queue_thermal && rdev->pm.dpm_enabled) | 6671 | if (queue_thermal && rdev->pm.dpm_enabled) |
| 6671 | schedule_work(&rdev->pm.dpm.thermal.work); | 6672 | schedule_work(&rdev->pm.dpm.thermal.work); |
| 6672 | rdev->ih.rptr = rptr; | 6673 | rdev->ih.rptr = rptr; |
| 6673 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
| 6674 | atomic_set(&rdev->ih.lock, 0); | 6674 | atomic_set(&rdev->ih.lock, 0); |
| 6675 | 6675 | ||
| 6676 | /* make sure wptr hasn't changed while processing */ | 6676 | /* make sure wptr hasn't changed while processing */ |
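The si.c version of the interrupt handler gets the same per-vector IH_RB_RPTR update as the earlier hunk in this series, plus a small cleanup of the overflow path: RB_OVERFLOW is now stripped from wptr before it is used, so the dev_warn() and the recovery read pointer are computed from the plain ring offset, and the warning's third argument changes from the bogus (wptr + 16) + rdev->ih.ptr_mask to the masked (wptr + 16) & rdev->ih.ptr_mask that is actually programmed, now printed in hex alongside the other pointers.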
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index 4a7cbfad1d74..fcdbde4ec692 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c | |||
| @@ -93,13 +93,29 @@ static ssize_t show_power_crit(struct device *dev, | |||
| 93 | } | 93 | } |
| 94 | static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL); | 94 | static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL); |
| 95 | 95 | ||
| 96 | static umode_t fam15h_power_is_visible(struct kobject *kobj, | ||
| 97 | struct attribute *attr, | ||
| 98 | int index) | ||
| 99 | { | ||
| 100 | /* power1_input is only reported for Fam15h, Models 00h-0fh */ | ||
| 101 | if (attr == &dev_attr_power1_input.attr && | ||
| 102 | (boot_cpu_data.x86 != 0x15 || boot_cpu_data.x86_model > 0xf)) | ||
| 103 | return 0; | ||
| 104 | |||
| 105 | return attr->mode; | ||
| 106 | } | ||
| 107 | |||
| 96 | static struct attribute *fam15h_power_attrs[] = { | 108 | static struct attribute *fam15h_power_attrs[] = { |
| 97 | &dev_attr_power1_input.attr, | 109 | &dev_attr_power1_input.attr, |
| 98 | &dev_attr_power1_crit.attr, | 110 | &dev_attr_power1_crit.attr, |
| 99 | NULL | 111 | NULL |
| 100 | }; | 112 | }; |
| 101 | 113 | ||
| 102 | ATTRIBUTE_GROUPS(fam15h_power); | 114 | static const struct attribute_group fam15h_power_group = { |
| 115 | .attrs = fam15h_power_attrs, | ||
| 116 | .is_visible = fam15h_power_is_visible, | ||
| 117 | }; | ||
| 118 | __ATTRIBUTE_GROUPS(fam15h_power); | ||
| 103 | 119 | ||
| 104 | static bool fam15h_power_is_internal_node0(struct pci_dev *f4) | 120 | static bool fam15h_power_is_internal_node0(struct pci_dev *f4) |
| 105 | { | 121 | { |
| @@ -216,7 +232,9 @@ static int fam15h_power_probe(struct pci_dev *pdev, | |||
| 216 | 232 | ||
| 217 | static const struct pci_device_id fam15h_power_id_table[] = { | 233 | static const struct pci_device_id fam15h_power_id_table[] = { |
| 218 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, | 234 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, |
| 235 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) }, | ||
| 219 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, | 236 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, |
| 237 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, | ||
| 220 | {} | 238 | {} |
| 221 | }; | 239 | }; |
| 222 | MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); | 240 | MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); |
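The fam15h_power change replaces a build-time attribute list with a runtime visibility check: power1_input is only created on family 0x15 models 00h-0fh, while other models get just power1_crit, and the PCI ID table grows two more northbridge function IDs. ATTRIBUTE_GROUPS() can no longer be used because it generates the attribute_group without an .is_visible hook; defining the group by hand and invoking only __ATTRIBUTE_GROUPS() keeps the fam15h_power_groups[] name that the registration code expects. Roughly, the two sysfs.h helpers expand as sketched below (paraphrased, not a verbatim copy of include/linux/sysfs.h):

    /* ATTRIBUTE_GROUPS(foo) defines both of the following;
     * __ATTRIBUTE_GROUPS(foo) defines only the pointer array,
     * which is why a driver can supply its own group with
     * .is_visible and still get the foo_groups[] array.
     */
    static const struct attribute_group foo_group = {
            .attrs = foo_attrs,             /* note: no .is_visible here */
    };
    static const struct attribute_group *foo_groups[] = {
            &foo_group,
            NULL,
    };

The .is_visible callback runs once per attribute while the group is created; returning 0 hides that attribute entirely, returning attr->mode keeps the declared permissions.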
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c index e42964f07f67..ad571ec795a3 100644 --- a/drivers/hwmon/tmp103.c +++ b/drivers/hwmon/tmp103.c | |||
| @@ -145,7 +145,7 @@ static int tmp103_probe(struct i2c_client *client, | |||
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | i2c_set_clientdata(client, regmap); | 147 | i2c_set_clientdata(client, regmap); |
| 148 | hwmon_dev = hwmon_device_register_with_groups(dev, client->name, | 148 | hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, |
| 149 | regmap, tmp103_groups); | 149 | regmap, tmp103_groups); |
| 150 | return PTR_ERR_OR_ZERO(hwmon_dev); | 150 | return PTR_ERR_OR_ZERO(hwmon_dev); |
| 151 | } | 151 | } |
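tmp103 registers the hwmon device in probe() but the surrounding driver apparently has no matching hwmon_device_unregister() call, so the plain hwmon_device_register_with_groups() would leave a stale hwmon device behind on driver unbind. The devm_ variant ties the registration to the i2c client's device and unregisters it automatically when the driver is detached, which is why no other change is needed in this file.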
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile index e0228b228256..1722f50f2473 100644 --- a/drivers/i2c/Makefile +++ b/drivers/i2c/Makefile | |||
| @@ -2,11 +2,8 @@ | |||
| 2 | # Makefile for the i2c core. | 2 | # Makefile for the i2c core. |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | i2ccore-y := i2c-core.o | ||
| 6 | i2ccore-$(CONFIG_ACPI) += i2c-acpi.o | ||
| 7 | |||
| 8 | obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o | 5 | obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o |
| 9 | obj-$(CONFIG_I2C) += i2ccore.o | 6 | obj-$(CONFIG_I2C) += i2c-core.o |
| 10 | obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o | 7 | obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o |
| 11 | obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o | 8 | obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o |
| 12 | obj-$(CONFIG_I2C_MUX) += i2c-mux.o | 9 | obj-$(CONFIG_I2C_MUX) += i2c-mux.o |
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 984492553e95..d9ee43c80cde 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
| @@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, | |||
| 497 | desc->wr_len_cmd = dma_size; | 497 | desc->wr_len_cmd = dma_size; |
| 498 | desc->control |= ISMT_DESC_BLK; | 498 | desc->control |= ISMT_DESC_BLK; |
| 499 | priv->dma_buffer[0] = command; | 499 | priv->dma_buffer[0] = command; |
| 500 | memcpy(&priv->dma_buffer[1], &data->block[1], dma_size); | 500 | memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1); |
| 501 | } else { | 501 | } else { |
| 502 | /* Block Read */ | 502 | /* Block Read */ |
| 503 | dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n"); | 503 | dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n"); |
| @@ -525,7 +525,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, | |||
| 525 | desc->wr_len_cmd = dma_size; | 525 | desc->wr_len_cmd = dma_size; |
| 526 | desc->control |= ISMT_DESC_I2C; | 526 | desc->control |= ISMT_DESC_I2C; |
| 527 | priv->dma_buffer[0] = command; | 527 | priv->dma_buffer[0] = command; |
| 528 | memcpy(&priv->dma_buffer[1], &data->block[1], dma_size); | 528 | memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1); |
| 529 | } else { | 529 | } else { |
| 530 | /* i2c Block Read */ | 530 | /* i2c Block Read */ |
| 531 | dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n"); | 531 | dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n"); |
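In both write paths above the bounce buffer already carries the command byte in dma_buffer[0] and wr_len_cmd is set to dma_size, so only dma_size - 1 payload bytes should come from the caller; copying dma_size bytes read one byte past the end of the supplied block. For SMBus block transfers the i2c core's buffer layout is: block[0] holds the byte count and block[1..block[0]] holds the payload. A minimal sketch of the corrected fill (count here presumably equals dma_size - 1 in the driver):

    u8 count = data->block[0];                      /* number of payload bytes      */
    priv->dma_buffer[0] = command;                  /* command byte occupies slot 0 */
    memcpy(&priv->dma_buffer[1], &data->block[1], count);  /* exactly count bytes   */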
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 7170fc892829..65a21fed08b5 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c | |||
| @@ -429,7 +429,7 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap, | |||
| 429 | ret = mxs_i2c_pio_wait_xfer_end(i2c); | 429 | ret = mxs_i2c_pio_wait_xfer_end(i2c); |
| 430 | if (ret) { | 430 | if (ret) { |
| 431 | dev_err(i2c->dev, | 431 | dev_err(i2c->dev, |
| 432 | "PIO: Failed to send SELECT command!\n"); | 432 | "PIO: Failed to send READ command!\n"); |
| 433 | goto cleanup; | 433 | goto cleanup; |
| 434 | } | 434 | } |
| 435 | 435 | ||
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 1cc146cfc1f3..e506fcd3ca04 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
| @@ -76,8 +76,8 @@ | |||
| 76 | #define RCAR_IRQ_RECV (MNR | MAL | MST | MAT | MDR) | 76 | #define RCAR_IRQ_RECV (MNR | MAL | MST | MAT | MDR) |
| 77 | #define RCAR_IRQ_STOP (MST) | 77 | #define RCAR_IRQ_STOP (MST) |
| 78 | 78 | ||
| 79 | #define RCAR_IRQ_ACK_SEND (~(MAT | MDE)) | 79 | #define RCAR_IRQ_ACK_SEND (~(MAT | MDE) & 0xFF) |
| 80 | #define RCAR_IRQ_ACK_RECV (~(MAT | MDR)) | 80 | #define RCAR_IRQ_ACK_RECV (~(MAT | MDR) & 0xFF) |
| 81 | 81 | ||
| 82 | #define ID_LAST_MSG (1 << 0) | 82 | #define ID_LAST_MSG (1 << 0) |
| 83 | #define ID_IOERROR (1 << 1) | 83 | #define ID_IOERROR (1 << 1) |
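MAT, MDE and MDR are single-bit flags in the low byte of the controller's status register, but ~(MAT | MDE) is evaluated as a 32-bit int, so the old acknowledge masks presumably also set bits 8-31 when written back to the hardware; the added & 0xFF confines the write to the byte that is actually defined. Illustration with hypothetical bit positions (not the driver's real values):

    #define MAT (1 << 3)
    #define MDE (1 << 5)

    /* ~(MAT | MDE)        == 0xFFFFFFD7  -> bits 8..31 leak into the write */
    /* ~(MAT | MDE) & 0xFF == 0x000000D7  -> fits the 8-bit field           */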
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index e637c32ae517..93cfc837200b 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c | |||
| @@ -433,12 +433,11 @@ static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate) | |||
| 433 | unsigned long i2c_rate = clk_get_rate(i2c->clk); | 433 | unsigned long i2c_rate = clk_get_rate(i2c->clk); |
| 434 | unsigned int div; | 434 | unsigned int div; |
| 435 | 435 | ||
| 436 | /* SCL rate = (clk rate) / (8 * DIV) */ | 436 | /* set DIV = DIVH = DIVL |
| 437 | div = DIV_ROUND_UP(i2c_rate, scl_rate * 8); | 437 | * SCL rate = (clk rate) / (8 * (DIVH + 1 + DIVL + 1)) |
| 438 | 438 | * = (clk rate) / (16 * (DIV + 1)) | |
| 439 | /* The lower and upper half of the CLKDIV reg describe the length of | 439 | */ |
| 440 | * SCL low & high periods. */ | 440 | div = DIV_ROUND_UP(i2c_rate, scl_rate * 16) - 1; |
| 441 | div = DIV_ROUND_UP(div, 2); | ||
| 442 | 441 | ||
| 443 | i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV); | 442 | i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV); |
| 444 | } | 443 | } |
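The old code solved SCL = clk / (8 * DIV) and then split DIV across the two register halves, ignoring the +1 the hardware adds to each half; the new comment states the real relation, SCL = clk / (8 * (DIVH + 1 + DIVL + 1)), which with DIVH = DIVL = DIV collapses to clk / (16 * (DIV + 1)), hence DIV = ceil(clk / (16 * SCL)) - 1. A worked example with made-up but plausible numbers:

    /* i2c_rate = 100 MHz, requested scl_rate = 400 kHz (illustrative values) */
    unsigned long i2c_rate = 100000000;
    unsigned long scl_rate = 400000;

    unsigned int div = DIV_ROUND_UP(i2c_rate, scl_rate * 16) - 1;
    /* = ceil(100000000 / 6400000) - 1 = 16 - 1 = 15 */

    unsigned long actual = i2c_rate / (16 * (div + 1));
    /* = 100000000 / 256 = 390625 Hz, the fastest rate not above 400 kHz */

Rounding the division up keeps the resulting bus frequency at or below the requested rate.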
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 87d0371cebb7..efba1ebe16ba 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c | |||
| @@ -380,34 +380,33 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev) | |||
| 380 | { | 380 | { |
| 381 | int ret; | 381 | int ret; |
| 382 | if (!i2c_dev->hw->has_single_clk_source) { | 382 | if (!i2c_dev->hw->has_single_clk_source) { |
| 383 | ret = clk_prepare_enable(i2c_dev->fast_clk); | 383 | ret = clk_enable(i2c_dev->fast_clk); |
| 384 | if (ret < 0) { | 384 | if (ret < 0) { |
| 385 | dev_err(i2c_dev->dev, | 385 | dev_err(i2c_dev->dev, |
| 386 | "Enabling fast clk failed, err %d\n", ret); | 386 | "Enabling fast clk failed, err %d\n", ret); |
| 387 | return ret; | 387 | return ret; |
| 388 | } | 388 | } |
| 389 | } | 389 | } |
| 390 | ret = clk_prepare_enable(i2c_dev->div_clk); | 390 | ret = clk_enable(i2c_dev->div_clk); |
| 391 | if (ret < 0) { | 391 | if (ret < 0) { |
| 392 | dev_err(i2c_dev->dev, | 392 | dev_err(i2c_dev->dev, |
| 393 | "Enabling div clk failed, err %d\n", ret); | 393 | "Enabling div clk failed, err %d\n", ret); |
| 394 | clk_disable_unprepare(i2c_dev->fast_clk); | 394 | clk_disable(i2c_dev->fast_clk); |
| 395 | } | 395 | } |
| 396 | return ret; | 396 | return ret; |
| 397 | } | 397 | } |
| 398 | 398 | ||
| 399 | static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev) | 399 | static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev) |
| 400 | { | 400 | { |
| 401 | clk_disable_unprepare(i2c_dev->div_clk); | 401 | clk_disable(i2c_dev->div_clk); |
| 402 | if (!i2c_dev->hw->has_single_clk_source) | 402 | if (!i2c_dev->hw->has_single_clk_source) |
| 403 | clk_disable_unprepare(i2c_dev->fast_clk); | 403 | clk_disable(i2c_dev->fast_clk); |
| 404 | } | 404 | } |
| 405 | 405 | ||
| 406 | static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) | 406 | static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) |
| 407 | { | 407 | { |
| 408 | u32 val; | 408 | u32 val; |
| 409 | int err = 0; | 409 | int err = 0; |
| 410 | int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; | ||
| 411 | u32 clk_divisor; | 410 | u32 clk_divisor; |
| 412 | 411 | ||
| 413 | err = tegra_i2c_clock_enable(i2c_dev); | 412 | err = tegra_i2c_clock_enable(i2c_dev); |
| @@ -428,9 +427,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) | |||
| 428 | i2c_writel(i2c_dev, val, I2C_CNFG); | 427 | i2c_writel(i2c_dev, val, I2C_CNFG); |
| 429 | i2c_writel(i2c_dev, 0, I2C_INT_MASK); | 428 | i2c_writel(i2c_dev, 0, I2C_INT_MASK); |
| 430 | 429 | ||
| 431 | clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1); | ||
| 432 | clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * clk_multiplier); | ||
| 433 | |||
| 434 | /* Make sure clock divisor programmed correctly */ | 430 | /* Make sure clock divisor programmed correctly */ |
| 435 | clk_divisor = i2c_dev->hw->clk_divisor_hs_mode; | 431 | clk_divisor = i2c_dev->hw->clk_divisor_hs_mode; |
| 436 | clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode << | 432 | clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode << |
| @@ -712,6 +708,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) | |||
| 712 | void __iomem *base; | 708 | void __iomem *base; |
| 713 | int irq; | 709 | int irq; |
| 714 | int ret = 0; | 710 | int ret = 0; |
| 711 | int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; | ||
| 715 | 712 | ||
| 716 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 713 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 717 | base = devm_ioremap_resource(&pdev->dev, res); | 714 | base = devm_ioremap_resource(&pdev->dev, res); |
| @@ -777,17 +774,39 @@ static int tegra_i2c_probe(struct platform_device *pdev) | |||
| 777 | 774 | ||
| 778 | platform_set_drvdata(pdev, i2c_dev); | 775 | platform_set_drvdata(pdev, i2c_dev); |
| 779 | 776 | ||
| 777 | if (!i2c_dev->hw->has_single_clk_source) { | ||
| 778 | ret = clk_prepare(i2c_dev->fast_clk); | ||
| 779 | if (ret < 0) { | ||
| 780 | dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret); | ||
| 781 | return ret; | ||
| 782 | } | ||
| 783 | } | ||
| 784 | |||
| 785 | clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1); | ||
| 786 | ret = clk_set_rate(i2c_dev->div_clk, | ||
| 787 | i2c_dev->bus_clk_rate * clk_multiplier); | ||
| 788 | if (ret) { | ||
| 789 | dev_err(i2c_dev->dev, "Clock rate change failed %d\n", ret); | ||
| 790 | goto unprepare_fast_clk; | ||
| 791 | } | ||
| 792 | |||
| 793 | ret = clk_prepare(i2c_dev->div_clk); | ||
| 794 | if (ret < 0) { | ||
| 795 | dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret); | ||
| 796 | goto unprepare_fast_clk; | ||
| 797 | } | ||
| 798 | |||
| 780 | ret = tegra_i2c_init(i2c_dev); | 799 | ret = tegra_i2c_init(i2c_dev); |
| 781 | if (ret) { | 800 | if (ret) { |
| 782 | dev_err(&pdev->dev, "Failed to initialize i2c controller"); | 801 | dev_err(&pdev->dev, "Failed to initialize i2c controller"); |
| 783 | return ret; | 802 | goto unprepare_div_clk; |
| 784 | } | 803 | } |
| 785 | 804 | ||
| 786 | ret = devm_request_irq(&pdev->dev, i2c_dev->irq, | 805 | ret = devm_request_irq(&pdev->dev, i2c_dev->irq, |
| 787 | tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); | 806 | tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); |
| 788 | if (ret) { | 807 | if (ret) { |
| 789 | dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); | 808 | dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); |
| 790 | return ret; | 809 | goto unprepare_div_clk; |
| 791 | } | 810 | } |
| 792 | 811 | ||
| 793 | i2c_set_adapdata(&i2c_dev->adapter, i2c_dev); | 812 | i2c_set_adapdata(&i2c_dev->adapter, i2c_dev); |
| @@ -803,16 +822,30 @@ static int tegra_i2c_probe(struct platform_device *pdev) | |||
| 803 | ret = i2c_add_numbered_adapter(&i2c_dev->adapter); | 822 | ret = i2c_add_numbered_adapter(&i2c_dev->adapter); |
| 804 | if (ret) { | 823 | if (ret) { |
| 805 | dev_err(&pdev->dev, "Failed to add I2C adapter\n"); | 824 | dev_err(&pdev->dev, "Failed to add I2C adapter\n"); |
| 806 | return ret; | 825 | goto unprepare_div_clk; |
| 807 | } | 826 | } |
| 808 | 827 | ||
| 809 | return 0; | 828 | return 0; |
| 829 | |||
| 830 | unprepare_div_clk: | ||
| 831 | clk_unprepare(i2c_dev->div_clk); | ||
| 832 | |||
| 833 | unprepare_fast_clk: | ||
| 834 | if (!i2c_dev->hw->has_single_clk_source) | ||
| 835 | clk_unprepare(i2c_dev->fast_clk); | ||
| 836 | |||
| 837 | return ret; | ||
| 810 | } | 838 | } |
| 811 | 839 | ||
| 812 | static int tegra_i2c_remove(struct platform_device *pdev) | 840 | static int tegra_i2c_remove(struct platform_device *pdev) |
| 813 | { | 841 | { |
| 814 | struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); | 842 | struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); |
| 815 | i2c_del_adapter(&i2c_dev->adapter); | 843 | i2c_del_adapter(&i2c_dev->adapter); |
| 844 | |||
| 845 | clk_unprepare(i2c_dev->div_clk); | ||
| 846 | if (!i2c_dev->hw->has_single_clk_source) | ||
| 847 | clk_unprepare(i2c_dev->fast_clk); | ||
| 848 | |||
| 816 | return 0; | 849 | return 0; |
| 817 | } | 850 | } |
| 818 | 851 | ||
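The tegra change splits the clock handling along the clk API's contract: clk_prepare()/clk_unprepare() may sleep and so are now done once in probe()/remove() (together with the clk_set_rate() call that moved out of tegra_i2c_init()), while tegra_i2c_clock_enable()/disable() keep only clk_enable()/clk_disable(), which are safe to call from atomic context. Probe also gains proper unwinding through the unprepare_div_clk/unprepare_fast_clk labels instead of returning with clocks left prepared. The general shape of the pattern, sketched with hypothetical names (assumes <linux/clk.h>):

    struct foo { struct clk *clk; };

    static int foo_power_on(struct foo *p)      /* hot path, possibly atomic */
    {
            return clk_enable(p->clk);
    }

    static void foo_power_off(struct foo *p)
    {
            clk_disable(p->clk);
    }

    static int foo_probe(struct foo *p)
    {
            int ret = clk_prepare(p->clk);      /* sleeping part, done once */
            if (ret)
                    return ret;
            ret = foo_power_on(p);
            if (ret)
                    clk_unprepare(p->clk);      /* unwind on failure */
            return ret;
    }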
diff --git a/drivers/i2c/i2c-acpi.c b/drivers/i2c/i2c-acpi.c deleted file mode 100644 index 0dbc18c15c43..000000000000 --- a/drivers/i2c/i2c-acpi.c +++ /dev/null | |||
| @@ -1,364 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * I2C ACPI code | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Intel Corp | ||
| 5 | * | ||
| 6 | * Author: Lan Tianyu <tianyu.lan@intel.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
| 14 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 15 | * for more details. | ||
| 16 | */ | ||
| 17 | #define pr_fmt(fmt) "I2C/ACPI : " fmt | ||
| 18 | |||
| 19 | #include <linux/kernel.h> | ||
| 20 | #include <linux/errno.h> | ||
| 21 | #include <linux/err.h> | ||
| 22 | #include <linux/i2c.h> | ||
| 23 | #include <linux/acpi.h> | ||
| 24 | |||
| 25 | struct acpi_i2c_handler_data { | ||
| 26 | struct acpi_connection_info info; | ||
| 27 | struct i2c_adapter *adapter; | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct gsb_buffer { | ||
| 31 | u8 status; | ||
| 32 | u8 len; | ||
| 33 | union { | ||
| 34 | u16 wdata; | ||
| 35 | u8 bdata; | ||
| 36 | u8 data[0]; | ||
| 37 | }; | ||
| 38 | } __packed; | ||
| 39 | |||
| 40 | static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) | ||
| 41 | { | ||
| 42 | struct i2c_board_info *info = data; | ||
| 43 | |||
| 44 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
| 45 | struct acpi_resource_i2c_serialbus *sb; | ||
| 46 | |||
| 47 | sb = &ares->data.i2c_serial_bus; | ||
| 48 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
| 49 | info->addr = sb->slave_address; | ||
| 50 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
| 51 | info->flags |= I2C_CLIENT_TEN; | ||
| 52 | } | ||
| 53 | } else if (info->irq < 0) { | ||
| 54 | struct resource r; | ||
| 55 | |||
| 56 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
| 57 | info->irq = r.start; | ||
| 58 | } | ||
| 59 | |||
| 60 | /* Tell the ACPI core to skip this resource */ | ||
| 61 | return 1; | ||
| 62 | } | ||
| 63 | |||
| 64 | static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | ||
| 65 | void *data, void **return_value) | ||
| 66 | { | ||
| 67 | struct i2c_adapter *adapter = data; | ||
| 68 | struct list_head resource_list; | ||
| 69 | struct i2c_board_info info; | ||
| 70 | struct acpi_device *adev; | ||
| 71 | int ret; | ||
| 72 | |||
| 73 | if (acpi_bus_get_device(handle, &adev)) | ||
| 74 | return AE_OK; | ||
| 75 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
| 76 | return AE_OK; | ||
| 77 | |||
| 78 | memset(&info, 0, sizeof(info)); | ||
| 79 | info.acpi_node.companion = adev; | ||
| 80 | info.irq = -1; | ||
| 81 | |||
| 82 | INIT_LIST_HEAD(&resource_list); | ||
| 83 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
| 84 | acpi_i2c_add_resource, &info); | ||
| 85 | acpi_dev_free_resource_list(&resource_list); | ||
| 86 | |||
| 87 | if (ret < 0 || !info.addr) | ||
| 88 | return AE_OK; | ||
| 89 | |||
| 90 | adev->power.flags.ignore_parent = true; | ||
| 91 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); | ||
| 92 | if (!i2c_new_device(adapter, &info)) { | ||
| 93 | adev->power.flags.ignore_parent = false; | ||
| 94 | dev_err(&adapter->dev, | ||
| 95 | "failed to add I2C device %s from ACPI\n", | ||
| 96 | dev_name(&adev->dev)); | ||
| 97 | } | ||
| 98 | |||
| 99 | return AE_OK; | ||
| 100 | } | ||
| 101 | |||
| 102 | /** | ||
| 103 | * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter | ||
| 104 | * @adap: pointer to adapter | ||
| 105 | * | ||
| 106 | * Enumerate all I2C slave devices behind this adapter by walking the ACPI | ||
| 107 | * namespace. When a device is found it will be added to the Linux device | ||
| 108 | * model and bound to the corresponding ACPI handle. | ||
| 109 | */ | ||
| 110 | void acpi_i2c_register_devices(struct i2c_adapter *adap) | ||
| 111 | { | ||
| 112 | acpi_handle handle; | ||
| 113 | acpi_status status; | ||
| 114 | |||
| 115 | if (!adap->dev.parent) | ||
| 116 | return; | ||
| 117 | |||
| 118 | handle = ACPI_HANDLE(adap->dev.parent); | ||
| 119 | if (!handle) | ||
| 120 | return; | ||
| 121 | |||
| 122 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
| 123 | acpi_i2c_add_device, NULL, | ||
| 124 | adap, NULL); | ||
| 125 | if (ACPI_FAILURE(status)) | ||
| 126 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); | ||
| 127 | } | ||
| 128 | |||
| 129 | #ifdef CONFIG_ACPI_I2C_OPREGION | ||
| 130 | static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, | ||
| 131 | u8 cmd, u8 *data, u8 data_len) | ||
| 132 | { | ||
| 133 | |||
| 134 | struct i2c_msg msgs[2]; | ||
| 135 | int ret; | ||
| 136 | u8 *buffer; | ||
| 137 | |||
| 138 | buffer = kzalloc(data_len, GFP_KERNEL); | ||
| 139 | if (!buffer) | ||
| 140 | return AE_NO_MEMORY; | ||
| 141 | |||
| 142 | msgs[0].addr = client->addr; | ||
| 143 | msgs[0].flags = client->flags; | ||
| 144 | msgs[0].len = 1; | ||
| 145 | msgs[0].buf = &cmd; | ||
| 146 | |||
| 147 | msgs[1].addr = client->addr; | ||
| 148 | msgs[1].flags = client->flags | I2C_M_RD; | ||
| 149 | msgs[1].len = data_len; | ||
| 150 | msgs[1].buf = buffer; | ||
| 151 | |||
| 152 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
| 153 | if (ret < 0) | ||
| 154 | dev_err(&client->adapter->dev, "i2c read failed\n"); | ||
| 155 | else | ||
| 156 | memcpy(data, buffer, data_len); | ||
| 157 | |||
| 158 | kfree(buffer); | ||
| 159 | return ret; | ||
| 160 | } | ||
| 161 | |||
| 162 | static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, | ||
| 163 | u8 cmd, u8 *data, u8 data_len) | ||
| 164 | { | ||
| 165 | |||
| 166 | struct i2c_msg msgs[1]; | ||
| 167 | u8 *buffer; | ||
| 168 | int ret = AE_OK; | ||
| 169 | |||
| 170 | buffer = kzalloc(data_len + 1, GFP_KERNEL); | ||
| 171 | if (!buffer) | ||
| 172 | return AE_NO_MEMORY; | ||
| 173 | |||
| 174 | buffer[0] = cmd; | ||
| 175 | memcpy(buffer + 1, data, data_len); | ||
| 176 | |||
| 177 | msgs[0].addr = client->addr; | ||
| 178 | msgs[0].flags = client->flags; | ||
| 179 | msgs[0].len = data_len + 1; | ||
| 180 | msgs[0].buf = buffer; | ||
| 181 | |||
| 182 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
| 183 | if (ret < 0) | ||
| 184 | dev_err(&client->adapter->dev, "i2c write failed\n"); | ||
| 185 | |||
| 186 | kfree(buffer); | ||
| 187 | return ret; | ||
| 188 | } | ||
| 189 | |||
| 190 | static acpi_status | ||
| 191 | acpi_i2c_space_handler(u32 function, acpi_physical_address command, | ||
| 192 | u32 bits, u64 *value64, | ||
| 193 | void *handler_context, void *region_context) | ||
| 194 | { | ||
| 195 | struct gsb_buffer *gsb = (struct gsb_buffer *)value64; | ||
| 196 | struct acpi_i2c_handler_data *data = handler_context; | ||
| 197 | struct acpi_connection_info *info = &data->info; | ||
| 198 | struct acpi_resource_i2c_serialbus *sb; | ||
| 199 | struct i2c_adapter *adapter = data->adapter; | ||
| 200 | struct i2c_client client; | ||
| 201 | struct acpi_resource *ares; | ||
| 202 | u32 accessor_type = function >> 16; | ||
| 203 | u8 action = function & ACPI_IO_MASK; | ||
| 204 | acpi_status ret = AE_OK; | ||
| 205 | int status; | ||
| 206 | |||
| 207 | ret = acpi_buffer_to_resource(info->connection, info->length, &ares); | ||
| 208 | if (ACPI_FAILURE(ret)) | ||
| 209 | return ret; | ||
| 210 | |||
| 211 | if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
| 212 | ret = AE_BAD_PARAMETER; | ||
| 213 | goto err; | ||
| 214 | } | ||
| 215 | |||
| 216 | sb = &ares->data.i2c_serial_bus; | ||
| 217 | if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
| 218 | ret = AE_BAD_PARAMETER; | ||
| 219 | goto err; | ||
| 220 | } | ||
| 221 | |||
| 222 | memset(&client, 0, sizeof(client)); | ||
| 223 | client.adapter = adapter; | ||
| 224 | client.addr = sb->slave_address; | ||
| 225 | client.flags = 0; | ||
| 226 | |||
| 227 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
| 228 | client.flags |= I2C_CLIENT_TEN; | ||
| 229 | |||
| 230 | switch (accessor_type) { | ||
| 231 | case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: | ||
| 232 | if (action == ACPI_READ) { | ||
| 233 | status = i2c_smbus_read_byte(&client); | ||
| 234 | if (status >= 0) { | ||
| 235 | gsb->bdata = status; | ||
| 236 | status = 0; | ||
| 237 | } | ||
| 238 | } else { | ||
| 239 | status = i2c_smbus_write_byte(&client, gsb->bdata); | ||
| 240 | } | ||
| 241 | break; | ||
| 242 | |||
| 243 | case ACPI_GSB_ACCESS_ATTRIB_BYTE: | ||
| 244 | if (action == ACPI_READ) { | ||
| 245 | status = i2c_smbus_read_byte_data(&client, command); | ||
| 246 | if (status >= 0) { | ||
| 247 | gsb->bdata = status; | ||
| 248 | status = 0; | ||
| 249 | } | ||
| 250 | } else { | ||
| 251 | status = i2c_smbus_write_byte_data(&client, command, | ||
| 252 | gsb->bdata); | ||
| 253 | } | ||
| 254 | break; | ||
| 255 | |||
| 256 | case ACPI_GSB_ACCESS_ATTRIB_WORD: | ||
| 257 | if (action == ACPI_READ) { | ||
| 258 | status = i2c_smbus_read_word_data(&client, command); | ||
| 259 | if (status >= 0) { | ||
| 260 | gsb->wdata = status; | ||
| 261 | status = 0; | ||
| 262 | } | ||
| 263 | } else { | ||
| 264 | status = i2c_smbus_write_word_data(&client, command, | ||
| 265 | gsb->wdata); | ||
| 266 | } | ||
| 267 | break; | ||
| 268 | |||
| 269 | case ACPI_GSB_ACCESS_ATTRIB_BLOCK: | ||
| 270 | if (action == ACPI_READ) { | ||
| 271 | status = i2c_smbus_read_block_data(&client, command, | ||
| 272 | gsb->data); | ||
| 273 | if (status >= 0) { | ||
| 274 | gsb->len = status; | ||
| 275 | status = 0; | ||
| 276 | } | ||
| 277 | } else { | ||
| 278 | status = i2c_smbus_write_block_data(&client, command, | ||
| 279 | gsb->len, gsb->data); | ||
| 280 | } | ||
| 281 | break; | ||
| 282 | |||
| 283 | case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: | ||
| 284 | if (action == ACPI_READ) { | ||
| 285 | status = acpi_gsb_i2c_read_bytes(&client, command, | ||
| 286 | gsb->data, info->access_length); | ||
| 287 | if (status > 0) | ||
| 288 | status = 0; | ||
| 289 | } else { | ||
| 290 | status = acpi_gsb_i2c_write_bytes(&client, command, | ||
| 291 | gsb->data, info->access_length); | ||
| 292 | } | ||
| 293 | break; | ||
| 294 | |||
| 295 | default: | ||
| 296 | pr_info("protocol(0x%02x) is not supported.\n", accessor_type); | ||
| 297 | ret = AE_BAD_PARAMETER; | ||
| 298 | goto err; | ||
| 299 | } | ||
| 300 | |||
| 301 | gsb->status = status; | ||
| 302 | |||
| 303 | err: | ||
| 304 | ACPI_FREE(ares); | ||
| 305 | return ret; | ||
| 306 | } | ||
| 307 | |||
| 308 | |||
| 309 | int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
| 310 | { | ||
| 311 | acpi_handle handle = ACPI_HANDLE(adapter->dev.parent); | ||
| 312 | struct acpi_i2c_handler_data *data; | ||
| 313 | acpi_status status; | ||
| 314 | |||
| 315 | if (!handle) | ||
| 316 | return -ENODEV; | ||
| 317 | |||
| 318 | data = kzalloc(sizeof(struct acpi_i2c_handler_data), | ||
| 319 | GFP_KERNEL); | ||
| 320 | if (!data) | ||
| 321 | return -ENOMEM; | ||
| 322 | |||
| 323 | data->adapter = adapter; | ||
| 324 | status = acpi_bus_attach_private_data(handle, (void *)data); | ||
| 325 | if (ACPI_FAILURE(status)) { | ||
| 326 | kfree(data); | ||
| 327 | return -ENOMEM; | ||
| 328 | } | ||
| 329 | |||
| 330 | status = acpi_install_address_space_handler(handle, | ||
| 331 | ACPI_ADR_SPACE_GSBUS, | ||
| 332 | &acpi_i2c_space_handler, | ||
| 333 | NULL, | ||
| 334 | data); | ||
| 335 | if (ACPI_FAILURE(status)) { | ||
| 336 | dev_err(&adapter->dev, "Error installing i2c space handler\n"); | ||
| 337 | acpi_bus_detach_private_data(handle); | ||
| 338 | kfree(data); | ||
| 339 | return -ENOMEM; | ||
| 340 | } | ||
| 341 | |||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | |||
| 345 | void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
| 346 | { | ||
| 347 | acpi_handle handle = ACPI_HANDLE(adapter->dev.parent); | ||
| 348 | struct acpi_i2c_handler_data *data; | ||
| 349 | acpi_status status; | ||
| 350 | |||
| 351 | if (!handle) | ||
| 352 | return; | ||
| 353 | |||
| 354 | acpi_remove_address_space_handler(handle, | ||
| 355 | ACPI_ADR_SPACE_GSBUS, | ||
| 356 | &acpi_i2c_space_handler); | ||
| 357 | |||
| 358 | status = acpi_bus_get_private_data(handle, (void **)&data); | ||
| 359 | if (ACPI_SUCCESS(status)) | ||
| 360 | kfree(data); | ||
| 361 | |||
| 362 | acpi_bus_detach_private_data(handle); | ||
| 363 | } | ||
| 364 | #endif | ||
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 632057a44615..ccfbbab82a15 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de> | 27 | OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de> |
| 28 | (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and | 28 | (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and |
| 29 | (c) 2013 Wolfram Sang <wsa@the-dreams.de> | 29 | (c) 2013 Wolfram Sang <wsa@the-dreams.de> |
| 30 | I2C ACPI code Copyright (C) 2014 Intel Corp | ||
| 31 | Author: Lan Tianyu <tianyu.lan@intel.com> | ||
| 30 | */ | 32 | */ |
| 31 | 33 | ||
| 32 | #include <linux/module.h> | 34 | #include <linux/module.h> |
| @@ -78,6 +80,368 @@ void i2c_transfer_trace_unreg(void) | |||
| 78 | static_key_slow_dec(&i2c_trace_msg); | 80 | static_key_slow_dec(&i2c_trace_msg); |
| 79 | } | 81 | } |
| 80 | 82 | ||
| 83 | #if defined(CONFIG_ACPI) | ||
| 84 | struct acpi_i2c_handler_data { | ||
| 85 | struct acpi_connection_info info; | ||
| 86 | struct i2c_adapter *adapter; | ||
| 87 | }; | ||
| 88 | |||
| 89 | struct gsb_buffer { | ||
| 90 | u8 status; | ||
| 91 | u8 len; | ||
| 92 | union { | ||
| 93 | u16 wdata; | ||
| 94 | u8 bdata; | ||
| 95 | u8 data[0]; | ||
| 96 | }; | ||
| 97 | } __packed; | ||
| 98 | |||
| 99 | static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) | ||
| 100 | { | ||
| 101 | struct i2c_board_info *info = data; | ||
| 102 | |||
| 103 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
| 104 | struct acpi_resource_i2c_serialbus *sb; | ||
| 105 | |||
| 106 | sb = &ares->data.i2c_serial_bus; | ||
| 107 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
| 108 | info->addr = sb->slave_address; | ||
| 109 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
| 110 | info->flags |= I2C_CLIENT_TEN; | ||
| 111 | } | ||
| 112 | } else if (info->irq < 0) { | ||
| 113 | struct resource r; | ||
| 114 | |||
| 115 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
| 116 | info->irq = r.start; | ||
| 117 | } | ||
| 118 | |||
| 119 | /* Tell the ACPI core to skip this resource */ | ||
| 120 | return 1; | ||
| 121 | } | ||
| 122 | |||
| 123 | static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | ||
| 124 | void *data, void **return_value) | ||
| 125 | { | ||
| 126 | struct i2c_adapter *adapter = data; | ||
| 127 | struct list_head resource_list; | ||
| 128 | struct i2c_board_info info; | ||
| 129 | struct acpi_device *adev; | ||
| 130 | int ret; | ||
| 131 | |||
| 132 | if (acpi_bus_get_device(handle, &adev)) | ||
| 133 | return AE_OK; | ||
| 134 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
| 135 | return AE_OK; | ||
| 136 | |||
| 137 | memset(&info, 0, sizeof(info)); | ||
| 138 | info.acpi_node.companion = adev; | ||
| 139 | info.irq = -1; | ||
| 140 | |||
| 141 | INIT_LIST_HEAD(&resource_list); | ||
| 142 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
| 143 | acpi_i2c_add_resource, &info); | ||
| 144 | acpi_dev_free_resource_list(&resource_list); | ||
| 145 | |||
| 146 | if (ret < 0 || !info.addr) | ||
| 147 | return AE_OK; | ||
| 148 | |||
| 149 | adev->power.flags.ignore_parent = true; | ||
| 150 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); | ||
| 151 | if (!i2c_new_device(adapter, &info)) { | ||
| 152 | adev->power.flags.ignore_parent = false; | ||
| 153 | dev_err(&adapter->dev, | ||
| 154 | "failed to add I2C device %s from ACPI\n", | ||
| 155 | dev_name(&adev->dev)); | ||
| 156 | } | ||
| 157 | |||
| 158 | return AE_OK; | ||
| 159 | } | ||
| 160 | |||
| 161 | /** | ||
| 162 | * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter | ||
| 163 | * @adap: pointer to adapter | ||
| 164 | * | ||
| 165 | * Enumerate all I2C slave devices behind this adapter by walking the ACPI | ||
| 166 | * namespace. When a device is found it will be added to the Linux device | ||
| 167 | * model and bound to the corresponding ACPI handle. | ||
| 168 | */ | ||
| 169 | static void acpi_i2c_register_devices(struct i2c_adapter *adap) | ||
| 170 | { | ||
| 171 | acpi_handle handle; | ||
| 172 | acpi_status status; | ||
| 173 | |||
| 174 | if (!adap->dev.parent) | ||
| 175 | return; | ||
| 176 | |||
| 177 | handle = ACPI_HANDLE(adap->dev.parent); | ||
| 178 | if (!handle) | ||
| 179 | return; | ||
| 180 | |||
| 181 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
| 182 | acpi_i2c_add_device, NULL, | ||
| 183 | adap, NULL); | ||
| 184 | if (ACPI_FAILURE(status)) | ||
| 185 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); | ||
| 186 | } | ||
| 187 | |||
| 188 | #else /* CONFIG_ACPI */ | ||
| 189 | static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { } | ||
| 190 | #endif /* CONFIG_ACPI */ | ||
| 191 | |||
| 192 | #ifdef CONFIG_ACPI_I2C_OPREGION | ||
| 193 | static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, | ||
| 194 | u8 cmd, u8 *data, u8 data_len) | ||
| 195 | { | ||
| 196 | |||
| 197 | struct i2c_msg msgs[2]; | ||
| 198 | int ret; | ||
| 199 | u8 *buffer; | ||
| 200 | |||
| 201 | buffer = kzalloc(data_len, GFP_KERNEL); | ||
| 202 | if (!buffer) | ||
| 203 | return AE_NO_MEMORY; | ||
| 204 | |||
| 205 | msgs[0].addr = client->addr; | ||
| 206 | msgs[0].flags = client->flags; | ||
| 207 | msgs[0].len = 1; | ||
| 208 | msgs[0].buf = &cmd; | ||
| 209 | |||
| 210 | msgs[1].addr = client->addr; | ||
| 211 | msgs[1].flags = client->flags | I2C_M_RD; | ||
| 212 | msgs[1].len = data_len; | ||
| 213 | msgs[1].buf = buffer; | ||
| 214 | |||
| 215 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
| 216 | if (ret < 0) | ||
| 217 | dev_err(&client->adapter->dev, "i2c read failed\n"); | ||
| 218 | else | ||
| 219 | memcpy(data, buffer, data_len); | ||
| 220 | |||
| 221 | kfree(buffer); | ||
| 222 | return ret; | ||
| 223 | } | ||
| 224 | |||
| 225 | static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, | ||
| 226 | u8 cmd, u8 *data, u8 data_len) | ||
| 227 | { | ||
| 228 | |||
| 229 | struct i2c_msg msgs[1]; | ||
| 230 | u8 *buffer; | ||
| 231 | int ret = AE_OK; | ||
| 232 | |||
| 233 | buffer = kzalloc(data_len + 1, GFP_KERNEL); | ||
| 234 | if (!buffer) | ||
| 235 | return AE_NO_MEMORY; | ||
| 236 | |||
| 237 | buffer[0] = cmd; | ||
| 238 | memcpy(buffer + 1, data, data_len); | ||
| 239 | |||
| 240 | msgs[0].addr = client->addr; | ||
| 241 | msgs[0].flags = client->flags; | ||
| 242 | msgs[0].len = data_len + 1; | ||
| 243 | msgs[0].buf = buffer; | ||
| 244 | |||
| 245 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
| 246 | if (ret < 0) | ||
| 247 | dev_err(&client->adapter->dev, "i2c write failed\n"); | ||
| 248 | |||
| 249 | kfree(buffer); | ||
| 250 | return ret; | ||
| 251 | } | ||
| 252 | |||
| 253 | static acpi_status | ||
| 254 | acpi_i2c_space_handler(u32 function, acpi_physical_address command, | ||
| 255 | u32 bits, u64 *value64, | ||
| 256 | void *handler_context, void *region_context) | ||
| 257 | { | ||
| 258 | struct gsb_buffer *gsb = (struct gsb_buffer *)value64; | ||
| 259 | struct acpi_i2c_handler_data *data = handler_context; | ||
| 260 | struct acpi_connection_info *info = &data->info; | ||
| 261 | struct acpi_resource_i2c_serialbus *sb; | ||
| 262 | struct i2c_adapter *adapter = data->adapter; | ||
| 263 | struct i2c_client client; | ||
| 264 | struct acpi_resource *ares; | ||
| 265 | u32 accessor_type = function >> 16; | ||
| 266 | u8 action = function & ACPI_IO_MASK; | ||
| 267 | acpi_status ret = AE_OK; | ||
| 268 | int status; | ||
| 269 | |||
| 270 | ret = acpi_buffer_to_resource(info->connection, info->length, &ares); | ||
| 271 | if (ACPI_FAILURE(ret)) | ||
| 272 | return ret; | ||
| 273 | |||
| 274 | if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
| 275 | ret = AE_BAD_PARAMETER; | ||
| 276 | goto err; | ||
| 277 | } | ||
| 278 | |||
| 279 | sb = &ares->data.i2c_serial_bus; | ||
| 280 | if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
| 281 | ret = AE_BAD_PARAMETER; | ||
| 282 | goto err; | ||
| 283 | } | ||
| 284 | |||
| 285 | memset(&client, 0, sizeof(client)); | ||
| 286 | client.adapter = adapter; | ||
| 287 | client.addr = sb->slave_address; | ||
| 288 | client.flags = 0; | ||
| 289 | |||
| 290 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
| 291 | client.flags |= I2C_CLIENT_TEN; | ||
| 292 | |||
| 293 | switch (accessor_type) { | ||
| 294 | case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: | ||
| 295 | if (action == ACPI_READ) { | ||
| 296 | status = i2c_smbus_read_byte(&client); | ||
| 297 | if (status >= 0) { | ||
| 298 | gsb->bdata = status; | ||
| 299 | status = 0; | ||
| 300 | } | ||
| 301 | } else { | ||
| 302 | status = i2c_smbus_write_byte(&client, gsb->bdata); | ||
| 303 | } | ||
| 304 | break; | ||
| 305 | |||
| 306 | case ACPI_GSB_ACCESS_ATTRIB_BYTE: | ||
| 307 | if (action == ACPI_READ) { | ||
| 308 | status = i2c_smbus_read_byte_data(&client, command); | ||
| 309 | if (status >= 0) { | ||
| 310 | gsb->bdata = status; | ||
| 311 | status = 0; | ||
| 312 | } | ||
| 313 | } else { | ||
| 314 | status = i2c_smbus_write_byte_data(&client, command, | ||
| 315 | gsb->bdata); | ||
| 316 | } | ||
| 317 | break; | ||
| 318 | |||
| 319 | case ACPI_GSB_ACCESS_ATTRIB_WORD: | ||
| 320 | if (action == ACPI_READ) { | ||
| 321 | status = i2c_smbus_read_word_data(&client, command); | ||
| 322 | if (status >= 0) { | ||
| 323 | gsb->wdata = status; | ||
| 324 | status = 0; | ||
| 325 | } | ||
| 326 | } else { | ||
| 327 | status = i2c_smbus_write_word_data(&client, command, | ||
| 328 | gsb->wdata); | ||
| 329 | } | ||
| 330 | break; | ||
| 331 | |||
| 332 | case ACPI_GSB_ACCESS_ATTRIB_BLOCK: | ||
| 333 | if (action == ACPI_READ) { | ||
| 334 | status = i2c_smbus_read_block_data(&client, command, | ||
| 335 | gsb->data); | ||
| 336 | if (status >= 0) { | ||
| 337 | gsb->len = status; | ||
| 338 | status = 0; | ||
| 339 | } | ||
| 340 | } else { | ||
| 341 | status = i2c_smbus_write_block_data(&client, command, | ||
| 342 | gsb->len, gsb->data); | ||
| 343 | } | ||
| 344 | break; | ||
| 345 | |||
| 346 | case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: | ||
| 347 | if (action == ACPI_READ) { | ||
| 348 | status = acpi_gsb_i2c_read_bytes(&client, command, | ||
| 349 | gsb->data, info->access_length); | ||
| 350 | if (status > 0) | ||
| 351 | status = 0; | ||
| 352 | } else { | ||
| 353 | status = acpi_gsb_i2c_write_bytes(&client, command, | ||
| 354 | gsb->data, info->access_length); | ||
| 355 | } | ||
| 356 | break; | ||
| 357 | |||
| 358 | default: | ||
| 359 | pr_info("protocol(0x%02x) is not supported.\n", accessor_type); | ||
| 360 | ret = AE_BAD_PARAMETER; | ||
| 361 | goto err; | ||
| 362 | } | ||
| 363 | |||
| 364 | gsb->status = status; | ||
| 365 | |||
| 366 | err: | ||
| 367 | ACPI_FREE(ares); | ||
| 368 | return ret; | ||
| 369 | } | ||
| 370 | |||
| 371 | |||
| 372 | static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
| 373 | { | ||
| 374 | acpi_handle handle; | ||
| 375 | struct acpi_i2c_handler_data *data; | ||
| 376 | acpi_status status; | ||
| 377 | |||
| 378 | if (!adapter->dev.parent) | ||
| 379 | return -ENODEV; | ||
| 380 | |||
| 381 | handle = ACPI_HANDLE(adapter->dev.parent); | ||
| 382 | |||
| 383 | if (!handle) | ||
| 384 | return -ENODEV; | ||
| 385 | |||
| 386 | data = kzalloc(sizeof(struct acpi_i2c_handler_data), | ||
| 387 | GFP_KERNEL); | ||
| 388 | if (!data) | ||
| 389 | return -ENOMEM; | ||
| 390 | |||
| 391 | data->adapter = adapter; | ||
| 392 | status = acpi_bus_attach_private_data(handle, (void *)data); | ||
| 393 | if (ACPI_FAILURE(status)) { | ||
| 394 | kfree(data); | ||
| 395 | return -ENOMEM; | ||
| 396 | } | ||
| 397 | |||
| 398 | status = acpi_install_address_space_handler(handle, | ||
| 399 | ACPI_ADR_SPACE_GSBUS, | ||
| 400 | &acpi_i2c_space_handler, | ||
| 401 | NULL, | ||
| 402 | data); | ||
| 403 | if (ACPI_FAILURE(status)) { | ||
| 404 | dev_err(&adapter->dev, "Error installing i2c space handler\n"); | ||
| 405 | acpi_bus_detach_private_data(handle); | ||
| 406 | kfree(data); | ||
| 407 | return -ENOMEM; | ||
| 408 | } | ||
| 409 | |||
| 410 | return 0; | ||
| 411 | } | ||
| 412 | |||
| 413 | static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
| 414 | { | ||
| 415 | acpi_handle handle; | ||
| 416 | struct acpi_i2c_handler_data *data; | ||
| 417 | acpi_status status; | ||
| 418 | |||
| 419 | if (!adapter->dev.parent) | ||
| 420 | return; | ||
| 421 | |||
| 422 | handle = ACPI_HANDLE(adapter->dev.parent); | ||
| 423 | |||
| 424 | if (!handle) | ||
| 425 | return; | ||
| 426 | |||
| 427 | acpi_remove_address_space_handler(handle, | ||
| 428 | ACPI_ADR_SPACE_GSBUS, | ||
| 429 | &acpi_i2c_space_handler); | ||
| 430 | |||
| 431 | status = acpi_bus_get_private_data(handle, (void **)&data); | ||
| 432 | if (ACPI_SUCCESS(status)) | ||
| 433 | kfree(data); | ||
| 434 | |||
| 435 | acpi_bus_detach_private_data(handle); | ||
| 436 | } | ||
| 437 | #else /* CONFIG_ACPI_I2C_OPREGION */ | ||
| 438 | static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
| 439 | { } | ||
| 440 | |||
| 441 | static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
| 442 | { return 0; } | ||
| 443 | #endif /* CONFIG_ACPI_I2C_OPREGION */ | ||
| 444 | |||
| 81 | /* ------------------------------------------------------------------------- */ | 445 | /* ------------------------------------------------------------------------- */ |
| 82 | 446 | ||
| 83 | static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, | 447 | static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, |
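The block added to i2c-core.c is the former i2c-acpi.c moved essentially verbatim: the device enumeration now sits under #if defined(CONFIG_ACPI) and the GSB operation-region handler under CONFIG_ACPI_I2C_OPREGION, each with static inline no-op stubs in the #else branch so callers elsewhere in the core need no ifdefs. Together with the Makefile hunk that drops the i2ccore composite object, this lets i2c-core.o be built directly again; the only functional tweaks in the move are that the helpers become static and that the space-handler install/remove paths now check adapter->dev.parent before calling ACPI_HANDLE() on it.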
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index a3a2e9c1639b..df0c4f605a21 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
| @@ -105,6 +105,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 105 | umem->length = size; | 105 | umem->length = size; |
| 106 | umem->offset = addr & ~PAGE_MASK; | 106 | umem->offset = addr & ~PAGE_MASK; |
| 107 | umem->page_size = PAGE_SIZE; | 107 | umem->page_size = PAGE_SIZE; |
| 108 | umem->pid = get_task_pid(current, PIDTYPE_PID); | ||
| 108 | /* | 109 | /* |
| 109 | * We ask for writable memory if any access flags other than | 110 | * We ask for writable memory if any access flags other than |
| 110 | * "remote read" are set. "Local write" and "remote write" | 111 | * "remote read" are set. "Local write" and "remote write" |
| @@ -198,6 +199,7 @@ out: | |||
| 198 | if (ret < 0) { | 199 | if (ret < 0) { |
| 199 | if (need_release) | 200 | if (need_release) |
| 200 | __ib_umem_release(context->device, umem, 0); | 201 | __ib_umem_release(context->device, umem, 0); |
| 202 | put_pid(umem->pid); | ||
| 201 | kfree(umem); | 203 | kfree(umem); |
| 202 | } else | 204 | } else |
| 203 | current->mm->pinned_vm = locked; | 205 | current->mm->pinned_vm = locked; |
| @@ -230,15 +232,19 @@ void ib_umem_release(struct ib_umem *umem) | |||
| 230 | { | 232 | { |
| 231 | struct ib_ucontext *context = umem->context; | 233 | struct ib_ucontext *context = umem->context; |
| 232 | struct mm_struct *mm; | 234 | struct mm_struct *mm; |
| 235 | struct task_struct *task; | ||
| 233 | unsigned long diff; | 236 | unsigned long diff; |
| 234 | 237 | ||
| 235 | __ib_umem_release(umem->context->device, umem, 1); | 238 | __ib_umem_release(umem->context->device, umem, 1); |
| 236 | 239 | ||
| 237 | mm = get_task_mm(current); | 240 | task = get_pid_task(umem->pid, PIDTYPE_PID); |
| 238 | if (!mm) { | 241 | put_pid(umem->pid); |
| 239 | kfree(umem); | 242 | if (!task) |
| 240 | return; | 243 | goto out; |
| 241 | } | 244 | mm = get_task_mm(task); |
| 245 | put_task_struct(task); | ||
| 246 | if (!mm) | ||
| 247 | goto out; | ||
| 242 | 248 | ||
| 243 | diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; | 249 | diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; |
| 244 | 250 | ||
| @@ -262,9 +268,10 @@ void ib_umem_release(struct ib_umem *umem) | |||
| 262 | } else | 268 | } else |
| 263 | down_write(&mm->mmap_sem); | 269 | down_write(&mm->mmap_sem); |
| 264 | 270 | ||
| 265 | current->mm->pinned_vm -= diff; | 271 | mm->pinned_vm -= diff; |
| 266 | up_write(&mm->mmap_sem); | 272 | up_write(&mm->mmap_sem); |
| 267 | mmput(mm); | 273 | mmput(mm); |
| 274 | out: | ||
| 268 | kfree(umem); | 275 | kfree(umem); |
| 269 | } | 276 | } |
| 270 | EXPORT_SYMBOL(ib_umem_release); | 277 | EXPORT_SYMBOL(ib_umem_release); |
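ib_umem_get() now records a reference to the registering task's pid, and ib_umem_release() resolves that pid back to the task and its mm before adjusting pinned_vm. That fixes two things visible in the hunk: the pinned-page accounting is charged back to the process that created the region even when the release runs from a different context (previously get_task_mm(current) could pick up an unrelated mm, or bail out and skip the decrement entirely), and the decrement now operates on that looked-up mm rather than on current->mm, which the old code dereferenced even though it had taken a reference on a potentially different one. The error path in ib_umem_get() gains the matching put_pid() so the reference is not leaked when pinning fails.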
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index e7bee46868d1..abd97247443e 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c | |||
| @@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst, | |||
| 140 | dst->packet_life_time = src->packet_life_time; | 140 | dst->packet_life_time = src->packet_life_time; |
| 141 | dst->preference = src->preference; | 141 | dst->preference = src->preference; |
| 142 | dst->packet_life_time_selector = src->packet_life_time_selector; | 142 | dst->packet_life_time_selector = src->packet_life_time_selector; |
| 143 | |||
| 144 | memset(dst->smac, 0, sizeof(dst->smac)); | ||
| 145 | memset(dst->dmac, 0, sizeof(dst->dmac)); | ||
| 146 | dst->vlan_id = 0xffff; | ||
| 143 | } | 147 | } |
| 144 | EXPORT_SYMBOL(ib_copy_path_rec_from_user); | 148 | EXPORT_SYMBOL(ib_copy_path_rec_from_user); |
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index dc66c4506916..1da1252dcdb3 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c | |||
| @@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages, | |||
| 54 | 54 | ||
| 55 | /* call with current->mm->mmap_sem held */ | 55 | /* call with current->mm->mmap_sem held */ |
| 56 | static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, | 56 | static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, |
| 57 | struct page **p, struct vm_area_struct **vma) | 57 | struct page **p) |
| 58 | { | 58 | { |
| 59 | unsigned long lock_limit; | 59 | unsigned long lock_limit; |
| 60 | size_t got; | 60 | size_t got; |
| @@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, | |||
| 74 | ret = get_user_pages(current, current->mm, | 74 | ret = get_user_pages(current, current->mm, |
| 75 | start_page + got * PAGE_SIZE, | 75 | start_page + got * PAGE_SIZE, |
| 76 | num_pages - got, 1, 1, | 76 | num_pages - got, 1, 1, |
| 77 | p + got, vma); | 77 | p + got, NULL); |
| 78 | if (ret < 0) | 78 | if (ret < 0) |
| 79 | goto bail_release; | 79 | goto bail_release; |
| 80 | } | 80 | } |
| @@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages, | |||
| 165 | 165 | ||
| 166 | down_write(¤t->mm->mmap_sem); | 166 | down_write(¤t->mm->mmap_sem); |
| 167 | 167 | ||
| 168 | ret = __ipath_get_user_pages(start_page, num_pages, p, NULL); | 168 | ret = __ipath_get_user_pages(start_page, num_pages, p); |
| 169 | 169 | ||
| 170 | up_write(¤t->mm->mmap_sem); | 170 | up_write(¤t->mm->mmap_sem); |
| 171 | 171 | ||
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index af8256353c7d..bda5994ceb68 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -59,6 +59,7 @@ | |||
| 59 | 59 | ||
| 60 | #define MLX4_IB_FLOW_MAX_PRIO 0xFFF | 60 | #define MLX4_IB_FLOW_MAX_PRIO 0xFFF |
| 61 | #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF | 61 | #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF |
| 62 | #define MLX4_IB_CARD_REV_A0 0xA0 | ||
| 62 | 63 | ||
| 63 | MODULE_AUTHOR("Roland Dreier"); | 64 | MODULE_AUTHOR("Roland Dreier"); |
| 64 | MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); | 65 | MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); |
| @@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev) | |||
| 119 | return dmfs; | 120 | return dmfs; |
| 120 | } | 121 | } |
| 121 | 122 | ||
| 123 | static int num_ib_ports(struct mlx4_dev *dev) | ||
| 124 | { | ||
| 125 | int ib_ports = 0; | ||
| 126 | int i; | ||
| 127 | |||
| 128 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | ||
| 129 | ib_ports++; | ||
| 130 | |||
| 131 | return ib_ports; | ||
| 132 | } | ||
| 133 | |||
| 122 | static int mlx4_ib_query_device(struct ib_device *ibdev, | 134 | static int mlx4_ib_query_device(struct ib_device *ibdev, |
| 123 | struct ib_device_attr *props) | 135 | struct ib_device_attr *props) |
| 124 | { | 136 | { |
| @@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
| 126 | struct ib_smp *in_mad = NULL; | 138 | struct ib_smp *in_mad = NULL; |
| 127 | struct ib_smp *out_mad = NULL; | 139 | struct ib_smp *out_mad = NULL; |
| 128 | int err = -ENOMEM; | 140 | int err = -ENOMEM; |
| 141 | int have_ib_ports; | ||
| 129 | 142 | ||
| 130 | in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); | 143 | in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); |
| 131 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); | 144 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); |
| @@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
| 142 | 155 | ||
| 143 | memset(props, 0, sizeof *props); | 156 | memset(props, 0, sizeof *props); |
| 144 | 157 | ||
| 158 | have_ib_ports = num_ib_ports(dev->dev); | ||
| 159 | |||
| 145 | props->fw_ver = dev->dev->caps.fw_ver; | 160 | props->fw_ver = dev->dev->caps.fw_ver; |
| 146 | props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | | 161 | props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | |
| 147 | IB_DEVICE_PORT_ACTIVE_EVENT | | 162 | IB_DEVICE_PORT_ACTIVE_EVENT | |
| @@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
| 152 | props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; | 167 | props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; |
| 153 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) | 168 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) |
| 154 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; | 169 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; |
| 155 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM) | 170 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports) |
| 156 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; | 171 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; |
| 157 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) | 172 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) |
| 158 | props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; | 173 | props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; |
| 159 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) | 174 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) |
| 160 | props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; | 175 | props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; |
| 161 | if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH) | 176 | if (dev->dev->caps.max_gso_sz && |
| 177 | (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) && | ||
| 178 | (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)) | ||
| 162 | props->device_cap_flags |= IB_DEVICE_UD_TSO; | 179 | props->device_cap_flags |= IB_DEVICE_UD_TSO; |
| 163 | if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) | 180 | if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) |
| 164 | props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; | 181 | props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; |
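The two query_device hunks above add a num_ib_ports() helper and the MLX4_IB_CARD_REV_A0 define, then use them so that automatic path migration is only advertised when the HCA actually has an IB port and UD TSO is withheld on A0 revision silicon. A condensed, self-contained view of that gating; field and flag names are taken from the hunks, while the wrapper function and local define are illustrative:

#include <linux/mlx4/device.h>
#include <rdma/ib_verbs.h>

#define EXAMPLE_CARD_REV_A0 0xA0        /* mirrors MLX4_IB_CARD_REV_A0 */

static u32 example_gate_caps(struct mlx4_dev *dev)
{
        u32 flags = 0;
        int ib_ports = 0;
        int i;

        /* Count ports running the InfiniBand link layer. */
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                ib_ports++;

        /* APM is meaningless on a RoCE-only device. */
        if ((dev->caps.flags & MLX4_DEV_CAP_FLAG_APM) && ib_ports)
                flags |= IB_DEVICE_AUTO_PATH_MIG;

        /* UD TSO is not advertised on A0 revision hardware. */
        if (dev->caps.max_gso_sz &&
            dev->rev_id != EXAMPLE_CARD_REV_A0 &&
            (dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
                flags |= IB_DEVICE_UD_TSO;

        return flags;
}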
| @@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, | |||
| 357 | props->state = IB_PORT_DOWN; | 374 | props->state = IB_PORT_DOWN; |
| 358 | props->phys_state = state_to_phys_state(props->state); | 375 | props->phys_state = state_to_phys_state(props->state); |
| 359 | props->active_mtu = IB_MTU_256; | 376 | props->active_mtu = IB_MTU_256; |
| 360 | spin_lock(&iboe->lock); | 377 | spin_lock_bh(&iboe->lock); |
| 361 | ndev = iboe->netdevs[port - 1]; | 378 | ndev = iboe->netdevs[port - 1]; |
| 362 | if (!ndev) | 379 | if (!ndev) |
| 363 | goto out_unlock; | 380 | goto out_unlock; |
| @@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, | |||
| 369 | IB_PORT_ACTIVE : IB_PORT_DOWN; | 386 | IB_PORT_ACTIVE : IB_PORT_DOWN; |
| 370 | props->phys_state = state_to_phys_state(props->state); | 387 | props->phys_state = state_to_phys_state(props->state); |
| 371 | out_unlock: | 388 | out_unlock: |
| 372 | spin_unlock(&iboe->lock); | 389 | spin_unlock_bh(&iboe->lock); |
| 373 | out: | 390 | out: |
| 374 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); | 391 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); |
| 375 | return err; | 392 | return err; |
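Throughout this file the iboe->lock acquisitions change from spin_lock()/spin_unlock() to the _bh variants, which keep local softirqs disabled while the lock is held; that is required when the same lock can also be taken from bottom-half context (for example from address-notifier callbacks). The pattern in isolation, with a placeholder structure and helper rather than mlx4 code:

#include <linux/spinlock.h>
#include <linux/netdevice.h>

struct example_port {
        spinlock_t lock;                /* also taken from BH context */
        struct net_device *netdev;
};

/* Process-context reader: block local BHs while holding the lock so a
 * softirq user of the same lock cannot preempt us and spin forever. */
static struct net_device *example_get_netdev(struct example_port *port)
{
        struct net_device *ndev;

        spin_lock_bh(&port->lock);
        ndev = port->netdev;
        if (ndev)
                dev_hold(ndev);         /* keep it alive past the unlock */
        spin_unlock_bh(&port->lock);

        return ndev;                    /* caller must dev_put() when done */
}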
| @@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, | |||
| 811 | if (!mqp->port) | 828 | if (!mqp->port) |
| 812 | return 0; | 829 | return 0; |
| 813 | 830 | ||
| 814 | spin_lock(&mdev->iboe.lock); | 831 | spin_lock_bh(&mdev->iboe.lock); |
| 815 | ndev = mdev->iboe.netdevs[mqp->port - 1]; | 832 | ndev = mdev->iboe.netdevs[mqp->port - 1]; |
| 816 | if (ndev) | 833 | if (ndev) |
| 817 | dev_hold(ndev); | 834 | dev_hold(ndev); |
| 818 | spin_unlock(&mdev->iboe.lock); | 835 | spin_unlock_bh(&mdev->iboe.lock); |
| 819 | 836 | ||
| 820 | if (ndev) { | 837 | if (ndev) { |
| 821 | ret = 1; | 838 | ret = 1; |
| @@ -1292,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |||
| 1292 | mutex_lock(&mqp->mutex); | 1309 | mutex_lock(&mqp->mutex); |
| 1293 | ge = find_gid_entry(mqp, gid->raw); | 1310 | ge = find_gid_entry(mqp, gid->raw); |
| 1294 | if (ge) { | 1311 | if (ge) { |
| 1295 | spin_lock(&mdev->iboe.lock); | 1312 | spin_lock_bh(&mdev->iboe.lock); |
| 1296 | ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL; | 1313 | ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL; |
| 1297 | if (ndev) | 1314 | if (ndev) |
| 1298 | dev_hold(ndev); | 1315 | dev_hold(ndev); |
| 1299 | spin_unlock(&mdev->iboe.lock); | 1316 | spin_unlock_bh(&mdev->iboe.lock); |
| 1300 | if (ndev) | 1317 | if (ndev) |
| 1301 | dev_put(ndev); | 1318 | dev_put(ndev); |
| 1302 | list_del(&ge->list); | 1319 | list_del(&ge->list); |
| @@ -1417,6 +1434,9 @@ static void update_gids_task(struct work_struct *work) | |||
| 1417 | int err; | 1434 | int err; |
| 1418 | struct mlx4_dev *dev = gw->dev->dev; | 1435 | struct mlx4_dev *dev = gw->dev->dev; |
| 1419 | 1436 | ||
| 1437 | if (!gw->dev->ib_active) | ||
| 1438 | return; | ||
| 1439 | |||
| 1420 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 1440 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
| 1421 | if (IS_ERR(mailbox)) { | 1441 | if (IS_ERR(mailbox)) { |
| 1422 | pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox)); | 1442 | pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox)); |
| @@ -1447,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work) | |||
| 1447 | int err; | 1467 | int err; |
| 1448 | struct mlx4_dev *dev = gw->dev->dev; | 1468 | struct mlx4_dev *dev = gw->dev->dev; |
| 1449 | 1469 | ||
| 1470 | if (!gw->dev->ib_active) | ||
| 1471 | return; | ||
| 1472 | |||
| 1450 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 1473 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
| 1451 | if (IS_ERR(mailbox)) { | 1474 | if (IS_ERR(mailbox)) { |
| 1452 | pr_warn("reset gid table failed\n"); | 1475 | pr_warn("reset gid table failed\n"); |
| @@ -1581,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, | |||
| 1581 | return 0; | 1604 | return 0; |
| 1582 | 1605 | ||
| 1583 | iboe = &ibdev->iboe; | 1606 | iboe = &ibdev->iboe; |
| 1584 | spin_lock(&iboe->lock); | 1607 | spin_lock_bh(&iboe->lock); |
| 1585 | 1608 | ||
| 1586 | for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) | 1609 | for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) |
| 1587 | if ((netif_is_bond_master(real_dev) && | 1610 | if ((netif_is_bond_master(real_dev) && |
| @@ -1591,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, | |||
| 1591 | update_gid_table(ibdev, port, gid, | 1614 | update_gid_table(ibdev, port, gid, |
| 1592 | event == NETDEV_DOWN, 0); | 1615 | event == NETDEV_DOWN, 0); |
| 1593 | 1616 | ||
| 1594 | spin_unlock(&iboe->lock); | 1617 | spin_unlock_bh(&iboe->lock); |
| 1595 | return 0; | 1618 | return 0; |
| 1596 | 1619 | ||
| 1597 | } | 1620 | } |
| @@ -1664,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, | |||
| 1664 | new_smac = mlx4_mac_to_u64(dev->dev_addr); | 1687 | new_smac = mlx4_mac_to_u64(dev->dev_addr); |
| 1665 | read_unlock(&dev_base_lock); | 1688 | read_unlock(&dev_base_lock); |
| 1666 | 1689 | ||
| 1690 | atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); | ||
| 1691 | |||
| 1692 | /* no need for update QP1 and mac registration in non-SRIOV */ | ||
| 1693 | if (!mlx4_is_mfunc(ibdev->dev)) | ||
| 1694 | return; | ||
| 1695 | |||
| 1667 | mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); | 1696 | mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); |
| 1668 | qp = ibdev->qp1_proxy[port - 1]; | 1697 | qp = ibdev->qp1_proxy[port - 1]; |
| 1669 | if (qp) { | 1698 | if (qp) { |
| 1670 | int new_smac_index; | 1699 | int new_smac_index; |
| 1671 | u64 old_smac = qp->pri.smac; | 1700 | u64 old_smac; |
| 1672 | struct mlx4_update_qp_params update_params; | 1701 | struct mlx4_update_qp_params update_params; |
| 1673 | 1702 | ||
| 1703 | mutex_lock(&qp->mutex); | ||
| 1704 | old_smac = qp->pri.smac; | ||
| 1674 | if (new_smac == old_smac) | 1705 | if (new_smac == old_smac) |
| 1675 | goto unlock; | 1706 | goto unlock; |
| 1676 | 1707 | ||
| @@ -1680,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, | |||
| 1680 | goto unlock; | 1711 | goto unlock; |
| 1681 | 1712 | ||
| 1682 | update_params.smac_index = new_smac_index; | 1713 | update_params.smac_index = new_smac_index; |
| 1683 | if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC, | 1714 | if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, |
| 1684 | &update_params)) { | 1715 | &update_params)) { |
| 1685 | release_mac = new_smac; | 1716 | release_mac = new_smac; |
| 1686 | goto unlock; | 1717 | goto unlock; |
| 1687 | } | 1718 | } |
| 1688 | 1719 | /* if old port was zero, no mac was yet registered for this QP */ | |
| 1720 | if (qp->pri.smac_port) | ||
| 1721 | release_mac = old_smac; | ||
| 1689 | qp->pri.smac = new_smac; | 1722 | qp->pri.smac = new_smac; |
| 1723 | qp->pri.smac_port = port; | ||
| 1690 | qp->pri.smac_index = new_smac_index; | 1724 | qp->pri.smac_index = new_smac_index; |
| 1691 | |||
| 1692 | release_mac = old_smac; | ||
| 1693 | } | 1725 | } |
| 1694 | 1726 | ||
| 1695 | unlock: | 1727 | unlock: |
| 1696 | mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); | ||
| 1697 | if (release_mac != MLX4_IB_INVALID_MAC) | 1728 | if (release_mac != MLX4_IB_INVALID_MAC) |
| 1698 | mlx4_unregister_mac(ibdev->dev, port, release_mac); | 1729 | mlx4_unregister_mac(ibdev->dev, port, release_mac); |
| 1730 | if (qp) | ||
| 1731 | mutex_unlock(&qp->mutex); | ||
| 1732 | mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); | ||
| 1699 | } | 1733 | } |
| 1700 | 1734 | ||
| 1701 | static void mlx4_ib_get_dev_addr(struct net_device *dev, | 1735 | static void mlx4_ib_get_dev_addr(struct net_device *dev, |
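Together with the atomic64_t mac[] array added to struct mlx4_ib_iboe further down, the hunk above caches each port's current source MAC as a single 64-bit value, so readers such as the QP1 header builder can fetch it atomically without dereferencing the netdev or taking iboe->lock. A sketch of that caching idea; the packing loop mirrors what mlx4_mac_to_u64() (used in the hunk) does, and all names here are placeholders:

#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static atomic64_t example_port_mac[2];          /* one cached MAC per port */

/* Writer: called from the netdev event path when the MAC may have changed. */
static void example_cache_mac(struct net_device *dev, int port)
{
        u64 mac = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                mac = (mac << 8) | dev->dev_addr[i];    /* MSB first */

        atomic64_set(&example_port_mac[port - 1], mac);
}

/* Reader: lock-free, always sees one consistent 64-bit value. */
static u64 example_current_mac(int port)
{
        return atomic64_read(&example_port_mac[port - 1]);
}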
| @@ -1706,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
| 1706 | struct inet6_dev *in6_dev; | 1740 | struct inet6_dev *in6_dev; |
| 1707 | union ib_gid *pgid; | 1741 | union ib_gid *pgid; |
| 1708 | struct inet6_ifaddr *ifp; | 1742 | struct inet6_ifaddr *ifp; |
| 1743 | union ib_gid default_gid; | ||
| 1709 | #endif | 1744 | #endif |
| 1710 | union ib_gid gid; | 1745 | union ib_gid gid; |
| 1711 | 1746 | ||
| @@ -1726,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
| 1726 | in_dev_put(in_dev); | 1761 | in_dev_put(in_dev); |
| 1727 | } | 1762 | } |
| 1728 | #if IS_ENABLED(CONFIG_IPV6) | 1763 | #if IS_ENABLED(CONFIG_IPV6) |
| 1764 | mlx4_make_default_gid(dev, &default_gid); | ||
| 1729 | /* IPv6 gids */ | 1765 | /* IPv6 gids */ |
| 1730 | in6_dev = in6_dev_get(dev); | 1766 | in6_dev = in6_dev_get(dev); |
| 1731 | if (in6_dev) { | 1767 | if (in6_dev) { |
| 1732 | read_lock_bh(&in6_dev->lock); | 1768 | read_lock_bh(&in6_dev->lock); |
| 1733 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { | 1769 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { |
| 1734 | pgid = (union ib_gid *)&ifp->addr; | 1770 | pgid = (union ib_gid *)&ifp->addr; |
| 1771 | if (!memcmp(pgid, &default_gid, sizeof(*pgid))) | ||
| 1772 | continue; | ||
| 1735 | update_gid_table(ibdev, port, pgid, 0, 0); | 1773 | update_gid_table(ibdev, port, pgid, 0, 0); |
| 1736 | } | 1774 | } |
| 1737 | read_unlock_bh(&in6_dev->lock); | 1775 | read_unlock_bh(&in6_dev->lock); |
| @@ -1753,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) | |||
| 1753 | struct net_device *dev; | 1791 | struct net_device *dev; |
| 1754 | struct mlx4_ib_iboe *iboe = &ibdev->iboe; | 1792 | struct mlx4_ib_iboe *iboe = &ibdev->iboe; |
| 1755 | int i; | 1793 | int i; |
| 1794 | int err = 0; | ||
| 1756 | 1795 | ||
| 1757 | for (i = 1; i <= ibdev->num_ports; ++i) | 1796 | for (i = 1; i <= ibdev->num_ports; ++i) { |
| 1758 | if (reset_gid_table(ibdev, i)) | 1797 | if (rdma_port_get_link_layer(&ibdev->ib_dev, i) == |
| 1759 | return -1; | 1798 | IB_LINK_LAYER_ETHERNET) { |
| 1799 | err = reset_gid_table(ibdev, i); | ||
| 1800 | if (err) | ||
| 1801 | goto out; | ||
| 1802 | } | ||
| 1803 | } | ||
| 1760 | 1804 | ||
| 1761 | read_lock(&dev_base_lock); | 1805 | read_lock(&dev_base_lock); |
| 1762 | spin_lock(&iboe->lock); | 1806 | spin_lock_bh(&iboe->lock); |
| 1763 | 1807 | ||
| 1764 | for_each_netdev(&init_net, dev) { | 1808 | for_each_netdev(&init_net, dev) { |
| 1765 | u8 port = mlx4_ib_get_dev_port(dev, ibdev); | 1809 | u8 port = mlx4_ib_get_dev_port(dev, ibdev); |
| 1766 | if (port) | 1810 | /* port will be non-zero only for ETH ports */ |
| 1811 | if (port) { | ||
| 1812 | mlx4_ib_set_default_gid(ibdev, dev, port); | ||
| 1767 | mlx4_ib_get_dev_addr(dev, ibdev, port); | 1813 | mlx4_ib_get_dev_addr(dev, ibdev, port); |
| 1814 | } | ||
| 1768 | } | 1815 | } |
| 1769 | 1816 | ||
| 1770 | spin_unlock(&iboe->lock); | 1817 | spin_unlock_bh(&iboe->lock); |
| 1771 | read_unlock(&dev_base_lock); | 1818 | read_unlock(&dev_base_lock); |
| 1772 | 1819 | out: | |
| 1773 | return 0; | 1820 | return err; |
| 1774 | } | 1821 | } |
| 1775 | 1822 | ||
| 1776 | static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, | 1823 | static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, |
| @@ -1784,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, | |||
| 1784 | 1831 | ||
| 1785 | iboe = &ibdev->iboe; | 1832 | iboe = &ibdev->iboe; |
| 1786 | 1833 | ||
| 1787 | spin_lock(&iboe->lock); | 1834 | spin_lock_bh(&iboe->lock); |
| 1788 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { | 1835 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { |
| 1789 | enum ib_port_state port_state = IB_PORT_NOP; | 1836 | enum ib_port_state port_state = IB_PORT_NOP; |
| 1790 | struct net_device *old_master = iboe->masters[port - 1]; | 1837 | struct net_device *old_master = iboe->masters[port - 1]; |
| @@ -1816,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, | |||
| 1816 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? | 1863 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? |
| 1817 | IB_PORT_ACTIVE : IB_PORT_DOWN; | 1864 | IB_PORT_ACTIVE : IB_PORT_DOWN; |
| 1818 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | 1865 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); |
| 1819 | } else { | 1866 | if (curr_master) { |
| 1820 | reset_gid_table(ibdev, port); | 1867 | /* if using bonding/team and a slave port is down, we |
| 1821 | } | 1868 | * don't want the bond IP based gids in the table since |
| 1822 | /* if using bonding/team and a slave port is down, we don't the bond IP | 1869 | * flows that select port by gid may get the down port. |
| 1823 | * based gids in the table since flows that select port by gid may get | 1870 | */ |
| 1824 | * the down port. | 1871 | if (port_state == IB_PORT_DOWN) { |
| 1825 | */ | 1872 | reset_gid_table(ibdev, port); |
| 1826 | if (curr_master && (port_state == IB_PORT_DOWN)) { | 1873 | mlx4_ib_set_default_gid(ibdev, |
| 1827 | reset_gid_table(ibdev, port); | 1874 | curr_netdev, |
| 1828 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | 1875 | port); |
| 1829 | } | 1876 | } else { |
| 1830 | /* if bonding is used it is possible that we add it to masters | 1877 | /* gids from the upper dev (bond/team) |
| 1831 | * only after IP address is assigned to the net bonding | 1878 | * should appear in port's gid table |
| 1832 | * interface. | 1879 | */ |
| 1833 | */ | 1880 | mlx4_ib_get_dev_addr(curr_master, |
| 1834 | if (curr_master && (old_master != curr_master)) { | 1881 | ibdev, port); |
| 1835 | reset_gid_table(ibdev, port); | 1882 | } |
| 1836 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | 1883 | } |
| 1837 | mlx4_ib_get_dev_addr(curr_master, ibdev, port); | 1884 | /* if bonding is used it is possible that we add it to |
| 1838 | } | 1885 | * masters only after IP address is assigned to the |
| 1886 | * net bonding interface. | ||
| 1887 | */ | ||
| 1888 | if (curr_master && (old_master != curr_master)) { | ||
| 1889 | reset_gid_table(ibdev, port); | ||
| 1890 | mlx4_ib_set_default_gid(ibdev, | ||
| 1891 | curr_netdev, port); | ||
| 1892 | mlx4_ib_get_dev_addr(curr_master, ibdev, port); | ||
| 1893 | } | ||
| 1839 | 1894 | ||
| 1840 | if (!curr_master && (old_master != curr_master)) { | 1895 | if (!curr_master && (old_master != curr_master)) { |
| 1896 | reset_gid_table(ibdev, port); | ||
| 1897 | mlx4_ib_set_default_gid(ibdev, | ||
| 1898 | curr_netdev, port); | ||
| 1899 | mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); | ||
| 1900 | } | ||
| 1901 | } else { | ||
| 1841 | reset_gid_table(ibdev, port); | 1902 | reset_gid_table(ibdev, port); |
| 1842 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
| 1843 | mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); | ||
| 1844 | } | 1903 | } |
| 1845 | } | 1904 | } |
| 1846 | 1905 | ||
| 1847 | spin_unlock(&iboe->lock); | 1906 | spin_unlock_bh(&iboe->lock); |
| 1848 | 1907 | ||
| 1849 | if (update_qps_port > 0) | 1908 | if (update_qps_port > 0) |
| 1850 | mlx4_ib_update_qps(ibdev, dev, update_qps_port); | 1909 | mlx4_ib_update_qps(ibdev, dev, update_qps_port); |
| @@ -2186,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 2186 | goto err_steer_free_bitmap; | 2245 | goto err_steer_free_bitmap; |
| 2187 | } | 2246 | } |
| 2188 | 2247 | ||
| 2248 | for (j = 1; j <= ibdev->dev->caps.num_ports; j++) | ||
| 2249 | atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); | ||
| 2250 | |||
| 2189 | if (ib_register_device(&ibdev->ib_dev, NULL)) | 2251 | if (ib_register_device(&ibdev->ib_dev, NULL)) |
| 2190 | goto err_steer_free_bitmap; | 2252 | goto err_steer_free_bitmap; |
| 2191 | 2253 | ||
| @@ -2222,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 2222 | } | 2284 | } |
| 2223 | } | 2285 | } |
| 2224 | #endif | 2286 | #endif |
| 2225 | for (i = 1 ; i <= ibdev->num_ports ; ++i) | 2287 | if (mlx4_ib_init_gid_table(ibdev)) |
| 2226 | reset_gid_table(ibdev, i); | 2288 | goto err_notif; |
| 2227 | rtnl_lock(); | ||
| 2228 | mlx4_ib_scan_netdevs(ibdev, NULL, 0); | ||
| 2229 | rtnl_unlock(); | ||
| 2230 | mlx4_ib_init_gid_table(ibdev); | ||
| 2231 | } | 2289 | } |
| 2232 | 2290 | ||
| 2233 | for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { | 2291 | for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { |
| @@ -2375,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) | |||
| 2375 | struct mlx4_ib_dev *ibdev = ibdev_ptr; | 2433 | struct mlx4_ib_dev *ibdev = ibdev_ptr; |
| 2376 | int p; | 2434 | int p; |
| 2377 | 2435 | ||
| 2436 | ibdev->ib_active = false; | ||
| 2437 | flush_workqueue(wq); | ||
| 2438 | |||
| 2378 | mlx4_ib_close_sriov(ibdev); | 2439 | mlx4_ib_close_sriov(ibdev); |
| 2379 | mlx4_ib_mad_cleanup(ibdev); | 2440 | mlx4_ib_mad_cleanup(ibdev); |
| 2380 | ib_unregister_device(&ibdev->ib_dev); | 2441 | ib_unregister_device(&ibdev->ib_dev); |
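The remove path above now marks the device inactive and flushes the workqueue before any teardown, and the GID update/reset work functions earlier in this diff bail out when ib_active is false. That is the usual "mark dead, then drain" ordering for deferred work; sketched here with placeholder names:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct example_dev {
        bool active;                        /* cleared at the start of remove */
        struct workqueue_struct *wq;
        struct work_struct gid_work;
};

static void example_gid_task(struct work_struct *work)
{
        struct example_dev *dev = container_of(work, struct example_dev,
                                               gid_work);

        /* Work queued before removal may still be pending; once the device
         * is marked inactive it must not touch the hardware. */
        if (!dev->active)
                return;

        /* ... issue firmware commands to update the GID table ... */
}

static void example_remove(struct example_dev *dev)
{
        dev->active = false;        /* pending work becomes a no-op */
        flush_workqueue(dev->wq);   /* wait out anything already running */
        /* now safe to unregister and free hardware resources */
}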
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index e8cad3926bfc..6eb743f65f6f 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
| @@ -451,6 +451,7 @@ struct mlx4_ib_iboe { | |||
| 451 | spinlock_t lock; | 451 | spinlock_t lock; |
| 452 | struct net_device *netdevs[MLX4_MAX_PORTS]; | 452 | struct net_device *netdevs[MLX4_MAX_PORTS]; |
| 453 | struct net_device *masters[MLX4_MAX_PORTS]; | 453 | struct net_device *masters[MLX4_MAX_PORTS]; |
| 454 | atomic64_t mac[MLX4_MAX_PORTS]; | ||
| 454 | struct notifier_block nb; | 455 | struct notifier_block nb; |
| 455 | struct notifier_block nb_inet; | 456 | struct notifier_block nb_inet; |
| 456 | struct notifier_block nb_inet6; | 457 | struct notifier_block nb_inet6; |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 9b0e80e59b08..8f9325cfc85d 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
| @@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, | |||
| 234 | 0); | 234 | 0); |
| 235 | if (IS_ERR(mmr->umem)) { | 235 | if (IS_ERR(mmr->umem)) { |
| 236 | err = PTR_ERR(mmr->umem); | 236 | err = PTR_ERR(mmr->umem); |
| 237 | /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */ | ||
| 237 | mmr->umem = NULL; | 238 | mmr->umem = NULL; |
| 238 | goto release_mpt_entry; | 239 | goto release_mpt_entry; |
| 239 | } | 240 | } |
| 240 | n = ib_umem_page_count(mmr->umem); | 241 | n = ib_umem_page_count(mmr->umem); |
| 241 | shift = ilog2(mmr->umem->page_size); | 242 | shift = ilog2(mmr->umem->page_size); |
| 242 | 243 | ||
| 243 | mmr->mmr.iova = virt_addr; | ||
| 244 | mmr->mmr.size = length; | ||
| 245 | err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr, | 244 | err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr, |
| 246 | virt_addr, length, n, shift, | 245 | virt_addr, length, n, shift, |
| 247 | *pmpt_entry); | 246 | *pmpt_entry); |
| @@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, | |||
| 249 | ib_umem_release(mmr->umem); | 248 | ib_umem_release(mmr->umem); |
| 250 | goto release_mpt_entry; | 249 | goto release_mpt_entry; |
| 251 | } | 250 | } |
| 251 | mmr->mmr.iova = virt_addr; | ||
| 252 | mmr->mmr.size = length; | ||
| 252 | 253 | ||
| 253 | err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); | 254 | err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); |
| 254 | if (err) { | 255 | if (err) { |
| @@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, | |||
| 262 | * return a failure. But dereg_mr will free the resources. | 263 | * return a failure. But dereg_mr will free the resources. |
| 263 | */ | 264 | */ |
| 264 | err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry); | 265 | err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry); |
| 266 | if (!err && flags & IB_MR_REREG_ACCESS) | ||
| 267 | mmr->mmr.access = mr_access_flags; | ||
| 265 | 268 | ||
| 266 | release_mpt_entry: | 269 | release_mpt_entry: |
| 267 | mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry); | 270 | mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry); |
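The mr.c hunks delay updating mmr->mmr.iova, size and (for access changes) the access flags until after the firmware write that re-registers the region succeeds, so a failed command leaves the software MR still describing what the hardware has. The ordering reduced to its core; the types are illustrative and example_hw_rereg() stands in for mlx4_mr_rereg_mem_write()/mlx4_mr_hw_write_mpt():

#include <linux/types.h>

struct example_mr {
        u64 iova;
        u64 size;
        int access;
};

/* Stand-in for the firmware command that actually changes the MR. */
static int example_hw_rereg(struct example_mr *mr, u64 iova, u64 size,
                            int access)
{
        return 0;
}

static int example_rereg(struct example_mr *mr, u64 iova, u64 size, int access)
{
        int err = example_hw_rereg(mr, iova, size, access);

        if (err)
                return err;     /* software copy still matches the old MR */

        /* Commit to the software object only after the hardware accepted it. */
        mr->iova = iova;
        mr->size = size;
        mr->access = access;
        return 0;
}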
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index efb9eff8906c..9c5150c3cb31 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
| @@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
| 964 | MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) | 964 | MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) |
| 965 | pr_warn("modify QP %06x to RESET failed.\n", | 965 | pr_warn("modify QP %06x to RESET failed.\n", |
| 966 | qp->mqp.qpn); | 966 | qp->mqp.qpn); |
| 967 | if (qp->pri.smac) { | 967 | if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { |
| 968 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); | 968 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); |
| 969 | qp->pri.smac = 0; | 969 | qp->pri.smac = 0; |
| 970 | qp->pri.smac_port = 0; | ||
| 970 | } | 971 | } |
| 971 | if (qp->alt.smac) { | 972 | if (qp->alt.smac) { |
| 972 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); | 973 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); |
| @@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, | |||
| 1325 | * If one was already assigned, but the new mac differs, | 1326 | * If one was already assigned, but the new mac differs, |
| 1326 | * unregister the old one and register the new one. | 1327 | * unregister the old one and register the new one. |
| 1327 | */ | 1328 | */ |
| 1328 | if (!smac_info->smac || smac_info->smac != smac) { | 1329 | if ((!smac_info->smac && !smac_info->smac_port) || |
| 1330 | smac_info->smac != smac) { | ||
| 1329 | /* register candidate now, unreg if needed, after success */ | 1331 | /* register candidate now, unreg if needed, after success */ |
| 1330 | smac_index = mlx4_register_mac(dev->dev, port, smac); | 1332 | smac_index = mlx4_register_mac(dev->dev, port, smac); |
| 1331 | if (smac_index >= 0) { | 1333 | if (smac_index >= 0) { |
| @@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) | |||
| 1390 | static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, | 1392 | static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, |
| 1391 | struct mlx4_qp_context *context) | 1393 | struct mlx4_qp_context *context) |
| 1392 | { | 1394 | { |
| 1393 | struct net_device *ndev; | ||
| 1394 | u64 u64_mac; | 1395 | u64 u64_mac; |
| 1395 | int smac_index; | 1396 | int smac_index; |
| 1396 | 1397 | ||
| 1397 | 1398 | u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); | |
| 1398 | ndev = dev->iboe.netdevs[qp->port - 1]; | ||
| 1399 | if (ndev) { | ||
| 1400 | smac = ndev->dev_addr; | ||
| 1401 | u64_mac = mlx4_mac_to_u64(smac); | ||
| 1402 | } else { | ||
| 1403 | u64_mac = dev->dev->caps.def_mac[qp->port]; | ||
| 1404 | } | ||
| 1405 | 1399 | ||
| 1406 | context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); | 1400 | context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); |
| 1407 | if (!qp->pri.smac) { | 1401 | if (!qp->pri.smac && !qp->pri.smac_port) { |
| 1408 | smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); | 1402 | smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); |
| 1409 | if (smac_index >= 0) { | 1403 | if (smac_index >= 0) { |
| 1410 | qp->pri.candidate_smac_index = smac_index; | 1404 | qp->pri.candidate_smac_index = smac_index; |
| @@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
| 1432 | int steer_qp = 0; | 1426 | int steer_qp = 0; |
| 1433 | int err = -EINVAL; | 1427 | int err = -EINVAL; |
| 1434 | 1428 | ||
| 1429 | /* APM is not supported under RoCE */ | ||
| 1430 | if (attr_mask & IB_QP_ALT_PATH && | ||
| 1431 | rdma_port_get_link_layer(&dev->ib_dev, qp->port) == | ||
| 1432 | IB_LINK_LAYER_ETHERNET) | ||
| 1433 | return -ENOTSUPP; | ||
| 1434 | |||
| 1435 | context = kzalloc(sizeof *context, GFP_KERNEL); | 1435 | context = kzalloc(sizeof *context, GFP_KERNEL); |
| 1436 | if (!context) | 1436 | if (!context) |
| 1437 | return -ENOMEM; | 1437 | return -ENOMEM; |
| @@ -1682,7 +1682,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
| 1682 | MLX4_IB_LINK_TYPE_ETH; | 1682 | MLX4_IB_LINK_TYPE_ETH; |
| 1683 | if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { | 1683 | if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { |
| 1684 | /* set QP to receive both tunneled & non-tunneled packets */ | 1684 | /* set QP to receive both tunneled & non-tunneled packets */ |
| 1685 | if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET))) | 1685 | if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET))) |
| 1686 | context->srqn = cpu_to_be32(7 << 28); | 1686 | context->srqn = cpu_to_be32(7 << 28); |
| 1687 | } | 1687 | } |
| 1688 | } | 1688 | } |
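The one-line change to the VXLAN branch above is an endianness fix: context->flags is a big-endian field, so testing the RSS bit against a raw host-order constant checks the wrong bit on little-endian machines. Bits in __be32 fields should be tested with a converted mask, for example (the flag name and offset here are illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_RSS_FLAG_OFFSET 13      /* illustrative bit position */

static bool example_rss_flag_set(__be32 flags)
{
        /* Convert the constant mask rather than the stored field: the swap
         * folds away at compile time and the comparison stays in the
         * field's byte order. */
        return !!(flags & cpu_to_be32(1 << EXAMPLE_RSS_FLAG_OFFSET));
}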
| @@ -1786,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
| 1786 | if (qp->flags & MLX4_IB_QP_NETIF) | 1786 | if (qp->flags & MLX4_IB_QP_NETIF) |
| 1787 | mlx4_ib_steer_qp_reg(dev, qp, 0); | 1787 | mlx4_ib_steer_qp_reg(dev, qp, 0); |
| 1788 | } | 1788 | } |
| 1789 | if (qp->pri.smac) { | 1789 | if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { |
| 1790 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); | 1790 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); |
| 1791 | qp->pri.smac = 0; | 1791 | qp->pri.smac = 0; |
| 1792 | qp->pri.smac_port = 0; | ||
| 1792 | } | 1793 | } |
| 1793 | if (qp->alt.smac) { | 1794 | if (qp->alt.smac) { |
| 1794 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); | 1795 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); |
| @@ -1812,11 +1813,12 @@ out: | |||
| 1812 | if (err && steer_qp) | 1813 | if (err && steer_qp) |
| 1813 | mlx4_ib_steer_qp_reg(dev, qp, 0); | 1814 | mlx4_ib_steer_qp_reg(dev, qp, 0); |
| 1814 | kfree(context); | 1815 | kfree(context); |
| 1815 | if (qp->pri.candidate_smac) { | 1816 | if (qp->pri.candidate_smac || |
| 1817 | (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { | ||
| 1816 | if (err) { | 1818 | if (err) { |
| 1817 | mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); | 1819 | mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); |
| 1818 | } else { | 1820 | } else { |
| 1819 | if (qp->pri.smac) | 1821 | if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) |
| 1820 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); | 1822 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); |
| 1821 | qp->pri.smac = qp->pri.candidate_smac; | 1823 | qp->pri.smac = qp->pri.candidate_smac; |
| 1822 | qp->pri.smac_index = qp->pri.candidate_smac_index; | 1824 | qp->pri.smac_index = qp->pri.candidate_smac_index; |
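Several qp.c hunks in this patch change the test for "is a source MAC registered on the primary path" from qp->pri.smac alone to smac or smac_port, and the unregister paths now clear both fields together. The invariant being relied on, written out; the field widths follow the driver and the helper itself is purely illustrative:

#include <linux/types.h>

/* A registration is live when either the cached MAC is non-zero or a port
 * was recorded for it (which covers an all-zero MAC).  Unregistering must
 * clear both fields so this test goes back to false. */
static bool example_pri_smac_registered(u64 smac, u8 smac_port)
{
        return smac || smac_port;
}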
| @@ -2089,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, | |||
| 2089 | return 0; | 2091 | return 0; |
| 2090 | } | 2092 | } |
| 2091 | 2093 | ||
| 2094 | static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac) | ||
| 2095 | { | ||
| 2096 | int i; | ||
| 2097 | |||
| 2098 | for (i = ETH_ALEN; i; i--) { | ||
| 2099 | dst_mac[i - 1] = src_mac & 0xff; | ||
| 2100 | src_mac >>= 8; | ||
| 2101 | } | ||
| 2102 | } | ||
| 2103 | |||
| 2092 | static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | 2104 | static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, |
| 2093 | void *wqe, unsigned *mlx_seg_len) | 2105 | void *wqe, unsigned *mlx_seg_len) |
| 2094 | { | 2106 | { |
| @@ -2203,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
| 2203 | } | 2215 | } |
| 2204 | 2216 | ||
| 2205 | if (is_eth) { | 2217 | if (is_eth) { |
| 2206 | u8 *smac; | ||
| 2207 | struct in6_addr in6; | 2218 | struct in6_addr in6; |
| 2208 | 2219 | ||
| 2209 | u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; | 2220 | u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; |
| @@ -2216,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
| 2216 | memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); | 2227 | memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); |
| 2217 | memcpy(&in6, sgid.raw, sizeof(in6)); | 2228 | memcpy(&in6, sgid.raw, sizeof(in6)); |
| 2218 | 2229 | ||
| 2219 | if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) | 2230 | if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { |
| 2220 | smac = to_mdev(sqp->qp.ibqp.device)-> | 2231 | u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]); |
| 2221 | iboe.netdevs[sqp->qp.port - 1]->dev_addr; | 2232 | u8 smac[ETH_ALEN]; |
| 2222 | else /* use the src mac of the tunnel */ | 2233 | |
| 2223 | smac = ah->av.eth.s_mac; | 2234 | mlx4_u64_to_smac(smac, mac); |
| 2224 | memcpy(sqp->ud_header.eth.smac_h, smac, 6); | 2235 | memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN); |
| 2236 | } else { | ||
| 2237 | /* use the src mac of the tunnel */ | ||
| 2238 | memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN); | ||
| 2239 | } | ||
| 2240 | |||
| 2225 | if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) | 2241 | if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) |
| 2226 | mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); | 2242 | mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); |
| 2227 | if (!is_vlan) { | 2243 | if (!is_vlan) { |
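With the source MAC now cached as a u64, build_mlx_header() unpacks it through the new mlx4_u64_to_smac() helper in the non-SR-IOV case and keeps using the tunnel source MAC (ah->av.eth.s_mac) under SR-IOV. A small usage sketch of the unpacking step, mirroring the byte order of the helper added above; the wrapper itself is illustrative:

#include <linux/etherdevice.h>
#include <linux/string.h>

static void example_fill_smac(u8 *smac_h, u64 cached_mac)
{
        u8 smac[ETH_ALEN];
        int i;

        /* The low byte of the u64 holds the last octet of the MAC, so fill
         * the array from the end while shifting the value down. */
        for (i = ETH_ALEN; i; i--) {
                smac[i - 1] = cached_mac & 0xff;
                cached_mac >>= 8;
        }

        memcpy(smac_h, smac, ETH_ALEN); /* e.g. into ud_header.eth.smac_h */
}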
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 40f8536c10b0..ac02ce4e8040 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c | |||
| @@ -38,7 +38,7 @@ | |||
| 38 | #define OCRDMA_VID_PCP_SHIFT 0xD | 38 | #define OCRDMA_VID_PCP_SHIFT 0xD |
| 39 | 39 | ||
| 40 | static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | 40 | static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, |
| 41 | struct ib_ah_attr *attr, int pdid) | 41 | struct ib_ah_attr *attr, union ib_gid *sgid, int pdid) |
| 42 | { | 42 | { |
| 43 | int status = 0; | 43 | int status = 0; |
| 44 | u16 vlan_tag; bool vlan_enabled = false; | 44 | u16 vlan_tag; bool vlan_enabled = false; |
| @@ -49,8 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | |||
| 49 | memset(ð, 0, sizeof(eth)); | 49 | memset(ð, 0, sizeof(eth)); |
| 50 | memset(&grh, 0, sizeof(grh)); | 50 | memset(&grh, 0, sizeof(grh)); |
| 51 | 51 | ||
| 52 | ah->sgid_index = attr->grh.sgid_index; | 52 | /* VLAN */ |
| 53 | |||
| 54 | vlan_tag = attr->vlan_id; | 53 | vlan_tag = attr->vlan_id; |
| 55 | if (!vlan_tag || (vlan_tag > 0xFFF)) | 54 | if (!vlan_tag || (vlan_tag > 0xFFF)) |
| 56 | vlan_tag = dev->pvid; | 55 | vlan_tag = dev->pvid; |
| @@ -65,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | |||
| 65 | eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); | 64 | eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); |
| 66 | eth_sz = sizeof(struct ocrdma_eth_basic); | 65 | eth_sz = sizeof(struct ocrdma_eth_basic); |
| 67 | } | 66 | } |
| 67 | /* MAC */ | ||
| 68 | memcpy(ð.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); | 68 | memcpy(ð.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); |
| 69 | memcpy(ð.dmac[0], attr->dmac, ETH_ALEN); | ||
| 70 | status = ocrdma_resolve_dmac(dev, attr, ð.dmac[0]); | 69 | status = ocrdma_resolve_dmac(dev, attr, ð.dmac[0]); |
| 71 | if (status) | 70 | if (status) |
| 72 | return status; | 71 | return status; |
| 73 | status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, | 72 | ah->sgid_index = attr->grh.sgid_index; |
| 74 | (union ib_gid *)&grh.sgid[0]); | 73 | memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid)); |
| 75 | if (status) | 74 | memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw)); |
| 76 | return status; | ||
| 77 | 75 | ||
| 78 | grh.tclass_flow = cpu_to_be32((6 << 28) | | 76 | grh.tclass_flow = cpu_to_be32((6 << 28) | |
| 79 | (attr->grh.traffic_class << 24) | | 77 | (attr->grh.traffic_class << 24) | |
| @@ -81,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | |||
| 81 | /* 0x1b is next header value in GRH */ | 79 | /* 0x1b is next header value in GRH */ |
| 82 | grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | | 80 | grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | |
| 83 | (0x1b << 8) | attr->grh.hop_limit); | 81 | (0x1b << 8) | attr->grh.hop_limit); |
| 84 | 82 | /* Eth HDR */ | |
| 85 | memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw)); | ||
| 86 | memcpy(&ah->av->eth_hdr, ð, eth_sz); | 83 | memcpy(&ah->av->eth_hdr, ð, eth_sz); |
| 87 | memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); | 84 | memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); |
| 88 | if (vlan_enabled) | 85 | if (vlan_enabled) |
| @@ -98,6 +95,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | |||
| 98 | struct ocrdma_ah *ah; | 95 | struct ocrdma_ah *ah; |
| 99 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | 96 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); |
| 100 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); | 97 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); |
| 98 | union ib_gid sgid; | ||
| 99 | u8 zmac[ETH_ALEN]; | ||
| 101 | 100 | ||
| 102 | if (!(attr->ah_flags & IB_AH_GRH)) | 101 | if (!(attr->ah_flags & IB_AH_GRH)) |
| 103 | return ERR_PTR(-EINVAL); | 102 | return ERR_PTR(-EINVAL); |
| @@ -111,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | |||
| 111 | status = ocrdma_alloc_av(dev, ah); | 110 | status = ocrdma_alloc_av(dev, ah); |
| 112 | if (status) | 111 | if (status) |
| 113 | goto av_err; | 112 | goto av_err; |
| 114 | status = set_av_attr(dev, ah, attr, pd->id); | 113 | |
| 114 | status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid); | ||
| 115 | if (status) { | ||
| 116 | pr_err("%s(): Failed to query sgid, status = %d\n", | ||
| 117 | __func__, status); | ||
| 118 | goto av_conf_err; | ||
| 119 | } | ||
| 120 | |||
| 121 | memset(&zmac, 0, ETH_ALEN); | ||
| 122 | if (pd->uctx && | ||
| 123 | memcmp(attr->dmac, &zmac, ETH_ALEN)) { | ||
| 124 | status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid, | ||
| 125 | attr->dmac, &attr->vlan_id); | ||
| 126 | if (status) { | ||
| 127 | pr_err("%s(): Failed to resolve dmac from gid." | ||
| 128 | "status = %d\n", __func__, status); | ||
| 129 | goto av_conf_err; | ||
| 130 | } | ||
| 131 | } | ||
| 132 | |||
| 133 | status = set_av_attr(dev, ah, attr, &sgid, pd->id); | ||
| 115 | if (status) | 134 | if (status) |
| 116 | goto av_conf_err; | 135 | goto av_conf_err; |
| 117 | 136 | ||
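ocrdma_create_ah() now queries the source GID once up front and, for user-space PDs that supplied a non-zero destination MAC, resolves the dmac and VLAN from the <sgid, dgid> pair before the AV is built. The resolution step in isolation; rdma_addr_find_dmac_by_grh() is the call used in the hunk, while the wrapper around it is illustrative:

#include <linux/etherdevice.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>

static int example_resolve_dmac(struct ib_ah_attr *attr, union ib_gid *sgid)
{
        u8 zmac[ETH_ALEN] = { 0 };

        /* Nothing to resolve if the caller left the dmac zeroed. */
        if (!memcmp(attr->dmac, zmac, ETH_ALEN))
                return 0;

        /* Derive the L2 destination MAC and VLAN from the GID pair. */
        return rdma_addr_find_dmac_by_grh(sgid, &attr->grh.dgid,
                                          attr->dmac, &attr->vlan_id);
}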
| @@ -145,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) | |||
| 145 | struct ocrdma_av *av = ah->av; | 164 | struct ocrdma_av *av = ah->av; |
| 146 | struct ocrdma_grh *grh; | 165 | struct ocrdma_grh *grh; |
| 147 | attr->ah_flags |= IB_AH_GRH; | 166 | attr->ah_flags |= IB_AH_GRH; |
| 148 | if (ah->av->valid & Bit(1)) { | 167 | if (ah->av->valid & OCRDMA_AV_VALID) { |
| 149 | grh = (struct ocrdma_grh *)((u8 *)ah->av + | 168 | grh = (struct ocrdma_grh *)((u8 *)ah->av + |
| 150 | sizeof(struct ocrdma_eth_vlan)); | 169 | sizeof(struct ocrdma_eth_vlan)); |
| 151 | attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13; | 170 | attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index acb434d16903..8f5f2577f288 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
| @@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) | |||
| 101 | attr->max_srq_sge = dev->attr.max_srq_sge; | 101 | attr->max_srq_sge = dev->attr.max_srq_sge; |
| 102 | attr->max_srq_wr = dev->attr.max_rqe; | 102 | attr->max_srq_wr = dev->attr.max_rqe; |
| 103 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; | 103 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; |
| 104 | attr->max_fast_reg_page_list_len = 0; | 104 | attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr; |
| 105 | attr->max_pkeys = 1; | 105 | attr->max_pkeys = 1; |
| 106 | return 0; | 106 | return 0; |
| 107 | } | 107 | } |
| @@ -2846,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) | |||
| 2846 | if (cq->first_arm) { | 2846 | if (cq->first_arm) { |
| 2847 | ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); | 2847 | ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); |
| 2848 | cq->first_arm = false; | 2848 | cq->first_arm = false; |
| 2849 | goto skip_defer; | ||
| 2850 | } | 2849 | } |
| 2851 | cq->deferred_arm = true; | ||
| 2852 | 2850 | ||
| 2853 | skip_defer: | 2851 | cq->deferred_arm = true; |
| 2854 | cq->deferred_sol = sol_needed; | 2852 | cq->deferred_sol = sol_needed; |
| 2855 | spin_unlock_irqrestore(&cq->cq_lock, flags); | 2853 | spin_unlock_irqrestore(&cq->cq_lock, flags); |
| 2856 | 2854 | ||
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c index 799a0c3bffc4..6abd3ed3cd51 100644 --- a/drivers/infiniband/hw/qib/qib_debugfs.c +++ b/drivers/infiniband/hw/qib/qib_debugfs.c | |||
| @@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) | |||
| 193 | struct qib_qp_iter *iter; | 193 | struct qib_qp_iter *iter; |
| 194 | loff_t n = *pos; | 194 | loff_t n = *pos; |
| 195 | 195 | ||
| 196 | rcu_read_lock(); | ||
| 196 | iter = qib_qp_iter_init(s->private); | 197 | iter = qib_qp_iter_init(s->private); |
| 197 | if (!iter) | 198 | if (!iter) |
| 198 | return NULL; | 199 | return NULL; |
| @@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | |||
| 224 | 225 | ||
| 225 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) | 226 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) |
| 226 | { | 227 | { |
| 227 | /* nothing for now */ | 228 | rcu_read_unlock(); |
| 228 | } | 229 | } |
| 229 | 230 | ||
| 230 | static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) | 231 | static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) |
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 7fcc150d603c..6ddc0264aad2 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
| @@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter) | |||
| 1325 | struct qib_qp *pqp = iter->qp; | 1325 | struct qib_qp *pqp = iter->qp; |
| 1326 | struct qib_qp *qp; | 1326 | struct qib_qp *qp; |
| 1327 | 1327 | ||
| 1328 | rcu_read_lock(); | ||
| 1329 | for (; n < dev->qp_table_size; n++) { | 1328 | for (; n < dev->qp_table_size; n++) { |
| 1330 | if (pqp) | 1329 | if (pqp) |
| 1331 | qp = rcu_dereference(pqp->next); | 1330 | qp = rcu_dereference(pqp->next); |
| @@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter) | |||
| 1333 | qp = rcu_dereference(dev->qp_table[n]); | 1332 | qp = rcu_dereference(dev->qp_table[n]); |
| 1334 | pqp = qp; | 1333 | pqp = qp; |
| 1335 | if (qp) { | 1334 | if (qp) { |
| 1336 | if (iter->qp) | ||
| 1337 | atomic_dec(&iter->qp->refcount); | ||
| 1338 | atomic_inc(&qp->refcount); | ||
| 1339 | rcu_read_unlock(); | ||
| 1340 | iter->qp = qp; | 1335 | iter->qp = qp; |
| 1341 | iter->n = n; | 1336 | iter->n = n; |
| 1342 | return 0; | 1337 | return 0; |
| 1343 | } | 1338 | } |
| 1344 | } | 1339 | } |
| 1345 | rcu_read_unlock(); | ||
| 1346 | if (iter->qp) | ||
| 1347 | atomic_dec(&iter->qp->refcount); | ||
| 1348 | return ret; | 1340 | return ret; |
| 1349 | } | 1341 | } |
| 1350 | 1342 | ||
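The qib debugfs and qib_qp.c hunks move rcu_read_lock()/rcu_read_unlock() into the seq_file ->start() and ->stop() callbacks and drop the per-QP refcount dance from qib_qp_iter_next(), so the whole debugfs walk happens inside a single RCU read-side critical section. The generic shape of that pattern, as a sketch over a placeholder table rather than qib code:

#include <linux/seq_file.h>
#include <linux/rcupdate.h>

#define EXAMPLE_TABLE_SIZE 256

struct example_qp {
        int qpn;
};

static struct example_qp __rcu *example_table[EXAMPLE_TABLE_SIZE];

/* ->start() opens the read-side critical section, ->stop() closes it, so
 * ->next() and ->show() may rcu_dereference() table entries in between. */
static void *example_seq_start(struct seq_file *s, loff_t *pos)
{
        rcu_read_lock();
        if (*pos >= EXAMPLE_TABLE_SIZE)
                return NULL;                    /* ->stop() still runs */
        return &example_table[*pos];
}

static void *example_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        ++*pos;
        return *pos < EXAMPLE_TABLE_SIZE ? &example_table[*pos] : NULL;
}

static void example_seq_stop(struct seq_file *s, void *v)
{
        rcu_read_unlock();                      /* matches ->start() */
}

static int example_seq_show(struct seq_file *s, void *v)
{
        struct example_qp __rcu **slot = v;
        struct example_qp *qp = rcu_dereference(*slot);

        if (qp)
                seq_printf(s, "qp %d\n", qp->qpn);
        return 0;
}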
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 2bc1d2b96298..74f90b2619f6 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c | |||
| @@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages, | |||
| 52 | * Call with current->mm->mmap_sem held. | 52 | * Call with current->mm->mmap_sem held. |
| 53 | */ | 53 | */ |
| 54 | static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, | 54 | static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, |
| 55 | struct page **p, struct vm_area_struct **vma) | 55 | struct page **p) |
| 56 | { | 56 | { |
| 57 | unsigned long lock_limit; | 57 | unsigned long lock_limit; |
| 58 | size_t got; | 58 | size_t got; |
| @@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, | |||
| 69 | ret = get_user_pages(current, current->mm, | 69 | ret = get_user_pages(current, current->mm, |
| 70 | start_page + got * PAGE_SIZE, | 70 | start_page + got * PAGE_SIZE, |
| 71 | num_pages - got, 1, 1, | 71 | num_pages - got, 1, 1, |
| 72 | p + got, vma); | 72 | p + got, NULL); |
| 73 | if (ret < 0) | 73 | if (ret < 0) |
| 74 | goto bail_release; | 74 | goto bail_release; |
| 75 | } | 75 | } |
| @@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, | |||
| 136 | 136 | ||
| 137 | down_write(¤t->mm->mmap_sem); | 137 | down_write(¤t->mm->mmap_sem); |
| 138 | 138 | ||
| 139 | ret = __qib_get_user_pages(start_page, num_pages, p, NULL); | 139 | ret = __qib_get_user_pages(start_page, num_pages, p); |
| 140 | 140 | ||
| 141 | up_write(¤t->mm->mmap_sem); | 141 | up_write(¤t->mm->mmap_sem); |
| 142 | 142 | ||
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 801a1d6937e4..417de1f32960 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c | |||
| @@ -507,7 +507,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev) | |||
| 507 | if (err) | 507 | if (err) |
| 508 | goto out_free_dev; | 508 | goto out_free_dev; |
| 509 | 509 | ||
| 510 | if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) { | 510 | if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) { |
| 511 | usnic_err("IOMMU of %s does not support cache coherency\n", | 511 | usnic_err("IOMMU of %s does not support cache coherency\n", |
| 512 | dev_name(dev)); | 512 | dev_name(dev)); |
| 513 | err = -EINVAL; | 513 | err = -EINVAL; |
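The usnic change tracks a core IOMMU API rework: cache coherency is a property of the IOMMU serving a bus, not of a particular domain, so iommu_domain_has_cap() gives way to iommu_capable(). A minimal check of the new form; the wrapper is illustrative while the call and capability flag are the ones in the hunk:

#include <linux/iommu.h>
#include <linux/device.h>
#include <linux/errno.h>

static int example_check_coherency(struct device *dev)
{
        /* Ask the IOMMU behind this device's bus whether DMA through it is
         * cache coherent; no domain has to exist yet. */
        if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
                dev_err(dev, "IOMMU does not support cache coherency\n");
                return -EINVAL;
        }

        return 0;
}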
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 3edce617c31b..d7562beb5423 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
| @@ -131,6 +131,12 @@ struct ipoib_cb { | |||
| 131 | u8 hwaddr[INFINIBAND_ALEN]; | 131 | u8 hwaddr[INFINIBAND_ALEN]; |
| 132 | }; | 132 | }; |
| 133 | 133 | ||
| 134 | static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb) | ||
| 135 | { | ||
| 136 | BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb)); | ||
| 137 | return (struct ipoib_cb *)skb->cb; | ||
| 138 | } | ||
| 139 | |||
| 134 | /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ | 140 | /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ |
| 135 | struct ipoib_mcast { | 141 | struct ipoib_mcast { |
| 136 | struct ib_sa_mcmember_rec mcmember; | 142 | struct ib_sa_mcmember_rec mcmember; |
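ipoib_skb_cb() hides the cast of skb->cb behind a BUILD_BUG_ON so the build breaks if struct ipoib_cb ever outgrows the 48-byte control block, instead of silently corrupting neighbouring skb fields. The same guard works for any driver-private cb layout, for example (the struct here is illustrative):

#include <linux/skbuff.h>

struct example_cb {
        unsigned long misc;
        u8 hwaddr[20];
};

static inline struct example_cb *example_skb_cb(const struct sk_buff *skb)
{
        /* Compile-time guard: skb->cb is only 48 bytes long. */
        BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct example_cb));
        return (struct example_cb *)skb->cb;
}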
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 1310acf6bf92..13e6e0431592 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -716,7 +716,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 716 | { | 716 | { |
| 717 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 717 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 718 | struct ipoib_neigh *neigh; | 718 | struct ipoib_neigh *neigh; |
| 719 | struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; | 719 | struct ipoib_cb *cb = ipoib_skb_cb(skb); |
| 720 | struct ipoib_header *header; | 720 | struct ipoib_header *header; |
| 721 | unsigned long flags; | 721 | unsigned long flags; |
| 722 | 722 | ||
| @@ -813,7 +813,7 @@ static int ipoib_hard_header(struct sk_buff *skb, | |||
| 813 | const void *daddr, const void *saddr, unsigned len) | 813 | const void *daddr, const void *saddr, unsigned len) |
| 814 | { | 814 | { |
| 815 | struct ipoib_header *header; | 815 | struct ipoib_header *header; |
| 816 | struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; | 816 | struct ipoib_cb *cb = ipoib_skb_cb(skb); |
| 817 | 817 | ||
| 818 | header = (struct ipoib_header *) skb_push(skb, sizeof *header); | 818 | header = (struct ipoib_header *) skb_push(skb, sizeof *header); |
| 819 | 819 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index d4e005720d01..ffb83b5f7e80 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
| @@ -529,21 +529,13 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
| 529 | port_attr.state); | 529 | port_attr.state); |
| 530 | return; | 530 | return; |
| 531 | } | 531 | } |
| 532 | priv->local_lid = port_attr.lid; | ||
| 532 | 533 | ||
| 533 | if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid)) | 534 | if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid)) |
| 534 | ipoib_warn(priv, "ib_query_gid() failed\n"); | 535 | ipoib_warn(priv, "ib_query_gid() failed\n"); |
| 535 | else | 536 | else |
| 536 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); | 537 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); |
| 537 | 538 | ||
| 538 | { | ||
| 539 | struct ib_port_attr attr; | ||
| 540 | |||
| 541 | if (!ib_query_port(priv->ca, priv->port, &attr)) | ||
| 542 | priv->local_lid = attr.lid; | ||
| 543 | else | ||
| 544 | ipoib_warn(priv, "ib_query_port failed\n"); | ||
| 545 | } | ||
| 546 | |||
| 547 | if (!priv->broadcast) { | 539 | if (!priv->broadcast) { |
| 548 | struct ipoib_mcast *broadcast; | 540 | struct ipoib_mcast *broadcast; |
| 549 | 541 | ||
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 61ee91d88380..93ce62fe1594 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
| @@ -344,7 +344,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
| 344 | int is_leading) | 344 | int is_leading) |
| 345 | { | 345 | { |
| 346 | struct iscsi_conn *conn = cls_conn->dd_data; | 346 | struct iscsi_conn *conn = cls_conn->dd_data; |
| 347 | struct iscsi_session *session; | ||
| 348 | struct iser_conn *ib_conn; | 347 | struct iser_conn *ib_conn; |
| 349 | struct iscsi_endpoint *ep; | 348 | struct iscsi_endpoint *ep; |
| 350 | int error; | 349 | int error; |
| @@ -363,9 +362,17 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
| 363 | } | 362 | } |
| 364 | ib_conn = ep->dd_data; | 363 | ib_conn = ep->dd_data; |
| 365 | 364 | ||
| 366 | session = conn->session; | 365 | mutex_lock(&ib_conn->state_mutex); |
| 367 | if (iser_alloc_rx_descriptors(ib_conn, session)) | 366 | if (ib_conn->state != ISER_CONN_UP) { |
| 368 | return -ENOMEM; | 367 | error = -EINVAL; |
| 368 | iser_err("iser_conn %p state is %d, teardown started\n", | ||
| 369 | ib_conn, ib_conn->state); | ||
| 370 | goto out; | ||
| 371 | } | ||
| 372 | |||
| 373 | error = iser_alloc_rx_descriptors(ib_conn, conn->session); | ||
| 374 | if (error) | ||
| 375 | goto out; | ||
| 369 | 376 | ||
| 370 | /* binds the iSER connection retrieved from the previously | 377 | /* binds the iSER connection retrieved from the previously |
| 371 | * connected ep_handle to the iSCSI layer connection. exchanges | 378 | * connected ep_handle to the iSCSI layer connection. exchanges |
| @@ -375,7 +382,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
| 375 | conn->dd_data = ib_conn; | 382 | conn->dd_data = ib_conn; |
| 376 | ib_conn->iscsi_conn = conn; | 383 | ib_conn->iscsi_conn = conn; |
| 377 | 384 | ||
| 378 | return 0; | 385 | out: |
| 386 | mutex_unlock(&ib_conn->state_mutex); | ||
| 387 | return error; | ||
| 379 | } | 388 | } |
| 380 | 389 | ||
| 381 | static int | 390 | static int |
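iscsi_iser_conn_bind() now takes the connection's state_mutex and refuses to bind (or allocate RX descriptors) once teardown has started, returning the allocation error instead of a bare -ENOMEM. The guard pattern stripped down; every name here is a placeholder standing in for the iser structures:

#include <linux/mutex.h>
#include <linux/errno.h>

enum example_conn_state { EXAMPLE_CONN_UP, EXAMPLE_CONN_TERMINATING };

struct example_conn {
        struct mutex state_mutex;
        enum example_conn_state state;
};

/* Stand-in for iser_alloc_rx_descriptors() in the hunk above. */
static int example_alloc_rx(struct example_conn *conn)
{
        return 0;
}

static int example_conn_bind(struct example_conn *conn)
{
        int err;

        mutex_lock(&conn->state_mutex);
        if (conn->state != EXAMPLE_CONN_UP) {
                err = -EINVAL;          /* teardown already started */
                goto out;
        }

        err = example_alloc_rx(conn);   /* only allocate for a live connection */
out:
        mutex_unlock(&conn->state_mutex);
        return err;
}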
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index c877dad381cb..9f0e0e34d6ca 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
| @@ -69,7 +69,7 @@ | |||
| 69 | 69 | ||
| 70 | #define DRV_NAME "iser" | 70 | #define DRV_NAME "iser" |
| 71 | #define PFX DRV_NAME ": " | 71 | #define PFX DRV_NAME ": " |
| 72 | #define DRV_VER "1.4" | 72 | #define DRV_VER "1.4.1" |
| 73 | 73 | ||
| 74 | #define iser_dbg(fmt, arg...) \ | 74 | #define iser_dbg(fmt, arg...) \ |
| 75 | do { \ | 75 | do { \ |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 3ef167f97d6f..3bfec4bbda52 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
| @@ -73,7 +73,7 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
| 73 | { | 73 | { |
| 74 | struct iser_cq_desc *cq_desc; | 74 | struct iser_cq_desc *cq_desc; |
| 75 | struct ib_device_attr *dev_attr = &device->dev_attr; | 75 | struct ib_device_attr *dev_attr = &device->dev_attr; |
| 76 | int ret, i, j; | 76 | int ret, i; |
| 77 | 77 | ||
| 78 | ret = ib_query_device(device->ib_device, dev_attr); | 78 | ret = ib_query_device(device->ib_device, dev_attr); |
| 79 | if (ret) { | 79 | if (ret) { |
| @@ -125,16 +125,20 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
| 125 | iser_cq_event_callback, | 125 | iser_cq_event_callback, |
| 126 | (void *)&cq_desc[i], | 126 | (void *)&cq_desc[i], |
| 127 | ISER_MAX_RX_CQ_LEN, i); | 127 | ISER_MAX_RX_CQ_LEN, i); |
| 128 | if (IS_ERR(device->rx_cq[i])) | 128 | if (IS_ERR(device->rx_cq[i])) { |
| 129 | device->rx_cq[i] = NULL; | ||
| 129 | goto cq_err; | 130 | goto cq_err; |
| 131 | } | ||
| 130 | 132 | ||
| 131 | device->tx_cq[i] = ib_create_cq(device->ib_device, | 133 | device->tx_cq[i] = ib_create_cq(device->ib_device, |
| 132 | NULL, iser_cq_event_callback, | 134 | NULL, iser_cq_event_callback, |
| 133 | (void *)&cq_desc[i], | 135 | (void *)&cq_desc[i], |
| 134 | ISER_MAX_TX_CQ_LEN, i); | 136 | ISER_MAX_TX_CQ_LEN, i); |
| 135 | 137 | ||
| 136 | if (IS_ERR(device->tx_cq[i])) | 138 | if (IS_ERR(device->tx_cq[i])) { |
| 139 | device->tx_cq[i] = NULL; | ||
| 137 | goto cq_err; | 140 | goto cq_err; |
| 141 | } | ||
| 138 | 142 | ||
| 139 | if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP)) | 143 | if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP)) |
| 140 | goto cq_err; | 144 | goto cq_err; |
| @@ -160,14 +164,14 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
| 160 | handler_err: | 164 | handler_err: |
| 161 | ib_dereg_mr(device->mr); | 165 | ib_dereg_mr(device->mr); |
| 162 | dma_mr_err: | 166 | dma_mr_err: |
| 163 | for (j = 0; j < device->cqs_used; j++) | 167 | for (i = 0; i < device->cqs_used; i++) |
| 164 | tasklet_kill(&device->cq_tasklet[j]); | 168 | tasklet_kill(&device->cq_tasklet[i]); |
| 165 | cq_err: | 169 | cq_err: |
| 166 | for (j = 0; j < i; j++) { | 170 | for (i = 0; i < device->cqs_used; i++) { |
| 167 | if (device->tx_cq[j]) | 171 | if (device->tx_cq[i]) |
| 168 | ib_destroy_cq(device->tx_cq[j]); | 172 | ib_destroy_cq(device->tx_cq[i]); |
| 169 | if (device->rx_cq[j]) | 173 | if (device->rx_cq[i]) |
| 170 | ib_destroy_cq(device->rx_cq[j]); | 174 | ib_destroy_cq(device->rx_cq[i]); |
| 171 | } | 175 | } |
| 172 | ib_dealloc_pd(device->pd); | 176 | ib_dealloc_pd(device->pd); |
| 173 | pd_err: | 177 | pd_err: |
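The iser hunk NULLs a CQ slot as soon as ib_create_cq() hands back an ERR_PTR, so the single error-path loop can test each slot for non-NULL and destroy it, and that loop can then safely run over every slot instead of tracking a second index. The idiom in a self-contained form; the CQ parameters are arbitrary and the function is illustrative:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int example_create_cqs(struct ib_device *ibdev, struct ib_cq **cqs,
                              int ncq)
{
        int i, err = 0;

        for (i = 0; i < ncq; i++) {
                cqs[i] = ib_create_cq(ibdev, NULL, NULL, NULL, 128, i);
                if (IS_ERR(cqs[i])) {
                        err = PTR_ERR(cqs[i]);
                        cqs[i] = NULL;  /* keep the array cleanup-safe */
                        goto free;
                }
        }
        return 0;

free:
        /* One loop covers every partially initialized slot. */
        for (i = 0; i < ncq; i++)
                if (cqs[i])
                        ib_destroy_cq(cqs[i]);
        return err;
}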
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 713e3ddb43bd..40b7d6c0ff17 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
| @@ -466,6 +466,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | |||
| 466 | }, | 466 | }, |
| 467 | }, | 467 | }, |
| 468 | { | 468 | { |
| 469 | /* Asus X450LCP */ | ||
| 470 | .matches = { | ||
| 471 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
| 472 | DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), | ||
| 473 | }, | ||
| 474 | }, | ||
| 475 | { | ||
| 469 | /* Avatar AVIU-145A6 */ | 476 | /* Avatar AVIU-145A6 */ |
| 470 | .matches = { | 477 | .matches = { |
| 471 | DMI_MATCH(DMI_SYS_VENDOR, "Intel"), | 478 | DMI_MATCH(DMI_SYS_VENDOR, "Intel"), |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index ecb0109a5360..505a9adac2d5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -88,6 +88,27 @@ int amd_iommu_max_glx_val = -1; | |||
| 88 | static struct dma_map_ops amd_iommu_dma_ops; | 88 | static struct dma_map_ops amd_iommu_dma_ops; |
| 89 | 89 | ||
| 90 | /* | 90 | /* |
| 91 | * This struct contains device specific data for the IOMMU | ||
| 92 | */ | ||
| 93 | struct iommu_dev_data { | ||
| 94 | struct list_head list; /* For domain->dev_list */ | ||
| 95 | struct list_head dev_data_list; /* For global dev_data_list */ | ||
| 96 | struct list_head alias_list; /* Link alias-groups together */ | ||
| 97 | struct iommu_dev_data *alias_data;/* The alias dev_data */ | ||
| 98 | struct protection_domain *domain; /* Domain the device is bound to */ | ||
| 99 | u16 devid; /* PCI Device ID */ | ||
| 100 | bool iommu_v2; /* Device can make use of IOMMUv2 */ | ||
| 101 | bool passthrough; /* Default for device is pt_domain */ | ||
| 102 | struct { | ||
| 103 | bool enabled; | ||
| 104 | int qdep; | ||
| 105 | } ats; /* ATS state */ | ||
| 106 | bool pri_tlp; /* PASID TLB required for | ||
| 107 | PPR completions */ | ||
| 108 | u32 errata; /* Bitmap for errata to apply */ | ||
| 109 | }; | ||
| 110 | |||
| 111 | /* | ||
| 91 | * general struct to manage commands send to an IOMMU | 112 | * general struct to manage commands send to an IOMMU |
| 92 | */ | 113 | */ |
| 93 | struct iommu_cmd { | 114 | struct iommu_cmd { |
| @@ -114,8 +135,9 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid) | |||
| 114 | if (!dev_data) | 135 | if (!dev_data) |
| 115 | return NULL; | 136 | return NULL; |
| 116 | 137 | ||
| 138 | INIT_LIST_HEAD(&dev_data->alias_list); | ||
| 139 | |||
| 117 | dev_data->devid = devid; | 140 | dev_data->devid = devid; |
| 118 | atomic_set(&dev_data->bind, 0); | ||
| 119 | 141 | ||
| 120 | spin_lock_irqsave(&dev_data_list_lock, flags); | 142 | spin_lock_irqsave(&dev_data_list_lock, flags); |
| 121 | list_add_tail(&dev_data->dev_data_list, &dev_data_list); | 143 | list_add_tail(&dev_data->dev_data_list, &dev_data_list); |
| @@ -260,17 +282,13 @@ static bool check_device(struct device *dev) | |||
| 260 | return true; | 282 | return true; |
| 261 | } | 283 | } |
| 262 | 284 | ||
| 263 | static int init_iommu_group(struct device *dev) | 285 | static void init_iommu_group(struct device *dev) |
| 264 | { | 286 | { |
| 265 | struct iommu_group *group; | 287 | struct iommu_group *group; |
| 266 | 288 | ||
| 267 | group = iommu_group_get_for_dev(dev); | 289 | group = iommu_group_get_for_dev(dev); |
| 268 | 290 | if (!IS_ERR(group)) | |
| 269 | if (IS_ERR(group)) | 291 | iommu_group_put(group); |
| 270 | return PTR_ERR(group); | ||
| 271 | |||
| 272 | iommu_group_put(group); | ||
| 273 | return 0; | ||
| 274 | } | 292 | } |
| 275 | 293 | ||
| 276 | static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) | 294 | static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) |
| @@ -340,7 +358,6 @@ static int iommu_init_device(struct device *dev) | |||
| 340 | struct pci_dev *pdev = to_pci_dev(dev); | 358 | struct pci_dev *pdev = to_pci_dev(dev); |
| 341 | struct iommu_dev_data *dev_data; | 359 | struct iommu_dev_data *dev_data; |
| 342 | u16 alias; | 360 | u16 alias; |
| 343 | int ret; | ||
| 344 | 361 | ||
| 345 | if (dev->archdata.iommu) | 362 | if (dev->archdata.iommu) |
| 346 | return 0; | 363 | return 0; |
| @@ -362,12 +379,9 @@ static int iommu_init_device(struct device *dev) | |||
| 362 | return -ENOTSUPP; | 379 | return -ENOTSUPP; |
| 363 | } | 380 | } |
| 364 | dev_data->alias_data = alias_data; | 381 | dev_data->alias_data = alias_data; |
| 365 | } | ||
| 366 | 382 | ||
| 367 | ret = init_iommu_group(dev); | 383 | /* Add device to the alias_list */ |
| 368 | if (ret) { | 384 | list_add(&dev_data->alias_list, &alias_data->alias_list); |
| 369 | free_dev_data(dev_data); | ||
| 370 | return ret; | ||
| 371 | } | 385 | } |
| 372 | 386 | ||
| 373 | if (pci_iommuv2_capable(pdev)) { | 387 | if (pci_iommuv2_capable(pdev)) { |
| @@ -455,6 +469,15 @@ int __init amd_iommu_init_devices(void) | |||
| 455 | goto out_free; | 469 | goto out_free; |
| 456 | } | 470 | } |
| 457 | 471 | ||
| 472 | /* | ||
| 473 | * Initialize IOMMU groups only after iommu_init_device() has | ||
| 474 | * had a chance to populate any IVRS defined aliases. | ||
| 475 | */ | ||
| 476 | for_each_pci_dev(pdev) { | ||
| 477 | if (check_device(&pdev->dev)) | ||
| 478 | init_iommu_group(&pdev->dev); | ||
| 479 | } | ||
| 480 | |||
| 458 | return 0; | 481 | return 0; |
| 459 | 482 | ||
| 460 | out_free: | 483 | out_free: |
| @@ -1368,6 +1391,9 @@ static int iommu_map_page(struct protection_domain *dom, | |||
| 1368 | count = PAGE_SIZE_PTE_COUNT(page_size); | 1391 | count = PAGE_SIZE_PTE_COUNT(page_size); |
| 1369 | pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); | 1392 | pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); |
| 1370 | 1393 | ||
| 1394 | if (!pte) | ||
| 1395 | return -ENOMEM; | ||
| 1396 | |||
| 1371 | for (i = 0; i < count; ++i) | 1397 | for (i = 0; i < count; ++i) |
| 1372 | if (IOMMU_PTE_PRESENT(pte[i])) | 1398 | if (IOMMU_PTE_PRESENT(pte[i])) |
| 1373 | return -EBUSY; | 1399 | return -EBUSY; |
| @@ -2122,35 +2148,29 @@ static void do_detach(struct iommu_dev_data *dev_data) | |||
| 2122 | static int __attach_device(struct iommu_dev_data *dev_data, | 2148 | static int __attach_device(struct iommu_dev_data *dev_data, |
| 2123 | struct protection_domain *domain) | 2149 | struct protection_domain *domain) |
| 2124 | { | 2150 | { |
| 2151 | struct iommu_dev_data *head, *entry; | ||
| 2125 | int ret; | 2152 | int ret; |
| 2126 | 2153 | ||
| 2127 | /* lock domain */ | 2154 | /* lock domain */ |
| 2128 | spin_lock(&domain->lock); | 2155 | spin_lock(&domain->lock); |
| 2129 | 2156 | ||
| 2130 | if (dev_data->alias_data != NULL) { | 2157 | head = dev_data; |
| 2131 | struct iommu_dev_data *alias_data = dev_data->alias_data; | ||
| 2132 | 2158 | ||
| 2133 | /* Some sanity checks */ | 2159 | if (head->alias_data != NULL) |
| 2134 | ret = -EBUSY; | 2160 | head = head->alias_data; |
| 2135 | if (alias_data->domain != NULL && | ||
| 2136 | alias_data->domain != domain) | ||
| 2137 | goto out_unlock; | ||
| 2138 | 2161 | ||
| 2139 | if (dev_data->domain != NULL && | 2162 | /* Now we have the root of the alias group, if any */ |
| 2140 | dev_data->domain != domain) | ||
| 2141 | goto out_unlock; | ||
| 2142 | 2163 | ||
| 2143 | /* Do real assignment */ | 2164 | ret = -EBUSY; |
| 2144 | if (alias_data->domain == NULL) | 2165 | if (head->domain != NULL) |
| 2145 | do_attach(alias_data, domain); | 2166 | goto out_unlock; |
| 2146 | |||
| 2147 | atomic_inc(&alias_data->bind); | ||
| 2148 | } | ||
| 2149 | 2167 | ||
| 2150 | if (dev_data->domain == NULL) | 2168 | /* Attach alias group root */ |
| 2151 | do_attach(dev_data, domain); | 2169 | do_attach(head, domain); |
| 2152 | 2170 | ||
| 2153 | atomic_inc(&dev_data->bind); | 2171 | /* Attach other devices in the alias group */ |
| 2172 | list_for_each_entry(entry, &head->alias_list, alias_list) | ||
| 2173 | do_attach(entry, domain); | ||
| 2154 | 2174 | ||
| 2155 | ret = 0; | 2175 | ret = 0; |
| 2156 | 2176 | ||
| @@ -2298,6 +2318,7 @@ static int attach_device(struct device *dev, | |||
| 2298 | */ | 2318 | */ |
| 2299 | static void __detach_device(struct iommu_dev_data *dev_data) | 2319 | static void __detach_device(struct iommu_dev_data *dev_data) |
| 2300 | { | 2320 | { |
| 2321 | struct iommu_dev_data *head, *entry; | ||
| 2301 | struct protection_domain *domain; | 2322 | struct protection_domain *domain; |
| 2302 | unsigned long flags; | 2323 | unsigned long flags; |
| 2303 | 2324 | ||
| @@ -2307,15 +2328,14 @@ static void __detach_device(struct iommu_dev_data *dev_data) | |||
| 2307 | 2328 | ||
| 2308 | spin_lock_irqsave(&domain->lock, flags); | 2329 | spin_lock_irqsave(&domain->lock, flags); |
| 2309 | 2330 | ||
| 2310 | if (dev_data->alias_data != NULL) { | 2331 | head = dev_data; |
| 2311 | struct iommu_dev_data *alias_data = dev_data->alias_data; | 2332 | if (head->alias_data != NULL) |
| 2333 | head = head->alias_data; | ||
| 2312 | 2334 | ||
| 2313 | if (atomic_dec_and_test(&alias_data->bind)) | 2335 | list_for_each_entry(entry, &head->alias_list, alias_list) |
| 2314 | do_detach(alias_data); | 2336 | do_detach(entry); |
| 2315 | } | ||
| 2316 | 2337 | ||
| 2317 | if (atomic_dec_and_test(&dev_data->bind)) | 2338 | do_detach(head); |
| 2318 | do_detach(dev_data); | ||
| 2319 | 2339 | ||
| 2320 | spin_unlock_irqrestore(&domain->lock, flags); | 2340 | spin_unlock_irqrestore(&domain->lock, flags); |
| 2321 | 2341 | ||
| @@ -2415,6 +2435,7 @@ static int device_change_notifier(struct notifier_block *nb, | |||
| 2415 | case BUS_NOTIFY_ADD_DEVICE: | 2435 | case BUS_NOTIFY_ADD_DEVICE: |
| 2416 | 2436 | ||
| 2417 | iommu_init_device(dev); | 2437 | iommu_init_device(dev); |
| 2438 | init_iommu_group(dev); | ||
| 2418 | 2439 | ||
| 2419 | /* | 2440 | /* |
| 2420 | * dev_data is still NULL and | 2441 | * dev_data is still NULL and |
| @@ -3158,7 +3179,6 @@ static void cleanup_domain(struct protection_domain *domain) | |||
| 3158 | entry = list_first_entry(&domain->dev_list, | 3179 | entry = list_first_entry(&domain->dev_list, |
| 3159 | struct iommu_dev_data, list); | 3180 | struct iommu_dev_data, list); |
| 3160 | __detach_device(entry); | 3181 | __detach_device(entry); |
| 3161 | atomic_set(&entry->bind, 0); | ||
| 3162 | } | 3182 | } |
| 3163 | 3183 | ||
| 3164 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 3184 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
| @@ -3384,20 +3404,20 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | |||
| 3384 | return paddr; | 3404 | return paddr; |
| 3385 | } | 3405 | } |
| 3386 | 3406 | ||
| 3387 | static int amd_iommu_domain_has_cap(struct iommu_domain *domain, | 3407 | static bool amd_iommu_capable(enum iommu_cap cap) |
| 3388 | unsigned long cap) | ||
| 3389 | { | 3408 | { |
| 3390 | switch (cap) { | 3409 | switch (cap) { |
| 3391 | case IOMMU_CAP_CACHE_COHERENCY: | 3410 | case IOMMU_CAP_CACHE_COHERENCY: |
| 3392 | return 1; | 3411 | return true; |
| 3393 | case IOMMU_CAP_INTR_REMAP: | 3412 | case IOMMU_CAP_INTR_REMAP: |
| 3394 | return irq_remapping_enabled; | 3413 | return (irq_remapping_enabled == 1); |
| 3395 | } | 3414 | } |
| 3396 | 3415 | ||
| 3397 | return 0; | 3416 | return false; |
| 3398 | } | 3417 | } |
| 3399 | 3418 | ||
| 3400 | static const struct iommu_ops amd_iommu_ops = { | 3419 | static const struct iommu_ops amd_iommu_ops = { |
| 3420 | .capable = amd_iommu_capable, | ||
| 3401 | .domain_init = amd_iommu_domain_init, | 3421 | .domain_init = amd_iommu_domain_init, |
| 3402 | .domain_destroy = amd_iommu_domain_destroy, | 3422 | .domain_destroy = amd_iommu_domain_destroy, |
| 3403 | .attach_dev = amd_iommu_attach_device, | 3423 | .attach_dev = amd_iommu_attach_device, |
| @@ -3405,7 +3425,6 @@ static const struct iommu_ops amd_iommu_ops = { | |||
| 3405 | .map = amd_iommu_map, | 3425 | .map = amd_iommu_map, |
| 3406 | .unmap = amd_iommu_unmap, | 3426 | .unmap = amd_iommu_unmap, |
| 3407 | .iova_to_phys = amd_iommu_iova_to_phys, | 3427 | .iova_to_phys = amd_iommu_iova_to_phys, |
| 3408 | .domain_has_cap = amd_iommu_domain_has_cap, | ||
| 3409 | .pgsize_bitmap = AMD_IOMMU_PGSIZES, | 3428 | .pgsize_bitmap = AMD_IOMMU_PGSIZES, |
| 3410 | }; | 3429 | }; |
| 3411 | 3430 | ||
| @@ -4235,7 +4254,7 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq, | |||
| 4235 | return 0; | 4254 | return 0; |
| 4236 | } | 4255 | } |
| 4237 | 4256 | ||
| 4238 | static int setup_hpet_msi(unsigned int irq, unsigned int id) | 4257 | static int alloc_hpet_msi(unsigned int irq, unsigned int id) |
| 4239 | { | 4258 | { |
| 4240 | struct irq_2_irte *irte_info; | 4259 | struct irq_2_irte *irte_info; |
| 4241 | struct irq_cfg *cfg; | 4260 | struct irq_cfg *cfg; |
| @@ -4274,6 +4293,6 @@ struct irq_remap_ops amd_iommu_irq_ops = { | |||
| 4274 | .compose_msi_msg = compose_msi_msg, | 4293 | .compose_msi_msg = compose_msi_msg, |
| 4275 | .msi_alloc_irq = msi_alloc_irq, | 4294 | .msi_alloc_irq = msi_alloc_irq, |
| 4276 | .msi_setup_irq = msi_setup_irq, | 4295 | .msi_setup_irq = msi_setup_irq, |
| 4277 | .setup_hpet_msi = setup_hpet_msi, | 4296 | .alloc_hpet_msi = alloc_hpet_msi, |
| 4278 | }; | 4297 | }; |
| 4279 | #endif | 4298 | #endif |
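The amd_iommu.c rework above drops the per-device bind refcount and instead links every iommu_dev_data that shares a request ID onto the group head's alias_list, so attach and detach walk the whole alias group at once. The following compact user-space model of that walk uses hypothetical names and a simplified singly linked list in place of the kernel's list_head.

#include <stdio.h>

struct dev_data {
	const char *name;
	struct dev_data *alias_data;	/* alias-group head, or NULL if we are the head */
	struct dev_data *next_alias;	/* simplified stand-in for the alias_list */
	int domain;			/* 0 = not attached */
};

/*
 * Attach every device in the alias group: resolve the group head,
 * refuse if the head is already bound (the kernel returns -EBUSY),
 * then attach the head followed by all of its listed aliases.
 */
static int attach_group(struct dev_data *dev, int domain)
{
	struct dev_data *head = dev->alias_data ? dev->alias_data : dev;
	struct dev_data *entry;

	if (head->domain)
		return -1;

	head->domain = domain;
	for (entry = head->next_alias; entry; entry = entry->next_alias)
		entry->domain = domain;

	return 0;
}

int main(void)
{
	struct dev_data gpu = { .name = "gpu" };
	struct dev_data fn1 = { .name = "gpu.1", .alias_data = &gpu };

	gpu.next_alias = &fn1;	/* fn1 aliases to gpu, so they share a group */
	printf("attach: %d, gpu=%d, fn1=%d\n",
	       attach_group(&fn1, 42), gpu.domain, fn1.domain);
	return 0;
}

The same resolve-the-head-first shape appears in the patched __detach_device(), which detaches the listed aliases and then the head.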
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 3783e0b44df6..b0522f15730f 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
| @@ -712,7 +712,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, | |||
| 712 | set_iommu_for_device(iommu, devid); | 712 | set_iommu_for_device(iommu, devid); |
| 713 | } | 713 | } |
| 714 | 714 | ||
| 715 | static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line) | 715 | static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) |
| 716 | { | 716 | { |
| 717 | struct devid_map *entry; | 717 | struct devid_map *entry; |
| 718 | struct list_head *list; | 718 | struct list_head *list; |
| @@ -731,6 +731,8 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line) | |||
| 731 | pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n", | 731 | pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n", |
| 732 | type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); | 732 | type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); |
| 733 | 733 | ||
| 734 | *devid = entry->devid; | ||
| 735 | |||
| 734 | return 0; | 736 | return 0; |
| 735 | } | 737 | } |
| 736 | 738 | ||
| @@ -739,7 +741,7 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line) | |||
| 739 | return -ENOMEM; | 741 | return -ENOMEM; |
| 740 | 742 | ||
| 741 | entry->id = id; | 743 | entry->id = id; |
| 742 | entry->devid = devid; | 744 | entry->devid = *devid; |
| 743 | entry->cmd_line = cmd_line; | 745 | entry->cmd_line = cmd_line; |
| 744 | 746 | ||
| 745 | list_add_tail(&entry->list, list); | 747 | list_add_tail(&entry->list, list); |
| @@ -754,7 +756,7 @@ static int __init add_early_maps(void) | |||
| 754 | for (i = 0; i < early_ioapic_map_size; ++i) { | 756 | for (i = 0; i < early_ioapic_map_size; ++i) { |
| 755 | ret = add_special_device(IVHD_SPECIAL_IOAPIC, | 757 | ret = add_special_device(IVHD_SPECIAL_IOAPIC, |
| 756 | early_ioapic_map[i].id, | 758 | early_ioapic_map[i].id, |
| 757 | early_ioapic_map[i].devid, | 759 | &early_ioapic_map[i].devid, |
| 758 | early_ioapic_map[i].cmd_line); | 760 | early_ioapic_map[i].cmd_line); |
| 759 | if (ret) | 761 | if (ret) |
| 760 | return ret; | 762 | return ret; |
| @@ -763,7 +765,7 @@ static int __init add_early_maps(void) | |||
| 763 | for (i = 0; i < early_hpet_map_size; ++i) { | 765 | for (i = 0; i < early_hpet_map_size; ++i) { |
| 764 | ret = add_special_device(IVHD_SPECIAL_HPET, | 766 | ret = add_special_device(IVHD_SPECIAL_HPET, |
| 765 | early_hpet_map[i].id, | 767 | early_hpet_map[i].id, |
| 766 | early_hpet_map[i].devid, | 768 | &early_hpet_map[i].devid, |
| 767 | early_hpet_map[i].cmd_line); | 769 | early_hpet_map[i].cmd_line); |
| 768 | if (ret) | 770 | if (ret) |
| 769 | return ret; | 771 | return ret; |
| @@ -978,10 +980,17 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, | |||
| 978 | PCI_SLOT(devid), | 980 | PCI_SLOT(devid), |
| 979 | PCI_FUNC(devid)); | 981 | PCI_FUNC(devid)); |
| 980 | 982 | ||
| 981 | set_dev_entry_from_acpi(iommu, devid, e->flags, 0); | 983 | ret = add_special_device(type, handle, &devid, false); |
| 982 | ret = add_special_device(type, handle, devid, false); | ||
| 983 | if (ret) | 984 | if (ret) |
| 984 | return ret; | 985 | return ret; |
| 986 | |||
| 987 | /* | ||
| 988 | * add_special_device might update the devid in case a | ||
| 989 | * command-line override is present. So call | ||
| 990 | * set_dev_entry_from_acpi after add_special_device. | ||
| 991 | */ | ||
| 992 | set_dev_entry_from_acpi(iommu, devid, e->flags, 0); | ||
| 993 | |||
| 985 | break; | 994 | break; |
| 986 | } | 995 | } |
| 987 | default: | 996 | default: |
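add_special_device() now receives the device ID by pointer so that a command-line override found on the early IOAPIC/HPET map can be written back before set_dev_entry_from_acpi() programs the entry; that ordering is the point of the new comment. A small standalone sketch of the write-back pattern follows, with a hypothetical override table.

#include <stddef.h>
#include <stdio.h>

struct devid_map { unsigned int id; unsigned short devid; };

/* Hypothetical override list, standing in for the early IOAPIC/HPET maps */
static const struct devid_map overrides[] = {
	{ .id = 2, .devid = 0x00a5 },	/* e.g. an ivrs_ioapic[2]=00:14.5 override */
};

/*
 * Look the id up in the override list; if present, rewrite *devid so the
 * caller programs the overridden device ID rather than the IVRS value.
 */
static int apply_override(unsigned int id, unsigned short *devid)
{
	size_t i;

	for (i = 0; i < sizeof(overrides) / sizeof(overrides[0]); i++) {
		if (overrides[i].id == id) {
			*devid = overrides[i].devid;
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned short devid = 0x00f8;	/* value parsed from the IVRS table */

	apply_override(2, &devid);
	printf("programming devid 0x%04x\n", devid);	/* 0x00a5 after the override */
	return 0;
}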
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 8e43b7cba133..cec51a8ba844 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
| @@ -418,27 +418,6 @@ struct protection_domain { | |||
| 418 | }; | 418 | }; |
| 419 | 419 | ||
| 420 | /* | 420 | /* |
| 421 | * This struct contains device specific data for the IOMMU | ||
| 422 | */ | ||
| 423 | struct iommu_dev_data { | ||
| 424 | struct list_head list; /* For domain->dev_list */ | ||
| 425 | struct list_head dev_data_list; /* For global dev_data_list */ | ||
| 426 | struct iommu_dev_data *alias_data;/* The alias dev_data */ | ||
| 427 | struct protection_domain *domain; /* Domain the device is bound to */ | ||
| 428 | atomic_t bind; /* Domain attach reference count */ | ||
| 429 | u16 devid; /* PCI Device ID */ | ||
| 430 | bool iommu_v2; /* Device can make use of IOMMUv2 */ | ||
| 431 | bool passthrough; /* Default for device is pt_domain */ | ||
| 432 | struct { | ||
| 433 | bool enabled; | ||
| 434 | int qdep; | ||
| 435 | } ats; /* ATS state */ | ||
| 436 | bool pri_tlp; /* PASID TLB required for | ||
| 437 | PPR completions */ | ||
| 438 | u32 errata; /* Bitmap for errata to apply */ | ||
| 439 | }; | ||
| 440 | |||
| 441 | /* | ||
| 442 | * For dynamic growth the aperture size is split into ranges of 128MB of | 421 | * For dynamic growth the aperture size is split into ranges of 128MB of |
| 443 | * DMA address space each. This struct represents one such range. | 422 | * DMA address space each. This struct represents one such range. |
| 444 | */ | 423 | */ |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 37dc3dd0df96..60558f794922 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
| @@ -1557,20 +1557,19 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | |||
| 1557 | return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); | 1557 | return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); |
| 1558 | } | 1558 | } |
| 1559 | 1559 | ||
| 1560 | static int arm_smmu_domain_has_cap(struct iommu_domain *domain, | 1560 | static bool arm_smmu_capable(enum iommu_cap cap) |
| 1561 | unsigned long cap) | ||
| 1562 | { | 1561 | { |
| 1563 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
| 1564 | struct arm_smmu_device *smmu = smmu_domain->smmu; | ||
| 1565 | u32 features = smmu ? smmu->features : 0; | ||
| 1566 | |||
| 1567 | switch (cap) { | 1562 | switch (cap) { |
| 1568 | case IOMMU_CAP_CACHE_COHERENCY: | 1563 | case IOMMU_CAP_CACHE_COHERENCY: |
| 1569 | return features & ARM_SMMU_FEAT_COHERENT_WALK; | 1564 | /* |
| 1565 | * Return true here as the SMMU can always send out coherent | ||
| 1566 | * requests. | ||
| 1567 | */ | ||
| 1568 | return true; | ||
| 1570 | case IOMMU_CAP_INTR_REMAP: | 1569 | case IOMMU_CAP_INTR_REMAP: |
| 1571 | return 1; /* MSIs are just memory writes */ | 1570 | return true; /* MSIs are just memory writes */ |
| 1572 | default: | 1571 | default: |
| 1573 | return 0; | 1572 | return false; |
| 1574 | } | 1573 | } |
| 1575 | } | 1574 | } |
| 1576 | 1575 | ||
| @@ -1646,6 +1645,7 @@ static void arm_smmu_remove_device(struct device *dev) | |||
| 1646 | } | 1645 | } |
| 1647 | 1646 | ||
| 1648 | static const struct iommu_ops arm_smmu_ops = { | 1647 | static const struct iommu_ops arm_smmu_ops = { |
| 1648 | .capable = arm_smmu_capable, | ||
| 1649 | .domain_init = arm_smmu_domain_init, | 1649 | .domain_init = arm_smmu_domain_init, |
| 1650 | .domain_destroy = arm_smmu_domain_destroy, | 1650 | .domain_destroy = arm_smmu_domain_destroy, |
| 1651 | .attach_dev = arm_smmu_attach_dev, | 1651 | .attach_dev = arm_smmu_attach_dev, |
| @@ -1653,7 +1653,6 @@ static const struct iommu_ops arm_smmu_ops = { | |||
| 1653 | .map = arm_smmu_map, | 1653 | .map = arm_smmu_map, |
| 1654 | .unmap = arm_smmu_unmap, | 1654 | .unmap = arm_smmu_unmap, |
| 1655 | .iova_to_phys = arm_smmu_iova_to_phys, | 1655 | .iova_to_phys = arm_smmu_iova_to_phys, |
| 1656 | .domain_has_cap = arm_smmu_domain_has_cap, | ||
| 1657 | .add_device = arm_smmu_add_device, | 1656 | .add_device = arm_smmu_add_device, |
| 1658 | .remove_device = arm_smmu_remove_device, | 1657 | .remove_device = arm_smmu_remove_device, |
| 1659 | .pgsize_bitmap = (SECTION_SIZE | | 1658 | .pgsize_bitmap = (SECTION_SIZE | |
| @@ -1886,7 +1885,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
| 1886 | return 0; | 1885 | return 0; |
| 1887 | } | 1886 | } |
| 1888 | 1887 | ||
| 1889 | static struct of_device_id arm_smmu_of_match[] = { | 1888 | static const struct of_device_id arm_smmu_of_match[] = { |
| 1890 | { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 }, | 1889 | { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 }, |
| 1891 | { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 }, | 1890 | { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 }, |
| 1892 | { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 }, | 1891 | { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 }, |
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 06d268abe951..c5c61cabd6e3 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
| @@ -155,6 +155,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) | |||
| 155 | if (event == BUS_NOTIFY_ADD_DEVICE) { | 155 | if (event == BUS_NOTIFY_ADD_DEVICE) { |
| 156 | for (tmp = dev; tmp; tmp = tmp->bus->self) { | 156 | for (tmp = dev; tmp; tmp = tmp->bus->self) { |
| 157 | level--; | 157 | level--; |
| 158 | info->path[level].bus = tmp->bus->number; | ||
| 158 | info->path[level].device = PCI_SLOT(tmp->devfn); | 159 | info->path[level].device = PCI_SLOT(tmp->devfn); |
| 159 | info->path[level].function = PCI_FUNC(tmp->devfn); | 160 | info->path[level].function = PCI_FUNC(tmp->devfn); |
| 160 | if (pci_is_root_bus(tmp->bus)) | 161 | if (pci_is_root_bus(tmp->bus)) |
| @@ -177,17 +178,33 @@ static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus, | |||
| 177 | int i; | 178 | int i; |
| 178 | 179 | ||
| 179 | if (info->bus != bus) | 180 | if (info->bus != bus) |
| 180 | return false; | 181 | goto fallback; |
| 181 | if (info->level != count) | 182 | if (info->level != count) |
| 182 | return false; | 183 | goto fallback; |
| 183 | 184 | ||
| 184 | for (i = 0; i < count; i++) { | 185 | for (i = 0; i < count; i++) { |
| 185 | if (path[i].device != info->path[i].device || | 186 | if (path[i].device != info->path[i].device || |
| 186 | path[i].function != info->path[i].function) | 187 | path[i].function != info->path[i].function) |
| 187 | return false; | 188 | goto fallback; |
| 188 | } | 189 | } |
| 189 | 190 | ||
| 190 | return true; | 191 | return true; |
| 192 | |||
| 193 | fallback: | ||
| 194 | |||
| 195 | if (count != 1) | ||
| 196 | return false; | ||
| 197 | |||
| 198 | i = info->level - 1; | ||
| 199 | if (bus == info->path[i].bus && | ||
| 200 | path[0].device == info->path[i].device && | ||
| 201 | path[0].function == info->path[i].function) { | ||
| 202 | pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n", | ||
| 203 | bus, path[0].device, path[0].function); | ||
| 204 | return true; | ||
| 205 | } | ||
| 206 | |||
| 207 | return false; | ||
| 191 | } | 208 | } |
| 192 | 209 | ||
| 193 | /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */ | 210 | /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */ |
| @@ -247,7 +264,7 @@ int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, | |||
| 247 | 264 | ||
| 248 | for_each_active_dev_scope(devices, count, index, tmp) | 265 | for_each_active_dev_scope(devices, count, index, tmp) |
| 249 | if (tmp == &info->dev->dev) { | 266 | if (tmp == &info->dev->dev) { |
| 250 | rcu_assign_pointer(devices[index].dev, NULL); | 267 | RCU_INIT_POINTER(devices[index].dev, NULL); |
| 251 | synchronize_rcu(); | 268 | synchronize_rcu(); |
| 252 | put_device(tmp); | 269 | put_device(tmp); |
| 253 | return 1; | 270 | return 1; |
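The dmar.c change records the bus number for each hop and adds a fallback so a single-hop scope that only matches the last recorded hop is still accepted, which papers over firmware that describes an RMRR device behind a bridge incorrectly. Below is a standalone model of that matching logic, with simplified types in place of the kernel's path structures.

#include <stdbool.h>
#include <stdio.h>

struct hop { unsigned char bus, device, function; };

/*
 * Model of dmar_match_pci_path(): try an exact match on the recorded
 * path first; if that fails and the firmware scope names only a single
 * hop, compare it against the last recorded hop, which is how some
 * BIOSes (incorrectly) describe a device behind a PCIe-to-PCI bridge.
 */
static bool match_path(const struct hop *info, int info_len, unsigned char info_bus,
		       unsigned char bus, const struct hop *path, int count)
{
	int i;

	if (info_bus == bus && info_len == count) {
		for (i = 0; i < count; i++)
			if (path[i].device != info[i].device ||
			    path[i].function != info[i].function)
				goto fallback;
		return true;
	}

fallback:
	if (count != 1)
		return false;

	i = info_len - 1;
	if (bus == info[i].bus &&
	    path[0].device == info[i].device &&
	    path[0].function == info[i].function) {
		printf("firmware bug: broken scope for %02x:%02x.%x, applying workaround\n",
		       bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}

int main(void)
{
	/* Recorded path: root port 00:1c.0, then the device at 02:00.0 */
	const struct hop recorded[] = { { 0, 0x1c, 0 }, { 2, 0x00, 0 } };
	/* Broken firmware scope: a single hop, relative to bus 2 */
	const struct hop scope[] = { { 0, 0x00, 0 } };

	/* The exact match fails (start bus 0 vs 2); the fallback accepts it */
	printf("match: %d\n", match_path(recorded, 2, 0, 2, scope, 1));
	return 0;
}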
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index d037e87a1fe5..74233186f6f7 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | typedef u32 sysmmu_iova_t; | 32 | typedef u32 sysmmu_iova_t; |
| 33 | typedef u32 sysmmu_pte_t; | 33 | typedef u32 sysmmu_pte_t; |
| 34 | 34 | ||
| 35 | /* We does not consider super section mapping (16MB) */ | 35 | /* We do not consider super section mapping (16MB) */ |
| 36 | #define SECT_ORDER 20 | 36 | #define SECT_ORDER 20 |
| 37 | #define LPAGE_ORDER 16 | 37 | #define LPAGE_ORDER 16 |
| 38 | #define SPAGE_ORDER 12 | 38 | #define SPAGE_ORDER 12 |
| @@ -307,7 +307,7 @@ static void show_fault_information(const char *name, | |||
| 307 | 307 | ||
| 308 | static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) | 308 | static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) |
| 309 | { | 309 | { |
| 310 | /* SYSMMU is in blocked when interrupt occurred. */ | 310 | /* SYSMMU is in blocked state when interrupt occurred. */ |
| 311 | struct sysmmu_drvdata *data = dev_id; | 311 | struct sysmmu_drvdata *data = dev_id; |
| 312 | enum exynos_sysmmu_inttype itype; | 312 | enum exynos_sysmmu_inttype itype; |
| 313 | sysmmu_iova_t addr = -1; | 313 | sysmmu_iova_t addr = -1; |
| @@ -567,8 +567,8 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova, | |||
| 567 | /* | 567 | /* |
| 568 | * L2TLB invalidation required | 568 | * L2TLB invalidation required |
| 569 | * 4KB page: 1 invalidation | 569 | * 4KB page: 1 invalidation |
| 570 | * 64KB page: 16 invalidation | 570 | * 64KB page: 16 invalidations |
| 571 | * 1MB page: 64 invalidation | 571 | * 1MB page: 64 invalidations |
| 572 | * because it is set-associative TLB | 572 | * because it is set-associative TLB |
| 573 | * with 8-way and 64 sets. | 573 | * with 8-way and 64 sets. |
| 574 | * 1MB page can be cached in one of all sets. | 574 | * 1MB page can be cached in one of all sets. |
| @@ -714,7 +714,7 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain) | |||
| 714 | if (!priv->lv2entcnt) | 714 | if (!priv->lv2entcnt) |
| 715 | goto err_counter; | 715 | goto err_counter; |
| 716 | 716 | ||
| 717 | /* w/a of System MMU v3.3 to prevent caching 1MiB mapping */ | 717 | /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ |
| 718 | for (i = 0; i < NUM_LV1ENTRIES; i += 8) { | 718 | for (i = 0; i < NUM_LV1ENTRIES; i += 8) { |
| 719 | priv->pgtable[i + 0] = ZERO_LV2LINK; | 719 | priv->pgtable[i + 0] = ZERO_LV2LINK; |
| 720 | priv->pgtable[i + 1] = ZERO_LV2LINK; | 720 | priv->pgtable[i + 1] = ZERO_LV2LINK; |
| @@ -861,14 +861,14 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv, | |||
| 861 | pgtable_flush(sent, sent + 1); | 861 | pgtable_flush(sent, sent + 1); |
| 862 | 862 | ||
| 863 | /* | 863 | /* |
| 864 | * If pretched SLPD is a fault SLPD in zero_l2_table, FLPD cache | 864 | * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, |
| 865 | * may caches the address of zero_l2_table. This function | 865 | * FLPD cache may cache the address of zero_l2_table. This |
| 866 | * replaces the zero_l2_table with new L2 page table to write | 866 | * function replaces the zero_l2_table with new L2 page table |
| 867 | * valid mappings. | 867 | * to write valid mappings. |
| 868 | * Accessing the valid area may cause page fault since FLPD | 868 | * Accessing the valid area may cause page fault since FLPD |
| 869 | * cache may still caches zero_l2_table for the valid area | 869 | * cache may still cache zero_l2_table for the valid area |
| 870 | * instead of new L2 page table that have the mapping | 870 | * instead of new L2 page table that has the mapping |
| 871 | * information of the valid area | 871 | * information of the valid area. |
| 872 | * Thus any replacement of zero_l2_table with other valid L2 | 872 | * Thus any replacement of zero_l2_table with other valid L2 |
| 873 | * page table must involve FLPD cache invalidation for System | 873 | * page table must involve FLPD cache invalidation for System |
| 874 | * MMU v3.3. | 874 | * MMU v3.3. |
| @@ -963,27 +963,27 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, | |||
| 963 | /* | 963 | /* |
| 964 | * *CAUTION* to the I/O virtual memory managers that support exynos-iommu: | 964 | * *CAUTION* to the I/O virtual memory managers that support exynos-iommu: |
| 965 | * | 965 | * |
| 966 | * System MMU v3.x have an advanced logic to improve address translation | 966 | * System MMU v3.x has advanced logic to improve address translation |
| 967 | * performance with caching more page table entries by a page table walk. | 967 | * performance with caching more page table entries by a page table walk. |
| 968 | * However, the logic has a bug that caching fault page table entries and System | 968 | * However, the logic has a bug that while caching faulty page table entries, |
| 969 | * MMU reports page fault if the cached fault entry is hit even though the fault | 969 | * System MMU reports page fault if the cached fault entry is hit even though |
| 970 | * entry is updated to a valid entry after the entry is cached. | 970 | * the fault entry is updated to a valid entry after the entry is cached. |
| 971 | * To prevent caching fault page table entries which may be updated to valid | 971 | * To prevent caching faulty page table entries which may be updated to valid |
| 972 | * entries later, the virtual memory manager should care about the w/a about the | 972 | * entries later, the virtual memory manager should care about the workaround |
| 973 | * problem. The followings describe w/a. | 973 | * for the problem. The following describes the workaround. |
| 974 | * | 974 | * |
| 975 | * Any two consecutive I/O virtual address regions must have a hole of 128KiB | 975 | * Any two consecutive I/O virtual address regions must have a hole of 128KiB |
| 976 | * in maximum to prevent misbehavior of System MMU 3.x. (w/a of h/w bug) | 976 | * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug). |
| 977 | * | 977 | * |
| 978 | * Precisely, any start address of I/O virtual region must be aligned by | 978 | * Precisely, any start address of I/O virtual region must be aligned with |
| 979 | * the following sizes for System MMU v3.1 and v3.2. | 979 | * the following sizes for System MMU v3.1 and v3.2. |
| 980 | * System MMU v3.1: 128KiB | 980 | * System MMU v3.1: 128KiB |
| 981 | * System MMU v3.2: 256KiB | 981 | * System MMU v3.2: 256KiB |
| 982 | * | 982 | * |
| 983 | * Because System MMU v3.3 caches page table entries more aggressively, it needs | 983 | * Because System MMU v3.3 caches page table entries more aggressively, it needs |
| 984 | * more w/a. | 984 | * more workarounds. |
| 985 | * - Any two consecutive I/O virtual regions must be have a hole of larger size | 985 | * - Any two consecutive I/O virtual regions must have a hole of size larger |
| 986 | * than or equal size to 128KiB. | 986 | * than or equal to 128KiB. |
| 987 | * - Start address of an I/O virtual region must be aligned by 128KiB. | 987 | * - Start address of an I/O virtual region must be aligned by 128KiB. |
| 988 | */ | 988 | */ |
| 989 | static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova, | 989 | static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova, |
| @@ -1061,7 +1061,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain, | |||
| 1061 | goto err; | 1061 | goto err; |
| 1062 | } | 1062 | } |
| 1063 | 1063 | ||
| 1064 | *ent = ZERO_LV2LINK; /* w/a for h/w bug in Sysmem MMU v3.3 */ | 1064 | /* workaround for h/w bug in System MMU v3.3 */ |
| 1065 | *ent = ZERO_LV2LINK; | ||
| 1065 | pgtable_flush(ent, ent + 1); | 1066 | pgtable_flush(ent, ent + 1); |
| 1066 | size = SECT_SIZE; | 1067 | size = SECT_SIZE; |
| 1067 | goto done; | 1068 | goto done; |
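The rewritten exynos-iommu comments describe a hardware workaround rather than new code: I/O virtual regions must start on a 128KiB boundary (256KiB on System MMU v3.2) and consecutive regions need a hole of at least 128KiB. As an illustration only, here is a tiny helper an IOVA allocator could use to honour that rule; the constants and the helper itself are assumptions, not part of the driver.

#include <stdio.h>

#define SYSMMU_IOVA_ALIGN	(128 * 1024)	/* 128KiB for v3.1/v3.3 (256KiB for v3.2) */
#define SYSMMU_IOVA_HOLE	(128 * 1024)	/* minimum gap between regions */

/*
 * Given the end of the previously allocated I/O virtual region, return
 * the lowest start address the next region may use: leave the required
 * hole, then round up to the alignment the System MMU needs.
 */
static unsigned long next_region_start(unsigned long prev_end)
{
	unsigned long start = prev_end + SYSMMU_IOVA_HOLE;

	return (start + SYSMMU_IOVA_ALIGN - 1) & ~(unsigned long)(SYSMMU_IOVA_ALIGN - 1);
}

int main(void)
{
	/* previous region ended at 0x20057000 -> next region may start at 0x20080000 */
	printf("0x%lx\n", next_region_start(0x20057000UL));
	return 0;
}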
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 56feed7cec15..c828f80d48b0 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c | |||
| @@ -411,8 +411,7 @@ static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, | |||
| 411 | return get_phys_addr(dma_domain, iova); | 411 | return get_phys_addr(dma_domain, iova); |
| 412 | } | 412 | } |
| 413 | 413 | ||
| 414 | static int fsl_pamu_domain_has_cap(struct iommu_domain *domain, | 414 | static bool fsl_pamu_capable(enum iommu_cap cap) |
| 415 | unsigned long cap) | ||
| 416 | { | 415 | { |
| 417 | return cap == IOMMU_CAP_CACHE_COHERENCY; | 416 | return cap == IOMMU_CAP_CACHE_COHERENCY; |
| 418 | } | 417 | } |
| @@ -1080,6 +1079,7 @@ static u32 fsl_pamu_get_windows(struct iommu_domain *domain) | |||
| 1080 | } | 1079 | } |
| 1081 | 1080 | ||
| 1082 | static const struct iommu_ops fsl_pamu_ops = { | 1081 | static const struct iommu_ops fsl_pamu_ops = { |
| 1082 | .capable = fsl_pamu_capable, | ||
| 1083 | .domain_init = fsl_pamu_domain_init, | 1083 | .domain_init = fsl_pamu_domain_init, |
| 1084 | .domain_destroy = fsl_pamu_domain_destroy, | 1084 | .domain_destroy = fsl_pamu_domain_destroy, |
| 1085 | .attach_dev = fsl_pamu_attach_device, | 1085 | .attach_dev = fsl_pamu_attach_device, |
| @@ -1089,7 +1089,6 @@ static const struct iommu_ops fsl_pamu_ops = { | |||
| 1089 | .domain_get_windows = fsl_pamu_get_windows, | 1089 | .domain_get_windows = fsl_pamu_get_windows, |
| 1090 | .domain_set_windows = fsl_pamu_set_windows, | 1090 | .domain_set_windows = fsl_pamu_set_windows, |
| 1091 | .iova_to_phys = fsl_pamu_iova_to_phys, | 1091 | .iova_to_phys = fsl_pamu_iova_to_phys, |
| 1092 | .domain_has_cap = fsl_pamu_domain_has_cap, | ||
| 1093 | .domain_set_attr = fsl_pamu_set_domain_attr, | 1092 | .domain_set_attr = fsl_pamu_set_domain_attr, |
| 1094 | .domain_get_attr = fsl_pamu_get_domain_attr, | 1093 | .domain_get_attr = fsl_pamu_get_domain_attr, |
| 1095 | .add_device = fsl_pamu_add_device, | 1094 | .add_device = fsl_pamu_add_device, |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 5619f264862d..a27d6cb1a793 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -3865,8 +3865,7 @@ static int device_notifier(struct notifier_block *nb, | |||
| 3865 | if (iommu_dummy(dev)) | 3865 | if (iommu_dummy(dev)) |
| 3866 | return 0; | 3866 | return 0; |
| 3867 | 3867 | ||
| 3868 | if (action != BUS_NOTIFY_UNBOUND_DRIVER && | 3868 | if (action != BUS_NOTIFY_REMOVED_DEVICE) |
| 3869 | action != BUS_NOTIFY_DEL_DEVICE) | ||
| 3870 | return 0; | 3869 | return 0; |
| 3871 | 3870 | ||
| 3872 | /* | 3871 | /* |
| @@ -4415,17 +4414,14 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | |||
| 4415 | return phys; | 4414 | return phys; |
| 4416 | } | 4415 | } |
| 4417 | 4416 | ||
| 4418 | static int intel_iommu_domain_has_cap(struct iommu_domain *domain, | 4417 | static bool intel_iommu_capable(enum iommu_cap cap) |
| 4419 | unsigned long cap) | ||
| 4420 | { | 4418 | { |
| 4421 | struct dmar_domain *dmar_domain = domain->priv; | ||
| 4422 | |||
| 4423 | if (cap == IOMMU_CAP_CACHE_COHERENCY) | 4419 | if (cap == IOMMU_CAP_CACHE_COHERENCY) |
| 4424 | return dmar_domain->iommu_snooping; | 4420 | return domain_update_iommu_snooping(NULL) == 1; |
| 4425 | if (cap == IOMMU_CAP_INTR_REMAP) | 4421 | if (cap == IOMMU_CAP_INTR_REMAP) |
| 4426 | return irq_remapping_enabled; | 4422 | return irq_remapping_enabled == 1; |
| 4427 | 4423 | ||
| 4428 | return 0; | 4424 | return false; |
| 4429 | } | 4425 | } |
| 4430 | 4426 | ||
| 4431 | static int intel_iommu_add_device(struct device *dev) | 4427 | static int intel_iommu_add_device(struct device *dev) |
| @@ -4464,6 +4460,7 @@ static void intel_iommu_remove_device(struct device *dev) | |||
| 4464 | } | 4460 | } |
| 4465 | 4461 | ||
| 4466 | static const struct iommu_ops intel_iommu_ops = { | 4462 | static const struct iommu_ops intel_iommu_ops = { |
| 4463 | .capable = intel_iommu_capable, | ||
| 4467 | .domain_init = intel_iommu_domain_init, | 4464 | .domain_init = intel_iommu_domain_init, |
| 4468 | .domain_destroy = intel_iommu_domain_destroy, | 4465 | .domain_destroy = intel_iommu_domain_destroy, |
| 4469 | .attach_dev = intel_iommu_attach_device, | 4466 | .attach_dev = intel_iommu_attach_device, |
| @@ -4471,7 +4468,6 @@ static const struct iommu_ops intel_iommu_ops = { | |||
| 4471 | .map = intel_iommu_map, | 4468 | .map = intel_iommu_map, |
| 4472 | .unmap = intel_iommu_unmap, | 4469 | .unmap = intel_iommu_unmap, |
| 4473 | .iova_to_phys = intel_iommu_iova_to_phys, | 4470 | .iova_to_phys = intel_iommu_iova_to_phys, |
| 4474 | .domain_has_cap = intel_iommu_domain_has_cap, | ||
| 4475 | .add_device = intel_iommu_add_device, | 4471 | .add_device = intel_iommu_add_device, |
| 4476 | .remove_device = intel_iommu_remove_device, | 4472 | .remove_device = intel_iommu_remove_device, |
| 4477 | .pgsize_bitmap = INTEL_IOMMU_PGSIZES, | 4473 | .pgsize_bitmap = INTEL_IOMMU_PGSIZES, |
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 0df41f6264f5..7c80661b35c1 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
| @@ -438,8 +438,7 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) | |||
| 438 | (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); | 438 | (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); |
| 439 | 439 | ||
| 440 | /* Set interrupt-remapping table pointer */ | 440 | /* Set interrupt-remapping table pointer */ |
| 441 | iommu->gcmd |= DMA_GCMD_SIRTP; | 441 | writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG); |
| 442 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); | ||
| 443 | 442 | ||
| 444 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | 443 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 445 | readl, (sts & DMA_GSTS_IRTPS), sts); | 444 | readl, (sts & DMA_GSTS_IRTPS), sts); |
| @@ -1139,7 +1138,7 @@ static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq, | |||
| 1139 | return ret; | 1138 | return ret; |
| 1140 | } | 1139 | } |
| 1141 | 1140 | ||
| 1142 | static int intel_setup_hpet_msi(unsigned int irq, unsigned int id) | 1141 | static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id) |
| 1143 | { | 1142 | { |
| 1144 | int ret = -1; | 1143 | int ret = -1; |
| 1145 | struct intel_iommu *iommu; | 1144 | struct intel_iommu *iommu; |
| @@ -1170,5 +1169,5 @@ struct irq_remap_ops intel_irq_remap_ops = { | |||
| 1170 | .compose_msi_msg = intel_compose_msi_msg, | 1169 | .compose_msi_msg = intel_compose_msi_msg, |
| 1171 | .msi_alloc_irq = intel_msi_alloc_irq, | 1170 | .msi_alloc_irq = intel_msi_alloc_irq, |
| 1172 | .msi_setup_irq = intel_msi_setup_irq, | 1171 | .msi_setup_irq = intel_msi_setup_irq, |
| 1173 | .setup_hpet_msi = intel_setup_hpet_msi, | 1172 | .alloc_hpet_msi = intel_alloc_hpet_msi, |
| 1174 | }; | 1173 | }; |
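The intel_irq_remapping.c fix stops folding DMA_GCMD_SIRTP into the cached iommu->gcmd value: SIRTP is a one-shot command bit that latches the table pointer, not sticky state, so it should be OR-ed into a single register write only. A standalone sketch of the difference follows, with illustrative bit values.

#include <stdio.h>

#define GCMD_TE		(1u << 31)	/* sticky "translation enable" bit (illustrative) */
#define GCMD_SIRTP	(1u << 24)	/* one-shot "set interrupt remap table pointer" command */

static unsigned int gcmd_reg;	/* models the hardware GCMD register */
static unsigned int gcmd_cache;	/* models iommu->gcmd, the cached sticky state */

static void writel_gcmd(unsigned int val)
{
	gcmd_reg = val;
	printf("GCMD write: 0x%08x\n", val);
}

/*
 * One-shot command: OR the bit into a single register write, but do not
 * fold it into the cached state, otherwise every later write (for
 * example a re-enable after resume) would re-issue the command.
 */
static void set_irq_remap_table_pointer(void)
{
	writel_gcmd(gcmd_cache | GCMD_SIRTP);
}

int main(void)
{
	set_irq_remap_table_pointer();	/* 0x01000000 */
	gcmd_cache |= GCMD_TE;		/* later, a genuinely sticky bit is set */
	writel_gcmd(gcmd_cache);	/* 0x80000000: SIRTP is not replayed */
	return 0;
}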
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 0639b9274b11..ed8b04867b1f 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/notifier.h> | 30 | #include <linux/notifier.h> |
| 31 | #include <linux/err.h> | 31 | #include <linux/err.h> |
| 32 | #include <linux/pci.h> | 32 | #include <linux/pci.h> |
| 33 | #include <linux/bitops.h> | ||
| 33 | #include <trace/events/iommu.h> | 34 | #include <trace/events/iommu.h> |
| 34 | 35 | ||
| 35 | static struct kset *iommu_group_kset; | 36 | static struct kset *iommu_group_kset; |
| @@ -519,6 +520,9 @@ int iommu_group_id(struct iommu_group *group) | |||
| 519 | } | 520 | } |
| 520 | EXPORT_SYMBOL_GPL(iommu_group_id); | 521 | EXPORT_SYMBOL_GPL(iommu_group_id); |
| 521 | 522 | ||
| 523 | static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, | ||
| 524 | unsigned long *devfns); | ||
| 525 | |||
| 522 | /* | 526 | /* |
| 523 | * To consider a PCI device isolated, we require ACS to support Source | 527 | * To consider a PCI device isolated, we require ACS to support Source |
| 524 | * Validation, Request Redirection, Completer Redirection, and Upstream | 528 | * Validation, Request Redirection, Completer Redirection, and Upstream |
| @@ -529,6 +533,86 @@ EXPORT_SYMBOL_GPL(iommu_group_id); | |||
| 529 | */ | 533 | */ |
| 530 | #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) | 534 | #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) |
| 531 | 535 | ||
| 536 | /* | ||
| 537 | * For multifunction devices which are not isolated from each other, find | ||
| 538 | * all the other non-isolated functions and look for existing groups. For | ||
| 539 | * each function, we also need to look for aliases to or from other devices | ||
| 540 | * that may already have a group. | ||
| 541 | */ | ||
| 542 | static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, | ||
| 543 | unsigned long *devfns) | ||
| 544 | { | ||
| 545 | struct pci_dev *tmp = NULL; | ||
| 546 | struct iommu_group *group; | ||
| 547 | |||
| 548 | if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) | ||
| 549 | return NULL; | ||
| 550 | |||
| 551 | for_each_pci_dev(tmp) { | ||
| 552 | if (tmp == pdev || tmp->bus != pdev->bus || | ||
| 553 | PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || | ||
| 554 | pci_acs_enabled(tmp, REQ_ACS_FLAGS)) | ||
| 555 | continue; | ||
| 556 | |||
| 557 | group = get_pci_alias_group(tmp, devfns); | ||
| 558 | if (group) { | ||
| 559 | pci_dev_put(tmp); | ||
| 560 | return group; | ||
| 561 | } | ||
| 562 | } | ||
| 563 | |||
| 564 | return NULL; | ||
| 565 | } | ||
| 566 | |||
| 567 | /* | ||
| 568 | * Look for aliases to or from the given device for exisiting groups. The | ||
| 569 | * dma_alias_devfn only supports aliases on the same bus, therefore the search | ||
| 570 | * space is quite small (especially since we're really only looking at pcie | ||
| 571 | * device, and therefore only expect multiple slots on the root complex or | ||
| 572 | * downstream switch ports). It's conceivable though that a pair of | ||
| 573 | * multifunction devices could have aliases between them that would cause a | ||
| 574 | * loop. To prevent this, we use a bitmap to track where we've been. | ||
| 575 | */ | ||
| 576 | static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, | ||
| 577 | unsigned long *devfns) | ||
| 578 | { | ||
| 579 | struct pci_dev *tmp = NULL; | ||
| 580 | struct iommu_group *group; | ||
| 581 | |||
| 582 | if (test_and_set_bit(pdev->devfn & 0xff, devfns)) | ||
| 583 | return NULL; | ||
| 584 | |||
| 585 | group = iommu_group_get(&pdev->dev); | ||
| 586 | if (group) | ||
| 587 | return group; | ||
| 588 | |||
| 589 | for_each_pci_dev(tmp) { | ||
| 590 | if (tmp == pdev || tmp->bus != pdev->bus) | ||
| 591 | continue; | ||
| 592 | |||
| 593 | /* We alias them or they alias us */ | ||
| 594 | if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && | ||
| 595 | pdev->dma_alias_devfn == tmp->devfn) || | ||
| 596 | ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && | ||
| 597 | tmp->dma_alias_devfn == pdev->devfn)) { | ||
| 598 | |||
| 599 | group = get_pci_alias_group(tmp, devfns); | ||
| 600 | if (group) { | ||
| 601 | pci_dev_put(tmp); | ||
| 602 | return group; | ||
| 603 | } | ||
| 604 | |||
| 605 | group = get_pci_function_alias_group(tmp, devfns); | ||
| 606 | if (group) { | ||
| 607 | pci_dev_put(tmp); | ||
| 608 | return group; | ||
| 609 | } | ||
| 610 | } | ||
| 611 | } | ||
| 612 | |||
| 613 | return NULL; | ||
| 614 | } | ||
| 615 | |||
| 532 | struct group_for_pci_data { | 616 | struct group_for_pci_data { |
| 533 | struct pci_dev *pdev; | 617 | struct pci_dev *pdev; |
| 534 | struct iommu_group *group; | 618 | struct iommu_group *group; |
| @@ -557,7 +641,7 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev) | |||
| 557 | struct group_for_pci_data data; | 641 | struct group_for_pci_data data; |
| 558 | struct pci_bus *bus; | 642 | struct pci_bus *bus; |
| 559 | struct iommu_group *group = NULL; | 643 | struct iommu_group *group = NULL; |
| 560 | struct pci_dev *tmp; | 644 | u64 devfns[4] = { 0 }; |
| 561 | 645 | ||
| 562 | /* | 646 | /* |
| 563 | * Find the upstream DMA alias for the device. A device must not | 647 | * Find the upstream DMA alias for the device. A device must not |
| @@ -591,76 +675,21 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev) | |||
| 591 | } | 675 | } |
| 592 | 676 | ||
| 593 | /* | 677 | /* |
| 594 | * Next we need to consider DMA alias quirks. If one device aliases | 678 | * Look for existing groups on device aliases. If we alias another |
| 595 | * to another, they should be grouped together. It's theoretically | 679 | * device or another device aliases us, use the same group. |
| 596 | * possible that aliases could create chains of devices where each | ||
| 597 | * device aliases another device. If we then factor in multifunction | ||
| 598 | * ACS grouping requirements, each alias could incorporate a new slot | ||
| 599 | * with multiple functions, each with aliases. This is all extremely | ||
| 600 | * unlikely as DMA alias quirks are typically only used for PCIe | ||
| 601 | * devices where we usually have a single slot per bus. Furthermore, | ||
| 602 | * the alias quirk is usually to another function within the slot | ||
| 603 | * (and ACS multifunction is not supported) or to a different slot | ||
| 604 | * that doesn't physically exist. The likely scenario is therefore | ||
| 605 | * that everything on the bus gets grouped together. To reduce the | ||
| 606 | * problem space, share the IOMMU group for all devices on the bus | ||
| 607 | * if a DMA alias quirk is present on the bus. | ||
| 608 | */ | 680 | */ |
| 609 | tmp = NULL; | 681 | group = get_pci_alias_group(pdev, (unsigned long *)devfns); |
| 610 | for_each_pci_dev(tmp) { | 682 | if (group) |
| 611 | if (tmp->bus != pdev->bus || | 683 | return group; |
| 612 | !(tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) | ||
| 613 | continue; | ||
| 614 | |||
| 615 | pci_dev_put(tmp); | ||
| 616 | tmp = NULL; | ||
| 617 | |||
| 618 | /* We have an alias quirk, search for an existing group */ | ||
| 619 | for_each_pci_dev(tmp) { | ||
| 620 | struct iommu_group *group_tmp; | ||
| 621 | |||
| 622 | if (tmp->bus != pdev->bus) | ||
| 623 | continue; | ||
| 624 | |||
| 625 | group_tmp = iommu_group_get(&tmp->dev); | ||
| 626 | if (!group) { | ||
| 627 | group = group_tmp; | ||
| 628 | continue; | ||
| 629 | } | ||
| 630 | |||
| 631 | if (group_tmp) { | ||
| 632 | WARN_ON(group != group_tmp); | ||
| 633 | iommu_group_put(group_tmp); | ||
| 634 | } | ||
| 635 | } | ||
| 636 | |||
| 637 | return group ? group : iommu_group_alloc(); | ||
| 638 | } | ||
| 639 | |||
| 640 | /* | ||
| 641 | * Non-multifunction devices or multifunction devices supporting | ||
| 642 | * ACS get their own group. | ||
| 643 | */ | ||
| 644 | if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) | ||
| 645 | return iommu_group_alloc(); | ||
| 646 | 684 | ||
| 647 | /* | 685 | /* |
| 648 | * Multifunction devices not supporting ACS share a group with other | 686 | * Look for existing groups on non-isolated functions on the same |
| 649 | * similar devices in the same slot. | 687 | * slot and aliases of those functions, if any. No need to clear |
| 688 | * the search bitmap, the tested devfns are still valid. | ||
| 650 | */ | 689 | */ |
| 651 | tmp = NULL; | 690 | group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); |
| 652 | for_each_pci_dev(tmp) { | 691 | if (group) |
| 653 | if (tmp == pdev || tmp->bus != pdev->bus || | 692 | return group; |
| 654 | PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || | ||
| 655 | pci_acs_enabled(tmp, REQ_ACS_FLAGS)) | ||
| 656 | continue; | ||
| 657 | |||
| 658 | group = iommu_group_get(&tmp->dev); | ||
| 659 | if (group) { | ||
| 660 | pci_dev_put(tmp); | ||
| 661 | return group; | ||
| 662 | } | ||
| 663 | } | ||
| 664 | 693 | ||
| 665 | /* No shared group found, allocate new */ | 694 | /* No shared group found, allocate new */ |
| 666 | return iommu_group_alloc(); | 695 | return iommu_group_alloc(); |
| @@ -770,18 +799,26 @@ static int iommu_bus_notifier(struct notifier_block *nb, | |||
| 770 | return 0; | 799 | return 0; |
| 771 | } | 800 | } |
| 772 | 801 | ||
| 773 | static struct notifier_block iommu_bus_nb = { | 802 | static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) |
| 774 | .notifier_call = iommu_bus_notifier, | ||
| 775 | }; | ||
| 776 | |||
| 777 | static void iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) | ||
| 778 | { | 803 | { |
| 804 | int err; | ||
| 805 | struct notifier_block *nb; | ||
| 779 | struct iommu_callback_data cb = { | 806 | struct iommu_callback_data cb = { |
| 780 | .ops = ops, | 807 | .ops = ops, |
| 781 | }; | 808 | }; |
| 782 | 809 | ||
| 783 | bus_register_notifier(bus, &iommu_bus_nb); | 810 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); |
| 784 | bus_for_each_dev(bus, NULL, &cb, add_iommu_group); | 811 | if (!nb) |
| 812 | return -ENOMEM; | ||
| 813 | |||
| 814 | nb->notifier_call = iommu_bus_notifier; | ||
| 815 | |||
| 816 | err = bus_register_notifier(bus, nb); | ||
| 817 | if (err) { | ||
| 818 | kfree(nb); | ||
| 819 | return err; | ||
| 820 | } | ||
| 821 | return bus_for_each_dev(bus, NULL, &cb, add_iommu_group); | ||
| 785 | } | 822 | } |
| 786 | 823 | ||
| 787 | /** | 824 | /** |
| @@ -805,9 +842,7 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) | |||
| 805 | bus->iommu_ops = ops; | 842 | bus->iommu_ops = ops; |
| 806 | 843 | ||
| 807 | /* Do IOMMU specific setup for this bus-type */ | 844 | /* Do IOMMU specific setup for this bus-type */ |
| 808 | iommu_bus_init(bus, ops); | 845 | return iommu_bus_init(bus, ops); |
| 809 | |||
| 810 | return 0; | ||
| 811 | } | 846 | } |
| 812 | EXPORT_SYMBOL_GPL(bus_set_iommu); | 847 | EXPORT_SYMBOL_GPL(bus_set_iommu); |
| 813 | 848 | ||
| @@ -817,6 +852,15 @@ bool iommu_present(struct bus_type *bus) | |||
| 817 | } | 852 | } |
| 818 | EXPORT_SYMBOL_GPL(iommu_present); | 853 | EXPORT_SYMBOL_GPL(iommu_present); |
| 819 | 854 | ||
| 855 | bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) | ||
| 856 | { | ||
| 857 | if (!bus->iommu_ops || !bus->iommu_ops->capable) | ||
| 858 | return false; | ||
| 859 | |||
| 860 | return bus->iommu_ops->capable(cap); | ||
| 861 | } | ||
| 862 | EXPORT_SYMBOL_GPL(iommu_capable); | ||
| 863 | |||
| 820 | /** | 864 | /** |
| 821 | * iommu_set_fault_handler() - set a fault handler for an iommu domain | 865 | * iommu_set_fault_handler() - set a fault handler for an iommu domain |
| 822 | * @domain: iommu domain | 866 | * @domain: iommu domain |
| @@ -947,16 +991,6 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) | |||
| 947 | } | 991 | } |
| 948 | EXPORT_SYMBOL_GPL(iommu_iova_to_phys); | 992 | EXPORT_SYMBOL_GPL(iommu_iova_to_phys); |
| 949 | 993 | ||
| 950 | int iommu_domain_has_cap(struct iommu_domain *domain, | ||
| 951 | unsigned long cap) | ||
| 952 | { | ||
| 953 | if (unlikely(domain->ops->domain_has_cap == NULL)) | ||
| 954 | return 0; | ||
| 955 | |||
| 956 | return domain->ops->domain_has_cap(domain, cap); | ||
| 957 | } | ||
| 958 | EXPORT_SYMBOL_GPL(iommu_domain_has_cap); | ||
| 959 | |||
| 960 | static size_t iommu_pgsize(struct iommu_domain *domain, | 994 | static size_t iommu_pgsize(struct iommu_domain *domain, |
| 961 | unsigned long addr_merge, size_t size) | 995 | unsigned long addr_merge, size_t size) |
| 962 | { | 996 | { |
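With iommu_domain_has_cap() removed, capability checks move from the domain to the bus: callers pass a bus type and an enum iommu_cap value to the new iommu_capable(). The kernel-style sketch below shows a caller deciding whether to request IOMMU_CACHE; the function itself is hypothetical, though this mirrors how existing users are expected to migrate.

#include <linux/iommu.h>
#include <linux/pci.h>

/*
 * Hypothetical caller: choose mapping protections for PCI devices based
 * on whether the IOMMUs behind the PCI bus enforce cache coherency.
 */
static int example_map_prot(void)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	/* Bus-level query; no domain is needed any more */
	if (iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;

	return prot;
}

The alias-group search added above bounds its recursion in a similar spirit: a 256-bit bitmap (u64 devfns[4], one bit per possible devfn on the bus) records which functions were already visited, so two devices that alias each other cannot send get_pci_alias_group() into a loop.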
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index 33c439524080..74a1767c89b5 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
| 13 | #include <asm/x86_init.h> | 13 | #include <asm/x86_init.h> |
| 14 | #include <asm/apic.h> | 14 | #include <asm/apic.h> |
| 15 | #include <asm/hpet.h> | ||
| 15 | 16 | ||
| 16 | #include "irq_remapping.h" | 17 | #include "irq_remapping.h" |
| 17 | 18 | ||
| @@ -345,10 +346,16 @@ static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, | |||
| 345 | 346 | ||
| 346 | int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) | 347 | int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) |
| 347 | { | 348 | { |
| 348 | if (!remap_ops || !remap_ops->setup_hpet_msi) | 349 | int ret; |
| 350 | |||
| 351 | if (!remap_ops || !remap_ops->alloc_hpet_msi) | ||
| 349 | return -ENODEV; | 352 | return -ENODEV; |
| 350 | 353 | ||
| 351 | return remap_ops->setup_hpet_msi(irq, id); | 354 | ret = remap_ops->alloc_hpet_msi(irq, id); |
| 355 | if (ret) | ||
| 356 | return -EINVAL; | ||
| 357 | |||
| 358 | return default_setup_hpet_msi(irq, id); | ||
| 352 | } | 359 | } |
| 353 | 360 | ||
| 354 | void panic_if_irq_remap(const char *msg) | 361 | void panic_if_irq_remap(const char *msg) |
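setup_hpet_msi_remapped() is now split into two phases: the remapping driver only allocates and programs the remap entry through alloc_hpet_msi(), and the generic default_setup_hpet_msi() then writes the MSI message. Here is a tiny standalone model of that driver-allocates, core-finishes control flow, using hypothetical names.

#include <stdio.h>

/* Driver hook: only reserve and program the remapping entry for this IRQ */
static int example_alloc_hpet_msi(unsigned int irq, unsigned int id)
{
	printf("remap entry allocated for irq %u, HPET id %u\n", irq, id);
	return 0;
}

/* Core helper: compose and write the MSI message once remapping is set up */
static int example_default_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	(void)id;
	printf("MSI message written for irq %u\n", irq);
	return 0;
}

/*
 * The common entry point chains the two phases, as the patched
 * setup_hpet_msi_remapped() now does.
 */
static int example_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	if (example_alloc_hpet_msi(irq, id))
		return -1;

	return example_default_setup_hpet_msi(irq, id);
}

int main(void)
{
	return example_setup_hpet_msi(8, 0);
}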
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h index 90c4dae5a46b..fde250f86e60 100644 --- a/drivers/iommu/irq_remapping.h +++ b/drivers/iommu/irq_remapping.h | |||
| @@ -80,7 +80,7 @@ struct irq_remap_ops { | |||
| 80 | int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int); | 80 | int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int); |
| 81 | 81 | ||
| 82 | /* Setup interrupt remapping for an HPET MSI */ | 82 | /* Setup interrupt remapping for an HPET MSI */ |
| 83 | int (*setup_hpet_msi)(unsigned int, unsigned int); | 83 | int (*alloc_hpet_msi)(unsigned int, unsigned int); |
| 84 | }; | 84 | }; |
| 85 | 85 | ||
| 86 | extern struct irq_remap_ops intel_irq_remap_ops; | 86 | extern struct irq_remap_ops intel_irq_remap_ops; |
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 49f41d6e02f1..6e3dcc289d59 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c | |||
| @@ -603,10 +603,9 @@ fail: | |||
| 603 | return ret; | 603 | return ret; |
| 604 | } | 604 | } |
| 605 | 605 | ||
| 606 | static int msm_iommu_domain_has_cap(struct iommu_domain *domain, | 606 | static bool msm_iommu_capable(enum iommu_cap cap) |
| 607 | unsigned long cap) | ||
| 608 | { | 607 | { |
| 609 | return 0; | 608 | return false; |
| 610 | } | 609 | } |
| 611 | 610 | ||
| 612 | static void print_ctx_regs(void __iomem *base, int ctx) | 611 | static void print_ctx_regs(void __iomem *base, int ctx) |
| @@ -675,6 +674,7 @@ fail: | |||
| 675 | } | 674 | } |
| 676 | 675 | ||
| 677 | static const struct iommu_ops msm_iommu_ops = { | 676 | static const struct iommu_ops msm_iommu_ops = { |
| 677 | .capable = msm_iommu_capable, | ||
| 678 | .domain_init = msm_iommu_domain_init, | 678 | .domain_init = msm_iommu_domain_init, |
| 679 | .domain_destroy = msm_iommu_domain_destroy, | 679 | .domain_destroy = msm_iommu_domain_destroy, |
| 680 | .attach_dev = msm_iommu_attach_dev, | 680 | .attach_dev = msm_iommu_attach_dev, |
| @@ -682,7 +682,6 @@ static const struct iommu_ops msm_iommu_ops = { | |||
| 682 | .map = msm_iommu_map, | 682 | .map = msm_iommu_map, |
| 683 | .unmap = msm_iommu_unmap, | 683 | .unmap = msm_iommu_unmap, |
| 684 | .iova_to_phys = msm_iommu_iova_to_phys, | 684 | .iova_to_phys = msm_iommu_iova_to_phys, |
| 685 | .domain_has_cap = msm_iommu_domain_has_cap, | ||
| 686 | .pgsize_bitmap = MSM_IOMMU_PGSIZES, | 685 | .pgsize_bitmap = MSM_IOMMU_PGSIZES, |
| 687 | }; | 686 | }; |
| 688 | 687 | ||
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index e202b0c24120..36278870e84a 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/of.h> | 26 | #include <linux/of.h> |
| 27 | #include <linux/of_iommu.h> | 27 | #include <linux/of_iommu.h> |
| 28 | #include <linux/of_irq.h> | 28 | #include <linux/of_irq.h> |
| 29 | #include <linux/of_platform.h> | ||
| 29 | 30 | ||
| 30 | #include <asm/cacheflush.h> | 31 | #include <asm/cacheflush.h> |
| 31 | 32 | ||
| @@ -892,19 +893,11 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) | |||
| 892 | goto err_enable; | 893 | goto err_enable; |
| 893 | flush_iotlb_all(obj); | 894 | flush_iotlb_all(obj); |
| 894 | 895 | ||
| 895 | if (!try_module_get(obj->owner)) { | ||
| 896 | err = -ENODEV; | ||
| 897 | goto err_module; | ||
| 898 | } | ||
| 899 | |||
| 900 | spin_unlock(&obj->iommu_lock); | 896 | spin_unlock(&obj->iommu_lock); |
| 901 | 897 | ||
| 902 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | 898 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); |
| 903 | return obj; | 899 | return obj; |
| 904 | 900 | ||
| 905 | err_module: | ||
| 906 | if (obj->refcount == 1) | ||
| 907 | iommu_disable(obj); | ||
| 908 | err_enable: | 901 | err_enable: |
| 909 | obj->refcount--; | 902 | obj->refcount--; |
| 910 | spin_unlock(&obj->iommu_lock); | 903 | spin_unlock(&obj->iommu_lock); |
| @@ -925,8 +918,6 @@ static void omap_iommu_detach(struct omap_iommu *obj) | |||
| 925 | if (--obj->refcount == 0) | 918 | if (--obj->refcount == 0) |
| 926 | iommu_disable(obj); | 919 | iommu_disable(obj); |
| 927 | 920 | ||
| 928 | module_put(obj->owner); | ||
| 929 | |||
| 930 | obj->iopgd = NULL; | 921 | obj->iopgd = NULL; |
| 931 | 922 | ||
| 932 | spin_unlock(&obj->iommu_lock); | 923 | spin_unlock(&obj->iommu_lock); |
| @@ -1006,7 +997,7 @@ static int omap_iommu_remove(struct platform_device *pdev) | |||
| 1006 | return 0; | 997 | return 0; |
| 1007 | } | 998 | } |
| 1008 | 999 | ||
| 1009 | static struct of_device_id omap_iommu_of_match[] = { | 1000 | static const struct of_device_id omap_iommu_of_match[] = { |
| 1010 | { .compatible = "ti,omap2-iommu" }, | 1001 | { .compatible = "ti,omap2-iommu" }, |
| 1011 | { .compatible = "ti,omap4-iommu" }, | 1002 | { .compatible = "ti,omap4-iommu" }, |
| 1012 | { .compatible = "ti,dra7-iommu" }, | 1003 | { .compatible = "ti,dra7-iommu" }, |
| @@ -1091,6 +1082,11 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
| 1091 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; | 1082 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; |
| 1092 | int ret = 0; | 1083 | int ret = 0; |
| 1093 | 1084 | ||
| 1085 | if (!arch_data || !arch_data->name) { | ||
| 1086 | dev_err(dev, "device doesn't have an associated iommu\n"); | ||
| 1087 | return -EINVAL; | ||
| 1088 | } | ||
| 1089 | |||
| 1094 | spin_lock(&omap_domain->lock); | 1090 | spin_lock(&omap_domain->lock); |
| 1095 | 1091 | ||
| 1096 | /* only a single device is supported per domain for now */ | 1092 | /* only a single device is supported per domain for now */ |
| @@ -1239,6 +1235,7 @@ static int omap_iommu_add_device(struct device *dev) | |||
| 1239 | { | 1235 | { |
| 1240 | struct omap_iommu_arch_data *arch_data; | 1236 | struct omap_iommu_arch_data *arch_data; |
| 1241 | struct device_node *np; | 1237 | struct device_node *np; |
| 1238 | struct platform_device *pdev; | ||
| 1242 | 1239 | ||
| 1243 | /* | 1240 | /* |
| 1244 | * Allocate the archdata iommu structure for DT-based devices. | 1241 | * Allocate the archdata iommu structure for DT-based devices. |
| @@ -1253,13 +1250,19 @@ static int omap_iommu_add_device(struct device *dev) | |||
| 1253 | if (!np) | 1250 | if (!np) |
| 1254 | return 0; | 1251 | return 0; |
| 1255 | 1252 | ||
| 1253 | pdev = of_find_device_by_node(np); | ||
| 1254 | if (WARN_ON(!pdev)) { | ||
| 1255 | of_node_put(np); | ||
| 1256 | return -EINVAL; | ||
| 1257 | } | ||
| 1258 | |||
| 1256 | arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); | 1259 | arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); |
| 1257 | if (!arch_data) { | 1260 | if (!arch_data) { |
| 1258 | of_node_put(np); | 1261 | of_node_put(np); |
| 1259 | return -ENOMEM; | 1262 | return -ENOMEM; |
| 1260 | } | 1263 | } |
| 1261 | 1264 | ||
| 1262 | arch_data->name = kstrdup(dev_name(dev), GFP_KERNEL); | 1265 | arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL); |
| 1263 | dev->archdata.iommu = arch_data; | 1266 | dev->archdata.iommu = arch_data; |
| 1264 | 1267 | ||
| 1265 | of_node_put(np); | 1268 | of_node_put(np); |
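Note: in the omap-iommu.c changes above, omap_iommu_add_device() now resolves the IOMMU controller's platform device from the device-tree phandle and names the archdata after that controller (dev_name(&pdev->dev)) instead of after the client, and omap_iommu_attach_dev() rejects clients that never received archdata. A sketch of the phandle-to-platform-device lookup; the "iommus" property name here is an assumption for illustration, not necessarily what the driver parses:

	#include <linux/of.h>
	#include <linux/of_platform.h>

	/* Sketch: find the controller behind a client's IOMMU phandle. */
	static struct platform_device *example_find_iommu(struct device *dev)
	{
		struct device_node *np;
		struct platform_device *pdev;

		np = of_parse_phandle(dev->of_node, "iommus", 0);
		if (!np)
			return NULL;			/* no IOMMU attached */

		pdev = of_find_device_by_node(np);	/* holds a device ref */
		of_node_put(np);			/* done with the node */
		return pdev;				/* caller: put_device() */
	}

The try_module_get()/module_put() pair and the struct module *owner field (dropped in omap-iommu.h below) go away in the same series, so the attach error path loses its err_module label as well.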
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 1275a822934b..4f1b68c08c15 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h | |||
| @@ -28,7 +28,6 @@ struct iotlb_entry { | |||
| 28 | 28 | ||
| 29 | struct omap_iommu { | 29 | struct omap_iommu { |
| 30 | const char *name; | 30 | const char *name; |
| 31 | struct module *owner; | ||
| 32 | void __iomem *regbase; | 31 | void __iomem *regbase; |
| 33 | struct device *dev; | 32 | struct device *dev; |
| 34 | void *isr_priv; | 33 | void *isr_priv; |
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index b10a8ecede8e..a6d76abf2c06 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c | |||
| @@ -303,13 +303,13 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, | |||
| 303 | return pa; | 303 | return pa; |
| 304 | } | 304 | } |
| 305 | 305 | ||
| 306 | static int gart_iommu_domain_has_cap(struct iommu_domain *domain, | 306 | static bool gart_iommu_capable(enum iommu_cap cap) |
| 307 | unsigned long cap) | ||
| 308 | { | 307 | { |
| 309 | return 0; | 308 | return false; |
| 310 | } | 309 | } |
| 311 | 310 | ||
| 312 | static const struct iommu_ops gart_iommu_ops = { | 311 | static const struct iommu_ops gart_iommu_ops = { |
| 312 | .capable = gart_iommu_capable, | ||
| 313 | .domain_init = gart_iommu_domain_init, | 313 | .domain_init = gart_iommu_domain_init, |
| 314 | .domain_destroy = gart_iommu_domain_destroy, | 314 | .domain_destroy = gart_iommu_domain_destroy, |
| 315 | .attach_dev = gart_iommu_attach_dev, | 315 | .attach_dev = gart_iommu_attach_dev, |
| @@ -317,7 +317,6 @@ static const struct iommu_ops gart_iommu_ops = { | |||
| 317 | .map = gart_iommu_map, | 317 | .map = gart_iommu_map, |
| 318 | .unmap = gart_iommu_unmap, | 318 | .unmap = gart_iommu_unmap, |
| 319 | .iova_to_phys = gart_iommu_iova_to_phys, | 319 | .iova_to_phys = gart_iommu_iova_to_phys, |
| 320 | .domain_has_cap = gart_iommu_domain_has_cap, | ||
| 321 | .pgsize_bitmap = GART_IOMMU_PGSIZES, | 320 | .pgsize_bitmap = GART_IOMMU_PGSIZES, |
| 322 | }; | 321 | }; |
| 323 | 322 | ||
| @@ -416,7 +415,7 @@ static const struct dev_pm_ops tegra_gart_pm_ops = { | |||
| 416 | .resume = tegra_gart_resume, | 415 | .resume = tegra_gart_resume, |
| 417 | }; | 416 | }; |
| 418 | 417 | ||
| 419 | static struct of_device_id tegra_gart_of_match[] = { | 418 | static const struct of_device_id tegra_gart_of_match[] = { |
| 420 | { .compatible = "nvidia,tegra20-gart", }, | 419 | { .compatible = "nvidia,tegra20-gart", }, |
| 421 | { }, | 420 | { }, |
| 422 | }; | 421 | }; |
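Note: tegra-gart makes the same two conversions seen elsewhere in this merge: domain_has_cap() becomes a bool capable(enum iommu_cap) that simply returns false, and the OF match table gains const. A sketch of the const match-table idiom with example compatible strings (not Tegra's):

	#include <linux/module.h>
	#include <linux/mod_devicetable.h>

	/* Match tables are never written after init, so const lets the
	 * compiler place them in read-only data.
	 */
	static const struct of_device_id example_of_match[] = {
		{ .compatible = "vendor,example-gart" },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, example_of_match);

The same const qualifier is applied to tegra_smmu_of_match in the next file.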
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 3ded3894623c..3afdf43f732a 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c | |||
| @@ -780,10 +780,9 @@ static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain, | |||
| 780 | return PFN_PHYS(pfn); | 780 | return PFN_PHYS(pfn); |
| 781 | } | 781 | } |
| 782 | 782 | ||
| 783 | static int smmu_iommu_domain_has_cap(struct iommu_domain *domain, | 783 | static bool smmu_iommu_capable(enum iommu_cap cap) |
| 784 | unsigned long cap) | ||
| 785 | { | 784 | { |
| 786 | return 0; | 785 | return false; |
| 787 | } | 786 | } |
| 788 | 787 | ||
| 789 | static int smmu_iommu_attach_dev(struct iommu_domain *domain, | 788 | static int smmu_iommu_attach_dev(struct iommu_domain *domain, |
| @@ -949,6 +948,7 @@ static void smmu_iommu_domain_destroy(struct iommu_domain *domain) | |||
| 949 | } | 948 | } |
| 950 | 949 | ||
| 951 | static const struct iommu_ops smmu_iommu_ops = { | 950 | static const struct iommu_ops smmu_iommu_ops = { |
| 951 | .capable = smmu_iommu_capable, | ||
| 952 | .domain_init = smmu_iommu_domain_init, | 952 | .domain_init = smmu_iommu_domain_init, |
| 953 | .domain_destroy = smmu_iommu_domain_destroy, | 953 | .domain_destroy = smmu_iommu_domain_destroy, |
| 954 | .attach_dev = smmu_iommu_attach_dev, | 954 | .attach_dev = smmu_iommu_attach_dev, |
| @@ -956,7 +956,6 @@ static const struct iommu_ops smmu_iommu_ops = { | |||
| 956 | .map = smmu_iommu_map, | 956 | .map = smmu_iommu_map, |
| 957 | .unmap = smmu_iommu_unmap, | 957 | .unmap = smmu_iommu_unmap, |
| 958 | .iova_to_phys = smmu_iommu_iova_to_phys, | 958 | .iova_to_phys = smmu_iommu_iova_to_phys, |
| 959 | .domain_has_cap = smmu_iommu_domain_has_cap, | ||
| 960 | .pgsize_bitmap = SMMU_IOMMU_PGSIZES, | 959 | .pgsize_bitmap = SMMU_IOMMU_PGSIZES, |
| 961 | }; | 960 | }; |
| 962 | 961 | ||
| @@ -1260,7 +1259,7 @@ static const struct dev_pm_ops tegra_smmu_pm_ops = { | |||
| 1260 | .resume = tegra_smmu_resume, | 1259 | .resume = tegra_smmu_resume, |
| 1261 | }; | 1260 | }; |
| 1262 | 1261 | ||
| 1263 | static struct of_device_id tegra_smmu_of_match[] = { | 1262 | static const struct of_device_id tegra_smmu_of_match[] = { |
| 1264 | { .compatible = "nvidia,tegra30-smmu", }, | 1263 | { .compatible = "nvidia,tegra30-smmu", }, |
| 1265 | { }, | 1264 | { }, |
| 1266 | }; | 1265 | }; |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d7690f86fdb9..55de4f6f7eaf 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect | |||
| 540 | has_nonrot_disk = 0; | 540 | has_nonrot_disk = 0; |
| 541 | choose_next_idle = 0; | 541 | choose_next_idle = 0; |
| 542 | 542 | ||
| 543 | if (conf->mddev->recovery_cp < MaxSector && | 543 | choose_first = (conf->mddev->recovery_cp < this_sector + sectors); |
| 544 | (this_sector + sectors >= conf->next_resync)) | ||
| 545 | choose_first = 1; | ||
| 546 | else | ||
| 547 | choose_first = 0; | ||
| 548 | 544 | ||
| 549 | for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { | 545 | for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { |
| 550 | sector_t dist; | 546 | sector_t dist; |
| @@ -831,7 +827,7 @@ static void flush_pending_writes(struct r1conf *conf) | |||
| 831 | * there is no normal IO happeing. It must arrange to call | 827 | * there is no normal IO happeing. It must arrange to call |
| 832 | * lower_barrier when the particular background IO completes. | 828 | * lower_barrier when the particular background IO completes. |
| 833 | */ | 829 | */ |
| 834 | static void raise_barrier(struct r1conf *conf) | 830 | static void raise_barrier(struct r1conf *conf, sector_t sector_nr) |
| 835 | { | 831 | { |
| 836 | spin_lock_irq(&conf->resync_lock); | 832 | spin_lock_irq(&conf->resync_lock); |
| 837 | 833 | ||
| @@ -841,6 +837,7 @@ static void raise_barrier(struct r1conf *conf) | |||
| 841 | 837 | ||
| 842 | /* block any new IO from starting */ | 838 | /* block any new IO from starting */ |
| 843 | conf->barrier++; | 839 | conf->barrier++; |
| 840 | conf->next_resync = sector_nr; | ||
| 844 | 841 | ||
| 845 | /* For these conditions we must wait: | 842 | /* For these conditions we must wait: |
| 846 | * A: while the array is in frozen state | 843 | * A: while the array is in frozen state |
| @@ -849,14 +846,17 @@ static void raise_barrier(struct r1conf *conf) | |||
| 849 | * C: next_resync + RESYNC_SECTORS > start_next_window, meaning | 846 | * C: next_resync + RESYNC_SECTORS > start_next_window, meaning |
| 850 | * next resync will reach to the window which normal bios are | 847 | * next resync will reach to the window which normal bios are |
| 851 | * handling. | 848 | * handling. |
| 849 | * D: while there are any active requests in the current window. | ||
| 852 | */ | 850 | */ |
| 853 | wait_event_lock_irq(conf->wait_barrier, | 851 | wait_event_lock_irq(conf->wait_barrier, |
| 854 | !conf->array_frozen && | 852 | !conf->array_frozen && |
| 855 | conf->barrier < RESYNC_DEPTH && | 853 | conf->barrier < RESYNC_DEPTH && |
| 854 | conf->current_window_requests == 0 && | ||
| 856 | (conf->start_next_window >= | 855 | (conf->start_next_window >= |
| 857 | conf->next_resync + RESYNC_SECTORS), | 856 | conf->next_resync + RESYNC_SECTORS), |
| 858 | conf->resync_lock); | 857 | conf->resync_lock); |
| 859 | 858 | ||
| 859 | conf->nr_pending++; | ||
| 860 | spin_unlock_irq(&conf->resync_lock); | 860 | spin_unlock_irq(&conf->resync_lock); |
| 861 | } | 861 | } |
| 862 | 862 | ||
| @@ -866,6 +866,7 @@ static void lower_barrier(struct r1conf *conf) | |||
| 866 | BUG_ON(conf->barrier <= 0); | 866 | BUG_ON(conf->barrier <= 0); |
| 867 | spin_lock_irqsave(&conf->resync_lock, flags); | 867 | spin_lock_irqsave(&conf->resync_lock, flags); |
| 868 | conf->barrier--; | 868 | conf->barrier--; |
| 869 | conf->nr_pending--; | ||
| 869 | spin_unlock_irqrestore(&conf->resync_lock, flags); | 870 | spin_unlock_irqrestore(&conf->resync_lock, flags); |
| 870 | wake_up(&conf->wait_barrier); | 871 | wake_up(&conf->wait_barrier); |
| 871 | } | 872 | } |
| @@ -877,12 +878,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) | |||
| 877 | if (conf->array_frozen || !bio) | 878 | if (conf->array_frozen || !bio) |
| 878 | wait = true; | 879 | wait = true; |
| 879 | else if (conf->barrier && bio_data_dir(bio) == WRITE) { | 880 | else if (conf->barrier && bio_data_dir(bio) == WRITE) { |
| 880 | if (conf->next_resync < RESYNC_WINDOW_SECTORS) | 881 | if ((conf->mddev->curr_resync_completed |
| 881 | wait = true; | 882 | >= bio_end_sector(bio)) || |
| 882 | else if ((conf->next_resync - RESYNC_WINDOW_SECTORS | 883 | (conf->next_resync + NEXT_NORMALIO_DISTANCE |
| 883 | >= bio_end_sector(bio)) || | 884 | <= bio->bi_iter.bi_sector)) |
| 884 | (conf->next_resync + NEXT_NORMALIO_DISTANCE | ||
| 885 | <= bio->bi_iter.bi_sector)) | ||
| 886 | wait = false; | 885 | wait = false; |
| 887 | else | 886 | else |
| 888 | wait = true; | 887 | wait = true; |
| @@ -919,8 +918,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) | |||
| 919 | } | 918 | } |
| 920 | 919 | ||
| 921 | if (bio && bio_data_dir(bio) == WRITE) { | 920 | if (bio && bio_data_dir(bio) == WRITE) { |
| 922 | if (conf->next_resync + NEXT_NORMALIO_DISTANCE | 921 | if (bio->bi_iter.bi_sector >= |
| 923 | <= bio->bi_iter.bi_sector) { | 922 | conf->mddev->curr_resync_completed) { |
| 924 | if (conf->start_next_window == MaxSector) | 923 | if (conf->start_next_window == MaxSector) |
| 925 | conf->start_next_window = | 924 | conf->start_next_window = |
| 926 | conf->next_resync + | 925 | conf->next_resync + |
| @@ -1186,6 +1185,7 @@ read_again: | |||
| 1186 | atomic_read(&bitmap->behind_writes) == 0); | 1185 | atomic_read(&bitmap->behind_writes) == 0); |
| 1187 | } | 1186 | } |
| 1188 | r1_bio->read_disk = rdisk; | 1187 | r1_bio->read_disk = rdisk; |
| 1188 | r1_bio->start_next_window = 0; | ||
| 1189 | 1189 | ||
| 1190 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1190 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
| 1191 | bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, | 1191 | bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, |
| @@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *conf) | |||
| 1548 | mempool_destroy(conf->r1buf_pool); | 1548 | mempool_destroy(conf->r1buf_pool); |
| 1549 | conf->r1buf_pool = NULL; | 1549 | conf->r1buf_pool = NULL; |
| 1550 | 1550 | ||
| 1551 | spin_lock_irq(&conf->resync_lock); | ||
| 1551 | conf->next_resync = 0; | 1552 | conf->next_resync = 0; |
| 1552 | conf->start_next_window = MaxSector; | 1553 | conf->start_next_window = MaxSector; |
| 1554 | conf->current_window_requests += | ||
| 1555 | conf->next_window_requests; | ||
| 1556 | conf->next_window_requests = 0; | ||
| 1557 | spin_unlock_irq(&conf->resync_lock); | ||
| 1553 | } | 1558 | } |
| 1554 | 1559 | ||
| 1555 | static int raid1_spare_active(struct mddev *mddev) | 1560 | static int raid1_spare_active(struct mddev *mddev) |
| @@ -2150,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, | |||
| 2150 | d--; | 2155 | d--; |
| 2151 | rdev = conf->mirrors[d].rdev; | 2156 | rdev = conf->mirrors[d].rdev; |
| 2152 | if (rdev && | 2157 | if (rdev && |
| 2153 | test_bit(In_sync, &rdev->flags)) | 2158 | !test_bit(Faulty, &rdev->flags)) |
| 2154 | r1_sync_page_io(rdev, sect, s, | 2159 | r1_sync_page_io(rdev, sect, s, |
| 2155 | conf->tmppage, WRITE); | 2160 | conf->tmppage, WRITE); |
| 2156 | } | 2161 | } |
| @@ -2162,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, | |||
| 2162 | d--; | 2167 | d--; |
| 2163 | rdev = conf->mirrors[d].rdev; | 2168 | rdev = conf->mirrors[d].rdev; |
| 2164 | if (rdev && | 2169 | if (rdev && |
| 2165 | test_bit(In_sync, &rdev->flags)) { | 2170 | !test_bit(Faulty, &rdev->flags)) { |
| 2166 | if (r1_sync_page_io(rdev, sect, s, | 2171 | if (r1_sync_page_io(rdev, sect, s, |
| 2167 | conf->tmppage, READ)) { | 2172 | conf->tmppage, READ)) { |
| 2168 | atomic_add(s, &rdev->corrected_errors); | 2173 | atomic_add(s, &rdev->corrected_errors); |
| @@ -2541,9 +2546,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp | |||
| 2541 | 2546 | ||
| 2542 | bitmap_cond_end_sync(mddev->bitmap, sector_nr); | 2547 | bitmap_cond_end_sync(mddev->bitmap, sector_nr); |
| 2543 | r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); | 2548 | r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); |
| 2544 | raise_barrier(conf); | ||
| 2545 | 2549 | ||
| 2546 | conf->next_resync = sector_nr; | 2550 | raise_barrier(conf, sector_nr); |
| 2547 | 2551 | ||
| 2548 | rcu_read_lock(); | 2552 | rcu_read_lock(); |
| 2549 | /* | 2553 | /* |
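Note: the raid1 changes above rework the resync barrier: raise_barrier() now receives the resync sector and publishes conf->next_resync while holding resync_lock, the wait condition additionally requires current_window_requests == 0, and the resync thread counts itself in nr_pending (dropped again in lower_barrier()). A deliberately reduced sketch of that locked publish-and-wait pattern; field names follow the patch, but the constant and the other wait conditions (frozen array, RESYNC_DEPTH) are omitted:

	#include <linux/spinlock.h>
	#include <linux/types.h>
	#include <linux/wait.h>

	struct barrier_ctx {
		spinlock_t lock;
		wait_queue_head_t wait;
		int barrier, nr_pending, current_window_requests;
		sector_t next_resync, start_next_window;
	};

	#define EXAMPLE_RESYNC_SECTORS 128	/* placeholder value */

	static void example_raise_barrier(struct barrier_ctx *c, sector_t sector_nr)
	{
		spin_lock_irq(&c->lock);
		c->barrier++;
		c->next_resync = sector_nr;	/* published under the lock */
		wait_event_lock_irq(c->wait,
				    c->current_window_requests == 0 &&
				    c->start_next_window >=
					c->next_resync + EXAMPLE_RESYNC_SECTORS,
				    c->lock);
		c->nr_pending++;	/* resync itself counts as pending I/O */
		spin_unlock_irq(&c->lock);
	}

The fix_read_error() hunks are independent of the barrier work: recovery reads and writes now act on any non-Faulty disk rather than only In_sync ones.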
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c index 103ef6bad2e2..be763150b8aa 100644 --- a/drivers/media/common/cx2341x.c +++ b/drivers/media/common/cx2341x.c | |||
| @@ -1490,6 +1490,7 @@ static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, | |||
| 1490 | { | 1490 | { |
| 1491 | struct v4l2_ctrl_config cfg; | 1491 | struct v4l2_ctrl_config cfg; |
| 1492 | 1492 | ||
| 1493 | memset(&cfg, 0, sizeof(cfg)); | ||
| 1493 | cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags); | 1494 | cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags); |
| 1494 | cfg.ops = &cx2341x_ops; | 1495 | cfg.ops = &cx2341x_ops; |
| 1495 | cfg.id = id; | 1496 | cfg.id = id; |
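Note: the one-line cx2341x fix zeroes the on-stack struct v4l2_ctrl_config before cx2341x_ctrl_fill() sets a subset of its members; without the memset() the remaining fields reach v4l2_ctrl_new_custom() as stack garbage. A generic sketch of the bug class, using a made-up struct rather than the V4L2 one:

	#include <linux/string.h>

	struct example_cfg {
		const char *name;
		unsigned int flags;
		long min, max, step, def;
	};

	static void example_fill(struct example_cfg *cfg)
	{
		memset(cfg, 0, sizeof(*cfg));	/* start from a known state */
		cfg->name = "example";		/* only some fields are set... */
		cfg->min  = 0;
		cfg->max  = 255;
		/* ...everything else is now reliably zero, not stack junk */
	}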
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c index 72fb5838cae0..7975c6608e20 100644 --- a/drivers/media/dvb-frontends/cx24123.c +++ b/drivers/media/dvb-frontends/cx24123.c | |||
| @@ -1095,6 +1095,7 @@ struct dvb_frontend *cx24123_attach(const struct cx24123_config *config, | |||
| 1095 | sizeof(state->tuner_i2c_adapter.name)); | 1095 | sizeof(state->tuner_i2c_adapter.name)); |
| 1096 | state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo; | 1096 | state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo; |
| 1097 | state->tuner_i2c_adapter.algo_data = NULL; | 1097 | state->tuner_i2c_adapter.algo_data = NULL; |
| 1098 | state->tuner_i2c_adapter.dev.parent = i2c->dev.parent; | ||
| 1098 | i2c_set_adapdata(&state->tuner_i2c_adapter, state); | 1099 | i2c_set_adapdata(&state->tuner_i2c_adapter, state); |
| 1099 | if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) { | 1100 | if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) { |
| 1100 | err("tuner i2c bus could not be initialized\n"); | 1101 | err("tuner i2c bus could not be initialized\n"); |
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index d4fa213ba74a..de88b980a837 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c | |||
| @@ -2325,7 +2325,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd) | |||
| 2325 | v4l2_info(sd, "HDCP keys read: %s%s\n", | 2325 | v4l2_info(sd, "HDCP keys read: %s%s\n", |
| 2326 | (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no", | 2326 | (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no", |
| 2327 | (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : ""); | 2327 | (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : ""); |
| 2328 | if (!is_hdmi(sd)) { | 2328 | if (is_hdmi(sd)) { |
| 2329 | bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01; | 2329 | bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01; |
| 2330 | bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01; | 2330 | bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01; |
| 2331 | bool audio_mute = io_read(sd, 0x65) & 0x40; | 2331 | bool audio_mute = io_read(sd, 0x65) & 0x40; |
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c index 998919e97dfe..7b35e633118d 100644 --- a/drivers/media/radio/radio-miropcm20.c +++ b/drivers/media/radio/radio-miropcm20.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
| 29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
| 30 | #include <linux/io.h> | ||
| 30 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
| 31 | #include <linux/videodev2.h> | 32 | #include <linux/videodev2.h> |
| 32 | #include <linux/kthread.h> | 33 | #include <linux/kthread.h> |
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index 90dec2955f1c..29abc379551e 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c | |||
| @@ -1342,7 +1342,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, | |||
| 1342 | struct em28xx *dev = video_drvdata(file); | 1342 | struct em28xx *dev = video_drvdata(file); |
| 1343 | struct em28xx_v4l2 *v4l2 = dev->v4l2; | 1343 | struct em28xx_v4l2 *v4l2 = dev->v4l2; |
| 1344 | 1344 | ||
| 1345 | if (v4l2->streaming_users > 0) | 1345 | if (vb2_is_busy(&v4l2->vb_vidq)) |
| 1346 | return -EBUSY; | 1346 | return -EBUSY; |
| 1347 | 1347 | ||
| 1348 | vidioc_try_fmt_vid_cap(file, priv, f); | 1348 | vidioc_try_fmt_vid_cap(file, priv, f); |
| @@ -1883,8 +1883,9 @@ static int em28xx_v4l2_open(struct file *filp) | |||
| 1883 | return -EINVAL; | 1883 | return -EINVAL; |
| 1884 | } | 1884 | } |
| 1885 | 1885 | ||
| 1886 | em28xx_videodbg("open dev=%s type=%s\n", | 1886 | em28xx_videodbg("open dev=%s type=%s users=%d\n", |
| 1887 | video_device_node_name(vdev), v4l2_type_names[fh_type]); | 1887 | video_device_node_name(vdev), v4l2_type_names[fh_type], |
| 1888 | v4l2->users); | ||
| 1888 | 1889 | ||
| 1889 | if (mutex_lock_interruptible(&dev->lock)) | 1890 | if (mutex_lock_interruptible(&dev->lock)) |
| 1890 | return -ERESTARTSYS; | 1891 | return -ERESTARTSYS; |
| @@ -1897,9 +1898,7 @@ static int em28xx_v4l2_open(struct file *filp) | |||
| 1897 | return ret; | 1898 | return ret; |
| 1898 | } | 1899 | } |
| 1899 | 1900 | ||
| 1900 | if (v4l2_fh_is_singular_file(filp)) { | 1901 | if (v4l2->users == 0) { |
| 1901 | em28xx_videodbg("first opened filehandle, initializing device\n"); | ||
| 1902 | |||
| 1903 | em28xx_set_mode(dev, EM28XX_ANALOG_MODE); | 1902 | em28xx_set_mode(dev, EM28XX_ANALOG_MODE); |
| 1904 | 1903 | ||
| 1905 | if (vdev->vfl_type != VFL_TYPE_RADIO) | 1904 | if (vdev->vfl_type != VFL_TYPE_RADIO) |
| @@ -1910,8 +1909,6 @@ static int em28xx_v4l2_open(struct file *filp) | |||
| 1910 | * of some i2c devices | 1909 | * of some i2c devices |
| 1911 | */ | 1910 | */ |
| 1912 | em28xx_wake_i2c(dev); | 1911 | em28xx_wake_i2c(dev); |
| 1913 | } else { | ||
| 1914 | em28xx_videodbg("further filehandles are already opened\n"); | ||
| 1915 | } | 1912 | } |
| 1916 | 1913 | ||
| 1917 | if (vdev->vfl_type == VFL_TYPE_RADIO) { | 1914 | if (vdev->vfl_type == VFL_TYPE_RADIO) { |
| @@ -1921,6 +1918,7 @@ static int em28xx_v4l2_open(struct file *filp) | |||
| 1921 | 1918 | ||
| 1922 | kref_get(&dev->ref); | 1919 | kref_get(&dev->ref); |
| 1923 | kref_get(&v4l2->ref); | 1920 | kref_get(&v4l2->ref); |
| 1921 | v4l2->users++; | ||
| 1924 | 1922 | ||
| 1925 | mutex_unlock(&dev->lock); | 1923 | mutex_unlock(&dev->lock); |
| 1926 | 1924 | ||
| @@ -2027,11 +2025,12 @@ static int em28xx_v4l2_close(struct file *filp) | |||
| 2027 | struct em28xx_v4l2 *v4l2 = dev->v4l2; | 2025 | struct em28xx_v4l2 *v4l2 = dev->v4l2; |
| 2028 | int errCode; | 2026 | int errCode; |
| 2029 | 2027 | ||
| 2030 | mutex_lock(&dev->lock); | 2028 | em28xx_videodbg("users=%d\n", v4l2->users); |
| 2031 | 2029 | ||
| 2032 | if (v4l2_fh_is_singular_file(filp)) { | 2030 | vb2_fop_release(filp); |
| 2033 | em28xx_videodbg("last opened filehandle, shutting down device\n"); | 2031 | mutex_lock(&dev->lock); |
| 2034 | 2032 | ||
| 2033 | if (v4l2->users == 1) { | ||
| 2035 | /* No sense to try to write to the device */ | 2034 | /* No sense to try to write to the device */ |
| 2036 | if (dev->disconnected) | 2035 | if (dev->disconnected) |
| 2037 | goto exit; | 2036 | goto exit; |
| @@ -2050,12 +2049,10 @@ static int em28xx_v4l2_close(struct file *filp) | |||
| 2050 | em28xx_errdev("cannot change alternate number to " | 2049 | em28xx_errdev("cannot change alternate number to " |
| 2051 | "0 (error=%i)\n", errCode); | 2050 | "0 (error=%i)\n", errCode); |
| 2052 | } | 2051 | } |
| 2053 | } else { | ||
| 2054 | em28xx_videodbg("further opened filehandles left\n"); | ||
| 2055 | } | 2052 | } |
| 2056 | 2053 | ||
| 2057 | exit: | 2054 | exit: |
| 2058 | vb2_fop_release(filp); | 2055 | v4l2->users--; |
| 2059 | kref_put(&v4l2->ref, em28xx_free_v4l2); | 2056 | kref_put(&v4l2->ref, em28xx_free_v4l2); |
| 2060 | mutex_unlock(&dev->lock); | 2057 | mutex_unlock(&dev->lock); |
| 2061 | kref_put(&dev->ref, em28xx_free_device); | 2058 | kref_put(&dev->ref, em28xx_free_device); |
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 84ef8efdb148..4360338e7b31 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h | |||
| @@ -524,6 +524,7 @@ struct em28xx_v4l2 { | |||
| 524 | int sensor_yres; | 524 | int sensor_yres; |
| 525 | int sensor_xtal; | 525 | int sensor_xtal; |
| 526 | 526 | ||
| 527 | int users; /* user count for exclusive use */ | ||
| 527 | int streaming_users; /* number of actively streaming users */ | 528 | int streaming_users; /* number of actively streaming users */ |
| 528 | 529 | ||
| 529 | u32 frequency; /* selected tuner frequency */ | 530 | u32 frequency; /* selected tuner frequency */ |
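Note: the em28xx changes replace v4l2_fh_is_singular_file() with a driver-private users count (added to struct em28xx_v4l2 above) to decide when the first open should initialise the hardware and when the last close should power it down, with vb2_fop_release() now called before dev->lock is taken. A sketch of the counting pattern only; the hardware hooks are stand-ins:

	#include <linux/mutex.h>

	struct example_state {
		struct mutex lock;
		int users;
	};

	static void example_hw_init(struct example_state *s) { /* power up */ }
	static void example_hw_stop(struct example_state *s) { /* power down */ }

	static int example_open(struct example_state *s)
	{
		mutex_lock(&s->lock);
		if (s->users == 0)
			example_hw_init(s);	/* first opener sets up the device */
		s->users++;
		mutex_unlock(&s->lock);
		return 0;
	}

	static void example_release(struct example_state *s)
	{
		mutex_lock(&s->lock);
		if (s->users == 1)
			example_hw_stop(s);	/* last closer shuts it down */
		s->users--;
		mutex_unlock(&s->lock);
	}

The vidioc_s_fmt_vid_cap() hunk similarly switches from counting streaming users to vb2_is_busy(), so a format change is refused whenever buffers are allocated on the queue, not just while streaming.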
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index c359006074a8..25d3ae2188cb 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
| @@ -971,6 +971,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) | |||
| 971 | * to the userspace. | 971 | * to the userspace. |
| 972 | */ | 972 | */ |
| 973 | req->count = allocated_buffers; | 973 | req->count = allocated_buffers; |
| 974 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); | ||
| 974 | 975 | ||
| 975 | return 0; | 976 | return 0; |
| 976 | } | 977 | } |
| @@ -1018,6 +1019,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create | |||
| 1018 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); | 1019 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); |
| 1019 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); | 1020 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); |
| 1020 | q->memory = create->memory; | 1021 | q->memory = create->memory; |
| 1022 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); | ||
| 1021 | } | 1023 | } |
| 1022 | 1024 | ||
| 1023 | num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); | 1025 | num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); |
| @@ -1130,7 +1132,7 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr); | |||
| 1130 | */ | 1132 | */ |
| 1131 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) | 1133 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) |
| 1132 | { | 1134 | { |
| 1133 | if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) | 1135 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) |
| 1134 | return NULL; | 1136 | return NULL; |
| 1135 | 1137 | ||
| 1136 | return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv); | 1138 | return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv); |
| @@ -1165,13 +1167,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) | |||
| 1165 | if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) | 1167 | if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) |
| 1166 | return; | 1168 | return; |
| 1167 | 1169 | ||
| 1168 | if (!q->start_streaming_called) { | 1170 | if (WARN_ON(state != VB2_BUF_STATE_DONE && |
| 1169 | if (WARN_ON(state != VB2_BUF_STATE_QUEUED)) | 1171 | state != VB2_BUF_STATE_ERROR && |
| 1170 | state = VB2_BUF_STATE_QUEUED; | 1172 | state != VB2_BUF_STATE_QUEUED)) |
| 1171 | } else if (WARN_ON(state != VB2_BUF_STATE_DONE && | 1173 | state = VB2_BUF_STATE_ERROR; |
| 1172 | state != VB2_BUF_STATE_ERROR)) { | ||
| 1173 | state = VB2_BUF_STATE_ERROR; | ||
| 1174 | } | ||
| 1175 | 1174 | ||
| 1176 | #ifdef CONFIG_VIDEO_ADV_DEBUG | 1175 | #ifdef CONFIG_VIDEO_ADV_DEBUG |
| 1177 | /* | 1176 | /* |
| @@ -1762,6 +1761,12 @@ static int vb2_start_streaming(struct vb2_queue *q) | |||
| 1762 | q->start_streaming_called = 0; | 1761 | q->start_streaming_called = 0; |
| 1763 | 1762 | ||
| 1764 | dprintk(1, "driver refused to start streaming\n"); | 1763 | dprintk(1, "driver refused to start streaming\n"); |
| 1764 | /* | ||
| 1765 | * If you see this warning, then the driver isn't cleaning up properly | ||
| 1766 | * after a failed start_streaming(). See the start_streaming() | ||
| 1767 | * documentation in videobuf2-core.h for more information how buffers | ||
| 1768 | * should be returned to vb2 in start_streaming(). | ||
| 1769 | */ | ||
| 1765 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { | 1770 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { |
| 1766 | unsigned i; | 1771 | unsigned i; |
| 1767 | 1772 | ||
| @@ -1777,6 +1782,12 @@ static int vb2_start_streaming(struct vb2_queue *q) | |||
| 1777 | /* Must be zero now */ | 1782 | /* Must be zero now */ |
| 1778 | WARN_ON(atomic_read(&q->owned_by_drv_count)); | 1783 | WARN_ON(atomic_read(&q->owned_by_drv_count)); |
| 1779 | } | 1784 | } |
| 1785 | /* | ||
| 1786 | * If done_list is not empty, then start_streaming() didn't call | ||
| 1787 | * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or | ||
| 1788 | * STATE_DONE. | ||
| 1789 | */ | ||
| 1790 | WARN_ON(!list_empty(&q->done_list)); | ||
| 1780 | return ret; | 1791 | return ret; |
| 1781 | } | 1792 | } |
| 1782 | 1793 | ||
| @@ -1812,6 +1823,7 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) | |||
| 1812 | */ | 1823 | */ |
| 1813 | list_add_tail(&vb->queued_entry, &q->queued_list); | 1824 | list_add_tail(&vb->queued_entry, &q->queued_list); |
| 1814 | q->queued_count++; | 1825 | q->queued_count++; |
| 1826 | q->waiting_for_buffers = false; | ||
| 1815 | vb->state = VB2_BUF_STATE_QUEUED; | 1827 | vb->state = VB2_BUF_STATE_QUEUED; |
| 1816 | if (V4L2_TYPE_IS_OUTPUT(q->type)) { | 1828 | if (V4L2_TYPE_IS_OUTPUT(q->type)) { |
| 1817 | /* | 1829 | /* |
| @@ -2123,6 +2135,12 @@ static void __vb2_queue_cancel(struct vb2_queue *q) | |||
| 2123 | if (q->start_streaming_called) | 2135 | if (q->start_streaming_called) |
| 2124 | call_void_qop(q, stop_streaming, q); | 2136 | call_void_qop(q, stop_streaming, q); |
| 2125 | 2137 | ||
| 2138 | /* | ||
| 2139 | * If you see this warning, then the driver isn't cleaning up properly | ||
| 2140 | * in stop_streaming(). See the stop_streaming() documentation in | ||
| 2141 | * videobuf2-core.h for more information how buffers should be returned | ||
| 2142 | * to vb2 in stop_streaming(). | ||
| 2143 | */ | ||
| 2126 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { | 2144 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { |
| 2127 | for (i = 0; i < q->num_buffers; ++i) | 2145 | for (i = 0; i < q->num_buffers; ++i) |
| 2128 | if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) | 2146 | if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) |
| @@ -2272,6 +2290,7 @@ static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) | |||
| 2272 | * their normal dequeued state. | 2290 | * their normal dequeued state. |
| 2273 | */ | 2291 | */ |
| 2274 | __vb2_queue_cancel(q); | 2292 | __vb2_queue_cancel(q); |
| 2293 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); | ||
| 2275 | 2294 | ||
| 2276 | dprintk(3, "successful\n"); | 2295 | dprintk(3, "successful\n"); |
| 2277 | return 0; | 2296 | return 0; |
| @@ -2590,10 +2609,17 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) | |||
| 2590 | } | 2609 | } |
| 2591 | 2610 | ||
| 2592 | /* | 2611 | /* |
| 2593 | * There is nothing to wait for if no buffer has been queued and the | 2612 | * There is nothing to wait for if the queue isn't streaming, or if the |
| 2594 | * queue isn't streaming, or if the error flag is set. | 2613 | * error flag is set. |
| 2614 | */ | ||
| 2615 | if (!vb2_is_streaming(q) || q->error) | ||
| 2616 | return res | POLLERR; | ||
| 2617 | /* | ||
| 2618 | * For compatibility with vb1: if QBUF hasn't been called yet, then | ||
| 2619 | * return POLLERR as well. This only affects capture queues, output | ||
| 2620 | * queues will always initialize waiting_for_buffers to false. | ||
| 2595 | */ | 2621 | */ |
| 2596 | if ((list_empty(&q->queued_list) && !vb2_is_streaming(q)) || q->error) | 2622 | if (q->waiting_for_buffers) |
| 2597 | return res | POLLERR; | 2623 | return res | POLLERR; |
| 2598 | 2624 | ||
| 2599 | /* | 2625 | /* |
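Note: the videobuf2-core changes add a waiting_for_buffers flag (set for capture queues on REQBUFS/CREATE_BUFS and STREAMOFF, cleared on the first QBUF) so that vb2_poll() keeps returning POLLERR for a capture queue that has never queued a buffer, preserving videobuf1 behaviour, instead of keying the error off an empty queued_list. vb2_buffer_done() now also accepts VB2_BUF_STATE_QUEUED regardless of whether start_streaming() was called, with the new WARN_ON(!list_empty(&q->done_list)) catching drivers that return buffers in the wrong state. A reduced sketch of the resulting poll decision; the real function additionally handles events, read/write emulation and wakeups:

	#include <linux/poll.h>
	#include <linux/types.h>

	static unsigned int example_vb2_poll(bool streaming, bool error,
					     bool waiting_for_buffers,
					     bool have_done_buffer)
	{
		if (!streaming || error)
			return POLLERR;
		if (waiting_for_buffers)	/* capture queue, no QBUF yet */
			return POLLERR;
		return have_done_buffer ? (POLLIN | POLLRDNORM) : 0;
	}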
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index adefc31bb853..9b163a440f89 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c | |||
| @@ -113,7 +113,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla | |||
| 113 | goto fail_pages_alloc; | 113 | goto fail_pages_alloc; |
| 114 | 114 | ||
| 115 | ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, | 115 | ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, |
| 116 | buf->num_pages, 0, size, gfp_flags); | 116 | buf->num_pages, 0, size, GFP_KERNEL); |
| 117 | if (ret) | 117 | if (ret) |
| 118 | goto fail_table_alloc; | 118 | goto fail_table_alloc; |
| 119 | 119 | ||
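Note: the videobuf2-dma-sg fix stops forwarding the caller's gfp_flags to sg_alloc_table_from_pages(): those flags describe how the data pages should be allocated (e.g. __GFP_DMA32), while the scatterlist table itself is ordinary kernel bookkeeping and should use GFP_KERNEL. A sketch of the corrected call:

	#include <linux/gfp.h>
	#include <linux/scatterlist.h>

	static int example_build_table(struct sg_table *sgt, struct page **pages,
				       unsigned int n_pages, unsigned long size)
	{
		/* table metadata: plain GFP_KERNEL, not the page-alloc flags */
		return sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
						 GFP_KERNEL);
	}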
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig index a34a11d2fef2..63ca9841db10 100644 --- a/drivers/message/fusion/Kconfig +++ b/drivers/message/fusion/Kconfig | |||
| @@ -29,7 +29,7 @@ config FUSION_SPI | |||
| 29 | config FUSION_FC | 29 | config FUSION_FC |
| 30 | tristate "Fusion MPT ScsiHost drivers for FC" | 30 | tristate "Fusion MPT ScsiHost drivers for FC" |
| 31 | depends on PCI && SCSI | 31 | depends on PCI && SCSI |
| 32 | select SCSI_FC_ATTRS | 32 | depends on SCSI_FC_ATTRS |
| 33 | ---help--- | 33 | ---help--- |
| 34 | SCSI HOST support for a Fiber Channel host adapters. | 34 | SCSI HOST support for a Fiber Channel host adapters. |
| 35 | 35 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f0f5eab0fab1..798ae69fb63c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " | |||
| 175 | "the same MAC; 0 for none (default), " | 175 | "the same MAC; 0 for none (default), " |
| 176 | "1 for active, 2 for follow"); | 176 | "1 for active, 2 for follow"); |
| 177 | module_param(all_slaves_active, int, 0); | 177 | module_param(all_slaves_active, int, 0); |
| 178 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" | 178 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface " |
| 179 | "by setting active flag for all slaves; " | 179 | "by setting active flag for all slaves; " |
| 180 | "0 for never (default), 1 for always."); | 180 | "0 for never (default), 1 for always."); |
| 181 | module_param(resend_igmp, int, 0); | 181 | module_param(resend_igmp, int, 0); |
| @@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
| 3659 | else | 3659 | else |
| 3660 | bond_xmit_slave_id(bond, skb, 0); | 3660 | bond_xmit_slave_id(bond, skb, 0); |
| 3661 | } else { | 3661 | } else { |
| 3662 | slave_id = bond_rr_gen_slave_id(bond); | 3662 | int slave_cnt = ACCESS_ONCE(bond->slave_cnt); |
| 3663 | bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt); | 3663 | |
| 3664 | if (likely(slave_cnt)) { | ||
| 3665 | slave_id = bond_rr_gen_slave_id(bond); | ||
| 3666 | bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); | ||
| 3667 | } else { | ||
| 3668 | dev_kfree_skb_any(skb); | ||
| 3669 | } | ||
| 3664 | } | 3670 | } |
| 3665 | 3671 | ||
| 3666 | return NETDEV_TX_OK; | 3672 | return NETDEV_TX_OK; |
| @@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d | |||
| 3691 | static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) | 3697 | static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) |
| 3692 | { | 3698 | { |
| 3693 | struct bonding *bond = netdev_priv(bond_dev); | 3699 | struct bonding *bond = netdev_priv(bond_dev); |
| 3700 | int slave_cnt = ACCESS_ONCE(bond->slave_cnt); | ||
| 3694 | 3701 | ||
| 3695 | bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt); | 3702 | if (likely(slave_cnt)) |
| 3703 | bond_xmit_slave_id(bond, skb, | ||
| 3704 | bond_xmit_hash(bond, skb) % slave_cnt); | ||
| 3705 | else | ||
| 3706 | dev_kfree_skb_any(skb); | ||
| 3696 | 3707 | ||
| 3697 | return NETDEV_TX_OK; | 3708 | return NETDEV_TX_OK; |
| 3698 | } | 3709 | } |
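Note: in bond_main.c the round-robin and XOR transmit paths now take a single ACCESS_ONCE() snapshot of bond->slave_cnt, use it for both the zero test and the modulo, and free the skb when the bond has no slaves, avoiding a divide-by-zero if a slave is removed concurrently. A sketch of the snapshot idea only; the helper name and return convention are invented:

	#include <linux/compiler.h>
	#include <linux/types.h>

	/* Returns a slave index, or -1 when the caller should drop the skb. */
	static int example_pick_slave(int *slave_cnt_p, u32 hash)
	{
		int slave_cnt = ACCESS_ONCE(*slave_cnt_p);	/* read once */

		if (likely(slave_cnt))
			return hash % slave_cnt;	/* divisor is our snapshot */
		return -1;				/* empty bond */
	}

The MODULE_PARM_DESC hunk in the same file is only a missing space between two concatenated string literals.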
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f07fa89b5fd5..05e1aa090add 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c | |||
| @@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev) | |||
| 1123 | struct at91_priv *priv = netdev_priv(dev); | 1123 | struct at91_priv *priv = netdev_priv(dev); |
| 1124 | int err; | 1124 | int err; |
| 1125 | 1125 | ||
| 1126 | clk_enable(priv->clk); | 1126 | err = clk_prepare_enable(priv->clk); |
| 1127 | if (err) | ||
| 1128 | return err; | ||
| 1127 | 1129 | ||
| 1128 | /* check or determine and set bittime */ | 1130 | /* check or determine and set bittime */ |
| 1129 | err = open_candev(dev); | 1131 | err = open_candev(dev); |
| @@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev) | |||
| 1149 | out_close: | 1151 | out_close: |
| 1150 | close_candev(dev); | 1152 | close_candev(dev); |
| 1151 | out: | 1153 | out: |
| 1152 | clk_disable(priv->clk); | 1154 | clk_disable_unprepare(priv->clk); |
| 1153 | 1155 | ||
| 1154 | return err; | 1156 | return err; |
| 1155 | } | 1157 | } |
| @@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev) | |||
| 1166 | at91_chip_stop(dev, CAN_STATE_STOPPED); | 1168 | at91_chip_stop(dev, CAN_STATE_STOPPED); |
| 1167 | 1169 | ||
| 1168 | free_irq(dev->irq, dev); | 1170 | free_irq(dev->irq, dev); |
| 1169 | clk_disable(priv->clk); | 1171 | clk_disable_unprepare(priv->clk); |
| 1170 | 1172 | ||
| 1171 | close_candev(dev); | 1173 | close_candev(dev); |
| 1172 | 1174 | ||
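Note: at91_can switches from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare() and now checks the return value, as required by the common clock framework, where a clock must be prepared (possibly sleeping) before it can be enabled. A sketch of the pairing:

	#include <linux/clk.h>

	static int example_open(struct clk *clk)
	{
		int err = clk_prepare_enable(clk);	/* prepare + enable, may sleep */

		if (err)
			return err;
		/* ... bring the controller up ... */
		return 0;
	}

	static void example_close(struct clk *clk)
	{
		/* ... stop the controller ... */
		clk_disable_unprepare(clk);	/* mirrors clk_prepare_enable() */
	}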
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 109cb44291f5..fb279d6ae484 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c | |||
| @@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable) | |||
| 97 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); | 97 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); |
| 98 | writel(ctrl, priv->raminit_ctrlreg); | 98 | writel(ctrl, priv->raminit_ctrlreg); |
| 99 | ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); | 99 | ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); |
| 100 | c_can_hw_raminit_wait_ti(priv, ctrl, mask); | 100 | c_can_hw_raminit_wait_ti(priv, mask, ctrl); |
| 101 | 101 | ||
| 102 | if (enable) { | 102 | if (enable) { |
| 103 | /* Set start bit and wait for the done bit. */ | 103 | /* Set start bit and wait for the done bit. */ |
| 104 | ctrl |= CAN_RAMINIT_START_MASK(priv->instance); | 104 | ctrl |= CAN_RAMINIT_START_MASK(priv->instance); |
| 105 | writel(ctrl, priv->raminit_ctrlreg); | 105 | writel(ctrl, priv->raminit_ctrlreg); |
| 106 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); | 106 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); |
| 107 | c_can_hw_raminit_wait_ti(priv, ctrl, mask); | 107 | c_can_hw_raminit_wait_ti(priv, mask, ctrl); |
| 108 | } | 108 | } |
| 109 | spin_unlock(&raminit_lock); | 109 | spin_unlock(&raminit_lock); |
| 110 | } | 110 | } |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 944aa5d3af6e..6586309329e6 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
| @@ -62,7 +62,7 @@ | |||
| 62 | #define FLEXCAN_MCR_BCC BIT(16) | 62 | #define FLEXCAN_MCR_BCC BIT(16) |
| 63 | #define FLEXCAN_MCR_LPRIO_EN BIT(13) | 63 | #define FLEXCAN_MCR_LPRIO_EN BIT(13) |
| 64 | #define FLEXCAN_MCR_AEN BIT(12) | 64 | #define FLEXCAN_MCR_AEN BIT(12) |
| 65 | #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f) | 65 | #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) |
| 66 | #define FLEXCAN_MCR_IDAM_A (0 << 8) | 66 | #define FLEXCAN_MCR_IDAM_A (0 << 8) |
| 67 | #define FLEXCAN_MCR_IDAM_B (1 << 8) | 67 | #define FLEXCAN_MCR_IDAM_B (1 << 8) |
| 68 | #define FLEXCAN_MCR_IDAM_C (2 << 8) | 68 | #define FLEXCAN_MCR_IDAM_C (2 << 8) |
| @@ -125,7 +125,9 @@ | |||
| 125 | FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) | 125 | FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) |
| 126 | 126 | ||
| 127 | /* FLEXCAN interrupt flag register (IFLAG) bits */ | 127 | /* FLEXCAN interrupt flag register (IFLAG) bits */ |
| 128 | #define FLEXCAN_TX_BUF_ID 8 | 128 | /* Errata ERR005829 step7: Reserve first valid MB */ |
| 129 | #define FLEXCAN_TX_BUF_RESERVED 8 | ||
| 130 | #define FLEXCAN_TX_BUF_ID 9 | ||
| 129 | #define FLEXCAN_IFLAG_BUF(x) BIT(x) | 131 | #define FLEXCAN_IFLAG_BUF(x) BIT(x) |
| 130 | #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) | 132 | #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) |
| 131 | #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) | 133 | #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) |
| @@ -136,6 +138,17 @@ | |||
| 136 | 138 | ||
| 137 | /* FLEXCAN message buffers */ | 139 | /* FLEXCAN message buffers */ |
| 138 | #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) | 140 | #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) |
| 141 | #define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) | ||
| 142 | #define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) | ||
| 143 | #define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) | ||
| 144 | #define FLEXCAN_MB_CODE_RX_OVERRRUN (0x6 << 24) | ||
| 145 | #define FLEXCAN_MB_CODE_RX_RANSWER (0xa << 24) | ||
| 146 | |||
| 147 | #define FLEXCAN_MB_CODE_TX_INACTIVE (0x8 << 24) | ||
| 148 | #define FLEXCAN_MB_CODE_TX_ABORT (0x9 << 24) | ||
| 149 | #define FLEXCAN_MB_CODE_TX_DATA (0xc << 24) | ||
| 150 | #define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24) | ||
| 151 | |||
| 139 | #define FLEXCAN_MB_CNT_SRR BIT(22) | 152 | #define FLEXCAN_MB_CNT_SRR BIT(22) |
| 140 | #define FLEXCAN_MB_CNT_IDE BIT(21) | 153 | #define FLEXCAN_MB_CNT_IDE BIT(21) |
| 141 | #define FLEXCAN_MB_CNT_RTR BIT(20) | 154 | #define FLEXCAN_MB_CNT_RTR BIT(20) |
| @@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv) | |||
| 298 | flexcan_write(reg, ®s->mcr); | 311 | flexcan_write(reg, ®s->mcr); |
| 299 | 312 | ||
| 300 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) | 313 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) |
| 301 | usleep_range(10, 20); | 314 | udelay(10); |
| 302 | 315 | ||
| 303 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) | 316 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) |
| 304 | return -ETIMEDOUT; | 317 | return -ETIMEDOUT; |
| @@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) | |||
| 317 | flexcan_write(reg, ®s->mcr); | 330 | flexcan_write(reg, ®s->mcr); |
| 318 | 331 | ||
| 319 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) | 332 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) |
| 320 | usleep_range(10, 20); | 333 | udelay(10); |
| 321 | 334 | ||
| 322 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) | 335 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) |
| 323 | return -ETIMEDOUT; | 336 | return -ETIMEDOUT; |
| @@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv) | |||
| 336 | flexcan_write(reg, ®s->mcr); | 349 | flexcan_write(reg, ®s->mcr); |
| 337 | 350 | ||
| 338 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) | 351 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) |
| 339 | usleep_range(100, 200); | 352 | udelay(100); |
| 340 | 353 | ||
| 341 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) | 354 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) |
| 342 | return -ETIMEDOUT; | 355 | return -ETIMEDOUT; |
| @@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv) | |||
| 355 | flexcan_write(reg, ®s->mcr); | 368 | flexcan_write(reg, ®s->mcr); |
| 356 | 369 | ||
| 357 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) | 370 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) |
| 358 | usleep_range(10, 20); | 371 | udelay(10); |
| 359 | 372 | ||
| 360 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK) | 373 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK) |
| 361 | return -ETIMEDOUT; | 374 | return -ETIMEDOUT; |
| @@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv) | |||
| 370 | 383 | ||
| 371 | flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); | 384 | flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); |
| 372 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST)) | 385 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST)) |
| 373 | usleep_range(10, 20); | 386 | udelay(10); |
| 374 | 387 | ||
| 375 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST) | 388 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST) |
| 376 | return -ETIMEDOUT; | 389 | return -ETIMEDOUT; |
| @@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 428 | flexcan_write(can_id, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_id); | 441 | flexcan_write(can_id, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_id); |
| 429 | flexcan_write(ctrl, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | 442 | flexcan_write(ctrl, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); |
| 430 | 443 | ||
| 444 | /* Errata ERR005829 step8: | ||
| 445 | * Write twice INACTIVE(0x8) code to first MB. | ||
| 446 | */ | ||
| 447 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
| 448 | ®s->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); | ||
| 449 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
| 450 | ®s->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); | ||
| 451 | |||
| 431 | return NETDEV_TX_OK; | 452 | return NETDEV_TX_OK; |
| 432 | } | 453 | } |
| 433 | 454 | ||
| @@ -744,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) | |||
| 744 | stats->tx_bytes += can_get_echo_skb(dev, 0); | 765 | stats->tx_bytes += can_get_echo_skb(dev, 0); |
| 745 | stats->tx_packets++; | 766 | stats->tx_packets++; |
| 746 | can_led_event(dev, CAN_LED_EVENT_TX); | 767 | can_led_event(dev, CAN_LED_EVENT_TX); |
| 768 | /* after sending a RTR frame mailbox is in RX mode */ | ||
| 769 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
| 770 | ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | ||
| 747 | flexcan_write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1); | 771 | flexcan_write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1); |
| 748 | netif_wake_queue(dev); | 772 | netif_wake_queue(dev); |
| 749 | } | 773 | } |
| @@ -801,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev) | |||
| 801 | struct flexcan_regs __iomem *regs = priv->base; | 825 | struct flexcan_regs __iomem *regs = priv->base; |
| 802 | int err; | 826 | int err; |
| 803 | u32 reg_mcr, reg_ctrl; | 827 | u32 reg_mcr, reg_ctrl; |
| 828 | int i; | ||
| 804 | 829 | ||
| 805 | /* enable module */ | 830 | /* enable module */ |
| 806 | err = flexcan_chip_enable(priv); | 831 | err = flexcan_chip_enable(priv); |
| @@ -867,8 +892,18 @@ static int flexcan_chip_start(struct net_device *dev) | |||
| 867 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); | 892 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); |
| 868 | flexcan_write(reg_ctrl, ®s->ctrl); | 893 | flexcan_write(reg_ctrl, ®s->ctrl); |
| 869 | 894 | ||
| 870 | /* Abort any pending TX, mark Mailbox as INACTIVE */ | 895 | /* clear and invalidate all mailboxes first */ |
| 871 | flexcan_write(FLEXCAN_MB_CNT_CODE(0x4), | 896 | for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) { |
| 897 | flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, | ||
| 898 | ®s->cantxfg[i].can_ctrl); | ||
| 899 | } | ||
| 900 | |||
| 901 | /* Errata ERR005829: mark first TX mailbox as INACTIVE */ | ||
| 902 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
| 903 | ®s->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); | ||
| 904 | |||
| 905 | /* mark TX mailbox as INACTIVE */ | ||
| 906 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
| 872 | ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | 907 | ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); |
| 873 | 908 | ||
| 874 | /* acceptance mask/acceptance code (accept everything) */ | 909 | /* acceptance mask/acceptance code (accept everything) */ |
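Note: the flexcan changes work around errata ERR005829 by reserving the first mailbox (FLEXCAN_TX_BUF_RESERVED) and writing the TX INACTIVE code to it twice after queuing a frame, invalidating every mailbox during chip start, and rewriting the TX mailbox as INACTIVE after an RTR frame; they also replace usleep_range() with udelay() in the ACK polling loops, turning them into short busy-waits that are safe even where sleeping is not allowed. A sketch of a bounded busy-wait of that shape; the predicate is a stand-in for reading the MCR ACK bit:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	static int example_wait_ack(bool (*ready)(void *ctx), void *ctx)
	{
		unsigned int timeout = 10;	/* loop count, illustrative */

		while (timeout-- && !ready(ctx))
			udelay(10);		/* never sleeps */

		return ready(ctx) ? 0 : -ETIMEDOUT;
	}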
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 7a85590fefb9..e5fac368068a 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c | |||
| @@ -70,6 +70,8 @@ struct peak_pci_chan { | |||
| 70 | #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ | 70 | #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ |
| 71 | #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ | 71 | #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ |
| 72 | #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ | 72 | #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ |
| 73 | #define PEAK_PCIE_OEM_ID 0x0009 /* PCAN-PCI Express OEM */ | ||
| 74 | #define PEAK_PCIEC34_DEVICE_ID 0x000A /* PCAN-PCI Express 34 (one channel) */ | ||
| 73 | 75 | ||
| 74 | #define PEAK_PCI_CHAN_MAX 4 | 76 | #define PEAK_PCI_CHAN_MAX 4 |
| 75 | 77 | ||
| @@ -87,6 +89,7 @@ static const struct pci_device_id peak_pci_tbl[] = { | |||
| 87 | {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, | 89 | {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
| 88 | #ifdef CONFIG_CAN_PEAK_PCIEC | 90 | #ifdef CONFIG_CAN_PEAK_PCIEC |
| 89 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, | 91 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
| 92 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, | ||
| 90 | #endif | 93 | #endif |
| 91 | {0,} | 94 | {0,} |
| 92 | }; | 95 | }; |
| @@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 653 | * This must be done *before* register_sja1000dev() but | 656 | * This must be done *before* register_sja1000dev() but |
| 654 | * *after* devices linkage | 657 | * *after* devices linkage |
| 655 | */ | 658 | */ |
| 656 | if (pdev->device == PEAK_PCIEC_DEVICE_ID) { | 659 | if (pdev->device == PEAK_PCIEC_DEVICE_ID || |
| 660 | pdev->device == PEAK_PCIEC34_DEVICE_ID) { | ||
| 657 | err = peak_pciec_probe(pdev, dev); | 661 | err = peak_pciec_probe(pdev, dev); |
| 658 | if (err) { | 662 | if (err) { |
| 659 | dev_err(&pdev->dev, | 663 | dev_err(&pdev->dev, |
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 3fe45c705933..8ca49f04acec 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c | |||
| @@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2129 | int entry = vp->cur_tx % TX_RING_SIZE; | 2129 | int entry = vp->cur_tx % TX_RING_SIZE; |
| 2130 | struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; | 2130 | struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; |
| 2131 | unsigned long flags; | 2131 | unsigned long flags; |
| 2132 | dma_addr_t dma_addr; | ||
| 2132 | 2133 | ||
| 2133 | if (vortex_debug > 6) { | 2134 | if (vortex_debug > 6) { |
| 2134 | pr_debug("boomerang_start_xmit()\n"); | 2135 | pr_debug("boomerang_start_xmit()\n"); |
| @@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2163 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); | 2164 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); |
| 2164 | 2165 | ||
| 2165 | if (!skb_shinfo(skb)->nr_frags) { | 2166 | if (!skb_shinfo(skb)->nr_frags) { |
| 2166 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | 2167 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, |
| 2167 | skb->len, PCI_DMA_TODEVICE)); | 2168 | PCI_DMA_TODEVICE); |
| 2169 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
| 2170 | goto out_dma_err; | ||
| 2171 | |||
| 2172 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | ||
| 2168 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); | 2173 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); |
| 2169 | } else { | 2174 | } else { |
| 2170 | int i; | 2175 | int i; |
| 2171 | 2176 | ||
| 2172 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | 2177 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, |
| 2173 | skb_headlen(skb), PCI_DMA_TODEVICE)); | 2178 | skb_headlen(skb), PCI_DMA_TODEVICE); |
| 2179 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
| 2180 | goto out_dma_err; | ||
| 2181 | |||
| 2182 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | ||
| 2174 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); | 2183 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); |
| 2175 | 2184 | ||
| 2176 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 2185 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 2177 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2186 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 2178 | 2187 | ||
| 2188 | dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, | ||
| 2189 | 0, | ||
| 2190 | frag->size, | ||
| 2191 | DMA_TO_DEVICE); | ||
| 2192 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { | ||
| 2193 | for(i = i-1; i >= 0; i--) | ||
| 2194 | dma_unmap_page(&VORTEX_PCI(vp)->dev, | ||
| 2195 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), | ||
| 2196 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), | ||
| 2197 | DMA_TO_DEVICE); | ||
| 2198 | |||
| 2199 | pci_unmap_single(VORTEX_PCI(vp), | ||
| 2200 | le32_to_cpu(vp->tx_ring[entry].frag[0].addr), | ||
| 2201 | le32_to_cpu(vp->tx_ring[entry].frag[0].length), | ||
| 2202 | PCI_DMA_TODEVICE); | ||
| 2203 | |||
| 2204 | goto out_dma_err; | ||
| 2205 | } | ||
| 2206 | |||
| 2179 | vp->tx_ring[entry].frag[i+1].addr = | 2207 | vp->tx_ring[entry].frag[i+1].addr = |
| 2180 | cpu_to_le32(skb_frag_dma_map( | 2208 | cpu_to_le32(dma_addr); |
| 2181 | &VORTEX_PCI(vp)->dev, | ||
| 2182 | frag, | ||
| 2183 | frag->page_offset, frag->size, DMA_TO_DEVICE)); | ||
| 2184 | 2209 | ||
| 2185 | if (i == skb_shinfo(skb)->nr_frags-1) | 2210 | if (i == skb_shinfo(skb)->nr_frags-1) |
| 2186 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); | 2211 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); |
| @@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2189 | } | 2214 | } |
| 2190 | } | 2215 | } |
| 2191 | #else | 2216 | #else |
| 2192 | vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); | 2217 | dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); |
| 2218 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
| 2219 | goto out_dma_err; | ||
| 2220 | vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); | ||
| 2193 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); | 2221 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); |
| 2194 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); | 2222 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); |
| 2195 | #endif | 2223 | #endif |
| @@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2217 | skb_tx_timestamp(skb); | 2245 | skb_tx_timestamp(skb); |
| 2218 | iowrite16(DownUnstall, ioaddr + EL3_CMD); | 2246 | iowrite16(DownUnstall, ioaddr + EL3_CMD); |
| 2219 | spin_unlock_irqrestore(&vp->lock, flags); | 2247 | spin_unlock_irqrestore(&vp->lock, flags); |
| 2248 | out: | ||
| 2220 | return NETDEV_TX_OK; | 2249 | return NETDEV_TX_OK; |
| 2250 | out_dma_err: | ||
| 2251 | dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); | ||
| 2252 | goto out; | ||
| 2221 | } | 2253 | } |
| 2222 | 2254 | ||
| 2223 | /* The interrupt handler does all of the Rx thread work and cleans up | 2255 | /* The interrupt handler does all of the Rx thread work and cleans up |
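Note: the 3c59x changes add dma_mapping_error() checks after every pci_map_single()/skb_frag_dma_map() call in boomerang_start_xmit(); on a failure in the middle of the fragment list, the already-mapped fragments and the header are unmapped before the function bails out through the new out_dma_err label. A sketch of the map/check/unwind pattern with a simplified two-mapping layout, not the real descriptor ring:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/skbuff.h>

	static int example_map_head_and_frag(struct device *dev, struct sk_buff *skb,
					     dma_addr_t *head, dma_addr_t *frag)
	{
		*head = dma_map_single(dev, skb->data, skb_headlen(skb),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *head))
			return -ENOMEM;

		*frag = skb_frag_dma_map(dev, &skb_shinfo(skb)->frags[0], 0,
					 skb_frag_size(&skb_shinfo(skb)->frags[0]),
					 DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *frag)) {
			/* unwind the earlier mapping before giving up */
			dma_unmap_single(dev, *head, skb_headlen(skb),
					 DMA_TO_DEVICE);
			return -ENOMEM;
		}
		return 0;
	}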
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index fe5cfeace6e3..5919394d9f58 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
| @@ -30,6 +30,17 @@ | |||
| 30 | #define DRV_VERSION "1.0" | 30 | #define DRV_VERSION "1.0" |
| 31 | 31 | ||
| 32 | /** | 32 | /** |
| 33 | * arc_emac_tx_avail - Return the number of available slots in the tx ring. | ||
| 34 | * @priv: Pointer to ARC EMAC private data structure. | ||
| 35 | * | ||
| 36 | * returns: the number of slots available for transmission in tx the ring. | ||
| 37 | */ | ||
| 38 | static inline int arc_emac_tx_avail(struct arc_emac_priv *priv) | ||
| 39 | { | ||
| 40 | return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM; | ||
| 41 | } | ||
| 42 | |||
| 43 | /** | ||
| 33 | * arc_emac_adjust_link - Adjust the PHY link duplex. | 44 | * arc_emac_adjust_link - Adjust the PHY link duplex. |
| 34 | * @ndev: Pointer to the net_device structure. | 45 | * @ndev: Pointer to the net_device structure. |
| 35 | * | 46 | * |
| @@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev) | |||
| 180 | txbd->info = 0; | 191 | txbd->info = 0; |
| 181 | 192 | ||
| 182 | *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; | 193 | *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; |
| 183 | |||
| 184 | if (netif_queue_stopped(ndev)) | ||
| 185 | netif_wake_queue(ndev); | ||
| 186 | } | 194 | } |
| 195 | |||
| 196 | /* Ensure that txbd_dirty is visible to tx() before checking | ||
| 197 | * for queue stopped. | ||
| 198 | */ | ||
| 199 | smp_mb(); | ||
| 200 | |||
| 201 | if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv)) | ||
| 202 | netif_wake_queue(ndev); | ||
| 187 | } | 203 | } |
| 188 | 204 | ||
| 189 | /** | 205 | /** |
| @@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget) | |||
| 298 | work_done = arc_emac_rx(ndev, budget); | 314 | work_done = arc_emac_rx(ndev, budget); |
| 299 | if (work_done < budget) { | 315 | if (work_done < budget) { |
| 300 | napi_complete(napi); | 316 | napi_complete(napi); |
| 301 | arc_reg_or(priv, R_ENABLE, RXINT_MASK); | 317 | arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); |
| 302 | } | 318 | } |
| 303 | 319 | ||
| 304 | return work_done; | 320 | return work_done; |
| @@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance) | |||
| 327 | /* Reset all flags except "MDIO complete" */ | 343 | /* Reset all flags except "MDIO complete" */ |
| 328 | arc_reg_set(priv, R_STATUS, status); | 344 | arc_reg_set(priv, R_STATUS, status); |
| 329 | 345 | ||
| 330 | if (status & RXINT_MASK) { | 346 | if (status & (RXINT_MASK | TXINT_MASK)) { |
| 331 | if (likely(napi_schedule_prep(&priv->napi))) { | 347 | if (likely(napi_schedule_prep(&priv->napi))) { |
| 332 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK); | 348 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); |
| 333 | __napi_schedule(&priv->napi); | 349 | __napi_schedule(&priv->napi); |
| 334 | } | 350 | } |
| 335 | } | 351 | } |
| @@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev) | |||
| 440 | arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); | 456 | arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); |
| 441 | 457 | ||
| 442 | /* Enable interrupts */ | 458 | /* Enable interrupts */ |
| 443 | arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK); | 459 | arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); |
| 444 | 460 | ||
| 445 | /* Set CONTROL */ | 461 | /* Set CONTROL */ |
| 446 | arc_reg_set(priv, R_CTRL, | 462 | arc_reg_set(priv, R_CTRL, |
| @@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev) | |||
| 511 | netif_stop_queue(ndev); | 527 | netif_stop_queue(ndev); |
| 512 | 528 | ||
| 513 | /* Disable interrupts */ | 529 | /* Disable interrupts */ |
| 514 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK); | 530 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); |
| 515 | 531 | ||
| 516 | /* Disable EMAC */ | 532 | /* Disable EMAC */ |
| 517 | arc_reg_clr(priv, R_CTRL, EN_MASK); | 533 | arc_reg_clr(priv, R_CTRL, EN_MASK); |
| @@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
| 574 | 590 | ||
| 575 | len = max_t(unsigned int, ETH_ZLEN, skb->len); | 591 | len = max_t(unsigned int, ETH_ZLEN, skb->len); |
| 576 | 592 | ||
| 577 | /* EMAC still holds this buffer in its possession. | 593 | if (unlikely(!arc_emac_tx_avail(priv))) { |
| 578 | * CPU must not modify this buffer descriptor | ||
| 579 | */ | ||
| 580 | if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) { | ||
| 581 | netif_stop_queue(ndev); | 594 | netif_stop_queue(ndev); |
| 595 | netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n"); | ||
| 582 | return NETDEV_TX_BUSY; | 596 | return NETDEV_TX_BUSY; |
| 583 | } | 597 | } |
| 584 | 598 | ||
| @@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
| 607 | /* Increment index to point to the next BD */ | 621 | /* Increment index to point to the next BD */ |
| 608 | *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; | 622 | *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; |
| 609 | 623 | ||
| 610 | /* Get "info" of the next BD */ | 624 | /* Ensure that tx_clean() sees the new txbd_curr before |
| 611 | info = &priv->txbd[*txbd_curr].info; | 625 | * checking the queue status. This prevents an unneeded wake |
| 626 | * of the queue in tx_clean(). | ||
| 627 | */ | ||
| 628 | smp_mb(); | ||
| 612 | 629 | ||
| 613 | /* Check if if Tx BD ring is full - next BD is still owned by EMAC */ | 630 | if (!arc_emac_tx_avail(priv)) { |
| 614 | if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) | ||
| 615 | netif_stop_queue(ndev); | 631 | netif_stop_queue(ndev); |
| 632 | /* Refresh tx_dirty */ | ||
| 633 | smp_mb(); | ||
| 634 | if (arc_emac_tx_avail(priv)) | ||
| 635 | netif_start_queue(ndev); | ||
| 636 | } | ||
| 616 | 637 | ||
| 617 | arc_reg_set(priv, R_STATUS, TXPL_MASK); | 638 | arc_reg_set(priv, R_STATUS, TXPL_MASK); |
| 618 | 639 | ||
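The arc_emac change replaces the per-descriptor OWN-bit test with arc_emac_tx_avail() and brackets the stop/wake decisions with smp_mb() so the producer in arc_emac_tx() and the consumer in arc_emac_tx_clean() see each other's index updates before acting on them. The occupancy formula itself is plain modular arithmetic; the self-contained program below just exercises it (the ring size of 128 is illustrative, the driver takes TX_BD_NUM from its own header).

#include <stdio.h>

#define TX_BD_NUM 128	/* illustrative; the driver defines its own value */

/* Same formula as arc_emac_tx_avail(): slots the producer may still
 * use.  One slot is always left empty so that curr == dirty can only
 * mean "ring empty", never "ring full". */
static unsigned int tx_avail(unsigned int txbd_dirty, unsigned int txbd_curr)
{
	return (txbd_dirty + TX_BD_NUM - txbd_curr - 1) % TX_BD_NUM;
}

int main(void)
{
	printf("empty ring   : %u\n", tx_avail(0, 0));	/* 127 */
	printf("5 in flight  : %u\n", tx_avail(0, 5));	/* 122 */
	printf("full ring    : %u\n", tx_avail(6, 5));	/* 0   */
	printf("wrapped case : %u\n", tx_avail(120, 3));	/* 116 */
	return 0;
}

The barriers are what make the netif_stop_queue()/netif_wake_queue() decisions based on this value race-free; the arithmetic alone cannot close the window where one side checks the queue just as the other updates its index.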
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 4a7028d65912..d588136b23b9 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
| @@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, | |||
| 1697 | hwstat->tx_underruns + | 1697 | hwstat->tx_underruns + |
| 1698 | hwstat->tx_excessive_cols + | 1698 | hwstat->tx_excessive_cols + |
| 1699 | hwstat->tx_late_cols); | 1699 | hwstat->tx_late_cols); |
| 1700 | nstat->multicast = hwstat->tx_multicast_pkts; | 1700 | nstat->multicast = hwstat->rx_multicast_pkts; |
| 1701 | nstat->collisions = hwstat->tx_total_cols; | 1701 | nstat->collisions = hwstat->tx_total_cols; |
| 1702 | 1702 | ||
| 1703 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + | 1703 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 6f4e18644bd4..d9b9170ed2fc 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
| @@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, | |||
| 534 | while ((processed < to_process) && (processed < budget)) { | 534 | while ((processed < to_process) && (processed < budget)) { |
| 535 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | 535 | cb = &priv->rx_cbs[priv->rx_read_ptr]; |
| 536 | skb = cb->skb; | 536 | skb = cb->skb; |
| 537 | |||
| 538 | processed++; | ||
| 539 | priv->rx_read_ptr++; | ||
| 540 | |||
| 541 | if (priv->rx_read_ptr == priv->num_rx_bds) | ||
| 542 | priv->rx_read_ptr = 0; | ||
| 543 | |||
| 544 | /* We do not have a backing SKB, so we do not have a corresponding | ||
| 545 | * DMA mapping for this incoming packet since | ||
| 546 | * bcm_sysport_rx_refill always either has both skb and mapping | ||
| 547 | * or none. | ||
| 548 | */ | ||
| 549 | if (unlikely(!skb)) { | ||
| 550 | netif_err(priv, rx_err, ndev, "out of memory!\n"); | ||
| 551 | ndev->stats.rx_dropped++; | ||
| 552 | ndev->stats.rx_errors++; | ||
| 553 | goto refill; | ||
| 554 | } | ||
| 555 | |||
| 537 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), | 556 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), |
| 538 | RX_BUF_LENGTH, DMA_FROM_DEVICE); | 557 | RX_BUF_LENGTH, DMA_FROM_DEVICE); |
| 539 | 558 | ||
| @@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, | |||
| 543 | status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & | 562 | status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & |
| 544 | DESC_STATUS_MASK; | 563 | DESC_STATUS_MASK; |
| 545 | 564 | ||
| 546 | processed++; | ||
| 547 | priv->rx_read_ptr++; | ||
| 548 | if (priv->rx_read_ptr == priv->num_rx_bds) | ||
| 549 | priv->rx_read_ptr = 0; | ||
| 550 | |||
| 551 | netif_dbg(priv, rx_status, ndev, | 565 | netif_dbg(priv, rx_status, ndev, |
| 552 | "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", | 566 | "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", |
| 553 | p_index, priv->rx_c_index, priv->rx_read_ptr, | 567 | p_index, priv->rx_c_index, priv->rx_read_ptr, |
| 554 | len, status); | 568 | len, status); |
| 555 | 569 | ||
| 556 | if (unlikely(!skb)) { | ||
| 557 | netif_err(priv, rx_err, ndev, "out of memory!\n"); | ||
| 558 | ndev->stats.rx_dropped++; | ||
| 559 | ndev->stats.rx_errors++; | ||
| 560 | goto refill; | ||
| 561 | } | ||
| 562 | |||
| 563 | if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { | 570 | if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { |
| 564 | netif_err(priv, rx_status, ndev, "fragmented packet!\n"); | 571 | netif_err(priv, rx_status, ndev, "fragmented packet!\n"); |
| 565 | ndev->stats.rx_dropped++; | 572 | ndev->stats.rx_dropped++; |
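The bcm_sysport hunks hoist the processed++ accounting, the read-pointer advance and the missing-skb check above the dma_unmap_single() call: when refill failed there is no mapping to unmap, and the early bail-out must still consume the descriptor. The self-contained sketch below mirrors that consume-then-validate ordering with an invented ring layout (none of the names are the driver's).

#include <stdio.h>

#define NUM_RX_BDS 8

struct rx_cb {
	void *skb;		/* NULL models a slot whose refill failed */
};

static struct rx_cb rx_cbs[NUM_RX_BDS];
static unsigned int rx_read_ptr;

static void process_ring(unsigned int to_process)
{
	while (to_process--) {
		void *skb = rx_cbs[rx_read_ptr].skb;
		unsigned int slot = rx_read_ptr;

		/* Consume the descriptor first, as the patch does:
		 * advance the read pointer before inspecting the buffer. */
		rx_read_ptr++;
		if (rx_read_ptr == NUM_RX_BDS)
			rx_read_ptr = 0;

		if (!skb) {
			printf("slot %u: no buffer, counted as dropped\n", slot);
			continue;	/* the driver jumps to its refill label */
		}
		printf("slot %u: packet processed\n", slot);
	}
}

int main(void)
{
	static int fake_skb;

	for (unsigned int i = 0; i < NUM_RX_BDS; i++)
		rx_cbs[i].skb = (i == 3) ? NULL : &fake_skb;

	process_ring(NUM_RX_BDS);
	printf("read pointer wrapped back to %u\n", rx_read_ptr);	/* 0 */
	return 0;
}

Consuming the slot up front keeps the accounting correct on the bail-out path; the other half of the patch is simply not calling dma_unmap_single() when there was never a mapping to begin with.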
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 3f9d4de8173c..5cc9cae21ed5 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
| @@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 875 | int last_tx_cn, last_c_index, num_tx_bds; | 875 | int last_tx_cn, last_c_index, num_tx_bds; |
| 876 | struct enet_cb *tx_cb_ptr; | 876 | struct enet_cb *tx_cb_ptr; |
| 877 | struct netdev_queue *txq; | 877 | struct netdev_queue *txq; |
| 878 | unsigned int bds_compl; | ||
| 878 | unsigned int c_index; | 879 | unsigned int c_index; |
| 879 | 880 | ||
| 880 | /* Compute how many buffers are transmitted since last xmit call */ | 881 | /* Compute how many buffers are transmitted since last xmit call */ |
| @@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 899 | /* Reclaim transmitted buffers */ | 900 | /* Reclaim transmitted buffers */ |
| 900 | while (last_tx_cn-- > 0) { | 901 | while (last_tx_cn-- > 0) { |
| 901 | tx_cb_ptr = ring->cbs + last_c_index; | 902 | tx_cb_ptr = ring->cbs + last_c_index; |
| 903 | bds_compl = 0; | ||
| 902 | if (tx_cb_ptr->skb) { | 904 | if (tx_cb_ptr->skb) { |
| 905 | bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; | ||
| 903 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; | 906 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; |
| 904 | dma_unmap_single(&dev->dev, | 907 | dma_unmap_single(&dev->dev, |
| 905 | dma_unmap_addr(tx_cb_ptr, dma_addr), | 908 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
| @@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 916 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); | 919 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); |
| 917 | } | 920 | } |
| 918 | dev->stats.tx_packets++; | 921 | dev->stats.tx_packets++; |
| 919 | ring->free_bds += 1; | 922 | ring->free_bds += bds_compl; |
| 920 | 923 | ||
| 921 | last_c_index++; | 924 | last_c_index++; |
| 922 | last_c_index &= (num_tx_bds - 1); | 925 | last_c_index &= (num_tx_bds - 1); |
| @@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
| 1274 | 1277 | ||
| 1275 | while ((rxpktprocessed < rxpkttoprocess) && | 1278 | while ((rxpktprocessed < rxpkttoprocess) && |
| 1276 | (rxpktprocessed < budget)) { | 1279 | (rxpktprocessed < budget)) { |
| 1280 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | ||
| 1281 | skb = cb->skb; | ||
| 1282 | |||
| 1283 | rxpktprocessed++; | ||
| 1284 | |||
| 1285 | priv->rx_read_ptr++; | ||
| 1286 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | ||
| 1287 | |||
| 1288 | /* We do not have a backing SKB, so we do not have a | ||
| 1289 | * corresponding DMA mapping for this incoming packet since | ||
| 1290 | * bcmgenet_rx_refill always either has both skb and mapping or | ||
| 1291 | * none. | ||
| 1292 | */ | ||
| 1293 | if (unlikely(!skb)) { | ||
| 1294 | dev->stats.rx_dropped++; | ||
| 1295 | dev->stats.rx_errors++; | ||
| 1296 | goto refill; | ||
| 1297 | } | ||
| 1298 | |||
| 1277 | /* Unmap the packet contents such that we can use the | 1299 | /* Unmap the packet contents such that we can use the |
| 1278 | * RSV from the 64 bytes descriptor when enabled and save | 1300 | * RSV from the 64 bytes descriptor when enabled and save |
| 1279 | * a 32-bits register read | 1301 | * a 32-bits register read |
| 1280 | */ | 1302 | */ |
| 1281 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | ||
| 1282 | skb = cb->skb; | ||
| 1283 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), | 1303 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), |
| 1284 | priv->rx_buf_len, DMA_FROM_DEVICE); | 1304 | priv->rx_buf_len, DMA_FROM_DEVICE); |
| 1285 | 1305 | ||
| @@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
| 1307 | __func__, p_index, priv->rx_c_index, | 1327 | __func__, p_index, priv->rx_c_index, |
| 1308 | priv->rx_read_ptr, dma_length_status); | 1328 | priv->rx_read_ptr, dma_length_status); |
| 1309 | 1329 | ||
| 1310 | rxpktprocessed++; | ||
| 1311 | |||
| 1312 | priv->rx_read_ptr++; | ||
| 1313 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | ||
| 1314 | |||
| 1315 | /* out of memory, just drop packets at the hardware level */ | ||
| 1316 | if (unlikely(!skb)) { | ||
| 1317 | dev->stats.rx_dropped++; | ||
| 1318 | dev->stats.rx_errors++; | ||
| 1319 | goto refill; | ||
| 1320 | } | ||
| 1321 | |||
| 1322 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { | 1330 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { |
| 1323 | netif_err(priv, rx_status, dev, | 1331 | netif_err(priv, rx_status, dev, |
| 1324 | "dropping fragmented packet!\n"); | 1332 | "dropping fragmented packet!\n"); |
| @@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev) | |||
| 1736 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | 1744 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); |
| 1737 | } | 1745 | } |
| 1738 | 1746 | ||
| 1747 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | ||
| 1748 | { | ||
| 1749 | int ret = 0; | ||
| 1750 | int timeout = 0; | ||
| 1751 | u32 reg; | ||
| 1752 | |||
| 1753 | /* Disable TDMA to stop adding more frames to the TX DMA */ | ||
| 1754 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
| 1755 | reg &= ~DMA_EN; | ||
| 1756 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
| 1757 | |||
| 1758 | /* Check TDMA status register to confirm TDMA is disabled */ | ||
| 1759 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
| 1760 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | ||
| 1761 | if (reg & DMA_DISABLED) | ||
| 1762 | break; | ||
| 1763 | |||
| 1764 | udelay(1); | ||
| 1765 | } | ||
| 1766 | |||
| 1767 | if (timeout == DMA_TIMEOUT_VAL) { | ||
| 1768 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | ||
| 1769 | ret = -ETIMEDOUT; | ||
| 1770 | } | ||
| 1771 | |||
| 1772 | /* Wait 10ms for packet drain in both tx and rx dma */ | ||
| 1773 | usleep_range(10000, 20000); | ||
| 1774 | |||
| 1775 | /* Disable RDMA */ | ||
| 1776 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
| 1777 | reg &= ~DMA_EN; | ||
| 1778 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
| 1779 | |||
| 1780 | timeout = 0; | ||
| 1781 | /* Check RDMA status register to confirm RDMA is disabled */ | ||
| 1782 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
| 1783 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | ||
| 1784 | if (reg & DMA_DISABLED) | ||
| 1785 | break; | ||
| 1786 | |||
| 1787 | udelay(1); | ||
| 1788 | } | ||
| 1789 | |||
| 1790 | if (timeout == DMA_TIMEOUT_VAL) { | ||
| 1791 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | ||
| 1792 | ret = -ETIMEDOUT; | ||
| 1793 | } | ||
| 1794 | |||
| 1795 | return ret; | ||
| 1796 | } | ||
| 1797 | |||
| 1739 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | 1798 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
| 1740 | { | 1799 | { |
| 1741 | int i; | 1800 | int i; |
| 1742 | 1801 | ||
| 1743 | /* disable DMA */ | 1802 | /* disable DMA */ |
| 1744 | bcmgenet_rdma_writel(priv, 0, DMA_CTRL); | 1803 | bcmgenet_dma_teardown(priv); |
| 1745 | bcmgenet_tdma_writel(priv, 0, DMA_CTRL); | ||
| 1746 | 1804 | ||
| 1747 | for (i = 0; i < priv->num_tx_bds; i++) { | 1805 | for (i = 0; i < priv->num_tx_bds; i++) { |
| 1748 | if (priv->tx_cbs[i].skb != NULL) { | 1806 | if (priv->tx_cbs[i].skb != NULL) { |
| @@ -2101,57 +2159,6 @@ err_clk_disable: | |||
| 2101 | return ret; | 2159 | return ret; |
| 2102 | } | 2160 | } |
| 2103 | 2161 | ||
| 2104 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | ||
| 2105 | { | ||
| 2106 | int ret = 0; | ||
| 2107 | int timeout = 0; | ||
| 2108 | u32 reg; | ||
| 2109 | |||
| 2110 | /* Disable TDMA to stop add more frames in TX DMA */ | ||
| 2111 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
| 2112 | reg &= ~DMA_EN; | ||
| 2113 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
| 2114 | |||
| 2115 | /* Check TDMA status register to confirm TDMA is disabled */ | ||
| 2116 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
| 2117 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | ||
| 2118 | if (reg & DMA_DISABLED) | ||
| 2119 | break; | ||
| 2120 | |||
| 2121 | udelay(1); | ||
| 2122 | } | ||
| 2123 | |||
| 2124 | if (timeout == DMA_TIMEOUT_VAL) { | ||
| 2125 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | ||
| 2126 | ret = -ETIMEDOUT; | ||
| 2127 | } | ||
| 2128 | |||
| 2129 | /* Wait 10ms for packet drain in both tx and rx dma */ | ||
| 2130 | usleep_range(10000, 20000); | ||
| 2131 | |||
| 2132 | /* Disable RDMA */ | ||
| 2133 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
| 2134 | reg &= ~DMA_EN; | ||
| 2135 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
| 2136 | |||
| 2137 | timeout = 0; | ||
| 2138 | /* Check RDMA status register to confirm RDMA is disabled */ | ||
| 2139 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
| 2140 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | ||
| 2141 | if (reg & DMA_DISABLED) | ||
| 2142 | break; | ||
| 2143 | |||
| 2144 | udelay(1); | ||
| 2145 | } | ||
| 2146 | |||
| 2147 | if (timeout == DMA_TIMEOUT_VAL) { | ||
| 2148 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | ||
| 2149 | ret = -ETIMEDOUT; | ||
| 2150 | } | ||
| 2151 | |||
| 2152 | return ret; | ||
| 2153 | } | ||
| 2154 | |||
| 2155 | static void bcmgenet_netif_stop(struct net_device *dev) | 2162 | static void bcmgenet_netif_stop(struct net_device *dev) |
| 2156 | { | 2163 | { |
| 2157 | struct bcmgenet_priv *priv = netdev_priv(dev); | 2164 | struct bcmgenet_priv *priv = netdev_priv(dev); |
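bcmgenet_dma_teardown() is moved ahead of bcmgenet_fini_dma() so the rings are only freed after both DMA engines have acknowledged being disabled. The wait itself is the usual clear-the-enable-bit-then-poll-status loop; a hedged kernel-style sketch of that shape is below (the register pointers, bit names and timeout are stand-ins, not GENET definitions).

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_DMA_TIMEOUT_VAL	5000	/* illustrative number of 1us polls */

/* Sketch: disable a DMA engine and wait for the hardware to confirm. */
static int my_dma_disable(void __iomem *ctrl, void __iomem *status,
			  u32 en_bit, u32 disabled_bit)
{
	int timeout = 0;

	writel(readl(ctrl) & ~en_bit, ctrl);	/* stop accepting new work */

	while (timeout++ < MY_DMA_TIMEOUT_VAL) {
		if (readl(status) & disabled_bit)
			return 0;		/* engine acknowledged */
		udelay(1);
	}

	return -ETIMEDOUT;			/* caller decides how loudly to warn */
}

The driver runs this once for TDMA, sleeps roughly 10ms to let in-flight packets drain, then repeats it for RDMA, warning rather than failing hard on a timeout.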
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index cb77ae93d89a..e7d3a620d96a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -7914,8 +7914,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 7914 | 7914 | ||
| 7915 | entry = tnapi->tx_prod; | 7915 | entry = tnapi->tx_prod; |
| 7916 | base_flags = 0; | 7916 | base_flags = 0; |
| 7917 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
| 7918 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
| 7919 | 7917 | ||
| 7920 | mss = skb_shinfo(skb)->gso_size; | 7918 | mss = skb_shinfo(skb)->gso_size; |
| 7921 | if (mss) { | 7919 | if (mss) { |
| @@ -7929,6 +7927,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 7929 | 7927 | ||
| 7930 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; | 7928 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; |
| 7931 | 7929 | ||
| 7930 | /* HW/FW can not correctly segment packets that have been | ||
| 7931 | * vlan encapsulated. | ||
| 7932 | */ | ||
| 7933 | if (skb->protocol == htons(ETH_P_8021Q) || | ||
| 7934 | skb->protocol == htons(ETH_P_8021AD)) | ||
| 7935 | return tg3_tso_bug(tp, tnapi, txq, skb); | ||
| 7936 | |||
| 7932 | if (!skb_is_gso_v6(skb)) { | 7937 | if (!skb_is_gso_v6(skb)) { |
| 7933 | if (unlikely((ETH_HLEN + hdr_len) > 80) && | 7938 | if (unlikely((ETH_HLEN + hdr_len) > 80) && |
| 7934 | tg3_flag(tp, TSO_BUG)) | 7939 | tg3_flag(tp, TSO_BUG)) |
| @@ -7979,6 +7984,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 7979 | base_flags |= tsflags << 12; | 7984 | base_flags |= tsflags << 12; |
| 7980 | } | 7985 | } |
| 7981 | } | 7986 | } |
| 7987 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
| 7988 | /* HW/FW can not correctly checksum packets that have been | ||
| 7989 | * vlan encapsulated. | ||
| 7990 | */ | ||
| 7991 | if (skb->protocol == htons(ETH_P_8021Q) || | ||
| 7992 | skb->protocol == htons(ETH_P_8021AD)) { | ||
| 7993 | if (skb_checksum_help(skb)) | ||
| 7994 | goto drop; | ||
| 7995 | } else { | ||
| 7996 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
| 7997 | } | ||
| 7982 | } | 7998 | } |
| 7983 | 7999 | ||
| 7984 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && | 8000 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && |
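The tg3 change stops asking the hardware to segment or checksum frames whose skb->protocol is a VLAN ethertype: GSO packets are diverted to tg3_tso_bug() and CHECKSUM_PARTIAL packets fall back to skb_checksum_help(). A hedged sketch of the checksum half, using only generic helpers (the hw_csum flag stands in for setting TXD_FLAG_TCPUDP_CSUM later in the real transmit path):

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: pick hardware checksum offload or a software fallback for
 * VLAN ethertypes the hardware cannot handle.  Returns 0 on success. */
static int my_tx_csum(struct sk_buff *skb, bool *hw_csum)
{
	*hw_csum = false;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;				/* nothing to offload */

	if (skb->protocol == htons(ETH_P_8021Q) ||
	    skb->protocol == htons(ETH_P_8021AD)) {
		/* HW/FW cannot checksum vlan-encapsulated frames:
		 * compute it in software before queueing. */
		return skb_checksum_help(skb);
	}

	*hw_csum = true;	/* safe to request the hardware checksum flag */
	return 0;
}

On the real path a skb_checksum_help() failure is a drop, which is what the goto drop in the hunk above does.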
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8c34811a1128..e5be511a3c38 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -6478,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 6478 | struct port_info *pi; | 6478 | struct port_info *pi; |
| 6479 | bool highdma = false; | 6479 | bool highdma = false; |
| 6480 | struct adapter *adapter = NULL; | 6480 | struct adapter *adapter = NULL; |
| 6481 | void __iomem *regs; | ||
| 6481 | 6482 | ||
| 6482 | printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); | 6483 | printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); |
| 6483 | 6484 | ||
| @@ -6494,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 6494 | goto out_release_regions; | 6495 | goto out_release_regions; |
| 6495 | } | 6496 | } |
| 6496 | 6497 | ||
| 6498 | regs = pci_ioremap_bar(pdev, 0); | ||
| 6499 | if (!regs) { | ||
| 6500 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
| 6501 | err = -ENOMEM; | ||
| 6502 | goto out_disable_device; | ||
| 6503 | } | ||
| 6504 | |||
| 6505 | /* We control everything through one PF */ | ||
| 6506 | func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); | ||
| 6507 | if (func != ent->driver_data) { | ||
| 6508 | iounmap(regs); | ||
| 6509 | pci_disable_device(pdev); | ||
| 6510 | pci_save_state(pdev); /* to restore SR-IOV later */ | ||
| 6511 | goto sriov; | ||
| 6512 | } | ||
| 6513 | |||
| 6497 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 6514 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
| 6498 | highdma = true; | 6515 | highdma = true; |
| 6499 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 6516 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
| 6500 | if (err) { | 6517 | if (err) { |
| 6501 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " | 6518 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " |
| 6502 | "coherent allocations\n"); | 6519 | "coherent allocations\n"); |
| 6503 | goto out_disable_device; | 6520 | goto out_unmap_bar0; |
| 6504 | } | 6521 | } |
| 6505 | } else { | 6522 | } else { |
| 6506 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 6523 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
| 6507 | if (err) { | 6524 | if (err) { |
| 6508 | dev_err(&pdev->dev, "no usable DMA configuration\n"); | 6525 | dev_err(&pdev->dev, "no usable DMA configuration\n"); |
| 6509 | goto out_disable_device; | 6526 | goto out_unmap_bar0; |
| 6510 | } | 6527 | } |
| 6511 | } | 6528 | } |
| 6512 | 6529 | ||
| @@ -6518,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 6518 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | 6535 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); |
| 6519 | if (!adapter) { | 6536 | if (!adapter) { |
| 6520 | err = -ENOMEM; | 6537 | err = -ENOMEM; |
| 6521 | goto out_disable_device; | 6538 | goto out_unmap_bar0; |
| 6522 | } | 6539 | } |
| 6523 | 6540 | ||
| 6524 | adapter->workq = create_singlethread_workqueue("cxgb4"); | 6541 | adapter->workq = create_singlethread_workqueue("cxgb4"); |
| @@ -6530,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 6530 | /* PCI device has been enabled */ | 6547 | /* PCI device has been enabled */ |
| 6531 | adapter->flags |= DEV_ENABLED; | 6548 | adapter->flags |= DEV_ENABLED; |
| 6532 | 6549 | ||
| 6533 | adapter->regs = pci_ioremap_bar(pdev, 0); | 6550 | adapter->regs = regs; |
| 6534 | if (!adapter->regs) { | ||
| 6535 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
| 6536 | err = -ENOMEM; | ||
| 6537 | goto out_free_adapter; | ||
| 6538 | } | ||
| 6539 | |||
| 6540 | /* We control everything through one PF */ | ||
| 6541 | func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI)); | ||
| 6542 | if (func != ent->driver_data) { | ||
| 6543 | pci_save_state(pdev); /* to restore SR-IOV later */ | ||
| 6544 | goto sriov; | ||
| 6545 | } | ||
| 6546 | |||
| 6547 | adapter->pdev = pdev; | 6551 | adapter->pdev = pdev; |
| 6548 | adapter->pdev_dev = &pdev->dev; | 6552 | adapter->pdev_dev = &pdev->dev; |
| 6549 | adapter->mbox = func; | 6553 | adapter->mbox = func; |
| @@ -6560,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 6560 | 6564 | ||
| 6561 | err = t4_prep_adapter(adapter); | 6565 | err = t4_prep_adapter(adapter); |
| 6562 | if (err) | 6566 | if (err) |
| 6563 | goto out_unmap_bar0; | 6567 | goto out_free_adapter; |
| 6568 | |||
| 6564 | 6569 | ||
| 6565 | if (!is_t4(adapter->params.chip)) { | 6570 | if (!is_t4(adapter->params.chip)) { |
| 6566 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; | 6571 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; |
| @@ -6577,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 6577 | dev_err(&pdev->dev, | 6582 | dev_err(&pdev->dev, |
| 6578 | "Incorrect number of egress queues per page\n"); | 6583 | "Incorrect number of egress queues per page\n"); |
| 6579 | err = -EINVAL; | 6584 | err = -EINVAL; |
| 6580 | goto out_unmap_bar0; | 6585 | goto out_free_adapter; |
| 6581 | } | 6586 | } |
| 6582 | adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), | 6587 | adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), |
| 6583 | pci_resource_len(pdev, 2)); | 6588 | pci_resource_len(pdev, 2)); |
| 6584 | if (!adapter->bar2) { | 6589 | if (!adapter->bar2) { |
| 6585 | dev_err(&pdev->dev, "cannot map device bar2 region\n"); | 6590 | dev_err(&pdev->dev, "cannot map device bar2 region\n"); |
| 6586 | err = -ENOMEM; | 6591 | err = -ENOMEM; |
| 6587 | goto out_unmap_bar0; | 6592 | goto out_free_adapter; |
| 6588 | } | 6593 | } |
| 6589 | } | 6594 | } |
| 6590 | 6595 | ||
| @@ -6722,13 +6727,13 @@ sriov: | |||
| 6722 | out_unmap_bar: | 6727 | out_unmap_bar: |
| 6723 | if (!is_t4(adapter->params.chip)) | 6728 | if (!is_t4(adapter->params.chip)) |
| 6724 | iounmap(adapter->bar2); | 6729 | iounmap(adapter->bar2); |
| 6725 | out_unmap_bar0: | ||
| 6726 | iounmap(adapter->regs); | ||
| 6727 | out_free_adapter: | 6730 | out_free_adapter: |
| 6728 | if (adapter->workq) | 6731 | if (adapter->workq) |
| 6729 | destroy_workqueue(adapter->workq); | 6732 | destroy_workqueue(adapter->workq); |
| 6730 | 6733 | ||
| 6731 | kfree(adapter); | 6734 | kfree(adapter); |
| 6735 | out_unmap_bar0: | ||
| 6736 | iounmap(regs); | ||
| 6732 | out_disable_device: | 6737 | out_disable_device: |
| 6733 | pci_disable_pcie_error_reporting(pdev); | 6738 | pci_disable_pcie_error_reporting(pdev); |
| 6734 | pci_disable_device(pdev); | 6739 | pci_disable_device(pdev); |
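The cxgb4 probe change maps BAR0 and reads PL_WHOAMI before any allocation, so the "this function is not the management PF" path can bail out without an adapter structure, and the error labels are reordered so each one undoes exactly what has been acquired so far. A hedged, stripped-down sketch of that probe shape (the 64-byte allocation stands in for the real adapter struct):

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/errno.h>

/* Sketch: acquire in order, unwind in reverse order. */
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *regs;
	void *adapter;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	regs = pci_ioremap_bar(pdev, 0);	/* needed before any allocation */
	if (!regs) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	/* ... early register reads (e.g. "which PF am I") go here ... */

	adapter = kzalloc(64, GFP_KERNEL);	/* stand-in for the driver state */
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	/* ... rest of probe ... */
	return 0;

out_unmap_bar0:
	iounmap(regs);
out_disable_device:
	pci_disable_device(pdev);
	return err;
}

Keeping the labels in strict reverse order of acquisition is what lets the hunk above retarget several goto out_disable_device sites to goto out_unmap_bar0 without leaking the mapping.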
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 9b33057a9477..70089c29d307 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
| @@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) | |||
| 1399 | const void *mac_addr; | 1399 | const void *mac_addr; |
| 1400 | 1400 | ||
| 1401 | if (!IS_ENABLED(CONFIG_OF) || !np) | 1401 | if (!IS_ENABLED(CONFIG_OF) || !np) |
| 1402 | return NULL; | 1402 | return ERR_PTR(-ENXIO); |
| 1403 | 1403 | ||
| 1404 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | 1404 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); |
| 1405 | if (!pdata) | 1405 | if (!pdata) |
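The dm9000 fix makes dm9000_parse_dt() return ERR_PTR(-ENXIO) instead of NULL, so its caller can distinguish "no device-tree data, fall back to platform data" from a real allocation failure. A short hedged sketch of that ERR_PTR convention with invented names:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_cfg { int speed; };

/* Return a valid pointer, or an ERR_PTR() encoding why there is none. */
static struct my_cfg *parse_cfg(bool have_node)
{
	struct my_cfg *cfg;

	if (!have_node)
		return ERR_PTR(-ENXIO);	/* "not applicable", not a failure to allocate */

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return ERR_PTR(-ENOMEM);

	cfg->speed = 100;
	return cfg;
}

static int use_cfg(bool have_node)
{
	struct my_cfg *cfg = parse_cfg(have_node);

	if (IS_ERR(cfg)) {
		if (PTR_ERR(cfg) == -ENXIO)
			return 0;		/* fall back to platform data */
		return PTR_ERR(cfg);		/* propagate real errors */
	}

	/* ... use cfg ... */
	kfree(cfg);
	return 0;
}

With a bare NULL return the caller cannot tell those two cases apart, which is exactly the ambiguity the one-line change above removes.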
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 65a4a0f88ea0..02a2e90d581a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
| @@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( | |||
| 2389 | } | 2389 | } |
| 2390 | EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); | 2390 | EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); |
| 2391 | 2391 | ||
| 2392 | static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port) | ||
| 2393 | { | ||
| 2394 | struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); | ||
| 2395 | int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports) | ||
| 2396 | + 1; | ||
| 2397 | int max_port = min_port + | ||
| 2398 | bitmap_weight(actv_ports.ports, dev->caps.num_ports); | ||
| 2399 | |||
| 2400 | if (port < min_port) | ||
| 2401 | port = min_port; | ||
| 2402 | else if (port >= max_port) | ||
| 2403 | port = max_port - 1; | ||
| 2404 | |||
| 2405 | return port; | ||
| 2406 | } | ||
| 2407 | |||
| 2392 | int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) | 2408 | int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) |
| 2393 | { | 2409 | { |
| 2394 | struct mlx4_priv *priv = mlx4_priv(dev); | 2410 | struct mlx4_priv *priv = mlx4_priv(dev); |
| @@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) | |||
| 2402 | if (slave < 0) | 2418 | if (slave < 0) |
| 2403 | return -EINVAL; | 2419 | return -EINVAL; |
| 2404 | 2420 | ||
| 2421 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
| 2405 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; | 2422 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; |
| 2406 | s_info->mac = mac; | 2423 | s_info->mac = mac; |
| 2407 | mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", | 2424 | mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", |
| @@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) | |||
| 2428 | if (slave < 0) | 2445 | if (slave < 0) |
| 2429 | return -EINVAL; | 2446 | return -EINVAL; |
| 2430 | 2447 | ||
| 2448 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
| 2431 | vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; | 2449 | vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; |
| 2432 | 2450 | ||
| 2433 | if ((0 == vlan) && (0 == qos)) | 2451 | if ((0 == vlan) && (0 == qos)) |
| @@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, | |||
| 2455 | struct mlx4_priv *priv; | 2473 | struct mlx4_priv *priv; |
| 2456 | 2474 | ||
| 2457 | priv = mlx4_priv(dev); | 2475 | priv = mlx4_priv(dev); |
| 2476 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
| 2458 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; | 2477 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; |
| 2459 | 2478 | ||
| 2460 | if (MLX4_VGT != vp_oper->state.default_vlan) { | 2479 | if (MLX4_VGT != vp_oper->state.default_vlan) { |
| @@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) | |||
| 2482 | if (slave < 0) | 2501 | if (slave < 0) |
| 2483 | return -EINVAL; | 2502 | return -EINVAL; |
| 2484 | 2503 | ||
| 2504 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
| 2485 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; | 2505 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; |
| 2486 | s_info->spoofchk = setting; | 2506 | s_info->spoofchk = setting; |
| 2487 | 2507 | ||
| @@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat | |||
| 2535 | if (slave < 0) | 2555 | if (slave < 0) |
| 2536 | return -EINVAL; | 2556 | return -EINVAL; |
| 2537 | 2557 | ||
| 2558 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
| 2538 | switch (link_state) { | 2559 | switch (link_state) { |
| 2539 | case IFLA_VF_LINK_STATE_AUTO: | 2560 | case IFLA_VF_LINK_STATE_AUTO: |
| 2540 | /* get current link state */ | 2561 | /* get current link state */ |
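mlx4_slaves_closest_port() clamps a caller-supplied port into the range of ports that are actually active for the slave, with min_port and the active count coming from find_first_bit() and bitmap_weight() on the active-port bitmap. Once those two numbers are known it is an ordinary clamp; the runnable illustration below uses made-up port numbers.

#include <stdio.h>

/* Clamp port into [min_port, min_port + n_active - 1], mirroring the
 * shape of mlx4_slaves_closest_port() after the bitmap lookups. */
static int closest_port(int port, int min_port, int n_active)
{
	int max_port = min_port + n_active;	/* one past the last valid port */

	if (port < min_port)
		port = min_port;
	else if (port >= max_port)
		port = max_port - 1;

	return port;
}

int main(void)
{
	/* a slave that owns ports 2 and 3: min_port = 2, two active ports */
	printf("%d\n", closest_port(1, 2, 2));	/* 2: below the range, pulled up */
	printf("%d\n", closest_port(2, 2, 2));	/* 2: already valid */
	printf("%d\n", closest_port(5, 2, 2));	/* 3: above the range, pulled down */
	return 0;
}

The callers patched above (set_vf_mac, set_vf_vlan, set_vf_spoofchk, set_vf_link_state and the default-vlan lookup) all funnel their port argument through this clamp before indexing the per-port state.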
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index e22f24f784fc..35ff2925110a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
| @@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, | |||
| 487 | struct mlx4_en_dev *mdev = priv->mdev; | 487 | struct mlx4_en_dev *mdev = priv->mdev; |
| 488 | int err; | 488 | int err; |
| 489 | 489 | ||
| 490 | if (pause->autoneg) | ||
| 491 | return -EINVAL; | ||
| 492 | |||
| 490 | priv->prof->tx_pause = pause->tx_pause != 0; | 493 | priv->prof->tx_pause = pause->tx_pause != 0; |
| 491 | priv->prof->rx_pause = pause->rx_pause != 0; | 494 | priv->prof->rx_pause = pause->rx_pause != 0; |
| 492 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | 495 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 7d717eccb7b0..193a6adb5d04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
| @@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
| 298 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); | 298 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | /* Must protect against concurrent access */ | ||
| 301 | int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | 302 | int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, |
| 302 | struct mlx4_mpt_entry ***mpt_entry) | 303 | struct mlx4_mpt_entry ***mpt_entry) |
| 303 | { | 304 | { |
| @@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | |||
| 305 | int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); | 306 | int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); |
| 306 | struct mlx4_cmd_mailbox *mailbox = NULL; | 307 | struct mlx4_cmd_mailbox *mailbox = NULL; |
| 307 | 308 | ||
| 308 | /* Make sure that at this point we have single-threaded access only */ | ||
| 309 | |||
| 310 | if (mmr->enabled != MLX4_MPT_EN_HW) | 309 | if (mmr->enabled != MLX4_MPT_EN_HW) |
| 311 | return -EINVAL; | 310 | return -EINVAL; |
| 312 | 311 | ||
| 313 | err = mlx4_HW2SW_MPT(dev, NULL, key); | 312 | err = mlx4_HW2SW_MPT(dev, NULL, key); |
| 314 | |||
| 315 | if (err) { | 313 | if (err) { |
| 316 | mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); | 314 | mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); |
| 317 | mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); | 315 | mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); |
| @@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | |||
| 333 | 0, MLX4_CMD_QUERY_MPT, | 331 | 0, MLX4_CMD_QUERY_MPT, |
| 334 | MLX4_CMD_TIME_CLASS_B, | 332 | MLX4_CMD_TIME_CLASS_B, |
| 335 | MLX4_CMD_WRAPPED); | 333 | MLX4_CMD_WRAPPED); |
| 336 | |||
| 337 | if (err) | 334 | if (err) |
| 338 | goto free_mailbox; | 335 | goto free_mailbox; |
| 339 | 336 | ||
| @@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | |||
| 378 | err = mlx4_SW2HW_MPT(dev, mailbox, key); | 375 | err = mlx4_SW2HW_MPT(dev, mailbox, key); |
| 379 | } | 376 | } |
| 380 | 377 | ||
| 381 | mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; | 378 | if (!err) { |
| 382 | if (!err) | 379 | mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; |
| 383 | mmr->enabled = MLX4_MPT_EN_HW; | 380 | mmr->enabled = MLX4_MPT_EN_HW; |
| 381 | } | ||
| 384 | return err; | 382 | return err; |
| 385 | } | 383 | } |
| 386 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); | 384 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); |
| @@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt); | |||
| 400 | int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, | 398 | int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, |
| 401 | u32 pdn) | 399 | u32 pdn) |
| 402 | { | 400 | { |
| 403 | u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags); | 401 | u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK; |
| 404 | /* The wrapper function will put the slave's id here */ | 402 | /* The wrapper function will put the slave's id here */ |
| 405 | if (mlx4_is_mfunc(dev)) | 403 | if (mlx4_is_mfunc(dev)) |
| 406 | pd_flags &= ~MLX4_MPT_PD_VF_MASK; | 404 | pd_flags &= ~MLX4_MPT_PD_VF_MASK; |
| 407 | mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) | | 405 | |
| 406 | mpt_entry->pd_flags = cpu_to_be32(pd_flags | | ||
| 408 | (pdn & MLX4_MPT_PD_MASK) | 407 | (pdn & MLX4_MPT_PD_MASK) |
| 409 | | MLX4_MPT_PD_FLAG_EN_INV); | 408 | | MLX4_MPT_PD_FLAG_EN_INV); |
| 410 | return 0; | 409 | return 0; |
| @@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
| 600 | { | 599 | { |
| 601 | int err; | 600 | int err; |
| 602 | 601 | ||
| 603 | mpt_entry->start = cpu_to_be64(mr->iova); | 602 | mpt_entry->start = cpu_to_be64(iova); |
| 604 | mpt_entry->length = cpu_to_be64(mr->size); | 603 | mpt_entry->length = cpu_to_be64(size); |
| 605 | mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); | 604 | mpt_entry->entity_size = cpu_to_be32(page_shift); |
| 606 | 605 | ||
| 607 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); | 606 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); |
| 608 | if (err) | 607 | if (err) |
| 609 | return err; | 608 | return err; |
| 610 | 609 | ||
| 610 | mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | | ||
| 611 | MLX4_MPT_PD_FLAG_EN_INV); | ||
| 612 | mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | | ||
| 613 | MLX4_MPT_FLAG_SW_OWNS); | ||
| 611 | if (mr->mtt.order < 0) { | 614 | if (mr->mtt.order < 0) { |
| 612 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); | 615 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); |
| 613 | mpt_entry->mtt_addr = 0; | 616 | mpt_entry->mtt_addr = 0; |
| @@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
| 617 | if (mr->mtt.page_shift == 0) | 620 | if (mr->mtt.page_shift == 0) |
| 618 | mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); | 621 | mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); |
| 619 | } | 622 | } |
| 623 | if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { | ||
| 624 | /* fast register MR in free state */ | ||
| 625 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); | ||
| 626 | mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | | ||
| 627 | MLX4_MPT_PD_FLAG_RAE); | ||
| 628 | } else { | ||
| 629 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); | ||
| 630 | } | ||
| 620 | mr->enabled = MLX4_MPT_EN_SW; | 631 | mr->enabled = MLX4_MPT_EN_SW; |
| 621 | 632 | ||
| 622 | return 0; | 633 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 9ba0c1ca10d5..94eeb2c7d7e4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
| @@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev, | |||
| 103 | int i; | 103 | int i; |
| 104 | 104 | ||
| 105 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | 105 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { |
| 106 | if ((mac & MLX4_MAC_MASK) == | 106 | if (table->refs[i] && |
| 107 | (MLX4_MAC_MASK & mac) == | ||
| 107 | (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) | 108 | (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) |
| 108 | return i; | 109 | return i; |
| 109 | } | 110 | } |
| @@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) | |||
| 165 | 166 | ||
| 166 | mutex_lock(&table->mutex); | 167 | mutex_lock(&table->mutex); |
| 167 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | 168 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { |
| 168 | if (free < 0 && !table->entries[i]) { | 169 | if (!table->refs[i]) { |
| 169 | free = i; | 170 | if (free < 0) |
| 171 | free = i; | ||
| 170 | continue; | 172 | continue; |
| 171 | } | 173 | } |
| 172 | 174 | ||
| 173 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | 175 | if ((MLX4_MAC_MASK & mac) == |
| 176 | (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | ||
| 174 | /* MAC already registered, increment ref count */ | 177 | /* MAC already registered, increment ref count */ |
| 175 | err = i; | 178 | err = i; |
| 176 | ++table->refs[i]; | 179 | ++table->refs[i]; |
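The mlx4 port-table hunks change both lookups to key off table->refs[]: find_index() only matches entries that still have references, and __mlx4_register_mac() picks its free slot by refs[i] == 0 instead of trusting whatever stale value entries[i] still holds. The runnable sketch below mirrors that refcount-driven table (sizes and MAC values are made up).

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE 8

static uint64_t entries[TABLE_SIZE];	/* stored addresses */
static int refs[TABLE_SIZE];		/* reference counts */

/* Reuse a referenced slot holding the same address, otherwise take the
 * first slot with no references; slots with refs == 0 are free even if
 * entries[] still holds an old value. */
static int register_mac(uint64_t mac)
{
	int free_slot = -1;

	for (int i = 0; i < TABLE_SIZE; i++) {
		if (!refs[i]) {
			if (free_slot < 0)
				free_slot = i;
			continue;		/* never match a stale entry */
		}
		if (entries[i] == mac) {
			refs[i]++;		/* already registered */
			return i;
		}
	}

	if (free_slot < 0)
		return -1;			/* table full */

	entries[free_slot] = mac;
	refs[free_slot] = 1;
	return free_slot;
}

int main(void)
{
	/* slot 0 is live; slot 1 holds a stale leftover with no references */
	entries[0] = 0x001122334455ULL; refs[0] = 1;
	entries[1] = 0xaabbccddeeffULL; refs[1] = 0;

	printf("slot %d\n", register_mac(0x001122334455ULL)); /* 0: refcount bumped */
	printf("slot %d\n", register_mac(0xaabbccddeeffULL)); /* 1: treated as free, fresh count */
	return 0;
}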
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 0dc31d85fc3b..2301365c79c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
| @@ -390,13 +390,14 @@ err_icm: | |||
| 390 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); | 390 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); |
| 391 | 391 | ||
| 392 | #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC | 392 | #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC |
| 393 | int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, | 393 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
| 394 | enum mlx4_update_qp_attr attr, | 394 | enum mlx4_update_qp_attr attr, |
| 395 | struct mlx4_update_qp_params *params) | 395 | struct mlx4_update_qp_params *params) |
| 396 | { | 396 | { |
| 397 | struct mlx4_cmd_mailbox *mailbox; | 397 | struct mlx4_cmd_mailbox *mailbox; |
| 398 | struct mlx4_update_qp_context *cmd; | 398 | struct mlx4_update_qp_context *cmd; |
| 399 | u64 pri_addr_path_mask = 0; | 399 | u64 pri_addr_path_mask = 0; |
| 400 | u64 qp_mask = 0; | ||
| 400 | int err = 0; | 401 | int err = 0; |
| 401 | 402 | ||
| 402 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 403 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
| @@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, | |||
| 413 | cmd->qp_context.pri_path.grh_mylmc = params->smac_index; | 414 | cmd->qp_context.pri_path.grh_mylmc = params->smac_index; |
| 414 | } | 415 | } |
| 415 | 416 | ||
| 417 | if (attr & MLX4_UPDATE_QP_VSD) { | ||
| 418 | qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD; | ||
| 419 | if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE) | ||
| 420 | cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN); | ||
| 421 | } | ||
| 422 | |||
| 416 | cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); | 423 | cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); |
| 424 | cmd->qp_mask = cpu_to_be64(qp_mask); | ||
| 417 | 425 | ||
| 418 | err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, | 426 | err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0, |
| 419 | MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, | 427 | MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, |
| 420 | MLX4_CMD_NATIVE); | 428 | MLX4_CMD_NATIVE); |
| 421 | 429 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1089367fed22..5d2498dcf536 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
| 702 | struct mlx4_qp_context *qpc = inbox->buf + 8; | 702 | struct mlx4_qp_context *qpc = inbox->buf + 8; |
| 703 | struct mlx4_vport_oper_state *vp_oper; | 703 | struct mlx4_vport_oper_state *vp_oper; |
| 704 | struct mlx4_priv *priv; | 704 | struct mlx4_priv *priv; |
| 705 | u32 qp_type; | ||
| 705 | int port; | 706 | int port; |
| 706 | 707 | ||
| 707 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; | 708 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; |
| 708 | priv = mlx4_priv(dev); | 709 | priv = mlx4_priv(dev); |
| 709 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; | 710 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; |
| 711 | qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; | ||
| 710 | 712 | ||
| 711 | if (MLX4_VGT != vp_oper->state.default_vlan) { | 713 | if (MLX4_VGT != vp_oper->state.default_vlan) { |
| 712 | /* the reserved QPs (special, proxy, tunnel) | 714 | /* the reserved QPs (special, proxy, tunnel) |
| @@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
| 715 | if (mlx4_is_qp_reserved(dev, qpn)) | 717 | if (mlx4_is_qp_reserved(dev, qpn)) |
| 716 | return 0; | 718 | return 0; |
| 717 | 719 | ||
| 718 | /* force strip vlan by clear vsd */ | 720 | /* force vlan stripping by clearing vsd; MLX QP refers to Raw Ethernet */ |
| 719 | qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); | 721 | if (qp_type == MLX4_QP_ST_UD || |
| 722 | (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) { | ||
| 723 | if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) { | ||
| 724 | *(__be32 *)inbox->buf = | ||
| 725 | cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | | ||
| 726 | MLX4_QP_OPTPAR_VLAN_STRIPPING); | ||
| 727 | qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); | ||
| 728 | } else { | ||
| 729 | struct mlx4_update_qp_params params = {.flags = 0}; | ||
| 730 | |||
| 731 | mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); | ||
| 732 | } | ||
| 733 | } | ||
| 720 | 734 | ||
| 721 | if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && | 735 | if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && |
| 722 | dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { | 736 | dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { |
| @@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
| 3998 | } | 4012 | } |
| 3999 | 4013 | ||
| 4000 | port = (rqp->sched_queue >> 6 & 1) + 1; | 4014 | port = (rqp->sched_queue >> 6 & 1) + 1; |
| 4001 | smac_index = cmd->qp_context.pri_path.grh_mylmc; | 4015 | |
| 4002 | err = mac_find_smac_ix_in_slave(dev, slave, port, | 4016 | if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) { |
| 4003 | smac_index, &mac); | 4017 | smac_index = cmd->qp_context.pri_path.grh_mylmc; |
| 4004 | if (err) { | 4018 | err = mac_find_smac_ix_in_slave(dev, slave, port, |
| 4005 | mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", | 4019 | smac_index, &mac); |
| 4006 | qpn, smac_index); | 4020 | |
| 4007 | goto err_mac; | 4021 | if (err) { |
| 4022 | mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", | ||
| 4023 | qpn, smac_index); | ||
| 4024 | goto err_mac; | ||
| 4025 | } | ||
| 4008 | } | 4026 | } |
| 4009 | 4027 | ||
| 4010 | err = mlx4_cmd(dev, inbox->dma, | 4028 | err = mlx4_cmd(dev, inbox->dma, |
| @@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) | |||
| 4818 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; | 4836 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; |
| 4819 | 4837 | ||
| 4820 | upd_context = mailbox->buf; | 4838 | upd_context = mailbox->buf; |
| 4821 | upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD); | 4839 | upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); |
| 4822 | 4840 | ||
| 4823 | spin_lock_irq(mlx4_tlock(dev)); | 4841 | spin_lock_irq(mlx4_tlock(dev)); |
| 4824 | list_for_each_entry_safe(qp, tmp, qp_list, com.list) { | 4842 | list_for_each_entry_safe(qp, tmp, qp_list, com.list) { |
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 979c6980639f..a42293092ea4 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c | |||
| @@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) | |||
| 290 | /* Read the hardware TX timestamp if one was recorded */ | 290 | /* Read the hardware TX timestamp if one was recorded */ |
| 291 | if (unlikely(re.s.tstamp)) { | 291 | if (unlikely(re.s.tstamp)) { |
| 292 | struct skb_shared_hwtstamps ts; | 292 | struct skb_shared_hwtstamps ts; |
| 293 | u64 ns; | ||
| 294 | |||
| 293 | memset(&ts, 0, sizeof(ts)); | 295 | memset(&ts, 0, sizeof(ts)); |
| 294 | /* Read the timestamp */ | 296 | /* Read the timestamp */ |
| 295 | u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); | 297 | ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); |
| 296 | /* Remove the timestamp from the FIFO */ | 298 | /* Remove the timestamp from the FIFO */ |
| 297 | cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); | 299 | cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); |
| 298 | /* Tell the kernel about the timestamp */ | 300 | /* Tell the kernel about the timestamp */ |
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index 44c8be1c6805..5f7a35212796 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | |||
| @@ -7,6 +7,7 @@ config PCH_GBE | |||
| 7 | depends on PCI && (X86_32 || COMPILE_TEST) | 7 | depends on PCI && (X86_32 || COMPILE_TEST) |
| 8 | select MII | 8 | select MII |
| 9 | select PTP_1588_CLOCK_PCH | 9 | select PTP_1588_CLOCK_PCH |
| 10 | select NET_PTP_CLASSIFY | ||
| 10 | ---help--- | 11 | ---help--- |
| 11 | This is a gigabit ethernet driver for EG20T PCH. | 12 | This is a gigabit ethernet driver for EG20T PCH. |
| 12 | EG20T PCH is the platform controller hub that is used in Intel's | 13 | EG20T PCH is the platform controller hub that is used in Intel's |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 91652e7235e4..0921302553c6 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev, | |||
| 1783 | netdev_features_t features) | 1783 | netdev_features_t features) |
| 1784 | { | 1784 | { |
| 1785 | struct rtl8169_private *tp = netdev_priv(dev); | 1785 | struct rtl8169_private *tp = netdev_priv(dev); |
| 1786 | netdev_features_t changed = features ^ dev->features; | ||
| 1787 | void __iomem *ioaddr = tp->mmio_addr; | 1786 | void __iomem *ioaddr = tp->mmio_addr; |
| 1787 | u32 rx_config; | ||
| 1788 | 1788 | ||
| 1789 | if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | | 1789 | rx_config = RTL_R32(RxConfig); |
| 1790 | NETIF_F_HW_VLAN_CTAG_RX))) | 1790 | if (features & NETIF_F_RXALL) |
| 1791 | return; | 1791 | rx_config |= (AcceptErr | AcceptRunt); |
| 1792 | else | ||
| 1793 | rx_config &= ~(AcceptErr | AcceptRunt); | ||
| 1792 | 1794 | ||
| 1793 | if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) { | 1795 | RTL_W32(RxConfig, rx_config); |
| 1794 | if (features & NETIF_F_RXCSUM) | ||
| 1795 | tp->cp_cmd |= RxChkSum; | ||
| 1796 | else | ||
| 1797 | tp->cp_cmd &= ~RxChkSum; | ||
| 1798 | 1796 | ||
| 1799 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) | 1797 | if (features & NETIF_F_RXCSUM) |
| 1800 | tp->cp_cmd |= RxVlan; | 1798 | tp->cp_cmd |= RxChkSum; |
| 1801 | else | 1799 | else |
| 1802 | tp->cp_cmd &= ~RxVlan; | 1800 | tp->cp_cmd &= ~RxChkSum; |
| 1803 | 1801 | ||
| 1804 | RTL_W16(CPlusCmd, tp->cp_cmd); | 1802 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
| 1805 | RTL_R16(CPlusCmd); | 1803 | tp->cp_cmd |= RxVlan; |
| 1806 | } | 1804 | else |
| 1807 | if (changed & NETIF_F_RXALL) { | 1805 | tp->cp_cmd &= ~RxVlan; |
| 1808 | int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt)); | 1806 | |
| 1809 | if (features & NETIF_F_RXALL) | 1807 | tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum); |
| 1810 | tmp |= (AcceptErr | AcceptRunt); | 1808 | |
| 1811 | RTL_W32(RxConfig, tmp); | 1809 | RTL_W16(CPlusCmd, tp->cp_cmd); |
| 1812 | } | 1810 | RTL_R16(CPlusCmd); |
| 1813 | } | 1811 | } |
| 1814 | 1812 | ||
| 1815 | static int rtl8169_set_features(struct net_device *dev, | 1813 | static int rtl8169_set_features(struct net_device *dev, |
| @@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev, | |||
| 1817 | { | 1815 | { |
| 1818 | struct rtl8169_private *tp = netdev_priv(dev); | 1816 | struct rtl8169_private *tp = netdev_priv(dev); |
| 1819 | 1817 | ||
| 1818 | features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; | ||
| 1819 | |||
| 1820 | rtl_lock_work(tp); | 1820 | rtl_lock_work(tp); |
| 1821 | __rtl8169_set_features(dev, features); | 1821 | if (features ^ dev->features) |
| 1822 | __rtl8169_set_features(dev, features); | ||
| 1822 | rtl_unlock_work(tp); | 1823 | rtl_unlock_work(tp); |
| 1823 | 1824 | ||
| 1824 | return 0; | 1825 | return 0; |
| @@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) | |||
| 7118 | } | 7119 | } |
| 7119 | } | 7120 | } |
| 7120 | 7121 | ||
| 7121 | static int | 7122 | static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 7122 | rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
| 7123 | { | 7123 | { |
| 7124 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; | 7124 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; |
| 7125 | const unsigned int region = cfg->region; | 7125 | const unsigned int region = cfg->region; |
| @@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7194 | goto err_out_mwi_2; | 7194 | goto err_out_mwi_2; |
| 7195 | } | 7195 | } |
| 7196 | 7196 | ||
| 7197 | tp->cp_cmd = RxChkSum; | 7197 | tp->cp_cmd = 0; |
| 7198 | 7198 | ||
| 7199 | if ((sizeof(dma_addr_t) > 4) && | 7199 | if ((sizeof(dma_addr_t) > 4) && |
| 7200 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { | 7200 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { |
| @@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7235 | 7235 | ||
| 7236 | pci_set_master(pdev); | 7236 | pci_set_master(pdev); |
| 7237 | 7237 | ||
| 7238 | /* | ||
| 7239 | * Pretend we are using VLANs; This bypasses a nasty bug where | ||
| 7240 | * Interrupts stop flowing on high load on 8110SCd controllers. | ||
| 7241 | */ | ||
| 7242 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) | ||
| 7243 | tp->cp_cmd |= RxVlan; | ||
| 7244 | |||
| 7245 | rtl_init_mdio_ops(tp); | 7238 | rtl_init_mdio_ops(tp); |
| 7246 | rtl_init_pll_power_ops(tp); | 7239 | rtl_init_pll_power_ops(tp); |
| 7247 | rtl_init_jumbo_ops(tp); | 7240 | rtl_init_jumbo_ops(tp); |
| @@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7302 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | | 7295 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | |
| 7303 | NETIF_F_HIGHDMA; | 7296 | NETIF_F_HIGHDMA; |
| 7304 | 7297 | ||
| 7298 | tp->cp_cmd |= RxChkSum | RxVlan; | ||
| 7299 | |||
| 7300 | /* | ||
| 7301 | * Pretend we are using VLANs; This bypasses a nasty bug where | ||
| 7302 | * Interrupts stop flowing on high load on 8110SCd controllers. | ||
| 7303 | */ | ||
| 7305 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) | 7304 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) |
| 7306 | /* 8110SCd requires hardware Rx VLAN - disallow toggling */ | 7305 | /* Disallow toggling */ |
| 7307 | dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; | 7306 | dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; |
| 7308 | 7307 | ||
| 7309 | if (tp->txd_version == RTL_TD_0) | 7308 | if (tp->txd_version == RTL_TD_0) |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 0537381cd2f6..6859437b59fb 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
| @@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) | |||
| 2933 | u32 crc; | 2933 | u32 crc; |
| 2934 | int bit; | 2934 | int bit; |
| 2935 | 2935 | ||
| 2936 | if (!efx_dev_registered(efx)) | ||
| 2937 | return; | ||
| 2938 | |||
| 2936 | netif_addr_lock_bh(net_dev); | 2939 | netif_addr_lock_bh(net_dev); |
| 2937 | 2940 | ||
| 2938 | efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); | 2941 | efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); |
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 23c89ab5a6ad..f67539650c38 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
| @@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port, | |||
| 350 | if (IS_ERR(desc)) | 350 | if (IS_ERR(desc)) |
| 351 | return PTR_ERR(desc); | 351 | return PTR_ERR(desc); |
| 352 | 352 | ||
| 353 | if (desc->hdr.state != VIO_DESC_READY) | ||
| 354 | return 1; | ||
| 355 | |||
| 356 | rmb(); | ||
| 357 | |||
| 353 | viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", | 358 | viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", |
| 354 | desc->hdr.state, desc->hdr.ack, | 359 | desc->hdr.state, desc->hdr.ack, |
| 355 | desc->size, desc->ncookies, | 360 | desc->size, desc->ncookies, |
| 356 | desc->cookies[0].cookie_addr, | 361 | desc->cookies[0].cookie_addr, |
| 357 | desc->cookies[0].cookie_size); | 362 | desc->cookies[0].cookie_size); |
| 358 | 363 | ||
| 359 | if (desc->hdr.state != VIO_DESC_READY) | ||
| 360 | return 1; | ||
| 361 | err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); | 364 | err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); |
| 362 | if (err == -ECONNRESET) | 365 | if (err == -ECONNRESET) |
| 363 | return err; | 366 | return err; |
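The sunvnet hunk above is an ordering fix: the VIO_DESC_READY check now runs before any other descriptor field is touched (the debug print included), and an rmb() separates the state load from the payload loads so a stale payload cannot be observed for a just-published descriptor. A minimal C sketch of that check-then-barrier shape, using an illustrative descriptor type rather than the real vio structures:

    #include <linux/types.h>
    #include <asm/barrier.h>        /* rmb() */

    #define DEMO_DESC_READY 0x01

    /* Illustrative layout: the producer fills 'size' first, 'state' last. */
    struct demo_desc {
            u8  state;
            u32 size;
    };

    /* Returns 1 if the descriptor is not ready; 0 once *size is valid. */
    static int demo_peek_desc(const struct demo_desc *d, u32 *size)
    {
            if (d->state != DEMO_DESC_READY)
                    return 1;       /* don't read any other field yet */

            rmb();                  /* order the state load before the payload loads */

            *size = d->size;
            return 0;
    }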
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 999fb72688d2..e2a00287f8eb 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
| 699 | cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); | 699 | cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); |
| 700 | 700 | ||
| 701 | if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { | 701 | if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { |
| 702 | bool ndev_status = false; | ||
| 703 | struct cpsw_slave *slave = priv->slaves; | ||
| 704 | int n; | ||
| 705 | |||
| 706 | if (priv->data.dual_emac) { | ||
| 707 | /* In dual emac mode check for all interfaces */ | ||
| 708 | for (n = priv->data.slaves; n; n--, slave++) | ||
| 709 | if (netif_running(slave->ndev)) | ||
| 710 | ndev_status = true; | ||
| 711 | } | ||
| 712 | |||
| 713 | if (ndev_status && (status >= 0)) { | ||
| 714 | /* The packet received is for the interface which | ||
| 715 | * is already down and the other interface is up | ||
| 716 | * and running, instead of freeing it (which | ||
| 717 | * would reduce the number of rx descriptors | ||
| 718 | * in the DMA engine), requeue the skb back to cpdma. | ||
| 719 | */ | ||
| 720 | new_skb = skb; | ||
| 721 | goto requeue; | ||
| 722 | } | ||
| 723 | |||
| 702 | /* the interface is going down, skbs are purged */ | 724 | /* the interface is going down, skbs are purged */ |
| 703 | dev_kfree_skb_any(skb); | 725 | dev_kfree_skb_any(skb); |
| 704 | return; | 726 | return; |
| @@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
| 717 | new_skb = skb; | 739 | new_skb = skb; |
| 718 | } | 740 | } |
| 719 | 741 | ||
| 742 | requeue: | ||
| 720 | ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, | 743 | ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, |
| 721 | skb_tailroom(new_skb), 0); | 744 | skb_tailroom(new_skb), 0); |
| 722 | if (WARN_ON(ret < 0)) | 745 | if (WARN_ON(ret < 0)) |
| @@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev) | |||
| 2311 | struct net_device *ndev = platform_get_drvdata(pdev); | 2334 | struct net_device *ndev = platform_get_drvdata(pdev); |
| 2312 | struct cpsw_priv *priv = netdev_priv(ndev); | 2335 | struct cpsw_priv *priv = netdev_priv(ndev); |
| 2313 | 2336 | ||
| 2314 | if (netif_running(ndev)) | 2337 | if (priv->data.dual_emac) { |
| 2315 | cpsw_ndo_stop(ndev); | 2338 | int i; |
| 2316 | 2339 | ||
| 2317 | for_each_slave(priv, soft_reset_slave); | 2340 | for (i = 0; i < priv->data.slaves; i++) { |
| 2341 | if (netif_running(priv->slaves[i].ndev)) | ||
| 2342 | cpsw_ndo_stop(priv->slaves[i].ndev); | ||
| 2343 | soft_reset_slave(priv->slaves + i); | ||
| 2344 | } | ||
| 2345 | } else { | ||
| 2346 | if (netif_running(ndev)) | ||
| 2347 | cpsw_ndo_stop(ndev); | ||
| 2348 | for_each_slave(priv, soft_reset_slave); | ||
| 2349 | } | ||
| 2318 | 2350 | ||
| 2319 | pm_runtime_put_sync(&pdev->dev); | 2351 | pm_runtime_put_sync(&pdev->dev); |
| 2320 | 2352 | ||
| @@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev) | |||
| 2328 | { | 2360 | { |
| 2329 | struct platform_device *pdev = to_platform_device(dev); | 2361 | struct platform_device *pdev = to_platform_device(dev); |
| 2330 | struct net_device *ndev = platform_get_drvdata(pdev); | 2362 | struct net_device *ndev = platform_get_drvdata(pdev); |
| 2363 | struct cpsw_priv *priv = netdev_priv(ndev); | ||
| 2331 | 2364 | ||
| 2332 | pm_runtime_get_sync(&pdev->dev); | 2365 | pm_runtime_get_sync(&pdev->dev); |
| 2333 | 2366 | ||
| 2334 | /* Select default pin state */ | 2367 | /* Select default pin state */ |
| 2335 | pinctrl_pm_select_default_state(&pdev->dev); | 2368 | pinctrl_pm_select_default_state(&pdev->dev); |
| 2336 | 2369 | ||
| 2337 | if (netif_running(ndev)) | 2370 | if (priv->data.dual_emac) { |
| 2338 | cpsw_ndo_open(ndev); | 2371 | int i; |
| 2372 | |||
| 2373 | for (i = 0; i < priv->data.slaves; i++) { | ||
| 2374 | if (netif_running(priv->slaves[i].ndev)) | ||
| 2375 | cpsw_ndo_open(priv->slaves[i].ndev); | ||
| 2376 | } | ||
| 2377 | } else { | ||
| 2378 | if (netif_running(ndev)) | ||
| 2379 | cpsw_ndo_open(ndev); | ||
| 2380 | } | ||
| 2339 | return 0; | 2381 | return 0; |
| 2340 | } | 2382 | } |
| 2341 | 2383 | ||
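The first cpsw hunk keeps the RX descriptor pool from shrinking in dual-EMAC mode: a packet that arrives for an interface that is already down, while the other port is still running, is requeued to the CPDMA engine instead of being freed. A compressed sketch of that decision, with hypothetical structure and helper names standing in for the driver's internals:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Illustrative stand-ins for the driver's private data and requeue call. */
    struct demo_slave { struct net_device *ndev; };
    struct demo_priv {
            bool dual_emac;
            int num_slaves;
            struct demo_slave *slaves;
    };

    static void demo_requeue(struct demo_priv *priv, struct sk_buff *skb)
    {
            /* cpdma_chan_submit() in the real driver */
    }

    static bool demo_any_slave_running(struct demo_priv *priv)
    {
            int n;

            for (n = 0; n < priv->num_slaves; n++)
                    if (netif_running(priv->slaves[n].ndev))
                            return true;
            return false;
    }

    static void demo_rx_handler(struct demo_priv *priv, struct net_device *ndev,
                                struct sk_buff *skb, int status)
    {
            if (status < 0 || !netif_running(ndev)) {
                    if (priv->dual_emac && status >= 0 &&
                        demo_any_slave_running(priv)) {
                            /* Other port still up: hand the buffer back to
                             * DMA instead of shrinking the descriptor pool. */
                            demo_requeue(priv, skb);
                            return;
                    }
                    dev_kfree_skb_any(skb); /* interface is going down */
                    return;
            }
            /* normal receive path continues here */
    }

The later suspend/resume hunks in the same file apply the same dual-EMAC awareness: each slave's net_device is stopped and soft-reset (or reopened on resume) individually, instead of acting only on the platform-level device.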
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a96955597755..726edabff26b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/netpoll.h> | 36 | #include <linux/netpoll.h> |
| 37 | 37 | ||
| 38 | #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) | 38 | #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) |
| 39 | #define MACVLAN_BC_QUEUE_LEN 1000 | ||
| 39 | 40 | ||
| 40 | struct macvlan_port { | 41 | struct macvlan_port { |
| 41 | struct net_device *dev; | 42 | struct net_device *dev; |
| @@ -248,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, | |||
| 248 | goto err; | 249 | goto err; |
| 249 | 250 | ||
| 250 | spin_lock(&port->bc_queue.lock); | 251 | spin_lock(&port->bc_queue.lock); |
| 251 | if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) { | 252 | if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { |
| 252 | __skb_queue_tail(&port->bc_queue, nskb); | 253 | __skb_queue_tail(&port->bc_queue, nskb); |
| 253 | err = 0; | 254 | err = 0; |
| 254 | } | 255 | } |
| @@ -806,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, | |||
| 806 | features, | 807 | features, |
| 807 | mask); | 808 | mask); |
| 808 | features |= ALWAYS_ON_FEATURES; | 809 | features |= ALWAYS_ON_FEATURES; |
| 810 | features &= ~NETIF_F_NETNS_LOCAL; | ||
| 809 | 811 | ||
| 810 | return features; | 812 | return features; |
| 811 | } | 813 | } |
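The macvlan hunk replaces the broadcast-backlog bound taken from the lower device's tx_queue_len, which can legitimately be zero and would then drop every queued broadcast, with a fixed MACVLAN_BC_QUEUE_LEN cap. A hedged sketch of that bounded-enqueue-under-lock pattern; the constant and queue here are local to the example:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    #define DEMO_BC_QUEUE_LEN 1000  /* fixed cap, independent of tx_queue_len */

    /* Returns 0 if queued, -ENOSPC if the backlog is already full. */
    static int demo_bc_enqueue(struct sk_buff_head *q, struct sk_buff *nskb)
    {
            int err = -ENOSPC;

            spin_lock(&q->lock);
            if (skb_queue_len(q) < DEMO_BC_QUEUE_LEN) {
                    __skb_queue_tail(q, nskb);
                    err = 0;
            }
            spin_unlock(&q->lock);
            return err;
    }

The second macvlan hunk, clearing NETIF_F_NETNS_LOCAL in fix_features, appears unrelated to the queue: it keeps the macvlan itself from being marked namespace-local merely because its lower device is.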
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fd0ea7c50ee6..011dbda2b2f1 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
| @@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = { | |||
| 592 | .phy_id = PHY_ID_KSZ9031, | 592 | .phy_id = PHY_ID_KSZ9031, |
| 593 | .phy_id_mask = 0x00fffff0, | 593 | .phy_id_mask = 0x00fffff0, |
| 594 | .name = "Micrel KSZ9031 Gigabit PHY", | 594 | .name = "Micrel KSZ9031 Gigabit PHY", |
| 595 | .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause | 595 | .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), |
| 596 | | SUPPORTED_Asym_Pause), | ||
| 597 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | 596 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, |
| 598 | .config_init = ksz9031_config_init, | 597 | .config_init = ksz9031_config_init, |
| 599 | .config_aneg = genphy_config_aneg, | 598 | .config_aneg = genphy_config_aneg, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 87f710476217..74760e8143e3 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -2019,7 +2019,7 @@ static int rtl8153_enable(struct r8152 *tp) | |||
| 2019 | return rtl_enable(tp); | 2019 | return rtl_enable(tp); |
| 2020 | } | 2020 | } |
| 2021 | 2021 | ||
| 2022 | static void rtl8152_disable(struct r8152 *tp) | 2022 | static void rtl_disable(struct r8152 *tp) |
| 2023 | { | 2023 | { |
| 2024 | u32 ocp_data; | 2024 | u32 ocp_data; |
| 2025 | int i; | 2025 | int i; |
| @@ -2232,6 +2232,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp) | |||
| 2232 | LINKENA | DIS_SDSAVE); | 2232 | LINKENA | DIS_SDSAVE); |
| 2233 | } | 2233 | } |
| 2234 | 2234 | ||
| 2235 | static void rtl8152_disable(struct r8152 *tp) | ||
| 2236 | { | ||
| 2237 | r8152b_disable_aldps(tp); | ||
| 2238 | rtl_disable(tp); | ||
| 2239 | r8152b_enable_aldps(tp); | ||
| 2240 | } | ||
| 2241 | |||
| 2235 | static void r8152b_hw_phy_cfg(struct r8152 *tp) | 2242 | static void r8152b_hw_phy_cfg(struct r8152 *tp) |
| 2236 | { | 2243 | { |
| 2237 | u16 data; | 2244 | u16 data; |
| @@ -2242,11 +2249,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp) | |||
| 2242 | r8152_mdio_write(tp, MII_BMCR, data); | 2249 | r8152_mdio_write(tp, MII_BMCR, data); |
| 2243 | } | 2250 | } |
| 2244 | 2251 | ||
| 2245 | r8152b_disable_aldps(tp); | ||
| 2246 | |||
| 2247 | rtl_clear_bp(tp); | 2252 | rtl_clear_bp(tp); |
| 2248 | 2253 | ||
| 2249 | r8152b_enable_aldps(tp); | ||
| 2250 | set_bit(PHY_RESET, &tp->flags); | 2254 | set_bit(PHY_RESET, &tp->flags); |
| 2251 | } | 2255 | } |
| 2252 | 2256 | ||
| @@ -2255,9 +2259,6 @@ static void r8152b_exit_oob(struct r8152 *tp) | |||
| 2255 | u32 ocp_data; | 2259 | u32 ocp_data; |
| 2256 | int i; | 2260 | int i; |
| 2257 | 2261 | ||
| 2258 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
| 2259 | return; | ||
| 2260 | |||
| 2261 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); | 2262 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); |
| 2262 | ocp_data &= ~RCR_ACPT_ALL; | 2263 | ocp_data &= ~RCR_ACPT_ALL; |
| 2263 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); | 2264 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); |
| @@ -2347,7 +2348,7 @@ static void r8152b_enter_oob(struct r8152 *tp) | |||
| 2347 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); | 2348 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); |
| 2348 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); | 2349 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); |
| 2349 | 2350 | ||
| 2350 | rtl8152_disable(tp); | 2351 | rtl_disable(tp); |
| 2351 | 2352 | ||
| 2352 | for (i = 0; i < 1000; i++) { | 2353 | for (i = 0; i < 1000; i++) { |
| 2353 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); | 2354 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); |
| @@ -2485,9 +2486,6 @@ static void r8153_first_init(struct r8152 *tp) | |||
| 2485 | u32 ocp_data; | 2486 | u32 ocp_data; |
| 2486 | int i; | 2487 | int i; |
| 2487 | 2488 | ||
| 2488 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
| 2489 | return; | ||
| 2490 | |||
| 2491 | rxdy_gated_en(tp, true); | 2489 | rxdy_gated_en(tp, true); |
| 2492 | r8153_teredo_off(tp); | 2490 | r8153_teredo_off(tp); |
| 2493 | 2491 | ||
| @@ -2560,7 +2558,7 @@ static void r8153_enter_oob(struct r8152 *tp) | |||
| 2560 | ocp_data &= ~NOW_IS_OOB; | 2558 | ocp_data &= ~NOW_IS_OOB; |
| 2561 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); | 2559 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); |
| 2562 | 2560 | ||
| 2563 | rtl8152_disable(tp); | 2561 | rtl_disable(tp); |
| 2564 | 2562 | ||
| 2565 | for (i = 0; i < 1000; i++) { | 2563 | for (i = 0; i < 1000; i++) { |
| 2566 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); | 2564 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); |
| @@ -2624,6 +2622,13 @@ static void r8153_enable_aldps(struct r8152 *tp) | |||
| 2624 | ocp_reg_write(tp, OCP_POWER_CFG, data); | 2622 | ocp_reg_write(tp, OCP_POWER_CFG, data); |
| 2625 | } | 2623 | } |
| 2626 | 2624 | ||
| 2625 | static void rtl8153_disable(struct r8152 *tp) | ||
| 2626 | { | ||
| 2627 | r8153_disable_aldps(tp); | ||
| 2628 | rtl_disable(tp); | ||
| 2629 | r8153_enable_aldps(tp); | ||
| 2630 | } | ||
| 2631 | |||
| 2627 | static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) | 2632 | static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) |
| 2628 | { | 2633 | { |
| 2629 | u16 bmcr, anar, gbcr; | 2634 | u16 bmcr, anar, gbcr; |
| @@ -2714,6 +2719,16 @@ out: | |||
| 2714 | return ret; | 2719 | return ret; |
| 2715 | } | 2720 | } |
| 2716 | 2721 | ||
| 2722 | static void rtl8152_up(struct r8152 *tp) | ||
| 2723 | { | ||
| 2724 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
| 2725 | return; | ||
| 2726 | |||
| 2727 | r8152b_disable_aldps(tp); | ||
| 2728 | r8152b_exit_oob(tp); | ||
| 2729 | r8152b_enable_aldps(tp); | ||
| 2730 | } | ||
| 2731 | |||
| 2717 | static void rtl8152_down(struct r8152 *tp) | 2732 | static void rtl8152_down(struct r8152 *tp) |
| 2718 | { | 2733 | { |
| 2719 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { | 2734 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { |
| @@ -2727,6 +2742,16 @@ static void rtl8152_down(struct r8152 *tp) | |||
| 2727 | r8152b_enable_aldps(tp); | 2742 | r8152b_enable_aldps(tp); |
| 2728 | } | 2743 | } |
| 2729 | 2744 | ||
| 2745 | static void rtl8153_up(struct r8152 *tp) | ||
| 2746 | { | ||
| 2747 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
| 2748 | return; | ||
| 2749 | |||
| 2750 | r8153_disable_aldps(tp); | ||
| 2751 | r8153_first_init(tp); | ||
| 2752 | r8153_enable_aldps(tp); | ||
| 2753 | } | ||
| 2754 | |||
| 2730 | static void rtl8153_down(struct r8152 *tp) | 2755 | static void rtl8153_down(struct r8152 *tp) |
| 2731 | { | 2756 | { |
| 2732 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { | 2757 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { |
| @@ -2946,6 +2971,8 @@ static void r8152b_init(struct r8152 *tp) | |||
| 2946 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 2971 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
| 2947 | return; | 2972 | return; |
| 2948 | 2973 | ||
| 2974 | r8152b_disable_aldps(tp); | ||
| 2975 | |||
| 2949 | if (tp->version == RTL_VER_01) { | 2976 | if (tp->version == RTL_VER_01) { |
| 2950 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); | 2977 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); |
| 2951 | ocp_data &= ~LED_MODE_MASK; | 2978 | ocp_data &= ~LED_MODE_MASK; |
| @@ -2984,6 +3011,7 @@ static void r8153_init(struct r8152 *tp) | |||
| 2984 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 3011 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
| 2985 | return; | 3012 | return; |
| 2986 | 3013 | ||
| 3014 | r8153_disable_aldps(tp); | ||
| 2987 | r8153_u1u2en(tp, false); | 3015 | r8153_u1u2en(tp, false); |
| 2988 | 3016 | ||
| 2989 | for (i = 0; i < 500; i++) { | 3017 | for (i = 0; i < 500; i++) { |
| @@ -3392,7 +3420,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | |||
| 3392 | ops->init = r8152b_init; | 3420 | ops->init = r8152b_init; |
| 3393 | ops->enable = rtl8152_enable; | 3421 | ops->enable = rtl8152_enable; |
| 3394 | ops->disable = rtl8152_disable; | 3422 | ops->disable = rtl8152_disable; |
| 3395 | ops->up = r8152b_exit_oob; | 3423 | ops->up = rtl8152_up; |
| 3396 | ops->down = rtl8152_down; | 3424 | ops->down = rtl8152_down; |
| 3397 | ops->unload = rtl8152_unload; | 3425 | ops->unload = rtl8152_unload; |
| 3398 | ret = 0; | 3426 | ret = 0; |
| @@ -3400,8 +3428,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | |||
| 3400 | case PRODUCT_ID_RTL8153: | 3428 | case PRODUCT_ID_RTL8153: |
| 3401 | ops->init = r8153_init; | 3429 | ops->init = r8153_init; |
| 3402 | ops->enable = rtl8153_enable; | 3430 | ops->enable = rtl8153_enable; |
| 3403 | ops->disable = rtl8152_disable; | 3431 | ops->disable = rtl8153_disable; |
| 3404 | ops->up = r8153_first_init; | 3432 | ops->up = rtl8153_up; |
| 3405 | ops->down = rtl8153_down; | 3433 | ops->down = rtl8153_down; |
| 3406 | ops->unload = rtl8153_unload; | 3434 | ops->unload = rtl8153_unload; |
| 3407 | ret = 0; | 3435 | ret = 0; |
| @@ -3416,8 +3444,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | |||
| 3416 | case PRODUCT_ID_SAMSUNG: | 3444 | case PRODUCT_ID_SAMSUNG: |
| 3417 | ops->init = r8153_init; | 3445 | ops->init = r8153_init; |
| 3418 | ops->enable = rtl8153_enable; | 3446 | ops->enable = rtl8153_enable; |
| 3419 | ops->disable = rtl8152_disable; | 3447 | ops->disable = rtl8153_disable; |
| 3420 | ops->up = r8153_first_init; | 3448 | ops->up = rtl8153_up; |
| 3421 | ops->down = rtl8153_down; | 3449 | ops->down = rtl8153_down; |
| 3422 | ops->unload = rtl8153_unload; | 3450 | ops->unload = rtl8153_unload; |
| 3423 | ret = 0; | 3451 | ret = 0; |
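Across the r8152 hunks the raw MAC shutdown is renamed to rtl_disable(), and new chip-level wrappers (rtl8152_disable/rtl8153_disable, rtl8152_up/rtl8153_up) bracket the raw work with "ALDPS off, do the work, ALDPS back on". A schematic of that wrapper pattern, with placeholder names rather than the driver's real register accessors:

    #include <linux/types.h>

    struct demo_r8152 { unsigned long flags; };

    /* Bodies elided; only the bracketing order matters for the sketch. */
    static void demo_aldps_disable(struct demo_r8152 *tp) { /* PHY writes */ }
    static void demo_aldps_enable(struct demo_r8152 *tp)  { /* PHY writes */ }
    static void demo_mac_disable(struct demo_r8152 *tp)   { /* rtl_disable() */ }
    static void demo_mac_bring_up(struct demo_r8152 *tp)  { /* e.g. exit OOB */ }
    static bool demo_unplugged(struct demo_r8152 *tp)     { return false; }

    static void demo_chip_disable(struct demo_r8152 *tp)
    {
            demo_aldps_disable(tp);   /* keep power saving out of the way */
            demo_mac_disable(tp);
            demo_aldps_enable(tp);    /* restore once the MAC is quiesced */
    }

    static void demo_chip_up(struct demo_r8152 *tp)
    {
            if (demo_unplugged(tp))   /* nothing to do for a removed device */
                    return;

            demo_aldps_disable(tp);
            demo_mac_bring_up(tp);
            demo_aldps_enable(tp);
    }

The RTL8152_UNPLUG early returns dropped from r8152b_exit_oob() and r8153_first_init() are not lost: they move up into the new rtl8152_up()/rtl8153_up() wrappers.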
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c index 733be5178481..6ad44470d0f2 100644 --- a/drivers/net/wireless/ath/ath9k/common-beacon.c +++ b/drivers/net/wireless/ath/ath9k/common-beacon.c | |||
| @@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, | |||
| 57 | struct ath9k_beacon_state *bs) | 57 | struct ath9k_beacon_state *bs) |
| 58 | { | 58 | { |
| 59 | struct ath_common *common = ath9k_hw_common(ah); | 59 | struct ath_common *common = ath9k_hw_common(ah); |
| 60 | int dtim_intval, sleepduration; | 60 | int dtim_intval; |
| 61 | u64 tsf; | 61 | u64 tsf; |
| 62 | 62 | ||
| 63 | /* No need to configure beacon if we are not associated */ | 63 | /* No need to configure beacon if we are not associated */ |
| @@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, | |||
| 75 | * last beacon we received (which may be none). | 75 | * last beacon we received (which may be none). |
| 76 | */ | 76 | */ |
| 77 | dtim_intval = conf->intval * conf->dtim_period; | 77 | dtim_intval = conf->intval * conf->dtim_period; |
| 78 | sleepduration = ah->hw->conf.listen_interval * conf->intval; | ||
| 79 | 78 | ||
| 80 | /* | 79 | /* |
| 81 | * Pull nexttbtt forward to reflect the current | 80 | * Pull nexttbtt forward to reflect the current |
| @@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, | |||
| 113 | */ | 112 | */ |
| 114 | 113 | ||
| 115 | bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100), | 114 | bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100), |
| 116 | sleepduration)); | 115 | conf->intval)); |
| 117 | if (bs->bs_sleepduration > bs->bs_dtimperiod) | 116 | if (bs->bs_sleepduration > bs->bs_dtimperiod) |
| 118 | bs->bs_sleepduration = bs->bs_dtimperiod; | 117 | bs->bs_sleepduration = bs->bs_dtimperiod; |
| 119 | 118 | ||
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index bb86eb2ffc95..f0484b1b617e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
| @@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, | |||
| 978 | struct ath_hw *ah = common->ah; | 978 | struct ath_hw *ah = common->ah; |
| 979 | struct ath_htc_rx_status *rxstatus; | 979 | struct ath_htc_rx_status *rxstatus; |
| 980 | struct ath_rx_status rx_stats; | 980 | struct ath_rx_status rx_stats; |
| 981 | bool decrypt_error; | 981 | bool decrypt_error = false; |
| 982 | 982 | ||
| 983 | if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { | 983 | if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { |
| 984 | ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", | 984 | ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index e6ac8d2e610c..4b148bbb2bf6 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -513,7 +513,7 @@ irqreturn_t ath_isr(int irq, void *dev) | |||
| 513 | * touch anything. Note this can happen early | 513 | * touch anything. Note this can happen early |
| 514 | * on if the IRQ is shared. | 514 | * on if the IRQ is shared. |
| 515 | */ | 515 | */ |
| 516 | if (test_bit(ATH_OP_INVALID, &common->op_flags)) | 516 | if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) |
| 517 | return IRQ_NONE; | 517 | return IRQ_NONE; |
| 518 | 518 | ||
| 519 | /* shared irq, not for us */ | 519 | /* shared irq, not for us */ |
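The ath9k ISR hunk adds a NULL check on the hardware pointer: on a shared interrupt line the handler can fire before the driver's state is usable, so it must return IRQ_NONE without touching anything. A sketch of that defensive shape, with placeholder types and flag names:

    #include <linux/bitops.h>
    #include <linux/interrupt.h>

    #define DEMO_OP_INVALID 0

    struct demo_softc {
            void *hw;                 /* hardware state; may not exist yet */
            unsigned long op_flags;
    };

    static irqreturn_t demo_isr(int irq, void *dev_id)
    {
            struct demo_softc *sc = dev_id;

            /* Shared line: we may be called before init or after teardown. */
            if (!sc || !sc->hw || test_bit(DEMO_OP_INVALID, &sc->op_flags))
                    return IRQ_NONE;  /* not ready -- don't touch the hardware */

            /* ... real interrupt servicing would go here ... */
            return IRQ_HANDLED;
    }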
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index b8e2561ea645..fe3dc126b149 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig | |||
| @@ -27,10 +27,17 @@ config BRCMFMAC | |||
| 27 | one of the bus interface support. If you choose to build a module, | 27 | one of the bus interface support. If you choose to build a module, |
| 28 | it'll be called brcmfmac.ko. | 28 | it'll be called brcmfmac.ko. |
| 29 | 29 | ||
| 30 | config BRCMFMAC_PROTO_BCDC | ||
| 31 | bool | ||
| 32 | |||
| 33 | config BRCMFMAC_PROTO_MSGBUF | ||
| 34 | bool | ||
| 35 | |||
| 30 | config BRCMFMAC_SDIO | 36 | config BRCMFMAC_SDIO |
| 31 | bool "SDIO bus interface support for FullMAC driver" | 37 | bool "SDIO bus interface support for FullMAC driver" |
| 32 | depends on (MMC = y || MMC = BRCMFMAC) | 38 | depends on (MMC = y || MMC = BRCMFMAC) |
| 33 | depends on BRCMFMAC | 39 | depends on BRCMFMAC |
| 40 | select BRCMFMAC_PROTO_BCDC | ||
| 34 | select FW_LOADER | 41 | select FW_LOADER |
| 35 | default y | 42 | default y |
| 36 | ---help--- | 43 | ---help--- |
| @@ -42,6 +49,7 @@ config BRCMFMAC_USB | |||
| 42 | bool "USB bus interface support for FullMAC driver" | 49 | bool "USB bus interface support for FullMAC driver" |
| 43 | depends on (USB = y || USB = BRCMFMAC) | 50 | depends on (USB = y || USB = BRCMFMAC) |
| 44 | depends on BRCMFMAC | 51 | depends on BRCMFMAC |
| 52 | select BRCMFMAC_PROTO_BCDC | ||
| 45 | select FW_LOADER | 53 | select FW_LOADER |
| 46 | ---help--- | 54 | ---help--- |
| 47 | This option enables the USB bus interface support for Broadcom | 55 | This option enables the USB bus interface support for Broadcom |
| @@ -52,6 +60,8 @@ config BRCMFMAC_PCIE | |||
| 52 | bool "PCIE bus interface support for FullMAC driver" | 60 | bool "PCIE bus interface support for FullMAC driver" |
| 53 | depends on BRCMFMAC | 61 | depends on BRCMFMAC |
| 54 | depends on PCI | 62 | depends on PCI |
| 63 | depends on HAS_DMA | ||
| 64 | select BRCMFMAC_PROTO_MSGBUF | ||
| 55 | select FW_LOADER | 65 | select FW_LOADER |
| 56 | ---help--- | 66 | ---help--- |
| 57 | This option enables the PCIE bus interface support for Broadcom | 67 | This option enables the PCIE bus interface support for Broadcom |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile index c35adf4bc70b..90a977fe9a64 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile | |||
| @@ -30,16 +30,18 @@ brcmfmac-objs += \ | |||
| 30 | fwsignal.o \ | 30 | fwsignal.o \ |
| 31 | p2p.o \ | 31 | p2p.o \ |
| 32 | proto.o \ | 32 | proto.o \ |
| 33 | bcdc.o \ | ||
| 34 | commonring.o \ | ||
| 35 | flowring.o \ | ||
| 36 | msgbuf.o \ | ||
| 37 | dhd_common.o \ | 33 | dhd_common.o \ |
| 38 | dhd_linux.o \ | 34 | dhd_linux.o \ |
| 39 | firmware.o \ | 35 | firmware.o \ |
| 40 | feature.o \ | 36 | feature.o \ |
| 41 | btcoex.o \ | 37 | btcoex.o \ |
| 42 | vendor.o | 38 | vendor.o |
| 39 | brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \ | ||
| 40 | bcdc.o | ||
| 41 | brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \ | ||
| 42 | commonring.o \ | ||
| 43 | flowring.o \ | ||
| 44 | msgbuf.o | ||
| 43 | brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ | 45 | brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ |
| 44 | dhd_sdio.o \ | 46 | dhd_sdio.o \ |
| 45 | bcmsdh.o | 47 | bcmsdh.o |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h index 17e8c039ff32..6003179c0ceb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h | |||
| @@ -16,9 +16,12 @@ | |||
| 16 | #ifndef BRCMFMAC_BCDC_H | 16 | #ifndef BRCMFMAC_BCDC_H |
| 17 | #define BRCMFMAC_BCDC_H | 17 | #define BRCMFMAC_BCDC_H |
| 18 | 18 | ||
| 19 | 19 | #ifdef CONFIG_BRCMFMAC_PROTO_BCDC | |
| 20 | int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); | 20 | int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); |
| 21 | void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); | 21 | void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); |
| 22 | 22 | #else | |
| 23 | static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } | ||
| 24 | static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} | ||
| 25 | #endif | ||
| 23 | 26 | ||
| 24 | #endif /* BRCMFMAC_BCDC_H */ | 27 | #endif /* BRCMFMAC_BCDC_H */ |
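Both bcdc.h here and msgbuf.h below follow the usual kernel pattern for optional components: real prototypes when the protocol is compiled in, static inline no-op stubs otherwise, so core code can call attach/detach unconditionally. A generic sketch of that header shape (the symbol and function names are illustrative):

    /* demo_proto.h -- optional-protocol header pattern */
    #ifndef DEMO_PROTO_H
    #define DEMO_PROTO_H

    struct demo_pub;

    #ifdef CONFIG_DEMO_PROTO
    int demo_proto_attach(struct demo_pub *pub);
    void demo_proto_detach(struct demo_pub *pub);
    #else
    /* Compiled out: callers still build, the calls collapse to no-ops. */
    static inline int demo_proto_attach(struct demo_pub *pub) { return 0; }
    static inline void demo_proto_detach(struct demo_pub *pub) {}
    #endif

    #endif /* DEMO_PROTO_H */

The Kconfig and Makefile hunks above supply the other half: hidden BRCMFMAC_PROTO_* symbols selected by the bus options decide which protocol objects are built at all.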
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 4f1daabc551b..44fc85f68f7a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c | |||
| @@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, | |||
| 185 | ifevent->action, ifevent->ifidx, ifevent->bssidx, | 185 | ifevent->action, ifevent->ifidx, ifevent->bssidx, |
| 186 | ifevent->flags, ifevent->role); | 186 | ifevent->flags, ifevent->role); |
| 187 | 187 | ||
| 188 | if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) { | 188 | /* The P2P Device interface event must not be ignored |
| 189 | * contrary to what firmware tells us. The only way to | ||
| 190 | * distinguish the P2P Device is by looking at the ifidx | ||
| 191 | * and bssidx received. | ||
| 192 | */ | ||
| 193 | if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) && | ||
| 194 | (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) { | ||
| 189 | brcmf_dbg(EVENT, "event can be ignored\n"); | 195 | brcmf_dbg(EVENT, "event can be ignored\n"); |
| 190 | return; | 196 | return; |
| 191 | } | 197 | } |
| @@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, | |||
| 210 | return; | 216 | return; |
| 211 | } | 217 | } |
| 212 | 218 | ||
| 213 | if (ifevent->action == BRCMF_E_IF_CHANGE) | 219 | if (ifp && ifevent->action == BRCMF_E_IF_CHANGE) |
| 214 | brcmf_fws_reset_interface(ifp); | 220 | brcmf_fws_reset_interface(ifp); |
| 215 | 221 | ||
| 216 | err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); | 222 | err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); |
| 217 | 223 | ||
| 218 | if (ifevent->action == BRCMF_E_IF_DEL) { | 224 | if (ifp && ifevent->action == BRCMF_E_IF_DEL) { |
| 219 | brcmf_fws_del_interface(ifp); | 225 | brcmf_fws_del_interface(ifp); |
| 220 | brcmf_del_if(drvr, ifevent->bssidx); | 226 | brcmf_del_if(drvr, ifevent->bssidx); |
| 221 | } | 227 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h index dd20b1862d44..cbf033f59109 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h | |||
| @@ -172,6 +172,8 @@ enum brcmf_fweh_event_code { | |||
| 172 | #define BRCMF_E_IF_ROLE_STA 0 | 172 | #define BRCMF_E_IF_ROLE_STA 0 |
| 173 | #define BRCMF_E_IF_ROLE_AP 1 | 173 | #define BRCMF_E_IF_ROLE_AP 1 |
| 174 | #define BRCMF_E_IF_ROLE_WDS 2 | 174 | #define BRCMF_E_IF_ROLE_WDS 2 |
| 175 | #define BRCMF_E_IF_ROLE_P2P_GO 3 | ||
| 176 | #define BRCMF_E_IF_ROLE_P2P_CLIENT 4 | ||
| 175 | 177 | ||
| 176 | /** | 178 | /** |
| 177 | * definitions for event packet validation. | 179 | * definitions for event packet validation. |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h index f901ae52bf2b..77a51b8c1e12 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #ifndef BRCMFMAC_MSGBUF_H | 15 | #ifndef BRCMFMAC_MSGBUF_H |
| 16 | #define BRCMFMAC_MSGBUF_H | 16 | #define BRCMFMAC_MSGBUF_H |
| 17 | 17 | ||
| 18 | #ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF | ||
| 18 | 19 | ||
| 19 | #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 | 20 | #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 |
| 20 | #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 | 21 | #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 |
| @@ -32,9 +33,15 @@ | |||
| 32 | 33 | ||
| 33 | 34 | ||
| 34 | int brcmf_proto_msgbuf_rx_trigger(struct device *dev); | 35 | int brcmf_proto_msgbuf_rx_trigger(struct device *dev); |
| 36 | void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); | ||
| 35 | int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); | 37 | int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); |
| 36 | void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); | 38 | void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); |
| 37 | void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); | 39 | #else |
| 38 | 40 | static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) | |
| 41 | { | ||
| 42 | return 0; | ||
| 43 | } | ||
| 44 | static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {} | ||
| 45 | #endif | ||
| 39 | 46 | ||
| 40 | #endif /* BRCMFMAC_MSGBUF_H */ | 47 | #endif /* BRCMFMAC_MSGBUF_H */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 02fe706fc9ec..16a246bfc343 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
| @@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable) | |||
| 497 | static void | 497 | static void |
| 498 | brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) | 498 | brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) |
| 499 | { | 499 | { |
| 500 | struct net_device *ndev = wdev->netdev; | 500 | struct brcmf_cfg80211_vif *vif; |
| 501 | struct brcmf_if *ifp = netdev_priv(ndev); | 501 | struct brcmf_if *ifp; |
| 502 | |||
| 503 | vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); | ||
| 504 | ifp = vif->ifp; | ||
| 502 | 505 | ||
| 503 | if ((wdev->iftype == NL80211_IFTYPE_ADHOC) || | 506 | if ((wdev->iftype == NL80211_IFTYPE_ADHOC) || |
| 504 | (wdev->iftype == NL80211_IFTYPE_AP) || | 507 | (wdev->iftype == NL80211_IFTYPE_AP) || |
| @@ -4918,7 +4921,7 @@ static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg, | |||
| 4918 | struct brcmu_chan ch; | 4921 | struct brcmu_chan ch; |
| 4919 | int i; | 4922 | int i; |
| 4920 | 4923 | ||
| 4921 | for (i = 0; i <= total; i++) { | 4924 | for (i = 0; i < total; i++) { |
| 4922 | ch.chspec = (u16)le32_to_cpu(chlist->element[i]); | 4925 | ch.chspec = (u16)le32_to_cpu(chlist->element[i]); |
| 4923 | cfg->d11inf.decchspec(&ch); | 4926 | cfg->d11inf.decchspec(&ch); |
| 4924 | 4927 | ||
| @@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) | |||
| 5143 | 5146 | ||
| 5144 | ch.band = BRCMU_CHAN_BAND_2G; | 5147 | ch.band = BRCMU_CHAN_BAND_2G; |
| 5145 | ch.bw = BRCMU_CHAN_BW_40; | 5148 | ch.bw = BRCMU_CHAN_BW_40; |
| 5149 | ch.sb = BRCMU_CHAN_SB_NONE; | ||
| 5146 | ch.chnum = 0; | 5150 | ch.chnum = 0; |
| 5147 | cfg->d11inf.encchspec(&ch); | 5151 | cfg->d11inf.encchspec(&ch); |
| 5148 | 5152 | ||
| @@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) | |||
| 5176 | 5180 | ||
| 5177 | brcmf_update_bw40_channel_flag(&band->channels[j], &ch); | 5181 | brcmf_update_bw40_channel_flag(&band->channels[j], &ch); |
| 5178 | } | 5182 | } |
| 5183 | kfree(pbuf); | ||
| 5179 | } | 5184 | } |
| 5180 | return err; | 5185 | return err; |
| 5181 | } | 5186 | } |
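Two small but classic fixes land in wl_cfg80211.c: the channel walk ran one element past the firmware-reported count (i <= total), and the scratch buffer used for the 40 MHz queries was never freed on the success path. A minimal sketch of both, with made-up helper names and buffer size:

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define DEMO_BUF_LEN 256          /* illustrative scratch size */

    /* 'total' is the element count reported by firmware. */
    static u32 demo_count_2g_channels(const __le32 *element, u32 total)
    {
            u32 i, count = 0;

            /* Visit exactly 'total' entries: '<', not '<='. */
            for (i = 0; i < total; i++)
                    if ((le32_to_cpu(element[i]) & 0xff) <= 14) /* toy 2 GHz test */
                            count++;
            return count;
    }

    static int demo_enable_bw40(void)
    {
            u8 *pbuf = kzalloc(DEMO_BUF_LEN, GFP_KERNEL);
            int err = 0;

            if (!pbuf)
                    return -ENOMEM;

            /* ... firmware queries using pbuf as scratch space ... */

            kfree(pbuf);              /* freed on success too -- the leak fix */
            return err;
    }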
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c index 760c45c34ef3..1513dbc79c14 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/iwlwifi/dvm/power.c | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | #include "commands.h" | 40 | #include "commands.h" |
| 41 | #include "power.h" | 41 | #include "power.h" |
| 42 | 42 | ||
| 43 | static bool force_cam; | 43 | static bool force_cam = true; |
| 44 | module_param(force_cam, bool, 0644); | 44 | module_param(force_cam, bool, 0644); |
| 45 | MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); | 45 | MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); |
| 46 | 46 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index d67a37a786aa..d53adc245497 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c | |||
| @@ -83,6 +83,8 @@ | |||
| 83 | #define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ | 83 | #define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ |
| 84 | #define IWL3160_NVM_VERSION 0x709 | 84 | #define IWL3160_NVM_VERSION 0x709 |
| 85 | #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ | 85 | #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ |
| 86 | #define IWL3165_NVM_VERSION 0x709 | ||
| 87 | #define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ | ||
| 86 | #define IWL7265_NVM_VERSION 0x0a1d | 88 | #define IWL7265_NVM_VERSION 0x0a1d |
| 87 | #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ | 89 | #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ |
| 88 | 90 | ||
| @@ -92,6 +94,9 @@ | |||
| 92 | #define IWL3160_FW_PRE "iwlwifi-3160-" | 94 | #define IWL3160_FW_PRE "iwlwifi-3160-" |
| 93 | #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" | 95 | #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" |
| 94 | 96 | ||
| 97 | #define IWL3165_FW_PRE "iwlwifi-3165-" | ||
| 98 | #define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode" | ||
| 99 | |||
| 95 | #define IWL7265_FW_PRE "iwlwifi-7265-" | 100 | #define IWL7265_FW_PRE "iwlwifi-7265-" |
| 96 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" | 101 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" |
| 97 | 102 | ||
| @@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { | |||
| 213 | {0}, | 218 | {0}, |
| 214 | }; | 219 | }; |
| 215 | 220 | ||
| 221 | const struct iwl_cfg iwl3165_2ac_cfg = { | ||
| 222 | .name = "Intel(R) Dual Band Wireless AC 3165", | ||
| 223 | .fw_name_pre = IWL3165_FW_PRE, | ||
| 224 | IWL_DEVICE_7000, | ||
| 225 | .ht_params = &iwl7000_ht_params, | ||
| 226 | .nvm_ver = IWL3165_NVM_VERSION, | ||
| 227 | .nvm_calib_ver = IWL3165_TX_POWER_VERSION, | ||
| 228 | .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, | ||
| 229 | }; | ||
| 230 | |||
| 216 | const struct iwl_cfg iwl7265_2ac_cfg = { | 231 | const struct iwl_cfg iwl7265_2ac_cfg = { |
| 217 | .name = "Intel(R) Dual Band Wireless AC 7265", | 232 | .name = "Intel(R) Dual Band Wireless AC 7265", |
| 218 | .fw_name_pre = IWL7265_FW_PRE, | 233 | .fw_name_pre = IWL7265_FW_PRE, |
| @@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = { | |||
| 245 | 260 | ||
| 246 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 261 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
| 247 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); | 262 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); |
| 263 | MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); | ||
| 248 | MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 264 | MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 8da596db9abe..3d7cc37420ae 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h | |||
| @@ -120,6 +120,8 @@ enum iwl_led_mode { | |||
| 120 | #define IWL_LONG_WD_TIMEOUT 10000 | 120 | #define IWL_LONG_WD_TIMEOUT 10000 |
| 121 | #define IWL_MAX_WD_TIMEOUT 120000 | 121 | #define IWL_MAX_WD_TIMEOUT 120000 |
| 122 | 122 | ||
| 123 | #define IWL_DEFAULT_MAX_TX_POWER 22 | ||
| 124 | |||
| 123 | /* Antenna presence definitions */ | 125 | /* Antenna presence definitions */ |
| 124 | #define ANT_NONE 0x0 | 126 | #define ANT_NONE 0x0 |
| 125 | #define ANT_A BIT(0) | 127 | #define ANT_A BIT(0) |
| @@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg; | |||
| 335 | extern const struct iwl_cfg iwl3160_2ac_cfg; | 337 | extern const struct iwl_cfg iwl3160_2ac_cfg; |
| 336 | extern const struct iwl_cfg iwl3160_2n_cfg; | 338 | extern const struct iwl_cfg iwl3160_2n_cfg; |
| 337 | extern const struct iwl_cfg iwl3160_n_cfg; | 339 | extern const struct iwl_cfg iwl3160_n_cfg; |
| 340 | extern const struct iwl_cfg iwl3165_2ac_cfg; | ||
| 338 | extern const struct iwl_cfg iwl7265_2ac_cfg; | 341 | extern const struct iwl_cfg iwl7265_2ac_cfg; |
| 339 | extern const struct iwl_cfg iwl7265_2n_cfg; | 342 | extern const struct iwl_cfg iwl7265_2n_cfg; |
| 340 | extern const struct iwl_cfg iwl7265_n_cfg; | 343 | extern const struct iwl_cfg iwl7265_n_cfg; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 018af2957d3b..354255f08754 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | |||
| @@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = { | |||
| 146 | #define LAST_2GHZ_HT_PLUS 9 | 146 | #define LAST_2GHZ_HT_PLUS 9 |
| 147 | #define LAST_5GHZ_HT 161 | 147 | #define LAST_5GHZ_HT 161 |
| 148 | 148 | ||
| 149 | #define DEFAULT_MAX_TX_POWER 16 | ||
| 150 | |||
| 151 | /* rate data (static) */ | 149 | /* rate data (static) */ |
| 152 | static struct ieee80211_rate iwl_cfg80211_rates[] = { | 150 | static struct ieee80211_rate iwl_cfg80211_rates[] = { |
| 153 | { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, | 151 | { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, |
| @@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, | |||
| 295 | * Default value - highest tx power value. max_power | 293 | * Default value - highest tx power value. max_power |
| 296 | * is not used in mvm, and is used for backwards compatibility | 294 | * is not used in mvm, and is used for backwards compatibility |
| 297 | */ | 295 | */ |
| 298 | channel->max_power = DEFAULT_MAX_TX_POWER; | 296 | channel->max_power = IWL_DEFAULT_MAX_TX_POWER; |
| 299 | is_5ghz = channel->band == IEEE80211_BAND_5GHZ; | 297 | is_5ghz = channel->band == IEEE80211_BAND_5GHZ; |
| 300 | IWL_DEBUG_EEPROM(dev, | 298 | IWL_DEBUG_EEPROM(dev, |
| 301 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", | 299 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", |
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 2291bbcaaeab..ce71625f497f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c | |||
| @@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) | |||
| 585 | lockdep_assert_held(&mvm->mutex); | 585 | lockdep_assert_held(&mvm->mutex); |
| 586 | 586 | ||
| 587 | if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { | 587 | if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { |
| 588 | u32 mode; | ||
| 589 | |||
| 590 | switch (mvm->bt_force_ant_mode) { | 588 | switch (mvm->bt_force_ant_mode) { |
| 591 | case BT_FORCE_ANT_BT: | 589 | case BT_FORCE_ANT_BT: |
| 592 | mode = BT_COEX_BT; | 590 | mode = BT_COEX_BT; |
| @@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
| 756 | struct iwl_bt_iterator_data *data = _data; | 754 | struct iwl_bt_iterator_data *data = _data; |
| 757 | struct iwl_mvm *mvm = data->mvm; | 755 | struct iwl_mvm *mvm = data->mvm; |
| 758 | struct ieee80211_chanctx_conf *chanctx_conf; | 756 | struct ieee80211_chanctx_conf *chanctx_conf; |
| 759 | enum ieee80211_smps_mode smps_mode; | 757 | /* default smps_mode is AUTOMATIC - only used for client modes */ |
| 758 | enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; | ||
| 760 | u32 bt_activity_grading; | 759 | u32 bt_activity_grading; |
| 761 | int ave_rssi; | 760 | int ave_rssi; |
| 762 | 761 | ||
| @@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
| 764 | 763 | ||
| 765 | switch (vif->type) { | 764 | switch (vif->type) { |
| 766 | case NL80211_IFTYPE_STATION: | 765 | case NL80211_IFTYPE_STATION: |
| 767 | /* default smps_mode for BSS / P2P client is AUTOMATIC */ | ||
| 768 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | ||
| 769 | break; | 766 | break; |
| 770 | case NL80211_IFTYPE_AP: | 767 | case NL80211_IFTYPE_AP: |
| 771 | if (!mvmvif->ap_ibss_active) | 768 | if (!mvmvif->ap_ibss_active) |
| @@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
| 797 | else if (bt_activity_grading >= BT_LOW_TRAFFIC) | 794 | else if (bt_activity_grading >= BT_LOW_TRAFFIC) |
| 798 | smps_mode = IEEE80211_SMPS_DYNAMIC; | 795 | smps_mode = IEEE80211_SMPS_DYNAMIC; |
| 799 | 796 | ||
| 800 | /* relax SMPS contraints for next association */ | 797 | /* relax SMPS constraints for next association */ |
| 801 | if (!vif->bss_conf.assoc) | 798 | if (!vif->bss_conf.assoc) |
| 802 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 799 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
| 803 | 800 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 2e90ff795c13..87e517bffedc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | |||
| @@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, | |||
| 74 | 74 | ||
| 75 | switch (param) { | 75 | switch (param) { |
| 76 | case MVM_DEBUGFS_PM_KEEP_ALIVE: { | 76 | case MVM_DEBUGFS_PM_KEEP_ALIVE: { |
| 77 | struct ieee80211_hw *hw = mvm->hw; | 77 | int dtimper = vif->bss_conf.dtim_period ?: 1; |
| 78 | int dtimper = hw->conf.ps_dtim_period ?: 1; | ||
| 79 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; | 78 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; |
| 80 | 79 | ||
| 81 | IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); | 80 | IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 95f5b3274efb..9a922f3bd16b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h | |||
| @@ -1563,14 +1563,14 @@ enum iwl_sf_scenario { | |||
| 1563 | 1563 | ||
| 1564 | /** | 1564 | /** |
| 1565 | * Smart Fifo configuration command. | 1565 | * Smart Fifo configuration command. |
| 1566 | * @state: smart fifo state, types listed in iwl_sf_sate. | 1566 | * @state: smart fifo state, types listed in enum %iwl_sf_sate. |
| 1567 | * @watermark: Minimum allowed availabe free space in RXF for transient state. | 1567 | * @watermark: Minimum allowed availabe free space in RXF for transient state. |
| 1568 | * @long_delay_timeouts: aging and idle timer values for each scenario | 1568 | * @long_delay_timeouts: aging and idle timer values for each scenario |
| 1569 | * in long delay state. | 1569 | * in long delay state. |
| 1570 | * @full_on_timeouts: timer values for each scenario in full on state. | 1570 | * @full_on_timeouts: timer values for each scenario in full on state. |
| 1571 | */ | 1571 | */ |
| 1572 | struct iwl_sf_cfg_cmd { | 1572 | struct iwl_sf_cfg_cmd { |
| 1573 | enum iwl_sf_state state; | 1573 | __le32 state; |
| 1574 | __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; | 1574 | __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; |
| 1575 | __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; | 1575 | __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; |
| 1576 | __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; | 1576 | __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 0e523e28cabf..8242e689ddb1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | |||
| @@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, | |||
| 721 | !force_assoc_off) { | 721 | !force_assoc_off) { |
| 722 | u32 dtim_offs; | 722 | u32 dtim_offs; |
| 723 | 723 | ||
| 724 | /* Allow beacons to pass through as long as we are not | ||
| 725 | * associated, or we do not have dtim period information. | ||
| 726 | */ | ||
| 727 | cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); | ||
| 728 | |||
| 729 | /* | 724 | /* |
| 730 | * The DTIM count counts down, so when it is N that means N | 725 | * The DTIM count counts down, so when it is N that means N |
| 731 | * more beacon intervals happen until the DTIM TBTT. Therefore | 726 | * more beacon intervals happen until the DTIM TBTT. Therefore |
| @@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, | |||
| 759 | ctxt_sta->is_assoc = cpu_to_le32(1); | 754 | ctxt_sta->is_assoc = cpu_to_le32(1); |
| 760 | } else { | 755 | } else { |
| 761 | ctxt_sta->is_assoc = cpu_to_le32(0); | 756 | ctxt_sta->is_assoc = cpu_to_le32(0); |
| 757 | |||
| 758 | /* Allow beacons to pass through as long as we are not | ||
| 759 | * associated, or we do not have dtim period information. | ||
| 760 | */ | ||
| 761 | cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); | ||
| 762 | } | 762 | } |
| 763 | 763 | ||
| 764 | ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); | 764 | ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 7c8796584c25..cdc272d776e7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
| @@ -396,12 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) | |||
| 396 | else | 396 | else |
| 397 | hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; | 397 | hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; |
| 398 | 398 | ||
| 399 | /* TODO: enable that only for firmwares that don't crash */ | 399 | if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) { |
| 400 | /* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */ | 400 | hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; |
| 401 | hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; | 401 | hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; |
| 402 | hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; | 402 | hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; |
| 403 | /* we create the 802.11 header and zero length SSID IE. */ | 403 | /* we create the 802.11 header and zero length SSID IE. */ |
| 404 | hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; | 404 | hw->wiphy->max_sched_scan_ie_len = |
| 405 | SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; | ||
| 406 | } | ||
| 405 | 407 | ||
| 406 | hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | | 408 | hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | |
| 407 | NL80211_FEATURE_LOW_PRIORITY_SCAN | | 409 | NL80211_FEATURE_LOW_PRIORITY_SCAN | |
| @@ -1524,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, | |||
| 1524 | */ | 1526 | */ |
| 1525 | iwl_mvm_remove_time_event(mvm, mvmvif, | 1527 | iwl_mvm_remove_time_event(mvm, mvmvif, |
| 1526 | &mvmvif->time_event_data); | 1528 | &mvmvif->time_event_data); |
| 1527 | } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | | ||
| 1528 | BSS_CHANGED_QOS)) { | ||
| 1529 | ret = iwl_mvm_power_update_mac(mvm); | ||
| 1530 | if (ret) | ||
| 1531 | IWL_ERR(mvm, "failed to update power mode\n"); | ||
| 1532 | } | 1529 | } |
| 1533 | 1530 | ||
| 1534 | if (changes & BSS_CHANGED_BEACON_INFO) { | 1531 | if (changes & BSS_CHANGED_BEACON_INFO) { |
| @@ -1536,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, | |||
| 1536 | WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); | 1533 | WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); |
| 1537 | } | 1534 | } |
| 1538 | 1535 | ||
| 1536 | if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { | ||
| 1537 | ret = iwl_mvm_power_update_mac(mvm); | ||
| 1538 | if (ret) | ||
| 1539 | IWL_ERR(mvm, "failed to update power mode\n"); | ||
| 1540 | } | ||
| 1541 | |||
| 1539 | if (changes & BSS_CHANGED_TXPOWER) { | 1542 | if (changes & BSS_CHANGED_TXPOWER) { |
| 1540 | IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", | 1543 | IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", |
| 1541 | bss_conf->txpower); | 1544 | bss_conf->txpower); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 2b2d10800a55..d9769a23c68b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c | |||
| @@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, | |||
| 281 | struct ieee80211_vif *vif, | 281 | struct ieee80211_vif *vif, |
| 282 | struct iwl_mac_power_cmd *cmd) | 282 | struct iwl_mac_power_cmd *cmd) |
| 283 | { | 283 | { |
| 284 | struct ieee80211_hw *hw = mvm->hw; | ||
| 285 | struct ieee80211_chanctx_conf *chanctx_conf; | 284 | struct ieee80211_chanctx_conf *chanctx_conf; |
| 286 | struct ieee80211_channel *chan; | 285 | struct ieee80211_channel *chan; |
| 287 | int dtimper, dtimper_msec; | 286 | int dtimper, dtimper_msec; |
| @@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, | |||
| 292 | 291 | ||
| 293 | cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, | 292 | cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, |
| 294 | mvmvif->color)); | 293 | mvmvif->color)); |
| 295 | dtimper = hw->conf.ps_dtim_period ?: 1; | 294 | dtimper = vif->bss_conf.dtim_period; |
| 296 | 295 | ||
| 297 | /* | 296 | /* |
| 298 | * Regardless of power management state the driver must set | 297 | * Regardless of power management state the driver must set |
| @@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, | |||
| 885 | iwl_mvm_power_build_cmd(mvm, vif, &cmd); | 884 | iwl_mvm_power_build_cmd(mvm, vif, &cmd); |
| 886 | if (enable) { | 885 | if (enable) { |
| 887 | /* configure skip over dtim up to 300 msec */ | 886 | /* configure skip over dtim up to 300 msec */ |
| 888 | int dtimper = mvm->hw->conf.ps_dtim_period ?: 1; | 887 | int dtimper = vif->bss_conf.dtim_period ?: 1; |
| 889 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; | 888 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; |
| 890 | 889 | ||
| 891 | if (WARN_ON(!dtimper_msec)) | 890 | if (WARN_ON(!dtimper_msec)) |
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 4b98987fc413..bf5cd8c8b0f7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c | |||
| @@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, | |||
| 149 | le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); | 149 | le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); |
| 150 | energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> | 150 | energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> |
| 151 | IWL_RX_INFO_ENERGY_ANT_A_POS; | 151 | IWL_RX_INFO_ENERGY_ANT_A_POS; |
| 152 | energy_a = energy_a ? -energy_a : -256; | 152 | energy_a = energy_a ? -energy_a : S8_MIN; |
| 153 | energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> | 153 | energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> |
| 154 | IWL_RX_INFO_ENERGY_ANT_B_POS; | 154 | IWL_RX_INFO_ENERGY_ANT_B_POS; |
| 155 | energy_b = energy_b ? -energy_b : -256; | 155 | energy_b = energy_b ? -energy_b : S8_MIN; |
| 156 | energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> | 156 | energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> |
| 157 | IWL_RX_INFO_ENERGY_ANT_C_POS; | 157 | IWL_RX_INFO_ENERGY_ANT_C_POS; |
| 158 | energy_c = energy_c ? -energy_c : -256; | 158 | energy_c = energy_c ? -energy_c : S8_MIN; |
| 159 | max_energy = max(energy_a, energy_b); | 159 | max_energy = max(energy_a, energy_b); |
| 160 | max_energy = max(max_energy, energy_c); | 160 | max_energy = max(max_energy, energy_c); |
| 161 | 161 | ||
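The rx.c change swaps the old -256 sentinel for S8_MIN: the per-antenna energy ends up in a signed 8-bit field, and -256 truncated to eight bits is 0, i.e. "maximum signal" instead of "no measurement". A tiny illustration of matching the sentinel to the storage type (assuming S8_MIN is available from linux/kernel.h in this tree):

    #include <linux/kernel.h>         /* S8_MIN */

    /* raw is the hardware's per-antenna energy field; 0 means "not reported". */
    static int demo_energy_to_dbm(u32 raw)
    {
            /* Later stored in an s8: -256 would wrap to 0, while S8_MIN stays
             * the weakest-possible value and survives the narrowing. */
            return raw ? -(int)raw : S8_MIN;
    }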
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c index 7edfd15efc9d..e843b67f2201 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c | |||
| @@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, | |||
| 172 | enum iwl_sf_state new_state) | 172 | enum iwl_sf_state new_state) |
| 173 | { | 173 | { |
| 174 | struct iwl_sf_cfg_cmd sf_cmd = { | 174 | struct iwl_sf_cfg_cmd sf_cmd = { |
| 175 | .state = new_state, | 175 | .state = cpu_to_le32(new_state), |
| 176 | }; | 176 | }; |
| 177 | struct ieee80211_sta *sta; | 177 | struct ieee80211_sta *sta; |
| 178 | int ret = 0; | 178 | int ret = 0; |
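The Smart Fifo fix is an ABI one: a host-endian, compiler-sized enum inside a command parsed by the firmware is replaced with an explicit __le32 filled via cpu_to_le32(). A generic sketch of that wire-format convention (structure and field names are made up):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Shared with firmware: fixed width, little endian, packed. */
    struct demo_fw_cmd {
            __le32 state;             /* not 'enum demo_state': size/endianness vary */
            __le32 watermark;
    } __packed;

    static void demo_fill_cmd(struct demo_fw_cmd *cmd, u32 state, u32 watermark)
    {
            cmd->state = cpu_to_le32(state);
            cmd->watermark = cpu_to_le32(watermark);
    }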
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index dbc870713882..9ee410bf6da2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
| @@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, | |||
| 168 | 168 | ||
| 169 | /* | 169 | /* |
| 170 | * for data packets, rate info comes from the table inside the fw. This | 170 | * for data packets, rate info comes from the table inside the fw. This |
| 171 | * table is controlled by LINK_QUALITY commands | 171 | * table is controlled by LINK_QUALITY commands. Exclude ctrl port |
| 172 | * frames like EAPOLs which should be treated as mgmt frames. This | ||
| 173 | * avoids them being sent initially at high rates, which increases the | ||
| 174 | * chances for completion of the 4-Way handshake. | ||
| 172 | */ | 175 | */ |
| 173 | 176 | ||
| 174 | if (ieee80211_is_data(fc) && sta) { | 177 | if (ieee80211_is_data(fc) && sta && |
| 178 | !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) { | ||
| 175 | tx_cmd->initial_rate_index = 0; | 179 | tx_cmd->initial_rate_index = 0; |
| 176 | tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); | 180 | tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); |
| 177 | return; | 181 | return; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index f0e722ced080..073a68b97a72 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
| @@ -352,11 +352,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
| 352 | {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, | 352 | {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, |
| 353 | {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, | 353 | {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, |
| 354 | {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, | 354 | {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, |
| 355 | {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, | ||
| 356 | {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, | ||
| 355 | {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, | 357 | {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, |
| 356 | {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, | 358 | {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, |
| 357 | {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, | 359 | {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, |
| 358 | {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, | 360 | {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, |
| 359 | 361 | ||
| 362 | /* 3165 Series */ | ||
| 363 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, | ||
| 364 | {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, | ||
| 365 | |||
| 360 | /* 7265 Series */ | 366 | /* 7265 Series */ |
| 361 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 367 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
| 362 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, | 368 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, |
| @@ -378,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
| 378 | {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, | 384 | {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, |
| 379 | {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, | 385 | {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, |
| 380 | {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, | 386 | {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, |
| 387 | {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, | ||
| 381 | {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, | 388 | {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, |
| 382 | {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, | 389 | {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, |
| 383 | {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, | 390 | {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, |
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c index f868333271aa..963a4a5dc88e 100644 --- a/drivers/nfc/microread/microread.c +++ b/drivers/nfc/microread/microread.c | |||
| @@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, | |||
| 501 | targets->sens_res = | 501 | targets->sens_res = |
| 502 | be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]); | 502 | be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]); |
| 503 | targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK]; | 503 | targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK]; |
| 504 | memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], | ||
| 505 | skb->data[MICROREAD_EMCF_A_LEN]); | ||
| 506 | targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN]; | 504 | targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN]; |
| 505 | if (targets->nfcid1_len > sizeof(targets->nfcid1)) { | ||
| 506 | r = -EINVAL; | ||
| 507 | goto exit_free; | ||
| 508 | } | ||
| 509 | memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], | ||
| 510 | targets->nfcid1_len); | ||
| 507 | break; | 511 | break; |
| 508 | case MICROREAD_GATE_ID_MREAD_ISO_A_3: | 512 | case MICROREAD_GATE_ID_MREAD_ISO_A_3: |
| 509 | targets->supported_protocols = | 513 | targets->supported_protocols = |
| @@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, | |||
| 511 | targets->sens_res = | 515 | targets->sens_res = |
| 512 | be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]); | 516 | be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]); |
| 513 | targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK]; | 517 | targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK]; |
| 514 | memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], | ||
| 515 | skb->data[MICROREAD_EMCF_A3_LEN]); | ||
| 516 | targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN]; | 518 | targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN]; |
| 519 | if (targets->nfcid1_len > sizeof(targets->nfcid1)) { | ||
| 520 | r = -EINVAL; | ||
| 521 | goto exit_free; | ||
| 522 | } | ||
| 523 | memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], | ||
| 524 | targets->nfcid1_len); | ||
| 517 | break; | 525 | break; |
| 518 | case MICROREAD_GATE_ID_MREAD_ISO_B: | 526 | case MICROREAD_GATE_ID_MREAD_ISO_B: |
| 519 | targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK; | 527 | targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK; |
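Both microread hunks move the length check ahead of the copy: the nfcid length reported by the tag is validated against the destination buffer before memcpy() runs, instead of being trusted blindly. The same bounded-copy pattern in isolation, with hypothetical names in plain user-space C:

#include <errno.h>
#include <string.h>

/* Sketch: never trust a length field read from the tag.  Reject anything
 * larger than the destination before copying rather than truncating. */
static int copy_reported_id(unsigned char *dst, size_t dst_size,
                            const unsigned char *src, size_t reported_len)
{
        if (reported_len > dst_size)
                return -EINVAL;
        memcpy(dst, src, reported_len);
        return 0;
}

int main(void)
{
        unsigned char id[10];
        const unsigned char frame[] = { 0xde, 0xad, 0xbe, 0xef };

        return copy_reported_id(id, sizeof(id), frame, sizeof(frame)) ? 1 : 0;
}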
diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile index db7a38ae05f7..7d688f97aa27 100644 --- a/drivers/nfc/st21nfca/Makefile +++ b/drivers/nfc/st21nfca/Makefile | |||
| @@ -2,7 +2,8 @@ | |||
| 2 | # Makefile for ST21NFCA HCI based NFC driver | 2 | # Makefile for ST21NFCA HCI based NFC driver |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | st21nfca_i2c-objs = i2c.o | 5 | st21nfca_hci-objs = st21nfca.o st21nfca_dep.o |
| 6 | obj-$(CONFIG_NFC_ST21NFCA) += st21nfca_hci.o | ||
| 6 | 7 | ||
| 7 | obj-$(CONFIG_NFC_ST21NFCA) += st21nfca.o st21nfca_dep.o | 8 | st21nfca_i2c-objs = i2c.o |
| 8 | obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o | 9 | obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o |
diff --git a/drivers/nfc/st21nfcb/Makefile b/drivers/nfc/st21nfcb/Makefile index 13d9f03b2fea..f4d835dd15f2 100644 --- a/drivers/nfc/st21nfcb/Makefile +++ b/drivers/nfc/st21nfcb/Makefile | |||
| @@ -2,7 +2,8 @@ | |||
| 2 | # Makefile for ST21NFCB NCI based NFC driver | 2 | # Makefile for ST21NFCB NCI based NFC driver |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | st21nfcb_i2c-objs = i2c.o | 5 | st21nfcb_nci-objs = ndlc.o st21nfcb.o |
| 6 | obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb_nci.o | ||
| 6 | 7 | ||
| 7 | obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb.o ndlc.o | 8 | st21nfcb_i2c-objs = i2c.o |
| 8 | obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o | 9 | obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o |
diff --git a/drivers/of/base.c b/drivers/of/base.c index d8574adf0d62..293ed4b687ba 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -138,6 +138,9 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp) | |||
| 138 | /* Important: Don't leak passwords */ | 138 | /* Important: Don't leak passwords */ |
| 139 | bool secure = strncmp(pp->name, "security-", 9) == 0; | 139 | bool secure = strncmp(pp->name, "security-", 9) == 0; |
| 140 | 140 | ||
| 141 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
| 142 | return 0; | ||
| 143 | |||
| 141 | if (!of_kset || !of_node_is_attached(np)) | 144 | if (!of_kset || !of_node_is_attached(np)) |
| 142 | return 0; | 145 | return 0; |
| 143 | 146 | ||
| @@ -158,6 +161,9 @@ int __of_attach_node_sysfs(struct device_node *np) | |||
| 158 | struct property *pp; | 161 | struct property *pp; |
| 159 | int rc; | 162 | int rc; |
| 160 | 163 | ||
| 164 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
| 165 | return 0; | ||
| 166 | |||
| 161 | if (!of_kset) | 167 | if (!of_kset) |
| 162 | return 0; | 168 | return 0; |
| 163 | 169 | ||
| @@ -1713,6 +1719,9 @@ int __of_remove_property(struct device_node *np, struct property *prop) | |||
| 1713 | 1719 | ||
| 1714 | void __of_remove_property_sysfs(struct device_node *np, struct property *prop) | 1720 | void __of_remove_property_sysfs(struct device_node *np, struct property *prop) |
| 1715 | { | 1721 | { |
| 1722 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
| 1723 | return; | ||
| 1724 | |||
| 1716 | /* at early boot, bail here and defer setup to of_init() */ | 1725 | /* at early boot, bail here and defer setup to of_init() */ |
| 1717 | if (of_kset && of_node_is_attached(np)) | 1726 | if (of_kset && of_node_is_attached(np)) |
| 1718 | sysfs_remove_bin_file(&np->kobj, &prop->attr); | 1727 | sysfs_remove_bin_file(&np->kobj, &prop->attr); |
| @@ -1777,6 +1786,9 @@ int __of_update_property(struct device_node *np, struct property *newprop, | |||
| 1777 | void __of_update_property_sysfs(struct device_node *np, struct property *newprop, | 1786 | void __of_update_property_sysfs(struct device_node *np, struct property *newprop, |
| 1778 | struct property *oldprop) | 1787 | struct property *oldprop) |
| 1779 | { | 1788 | { |
| 1789 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
| 1790 | return; | ||
| 1791 | |||
| 1780 | /* At early boot, bail out and defer setup to of_init() */ | 1792 | /* At early boot, bail out and defer setup to of_init() */ |
| 1781 | if (!of_kset) | 1793 | if (!of_kset) |
| 1782 | return; | 1794 | return; |
| @@ -1847,6 +1859,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) | |||
| 1847 | { | 1859 | { |
| 1848 | struct property *pp; | 1860 | struct property *pp; |
| 1849 | 1861 | ||
| 1862 | of_aliases = of_find_node_by_path("/aliases"); | ||
| 1850 | of_chosen = of_find_node_by_path("/chosen"); | 1863 | of_chosen = of_find_node_by_path("/chosen"); |
| 1851 | if (of_chosen == NULL) | 1864 | if (of_chosen == NULL) |
| 1852 | of_chosen = of_find_node_by_path("/chosen@0"); | 1865 | of_chosen = of_find_node_by_path("/chosen@0"); |
| @@ -1862,7 +1875,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) | |||
| 1862 | of_stdout = of_find_node_by_path(name); | 1875 | of_stdout = of_find_node_by_path(name); |
| 1863 | } | 1876 | } |
| 1864 | 1877 | ||
| 1865 | of_aliases = of_find_node_by_path("/aliases"); | ||
| 1866 | if (!of_aliases) | 1878 | if (!of_aliases) |
| 1867 | return; | 1879 | return; |
| 1868 | 1880 | ||
| @@ -1986,7 +1998,7 @@ bool of_console_check(struct device_node *dn, char *name, int index) | |||
| 1986 | { | 1998 | { |
| 1987 | if (!dn || dn != of_stdout || console_set_on_cmdline) | 1999 | if (!dn || dn != of_stdout || console_set_on_cmdline) |
| 1988 | return false; | 2000 | return false; |
| 1989 | return add_preferred_console(name, index, NULL); | 2001 | return !add_preferred_console(name, index, NULL); |
| 1990 | } | 2002 | } |
| 1991 | EXPORT_SYMBOL_GPL(of_console_check); | 2003 | EXPORT_SYMBOL_GPL(of_console_check); |
| 1992 | 2004 | ||
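The one-character change in of_console_check() above is a return-convention fix: add_preferred_console() returns 0 on success, so handing its raw value back from a bool-returning function inverted the answer. The pitfall in isolation (illustrative, not kernel code):

#include <stdbool.h>

/* Sketch: a 0-on-success result must be negated when the caller wants
 * "did it work?" as a boolean. */
static bool succeeded(int zero_on_success_ret)
{
        return !zero_on_success_ret;    /* 0 -> true, negative errno -> false */
}

int main(void)
{
        return succeeded(0) ? 0 : 1;
}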
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 54fecc49a1fe..f297891d8529 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c | |||
| @@ -45,6 +45,9 @@ void __of_detach_node_sysfs(struct device_node *np) | |||
| 45 | { | 45 | { |
| 46 | struct property *pp; | 46 | struct property *pp; |
| 47 | 47 | ||
| 48 | if (!IS_ENABLED(CONFIG_SYSFS)) | ||
| 49 | return; | ||
| 50 | |||
| 48 | BUG_ON(!of_node_is_initialized(np)); | 51 | BUG_ON(!of_node_is_initialized(np)); |
| 49 | if (!of_kset) | 52 | if (!of_kset) |
| 50 | return; | 53 | return; |
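The sysfs helpers in base.c and dynamic.c now bail out early behind IS_ENABLED(CONFIG_SYSFS) instead of wrapping the code in #ifdef. Because the option expands to a compile-time constant, the compiler still type-checks the body but drops it as dead code when sysfs is configured out. A simplified stand-in for the pattern (the real IS_ENABLED() also copes with undefined and modular options; CONFIG_SYSFS is hard-coded to 0 here purely for the demo):

#include <stdio.h>

#define IS_ENABLED(option) (option)     /* simplified stand-in */
#define CONFIG_SYSFS 0                  /* pretend sysfs is configured out */

static int attach_node(const char *name)
{
        if (!IS_ENABLED(CONFIG_SYSFS))
                return 0;               /* body below is compiled, then discarded */
        printf("creating sysfs entry for %s\n", name);
        return 1;
}

int main(void)
{
        return attach_node("/chosen");
}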
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 79cb8313c7d8..d1ffca8b34ea 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
| @@ -928,7 +928,11 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, | |||
| 928 | void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) | 928 | void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) |
| 929 | { | 929 | { |
| 930 | const u64 phys_offset = __pa(PAGE_OFFSET); | 930 | const u64 phys_offset = __pa(PAGE_OFFSET); |
| 931 | base &= PAGE_MASK; | 931 | |
| 932 | if (!PAGE_ALIGNED(base)) { | ||
| 933 | size -= PAGE_SIZE - (base & ~PAGE_MASK); | ||
| 934 | base = PAGE_ALIGN(base); | ||
| 935 | } | ||
| 932 | size &= PAGE_MASK; | 936 | size &= PAGE_MASK; |
| 933 | 937 | ||
| 934 | if (base > MAX_PHYS_ADDR) { | 938 | if (base > MAX_PHYS_ADDR) { |
| @@ -937,10 +941,10 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) | |||
| 937 | return; | 941 | return; |
| 938 | } | 942 | } |
| 939 | 943 | ||
| 940 | if (base + size > MAX_PHYS_ADDR) { | 944 | if (base + size - 1 > MAX_PHYS_ADDR) { |
| 941 | pr_warning("Ignoring memory range 0x%lx - 0x%llx\n", | 945 | pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", |
| 942 | ULONG_MAX, base + size); | 946 | ((u64)MAX_PHYS_ADDR) + 1, base + size); |
| 943 | size = MAX_PHYS_ADDR - base; | 947 | size = MAX_PHYS_ADDR - base + 1; |
| 944 | } | 948 | } |
| 945 | 949 | ||
| 946 | if (base + size < phys_offset) { | 950 | if (base + size < phys_offset) { |
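Two fixes sit in the fdt.c hunks above: an unaligned base is now rounded up, with the size trimmed by the bytes skipped, instead of being rounded down into memory the region does not own; and the upper-bound test uses base + size - 1 so a range ending exactly at MAX_PHYS_ADDR is no longer clipped. The alignment arithmetic as a small self-contained demo (PAGE_SIZE assumed to be 4 KiB):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Round base up to the next page, drop the skipped bytes from size,
 * then keep whole pages only. */
static void align_region(uint64_t *base, uint64_t *size)
{
        if (*base & ~PAGE_MASK) {
                uint64_t skip = PAGE_SIZE - (*base & ~PAGE_MASK);
                *size -= skip;
                *base += skip;
        }
        *size &= PAGE_MASK;
}

int main(void)
{
        uint64_t base = 0x80000800, size = 0x3000;

        align_region(&base, &size);
        /* prints base=0x80001000 size=0x2000 */
        printf("base=%#llx size=%#llx\n",
               (unsigned long long)base, (unsigned long long)size);
        return 0;
}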
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index 0f54ab6260df..3651c3871d5b 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c | |||
| @@ -278,7 +278,7 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun | |||
| 278 | { | 278 | { |
| 279 | struct hardware_path hwpath; | 279 | struct hardware_path hwpath; |
| 280 | unsigned short i; | 280 | unsigned short i; |
| 281 | char in[count+1], *temp; | 281 | char in[64], *temp; |
| 282 | struct device *dev; | 282 | struct device *dev; |
| 283 | int ret; | 283 | int ret; |
| 284 | 284 | ||
| @@ -286,8 +286,9 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun | |||
| 286 | return -EINVAL; | 286 | return -EINVAL; |
| 287 | 287 | ||
| 288 | /* We'll use a local copy of buf */ | 288 | /* We'll use a local copy of buf */ |
| 289 | memset(in, 0, count+1); | 289 | count = min_t(size_t, count, sizeof(in)-1); |
| 290 | strncpy(in, buf, count); | 290 | strncpy(in, buf, count); |
| 291 | in[count] = '\0'; | ||
| 291 | 292 | ||
| 292 | /* Let's clean up the target. 0xff is a blank pattern */ | 293 | /* Let's clean up the target. 0xff is a blank pattern */ |
| 293 | memset(&hwpath, 0xff, sizeof(hwpath)); | 294 | memset(&hwpath, 0xff, sizeof(hwpath)); |
| @@ -393,14 +394,15 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count | |||
| 393 | { | 394 | { |
| 394 | unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */ | 395 | unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */ |
| 395 | unsigned short i; | 396 | unsigned short i; |
| 396 | char in[count+1], *temp; | 397 | char in[64], *temp; |
| 397 | 398 | ||
| 398 | if (!entry || !buf || !count) | 399 | if (!entry || !buf || !count) |
| 399 | return -EINVAL; | 400 | return -EINVAL; |
| 400 | 401 | ||
| 401 | /* We'll use a local copy of buf */ | 402 | /* We'll use a local copy of buf */ |
| 402 | memset(in, 0, count+1); | 403 | count = min_t(size_t, count, sizeof(in)-1); |
| 403 | strncpy(in, buf, count); | 404 | strncpy(in, buf, count); |
| 405 | in[count] = '\0'; | ||
| 404 | 406 | ||
| 405 | /* Let's clean up the target. 0 is a blank pattern */ | 407 | /* Let's clean up the target. 0 is a blank pattern */ |
| 406 | memset(&layers, 0, sizeof(layers)); | 408 | memset(&layers, 0, sizeof(layers)); |
| @@ -755,7 +757,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj, | |||
| 755 | { | 757 | { |
| 756 | struct pdcspath_entry *pathentry; | 758 | struct pdcspath_entry *pathentry; |
| 757 | unsigned char flags; | 759 | unsigned char flags; |
| 758 | char in[count+1], *temp; | 760 | char in[8], *temp; |
| 759 | char c; | 761 | char c; |
| 760 | 762 | ||
| 761 | if (!capable(CAP_SYS_ADMIN)) | 763 | if (!capable(CAP_SYS_ADMIN)) |
| @@ -765,8 +767,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj, | |||
| 765 | return -EINVAL; | 767 | return -EINVAL; |
| 766 | 768 | ||
| 767 | /* We'll use a local copy of buf */ | 769 | /* We'll use a local copy of buf */ |
| 768 | memset(in, 0, count+1); | 770 | count = min_t(size_t, count, sizeof(in)-1); |
| 769 | strncpy(in, buf, count); | 771 | strncpy(in, buf, count); |
| 772 | in[count] = '\0'; | ||
| 770 | 773 | ||
| 771 | /* Current flags are stored in primary boot path entry */ | 774 | /* Current flags are stored in primary boot path entry */ |
| 772 | pathentry = &pdcspath_entry_primary; | 775 | pathentry = &pdcspath_entry_primary; |
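All three pdc_stable hunks retire the char in[count+1] variable-length array, whose size was controlled by the sysfs writer, in favour of a fixed stack buffer with the copy clamped and explicitly terminated. The pattern on its own, with illustrative names:

#include <stddef.h>
#include <string.h>

/* Sketch: bounded copy of a count-sized, not-necessarily-terminated user
 * buffer into a fixed-size local, mirroring the min_t()/strncpy()/'\0'
 * sequence above. */
static void copy_arg(char *in, size_t in_size, const char *buf, size_t count)
{
        if (count > in_size - 1)
                count = in_size - 1;
        memcpy(in, buf, count);
        in[count] = '\0';
}

int main(void)
{
        char in[64];
        const char buf[] = "0/0/1/0.6.0\n";

        copy_arg(in, sizeof(in), buf, sizeof(buf) - 1);
        return 0;
}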
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 5e01ae39ec46..2a412fa3b338 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
| @@ -160,7 +160,7 @@ static void pcie_wait_cmd(struct controller *ctrl) | |||
| 160 | ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE) | 160 | ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE) |
| 161 | rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); | 161 | rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); |
| 162 | else | 162 | else |
| 163 | rc = pcie_poll_cmd(ctrl, timeout); | 163 | rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout)); |
| 164 | 164 | ||
| 165 | /* | 165 | /* |
| 166 | * Controllers with errata like Intel CF118 don't generate | 166 | * Controllers with errata like Intel CF118 don't generate |
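The pciehp change above is a units fix: wait_event_timeout() takes its timeout in jiffies, while the polling fallback expects milliseconds, so the same value must be converted before the second call. A simplified stand-in for the conversion (the real jiffies_to_msecs() also handles tick rates that do not divide 1000; HZ=250 is only an example):

#include <stdio.h>

#define HZ 250                                  /* illustrative tick rate */
#define JIFFIES_TO_MSECS(j) ((j) * (1000 / HZ)) /* valid only when HZ divides 1000 */

int main(void)
{
        unsigned long timeout = 4 * HZ;         /* four seconds, in jiffies */

        printf("%lu ms\n", JIFFIES_TO_MSECS(timeout));  /* prints 4000 ms */
        return 0;
}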
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e3cf8a2e6292..4170113cde61 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -775,7 +775,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) | |||
| 775 | /* Check if setup is sensible at all */ | 775 | /* Check if setup is sensible at all */ |
| 776 | if (!pass && | 776 | if (!pass && |
| 777 | (primary != bus->number || secondary <= bus->number || | 777 | (primary != bus->number || secondary <= bus->number || |
| 778 | secondary > subordinate || subordinate > bus->busn_res.end)) { | 778 | secondary > subordinate)) { |
| 779 | dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", | 779 | dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", |
| 780 | secondary, subordinate); | 780 | secondary, subordinate); |
| 781 | broken = 1; | 781 | broken = 1; |
| @@ -838,23 +838,18 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) | |||
| 838 | goto out; | 838 | goto out; |
| 839 | } | 839 | } |
| 840 | 840 | ||
| 841 | if (max >= bus->busn_res.end) { | ||
| 842 | dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n", | ||
| 843 | max, &bus->busn_res); | ||
| 844 | goto out; | ||
| 845 | } | ||
| 846 | |||
| 847 | /* Clear errors */ | 841 | /* Clear errors */ |
| 848 | pci_write_config_word(dev, PCI_STATUS, 0xffff); | 842 | pci_write_config_word(dev, PCI_STATUS, 0xffff); |
| 849 | 843 | ||
| 850 | /* The bus will already exist if we are rescanning */ | 844 | /* Prevent assigning a bus number that already exists. |
| 845 | * This can happen when a bridge is hot-plugged, so in | ||
| 846 | * this case we only re-scan this bus. */ | ||
| 851 | child = pci_find_bus(pci_domain_nr(bus), max+1); | 847 | child = pci_find_bus(pci_domain_nr(bus), max+1); |
| 852 | if (!child) { | 848 | if (!child) { |
| 853 | child = pci_add_new_bus(bus, dev, max+1); | 849 | child = pci_add_new_bus(bus, dev, max+1); |
| 854 | if (!child) | 850 | if (!child) |
| 855 | goto out; | 851 | goto out; |
| 856 | pci_bus_insert_busn_res(child, max+1, | 852 | pci_bus_insert_busn_res(child, max+1, 0xff); |
| 857 | bus->busn_res.end); | ||
| 858 | } | 853 | } |
| 859 | max++; | 854 | max++; |
| 860 | buses = (buses & 0xff000000) | 855 | buses = (buses & 0xff000000) |
| @@ -913,11 +908,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) | |||
| 913 | /* | 908 | /* |
| 914 | * Set the subordinate bus number to its real value. | 909 | * Set the subordinate bus number to its real value. |
| 915 | */ | 910 | */ |
| 916 | if (max > bus->busn_res.end) { | ||
| 917 | dev_warn(&dev->dev, "max busn %02x is outside %pR\n", | ||
| 918 | max, &bus->busn_res); | ||
| 919 | max = bus->busn_res.end; | ||
| 920 | } | ||
| 921 | pci_bus_update_busn_res_end(child, max); | 911 | pci_bus_update_busn_res_end(child, max); |
| 922 | pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); | 912 | pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); |
| 923 | } | 913 | } |
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c index 8225b89de810..c384fec6d173 100644 --- a/drivers/rtc/rtc-efi.c +++ b/drivers/rtc/rtc-efi.c | |||
| @@ -232,6 +232,7 @@ static struct platform_driver efi_rtc_driver = { | |||
| 232 | 232 | ||
| 233 | module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe); | 233 | module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe); |
| 234 | 234 | ||
| 235 | MODULE_ALIAS("platform:rtc-efi"); | ||
| 235 | MODULE_AUTHOR("dann frazier <dannf@hp.com>"); | 236 | MODULE_AUTHOR("dann frazier <dannf@hp.com>"); |
| 236 | MODULE_LICENSE("GPL"); | 237 | MODULE_LICENSE("GPL"); |
| 237 | MODULE_DESCRIPTION("EFI RTC driver"); | 238 | MODULE_DESCRIPTION("EFI RTC driver"); |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 18a3358eb1d4..bd85fb4978e0 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
| @@ -43,7 +43,7 @@ config SCSI_DMA | |||
| 43 | config SCSI_NETLINK | 43 | config SCSI_NETLINK |
| 44 | bool | 44 | bool |
| 45 | default n | 45 | default n |
| 46 | select NET | 46 | depends on NET |
| 47 | 47 | ||
| 48 | config SCSI_PROC_FS | 48 | config SCSI_PROC_FS |
| 49 | bool "legacy /proc/scsi/ support" | 49 | bool "legacy /proc/scsi/ support" |
| @@ -257,7 +257,7 @@ config SCSI_SPI_ATTRS | |||
| 257 | 257 | ||
| 258 | config SCSI_FC_ATTRS | 258 | config SCSI_FC_ATTRS |
| 259 | tristate "FiberChannel Transport Attributes" | 259 | tristate "FiberChannel Transport Attributes" |
| 260 | depends on SCSI | 260 | depends on SCSI && NET |
| 261 | select SCSI_NETLINK | 261 | select SCSI_NETLINK |
| 262 | help | 262 | help |
| 263 | If you wish to export transport-specific information about | 263 | If you wish to export transport-specific information about |
| @@ -585,28 +585,28 @@ config HYPERV_STORAGE | |||
| 585 | 585 | ||
| 586 | config LIBFC | 586 | config LIBFC |
| 587 | tristate "LibFC module" | 587 | tristate "LibFC module" |
| 588 | select SCSI_FC_ATTRS | 588 | depends on SCSI_FC_ATTRS |
| 589 | select CRC32 | 589 | select CRC32 |
| 590 | ---help--- | 590 | ---help--- |
| 591 | Fibre Channel library module | 591 | Fibre Channel library module |
| 592 | 592 | ||
| 593 | config LIBFCOE | 593 | config LIBFCOE |
| 594 | tristate "LibFCoE module" | 594 | tristate "LibFCoE module" |
| 595 | select LIBFC | 595 | depends on LIBFC |
| 596 | ---help--- | 596 | ---help--- |
| 597 | Library for Fibre Channel over Ethernet module | 597 | Library for Fibre Channel over Ethernet module |
| 598 | 598 | ||
| 599 | config FCOE | 599 | config FCOE |
| 600 | tristate "FCoE module" | 600 | tristate "FCoE module" |
| 601 | depends on PCI | 601 | depends on PCI |
| 602 | select LIBFCOE | 602 | depends on LIBFCOE |
| 603 | ---help--- | 603 | ---help--- |
| 604 | Fibre Channel over Ethernet module | 604 | Fibre Channel over Ethernet module |
| 605 | 605 | ||
| 606 | config FCOE_FNIC | 606 | config FCOE_FNIC |
| 607 | tristate "Cisco FNIC Driver" | 607 | tristate "Cisco FNIC Driver" |
| 608 | depends on PCI && X86 | 608 | depends on PCI && X86 |
| 609 | select LIBFCOE | 609 | depends on LIBFCOE |
| 610 | help | 610 | help |
| 611 | This is support for the Cisco PCI-Express FCoE HBA. | 611 | This is support for the Cisco PCI-Express FCoE HBA. |
| 612 | 612 | ||
| @@ -816,7 +816,7 @@ config SCSI_IBMVSCSI | |||
| 816 | config SCSI_IBMVFC | 816 | config SCSI_IBMVFC |
| 817 | tristate "IBM Virtual FC support" | 817 | tristate "IBM Virtual FC support" |
| 818 | depends on PPC_PSERIES && SCSI | 818 | depends on PPC_PSERIES && SCSI |
| 819 | select SCSI_FC_ATTRS | 819 | depends on SCSI_FC_ATTRS |
| 820 | help | 820 | help |
| 821 | This is the IBM POWER Virtual FC Client | 821 | This is the IBM POWER Virtual FC Client |
| 822 | 822 | ||
| @@ -1266,7 +1266,7 @@ source "drivers/scsi/qla4xxx/Kconfig" | |||
| 1266 | config SCSI_LPFC | 1266 | config SCSI_LPFC |
| 1267 | tristate "Emulex LightPulse Fibre Channel Support" | 1267 | tristate "Emulex LightPulse Fibre Channel Support" |
| 1268 | depends on PCI && SCSI | 1268 | depends on PCI && SCSI |
| 1269 | select SCSI_FC_ATTRS | 1269 | depends on SCSI_FC_ATTRS |
| 1270 | select CRC_T10DIF | 1270 | select CRC_T10DIF |
| 1271 | help | 1271 | help |
| 1272 | This lpfc driver supports the Emulex LightPulse | 1272 | This lpfc driver supports the Emulex LightPulse |
| @@ -1676,7 +1676,7 @@ config SCSI_SUNESP | |||
| 1676 | config ZFCP | 1676 | config ZFCP |
| 1677 | tristate "FCP host bus adapter driver for IBM eServer zSeries" | 1677 | tristate "FCP host bus adapter driver for IBM eServer zSeries" |
| 1678 | depends on S390 && QDIO && SCSI | 1678 | depends on S390 && QDIO && SCSI |
| 1679 | select SCSI_FC_ATTRS | 1679 | depends on SCSI_FC_ATTRS |
| 1680 | help | 1680 | help |
| 1681 | If you want to access SCSI devices attached to your IBM eServer | 1681 | If you want to access SCSI devices attached to your IBM eServer |
| 1682 | zSeries by means of Fibre Channel interfaces say Y. | 1682 | zSeries by means of Fibre Channel interfaces say Y. |
| @@ -1704,7 +1704,7 @@ config SCSI_PM8001 | |||
| 1704 | config SCSI_BFA_FC | 1704 | config SCSI_BFA_FC |
| 1705 | tristate "Brocade BFA Fibre Channel Support" | 1705 | tristate "Brocade BFA Fibre Channel Support" |
| 1706 | depends on PCI && SCSI | 1706 | depends on PCI && SCSI |
| 1707 | select SCSI_FC_ATTRS | 1707 | depends on SCSI_FC_ATTRS |
| 1708 | help | 1708 | help |
| 1709 | This bfa driver supports all Brocade PCIe FC/FCOE host adapters. | 1709 | This bfa driver supports all Brocade PCIe FC/FCOE host adapters. |
| 1710 | 1710 | ||
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig index f245d543d7b1..097882882649 100644 --- a/drivers/scsi/bnx2fc/Kconfig +++ b/drivers/scsi/bnx2fc/Kconfig | |||
| @@ -1,11 +1,12 @@ | |||
| 1 | config SCSI_BNX2X_FCOE | 1 | config SCSI_BNX2X_FCOE |
| 2 | tristate "QLogic NetXtreme II FCoE support" | 2 | tristate "QLogic NetXtreme II FCoE support" |
| 3 | depends on PCI | 3 | depends on PCI |
| 4 | depends on (IPV6 || IPV6=n) | ||
| 5 | depends on LIBFC | ||
| 6 | depends on LIBFCOE | ||
| 4 | select NETDEVICES | 7 | select NETDEVICES |
| 5 | select ETHERNET | 8 | select ETHERNET |
| 6 | select NET_VENDOR_BROADCOM | 9 | select NET_VENDOR_BROADCOM |
| 7 | select LIBFC | ||
| 8 | select LIBFCOE | ||
| 9 | select CNIC | 10 | select CNIC |
| 10 | ---help--- | 11 | ---help--- |
| 11 | This driver supports FCoE offload for the QLogic NetXtreme II | 12 | This driver supports FCoE offload for the QLogic NetXtreme II |
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig index 44ce54e536e5..ba30ff86d581 100644 --- a/drivers/scsi/bnx2i/Kconfig +++ b/drivers/scsi/bnx2i/Kconfig | |||
| @@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI | |||
| 2 | tristate "QLogic NetXtreme II iSCSI support" | 2 | tristate "QLogic NetXtreme II iSCSI support" |
| 3 | depends on NET | 3 | depends on NET |
| 4 | depends on PCI | 4 | depends on PCI |
| 5 | depends on (IPV6 || IPV6=n) | ||
| 5 | select SCSI_ISCSI_ATTRS | 6 | select SCSI_ISCSI_ATTRS |
| 6 | select NETDEVICES | 7 | select NETDEVICES |
| 7 | select ETHERNET | 8 | select ETHERNET |
diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig index 4d03b032aa10..7c7e5085968b 100644 --- a/drivers/scsi/csiostor/Kconfig +++ b/drivers/scsi/csiostor/Kconfig | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | config SCSI_CHELSIO_FCOE | 1 | config SCSI_CHELSIO_FCOE |
| 2 | tristate "Chelsio Communications FCoE support" | 2 | tristate "Chelsio Communications FCoE support" |
| 3 | depends on PCI && SCSI | 3 | depends on PCI && SCSI |
| 4 | select SCSI_FC_ATTRS | 4 | depends on SCSI_FC_ATTRS |
| 5 | select FW_LOADER | 5 | select FW_LOADER |
| 6 | help | 6 | help |
| 7 | This driver supports FCoE Offload functionality over | 7 | This driver supports FCoE Offload functionality over |
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 23d607218ae8..113e6c9826a1 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | config SCSI_QLA_FC | 1 | config SCSI_QLA_FC |
| 2 | tristate "QLogic QLA2XXX Fibre Channel Support" | 2 | tristate "QLogic QLA2XXX Fibre Channel Support" |
| 3 | depends on PCI && SCSI | 3 | depends on PCI && SCSI |
| 4 | select SCSI_FC_ATTRS | 4 | depends on SCSI_FC_ATTRS |
| 5 | select FW_LOADER | 5 | select FW_LOADER |
| 6 | ---help--- | 6 | ---help--- |
| 7 | This qla2xxx driver supports all QLogic Fibre Channel | 7 | This qla2xxx driver supports all QLogic Fibre Channel |
| @@ -31,7 +31,7 @@ config SCSI_QLA_FC | |||
| 31 | config TCM_QLA2XXX | 31 | config TCM_QLA2XXX |
| 32 | tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" | 32 | tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" |
| 33 | depends on SCSI_QLA_FC && TARGET_CORE | 33 | depends on SCSI_QLA_FC && TARGET_CORE |
| 34 | select LIBFC | 34 | depends on LIBFC |
| 35 | select BTREE | 35 | select BTREE |
| 36 | default n | 36 | default n |
| 37 | ---help--- | 37 | ---help--- |
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c index 447458e696a9..7e1f120f2b32 100644 --- a/drivers/soc/qcom/qcom_gsbi.c +++ b/drivers/soc/qcom/qcom_gsbi.c | |||
| @@ -22,44 +22,63 @@ | |||
| 22 | #define GSBI_CTRL_REG 0x0000 | 22 | #define GSBI_CTRL_REG 0x0000 |
| 23 | #define GSBI_PROTOCOL_SHIFT 4 | 23 | #define GSBI_PROTOCOL_SHIFT 4 |
| 24 | 24 | ||
| 25 | struct gsbi_info { | ||
| 26 | struct clk *hclk; | ||
| 27 | u32 mode; | ||
| 28 | u32 crci; | ||
| 29 | }; | ||
| 30 | |||
| 25 | static int gsbi_probe(struct platform_device *pdev) | 31 | static int gsbi_probe(struct platform_device *pdev) |
| 26 | { | 32 | { |
| 27 | struct device_node *node = pdev->dev.of_node; | 33 | struct device_node *node = pdev->dev.of_node; |
| 28 | struct resource *res; | 34 | struct resource *res; |
| 29 | void __iomem *base; | 35 | void __iomem *base; |
| 30 | struct clk *hclk; | 36 | struct gsbi_info *gsbi; |
| 31 | u32 mode, crci = 0; | 37 | |
| 38 | gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL); | ||
| 39 | |||
| 40 | if (!gsbi) | ||
| 41 | return -ENOMEM; | ||
| 32 | 42 | ||
| 33 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 43 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 34 | base = devm_ioremap_resource(&pdev->dev, res); | 44 | base = devm_ioremap_resource(&pdev->dev, res); |
| 35 | if (IS_ERR(base)) | 45 | if (IS_ERR(base)) |
| 36 | return PTR_ERR(base); | 46 | return PTR_ERR(base); |
| 37 | 47 | ||
| 38 | if (of_property_read_u32(node, "qcom,mode", &mode)) { | 48 | if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) { |
| 39 | dev_err(&pdev->dev, "missing mode configuration\n"); | 49 | dev_err(&pdev->dev, "missing mode configuration\n"); |
| 40 | return -EINVAL; | 50 | return -EINVAL; |
| 41 | } | 51 | } |
| 42 | 52 | ||
| 43 | /* not required, so default to 0 if not present */ | 53 | /* not required, so default to 0 if not present */ |
| 44 | of_property_read_u32(node, "qcom,crci", &crci); | 54 | of_property_read_u32(node, "qcom,crci", &gsbi->crci); |
| 45 | 55 | ||
| 46 | dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci); | 56 | dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", |
| 57 | gsbi->mode, gsbi->crci); | ||
| 58 | gsbi->hclk = devm_clk_get(&pdev->dev, "iface"); | ||
| 59 | if (IS_ERR(gsbi->hclk)) | ||
| 60 | return PTR_ERR(gsbi->hclk); | ||
| 47 | 61 | ||
| 48 | hclk = devm_clk_get(&pdev->dev, "iface"); | 62 | clk_prepare_enable(gsbi->hclk); |
| 49 | if (IS_ERR(hclk)) | ||
| 50 | return PTR_ERR(hclk); | ||
| 51 | 63 | ||
| 52 | clk_prepare_enable(hclk); | 64 | writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci, |
| 53 | |||
| 54 | writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci, | ||
| 55 | base + GSBI_CTRL_REG); | 65 | base + GSBI_CTRL_REG); |
| 56 | 66 | ||
| 57 | /* make sure the gsbi control write is not reordered */ | 67 | /* make sure the gsbi control write is not reordered */ |
| 58 | wmb(); | 68 | wmb(); |
| 59 | 69 | ||
| 60 | clk_disable_unprepare(hclk); | 70 | platform_set_drvdata(pdev, gsbi); |
| 71 | |||
| 72 | return of_platform_populate(node, NULL, NULL, &pdev->dev); | ||
| 73 | } | ||
| 74 | |||
| 75 | static int gsbi_remove(struct platform_device *pdev) | ||
| 76 | { | ||
| 77 | struct gsbi_info *gsbi = platform_get_drvdata(pdev); | ||
| 78 | |||
| 79 | clk_disable_unprepare(gsbi->hclk); | ||
| 61 | 80 | ||
| 62 | return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); | 81 | return 0; |
| 63 | } | 82 | } |
| 64 | 83 | ||
| 65 | static const struct of_device_id gsbi_dt_match[] = { | 84 | static const struct of_device_id gsbi_dt_match[] = { |
| @@ -76,6 +95,7 @@ static struct platform_driver gsbi_driver = { | |||
| 76 | .of_match_table = gsbi_dt_match, | 95 | .of_match_table = gsbi_dt_match, |
| 77 | }, | 96 | }, |
| 78 | .probe = gsbi_probe, | 97 | .probe = gsbi_probe, |
| 98 | .remove = gsbi_remove, | ||
| 79 | }; | 99 | }; |
| 80 | 100 | ||
| 81 | module_platform_driver(gsbi_driver); | 101 | module_platform_driver(gsbi_driver); |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 0734fbe5b651..562f686b4cba 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
| @@ -713,14 +713,14 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, | |||
| 713 | list_add(&group->next, &domain->group_list); | 713 | list_add(&group->next, &domain->group_list); |
| 714 | 714 | ||
| 715 | if (!allow_unsafe_interrupts && | 715 | if (!allow_unsafe_interrupts && |
| 716 | !iommu_domain_has_cap(domain->domain, IOMMU_CAP_INTR_REMAP)) { | 716 | !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) { |
| 717 | pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", | 717 | pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", |
| 718 | __func__); | 718 | __func__); |
| 719 | ret = -EPERM; | 719 | ret = -EPERM; |
| 720 | goto out_detach; | 720 | goto out_detach; |
| 721 | } | 721 | } |
| 722 | 722 | ||
| 723 | if (iommu_domain_has_cap(domain->domain, IOMMU_CAP_CACHE_COHERENCY)) | 723 | if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY)) |
| 724 | domain->prot |= IOMMU_CACHE; | 724 | domain->prot |= IOMMU_CACHE; |
| 725 | 725 | ||
| 726 | /* | 726 | /* |
diff --git a/fs/buffer.c b/fs/buffer.c index 8f05111bbb8b..3588a80854b2 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
| @@ -1022,7 +1022,8 @@ grow_dev_page(struct block_device *bdev, sector_t block, | |||
| 1022 | bh = page_buffers(page); | 1022 | bh = page_buffers(page); |
| 1023 | if (bh->b_size == size) { | 1023 | if (bh->b_size == size) { |
| 1024 | end_block = init_page_buffers(page, bdev, | 1024 | end_block = init_page_buffers(page, bdev, |
| 1025 | index << sizebits, size); | 1025 | (sector_t)index << sizebits, |
| 1026 | size); | ||
| 1026 | goto done; | 1027 | goto done; |
| 1027 | } | 1028 | } |
| 1028 | if (!try_to_free_buffers(page)) | 1029 | if (!try_to_free_buffers(page)) |
| @@ -1043,7 +1044,8 @@ grow_dev_page(struct block_device *bdev, sector_t block, | |||
| 1043 | */ | 1044 | */ |
| 1044 | spin_lock(&inode->i_mapping->private_lock); | 1045 | spin_lock(&inode->i_mapping->private_lock); |
| 1045 | link_dev_buffers(page, bh); | 1046 | link_dev_buffers(page, bh); |
| 1046 | end_block = init_page_buffers(page, bdev, index << sizebits, size); | 1047 | end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits, |
| 1048 | size); | ||
| 1047 | spin_unlock(&inode->i_mapping->private_lock); | 1049 | spin_unlock(&inode->i_mapping->private_lock); |
| 1048 | done: | 1050 | done: |
| 1049 | ret = (block < end_block) ? 1 : -ENXIO; | 1051 | ret = (block < end_block) ? 1 : -ENXIO; |
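The buffer.c hunks widen page->index before the shift: on 32-bit kernels the page index is a 32-bit unsigned long, so index << sizebits wraps on very large block devices and init_page_buffers() is handed the wrong starting block. A small demo of casting after versus before the shift (sizebits = 3, i.e. eight 512-byte blocks per 4 KiB page):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int index = 0x30000000;        /* page index on a multi-TB device */
        unsigned int sizebits = 3;
        uint64_t wrong = (uint64_t)(index << sizebits); /* shifted in 32 bits, wraps */
        uint64_t right = (uint64_t)index << sizebits;   /* widened first */

        /* prints wrong=0x80000000 right=0x180000000 */
        printf("wrong=%#llx right=%#llx\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}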
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c index d749731dc0ee..fbb08e97438d 100644 --- a/fs/cachefiles/bind.c +++ b/fs/cachefiles/bind.c | |||
| @@ -50,18 +50,18 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) | |||
| 50 | cache->brun_percent < 100); | 50 | cache->brun_percent < 100); |
| 51 | 51 | ||
| 52 | if (*args) { | 52 | if (*args) { |
| 53 | pr_err("'bind' command doesn't take an argument"); | 53 | pr_err("'bind' command doesn't take an argument\n"); |
| 54 | return -EINVAL; | 54 | return -EINVAL; |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | if (!cache->rootdirname) { | 57 | if (!cache->rootdirname) { |
| 58 | pr_err("No cache directory specified"); | 58 | pr_err("No cache directory specified\n"); |
| 59 | return -EINVAL; | 59 | return -EINVAL; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | /* don't permit already bound caches to be re-bound */ | 62 | /* don't permit already bound caches to be re-bound */ |
| 63 | if (test_bit(CACHEFILES_READY, &cache->flags)) { | 63 | if (test_bit(CACHEFILES_READY, &cache->flags)) { |
| 64 | pr_err("Cache already bound"); | 64 | pr_err("Cache already bound\n"); |
| 65 | return -EBUSY; | 65 | return -EBUSY; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| @@ -248,7 +248,7 @@ error_open_root: | |||
| 248 | kmem_cache_free(cachefiles_object_jar, fsdef); | 248 | kmem_cache_free(cachefiles_object_jar, fsdef); |
| 249 | error_root_object: | 249 | error_root_object: |
| 250 | cachefiles_end_secure(cache, saved_cred); | 250 | cachefiles_end_secure(cache, saved_cred); |
| 251 | pr_err("Failed to register: %d", ret); | 251 | pr_err("Failed to register: %d\n", ret); |
| 252 | return ret; | 252 | return ret; |
| 253 | } | 253 | } |
| 254 | 254 | ||
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index b078d3081d6c..ce1b115dcc28 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c | |||
| @@ -315,7 +315,7 @@ static unsigned int cachefiles_daemon_poll(struct file *file, | |||
| 315 | static int cachefiles_daemon_range_error(struct cachefiles_cache *cache, | 315 | static int cachefiles_daemon_range_error(struct cachefiles_cache *cache, |
| 316 | char *args) | 316 | char *args) |
| 317 | { | 317 | { |
| 318 | pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%"); | 318 | pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n"); |
| 319 | 319 | ||
| 320 | return -EINVAL; | 320 | return -EINVAL; |
| 321 | } | 321 | } |
| @@ -475,12 +475,12 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args) | |||
| 475 | _enter(",%s", args); | 475 | _enter(",%s", args); |
| 476 | 476 | ||
| 477 | if (!*args) { | 477 | if (!*args) { |
| 478 | pr_err("Empty directory specified"); | 478 | pr_err("Empty directory specified\n"); |
| 479 | return -EINVAL; | 479 | return -EINVAL; |
| 480 | } | 480 | } |
| 481 | 481 | ||
| 482 | if (cache->rootdirname) { | 482 | if (cache->rootdirname) { |
| 483 | pr_err("Second cache directory specified"); | 483 | pr_err("Second cache directory specified\n"); |
| 484 | return -EEXIST; | 484 | return -EEXIST; |
| 485 | } | 485 | } |
| 486 | 486 | ||
| @@ -503,12 +503,12 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args) | |||
| 503 | _enter(",%s", args); | 503 | _enter(",%s", args); |
| 504 | 504 | ||
| 505 | if (!*args) { | 505 | if (!*args) { |
| 506 | pr_err("Empty security context specified"); | 506 | pr_err("Empty security context specified\n"); |
| 507 | return -EINVAL; | 507 | return -EINVAL; |
| 508 | } | 508 | } |
| 509 | 509 | ||
| 510 | if (cache->secctx) { | 510 | if (cache->secctx) { |
| 511 | pr_err("Second security context specified"); | 511 | pr_err("Second security context specified\n"); |
| 512 | return -EINVAL; | 512 | return -EINVAL; |
| 513 | } | 513 | } |
| 514 | 514 | ||
| @@ -531,7 +531,7 @@ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args) | |||
| 531 | _enter(",%s", args); | 531 | _enter(",%s", args); |
| 532 | 532 | ||
| 533 | if (!*args) { | 533 | if (!*args) { |
| 534 | pr_err("Empty tag specified"); | 534 | pr_err("Empty tag specified\n"); |
| 535 | return -EINVAL; | 535 | return -EINVAL; |
| 536 | } | 536 | } |
| 537 | 537 | ||
| @@ -562,12 +562,12 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) | |||
| 562 | goto inval; | 562 | goto inval; |
| 563 | 563 | ||
| 564 | if (!test_bit(CACHEFILES_READY, &cache->flags)) { | 564 | if (!test_bit(CACHEFILES_READY, &cache->flags)) { |
| 565 | pr_err("cull applied to unready cache"); | 565 | pr_err("cull applied to unready cache\n"); |
| 566 | return -EIO; | 566 | return -EIO; |
| 567 | } | 567 | } |
| 568 | 568 | ||
| 569 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { | 569 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { |
| 570 | pr_err("cull applied to dead cache"); | 570 | pr_err("cull applied to dead cache\n"); |
| 571 | return -EIO; | 571 | return -EIO; |
| 572 | } | 572 | } |
| 573 | 573 | ||
| @@ -587,11 +587,11 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) | |||
| 587 | 587 | ||
| 588 | notdir: | 588 | notdir: |
| 589 | path_put(&path); | 589 | path_put(&path); |
| 590 | pr_err("cull command requires dirfd to be a directory"); | 590 | pr_err("cull command requires dirfd to be a directory\n"); |
| 591 | return -ENOTDIR; | 591 | return -ENOTDIR; |
| 592 | 592 | ||
| 593 | inval: | 593 | inval: |
| 594 | pr_err("cull command requires dirfd and filename"); | 594 | pr_err("cull command requires dirfd and filename\n"); |
| 595 | return -EINVAL; | 595 | return -EINVAL; |
| 596 | } | 596 | } |
| 597 | 597 | ||
| @@ -614,7 +614,7 @@ static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args) | |||
| 614 | return 0; | 614 | return 0; |
| 615 | 615 | ||
| 616 | inval: | 616 | inval: |
| 617 | pr_err("debug command requires mask"); | 617 | pr_err("debug command requires mask\n"); |
| 618 | return -EINVAL; | 618 | return -EINVAL; |
| 619 | } | 619 | } |
| 620 | 620 | ||
| @@ -634,12 +634,12 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) | |||
| 634 | goto inval; | 634 | goto inval; |
| 635 | 635 | ||
| 636 | if (!test_bit(CACHEFILES_READY, &cache->flags)) { | 636 | if (!test_bit(CACHEFILES_READY, &cache->flags)) { |
| 637 | pr_err("inuse applied to unready cache"); | 637 | pr_err("inuse applied to unready cache\n"); |
| 638 | return -EIO; | 638 | return -EIO; |
| 639 | } | 639 | } |
| 640 | 640 | ||
| 641 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { | 641 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { |
| 642 | pr_err("inuse applied to dead cache"); | 642 | pr_err("inuse applied to dead cache\n"); |
| 643 | return -EIO; | 643 | return -EIO; |
| 644 | } | 644 | } |
| 645 | 645 | ||
| @@ -659,11 +659,11 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) | |||
| 659 | 659 | ||
| 660 | notdir: | 660 | notdir: |
| 661 | path_put(&path); | 661 | path_put(&path); |
| 662 | pr_err("inuse command requires dirfd to be a directory"); | 662 | pr_err("inuse command requires dirfd to be a directory\n"); |
| 663 | return -ENOTDIR; | 663 | return -ENOTDIR; |
| 664 | 664 | ||
| 665 | inval: | 665 | inval: |
| 666 | pr_err("inuse command requires dirfd and filename"); | 666 | pr_err("inuse command requires dirfd and filename\n"); |
| 667 | return -EINVAL; | 667 | return -EINVAL; |
| 668 | } | 668 | } |
| 669 | 669 | ||
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 3d50998abf57..8c52472d2efa 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h | |||
| @@ -255,7 +255,7 @@ extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache, | |||
| 255 | 255 | ||
| 256 | #define cachefiles_io_error(___cache, FMT, ...) \ | 256 | #define cachefiles_io_error(___cache, FMT, ...) \ |
| 257 | do { \ | 257 | do { \ |
| 258 | pr_err("I/O Error: " FMT, ##__VA_ARGS__); \ | 258 | pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \ |
| 259 | fscache_io_error(&(___cache)->cache); \ | 259 | fscache_io_error(&(___cache)->cache); \ |
| 260 | set_bit(CACHEFILES_DEAD, &(___cache)->flags); \ | 260 | set_bit(CACHEFILES_DEAD, &(___cache)->flags); \ |
| 261 | } while (0) | 261 | } while (0) |
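The cachefiles changes in this series consistently append a newline to pr_err() format strings so each message is emitted as its own log line; in the cachefiles_io_error() macro above the newline is attached by compile-time string-literal concatenation. A user-space sketch of the same macro trick, with fprintf() standing in for pr_err() (##__VA_ARGS__ is a GNU extension, as in the kernel):

#include <stdio.h>

/* "I/O Error: " FMT "\n" becomes a single format string at compile time. */
#define log_io_error(FMT, ...) \
        fprintf(stderr, "I/O Error: " FMT "\n", ##__VA_ARGS__)

int main(void)
{
        log_io_error("Lookup failed");
        log_io_error("Internal error: %d", -5);
        return 0;
}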
diff --git a/fs/cachefiles/main.c b/fs/cachefiles/main.c index 180edfb45f66..711f13d8c2de 100644 --- a/fs/cachefiles/main.c +++ b/fs/cachefiles/main.c | |||
| @@ -84,7 +84,7 @@ error_proc: | |||
| 84 | error_object_jar: | 84 | error_object_jar: |
| 85 | misc_deregister(&cachefiles_dev); | 85 | misc_deregister(&cachefiles_dev); |
| 86 | error_dev: | 86 | error_dev: |
| 87 | pr_err("failed to register: %d", ret); | 87 | pr_err("failed to register: %d\n", ret); |
| 88 | return ret; | 88 | return ret; |
| 89 | } | 89 | } |
| 90 | 90 | ||
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 5bf2b41e66d3..dad7d9542a24 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c | |||
| @@ -543,7 +543,7 @@ lookup_again: | |||
| 543 | next, next->d_inode, next->d_inode->i_ino); | 543 | next, next->d_inode, next->d_inode->i_ino); |
| 544 | 544 | ||
| 545 | } else if (!S_ISDIR(next->d_inode->i_mode)) { | 545 | } else if (!S_ISDIR(next->d_inode->i_mode)) { |
| 546 | pr_err("inode %lu is not a directory", | 546 | pr_err("inode %lu is not a directory\n", |
| 547 | next->d_inode->i_ino); | 547 | next->d_inode->i_ino); |
| 548 | ret = -ENOBUFS; | 548 | ret = -ENOBUFS; |
| 549 | goto error; | 549 | goto error; |
| @@ -574,7 +574,7 @@ lookup_again: | |||
| 574 | } else if (!S_ISDIR(next->d_inode->i_mode) && | 574 | } else if (!S_ISDIR(next->d_inode->i_mode) && |
| 575 | !S_ISREG(next->d_inode->i_mode) | 575 | !S_ISREG(next->d_inode->i_mode) |
| 576 | ) { | 576 | ) { |
| 577 | pr_err("inode %lu is not a file or directory", | 577 | pr_err("inode %lu is not a file or directory\n", |
| 578 | next->d_inode->i_ino); | 578 | next->d_inode->i_ino); |
| 579 | ret = -ENOBUFS; | 579 | ret = -ENOBUFS; |
| 580 | goto error; | 580 | goto error; |
| @@ -768,7 +768,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, | |||
| 768 | ASSERT(subdir->d_inode); | 768 | ASSERT(subdir->d_inode); |
| 769 | 769 | ||
| 770 | if (!S_ISDIR(subdir->d_inode->i_mode)) { | 770 | if (!S_ISDIR(subdir->d_inode->i_mode)) { |
| 771 | pr_err("%s is not a directory", dirname); | 771 | pr_err("%s is not a directory\n", dirname); |
| 772 | ret = -EIO; | 772 | ret = -EIO; |
| 773 | goto check_error; | 773 | goto check_error; |
| 774 | } | 774 | } |
| @@ -779,7 +779,8 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, | |||
| 779 | !subdir->d_inode->i_op->lookup || | 779 | !subdir->d_inode->i_op->lookup || |
| 780 | !subdir->d_inode->i_op->mkdir || | 780 | !subdir->d_inode->i_op->mkdir || |
| 781 | !subdir->d_inode->i_op->create || | 781 | !subdir->d_inode->i_op->create || |
| 782 | !subdir->d_inode->i_op->rename || | 782 | (!subdir->d_inode->i_op->rename && |
| 783 | !subdir->d_inode->i_op->rename2) || | ||
| 783 | !subdir->d_inode->i_op->rmdir || | 784 | !subdir->d_inode->i_op->rmdir || |
| 784 | !subdir->d_inode->i_op->unlink) | 785 | !subdir->d_inode->i_op->unlink) |
| 785 | goto check_error; | 786 | goto check_error; |
| @@ -795,13 +796,13 @@ check_error: | |||
| 795 | mkdir_error: | 796 | mkdir_error: |
| 796 | mutex_unlock(&dir->d_inode->i_mutex); | 797 | mutex_unlock(&dir->d_inode->i_mutex); |
| 797 | dput(subdir); | 798 | dput(subdir); |
| 798 | pr_err("mkdir %s failed with error %d", dirname, ret); | 799 | pr_err("mkdir %s failed with error %d\n", dirname, ret); |
| 799 | return ERR_PTR(ret); | 800 | return ERR_PTR(ret); |
| 800 | 801 | ||
| 801 | lookup_error: | 802 | lookup_error: |
| 802 | mutex_unlock(&dir->d_inode->i_mutex); | 803 | mutex_unlock(&dir->d_inode->i_mutex); |
| 803 | ret = PTR_ERR(subdir); | 804 | ret = PTR_ERR(subdir); |
| 804 | pr_err("Lookup %s failed with error %d", dirname, ret); | 805 | pr_err("Lookup %s failed with error %d\n", dirname, ret); |
| 805 | return ERR_PTR(ret); | 806 | return ERR_PTR(ret); |
| 806 | 807 | ||
| 807 | nomem_d_alloc: | 808 | nomem_d_alloc: |
| @@ -891,7 +892,7 @@ lookup_error: | |||
| 891 | if (ret == -EIO) { | 892 | if (ret == -EIO) { |
| 892 | cachefiles_io_error(cache, "Lookup failed"); | 893 | cachefiles_io_error(cache, "Lookup failed"); |
| 893 | } else if (ret != -ENOMEM) { | 894 | } else if (ret != -ENOMEM) { |
| 894 | pr_err("Internal error: %d", ret); | 895 | pr_err("Internal error: %d\n", ret); |
| 895 | ret = -EIO; | 896 | ret = -EIO; |
| 896 | } | 897 | } |
| 897 | 898 | ||
| @@ -950,7 +951,7 @@ error: | |||
| 950 | } | 951 | } |
| 951 | 952 | ||
| 952 | if (ret != -ENOMEM) { | 953 | if (ret != -ENOMEM) { |
| 953 | pr_err("Internal error: %d", ret); | 954 | pr_err("Internal error: %d\n", ret); |
| 954 | ret = -EIO; | 955 | ret = -EIO; |
| 955 | } | 956 | } |
| 956 | 957 | ||
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 4b1fb5ca65b8..25e745b8eb1b 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c | |||
| @@ -151,7 +151,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op) | |||
| 151 | struct cachefiles_one_read *monitor; | 151 | struct cachefiles_one_read *monitor; |
| 152 | struct cachefiles_object *object; | 152 | struct cachefiles_object *object; |
| 153 | struct fscache_retrieval *op; | 153 | struct fscache_retrieval *op; |
| 154 | struct pagevec pagevec; | ||
| 155 | int error, max; | 154 | int error, max; |
| 156 | 155 | ||
| 157 | op = container_of(_op, struct fscache_retrieval, op); | 156 | op = container_of(_op, struct fscache_retrieval, op); |
| @@ -160,8 +159,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op) | |||
| 160 | 159 | ||
| 161 | _enter("{ino=%lu}", object->backer->d_inode->i_ino); | 160 | _enter("{ino=%lu}", object->backer->d_inode->i_ino); |
| 162 | 161 | ||
| 163 | pagevec_init(&pagevec, 0); | ||
| 164 | |||
| 165 | max = 8; | 162 | max = 8; |
| 166 | spin_lock_irq(&object->work_lock); | 163 | spin_lock_irq(&object->work_lock); |
| 167 | 164 | ||
| @@ -396,7 +393,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, | |||
| 396 | { | 393 | { |
| 397 | struct cachefiles_object *object; | 394 | struct cachefiles_object *object; |
| 398 | struct cachefiles_cache *cache; | 395 | struct cachefiles_cache *cache; |
| 399 | struct pagevec pagevec; | ||
| 400 | struct inode *inode; | 396 | struct inode *inode; |
| 401 | sector_t block0, block; | 397 | sector_t block0, block; |
| 402 | unsigned shift; | 398 | unsigned shift; |
| @@ -427,8 +423,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, | |||
| 427 | op->op.flags |= FSCACHE_OP_ASYNC; | 423 | op->op.flags |= FSCACHE_OP_ASYNC; |
| 428 | op->op.processor = cachefiles_read_copier; | 424 | op->op.processor = cachefiles_read_copier; |
| 429 | 425 | ||
| 430 | pagevec_init(&pagevec, 0); | ||
| 431 | |||
| 432 | /* we assume the absence or presence of the first block is a good | 426 | /* we assume the absence or presence of the first block is a good |
| 433 | * enough indication for the page as a whole | 427 | * enough indication for the page as a whole |
| 434 | * - TODO: don't use bmap() for this as it is _not_ actually good | 428 | * - TODO: don't use bmap() for this as it is _not_ actually good |
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c index 1ad51ffbb275..acbc1f094fb1 100644 --- a/fs/cachefiles/xattr.c +++ b/fs/cachefiles/xattr.c | |||
| @@ -51,7 +51,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object) | |||
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | if (ret != -EEXIST) { | 53 | if (ret != -EEXIST) { |
| 54 | pr_err("Can't set xattr on %*.*s [%lu] (err %d)", | 54 | pr_err("Can't set xattr on %*.*s [%lu] (err %d)\n", |
| 55 | dentry->d_name.len, dentry->d_name.len, | 55 | dentry->d_name.len, dentry->d_name.len, |
| 56 | dentry->d_name.name, dentry->d_inode->i_ino, | 56 | dentry->d_name.name, dentry->d_inode->i_ino, |
| 57 | -ret); | 57 | -ret); |
| @@ -64,7 +64,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object) | |||
| 64 | if (ret == -ERANGE) | 64 | if (ret == -ERANGE) |
| 65 | goto bad_type_length; | 65 | goto bad_type_length; |
| 66 | 66 | ||
| 67 | pr_err("Can't read xattr on %*.*s [%lu] (err %d)", | 67 | pr_err("Can't read xattr on %*.*s [%lu] (err %d)\n", |
| 68 | dentry->d_name.len, dentry->d_name.len, | 68 | dentry->d_name.len, dentry->d_name.len, |
| 69 | dentry->d_name.name, dentry->d_inode->i_ino, | 69 | dentry->d_name.name, dentry->d_inode->i_ino, |
| 70 | -ret); | 70 | -ret); |
| @@ -85,14 +85,14 @@ error: | |||
| 85 | return ret; | 85 | return ret; |
| 86 | 86 | ||
| 87 | bad_type_length: | 87 | bad_type_length: |
| 88 | pr_err("Cache object %lu type xattr length incorrect", | 88 | pr_err("Cache object %lu type xattr length incorrect\n", |
| 89 | dentry->d_inode->i_ino); | 89 | dentry->d_inode->i_ino); |
| 90 | ret = -EIO; | 90 | ret = -EIO; |
| 91 | goto error; | 91 | goto error; |
| 92 | 92 | ||
| 93 | bad_type: | 93 | bad_type: |
| 94 | xtype[2] = 0; | 94 | xtype[2] = 0; |
| 95 | pr_err("Cache object %*.*s [%lu] type %s not %s", | 95 | pr_err("Cache object %*.*s [%lu] type %s not %s\n", |
| 96 | dentry->d_name.len, dentry->d_name.len, | 96 | dentry->d_name.len, dentry->d_name.len, |
| 97 | dentry->d_name.name, dentry->d_inode->i_ino, | 97 | dentry->d_name.name, dentry->d_inode->i_ino, |
| 98 | xtype, type); | 98 | xtype, type); |
| @@ -293,7 +293,7 @@ error: | |||
| 293 | return ret; | 293 | return ret; |
| 294 | 294 | ||
| 295 | bad_type_length: | 295 | bad_type_length: |
| 296 | pr_err("Cache object %lu xattr length incorrect", | 296 | pr_err("Cache object %lu xattr length incorrect\n", |
| 297 | dentry->d_inode->i_ino); | 297 | dentry->d_inode->i_ino); |
| 298 | ret = -EIO; | 298 | ret = -EIO; |
| 299 | goto error; | 299 | goto error; |
diff --git a/fs/dcache.c b/fs/dcache.c index 7a5b51440afa..cb25a1a5e307 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -2372,7 +2372,8 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name) | |||
| 2372 | } | 2372 | } |
| 2373 | EXPORT_SYMBOL(dentry_update_name_case); | 2373 | EXPORT_SYMBOL(dentry_update_name_case); |
| 2374 | 2374 | ||
| 2375 | static void switch_names(struct dentry *dentry, struct dentry *target) | 2375 | static void switch_names(struct dentry *dentry, struct dentry *target, |
| 2376 | bool exchange) | ||
| 2376 | { | 2377 | { |
| 2377 | if (dname_external(target)) { | 2378 | if (dname_external(target)) { |
| 2378 | if (dname_external(dentry)) { | 2379 | if (dname_external(dentry)) { |
| @@ -2406,13 +2407,19 @@ static void switch_names(struct dentry *dentry, struct dentry *target) | |||
| 2406 | */ | 2407 | */ |
| 2407 | unsigned int i; | 2408 | unsigned int i; |
| 2408 | BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); | 2409 | BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); |
| 2410 | if (!exchange) { | ||
| 2411 | memcpy(dentry->d_iname, target->d_name.name, | ||
| 2412 | target->d_name.len + 1); | ||
| 2413 | dentry->d_name.hash_len = target->d_name.hash_len; | ||
| 2414 | return; | ||
| 2415 | } | ||
| 2409 | for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { | 2416 | for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { |
| 2410 | swap(((long *) &dentry->d_iname)[i], | 2417 | swap(((long *) &dentry->d_iname)[i], |
| 2411 | ((long *) &target->d_iname)[i]); | 2418 | ((long *) &target->d_iname)[i]); |
| 2412 | } | 2419 | } |
| 2413 | } | 2420 | } |
| 2414 | } | 2421 | } |
| 2415 | swap(dentry->d_name.len, target->d_name.len); | 2422 | swap(dentry->d_name.hash_len, target->d_name.hash_len); |
| 2416 | } | 2423 | } |
| 2417 | 2424 | ||
| 2418 | static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) | 2425 | static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) |
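The switch_names() rework above replaces the old per-field len/hash swaps with a single swap (or, in the non-exchange case, a single copy) of d_name.hash_len, which overlays the 32-bit hash and 32-bit length in one 64-bit word. An illustrative layout, not the kernel's exact struct qstr definition (the field order there depends on endianness):

#include <stdint.h>

struct name_key {
        union {
                struct {
                        uint32_t hash;
                        uint32_t len;
                };
                uint64_t hash_len;      /* hash and len viewed as one word */
        };
};

/* Swapping hash_len moves hash and len together, as the hunk above does. */
static void swap_keys(struct name_key *a, struct name_key *b)
{
        uint64_t tmp = a->hash_len;

        a->hash_len = b->hash_len;
        b->hash_len = tmp;
}

int main(void)
{
        struct name_key a = { .hash = 0x1234, .len = 5 };
        struct name_key b = { .hash = 0xabcd, .len = 9 };

        swap_keys(&a, &b);
        return a.len == 9 ? 0 : 1;
}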
| @@ -2442,25 +2449,29 @@ static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) | |||
| 2442 | } | 2449 | } |
| 2443 | } | 2450 | } |
| 2444 | 2451 | ||
| 2445 | static void dentry_unlock_parents_for_move(struct dentry *dentry, | 2452 | static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target) |
| 2446 | struct dentry *target) | ||
| 2447 | { | 2453 | { |
| 2448 | if (target->d_parent != dentry->d_parent) | 2454 | if (target->d_parent != dentry->d_parent) |
| 2449 | spin_unlock(&dentry->d_parent->d_lock); | 2455 | spin_unlock(&dentry->d_parent->d_lock); |
| 2450 | if (target->d_parent != target) | 2456 | if (target->d_parent != target) |
| 2451 | spin_unlock(&target->d_parent->d_lock); | 2457 | spin_unlock(&target->d_parent->d_lock); |
| 2458 | spin_unlock(&target->d_lock); | ||
| 2459 | spin_unlock(&dentry->d_lock); | ||
| 2452 | } | 2460 | } |
| 2453 | 2461 | ||
| 2454 | /* | 2462 | /* |
| 2455 | * When switching names, the actual string doesn't strictly have to | 2463 | * When switching names, the actual string doesn't strictly have to |
| 2456 | * be preserved in the target - because we're dropping the target | 2464 | * be preserved in the target - because we're dropping the target |
| 2457 | * anyway. As such, we can just do a simple memcpy() to copy over | 2465 | * anyway. As such, we can just do a simple memcpy() to copy over |
| 2458 | * the new name before we switch. | 2466 | * the new name before we switch, unless we are going to rehash |
| 2459 | * | 2467 | * it. Note that if we *do* unhash the target, we are not allowed |
| 2460 | * Note that we have to be a lot more careful about getting the hash | 2468 | * to rehash it without giving it a new name/hash key - whether |
| 2461 | * switched - we have to switch the hash value properly even if it | 2469 | * we swap or overwrite the names here, resulting name won't match |
| 2462 | * then no longer matches the actual (corrupted) string of the target. | 2470 | * the reality in filesystem; it's only there for d_path() purposes. |
| 2463 | * The hash value has to match the hash queue that the dentry is on.. | 2471 | * Note that all of this is happening under rename_lock, so the |
| 2472 | * any hash lookup seeing it in the middle of manipulations will | ||
| 2473 | * be discarded anyway. So we do not care what happens to the hash | ||
| 2474 | * key in that case. | ||
| 2464 | */ | 2475 | */ |
| 2465 | /* | 2476 | /* |
| 2466 | * __d_move - move a dentry | 2477 | * __d_move - move a dentry |
| @@ -2506,36 +2517,30 @@ static void __d_move(struct dentry *dentry, struct dentry *target, | |||
| 2506 | d_hash(dentry->d_parent, dentry->d_name.hash)); | 2517 | d_hash(dentry->d_parent, dentry->d_name.hash)); |
| 2507 | } | 2518 | } |
| 2508 | 2519 | ||
| 2509 | list_del(&dentry->d_u.d_child); | ||
| 2510 | list_del(&target->d_u.d_child); | ||
| 2511 | |||
| 2512 | /* Switch the names.. */ | 2520 | /* Switch the names.. */ |
| 2513 | switch_names(dentry, target); | 2521 | switch_names(dentry, target, exchange); |
| 2514 | swap(dentry->d_name.hash, target->d_name.hash); | ||
| 2515 | 2522 | ||
| 2516 | /* ... and switch the parents */ | 2523 | /* ... and switch them in the tree */ |
| 2517 | if (IS_ROOT(dentry)) { | 2524 | if (IS_ROOT(dentry)) { |
| 2525 | /* splicing a tree */ | ||
| 2518 | dentry->d_parent = target->d_parent; | 2526 | dentry->d_parent = target->d_parent; |
| 2519 | target->d_parent = target; | 2527 | target->d_parent = target; |
| 2520 | INIT_LIST_HEAD(&target->d_u.d_child); | 2528 | list_del_init(&target->d_u.d_child); |
| 2529 | list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); | ||
| 2521 | } else { | 2530 | } else { |
| 2531 | /* swapping two dentries */ | ||
| 2522 | swap(dentry->d_parent, target->d_parent); | 2532 | swap(dentry->d_parent, target->d_parent); |
| 2523 | 2533 | list_move(&target->d_u.d_child, &target->d_parent->d_subdirs); | |
| 2524 | /* And add them back to the (new) parent lists */ | 2534 | list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); |
| 2525 | list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); | 2535 | if (exchange) |
| 2536 | fsnotify_d_move(target); | ||
| 2537 | fsnotify_d_move(dentry); | ||
| 2526 | } | 2538 | } |
| 2527 | 2539 | ||
| 2528 | list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); | ||
| 2529 | |||
| 2530 | write_seqcount_end(&target->d_seq); | 2540 | write_seqcount_end(&target->d_seq); |
| 2531 | write_seqcount_end(&dentry->d_seq); | 2541 | write_seqcount_end(&dentry->d_seq); |
| 2532 | 2542 | ||
| 2533 | dentry_unlock_parents_for_move(dentry, target); | 2543 | dentry_unlock_for_move(dentry, target); |
| 2534 | if (exchange) | ||
| 2535 | fsnotify_d_move(target); | ||
| 2536 | spin_unlock(&target->d_lock); | ||
| 2537 | fsnotify_d_move(dentry); | ||
| 2538 | spin_unlock(&dentry->d_lock); | ||
| 2539 | } | 2544 | } |
| 2540 | 2545 | ||
| 2541 | /* | 2546 | /* |
| @@ -2633,45 +2638,6 @@ out_err: | |||
| 2633 | return ret; | 2638 | return ret; |
| 2634 | } | 2639 | } |
| 2635 | 2640 | ||
| 2636 | /* | ||
| 2637 | * Prepare an anonymous dentry for life in the superblock's dentry tree as a | ||
| 2638 | * named dentry in place of the dentry to be replaced. | ||
| 2639 | * returns with anon->d_lock held! | ||
| 2640 | */ | ||
| 2641 | static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) | ||
| 2642 | { | ||
| 2643 | struct dentry *dparent; | ||
| 2644 | |||
| 2645 | dentry_lock_for_move(anon, dentry); | ||
| 2646 | |||
| 2647 | write_seqcount_begin(&dentry->d_seq); | ||
| 2648 | write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED); | ||
| 2649 | |||
| 2650 | dparent = dentry->d_parent; | ||
| 2651 | |||
| 2652 | switch_names(dentry, anon); | ||
| 2653 | swap(dentry->d_name.hash, anon->d_name.hash); | ||
| 2654 | |||
| 2655 | dentry->d_parent = dentry; | ||
| 2656 | list_del_init(&dentry->d_u.d_child); | ||
| 2657 | anon->d_parent = dparent; | ||
| 2658 | if (likely(!d_unhashed(anon))) { | ||
| 2659 | hlist_bl_lock(&anon->d_sb->s_anon); | ||
| 2660 | __hlist_bl_del(&anon->d_hash); | ||
| 2661 | anon->d_hash.pprev = NULL; | ||
| 2662 | hlist_bl_unlock(&anon->d_sb->s_anon); | ||
| 2663 | } | ||
| 2664 | list_move(&anon->d_u.d_child, &dparent->d_subdirs); | ||
| 2665 | |||
| 2666 | write_seqcount_end(&dentry->d_seq); | ||
| 2667 | write_seqcount_end(&anon->d_seq); | ||
| 2668 | |||
| 2669 | dentry_unlock_parents_for_move(anon, dentry); | ||
| 2670 | spin_unlock(&dentry->d_lock); | ||
| 2671 | |||
| 2672 | /* anon->d_lock still locked, returns locked */ | ||
| 2673 | } | ||
| 2674 | |||
| 2675 | /** | 2641 | /** |
| 2676 | * d_splice_alias - splice a disconnected dentry into the tree if one exists | 2642 | * d_splice_alias - splice a disconnected dentry into the tree if one exists |
| 2677 | * @inode: the inode which may have a disconnected dentry | 2643 | * @inode: the inode which may have a disconnected dentry |
| @@ -2717,10 +2683,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) | |||
| 2717 | return ERR_PTR(-EIO); | 2683 | return ERR_PTR(-EIO); |
| 2718 | } | 2684 | } |
| 2719 | write_seqlock(&rename_lock); | 2685 | write_seqlock(&rename_lock); |
| 2720 | __d_materialise_dentry(dentry, new); | 2686 | __d_move(new, dentry, false); |
| 2721 | write_sequnlock(&rename_lock); | 2687 | write_sequnlock(&rename_lock); |
| 2722 | _d_rehash(new); | ||
| 2723 | spin_unlock(&new->d_lock); | ||
| 2724 | spin_unlock(&inode->i_lock); | 2688 | spin_unlock(&inode->i_lock); |
| 2725 | security_d_instantiate(new, inode); | 2689 | security_d_instantiate(new, inode); |
| 2726 | iput(inode); | 2690 | iput(inode); |
| @@ -2780,7 +2744,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
| 2780 | } else if (IS_ROOT(alias)) { | 2744 | } else if (IS_ROOT(alias)) { |
| 2781 | /* Is this an anonymous mountpoint that we | 2745 | /* Is this an anonymous mountpoint that we |
| 2782 | * could splice into our tree? */ | 2746 | * could splice into our tree? */ |
| 2783 | __d_materialise_dentry(dentry, alias); | 2747 | __d_move(alias, dentry, false); |
| 2784 | write_sequnlock(&rename_lock); | 2748 | write_sequnlock(&rename_lock); |
| 2785 | goto found; | 2749 | goto found; |
| 2786 | } else { | 2750 | } else { |
| @@ -2807,13 +2771,9 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
| 2807 | actual = __d_instantiate_unique(dentry, inode); | 2771 | actual = __d_instantiate_unique(dentry, inode); |
| 2808 | if (!actual) | 2772 | if (!actual) |
| 2809 | actual = dentry; | 2773 | actual = dentry; |
| 2810 | else | ||
| 2811 | BUG_ON(!d_unhashed(actual)); | ||
| 2812 | 2774 | ||
| 2813 | spin_lock(&actual->d_lock); | 2775 | d_rehash(actual); |
| 2814 | found: | 2776 | found: |
| 2815 | _d_rehash(actual); | ||
| 2816 | spin_unlock(&actual->d_lock); | ||
| 2817 | spin_unlock(&inode->i_lock); | 2777 | spin_unlock(&inode->i_lock); |
| 2818 | out_nolock: | 2778 | out_nolock: |
| 2819 | if (actual == dentry) { | 2779 | if (actual == dentry) { |
diff --git a/fs/direct-io.c b/fs/direct-io.c index c3116404ab49..e181b6b2e297 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
| @@ -158,7 +158,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) | |||
| 158 | { | 158 | { |
| 159 | ssize_t ret; | 159 | ssize_t ret; |
| 160 | 160 | ||
| 161 | ret = iov_iter_get_pages(sdio->iter, dio->pages, DIO_PAGES, | 161 | ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, |
| 162 | &sdio->from); | 162 | &sdio->from); |
| 163 | 163 | ||
| 164 | if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) { | 164 | if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) { |
diff --git a/fs/fscache/object.c b/fs/fscache/object.c index d3b4539f1651..da032daf0e0d 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c | |||
| @@ -982,6 +982,7 @@ nomem: | |||
| 982 | submit_op_failed: | 982 | submit_op_failed: |
| 983 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | 983 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); |
| 984 | spin_unlock(&cookie->lock); | 984 | spin_unlock(&cookie->lock); |
| 985 | fscache_unuse_cookie(object); | ||
| 985 | kfree(op); | 986 | kfree(op); |
| 986 | _leave(" [EIO]"); | 987 | _leave(" [EIO]"); |
| 987 | return transit_to(KILL_OBJECT); | 988 | return transit_to(KILL_OBJECT); |
diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 85332b9d19d1..de33b3fccca6 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c | |||
| @@ -44,6 +44,19 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa | |||
| 44 | EXPORT_SYMBOL(__fscache_wait_on_page_write); | 44 | EXPORT_SYMBOL(__fscache_wait_on_page_write); |
| 45 | 45 | ||
| 46 | /* | 46 | /* |
| 47 | * wait for a page to finish being written to the cache. Put a timeout here | ||
| 48 | * since we might be called recursively via parent fs. | ||
| 49 | */ | ||
| 50 | static | ||
| 51 | bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page) | ||
| 52 | { | ||
| 53 | wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0); | ||
| 54 | |||
| 55 | return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page), | ||
| 56 | HZ); | ||
| 57 | } | ||
| 58 | |||
| 59 | /* | ||
| 47 | * decide whether a page can be released, possibly by cancelling a store to it | 60 | * decide whether a page can be released, possibly by cancelling a store to it |
| 48 | * - we're allowed to sleep if __GFP_WAIT is flagged | 61 | * - we're allowed to sleep if __GFP_WAIT is flagged |
| 49 | */ | 62 | */ |
| @@ -115,7 +128,10 @@ page_busy: | |||
| 115 | } | 128 | } |
| 116 | 129 | ||
| 117 | fscache_stat(&fscache_n_store_vmscan_wait); | 130 | fscache_stat(&fscache_n_store_vmscan_wait); |
| 118 | __fscache_wait_on_page_write(cookie, page); | 131 | if (!release_page_wait_timeout(cookie, page)) |
| 132 | _debug("fscache writeout timeout page: %p{%lx}", | ||
| 133 | page, page->index); | ||
| 134 | |||
| 119 | gfp &= ~__GFP_WAIT; | 135 | gfp &= ~__GFP_WAIT; |
| 120 | goto try_again; | 136 | goto try_again; |
| 121 | } | 137 | } |
| @@ -182,7 +198,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) | |||
| 182 | { | 198 | { |
| 183 | struct fscache_operation *op; | 199 | struct fscache_operation *op; |
| 184 | struct fscache_object *object; | 200 | struct fscache_object *object; |
| 185 | bool wake_cookie; | 201 | bool wake_cookie = false; |
| 186 | 202 | ||
| 187 | _enter("%p", cookie); | 203 | _enter("%p", cookie); |
| 188 | 204 | ||
| @@ -212,15 +228,16 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) | |||
| 212 | 228 | ||
| 213 | __fscache_use_cookie(cookie); | 229 | __fscache_use_cookie(cookie); |
| 214 | if (fscache_submit_exclusive_op(object, op) < 0) | 230 | if (fscache_submit_exclusive_op(object, op) < 0) |
| 215 | goto nobufs; | 231 | goto nobufs_dec; |
| 216 | spin_unlock(&cookie->lock); | 232 | spin_unlock(&cookie->lock); |
| 217 | fscache_stat(&fscache_n_attr_changed_ok); | 233 | fscache_stat(&fscache_n_attr_changed_ok); |
| 218 | fscache_put_operation(op); | 234 | fscache_put_operation(op); |
| 219 | _leave(" = 0"); | 235 | _leave(" = 0"); |
| 220 | return 0; | 236 | return 0; |
| 221 | 237 | ||
| 222 | nobufs: | 238 | nobufs_dec: |
| 223 | wake_cookie = __fscache_unuse_cookie(cookie); | 239 | wake_cookie = __fscache_unuse_cookie(cookie); |
| 240 | nobufs: | ||
| 224 | spin_unlock(&cookie->lock); | 241 | spin_unlock(&cookie->lock); |
| 225 | kfree(op); | 242 | kfree(op); |
| 226 | if (wake_cookie) | 243 | if (wake_cookie) |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 912061ac4baf..caa8d95b24e8 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
| @@ -1305,6 +1305,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, | |||
| 1305 | size_t start; | 1305 | size_t start; |
| 1306 | ssize_t ret = iov_iter_get_pages(ii, | 1306 | ssize_t ret = iov_iter_get_pages(ii, |
| 1307 | &req->pages[req->num_pages], | 1307 | &req->pages[req->num_pages], |
| 1308 | *nbytesp - nbytes, | ||
| 1308 | req->max_pages - req->num_pages, | 1309 | req->max_pages - req->num_pages, |
| 1309 | &start); | 1310 | &start); |
| 1310 | if (ret < 0) | 1311 | if (ret < 0) |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 6252b173a465..d071e7f23de2 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/buffer_head.h> | 24 | #include <linux/buffer_head.h> |
| 25 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
| 26 | #include <linux/mpage.h> | 26 | #include <linux/mpage.h> |
| 27 | #include <linux/pagemap.h> | ||
| 27 | #include <linux/writeback.h> | 28 | #include <linux/writeback.h> |
| 28 | #include <linux/aio.h> | 29 | #include <linux/aio.h> |
| 29 | #include "nilfs.h" | 30 | #include "nilfs.h" |
| @@ -219,10 +220,10 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc) | |||
| 219 | 220 | ||
| 220 | static int nilfs_set_page_dirty(struct page *page) | 221 | static int nilfs_set_page_dirty(struct page *page) |
| 221 | { | 222 | { |
| 223 | struct inode *inode = page->mapping->host; | ||
| 222 | int ret = __set_page_dirty_nobuffers(page); | 224 | int ret = __set_page_dirty_nobuffers(page); |
| 223 | 225 | ||
| 224 | if (page_has_buffers(page)) { | 226 | if (page_has_buffers(page)) { |
| 225 | struct inode *inode = page->mapping->host; | ||
| 226 | unsigned nr_dirty = 0; | 227 | unsigned nr_dirty = 0; |
| 227 | struct buffer_head *bh, *head; | 228 | struct buffer_head *bh, *head; |
| 228 | 229 | ||
| @@ -245,6 +246,10 @@ static int nilfs_set_page_dirty(struct page *page) | |||
| 245 | 246 | ||
| 246 | if (nr_dirty) | 247 | if (nr_dirty) |
| 247 | nilfs_set_file_dirty(inode, nr_dirty); | 248 | nilfs_set_file_dirty(inode, nr_dirty); |
| 249 | } else if (ret) { | ||
| 250 | unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); | ||
| 251 | |||
| 252 | nilfs_set_file_dirty(inode, nr_dirty); | ||
| 248 | } | 253 | } |
| 249 | return ret; | 254 | return ret; |
| 250 | } | 255 | } |
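For illustration of the new else branch above: with 4 KiB pages (PAGE_CACHE_SHIFT = 12) and a hypothetical 1 KiB block size (inode->i_blkbits = 10), a page dirtied without buffer heads is now accounted as nr_dirty = 1 << (12 - 10) = 4 blocks.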
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 3ec906ef5d9a..e3cfa0227026 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
| @@ -655,12 +655,9 @@ void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, | |||
| 655 | clear_bit(bit, res->refmap); | 655 | clear_bit(bit, res->refmap); |
| 656 | } | 656 | } |
| 657 | 657 | ||
| 658 | 658 | static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | |
| 659 | void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | ||
| 660 | struct dlm_lock_resource *res) | 659 | struct dlm_lock_resource *res) |
| 661 | { | 660 | { |
| 662 | assert_spin_locked(&res->spinlock); | ||
| 663 | |||
| 664 | res->inflight_locks++; | 661 | res->inflight_locks++; |
| 665 | 662 | ||
| 666 | mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, | 663 | mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, |
| @@ -668,6 +665,13 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | |||
| 668 | __builtin_return_address(0)); | 665 | __builtin_return_address(0)); |
| 669 | } | 666 | } |
| 670 | 667 | ||
| 668 | void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | ||
| 669 | struct dlm_lock_resource *res) | ||
| 670 | { | ||
| 671 | assert_spin_locked(&res->spinlock); | ||
| 672 | __dlm_lockres_grab_inflight_ref(dlm, res); | ||
| 673 | } | ||
| 674 | |||
| 671 | void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, | 675 | void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, |
| 672 | struct dlm_lock_resource *res) | 676 | struct dlm_lock_resource *res) |
| 673 | { | 677 | { |
| @@ -894,10 +898,8 @@ lookup: | |||
| 894 | /* finally add the lockres to its hash bucket */ | 898 | /* finally add the lockres to its hash bucket */ |
| 895 | __dlm_insert_lockres(dlm, res); | 899 | __dlm_insert_lockres(dlm, res); |
| 896 | 900 | ||
| 897 | /* Grab inflight ref to pin the resource */ | 901 | /* since this lockres is new it does not require the spinlock */ |
| 898 | spin_lock(&res->spinlock); | 902 | __dlm_lockres_grab_inflight_ref(dlm, res); |
| 899 | dlm_lockres_grab_inflight_ref(dlm, res); | ||
| 900 | spin_unlock(&res->spinlock); | ||
| 901 | 903 | ||
| 902 | /* get an extra ref on the mle in case this is a BLOCK | 904 | /* get an extra ref on the mle in case this is a BLOCK |
| 903 | * if so, the creator of the BLOCK may try to put the last | 905 | * if so, the creator of the BLOCK may try to put the last |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index ddb662b32447..4142546aedae 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
| @@ -2532,6 +2532,7 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb) | |||
| 2532 | kfree(osb->journal); | 2532 | kfree(osb->journal); |
| 2533 | kfree(osb->local_alloc_copy); | 2533 | kfree(osb->local_alloc_copy); |
| 2534 | kfree(osb->uuid_str); | 2534 | kfree(osb->uuid_str); |
| 2535 | kfree(osb->vol_label); | ||
| 2535 | ocfs2_put_dlm_debug(osb->osb_dlm_debug); | 2536 | ocfs2_put_dlm_debug(osb->osb_dlm_debug); |
| 2536 | memset(osb, 0, sizeof(struct ocfs2_super)); | 2537 | memset(osb, 0, sizeof(struct ocfs2_super)); |
| 2537 | } | 2538 | } |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index dfc791c42d64..c34156888d70 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
| @@ -931,23 +931,32 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, | |||
| 931 | while (addr < end) { | 931 | while (addr < end) { |
| 932 | struct vm_area_struct *vma = find_vma(walk->mm, addr); | 932 | struct vm_area_struct *vma = find_vma(walk->mm, addr); |
| 933 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); | 933 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); |
| 934 | unsigned long vm_end; | 934 | /* End of address space hole, which we mark as non-present. */ |
| 935 | unsigned long hole_end; | ||
| 935 | 936 | ||
| 936 | if (!vma) { | 937 | if (vma) |
| 937 | vm_end = end; | 938 | hole_end = min(end, vma->vm_start); |
| 938 | } else { | 939 | else |
| 939 | vm_end = min(end, vma->vm_end); | 940 | hole_end = end; |
| 940 | if (vma->vm_flags & VM_SOFTDIRTY) | 941 | |
| 941 | pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY); | 942 | for (; addr < hole_end; addr += PAGE_SIZE) { |
| 943 | err = add_to_pagemap(addr, &pme, pm); | ||
| 944 | if (err) | ||
| 945 | goto out; | ||
| 942 | } | 946 | } |
| 943 | 947 | ||
| 944 | for (; addr < vm_end; addr += PAGE_SIZE) { | 948 | if (!vma) |
| 949 | break; | ||
| 950 | |||
| 951 | /* Addresses in the VMA. */ | ||
| 952 | if (vma->vm_flags & VM_SOFTDIRTY) | ||
| 953 | pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY); | ||
| 954 | for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { | ||
| 945 | err = add_to_pagemap(addr, &pme, pm); | 955 | err = add_to_pagemap(addr, &pme, pm); |
| 946 | if (err) | 956 | if (err) |
| 947 | goto out; | 957 | goto out; |
| 948 | } | 958 | } |
| 949 | } | 959 | } |
| 950 | |||
| 951 | out: | 960 | out: |
| 952 | return err; | 961 | return err; |
| 953 | } | 962 | } |
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index a9cc75ffa925..7caa01652888 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c | |||
| @@ -298,7 +298,10 @@ cg_found: | |||
| 298 | ufsi->i_oeftflag = 0; | 298 | ufsi->i_oeftflag = 0; |
| 299 | ufsi->i_dir_start_lookup = 0; | 299 | ufsi->i_dir_start_lookup = 0; |
| 300 | memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1)); | 300 | memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1)); |
| 301 | insert_inode_hash(inode); | 301 | if (insert_inode_locked(inode) < 0) { |
| 302 | err = -EIO; | ||
| 303 | goto failed; | ||
| 304 | } | ||
| 302 | mark_inode_dirty(inode); | 305 | mark_inode_dirty(inode); |
| 303 | 306 | ||
| 304 | if (uspi->fs_magic == UFS2_MAGIC) { | 307 | if (uspi->fs_magic == UFS2_MAGIC) { |
| @@ -337,6 +340,7 @@ cg_found: | |||
| 337 | fail_remove_inode: | 340 | fail_remove_inode: |
| 338 | unlock_ufs(sb); | 341 | unlock_ufs(sb); |
| 339 | clear_nlink(inode); | 342 | clear_nlink(inode); |
| 343 | unlock_new_inode(inode); | ||
| 340 | iput(inode); | 344 | iput(inode); |
| 341 | UFSD("EXIT (FAILED): err %d\n", err); | 345 | UFSD("EXIT (FAILED): err %d\n", err); |
| 342 | return ERR_PTR(err); | 346 | return ERR_PTR(err); |
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 2df62a73f20c..fd65deb4b5f0 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
| @@ -38,10 +38,12 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) | |||
| 38 | { | 38 | { |
| 39 | int err = ufs_add_link(dentry, inode); | 39 | int err = ufs_add_link(dentry, inode); |
| 40 | if (!err) { | 40 | if (!err) { |
| 41 | unlock_new_inode(inode); | ||
| 41 | d_instantiate(dentry, inode); | 42 | d_instantiate(dentry, inode); |
| 42 | return 0; | 43 | return 0; |
| 43 | } | 44 | } |
| 44 | inode_dec_link_count(inode); | 45 | inode_dec_link_count(inode); |
| 46 | unlock_new_inode(inode); | ||
| 45 | iput(inode); | 47 | iput(inode); |
| 46 | return err; | 48 | return err; |
| 47 | } | 49 | } |
| @@ -155,6 +157,7 @@ out_notlocked: | |||
| 155 | 157 | ||
| 156 | out_fail: | 158 | out_fail: |
| 157 | inode_dec_link_count(inode); | 159 | inode_dec_link_count(inode); |
| 160 | unlock_new_inode(inode); | ||
| 158 | iput(inode); | 161 | iput(inode); |
| 159 | goto out; | 162 | goto out; |
| 160 | } | 163 | } |
| @@ -210,6 +213,7 @@ out: | |||
| 210 | out_fail: | 213 | out_fail: |
| 211 | inode_dec_link_count(inode); | 214 | inode_dec_link_count(inode); |
| 212 | inode_dec_link_count(inode); | 215 | inode_dec_link_count(inode); |
| 216 | unlock_new_inode(inode); | ||
| 213 | iput (inode); | 217 | iput (inode); |
| 214 | inode_dec_link_count(dir); | 218 | inode_dec_link_count(dir); |
| 215 | unlock_ufs(dir->i_sb); | 219 | unlock_ufs(dir->i_sb); |
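A minimal sketch of the locking pattern the ufs hunks above adopt (my_fs_add_nondir and the add_link callback are hypothetical): an inode hashed with insert_inode_locked() stays I_NEW until unlock_new_inode(), so every exit path must unlock it before d_instantiate() or iput().

#include <linux/fs.h>
#include <linux/dcache.h>

static int my_fs_add_nondir(struct dentry *dentry, struct inode *inode,
                            int (*add_link)(struct dentry *, struct inode *))
{
        int err = add_link(dentry, inode);

        if (!err) {
                unlock_new_inode(inode);        /* clear I_NEW before exposing it */
                d_instantiate(dentry, inode);
                return 0;
        }
        inode_dec_link_count(inode);
        unlock_new_inode(inode);                /* error path must unlock too */
        iput(inode);
        return err;
}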
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index d91e59b79f0d..57ee0528aacb 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
| @@ -118,6 +118,7 @@ struct acpi_device; | |||
| 118 | struct acpi_hotplug_profile { | 118 | struct acpi_hotplug_profile { |
| 119 | struct kobject kobj; | 119 | struct kobject kobj; |
| 120 | int (*scan_dependent)(struct acpi_device *adev); | 120 | int (*scan_dependent)(struct acpi_device *adev); |
| 121 | void (*notify_online)(struct acpi_device *adev); | ||
| 121 | bool enabled:1; | 122 | bool enabled:1; |
| 122 | bool demand_offline:1; | 123 | bool demand_offline:1; |
| 123 | }; | 124 | }; |
diff --git a/include/linux/ccp.h b/include/linux/ccp.h index ebcc9d146219..7f437036baa4 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h | |||
| @@ -27,6 +27,13 @@ struct ccp_cmd; | |||
| 27 | defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE) | 27 | defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE) |
| 28 | 28 | ||
| 29 | /** | 29 | /** |
| 30 | * ccp_present - check if a CCP device is present | ||
| 31 | * | ||
| 32 | * Returns zero if a CCP device is present, -ENODEV otherwise. | ||
| 33 | */ | ||
| 34 | int ccp_present(void); | ||
| 35 | |||
| 36 | /** | ||
| 30 | * ccp_enqueue_cmd - queue an operation for processing by the CCP | 37 | * ccp_enqueue_cmd - queue an operation for processing by the CCP |
| 31 | * | 38 | * |
| 32 | * @cmd: ccp_cmd struct to be processed | 39 | * @cmd: ccp_cmd struct to be processed |
| @@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd); | |||
| 53 | 60 | ||
| 54 | #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */ | 61 | #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */ |
| 55 | 62 | ||
| 63 | static inline int ccp_present(void) | ||
| 64 | { | ||
| 65 | return -ENODEV; | ||
| 66 | } | ||
| 67 | |||
| 56 | static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) | 68 | static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) |
| 57 | { | 69 | { |
| 58 | return -ENODEV; | 70 | return -ENODEV; |
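A minimal sketch of how a caller might use the new ccp_present() pair above (my_ccp_algs_init is a hypothetical init helper); the check works whether or not CONFIG_CRYPTO_DEV_CCP_DD is enabled, since the stub simply returns -ENODEV.

#include <linux/errno.h>
#include <linux/ccp.h>

static int my_ccp_algs_init(void)
{
        if (ccp_present() != 0)
                return -ENODEV;         /* no CCP device: skip registration */

        /* ... register CCP-backed algorithms here ... */
        return 0;
}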
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index ade2390ffe92..6e39c9bb0dae 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
| @@ -93,12 +93,12 @@ extern int cpuset_slab_spread_node(void); | |||
| 93 | 93 | ||
| 94 | static inline int cpuset_do_page_mem_spread(void) | 94 | static inline int cpuset_do_page_mem_spread(void) |
| 95 | { | 95 | { |
| 96 | return current->flags & PF_SPREAD_PAGE; | 96 | return task_spread_page(current); |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | static inline int cpuset_do_slab_mem_spread(void) | 99 | static inline int cpuset_do_slab_mem_spread(void) |
| 100 | { | 100 | { |
| 101 | return current->flags & PF_SPREAD_SLAB; | 101 | return task_spread_slab(current); |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | extern int current_cpuset_is_being_rebound(void); | 104 | extern int current_cpuset_is_being_rebound(void); |
diff --git a/include/linux/device.h b/include/linux/device.h index 43d183aeb25b..d0d5c5db509d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -181,13 +181,14 @@ extern int bus_unregister_notifier(struct bus_type *bus, | |||
| 181 | * with the device lock held in the core, so be careful. | 181 | * with the device lock held in the core, so be careful. |
| 182 | */ | 182 | */ |
| 183 | #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ | 183 | #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ |
| 184 | #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ | 184 | #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ |
| 185 | #define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be | 185 | #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ |
| 186 | #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be | ||
| 186 | bound */ | 187 | bound */ |
| 187 | #define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */ | 188 | #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ |
| 188 | #define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be | 189 | #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be |
| 189 | unbound */ | 190 | unbound */ |
| 190 | #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound | 191 | #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound |
| 191 | from the device */ | 192 | from the device */ |
| 192 | 193 | ||
| 193 | extern struct kset *bus_get_kset(struct bus_type *bus); | 194 | extern struct kset *bus_get_kset(struct bus_type *bus); |
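A hedged sketch of a bus notifier reacting to the renumbered constants (my_bus_notify is hypothetical): BUS_NOTIFY_DEL_DEVICE now fires while the device is still present, and the new BUS_NOTIFY_REMOVED_DEVICE fires after removal has completed.

#include <linux/device.h>
#include <linux/notifier.h>

static int my_bus_notify(struct notifier_block *nb, unsigned long action,
                         void *data)
{
        struct device *dev = data;

        switch (action) {
        case BUS_NOTIFY_DEL_DEVICE:
                dev_info(dev, "about to be removed\n");
                break;
        case BUS_NOTIFY_REMOVED_DEVICE:
                dev_info(dev, "removal completed\n");
                break;
        }
        return NOTIFY_OK;
}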
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 1deece46a0ca..593fff99e6bf 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
| @@ -56,13 +56,19 @@ struct dmar_drhd_unit { | |||
| 56 | struct intel_iommu *iommu; | 56 | struct intel_iommu *iommu; |
| 57 | }; | 57 | }; |
| 58 | 58 | ||
| 59 | struct dmar_pci_path { | ||
| 60 | u8 bus; | ||
| 61 | u8 device; | ||
| 62 | u8 function; | ||
| 63 | }; | ||
| 64 | |||
| 59 | struct dmar_pci_notify_info { | 65 | struct dmar_pci_notify_info { |
| 60 | struct pci_dev *dev; | 66 | struct pci_dev *dev; |
| 61 | unsigned long event; | 67 | unsigned long event; |
| 62 | int bus; | 68 | int bus; |
| 63 | u16 seg; | 69 | u16 seg; |
| 64 | u16 level; | 70 | u16 level; |
| 65 | struct acpi_dmar_pci_path path[]; | 71 | struct dmar_pci_path path[]; |
| 66 | } __attribute__((packed)); | 72 | } __attribute__((packed)); |
| 67 | 73 | ||
| 68 | extern struct rw_semaphore dmar_global_lock; | 74 | extern struct rw_semaphore dmar_global_lock; |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a95efeb53a8b..b556e0ab946f 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
| @@ -577,20 +577,4 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node | |||
| 577 | } | 577 | } |
| 578 | #endif /* CONFIG_OF */ | 578 | #endif /* CONFIG_OF */ |
| 579 | 579 | ||
| 580 | #ifdef CONFIG_ACPI | ||
| 581 | void acpi_i2c_register_devices(struct i2c_adapter *adap); | ||
| 582 | #else | ||
| 583 | static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { } | ||
| 584 | #endif /* CONFIG_ACPI */ | ||
| 585 | |||
| 586 | #ifdef CONFIG_ACPI_I2C_OPREGION | ||
| 587 | int acpi_i2c_install_space_handler(struct i2c_adapter *adapter); | ||
| 588 | void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter); | ||
| 589 | #else | ||
| 590 | static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
| 591 | { } | ||
| 592 | static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
| 593 | { return 0; } | ||
| 594 | #endif /* CONFIG_ACPI_I2C_OPREGION */ | ||
| 595 | |||
| 596 | #endif /* _LINUX_I2C_H */ | 580 | #endif /* _LINUX_I2C_H */ |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 20f9a527922a..379a6179fd96 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -57,8 +57,11 @@ struct iommu_domain { | |||
| 57 | struct iommu_domain_geometry geometry; | 57 | struct iommu_domain_geometry geometry; |
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | #define IOMMU_CAP_CACHE_COHERENCY 0x1 | 60 | enum iommu_cap { |
| 61 | #define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ | 61 | IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA |
| 62 | transactions */ | ||
| 63 | IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ | ||
| 64 | }; | ||
| 62 | 65 | ||
| 63 | /* | 66 | /* |
| 64 | * Following constraints are specifc to FSL_PAMUV1: | 67 | * Following constraints are specifc to FSL_PAMUV1: |
| @@ -94,7 +97,6 @@ enum iommu_attr { | |||
| 94 | * @map: map a physically contiguous memory region to an iommu domain | 97 | * @map: map a physically contiguous memory region to an iommu domain |
| 95 | * @unmap: unmap a physically contiguous memory region from an iommu domain | 98 | * @unmap: unmap a physically contiguous memory region from an iommu domain |
| 96 | * @iova_to_phys: translate iova to physical address | 99 | * @iova_to_phys: translate iova to physical address |
| 97 | * @domain_has_cap: domain capabilities query | ||
| 98 | * @add_device: add device to iommu grouping | 100 | * @add_device: add device to iommu grouping |
| 99 | * @remove_device: remove device from iommu grouping | 101 | * @remove_device: remove device from iommu grouping |
| 100 | * @domain_get_attr: Query domain attributes | 102 | * @domain_get_attr: Query domain attributes |
| @@ -102,6 +104,7 @@ enum iommu_attr { | |||
| 102 | * @pgsize_bitmap: bitmap of supported page sizes | 104 | * @pgsize_bitmap: bitmap of supported page sizes |
| 103 | */ | 105 | */ |
| 104 | struct iommu_ops { | 106 | struct iommu_ops { |
| 107 | bool (*capable)(enum iommu_cap); | ||
| 105 | int (*domain_init)(struct iommu_domain *domain); | 108 | int (*domain_init)(struct iommu_domain *domain); |
| 106 | void (*domain_destroy)(struct iommu_domain *domain); | 109 | void (*domain_destroy)(struct iommu_domain *domain); |
| 107 | int (*attach_dev)(struct iommu_domain *domain, struct device *dev); | 110 | int (*attach_dev)(struct iommu_domain *domain, struct device *dev); |
| @@ -111,8 +114,6 @@ struct iommu_ops { | |||
| 111 | size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, | 114 | size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, |
| 112 | size_t size); | 115 | size_t size); |
| 113 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); | 116 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); |
| 114 | int (*domain_has_cap)(struct iommu_domain *domain, | ||
| 115 | unsigned long cap); | ||
| 116 | int (*add_device)(struct device *dev); | 117 | int (*add_device)(struct device *dev); |
| 117 | void (*remove_device)(struct device *dev); | 118 | void (*remove_device)(struct device *dev); |
| 118 | int (*device_group)(struct device *dev, unsigned int *groupid); | 119 | int (*device_group)(struct device *dev, unsigned int *groupid); |
| @@ -142,6 +143,7 @@ struct iommu_ops { | |||
| 142 | 143 | ||
| 143 | extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); | 144 | extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); |
| 144 | extern bool iommu_present(struct bus_type *bus); | 145 | extern bool iommu_present(struct bus_type *bus); |
| 146 | extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap); | ||
| 145 | extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); | 147 | extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); |
| 146 | extern struct iommu_group *iommu_group_get_by_id(int id); | 148 | extern struct iommu_group *iommu_group_get_by_id(int id); |
| 147 | extern void iommu_domain_free(struct iommu_domain *domain); | 149 | extern void iommu_domain_free(struct iommu_domain *domain); |
| @@ -154,8 +156,6 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 154 | extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, | 156 | extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
| 155 | size_t size); | 157 | size_t size); |
| 156 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); | 158 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); |
| 157 | extern int iommu_domain_has_cap(struct iommu_domain *domain, | ||
| 158 | unsigned long cap); | ||
| 159 | extern void iommu_set_fault_handler(struct iommu_domain *domain, | 159 | extern void iommu_set_fault_handler(struct iommu_domain *domain, |
| 160 | iommu_fault_handler_t handler, void *token); | 160 | iommu_fault_handler_t handler, void *token); |
| 161 | 161 | ||
| @@ -250,6 +250,11 @@ static inline bool iommu_present(struct bus_type *bus) | |||
| 250 | return false; | 250 | return false; |
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) | ||
| 254 | { | ||
| 255 | return false; | ||
| 256 | } | ||
| 257 | |||
| 253 | static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) | 258 | static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) |
| 254 | { | 259 | { |
| 255 | return NULL; | 260 | return NULL; |
| @@ -304,12 +309,6 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad | |||
| 304 | return 0; | 309 | return 0; |
| 305 | } | 310 | } |
| 306 | 311 | ||
| 307 | static inline int iommu_domain_has_cap(struct iommu_domain *domain, | ||
| 308 | unsigned long cap) | ||
| 309 | { | ||
| 310 | return 0; | ||
| 311 | } | ||
| 312 | |||
| 313 | static inline void iommu_set_fault_handler(struct iommu_domain *domain, | 312 | static inline void iommu_set_fault_handler(struct iommu_domain *domain, |
| 314 | iommu_fault_handler_t handler, void *token) | 313 | iommu_fault_handler_t handler, void *token) |
| 315 | { | 314 | { |
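A minimal sketch of the replacement API, assuming a PCI-attached caller (my_bus_isolates_interrupts is hypothetical): the capability question is now asked per bus via iommu_capable() instead of per domain via the removed iommu_domain_has_cap().

#include <linux/iommu.h>
#include <linux/pci.h>

static bool my_bus_isolates_interrupts(void)
{
        /* true only if the IOMMU behind the PCI bus supports interrupt
         * remapping/isolation */
        return iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP);
}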
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 511c6e0d21a9..a5b7d7cfcedf 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -209,6 +209,7 @@ enum { | |||
| 209 | MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, | 209 | MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, |
| 210 | MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, | 210 | MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, |
| 211 | MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, | 211 | MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, |
| 212 | MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28, | ||
| 212 | }; | 213 | }; |
| 213 | 214 | ||
| 214 | enum mlx4_event { | 215 | enum mlx4_event { |
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 7040dc98ff8b..5f4e36cf0091 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
| @@ -56,7 +56,8 @@ enum mlx4_qp_optpar { | |||
| 56 | MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, | 56 | MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, |
| 57 | MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, | 57 | MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, |
| 58 | MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16, | 58 | MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16, |
| 59 | MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20 | 59 | MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20, |
| 60 | MLX4_QP_OPTPAR_VLAN_STRIPPING = 1 << 21, | ||
| 60 | }; | 61 | }; |
| 61 | 62 | ||
| 62 | enum mlx4_qp_state { | 63 | enum mlx4_qp_state { |
| @@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg { | |||
| 423 | 424 | ||
| 424 | enum mlx4_update_qp_attr { | 425 | enum mlx4_update_qp_attr { |
| 425 | MLX4_UPDATE_QP_SMAC = 1 << 0, | 426 | MLX4_UPDATE_QP_SMAC = 1 << 0, |
| 427 | MLX4_UPDATE_QP_VSD = 1 << 2, | ||
| 428 | MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 | ||
| 429 | }; | ||
| 430 | |||
| 431 | enum mlx4_update_qp_params_flags { | ||
| 432 | MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0, | ||
| 426 | }; | 433 | }; |
| 427 | 434 | ||
| 428 | struct mlx4_update_qp_params { | 435 | struct mlx4_update_qp_params { |
| 429 | u8 smac_index; | 436 | u8 smac_index; |
| 437 | u32 flags; | ||
| 430 | }; | 438 | }; |
| 431 | 439 | ||
| 432 | int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, | 440 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
| 433 | enum mlx4_update_qp_attr attr, | 441 | enum mlx4_update_qp_attr attr, |
| 434 | struct mlx4_update_qp_params *params); | 442 | struct mlx4_update_qp_params *params); |
| 435 | int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 443 | int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 3dfbf237cd8f..ef5894ca8e50 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
| @@ -71,6 +71,7 @@ void percpu_ref_reinit(struct percpu_ref *ref); | |||
| 71 | void percpu_ref_exit(struct percpu_ref *ref); | 71 | void percpu_ref_exit(struct percpu_ref *ref); |
| 72 | void percpu_ref_kill_and_confirm(struct percpu_ref *ref, | 72 | void percpu_ref_kill_and_confirm(struct percpu_ref *ref, |
| 73 | percpu_ref_func_t *confirm_kill); | 73 | percpu_ref_func_t *confirm_kill); |
| 74 | void __percpu_ref_kill_expedited(struct percpu_ref *ref); | ||
| 74 | 75 | ||
| 75 | /** | 76 | /** |
| 76 | * percpu_ref_kill - drop the initial ref | 77 | * percpu_ref_kill - drop the initial ref |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 5c2c885ee52b..b867a4dab38a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1903,8 +1903,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, | |||
| 1903 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ | 1903 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
| 1904 | #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ | 1904 | #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ |
| 1905 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ | 1905 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
| 1906 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ | ||
| 1907 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ | ||
| 1908 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ | 1906 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
| 1909 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1907 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
| 1910 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1908 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
| @@ -1957,17 +1955,31 @@ static inline void memalloc_noio_restore(unsigned int flags) | |||
| 1957 | } | 1955 | } |
| 1958 | 1956 | ||
| 1959 | /* Per-process atomic flags. */ | 1957 | /* Per-process atomic flags. */ |
| 1960 | #define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */ | 1958 | #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ |
| 1959 | #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ | ||
| 1960 | #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ | ||
| 1961 | 1961 | ||
| 1962 | static inline bool task_no_new_privs(struct task_struct *p) | ||
| 1963 | { | ||
| 1964 | return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); | ||
| 1965 | } | ||
| 1966 | 1962 | ||
| 1967 | static inline void task_set_no_new_privs(struct task_struct *p) | 1963 | #define TASK_PFA_TEST(name, func) \ |
| 1968 | { | 1964 | static inline bool task_##func(struct task_struct *p) \ |
| 1969 | set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); | 1965 | { return test_bit(PFA_##name, &p->atomic_flags); } |
| 1970 | } | 1966 | #define TASK_PFA_SET(name, func) \ |
| 1967 | static inline void task_set_##func(struct task_struct *p) \ | ||
| 1968 | { set_bit(PFA_##name, &p->atomic_flags); } | ||
| 1969 | #define TASK_PFA_CLEAR(name, func) \ | ||
| 1970 | static inline void task_clear_##func(struct task_struct *p) \ | ||
| 1971 | { clear_bit(PFA_##name, &p->atomic_flags); } | ||
| 1972 | |||
| 1973 | TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) | ||
| 1974 | TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) | ||
| 1975 | |||
| 1976 | TASK_PFA_TEST(SPREAD_PAGE, spread_page) | ||
| 1977 | TASK_PFA_SET(SPREAD_PAGE, spread_page) | ||
| 1978 | TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) | ||
| 1979 | |||
| 1980 | TASK_PFA_TEST(SPREAD_SLAB, spread_slab) | ||
| 1981 | TASK_PFA_SET(SPREAD_SLAB, spread_slab) | ||
| 1982 | TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) | ||
| 1971 | 1983 | ||
| 1972 | /* | 1984 | /* |
| 1973 | * task->jobctl flags | 1985 | * task->jobctl flags |
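For reference, a rough expansion of the generated helpers (the others follow the same shape); the kernel/cpuset.c hunk further down switches to these task_*_spread_page()/_slab() calls.

/* TASK_PFA_TEST(SPREAD_PAGE, spread_page) expands to roughly: */
static inline bool task_spread_page(struct task_struct *p)
{
        return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags);
}

/* and TASK_PFA_SET(SPREAD_PAGE, spread_page) to: */
static inline void task_set_spread_page(struct task_struct *p)
{
        set_bit(PFA_SPREAD_PAGE, &p->atomic_flags);
}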
| @@ -2608,9 +2620,22 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct | |||
| 2608 | task_thread_info(p)->task = p; | 2620 | task_thread_info(p)->task = p; |
| 2609 | } | 2621 | } |
| 2610 | 2622 | ||
| 2623 | /* | ||
| 2624 | * Return the address of the last usable long on the stack. | ||
| 2625 | * | ||
| 2626 | * When the stack grows down, this is just above the thread | ||
| 2627 | * info struct. Going any lower will corrupt the threadinfo. | ||
| 2628 | * | ||
| 2629 | * When the stack grows up, this is the highest address. | ||
| 2630 | * Beyond that position, we corrupt data on the next page. | ||
| 2631 | */ | ||
| 2611 | static inline unsigned long *end_of_stack(struct task_struct *p) | 2632 | static inline unsigned long *end_of_stack(struct task_struct *p) |
| 2612 | { | 2633 | { |
| 2634 | #ifdef CONFIG_STACK_GROWSUP | ||
| 2635 | return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; | ||
| 2636 | #else | ||
| 2613 | return (unsigned long *)(task_thread_info(p) + 1); | 2637 | return (unsigned long *)(task_thread_info(p) + 1); |
| 2638 | #endif | ||
| 2614 | } | 2639 | } |
| 2615 | 2640 | ||
| 2616 | #endif | 2641 | #endif |
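A hedged sketch of what the direction-aware end_of_stack() is typically used for (my_task_stack_overflowed is hypothetical, and the canary relationship is an assumption not stated in the hunk): the returned slot is the word written with STACK_END_MAGIC at fork time, so an overflow check reads the same slot regardless of stack-growth direction.

#include <linux/sched.h>
#include <linux/magic.h>        /* STACK_END_MAGIC */

static inline bool my_task_stack_overflowed(struct task_struct *tsk)
{
        /* the last usable long doubles as an overflow canary */
        return *end_of_stack(tsk) != STACK_END_MAGIC;
}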
diff --git a/include/linux/uio.h b/include/linux/uio.h index 48d64e6ab292..290fbf0b6b8a 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
| @@ -84,7 +84,7 @@ unsigned long iov_iter_alignment(const struct iov_iter *i); | |||
| 84 | void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, | 84 | void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, |
| 85 | unsigned long nr_segs, size_t count); | 85 | unsigned long nr_segs, size_t count); |
| 86 | ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, | 86 | ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, |
| 87 | unsigned maxpages, size_t *start); | 87 | size_t maxsize, unsigned maxpages, size_t *start); |
| 88 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, | 88 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, |
| 89 | size_t maxsize, size_t *start); | 89 | size_t maxsize, size_t *start); |
| 90 | int iov_iter_npages(const struct iov_iter *i, int maxpages); | 90 | int iov_iter_npages(const struct iov_iter *i, int maxpages); |
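A minimal caller sketch for the extended signature (my_grab_pages and its parameters are hypothetical), mirroring the fuse and direct-io updates above: the grab is now bounded both by the page array and by the number of bytes still wanted.

#include <linux/uio.h>

static ssize_t my_grab_pages(struct iov_iter *ii, struct page **pages,
                             size_t bytes_left, unsigned int max_pages)
{
        size_t start;   /* offset into the first returned page */

        return iov_iter_get_pages(ii, pages, bytes_left, max_pages, &start);
}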
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index a0cc2e95ed1b..b996e6cde6bb 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -419,7 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, | |||
| 419 | alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ | 419 | alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ |
| 420 | 1, (name)) | 420 | 1, (name)) |
| 421 | #define create_singlethread_workqueue(name) \ | 421 | #define create_singlethread_workqueue(name) \ |
| 422 | alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name)) | 422 | alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) |
| 423 | 423 | ||
| 424 | extern void destroy_workqueue(struct workqueue_struct *wq); | 424 | extern void destroy_workqueue(struct workqueue_struct *wq); |
| 425 | 425 | ||
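A short sketch of what the redefinition means for callers (my_wq_init is hypothetical): nothing changes at the call site, but the queue is now explicitly ordered rather than merely an unbound queue with max_active = 1.

#include <linux/errno.h>
#include <linux/workqueue.h>

static int my_wq_init(void)
{
        struct workqueue_struct *wq;

        /* identical call site; now backed by an ordered WQ_MEM_RECLAIM queue */
        wq = create_singlethread_workqueue("my_drv");
        if (!wq)
                return -ENOMEM;

        /* ... queue_work(wq, ...) as before ... */
        destroy_workqueue(wq);
        return 0;
}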
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index fc910a622451..2fefcf491aa8 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h | |||
| @@ -295,7 +295,7 @@ struct vb2_buffer { | |||
| 295 | * can return an error if hardware fails, in that case all | 295 | * can return an error if hardware fails, in that case all |
| 296 | * buffers that have been already given by the @buf_queue | 296 | * buffers that have been already given by the @buf_queue |
| 297 | * callback are to be returned by the driver by calling | 297 | * callback are to be returned by the driver by calling |
| 298 | * @vb2_buffer_done(VB2_BUF_STATE_DEQUEUED). | 298 | * @vb2_buffer_done(VB2_BUF_STATE_QUEUED). |
| 299 | * If you need a minimum number of buffers before you can | 299 | * If you need a minimum number of buffers before you can |
| 300 | * start streaming, then set @min_buffers_needed in the | 300 | * start streaming, then set @min_buffers_needed in the |
| 301 | * vb2_queue structure. If that is non-zero then | 301 | * vb2_queue structure. If that is non-zero then |
| @@ -380,6 +380,9 @@ struct v4l2_fh; | |||
| 380 | * @start_streaming_called: start_streaming() was called successfully and we | 380 | * @start_streaming_called: start_streaming() was called successfully and we |
| 381 | * started streaming. | 381 | * started streaming. |
| 382 | * @error: a fatal error occurred on the queue | 382 | * @error: a fatal error occurred on the queue |
| 383 | * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for | ||
| 384 | * buffers. Only set for capture queues if qbuf has not yet been | ||
| 385 | * called since poll() needs to return POLLERR in that situation. | ||
| 383 | * @fileio: file io emulator internal data, used only if emulator is active | 386 | * @fileio: file io emulator internal data, used only if emulator is active |
| 384 | * @threadio: thread io internal data, used only if thread is active | 387 | * @threadio: thread io internal data, used only if thread is active |
| 385 | */ | 388 | */ |
| @@ -417,6 +420,7 @@ struct vb2_queue { | |||
| 417 | unsigned int streaming:1; | 420 | unsigned int streaming:1; |
| 418 | unsigned int start_streaming_called:1; | 421 | unsigned int start_streaming_called:1; |
| 419 | unsigned int error:1; | 422 | unsigned int error:1; |
| 423 | unsigned int waiting_for_buffers:1; | ||
| 420 | 424 | ||
| 421 | struct vb2_fileio_data *fileio; | 425 | struct vb2_fileio_data *fileio; |
| 422 | struct vb2_threadio_data *threadio; | 426 | struct vb2_threadio_data *threadio; |
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index f679877bb601..ec51e673b4b6 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h | |||
| @@ -204,6 +204,7 @@ void ipv6_sock_ac_close(struct sock *sk); | |||
| 204 | 204 | ||
| 205 | int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); | 205 | int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); |
| 206 | int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); | 206 | int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); |
| 207 | void ipv6_ac_destroy_dev(struct inet6_dev *idev); | ||
| 207 | bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, | 208 | bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, |
| 208 | const struct in6_addr *addr); | 209 | const struct in6_addr *addr); |
| 209 | bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev, | 210 | bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev, |
diff --git a/include/net/dst.h b/include/net/dst.h index 71c60f42be48..a8ae4e760778 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
| @@ -480,6 +480,7 @@ void dst_init(void); | |||
| 480 | /* Flags for xfrm_lookup flags argument. */ | 480 | /* Flags for xfrm_lookup flags argument. */ |
| 481 | enum { | 481 | enum { |
| 482 | XFRM_LOOKUP_ICMP = 1 << 0, | 482 | XFRM_LOOKUP_ICMP = 1 << 0, |
| 483 | XFRM_LOOKUP_QUEUE = 1 << 1, | ||
| 483 | }; | 484 | }; |
| 484 | 485 | ||
| 485 | struct flowi; | 486 | struct flowi; |
| @@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net, | |||
| 490 | int flags) | 491 | int flags) |
| 491 | { | 492 | { |
| 492 | return dst_orig; | 493 | return dst_orig; |
| 493 | } | 494 | } |
| 495 | |||
| 496 | static inline struct dst_entry *xfrm_lookup_route(struct net *net, | ||
| 497 | struct dst_entry *dst_orig, | ||
| 498 | const struct flowi *fl, | ||
| 499 | struct sock *sk, | ||
| 500 | int flags) | ||
| 501 | { | ||
| 502 | return dst_orig; | ||
| 503 | } | ||
| 494 | 504 | ||
| 495 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) | 505 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) |
| 496 | { | 506 | { |
| @@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
| 502 | const struct flowi *fl, struct sock *sk, | 512 | const struct flowi *fl, struct sock *sk, |
| 503 | int flags); | 513 | int flags); |
| 504 | 514 | ||
| 515 | struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, | ||
| 516 | const struct flowi *fl, struct sock *sk, | ||
| 517 | int flags); | ||
| 518 | |||
| 505 | /* skb attached with this dst needs transformation if dst->xfrm is valid */ | 519 | /* skb attached with this dst needs transformation if dst->xfrm is valid */ |
| 506 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) | 520 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) |
| 507 | { | 521 | { |
diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 93695f0e22a5..af10c2cf8a1d 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h | |||
| @@ -394,4 +394,12 @@ static inline int genl_set_err(struct genl_family *family, struct net *net, | |||
| 394 | return netlink_set_err(net->genl_sock, portid, group, code); | 394 | return netlink_set_err(net->genl_sock, portid, group, code); |
| 395 | } | 395 | } |
| 396 | 396 | ||
| 397 | static inline int genl_has_listeners(struct genl_family *family, | ||
| 398 | struct sock *sk, unsigned int group) | ||
| 399 | { | ||
| 400 | if (WARN_ON_ONCE(group >= family->n_mcgrps)) | ||
| 401 | return -EINVAL; | ||
| 402 | group = family->mcgrp_offset + group; | ||
| 403 | return netlink_has_listeners(sk, group); | ||
| 404 | } | ||
| 397 | #endif /* __NET_GENERIC_NETLINK_H */ | 405 | #endif /* __NET_GENERIC_NETLINK_H */ |
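A hedged sketch of the intended fast path (my_should_notify and the group index 0 are illustrative): a sender can skip building a multicast notification entirely when nobody in the namespace listens to that group.

#include <net/genetlink.h>
#include <net/net_namespace.h>

static bool my_should_notify(struct genl_family *family, struct net *net)
{
        /* group is the family-relative index, as in the helper above */
        return genl_has_listeners(family, net->genl_sock, 0) > 0;
}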
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a3cfb8ebeb53..620e086c0cbe 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
| @@ -231,7 +231,8 @@ struct qdisc_skb_cb { | |||
| 231 | unsigned int pkt_len; | 231 | unsigned int pkt_len; |
| 232 | u16 slave_dev_queue_mapping; | 232 | u16 slave_dev_queue_mapping; |
| 233 | u16 _pad; | 233 | u16 _pad; |
| 234 | unsigned char data[24]; | 234 | #define QDISC_CB_PRIV_LEN 20 |
| 235 | unsigned char data[QDISC_CB_PRIV_LEN]; | ||
| 235 | }; | 236 | }; |
| 236 | 237 | ||
| 237 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) | 238 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) |
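A minimal sketch of how a qdisc keeps per-skb state in the (now 20-byte) data area (struct my_skb_cb and my_skb_cb() are hypothetical); qdisc_cb_private_validate() turns an oversized structure into a build-time error.

#include <linux/skbuff.h>
#include <net/sch_generic.h>

struct my_skb_cb {
        u32 enqueue_time;
        u32 flags;
};

static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
        return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
}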
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 1ea0b65c4cfb..a2bf41e0bde9 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h | |||
| @@ -47,6 +47,7 @@ struct ib_umem { | |||
| 47 | int writable; | 47 | int writable; |
| 48 | int hugetlb; | 48 | int hugetlb; |
| 49 | struct work_struct work; | 49 | struct work_struct work; |
| 50 | struct pid *pid; | ||
| 50 | struct mm_struct *mm; | 51 | struct mm_struct *mm; |
| 51 | unsigned long diff; | 52 | unsigned long diff; |
| 52 | struct sg_table sg_head; | 53 | struct sg_table sg_head; |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 940aced4ed00..3a73f995a81e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
| @@ -3985,7 +3985,6 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, | |||
| 3985 | 3985 | ||
| 3986 | l = cgroup_pidlist_find_create(cgrp, type); | 3986 | l = cgroup_pidlist_find_create(cgrp, type); |
| 3987 | if (!l) { | 3987 | if (!l) { |
| 3988 | mutex_unlock(&cgrp->pidlist_mutex); | ||
| 3989 | pidlist_free(array); | 3988 | pidlist_free(array); |
| 3990 | return -ENOMEM; | 3989 | return -ENOMEM; |
| 3991 | } | 3990 | } |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 22874d7cf2c0..52cb04c993b7 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
| @@ -365,13 +365,14 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs, | |||
| 365 | struct task_struct *tsk) | 365 | struct task_struct *tsk) |
| 366 | { | 366 | { |
| 367 | if (is_spread_page(cs)) | 367 | if (is_spread_page(cs)) |
| 368 | tsk->flags |= PF_SPREAD_PAGE; | 368 | task_set_spread_page(tsk); |
| 369 | else | 369 | else |
| 370 | tsk->flags &= ~PF_SPREAD_PAGE; | 370 | task_clear_spread_page(tsk); |
| 371 | |||
| 371 | if (is_spread_slab(cs)) | 372 | if (is_spread_slab(cs)) |
| 372 | tsk->flags |= PF_SPREAD_SLAB; | 373 | task_set_spread_slab(tsk); |
| 373 | else | 374 | else |
| 374 | tsk->flags &= ~PF_SPREAD_SLAB; | 375 | task_clear_spread_slab(tsk); |
| 375 | } | 376 | } |
| 376 | 377 | ||
| 377 | /* | 378 | /* |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index c4b8093c80b3..f1604d8cf489 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
| @@ -725,14 +725,6 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn) | |||
| 725 | clear_bit(bit, addr); | 725 | clear_bit(bit, addr); |
| 726 | } | 726 | } |
| 727 | 727 | ||
| 728 | static void memory_bm_clear_current(struct memory_bitmap *bm) | ||
| 729 | { | ||
| 730 | int bit; | ||
| 731 | |||
| 732 | bit = max(bm->cur.node_bit - 1, 0); | ||
| 733 | clear_bit(bit, bm->cur.node->data); | ||
| 734 | } | ||
| 735 | |||
| 736 | static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) | 728 | static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) |
| 737 | { | 729 | { |
| 738 | void *addr; | 730 | void *addr; |
| @@ -1341,35 +1333,23 @@ static struct memory_bitmap copy_bm; | |||
| 1341 | 1333 | ||
| 1342 | void swsusp_free(void) | 1334 | void swsusp_free(void) |
| 1343 | { | 1335 | { |
| 1344 | unsigned long fb_pfn, fr_pfn; | 1336 | struct zone *zone; |
| 1345 | 1337 | unsigned long pfn, max_zone_pfn; | |
| 1346 | memory_bm_position_reset(forbidden_pages_map); | ||
| 1347 | memory_bm_position_reset(free_pages_map); | ||
| 1348 | |||
| 1349 | loop: | ||
| 1350 | fr_pfn = memory_bm_next_pfn(free_pages_map); | ||
| 1351 | fb_pfn = memory_bm_next_pfn(forbidden_pages_map); | ||
| 1352 | |||
| 1353 | /* | ||
| 1354 | * Find the next bit set in both bitmaps. This is guaranteed to | ||
| 1355 | * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP. | ||
| 1356 | */ | ||
| 1357 | do { | ||
| 1358 | if (fb_pfn < fr_pfn) | ||
| 1359 | fb_pfn = memory_bm_next_pfn(forbidden_pages_map); | ||
| 1360 | if (fr_pfn < fb_pfn) | ||
| 1361 | fr_pfn = memory_bm_next_pfn(free_pages_map); | ||
| 1362 | } while (fb_pfn != fr_pfn); | ||
| 1363 | |||
| 1364 | if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) { | ||
| 1365 | struct page *page = pfn_to_page(fr_pfn); | ||
| 1366 | 1338 | ||
| 1367 | memory_bm_clear_current(forbidden_pages_map); | 1339 | for_each_populated_zone(zone) { |
| 1368 | memory_bm_clear_current(free_pages_map); | 1340 | max_zone_pfn = zone_end_pfn(zone); |
| 1369 | __free_page(page); | 1341 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
| 1370 | goto loop; | 1342 | if (pfn_valid(pfn)) { |
| 1343 | struct page *page = pfn_to_page(pfn); | ||
| 1344 | |||
| 1345 | if (swsusp_page_is_forbidden(page) && | ||
| 1346 | swsusp_page_is_free(page)) { | ||
| 1347 | swsusp_unset_page_forbidden(page); | ||
| 1348 | swsusp_unset_page_free(page); | ||
| 1349 | __free_page(page); | ||
| 1350 | } | ||
| 1351 | } | ||
| 1371 | } | 1352 | } |
| 1372 | |||
| 1373 | nr_copy_pages = 0; | 1353 | nr_copy_pages = 0; |
| 1374 | nr_meta_pages = 0; | 1354 | nr_meta_pages = 0; |
| 1375 | restore_pblist = NULL; | 1355 | restore_pblist = NULL; |
diff --git a/lib/genalloc.c b/lib/genalloc.c index bdb9a456bcbb..38d2db82228c 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
| @@ -588,6 +588,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np, | |||
| 588 | if (!np_pool) | 588 | if (!np_pool) |
| 589 | return NULL; | 589 | return NULL; |
| 590 | pdev = of_find_device_by_node(np_pool); | 590 | pdev = of_find_device_by_node(np_pool); |
| 591 | of_node_put(np_pool); | ||
| 591 | if (!pdev) | 592 | if (!pdev) |
| 592 | return NULL; | 593 | return NULL; |
| 593 | return dev_get_gen_pool(&pdev->dev); | 594 | return dev_get_gen_pool(&pdev->dev); |
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index fe5a3342e960..a89cf09a8268 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c | |||
| @@ -184,3 +184,19 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref, | |||
| 184 | call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu); | 184 | call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu); |
| 185 | } | 185 | } |
| 186 | EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm); | 186 | EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm); |
| 187 | |||
| 188 | /* | ||
| 189 | * XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by | ||
| 190 | * block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during v3.18 | ||
| 191 | * devel cycle. Do not use anywhere else. | ||
| 192 | */ | ||
| 193 | void __percpu_ref_kill_expedited(struct percpu_ref *ref) | ||
| 194 | { | ||
| 195 | WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD, | ||
| 196 | "percpu_ref_kill() called more than once on %pf!", | ||
| 197 | ref->release); | ||
| 198 | |||
| 199 | ref->pcpu_count_ptr |= PCPU_REF_DEAD; | ||
| 200 | synchronize_sched_expedited(); | ||
| 201 | percpu_ref_kill_rcu(&ref->rcu); | ||
| 202 | } | ||
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index a2c78810ebc1..7b36e4d40ed7 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <linux/hash.h> | 23 | #include <linux/hash.h> |
| 24 | #include <linux/random.h> | 24 | #include <linux/random.h> |
| 25 | #include <linux/rhashtable.h> | 25 | #include <linux/rhashtable.h> |
| 26 | #include <linux/log2.h> | ||
| 27 | 26 | ||
| 28 | #define HASH_DEFAULT_SIZE 64UL | 27 | #define HASH_DEFAULT_SIZE 64UL |
| 29 | #define HASH_MIN_SIZE 4UL | 28 | #define HASH_MIN_SIZE 4UL |
diff --git a/mm/iov_iter.c b/mm/iov_iter.c index ab88dc0ea1d3..9a09f2034fcc 100644 --- a/mm/iov_iter.c +++ b/mm/iov_iter.c | |||
| @@ -310,7 +310,7 @@ void iov_iter_init(struct iov_iter *i, int direction, | |||
| 310 | EXPORT_SYMBOL(iov_iter_init); | 310 | EXPORT_SYMBOL(iov_iter_init); |
| 311 | 311 | ||
| 312 | static ssize_t get_pages_iovec(struct iov_iter *i, | 312 | static ssize_t get_pages_iovec(struct iov_iter *i, |
| 313 | struct page **pages, unsigned maxpages, | 313 | struct page **pages, size_t maxsize, unsigned maxpages, |
| 314 | size_t *start) | 314 | size_t *start) |
| 315 | { | 315 | { |
| 316 | size_t offset = i->iov_offset; | 316 | size_t offset = i->iov_offset; |
| @@ -323,6 +323,8 @@ static ssize_t get_pages_iovec(struct iov_iter *i, | |||
| 323 | len = iov->iov_len - offset; | 323 | len = iov->iov_len - offset; |
| 324 | if (len > i->count) | 324 | if (len > i->count) |
| 325 | len = i->count; | 325 | len = i->count; |
| 326 | if (len > maxsize) | ||
| 327 | len = maxsize; | ||
| 326 | addr = (unsigned long)iov->iov_base + offset; | 328 | addr = (unsigned long)iov->iov_base + offset; |
| 327 | len += *start = addr & (PAGE_SIZE - 1); | 329 | len += *start = addr & (PAGE_SIZE - 1); |
| 328 | if (len > maxpages * PAGE_SIZE) | 330 | if (len > maxpages * PAGE_SIZE) |
| @@ -588,13 +590,15 @@ static unsigned long alignment_bvec(const struct iov_iter *i) | |||
| 588 | } | 590 | } |
| 589 | 591 | ||
| 590 | static ssize_t get_pages_bvec(struct iov_iter *i, | 592 | static ssize_t get_pages_bvec(struct iov_iter *i, |
| 591 | struct page **pages, unsigned maxpages, | 593 | struct page **pages, size_t maxsize, unsigned maxpages, |
| 592 | size_t *start) | 594 | size_t *start) |
| 593 | { | 595 | { |
| 594 | const struct bio_vec *bvec = i->bvec; | 596 | const struct bio_vec *bvec = i->bvec; |
| 595 | size_t len = bvec->bv_len - i->iov_offset; | 597 | size_t len = bvec->bv_len - i->iov_offset; |
| 596 | if (len > i->count) | 598 | if (len > i->count) |
| 597 | len = i->count; | 599 | len = i->count; |
| 600 | if (len > maxsize) | ||
| 601 | len = maxsize; | ||
| 598 | /* can't be more than PAGE_SIZE */ | 602 | /* can't be more than PAGE_SIZE */ |
| 599 | *start = bvec->bv_offset + i->iov_offset; | 603 | *start = bvec->bv_offset + i->iov_offset; |
| 600 | 604 | ||
| @@ -711,13 +715,13 @@ unsigned long iov_iter_alignment(const struct iov_iter *i) | |||
| 711 | EXPORT_SYMBOL(iov_iter_alignment); | 715 | EXPORT_SYMBOL(iov_iter_alignment); |
| 712 | 716 | ||
| 713 | ssize_t iov_iter_get_pages(struct iov_iter *i, | 717 | ssize_t iov_iter_get_pages(struct iov_iter *i, |
| 714 | struct page **pages, unsigned maxpages, | 718 | struct page **pages, size_t maxsize, unsigned maxpages, |
| 715 | size_t *start) | 719 | size_t *start) |
| 716 | { | 720 | { |
| 717 | if (i->type & ITER_BVEC) | 721 | if (i->type & ITER_BVEC) |
| 718 | return get_pages_bvec(i, pages, maxpages, start); | 722 | return get_pages_bvec(i, pages, maxsize, maxpages, start); |
| 719 | else | 723 | else |
| 720 | return get_pages_iovec(i, pages, maxpages, start); | 724 | return get_pages_iovec(i, pages, maxsize, maxpages, start); |
| 721 | } | 725 | } |
| 722 | EXPORT_SYMBOL(iov_iter_get_pages); | 726 | EXPORT_SYMBOL(iov_iter_get_pages); |
| 723 | 727 | ||
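
Both iov_iter backends gain a size_t maxsize argument and clamp the segment length against it, in addition to the existing clamps against the iterator's remaining count and maxpages * PAGE_SIZE. A plain-C sketch of the clamp order, with no kernel types and purely illustrative numbers:

    /* Sketch of the added clamp: the usable length is the minimum of the
     * segment length, the bytes left in the iterator, and the caller's
     * maxsize. Names are illustrative only. */
    #include <stdio.h>

    static size_t clamp_len(size_t seg_len, size_t count, size_t maxsize)
    {
        size_t len = seg_len;

        if (len > count)
            len = count;
        if (len > maxsize)      /* the check added by this patch */
            len = maxsize;
        return len;
    }

    int main(void)
    {
        printf("%zu\n", clamp_len(8192, 6000, 4096));  /* -> 4096 */
        return 0;
    }
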
diff --git a/mm/memory.c b/mm/memory.c index adeac306610f..e229970e4223 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -118,6 +118,8 @@ __setup("norandmaps", disable_randmaps); | |||
| 118 | unsigned long zero_pfn __read_mostly; | 118 | unsigned long zero_pfn __read_mostly; |
| 119 | unsigned long highest_memmap_pfn __read_mostly; | 119 | unsigned long highest_memmap_pfn __read_mostly; |
| 120 | 120 | ||
| 121 | EXPORT_SYMBOL(zero_pfn); | ||
| 122 | |||
| 121 | /* | 123 | /* |
| 122 | * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() | 124 | * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() |
| 123 | */ | 125 | */ |
| @@ -1125,7 +1127,7 @@ again: | |||
| 1125 | addr) != page->index) { | 1127 | addr) != page->index) { |
| 1126 | pte_t ptfile = pgoff_to_pte(page->index); | 1128 | pte_t ptfile = pgoff_to_pte(page->index); |
| 1127 | if (pte_soft_dirty(ptent)) | 1129 | if (pte_soft_dirty(ptent)) |
| 1128 | pte_file_mksoft_dirty(ptfile); | 1130 | ptfile = pte_file_mksoft_dirty(ptfile); |
| 1129 | set_pte_at(mm, addr, pte, ptfile); | 1131 | set_pte_at(mm, addr, pte, ptfile); |
| 1130 | } | 1132 | } |
| 1131 | if (PageAnon(page)) | 1133 | if (PageAnon(page)) |
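
The bug here is that pte_file_mksoft_dirty() returns the modified pte rather than changing its argument, so the old code computed the soft-dirty pte and discarded it. A tiny standalone illustration of the rule, with a hypothetical value-returning helper in place of the real pte accessor:

    /* The helper below stands in for value-returning pte helpers: it returns
     * a new value and does not modify its argument. Userspace sketch only. */
    #include <stdio.h>

    typedef unsigned long pte_t;
    #define PTE_SOFT_DIRTY (1UL << 3)

    static pte_t pte_mksoft_dirty_demo(pte_t pte)
    {
        return pte | PTE_SOFT_DIRTY;   /* new value, no side effect */
    }

    int main(void)
    {
        pte_t ptfile = 0x1000;

        pte_mksoft_dirty_demo(ptfile);           /* buggy: result discarded */
        printf("discarded: %#lx\n", ptfile);

        ptfile = pte_mksoft_dirty_demo(ptfile);  /* fixed: assign it back */
        printf("assigned:  %#lx\n", ptfile);
        return 0;
    }
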
diff --git a/mm/shmem.c b/mm/shmem.c index 0e5fb225007c..469f90d56051 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -2367,8 +2367,10 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc | |||
| 2367 | 2367 | ||
| 2368 | if (new_dentry->d_inode) { | 2368 | if (new_dentry->d_inode) { |
| 2369 | (void) shmem_unlink(new_dir, new_dentry); | 2369 | (void) shmem_unlink(new_dir, new_dentry); |
| 2370 | if (they_are_dirs) | 2370 | if (they_are_dirs) { |
| 2371 | drop_nlink(new_dentry->d_inode); | ||
| 2371 | drop_nlink(old_dir); | 2372 | drop_nlink(old_dir); |
| 2373 | } | ||
| 2372 | } else if (they_are_dirs) { | 2374 | } else if (they_are_dirs) { |
| 2373 | drop_nlink(old_dir); | 2375 | drop_nlink(old_dir); |
| 2374 | inc_nlink(new_dir); | 2376 | inc_nlink(new_dir); |
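
When rename() replaces an existing directory, the victim directory has to lose a link of its own in addition to the link the old parent loses; the added drop_nlink(new_dentry->d_inode) supplies the missing decrement. A back-of-the-envelope counter model, with names and starting counts chosen only for illustration:

    /* Minimal counter model of the fix: replacing an existing directory via
     * rename() drops a link on the victim directory as well as on the old
     * parent. Not shmem internals. */
    #include <stdio.h>

    struct dir { const char *name; int nlink; };

    static void drop_nlink(struct dir *d) { d->nlink--; }

    int main(void)
    {
        struct dir old_dir = { "old_parent", 3 };
        struct dir victim  = { "replaced_dir", 2 };   /* "." plus parent entry */

        /* rename(old_dir/foo -> new_dir/replaced_dir), both are directories */
        drop_nlink(&victim);   /* the decrement this patch adds */
        drop_nlink(&old_dir);  /* the moved dir's ".." leaves old_dir */

        printf("%s nlink=%d, %s nlink=%d\n",
               old_dir.name, old_dir.nlink, victim.name, victim.nlink);
        return 0;
    }
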
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
| @@ -2124,7 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
| 2124 | int | 2124 | int |
| 2125 | __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | 2125 | __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) |
| 2126 | { | 2126 | { |
| 2127 | size_t left_over, freelist_size, ralign; | 2127 | size_t left_over, freelist_size; |
| 2128 | size_t ralign = BYTES_PER_WORD; | ||
| 2128 | gfp_t gfp; | 2129 | gfp_t gfp; |
| 2129 | int err; | 2130 | int err; |
| 2130 | size_t size = cachep->size; | 2131 | size_t size = cachep->size; |
| @@ -2157,14 +2158,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
| 2157 | size &= ~(BYTES_PER_WORD - 1); | 2158 | size &= ~(BYTES_PER_WORD - 1); |
| 2158 | } | 2159 | } |
| 2159 | 2160 | ||
| 2160 | /* | ||
| 2161 | * Redzoning and user store require word alignment or possibly larger. | ||
| 2162 | * Note this will be overridden by architecture or caller mandated | ||
| 2163 | * alignment if either is greater than BYTES_PER_WORD. | ||
| 2164 | */ | ||
| 2165 | if (flags & SLAB_STORE_USER) | ||
| 2166 | ralign = BYTES_PER_WORD; | ||
| 2167 | |||
| 2168 | if (flags & SLAB_RED_ZONE) { | 2161 | if (flags & SLAB_RED_ZONE) { |
| 2169 | ralign = REDZONE_ALIGN; | 2162 | ralign = REDZONE_ALIGN; |
| 2170 | /* If redzoning, ensure that the second redzone is suitably | 2163 | /* If redzoning, ensure that the second redzone is suitably |
| @@ -2994,7 +2987,7 @@ out: | |||
| 2994 | 2987 | ||
| 2995 | #ifdef CONFIG_NUMA | 2988 | #ifdef CONFIG_NUMA |
| 2996 | /* | 2989 | /* |
| 2997 | * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set. | 2990 | * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set. |
| 2998 | * | 2991 | * |
| 2999 | * If we are in_interrupt, then process context, including cpusets and | 2992 | * If we are in_interrupt, then process context, including cpusets and |
| 3000 | * mempolicy, may not apply and should not be used for allocation policy. | 2993 | * mempolicy, may not apply and should not be used for allocation policy. |
| @@ -3226,7 +3219,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) | |||
| 3226 | { | 3219 | { |
| 3227 | void *objp; | 3220 | void *objp; |
| 3228 | 3221 | ||
| 3229 | if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) { | 3222 | if (current->mempolicy || cpuset_do_slab_mem_spread()) { |
| 3230 | objp = alternate_node_alloc(cache, flags); | 3223 | objp = alternate_node_alloc(cache, flags); |
| 3231 | if (objp) | 3224 | if (objp) |
| 3232 | goto out; | 3225 | goto out; |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 62a7fa2e3569..b6c04cbcfdc5 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
| @@ -309,6 +309,9 @@ struct br_input_skb_cb { | |||
| 309 | int igmp; | 309 | int igmp; |
| 310 | int mrouters_only; | 310 | int mrouters_only; |
| 311 | #endif | 311 | #endif |
| 312 | #ifdef CONFIG_BRIDGE_VLAN_FILTERING | ||
| 313 | bool vlan_filtered; | ||
| 314 | #endif | ||
| 312 | }; | 315 | }; |
| 313 | 316 | ||
| 314 | #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) | 317 | #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) |
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index e1bcd653899b..3ba57fcdcd13 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c | |||
| @@ -27,9 +27,13 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags) | |||
| 27 | { | 27 | { |
| 28 | if (flags & BRIDGE_VLAN_INFO_PVID) | 28 | if (flags & BRIDGE_VLAN_INFO_PVID) |
| 29 | __vlan_add_pvid(v, vid); | 29 | __vlan_add_pvid(v, vid); |
| 30 | else | ||
| 31 | __vlan_delete_pvid(v, vid); | ||
| 30 | 32 | ||
| 31 | if (flags & BRIDGE_VLAN_INFO_UNTAGGED) | 33 | if (flags & BRIDGE_VLAN_INFO_UNTAGGED) |
| 32 | set_bit(vid, v->untagged_bitmap); | 34 | set_bit(vid, v->untagged_bitmap); |
| 35 | else | ||
| 36 | clear_bit(vid, v->untagged_bitmap); | ||
| 33 | } | 37 | } |
| 34 | 38 | ||
| 35 | static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) | 39 | static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) |
| @@ -125,7 +129,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, | |||
| 125 | { | 129 | { |
| 126 | u16 vid; | 130 | u16 vid; |
| 127 | 131 | ||
| 128 | if (!br->vlan_enabled) | 132 | /* If this packet was not filtered at input, let it pass */ |
| 133 | if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) | ||
| 129 | goto out; | 134 | goto out; |
| 130 | 135 | ||
| 131 | /* Vlan filter table must be configured at this point. The | 136 | /* Vlan filter table must be configured at this point. The |
| @@ -164,8 +169,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
| 164 | /* If VLAN filtering is disabled on the bridge, all packets are | 169 | /* If VLAN filtering is disabled on the bridge, all packets are |
| 165 | * permitted. | 170 | * permitted. |
| 166 | */ | 171 | */ |
| 167 | if (!br->vlan_enabled) | 172 | if (!br->vlan_enabled) { |
| 173 | BR_INPUT_SKB_CB(skb)->vlan_filtered = false; | ||
| 168 | return true; | 174 | return true; |
| 175 | } | ||
| 169 | 176 | ||
| 170 | /* If there are no vlan in the permitted list, all packets are | 177 | /* If there are no vlan in the permitted list, all packets are |
| 171 | * rejected. | 178 | * rejected. |
| @@ -173,6 +180,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
| 173 | if (!v) | 180 | if (!v) |
| 174 | goto drop; | 181 | goto drop; |
| 175 | 182 | ||
| 183 | BR_INPUT_SKB_CB(skb)->vlan_filtered = true; | ||
| 176 | proto = br->vlan_proto; | 184 | proto = br->vlan_proto; |
| 177 | 185 | ||
| 178 | /* If vlan tx offload is disabled on bridge device and frame was | 186 | /* If vlan tx offload is disabled on bridge device and frame was |
| @@ -251,7 +259,8 @@ bool br_allowed_egress(struct net_bridge *br, | |||
| 251 | { | 259 | { |
| 252 | u16 vid; | 260 | u16 vid; |
| 253 | 261 | ||
| 254 | if (!br->vlan_enabled) | 262 | /* If this packet was not filtered at input, let it pass */ |
| 263 | if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) | ||
| 255 | return true; | 264 | return true; |
| 256 | 265 | ||
| 257 | if (!v) | 266 | if (!v) |
| @@ -270,6 +279,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) | |||
| 270 | struct net_bridge *br = p->br; | 279 | struct net_bridge *br = p->br; |
| 271 | struct net_port_vlans *v; | 280 | struct net_port_vlans *v; |
| 272 | 281 | ||
| 282 | /* If filtering was disabled at input, let it pass. */ | ||
| 273 | if (!br->vlan_enabled) | 283 | if (!br->vlan_enabled) |
| 274 | return true; | 284 | return true; |
| 275 | 285 | ||
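
Rather than consulting br->vlan_enabled again on the egress and VLAN-handling paths, the ingress path now records in the per-skb control block whether VLAN filtering was actually applied, and the later stages act on that single recorded decision. A compact sketch of parking such a per-packet flag in a small cb scratch area; the types below are stand-ins for sk_buff and BR_INPUT_SKB_CB, not the real structures:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct pkt { char cb[48]; };
    struct br_cb { bool vlan_filtered; };
    #define BR_CB(p) ((struct br_cb *)(p)->cb)

    static void ingress(struct pkt *p, bool vlan_enabled)
    {
        BR_CB(p)->vlan_filtered = vlan_enabled;   /* record the decision once */
    }

    static bool egress_allowed(struct pkt *p)
    {
        if (!BR_CB(p)->vlan_filtered)
            return true;            /* never filtered at input: let it pass */
        /* ... per-VLAN egress checks would run here ... */
        return true;
    }

    int main(void)
    {
        struct pkt p;

        memset(&p, 0, sizeof(p));
        ingress(&p, false);
        printf("egress allowed: %d\n", egress_allowed(&p));
        return 0;
    }
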
diff --git a/net/core/dev.c b/net/core/dev.c index ab9a16530c36..cf8a95f48cff 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -4809,9 +4809,14 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev, | |||
| 4809 | sysfs_remove_link(&(dev->dev.kobj), linkname); | 4809 | sysfs_remove_link(&(dev->dev.kobj), linkname); |
| 4810 | } | 4810 | } |
| 4811 | 4811 | ||
| 4812 | #define netdev_adjacent_is_neigh_list(dev, dev_list) \ | 4812 | static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, |
| 4813 | (dev_list == &dev->adj_list.upper || \ | 4813 | struct net_device *adj_dev, |
| 4814 | dev_list == &dev->adj_list.lower) | 4814 | struct list_head *dev_list) |
| 4815 | { | ||
| 4816 | return (dev_list == &dev->adj_list.upper || | ||
| 4817 | dev_list == &dev->adj_list.lower) && | ||
| 4818 | net_eq(dev_net(dev), dev_net(adj_dev)); | ||
| 4819 | } | ||
| 4815 | 4820 | ||
| 4816 | static int __netdev_adjacent_dev_insert(struct net_device *dev, | 4821 | static int __netdev_adjacent_dev_insert(struct net_device *dev, |
| 4817 | struct net_device *adj_dev, | 4822 | struct net_device *adj_dev, |
| @@ -4841,7 +4846,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, | |||
| 4841 | pr_debug("dev_hold for %s, because of link added from %s to %s\n", | 4846 | pr_debug("dev_hold for %s, because of link added from %s to %s\n", |
| 4842 | adj_dev->name, dev->name, adj_dev->name); | 4847 | adj_dev->name, dev->name, adj_dev->name); |
| 4843 | 4848 | ||
| 4844 | if (netdev_adjacent_is_neigh_list(dev, dev_list)) { | 4849 | if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { |
| 4845 | ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); | 4850 | ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); |
| 4846 | if (ret) | 4851 | if (ret) |
| 4847 | goto free_adj; | 4852 | goto free_adj; |
| @@ -4862,7 +4867,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, | |||
| 4862 | return 0; | 4867 | return 0; |
| 4863 | 4868 | ||
| 4864 | remove_symlinks: | 4869 | remove_symlinks: |
| 4865 | if (netdev_adjacent_is_neigh_list(dev, dev_list)) | 4870 | if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) |
| 4866 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); | 4871 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); |
| 4867 | free_adj: | 4872 | free_adj: |
| 4868 | kfree(adj); | 4873 | kfree(adj); |
| @@ -4895,8 +4900,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev, | |||
| 4895 | if (adj->master) | 4900 | if (adj->master) |
| 4896 | sysfs_remove_link(&(dev->dev.kobj), "master"); | 4901 | sysfs_remove_link(&(dev->dev.kobj), "master"); |
| 4897 | 4902 | ||
| 4898 | if (netdev_adjacent_is_neigh_list(dev, dev_list) && | 4903 | if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) |
| 4899 | net_eq(dev_net(dev),dev_net(adj_dev))) | ||
| 4900 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); | 4904 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); |
| 4901 | 4905 | ||
| 4902 | list_del_rcu(&adj->list); | 4906 | list_del_rcu(&adj->list); |
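
Converting the netdev_adjacent_is_neigh_list() macro into a static inline lets the net_eq() namespace test live in one place, so the insert and remove paths apply the same rule (the remove path previously open-coded the extra check). A generic sketch of the refactor with invented types, just to show the shape:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev { int netns; void *upper_list, *lower_list; };

    /* one helper instead of a macro, so the extra namespace clause is applied
     * everywhere it is called */
    static inline bool is_neigh_list(const struct dev *dev, const struct dev *adj,
                                     const void *list)
    {
        return (list == &dev->upper_list || list == &dev->lower_list) &&
               dev->netns == adj->netns;
    }

    int main(void)
    {
        struct dev a = { .netns = 0 };
        struct dev b = { .netns = 1 };

        printf("same list, different netns: %d\n",
               is_neigh_list(&a, &b, &a.upper_list));
        return 0;
    }
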
diff --git a/net/core/sock.c b/net/core/sock.c index d372b4bd3f99..9c3f823e76a9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -1866,7 +1866,7 @@ EXPORT_SYMBOL(sock_alloc_send_skb); | |||
| 1866 | * skb_page_frag_refill - check that a page_frag contains enough room | 1866 | * skb_page_frag_refill - check that a page_frag contains enough room |
| 1867 | * @sz: minimum size of the fragment we want to get | 1867 | * @sz: minimum size of the fragment we want to get |
| 1868 | * @pfrag: pointer to page_frag | 1868 | * @pfrag: pointer to page_frag |
| 1869 | * @prio: priority for memory allocation | 1869 | * @gfp: priority for memory allocation |
| 1870 | * | 1870 | * |
| 1871 | * Note: While this allocator tries to use high order pages, there is | 1871 | * Note: While this allocator tries to use high order pages, there is |
| 1872 | * no guarantee that allocations succeed. Therefore, @sz MUST be | 1872 | * no guarantee that allocations succeed. Therefore, @sz MUST be |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index afed1aac2638..bd41dd1948b6 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
| @@ -79,10 +79,10 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst, | |||
| 79 | idst->saddr = saddr; | 79 | idst->saddr = saddr; |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | static void tunnel_dst_set(struct ip_tunnel *t, | 82 | static noinline void tunnel_dst_set(struct ip_tunnel *t, |
| 83 | struct dst_entry *dst, __be32 saddr) | 83 | struct dst_entry *dst, __be32 saddr) |
| 84 | { | 84 | { |
| 85 | __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr); | 85 | __tunnel_dst_set(raw_cpu_ptr(t->dst_cache), dst, saddr); |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | static void tunnel_dst_reset(struct ip_tunnel *t) | 88 | static void tunnel_dst_reset(struct ip_tunnel *t) |
| @@ -106,7 +106,7 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, | |||
| 106 | struct dst_entry *dst; | 106 | struct dst_entry *dst; |
| 107 | 107 | ||
| 108 | rcu_read_lock(); | 108 | rcu_read_lock(); |
| 109 | idst = this_cpu_ptr(t->dst_cache); | 109 | idst = raw_cpu_ptr(t->dst_cache); |
| 110 | dst = rcu_dereference(idst->dst); | 110 | dst = rcu_dereference(idst->dst); |
| 111 | if (dst && !atomic_inc_not_zero(&dst->__refcnt)) | 111 | if (dst && !atomic_inc_not_zero(&dst->__refcnt)) |
| 112 | dst = NULL; | 112 | dst = NULL; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index eaa4b000c7b4..173e7ea54c70 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -2265,9 +2265,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, | |||
| 2265 | return rt; | 2265 | return rt; |
| 2266 | 2266 | ||
| 2267 | if (flp4->flowi4_proto) | 2267 | if (flp4->flowi4_proto) |
| 2268 | rt = (struct rtable *) xfrm_lookup(net, &rt->dst, | 2268 | rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, |
| 2269 | flowi4_to_flowi(flp4), | 2269 | flowi4_to_flowi(flp4), |
| 2270 | sk, 0); | 2270 | sk, 0); |
| 2271 | 2271 | ||
| 2272 | return rt; | 2272 | return rt; |
| 2273 | } | 2273 | } |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index fc1fac2a0528..3342ee64f2e3 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -3094,11 +3094,13 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
| 3094 | 3094 | ||
| 3095 | write_unlock_bh(&idev->lock); | 3095 | write_unlock_bh(&idev->lock); |
| 3096 | 3096 | ||
| 3097 | /* Step 5: Discard multicast list */ | 3097 | /* Step 5: Discard anycast and multicast list */ |
| 3098 | if (how) | 3098 | if (how) { |
| 3099 | ipv6_ac_destroy_dev(idev); | ||
| 3099 | ipv6_mc_destroy_dev(idev); | 3100 | ipv6_mc_destroy_dev(idev); |
| 3100 | else | 3101 | } else { |
| 3101 | ipv6_mc_down(idev); | 3102 | ipv6_mc_down(idev); |
| 3103 | } | ||
| 3102 | 3104 | ||
| 3103 | idev->tstamp = jiffies; | 3105 | idev->tstamp = jiffies; |
| 3104 | 3106 | ||
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index ff2de7d9d8e6..9a386842fd62 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
| @@ -351,6 +351,27 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr) | |||
| 351 | return __ipv6_dev_ac_dec(idev, addr); | 351 | return __ipv6_dev_ac_dec(idev, addr); |
| 352 | } | 352 | } |
| 353 | 353 | ||
| 354 | void ipv6_ac_destroy_dev(struct inet6_dev *idev) | ||
| 355 | { | ||
| 356 | struct ifacaddr6 *aca; | ||
| 357 | |||
| 358 | write_lock_bh(&idev->lock); | ||
| 359 | while ((aca = idev->ac_list) != NULL) { | ||
| 360 | idev->ac_list = aca->aca_next; | ||
| 361 | write_unlock_bh(&idev->lock); | ||
| 362 | |||
| 363 | addrconf_leave_solict(idev, &aca->aca_addr); | ||
| 364 | |||
| 365 | dst_hold(&aca->aca_rt->dst); | ||
| 366 | ip6_del_rt(aca->aca_rt); | ||
| 367 | |||
| 368 | aca_put(aca); | ||
| 369 | |||
| 370 | write_lock_bh(&idev->lock); | ||
| 371 | } | ||
| 372 | write_unlock_bh(&idev->lock); | ||
| 373 | } | ||
| 374 | |||
| 354 | /* | 375 | /* |
| 355 | * check if the interface has this anycast address | 376 | * check if the interface has this anycast address |
| 356 | * called with rcu_read_lock() | 377 | * called with rcu_read_lock() |
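
The new ipv6_ac_destroy_dev() uses the usual teardown loop for a locked list whose cleanup work cannot run under the lock: detach the head entry while holding idev->lock, drop the lock for the teardown calls, then retake it for the next entry. A userspace rendition of that pattern with a pthread mutex and a toy list:

    /* Userspace sketch of the pop-then-unlock teardown loop: each node is
     * detached under the lock, and the lock is released for the cleanup. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; int id; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    static void destroy_all(void)
    {
        struct node *n;

        pthread_mutex_lock(&lock);
        while ((n = head) != NULL) {
            head = n->next;
            pthread_mutex_unlock(&lock);

            printf("tearing down %d\n", n->id);  /* blocking work goes here */
            free(n);

            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            n->next = head;
            head = n;
        }
        destroy_all();
        return 0;
    }
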
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 315a55d66079..0a3448b2888f 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -1009,7 +1009,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | |||
| 1009 | if (final_dst) | 1009 | if (final_dst) |
| 1010 | fl6->daddr = *final_dst; | 1010 | fl6->daddr = *final_dst; |
| 1011 | 1011 | ||
| 1012 | return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); | 1012 | return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
| 1013 | } | 1013 | } |
| 1014 | EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); | 1014 | EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); |
| 1015 | 1015 | ||
| @@ -1041,7 +1041,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | |||
| 1041 | if (final_dst) | 1041 | if (final_dst) |
| 1042 | fl6->daddr = *final_dst; | 1042 | fl6->daddr = *final_dst; |
| 1043 | 1043 | ||
| 1044 | return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); | 1044 | return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
| 1045 | } | 1045 | } |
| 1046 | EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); | 1046 | EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); |
| 1047 | 1047 | ||
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 441875f03750..a1e433b88c66 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
| @@ -1822,7 +1822,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | |||
| 1822 | sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; | 1822 | sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; |
| 1823 | if (sdata->vif.bss_conf.use_short_slot) | 1823 | if (sdata->vif.bss_conf.use_short_slot) |
| 1824 | sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; | 1824 | sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; |
| 1825 | sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period; | 1825 | sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period; |
| 1826 | sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; | 1826 | sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; |
| 1827 | 1827 | ||
| 1828 | sinfo->sta_flags.set = 0; | 1828 | sinfo->sta_flags.set = 0; |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 91d66b7e64ac..64dc864a417f 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
| @@ -78,11 +78,12 @@ static const struct genl_multicast_group ovs_dp_vport_multicast_group = { | |||
| 78 | 78 | ||
| 79 | /* Check if need to build a reply message. | 79 | /* Check if need to build a reply message. |
| 80 | * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */ | 80 | * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */ |
| 81 | static bool ovs_must_notify(struct genl_info *info, | 81 | static bool ovs_must_notify(struct genl_family *family, struct genl_info *info, |
| 82 | const struct genl_multicast_group *grp) | 82 | unsigned int group) |
| 83 | { | 83 | { |
| 84 | return info->nlhdr->nlmsg_flags & NLM_F_ECHO || | 84 | return info->nlhdr->nlmsg_flags & NLM_F_ECHO || |
| 85 | netlink_has_listeners(genl_info_net(info)->genl_sock, 0); | 85 | genl_has_listeners(family, genl_info_net(info)->genl_sock, |
| 86 | group); | ||
| 86 | } | 87 | } |
| 87 | 88 | ||
| 88 | static void ovs_notify(struct genl_family *family, | 89 | static void ovs_notify(struct genl_family *family, |
| @@ -763,7 +764,7 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act | |||
| 763 | { | 764 | { |
| 764 | struct sk_buff *skb; | 765 | struct sk_buff *skb; |
| 765 | 766 | ||
| 766 | if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group)) | 767 | if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0)) |
| 767 | return NULL; | 768 | return NULL; |
| 768 | 769 | ||
| 769 | skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL); | 770 | skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL); |
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 02a86a27fd84..0f62326c0f5e 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
| @@ -54,7 +54,7 @@ static int rfkill_gpio_set_power(void *data, bool blocked) | |||
| 54 | if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled) | 54 | if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled) |
| 55 | clk_disable(rfkill->clk); | 55 | clk_disable(rfkill->clk); |
| 56 | 56 | ||
| 57 | rfkill->clk_enabled = blocked; | 57 | rfkill->clk_enabled = !blocked; |
| 58 | 58 | ||
| 59 | return 0; | 59 | return 0; |
| 60 | } | 60 | } |
| @@ -163,6 +163,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = { | |||
| 163 | { "LNV4752", RFKILL_TYPE_GPS }, | 163 | { "LNV4752", RFKILL_TYPE_GPS }, |
| 164 | { }, | 164 | { }, |
| 165 | }; | 165 | }; |
| 166 | MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match); | ||
| 166 | #endif | 167 | #endif |
| 167 | 168 | ||
| 168 | static struct platform_driver rfkill_gpio_driver = { | 169 | static struct platform_driver rfkill_gpio_driver = { |
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index b45d080e64a7..1b24191167f1 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
| @@ -1143,7 +1143,7 @@ static long rxrpc_read(const struct key *key, | |||
| 1143 | if (copy_to_user(xdr, (s), _l) != 0) \ | 1143 | if (copy_to_user(xdr, (s), _l) != 0) \ |
| 1144 | goto fault; \ | 1144 | goto fault; \ |
| 1145 | if (_l & 3 && \ | 1145 | if (_l & 3 && \ |
| 1146 | copy_to_user((u8 *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ | 1146 | copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ |
| 1147 | goto fault; \ | 1147 | goto fault; \ |
| 1148 | xdr += (_l + 3) >> 2; \ | 1148 | xdr += (_l + 3) >> 2; \ |
| 1149 | } while(0) | 1149 | } while(0) |
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index ed30e436128b..fb666d1e4de3 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c | |||
| @@ -133,10 +133,16 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) | |||
| 133 | --sch->q.qlen; | 133 | --sch->q.qlen; |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | /* private part of skb->cb[] that a qdisc is allowed to use | ||
| 137 | * is limited to QDISC_CB_PRIV_LEN bytes. | ||
| 138 | * As a flow key might be too large, we store a part of it only. | ||
| 139 | */ | ||
| 140 | #define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3) | ||
| 141 | |||
| 136 | struct choke_skb_cb { | 142 | struct choke_skb_cb { |
| 137 | u16 classid; | 143 | u16 classid; |
| 138 | u8 keys_valid; | 144 | u8 keys_valid; |
| 139 | struct flow_keys keys; | 145 | u8 keys[QDISC_CB_PRIV_LEN - 3]; |
| 140 | }; | 146 | }; |
| 141 | 147 | ||
| 142 | static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) | 148 | static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) |
| @@ -163,22 +169,26 @@ static u16 choke_get_classid(const struct sk_buff *skb) | |||
| 163 | static bool choke_match_flow(struct sk_buff *skb1, | 169 | static bool choke_match_flow(struct sk_buff *skb1, |
| 164 | struct sk_buff *skb2) | 170 | struct sk_buff *skb2) |
| 165 | { | 171 | { |
| 172 | struct flow_keys temp; | ||
| 173 | |||
| 166 | if (skb1->protocol != skb2->protocol) | 174 | if (skb1->protocol != skb2->protocol) |
| 167 | return false; | 175 | return false; |
| 168 | 176 | ||
| 169 | if (!choke_skb_cb(skb1)->keys_valid) { | 177 | if (!choke_skb_cb(skb1)->keys_valid) { |
| 170 | choke_skb_cb(skb1)->keys_valid = 1; | 178 | choke_skb_cb(skb1)->keys_valid = 1; |
| 171 | skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys); | 179 | skb_flow_dissect(skb1, &temp); |
| 180 | memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN); | ||
| 172 | } | 181 | } |
| 173 | 182 | ||
| 174 | if (!choke_skb_cb(skb2)->keys_valid) { | 183 | if (!choke_skb_cb(skb2)->keys_valid) { |
| 175 | choke_skb_cb(skb2)->keys_valid = 1; | 184 | choke_skb_cb(skb2)->keys_valid = 1; |
| 176 | skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys); | 185 | skb_flow_dissect(skb2, &temp); |
| 186 | memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN); | ||
| 177 | } | 187 | } |
| 178 | 188 | ||
| 179 | return !memcmp(&choke_skb_cb(skb1)->keys, | 189 | return !memcmp(&choke_skb_cb(skb1)->keys, |
| 180 | &choke_skb_cb(skb2)->keys, | 190 | &choke_skb_cb(skb2)->keys, |
| 181 | sizeof(struct flow_keys)); | 191 | CHOKE_K_LEN); |
| 182 | } | 192 | } |
| 183 | 193 | ||
| 184 | /* | 194 | /* |
diff --git a/net/socket.c b/net/socket.c index 2e2586e2dee1..4cdbc107606f 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -1996,6 +1996,9 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, | |||
| 1996 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) | 1996 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) |
| 1997 | return -EFAULT; | 1997 | return -EFAULT; |
| 1998 | 1998 | ||
| 1999 | if (kmsg->msg_name == NULL) | ||
| 2000 | kmsg->msg_namelen = 0; | ||
| 2001 | |||
| 1999 | if (kmsg->msg_namelen < 0) | 2002 | if (kmsg->msg_namelen < 0) |
| 2000 | return -EINVAL; | 2003 | return -EINVAL; |
| 2001 | 2004 | ||
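
copy_msghdr_from_user() now normalizes the address pair before validating it: a NULL msg_name forces msg_namelen to zero, so later code never sees a positive length with no buffer behind it. A tiny sketch of that normalize-then-validate order; the struct is a stand-in for struct msghdr:

    #include <stdio.h>

    struct mhdr { void *name; int namelen; };

    static int sanitize(struct mhdr *m)
    {
        if (m->name == NULL)
            m->namelen = 0;      /* the check this patch adds */
        if (m->namelen < 0)
            return -1;           /* -EINVAL in the kernel */
        return 0;
    }

    int main(void)
    {
        struct mhdr m = { NULL, 16 };   /* NULL name but non-zero length */

        sanitize(&m);
        printf("namelen after sanitize: %d\n", m.namelen);
        return 0;
    }
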
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index df7b1332a1ec..7257164af91b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -6969,6 +6969,9 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp) | |||
| 6969 | struct nlattr *data = ((void **)skb->cb)[2]; | 6969 | struct nlattr *data = ((void **)skb->cb)[2]; |
| 6970 | enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE; | 6970 | enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE; |
| 6971 | 6971 | ||
| 6972 | /* clear CB data for netlink core to own from now on */ | ||
| 6973 | memset(skb->cb, 0, sizeof(skb->cb)); | ||
| 6974 | |||
| 6972 | nla_nest_end(skb, data); | 6975 | nla_nest_end(skb, data); |
| 6973 | genlmsg_end(skb, hdr); | 6976 | genlmsg_end(skb, hdr); |
| 6974 | 6977 | ||
| @@ -9294,6 +9297,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb) | |||
| 9294 | void *hdr = ((void **)skb->cb)[1]; | 9297 | void *hdr = ((void **)skb->cb)[1]; |
| 9295 | struct nlattr *data = ((void **)skb->cb)[2]; | 9298 | struct nlattr *data = ((void **)skb->cb)[2]; |
| 9296 | 9299 | ||
| 9300 | /* clear CB data for netlink core to own from now on */ | ||
| 9301 | memset(skb->cb, 0, sizeof(skb->cb)); | ||
| 9302 | |||
| 9297 | if (WARN_ON(!rdev->cur_cmd_info)) { | 9303 | if (WARN_ON(!rdev->cur_cmd_info)) { |
| 9298 | kfree_skb(skb); | 9304 | kfree_skb(skb); |
| 9299 | return -EINVAL; | 9305 | return -EINVAL; |
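
The vendor-command and testmode reply paths park a few private pointers in skb->cb while composing the message; the added memset() wipes that scratch area before the skb is handed to the netlink core, which uses cb for its own bookkeeping from then on. A sketch of clearing a shared scratch buffer when ownership changes hands, with made-up types:

    #include <stdio.h>
    #include <string.h>

    struct msg { char cb[48]; char payload[64]; };

    static void build_reply(struct msg *m, const char *tag)
    {
        /* private bookkeeping parked in cb while composing the reply */
        snprintf(m->cb, sizeof(m->cb), "%s", tag);
        snprintf(m->payload, sizeof(m->payload), "reply");
    }

    static void hand_off(struct msg *m)
    {
        /* cb belongs to the next layer from here on; wipe our leftovers */
        memset(m->cb, 0, sizeof(m->cb));
    }

    int main(void)
    {
        struct msg m;

        build_reply(&m, "vendor-cmd state");
        hand_off(&m);
        printf("cb cleared: %d\n", m.cb[0] == 0);
        return 0;
    }
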
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index beeed602aeb3..fdde51f4271a 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
| @@ -39,6 +39,11 @@ | |||
| 39 | #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ)) | 39 | #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ)) |
| 40 | #define XFRM_MAX_QUEUE_LEN 100 | 40 | #define XFRM_MAX_QUEUE_LEN 100 |
| 41 | 41 | ||
| 42 | struct xfrm_flo { | ||
| 43 | struct dst_entry *dst_orig; | ||
| 44 | u8 flags; | ||
| 45 | }; | ||
| 46 | |||
| 42 | static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); | 47 | static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); |
| 43 | static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] | 48 | static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] |
| 44 | __read_mostly; | 49 | __read_mostly; |
| @@ -1877,13 +1882,14 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb) | |||
| 1877 | } | 1882 | } |
| 1878 | 1883 | ||
| 1879 | static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, | 1884 | static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, |
| 1880 | struct dst_entry *dst, | 1885 | struct xfrm_flo *xflo, |
| 1881 | const struct flowi *fl, | 1886 | const struct flowi *fl, |
| 1882 | int num_xfrms, | 1887 | int num_xfrms, |
| 1883 | u16 family) | 1888 | u16 family) |
| 1884 | { | 1889 | { |
| 1885 | int err; | 1890 | int err; |
| 1886 | struct net_device *dev; | 1891 | struct net_device *dev; |
| 1892 | struct dst_entry *dst; | ||
| 1887 | struct dst_entry *dst1; | 1893 | struct dst_entry *dst1; |
| 1888 | struct xfrm_dst *xdst; | 1894 | struct xfrm_dst *xdst; |
| 1889 | 1895 | ||
| @@ -1891,9 +1897,12 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, | |||
| 1891 | if (IS_ERR(xdst)) | 1897 | if (IS_ERR(xdst)) |
| 1892 | return xdst; | 1898 | return xdst; |
| 1893 | 1899 | ||
| 1894 | if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0) | 1900 | if (!(xflo->flags & XFRM_LOOKUP_QUEUE) || |
| 1901 | net->xfrm.sysctl_larval_drop || | ||
| 1902 | num_xfrms <= 0) | ||
| 1895 | return xdst; | 1903 | return xdst; |
| 1896 | 1904 | ||
| 1905 | dst = xflo->dst_orig; | ||
| 1897 | dst1 = &xdst->u.dst; | 1906 | dst1 = &xdst->u.dst; |
| 1898 | dst_hold(dst); | 1907 | dst_hold(dst); |
| 1899 | xdst->route = dst; | 1908 | xdst->route = dst; |
| @@ -1935,7 +1944,7 @@ static struct flow_cache_object * | |||
| 1935 | xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, | 1944 | xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, |
| 1936 | struct flow_cache_object *oldflo, void *ctx) | 1945 | struct flow_cache_object *oldflo, void *ctx) |
| 1937 | { | 1946 | { |
| 1938 | struct dst_entry *dst_orig = (struct dst_entry *)ctx; | 1947 | struct xfrm_flo *xflo = (struct xfrm_flo *)ctx; |
| 1939 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; | 1948 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; |
| 1940 | struct xfrm_dst *xdst, *new_xdst; | 1949 | struct xfrm_dst *xdst, *new_xdst; |
| 1941 | int num_pols = 0, num_xfrms = 0, i, err, pol_dead; | 1950 | int num_pols = 0, num_xfrms = 0, i, err, pol_dead; |
| @@ -1976,7 +1985,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, | |||
| 1976 | goto make_dummy_bundle; | 1985 | goto make_dummy_bundle; |
| 1977 | } | 1986 | } |
| 1978 | 1987 | ||
| 1979 | new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); | 1988 | new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, |
| 1989 | xflo->dst_orig); | ||
| 1980 | if (IS_ERR(new_xdst)) { | 1990 | if (IS_ERR(new_xdst)) { |
| 1981 | err = PTR_ERR(new_xdst); | 1991 | err = PTR_ERR(new_xdst); |
| 1982 | if (err != -EAGAIN) | 1992 | if (err != -EAGAIN) |
| @@ -2010,7 +2020,7 @@ make_dummy_bundle: | |||
| 2010 | /* We found policies, but there's no bundles to instantiate: | 2020 | /* We found policies, but there's no bundles to instantiate: |
| 2011 | * either because the policy blocks, has no transformations or | 2021 | * either because the policy blocks, has no transformations or |
| 2012 | * we could not build template (no xfrm_states).*/ | 2022 | * we could not build template (no xfrm_states).*/ |
| 2013 | xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family); | 2023 | xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family); |
| 2014 | if (IS_ERR(xdst)) { | 2024 | if (IS_ERR(xdst)) { |
| 2015 | xfrm_pols_put(pols, num_pols); | 2025 | xfrm_pols_put(pols, num_pols); |
| 2016 | return ERR_CAST(xdst); | 2026 | return ERR_CAST(xdst); |
| @@ -2104,13 +2114,18 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
| 2104 | } | 2114 | } |
| 2105 | 2115 | ||
| 2106 | if (xdst == NULL) { | 2116 | if (xdst == NULL) { |
| 2117 | struct xfrm_flo xflo; | ||
| 2118 | |||
| 2119 | xflo.dst_orig = dst_orig; | ||
| 2120 | xflo.flags = flags; | ||
| 2121 | |||
| 2107 | /* To accelerate a bit... */ | 2122 | /* To accelerate a bit... */ |
| 2108 | if ((dst_orig->flags & DST_NOXFRM) || | 2123 | if ((dst_orig->flags & DST_NOXFRM) || |
| 2109 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) | 2124 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) |
| 2110 | goto nopol; | 2125 | goto nopol; |
| 2111 | 2126 | ||
| 2112 | flo = flow_cache_lookup(net, fl, family, dir, | 2127 | flo = flow_cache_lookup(net, fl, family, dir, |
| 2113 | xfrm_bundle_lookup, dst_orig); | 2128 | xfrm_bundle_lookup, &xflo); |
| 2114 | if (flo == NULL) | 2129 | if (flo == NULL) |
| 2115 | goto nopol; | 2130 | goto nopol; |
| 2116 | if (IS_ERR(flo)) { | 2131 | if (IS_ERR(flo)) { |
| @@ -2138,7 +2153,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
| 2138 | xfrm_pols_put(pols, drop_pols); | 2153 | xfrm_pols_put(pols, drop_pols); |
| 2139 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | 2154 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); |
| 2140 | 2155 | ||
| 2141 | return make_blackhole(net, family, dst_orig); | 2156 | return ERR_PTR(-EREMOTE); |
| 2142 | } | 2157 | } |
| 2143 | 2158 | ||
| 2144 | err = -EAGAIN; | 2159 | err = -EAGAIN; |
| @@ -2195,6 +2210,23 @@ dropdst: | |||
| 2195 | } | 2210 | } |
| 2196 | EXPORT_SYMBOL(xfrm_lookup); | 2211 | EXPORT_SYMBOL(xfrm_lookup); |
| 2197 | 2212 | ||
| 2213 | /* Callers of xfrm_lookup_route() must ensure a call to dst_output(). | ||
| 2214 | * Otherwise we may send out blackholed packets. | ||
| 2215 | */ | ||
| 2216 | struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, | ||
| 2217 | const struct flowi *fl, | ||
| 2218 | struct sock *sk, int flags) | ||
| 2219 | { | ||
| 2220 | struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, | ||
| 2221 | flags | XFRM_LOOKUP_QUEUE); | ||
| 2222 | |||
| 2223 | if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) | ||
| 2224 | return make_blackhole(net, dst_orig->ops->family, dst_orig); | ||
| 2225 | |||
| 2226 | return dst; | ||
| 2227 | } | ||
| 2228 | EXPORT_SYMBOL(xfrm_lookup_route); | ||
| 2229 | |||
| 2198 | static inline int | 2230 | static inline int |
| 2199 | xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) | 2231 | xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) |
| 2200 | { | 2232 | { |
| @@ -2460,7 +2492,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) | |||
| 2460 | 2492 | ||
| 2461 | skb_dst_force(skb); | 2493 | skb_dst_force(skb); |
| 2462 | 2494 | ||
| 2463 | dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0); | 2495 | dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE); |
| 2464 | if (IS_ERR(dst)) { | 2496 | if (IS_ERR(dst)) { |
| 2465 | res = 0; | 2497 | res = 0; |
| 2466 | dst = NULL; | 2498 | dst = NULL; |
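
xfrm_lookup() no longer converts the would-be-blackholed case into a blackhole route itself: it reports it as -EREMOTE, and the new xfrm_lookup_route() wrapper, which the route-output callers in this series (ip_route_output_flow, ip6_dst_lookup_flow, ip6_sk_dst_lookup_flow) now use, is the one place that maps the sentinel back to make_blackhole(). A userspace sketch of that wrapper-translates-sentinel split, with stand-in types rather than the xfrm API:

    #include <errno.h>
    #include <stdio.h>

    struct route { const char *kind; };

    static struct route normal = { "normal" };
    static struct route blackhole = { "blackhole" };

    static struct route *core_lookup(int larval, int *err)
    {
        if (larval) {
            *err = -EREMOTE;   /* report the case, do not substitute */
            return NULL;
        }
        *err = 0;
        return &normal;
    }

    static struct route *lookup_route(int larval)
    {
        int err;
        struct route *rt = core_lookup(larval, &err);

        if (!rt && err == -EREMOTE)
            return &blackhole; /* only this caller installs the fallback */
        return rt;
    }

    int main(void)
    {
        printf("resolved: %s\n", lookup_route(1)->kind);
        return 0;
    }
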
diff --git a/scripts/tags.sh b/scripts/tags.sh index cbfd269a6011..293828bfd4ac 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh | |||
| @@ -197,6 +197,9 @@ exuberant() | |||
| 197 | --regex-c++='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/' \ | 197 | --regex-c++='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/' \ |
| 198 | --regex-c++='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/' \ | 198 | --regex-c++='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/' \ |
| 199 | --regex-c++='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \ | 199 | --regex-c++='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \ |
| 200 | --regex-c++='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/' \ | ||
| 201 | --regex-c++='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/' \ | ||
| 202 | --regex-c++='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/'\ | ||
| 200 | --regex-c='/PCI_OP_READ\((\w*).*[1-4]\)/pci_bus_read_config_\1/' \ | 203 | --regex-c='/PCI_OP_READ\((\w*).*[1-4]\)/pci_bus_read_config_\1/' \ |
| 201 | --regex-c='/PCI_OP_WRITE\((\w*).*[1-4]\)/pci_bus_write_config_\1/' \ | 204 | --regex-c='/PCI_OP_WRITE\((\w*).*[1-4]\)/pci_bus_write_config_\1/' \ |
| 202 | --regex-c='/DEFINE_(MUTEX|SEMAPHORE|SPINLOCK)\((\w*)/\2/v/' \ | 205 | --regex-c='/DEFINE_(MUTEX|SEMAPHORE|SPINLOCK)\((\w*)/\2/v/' \ |
| @@ -260,6 +263,9 @@ emacs() | |||
| 260 | --regex='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/' \ | 263 | --regex='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/' \ |
| 261 | --regex='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/' \ | 264 | --regex='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/' \ |
| 262 | --regex='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \ | 265 | --regex='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \ |
| 266 | --regex='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/' \ | ||
| 267 | --regex='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/' \ | ||
| 268 | --regex='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/' \ | ||
| 263 | --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/' \ | 269 | --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/' \ |
| 264 | --regex='/PCI_OP_READ(\([a-z]*[a-z]\).*[1-4])/pci_bus_read_config_\1/' \ | 270 | --regex='/PCI_OP_READ(\([a-z]*[a-z]\).*[1-4])/pci_bus_read_config_\1/' \ |
| 265 | --regex='/PCI_OP_WRITE(\([a-z]*[a-z]\).*[1-4])/pci_bus_write_config_\1/'\ | 271 | --regex='/PCI_OP_WRITE(\([a-z]*[a-z]\).*[1-4])/pci_bus_write_config_\1/'\ |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 9acc77eae487..0032278567ad 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
| @@ -1782,14 +1782,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream, | |||
| 1782 | { | 1782 | { |
| 1783 | struct snd_pcm_hw_params *params = arg; | 1783 | struct snd_pcm_hw_params *params = arg; |
| 1784 | snd_pcm_format_t format; | 1784 | snd_pcm_format_t format; |
| 1785 | int channels, width; | 1785 | int channels; |
| 1786 | ssize_t frame_size; | ||
| 1786 | 1787 | ||
| 1787 | params->fifo_size = substream->runtime->hw.fifo_size; | 1788 | params->fifo_size = substream->runtime->hw.fifo_size; |
| 1788 | if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) { | 1789 | if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) { |
| 1789 | format = params_format(params); | 1790 | format = params_format(params); |
| 1790 | channels = params_channels(params); | 1791 | channels = params_channels(params); |
| 1791 | width = snd_pcm_format_physical_width(format); | 1792 | frame_size = snd_pcm_format_size(format, channels); |
| 1792 | params->fifo_size /= width * channels; | 1793 | if (frame_size > 0) |
| 1794 | params->fifo_size /= (unsigned)frame_size; | ||
| 1793 | } | 1795 | } |
| 1794 | return 0; | 1796 | return 0; |
| 1795 | } | 1797 | } |
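
When the FIFO size is not already reported in frames, it is now converted by dividing by the frame size from snd_pcm_format_size(format, channels), guarded against a non-positive result; the old divisor, physical width times channels, mixed units, since the physical width is in bits. A quick arithmetic sketch of the conversion with made-up numbers:

    /* Arithmetic sketch only: frame_size is the size in bytes of one frame
     * (all channels of one sample), and the division runs only when it is
     * positive. Values are illustrative. */
    #include <stdio.h>

    static long format_size_bytes(int bytes_per_sample, int channels)
    {
        return (long)bytes_per_sample * channels;   /* snd_pcm_format_size()-like */
    }

    int main(void)
    {
        unsigned long fifo_bytes = 1024;
        long frame_size = format_size_bytes(2, 2);  /* 16-bit stereo -> 4 bytes */

        if (frame_size > 0)
            fifo_bytes /= (unsigned long)frame_size;
        printf("fifo in frames: %lu\n", fifo_bytes); /* -> 256 */
        return 0;
    }
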
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 6e5d0cb4e3d7..47ccb8f44adb 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
| @@ -777,6 +777,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { | |||
| 777 | { .id = CXT_PINCFG_LENOVO_TP410, .name = "tp410" }, | 777 | { .id = CXT_PINCFG_LENOVO_TP410, .name = "tp410" }, |
| 778 | { .id = CXT_FIXUP_THINKPAD_ACPI, .name = "thinkpad" }, | 778 | { .id = CXT_FIXUP_THINKPAD_ACPI, .name = "thinkpad" }, |
| 779 | { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" }, | 779 | { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" }, |
| 780 | { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, | ||
| 780 | { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, | 781 | { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, |
| 781 | {} | 782 | {} |
| 782 | }; | 783 | }; |
diff --git a/sound/usb/caiaq/control.c b/sound/usb/caiaq/control.c index f65fc0987cfb..b7a7c805d63f 100644 --- a/sound/usb/caiaq/control.c +++ b/sound/usb/caiaq/control.c | |||
| @@ -100,15 +100,19 @@ static int control_put(struct snd_kcontrol *kcontrol, | |||
| 100 | struct snd_usb_caiaqdev *cdev = caiaqdev(chip->card); | 100 | struct snd_usb_caiaqdev *cdev = caiaqdev(chip->card); |
| 101 | int pos = kcontrol->private_value; | 101 | int pos = kcontrol->private_value; |
| 102 | int v = ucontrol->value.integer.value[0]; | 102 | int v = ucontrol->value.integer.value[0]; |
| 103 | unsigned char cmd = EP1_CMD_WRITE_IO; | 103 | unsigned char cmd; |
| 104 | 104 | ||
| 105 | if (cdev->chip.usb_id == | 105 | switch (cdev->chip.usb_id) { |
| 106 | USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1)) | 106 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER): |
| 107 | cmd = EP1_CMD_DIMM_LEDS; | 107 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1): |
| 108 | 108 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2): | |
| 109 | if (cdev->chip.usb_id == | 109 | case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER): |
| 110 | USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER)) | ||
| 111 | cmd = EP1_CMD_DIMM_LEDS; | 110 | cmd = EP1_CMD_DIMM_LEDS; |
| 111 | break; | ||
| 112 | default: | ||
| 113 | cmd = EP1_CMD_WRITE_IO; | ||
| 114 | break; | ||
| 115 | } | ||
| 112 | 116 | ||
| 113 | if (pos & CNT_INTVAL) { | 117 | if (pos & CNT_INTVAL) { |
| 114 | int i = pos & ~CNT_INTVAL; | 118 | int i = pos & ~CNT_INTVAL; |
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c index 01124ef3690a..416baedfc89f 100644 --- a/virt/kvm/arm/vgic-v2.c +++ b/virt/kvm/arm/vgic-v2.c | |||
| @@ -71,7 +71,7 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, | |||
| 71 | struct vgic_lr lr_desc) | 71 | struct vgic_lr lr_desc) |
| 72 | { | 72 | { |
| 73 | if (!(lr_desc.state & LR_STATE_MASK)) | 73 | if (!(lr_desc.state & LR_STATE_MASK)) |
| 74 | set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr); | 74 | __set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr); |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) | 77 | static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) |
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index 714b94932312..45ee080573f4 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c | |||
| @@ -191,8 +191,7 @@ int kvm_assign_device(struct kvm *kvm, | |||
| 191 | return r; | 191 | return r; |
| 192 | } | 192 | } |
| 193 | 193 | ||
| 194 | noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain, | 194 | noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY); |
| 195 | IOMMU_CAP_CACHE_COHERENCY); | ||
| 196 | 195 | ||
| 197 | /* Check if need to update IOMMU page table for guest memory */ | 196 | /* Check if need to update IOMMU page table for guest memory */ |
| 198 | if (noncoherent != kvm->arch.iommu_noncoherent) { | 197 | if (noncoherent != kvm->arch.iommu_noncoherent) { |
| @@ -254,8 +253,7 @@ int kvm_iommu_map_guest(struct kvm *kvm) | |||
| 254 | } | 253 | } |
| 255 | 254 | ||
| 256 | if (!allow_unsafe_assigned_interrupts && | 255 | if (!allow_unsafe_assigned_interrupts && |
| 257 | !iommu_domain_has_cap(kvm->arch.iommu_domain, | 256 | !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) { |
| 258 | IOMMU_CAP_INTR_REMAP)) { | ||
| 259 | printk(KERN_WARNING "%s: No interrupt remapping support," | 257 | printk(KERN_WARNING "%s: No interrupt remapping support," |
| 260 | " disallowing device assignment." | 258 | " disallowing device assignment." |
| 261 | " Re-enable with \"allow_unsafe_assigned_interrupts=1\"" | 259 | " Re-enable with \"allow_unsafe_assigned_interrupts=1\"" |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 33712fb26eb1..95519bc959ed 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -110,7 +110,7 @@ static bool largepages_enabled = true; | |||
| 110 | bool kvm_is_mmio_pfn(pfn_t pfn) | 110 | bool kvm_is_mmio_pfn(pfn_t pfn) |
| 111 | { | 111 | { |
| 112 | if (pfn_valid(pfn)) | 112 | if (pfn_valid(pfn)) |
| 113 | return PageReserved(pfn_to_page(pfn)); | 113 | return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)); |
| 114 | 114 | ||
| 115 | return true; | 115 | return true; |
| 116 | } | 116 | } |
| @@ -1725,7 +1725,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target) | |||
| 1725 | rcu_read_lock(); | 1725 | rcu_read_lock(); |
| 1726 | pid = rcu_dereference(target->pid); | 1726 | pid = rcu_dereference(target->pid); |
| 1727 | if (pid) | 1727 | if (pid) |
| 1728 | task = get_pid_task(target->pid, PIDTYPE_PID); | 1728 | task = get_pid_task(pid, PIDTYPE_PID); |
| 1729 | rcu_read_unlock(); | 1729 | rcu_read_unlock(); |
| 1730 | if (!task) | 1730 | if (!task) |
| 1731 | return ret; | 1731 | return ret; |
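
The fix looks up the task from the pid pointer that was just obtained via rcu_dereference() and checked for NULL, instead of re-reading target->pid; testing and using the same snapshot is the usual rule for RCU-protected pointers. A minimal illustration with plain structs standing in for the kernel types:

    #include <stdio.h>

    struct task { const char *name; };
    struct vcpu { struct task *pid; };

    static struct task *get_task(struct vcpu *v)
    {
        struct task *pid = v->pid;   /* rcu_dereference() snapshot in the kernel */

        if (pid)
            return pid;              /* use the snapshot that was checked */
        return NULL;
    }

    int main(void)
    {
        struct task target = { "vcpu-target" };
        struct vcpu v = { &target };
        struct task *t = get_task(&v);

        printf("%s\n", t ? t->name : "none");
        return 0;
    }
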
