diff options
| author | Ingo Molnar <mingo@kernel.org> | 2018-12-17 11:46:26 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-12-17 11:46:26 -0500 |
| commit | 76aea1eeb98d2d75d9297fda777efeffe3657aeb (patch) | |
| tree | 18fdf94ec846b553f1e60438279a3b449897189b | |
| parent | 43b9e4febc66b98d83cc1560196d56ac7fef3c32 (diff) | |
| parent | 7566ec393f4161572ba6f11ad5171fd5d59b0fbd (diff) | |
Merge tag 'v4.20-rc7' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
437 files changed, 4850 insertions, 2108 deletions
| @@ -2541,6 +2541,10 @@ S: Ormond | |||
| 2541 | S: Victoria 3163 | 2541 | S: Victoria 3163 |
| 2542 | S: Australia | 2542 | S: Australia |
| 2543 | 2543 | ||
| 2544 | N: Eric Miao | ||
| 2545 | E: eric.y.miao@gmail.com | ||
| 2546 | D: MMP support | ||
| 2547 | |||
| 2544 | N: Pauline Middelink | 2548 | N: Pauline Middelink |
| 2545 | E: middelin@polyware.nl | 2549 | E: middelin@polyware.nl |
| 2546 | D: General low-level bug fixes, /proc fixes, identd support | 2550 | D: General low-level bug fixes, /proc fixes, identd support |
| @@ -4115,6 +4119,10 @@ S: 1507 145th Place SE #B5 | |||
| 4115 | S: Bellevue, Washington 98007 | 4119 | S: Bellevue, Washington 98007 |
| 4116 | S: USA | 4120 | S: USA |
| 4117 | 4121 | ||
| 4122 | N: Haojian Zhuang | ||
| 4123 | E: haojian.zhuang@gmail.com | ||
| 4124 | D: MMP support | ||
| 4125 | |||
| 4118 | N: Richard Zidlicky | 4126 | N: Richard Zidlicky |
| 4119 | E: rz@linux-m68k.org, rdzidlic@geocities.com | 4127 | E: rz@linux-m68k.org, rdzidlic@geocities.com |
| 4120 | W: http://www.geocities.com/rdzidlic | 4128 | W: http://www.geocities.com/rdzidlic |
diff --git a/Documentation/ABI/testing/sysfs-class-net-dsa b/Documentation/ABI/testing/sysfs-class-net-dsa index f240221e071e..985d84c585c6 100644 --- a/Documentation/ABI/testing/sysfs-class-net-dsa +++ b/Documentation/ABI/testing/sysfs-class-net-dsa | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | What: /sys/class/net/<iface>/tagging | 1 | What: /sys/class/net/<iface>/dsa/tagging |
| 2 | Date: August 2018 | 2 | Date: August 2018 |
| 3 | KernelVersion: 4.20 | 3 | KernelVersion: 4.20 |
| 4 | Contact: netdev@vger.kernel.org | 4 | Contact: netdev@vger.kernel.org |
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index dbe96cb5558e..6a6d67acaf69 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst | |||
| @@ -187,6 +187,8 @@ Takes xa_lock internally: | |||
| 187 | * :c:func:`xa_erase_bh` | 187 | * :c:func:`xa_erase_bh` |
| 188 | * :c:func:`xa_erase_irq` | 188 | * :c:func:`xa_erase_irq` |
| 189 | * :c:func:`xa_cmpxchg` | 189 | * :c:func:`xa_cmpxchg` |
| 190 | * :c:func:`xa_cmpxchg_bh` | ||
| 191 | * :c:func:`xa_cmpxchg_irq` | ||
| 190 | * :c:func:`xa_store_range` | 192 | * :c:func:`xa_store_range` |
| 191 | * :c:func:`xa_alloc` | 193 | * :c:func:`xa_alloc` |
| 192 | * :c:func:`xa_alloc_bh` | 194 | * :c:func:`xa_alloc_bh` |
| @@ -263,7 +265,8 @@ using :c:func:`xa_lock_irqsave` in both the interrupt handler and process | |||
| 263 | context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock` | 265 | context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock` |
| 264 | in the interrupt handler. Some of the more common patterns have helper | 266 | in the interrupt handler. Some of the more common patterns have helper |
| 265 | functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`, | 267 | functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`, |
| 266 | :c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`. | 268 | :c:func:`xa_erase_bh`, :c:func:`xa_erase_irq`, :c:func:`xa_cmpxchg_bh` |
| 269 | and :c:func:`xa_cmpxchg_irq`. | ||
| 267 | 270 | ||
| 268 | Sometimes you need to protect access to the XArray with a mutex because | 271 | Sometimes you need to protect access to the XArray with a mutex because |
| 269 | that lock sits above another mutex in the locking hierarchy. That does | 272 | that lock sits above another mutex in the locking hierarchy. That does |
diff --git a/Documentation/devicetree/bindings/clock/clock-bindings.txt b/Documentation/devicetree/bindings/clock/clock-bindings.txt index 2ec489eebe72..b646bbcf7f92 100644 --- a/Documentation/devicetree/bindings/clock/clock-bindings.txt +++ b/Documentation/devicetree/bindings/clock/clock-bindings.txt | |||
| @@ -168,3 +168,19 @@ a shared clock is forbidden. | |||
| 168 | 168 | ||
| 169 | Configuration of common clocks, which affect multiple consumer devices can | 169 | Configuration of common clocks, which affect multiple consumer devices can |
| 170 | be similarly specified in the clock provider node. | 170 | be similarly specified in the clock provider node. |
| 171 | |||
| 172 | ==Protected clocks== | ||
| 173 | |||
| 174 | Some platforms or firmwares may not fully expose all the clocks to the OS, such | ||
| 175 | as in situations where those clks are used by drivers running in ARM secure | ||
| 176 | execution levels. Such a configuration can be specified in device tree with the | ||
| 177 | protected-clocks property in the form of a clock specifier list. This property should | ||
| 178 | only be specified in the node that is providing the clocks being protected: | ||
| 179 | |||
| 180 | clock-controller@a000f000 { | ||
| 181 | compatible = "vendor,clk95; | ||
| 182 | reg = <0xa000f000 0x1000> | ||
| 183 | #clocks-cells = <1>; | ||
| 184 | ... | ||
| 185 | protected-clocks = <UART3_CLK>, <SPI5_CLK>; | ||
| 186 | }; | ||
diff --git a/Documentation/devicetree/bindings/input/input-reset.txt b/Documentation/devicetree/bindings/input/input-reset.txt index 2bb2626fdb78..1ca6cc5ebf8e 100644 --- a/Documentation/devicetree/bindings/input/input-reset.txt +++ b/Documentation/devicetree/bindings/input/input-reset.txt | |||
| @@ -12,7 +12,7 @@ The /chosen node should contain a 'linux,sysrq-reset-seq' child node to define | |||
| 12 | a set of keys. | 12 | a set of keys. |
| 13 | 13 | ||
| 14 | Required property: | 14 | Required property: |
| 15 | sysrq-reset-seq: array of Linux keycodes, one keycode per cell. | 15 | keyset: array of Linux keycodes, one keycode per cell. |
| 16 | 16 | ||
| 17 | Optional property: | 17 | Optional property: |
| 18 | timeout-ms: duration keys must be pressed together in milliseconds before | 18 | timeout-ms: duration keys must be pressed together in milliseconds before |
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.txt b/Documentation/devicetree/bindings/media/rockchip-vpu.txt deleted file mode 100644 index 35dc464ad7c8..000000000000 --- a/Documentation/devicetree/bindings/media/rockchip-vpu.txt +++ /dev/null | |||
| @@ -1,29 +0,0 @@ | |||
| 1 | device-tree bindings for rockchip VPU codec | ||
| 2 | |||
| 3 | Rockchip (Video Processing Unit) present in various Rockchip platforms, | ||
| 4 | such as RK3288 and RK3399. | ||
| 5 | |||
| 6 | Required properties: | ||
| 7 | - compatible: value should be one of the following | ||
| 8 | "rockchip,rk3288-vpu"; | ||
| 9 | "rockchip,rk3399-vpu"; | ||
| 10 | - interrupts: encoding and decoding interrupt specifiers | ||
| 11 | - interrupt-names: should be "vepu" and "vdpu" | ||
| 12 | - clocks: phandle to VPU aclk, hclk clocks | ||
| 13 | - clock-names: should be "aclk" and "hclk" | ||
| 14 | - power-domains: phandle to power domain node | ||
| 15 | - iommus: phandle to a iommu node | ||
| 16 | |||
| 17 | Example: | ||
| 18 | SoC-specific DT entry: | ||
| 19 | vpu: video-codec@ff9a0000 { | ||
| 20 | compatible = "rockchip,rk3288-vpu"; | ||
| 21 | reg = <0x0 0xff9a0000 0x0 0x800>; | ||
| 22 | interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, | ||
| 23 | <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; | ||
| 24 | interrupt-names = "vepu", "vdpu"; | ||
| 25 | clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>; | ||
| 26 | clock-names = "aclk", "hclk"; | ||
| 27 | power-domains = <&power RK3288_PD_VIDEO>; | ||
| 28 | iommus = <&vpu_mmu>; | ||
| 29 | }; | ||
diff --git a/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst b/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst index 0f8b31874002..de131f00c249 100644 --- a/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst +++ b/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst | |||
| @@ -1,4 +1,28 @@ | |||
| 1 | .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | 1 | .. This file is dual-licensed: you can use it either under the terms |
| 2 | .. of the GPL or the GFDL 1.1+ license, at your option. Note that this | ||
| 3 | .. dual licensing only applies to this file, and not this project as a | ||
| 4 | .. whole. | ||
| 5 | .. | ||
| 6 | .. a) This file is free software; you can redistribute it and/or | ||
| 7 | .. modify it under the terms of the GNU General Public License as | ||
| 8 | .. published by the Free Software Foundation; either version 2 of | ||
| 9 | .. the License, or (at your option) any later version. | ||
| 10 | .. | ||
| 11 | .. This file is distributed in the hope that it will be useful, | ||
| 12 | .. but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | .. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | .. GNU General Public License for more details. | ||
| 15 | .. | ||
| 16 | .. Or, alternatively, | ||
| 17 | .. | ||
| 18 | .. b) Permission is granted to copy, distribute and/or modify this | ||
| 19 | .. document under the terms of the GNU Free Documentation License, | ||
| 20 | .. Version 1.1 or any later version published by the Free Software | ||
| 21 | .. Foundation, with no Invariant Sections, no Front-Cover Texts | ||
| 22 | .. and no Back-Cover Texts. A copy of the license is included at | ||
| 23 | .. Documentation/media/uapi/fdl-appendix.rst. | ||
| 24 | .. | ||
| 25 | .. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | ||
| 2 | 26 | ||
| 3 | .. _media_ioc_request_alloc: | 27 | .. _media_ioc_request_alloc: |
| 4 | 28 | ||
diff --git a/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst b/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst index 6dd2d7fea714..5d2604345e19 100644 --- a/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst +++ b/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst | |||
| @@ -1,4 +1,28 @@ | |||
| 1 | .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | 1 | .. This file is dual-licensed: you can use it either under the terms |
| 2 | .. of the GPL or the GFDL 1.1+ license, at your option. Note that this | ||
| 3 | .. dual licensing only applies to this file, and not this project as a | ||
| 4 | .. whole. | ||
| 5 | .. | ||
| 6 | .. a) This file is free software; you can redistribute it and/or | ||
| 7 | .. modify it under the terms of the GNU General Public License as | ||
| 8 | .. published by the Free Software Foundation; either version 2 of | ||
| 9 | .. the License, or (at your option) any later version. | ||
| 10 | .. | ||
| 11 | .. This file is distributed in the hope that it will be useful, | ||
| 12 | .. but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | .. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | .. GNU General Public License for more details. | ||
| 15 | .. | ||
| 16 | .. Or, alternatively, | ||
| 17 | .. | ||
| 18 | .. b) Permission is granted to copy, distribute and/or modify this | ||
| 19 | .. document under the terms of the GNU Free Documentation License, | ||
| 20 | .. Version 1.1 or any later version published by the Free Software | ||
| 21 | .. Foundation, with no Invariant Sections, no Front-Cover Texts | ||
| 22 | .. and no Back-Cover Texts. A copy of the license is included at | ||
| 23 | .. Documentation/media/uapi/fdl-appendix.rst. | ||
| 24 | .. | ||
| 25 | .. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | ||
| 2 | 26 | ||
| 3 | .. _media_request_ioc_queue: | 27 | .. _media_request_ioc_queue: |
| 4 | 28 | ||
diff --git a/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst b/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst index febe888494c8..ec61960c81ce 100644 --- a/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst +++ b/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst | |||
| @@ -1,4 +1,28 @@ | |||
| 1 | .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | 1 | .. This file is dual-licensed: you can use it either under the terms |
| 2 | .. of the GPL or the GFDL 1.1+ license, at your option. Note that this | ||
| 3 | .. dual licensing only applies to this file, and not this project as a | ||
| 4 | .. whole. | ||
| 5 | .. | ||
| 6 | .. a) This file is free software; you can redistribute it and/or | ||
| 7 | .. modify it under the terms of the GNU General Public License as | ||
| 8 | .. published by the Free Software Foundation; either version 2 of | ||
| 9 | .. the License, or (at your option) any later version. | ||
| 10 | .. | ||
| 11 | .. This file is distributed in the hope that it will be useful, | ||
| 12 | .. but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | .. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | .. GNU General Public License for more details. | ||
| 15 | .. | ||
| 16 | .. Or, alternatively, | ||
| 17 | .. | ||
| 18 | .. b) Permission is granted to copy, distribute and/or modify this | ||
| 19 | .. document under the terms of the GNU Free Documentation License, | ||
| 20 | .. Version 1.1 or any later version published by the Free Software | ||
| 21 | .. Foundation, with no Invariant Sections, no Front-Cover Texts | ||
| 22 | .. and no Back-Cover Texts. A copy of the license is included at | ||
| 23 | .. Documentation/media/uapi/fdl-appendix.rst. | ||
| 24 | .. | ||
| 25 | .. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | ||
| 2 | 26 | ||
| 3 | .. _media_request_ioc_reinit: | 27 | .. _media_request_ioc_reinit: |
| 4 | 28 | ||
diff --git a/Documentation/media/uapi/mediactl/request-api.rst b/Documentation/media/uapi/mediactl/request-api.rst index 5f4a23029c48..945113dcb218 100644 --- a/Documentation/media/uapi/mediactl/request-api.rst +++ b/Documentation/media/uapi/mediactl/request-api.rst | |||
| @@ -1,4 +1,28 @@ | |||
| 1 | .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | 1 | .. This file is dual-licensed: you can use it either under the terms |
| 2 | .. of the GPL or the GFDL 1.1+ license, at your option. Note that this | ||
| 3 | .. dual licensing only applies to this file, and not this project as a | ||
| 4 | .. whole. | ||
| 5 | .. | ||
| 6 | .. a) This file is free software; you can redistribute it and/or | ||
| 7 | .. modify it under the terms of the GNU General Public License as | ||
| 8 | .. published by the Free Software Foundation; either version 2 of | ||
| 9 | .. the License, or (at your option) any later version. | ||
| 10 | .. | ||
| 11 | .. This file is distributed in the hope that it will be useful, | ||
| 12 | .. but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | .. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | .. GNU General Public License for more details. | ||
| 15 | .. | ||
| 16 | .. Or, alternatively, | ||
| 17 | .. | ||
| 18 | .. b) Permission is granted to copy, distribute and/or modify this | ||
| 19 | .. document under the terms of the GNU Free Documentation License, | ||
| 20 | .. Version 1.1 or any later version published by the Free Software | ||
| 21 | .. Foundation, with no Invariant Sections, no Front-Cover Texts | ||
| 22 | .. and no Back-Cover Texts. A copy of the license is included at | ||
| 23 | .. Documentation/media/uapi/fdl-appendix.rst. | ||
| 24 | .. | ||
| 25 | .. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | ||
| 2 | 26 | ||
| 3 | .. _media-request-api: | 27 | .. _media-request-api: |
| 4 | 28 | ||
diff --git a/Documentation/media/uapi/mediactl/request-func-close.rst b/Documentation/media/uapi/mediactl/request-func-close.rst index 098d7f2b9548..dcf3f35bcf17 100644 --- a/Documentation/media/uapi/mediactl/request-func-close.rst +++ b/Documentation/media/uapi/mediactl/request-func-close.rst | |||
| @@ -1,4 +1,28 @@ | |||
| 1 | .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | 1 | .. This file is dual-licensed: you can use it either under the terms |
| 2 | .. of the GPL or the GFDL 1.1+ license, at your option. Note that this | ||
| 3 | .. dual licensing only applies to this file, and not this project as a | ||
| 4 | .. whole. | ||
| 5 | .. | ||
| 6 | .. a) This file is free software; you can redistribute it and/or | ||
| 7 | .. modify it under the terms of the GNU General Public License as | ||
| 8 | .. published by the Free Software Foundation; either version 2 of | ||
| 9 | .. the License, or (at your option) any later version. | ||
| 10 | .. | ||
| 11 | .. This file is distributed in the hope that it will be useful, | ||
| 12 | .. but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | .. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | .. GNU General Public License for more details. | ||
| 15 | .. | ||
| 16 | .. Or, alternatively, | ||
| 17 | .. | ||
| 18 | .. b) Permission is granted to copy, distribute and/or modify this | ||
| 19 | .. document under the terms of the GNU Free Documentation License, | ||
| 20 | .. Version 1.1 or any later version published by the Free Software | ||
| 21 | .. Foundation, with no Invariant Sections, no Front-Cover Texts | ||
| 22 | .. and no Back-Cover Texts. A copy of the license is included at | ||
| 23 | .. Documentation/media/uapi/fdl-appendix.rst. | ||
| 24 | .. | ||
| 25 | .. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | ||
| 2 | 26 | ||
| 3 | .. _request-func-close: | 27 | .. _request-func-close: |
| 4 | 28 | ||
diff --git a/Documentation/media/uapi/mediactl/request-func-ioctl.rst b/Documentation/media/uapi/mediactl/request-func-ioctl.rst index ff7b072a6999..11a22f887843 100644 --- a/Documentation/media/uapi/mediactl/request-func-ioctl.rst +++ b/Documentation/media/uapi/mediactl/request-func-ioctl.rst | |||
| @@ -1,4 +1,28 @@ | |||
| 1 | .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | 1 | .. This file is dual-licensed: you can use it either under the terms |
| 2 | .. of the GPL or the GFDL 1.1+ license, at your option. Note that this | ||
| 3 | .. dual licensing only applies to this file, and not this project as a | ||
| 4 | .. whole. | ||
| 5 | .. | ||
| 6 | .. a) This file is free software; you can redistribute it and/or | ||
| 7 | .. modify it under the terms of the GNU General Public License as | ||
| 8 | .. published by the Free Software Foundation; either version 2 of | ||
| 9 | .. the License, or (at your option) any later version. | ||
| 10 | .. | ||
| 11 | .. This file is distributed in the hope that it will be useful, | ||
| 12 | .. but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | .. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | .. GNU General Public License for more details. | ||
| 15 | .. | ||
| 16 | .. Or, alternatively, | ||
| 17 | .. | ||
| 18 | .. b) Permission is granted to copy, distribute and/or modify this | ||
| 19 | .. document under the terms of the GNU Free Documentation License, | ||
| 20 | .. Version 1.1 or any later version published by the Free Software | ||
| 21 | .. Foundation, with no Invariant Sections, no Front-Cover Texts | ||
| 22 | .. and no Back-Cover Texts. A copy of the license is included at | ||
| 23 | .. Documentation/media/uapi/fdl-appendix.rst. | ||
| 24 | .. | ||
| 25 | .. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | ||
| 2 | 26 | ||
| 3 | .. _request-func-ioctl: | 27 | .. _request-func-ioctl: |
| 4 | 28 | ||
diff --git a/Documentation/media/uapi/mediactl/request-func-poll.rst b/Documentation/media/uapi/mediactl/request-func-poll.rst index 85191254f381..2609fd54d519 100644 --- a/Documentation/media/uapi/mediactl/request-func-poll.rst +++ b/Documentation/media/uapi/mediactl/request-func-poll.rst | |||
| @@ -1,4 +1,28 @@ | |||
| 1 | .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | 1 | .. This file is dual-licensed: you can use it either under the terms |
| 2 | .. of the GPL or the GFDL 1.1+ license, at your option. Note that this | ||
| 3 | .. dual licensing only applies to this file, and not this project as a | ||
| 4 | .. whole. | ||
| 5 | .. | ||
| 6 | .. a) This file is free software; you can redistribute it and/or | ||
| 7 | .. modify it under the terms of the GNU General Public License as | ||
| 8 | .. published by the Free Software Foundation; either version 2 of | ||
| 9 | .. the License, or (at your option) any later version. | ||
| 10 | .. | ||
| 11 | .. This file is distributed in the hope that it will be useful, | ||
| 12 | .. but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | .. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | .. GNU General Public License for more details. | ||
| 15 | .. | ||
| 16 | .. Or, alternatively, | ||
| 17 | .. | ||
| 18 | .. b) Permission is granted to copy, distribute and/or modify this | ||
| 19 | .. document under the terms of the GNU Free Documentation License, | ||
| 20 | .. Version 1.1 or any later version published by the Free Software | ||
| 21 | .. Foundation, with no Invariant Sections, no Front-Cover Texts | ||
| 22 | .. and no Back-Cover Texts. A copy of the license is included at | ||
| 23 | .. Documentation/media/uapi/fdl-appendix.rst. | ||
| 24 | .. | ||
| 25 | .. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections | ||
| 2 | 26 | ||
| 3 | .. _request-func-poll: | 27 | .. _request-func-poll: |
| 4 | 28 | ||
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst index 65a1d873196b..027358b91082 100644 --- a/Documentation/media/uapi/v4l/extended-controls.rst +++ b/Documentation/media/uapi/v4l/extended-controls.rst | |||
| @@ -1505,6 +1505,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type - | |||
| 1505 | configuring a stateless hardware decoding pipeline for MPEG-2. | 1505 | configuring a stateless hardware decoding pipeline for MPEG-2. |
| 1506 | The bitstream parameters are defined according to :ref:`mpeg2part2`. | 1506 | The bitstream parameters are defined according to :ref:`mpeg2part2`. |
| 1507 | 1507 | ||
| 1508 | .. note:: | ||
| 1509 | |||
| 1510 | This compound control is not yet part of the public kernel API and | ||
| 1511 | it is expected to change. | ||
| 1512 | |||
| 1508 | .. c:type:: v4l2_ctrl_mpeg2_slice_params | 1513 | .. c:type:: v4l2_ctrl_mpeg2_slice_params |
| 1509 | 1514 | ||
| 1510 | .. cssclass:: longtable | 1515 | .. cssclass:: longtable |
| @@ -1625,6 +1630,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type - | |||
| 1625 | Specifies quantization matrices (as extracted from the bitstream) for the | 1630 | Specifies quantization matrices (as extracted from the bitstream) for the |
| 1626 | associated MPEG-2 slice data. | 1631 | associated MPEG-2 slice data. |
| 1627 | 1632 | ||
| 1633 | .. note:: | ||
| 1634 | |||
| 1635 | This compound control is not yet part of the public kernel API and | ||
| 1636 | it is expected to change. | ||
| 1637 | |||
| 1628 | .. c:type:: v4l2_ctrl_mpeg2_quantization | 1638 | .. c:type:: v4l2_ctrl_mpeg2_quantization |
| 1629 | 1639 | ||
| 1630 | .. cssclass:: longtable | 1640 | .. cssclass:: longtable |
diff --git a/MAINTAINERS b/MAINTAINERS index 6682420421c1..3318f30903b2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1472,6 +1472,7 @@ F: drivers/clk/sirf/ | |||
| 1472 | F: drivers/clocksource/timer-prima2.c | 1472 | F: drivers/clocksource/timer-prima2.c |
| 1473 | F: drivers/clocksource/timer-atlas7.c | 1473 | F: drivers/clocksource/timer-atlas7.c |
| 1474 | N: [^a-z]sirf | 1474 | N: [^a-z]sirf |
| 1475 | X: drivers/gnss | ||
| 1475 | 1476 | ||
| 1476 | ARM/EBSA110 MACHINE SUPPORT | 1477 | ARM/EBSA110 MACHINE SUPPORT |
| 1477 | M: Russell King <linux@armlinux.org.uk> | 1478 | M: Russell King <linux@armlinux.org.uk> |
| @@ -1738,13 +1739,17 @@ ARM/Mediatek SoC support | |||
| 1738 | M: Matthias Brugger <matthias.bgg@gmail.com> | 1739 | M: Matthias Brugger <matthias.bgg@gmail.com> |
| 1739 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1740 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 1740 | L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) | 1741 | L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) |
| 1742 | W: https://mtk.bcnfs.org/ | ||
| 1743 | C: irc://chat.freenode.net/linux-mediatek | ||
| 1741 | S: Maintained | 1744 | S: Maintained |
| 1742 | F: arch/arm/boot/dts/mt6* | 1745 | F: arch/arm/boot/dts/mt6* |
| 1743 | F: arch/arm/boot/dts/mt7* | 1746 | F: arch/arm/boot/dts/mt7* |
| 1744 | F: arch/arm/boot/dts/mt8* | 1747 | F: arch/arm/boot/dts/mt8* |
| 1745 | F: arch/arm/mach-mediatek/ | 1748 | F: arch/arm/mach-mediatek/ |
| 1746 | F: arch/arm64/boot/dts/mediatek/ | 1749 | F: arch/arm64/boot/dts/mediatek/ |
| 1750 | F: drivers/soc/mediatek/ | ||
| 1747 | N: mtk | 1751 | N: mtk |
| 1752 | N: mt[678] | ||
| 1748 | K: mediatek | 1753 | K: mediatek |
| 1749 | 1754 | ||
| 1750 | ARM/Mediatek USB3 PHY DRIVER | 1755 | ARM/Mediatek USB3 PHY DRIVER |
| @@ -3271,11 +3276,16 @@ S: Maintained | |||
| 3271 | F: sound/pci/oxygen/ | 3276 | F: sound/pci/oxygen/ |
| 3272 | 3277 | ||
| 3273 | C-SKY ARCHITECTURE | 3278 | C-SKY ARCHITECTURE |
| 3274 | M: Guo Ren <ren_guo@c-sky.com> | 3279 | M: Guo Ren <guoren@kernel.org> |
| 3275 | T: git https://github.com/c-sky/csky-linux.git | 3280 | T: git https://github.com/c-sky/csky-linux.git |
| 3276 | S: Supported | 3281 | S: Supported |
| 3277 | F: arch/csky/ | 3282 | F: arch/csky/ |
| 3278 | F: Documentation/devicetree/bindings/csky/ | 3283 | F: Documentation/devicetree/bindings/csky/ |
| 3284 | F: drivers/irqchip/irq-csky-* | ||
| 3285 | F: Documentation/devicetree/bindings/interrupt-controller/csky,* | ||
| 3286 | F: drivers/clocksource/timer-gx6605s.c | ||
| 3287 | F: drivers/clocksource/timer-mp-csky.c | ||
| 3288 | F: Documentation/devicetree/bindings/timer/csky,* | ||
| 3279 | K: csky | 3289 | K: csky |
| 3280 | N: csky | 3290 | N: csky |
| 3281 | 3291 | ||
| @@ -4837,6 +4847,7 @@ F: include/uapi/drm/vmwgfx_drm.h | |||
| 4837 | 4847 | ||
| 4838 | DRM DRIVERS | 4848 | DRM DRIVERS |
| 4839 | M: David Airlie <airlied@linux.ie> | 4849 | M: David Airlie <airlied@linux.ie> |
| 4850 | M: Daniel Vetter <daniel@ffwll.ch> | ||
| 4840 | L: dri-devel@lists.freedesktop.org | 4851 | L: dri-devel@lists.freedesktop.org |
| 4841 | T: git git://anongit.freedesktop.org/drm/drm | 4852 | T: git git://anongit.freedesktop.org/drm/drm |
| 4842 | B: https://bugs.freedesktop.org/ | 4853 | B: https://bugs.freedesktop.org/ |
| @@ -6316,6 +6327,7 @@ F: include/uapi/linux/gigaset_dev.h | |||
| 6316 | 6327 | ||
| 6317 | GNSS SUBSYSTEM | 6328 | GNSS SUBSYSTEM |
| 6318 | M: Johan Hovold <johan@kernel.org> | 6329 | M: Johan Hovold <johan@kernel.org> |
| 6330 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git | ||
| 6319 | S: Maintained | 6331 | S: Maintained |
| 6320 | F: Documentation/ABI/testing/sysfs-class-gnss | 6332 | F: Documentation/ABI/testing/sysfs-class-gnss |
| 6321 | F: Documentation/devicetree/bindings/gnss/ | 6333 | F: Documentation/devicetree/bindings/gnss/ |
| @@ -9998,12 +10010,9 @@ S: Odd Fixes | |||
| 9998 | F: drivers/media/radio/radio-miropcm20* | 10010 | F: drivers/media/radio/radio-miropcm20* |
| 9999 | 10011 | ||
| 10000 | MMP SUPPORT | 10012 | MMP SUPPORT |
| 10001 | M: Eric Miao <eric.y.miao@gmail.com> | 10013 | R: Lubomir Rintel <lkundrak@v3.sk> |
| 10002 | M: Haojian Zhuang <haojian.zhuang@gmail.com> | ||
| 10003 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 10014 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 10004 | T: git git://github.com/hzhuang1/linux.git | 10015 | S: Odd Fixes |
| 10005 | T: git git://git.linaro.org/people/ycmiao/pxa-linux.git | ||
| 10006 | S: Maintained | ||
| 10007 | F: arch/arm/boot/dts/mmp* | 10016 | F: arch/arm/boot/dts/mmp* |
| 10008 | F: arch/arm/mach-mmp/ | 10017 | F: arch/arm/mach-mmp/ |
| 10009 | 10018 | ||
| @@ -13883,6 +13892,13 @@ F: drivers/md/raid* | |||
| 13883 | F: include/linux/raid/ | 13892 | F: include/linux/raid/ |
| 13884 | F: include/uapi/linux/raid/ | 13893 | F: include/uapi/linux/raid/ |
| 13885 | 13894 | ||
| 13895 | SOCIONEXT (SNI) AVE NETWORK DRIVER | ||
| 13896 | M: Kunihiko Hayashi <hayashi.kunihiko@socionext.com> | ||
| 13897 | L: netdev@vger.kernel.org | ||
| 13898 | S: Maintained | ||
| 13899 | F: drivers/net/ethernet/socionext/sni_ave.c | ||
| 13900 | F: Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt | ||
| 13901 | |||
| 13886 | SOCIONEXT (SNI) NETSEC NETWORK DRIVER | 13902 | SOCIONEXT (SNI) NETSEC NETWORK DRIVER |
| 13887 | M: Jassi Brar <jaswinder.singh@linaro.org> | 13903 | M: Jassi Brar <jaswinder.singh@linaro.org> |
| 13888 | L: netdev@vger.kernel.org | 13904 | L: netdev@vger.kernel.org |
| @@ -2,7 +2,7 @@ | |||
| 2 | VERSION = 4 | 2 | VERSION = 4 |
| 3 | PATCHLEVEL = 20 | 3 | PATCHLEVEL = 20 |
| 4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
| 5 | EXTRAVERSION = -rc5 | 5 | EXTRAVERSION = -rc7 |
| 6 | NAME = Shy Crocodile | 6 | NAME = Shy Crocodile |
| 7 | 7 | ||
| 8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index a37fd990bd55..4b5b1b244f86 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c | |||
| @@ -634,6 +634,7 @@ setup_arch(char **cmdline_p) | |||
| 634 | 634 | ||
| 635 | /* Find our memory. */ | 635 | /* Find our memory. */ |
| 636 | setup_memory(kernel_end); | 636 | setup_memory(kernel_end); |
| 637 | memblock_set_bottom_up(true); | ||
| 637 | 638 | ||
| 638 | /* First guess at cpu cache sizes. Do this before init_arch. */ | 639 | /* First guess at cpu cache sizes. Do this before init_arch. */ |
| 639 | determine_cpu_caches(cpu->type); | 640 | determine_cpu_caches(cpu->type); |
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 74846553e3f1..d0b73371e985 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c | |||
| @@ -144,14 +144,14 @@ setup_memory_node(int nid, void *kernel_end) | |||
| 144 | if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn)) | 144 | if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn)) |
| 145 | panic("kernel loaded out of ram"); | 145 | panic("kernel loaded out of ram"); |
| 146 | 146 | ||
| 147 | memblock_add(PFN_PHYS(node_min_pfn), | ||
| 148 | (node_max_pfn - node_min_pfn) << PAGE_SHIFT); | ||
| 149 | |||
| 147 | /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned. | 150 | /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned. |
| 148 | Note that we round this down, not up - node memory | 151 | Note that we round this down, not up - node memory |
| 149 | has much larger alignment than 8Mb, so it's safe. */ | 152 | has much larger alignment than 8Mb, so it's safe. */ |
| 150 | node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1); | 153 | node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1); |
| 151 | 154 | ||
| 152 | memblock_add(PFN_PHYS(node_min_pfn), | ||
| 153 | (node_max_pfn - node_min_pfn) << PAGE_SHIFT); | ||
| 154 | |||
| 155 | NODE_DATA(nid)->node_start_pfn = node_min_pfn; | 155 | NODE_DATA(nid)->node_start_pfn = node_min_pfn; |
| 156 | NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn; | 156 | NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn; |
| 157 | 157 | ||
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index c9e2a1323536..6dd783557330 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
| @@ -109,7 +109,7 @@ endmenu | |||
| 109 | 109 | ||
| 110 | choice | 110 | choice |
| 111 | prompt "ARC Instruction Set" | 111 | prompt "ARC Instruction Set" |
| 112 | default ISA_ARCOMPACT | 112 | default ISA_ARCV2 |
| 113 | 113 | ||
| 114 | config ISA_ARCOMPACT | 114 | config ISA_ARCOMPACT |
| 115 | bool "ARCompact ISA" | 115 | bool "ARCompact ISA" |
| @@ -176,13 +176,11 @@ endchoice | |||
| 176 | 176 | ||
| 177 | config CPU_BIG_ENDIAN | 177 | config CPU_BIG_ENDIAN |
| 178 | bool "Enable Big Endian Mode" | 178 | bool "Enable Big Endian Mode" |
| 179 | default n | ||
| 180 | help | 179 | help |
| 181 | Build kernel for Big Endian Mode of ARC CPU | 180 | Build kernel for Big Endian Mode of ARC CPU |
| 182 | 181 | ||
| 183 | config SMP | 182 | config SMP |
| 184 | bool "Symmetric Multi-Processing" | 183 | bool "Symmetric Multi-Processing" |
| 185 | default n | ||
| 186 | select ARC_MCIP if ISA_ARCV2 | 184 | select ARC_MCIP if ISA_ARCV2 |
| 187 | help | 185 | help |
| 188 | This enables support for systems with more than one CPU. | 186 | This enables support for systems with more than one CPU. |
| @@ -254,7 +252,6 @@ config ARC_CACHE_PAGES | |||
| 254 | config ARC_CACHE_VIPT_ALIASING | 252 | config ARC_CACHE_VIPT_ALIASING |
| 255 | bool "Support VIPT Aliasing D$" | 253 | bool "Support VIPT Aliasing D$" |
| 256 | depends on ARC_HAS_DCACHE && ISA_ARCOMPACT | 254 | depends on ARC_HAS_DCACHE && ISA_ARCOMPACT |
| 257 | default n | ||
| 258 | 255 | ||
| 259 | endif #ARC_CACHE | 256 | endif #ARC_CACHE |
| 260 | 257 | ||
| @@ -262,7 +259,6 @@ config ARC_HAS_ICCM | |||
| 262 | bool "Use ICCM" | 259 | bool "Use ICCM" |
| 263 | help | 260 | help |
| 264 | Single Cycle RAMS to store Fast Path Code | 261 | Single Cycle RAMS to store Fast Path Code |
| 265 | default n | ||
| 266 | 262 | ||
| 267 | config ARC_ICCM_SZ | 263 | config ARC_ICCM_SZ |
| 268 | int "ICCM Size in KB" | 264 | int "ICCM Size in KB" |
| @@ -273,7 +269,6 @@ config ARC_HAS_DCCM | |||
| 273 | bool "Use DCCM" | 269 | bool "Use DCCM" |
| 274 | help | 270 | help |
| 275 | Single Cycle RAMS to store Fast Path Data | 271 | Single Cycle RAMS to store Fast Path Data |
| 276 | default n | ||
| 277 | 272 | ||
| 278 | config ARC_DCCM_SZ | 273 | config ARC_DCCM_SZ |
| 279 | int "DCCM Size in KB" | 274 | int "DCCM Size in KB" |
| @@ -366,13 +361,11 @@ if ISA_ARCOMPACT | |||
| 366 | 361 | ||
| 367 | config ARC_COMPACT_IRQ_LEVELS | 362 | config ARC_COMPACT_IRQ_LEVELS |
| 368 | bool "Setup Timer IRQ as high Priority" | 363 | bool "Setup Timer IRQ as high Priority" |
| 369 | default n | ||
| 370 | # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy | 364 | # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy |
| 371 | depends on !SMP | 365 | depends on !SMP |
| 372 | 366 | ||
| 373 | config ARC_FPU_SAVE_RESTORE | 367 | config ARC_FPU_SAVE_RESTORE |
| 374 | bool "Enable FPU state persistence across context switch" | 368 | bool "Enable FPU state persistence across context switch" |
| 375 | default n | ||
| 376 | help | 369 | help |
| 377 | Double Precision Floating Point unit had dedicated regs which | 370 | Double Precision Floating Point unit had dedicated regs which |
| 378 | need to be saved/restored across context-switch. | 371 | need to be saved/restored across context-switch. |
| @@ -453,7 +446,6 @@ config HIGHMEM | |||
| 453 | 446 | ||
| 454 | config ARC_HAS_PAE40 | 447 | config ARC_HAS_PAE40 |
| 455 | bool "Support for the 40-bit Physical Address Extension" | 448 | bool "Support for the 40-bit Physical Address Extension" |
| 456 | default n | ||
| 457 | depends on ISA_ARCV2 | 449 | depends on ISA_ARCV2 |
| 458 | select HIGHMEM | 450 | select HIGHMEM |
| 459 | select PHYS_ADDR_T_64BIT | 451 | select PHYS_ADDR_T_64BIT |
| @@ -496,7 +488,6 @@ config HZ | |||
| 496 | 488 | ||
| 497 | config ARC_METAWARE_HLINK | 489 | config ARC_METAWARE_HLINK |
| 498 | bool "Support for Metaware debugger assisted Host access" | 490 | bool "Support for Metaware debugger assisted Host access" |
| 499 | default n | ||
| 500 | help | 491 | help |
| 501 | This options allows a Linux userland apps to directly access | 492 | This options allows a Linux userland apps to directly access |
| 502 | host file system (open/creat/read/write etc) with help from | 493 | host file system (open/creat/read/write etc) with help from |
| @@ -524,13 +515,11 @@ config ARC_DW2_UNWIND | |||
| 524 | 515 | ||
| 525 | config ARC_DBG_TLB_PARANOIA | 516 | config ARC_DBG_TLB_PARANOIA |
| 526 | bool "Paranoia Checks in Low Level TLB Handlers" | 517 | bool "Paranoia Checks in Low Level TLB Handlers" |
| 527 | default n | ||
| 528 | 518 | ||
| 529 | endif | 519 | endif |
| 530 | 520 | ||
| 531 | config ARC_UBOOT_SUPPORT | 521 | config ARC_UBOOT_SUPPORT |
| 532 | bool "Support uboot arg Handling" | 522 | bool "Support uboot arg Handling" |
| 533 | default n | ||
| 534 | help | 523 | help |
| 535 | ARC Linux by default checks for uboot provided args as pointers to | 524 | ARC Linux by default checks for uboot provided args as pointers to |
| 536 | external cmdline or DTB. This however breaks in absence of uboot, | 525 | external cmdline or DTB. This however breaks in absence of uboot, |
diff --git a/arch/arc/Makefile b/arch/arc/Makefile index c64c505d966c..df00578c279d 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | # published by the Free Software Foundation. | 6 | # published by the Free Software Foundation. |
| 7 | # | 7 | # |
| 8 | 8 | ||
| 9 | KBUILD_DEFCONFIG := nsim_700_defconfig | 9 | KBUILD_DEFCONFIG := nsim_hs_defconfig |
| 10 | 10 | ||
| 11 | cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ | 11 | cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ |
| 12 | cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 | 12 | cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 |
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index ef149f59929a..43f17b51ee89 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts | |||
| @@ -222,6 +222,21 @@ | |||
| 222 | bus-width = <4>; | 222 | bus-width = <4>; |
| 223 | dma-coherent; | 223 | dma-coherent; |
| 224 | }; | 224 | }; |
| 225 | |||
| 226 | gpio: gpio@3000 { | ||
| 227 | compatible = "snps,dw-apb-gpio"; | ||
| 228 | reg = <0x3000 0x20>; | ||
| 229 | #address-cells = <1>; | ||
| 230 | #size-cells = <0>; | ||
| 231 | |||
| 232 | gpio_port_a: gpio-controller@0 { | ||
| 233 | compatible = "snps,dw-apb-gpio-port"; | ||
| 234 | gpio-controller; | ||
| 235 | #gpio-cells = <2>; | ||
| 236 | snps,nr-gpios = <24>; | ||
| 237 | reg = <0>; | ||
| 238 | }; | ||
| 239 | }; | ||
| 225 | }; | 240 | }; |
| 226 | 241 | ||
| 227 | memory@80000000 { | 242 | memory@80000000 { |
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 41bc08be6a3b..020d4493edfd 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig | |||
| @@ -14,6 +14,7 @@ CONFIG_PERF_EVENTS=y | |||
| 14 | # CONFIG_VM_EVENT_COUNTERS is not set | 14 | # CONFIG_VM_EVENT_COUNTERS is not set |
| 15 | # CONFIG_SLUB_DEBUG is not set | 15 | # CONFIG_SLUB_DEBUG is not set |
| 16 | # CONFIG_COMPAT_BRK is not set | 16 | # CONFIG_COMPAT_BRK is not set |
| 17 | CONFIG_ISA_ARCOMPACT=y | ||
| 17 | CONFIG_MODULES=y | 18 | CONFIG_MODULES=y |
| 18 | CONFIG_MODULE_FORCE_LOAD=y | 19 | CONFIG_MODULE_FORCE_LOAD=y |
| 19 | CONFIG_MODULE_UNLOAD=y | 20 | CONFIG_MODULE_UNLOAD=y |
| @@ -95,6 +96,7 @@ CONFIG_VFAT_FS=y | |||
| 95 | CONFIG_NTFS_FS=y | 96 | CONFIG_NTFS_FS=y |
| 96 | CONFIG_TMPFS=y | 97 | CONFIG_TMPFS=y |
| 97 | CONFIG_NFS_FS=y | 98 | CONFIG_NFS_FS=y |
| 99 | CONFIG_NFS_V3_ACL=y | ||
| 98 | CONFIG_NLS_CODEPAGE_437=y | 100 | CONFIG_NLS_CODEPAGE_437=y |
| 99 | CONFIG_NLS_ISO8859_1=y | 101 | CONFIG_NLS_ISO8859_1=y |
| 100 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 102 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 1e1c4a8011b5..666314fffc60 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig | |||
| @@ -94,6 +94,7 @@ CONFIG_VFAT_FS=y | |||
| 94 | CONFIG_NTFS_FS=y | 94 | CONFIG_NTFS_FS=y |
| 95 | CONFIG_TMPFS=y | 95 | CONFIG_TMPFS=y |
| 96 | CONFIG_NFS_FS=y | 96 | CONFIG_NFS_FS=y |
| 97 | CONFIG_NFS_V3_ACL=y | ||
| 97 | CONFIG_NLS_CODEPAGE_437=y | 98 | CONFIG_NLS_CODEPAGE_437=y |
| 98 | CONFIG_NLS_ISO8859_1=y | 99 | CONFIG_NLS_ISO8859_1=y |
| 99 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 100 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 6b0c0cfd5c30..429832b8560b 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig | |||
| @@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y | |||
| 97 | CONFIG_NTFS_FS=y | 97 | CONFIG_NTFS_FS=y |
| 98 | CONFIG_TMPFS=y | 98 | CONFIG_TMPFS=y |
| 99 | CONFIG_NFS_FS=y | 99 | CONFIG_NFS_FS=y |
| 100 | CONFIG_NFS_V3_ACL=y | ||
| 100 | CONFIG_NLS_CODEPAGE_437=y | 101 | CONFIG_NLS_CODEPAGE_437=y |
| 101 | CONFIG_NLS_ISO8859_1=y | 102 | CONFIG_NLS_ISO8859_1=y |
| 102 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 103 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 1dec2b4bc5e6..87b23b7fb781 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig | |||
| @@ -45,6 +45,9 @@ CONFIG_SERIAL_8250_CONSOLE=y | |||
| 45 | CONFIG_SERIAL_8250_DW=y | 45 | CONFIG_SERIAL_8250_DW=y |
| 46 | CONFIG_SERIAL_OF_PLATFORM=y | 46 | CONFIG_SERIAL_OF_PLATFORM=y |
| 47 | # CONFIG_HW_RANDOM is not set | 47 | # CONFIG_HW_RANDOM is not set |
| 48 | CONFIG_GPIOLIB=y | ||
| 49 | CONFIG_GPIO_SYSFS=y | ||
| 50 | CONFIG_GPIO_DWAPB=y | ||
| 48 | # CONFIG_HWMON is not set | 51 | # CONFIG_HWMON is not set |
| 49 | CONFIG_DRM=y | 52 | CONFIG_DRM=y |
| 50 | # CONFIG_DRM_FBDEV_EMULATION is not set | 53 | # CONFIG_DRM_FBDEV_EMULATION is not set |
| @@ -65,6 +68,7 @@ CONFIG_EXT3_FS=y | |||
| 65 | CONFIG_VFAT_FS=y | 68 | CONFIG_VFAT_FS=y |
| 66 | CONFIG_TMPFS=y | 69 | CONFIG_TMPFS=y |
| 67 | CONFIG_NFS_FS=y | 70 | CONFIG_NFS_FS=y |
| 71 | CONFIG_NFS_V3_ACL=y | ||
| 68 | CONFIG_NLS_CODEPAGE_437=y | 72 | CONFIG_NLS_CODEPAGE_437=y |
| 69 | CONFIG_NLS_ISO8859_1=y | 73 | CONFIG_NLS_ISO8859_1=y |
| 70 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 74 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 31ba224bbfb4..6e84060e7c90 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig | |||
| @@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y | |||
| 15 | CONFIG_EMBEDDED=y | 15 | CONFIG_EMBEDDED=y |
| 16 | CONFIG_PERF_EVENTS=y | 16 | CONFIG_PERF_EVENTS=y |
| 17 | # CONFIG_COMPAT_BRK is not set | 17 | # CONFIG_COMPAT_BRK is not set |
| 18 | CONFIG_ISA_ARCOMPACT=y | ||
| 18 | CONFIG_KPROBES=y | 19 | CONFIG_KPROBES=y |
| 19 | CONFIG_MODULES=y | 20 | CONFIG_MODULES=y |
| 20 | CONFIG_MODULE_FORCE_LOAD=y | 21 | CONFIG_MODULE_FORCE_LOAD=y |
| @@ -73,6 +74,7 @@ CONFIG_PROC_KCORE=y | |||
| 73 | CONFIG_TMPFS=y | 74 | CONFIG_TMPFS=y |
| 74 | # CONFIG_MISC_FILESYSTEMS is not set | 75 | # CONFIG_MISC_FILESYSTEMS is not set |
| 75 | CONFIG_NFS_FS=y | 76 | CONFIG_NFS_FS=y |
| 77 | CONFIG_NFS_V3_ACL=y | ||
| 76 | CONFIG_ROOT_NFS=y | 78 | CONFIG_ROOT_NFS=y |
| 77 | CONFIG_DEBUG_INFO=y | 79 | CONFIG_DEBUG_INFO=y |
| 78 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 80 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 8e0b8b134cd9..219c2a65294b 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig | |||
| @@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y | |||
| 15 | CONFIG_PERF_EVENTS=y | 15 | CONFIG_PERF_EVENTS=y |
| 16 | # CONFIG_SLUB_DEBUG is not set | 16 | # CONFIG_SLUB_DEBUG is not set |
| 17 | # CONFIG_COMPAT_BRK is not set | 17 | # CONFIG_COMPAT_BRK is not set |
| 18 | CONFIG_ISA_ARCOMPACT=y | ||
| 18 | CONFIG_KPROBES=y | 19 | CONFIG_KPROBES=y |
| 19 | CONFIG_MODULES=y | 20 | CONFIG_MODULES=y |
| 20 | # CONFIG_LBDAF is not set | 21 | # CONFIG_LBDAF is not set |
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index f14eeff7d308..35dfc6491a09 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig | |||
| @@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y | |||
| 15 | CONFIG_PERF_EVENTS=y | 15 | CONFIG_PERF_EVENTS=y |
| 16 | # CONFIG_SLUB_DEBUG is not set | 16 | # CONFIG_SLUB_DEBUG is not set |
| 17 | # CONFIG_COMPAT_BRK is not set | 17 | # CONFIG_COMPAT_BRK is not set |
| 18 | CONFIG_ISA_ARCOMPACT=y | ||
| 18 | CONFIG_KPROBES=y | 19 | CONFIG_KPROBES=y |
| 19 | CONFIG_MODULES=y | 20 | CONFIG_MODULES=y |
| 20 | # CONFIG_LBDAF is not set | 21 | # CONFIG_LBDAF is not set |
| @@ -66,5 +67,6 @@ CONFIG_EXT2_FS_XATTR=y | |||
| 66 | CONFIG_TMPFS=y | 67 | CONFIG_TMPFS=y |
| 67 | # CONFIG_MISC_FILESYSTEMS is not set | 68 | # CONFIG_MISC_FILESYSTEMS is not set |
| 68 | CONFIG_NFS_FS=y | 69 | CONFIG_NFS_FS=y |
| 70 | CONFIG_NFS_V3_ACL=y | ||
| 69 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 71 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
| 70 | # CONFIG_ENABLE_MUST_CHECK is not set | 72 | # CONFIG_ENABLE_MUST_CHECK is not set |
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 025298a48305..1638e5bc9672 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig | |||
| @@ -65,5 +65,6 @@ CONFIG_EXT2_FS_XATTR=y | |||
| 65 | CONFIG_TMPFS=y | 65 | CONFIG_TMPFS=y |
| 66 | # CONFIG_MISC_FILESYSTEMS is not set | 66 | # CONFIG_MISC_FILESYSTEMS is not set |
| 67 | CONFIG_NFS_FS=y | 67 | CONFIG_NFS_FS=y |
| 68 | CONFIG_NFS_V3_ACL=y | ||
| 68 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 69 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
| 69 | # CONFIG_ENABLE_MUST_CHECK is not set | 70 | # CONFIG_ENABLE_MUST_CHECK is not set |
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index df7b77b13b82..11cfbdb0f441 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig | |||
| @@ -76,6 +76,7 @@ CONFIG_EXT2_FS_XATTR=y | |||
| 76 | CONFIG_TMPFS=y | 76 | CONFIG_TMPFS=y |
| 77 | # CONFIG_MISC_FILESYSTEMS is not set | 77 | # CONFIG_MISC_FILESYSTEMS is not set |
| 78 | CONFIG_NFS_FS=y | 78 | CONFIG_NFS_FS=y |
| 79 | CONFIG_NFS_V3_ACL=y | ||
| 79 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 80 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
| 80 | # CONFIG_ENABLE_MUST_CHECK is not set | 81 | # CONFIG_ENABLE_MUST_CHECK is not set |
| 81 | CONFIG_FTRACE=y | 82 | CONFIG_FTRACE=y |
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index a7f65313f84a..e71ade3cf9c8 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig | |||
| @@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y | |||
| 19 | # CONFIG_AIO is not set | 19 | # CONFIG_AIO is not set |
| 20 | CONFIG_EMBEDDED=y | 20 | CONFIG_EMBEDDED=y |
| 21 | # CONFIG_COMPAT_BRK is not set | 21 | # CONFIG_COMPAT_BRK is not set |
| 22 | CONFIG_ISA_ARCOMPACT=y | ||
| 22 | CONFIG_SLAB=y | 23 | CONFIG_SLAB=y |
| 23 | CONFIG_MODULES=y | 24 | CONFIG_MODULES=y |
| 24 | CONFIG_MODULE_FORCE_LOAD=y | 25 | CONFIG_MODULE_FORCE_LOAD=y |
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index db47c3541f15..1e59a2e9c602 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig | |||
| @@ -85,6 +85,7 @@ CONFIG_NTFS_FS=y | |||
| 85 | CONFIG_TMPFS=y | 85 | CONFIG_TMPFS=y |
| 86 | CONFIG_JFFS2_FS=y | 86 | CONFIG_JFFS2_FS=y |
| 87 | CONFIG_NFS_FS=y | 87 | CONFIG_NFS_FS=y |
| 88 | CONFIG_NFS_V3_ACL=y | ||
| 88 | CONFIG_NLS_CODEPAGE_437=y | 89 | CONFIG_NLS_CODEPAGE_437=y |
| 89 | CONFIG_NLS_ISO8859_1=y | 90 | CONFIG_NLS_ISO8859_1=y |
| 90 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 91 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index a8ac5e917d9a..b5c3f6c54b03 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig | |||
| @@ -90,6 +90,7 @@ CONFIG_NTFS_FS=y | |||
| 90 | CONFIG_TMPFS=y | 90 | CONFIG_TMPFS=y |
| 91 | CONFIG_JFFS2_FS=y | 91 | CONFIG_JFFS2_FS=y |
| 92 | CONFIG_NFS_FS=y | 92 | CONFIG_NFS_FS=y |
| 93 | CONFIG_NFS_V3_ACL=y | ||
| 93 | CONFIG_NLS_CODEPAGE_437=y | 94 | CONFIG_NLS_CODEPAGE_437=y |
| 94 | CONFIG_NLS_ISO8859_1=y | 95 | CONFIG_NLS_ISO8859_1=y |
| 95 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 96 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index ff7d3232764a..f393b663413e 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h | |||
| @@ -113,7 +113,9 @@ extern unsigned long perip_base, perip_end; | |||
| 113 | 113 | ||
| 114 | /* IO coherency related Auxiliary registers */ | 114 | /* IO coherency related Auxiliary registers */ |
| 115 | #define ARC_REG_IO_COH_ENABLE 0x500 | 115 | #define ARC_REG_IO_COH_ENABLE 0x500 |
| 116 | #define ARC_IO_COH_ENABLE_BIT BIT(0) | ||
| 116 | #define ARC_REG_IO_COH_PARTIAL 0x501 | 117 | #define ARC_REG_IO_COH_PARTIAL 0x501 |
| 118 | #define ARC_IO_COH_PARTIAL_BIT BIT(0) | ||
| 117 | #define ARC_REG_IO_COH_AP0_BASE 0x508 | 119 | #define ARC_REG_IO_COH_AP0_BASE 0x508 |
| 118 | #define ARC_REG_IO_COH_AP0_SIZE 0x509 | 120 | #define ARC_REG_IO_COH_AP0_SIZE 0x509 |
| 119 | 121 | ||
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index c22b181e8206..2f39d9b3886e 100644 --- a/arch/arc/include/asm/io.h +++ b/arch/arc/include/asm/io.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
| 13 | #include <asm/byteorder.h> | 13 | #include <asm/byteorder.h> |
| 14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
| 15 | #include <asm/unaligned.h> | ||
| 15 | 16 | ||
| 16 | #ifdef CONFIG_ISA_ARCV2 | 17 | #ifdef CONFIG_ISA_ARCV2 |
| 17 | #include <asm/barrier.h> | 18 | #include <asm/barrier.h> |
| @@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) | |||
| 94 | return w; | 95 | return w; |
| 95 | } | 96 | } |
| 96 | 97 | ||
| 98 | /* | ||
| 99 | * {read,write}s{b,w,l}() repeatedly access the same IO address in | ||
| 100 | * native endianness in 8-, 16-, 32-bit chunks {into,from} memory, | ||
| 101 | * @count times | ||
| 102 | */ | ||
| 103 | #define __raw_readsx(t,f) \ | ||
| 104 | static inline void __raw_reads##f(const volatile void __iomem *addr, \ | ||
| 105 | void *ptr, unsigned int count) \ | ||
| 106 | { \ | ||
| 107 | bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \ | ||
| 108 | u##t *buf = ptr; \ | ||
| 109 | \ | ||
| 110 | if (!count) \ | ||
| 111 | return; \ | ||
| 112 | \ | ||
| 113 | /* Some ARC CPU's don't support unaligned accesses */ \ | ||
| 114 | if (is_aligned) { \ | ||
| 115 | do { \ | ||
| 116 | u##t x = __raw_read##f(addr); \ | ||
| 117 | *buf++ = x; \ | ||
| 118 | } while (--count); \ | ||
| 119 | } else { \ | ||
| 120 | do { \ | ||
| 121 | u##t x = __raw_read##f(addr); \ | ||
| 122 | put_unaligned(x, buf++); \ | ||
| 123 | } while (--count); \ | ||
| 124 | } \ | ||
| 125 | } | ||
| 126 | |||
| 127 | #define __raw_readsb __raw_readsb | ||
| 128 | __raw_readsx(8, b) | ||
| 129 | #define __raw_readsw __raw_readsw | ||
| 130 | __raw_readsx(16, w) | ||
| 131 | #define __raw_readsl __raw_readsl | ||
| 132 | __raw_readsx(32, l) | ||
| 133 | |||
| 97 | #define __raw_writeb __raw_writeb | 134 | #define __raw_writeb __raw_writeb |
| 98 | static inline void __raw_writeb(u8 b, volatile void __iomem *addr) | 135 | static inline void __raw_writeb(u8 b, volatile void __iomem *addr) |
| 99 | { | 136 | { |
| @@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr) | |||
| 126 | 163 | ||
| 127 | } | 164 | } |
| 128 | 165 | ||
| 166 | #define __raw_writesx(t,f) \ | ||
| 167 | static inline void __raw_writes##f(volatile void __iomem *addr, \ | ||
| 168 | const void *ptr, unsigned int count) \ | ||
| 169 | { \ | ||
| 170 | bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \ | ||
| 171 | const u##t *buf = ptr; \ | ||
| 172 | \ | ||
| 173 | if (!count) \ | ||
| 174 | return; \ | ||
| 175 | \ | ||
| 176 | /* Some ARC CPU's don't support unaligned accesses */ \ | ||
| 177 | if (is_aligned) { \ | ||
| 178 | do { \ | ||
| 179 | __raw_write##f(*buf++, addr); \ | ||
| 180 | } while (--count); \ | ||
| 181 | } else { \ | ||
| 182 | do { \ | ||
| 183 | __raw_write##f(get_unaligned(buf++), addr); \ | ||
| 184 | } while (--count); \ | ||
| 185 | } \ | ||
| 186 | } | ||
| 187 | |||
| 188 | #define __raw_writesb __raw_writesb | ||
| 189 | __raw_writesx(8, b) | ||
| 190 | #define __raw_writesw __raw_writesw | ||
| 191 | __raw_writesx(16, w) | ||
| 192 | #define __raw_writesl __raw_writesl | ||
| 193 | __raw_writesx(32, l) | ||
| 194 | |||
| 129 | /* | 195 | /* |
| 130 | * MMIO can also get buffered/optimized in micro-arch, so barriers needed | 196 | * MMIO can also get buffered/optimized in micro-arch, so barriers needed |
| 131 | * Based on ARM model for the typical use case | 197 | * Based on ARM model for the typical use case |
| @@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr) | |||
| 141 | #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) | 207 | #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) |
| 142 | #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) | 208 | #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) |
| 143 | #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) | 209 | #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) |
| 210 | #define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); }) | ||
| 211 | #define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); }) | ||
| 212 | #define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); }) | ||
| 144 | 213 | ||
| 145 | #define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) | 214 | #define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) |
| 146 | #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) | 215 | #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) |
| 147 | #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) | 216 | #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) |
| 217 | #define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); }) | ||
| 218 | #define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); }) | ||
| 219 | #define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); }) | ||
| 148 | 220 | ||
| 149 | /* | 221 | /* |
| 150 | * Relaxed API for drivers which can handle barrier ordering themselves | 222 | * Relaxed API for drivers which can handle barrier ordering themselves |
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index b2cae79a25d7..eea8c5ce6335 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
| @@ -243,7 +243,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) | |||
| 243 | { | 243 | { |
| 244 | struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; | 244 | struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; |
| 245 | struct bcr_identity *core = &cpu->core; | 245 | struct bcr_identity *core = &cpu->core; |
| 246 | int i, n = 0; | 246 | int i, n = 0, ua = 0; |
| 247 | 247 | ||
| 248 | FIX_PTR(cpu); | 248 | FIX_PTR(cpu); |
| 249 | 249 | ||
| @@ -263,10 +263,13 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) | |||
| 263 | IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT), | 263 | IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT), |
| 264 | IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT)); | 264 | IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT)); |
| 265 | 265 | ||
| 266 | n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s", | 266 | #ifdef __ARC_UNALIGNED__ |
| 267 | ua = 1; | ||
| 268 | #endif | ||
| 269 | n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s", | ||
| 267 | IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC), | 270 | IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC), |
| 268 | IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64), | 271 | IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64), |
| 269 | IS_AVAIL1(cpu->isa.unalign, "unalign (not used)")); | 272 | IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua)); |
| 270 | 273 | ||
| 271 | if (i) | 274 | if (i) |
| 272 | n += scnprintf(buf + n, len - n, "\n\t\t: "); | 275 | n += scnprintf(buf + n, len - n, "\n\t\t: "); |
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index f2701c13a66b..cf9619d4efb4 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c | |||
| @@ -1145,6 +1145,20 @@ noinline void __init arc_ioc_setup(void) | |||
| 1145 | unsigned int ioc_base, mem_sz; | 1145 | unsigned int ioc_base, mem_sz; |
| 1146 | 1146 | ||
| 1147 | /* | 1147 | /* |
| 1148 | * If IOC was already enabled (due to bootloader) it technically needs to | ||
| 1149 | * be reconfigured with aperture base,size corresponding to Linux memory map | ||
| 1150 | * which will certainly be different than uboot's. But disabling and | ||
| 1151 | * reenabling IOC when DMA might be potentially active is tricky business. | ||
| 1152 | * To avoid random memory issues later, just panic here and ask user to | ||
| 1153 | * upgrade bootloader to one which doesn't enable IOC | ||
| 1154 | */ | ||
| 1155 | if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT) | ||
| 1156 | panic("IOC already enabled, please upgrade bootloader!\n"); | ||
| 1157 | |||
| 1158 | if (!ioc_enable) | ||
| 1159 | return; | ||
| 1160 | |||
| 1161 | /* | ||
| 1148 | * As for today we don't support both IOC and ZONE_HIGHMEM enabled | 1162 | * As for today we don't support both IOC and ZONE_HIGHMEM enabled |
| 1149 | * simultaneously. This happens because as of today IOC aperture covers | 1163 | * simultaneously. This happens because as of today IOC aperture covers |
| 1150 | * only ZONE_NORMAL (low mem) and any dma transactions outside this | 1164 | * only ZONE_NORMAL (low mem) and any dma transactions outside this |
| @@ -1187,8 +1201,8 @@ noinline void __init arc_ioc_setup(void) | |||
| 1187 | panic("IOC Aperture start must be aligned to the size of the aperture"); | 1201 | panic("IOC Aperture start must be aligned to the size of the aperture"); |
| 1188 | 1202 | ||
| 1189 | write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12); | 1203 | write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12); |
| 1190 | write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1); | 1204 | write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT); |
| 1191 | write_aux_reg(ARC_REG_IO_COH_ENABLE, 1); | 1205 | write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT); |
| 1192 | 1206 | ||
| 1193 | /* Re-enable L1 dcache */ | 1207 | /* Re-enable L1 dcache */ |
| 1194 | __dc_enable(); | 1208 | __dc_enable(); |
| @@ -1265,7 +1279,7 @@ void __init arc_cache_init_master(void) | |||
| 1265 | if (is_isa_arcv2() && l2_line_sz && !slc_enable) | 1279 | if (is_isa_arcv2() && l2_line_sz && !slc_enable) |
| 1266 | arc_slc_disable(); | 1280 | arc_slc_disable(); |
| 1267 | 1281 | ||
| 1268 | if (is_isa_arcv2() && ioc_enable) | 1282 | if (is_isa_arcv2() && ioc_exists) |
| 1269 | arc_ioc_setup(); | 1283 | arc_ioc_setup(); |
| 1270 | 1284 | ||
| 1271 | if (is_isa_arcv2() && l2_line_sz && slc_enable) { | 1285 | if (is_isa_arcv2() && l2_line_sz && slc_enable) { |
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index c9da6102eb4f..e2d9fc3fea01 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c | |||
| @@ -66,7 +66,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) | |||
| 66 | struct vm_area_struct *vma = NULL; | 66 | struct vm_area_struct *vma = NULL; |
| 67 | struct task_struct *tsk = current; | 67 | struct task_struct *tsk = current; |
| 68 | struct mm_struct *mm = tsk->mm; | 68 | struct mm_struct *mm = tsk->mm; |
| 69 | int si_code; | 69 | int si_code = 0; |
| 70 | int ret; | 70 | int ret; |
| 71 | vm_fault_t fault; | 71 | vm_fault_t fault; |
| 72 | int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ | 72 | int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ |
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts index f2a1d25eb6cf..83e0fbc4a1a1 100644 --- a/arch/arm/boot/dts/arm-realview-pb1176.dts +++ b/arch/arm/boot/dts/arm-realview-pb1176.dts | |||
| @@ -45,7 +45,7 @@ | |||
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | /* The voltage to the MMC card is hardwired at 3.3V */ | 47 | /* The voltage to the MMC card is hardwired at 3.3V */ |
| 48 | vmmc: fixedregulator@0 { | 48 | vmmc: regulator-vmmc { |
| 49 | compatible = "regulator-fixed"; | 49 | compatible = "regulator-fixed"; |
| 50 | regulator-name = "vmmc"; | 50 | regulator-name = "vmmc"; |
| 51 | regulator-min-microvolt = <3300000>; | 51 | regulator-min-microvolt = <3300000>; |
| @@ -53,7 +53,7 @@ | |||
| 53 | regulator-boot-on; | 53 | regulator-boot-on; |
| 54 | }; | 54 | }; |
| 55 | 55 | ||
| 56 | veth: fixedregulator@0 { | 56 | veth: regulator-veth { |
| 57 | compatible = "regulator-fixed"; | 57 | compatible = "regulator-fixed"; |
| 58 | regulator-name = "veth"; | 58 | regulator-name = "veth"; |
| 59 | regulator-min-microvolt = <3300000>; | 59 | regulator-min-microvolt = <3300000>; |
diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts index 7f9cbdf33a51..2f6aa24a0b67 100644 --- a/arch/arm/boot/dts/arm-realview-pb11mp.dts +++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts | |||
| @@ -145,7 +145,7 @@ | |||
| 145 | }; | 145 | }; |
| 146 | 146 | ||
| 147 | /* The voltage to the MMC card is hardwired at 3.3V */ | 147 | /* The voltage to the MMC card is hardwired at 3.3V */ |
| 148 | vmmc: fixedregulator@0 { | 148 | vmmc: regulator-vmmc { |
| 149 | compatible = "regulator-fixed"; | 149 | compatible = "regulator-fixed"; |
| 150 | regulator-name = "vmmc"; | 150 | regulator-name = "vmmc"; |
| 151 | regulator-min-microvolt = <3300000>; | 151 | regulator-min-microvolt = <3300000>; |
| @@ -153,7 +153,7 @@ | |||
| 153 | regulator-boot-on; | 153 | regulator-boot-on; |
| 154 | }; | 154 | }; |
| 155 | 155 | ||
| 156 | veth: fixedregulator@0 { | 156 | veth: regulator-veth { |
| 157 | compatible = "regulator-fixed"; | 157 | compatible = "regulator-fixed"; |
| 158 | regulator-name = "veth"; | 158 | regulator-name = "veth"; |
| 159 | regulator-min-microvolt = <3300000>; | 159 | regulator-min-microvolt = <3300000>; |
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts index 4adb85e66be3..93762244be7f 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts | |||
| @@ -31,7 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | wifi_pwrseq: wifi-pwrseq { | 32 | wifi_pwrseq: wifi-pwrseq { |
| 33 | compatible = "mmc-pwrseq-simple"; | 33 | compatible = "mmc-pwrseq-simple"; |
| 34 | reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>; | 34 | reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>; |
| 35 | }; | 35 | }; |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts index c318bcbc6ba7..89e6fd547c75 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | 26 | ||
| 27 | wifi_pwrseq: wifi-pwrseq { | 27 | wifi_pwrseq: wifi-pwrseq { |
| 28 | compatible = "mmc-pwrseq-simple"; | 28 | compatible = "mmc-pwrseq-simple"; |
| 29 | reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>; | 29 | reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>; |
| 30 | }; | 30 | }; |
| 31 | }; | 31 | }; |
| 32 | 32 | ||
diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts index d8aac4a2d02a..177d21fdeb28 100644 --- a/arch/arm/boot/dts/imx7d-nitrogen7.dts +++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts | |||
| @@ -86,13 +86,17 @@ | |||
| 86 | compatible = "regulator-fixed"; | 86 | compatible = "regulator-fixed"; |
| 87 | regulator-min-microvolt = <3300000>; | 87 | regulator-min-microvolt = <3300000>; |
| 88 | regulator-max-microvolt = <3300000>; | 88 | regulator-max-microvolt = <3300000>; |
| 89 | clocks = <&clks IMX7D_CLKO2_ROOT_DIV>; | ||
| 90 | clock-names = "slow"; | ||
| 91 | regulator-name = "reg_wlan"; | 89 | regulator-name = "reg_wlan"; |
| 92 | startup-delay-us = <70000>; | 90 | startup-delay-us = <70000>; |
| 93 | gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>; | 91 | gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>; |
| 94 | enable-active-high; | 92 | enable-active-high; |
| 95 | }; | 93 | }; |
| 94 | |||
| 95 | usdhc2_pwrseq: usdhc2_pwrseq { | ||
| 96 | compatible = "mmc-pwrseq-simple"; | ||
| 97 | clocks = <&clks IMX7D_CLKO2_ROOT_DIV>; | ||
| 98 | clock-names = "ext_clock"; | ||
| 99 | }; | ||
| 96 | }; | 100 | }; |
| 97 | 101 | ||
| 98 | &adc1 { | 102 | &adc1 { |
| @@ -375,6 +379,7 @@ | |||
| 375 | bus-width = <4>; | 379 | bus-width = <4>; |
| 376 | non-removable; | 380 | non-removable; |
| 377 | vmmc-supply = <&reg_wlan>; | 381 | vmmc-supply = <&reg_wlan>; |
| 382 | mmc-pwrseq = <&usdhc2_pwrseq>; | ||
| 378 | cap-power-off-card; | 383 | cap-power-off-card; |
| 379 | keep-power-in-suspend; | 384 | keep-power-in-suspend; |
| 380 | status = "okay"; | 385 | status = "okay"; |
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi index 21973eb55671..f27b3849d3ff 100644 --- a/arch/arm/boot/dts/imx7d-pico.dtsi +++ b/arch/arm/boot/dts/imx7d-pico.dtsi | |||
| @@ -100,6 +100,19 @@ | |||
| 100 | regulator-min-microvolt = <1800000>; | 100 | regulator-min-microvolt = <1800000>; |
| 101 | regulator-max-microvolt = <1800000>; | 101 | regulator-max-microvolt = <1800000>; |
| 102 | }; | 102 | }; |
| 103 | |||
| 104 | usdhc2_pwrseq: usdhc2_pwrseq { | ||
| 105 | compatible = "mmc-pwrseq-simple"; | ||
| 106 | clocks = <&clks IMX7D_CLKO2_ROOT_DIV>; | ||
| 107 | clock-names = "ext_clock"; | ||
| 108 | }; | ||
| 109 | }; | ||
| 110 | |||
| 111 | &clks { | ||
| 112 | assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>, | ||
| 113 | <&clks IMX7D_CLKO2_ROOT_DIV>; | ||
| 114 | assigned-clock-parents = <&clks IMX7D_CKIL>; | ||
| 115 | assigned-clock-rates = <0>, <32768>; | ||
| 103 | }; | 116 | }; |
| 104 | 117 | ||
| 105 | &i2c4 { | 118 | &i2c4 { |
| @@ -199,12 +212,13 @@ | |||
| 199 | 212 | ||
| 200 | &usdhc2 { /* Wifi SDIO */ | 213 | &usdhc2 { /* Wifi SDIO */ |
| 201 | pinctrl-names = "default"; | 214 | pinctrl-names = "default"; |
| 202 | pinctrl-0 = <&pinctrl_usdhc2>; | 215 | pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>; |
| 203 | no-1-8-v; | 216 | no-1-8-v; |
| 204 | non-removable; | 217 | non-removable; |
| 205 | keep-power-in-suspend; | 218 | keep-power-in-suspend; |
| 206 | wakeup-source; | 219 | wakeup-source; |
| 207 | vmmc-supply = <&reg_ap6212>; | 220 | vmmc-supply = <&reg_ap6212>; |
| 221 | mmc-pwrseq = <&usdhc2_pwrseq>; | ||
| 208 | status = "okay"; | 222 | status = "okay"; |
| 209 | }; | 223 | }; |
| 210 | 224 | ||
| @@ -301,6 +315,12 @@ | |||
| 301 | }; | 315 | }; |
| 302 | 316 | ||
| 303 | &iomuxc_lpsr { | 317 | &iomuxc_lpsr { |
| 318 | pinctrl_wifi_clk: wificlkgrp { | ||
| 319 | fsl,pins = < | ||
| 320 | MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2 0x7d | ||
| 321 | >; | ||
| 322 | }; | ||
| 323 | |||
| 304 | pinctrl_wdog: wdoggrp { | 324 | pinctrl_wdog: wdoggrp { |
| 305 | fsl,pins = < | 325 | fsl,pins = < |
| 306 | MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B 0x74 | 326 | MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B 0x74 |
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts index 742d2946b08b..583a5a01642f 100644 --- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts +++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts | |||
| @@ -314,8 +314,8 @@ | |||
| 314 | 314 | ||
| 315 | &reg_dldo3 { | 315 | &reg_dldo3 { |
| 316 | regulator-always-on; | 316 | regulator-always-on; |
| 317 | regulator-min-microvolt = <2500000>; | 317 | regulator-min-microvolt = <3300000>; |
| 318 | regulator-max-microvolt = <2500000>; | 318 | regulator-max-microvolt = <3300000>; |
| 319 | regulator-name = "vcc-pd"; | 319 | regulator-name = "vcc-pd"; |
| 320 | }; | 320 | }; |
| 321 | 321 | ||
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c index 243a108a940b..fd0053e47a15 100644 --- a/arch/arm/mach-imx/cpuidle-imx6sx.c +++ b/arch/arm/mach-imx/cpuidle-imx6sx.c | |||
| @@ -110,7 +110,7 @@ int __init imx6sx_cpuidle_init(void) | |||
| 110 | * except for power up sw2iso which need to be | 110 | * except for power up sw2iso which need to be |
| 111 | * larger than LDO ramp up time. | 111 | * larger than LDO ramp up time. |
| 112 | */ | 112 | */ |
| 113 | imx_gpc_set_arm_power_up_timing(2, 1); | 113 | imx_gpc_set_arm_power_up_timing(0xf, 1); |
| 114 | imx_gpc_set_arm_power_down_timing(1, 1); | 114 | imx_gpc_set_arm_power_down_timing(1, 1); |
| 115 | 115 | ||
| 116 | return cpuidle_register(&imx6sx_cpuidle_driver, NULL); | 116 | return cpuidle_register(&imx6sx_cpuidle_driver, NULL); |
diff --git a/arch/arm/mach-mmp/cputype.h b/arch/arm/mach-mmp/cputype.h index 446edaeb78a7..a96abcf521b4 100644 --- a/arch/arm/mach-mmp/cputype.h +++ b/arch/arm/mach-mmp/cputype.h | |||
| @@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void) | |||
| 44 | #define cpu_is_pxa910() (0) | 44 | #define cpu_is_pxa910() (0) |
| 45 | #endif | 45 | #endif |
| 46 | 46 | ||
| 47 | #ifdef CONFIG_CPU_MMP2 | 47 | #if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT) |
| 48 | static inline int cpu_is_mmp2(void) | 48 | static inline int cpu_is_mmp2(void) |
| 49 | { | 49 | { |
| 50 | return (((read_cpuid_id() >> 8) & 0xff) == 0x58); | 50 | return (((read_cpuid_id() >> 8) & 0xff) == 0x58) && |
| 51 | (((mmp_chip_id & 0xfff) == 0x410) || | ||
| 52 | ((mmp_chip_id & 0xfff) == 0x610)); | ||
| 51 | } | 53 | } |
| 52 | #else | 54 | #else |
| 53 | #define cpu_is_mmp2() (0) | 55 | #define cpu_is_mmp2() (0) |
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 215df435bfb9..2149b47a0c5a 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
| @@ -360,14 +360,16 @@ v7_dma_inv_range: | |||
| 360 | ALT_UP(W(nop)) | 360 | ALT_UP(W(nop)) |
| 361 | #endif | 361 | #endif |
| 362 | mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line | 362 | mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line |
| 363 | addne r0, r0, r2 | ||
| 363 | 364 | ||
| 364 | tst r1, r3 | 365 | tst r1, r3 |
| 365 | bic r1, r1, r3 | 366 | bic r1, r1, r3 |
| 366 | mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line | 367 | mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line |
| 367 | 1: | ||
| 368 | mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line | ||
| 369 | add r0, r0, r2 | ||
| 370 | cmp r0, r1 | 368 | cmp r0, r1 |
| 369 | 1: | ||
| 370 | mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line | ||
| 371 | addlo r0, r0, r2 | ||
| 372 | cmplo r0, r1 | ||
| 371 | blo 1b | 373 | blo 1b |
| 372 | dsb st | 374 | dsb st |
| 373 | ret lr | 375 | ret lr |
diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S index 788486e830d3..32aa2a2aa260 100644 --- a/arch/arm/mm/cache-v7m.S +++ b/arch/arm/mm/cache-v7m.S | |||
| @@ -73,9 +73,11 @@ | |||
| 73 | /* | 73 | /* |
| 74 | * dcimvac: Invalidate data cache line by MVA to PoC | 74 | * dcimvac: Invalidate data cache line by MVA to PoC |
| 75 | */ | 75 | */ |
| 76 | .macro dcimvac, rt, tmp | 76 | .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo |
| 77 | v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC | 77 | .macro dcimvac\c, rt, tmp |
| 78 | v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c | ||
| 78 | .endm | 79 | .endm |
| 80 | .endr | ||
| 79 | 81 | ||
| 80 | /* | 82 | /* |
| 81 | * dccmvau: Clean data cache line by MVA to PoU | 83 | * dccmvau: Clean data cache line by MVA to PoU |
| @@ -369,14 +371,16 @@ v7m_dma_inv_range: | |||
| 369 | tst r0, r3 | 371 | tst r0, r3 |
| 370 | bic r0, r0, r3 | 372 | bic r0, r0, r3 |
| 371 | dccimvacne r0, r3 | 373 | dccimvacne r0, r3 |
| 374 | addne r0, r0, r2 | ||
| 372 | subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac | 375 | subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac |
| 373 | tst r1, r3 | 376 | tst r1, r3 |
| 374 | bic r1, r1, r3 | 377 | bic r1, r1, r3 |
| 375 | dccimvacne r1, r3 | 378 | dccimvacne r1, r3 |
| 376 | 1: | ||
| 377 | dcimvac r0, r3 | ||
| 378 | add r0, r0, r2 | ||
| 379 | cmp r0, r1 | 379 | cmp r0, r1 |
| 380 | 1: | ||
| 381 | dcimvaclo r0, r3 | ||
| 382 | addlo r0, r0, r2 | ||
| 383 | cmplo r0, r1 | ||
| 380 | blo 1b | 384 | blo 1b |
| 381 | dsb st | 385 | dsb st |
| 382 | ret lr | 386 | ret lr |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 661fe48ab78d..78de138aa66d 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
| @@ -829,7 +829,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
| 829 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 829 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
| 830 | unsigned long attrs) | 830 | unsigned long attrs) |
| 831 | { | 831 | { |
| 832 | int ret; | 832 | int ret = -ENXIO; |
| 833 | unsigned long nr_vma_pages = vma_pages(vma); | 833 | unsigned long nr_vma_pages = vma_pages(vma); |
| 834 | unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; | 834 | unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; |
| 835 | unsigned long pfn = dma_to_pfn(dev, dma_addr); | 835 | unsigned long pfn = dma_to_pfn(dev, dma_addr); |
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 81d0efb055c6..19516fbc2c55 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S | |||
| @@ -274,6 +274,13 @@ | |||
| 274 | .endm | 274 | .endm |
| 275 | 275 | ||
| 276 | .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 | 276 | .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 |
| 277 | /* | ||
| 278 | * If we are building for big.Little with branch predictor hardening, | ||
| 279 | * we need the processor function tables to remain available after boot. | ||
| 280 | */ | ||
| 281 | #if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) | ||
| 282 | .section ".rodata" | ||
| 283 | #endif | ||
| 277 | .type \name\()_processor_functions, #object | 284 | .type \name\()_processor_functions, #object |
| 278 | .align 2 | 285 | .align 2 |
| 279 | ENTRY(\name\()_processor_functions) | 286 | ENTRY(\name\()_processor_functions) |
| @@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions) | |||
| 309 | .endif | 316 | .endif |
| 310 | 317 | ||
| 311 | .size \name\()_processor_functions, . - \name\()_processor_functions | 318 | .size \name\()_processor_functions, . - \name\()_processor_functions |
| 319 | #if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) | ||
| 320 | .previous | ||
| 321 | #endif | ||
| 312 | .endm | 322 | .endm |
| 313 | 323 | ||
| 314 | .macro define_cache_functions name:req | 324 | .macro define_cache_functions name:req |
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index b2aa9b32bff2..2c118a6ab358 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c | |||
| @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or | |||
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | /* Copy arch-dep-instance from template. */ | 249 | /* Copy arch-dep-instance from template. */ |
| 250 | memcpy(code, &optprobe_template_entry, | 250 | memcpy(code, (unsigned char *)optprobe_template_entry, |
| 251 | TMPL_END_IDX * sizeof(kprobe_opcode_t)); | 251 | TMPL_END_IDX * sizeof(kprobe_opcode_t)); |
| 252 | 252 | ||
| 253 | /* Adjust buffer according to instruction. */ | 253 | /* Adjust buffer according to instruction. */ |
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi index 64632c873888..01ea662afba8 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi | |||
| @@ -20,28 +20,24 @@ | |||
| 20 | compatible = "arm,cortex-a72", "arm,armv8"; | 20 | compatible = "arm,cortex-a72", "arm,armv8"; |
| 21 | reg = <0x000>; | 21 | reg = <0x000>; |
| 22 | enable-method = "psci"; | 22 | enable-method = "psci"; |
| 23 | cpu-idle-states = <&CPU_SLEEP_0>; | ||
| 24 | }; | 23 | }; |
| 25 | cpu1: cpu@1 { | 24 | cpu1: cpu@1 { |
| 26 | device_type = "cpu"; | 25 | device_type = "cpu"; |
| 27 | compatible = "arm,cortex-a72", "arm,armv8"; | 26 | compatible = "arm,cortex-a72", "arm,armv8"; |
| 28 | reg = <0x001>; | 27 | reg = <0x001>; |
| 29 | enable-method = "psci"; | 28 | enable-method = "psci"; |
| 30 | cpu-idle-states = <&CPU_SLEEP_0>; | ||
| 31 | }; | 29 | }; |
| 32 | cpu2: cpu@100 { | 30 | cpu2: cpu@100 { |
| 33 | device_type = "cpu"; | 31 | device_type = "cpu"; |
| 34 | compatible = "arm,cortex-a72", "arm,armv8"; | 32 | compatible = "arm,cortex-a72", "arm,armv8"; |
| 35 | reg = <0x100>; | 33 | reg = <0x100>; |
| 36 | enable-method = "psci"; | 34 | enable-method = "psci"; |
| 37 | cpu-idle-states = <&CPU_SLEEP_0>; | ||
| 38 | }; | 35 | }; |
| 39 | cpu3: cpu@101 { | 36 | cpu3: cpu@101 { |
| 40 | device_type = "cpu"; | 37 | device_type = "cpu"; |
| 41 | compatible = "arm,cortex-a72", "arm,armv8"; | 38 | compatible = "arm,cortex-a72", "arm,armv8"; |
| 42 | reg = <0x101>; | 39 | reg = <0x101>; |
| 43 | enable-method = "psci"; | 40 | enable-method = "psci"; |
| 44 | cpu-idle-states = <&CPU_SLEEP_0>; | ||
| 45 | }; | 41 | }; |
| 46 | }; | 42 | }; |
| 47 | }; | 43 | }; |
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi index 073610ac0a53..7d94c1fa592a 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi | |||
| @@ -28,33 +28,6 @@ | |||
| 28 | method = "smc"; | 28 | method = "smc"; |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | cpus { | ||
| 32 | #address-cells = <1>; | ||
| 33 | #size-cells = <0>; | ||
| 34 | |||
| 35 | idle_states { | ||
| 36 | entry_method = "arm,pcsi"; | ||
| 37 | |||
| 38 | CPU_SLEEP_0: cpu-sleep-0 { | ||
| 39 | compatible = "arm,idle-state"; | ||
| 40 | local-timer-stop; | ||
| 41 | arm,psci-suspend-param = <0x0010000>; | ||
| 42 | entry-latency-us = <80>; | ||
| 43 | exit-latency-us = <160>; | ||
| 44 | min-residency-us = <320>; | ||
| 45 | }; | ||
| 46 | |||
| 47 | CLUSTER_SLEEP_0: cluster-sleep-0 { | ||
| 48 | compatible = "arm,idle-state"; | ||
| 49 | local-timer-stop; | ||
| 50 | arm,psci-suspend-param = <0x1010000>; | ||
| 51 | entry-latency-us = <500>; | ||
| 52 | exit-latency-us = <1000>; | ||
| 53 | min-residency-us = <2500>; | ||
| 54 | }; | ||
| 55 | }; | ||
| 56 | }; | ||
| 57 | |||
| 58 | ap806 { | 31 | ap806 { |
| 59 | #address-cells = <2>; | 32 | #address-cells = <2>; |
| 60 | #size-cells = <2>; | 33 | #size-cells = <2>; |
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts index 5d6005c9b097..710c5c3d87d3 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts | |||
| @@ -16,8 +16,13 @@ | |||
| 16 | model = "Bananapi BPI-R64"; | 16 | model = "Bananapi BPI-R64"; |
| 17 | compatible = "bananapi,bpi-r64", "mediatek,mt7622"; | 17 | compatible = "bananapi,bpi-r64", "mediatek,mt7622"; |
| 18 | 18 | ||
| 19 | aliases { | ||
| 20 | serial0 = &uart0; | ||
| 21 | }; | ||
| 22 | |||
| 19 | chosen { | 23 | chosen { |
| 20 | bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512"; | 24 | stdout-path = "serial0:115200n8"; |
| 25 | bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512"; | ||
| 21 | }; | 26 | }; |
| 22 | 27 | ||
| 23 | cpus { | 28 | cpus { |
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts index dcad0869b84c..3f783348c66a 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts | |||
| @@ -17,8 +17,13 @@ | |||
| 17 | model = "MediaTek MT7622 RFB1 board"; | 17 | model = "MediaTek MT7622 RFB1 board"; |
| 18 | compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622"; | 18 | compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622"; |
| 19 | 19 | ||
| 20 | aliases { | ||
| 21 | serial0 = &uart0; | ||
| 22 | }; | ||
| 23 | |||
| 20 | chosen { | 24 | chosen { |
| 21 | bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512"; | 25 | stdout-path = "serial0:115200n8"; |
| 26 | bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512"; | ||
| 22 | }; | 27 | }; |
| 23 | 28 | ||
| 24 | cpus { | 29 | cpus { |
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi index fe0c875f1d95..14a1028ca3a6 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi | |||
| @@ -227,16 +227,6 @@ | |||
| 227 | #reset-cells = <1>; | 227 | #reset-cells = <1>; |
| 228 | }; | 228 | }; |
| 229 | 229 | ||
| 230 | timer: timer@10004000 { | ||
| 231 | compatible = "mediatek,mt7622-timer", | ||
| 232 | "mediatek,mt6577-timer"; | ||
| 233 | reg = <0 0x10004000 0 0x80>; | ||
| 234 | interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_LOW>; | ||
| 235 | clocks = <&infracfg CLK_INFRA_APXGPT_PD>, | ||
| 236 | <&topckgen CLK_TOP_RTC>; | ||
| 237 | clock-names = "system-clk", "rtc-clk"; | ||
| 238 | }; | ||
| 239 | |||
| 240 | scpsys: scpsys@10006000 { | 230 | scpsys: scpsys@10006000 { |
| 241 | compatible = "mediatek,mt7622-scpsys", | 231 | compatible = "mediatek,mt7622-scpsys", |
| 242 | "syscon"; | 232 | "syscon"; |
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts index d667eee4e6d0..b3def0358177 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts | |||
| @@ -343,6 +343,12 @@ | |||
| 343 | }; | 343 | }; |
| 344 | }; | 344 | }; |
| 345 | 345 | ||
| 346 | &gcc { | ||
| 347 | protected-clocks = <GCC_QSPI_CORE_CLK>, | ||
| 348 | <GCC_QSPI_CORE_CLK_SRC>, | ||
| 349 | <GCC_QSPI_CNOC_PERIPH_AHB_CLK>; | ||
| 350 | }; | ||
| 351 | |||
| 346 | &i2c10 { | 352 | &i2c10 { |
| 347 | status = "okay"; | 353 | status = "okay"; |
| 348 | clock-frequency = <400000>; | 354 | clock-frequency = <400000>; |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index b96442960aea..f0a5c9531e8b 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
| @@ -35,15 +35,6 @@ | |||
| 35 | #define PCI_IO_SIZE SZ_16M | 35 | #define PCI_IO_SIZE SZ_16M |
| 36 | 36 | ||
| 37 | /* | 37 | /* |
| 38 | * Log2 of the upper bound of the size of a struct page. Used for sizing | ||
| 39 | * the vmemmap region only, does not affect actual memory footprint. | ||
| 40 | * We don't use sizeof(struct page) directly since taking its size here | ||
| 41 | * requires its definition to be available at this point in the inclusion | ||
| 42 | * chain, and it may not be a power of 2 in the first place. | ||
| 43 | */ | ||
| 44 | #define STRUCT_PAGE_MAX_SHIFT 6 | ||
| 45 | |||
| 46 | /* | ||
| 47 | * VMEMMAP_SIZE - allows the whole linear region to be covered by | 38 | * VMEMMAP_SIZE - allows the whole linear region to be covered by |
| 48 | * a struct page array | 39 | * a struct page array |
| 49 | */ | 40 | */ |
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 6b2686d54411..29cdc99688f3 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c | |||
| @@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length, | |||
| 214 | } | 214 | } |
| 215 | 215 | ||
| 216 | memcpy((void *)dst, src_start, length); | 216 | memcpy((void *)dst, src_start, length); |
| 217 | flush_icache_range(dst, dst + length); | 217 | __flush_icache_range(dst, dst + length); |
| 218 | 218 | ||
| 219 | pgdp = pgd_offset_raw(allocator(mask), dst_addr); | 219 | pgdp = pgd_offset_raw(allocator(mask), dst_addr); |
| 220 | if (pgd_none(READ_ONCE(*pgdp))) { | 220 | if (pgd_none(READ_ONCE(*pgdp))) { |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index a3ac26284845..a53704406099 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
| @@ -429,9 +429,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
| 429 | prot, | 429 | prot, |
| 430 | __builtin_return_address(0)); | 430 | __builtin_return_address(0)); |
| 431 | if (addr) { | 431 | if (addr) { |
| 432 | memset(addr, 0, size); | ||
| 433 | if (!coherent) | 432 | if (!coherent) |
| 434 | __dma_flush_area(page_to_virt(page), iosize); | 433 | __dma_flush_area(page_to_virt(page), iosize); |
| 434 | memset(addr, 0, size); | ||
| 435 | } else { | 435 | } else { |
| 436 | iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs); | 436 | iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs); |
| 437 | dma_release_from_contiguous(dev, page, | 437 | dma_release_from_contiguous(dev, page, |
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 9b432d9fcada..0340e45655c6 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
| @@ -610,14 +610,6 @@ void __init mem_init(void) | |||
| 610 | BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); | 610 | BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); |
| 611 | #endif | 611 | #endif |
| 612 | 612 | ||
| 613 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | ||
| 614 | /* | ||
| 615 | * Make sure we chose the upper bound of sizeof(struct page) | ||
| 616 | * correctly when sizing the VMEMMAP array. | ||
| 617 | */ | ||
| 618 | BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); | ||
| 619 | #endif | ||
| 620 | |||
| 621 | if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { | 613 | if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { |
| 622 | extern int sysctl_overcommit_memory; | 614 | extern int sysctl_overcommit_memory; |
| 623 | /* | 615 | /* |
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h index c410aa4fff1a..b2905c0485a7 100644 --- a/arch/csky/include/asm/mmu_context.h +++ b/arch/csky/include/asm/mmu_context.h | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | 16 | ||
| 17 | static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel) | 17 | static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel) |
| 18 | { | 18 | { |
| 19 | pgd &= ~(1<<31); | 19 | pgd -= PAGE_OFFSET; |
| 20 | pgd += PHYS_OFFSET; | 20 | pgd += PHYS_OFFSET; |
| 21 | pgd |= 1; | 21 | pgd |= 1; |
| 22 | setup_pgd(pgd, kernel); | 22 | setup_pgd(pgd, kernel); |
| @@ -29,7 +29,7 @@ static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel) | |||
| 29 | 29 | ||
| 30 | static inline unsigned long tlb_get_pgd(void) | 30 | static inline unsigned long tlb_get_pgd(void) |
| 31 | { | 31 | { |
| 32 | return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1; | 32 | return ((get_pgd() - PHYS_OFFSET) & ~1) + PAGE_OFFSET; |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) | 35 | #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) |
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index d047a09d660f..1085385e1f06 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
| @@ -71,6 +71,13 @@ ifdef CONFIG_MLONGCALLS | |||
| 71 | KBUILD_CFLAGS_KERNEL += -mlong-calls | 71 | KBUILD_CFLAGS_KERNEL += -mlong-calls |
| 72 | endif | 72 | endif |
| 73 | 73 | ||
| 74 | # Without this, "ld -r" results in .text sections that are too big (> 0x40000) | ||
| 75 | # for branches to reach stubs. And multiple .text sections trigger a warning | ||
| 76 | # when creating the sysfs module information section. | ||
| 77 | ifndef CONFIG_64BIT | ||
| 78 | KBUILD_CFLAGS_MODULE += -ffunction-sections | ||
| 79 | endif | ||
| 80 | |||
| 74 | # select which processor to optimise for | 81 | # select which processor to optimise for |
| 75 | cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100 | 82 | cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100 |
| 76 | cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200 | 83 | cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200 |
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 39354365f54a..ed9883169190 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile | |||
| @@ -197,7 +197,7 @@ $(obj)/empty.c: | |||
| 197 | $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S | 197 | $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S |
| 198 | $(Q)cp $< $@ | 198 | $(Q)cp $< $@ |
| 199 | 199 | ||
| 200 | $(obj)/serial.c: $(obj)/autoconf.h | 200 | $(srctree)/$(src)/serial.c: $(obj)/autoconf.h |
| 201 | 201 | ||
| 202 | $(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/% | 202 | $(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/% |
| 203 | $(Q)cp $< $@ | 203 | $(Q)cp $< $@ |
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S index 32dfe6d083f3..9b9d17437373 100644 --- a/arch/powerpc/boot/crt0.S +++ b/arch/powerpc/boot/crt0.S | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | RELA = 7 | 15 | RELA = 7 |
| 16 | RELACOUNT = 0x6ffffff9 | 16 | RELACOUNT = 0x6ffffff9 |
| 17 | 17 | ||
| 18 | .text | 18 | .data |
| 19 | /* A procedure descriptor used when booting this as a COFF file. | 19 | /* A procedure descriptor used when booting this as a COFF file. |
| 20 | * When making COFF, this comes first in the link and we're | 20 | * When making COFF, this comes first in the link and we're |
| 21 | * linked at 0x500000. | 21 | * linked at 0x500000. |
| @@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9 | |||
| 23 | .globl _zimage_start_opd | 23 | .globl _zimage_start_opd |
| 24 | _zimage_start_opd: | 24 | _zimage_start_opd: |
| 25 | .long 0x500000, 0, 0, 0 | 25 | .long 0x500000, 0, 0, 0 |
| 26 | .text | ||
| 27 | b _zimage_start | ||
| 26 | 28 | ||
| 27 | #ifdef __powerpc64__ | 29 | #ifdef __powerpc64__ |
| 28 | .balign 8 | 30 | .balign 8 |
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h index 8bf1b6351716..16a49819da9a 100644 --- a/arch/powerpc/include/asm/perf_event.h +++ b/arch/powerpc/include/asm/perf_event.h | |||
| @@ -26,6 +26,8 @@ | |||
| 26 | #include <asm/ptrace.h> | 26 | #include <asm/ptrace.h> |
| 27 | #include <asm/reg.h> | 27 | #include <asm/reg.h> |
| 28 | 28 | ||
| 29 | #define perf_arch_bpf_user_pt_regs(regs) ®s->user_regs | ||
| 30 | |||
| 29 | /* | 31 | /* |
| 30 | * Overload regs->result to specify whether we should use the MSR (result | 32 | * Overload regs->result to specify whether we should use the MSR (result |
| 31 | * is zero) or the SIAR (result is non zero). | 33 | * is zero) or the SIAR (result is non zero). |
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild index a658091a19f9..3712152206f3 100644 --- a/arch/powerpc/include/uapi/asm/Kbuild +++ b/arch/powerpc/include/uapi/asm/Kbuild | |||
| @@ -1,7 +1,6 @@ | |||
| 1 | # UAPI Header export list | 1 | # UAPI Header export list |
| 2 | include include/uapi/asm-generic/Kbuild.asm | 2 | include include/uapi/asm-generic/Kbuild.asm |
| 3 | 3 | ||
| 4 | generic-y += bpf_perf_event.h | ||
| 5 | generic-y += param.h | 4 | generic-y += param.h |
| 6 | generic-y += poll.h | 5 | generic-y += poll.h |
| 7 | generic-y += resource.h | 6 | generic-y += resource.h |
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000000..b551b741653d --- /dev/null +++ b/arch/powerpc/include/uapi/asm/bpf_perf_event.h | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ | ||
| 3 | #define _UAPI__ASM_BPF_PERF_EVENT_H__ | ||
| 4 | |||
| 5 | #include <asm/ptrace.h> | ||
| 6 | |||
| 7 | typedef struct user_pt_regs bpf_user_pt_regs_t; | ||
| 8 | |||
| 9 | #endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ | ||
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 33b34a58fc62..5b9dce17f0c9 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c | |||
| @@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void) | |||
| 372 | 372 | ||
| 373 | /* Now find out if one of these is out firmware console */ | 373 | /* Now find out if one of these is out firmware console */ |
| 374 | path = of_get_property(of_chosen, "linux,stdout-path", NULL); | 374 | path = of_get_property(of_chosen, "linux,stdout-path", NULL); |
| 375 | if (path == NULL) | ||
| 376 | path = of_get_property(of_chosen, "stdout-path", NULL); | ||
| 375 | if (path != NULL) { | 377 | if (path != NULL) { |
| 376 | stdout = of_find_node_by_path(path); | 378 | stdout = of_find_node_by_path(path); |
| 377 | if (stdout) | 379 | if (stdout) |
| @@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void) | |||
| 595 | /* We are getting a weird phandle from OF ... */ | 597 | /* We are getting a weird phandle from OF ... */ |
| 596 | /* ... So use the full path instead */ | 598 | /* ... So use the full path instead */ |
| 597 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); | 599 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); |
| 600 | if (name == NULL) | ||
| 601 | name = of_get_property(of_chosen, "stdout-path", NULL); | ||
| 598 | if (name == NULL) { | 602 | if (name == NULL) { |
| 599 | DBG(" no linux,stdout-path !\n"); | 603 | DBG(" no stdout-path !\n"); |
| 600 | return -ENODEV; | 604 | return -ENODEV; |
| 601 | } | 605 | } |
| 602 | prom_stdout = of_find_node_by_path(name); | 606 | prom_stdout = of_find_node_by_path(name); |
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c index dab616a33b8d..f2197654be07 100644 --- a/arch/powerpc/kernel/msi.c +++ b/arch/powerpc/kernel/msi.c | |||
| @@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev) | |||
| 34 | { | 34 | { |
| 35 | struct pci_controller *phb = pci_bus_to_host(dev->bus); | 35 | struct pci_controller *phb = pci_bus_to_host(dev->bus); |
| 36 | 36 | ||
| 37 | phb->controller_ops.teardown_msi_irqs(dev); | 37 | /* |
| 38 | * We can be called even when arch_setup_msi_irqs() returns -ENOSYS, | ||
| 39 | * so check the pointer again. | ||
| 40 | */ | ||
| 41 | if (phb->controller_ops.teardown_msi_irqs) | ||
| 42 | phb->controller_ops.teardown_msi_irqs(dev); | ||
| 38 | } | 43 | } |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index afb819f4ca68..714c3480c52d 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
| @@ -3266,12 +3266,17 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
| 3266 | user_exit(); | 3266 | user_exit(); |
| 3267 | 3267 | ||
| 3268 | if (test_thread_flag(TIF_SYSCALL_EMU)) { | 3268 | if (test_thread_flag(TIF_SYSCALL_EMU)) { |
| 3269 | ptrace_report_syscall(regs); | ||
| 3270 | /* | 3269 | /* |
| 3270 | * A nonzero return code from tracehook_report_syscall_entry() | ||
| 3271 | * tells us to prevent the syscall execution, but we are not | ||
| 3272 | * going to execute it anyway. | ||
| 3273 | * | ||
| 3271 | * Returning -1 will skip the syscall execution. We want to | 3274 | * Returning -1 will skip the syscall execution. We want to |
| 3272 | * avoid clobbering any register also, thus, not 'gotoing' | 3275 | * avoid clobbering any register also, thus, not 'gotoing' |
| 3273 | * skip label. | 3276 | * skip label. |
| 3274 | */ | 3277 | */ |
| 3278 | if (tracehook_report_syscall_entry(regs)) | ||
| 3279 | ; | ||
| 3275 | return -1; | 3280 | return -1; |
| 3276 | } | 3281 | } |
| 3277 | 3282 | ||
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c index 2b74f8adf4d0..6aa41669ac1a 100644 --- a/arch/powerpc/mm/dump_linuxpagetables.c +++ b/arch/powerpc/mm/dump_linuxpagetables.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/hugetlb.h> | 19 | #include <linux/hugetlb.h> |
| 20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
| 21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
| 22 | #include <linux/highmem.h> | ||
| 22 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
| 23 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
| 24 | #include <asm/fixmap.h> | 25 | #include <asm/fixmap.h> |
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 7a9886f98b0c..a5091c034747 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
| @@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, | |||
| 188 | pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); | 188 | pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); |
| 189 | 189 | ||
| 190 | for (; start < end; start += page_size) { | 190 | for (; start < end; start += page_size) { |
| 191 | void *p; | 191 | void *p = NULL; |
| 192 | int rc; | 192 | int rc; |
| 193 | 193 | ||
| 194 | if (vmemmap_populated(start, page_size)) | 194 | if (vmemmap_populated(start, page_size)) |
| 195 | continue; | 195 | continue; |
| 196 | 196 | ||
| 197 | /* | ||
| 198 | * Allocate from the altmap first if we have one. This may | ||
| 199 | * fail due to alignment issues when using 16MB hugepages, so | ||
| 200 | * fall back to system memory if the altmap allocation fail. | ||
| 201 | */ | ||
| 197 | if (altmap) | 202 | if (altmap) |
| 198 | p = altmap_alloc_block_buf(page_size, altmap); | 203 | p = altmap_alloc_block_buf(page_size, altmap); |
| 199 | else | 204 | if (!p) |
| 200 | p = vmemmap_alloc_block_buf(page_size, node); | 205 | p = vmemmap_alloc_block_buf(page_size, node); |
| 201 | if (!p) | 206 | if (!p) |
| 202 | return -ENOMEM; | 207 | return -ENOMEM; |
| @@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, | |||
| 255 | { | 260 | { |
| 256 | unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; | 261 | unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; |
| 257 | unsigned long page_order = get_order(page_size); | 262 | unsigned long page_order = get_order(page_size); |
| 263 | unsigned long alt_start = ~0, alt_end = ~0; | ||
| 264 | unsigned long base_pfn; | ||
| 258 | 265 | ||
| 259 | start = _ALIGN_DOWN(start, page_size); | 266 | start = _ALIGN_DOWN(start, page_size); |
| 267 | if (altmap) { | ||
| 268 | alt_start = altmap->base_pfn; | ||
| 269 | alt_end = altmap->base_pfn + altmap->reserve + | ||
| 270 | altmap->free + altmap->alloc + altmap->align; | ||
| 271 | } | ||
| 260 | 272 | ||
| 261 | pr_debug("vmemmap_free %lx...%lx\n", start, end); | 273 | pr_debug("vmemmap_free %lx...%lx\n", start, end); |
| 262 | 274 | ||
| @@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, | |||
| 280 | page = pfn_to_page(addr >> PAGE_SHIFT); | 292 | page = pfn_to_page(addr >> PAGE_SHIFT); |
| 281 | section_base = pfn_to_page(vmemmap_section_start(start)); | 293 | section_base = pfn_to_page(vmemmap_section_start(start)); |
| 282 | nr_pages = 1 << page_order; | 294 | nr_pages = 1 << page_order; |
| 295 | base_pfn = PHYS_PFN(addr); | ||
| 283 | 296 | ||
| 284 | if (altmap) { | 297 | if (base_pfn >= alt_start && base_pfn < alt_end) { |
| 285 | vmem_altmap_free(altmap, nr_pages); | 298 | vmem_altmap_free(altmap, nr_pages); |
| 286 | } else if (PageReserved(page)) { | 299 | } else if (PageReserved(page)) { |
| 287 | /* allocated from bootmem */ | 300 | /* allocated from bootmem */ |
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 17482f5de3e2..9393e231cbc2 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c | |||
| @@ -891,6 +891,55 @@ cond_branch: | |||
| 891 | return 0; | 891 | return 0; |
| 892 | } | 892 | } |
| 893 | 893 | ||
| 894 | /* Fix the branch target addresses for subprog calls */ | ||
| 895 | static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image, | ||
| 896 | struct codegen_context *ctx, u32 *addrs) | ||
| 897 | { | ||
| 898 | const struct bpf_insn *insn = fp->insnsi; | ||
| 899 | bool func_addr_fixed; | ||
| 900 | u64 func_addr; | ||
| 901 | u32 tmp_idx; | ||
| 902 | int i, ret; | ||
| 903 | |||
| 904 | for (i = 0; i < fp->len; i++) { | ||
| 905 | /* | ||
| 906 | * During the extra pass, only the branch target addresses for | ||
| 907 | * the subprog calls need to be fixed. All other instructions | ||
| 908 | * can left untouched. | ||
| 909 | * | ||
| 910 | * The JITed image length does not change because we already | ||
| 911 | * ensure that the JITed instruction sequence for these calls | ||
| 912 | * are of fixed length by padding them with NOPs. | ||
| 913 | */ | ||
| 914 | if (insn[i].code == (BPF_JMP | BPF_CALL) && | ||
| 915 | insn[i].src_reg == BPF_PSEUDO_CALL) { | ||
| 916 | ret = bpf_jit_get_func_addr(fp, &insn[i], true, | ||
| 917 | &func_addr, | ||
| 918 | &func_addr_fixed); | ||
| 919 | if (ret < 0) | ||
| 920 | return ret; | ||
| 921 | |||
| 922 | /* | ||
| 923 | * Save ctx->idx as this would currently point to the | ||
| 924 | * end of the JITed image and set it to the offset of | ||
| 925 | * the instruction sequence corresponding to the | ||
| 926 | * subprog call temporarily. | ||
| 927 | */ | ||
| 928 | tmp_idx = ctx->idx; | ||
| 929 | ctx->idx = addrs[i] / 4; | ||
| 930 | bpf_jit_emit_func_call_rel(image, ctx, func_addr); | ||
| 931 | |||
| 932 | /* | ||
| 933 | * Restore ctx->idx here. This is safe as the length | ||
| 934 | * of the JITed sequence remains unchanged. | ||
| 935 | */ | ||
| 936 | ctx->idx = tmp_idx; | ||
| 937 | } | ||
| 938 | } | ||
| 939 | |||
| 940 | return 0; | ||
| 941 | } | ||
| 942 | |||
| 894 | struct powerpc64_jit_data { | 943 | struct powerpc64_jit_data { |
| 895 | struct bpf_binary_header *header; | 944 | struct bpf_binary_header *header; |
| 896 | u32 *addrs; | 945 | u32 *addrs; |
| @@ -989,6 +1038,22 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) | |||
| 989 | skip_init_ctx: | 1038 | skip_init_ctx: |
| 990 | code_base = (u32 *)(image + FUNCTION_DESCR_SIZE); | 1039 | code_base = (u32 *)(image + FUNCTION_DESCR_SIZE); |
| 991 | 1040 | ||
| 1041 | if (extra_pass) { | ||
| 1042 | /* | ||
| 1043 | * Do not touch the prologue and epilogue as they will remain | ||
| 1044 | * unchanged. Only fix the branch target address for subprog | ||
| 1045 | * calls in the body. | ||
| 1046 | * | ||
| 1047 | * This does not change the offsets and lengths of the subprog | ||
| 1048 | * call instruction sequences and hence, the size of the JITed | ||
| 1049 | * image as well. | ||
| 1050 | */ | ||
| 1051 | bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs); | ||
| 1052 | |||
| 1053 | /* There is no need to perform the usual passes. */ | ||
| 1054 | goto skip_codegen_passes; | ||
| 1055 | } | ||
| 1056 | |||
| 992 | /* Code generation passes 1-2 */ | 1057 | /* Code generation passes 1-2 */ |
| 993 | for (pass = 1; pass < 3; pass++) { | 1058 | for (pass = 1; pass < 3; pass++) { |
| 994 | /* Now build the prologue, body code & epilogue for real. */ | 1059 | /* Now build the prologue, body code & epilogue for real. */ |
| @@ -1002,6 +1067,7 @@ skip_init_ctx: | |||
| 1002 | proglen - (cgctx.idx * 4), cgctx.seen); | 1067 | proglen - (cgctx.idx * 4), cgctx.seen); |
| 1003 | } | 1068 | } |
| 1004 | 1069 | ||
| 1070 | skip_codegen_passes: | ||
| 1005 | if (bpf_jit_enable > 1) | 1071 | if (bpf_jit_enable > 1) |
| 1006 | /* | 1072 | /* |
| 1007 | * Note that we output the base address of the code_base | 1073 | * Note that we output the base address of the code_base |
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 2e4bd32154b5..472b784f01eb 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
| @@ -140,8 +140,7 @@ config IBMEBUS | |||
| 140 | Bus device driver for GX bus based adapters. | 140 | Bus device driver for GX bus based adapters. |
| 141 | 141 | ||
| 142 | config PAPR_SCM | 142 | config PAPR_SCM |
| 143 | depends on PPC_PSERIES && MEMORY_HOTPLUG | 143 | depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM |
| 144 | select LIBNVDIMM | ||
| 145 | tristate "Support for the PAPR Storage Class Memory interface" | 144 | tristate "Support for the PAPR Storage Class Memory interface" |
| 146 | help | 145 | help |
| 147 | Enable access to hypervisor provided storage class memory. | 146 | Enable access to hypervisor provided storage class memory. |
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index ee9372b65ca5..7d6457ab5d34 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c | |||
| @@ -55,7 +55,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) | |||
| 55 | do { | 55 | do { |
| 56 | rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, | 56 | rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, |
| 57 | p->blocks, BIND_ANY_ADDR, token); | 57 | p->blocks, BIND_ANY_ADDR, token); |
| 58 | token = be64_to_cpu(ret[0]); | 58 | token = ret[0]; |
| 59 | cond_resched(); | 59 | cond_resched(); |
| 60 | } while (rc == H_BUSY); | 60 | } while (rc == H_BUSY); |
| 61 | 61 | ||
| @@ -64,7 +64,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) | |||
| 64 | return -ENXIO; | 64 | return -ENXIO; |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | p->bound_addr = be64_to_cpu(ret[1]); | 67 | p->bound_addr = ret[1]; |
| 68 | 68 | ||
| 69 | dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); | 69 | dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); |
| 70 | 70 | ||
| @@ -82,7 +82,7 @@ static int drc_pmem_unbind(struct papr_scm_priv *p) | |||
| 82 | do { | 82 | do { |
| 83 | rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index, | 83 | rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index, |
| 84 | p->bound_addr, p->blocks, token); | 84 | p->bound_addr, p->blocks, token); |
| 85 | token = be64_to_cpu(ret); | 85 | token = ret[0]; |
| 86 | cond_resched(); | 86 | cond_resched(); |
| 87 | } while (rc == H_BUSY); | 87 | } while (rc == H_BUSY); |
| 88 | 88 | ||
| @@ -223,6 +223,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) | |||
| 223 | goto err; | 223 | goto err; |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | if (nvdimm_bus_check_dimm_count(p->bus, 1)) | ||
| 227 | goto err; | ||
| 228 | |||
| 226 | /* now add the region */ | 229 | /* now add the region */ |
| 227 | 230 | ||
| 228 | memset(&mapping, 0, sizeof(mapping)); | 231 | memset(&mapping, 0, sizeof(mapping)); |
| @@ -257,9 +260,12 @@ err: nvdimm_bus_unregister(p->bus); | |||
| 257 | 260 | ||
| 258 | static int papr_scm_probe(struct platform_device *pdev) | 261 | static int papr_scm_probe(struct platform_device *pdev) |
| 259 | { | 262 | { |
| 260 | uint32_t drc_index, metadata_size, unit_cap[2]; | ||
| 261 | struct device_node *dn = pdev->dev.of_node; | 263 | struct device_node *dn = pdev->dev.of_node; |
| 264 | u32 drc_index, metadata_size; | ||
| 265 | u64 blocks, block_size; | ||
| 262 | struct papr_scm_priv *p; | 266 | struct papr_scm_priv *p; |
| 267 | const char *uuid_str; | ||
| 268 | u64 uuid[2]; | ||
| 263 | int rc; | 269 | int rc; |
| 264 | 270 | ||
| 265 | /* check we have all the required DT properties */ | 271 | /* check we have all the required DT properties */ |
| @@ -268,8 +274,18 @@ static int papr_scm_probe(struct platform_device *pdev) | |||
| 268 | return -ENODEV; | 274 | return -ENODEV; |
| 269 | } | 275 | } |
| 270 | 276 | ||
| 271 | if (of_property_read_u32_array(dn, "ibm,unit-capacity", unit_cap, 2)) { | 277 | if (of_property_read_u64(dn, "ibm,block-size", &block_size)) { |
| 272 | dev_err(&pdev->dev, "%pOF: missing unit-capacity!\n", dn); | 278 | dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn); |
| 279 | return -ENODEV; | ||
| 280 | } | ||
| 281 | |||
| 282 | if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) { | ||
| 283 | dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn); | ||
| 284 | return -ENODEV; | ||
| 285 | } | ||
| 286 | |||
| 287 | if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) { | ||
| 288 | dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn); | ||
| 273 | return -ENODEV; | 289 | return -ENODEV; |
| 274 | } | 290 | } |
| 275 | 291 | ||
| @@ -282,8 +298,13 @@ static int papr_scm_probe(struct platform_device *pdev) | |||
| 282 | 298 | ||
| 283 | p->dn = dn; | 299 | p->dn = dn; |
| 284 | p->drc_index = drc_index; | 300 | p->drc_index = drc_index; |
| 285 | p->block_size = unit_cap[0]; | 301 | p->block_size = block_size; |
| 286 | p->blocks = unit_cap[1]; | 302 | p->blocks = blocks; |
| 303 | |||
| 304 | /* We just need to ensure that set cookies are unique across */ | ||
| 305 | uuid_parse(uuid_str, (uuid_t *) uuid); | ||
| 306 | p->nd_set.cookie1 = uuid[0]; | ||
| 307 | p->nd_set.cookie2 = uuid[1]; | ||
| 287 | 308 | ||
| 288 | /* might be zero */ | 309 | /* might be zero */ |
| 289 | p->metadata_size = metadata_size; | 310 | p->metadata_size = metadata_size; |
| @@ -296,7 +317,7 @@ static int papr_scm_probe(struct platform_device *pdev) | |||
| 296 | 317 | ||
| 297 | /* setup the resource for the newly bound range */ | 318 | /* setup the resource for the newly bound range */ |
| 298 | p->res.start = p->bound_addr; | 319 | p->res.start = p->bound_addr; |
| 299 | p->res.end = p->bound_addr + p->blocks * p->block_size; | 320 | p->res.end = p->bound_addr + p->blocks * p->block_size - 1; |
| 300 | p->res.name = pdev->name; | 321 | p->res.name = pdev->name; |
| 301 | p->res.flags = IORESOURCE_MEM; | 322 | p->res.flags = IORESOURCE_MEM; |
| 302 | 323 | ||
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 98cb8c802b1a..4f7f235f15f8 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #define __IO_PREFIX generic | 24 | #define __IO_PREFIX generic |
| 25 | #include <asm/io_generic.h> | 25 | #include <asm/io_generic.h> |
| 26 | #include <asm/io_trapped.h> | 26 | #include <asm/io_trapped.h> |
| 27 | #include <asm-generic/pci_iomap.h> | ||
| 27 | #include <mach/mangle-port.h> | 28 | #include <mach/mangle-port.h> |
| 28 | 29 | ||
| 29 | #define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v)) | 30 | #define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v)) |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 40d008b0bd3e..05eb016fc41b 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
| @@ -108,10 +108,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize, | |||
| 108 | /* Allocate and initialize the free area map. */ | 108 | /* Allocate and initialize the free area map. */ |
| 109 | sz = num_tsb_entries / 8; | 109 | sz = num_tsb_entries / 8; |
| 110 | sz = (sz + 7UL) & ~7UL; | 110 | sz = (sz + 7UL) & ~7UL; |
| 111 | iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node); | 111 | iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node); |
| 112 | if (!iommu->tbl.map) | 112 | if (!iommu->tbl.map) |
| 113 | return -ENOMEM; | 113 | return -ENOMEM; |
| 114 | memset(iommu->tbl.map, 0, sz); | ||
| 115 | 114 | ||
| 116 | iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT, | 115 | iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT, |
| 117 | (tlb_type != hypervisor ? iommu_flushall : NULL), | 116 | (tlb_type != hypervisor ? iommu_flushall : NULL), |
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 4c5b3fcbed94..e800ce13cc6e 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c | |||
| @@ -683,6 +683,7 @@ void do_signal32(struct pt_regs * regs) | |||
| 683 | regs->tpc -= 4; | 683 | regs->tpc -= 4; |
| 684 | regs->tnpc -= 4; | 684 | regs->tnpc -= 4; |
| 685 | pt_regs_clear_syscall(regs); | 685 | pt_regs_clear_syscall(regs); |
| 686 | /* fall through */ | ||
| 686 | case ERESTART_RESTARTBLOCK: | 687 | case ERESTART_RESTARTBLOCK: |
| 687 | regs->u_regs[UREG_G1] = __NR_restart_syscall; | 688 | regs->u_regs[UREG_G1] = __NR_restart_syscall; |
| 688 | regs->tpc -= 4; | 689 | regs->tpc -= 4; |
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 5665261cee37..83953780ca01 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
| @@ -508,6 +508,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) | |||
| 508 | regs->pc -= 4; | 508 | regs->pc -= 4; |
| 509 | regs->npc -= 4; | 509 | regs->npc -= 4; |
| 510 | pt_regs_clear_syscall(regs); | 510 | pt_regs_clear_syscall(regs); |
| 511 | /* fall through */ | ||
| 511 | case ERESTART_RESTARTBLOCK: | 512 | case ERESTART_RESTARTBLOCK: |
| 512 | regs->u_regs[UREG_G1] = __NR_restart_syscall; | 513 | regs->u_regs[UREG_G1] = __NR_restart_syscall; |
| 513 | regs->pc -= 4; | 514 | regs->pc -= 4; |
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index e9de1803a22e..ca70787efd8e 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
| @@ -533,6 +533,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) | |||
| 533 | regs->tpc -= 4; | 533 | regs->tpc -= 4; |
| 534 | regs->tnpc -= 4; | 534 | regs->tnpc -= 4; |
| 535 | pt_regs_clear_syscall(regs); | 535 | pt_regs_clear_syscall(regs); |
| 536 | /* fall through */ | ||
| 536 | case ERESTART_RESTARTBLOCK: | 537 | case ERESTART_RESTARTBLOCK: |
| 537 | regs->u_regs[UREG_G1] = __NR_restart_syscall; | 538 | regs->u_regs[UREG_G1] = __NR_restart_syscall; |
| 538 | regs->tpc -= 4; | 539 | regs->tpc -= 4; |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index f5d7f4134524..75ef499a66e2 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
| @@ -220,9 +220,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables | |||
| 220 | 220 | ||
| 221 | # Avoid indirect branches in kernel to deal with Spectre | 221 | # Avoid indirect branches in kernel to deal with Spectre |
| 222 | ifdef CONFIG_RETPOLINE | 222 | ifdef CONFIG_RETPOLINE |
| 223 | ifeq ($(RETPOLINE_CFLAGS),) | ||
| 224 | $(error You are building kernel with non-retpoline compiler, please update your compiler.) | ||
| 225 | endif | ||
| 226 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) | 223 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) |
| 227 | endif | 224 | endif |
| 228 | 225 | ||
| @@ -307,6 +304,13 @@ ifndef CC_HAVE_ASM_GOTO | |||
| 307 | @echo Compiler lacks asm-goto support. | 304 | @echo Compiler lacks asm-goto support. |
| 308 | @exit 1 | 305 | @exit 1 |
| 309 | endif | 306 | endif |
| 307 | ifdef CONFIG_RETPOLINE | ||
| 308 | ifeq ($(RETPOLINE_CFLAGS),) | ||
| 309 | @echo "You are building kernel with non-retpoline compiler." >&2 | ||
| 310 | @echo "Please update your compiler." >&2 | ||
| 311 | @false | ||
| 312 | endif | ||
| 313 | endif | ||
| 310 | 314 | ||
| 311 | archclean: | 315 | archclean: |
| 312 | $(Q)rm -rf $(objtree)/arch/i386 | 316 | $(Q)rm -rf $(objtree)/arch/i386 |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 8b4c5e001157..544ac4fafd11 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | |||
| 1 | /* ----------------------------------------------------------------------- | 2 | /* ----------------------------------------------------------------------- |
| 2 | * | 3 | * |
| 3 | * Copyright 2011 Intel Corporation; author Matt Fleming | 4 | * Copyright 2011 Intel Corporation; author Matt Fleming |
| @@ -634,37 +635,54 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext, | |||
| 634 | return status; | 635 | return status; |
| 635 | } | 636 | } |
| 636 | 637 | ||
| 638 | static efi_status_t allocate_e820(struct boot_params *params, | ||
| 639 | struct setup_data **e820ext, | ||
| 640 | u32 *e820ext_size) | ||
| 641 | { | ||
| 642 | unsigned long map_size, desc_size, buff_size; | ||
| 643 | struct efi_boot_memmap boot_map; | ||
| 644 | efi_memory_desc_t *map; | ||
| 645 | efi_status_t status; | ||
| 646 | __u32 nr_desc; | ||
| 647 | |||
| 648 | boot_map.map = ↦ | ||
| 649 | boot_map.map_size = &map_size; | ||
| 650 | boot_map.desc_size = &desc_size; | ||
| 651 | boot_map.desc_ver = NULL; | ||
| 652 | boot_map.key_ptr = NULL; | ||
| 653 | boot_map.buff_size = &buff_size; | ||
| 654 | |||
| 655 | status = efi_get_memory_map(sys_table, &boot_map); | ||
| 656 | if (status != EFI_SUCCESS) | ||
| 657 | return status; | ||
| 658 | |||
| 659 | nr_desc = buff_size / desc_size; | ||
| 660 | |||
| 661 | if (nr_desc > ARRAY_SIZE(params->e820_table)) { | ||
| 662 | u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table); | ||
| 663 | |||
| 664 | status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size); | ||
| 665 | if (status != EFI_SUCCESS) | ||
| 666 | return status; | ||
| 667 | } | ||
| 668 | |||
| 669 | return EFI_SUCCESS; | ||
| 670 | } | ||
| 671 | |||
| 637 | struct exit_boot_struct { | 672 | struct exit_boot_struct { |
| 638 | struct boot_params *boot_params; | 673 | struct boot_params *boot_params; |
| 639 | struct efi_info *efi; | 674 | struct efi_info *efi; |
| 640 | struct setup_data *e820ext; | ||
| 641 | __u32 e820ext_size; | ||
| 642 | }; | 675 | }; |
| 643 | 676 | ||
| 644 | static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, | 677 | static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, |
| 645 | struct efi_boot_memmap *map, | 678 | struct efi_boot_memmap *map, |
| 646 | void *priv) | 679 | void *priv) |
| 647 | { | 680 | { |
| 648 | static bool first = true; | ||
| 649 | const char *signature; | 681 | const char *signature; |
| 650 | __u32 nr_desc; | 682 | __u32 nr_desc; |
| 651 | efi_status_t status; | 683 | efi_status_t status; |
| 652 | struct exit_boot_struct *p = priv; | 684 | struct exit_boot_struct *p = priv; |
| 653 | 685 | ||
| 654 | if (first) { | ||
| 655 | nr_desc = *map->buff_size / *map->desc_size; | ||
| 656 | if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) { | ||
| 657 | u32 nr_e820ext = nr_desc - | ||
| 658 | ARRAY_SIZE(p->boot_params->e820_table); | ||
| 659 | |||
| 660 | status = alloc_e820ext(nr_e820ext, &p->e820ext, | ||
| 661 | &p->e820ext_size); | ||
| 662 | if (status != EFI_SUCCESS) | ||
| 663 | return status; | ||
| 664 | } | ||
| 665 | first = false; | ||
| 666 | } | ||
| 667 | |||
| 668 | signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE | 686 | signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE |
| 669 | : EFI32_LOADER_SIGNATURE; | 687 | : EFI32_LOADER_SIGNATURE; |
| 670 | memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32)); | 688 | memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32)); |
| @@ -687,8 +705,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) | |||
| 687 | { | 705 | { |
| 688 | unsigned long map_sz, key, desc_size, buff_size; | 706 | unsigned long map_sz, key, desc_size, buff_size; |
| 689 | efi_memory_desc_t *mem_map; | 707 | efi_memory_desc_t *mem_map; |
| 690 | struct setup_data *e820ext; | 708 | struct setup_data *e820ext = NULL; |
| 691 | __u32 e820ext_size; | 709 | __u32 e820ext_size = 0; |
| 692 | efi_status_t status; | 710 | efi_status_t status; |
| 693 | __u32 desc_version; | 711 | __u32 desc_version; |
| 694 | struct efi_boot_memmap map; | 712 | struct efi_boot_memmap map; |
| @@ -702,8 +720,10 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) | |||
| 702 | map.buff_size = &buff_size; | 720 | map.buff_size = &buff_size; |
| 703 | priv.boot_params = boot_params; | 721 | priv.boot_params = boot_params; |
| 704 | priv.efi = &boot_params->efi_info; | 722 | priv.efi = &boot_params->efi_info; |
| 705 | priv.e820ext = NULL; | 723 | |
| 706 | priv.e820ext_size = 0; | 724 | status = allocate_e820(boot_params, &e820ext, &e820ext_size); |
| 725 | if (status != EFI_SUCCESS) | ||
| 726 | return status; | ||
| 707 | 727 | ||
| 708 | /* Might as well exit boot services now */ | 728 | /* Might as well exit boot services now */ |
| 709 | status = efi_exit_boot_services(sys_table, handle, &map, &priv, | 729 | status = efi_exit_boot_services(sys_table, handle, &map, &priv, |
| @@ -711,9 +731,6 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) | |||
| 711 | if (status != EFI_SUCCESS) | 731 | if (status != EFI_SUCCESS) |
| 712 | return status; | 732 | return status; |
| 713 | 733 | ||
| 714 | e820ext = priv.e820ext; | ||
| 715 | e820ext_size = priv.e820ext_size; | ||
| 716 | |||
| 717 | /* Historic? */ | 734 | /* Historic? */ |
| 718 | boot_params->alt_mem_k = 32 * 1024; | 735 | boot_params->alt_mem_k = 32 * 1024; |
| 719 | 736 | ||
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index ce25d84023c0..1f0efdb7b629 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
| @@ -566,6 +566,7 @@ ENTRY(interrupt_entry) | |||
| 566 | 566 | ||
| 567 | ret | 567 | ret |
| 568 | END(interrupt_entry) | 568 | END(interrupt_entry) |
| 569 | _ASM_NOKPROBE(interrupt_entry) | ||
| 569 | 570 | ||
| 570 | 571 | ||
| 571 | /* Interrupt entry/exit. */ | 572 | /* Interrupt entry/exit. */ |
| @@ -766,6 +767,7 @@ native_irq_return_ldt: | |||
| 766 | jmp native_irq_return_iret | 767 | jmp native_irq_return_iret |
| 767 | #endif | 768 | #endif |
| 768 | END(common_interrupt) | 769 | END(common_interrupt) |
| 770 | _ASM_NOKPROBE(common_interrupt) | ||
| 769 | 771 | ||
| 770 | /* | 772 | /* |
| 771 | * APIC interrupts. | 773 | * APIC interrupts. |
| @@ -780,6 +782,7 @@ ENTRY(\sym) | |||
| 780 | call \do_sym /* rdi points to pt_regs */ | 782 | call \do_sym /* rdi points to pt_regs */ |
| 781 | jmp ret_from_intr | 783 | jmp ret_from_intr |
| 782 | END(\sym) | 784 | END(\sym) |
| 785 | _ASM_NOKPROBE(\sym) | ||
| 783 | .endm | 786 | .endm |
| 784 | 787 | ||
| 785 | /* Make sure APIC interrupt handlers end up in the irqentry section: */ | 788 | /* Make sure APIC interrupt handlers end up in the irqentry section: */ |
| @@ -960,6 +963,7 @@ ENTRY(\sym) | |||
| 960 | 963 | ||
| 961 | jmp error_exit | 964 | jmp error_exit |
| 962 | .endif | 965 | .endif |
| 966 | _ASM_NOKPROBE(\sym) | ||
| 963 | END(\sym) | 967 | END(\sym) |
| 964 | .endm | 968 | .endm |
| 965 | 969 | ||
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 141d415a8c80..0624bf2266fd 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile | |||
| @@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) | |||
| 47 | CPPFLAGS_vdso.lds += -P -C | 47 | CPPFLAGS_vdso.lds += -P -C |
| 48 | 48 | ||
| 49 | VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \ | 49 | VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \ |
| 50 | -z max-page-size=4096 -z common-page-size=4096 | 50 | -z max-page-size=4096 |
| 51 | 51 | ||
| 52 | $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE | 52 | $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE |
| 53 | $(call if_changed,vdso) | 53 | $(call if_changed,vdso) |
| @@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg | |||
| 98 | 98 | ||
| 99 | CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds) | 99 | CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds) |
| 100 | VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \ | 100 | VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \ |
| 101 | -z max-page-size=4096 -z common-page-size=4096 | 101 | -z max-page-size=4096 |
| 102 | 102 | ||
| 103 | # x32-rebranded versions | 103 | # x32-rebranded versions |
| 104 | vobjx32s-y := $(vobjs-y:.o=-x32.o) | 104 | vobjx32s-y := $(vobjs-y:.o=-x32.o) |
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index a07ffd23e4dd..f6f6ef436599 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h | |||
| @@ -36,6 +36,7 @@ static void sanitize_boot_params(struct boot_params *boot_params) | |||
| 36 | */ | 36 | */ |
| 37 | if (boot_params->sentinel) { | 37 | if (boot_params->sentinel) { |
| 38 | /* fields in boot_params are left uninitialized, clear them */ | 38 | /* fields in boot_params are left uninitialized, clear them */ |
| 39 | boot_params->acpi_rsdp_addr = 0; | ||
| 39 | memset(&boot_params->ext_ramdisk_image, 0, | 40 | memset(&boot_params->ext_ramdisk_image, 0, |
| 40 | (char *)&boot_params->efi_info - | 41 | (char *)&boot_params->efi_info - |
| 41 | (char *)&boot_params->ext_ramdisk_image); | 42 | (char *)&boot_params->ext_ramdisk_image); |
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 40b16b270656..6adf6e6c2933 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c | |||
| @@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) | |||
| 189 | int len = 0, ret; | 189 | int len = 0, ret; |
| 190 | 190 | ||
| 191 | while (len < RELATIVEJUMP_SIZE) { | 191 | while (len < RELATIVEJUMP_SIZE) { |
| 192 | ret = __copy_instruction(dest + len, src + len, real, &insn); | 192 | ret = __copy_instruction(dest + len, src + len, real + len, &insn); |
| 193 | if (!ret || !can_boost(&insn, src + len)) | 193 | if (!ret || !can_boost(&insn, src + len)) |
| 194 | return -EINVAL; | 194 | return -EINVAL; |
| 195 | len += ret; | 195 | len += ret; |
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c index 7476b3b097e1..7138bc7a265c 100644 --- a/arch/x86/platform/efi/early_printk.c +++ b/arch/x86/platform/efi/early_printk.c | |||
| @@ -183,7 +183,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num) | |||
| 183 | num--; | 183 | num--; |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | if (efi_x >= si->lfb_width) { | 186 | if (efi_x + font->width > si->lfb_width) { |
| 187 | efi_x = 0; | 187 | efi_x = 0; |
| 188 | efi_y += font->height; | 188 | efi_y += font->height; |
| 189 | } | 189 | } |
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 3a27d31fcda6..97337214bec4 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c | |||
| @@ -638,7 +638,7 @@ static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd) | |||
| 638 | bfqd->queue_weights_tree.rb_node->rb_right) | 638 | bfqd->queue_weights_tree.rb_node->rb_right) |
| 639 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | 639 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 640 | ) || | 640 | ) || |
| 641 | (bfqd->num_active_groups > 0 | 641 | (bfqd->num_groups_with_pending_reqs > 0 |
| 642 | #endif | 642 | #endif |
| 643 | ); | 643 | ); |
| 644 | } | 644 | } |
| @@ -802,7 +802,21 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd, | |||
| 802 | */ | 802 | */ |
| 803 | break; | 803 | break; |
| 804 | } | 804 | } |
| 805 | bfqd->num_active_groups--; | 805 | |
| 806 | /* | ||
| 807 | * The decrement of num_groups_with_pending_reqs is | ||
| 808 | * not performed immediately upon the deactivation of | ||
| 809 | * entity, but it is delayed to when it also happens | ||
| 810 | * that the first leaf descendant bfqq of entity gets | ||
| 811 | * all its pending requests completed. The following | ||
| 812 | * instructions perform this delayed decrement, if | ||
| 813 | * needed. See the comments on | ||
| 814 | * num_groups_with_pending_reqs for details. | ||
| 815 | */ | ||
| 816 | if (entity->in_groups_with_pending_reqs) { | ||
| 817 | entity->in_groups_with_pending_reqs = false; | ||
| 818 | bfqd->num_groups_with_pending_reqs--; | ||
| 819 | } | ||
| 806 | } | 820 | } |
| 807 | } | 821 | } |
| 808 | 822 | ||
| @@ -3529,27 +3543,44 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) | |||
| 3529 | * fact, if there are active groups, then, for condition (i) | 3543 | * fact, if there are active groups, then, for condition (i) |
| 3530 | * to become false, it is enough that an active group contains | 3544 | * to become false, it is enough that an active group contains |
| 3531 | * more active processes or sub-groups than some other active | 3545 | * more active processes or sub-groups than some other active |
| 3532 | * group. We address this issue with the following bi-modal | 3546 | * group. More precisely, for condition (i) to hold because of |
| 3533 | * behavior, implemented in the function | 3547 | * such a group, it is not even necessary that the group is |
| 3548 | * (still) active: it is sufficient that, even if the group | ||
| 3549 | * has become inactive, some of its descendant processes still | ||
| 3550 | * have some request already dispatched but still waiting for | ||
| 3551 | * completion. In fact, requests have still to be guaranteed | ||
| 3552 | * their share of the throughput even after being | ||
| 3553 | * dispatched. In this respect, it is easy to show that, if a | ||
| 3554 | * group frequently becomes inactive while still having | ||
| 3555 | * in-flight requests, and if, when this happens, the group is | ||
| 3556 | * not considered in the calculation of whether the scenario | ||
| 3557 | * is asymmetric, then the group may fail to be guaranteed its | ||
| 3558 | * fair share of the throughput (basically because idling may | ||
| 3559 | * not be performed for the descendant processes of the group, | ||
| 3560 | * but it had to be). We address this issue with the | ||
| 3561 | * following bi-modal behavior, implemented in the function | ||
| 3534 | * bfq_symmetric_scenario(). | 3562 | * bfq_symmetric_scenario(). |
| 3535 | * | 3563 | * |
| 3536 | * If there are active groups, then the scenario is tagged as | 3564 | * If there are groups with requests waiting for completion |
| 3565 | * (as commented above, some of these groups may even be | ||
| 3566 | * already inactive), then the scenario is tagged as | ||
| 3537 | * asymmetric, conservatively, without checking any of the | 3567 | * asymmetric, conservatively, without checking any of the |
| 3538 | * conditions (i) and (ii). So the device is idled for bfqq. | 3568 | * conditions (i) and (ii). So the device is idled for bfqq. |
| 3539 | * This behavior matches also the fact that groups are created | 3569 | * This behavior matches also the fact that groups are created |
| 3540 | * exactly if controlling I/O (to preserve bandwidth and | 3570 | * exactly if controlling I/O is a primary concern (to |
| 3541 | * latency guarantees) is a primary concern. | 3571 | * preserve bandwidth and latency guarantees). |
| 3542 | * | 3572 | * |
| 3543 | * On the opposite end, if there are no active groups, then | 3573 | * On the opposite end, if there are no groups with requests |
| 3544 | * only condition (i) is actually controlled, i.e., provided | 3574 | * waiting for completion, then only condition (i) is actually |
| 3545 | * that condition (i) holds, idling is not performed, | 3575 | * controlled, i.e., provided that condition (i) holds, idling |
| 3546 | * regardless of whether condition (ii) holds. In other words, | 3576 | * is not performed, regardless of whether condition (ii) |
| 3547 | * only if condition (i) does not hold, then idling is | 3577 | * holds. In other words, only if condition (i) does not hold, |
| 3548 | * allowed, and the device tends to be prevented from queueing | 3578 | * then idling is allowed, and the device tends to be |
| 3549 | * many requests, possibly of several processes. Since there | 3579 | * prevented from queueing many requests, possibly of several |
| 3550 | * are no active groups, then, to control condition (i) it is | 3580 | * processes. Since there are no groups with requests waiting |
| 3551 | * enough to check whether all active queues have the same | 3581 | * for completion, then, to control condition (i) it is enough |
| 3552 | * weight. | 3582 | * to check just whether all the queues with requests waiting |
| 3583 | * for completion also have the same weight. | ||
| 3553 | * | 3584 | * |
| 3554 | * Not checking condition (ii) evidently exposes bfqq to the | 3585 | * Not checking condition (ii) evidently exposes bfqq to the |
| 3555 | * risk of getting less throughput than its fair share. | 3586 | * risk of getting less throughput than its fair share. |
| @@ -3607,10 +3638,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) | |||
| 3607 | * bfqq is weight-raised is checked explicitly here. More | 3638 | * bfqq is weight-raised is checked explicitly here. More |
| 3608 | * precisely, the compound condition below takes into account | 3639 | * precisely, the compound condition below takes into account |
| 3609 | * also the fact that, even if bfqq is being weight-raised, | 3640 | * also the fact that, even if bfqq is being weight-raised, |
| 3610 | * the scenario is still symmetric if all active queues happen | 3641 | * the scenario is still symmetric if all queues with requests |
| 3611 | * to be weight-raised. Actually, we should be even more | 3642 | * waiting for completion happen to be |
| 3612 | * precise here, and differentiate between interactive weight | 3643 | * weight-raised. Actually, we should be even more precise |
| 3613 | * raising and soft real-time weight raising. | 3644 | * here, and differentiate between interactive weight raising |
| 3645 | * and soft real-time weight raising. | ||
| 3614 | * | 3646 | * |
| 3615 | * As a side note, it is worth considering that the above | 3647 | * As a side note, it is worth considering that the above |
| 3616 | * device-idling countermeasures may however fail in the | 3648 | * device-idling countermeasures may however fail in the |
| @@ -5417,7 +5449,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) | |||
| 5417 | bfqd->idle_slice_timer.function = bfq_idle_slice_timer; | 5449 | bfqd->idle_slice_timer.function = bfq_idle_slice_timer; |
| 5418 | 5450 | ||
| 5419 | bfqd->queue_weights_tree = RB_ROOT; | 5451 | bfqd->queue_weights_tree = RB_ROOT; |
| 5420 | bfqd->num_active_groups = 0; | 5452 | bfqd->num_groups_with_pending_reqs = 0; |
| 5421 | 5453 | ||
| 5422 | INIT_LIST_HEAD(&bfqd->active_list); | 5454 | INIT_LIST_HEAD(&bfqd->active_list); |
| 5423 | INIT_LIST_HEAD(&bfqd->idle_list); | 5455 | INIT_LIST_HEAD(&bfqd->idle_list); |
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 77651d817ecd..0b02bf302de0 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h | |||
| @@ -196,6 +196,9 @@ struct bfq_entity { | |||
| 196 | 196 | ||
| 197 | /* flag, set to request a weight, ioprio or ioprio_class change */ | 197 | /* flag, set to request a weight, ioprio or ioprio_class change */ |
| 198 | int prio_changed; | 198 | int prio_changed; |
| 199 | |||
| 200 | /* flag, set if the entity is counted in groups_with_pending_reqs */ | ||
| 201 | bool in_groups_with_pending_reqs; | ||
| 199 | }; | 202 | }; |
| 200 | 203 | ||
| 201 | struct bfq_group; | 204 | struct bfq_group; |
| @@ -448,10 +451,54 @@ struct bfq_data { | |||
| 448 | * bfq_weights_tree_[add|remove] for further details). | 451 | * bfq_weights_tree_[add|remove] for further details). |
| 449 | */ | 452 | */ |
| 450 | struct rb_root queue_weights_tree; | 453 | struct rb_root queue_weights_tree; |
| 454 | |||
| 451 | /* | 455 | /* |
| 452 | * number of groups with requests still waiting for completion | 456 | * Number of groups with at least one descendant process that |
| 457 | * has at least one request waiting for completion. Note that | ||
| 458 | * this accounts for also requests already dispatched, but not | ||
| 459 | * yet completed. Therefore this number of groups may differ | ||
| 460 | * (be larger) than the number of active groups, as a group is | ||
| 461 | * considered active only if its corresponding entity has | ||
| 462 | * descendant queues with at least one request queued. This | ||
| 463 | * number is used to decide whether a scenario is symmetric. | ||
| 464 | * For a detailed explanation see comments on the computation | ||
| 465 | * of the variable asymmetric_scenario in the function | ||
| 466 | * bfq_better_to_idle(). | ||
| 467 | * | ||
| 468 | * However, it is hard to compute this number exactly, for | ||
| 469 | * groups with multiple descendant processes. Consider a group | ||
| 470 | * that is inactive, i.e., that has no descendant process with | ||
| 471 | * pending I/O inside BFQ queues. Then suppose that | ||
| 472 | * num_groups_with_pending_reqs is still accounting for this | ||
| 473 | * group, because the group has descendant processes with some | ||
| 474 | * I/O request still in flight. num_groups_with_pending_reqs | ||
| 475 | * should be decremented when the in-flight request of the | ||
| 476 | * last descendant process is finally completed (assuming that | ||
| 477 | * nothing else has changed for the group in the meantime, in | ||
| 478 | * terms of composition of the group and active/inactive state of child | ||
| 479 | * groups and processes). To accomplish this, an additional | ||
| 480 | * pending-request counter must be added to entities, and must | ||
| 481 | * be updated correctly. To avoid this additional field and operations, | ||
| 482 | * we resort to the following tradeoff between simplicity and | ||
| 483 | * accuracy: for an inactive group that is still counted in | ||
| 484 | * num_groups_with_pending_reqs, we decrement | ||
| 485 | * num_groups_with_pending_reqs when the first descendant | ||
| 486 | * process of the group remains with no request waiting for | ||
| 487 | * completion. | ||
| 488 | * | ||
| 489 | * Even this simpler decrement strategy requires a little | ||
| 490 | * carefulness: to avoid multiple decrements, we flag a group, | ||
| 491 | * more precisely an entity representing a group, as still | ||
| 492 | * counted in num_groups_with_pending_reqs when it becomes | ||
| 493 | * inactive. Then, when the first descendant queue of the | ||
| 494 | * entity remains with no request waiting for completion, | ||
| 495 | * num_groups_with_pending_reqs is decremented, and this flag | ||
| 496 | * is reset. After this flag is reset for the entity, | ||
| 497 | * num_groups_with_pending_reqs won't be decremented any | ||
| 498 | * longer in case a new descendant queue of the entity remains | ||
| 499 | * with no request waiting for completion. | ||
| 453 | */ | 500 | */ |
| 454 | unsigned int num_active_groups; | 501 | unsigned int num_groups_with_pending_reqs; |
| 455 | 502 | ||
| 456 | /* | 503 | /* |
| 457 | * Number of bfq_queues containing requests (including the | 504 | * Number of bfq_queues containing requests (including the |
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 4b0d5fb69160..63e0f12be7c9 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c | |||
| @@ -1012,7 +1012,10 @@ static void __bfq_activate_entity(struct bfq_entity *entity, | |||
| 1012 | container_of(entity, struct bfq_group, entity); | 1012 | container_of(entity, struct bfq_group, entity); |
| 1013 | struct bfq_data *bfqd = bfqg->bfqd; | 1013 | struct bfq_data *bfqd = bfqg->bfqd; |
| 1014 | 1014 | ||
| 1015 | bfqd->num_active_groups++; | 1015 | if (!entity->in_groups_with_pending_reqs) { |
| 1016 | entity->in_groups_with_pending_reqs = true; | ||
| 1017 | bfqd->num_groups_with_pending_reqs++; | ||
| 1018 | } | ||
| 1016 | } | 1019 | } |
| 1017 | #endif | 1020 | #endif |
| 1018 | 1021 | ||
diff --git a/block/bio.c b/block/bio.c index 4f4d9884443b..4d86e90654b2 100644 --- a/block/bio.c +++ b/block/bio.c | |||
| @@ -1261,7 +1261,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
| 1261 | if (ret) | 1261 | if (ret) |
| 1262 | goto cleanup; | 1262 | goto cleanup; |
| 1263 | } else { | 1263 | } else { |
| 1264 | zero_fill_bio(bio); | 1264 | if (bmd->is_our_pages) |
| 1265 | zero_fill_bio(bio); | ||
| 1265 | iov_iter_advance(iter, bio->bi_iter.bi_size); | 1266 | iov_iter_advance(iter, bio->bi_iter.bi_size); |
| 1266 | } | 1267 | } |
| 1267 | 1268 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index 3f91c6e5b17a..6a7566244de3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1764,7 +1764,7 @@ insert: | |||
| 1764 | if (bypass_insert) | 1764 | if (bypass_insert) |
| 1765 | return BLK_STS_RESOURCE; | 1765 | return BLK_STS_RESOURCE; |
| 1766 | 1766 | ||
| 1767 | blk_mq_sched_insert_request(rq, false, run_queue, false); | 1767 | blk_mq_request_bypass_insert(rq, run_queue); |
| 1768 | return BLK_STS_OK; | 1768 | return BLK_STS_OK; |
| 1769 | } | 1769 | } |
| 1770 | 1770 | ||
| @@ -1780,7 +1780,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, | |||
| 1780 | 1780 | ||
| 1781 | ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false); | 1781 | ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false); |
| 1782 | if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) | 1782 | if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) |
| 1783 | blk_mq_sched_insert_request(rq, false, true, false); | 1783 | blk_mq_request_bypass_insert(rq, true); |
| 1784 | else if (ret != BLK_STS_OK) | 1784 | else if (ret != BLK_STS_OK) |
| 1785 | blk_mq_end_request(rq, ret); | 1785 | blk_mq_end_request(rq, ret); |
| 1786 | 1786 | ||
| @@ -1815,7 +1815,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, | |||
| 1815 | if (ret != BLK_STS_OK) { | 1815 | if (ret != BLK_STS_OK) { |
| 1816 | if (ret == BLK_STS_RESOURCE || | 1816 | if (ret == BLK_STS_RESOURCE || |
| 1817 | ret == BLK_STS_DEV_RESOURCE) { | 1817 | ret == BLK_STS_DEV_RESOURCE) { |
| 1818 | list_add(&rq->queuelist, list); | 1818 | blk_mq_request_bypass_insert(rq, |
| 1819 | list_empty(list)); | ||
| 1819 | break; | 1820 | break; |
| 1820 | } | 1821 | } |
| 1821 | blk_mq_end_request(rq, ret); | 1822 | blk_mq_end_request(rq, ret); |
diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 13ba2011a306..a327bef07642 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c | |||
| @@ -378,7 +378,7 @@ static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones) | |||
| 378 | struct page *page; | 378 | struct page *page; |
| 379 | int order; | 379 | int order; |
| 380 | 380 | ||
| 381 | for (order = get_order(size); order > 0; order--) { | 381 | for (order = get_order(size); order >= 0; order--) { |
| 382 | page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order); | 382 | page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order); |
| 383 | if (page) { | 383 | if (page) { |
| 384 | *nr_zones = min_t(unsigned int, *nr_zones, | 384 | *nr_zones = min_t(unsigned int, *nr_zones, |
diff --git a/crypto/Kconfig b/crypto/Kconfig index f7a235db56aa..05c91eb10ca1 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
| @@ -1812,7 +1812,7 @@ config CRYPTO_USER_API_AEAD | |||
| 1812 | cipher algorithms. | 1812 | cipher algorithms. |
| 1813 | 1813 | ||
| 1814 | config CRYPTO_STATS | 1814 | config CRYPTO_STATS |
| 1815 | bool "Crypto usage statistics for User-space" | 1815 | bool |
| 1816 | help | 1816 | help |
| 1817 | This option enables the gathering of crypto stats. | 1817 | This option enables the gathering of crypto stats. |
| 1818 | This will collect: | 1818 | This will collect: |
diff --git a/crypto/cbc.c b/crypto/cbc.c index b761b1f9c6ca..dd5f332fd566 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c | |||
| @@ -140,9 +140,8 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 140 | spawn = skcipher_instance_ctx(inst); | 140 | spawn = skcipher_instance_ctx(inst); |
| 141 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), | 141 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), |
| 142 | CRYPTO_ALG_TYPE_MASK); | 142 | CRYPTO_ALG_TYPE_MASK); |
| 143 | crypto_mod_put(alg); | ||
| 144 | if (err) | 143 | if (err) |
| 145 | goto err_free_inst; | 144 | goto err_put_alg; |
| 146 | 145 | ||
| 147 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg); | 146 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg); |
| 148 | if (err) | 147 | if (err) |
| @@ -174,12 +173,15 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 174 | err = skcipher_register_instance(tmpl, inst); | 173 | err = skcipher_register_instance(tmpl, inst); |
| 175 | if (err) | 174 | if (err) |
| 176 | goto err_drop_spawn; | 175 | goto err_drop_spawn; |
| 176 | crypto_mod_put(alg); | ||
| 177 | 177 | ||
| 178 | out: | 178 | out: |
| 179 | return err; | 179 | return err; |
| 180 | 180 | ||
| 181 | err_drop_spawn: | 181 | err_drop_spawn: |
| 182 | crypto_drop_spawn(spawn); | 182 | crypto_drop_spawn(spawn); |
| 183 | err_put_alg: | ||
| 184 | crypto_mod_put(alg); | ||
| 183 | err_free_inst: | 185 | err_free_inst: |
| 184 | kfree(inst); | 186 | kfree(inst); |
| 185 | goto out; | 187 | goto out; |
diff --git a/crypto/cfb.c b/crypto/cfb.c index a0d68c09e1b9..20987d0e09d8 100644 --- a/crypto/cfb.c +++ b/crypto/cfb.c | |||
| @@ -286,9 +286,8 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 286 | spawn = skcipher_instance_ctx(inst); | 286 | spawn = skcipher_instance_ctx(inst); |
| 287 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), | 287 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), |
| 288 | CRYPTO_ALG_TYPE_MASK); | 288 | CRYPTO_ALG_TYPE_MASK); |
| 289 | crypto_mod_put(alg); | ||
| 290 | if (err) | 289 | if (err) |
| 291 | goto err_free_inst; | 290 | goto err_put_alg; |
| 292 | 291 | ||
| 293 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg); | 292 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg); |
| 294 | if (err) | 293 | if (err) |
| @@ -317,12 +316,15 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 317 | err = skcipher_register_instance(tmpl, inst); | 316 | err = skcipher_register_instance(tmpl, inst); |
| 318 | if (err) | 317 | if (err) |
| 319 | goto err_drop_spawn; | 318 | goto err_drop_spawn; |
| 319 | crypto_mod_put(alg); | ||
| 320 | 320 | ||
| 321 | out: | 321 | out: |
| 322 | return err; | 322 | return err; |
| 323 | 323 | ||
| 324 | err_drop_spawn: | 324 | err_drop_spawn: |
| 325 | crypto_drop_spawn(spawn); | 325 | crypto_drop_spawn(spawn); |
| 326 | err_put_alg: | ||
| 327 | crypto_mod_put(alg); | ||
| 326 | err_free_inst: | 328 | err_free_inst: |
| 327 | kfree(inst); | 329 | kfree(inst); |
| 328 | goto out; | 330 | goto out; |
diff --git a/crypto/pcbc.c b/crypto/pcbc.c index ef802f6e9642..8aa10144407c 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c | |||
| @@ -244,9 +244,8 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 244 | spawn = skcipher_instance_ctx(inst); | 244 | spawn = skcipher_instance_ctx(inst); |
| 245 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), | 245 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), |
| 246 | CRYPTO_ALG_TYPE_MASK); | 246 | CRYPTO_ALG_TYPE_MASK); |
| 247 | crypto_mod_put(alg); | ||
| 248 | if (err) | 247 | if (err) |
| 249 | goto err_free_inst; | 248 | goto err_put_alg; |
| 250 | 249 | ||
| 251 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg); | 250 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg); |
| 252 | if (err) | 251 | if (err) |
| @@ -275,12 +274,15 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 275 | err = skcipher_register_instance(tmpl, inst); | 274 | err = skcipher_register_instance(tmpl, inst); |
| 276 | if (err) | 275 | if (err) |
| 277 | goto err_drop_spawn; | 276 | goto err_drop_spawn; |
| 277 | crypto_mod_put(alg); | ||
| 278 | 278 | ||
| 279 | out: | 279 | out: |
| 280 | return err; | 280 | return err; |
| 281 | 281 | ||
| 282 | err_drop_spawn: | 282 | err_drop_spawn: |
| 283 | crypto_drop_spawn(spawn); | 283 | crypto_drop_spawn(spawn); |
| 284 | err_put_alg: | ||
| 285 | crypto_mod_put(alg); | ||
| 284 | err_free_inst: | 286 | err_free_inst: |
| 285 | kfree(inst); | 287 | kfree(inst); |
| 286 | goto out; | 288 | goto out; |
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 14d9f5bea015..5912d30020c7 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
| @@ -1308,7 +1308,7 @@ static ssize_t scrub_store(struct device *dev, | |||
| 1308 | if (nd_desc) { | 1308 | if (nd_desc) { |
| 1309 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); | 1309 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 1310 | 1310 | ||
| 1311 | rc = acpi_nfit_ars_rescan(acpi_desc, 0); | 1311 | rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); |
| 1312 | } | 1312 | } |
| 1313 | device_unlock(dev); | 1313 | device_unlock(dev); |
| 1314 | if (rc) | 1314 | if (rc) |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index a7f5202a4815..b8c3f9e6af89 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -4602,6 +4602,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4602 | { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4602 | { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| 4603 | { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4603 | { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| 4604 | { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4604 | { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| 4605 | { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4605 | { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4606 | { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| 4606 | 4607 | ||
| 4607 | /* | 4608 | /* |
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c index ad8d483a35cd..ca7d37e2c7be 100644 --- a/drivers/clk/mmp/clk.c +++ b/drivers/clk/mmp/clk.c | |||
| @@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id, | |||
| 183 | pr_err("CLK %d has invalid pointer %p\n", id, clk); | 183 | pr_err("CLK %d has invalid pointer %p\n", id, clk); |
| 184 | return; | 184 | return; |
| 185 | } | 185 | } |
| 186 | if (id > unit->nr_clks) { | 186 | if (id >= unit->nr_clks) { |
| 187 | pr_err("CLK %d is invalid\n", id); | 187 | pr_err("CLK %d is invalid\n", id); |
| 188 | return; | 188 | return; |
| 189 | } | 189 | } |
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c index 9781b1bf5998..9235a331b588 100644 --- a/drivers/clk/mvebu/cp110-system-controller.c +++ b/drivers/clk/mvebu/cp110-system-controller.c | |||
| @@ -200,11 +200,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec, | |||
| 200 | unsigned int idx = clkspec->args[1]; | 200 | unsigned int idx = clkspec->args[1]; |
| 201 | 201 | ||
| 202 | if (type == CP110_CLK_TYPE_CORE) { | 202 | if (type == CP110_CLK_TYPE_CORE) { |
| 203 | if (idx > CP110_MAX_CORE_CLOCKS) | 203 | if (idx >= CP110_MAX_CORE_CLOCKS) |
| 204 | return ERR_PTR(-EINVAL); | 204 | return ERR_PTR(-EINVAL); |
| 205 | return clk_data->hws[idx]; | 205 | return clk_data->hws[idx]; |
| 206 | } else if (type == CP110_CLK_TYPE_GATABLE) { | 206 | } else if (type == CP110_CLK_TYPE_GATABLE) { |
| 207 | if (idx > CP110_MAX_GATABLE_CLOCKS) | 207 | if (idx >= CP110_MAX_GATABLE_CLOCKS) |
| 208 | return ERR_PTR(-EINVAL); | 208 | return ERR_PTR(-EINVAL); |
| 209 | return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx]; | 209 | return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx]; |
| 210 | } | 210 | } |
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index db9b2471ac40..0a48ed56833b 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c | |||
| @@ -191,6 +191,22 @@ int qcom_cc_register_sleep_clk(struct device *dev) | |||
| 191 | } | 191 | } |
| 192 | EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk); | 192 | EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk); |
| 193 | 193 | ||
| 194 | /* Drop 'protected-clocks' from the list of clocks to register */ | ||
| 195 | static void qcom_cc_drop_protected(struct device *dev, struct qcom_cc *cc) | ||
| 196 | { | ||
| 197 | struct device_node *np = dev->of_node; | ||
| 198 | struct property *prop; | ||
| 199 | const __be32 *p; | ||
| 200 | u32 i; | ||
| 201 | |||
| 202 | of_property_for_each_u32(np, "protected-clocks", prop, p, i) { | ||
| 203 | if (i >= cc->num_rclks) | ||
| 204 | continue; | ||
| 205 | |||
| 206 | cc->rclks[i] = NULL; | ||
| 207 | } | ||
| 208 | } | ||
| 209 | |||
| 194 | static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec, | 210 | static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec, |
| 195 | void *data) | 211 | void *data) |
| 196 | { | 212 | { |
| @@ -251,6 +267,8 @@ int qcom_cc_really_probe(struct platform_device *pdev, | |||
| 251 | cc->rclks = rclks; | 267 | cc->rclks = rclks; |
| 252 | cc->num_rclks = num_clks; | 268 | cc->num_rclks = num_clks; |
| 253 | 269 | ||
| 270 | qcom_cc_drop_protected(dev, cc); | ||
| 271 | |||
| 254 | for (i = 0; i < num_clks; i++) { | 272 | for (i = 0; i < num_clks; i++) { |
| 255 | if (!rclks[i]) | 273 | if (!rclks[i]) |
| 256 | continue; | 274 | continue; |
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c index ef1b267cb058..64da032bb9ed 100644 --- a/drivers/clk/qcom/gcc-qcs404.c +++ b/drivers/clk/qcom/gcc-qcs404.c | |||
| @@ -297,7 +297,7 @@ static struct clk_alpha_pll gpll0_out_main = { | |||
| 297 | .hw.init = &(struct clk_init_data){ | 297 | .hw.init = &(struct clk_init_data){ |
| 298 | .name = "gpll0_out_main", | 298 | .name = "gpll0_out_main", |
| 299 | .parent_names = (const char *[]) | 299 | .parent_names = (const char *[]) |
| 300 | { "gpll0_sleep_clk_src" }, | 300 | { "cxo" }, |
| 301 | .num_parents = 1, | 301 | .num_parents = 1, |
| 302 | .ops = &clk_alpha_pll_ops, | 302 | .ops = &clk_alpha_pll_ops, |
| 303 | }, | 303 | }, |
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index 9d7d297f0ea8..f65cc0ff76ab 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c | |||
| @@ -128,7 +128,7 @@ static const struct zynqmp_eemi_ops *eemi_ops; | |||
| 128 | */ | 128 | */ |
| 129 | static inline int zynqmp_is_valid_clock(u32 clk_id) | 129 | static inline int zynqmp_is_valid_clock(u32 clk_id) |
| 130 | { | 130 | { |
| 131 | if (clk_id > clock_max_idx) | 131 | if (clk_id >= clock_max_idx) |
| 132 | return -ENODEV; | 132 | return -ENODEV; |
| 133 | 133 | ||
| 134 | return clock[clk_id].valid; | 134 | return clock[clk_id].valid; |
| @@ -279,6 +279,9 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id, | |||
| 279 | qdata.arg1 = clk_id; | 279 | qdata.arg1 = clk_id; |
| 280 | 280 | ||
| 281 | ret = eemi_ops->query_data(qdata, ret_payload); | 281 | ret = eemi_ops->query_data(qdata, ret_payload); |
| 282 | if (ret) | ||
| 283 | return ERR_PTR(ret); | ||
| 284 | |||
| 282 | mult = ret_payload[1]; | 285 | mult = ret_payload[1]; |
| 283 | div = ret_payload[2]; | 286 | div = ret_payload[2]; |
| 284 | 287 | ||
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index d0c3e50b39fb..1fc488e90f36 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
| @@ -1059,12 +1059,12 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
| 1059 | /* | 1059 | /* |
| 1060 | * Program FIFO size of channels. | 1060 | * Program FIFO size of channels. |
| 1061 | * | 1061 | * |
| 1062 | * By default full FIFO (1024 bytes) is assigned to channel 0. Here we | 1062 | * By default full FIFO (512 bytes) is assigned to channel 0. Here we |
| 1063 | * slice FIFO on equal parts between channels. | 1063 | * slice FIFO on equal parts between channels. |
| 1064 | */ | 1064 | */ |
| 1065 | static void idma32_fifo_partition(struct dw_dma *dw) | 1065 | static void idma32_fifo_partition(struct dw_dma *dw) |
| 1066 | { | 1066 | { |
| 1067 | u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) | | 1067 | u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) | |
| 1068 | IDMA32C_FP_UPDATE; | 1068 | IDMA32C_FP_UPDATE; |
| 1069 | u64 fifo_partition = 0; | 1069 | u64 fifo_partition = 0; |
| 1070 | 1070 | ||
| @@ -1077,7 +1077,7 @@ static void idma32_fifo_partition(struct dw_dma *dw) | |||
| 1077 | /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ | 1077 | /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ |
| 1078 | fifo_partition |= value << 32; | 1078 | fifo_partition |= value << 32; |
| 1079 | 1079 | ||
| 1080 | /* Program FIFO Partition registers - 128 bytes for each channel */ | 1080 | /* Program FIFO Partition registers - 64 bytes per channel */ |
| 1081 | idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); | 1081 | idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); |
| 1082 | idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); | 1082 | idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); |
| 1083 | } | 1083 | } |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index b4ec2d20e661..cb1b44d78a1f 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
| 25 | #include <linux/device.h> | 25 | #include <linux/device.h> |
| 26 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
| 27 | #include <linux/dmapool.h> | ||
| 28 | #include <linux/firmware.h> | 27 | #include <linux/firmware.h> |
| 29 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 30 | #include <linux/platform_device.h> | 29 | #include <linux/platform_device.h> |
| @@ -33,6 +32,7 @@ | |||
| 33 | #include <linux/of_address.h> | 32 | #include <linux/of_address.h> |
| 34 | #include <linux/of_device.h> | 33 | #include <linux/of_device.h> |
| 35 | #include <linux/of_dma.h> | 34 | #include <linux/of_dma.h> |
| 35 | #include <linux/workqueue.h> | ||
| 36 | 36 | ||
| 37 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
| 38 | #include <linux/platform_data/dma-imx-sdma.h> | 38 | #include <linux/platform_data/dma-imx-sdma.h> |
| @@ -376,7 +376,7 @@ struct sdma_channel { | |||
| 376 | u32 shp_addr, per_addr; | 376 | u32 shp_addr, per_addr; |
| 377 | enum dma_status status; | 377 | enum dma_status status; |
| 378 | struct imx_dma_data data; | 378 | struct imx_dma_data data; |
| 379 | struct dma_pool *bd_pool; | 379 | struct work_struct terminate_worker; |
| 380 | }; | 380 | }; |
| 381 | 381 | ||
| 382 | #define IMX_DMA_SG_LOOP BIT(0) | 382 | #define IMX_DMA_SG_LOOP BIT(0) |
| @@ -1027,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan) | |||
| 1027 | 1027 | ||
| 1028 | return 0; | 1028 | return 0; |
| 1029 | } | 1029 | } |
| 1030 | 1030 | static void sdma_channel_terminate_work(struct work_struct *work) | |
| 1031 | static int sdma_disable_channel_with_delay(struct dma_chan *chan) | ||
| 1032 | { | 1031 | { |
| 1033 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1032 | struct sdma_channel *sdmac = container_of(work, struct sdma_channel, |
| 1033 | terminate_worker); | ||
| 1034 | unsigned long flags; | 1034 | unsigned long flags; |
| 1035 | LIST_HEAD(head); | 1035 | LIST_HEAD(head); |
| 1036 | 1036 | ||
| 1037 | sdma_disable_channel(chan); | ||
| 1038 | spin_lock_irqsave(&sdmac->vc.lock, flags); | ||
| 1039 | vchan_get_all_descriptors(&sdmac->vc, &head); | ||
| 1040 | sdmac->desc = NULL; | ||
| 1041 | spin_unlock_irqrestore(&sdmac->vc.lock, flags); | ||
| 1042 | vchan_dma_desc_free_list(&sdmac->vc, &head); | ||
| 1043 | |||
| 1044 | /* | 1037 | /* |
| 1045 | * According to NXP R&D team a delay of one BD SDMA cost time | 1038 | * According to NXP R&D team a delay of one BD SDMA cost time |
| 1046 | * (maximum is 1ms) should be added after disable of the channel | 1039 | * (maximum is 1ms) should be added after disable of the channel |
| 1047 | * bit, to ensure SDMA core has really been stopped after SDMA | 1040 | * bit, to ensure SDMA core has really been stopped after SDMA |
| 1048 | * clients call .device_terminate_all. | 1041 | * clients call .device_terminate_all. |
| 1049 | */ | 1042 | */ |
| 1050 | mdelay(1); | 1043 | usleep_range(1000, 2000); |
| 1044 | |||
| 1045 | spin_lock_irqsave(&sdmac->vc.lock, flags); | ||
| 1046 | vchan_get_all_descriptors(&sdmac->vc, &head); | ||
| 1047 | sdmac->desc = NULL; | ||
| 1048 | spin_unlock_irqrestore(&sdmac->vc.lock, flags); | ||
| 1049 | vchan_dma_desc_free_list(&sdmac->vc, &head); | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | static int sdma_disable_channel_async(struct dma_chan *chan) | ||
| 1053 | { | ||
| 1054 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
| 1055 | |||
| 1056 | sdma_disable_channel(chan); | ||
| 1057 | |||
| 1058 | if (sdmac->desc) | ||
| 1059 | schedule_work(&sdmac->terminate_worker); | ||
| 1051 | 1060 | ||
| 1052 | return 0; | 1061 | return 0; |
| 1053 | } | 1062 | } |
| 1054 | 1063 | ||
| 1064 | static void sdma_channel_synchronize(struct dma_chan *chan) | ||
| 1065 | { | ||
| 1066 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
| 1067 | |||
| 1068 | vchan_synchronize(&sdmac->vc); | ||
| 1069 | |||
| 1070 | flush_work(&sdmac->terminate_worker); | ||
| 1071 | } | ||
| 1072 | |||
| 1055 | static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) | 1073 | static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) |
| 1056 | { | 1074 | { |
| 1057 | struct sdma_engine *sdma = sdmac->sdma; | 1075 | struct sdma_engine *sdma = sdmac->sdma; |
| @@ -1192,10 +1210,11 @@ out: | |||
| 1192 | 1210 | ||
| 1193 | static int sdma_alloc_bd(struct sdma_desc *desc) | 1211 | static int sdma_alloc_bd(struct sdma_desc *desc) |
| 1194 | { | 1212 | { |
| 1213 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); | ||
| 1195 | int ret = 0; | 1214 | int ret = 0; |
| 1196 | 1215 | ||
| 1197 | desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT, | 1216 | desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, |
| 1198 | &desc->bd_phys); | 1217 | GFP_NOWAIT); |
| 1199 | if (!desc->bd) { | 1218 | if (!desc->bd) { |
| 1200 | ret = -ENOMEM; | 1219 | ret = -ENOMEM; |
| 1201 | goto out; | 1220 | goto out; |
| @@ -1206,7 +1225,9 @@ out: | |||
| 1206 | 1225 | ||
| 1207 | static void sdma_free_bd(struct sdma_desc *desc) | 1226 | static void sdma_free_bd(struct sdma_desc *desc) |
| 1208 | { | 1227 | { |
| 1209 | dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys); | 1228 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); |
| 1229 | |||
| 1230 | dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys); | ||
| 1210 | } | 1231 | } |
| 1211 | 1232 | ||
| 1212 | static void sdma_desc_free(struct virt_dma_desc *vd) | 1233 | static void sdma_desc_free(struct virt_dma_desc *vd) |
| @@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) | |||
| 1272 | if (ret) | 1293 | if (ret) |
| 1273 | goto disable_clk_ahb; | 1294 | goto disable_clk_ahb; |
| 1274 | 1295 | ||
| 1275 | sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev, | ||
| 1276 | sizeof(struct sdma_buffer_descriptor), | ||
| 1277 | 32, 0); | ||
| 1278 | |||
| 1279 | return 0; | 1296 | return 0; |
| 1280 | 1297 | ||
| 1281 | disable_clk_ahb: | 1298 | disable_clk_ahb: |
| @@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan) | |||
| 1290 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1307 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
| 1291 | struct sdma_engine *sdma = sdmac->sdma; | 1308 | struct sdma_engine *sdma = sdmac->sdma; |
| 1292 | 1309 | ||
| 1293 | sdma_disable_channel_with_delay(chan); | 1310 | sdma_disable_channel_async(chan); |
| 1311 | |||
| 1312 | sdma_channel_synchronize(chan); | ||
| 1294 | 1313 | ||
| 1295 | if (sdmac->event_id0) | 1314 | if (sdmac->event_id0) |
| 1296 | sdma_event_disable(sdmac, sdmac->event_id0); | 1315 | sdma_event_disable(sdmac, sdmac->event_id0); |
| @@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan) | |||
| 1304 | 1323 | ||
| 1305 | clk_disable(sdma->clk_ipg); | 1324 | clk_disable(sdma->clk_ipg); |
| 1306 | clk_disable(sdma->clk_ahb); | 1325 | clk_disable(sdma->clk_ahb); |
| 1307 | |||
| 1308 | dma_pool_destroy(sdmac->bd_pool); | ||
| 1309 | sdmac->bd_pool = NULL; | ||
| 1310 | } | 1326 | } |
| 1311 | 1327 | ||
| 1312 | static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, | 1328 | static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, |
| @@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev) | |||
| 1999 | 2015 | ||
| 2000 | sdmac->channel = i; | 2016 | sdmac->channel = i; |
| 2001 | sdmac->vc.desc_free = sdma_desc_free; | 2017 | sdmac->vc.desc_free = sdma_desc_free; |
| 2018 | INIT_WORK(&sdmac->terminate_worker, | ||
| 2019 | sdma_channel_terminate_work); | ||
| 2002 | /* | 2020 | /* |
| 2003 | * Add the channel to the DMAC list. Do not add channel 0 though | 2021 | * Add the channel to the DMAC list. Do not add channel 0 though |
| 2004 | * because we need it internally in the SDMA driver. This also means | 2022 | * because we need it internally in the SDMA driver. This also means |
| @@ -2050,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev) | |||
| 2050 | sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; | 2068 | sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; |
| 2051 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; | 2069 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; |
| 2052 | sdma->dma_device.device_config = sdma_config; | 2070 | sdma->dma_device.device_config = sdma_config; |
| 2053 | sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; | 2071 | sdma->dma_device.device_terminate_all = sdma_disable_channel_async; |
| 2072 | sdma->dma_device.device_synchronize = sdma_channel_synchronize; | ||
| 2054 | sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; | 2073 | sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; |
| 2055 | sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; | 2074 | sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; |
| 2056 | sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; | 2075 | sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; |
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index 1497da367710..e507ec36c0d3 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c | |||
| @@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan) | |||
| 723 | 723 | ||
| 724 | desc_phys = lower_32_bits(c->desc_phys); | 724 | desc_phys = lower_32_bits(c->desc_phys); |
| 725 | desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); | 725 | desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); |
| 726 | if (!cdd->chan_busy[desc_num]) | 726 | if (!cdd->chan_busy[desc_num]) { |
| 727 | struct cppi41_channel *cc, *_ct; | ||
| 728 | |||
| 729 | /* | ||
| 730 | * channels might still be in the pendling list if | ||
| 731 | * cppi41_dma_issue_pending() is called after | ||
| 732 | * cppi41_runtime_suspend() is called | ||
| 733 | */ | ||
| 734 | list_for_each_entry_safe(cc, _ct, &cdd->pending, node) { | ||
| 735 | if (cc != c) | ||
| 736 | continue; | ||
| 737 | list_del(&cc->node); | ||
| 738 | break; | ||
| 739 | } | ||
| 727 | return 0; | 740 | return 0; |
| 741 | } | ||
| 728 | 742 | ||
| 729 | ret = cppi41_tear_down_chan(c); | 743 | ret = cppi41_tear_down_chan(c); |
| 730 | if (ret) | 744 | if (ret) |
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c index 71d014edd167..2c22836d3ffd 100644 --- a/drivers/gnss/sirf.c +++ b/drivers/gnss/sirf.c | |||
| @@ -168,7 +168,7 @@ static int sirf_set_active(struct sirf_data *data, bool active) | |||
| 168 | else | 168 | else |
| 169 | timeout = SIRF_HIBERNATE_TIMEOUT; | 169 | timeout = SIRF_HIBERNATE_TIMEOUT; |
| 170 | 170 | ||
| 171 | while (retries-- > 0) { | 171 | do { |
| 172 | sirf_pulse_on_off(data); | 172 | sirf_pulse_on_off(data); |
| 173 | ret = sirf_wait_for_power_state(data, active, timeout); | 173 | ret = sirf_wait_for_power_state(data, active, timeout); |
| 174 | if (ret < 0) { | 174 | if (ret < 0) { |
| @@ -179,9 +179,9 @@ static int sirf_set_active(struct sirf_data *data, bool active) | |||
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | break; | 181 | break; |
| 182 | } | 182 | } while (retries--); |
| 183 | 183 | ||
| 184 | if (retries == 0) | 184 | if (retries < 0) |
| 185 | return -ETIMEDOUT; | 185 | return -ETIMEDOUT; |
| 186 | 186 | ||
| 187 | return 0; | 187 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 104b2e0d893b..b0fc116296cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -233,7 +233,7 @@ enum amdgpu_kiq_irq { | |||
| 233 | 233 | ||
| 234 | #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ | 234 | #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ |
| 235 | #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ | 235 | #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ |
| 236 | #define MAX_KIQ_REG_TRY 20 | 236 | #define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */ |
| 237 | 237 | ||
| 238 | int amdgpu_device_ip_set_clockgating_state(void *dev, | 238 | int amdgpu_device_ip_set_clockgating_state(void *dev, |
| 239 | enum amd_ip_block_type block_type, | 239 | enum amd_ip_block_type block_type, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 8816c697b205..387f1cf1dc20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
| @@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
| 330 | case CHIP_TOPAZ: | 330 | case CHIP_TOPAZ: |
| 331 | if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || | 331 | if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || |
| 332 | ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || | 332 | ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || |
| 333 | ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) { | 333 | ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) || |
| 334 | ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) || | ||
| 335 | ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) { | ||
| 334 | info->is_kicker = true; | 336 | info->is_kicker = true; |
| 335 | strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); | 337 | strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); |
| 336 | } else | 338 | } else |
| @@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
| 351 | if (type == CGS_UCODE_ID_SMU) { | 353 | if (type == CGS_UCODE_ID_SMU) { |
| 352 | if (((adev->pdev->device == 0x67ef) && | 354 | if (((adev->pdev->device == 0x67ef) && |
| 353 | ((adev->pdev->revision == 0xe0) || | 355 | ((adev->pdev->revision == 0xe0) || |
| 354 | (adev->pdev->revision == 0xe2) || | ||
| 355 | (adev->pdev->revision == 0xe5))) || | 356 | (adev->pdev->revision == 0xe5))) || |
| 356 | ((adev->pdev->device == 0x67ff) && | 357 | ((adev->pdev->device == 0x67ff) && |
| 357 | ((adev->pdev->revision == 0xcf) || | 358 | ((adev->pdev->revision == 0xcf) || |
| @@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
| 359 | (adev->pdev->revision == 0xff)))) { | 360 | (adev->pdev->revision == 0xff)))) { |
| 360 | info->is_kicker = true; | 361 | info->is_kicker = true; |
| 361 | strcpy(fw_name, "amdgpu/polaris11_k_smc.bin"); | 362 | strcpy(fw_name, "amdgpu/polaris11_k_smc.bin"); |
| 362 | } else | 363 | } else if ((adev->pdev->device == 0x67ef) && |
| 364 | (adev->pdev->revision == 0xe2)) { | ||
| 365 | info->is_kicker = true; | ||
| 366 | strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin"); | ||
| 367 | } else { | ||
| 363 | strcpy(fw_name, "amdgpu/polaris11_smc.bin"); | 368 | strcpy(fw_name, "amdgpu/polaris11_smc.bin"); |
| 369 | } | ||
| 364 | } else if (type == CGS_UCODE_ID_SMU_SK) { | 370 | } else if (type == CGS_UCODE_ID_SMU_SK) { |
| 365 | strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); | 371 | strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); |
| 366 | } | 372 | } |
| @@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
| 375 | (adev->pdev->revision == 0xe7) || | 381 | (adev->pdev->revision == 0xe7) || |
| 376 | (adev->pdev->revision == 0xef))) || | 382 | (adev->pdev->revision == 0xef))) || |
| 377 | ((adev->pdev->device == 0x6fdf) && | 383 | ((adev->pdev->device == 0x6fdf) && |
| 378 | (adev->pdev->revision == 0xef))) { | 384 | ((adev->pdev->revision == 0xef) || |
| 385 | (adev->pdev->revision == 0xff)))) { | ||
| 379 | info->is_kicker = true; | 386 | info->is_kicker = true; |
| 380 | strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); | 387 | strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); |
| 381 | } else | 388 | } else if ((adev->pdev->device == 0x67df) && |
| 389 | ((adev->pdev->revision == 0xe1) || | ||
| 390 | (adev->pdev->revision == 0xf7))) { | ||
| 391 | info->is_kicker = true; | ||
| 392 | strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin"); | ||
| 393 | } else { | ||
| 382 | strcpy(fw_name, "amdgpu/polaris10_smc.bin"); | 394 | strcpy(fw_name, "amdgpu/polaris10_smc.bin"); |
| 395 | } | ||
| 383 | } else if (type == CGS_UCODE_ID_SMU_SK) { | 396 | } else if (type == CGS_UCODE_ID_SMU_SK) { |
| 384 | strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); | 397 | strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); |
| 385 | } | 398 | } |
| 386 | break; | 399 | break; |
| 387 | case CHIP_POLARIS12: | 400 | case CHIP_POLARIS12: |
| 388 | strcpy(fw_name, "amdgpu/polaris12_smc.bin"); | 401 | if (((adev->pdev->device == 0x6987) && |
| 402 | ((adev->pdev->revision == 0xc0) || | ||
| 403 | (adev->pdev->revision == 0xc3))) || | ||
| 404 | ((adev->pdev->device == 0x6981) && | ||
| 405 | ((adev->pdev->revision == 0x00) || | ||
| 406 | (adev->pdev->revision == 0x01) || | ||
| 407 | (adev->pdev->revision == 0x10)))) { | ||
| 408 | info->is_kicker = true; | ||
| 409 | strcpy(fw_name, "amdgpu/polaris12_k_smc.bin"); | ||
| 410 | } else { | ||
| 411 | strcpy(fw_name, "amdgpu/polaris12_smc.bin"); | ||
| 412 | } | ||
| 389 | break; | 413 | break; |
| 390 | case CHIP_VEGAM: | 414 | case CHIP_VEGAM: |
| 391 | strcpy(fw_name, "amdgpu/vegam_smc.bin"); | 415 | strcpy(fw_name, "amdgpu/vegam_smc.bin"); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 663043c8f0f5..0acc8dee2cb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -124,14 +124,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs | |||
| 124 | goto free_chunk; | 124 | goto free_chunk; |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | mutex_lock(&p->ctx->lock); | ||
| 128 | |||
| 127 | /* skip guilty context job */ | 129 | /* skip guilty context job */ |
| 128 | if (atomic_read(&p->ctx->guilty) == 1) { | 130 | if (atomic_read(&p->ctx->guilty) == 1) { |
| 129 | ret = -ECANCELED; | 131 | ret = -ECANCELED; |
| 130 | goto free_chunk; | 132 | goto free_chunk; |
| 131 | } | 133 | } |
| 132 | 134 | ||
| 133 | mutex_lock(&p->ctx->lock); | ||
| 134 | |||
| 135 | /* get chunks */ | 135 | /* get chunks */ |
| 136 | chunk_array_user = u64_to_user_ptr(cs->in.chunks); | 136 | chunk_array_user = u64_to_user_ptr(cs->in.chunks); |
| 137 | if (copy_from_user(chunk_array, chunk_array_user, | 137 | if (copy_from_user(chunk_array, chunk_array_user, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index f9b54236102d..95f4c4139fc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
| @@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { | |||
| 39 | [AMDGPU_HW_IP_UVD_ENC] = 1, | 39 | [AMDGPU_HW_IP_UVD_ENC] = 1, |
| 40 | [AMDGPU_HW_IP_VCN_DEC] = 1, | 40 | [AMDGPU_HW_IP_VCN_DEC] = 1, |
| 41 | [AMDGPU_HW_IP_VCN_ENC] = 1, | 41 | [AMDGPU_HW_IP_VCN_ENC] = 1, |
| 42 | [AMDGPU_HW_IP_VCN_JPEG] = 1, | ||
| 42 | }; | 43 | }; |
| 43 | 44 | ||
| 44 | static int amdgput_ctx_total_num_entities(void) | 45 | static int amdgput_ctx_total_num_entities(void) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8de55f7f1a3a..74b611e8a1b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
| @@ -872,7 +872,13 @@ static const struct pci_device_id pciidlist[] = { | |||
| 872 | {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | 872 | {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, |
| 873 | {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | 873 | {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, |
| 874 | {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | 874 | {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, |
| 875 | {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | ||
| 876 | {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | ||
| 877 | {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | ||
| 875 | {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | 878 | {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, |
| 879 | {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | ||
| 880 | {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | ||
| 881 | {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | ||
| 876 | {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, | 882 | {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, |
| 877 | /* Vega 12 */ | 883 | /* Vega 12 */ |
| 878 | {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, | 884 | {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, |
| @@ -885,6 +891,7 @@ static const struct pci_device_id pciidlist[] = { | |||
| 885 | {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, | 891 | {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, |
| 886 | {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, | 892 | {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, |
| 887 | {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, | 893 | {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, |
| 894 | {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, | ||
| 888 | {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, | 895 | {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, |
| 889 | {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, | 896 | {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, |
| 890 | /* Raven */ | 897 | /* Raven */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 81732a84c2ab..8f3d44e5e787 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
| @@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
| 467 | if (!info->return_size || !info->return_pointer) | 467 | if (!info->return_size || !info->return_pointer) |
| 468 | return -EINVAL; | 468 | return -EINVAL; |
| 469 | 469 | ||
| 470 | /* Ensure IB tests are run on ring */ | ||
| 471 | flush_delayed_work(&adev->late_init_work); | ||
| 472 | |||
| 473 | switch (info->query) { | 470 | switch (info->query) { |
| 474 | case AMDGPU_INFO_ACCEL_WORKING: | 471 | case AMDGPU_INFO_ACCEL_WORKING: |
| 475 | ui32 = adev->accel_working; | 472 | ui32 = adev->accel_working; |
| @@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) | |||
| 950 | struct amdgpu_fpriv *fpriv; | 947 | struct amdgpu_fpriv *fpriv; |
| 951 | int r, pasid; | 948 | int r, pasid; |
| 952 | 949 | ||
| 950 | /* Ensure IB tests are run on ring */ | ||
| 951 | flush_delayed_work(&adev->late_init_work); | ||
| 952 | |||
| 953 | file_priv->driver_priv = NULL; | 953 | file_priv->driver_priv = NULL; |
| 954 | 954 | ||
| 955 | r = pm_runtime_get_sync(dev->dev); | 955 | r = pm_runtime_get_sync(dev->dev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1d3265c97b70..747c068379dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
| @@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); | |||
| 56 | MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); | 56 | MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); |
| 57 | MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); | 57 | MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); |
| 58 | MODULE_FIRMWARE("amdgpu/polaris12_mc.bin"); | 58 | MODULE_FIRMWARE("amdgpu/polaris12_mc.bin"); |
| 59 | MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin"); | ||
| 60 | MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin"); | ||
| 61 | MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin"); | ||
| 59 | 62 | ||
| 60 | static const u32 golden_settings_tonga_a11[] = | 63 | static const u32 golden_settings_tonga_a11[] = |
| 61 | { | 64 | { |
| @@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) | |||
| 224 | chip_name = "tonga"; | 227 | chip_name = "tonga"; |
| 225 | break; | 228 | break; |
| 226 | case CHIP_POLARIS11: | 229 | case CHIP_POLARIS11: |
| 227 | chip_name = "polaris11"; | 230 | if (((adev->pdev->device == 0x67ef) && |
| 231 | ((adev->pdev->revision == 0xe0) || | ||
| 232 | (adev->pdev->revision == 0xe5))) || | ||
| 233 | ((adev->pdev->device == 0x67ff) && | ||
| 234 | ((adev->pdev->revision == 0xcf) || | ||
| 235 | (adev->pdev->revision == 0xef) || | ||
| 236 | (adev->pdev->revision == 0xff)))) | ||
| 237 | chip_name = "polaris11_k"; | ||
| 238 | else if ((adev->pdev->device == 0x67ef) && | ||
| 239 | (adev->pdev->revision == 0xe2)) | ||
| 240 | chip_name = "polaris11_k"; | ||
| 241 | else | ||
| 242 | chip_name = "polaris11"; | ||
| 228 | break; | 243 | break; |
| 229 | case CHIP_POLARIS10: | 244 | case CHIP_POLARIS10: |
| 230 | chip_name = "polaris10"; | 245 | if ((adev->pdev->device == 0x67df) && |
| 246 | ((adev->pdev->revision == 0xe1) || | ||
| 247 | (adev->pdev->revision == 0xf7))) | ||
| 248 | chip_name = "polaris10_k"; | ||
| 249 | else | ||
| 250 | chip_name = "polaris10"; | ||
| 231 | break; | 251 | break; |
| 232 | case CHIP_POLARIS12: | 252 | case CHIP_POLARIS12: |
| 233 | chip_name = "polaris12"; | 253 | if (((adev->pdev->device == 0x6987) && |
| 254 | ((adev->pdev->revision == 0xc0) || | ||
| 255 | (adev->pdev->revision == 0xc3))) || | ||
| 256 | ((adev->pdev->device == 0x6981) && | ||
| 257 | ((adev->pdev->revision == 0x00) || | ||
| 258 | (adev->pdev->revision == 0x01) || | ||
| 259 | (adev->pdev->revision == 0x10)))) | ||
| 260 | chip_name = "polaris12_k"; | ||
| 261 | else | ||
| 262 | chip_name = "polaris12"; | ||
| 234 | break; | 263 | break; |
| 235 | case CHIP_FIJI: | 264 | case CHIP_FIJI: |
| 236 | case CHIP_CARRIZO: | 265 | case CHIP_CARRIZO: |
| @@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) | |||
| 337 | const struct mc_firmware_header_v1_0 *hdr; | 366 | const struct mc_firmware_header_v1_0 *hdr; |
| 338 | const __le32 *fw_data = NULL; | 367 | const __le32 *fw_data = NULL; |
| 339 | const __le32 *io_mc_regs = NULL; | 368 | const __le32 *io_mc_regs = NULL; |
| 340 | u32 data, vbios_version; | 369 | u32 data; |
| 341 | int i, ucode_size, regs_size; | 370 | int i, ucode_size, regs_size; |
| 342 | 371 | ||
| 343 | /* Skip MC ucode loading on SR-IOV capable boards. | 372 | /* Skip MC ucode loading on SR-IOV capable boards. |
| @@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) | |||
| 348 | if (amdgpu_sriov_bios(adev)) | 377 | if (amdgpu_sriov_bios(adev)) |
| 349 | return 0; | 378 | return 0; |
| 350 | 379 | ||
| 351 | WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); | ||
| 352 | data = RREG32(mmMC_SEQ_IO_DEBUG_DATA); | ||
| 353 | vbios_version = data & 0xf; | ||
| 354 | |||
| 355 | if (vbios_version == 0) | ||
| 356 | return 0; | ||
| 357 | |||
| 358 | if (!adev->gmc.fw) | 380 | if (!adev->gmc.fw) |
| 359 | return -EINVAL; | 381 | return -EINVAL; |
| 360 | 382 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index eae90922fdbe..322e09b5b448 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | |||
| @@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); | |||
| 48 | static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); | 48 | static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); |
| 49 | static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); | 49 | static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); |
| 50 | static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr); | 50 | static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr); |
| 51 | static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); | ||
| 51 | 52 | ||
| 52 | /** | 53 | /** |
| 53 | * vcn_v1_0_early_init - set function pointers | 54 | * vcn_v1_0_early_init - set function pointers |
| @@ -222,7 +223,7 @@ static int vcn_v1_0_hw_fini(void *handle) | |||
| 222 | struct amdgpu_ring *ring = &adev->vcn.ring_dec; | 223 | struct amdgpu_ring *ring = &adev->vcn.ring_dec; |
| 223 | 224 | ||
| 224 | if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) | 225 | if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) |
| 225 | vcn_v1_0_stop(adev); | 226 | vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); |
| 226 | 227 | ||
| 227 | ring->ready = false; | 228 | ring->ready = false; |
| 228 | 229 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index a9f18ea7e354..e4ded890b1cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
| @@ -337,12 +337,19 @@ static const struct kfd_deviceid supported_devices[] = { | |||
| 337 | { 0x6864, &vega10_device_info }, /* Vega10 */ | 337 | { 0x6864, &vega10_device_info }, /* Vega10 */ |
| 338 | { 0x6867, &vega10_device_info }, /* Vega10 */ | 338 | { 0x6867, &vega10_device_info }, /* Vega10 */ |
| 339 | { 0x6868, &vega10_device_info }, /* Vega10 */ | 339 | { 0x6868, &vega10_device_info }, /* Vega10 */ |
| 340 | { 0x6869, &vega10_device_info }, /* Vega10 */ | ||
| 341 | { 0x686A, &vega10_device_info }, /* Vega10 */ | ||
| 342 | { 0x686B, &vega10_device_info }, /* Vega10 */ | ||
| 340 | { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ | 343 | { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ |
| 344 | { 0x686D, &vega10_device_info }, /* Vega10 */ | ||
| 345 | { 0x686E, &vega10_device_info }, /* Vega10 */ | ||
| 346 | { 0x686F, &vega10_device_info }, /* Vega10 */ | ||
| 341 | { 0x687F, &vega10_device_info }, /* Vega10 */ | 347 | { 0x687F, &vega10_device_info }, /* Vega10 */ |
| 342 | { 0x66a0, &vega20_device_info }, /* Vega20 */ | 348 | { 0x66a0, &vega20_device_info }, /* Vega20 */ |
| 343 | { 0x66a1, &vega20_device_info }, /* Vega20 */ | 349 | { 0x66a1, &vega20_device_info }, /* Vega20 */ |
| 344 | { 0x66a2, &vega20_device_info }, /* Vega20 */ | 350 | { 0x66a2, &vega20_device_info }, /* Vega20 */ |
| 345 | { 0x66a3, &vega20_device_info }, /* Vega20 */ | 351 | { 0x66a3, &vega20_device_info }, /* Vega20 */ |
| 352 | { 0x66a4, &vega20_device_info }, /* Vega20 */ | ||
| 346 | { 0x66a7, &vega20_device_info }, /* Vega20 */ | 353 | { 0x66a7, &vega20_device_info }, /* Vega20 */ |
| 347 | { 0x66af, &vega20_device_info } /* Vega20 */ | 354 | { 0x66af, &vega20_device_info } /* Vega20 */ |
| 348 | }; | 355 | }; |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ca925200fe09..5a6edf65c9ea 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
| @@ -2554,9 +2554,9 @@ static void fill_audio_info(struct audio_info *audio_info, | |||
| 2554 | 2554 | ||
| 2555 | cea_revision = drm_connector->display_info.cea_rev; | 2555 | cea_revision = drm_connector->display_info.cea_rev; |
| 2556 | 2556 | ||
| 2557 | strncpy(audio_info->display_name, | 2557 | strscpy(audio_info->display_name, |
| 2558 | edid_caps->display_name, | 2558 | edid_caps->display_name, |
| 2559 | AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1); | 2559 | AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); |
| 2560 | 2560 | ||
| 2561 | if (cea_revision >= 3) { | 2561 | if (cea_revision >= 3) { |
| 2562 | audio_info->mode_count = edid_caps->audio_mode_count; | 2562 | audio_info->mode_count = edid_caps->audio_mode_count; |
| @@ -3042,6 +3042,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) | |||
| 3042 | state->underscan_enable = false; | 3042 | state->underscan_enable = false; |
| 3043 | state->underscan_hborder = 0; | 3043 | state->underscan_hborder = 0; |
| 3044 | state->underscan_vborder = 0; | 3044 | state->underscan_vborder = 0; |
| 3045 | state->max_bpc = 8; | ||
| 3045 | 3046 | ||
| 3046 | __drm_atomic_helper_connector_reset(connector, &state->base); | 3047 | __drm_atomic_helper_connector_reset(connector, &state->base); |
| 3047 | } | 3048 | } |
| @@ -3063,6 +3064,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) | |||
| 3063 | 3064 | ||
| 3064 | new_state->freesync_capable = state->freesync_capable; | 3065 | new_state->freesync_capable = state->freesync_capable; |
| 3065 | new_state->freesync_enable = state->freesync_enable; | 3066 | new_state->freesync_enable = state->freesync_enable; |
| 3067 | new_state->max_bpc = state->max_bpc; | ||
| 3066 | 3068 | ||
| 3067 | return &new_state->base; | 3069 | return &new_state->base; |
| 3068 | } | 3070 | } |
| @@ -3650,7 +3652,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder, | |||
| 3650 | mode->hdisplay = hdisplay; | 3652 | mode->hdisplay = hdisplay; |
| 3651 | mode->vdisplay = vdisplay; | 3653 | mode->vdisplay = vdisplay; |
| 3652 | mode->type &= ~DRM_MODE_TYPE_PREFERRED; | 3654 | mode->type &= ~DRM_MODE_TYPE_PREFERRED; |
| 3653 | strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN); | 3655 | strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); |
| 3654 | 3656 | ||
| 3655 | return mode; | 3657 | return mode; |
| 3656 | 3658 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index b459867a05b2..a6bcb90e8419 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
| @@ -2512,6 +2512,8 @@ static void pplib_apply_display_requirements( | |||
| 2512 | dc, | 2512 | dc, |
| 2513 | context->bw.dce.sclk_khz); | 2513 | context->bw.dce.sclk_khz); |
| 2514 | 2514 | ||
| 2515 | pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; | ||
| 2516 | |||
| 2515 | pp_display_cfg->min_engine_clock_deep_sleep_khz | 2517 | pp_display_cfg->min_engine_clock_deep_sleep_khz |
| 2516 | = context->bw.dce.sclk_deep_sleep_khz; | 2518 | = context->bw.dce.sclk_deep_sleep_khz; |
| 2517 | 2519 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 85119c2bdcc8..a2a7e0e94aa6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | |||
| @@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) | |||
| 80 | PHM_FUNC_CHECK(hwmgr); | 80 | PHM_FUNC_CHECK(hwmgr); |
| 81 | adev = hwmgr->adev; | 81 | adev = hwmgr->adev; |
| 82 | 82 | ||
| 83 | if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) { | 83 | /* Skip for suspend/resume case */ |
| 84 | if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev) | ||
| 85 | && adev->in_suspend) { | ||
| 84 | pr_info("dpm has been enabled\n"); | 86 | pr_info("dpm has been enabled\n"); |
| 85 | return 0; | 87 | return 0; |
| 86 | } | 88 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 47ac92369739..0173d0480024 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
| @@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, | |||
| 352 | 352 | ||
| 353 | switch (task_id) { | 353 | switch (task_id) { |
| 354 | case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: | 354 | case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: |
| 355 | ret = phm_pre_display_configuration_changed(hwmgr); | ||
| 356 | if (ret) | ||
| 357 | return ret; | ||
| 355 | ret = phm_set_cpu_power_state(hwmgr); | 358 | ret = phm_set_cpu_power_state(hwmgr); |
| 356 | if (ret) | 359 | if (ret) |
| 357 | return ret; | 360 | return ret; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 91ffb7bc4ee7..56437866d120 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | |||
| @@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, | |||
| 265 | if (skip) | 265 | if (skip) |
| 266 | return 0; | 266 | return 0; |
| 267 | 267 | ||
| 268 | phm_pre_display_configuration_changed(hwmgr); | ||
| 269 | |||
| 270 | phm_display_configuration_changed(hwmgr); | 268 | phm_display_configuration_changed(hwmgr); |
| 271 | 269 | ||
| 272 | if (hwmgr->ps) | 270 | if (hwmgr->ps) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 88f6b35ea6fe..b61a01f55284 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
| @@ -3589,8 +3589,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons | |||
| 3589 | } | 3589 | } |
| 3590 | 3590 | ||
| 3591 | if (i >= sclk_table->count) { | 3591 | if (i >= sclk_table->count) { |
| 3592 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; | 3592 | if (sclk > sclk_table->dpm_levels[i-1].value) { |
| 3593 | sclk_table->dpm_levels[i-1].value = sclk; | 3593 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; |
| 3594 | sclk_table->dpm_levels[i-1].value = sclk; | ||
| 3595 | } | ||
| 3594 | } else { | 3596 | } else { |
| 3595 | /* TODO: Check SCLK in DAL's minimum clocks | 3597 | /* TODO: Check SCLK in DAL's minimum clocks |
| 3596 | * in case DeepSleep divider update is required. | 3598 | * in case DeepSleep divider update is required. |
| @@ -3607,8 +3609,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons | |||
| 3607 | } | 3609 | } |
| 3608 | 3610 | ||
| 3609 | if (i >= mclk_table->count) { | 3611 | if (i >= mclk_table->count) { |
| 3610 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; | 3612 | if (mclk > mclk_table->dpm_levels[i-1].value) { |
| 3611 | mclk_table->dpm_levels[i-1].value = mclk; | 3613 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; |
| 3614 | mclk_table->dpm_levels[i-1].value = mclk; | ||
| 3615 | } | ||
| 3612 | } | 3616 | } |
| 3613 | 3617 | ||
| 3614 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) | 3618 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index e2bc6e0c229f..79c86247d0ac 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | |||
| @@ -3266,8 +3266,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co | |||
| 3266 | } | 3266 | } |
| 3267 | 3267 | ||
| 3268 | if (i >= sclk_table->count) { | 3268 | if (i >= sclk_table->count) { |
| 3269 | data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; | 3269 | if (sclk > sclk_table->dpm_levels[i-1].value) { |
| 3270 | sclk_table->dpm_levels[i-1].value = sclk; | 3270 | data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; |
| 3271 | sclk_table->dpm_levels[i-1].value = sclk; | ||
| 3272 | } | ||
| 3271 | } | 3273 | } |
| 3272 | 3274 | ||
| 3273 | for (i = 0; i < mclk_table->count; i++) { | 3275 | for (i = 0; i < mclk_table->count; i++) { |
| @@ -3276,8 +3278,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co | |||
| 3276 | } | 3278 | } |
| 3277 | 3279 | ||
| 3278 | if (i >= mclk_table->count) { | 3280 | if (i >= mclk_table->count) { |
| 3279 | data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; | 3281 | if (mclk > mclk_table->dpm_levels[i-1].value) { |
| 3280 | mclk_table->dpm_levels[i-1].value = mclk; | 3282 | data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; |
| 3283 | mclk_table->dpm_levels[i-1].value = mclk; | ||
| 3284 | } | ||
| 3281 | } | 3285 | } |
| 3282 | 3286 | ||
| 3283 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) | 3287 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index b4eadd47f3a4..3b7fce5d7258 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | |||
| @@ -130,7 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) | |||
| 130 | data->registry_data.disable_auto_wattman = 1; | 130 | data->registry_data.disable_auto_wattman = 1; |
| 131 | data->registry_data.auto_wattman_debug = 0; | 131 | data->registry_data.auto_wattman_debug = 0; |
| 132 | data->registry_data.auto_wattman_sample_period = 100; | 132 | data->registry_data.auto_wattman_sample_period = 100; |
| 133 | data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD; | 133 | data->registry_data.fclk_gfxclk_ratio = 0; |
| 134 | data->registry_data.auto_wattman_threshold = 50; | 134 | data->registry_data.auto_wattman_threshold = 50; |
| 135 | data->registry_data.gfxoff_controlled_by_driver = 1; | 135 | data->registry_data.gfxoff_controlled_by_driver = 1; |
| 136 | data->gfxoff_allowed = false; | 136 | data->gfxoff_allowed = false; |
| @@ -1660,14 +1660,15 @@ static uint32_t vega20_find_highest_dpm_level( | |||
| 1660 | return i; | 1660 | return i; |
| 1661 | } | 1661 | } |
| 1662 | 1662 | ||
| 1663 | static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) | 1663 | static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) |
| 1664 | { | 1664 | { |
| 1665 | struct vega20_hwmgr *data = | 1665 | struct vega20_hwmgr *data = |
| 1666 | (struct vega20_hwmgr *)(hwmgr->backend); | 1666 | (struct vega20_hwmgr *)(hwmgr->backend); |
| 1667 | uint32_t min_freq; | 1667 | uint32_t min_freq; |
| 1668 | int ret = 0; | 1668 | int ret = 0; |
| 1669 | 1669 | ||
| 1670 | if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { | 1670 | if (data->smu_features[GNLD_DPM_GFXCLK].enabled && |
| 1671 | (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { | ||
| 1671 | min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; | 1672 | min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; |
| 1672 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1673 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| 1673 | hwmgr, PPSMC_MSG_SetSoftMinByFreq, | 1674 | hwmgr, PPSMC_MSG_SetSoftMinByFreq, |
| @@ -1676,7 +1677,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) | |||
| 1676 | return ret); | 1677 | return ret); |
| 1677 | } | 1678 | } |
| 1678 | 1679 | ||
| 1679 | if (data->smu_features[GNLD_DPM_UCLK].enabled) { | 1680 | if (data->smu_features[GNLD_DPM_UCLK].enabled && |
| 1681 | (feature_mask & FEATURE_DPM_UCLK_MASK)) { | ||
| 1680 | min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; | 1682 | min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; |
| 1681 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1683 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| 1682 | hwmgr, PPSMC_MSG_SetSoftMinByFreq, | 1684 | hwmgr, PPSMC_MSG_SetSoftMinByFreq, |
| @@ -1692,7 +1694,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) | |||
| 1692 | return ret); | 1694 | return ret); |
| 1693 | } | 1695 | } |
| 1694 | 1696 | ||
| 1695 | if (data->smu_features[GNLD_DPM_UVD].enabled) { | 1697 | if (data->smu_features[GNLD_DPM_UVD].enabled && |
| 1698 | (feature_mask & FEATURE_DPM_UVD_MASK)) { | ||
| 1696 | min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; | 1699 | min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; |
| 1697 | 1700 | ||
| 1698 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1701 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| @@ -1710,7 +1713,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) | |||
| 1710 | return ret); | 1713 | return ret); |
| 1711 | } | 1714 | } |
| 1712 | 1715 | ||
| 1713 | if (data->smu_features[GNLD_DPM_VCE].enabled) { | 1716 | if (data->smu_features[GNLD_DPM_VCE].enabled && |
| 1717 | (feature_mask & FEATURE_DPM_VCE_MASK)) { | ||
| 1714 | min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; | 1718 | min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; |
| 1715 | 1719 | ||
| 1716 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1720 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| @@ -1720,7 +1724,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) | |||
| 1720 | return ret); | 1724 | return ret); |
| 1721 | } | 1725 | } |
| 1722 | 1726 | ||
| 1723 | if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { | 1727 | if (data->smu_features[GNLD_DPM_SOCCLK].enabled && |
| 1728 | (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { | ||
| 1724 | min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; | 1729 | min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; |
| 1725 | 1730 | ||
| 1726 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1731 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| @@ -1733,14 +1738,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) | |||
| 1733 | return ret; | 1738 | return ret; |
| 1734 | } | 1739 | } |
| 1735 | 1740 | ||
| 1736 | static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) | 1741 | static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) |
| 1737 | { | 1742 | { |
| 1738 | struct vega20_hwmgr *data = | 1743 | struct vega20_hwmgr *data = |
| 1739 | (struct vega20_hwmgr *)(hwmgr->backend); | 1744 | (struct vega20_hwmgr *)(hwmgr->backend); |
| 1740 | uint32_t max_freq; | 1745 | uint32_t max_freq; |
| 1741 | int ret = 0; | 1746 | int ret = 0; |
| 1742 | 1747 | ||
| 1743 | if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { | 1748 | if (data->smu_features[GNLD_DPM_GFXCLK].enabled && |
| 1749 | (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { | ||
| 1744 | max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; | 1750 | max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; |
| 1745 | 1751 | ||
| 1746 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1752 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| @@ -1750,7 +1756,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) | |||
| 1750 | return ret); | 1756 | return ret); |
| 1751 | } | 1757 | } |
| 1752 | 1758 | ||
| 1753 | if (data->smu_features[GNLD_DPM_UCLK].enabled) { | 1759 | if (data->smu_features[GNLD_DPM_UCLK].enabled && |
| 1760 | (feature_mask & FEATURE_DPM_UCLK_MASK)) { | ||
| 1754 | max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; | 1761 | max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; |
| 1755 | 1762 | ||
| 1756 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1763 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| @@ -1760,7 +1767,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) | |||
| 1760 | return ret); | 1767 | return ret); |
| 1761 | } | 1768 | } |
| 1762 | 1769 | ||
| 1763 | if (data->smu_features[GNLD_DPM_UVD].enabled) { | 1770 | if (data->smu_features[GNLD_DPM_UVD].enabled && |
| 1771 | (feature_mask & FEATURE_DPM_UVD_MASK)) { | ||
| 1764 | max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; | 1772 | max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; |
| 1765 | 1773 | ||
| 1766 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1774 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| @@ -1777,7 +1785,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) | |||
| 1777 | return ret); | 1785 | return ret); |
| 1778 | } | 1786 | } |
| 1779 | 1787 | ||
| 1780 | if (data->smu_features[GNLD_DPM_VCE].enabled) { | 1788 | if (data->smu_features[GNLD_DPM_VCE].enabled && |
| 1789 | (feature_mask & FEATURE_DPM_VCE_MASK)) { | ||
| 1781 | max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; | 1790 | max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; |
| 1782 | 1791 | ||
| 1783 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1792 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| @@ -1787,7 +1796,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) | |||
| 1787 | return ret); | 1796 | return ret); |
| 1788 | } | 1797 | } |
| 1789 | 1798 | ||
| 1790 | if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { | 1799 | if (data->smu_features[GNLD_DPM_SOCCLK].enabled && |
| 1800 | (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { | ||
| 1791 | max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; | 1801 | max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; |
| 1792 | 1802 | ||
| 1793 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | 1803 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( |
| @@ -2126,12 +2136,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) | |||
| 2126 | data->dpm_table.mem_table.dpm_state.soft_max_level = | 2136 | data->dpm_table.mem_table.dpm_state.soft_max_level = |
| 2127 | data->dpm_table.mem_table.dpm_levels[soft_level].value; | 2137 | data->dpm_table.mem_table.dpm_levels[soft_level].value; |
| 2128 | 2138 | ||
| 2129 | ret = vega20_upload_dpm_min_level(hwmgr); | 2139 | ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); |
| 2130 | PP_ASSERT_WITH_CODE(!ret, | 2140 | PP_ASSERT_WITH_CODE(!ret, |
| 2131 | "Failed to upload boot level to highest!", | 2141 | "Failed to upload boot level to highest!", |
| 2132 | return ret); | 2142 | return ret); |
| 2133 | 2143 | ||
| 2134 | ret = vega20_upload_dpm_max_level(hwmgr); | 2144 | ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); |
| 2135 | PP_ASSERT_WITH_CODE(!ret, | 2145 | PP_ASSERT_WITH_CODE(!ret, |
| 2136 | "Failed to upload dpm max level to highest!", | 2146 | "Failed to upload dpm max level to highest!", |
| 2137 | return ret); | 2147 | return ret); |
| @@ -2158,12 +2168,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) | |||
| 2158 | data->dpm_table.mem_table.dpm_state.soft_max_level = | 2168 | data->dpm_table.mem_table.dpm_state.soft_max_level = |
| 2159 | data->dpm_table.mem_table.dpm_levels[soft_level].value; | 2169 | data->dpm_table.mem_table.dpm_levels[soft_level].value; |
| 2160 | 2170 | ||
| 2161 | ret = vega20_upload_dpm_min_level(hwmgr); | 2171 | ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); |
| 2162 | PP_ASSERT_WITH_CODE(!ret, | 2172 | PP_ASSERT_WITH_CODE(!ret, |
| 2163 | "Failed to upload boot level to highest!", | 2173 | "Failed to upload boot level to highest!", |
| 2164 | return ret); | 2174 | return ret); |
| 2165 | 2175 | ||
| 2166 | ret = vega20_upload_dpm_max_level(hwmgr); | 2176 | ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); |
| 2167 | PP_ASSERT_WITH_CODE(!ret, | 2177 | PP_ASSERT_WITH_CODE(!ret, |
| 2168 | "Failed to upload dpm max level to highest!", | 2178 | "Failed to upload dpm max level to highest!", |
| 2169 | return ret); | 2179 | return ret); |
| @@ -2176,12 +2186,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) | |||
| 2176 | { | 2186 | { |
| 2177 | int ret = 0; | 2187 | int ret = 0; |
| 2178 | 2188 | ||
| 2179 | ret = vega20_upload_dpm_min_level(hwmgr); | 2189 | ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); |
| 2180 | PP_ASSERT_WITH_CODE(!ret, | 2190 | PP_ASSERT_WITH_CODE(!ret, |
| 2181 | "Failed to upload DPM Bootup Levels!", | 2191 | "Failed to upload DPM Bootup Levels!", |
| 2182 | return ret); | 2192 | return ret); |
| 2183 | 2193 | ||
| 2184 | ret = vega20_upload_dpm_max_level(hwmgr); | 2194 | ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); |
| 2185 | PP_ASSERT_WITH_CODE(!ret, | 2195 | PP_ASSERT_WITH_CODE(!ret, |
| 2186 | "Failed to upload DPM Max Levels!", | 2196 | "Failed to upload DPM Max Levels!", |
| 2187 | return ret); | 2197 | return ret); |
| @@ -2239,12 +2249,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, | |||
| 2239 | data->dpm_table.gfx_table.dpm_state.soft_max_level = | 2249 | data->dpm_table.gfx_table.dpm_state.soft_max_level = |
| 2240 | data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; | 2250 | data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; |
| 2241 | 2251 | ||
| 2242 | ret = vega20_upload_dpm_min_level(hwmgr); | 2252 | ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); |
| 2243 | PP_ASSERT_WITH_CODE(!ret, | 2253 | PP_ASSERT_WITH_CODE(!ret, |
| 2244 | "Failed to upload boot level to lowest!", | 2254 | "Failed to upload boot level to lowest!", |
| 2245 | return ret); | 2255 | return ret); |
| 2246 | 2256 | ||
| 2247 | ret = vega20_upload_dpm_max_level(hwmgr); | 2257 | ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); |
| 2248 | PP_ASSERT_WITH_CODE(!ret, | 2258 | PP_ASSERT_WITH_CODE(!ret, |
| 2249 | "Failed to upload dpm max level to highest!", | 2259 | "Failed to upload dpm max level to highest!", |
| 2250 | return ret); | 2260 | return ret); |
| @@ -2259,12 +2269,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, | |||
| 2259 | data->dpm_table.mem_table.dpm_state.soft_max_level = | 2269 | data->dpm_table.mem_table.dpm_state.soft_max_level = |
| 2260 | data->dpm_table.mem_table.dpm_levels[soft_max_level].value; | 2270 | data->dpm_table.mem_table.dpm_levels[soft_max_level].value; |
| 2261 | 2271 | ||
| 2262 | ret = vega20_upload_dpm_min_level(hwmgr); | 2272 | ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK); |
| 2263 | PP_ASSERT_WITH_CODE(!ret, | 2273 | PP_ASSERT_WITH_CODE(!ret, |
| 2264 | "Failed to upload boot level to lowest!", | 2274 | "Failed to upload boot level to lowest!", |
| 2265 | return ret); | 2275 | return ret); |
| 2266 | 2276 | ||
| 2267 | ret = vega20_upload_dpm_max_level(hwmgr); | 2277 | ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK); |
| 2268 | PP_ASSERT_WITH_CODE(!ret, | 2278 | PP_ASSERT_WITH_CODE(!ret, |
| 2269 | "Failed to upload dpm max level to highest!", | 2279 | "Failed to upload dpm max level to highest!", |
| 2270 | return ret); | 2280 | return ret); |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h index 62f36ba2435b..c1a99dfe4913 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h | |||
| @@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result; | |||
| 386 | #define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) | 386 | #define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) |
| 387 | #define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) | 387 | #define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) |
| 388 | 388 | ||
| 389 | #define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415) | ||
| 390 | |||
| 389 | #define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280) | 391 | #define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280) |
| 390 | #define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281) | 392 | #define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281) |
| 391 | #define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282) | 393 | #define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282) |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 872d3824337b..a1e0ac9ae248 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | |||
| @@ -1985,6 +1985,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) | |||
| 1985 | 1985 | ||
| 1986 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); | 1986 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); |
| 1987 | 1987 | ||
| 1988 | /* Apply avfs cks-off voltages to avoid the overshoot | ||
| 1989 | * when switching to the highest sclk frequency | ||
| 1990 | */ | ||
| 1991 | if (data->apply_avfs_cks_off_voltage) | ||
| 1992 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); | ||
| 1993 | |||
| 1988 | return 0; | 1994 | return 0; |
| 1989 | } | 1995 | } |
| 1990 | 1996 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 99d5e4f98f49..a6edd5df33b0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |||
| @@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); | |||
| 37 | MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); | 37 | MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); |
| 38 | MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); | 38 | MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); |
| 39 | MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin"); | 39 | MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin"); |
| 40 | MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin"); | ||
| 40 | MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); | 41 | MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); |
| 41 | MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); | 42 | MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); |
| 42 | MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); | 43 | MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); |
| 44 | MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin"); | ||
| 43 | MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); | 45 | MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); |
| 46 | MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin"); | ||
| 44 | MODULE_FIRMWARE("amdgpu/vegam_smc.bin"); | 47 | MODULE_FIRMWARE("amdgpu/vegam_smc.bin"); |
| 45 | MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); | 48 | MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); |
| 46 | MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); | 49 | MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); |
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 0cd827e11fa2..de26df0c6044 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c | |||
| @@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev, | |||
| 263 | { | 263 | { |
| 264 | struct ast_framebuffer *afb = &afbdev->afb; | 264 | struct ast_framebuffer *afb = &afbdev->afb; |
| 265 | 265 | ||
| 266 | drm_crtc_force_disable_all(dev); | ||
| 266 | drm_fb_helper_unregister_fbi(&afbdev->helper); | 267 | drm_fb_helper_unregister_fbi(&afbdev->helper); |
| 267 | 268 | ||
| 268 | if (afb->obj) { | 269 | if (afb->obj) { |
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 680566d97adc..10243965ee7c 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c | |||
| @@ -54,7 +54,7 @@ | |||
| 54 | #define SN_AUX_ADDR_7_0_REG 0x76 | 54 | #define SN_AUX_ADDR_7_0_REG 0x76 |
| 55 | #define SN_AUX_LENGTH_REG 0x77 | 55 | #define SN_AUX_LENGTH_REG 0x77 |
| 56 | #define SN_AUX_CMD_REG 0x78 | 56 | #define SN_AUX_CMD_REG 0x78 |
| 57 | #define AUX_CMD_SEND BIT(1) | 57 | #define AUX_CMD_SEND BIT(0) |
| 58 | #define AUX_CMD_REQ(x) ((x) << 4) | 58 | #define AUX_CMD_REQ(x) ((x) << 4) |
| 59 | #define SN_AUX_RDATA_REG(x) (0x79 + (x)) | 59 | #define SN_AUX_RDATA_REG(x) (0x79 + (x)) |
| 60 | #define SN_SSC_CONFIG_REG 0x93 | 60 | #define SN_SSC_CONFIG_REG 0x93 |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index dd852a25d375..9d64f874f965 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -71,7 +71,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc, | |||
| 71 | #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) | 71 | #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) |
| 72 | static bool drm_leak_fbdev_smem = false; | 72 | static bool drm_leak_fbdev_smem = false; |
| 73 | module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); | 73 | module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); |
| 74 | MODULE_PARM_DESC(fbdev_emulation, | 74 | MODULE_PARM_DESC(drm_leak_fbdev_smem, |
| 75 | "Allow unsafe leaking fbdev physical smem address [default=false]"); | 75 | "Allow unsafe leaking fbdev physical smem address [default=false]"); |
| 76 | #endif | 76 | #endif |
| 77 | 77 | ||
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 0c4eb4a9ab31..51e06defc8d8 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h | |||
| @@ -104,6 +104,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor); | |||
| 104 | int drm_sysfs_connector_add(struct drm_connector *connector); | 104 | int drm_sysfs_connector_add(struct drm_connector *connector); |
| 105 | void drm_sysfs_connector_remove(struct drm_connector *connector); | 105 | void drm_sysfs_connector_remove(struct drm_connector *connector); |
| 106 | 106 | ||
| 107 | void drm_sysfs_lease_event(struct drm_device *dev); | ||
| 108 | |||
| 107 | /* drm_gem.c */ | 109 | /* drm_gem.c */ |
| 108 | int drm_gem_init(struct drm_device *dev); | 110 | int drm_gem_init(struct drm_device *dev); |
| 109 | void drm_gem_destroy(struct drm_device *dev); | 111 | void drm_gem_destroy(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 24a177ea5417..c61680ad962d 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c | |||
| @@ -296,7 +296,7 @@ void drm_lease_destroy(struct drm_master *master) | |||
| 296 | 296 | ||
| 297 | if (master->lessor) { | 297 | if (master->lessor) { |
| 298 | /* Tell the master to check the lessee list */ | 298 | /* Tell the master to check the lessee list */ |
| 299 | drm_sysfs_hotplug_event(dev); | 299 | drm_sysfs_lease_event(dev); |
| 300 | drm_master_put(&master->lessor); | 300 | drm_master_put(&master->lessor); |
| 301 | } | 301 | } |
| 302 | 302 | ||
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index b3c1daad1169..ecb7b33002bb 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c | |||
| @@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector) | |||
| 301 | connector->kdev = NULL; | 301 | connector->kdev = NULL; |
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | void drm_sysfs_lease_event(struct drm_device *dev) | ||
| 305 | { | ||
| 306 | char *event_string = "LEASE=1"; | ||
| 307 | char *envp[] = { event_string, NULL }; | ||
| 308 | |||
| 309 | DRM_DEBUG("generating lease event\n"); | ||
| 310 | |||
| 311 | kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); | ||
| 312 | } | ||
| 313 | |||
| 304 | /** | 314 | /** |
| 305 | * drm_sysfs_hotplug_event - generate a DRM uevent | 315 | * drm_sysfs_hotplug_event - generate a DRM uevent |
| 306 | * @dev: DRM device | 316 | * @dev: DRM device |
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 481896fb712a..85e6736f0a32 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c | |||
| @@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, | |||
| 235 | plane->bpp = skl_pixel_formats[fmt].bpp; | 235 | plane->bpp = skl_pixel_formats[fmt].bpp; |
| 236 | plane->drm_format = skl_pixel_formats[fmt].drm_format; | 236 | plane->drm_format = skl_pixel_formats[fmt].drm_format; |
| 237 | } else { | 237 | } else { |
| 238 | plane->tiled = !!(val & DISPPLANE_TILED); | 238 | plane->tiled = val & DISPPLANE_TILED; |
| 239 | fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK); | 239 | fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK); |
| 240 | plane->bpp = bdw_pixel_formats[fmt].bpp; | 240 | plane->bpp = bdw_pixel_formats[fmt].bpp; |
| 241 | plane->drm_format = bdw_pixel_formats[fmt].drm_format; | 241 | plane->drm_format = bdw_pixel_formats[fmt].drm_format; |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ffdbbac4400e..47062ee979cf 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
| 1444 | 1444 | ||
| 1445 | intel_uncore_sanitize(dev_priv); | 1445 | intel_uncore_sanitize(dev_priv); |
| 1446 | 1446 | ||
| 1447 | intel_gt_init_workarounds(dev_priv); | ||
| 1447 | i915_gem_load_init_fences(dev_priv); | 1448 | i915_gem_load_init_fences(dev_priv); |
| 1448 | 1449 | ||
| 1449 | /* On the 945G/GM, the chipset reports the MSI capability on the | 1450 | /* On the 945G/GM, the chipset reports the MSI capability on the |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9102571e9692..872a2e159a5f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -67,6 +67,7 @@ | |||
| 67 | #include "intel_ringbuffer.h" | 67 | #include "intel_ringbuffer.h" |
| 68 | #include "intel_uncore.h" | 68 | #include "intel_uncore.h" |
| 69 | #include "intel_wopcm.h" | 69 | #include "intel_wopcm.h" |
| 70 | #include "intel_workarounds.h" | ||
| 70 | #include "intel_uc.h" | 71 | #include "intel_uc.h" |
| 71 | 72 | ||
| 72 | #include "i915_gem.h" | 73 | #include "i915_gem.h" |
| @@ -1805,6 +1806,7 @@ struct drm_i915_private { | |||
| 1805 | int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; | 1806 | int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; |
| 1806 | 1807 | ||
| 1807 | struct i915_workarounds workarounds; | 1808 | struct i915_workarounds workarounds; |
| 1809 | struct i915_wa_list gt_wa_list; | ||
| 1808 | 1810 | ||
| 1809 | struct i915_frontbuffer_tracking fb_tracking; | 1811 | struct i915_frontbuffer_tracking fb_tracking; |
| 1810 | 1812 | ||
| @@ -2148,6 +2150,8 @@ struct drm_i915_private { | |||
| 2148 | struct delayed_work idle_work; | 2150 | struct delayed_work idle_work; |
| 2149 | 2151 | ||
| 2150 | ktime_t last_init_time; | 2152 | ktime_t last_init_time; |
| 2153 | |||
| 2154 | struct i915_vma *scratch; | ||
| 2151 | } gt; | 2155 | } gt; |
| 2152 | 2156 | ||
| 2153 | /* perform PHY state sanity checks? */ | 2157 | /* perform PHY state sanity checks? */ |
| @@ -3870,4 +3874,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) | |||
| 3870 | return I915_HWS_CSB_WRITE_INDEX; | 3874 | return I915_HWS_CSB_WRITE_INDEX; |
| 3871 | } | 3875 | } |
| 3872 | 3876 | ||
| 3877 | static inline u32 i915_scratch_offset(const struct drm_i915_private *i915) | ||
| 3878 | { | ||
| 3879 | return i915_ggtt_offset(i915->gt.scratch); | ||
| 3880 | } | ||
| 3881 | |||
| 3873 | #endif | 3882 | #endif |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0c8aa57ce83b..6ae9a6080cc8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) | |||
| 5305 | } | 5305 | } |
| 5306 | } | 5306 | } |
| 5307 | 5307 | ||
| 5308 | intel_gt_workarounds_apply(dev_priv); | 5308 | intel_gt_apply_workarounds(dev_priv); |
| 5309 | 5309 | ||
| 5310 | i915_gem_init_swizzling(dev_priv); | 5310 | i915_gem_init_swizzling(dev_priv); |
| 5311 | 5311 | ||
| @@ -5500,6 +5500,44 @@ err_active: | |||
| 5500 | goto out_ctx; | 5500 | goto out_ctx; |
| 5501 | } | 5501 | } |
| 5502 | 5502 | ||
| 5503 | static int | ||
| 5504 | i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size) | ||
| 5505 | { | ||
| 5506 | struct drm_i915_gem_object *obj; | ||
| 5507 | struct i915_vma *vma; | ||
| 5508 | int ret; | ||
| 5509 | |||
| 5510 | obj = i915_gem_object_create_stolen(i915, size); | ||
| 5511 | if (!obj) | ||
| 5512 | obj = i915_gem_object_create_internal(i915, size); | ||
| 5513 | if (IS_ERR(obj)) { | ||
| 5514 | DRM_ERROR("Failed to allocate scratch page\n"); | ||
| 5515 | return PTR_ERR(obj); | ||
| 5516 | } | ||
| 5517 | |||
| 5518 | vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); | ||
| 5519 | if (IS_ERR(vma)) { | ||
| 5520 | ret = PTR_ERR(vma); | ||
| 5521 | goto err_unref; | ||
| 5522 | } | ||
| 5523 | |||
| 5524 | ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); | ||
| 5525 | if (ret) | ||
| 5526 | goto err_unref; | ||
| 5527 | |||
| 5528 | i915->gt.scratch = vma; | ||
| 5529 | return 0; | ||
| 5530 | |||
| 5531 | err_unref: | ||
| 5532 | i915_gem_object_put(obj); | ||
| 5533 | return ret; | ||
| 5534 | } | ||
| 5535 | |||
| 5536 | static void i915_gem_fini_scratch(struct drm_i915_private *i915) | ||
| 5537 | { | ||
| 5538 | i915_vma_unpin_and_release(&i915->gt.scratch, 0); | ||
| 5539 | } | ||
| 5540 | |||
| 5503 | int i915_gem_init(struct drm_i915_private *dev_priv) | 5541 | int i915_gem_init(struct drm_i915_private *dev_priv) |
| 5504 | { | 5542 | { |
| 5505 | int ret; | 5543 | int ret; |
| @@ -5546,12 +5584,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv) | |||
| 5546 | goto err_unlock; | 5584 | goto err_unlock; |
| 5547 | } | 5585 | } |
| 5548 | 5586 | ||
| 5549 | ret = i915_gem_contexts_init(dev_priv); | 5587 | ret = i915_gem_init_scratch(dev_priv, |
| 5588 | IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE); | ||
| 5550 | if (ret) { | 5589 | if (ret) { |
| 5551 | GEM_BUG_ON(ret == -EIO); | 5590 | GEM_BUG_ON(ret == -EIO); |
| 5552 | goto err_ggtt; | 5591 | goto err_ggtt; |
| 5553 | } | 5592 | } |
| 5554 | 5593 | ||
| 5594 | ret = i915_gem_contexts_init(dev_priv); | ||
| 5595 | if (ret) { | ||
| 5596 | GEM_BUG_ON(ret == -EIO); | ||
| 5597 | goto err_scratch; | ||
| 5598 | } | ||
| 5599 | |||
| 5555 | ret = intel_engines_init(dev_priv); | 5600 | ret = intel_engines_init(dev_priv); |
| 5556 | if (ret) { | 5601 | if (ret) { |
| 5557 | GEM_BUG_ON(ret == -EIO); | 5602 | GEM_BUG_ON(ret == -EIO); |
| @@ -5624,6 +5669,8 @@ err_pm: | |||
| 5624 | err_context: | 5669 | err_context: |
| 5625 | if (ret != -EIO) | 5670 | if (ret != -EIO) |
| 5626 | i915_gem_contexts_fini(dev_priv); | 5671 | i915_gem_contexts_fini(dev_priv); |
| 5672 | err_scratch: | ||
| 5673 | i915_gem_fini_scratch(dev_priv); | ||
| 5627 | err_ggtt: | 5674 | err_ggtt: |
| 5628 | err_unlock: | 5675 | err_unlock: |
| 5629 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 5676 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
| @@ -5675,8 +5722,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) | |||
| 5675 | intel_uc_fini(dev_priv); | 5722 | intel_uc_fini(dev_priv); |
| 5676 | i915_gem_cleanup_engines(dev_priv); | 5723 | i915_gem_cleanup_engines(dev_priv); |
| 5677 | i915_gem_contexts_fini(dev_priv); | 5724 | i915_gem_contexts_fini(dev_priv); |
| 5725 | i915_gem_fini_scratch(dev_priv); | ||
| 5678 | mutex_unlock(&dev_priv->drm.struct_mutex); | 5726 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 5679 | 5727 | ||
| 5728 | intel_wa_list_free(&dev_priv->gt_wa_list); | ||
| 5729 | |||
| 5680 | intel_cleanup_gt_powersave(dev_priv); | 5730 | intel_cleanup_gt_powersave(dev_priv); |
| 5681 | 5731 | ||
| 5682 | intel_uc_fini_misc(dev_priv); | 5732 | intel_uc_fini_misc(dev_priv); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d4fac09095f8..1aaccbe7e1de 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma, | |||
| 1268 | else if (gen >= 4) | 1268 | else if (gen >= 4) |
| 1269 | len = 4; | 1269 | len = 4; |
| 1270 | else | 1270 | else |
| 1271 | len = 6; | 1271 | len = 3; |
| 1272 | 1272 | ||
| 1273 | batch = reloc_gpu(eb, vma, len); | 1273 | batch = reloc_gpu(eb, vma, len); |
| 1274 | if (IS_ERR(batch)) | 1274 | if (IS_ERR(batch)) |
| @@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma, | |||
| 1309 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; | 1309 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; |
| 1310 | *batch++ = addr; | 1310 | *batch++ = addr; |
| 1311 | *batch++ = target_offset; | 1311 | *batch++ = target_offset; |
| 1312 | |||
| 1313 | /* And again for good measure (blb/pnv) */ | ||
| 1314 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; | ||
| 1315 | *batch++ = addr; | ||
| 1316 | *batch++ = target_offset; | ||
| 1317 | } | 1312 | } |
| 1318 | 1313 | ||
| 1319 | goto out; | 1314 | goto out; |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 3eb33e000d6f..db4128d6c09b 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
| @@ -1495,7 +1495,7 @@ static void gem_record_rings(struct i915_gpu_state *error) | |||
| 1495 | if (HAS_BROKEN_CS_TLB(i915)) | 1495 | if (HAS_BROKEN_CS_TLB(i915)) |
| 1496 | ee->wa_batchbuffer = | 1496 | ee->wa_batchbuffer = |
| 1497 | i915_error_object_create(i915, | 1497 | i915_error_object_create(i915, |
| 1498 | engine->scratch); | 1498 | i915->gt.scratch); |
| 1499 | request_record_user_bo(request, ee); | 1499 | request_record_user_bo(request, ee); |
| 1500 | 1500 | ||
| 1501 | ee->ctx = | 1501 | ee->ctx = |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 217ed3ee1cab..76b5f94ea6cb 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
| @@ -490,46 +490,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) | |||
| 490 | intel_engine_init_cmd_parser(engine); | 490 | intel_engine_init_cmd_parser(engine); |
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | int intel_engine_create_scratch(struct intel_engine_cs *engine, | ||
| 494 | unsigned int size) | ||
| 495 | { | ||
| 496 | struct drm_i915_gem_object *obj; | ||
| 497 | struct i915_vma *vma; | ||
| 498 | int ret; | ||
| 499 | |||
| 500 | WARN_ON(engine->scratch); | ||
| 501 | |||
| 502 | obj = i915_gem_object_create_stolen(engine->i915, size); | ||
| 503 | if (!obj) | ||
| 504 | obj = i915_gem_object_create_internal(engine->i915, size); | ||
| 505 | if (IS_ERR(obj)) { | ||
| 506 | DRM_ERROR("Failed to allocate scratch page\n"); | ||
| 507 | return PTR_ERR(obj); | ||
| 508 | } | ||
| 509 | |||
| 510 | vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); | ||
| 511 | if (IS_ERR(vma)) { | ||
| 512 | ret = PTR_ERR(vma); | ||
| 513 | goto err_unref; | ||
| 514 | } | ||
| 515 | |||
| 516 | ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); | ||
| 517 | if (ret) | ||
| 518 | goto err_unref; | ||
| 519 | |||
| 520 | engine->scratch = vma; | ||
| 521 | return 0; | ||
| 522 | |||
| 523 | err_unref: | ||
| 524 | i915_gem_object_put(obj); | ||
| 525 | return ret; | ||
| 526 | } | ||
| 527 | |||
| 528 | void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) | ||
| 529 | { | ||
| 530 | i915_vma_unpin_and_release(&engine->scratch, 0); | ||
| 531 | } | ||
| 532 | |||
| 533 | static void cleanup_status_page(struct intel_engine_cs *engine) | 493 | static void cleanup_status_page(struct intel_engine_cs *engine) |
| 534 | { | 494 | { |
| 535 | if (HWS_NEEDS_PHYSICAL(engine->i915)) { | 495 | if (HWS_NEEDS_PHYSICAL(engine->i915)) { |
| @@ -704,8 +664,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) | |||
| 704 | { | 664 | { |
| 705 | struct drm_i915_private *i915 = engine->i915; | 665 | struct drm_i915_private *i915 = engine->i915; |
| 706 | 666 | ||
| 707 | intel_engine_cleanup_scratch(engine); | ||
| 708 | |||
| 709 | cleanup_status_page(engine); | 667 | cleanup_status_page(engine); |
| 710 | 668 | ||
| 711 | intel_engine_fini_breadcrumbs(engine); | 669 | intel_engine_fini_breadcrumbs(engine); |
| @@ -720,6 +678,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) | |||
| 720 | __intel_context_unpin(i915->kernel_context, engine); | 678 | __intel_context_unpin(i915->kernel_context, engine); |
| 721 | 679 | ||
| 722 | i915_timeline_fini(&engine->timeline); | 680 | i915_timeline_fini(&engine->timeline); |
| 681 | |||
| 682 | intel_wa_list_free(&engine->wa_list); | ||
| 723 | } | 683 | } |
| 724 | 684 | ||
| 725 | u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) | 685 | u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 37c94a54efcb..58d1d3d47dd3 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq) | |||
| 442 | * may not be visible to the HW prior to the completion of the UC | 442 | * may not be visible to the HW prior to the completion of the UC |
| 443 | * register write and that we may begin execution from the context | 443 | * register write and that we may begin execution from the context |
| 444 | * before its image is complete leading to invalid PD chasing. | 444 | * before its image is complete leading to invalid PD chasing. |
| 445 | * | ||
| 446 | * Furthermore, Braswell, at least, wants a full mb to be sure that | ||
| 447 | * the writes are coherent in memory (visible to the GPU) prior to | ||
| 448 | * execution, and not just visible to other CPUs (as is the result of | ||
| 449 | * wmb). | ||
| 445 | */ | 450 | */ |
| 446 | wmb(); | 451 | mb(); |
| 447 | return ce->lrc_desc; | 452 | return ce->lrc_desc; |
| 448 | } | 453 | } |
| 449 | 454 | ||
| @@ -1443,9 +1448,10 @@ static int execlists_request_alloc(struct i915_request *request) | |||
| 1443 | static u32 * | 1448 | static u32 * |
| 1444 | gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) | 1449 | gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) |
| 1445 | { | 1450 | { |
| 1451 | /* NB no one else is allowed to scribble over scratch + 256! */ | ||
| 1446 | *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; | 1452 | *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; |
| 1447 | *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); | 1453 | *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); |
| 1448 | *batch++ = i915_ggtt_offset(engine->scratch) + 256; | 1454 | *batch++ = i915_scratch_offset(engine->i915) + 256; |
| 1449 | *batch++ = 0; | 1455 | *batch++ = 0; |
| 1450 | 1456 | ||
| 1451 | *batch++ = MI_LOAD_REGISTER_IMM(1); | 1457 | *batch++ = MI_LOAD_REGISTER_IMM(1); |
| @@ -1459,7 +1465,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) | |||
| 1459 | 1465 | ||
| 1460 | *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; | 1466 | *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; |
| 1461 | *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); | 1467 | *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); |
| 1462 | *batch++ = i915_ggtt_offset(engine->scratch) + 256; | 1468 | *batch++ = i915_scratch_offset(engine->i915) + 256; |
| 1463 | *batch++ = 0; | 1469 | *batch++ = 0; |
| 1464 | 1470 | ||
| 1465 | return batch; | 1471 | return batch; |
| @@ -1496,7 +1502,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) | |||
| 1496 | PIPE_CONTROL_GLOBAL_GTT_IVB | | 1502 | PIPE_CONTROL_GLOBAL_GTT_IVB | |
| 1497 | PIPE_CONTROL_CS_STALL | | 1503 | PIPE_CONTROL_CS_STALL | |
| 1498 | PIPE_CONTROL_QW_WRITE, | 1504 | PIPE_CONTROL_QW_WRITE, |
| 1499 | i915_ggtt_offset(engine->scratch) + | 1505 | i915_scratch_offset(engine->i915) + |
| 1500 | 2 * CACHELINE_BYTES); | 1506 | 2 * CACHELINE_BYTES); |
| 1501 | 1507 | ||
| 1502 | *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; | 1508 | *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; |
| @@ -1573,7 +1579,7 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) | |||
| 1573 | PIPE_CONTROL_GLOBAL_GTT_IVB | | 1579 | PIPE_CONTROL_GLOBAL_GTT_IVB | |
| 1574 | PIPE_CONTROL_CS_STALL | | 1580 | PIPE_CONTROL_CS_STALL | |
| 1575 | PIPE_CONTROL_QW_WRITE, | 1581 | PIPE_CONTROL_QW_WRITE, |
| 1576 | i915_ggtt_offset(engine->scratch) | 1582 | i915_scratch_offset(engine->i915) |
| 1577 | + 2 * CACHELINE_BYTES); | 1583 | + 2 * CACHELINE_BYTES); |
| 1578 | } | 1584 | } |
| 1579 | 1585 | ||
| @@ -1793,6 +1799,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) | |||
| 1793 | 1799 | ||
| 1794 | static int gen8_init_common_ring(struct intel_engine_cs *engine) | 1800 | static int gen8_init_common_ring(struct intel_engine_cs *engine) |
| 1795 | { | 1801 | { |
| 1802 | intel_engine_apply_workarounds(engine); | ||
| 1803 | |||
| 1796 | intel_mocs_init_engine(engine); | 1804 | intel_mocs_init_engine(engine); |
| 1797 | 1805 | ||
| 1798 | intel_engine_reset_breadcrumbs(engine); | 1806 | intel_engine_reset_breadcrumbs(engine); |
| @@ -2139,7 +2147,7 @@ static int gen8_emit_flush_render(struct i915_request *request, | |||
| 2139 | { | 2147 | { |
| 2140 | struct intel_engine_cs *engine = request->engine; | 2148 | struct intel_engine_cs *engine = request->engine; |
| 2141 | u32 scratch_addr = | 2149 | u32 scratch_addr = |
| 2142 | i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; | 2150 | i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES; |
| 2143 | bool vf_flush_wa = false, dc_flush_wa = false; | 2151 | bool vf_flush_wa = false, dc_flush_wa = false; |
| 2144 | u32 *cs, flags = 0; | 2152 | u32 *cs, flags = 0; |
| 2145 | int len; | 2153 | int len; |
| @@ -2476,10 +2484,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine) | |||
| 2476 | if (ret) | 2484 | if (ret) |
| 2477 | return ret; | 2485 | return ret; |
| 2478 | 2486 | ||
| 2479 | ret = intel_engine_create_scratch(engine, PAGE_SIZE); | ||
| 2480 | if (ret) | ||
| 2481 | goto err_cleanup_common; | ||
| 2482 | |||
| 2483 | ret = intel_init_workaround_bb(engine); | 2487 | ret = intel_init_workaround_bb(engine); |
| 2484 | if (ret) { | 2488 | if (ret) { |
| 2485 | /* | 2489 | /* |
| @@ -2491,11 +2495,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine) | |||
| 2491 | ret); | 2495 | ret); |
| 2492 | } | 2496 | } |
| 2493 | 2497 | ||
| 2494 | return 0; | 2498 | intel_engine_init_workarounds(engine); |
| 2495 | 2499 | ||
| 2496 | err_cleanup_common: | 2500 | return 0; |
| 2497 | intel_engine_cleanup_common(engine); | ||
| 2498 | return ret; | ||
| 2499 | } | 2501 | } |
| 2500 | 2502 | ||
| 2501 | int logical_xcs_ring_init(struct intel_engine_cs *engine) | 2503 | int logical_xcs_ring_init(struct intel_engine_cs *engine) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 187bb0ceb4ac..1f8d2a66c791 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring) | |||
| 69 | static int | 69 | static int |
| 70 | gen2_render_ring_flush(struct i915_request *rq, u32 mode) | 70 | gen2_render_ring_flush(struct i915_request *rq, u32 mode) |
| 71 | { | 71 | { |
| 72 | unsigned int num_store_dw; | ||
| 72 | u32 cmd, *cs; | 73 | u32 cmd, *cs; |
| 73 | 74 | ||
| 74 | cmd = MI_FLUSH; | 75 | cmd = MI_FLUSH; |
| 75 | 76 | num_store_dw = 0; | |
| 76 | if (mode & EMIT_INVALIDATE) | 77 | if (mode & EMIT_INVALIDATE) |
| 77 | cmd |= MI_READ_FLUSH; | 78 | cmd |= MI_READ_FLUSH; |
| 79 | if (mode & EMIT_FLUSH) | ||
| 80 | num_store_dw = 4; | ||
| 78 | 81 | ||
| 79 | cs = intel_ring_begin(rq, 2); | 82 | cs = intel_ring_begin(rq, 2 + 3 * num_store_dw); |
| 80 | if (IS_ERR(cs)) | 83 | if (IS_ERR(cs)) |
| 81 | return PTR_ERR(cs); | 84 | return PTR_ERR(cs); |
| 82 | 85 | ||
| 83 | *cs++ = cmd; | 86 | *cs++ = cmd; |
| 84 | *cs++ = MI_NOOP; | 87 | while (num_store_dw--) { |
| 88 | *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; | ||
| 89 | *cs++ = i915_scratch_offset(rq->i915); | ||
| 90 | *cs++ = 0; | ||
| 91 | } | ||
| 92 | *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH; | ||
| 93 | |||
| 85 | intel_ring_advance(rq, cs); | 94 | intel_ring_advance(rq, cs); |
| 86 | 95 | ||
| 87 | return 0; | 96 | return 0; |
| @@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) | |||
| 150 | */ | 159 | */ |
| 151 | if (mode & EMIT_INVALIDATE) { | 160 | if (mode & EMIT_INVALIDATE) { |
| 152 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; | 161 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; |
| 153 | *cs++ = i915_ggtt_offset(rq->engine->scratch) | | 162 | *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; |
| 154 | PIPE_CONTROL_GLOBAL_GTT; | ||
| 155 | *cs++ = 0; | 163 | *cs++ = 0; |
| 156 | *cs++ = 0; | 164 | *cs++ = 0; |
| 157 | 165 | ||
| @@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) | |||
| 159 | *cs++ = MI_FLUSH; | 167 | *cs++ = MI_FLUSH; |
| 160 | 168 | ||
| 161 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; | 169 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; |
| 162 | *cs++ = i915_ggtt_offset(rq->engine->scratch) | | 170 | *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; |
| 163 | PIPE_CONTROL_GLOBAL_GTT; | ||
| 164 | *cs++ = 0; | 171 | *cs++ = 0; |
| 165 | *cs++ = 0; | 172 | *cs++ = 0; |
| 166 | } | 173 | } |
| @@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) | |||
| 212 | static int | 219 | static int |
| 213 | intel_emit_post_sync_nonzero_flush(struct i915_request *rq) | 220 | intel_emit_post_sync_nonzero_flush(struct i915_request *rq) |
| 214 | { | 221 | { |
| 215 | u32 scratch_addr = | 222 | u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; |
| 216 | i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; | ||
| 217 | u32 *cs; | 223 | u32 *cs; |
| 218 | 224 | ||
| 219 | cs = intel_ring_begin(rq, 6); | 225 | cs = intel_ring_begin(rq, 6); |
| @@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq) | |||
| 246 | static int | 252 | static int |
| 247 | gen6_render_ring_flush(struct i915_request *rq, u32 mode) | 253 | gen6_render_ring_flush(struct i915_request *rq, u32 mode) |
| 248 | { | 254 | { |
| 249 | u32 scratch_addr = | 255 | u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; |
| 250 | i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; | ||
| 251 | u32 *cs, flags = 0; | 256 | u32 *cs, flags = 0; |
| 252 | int ret; | 257 | int ret; |
| 253 | 258 | ||
| @@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq) | |||
| 316 | static int | 321 | static int |
| 317 | gen7_render_ring_flush(struct i915_request *rq, u32 mode) | 322 | gen7_render_ring_flush(struct i915_request *rq, u32 mode) |
| 318 | { | 323 | { |
| 319 | u32 scratch_addr = | 324 | u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; |
| 320 | i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; | ||
| 321 | u32 *cs, flags = 0; | 325 | u32 *cs, flags = 0; |
| 322 | 326 | ||
| 323 | /* | 327 | /* |
| @@ -971,7 +975,7 @@ i965_emit_bb_start(struct i915_request *rq, | |||
| 971 | } | 975 | } |
| 972 | 976 | ||
| 973 | /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ | 977 | /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ |
| 974 | #define I830_BATCH_LIMIT (256*1024) | 978 | #define I830_BATCH_LIMIT SZ_256K |
| 975 | #define I830_TLB_ENTRIES (2) | 979 | #define I830_TLB_ENTRIES (2) |
| 976 | #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) | 980 | #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) |
| 977 | static int | 981 | static int |
| @@ -979,7 +983,9 @@ i830_emit_bb_start(struct i915_request *rq, | |||
| 979 | u64 offset, u32 len, | 983 | u64 offset, u32 len, |
| 980 | unsigned int dispatch_flags) | 984 | unsigned int dispatch_flags) |
| 981 | { | 985 | { |
| 982 | u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch); | 986 | u32 *cs, cs_offset = i915_scratch_offset(rq->i915); |
| 987 | |||
| 988 | GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE); | ||
| 983 | 989 | ||
| 984 | cs = intel_ring_begin(rq, 6); | 990 | cs = intel_ring_begin(rq, 6); |
| 985 | if (IS_ERR(cs)) | 991 | if (IS_ERR(cs)) |
| @@ -1437,7 +1443,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) | |||
| 1437 | { | 1443 | { |
| 1438 | struct i915_timeline *timeline; | 1444 | struct i915_timeline *timeline; |
| 1439 | struct intel_ring *ring; | 1445 | struct intel_ring *ring; |
| 1440 | unsigned int size; | ||
| 1441 | int err; | 1446 | int err; |
| 1442 | 1447 | ||
| 1443 | intel_engine_setup_common(engine); | 1448 | intel_engine_setup_common(engine); |
| @@ -1462,21 +1467,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) | |||
| 1462 | GEM_BUG_ON(engine->buffer); | 1467 | GEM_BUG_ON(engine->buffer); |
| 1463 | engine->buffer = ring; | 1468 | engine->buffer = ring; |
| 1464 | 1469 | ||
| 1465 | size = PAGE_SIZE; | ||
| 1466 | if (HAS_BROKEN_CS_TLB(engine->i915)) | ||
| 1467 | size = I830_WA_SIZE; | ||
| 1468 | err = intel_engine_create_scratch(engine, size); | ||
| 1469 | if (err) | ||
| 1470 | goto err_unpin; | ||
| 1471 | |||
| 1472 | err = intel_engine_init_common(engine); | 1470 | err = intel_engine_init_common(engine); |
| 1473 | if (err) | 1471 | if (err) |
| 1474 | goto err_scratch; | 1472 | goto err_unpin; |
| 1475 | 1473 | ||
| 1476 | return 0; | 1474 | return 0; |
| 1477 | 1475 | ||
| 1478 | err_scratch: | ||
| 1479 | intel_engine_cleanup_scratch(engine); | ||
| 1480 | err_unpin: | 1476 | err_unpin: |
| 1481 | intel_ring_unpin(ring); | 1477 | intel_ring_unpin(ring); |
| 1482 | err_ring: | 1478 | err_ring: |
| @@ -1550,7 +1546,7 @@ static int flush_pd_dir(struct i915_request *rq) | |||
| 1550 | /* Stall until the page table load is complete */ | 1546 | /* Stall until the page table load is complete */ |
| 1551 | *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; | 1547 | *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; |
| 1552 | *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); | 1548 | *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); |
| 1553 | *cs++ = i915_ggtt_offset(engine->scratch); | 1549 | *cs++ = i915_scratch_offset(rq->i915); |
| 1554 | *cs++ = MI_NOOP; | 1550 | *cs++ = MI_NOOP; |
| 1555 | 1551 | ||
| 1556 | intel_ring_advance(rq, cs); | 1552 | intel_ring_advance(rq, cs); |
| @@ -1659,7 +1655,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) | |||
| 1659 | /* Insert a delay before the next switch! */ | 1655 | /* Insert a delay before the next switch! */ |
| 1660 | *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; | 1656 | *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; |
| 1661 | *cs++ = i915_mmio_reg_offset(last_reg); | 1657 | *cs++ = i915_mmio_reg_offset(last_reg); |
| 1662 | *cs++ = i915_ggtt_offset(engine->scratch); | 1658 | *cs++ = i915_scratch_offset(rq->i915); |
| 1663 | *cs++ = MI_NOOP; | 1659 | *cs++ = MI_NOOP; |
| 1664 | } | 1660 | } |
| 1665 | *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; | 1661 | *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2dfa585712c2..767a7192c969 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include "i915_selftest.h" | 15 | #include "i915_selftest.h" |
| 16 | #include "i915_timeline.h" | 16 | #include "i915_timeline.h" |
| 17 | #include "intel_gpu_commands.h" | 17 | #include "intel_gpu_commands.h" |
| 18 | #include "intel_workarounds.h" | ||
| 18 | 19 | ||
| 19 | struct drm_printer; | 20 | struct drm_printer; |
| 20 | struct i915_sched_attr; | 21 | struct i915_sched_attr; |
| @@ -440,7 +441,7 @@ struct intel_engine_cs { | |||
| 440 | 441 | ||
| 441 | struct intel_hw_status_page status_page; | 442 | struct intel_hw_status_page status_page; |
| 442 | struct i915_ctx_workarounds wa_ctx; | 443 | struct i915_ctx_workarounds wa_ctx; |
| 443 | struct i915_vma *scratch; | 444 | struct i915_wa_list wa_list; |
| 444 | 445 | ||
| 445 | u32 irq_keep_mask; /* always keep these interrupts */ | 446 | u32 irq_keep_mask; /* always keep these interrupts */ |
| 446 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ | 447 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ |
| @@ -898,10 +899,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine); | |||
| 898 | int intel_engine_init_common(struct intel_engine_cs *engine); | 899 | int intel_engine_init_common(struct intel_engine_cs *engine); |
| 899 | void intel_engine_cleanup_common(struct intel_engine_cs *engine); | 900 | void intel_engine_cleanup_common(struct intel_engine_cs *engine); |
| 900 | 901 | ||
| 901 | int intel_engine_create_scratch(struct intel_engine_cs *engine, | ||
| 902 | unsigned int size); | ||
| 903 | void intel_engine_cleanup_scratch(struct intel_engine_cs *engine); | ||
| 904 | |||
| 905 | int intel_init_render_ring_buffer(struct intel_engine_cs *engine); | 902 | int intel_init_render_ring_buffer(struct intel_engine_cs *engine); |
| 906 | int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine); | 903 | int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine); |
| 907 | int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); | 904 | int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); |
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index 4bcdeaf8d98f..6e580891db96 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c | |||
| @@ -48,6 +48,20 @@ | |||
| 48 | * - Public functions to init or apply the given workaround type. | 48 | * - Public functions to init or apply the given workaround type. |
| 49 | */ | 49 | */ |
| 50 | 50 | ||
| 51 | static void wa_init_start(struct i915_wa_list *wal, const char *name) | ||
| 52 | { | ||
| 53 | wal->name = name; | ||
| 54 | } | ||
| 55 | |||
| 56 | static void wa_init_finish(struct i915_wa_list *wal) | ||
| 57 | { | ||
| 58 | if (!wal->count) | ||
| 59 | return; | ||
| 60 | |||
| 61 | DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n", | ||
| 62 | wal->count, wal->name); | ||
| 63 | } | ||
| 64 | |||
| 51 | static void wa_add(struct drm_i915_private *i915, | 65 | static void wa_add(struct drm_i915_private *i915, |
| 52 | i915_reg_t reg, const u32 mask, const u32 val) | 66 | i915_reg_t reg, const u32 mask, const u32 val) |
| 53 | { | 67 | { |
| @@ -580,160 +594,175 @@ int intel_ctx_workarounds_emit(struct i915_request *rq) | |||
| 580 | return 0; | 594 | return 0; |
| 581 | } | 595 | } |
| 582 | 596 | ||
| 583 | static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 597 | static void |
| 598 | wal_add(struct i915_wa_list *wal, const struct i915_wa *wa) | ||
| 599 | { | ||
| 600 | const unsigned int grow = 1 << 4; | ||
| 601 | |||
| 602 | GEM_BUG_ON(!is_power_of_2(grow)); | ||
| 603 | |||
| 604 | if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */ | ||
| 605 | struct i915_wa *list; | ||
| 606 | |||
| 607 | list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa), | ||
| 608 | GFP_KERNEL); | ||
| 609 | if (!list) { | ||
| 610 | DRM_ERROR("No space for workaround init!\n"); | ||
| 611 | return; | ||
| 612 | } | ||
| 613 | |||
| 614 | if (wal->list) | ||
| 615 | memcpy(list, wal->list, sizeof(*wa) * wal->count); | ||
| 616 | |||
| 617 | wal->list = list; | ||
| 618 | } | ||
| 619 | |||
| 620 | wal->list[wal->count++] = *wa; | ||
| 621 | } | ||
| 622 | |||
| 623 | static void | ||
| 624 | wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) | ||
| 625 | { | ||
| 626 | struct i915_wa wa = { | ||
| 627 | .reg = reg, | ||
| 628 | .mask = val, | ||
| 629 | .val = _MASKED_BIT_ENABLE(val) | ||
| 630 | }; | ||
| 631 | |||
| 632 | wal_add(wal, &wa); | ||
| 633 | } | ||
| 634 | |||
| 635 | static void | ||
| 636 | wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, | ||
| 637 | u32 val) | ||
| 584 | { | 638 | { |
| 639 | struct i915_wa wa = { | ||
| 640 | .reg = reg, | ||
| 641 | .mask = mask, | ||
| 642 | .val = val | ||
| 643 | }; | ||
| 644 | |||
| 645 | wal_add(wal, &wa); | ||
| 585 | } | 646 | } |
| 586 | 647 | ||
| 587 | static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 648 | static void |
| 649 | wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val) | ||
| 588 | { | 650 | { |
| 651 | wa_write_masked_or(wal, reg, ~0, val); | ||
| 589 | } | 652 | } |
| 590 | 653 | ||
| 591 | static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 654 | static void |
| 655 | wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) | ||
| 592 | { | 656 | { |
| 593 | /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ | 657 | wa_write_masked_or(wal, reg, val, val); |
| 594 | I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, | 658 | } |
| 595 | _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); | ||
| 596 | 659 | ||
| 597 | /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ | 660 | static void gen9_gt_workarounds_init(struct drm_i915_private *i915) |
| 598 | I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | | 661 | { |
| 599 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); | 662 | struct i915_wa_list *wal = &i915->gt_wa_list; |
| 600 | 663 | ||
| 601 | /* WaDisableKillLogic:bxt,skl,kbl */ | 664 | /* WaDisableKillLogic:bxt,skl,kbl */ |
| 602 | if (!IS_COFFEELAKE(dev_priv)) | 665 | if (!IS_COFFEELAKE(i915)) |
| 603 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | 666 | wa_write_or(wal, |
| 604 | ECOCHK_DIS_TLB); | 667 | GAM_ECOCHK, |
| 668 | ECOCHK_DIS_TLB); | ||
| 605 | 669 | ||
| 606 | if (HAS_LLC(dev_priv)) { | 670 | if (HAS_LLC(i915)) { |
| 607 | /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl | 671 | /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl |
| 608 | * | 672 | * |
| 609 | * Must match Display Engine. See | 673 | * Must match Display Engine. See |
| 610 | * WaCompressedResourceDisplayNewHashMode. | 674 | * WaCompressedResourceDisplayNewHashMode. |
| 611 | */ | 675 | */ |
| 612 | I915_WRITE(MMCD_MISC_CTRL, | 676 | wa_write_or(wal, |
| 613 | I915_READ(MMCD_MISC_CTRL) | | 677 | MMCD_MISC_CTRL, |
| 614 | MMCD_PCLA | | 678 | MMCD_PCLA | MMCD_HOTSPOT_EN); |
| 615 | MMCD_HOTSPOT_EN); | ||
| 616 | } | 679 | } |
| 617 | 680 | ||
| 618 | /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ | 681 | /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ |
| 619 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | 682 | wa_write_or(wal, |
| 620 | BDW_DISABLE_HDC_INVALIDATION); | 683 | GAM_ECOCHK, |
| 621 | 684 | BDW_DISABLE_HDC_INVALIDATION); | |
| 622 | /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ | ||
| 623 | if (IS_GEN9_LP(dev_priv)) { | ||
| 624 | u32 val = I915_READ(GEN8_L3SQCREG1); | ||
| 625 | |||
| 626 | val &= ~L3_PRIO_CREDITS_MASK; | ||
| 627 | val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2); | ||
| 628 | I915_WRITE(GEN8_L3SQCREG1, val); | ||
| 629 | } | ||
| 630 | |||
| 631 | /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ | ||
| 632 | I915_WRITE(GEN8_L3SQCREG4, | ||
| 633 | I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
| 634 | |||
| 635 | /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ | ||
| 636 | I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, | ||
| 637 | _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); | ||
| 638 | } | 685 | } |
| 639 | 686 | ||
| 640 | static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 687 | static void skl_gt_workarounds_init(struct drm_i915_private *i915) |
| 641 | { | 688 | { |
| 642 | gen9_gt_workarounds_apply(dev_priv); | 689 | struct i915_wa_list *wal = &i915->gt_wa_list; |
| 643 | 690 | ||
| 644 | /* WaEnableGapsTsvCreditFix:skl */ | 691 | gen9_gt_workarounds_init(i915); |
| 645 | I915_WRITE(GEN8_GARBCNTL, | ||
| 646 | I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); | ||
| 647 | 692 | ||
| 648 | /* WaDisableGafsUnitClkGating:skl */ | 693 | /* WaDisableGafsUnitClkGating:skl */ |
| 649 | I915_WRITE(GEN7_UCGCTL4, | 694 | wa_write_or(wal, |
| 650 | I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | 695 | GEN7_UCGCTL4, |
| 696 | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | ||
| 651 | 697 | ||
| 652 | /* WaInPlaceDecompressionHang:skl */ | 698 | /* WaInPlaceDecompressionHang:skl */ |
| 653 | if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) | 699 | if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER)) |
| 654 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 700 | wa_write_or(wal, |
| 655 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 701 | GEN9_GAMT_ECO_REG_RW_IA, |
| 656 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 702 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
| 657 | } | 703 | } |
| 658 | 704 | ||
| 659 | static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 705 | static void bxt_gt_workarounds_init(struct drm_i915_private *i915) |
| 660 | { | 706 | { |
| 661 | gen9_gt_workarounds_apply(dev_priv); | 707 | struct i915_wa_list *wal = &i915->gt_wa_list; |
| 662 | 708 | ||
| 663 | /* WaDisablePooledEuLoadBalancingFix:bxt */ | 709 | gen9_gt_workarounds_init(i915); |
| 664 | I915_WRITE(FF_SLICE_CS_CHICKEN2, | ||
| 665 | _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE)); | ||
| 666 | 710 | ||
| 667 | /* WaInPlaceDecompressionHang:bxt */ | 711 | /* WaInPlaceDecompressionHang:bxt */ |
| 668 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 712 | wa_write_or(wal, |
| 669 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 713 | GEN9_GAMT_ECO_REG_RW_IA, |
| 670 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 714 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
| 671 | } | 715 | } |
| 672 | 716 | ||
| 673 | static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 717 | static void kbl_gt_workarounds_init(struct drm_i915_private *i915) |
| 674 | { | 718 | { |
| 675 | gen9_gt_workarounds_apply(dev_priv); | 719 | struct i915_wa_list *wal = &i915->gt_wa_list; |
| 676 | 720 | ||
| 677 | /* WaEnableGapsTsvCreditFix:kbl */ | 721 | gen9_gt_workarounds_init(i915); |
| 678 | I915_WRITE(GEN8_GARBCNTL, | ||
| 679 | I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); | ||
| 680 | 722 | ||
| 681 | /* WaDisableDynamicCreditSharing:kbl */ | 723 | /* WaDisableDynamicCreditSharing:kbl */ |
| 682 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | 724 | if (IS_KBL_REVID(i915, 0, KBL_REVID_B0)) |
| 683 | I915_WRITE(GAMT_CHKN_BIT_REG, | 725 | wa_write_or(wal, |
| 684 | I915_READ(GAMT_CHKN_BIT_REG) | | 726 | GAMT_CHKN_BIT_REG, |
| 685 | GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); | 727 | GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); |
| 686 | 728 | ||
| 687 | /* WaDisableGafsUnitClkGating:kbl */ | 729 | /* WaDisableGafsUnitClkGating:kbl */ |
| 688 | I915_WRITE(GEN7_UCGCTL4, | 730 | wa_write_or(wal, |
| 689 | I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | 731 | GEN7_UCGCTL4, |
| 732 | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | ||
| 690 | 733 | ||
| 691 | /* WaInPlaceDecompressionHang:kbl */ | 734 | /* WaInPlaceDecompressionHang:kbl */ |
| 692 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 735 | wa_write_or(wal, |
| 693 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 736 | GEN9_GAMT_ECO_REG_RW_IA, |
| 694 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 737 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
| 695 | |||
| 696 | /* WaKBLVECSSemaphoreWaitPoll:kbl */ | ||
| 697 | if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) { | ||
| 698 | struct intel_engine_cs *engine; | ||
| 699 | unsigned int tmp; | ||
| 700 | |||
| 701 | for_each_engine(engine, dev_priv, tmp) { | ||
| 702 | if (engine->id == RCS) | ||
| 703 | continue; | ||
| 704 | |||
| 705 | I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1); | ||
| 706 | } | ||
| 707 | } | ||
| 708 | } | 738 | } |
| 709 | 739 | ||
| 710 | static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 740 | static void glk_gt_workarounds_init(struct drm_i915_private *i915) |
| 711 | { | 741 | { |
| 712 | gen9_gt_workarounds_apply(dev_priv); | 742 | gen9_gt_workarounds_init(i915); |
| 713 | } | 743 | } |
| 714 | 744 | ||
| 715 | static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 745 | static void cfl_gt_workarounds_init(struct drm_i915_private *i915) |
| 716 | { | 746 | { |
| 717 | gen9_gt_workarounds_apply(dev_priv); | 747 | struct i915_wa_list *wal = &i915->gt_wa_list; |
| 718 | 748 | ||
| 719 | /* WaEnableGapsTsvCreditFix:cfl */ | 749 | gen9_gt_workarounds_init(i915); |
| 720 | I915_WRITE(GEN8_GARBCNTL, | ||
| 721 | I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); | ||
| 722 | 750 | ||
| 723 | /* WaDisableGafsUnitClkGating:cfl */ | 751 | /* WaDisableGafsUnitClkGating:cfl */ |
| 724 | I915_WRITE(GEN7_UCGCTL4, | 752 | wa_write_or(wal, |
| 725 | I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | 753 | GEN7_UCGCTL4, |
| 754 | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | ||
| 726 | 755 | ||
| 727 | /* WaInPlaceDecompressionHang:cfl */ | 756 | /* WaInPlaceDecompressionHang:cfl */ |
| 728 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 757 | wa_write_or(wal, |
| 729 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 758 | GEN9_GAMT_ECO_REG_RW_IA, |
| 730 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 759 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
| 731 | } | 760 | } |
| 732 | 761 | ||
| 733 | static void wa_init_mcr(struct drm_i915_private *dev_priv) | 762 | static void wa_init_mcr(struct drm_i915_private *dev_priv) |
| 734 | { | 763 | { |
| 735 | const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); | 764 | const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); |
| 736 | u32 mcr; | 765 | struct i915_wa_list *wal = &dev_priv->gt_wa_list; |
| 737 | u32 mcr_slice_subslice_mask; | 766 | u32 mcr_slice_subslice_mask; |
| 738 | 767 | ||
| 739 | /* | 768 | /* |
| @@ -770,8 +799,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv) | |||
| 770 | WARN_ON((enabled_mask & disabled_mask) != enabled_mask); | 799 | WARN_ON((enabled_mask & disabled_mask) != enabled_mask); |
| 771 | } | 800 | } |
| 772 | 801 | ||
| 773 | mcr = I915_READ(GEN8_MCR_SELECTOR); | ||
| 774 | |||
| 775 | if (INTEL_GEN(dev_priv) >= 11) | 802 | if (INTEL_GEN(dev_priv) >= 11) |
| 776 | mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | | 803 | mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | |
| 777 | GEN11_MCR_SUBSLICE_MASK; | 804 | GEN11_MCR_SUBSLICE_MASK; |
| @@ -789,148 +816,170 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv) | |||
| 789 | * occasions, such as INSTDONE, where this value is dependent | 816 | * occasions, such as INSTDONE, where this value is dependent |
| 790 | * on s/ss combo, the read should be done with read_subslice_reg. | 817 | * on s/ss combo, the read should be done with read_subslice_reg. |
| 791 | */ | 818 | */ |
| 792 | mcr &= ~mcr_slice_subslice_mask; | 819 | wa_write_masked_or(wal, |
| 793 | mcr |= intel_calculate_mcr_s_ss_select(dev_priv); | 820 | GEN8_MCR_SELECTOR, |
| 794 | I915_WRITE(GEN8_MCR_SELECTOR, mcr); | 821 | mcr_slice_subslice_mask, |
| 822 | intel_calculate_mcr_s_ss_select(dev_priv)); | ||
| 795 | } | 823 | } |
| 796 | 824 | ||
| 797 | static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 825 | static void cnl_gt_workarounds_init(struct drm_i915_private *i915) |
| 798 | { | 826 | { |
| 799 | wa_init_mcr(dev_priv); | 827 | struct i915_wa_list *wal = &i915->gt_wa_list; |
| 828 | |||
| 829 | wa_init_mcr(i915); | ||
| 800 | 830 | ||
| 801 | /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ | 831 | /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ |
| 802 | if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) | 832 | if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) |
| 803 | I915_WRITE(GAMT_CHKN_BIT_REG, | 833 | wa_write_or(wal, |
| 804 | I915_READ(GAMT_CHKN_BIT_REG) | | 834 | GAMT_CHKN_BIT_REG, |
| 805 | GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); | 835 | GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); |
| 806 | 836 | ||
| 807 | /* WaInPlaceDecompressionHang:cnl */ | 837 | /* WaInPlaceDecompressionHang:cnl */ |
| 808 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 838 | wa_write_or(wal, |
| 809 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 839 | GEN9_GAMT_ECO_REG_RW_IA, |
| 810 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 840 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
| 811 | |||
| 812 | /* WaEnablePreemptionGranularityControlByUMD:cnl */ | ||
| 813 | I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, | ||
| 814 | _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); | ||
| 815 | } | 841 | } |
| 816 | 842 | ||
| 817 | static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 843 | static void icl_gt_workarounds_init(struct drm_i915_private *i915) |
| 818 | { | 844 | { |
| 819 | wa_init_mcr(dev_priv); | 845 | struct i915_wa_list *wal = &i915->gt_wa_list; |
| 820 | 846 | ||
| 821 | /* This is not an Wa. Enable for better image quality */ | 847 | wa_init_mcr(i915); |
| 822 | I915_WRITE(_3D_CHICKEN3, | ||
| 823 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); | ||
| 824 | 848 | ||
| 825 | /* WaInPlaceDecompressionHang:icl */ | 849 | /* WaInPlaceDecompressionHang:icl */ |
| 826 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 850 | wa_write_or(wal, |
| 827 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 851 | GEN9_GAMT_ECO_REG_RW_IA, |
| 828 | 852 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | |
| 829 | /* WaPipelineFlushCoherentLines:icl */ | ||
| 830 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | ||
| 831 | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
| 832 | |||
| 833 | /* Wa_1405543622:icl | ||
| 834 | * Formerly known as WaGAPZPriorityScheme | ||
| 835 | */ | ||
| 836 | I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) | | ||
| 837 | GEN11_ARBITRATION_PRIO_ORDER_MASK); | ||
| 838 | |||
| 839 | /* Wa_1604223664:icl | ||
| 840 | * Formerly known as WaL3BankAddressHashing | ||
| 841 | */ | ||
| 842 | I915_WRITE(GEN8_GARBCNTL, | ||
| 843 | (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) | | ||
| 844 | GEN11_HASH_CTRL_EXCL_BIT0); | ||
| 845 | I915_WRITE(GEN11_GLBLINVL, | ||
| 846 | (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) | | ||
| 847 | GEN11_BANK_HASH_ADDR_EXCL_BIT0); | ||
| 848 | 853 | ||
| 849 | /* WaModifyGamTlbPartitioning:icl */ | 854 | /* WaModifyGamTlbPartitioning:icl */ |
| 850 | I915_WRITE(GEN11_GACB_PERF_CTRL, | 855 | wa_write_masked_or(wal, |
| 851 | (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) | | 856 | GEN11_GACB_PERF_CTRL, |
| 852 | GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); | 857 | GEN11_HASH_CTRL_MASK, |
| 853 | 858 | GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); | |
| 854 | /* Wa_1405733216:icl | ||
| 855 | * Formerly known as WaDisableCleanEvicts | ||
| 856 | */ | ||
| 857 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | ||
| 858 | GEN11_LQSC_CLEAN_EVICT_DISABLE); | ||
| 859 | 859 | ||
| 860 | /* Wa_1405766107:icl | 860 | /* Wa_1405766107:icl |
| 861 | * Formerly known as WaCL2SFHalfMaxAlloc | 861 | * Formerly known as WaCL2SFHalfMaxAlloc |
| 862 | */ | 862 | */ |
| 863 | I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) | | 863 | wa_write_or(wal, |
| 864 | GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | | 864 | GEN11_LSN_UNSLCVC, |
| 865 | GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); | 865 | GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | |
| 866 | GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); | ||
| 866 | 867 | ||
| 867 | /* Wa_220166154:icl | 868 | /* Wa_220166154:icl |
| 868 | * Formerly known as WaDisCtxReload | 869 | * Formerly known as WaDisCtxReload |
| 869 | */ | 870 | */ |
| 870 | I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) | | 871 | wa_write_or(wal, |
| 871 | GAMW_ECO_DEV_CTX_RELOAD_DISABLE); | 872 | GEN8_GAMW_ECO_DEV_RW_IA, |
| 873 | GAMW_ECO_DEV_CTX_RELOAD_DISABLE); | ||
| 872 | 874 | ||
| 873 | /* Wa_1405779004:icl (pre-prod) */ | 875 | /* Wa_1405779004:icl (pre-prod) */ |
| 874 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) | 876 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) |
| 875 | I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, | 877 | wa_write_or(wal, |
| 876 | I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | | 878 | SLICE_UNIT_LEVEL_CLKGATE, |
| 877 | MSCUNIT_CLKGATE_DIS); | 879 | MSCUNIT_CLKGATE_DIS); |
| 878 | 880 | ||
| 879 | /* Wa_1406680159:icl */ | 881 | /* Wa_1406680159:icl */ |
| 880 | I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, | 882 | wa_write_or(wal, |
| 881 | I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) | | 883 | SUBSLICE_UNIT_LEVEL_CLKGATE, |
| 882 | GWUNIT_CLKGATE_DIS); | 884 | GWUNIT_CLKGATE_DIS); |
| 883 | |||
| 884 | /* Wa_1604302699:icl */ | ||
| 885 | I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER, | ||
| 886 | I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) | | ||
| 887 | GEN11_I2M_WRITE_DISABLE); | ||
| 888 | 885 | ||
| 889 | /* Wa_1406838659:icl (pre-prod) */ | 886 | /* Wa_1406838659:icl (pre-prod) */ |
| 890 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) | 887 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) |
| 891 | I915_WRITE(INF_UNIT_LEVEL_CLKGATE, | 888 | wa_write_or(wal, |
| 892 | I915_READ(INF_UNIT_LEVEL_CLKGATE) | | 889 | INF_UNIT_LEVEL_CLKGATE, |
| 893 | CGPSF_CLKGATE_DIS); | 890 | CGPSF_CLKGATE_DIS); |
| 894 | |||
| 895 | /* WaForwardProgressSoftReset:icl */ | ||
| 896 | I915_WRITE(GEN10_SCRATCH_LNCF2, | ||
| 897 | I915_READ(GEN10_SCRATCH_LNCF2) | | ||
| 898 | PMFLUSHDONE_LNICRSDROP | | ||
| 899 | PMFLUSH_GAPL3UNBLOCK | | ||
| 900 | PMFLUSHDONE_LNEBLK); | ||
| 901 | 891 | ||
| 902 | /* Wa_1406463099:icl | 892 | /* Wa_1406463099:icl |
| 903 | * Formerly known as WaGamTlbPendError | 893 | * Formerly known as WaGamTlbPendError |
| 904 | */ | 894 | */ |
| 905 | I915_WRITE(GAMT_CHKN_BIT_REG, | 895 | wa_write_or(wal, |
| 906 | I915_READ(GAMT_CHKN_BIT_REG) | | 896 | GAMT_CHKN_BIT_REG, |
| 907 | GAMT_CHKN_DISABLE_L3_COH_PIPE); | 897 | GAMT_CHKN_DISABLE_L3_COH_PIPE); |
| 908 | } | 898 | } |
| 909 | 899 | ||
| 910 | void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 900 | void intel_gt_init_workarounds(struct drm_i915_private *i915) |
| 911 | { | 901 | { |
| 912 | if (INTEL_GEN(dev_priv) < 8) | 902 | struct i915_wa_list *wal = &i915->gt_wa_list; |
| 903 | |||
| 904 | wa_init_start(wal, "GT"); | ||
| 905 | |||
| 906 | if (INTEL_GEN(i915) < 8) | ||
| 913 | return; | 907 | return; |
| 914 | else if (IS_BROADWELL(dev_priv)) | 908 | else if (IS_BROADWELL(i915)) |
| 915 | bdw_gt_workarounds_apply(dev_priv); | 909 | return; |
| 916 | else if (IS_CHERRYVIEW(dev_priv)) | 910 | else if (IS_CHERRYVIEW(i915)) |
| 917 | chv_gt_workarounds_apply(dev_priv); | 911 | return; |
| 918 | else if (IS_SKYLAKE(dev_priv)) | 912 | else if (IS_SKYLAKE(i915)) |
| 919 | skl_gt_workarounds_apply(dev_priv); | 913 | skl_gt_workarounds_init(i915); |
| 920 | else if (IS_BROXTON(dev_priv)) | 914 | else if (IS_BROXTON(i915)) |
| 921 | bxt_gt_workarounds_apply(dev_priv); | 915 | bxt_gt_workarounds_init(i915); |
| 922 | else if (IS_KABYLAKE(dev_priv)) | 916 | else if (IS_KABYLAKE(i915)) |
| 923 | kbl_gt_workarounds_apply(dev_priv); | 917 | kbl_gt_workarounds_init(i915); |
| 924 | else if (IS_GEMINILAKE(dev_priv)) | 918 | else if (IS_GEMINILAKE(i915)) |
| 925 | glk_gt_workarounds_apply(dev_priv); | 919 | glk_gt_workarounds_init(i915); |
| 926 | else if (IS_COFFEELAKE(dev_priv)) | 920 | else if (IS_COFFEELAKE(i915)) |
| 927 | cfl_gt_workarounds_apply(dev_priv); | 921 | cfl_gt_workarounds_init(i915); |
| 928 | else if (IS_CANNONLAKE(dev_priv)) | 922 | else if (IS_CANNONLAKE(i915)) |
| 929 | cnl_gt_workarounds_apply(dev_priv); | 923 | cnl_gt_workarounds_init(i915); |
| 930 | else if (IS_ICELAKE(dev_priv)) | 924 | else if (IS_ICELAKE(i915)) |
| 931 | icl_gt_workarounds_apply(dev_priv); | 925 | icl_gt_workarounds_init(i915); |
| 932 | else | 926 | else |
| 933 | MISSING_CASE(INTEL_GEN(dev_priv)); | 927 | MISSING_CASE(INTEL_GEN(i915)); |
| 928 | |||
| 929 | wa_init_finish(wal); | ||
| 930 | } | ||
| 931 | |||
| 932 | static enum forcewake_domains | ||
| 933 | wal_get_fw_for_rmw(struct drm_i915_private *dev_priv, | ||
| 934 | const struct i915_wa_list *wal) | ||
| 935 | { | ||
| 936 | enum forcewake_domains fw = 0; | ||
| 937 | struct i915_wa *wa; | ||
| 938 | unsigned int i; | ||
| 939 | |||
| 940 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) | ||
| 941 | fw |= intel_uncore_forcewake_for_reg(dev_priv, | ||
| 942 | wa->reg, | ||
| 943 | FW_REG_READ | | ||
| 944 | FW_REG_WRITE); | ||
| 945 | |||
| 946 | return fw; | ||
| 947 | } | ||
| 948 | |||
| 949 | static void | ||
| 950 | wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal) | ||
| 951 | { | ||
| 952 | enum forcewake_domains fw; | ||
| 953 | unsigned long flags; | ||
| 954 | struct i915_wa *wa; | ||
| 955 | unsigned int i; | ||
| 956 | |||
| 957 | if (!wal->count) | ||
| 958 | return; | ||
| 959 | |||
| 960 | fw = wal_get_fw_for_rmw(dev_priv, wal); | ||
| 961 | |||
| 962 | spin_lock_irqsave(&dev_priv->uncore.lock, flags); | ||
| 963 | intel_uncore_forcewake_get__locked(dev_priv, fw); | ||
| 964 | |||
| 965 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { | ||
| 966 | u32 val = I915_READ_FW(wa->reg); | ||
| 967 | |||
| 968 | val &= ~wa->mask; | ||
| 969 | val |= wa->val; | ||
| 970 | |||
| 971 | I915_WRITE_FW(wa->reg, val); | ||
| 972 | } | ||
| 973 | |||
| 974 | intel_uncore_forcewake_put__locked(dev_priv, fw); | ||
| 975 | spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); | ||
| 976 | |||
| 977 | DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name); | ||
| 978 | } | ||
| 979 | |||
| 980 | void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv) | ||
| 981 | { | ||
| 982 | wa_list_apply(dev_priv, &dev_priv->gt_wa_list); | ||
| 934 | } | 983 | } |
| 935 | 984 | ||
| 936 | struct whitelist { | 985 | struct whitelist { |
| @@ -1077,6 +1126,146 @@ void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine) | |||
| 1077 | whitelist_apply(engine, whitelist_build(engine, &w)); | 1126 | whitelist_apply(engine, whitelist_build(engine, &w)); |
| 1078 | } | 1127 | } |
| 1079 | 1128 | ||
| 1129 | static void rcs_engine_wa_init(struct intel_engine_cs *engine) | ||
| 1130 | { | ||
| 1131 | struct drm_i915_private *i915 = engine->i915; | ||
| 1132 | struct i915_wa_list *wal = &engine->wa_list; | ||
| 1133 | |||
| 1134 | if (IS_ICELAKE(i915)) { | ||
| 1135 | /* This is not an Wa. Enable for better image quality */ | ||
| 1136 | wa_masked_en(wal, | ||
| 1137 | _3D_CHICKEN3, | ||
| 1138 | _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE); | ||
| 1139 | |||
| 1140 | /* WaPipelineFlushCoherentLines:icl */ | ||
| 1141 | wa_write_or(wal, | ||
| 1142 | GEN8_L3SQCREG4, | ||
| 1143 | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
| 1144 | |||
| 1145 | /* | ||
| 1146 | * Wa_1405543622:icl | ||
| 1147 | * Formerly known as WaGAPZPriorityScheme | ||
| 1148 | */ | ||
| 1149 | wa_write_or(wal, | ||
| 1150 | GEN8_GARBCNTL, | ||
| 1151 | GEN11_ARBITRATION_PRIO_ORDER_MASK); | ||
| 1152 | |||
| 1153 | /* | ||
| 1154 | * Wa_1604223664:icl | ||
| 1155 | * Formerly known as WaL3BankAddressHashing | ||
| 1156 | */ | ||
| 1157 | wa_write_masked_or(wal, | ||
| 1158 | GEN8_GARBCNTL, | ||
| 1159 | GEN11_HASH_CTRL_EXCL_MASK, | ||
| 1160 | GEN11_HASH_CTRL_EXCL_BIT0); | ||
| 1161 | wa_write_masked_or(wal, | ||
| 1162 | GEN11_GLBLINVL, | ||
| 1163 | GEN11_BANK_HASH_ADDR_EXCL_MASK, | ||
| 1164 | GEN11_BANK_HASH_ADDR_EXCL_BIT0); | ||
| 1165 | |||
| 1166 | /* | ||
| 1167 | * Wa_1405733216:icl | ||
| 1168 | * Formerly known as WaDisableCleanEvicts | ||
| 1169 | */ | ||
| 1170 | wa_write_or(wal, | ||
| 1171 | GEN8_L3SQCREG4, | ||
| 1172 | GEN11_LQSC_CLEAN_EVICT_DISABLE); | ||
| 1173 | |||
| 1174 | /* Wa_1604302699:icl */ | ||
| 1175 | wa_write_or(wal, | ||
| 1176 | GEN10_L3_CHICKEN_MODE_REGISTER, | ||
| 1177 | GEN11_I2M_WRITE_DISABLE); | ||
| 1178 | |||
| 1179 | /* WaForwardProgressSoftReset:icl */ | ||
| 1180 | wa_write_or(wal, | ||
| 1181 | GEN10_SCRATCH_LNCF2, | ||
| 1182 | PMFLUSHDONE_LNICRSDROP | | ||
| 1183 | PMFLUSH_GAPL3UNBLOCK | | ||
| 1184 | PMFLUSHDONE_LNEBLK); | ||
| 1185 | } | ||
| 1186 | |||
| 1187 | if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) { | ||
| 1188 | /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */ | ||
| 1189 | wa_masked_en(wal, | ||
| 1190 | GEN7_FF_SLICE_CS_CHICKEN1, | ||
| 1191 | GEN9_FFSC_PERCTX_PREEMPT_CTRL); | ||
| 1192 | } | ||
| 1193 | |||
| 1194 | if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { | ||
| 1195 | /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */ | ||
| 1196 | wa_write_or(wal, | ||
| 1197 | GEN8_GARBCNTL, | ||
| 1198 | GEN9_GAPS_TSV_CREDIT_DISABLE); | ||
| 1199 | } | ||
| 1200 | |||
| 1201 | if (IS_BROXTON(i915)) { | ||
| 1202 | /* WaDisablePooledEuLoadBalancingFix:bxt */ | ||
| 1203 | wa_masked_en(wal, | ||
| 1204 | FF_SLICE_CS_CHICKEN2, | ||
| 1205 | GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); | ||
| 1206 | } | ||
| 1207 | |||
| 1208 | if (IS_GEN9(i915)) { | ||
| 1209 | /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ | ||
| 1210 | wa_masked_en(wal, | ||
| 1211 | GEN9_CSFE_CHICKEN1_RCS, | ||
| 1212 | GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE); | ||
| 1213 | |||
| 1214 | /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ | ||
| 1215 | wa_write_or(wal, | ||
| 1216 | BDW_SCRATCH1, | ||
| 1217 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); | ||
| 1218 | |||
| 1219 | /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ | ||
| 1220 | if (IS_GEN9_LP(i915)) | ||
| 1221 | wa_write_masked_or(wal, | ||
| 1222 | GEN8_L3SQCREG1, | ||
| 1223 | L3_PRIO_CREDITS_MASK, | ||
| 1224 | L3_GENERAL_PRIO_CREDITS(62) | | ||
| 1225 | L3_HIGH_PRIO_CREDITS(2)); | ||
| 1226 | |||
| 1227 | /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ | ||
| 1228 | wa_write_or(wal, | ||
| 1229 | GEN8_L3SQCREG4, | ||
| 1230 | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
| 1231 | } | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | static void xcs_engine_wa_init(struct intel_engine_cs *engine) | ||
| 1235 | { | ||
| 1236 | struct drm_i915_private *i915 = engine->i915; | ||
| 1237 | struct i915_wa_list *wal = &engine->wa_list; | ||
| 1238 | |||
| 1239 | /* WaKBLVECSSemaphoreWaitPoll:kbl */ | ||
| 1240 | if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) { | ||
| 1241 | wa_write(wal, | ||
| 1242 | RING_SEMA_WAIT_POLL(engine->mmio_base), | ||
| 1243 | 1); | ||
| 1244 | } | ||
| 1245 | } | ||
| 1246 | |||
| 1247 | void intel_engine_init_workarounds(struct intel_engine_cs *engine) | ||
| 1248 | { | ||
| 1249 | struct i915_wa_list *wal = &engine->wa_list; | ||
| 1250 | |||
| 1251 | if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8)) | ||
| 1252 | return; | ||
| 1253 | |||
| 1254 | wa_init_start(wal, engine->name); | ||
| 1255 | |||
| 1256 | if (engine->id == RCS) | ||
| 1257 | rcs_engine_wa_init(engine); | ||
| 1258 | else | ||
| 1259 | xcs_engine_wa_init(engine); | ||
| 1260 | |||
| 1261 | wa_init_finish(wal); | ||
| 1262 | } | ||
| 1263 | |||
| 1264 | void intel_engine_apply_workarounds(struct intel_engine_cs *engine) | ||
| 1265 | { | ||
| 1266 | wa_list_apply(engine->i915, &engine->wa_list); | ||
| 1267 | } | ||
| 1268 | |||
| 1080 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) | 1269 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
| 1081 | #include "selftests/intel_workarounds.c" | 1270 | #include "selftests/intel_workarounds.c" |
| 1082 | #endif | 1271 | #endif |
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h index b11d0623e626..979695a53964 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.h +++ b/drivers/gpu/drm/i915/intel_workarounds.h | |||
| @@ -7,11 +7,35 @@ | |||
| 7 | #ifndef _I915_WORKAROUNDS_H_ | 7 | #ifndef _I915_WORKAROUNDS_H_ |
| 8 | #define _I915_WORKAROUNDS_H_ | 8 | #define _I915_WORKAROUNDS_H_ |
| 9 | 9 | ||
| 10 | #include <linux/slab.h> | ||
| 11 | |||
| 12 | struct i915_wa { | ||
| 13 | i915_reg_t reg; | ||
| 14 | u32 mask; | ||
| 15 | u32 val; | ||
| 16 | }; | ||
| 17 | |||
| 18 | struct i915_wa_list { | ||
| 19 | const char *name; | ||
| 20 | struct i915_wa *list; | ||
| 21 | unsigned int count; | ||
| 22 | }; | ||
| 23 | |||
| 24 | static inline void intel_wa_list_free(struct i915_wa_list *wal) | ||
| 25 | { | ||
| 26 | kfree(wal->list); | ||
| 27 | memset(wal, 0, sizeof(*wal)); | ||
| 28 | } | ||
| 29 | |||
| 10 | int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv); | 30 | int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv); |
| 11 | int intel_ctx_workarounds_emit(struct i915_request *rq); | 31 | int intel_ctx_workarounds_emit(struct i915_request *rq); |
| 12 | 32 | ||
| 13 | void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv); | 33 | void intel_gt_init_workarounds(struct drm_i915_private *dev_priv); |
| 34 | void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv); | ||
| 14 | 35 | ||
| 15 | void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine); | 36 | void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine); |
| 16 | 37 | ||
| 38 | void intel_engine_init_workarounds(struct intel_engine_cs *engine); | ||
| 39 | void intel_engine_apply_workarounds(struct intel_engine_cs *engine); | ||
| 40 | |||
| 17 | #endif | 41 | #endif |
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 66df1b177959..27b507eb4a99 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c | |||
| @@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi) | |||
| 818 | dsi->encoder.possible_crtcs = 1; | 818 | dsi->encoder.possible_crtcs = 1; |
| 819 | 819 | ||
| 820 | /* If there's a bridge, attach to it and let it create the connector */ | 820 | /* If there's a bridge, attach to it and let it create the connector */ |
| 821 | ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL); | 821 | if (dsi->bridge) { |
| 822 | if (ret) { | 822 | ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL); |
| 823 | DRM_ERROR("Failed to attach bridge to drm\n"); | 823 | if (ret) { |
| 824 | 824 | DRM_ERROR("Failed to attach bridge to drm\n"); | |
| 825 | goto err_encoder_cleanup; | ||
| 826 | } | ||
| 827 | } else { | ||
| 825 | /* Otherwise create our own connector and attach to a panel */ | 828 | /* Otherwise create our own connector and attach to a panel */ |
| 826 | ret = mtk_dsi_create_connector(drm, dsi); | 829 | ret = mtk_dsi_create_connector(drm, dsi); |
| 827 | if (ret) | 830 | if (ret) |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index d4530d60767b..ca169f013a14 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | |||
| @@ -1594,7 +1594,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, | |||
| 1594 | NULL); | 1594 | NULL); |
| 1595 | 1595 | ||
| 1596 | drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); | 1596 | drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); |
| 1597 | plane->crtc = crtc; | ||
| 1598 | 1597 | ||
| 1599 | /* save user friendly CRTC name for later */ | 1598 | /* save user friendly CRTC name for later */ |
| 1600 | snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); | 1599 | snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 96cdf06e7da2..d31d8281424e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | |||
| @@ -488,8 +488,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc) | |||
| 488 | 488 | ||
| 489 | drm_encoder_cleanup(drm_enc); | 489 | drm_encoder_cleanup(drm_enc); |
| 490 | mutex_destroy(&dpu_enc->enc_lock); | 490 | mutex_destroy(&dpu_enc->enc_lock); |
| 491 | |||
| 492 | kfree(dpu_enc); | ||
| 493 | } | 491 | } |
| 494 | 492 | ||
| 495 | void dpu_encoder_helper_split_config( | 493 | void dpu_encoder_helper_split_config( |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index bfcd165e96df..d743e7ca6a3c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | |||
| @@ -216,7 +216,7 @@ static const struct dpu_format dpu_format_map[] = { | |||
| 216 | INTERLEAVED_RGB_FMT(XBGR8888, | 216 | INTERLEAVED_RGB_FMT(XBGR8888, |
| 217 | COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, | 217 | COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, |
| 218 | C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, | 218 | C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, |
| 219 | true, 4, 0, | 219 | false, 4, 0, |
| 220 | DPU_FETCH_LINEAR, 1), | 220 | DPU_FETCH_LINEAR, 1), |
| 221 | 221 | ||
| 222 | INTERLEAVED_RGB_FMT(RGBA8888, | 222 | INTERLEAVED_RGB_FMT(RGBA8888, |
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index 4c03f0b7343e..41bec570c518 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | |||
| @@ -39,6 +39,8 @@ | |||
| 39 | #define DSI_PIXEL_PLL_CLK 1 | 39 | #define DSI_PIXEL_PLL_CLK 1 |
| 40 | #define NUM_PROVIDED_CLKS 2 | 40 | #define NUM_PROVIDED_CLKS 2 |
| 41 | 41 | ||
| 42 | #define VCO_REF_CLK_RATE 19200000 | ||
| 43 | |||
| 42 | struct dsi_pll_regs { | 44 | struct dsi_pll_regs { |
| 43 | u32 pll_prop_gain_rate; | 45 | u32 pll_prop_gain_rate; |
| 44 | u32 pll_lockdet_rate; | 46 | u32 pll_lockdet_rate; |
| @@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 316 | parent_rate); | 318 | parent_rate); |
| 317 | 319 | ||
| 318 | pll_10nm->vco_current_rate = rate; | 320 | pll_10nm->vco_current_rate = rate; |
| 319 | pll_10nm->vco_ref_clk_rate = parent_rate; | 321 | pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE; |
| 320 | 322 | ||
| 321 | dsi_pll_setup_config(pll_10nm); | 323 | dsi_pll_setup_config(pll_10nm); |
| 322 | 324 | ||
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index c79659ca5706..adbdce3aeda0 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c | |||
| @@ -332,6 +332,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, | |||
| 332 | goto fail; | 332 | goto fail; |
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | ret = msm_hdmi_hpd_enable(hdmi->connector); | ||
| 336 | if (ret < 0) { | ||
| 337 | DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); | ||
| 338 | goto fail; | ||
| 339 | } | ||
| 340 | |||
| 335 | encoder->bridge = hdmi->bridge; | 341 | encoder->bridge = hdmi->bridge; |
| 336 | 342 | ||
| 337 | priv->bridges[priv->num_bridges++] = hdmi->bridge; | 343 | priv->bridges[priv->num_bridges++] = hdmi->bridge; |
| @@ -571,7 +577,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) | |||
| 571 | { | 577 | { |
| 572 | struct drm_device *drm = dev_get_drvdata(master); | 578 | struct drm_device *drm = dev_get_drvdata(master); |
| 573 | struct msm_drm_private *priv = drm->dev_private; | 579 | struct msm_drm_private *priv = drm->dev_private; |
| 574 | static struct hdmi_platform_config *hdmi_cfg; | 580 | struct hdmi_platform_config *hdmi_cfg; |
| 575 | struct hdmi *hdmi; | 581 | struct hdmi *hdmi; |
| 576 | struct device_node *of_node = dev->of_node; | 582 | struct device_node *of_node = dev->of_node; |
| 577 | int i, err; | 583 | int i, err; |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index accc9a61611d..5c5df6ab2a57 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h | |||
| @@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge); | |||
| 245 | 245 | ||
| 246 | void msm_hdmi_connector_irq(struct drm_connector *connector); | 246 | void msm_hdmi_connector_irq(struct drm_connector *connector); |
| 247 | struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi); | 247 | struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi); |
| 248 | int msm_hdmi_hpd_enable(struct drm_connector *connector); | ||
| 248 | 249 | ||
| 249 | /* | 250 | /* |
| 250 | * i2c adapter for ddc: | 251 | * i2c adapter for ddc: |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index e9c9a0af508e..30e908dfded7 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c | |||
| @@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) | |||
| 167 | } | 167 | } |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | static int hpd_enable(struct hdmi_connector *hdmi_connector) | 170 | int msm_hdmi_hpd_enable(struct drm_connector *connector) |
| 171 | { | 171 | { |
| 172 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); | ||
| 172 | struct hdmi *hdmi = hdmi_connector->hdmi; | 173 | struct hdmi *hdmi = hdmi_connector->hdmi; |
| 173 | const struct hdmi_platform_config *config = hdmi->config; | 174 | const struct hdmi_platform_config *config = hdmi->config; |
| 174 | struct device *dev = &hdmi->pdev->dev; | 175 | struct device *dev = &hdmi->pdev->dev; |
| @@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) | |||
| 450 | { | 451 | { |
| 451 | struct drm_connector *connector = NULL; | 452 | struct drm_connector *connector = NULL; |
| 452 | struct hdmi_connector *hdmi_connector; | 453 | struct hdmi_connector *hdmi_connector; |
| 453 | int ret; | ||
| 454 | 454 | ||
| 455 | hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL); | 455 | hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL); |
| 456 | if (!hdmi_connector) | 456 | if (!hdmi_connector) |
| @@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) | |||
| 471 | connector->interlace_allowed = 0; | 471 | connector->interlace_allowed = 0; |
| 472 | connector->doublescan_allowed = 0; | 472 | connector->doublescan_allowed = 0; |
| 473 | 473 | ||
| 474 | ret = hpd_enable(hdmi_connector); | ||
| 475 | if (ret) { | ||
| 476 | dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); | ||
| 477 | return ERR_PTR(ret); | ||
| 478 | } | ||
| 479 | |||
| 480 | drm_connector_attach_encoder(connector, hdmi->encoder); | 474 | drm_connector_attach_encoder(connector, hdmi->encoder); |
| 481 | 475 | ||
| 482 | return connector; | 476 | return connector; |
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 4bcdeca7479d..2088a20eb270 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c | |||
| @@ -34,7 +34,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, | |||
| 34 | if (!new_crtc_state->active) | 34 | if (!new_crtc_state->active) |
| 35 | continue; | 35 | continue; |
| 36 | 36 | ||
| 37 | if (drm_crtc_vblank_get(crtc)) | ||
| 38 | continue; | ||
| 39 | |||
| 37 | kms->funcs->wait_for_crtc_commit_done(kms, crtc); | 40 | kms->funcs->wait_for_crtc_commit_done(kms, crtc); |
| 41 | |||
| 42 | drm_crtc_vblank_put(crtc); | ||
| 38 | } | 43 | } |
| 39 | } | 44 | } |
| 40 | 45 | ||
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index f0da0d3c8a80..d756436c1fcd 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c | |||
| @@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) | |||
| 84 | 84 | ||
| 85 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 85 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 86 | if (ret) | 86 | if (ret) |
| 87 | return ret; | 87 | goto free_priv; |
| 88 | 88 | ||
| 89 | pm_runtime_get_sync(&gpu->pdev->dev); | 89 | pm_runtime_get_sync(&gpu->pdev->dev); |
| 90 | show_priv->state = gpu->funcs->gpu_state_get(gpu); | 90 | show_priv->state = gpu->funcs->gpu_state_get(gpu); |
| @@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file) | |||
| 94 | 94 | ||
| 95 | if (IS_ERR(show_priv->state)) { | 95 | if (IS_ERR(show_priv->state)) { |
| 96 | ret = PTR_ERR(show_priv->state); | 96 | ret = PTR_ERR(show_priv->state); |
| 97 | kfree(show_priv); | 97 | goto free_priv; |
| 98 | return ret; | ||
| 99 | } | 98 | } |
| 100 | 99 | ||
| 101 | show_priv->dev = dev; | 100 | show_priv->dev = dev; |
| 102 | 101 | ||
| 103 | return single_open(file, msm_gpu_show, show_priv); | 102 | ret = single_open(file, msm_gpu_show, show_priv); |
| 103 | if (ret) | ||
| 104 | goto free_priv; | ||
| 105 | |||
| 106 | return 0; | ||
| 107 | |||
| 108 | free_priv: | ||
| 109 | kfree(show_priv); | ||
| 110 | return ret; | ||
| 104 | } | 111 | } |
| 105 | 112 | ||
| 106 | static const struct file_operations msm_gpu_fops = { | 113 | static const struct file_operations msm_gpu_fops = { |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 4904d0d41409..dcff812c63d0 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
| @@ -553,17 +553,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) | |||
| 553 | kthread_run(kthread_worker_fn, | 553 | kthread_run(kthread_worker_fn, |
| 554 | &priv->disp_thread[i].worker, | 554 | &priv->disp_thread[i].worker, |
| 555 | "crtc_commit:%d", priv->disp_thread[i].crtc_id); | 555 | "crtc_commit:%d", priv->disp_thread[i].crtc_id); |
| 556 | ret = sched_setscheduler(priv->disp_thread[i].thread, | ||
| 557 | SCHED_FIFO, ¶m); | ||
| 558 | if (ret) | ||
| 559 | pr_warn("display thread priority update failed: %d\n", | ||
| 560 | ret); | ||
| 561 | |||
| 562 | if (IS_ERR(priv->disp_thread[i].thread)) { | 556 | if (IS_ERR(priv->disp_thread[i].thread)) { |
| 563 | dev_err(dev, "failed to create crtc_commit kthread\n"); | 557 | dev_err(dev, "failed to create crtc_commit kthread\n"); |
| 564 | priv->disp_thread[i].thread = NULL; | 558 | priv->disp_thread[i].thread = NULL; |
| 559 | goto err_msm_uninit; | ||
| 565 | } | 560 | } |
| 566 | 561 | ||
| 562 | ret = sched_setscheduler(priv->disp_thread[i].thread, | ||
| 563 | SCHED_FIFO, ¶m); | ||
| 564 | if (ret) | ||
| 565 | dev_warn(dev, "disp_thread set priority failed: %d\n", | ||
| 566 | ret); | ||
| 567 | |||
| 567 | /* initialize event thread */ | 568 | /* initialize event thread */ |
| 568 | priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; | 569 | priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; |
| 569 | kthread_init_worker(&priv->event_thread[i].worker); | 570 | kthread_init_worker(&priv->event_thread[i].worker); |
| @@ -572,6 +573,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) | |||
| 572 | kthread_run(kthread_worker_fn, | 573 | kthread_run(kthread_worker_fn, |
| 573 | &priv->event_thread[i].worker, | 574 | &priv->event_thread[i].worker, |
| 574 | "crtc_event:%d", priv->event_thread[i].crtc_id); | 575 | "crtc_event:%d", priv->event_thread[i].crtc_id); |
| 576 | if (IS_ERR(priv->event_thread[i].thread)) { | ||
| 577 | dev_err(dev, "failed to create crtc_event kthread\n"); | ||
| 578 | priv->event_thread[i].thread = NULL; | ||
| 579 | goto err_msm_uninit; | ||
| 580 | } | ||
| 581 | |||
| 575 | /** | 582 | /** |
| 576 | * event thread should also run at same priority as disp_thread | 583 | * event thread should also run at same priority as disp_thread |
| 577 | * because it is handling frame_done events. A lower priority | 584 | * because it is handling frame_done events. A lower priority |
| @@ -580,34 +587,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) | |||
| 580 | * failure at crtc commit level. | 587 | * failure at crtc commit level. |
| 581 | */ | 588 | */ |
| 582 | ret = sched_setscheduler(priv->event_thread[i].thread, | 589 | ret = sched_setscheduler(priv->event_thread[i].thread, |
| 583 | SCHED_FIFO, ¶m); | 590 | SCHED_FIFO, ¶m); |
| 584 | if (ret) | 591 | if (ret) |
| 585 | pr_warn("display event thread priority update failed: %d\n", | 592 | dev_warn(dev, "event_thread set priority failed:%d\n", |
| 586 | ret); | 593 | ret); |
| 587 | |||
| 588 | if (IS_ERR(priv->event_thread[i].thread)) { | ||
| 589 | dev_err(dev, "failed to create crtc_event kthread\n"); | ||
| 590 | priv->event_thread[i].thread = NULL; | ||
| 591 | } | ||
| 592 | |||
| 593 | if ((!priv->disp_thread[i].thread) || | ||
| 594 | !priv->event_thread[i].thread) { | ||
| 595 | /* clean up previously created threads if any */ | ||
| 596 | for ( ; i >= 0; i--) { | ||
| 597 | if (priv->disp_thread[i].thread) { | ||
| 598 | kthread_stop( | ||
| 599 | priv->disp_thread[i].thread); | ||
| 600 | priv->disp_thread[i].thread = NULL; | ||
| 601 | } | ||
| 602 | |||
| 603 | if (priv->event_thread[i].thread) { | ||
| 604 | kthread_stop( | ||
| 605 | priv->event_thread[i].thread); | ||
| 606 | priv->event_thread[i].thread = NULL; | ||
| 607 | } | ||
| 608 | } | ||
| 609 | goto err_msm_uninit; | ||
| 610 | } | ||
| 611 | } | 594 | } |
| 612 | 595 | ||
| 613 | ret = drm_vblank_init(ddev, priv->num_crtcs); | 596 | ret = drm_vblank_init(ddev, priv->num_crtcs); |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 7a7923e6220d..6942604ad9a8 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -317,6 +317,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
| 317 | uint32_t *ptr; | 317 | uint32_t *ptr; |
| 318 | int ret = 0; | 318 | int ret = 0; |
| 319 | 319 | ||
| 320 | if (!nr_relocs) | ||
| 321 | return 0; | ||
| 322 | |||
| 320 | if (offset % 4) { | 323 | if (offset % 4) { |
| 321 | DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); | 324 | DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); |
| 322 | return -EINVAL; | 325 | return -EINVAL; |
| @@ -410,7 +413,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 410 | struct msm_file_private *ctx = file->driver_priv; | 413 | struct msm_file_private *ctx = file->driver_priv; |
| 411 | struct msm_gem_submit *submit; | 414 | struct msm_gem_submit *submit; |
| 412 | struct msm_gpu *gpu = priv->gpu; | 415 | struct msm_gpu *gpu = priv->gpu; |
| 413 | struct dma_fence *in_fence = NULL; | ||
| 414 | struct sync_file *sync_file = NULL; | 416 | struct sync_file *sync_file = NULL; |
| 415 | struct msm_gpu_submitqueue *queue; | 417 | struct msm_gpu_submitqueue *queue; |
| 416 | struct msm_ringbuffer *ring; | 418 | struct msm_ringbuffer *ring; |
| @@ -443,6 +445,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 443 | ring = gpu->rb[queue->prio]; | 445 | ring = gpu->rb[queue->prio]; |
| 444 | 446 | ||
| 445 | if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { | 447 | if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { |
| 448 | struct dma_fence *in_fence; | ||
| 449 | |||
| 446 | in_fence = sync_file_get_fence(args->fence_fd); | 450 | in_fence = sync_file_get_fence(args->fence_fd); |
| 447 | 451 | ||
| 448 | if (!in_fence) | 452 | if (!in_fence) |
| @@ -452,11 +456,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 452 | * Wait if the fence is from a foreign context, or if the fence | 456 | * Wait if the fence is from a foreign context, or if the fence |
| 453 | * array contains any fence from a foreign context. | 457 | * array contains any fence from a foreign context. |
| 454 | */ | 458 | */ |
| 455 | if (!dma_fence_match_context(in_fence, ring->fctx->context)) { | 459 | ret = 0; |
| 460 | if (!dma_fence_match_context(in_fence, ring->fctx->context)) | ||
| 456 | ret = dma_fence_wait(in_fence, true); | 461 | ret = dma_fence_wait(in_fence, true); |
| 457 | if (ret) | 462 | |
| 458 | return ret; | 463 | dma_fence_put(in_fence); |
| 459 | } | 464 | if (ret) |
| 465 | return ret; | ||
| 460 | } | 466 | } |
| 461 | 467 | ||
| 462 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 468 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| @@ -582,8 +588,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 582 | } | 588 | } |
| 583 | 589 | ||
| 584 | out: | 590 | out: |
| 585 | if (in_fence) | ||
| 586 | dma_fence_put(in_fence); | ||
| 587 | submit_cleanup(submit); | 591 | submit_cleanup(submit); |
| 588 | if (ret) | 592 | if (ret) |
| 589 | msm_gem_submit_free(submit); | 593 | msm_gem_submit_free(submit); |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 11aac8337066..2b7c8946adba 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -345,6 +345,10 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, | |||
| 345 | { | 345 | { |
| 346 | struct msm_gpu_state *state; | 346 | struct msm_gpu_state *state; |
| 347 | 347 | ||
| 348 | /* Check if the target supports capturing crash state */ | ||
| 349 | if (!gpu->funcs->gpu_state_get) | ||
| 350 | return; | ||
| 351 | |||
| 348 | /* Only save one crash state at a time */ | 352 | /* Only save one crash state at a time */ |
| 349 | if (gpu->crashstate) | 353 | if (gpu->crashstate) |
| 350 | return; | 354 | return; |
| @@ -434,10 +438,9 @@ static void recover_worker(struct work_struct *work) | |||
| 434 | if (submit) { | 438 | if (submit) { |
| 435 | struct task_struct *task; | 439 | struct task_struct *task; |
| 436 | 440 | ||
| 437 | rcu_read_lock(); | 441 | task = get_pid_task(submit->pid, PIDTYPE_PID); |
| 438 | task = pid_task(submit->pid, PIDTYPE_PID); | ||
| 439 | if (task) { | 442 | if (task) { |
| 440 | comm = kstrdup(task->comm, GFP_ATOMIC); | 443 | comm = kstrdup(task->comm, GFP_KERNEL); |
| 441 | 444 | ||
| 442 | /* | 445 | /* |
| 443 | * So slightly annoying, in other paths like | 446 | * So slightly annoying, in other paths like |
| @@ -450,10 +453,10 @@ static void recover_worker(struct work_struct *work) | |||
| 450 | * about the submit going away. | 453 | * about the submit going away. |
| 451 | */ | 454 | */ |
| 452 | mutex_unlock(&dev->struct_mutex); | 455 | mutex_unlock(&dev->struct_mutex); |
| 453 | cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC); | 456 | cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL); |
| 457 | put_task_struct(task); | ||
| 454 | mutex_lock(&dev->struct_mutex); | 458 | mutex_lock(&dev->struct_mutex); |
| 455 | } | 459 | } |
| 456 | rcu_read_unlock(); | ||
| 457 | 460 | ||
| 458 | if (comm && cmd) { | 461 | if (comm && cmd) { |
| 459 | dev_err(dev->dev, "%s: offending task: %s (%s)\n", | 462 | dev_err(dev->dev, "%s: offending task: %s (%s)\n", |
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index b23d33622f37..2a90aa4caec0 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c | |||
| @@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, | |||
| 66 | // pm_runtime_get_sync(mmu->dev); | 66 | // pm_runtime_get_sync(mmu->dev); |
| 67 | ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); | 67 | ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); |
| 68 | // pm_runtime_put_sync(mmu->dev); | 68 | // pm_runtime_put_sync(mmu->dev); |
| 69 | WARN_ON(ret < 0); | 69 | WARN_ON(!ret); |
| 70 | 70 | ||
| 71 | return (ret == len) ? 0 : -EINVAL; | 71 | return (ret == len) ? 0 : -EINVAL; |
| 72 | } | 72 | } |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index cca933458439..0c2c8d2c631f 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd, | |||
| 316 | uint64_t iova, uint32_t size) | 316 | uint64_t iova, uint32_t size) |
| 317 | { | 317 | { |
| 318 | struct msm_gem_object *obj = submit->bos[idx].obj; | 318 | struct msm_gem_object *obj = submit->bos[idx].obj; |
| 319 | unsigned offset = 0; | ||
| 319 | const char *buf; | 320 | const char *buf; |
| 320 | 321 | ||
| 321 | if (iova) { | 322 | if (iova) { |
| 322 | buf += iova - submit->bos[idx].iova; | 323 | offset = iova - submit->bos[idx].iova; |
| 323 | } else { | 324 | } else { |
| 324 | iova = submit->bos[idx].iova; | 325 | iova = submit->bos[idx].iova; |
| 325 | size = obj->base.size; | 326 | size = obj->base.size; |
| @@ -340,6 +341,8 @@ static void snapshot_buf(struct msm_rd_state *rd, | |||
| 340 | if (IS_ERR(buf)) | 341 | if (IS_ERR(buf)) |
| 341 | return; | 342 | return; |
| 342 | 343 | ||
| 344 | buf += offset; | ||
| 345 | |||
| 343 | rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); | 346 | rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); |
| 344 | 347 | ||
| 345 | msm_gem_put_vaddr(&obj->base); | 348 | msm_gem_put_vaddr(&obj->base); |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 6cbbae3f438b..db1bf7f88c1f 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c | |||
| @@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, | |||
| 198 | /****************************************************************************** | 198 | /****************************************************************************** |
| 199 | * EVO channel helpers | 199 | * EVO channel helpers |
| 200 | *****************************************************************************/ | 200 | *****************************************************************************/ |
| 201 | static void | ||
| 202 | evo_flush(struct nv50_dmac *dmac) | ||
| 203 | { | ||
| 204 | /* Push buffer fetches are not coherent with BAR1, we need to ensure | ||
| 205 | * writes have been flushed right through to VRAM before writing PUT. | ||
| 206 | */ | ||
| 207 | if (dmac->push.type & NVIF_MEM_VRAM) { | ||
| 208 | struct nvif_device *device = dmac->base.device; | ||
| 209 | nvif_wr32(&device->object, 0x070000, 0x00000001); | ||
| 210 | nvif_msec(device, 2000, | ||
| 211 | if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002)) | ||
| 212 | break; | ||
| 213 | ); | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 201 | u32 * | 217 | u32 * |
| 202 | evo_wait(struct nv50_dmac *evoc, int nr) | 218 | evo_wait(struct nv50_dmac *evoc, int nr) |
| 203 | { | 219 | { |
| @@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr) | |||
| 208 | mutex_lock(&dmac->lock); | 224 | mutex_lock(&dmac->lock); |
| 209 | if (put + nr >= (PAGE_SIZE / 4) - 8) { | 225 | if (put + nr >= (PAGE_SIZE / 4) - 8) { |
| 210 | dmac->ptr[put] = 0x20000000; | 226 | dmac->ptr[put] = 0x20000000; |
| 227 | evo_flush(dmac); | ||
| 211 | 228 | ||
| 212 | nvif_wr32(&dmac->base.user, 0x0000, 0x00000000); | 229 | nvif_wr32(&dmac->base.user, 0x0000, 0x00000000); |
| 213 | if (nvif_msec(device, 2000, | 230 | if (nvif_msec(device, 2000, |
| @@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc) | |||
| 230 | { | 247 | { |
| 231 | struct nv50_dmac *dmac = evoc; | 248 | struct nv50_dmac *dmac = evoc; |
| 232 | 249 | ||
| 233 | /* Push buffer fetches are not coherent with BAR1, we need to ensure | 250 | evo_flush(dmac); |
| 234 | * writes have been flushed right through to VRAM before writing PUT. | ||
| 235 | */ | ||
| 236 | if (dmac->push.type & NVIF_MEM_VRAM) { | ||
| 237 | struct nvif_device *device = dmac->base.device; | ||
| 238 | nvif_wr32(&device->object, 0x070000, 0x00000001); | ||
| 239 | nvif_msec(device, 2000, | ||
| 240 | if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002)) | ||
| 241 | break; | ||
| 242 | ); | ||
| 243 | } | ||
| 244 | 251 | ||
| 245 | nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2); | 252 | nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2); |
| 246 | mutex_unlock(&dmac->lock); | 253 | mutex_unlock(&dmac->lock); |
| @@ -1264,6 +1271,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm) | |||
| 1264 | { | 1271 | { |
| 1265 | struct nv50_mstm *mstm = *pmstm; | 1272 | struct nv50_mstm *mstm = *pmstm; |
| 1266 | if (mstm) { | 1273 | if (mstm) { |
| 1274 | drm_dp_mst_topology_mgr_destroy(&mstm->mgr); | ||
| 1267 | kfree(*pmstm); | 1275 | kfree(*pmstm); |
| 1268 | *pmstm = NULL; | 1276 | *pmstm = NULL; |
| 1269 | } | 1277 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2b2baf6e0e0d..d2928d43f29a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -1171,10 +1171,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func, | |||
| 1171 | goto err_free; | 1171 | goto err_free; |
| 1172 | } | 1172 | } |
| 1173 | 1173 | ||
| 1174 | err = nouveau_drm_device_init(drm); | ||
| 1175 | if (err) | ||
| 1176 | goto err_put; | ||
| 1177 | |||
| 1174 | platform_set_drvdata(pdev, drm); | 1178 | platform_set_drvdata(pdev, drm); |
| 1175 | 1179 | ||
| 1176 | return drm; | 1180 | return drm; |
| 1177 | 1181 | ||
| 1182 | err_put: | ||
| 1183 | drm_dev_put(drm); | ||
| 1178 | err_free: | 1184 | err_free: |
| 1179 | nvkm_device_del(pdevice); | 1185 | nvkm_device_del(pdevice); |
| 1180 | 1186 | ||
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 1f8161b041be..465120809eb3 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c | |||
| @@ -177,6 +177,7 @@ static int panel_dpi_probe(struct platform_device *pdev) | |||
| 177 | dssdev->type = OMAP_DISPLAY_TYPE_DPI; | 177 | dssdev->type = OMAP_DISPLAY_TYPE_DPI; |
| 178 | dssdev->owner = THIS_MODULE; | 178 | dssdev->owner = THIS_MODULE; |
| 179 | dssdev->of_ports = BIT(0); | 179 | dssdev->of_ports = BIT(0); |
| 180 | drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags); | ||
| 180 | 181 | ||
| 181 | omapdss_display_init(dssdev); | 182 | omapdss_display_init(dssdev); |
| 182 | omapdss_device_register(dssdev); | 183 | omapdss_device_register(dssdev); |
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 0a485c5b982e..00a9c2ab9e6c 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c | |||
| @@ -5418,9 +5418,15 @@ static int dsi_probe(struct platform_device *pdev) | |||
| 5418 | dsi->num_lanes_supported = 3; | 5418 | dsi->num_lanes_supported = 3; |
| 5419 | } | 5419 | } |
| 5420 | 5420 | ||
| 5421 | r = of_platform_populate(dev->of_node, NULL, NULL, dev); | ||
| 5422 | if (r) { | ||
| 5423 | DSSERR("Failed to populate DSI child devices: %d\n", r); | ||
| 5424 | goto err_pm_disable; | ||
| 5425 | } | ||
| 5426 | |||
| 5421 | r = dsi_init_output(dsi); | 5427 | r = dsi_init_output(dsi); |
| 5422 | if (r) | 5428 | if (r) |
| 5423 | goto err_pm_disable; | 5429 | goto err_of_depopulate; |
| 5424 | 5430 | ||
| 5425 | r = dsi_probe_of(dsi); | 5431 | r = dsi_probe_of(dsi); |
| 5426 | if (r) { | 5432 | if (r) { |
| @@ -5428,22 +5434,16 @@ static int dsi_probe(struct platform_device *pdev) | |||
| 5428 | goto err_uninit_output; | 5434 | goto err_uninit_output; |
| 5429 | } | 5435 | } |
| 5430 | 5436 | ||
| 5431 | r = of_platform_populate(dev->of_node, NULL, NULL, dev); | ||
| 5432 | if (r) { | ||
| 5433 | DSSERR("Failed to populate DSI child devices: %d\n", r); | ||
| 5434 | goto err_uninit_output; | ||
| 5435 | } | ||
| 5436 | |||
| 5437 | r = component_add(&pdev->dev, &dsi_component_ops); | 5437 | r = component_add(&pdev->dev, &dsi_component_ops); |
| 5438 | if (r) | 5438 | if (r) |
| 5439 | goto err_of_depopulate; | 5439 | goto err_uninit_output; |
| 5440 | 5440 | ||
| 5441 | return 0; | 5441 | return 0; |
| 5442 | 5442 | ||
| 5443 | err_of_depopulate: | ||
| 5444 | of_platform_depopulate(dev); | ||
| 5445 | err_uninit_output: | 5443 | err_uninit_output: |
| 5446 | dsi_uninit_output(dsi); | 5444 | dsi_uninit_output(dsi); |
| 5445 | err_of_depopulate: | ||
| 5446 | of_platform_depopulate(dev); | ||
| 5447 | err_pm_disable: | 5447 | err_pm_disable: |
| 5448 | pm_runtime_disable(dev); | 5448 | pm_runtime_disable(dev); |
| 5449 | return r; | 5449 | return r; |
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 1f698a95a94a..33e15cb77efa 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h | |||
| @@ -432,7 +432,7 @@ struct omap_dss_device { | |||
| 432 | const struct omap_dss_driver *driver; | 432 | const struct omap_dss_driver *driver; |
| 433 | const struct omap_dss_device_ops *ops; | 433 | const struct omap_dss_device_ops *ops; |
| 434 | unsigned long ops_flags; | 434 | unsigned long ops_flags; |
| 435 | unsigned long bus_flags; | 435 | u32 bus_flags; |
| 436 | 436 | ||
| 437 | /* helper variable for driver suspend/resume */ | 437 | /* helper variable for driver suspend/resume */ |
| 438 | bool activate_after_resume; | 438 | bool activate_after_resume; |
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 452e625f6ce3..933ebc9f9faa 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c | |||
| @@ -52,17 +52,44 @@ static const struct drm_encoder_funcs omap_encoder_funcs = { | |||
| 52 | .destroy = omap_encoder_destroy, | 52 | .destroy = omap_encoder_destroy, |
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| 55 | static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder, | ||
| 56 | struct drm_display_mode *adjusted_mode) | ||
| 57 | { | ||
| 58 | struct drm_device *dev = encoder->dev; | ||
| 59 | struct omap_encoder *omap_encoder = to_omap_encoder(encoder); | ||
| 60 | struct omap_dss_device *dssdev = omap_encoder->output; | ||
| 61 | struct drm_connector *connector; | ||
| 62 | bool hdmi_mode; | ||
| 63 | |||
| 64 | hdmi_mode = false; | ||
| 65 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 66 | if (connector->encoder == encoder) { | ||
| 67 | hdmi_mode = omap_connector_get_hdmi_mode(connector); | ||
| 68 | break; | ||
| 69 | } | ||
| 70 | } | ||
| 71 | |||
| 72 | if (dssdev->ops->hdmi.set_hdmi_mode) | ||
| 73 | dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode); | ||
| 74 | |||
| 75 | if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) { | ||
| 76 | struct hdmi_avi_infoframe avi; | ||
| 77 | int r; | ||
| 78 | |||
| 79 | r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode, | ||
| 80 | false); | ||
| 81 | if (r == 0) | ||
| 82 | dssdev->ops->hdmi.set_infoframe(dssdev, &avi); | ||
| 83 | } | ||
| 84 | } | ||
| 85 | |||
| 55 | static void omap_encoder_mode_set(struct drm_encoder *encoder, | 86 | static void omap_encoder_mode_set(struct drm_encoder *encoder, |
| 56 | struct drm_display_mode *mode, | 87 | struct drm_display_mode *mode, |
| 57 | struct drm_display_mode *adjusted_mode) | 88 | struct drm_display_mode *adjusted_mode) |
| 58 | { | 89 | { |
| 59 | struct drm_device *dev = encoder->dev; | ||
| 60 | struct omap_encoder *omap_encoder = to_omap_encoder(encoder); | 90 | struct omap_encoder *omap_encoder = to_omap_encoder(encoder); |
| 61 | struct drm_connector *connector; | ||
| 62 | struct omap_dss_device *dssdev; | 91 | struct omap_dss_device *dssdev; |
| 63 | struct videomode vm = { 0 }; | 92 | struct videomode vm = { 0 }; |
| 64 | bool hdmi_mode; | ||
| 65 | int r; | ||
| 66 | 93 | ||
| 67 | drm_display_mode_to_videomode(adjusted_mode, &vm); | 94 | drm_display_mode_to_videomode(adjusted_mode, &vm); |
| 68 | 95 | ||
| @@ -112,27 +139,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, | |||
| 112 | } | 139 | } |
| 113 | 140 | ||
| 114 | /* Set the HDMI mode and HDMI infoframe if applicable. */ | 141 | /* Set the HDMI mode and HDMI infoframe if applicable. */ |
| 115 | hdmi_mode = false; | 142 | if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI) |
| 116 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 143 | omap_encoder_hdmi_mode_set(encoder, adjusted_mode); |
| 117 | if (connector->encoder == encoder) { | ||
| 118 | hdmi_mode = omap_connector_get_hdmi_mode(connector); | ||
| 119 | break; | ||
| 120 | } | ||
| 121 | } | ||
| 122 | |||
| 123 | dssdev = omap_encoder->output; | ||
| 124 | |||
| 125 | if (dssdev->ops->hdmi.set_hdmi_mode) | ||
| 126 | dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode); | ||
| 127 | |||
| 128 | if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) { | ||
| 129 | struct hdmi_avi_infoframe avi; | ||
| 130 | |||
| 131 | r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode, | ||
| 132 | false); | ||
| 133 | if (r == 0) | ||
| 134 | dssdev->ops->hdmi.set_infoframe(dssdev, &avi); | ||
| 135 | } | ||
| 136 | } | 144 | } |
| 137 | 145 | ||
| 138 | static void omap_encoder_disable(struct drm_encoder *encoder) | 146 | static void omap_encoder_disable(struct drm_encoder *encoder) |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 941f35233b1f..5864cb452c5c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | |||
| @@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev) | |||
| 448 | return 0; | 448 | return 0; |
| 449 | } | 449 | } |
| 450 | 450 | ||
| 451 | static void rockchip_drm_platform_shutdown(struct platform_device *pdev) | ||
| 452 | { | ||
| 453 | rockchip_drm_platform_remove(pdev); | ||
| 454 | } | ||
| 455 | |||
| 456 | static const struct of_device_id rockchip_drm_dt_ids[] = { | 451 | static const struct of_device_id rockchip_drm_dt_ids[] = { |
| 457 | { .compatible = "rockchip,display-subsystem", }, | 452 | { .compatible = "rockchip,display-subsystem", }, |
| 458 | { /* sentinel */ }, | 453 | { /* sentinel */ }, |
| @@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids); | |||
| 462 | static struct platform_driver rockchip_drm_platform_driver = { | 457 | static struct platform_driver rockchip_drm_platform_driver = { |
| 463 | .probe = rockchip_drm_platform_probe, | 458 | .probe = rockchip_drm_platform_probe, |
| 464 | .remove = rockchip_drm_platform_remove, | 459 | .remove = rockchip_drm_platform_remove, |
| 465 | .shutdown = rockchip_drm_platform_shutdown, | ||
| 466 | .driver = { | 460 | .driver = { |
| 467 | .name = "rockchip-drm", | 461 | .name = "rockchip-drm", |
| 468 | .of_match_table = rockchip_drm_dt_ids, | 462 | .of_match_table = rockchip_drm_dt_ids, |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ba80150d1052..895d77d799e4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
| @@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
| 492 | if (!fbo) | 492 | if (!fbo) |
| 493 | return -ENOMEM; | 493 | return -ENOMEM; |
| 494 | 494 | ||
| 495 | ttm_bo_get(bo); | ||
| 496 | fbo->base = *bo; | 495 | fbo->base = *bo; |
| 496 | fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT; | ||
| 497 | |||
| 498 | ttm_bo_get(bo); | ||
| 497 | fbo->bo = bo; | 499 | fbo->bo = bo; |
| 498 | 500 | ||
| 499 | /** | 501 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 61a84b958d67..d7a2dfb8ee9b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -49,6 +49,8 @@ | |||
| 49 | 49 | ||
| 50 | #define VMWGFX_REPO "In Tree" | 50 | #define VMWGFX_REPO "In Tree" |
| 51 | 51 | ||
| 52 | #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) | ||
| 53 | |||
| 52 | 54 | ||
| 53 | /** | 55 | /** |
| 54 | * Fully encoded drm commands. Might move to vmw_drm.h | 56 | * Fully encoded drm commands. Might move to vmw_drm.h |
| @@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 918 | spin_unlock(&dev_priv->cap_lock); | 920 | spin_unlock(&dev_priv->cap_lock); |
| 919 | } | 921 | } |
| 920 | 922 | ||
| 921 | 923 | vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); | |
| 922 | ret = vmw_kms_init(dev_priv); | 924 | ret = vmw_kms_init(dev_priv); |
| 923 | if (unlikely(ret != 0)) | 925 | if (unlikely(ret != 0)) |
| 924 | goto out_no_kms; | 926 | goto out_no_kms; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 59f614225bcd..aca974b14b55 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -606,6 +606,9 @@ struct vmw_private { | |||
| 606 | 606 | ||
| 607 | struct vmw_cmdbuf_man *cman; | 607 | struct vmw_cmdbuf_man *cman; |
| 608 | DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX); | 608 | DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX); |
| 609 | |||
| 610 | /* Validation memory reservation */ | ||
| 611 | struct vmw_validation_mem vvm; | ||
| 609 | }; | 612 | }; |
| 610 | 613 | ||
| 611 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) | 614 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) |
| @@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv); | |||
| 846 | extern void vmw_ttm_global_release(struct vmw_private *dev_priv); | 849 | extern void vmw_ttm_global_release(struct vmw_private *dev_priv); |
| 847 | extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); | 850 | extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); |
| 848 | 851 | ||
| 852 | extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, | ||
| 853 | size_t gran); | ||
| 849 | /** | 854 | /** |
| 850 | * TTM buffer object driver - vmwgfx_ttm_buffer.c | 855 | * TTM buffer object driver - vmwgfx_ttm_buffer.c |
| 851 | */ | 856 | */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 5a6b70ba137a..f2d13a72c05d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
| 1738 | void *buf) | 1738 | void *buf) |
| 1739 | { | 1739 | { |
| 1740 | struct vmw_buffer_object *vmw_bo; | 1740 | struct vmw_buffer_object *vmw_bo; |
| 1741 | int ret; | ||
| 1742 | 1741 | ||
| 1743 | struct { | 1742 | struct { |
| 1744 | uint32_t header; | 1743 | uint32_t header; |
| @@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
| 1748 | return vmw_translate_guest_ptr(dev_priv, sw_context, | 1747 | return vmw_translate_guest_ptr(dev_priv, sw_context, |
| 1749 | &cmd->body.ptr, | 1748 | &cmd->body.ptr, |
| 1750 | &vmw_bo); | 1749 | &vmw_bo); |
| 1751 | return ret; | ||
| 1752 | } | 1750 | } |
| 1753 | 1751 | ||
| 1754 | 1752 | ||
| @@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 3837 | struct sync_file *sync_file = NULL; | 3835 | struct sync_file *sync_file = NULL; |
| 3838 | DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); | 3836 | DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); |
| 3839 | 3837 | ||
| 3838 | vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm); | ||
| 3839 | |||
| 3840 | if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { | 3840 | if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { |
| 3841 | out_fence_fd = get_unused_fd_flags(O_CLOEXEC); | 3841 | out_fence_fd = get_unused_fd_flags(O_CLOEXEC); |
| 3842 | if (out_fence_fd < 0) { | 3842 | if (out_fence_fd < 0) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 7b1e5a5cbd2c..f88247046721 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | |||
| @@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv) | |||
| 96 | drm_global_item_unref(&dev_priv->bo_global_ref.ref); | 96 | drm_global_item_unref(&dev_priv->bo_global_ref.ref); |
| 97 | drm_global_item_unref(&dev_priv->mem_global_ref); | 97 | drm_global_item_unref(&dev_priv->mem_global_ref); |
| 98 | } | 98 | } |
| 99 | |||
| 100 | /* struct vmw_validation_mem callback */ | ||
| 101 | static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size) | ||
| 102 | { | ||
| 103 | static struct ttm_operation_ctx ctx = {.interruptible = false, | ||
| 104 | .no_wait_gpu = false}; | ||
| 105 | struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm); | ||
| 106 | |||
| 107 | return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx); | ||
| 108 | } | ||
| 109 | |||
| 110 | /* struct vmw_validation_mem callback */ | ||
| 111 | static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size) | ||
| 112 | { | ||
| 113 | struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm); | ||
| 114 | |||
| 115 | return ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
| 116 | } | ||
| 117 | |||
| 118 | /** | ||
| 119 | * vmw_validation_mem_init_ttm - Interface the validation memory tracker | ||
| 120 | * to ttm. | ||
| 121 | * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private | ||
| 122 | * rather than a struct vmw_validation_mem is to make sure assumption in the | ||
| 123 | * callbacks that struct vmw_private derives from struct vmw_validation_mem | ||
| 124 | * holds true. | ||
| 125 | * @gran: The recommended allocation granularity | ||
| 126 | */ | ||
| 127 | void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran) | ||
| 128 | { | ||
| 129 | struct vmw_validation_mem *vvm = &dev_priv->vvm; | ||
| 130 | |||
| 131 | vvm->reserve_mem = vmw_vmt_reserve; | ||
| 132 | vvm->unreserve_mem = vmw_vmt_unreserve; | ||
| 133 | vvm->gran = gran; | ||
| 134 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index 184025fa938e..f116f092e00b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | |||
| @@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx, | |||
| 104 | return NULL; | 104 | return NULL; |
| 105 | 105 | ||
| 106 | if (ctx->mem_size_left < size) { | 106 | if (ctx->mem_size_left < size) { |
| 107 | struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); | 107 | struct page *page; |
| 108 | 108 | ||
| 109 | if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) { | ||
| 110 | int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran); | ||
| 111 | |||
| 112 | if (ret) | ||
| 113 | return NULL; | ||
| 114 | |||
| 115 | ctx->vm_size_left += ctx->vm->gran; | ||
| 116 | ctx->total_mem += ctx->vm->gran; | ||
| 117 | } | ||
| 118 | |||
| 119 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
| 109 | if (!page) | 120 | if (!page) |
| 110 | return NULL; | 121 | return NULL; |
| 111 | 122 | ||
| 123 | if (ctx->vm) | ||
| 124 | ctx->vm_size_left -= PAGE_SIZE; | ||
| 125 | |||
| 112 | list_add_tail(&page->lru, &ctx->page_list); | 126 | list_add_tail(&page->lru, &ctx->page_list); |
| 113 | ctx->page_address = page_address(page); | 127 | ctx->page_address = page_address(page); |
| 114 | ctx->mem_size_left = PAGE_SIZE; | 128 | ctx->mem_size_left = PAGE_SIZE; |
| @@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx) | |||
| 138 | } | 152 | } |
| 139 | 153 | ||
| 140 | ctx->mem_size_left = 0; | 154 | ctx->mem_size_left = 0; |
| 155 | if (ctx->vm && ctx->total_mem) { | ||
| 156 | ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem); | ||
| 157 | ctx->total_mem = 0; | ||
| 158 | ctx->vm_size_left = 0; | ||
| 159 | } | ||
| 141 | } | 160 | } |
| 142 | 161 | ||
| 143 | /** | 162 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h index b57e3292c386..3b396fea40d7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | |||
| @@ -34,6 +34,21 @@ | |||
| 34 | #include <drm/ttm/ttm_execbuf_util.h> | 34 | #include <drm/ttm/ttm_execbuf_util.h> |
| 35 | 35 | ||
| 36 | /** | 36 | /** |
| 37 | * struct vmw_validation_mem - Custom interface to provide memory reservations | ||
| 38 | * for the validation code. | ||
| 39 | * @reserve_mem: Callback to reserve memory | ||
| 40 | * @unreserve_mem: Callback to unreserve memory | ||
| 41 | * @gran: Reservation granularity. Contains a hint how much memory should | ||
| 42 | * be reserved in each call to @reserve_mem(). A slow implementation may want | ||
| 43 | * reservation to be done in large batches. | ||
| 44 | */ | ||
| 45 | struct vmw_validation_mem { | ||
| 46 | int (*reserve_mem)(struct vmw_validation_mem *m, size_t size); | ||
| 47 | void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size); | ||
| 48 | size_t gran; | ||
| 49 | }; | ||
| 50 | |||
| 51 | /** | ||
| 37 | * struct vmw_validation_context - Per command submission validation context | 52 | * struct vmw_validation_context - Per command submission validation context |
| 38 | * @ht: Hash table used to find resource- or buffer object duplicates | 53 | * @ht: Hash table used to find resource- or buffer object duplicates |
| 39 | * @resource_list: List head for resource validation metadata | 54 | * @resource_list: List head for resource validation metadata |
| @@ -47,6 +62,10 @@ | |||
| 47 | * buffer objects | 62 | * buffer objects |
| 48 | * @mem_size_left: Free memory left in the last page in @page_list | 63 | * @mem_size_left: Free memory left in the last page in @page_list |
| 49 | * @page_address: Kernel virtual address of the last page in @page_list | 64 | * @page_address: Kernel virtual address of the last page in @page_list |
| 65 | * @vm: A pointer to the memory reservation interface or NULL if no | ||
| 66 | * memory reservation is needed. | ||
| 67 | * @vm_size_left: Amount of reserved memory that so far has not been allocated. | ||
| 68 | * @total_mem: Amount of reserved memory. | ||
| 50 | */ | 69 | */ |
| 51 | struct vmw_validation_context { | 70 | struct vmw_validation_context { |
| 52 | struct drm_open_hash *ht; | 71 | struct drm_open_hash *ht; |
| @@ -59,6 +78,9 @@ struct vmw_validation_context { | |||
| 59 | unsigned int merge_dups; | 78 | unsigned int merge_dups; |
| 60 | unsigned int mem_size_left; | 79 | unsigned int mem_size_left; |
| 61 | u8 *page_address; | 80 | u8 *page_address; |
| 81 | struct vmw_validation_mem *vm; | ||
| 82 | size_t vm_size_left; | ||
| 83 | size_t total_mem; | ||
| 62 | }; | 84 | }; |
| 63 | 85 | ||
| 64 | struct vmw_buffer_object; | 86 | struct vmw_buffer_object; |
| @@ -102,6 +124,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx) | |||
| 102 | } | 124 | } |
| 103 | 125 | ||
| 104 | /** | 126 | /** |
| 127 | * vmw_validation_set_val_mem - Register a validation mem object for | ||
| 128 | * validation memory reservation | ||
| 129 | * @ctx: The validation context | ||
| 130 | * @vm: Pointer to a struct vmw_validation_mem | ||
| 131 | * | ||
| 132 | * Must be set before the first attempt to allocate validation memory. | ||
| 133 | */ | ||
| 134 | static inline void | ||
| 135 | vmw_validation_set_val_mem(struct vmw_validation_context *ctx, | ||
| 136 | struct vmw_validation_mem *vm) | ||
| 137 | { | ||
| 138 | ctx->vm = vm; | ||
| 139 | } | ||
| 140 | |||
| 141 | /** | ||
| 105 | * vmw_validation_set_ht - Register a hash table for duplicate finding | 142 | * vmw_validation_set_ht - Register a hash table for duplicate finding |
| 106 | * @ctx: The validation context | 143 | * @ctx: The validation context |
| 107 | * @ht: Pointer to a hash table to use for duplicate finding | 144 | * @ht: Pointer to a hash table to use for duplicate finding |
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index b372854cf38d..704049e62d58 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c | |||
| @@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device, | |||
| 309 | hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, | 309 | hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, |
| 310 | input_dev->input_buf, len, 1); | 310 | input_dev->input_buf, len, 1); |
| 311 | 311 | ||
| 312 | pm_wakeup_event(&input_dev->device->device, 0); | 312 | pm_wakeup_hard_event(&input_dev->device->device); |
| 313 | 313 | ||
| 314 | break; | 314 | break; |
| 315 | default: | 315 | default: |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index ed35c9a9a110..27519eb8ee63 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -17,6 +17,9 @@ | |||
| 17 | #ifndef HID_IDS_H_FILE | 17 | #ifndef HID_IDS_H_FILE |
| 18 | #define HID_IDS_H_FILE | 18 | #define HID_IDS_H_FILE |
| 19 | 19 | ||
| 20 | #define USB_VENDOR_ID_258A 0x258a | ||
| 21 | #define USB_DEVICE_ID_258A_6A88 0x6a88 | ||
| 22 | |||
| 20 | #define USB_VENDOR_ID_3M 0x0596 | 23 | #define USB_VENDOR_ID_3M 0x0596 |
| 21 | #define USB_DEVICE_ID_3M1968 0x0500 | 24 | #define USB_DEVICE_ID_3M1968 0x0500 |
| 22 | #define USB_DEVICE_ID_3M2256 0x0502 | 25 | #define USB_DEVICE_ID_3M2256 0x0502 |
| @@ -941,6 +944,10 @@ | |||
| 941 | #define USB_VENDOR_ID_REALTEK 0x0bda | 944 | #define USB_VENDOR_ID_REALTEK 0x0bda |
| 942 | #define USB_DEVICE_ID_REALTEK_READER 0x0152 | 945 | #define USB_DEVICE_ID_REALTEK_READER 0x0152 |
| 943 | 946 | ||
| 947 | #define USB_VENDOR_ID_RETROUSB 0xf000 | ||
| 948 | #define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD 0x0003 | ||
| 949 | #define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT 0x00f1 | ||
| 950 | |||
| 944 | #define USB_VENDOR_ID_ROCCAT 0x1e7d | 951 | #define USB_VENDOR_ID_ROCCAT 0x1e7d |
| 945 | #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 | 952 | #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 |
| 946 | #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c | 953 | #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c |
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index 1882a4ab0f29..98b059d79bc8 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c | |||
| @@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field, | |||
| 42 | 42 | ||
| 43 | static const struct hid_device_id ite_devices[] = { | 43 | static const struct hid_device_id ite_devices[] = { |
| 44 | { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, | 44 | { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, |
| 45 | { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, | ||
| 45 | { } | 46 | { } |
| 46 | }; | 47 | }; |
| 47 | MODULE_DEVICE_TABLE(hid, ite_devices); | 48 | MODULE_DEVICE_TABLE(hid, ite_devices); |
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index c85a79986b6a..94088c0ed68a 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c | |||
| @@ -137,6 +137,8 @@ static const struct hid_device_id hid_quirks[] = { | |||
| 137 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET }, | 137 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET }, |
| 138 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET }, | 138 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET }, |
| 139 | { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS }, | 139 | { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS }, |
| 140 | { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, | ||
| 141 | { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, | ||
| 140 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, | 142 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, |
| 141 | { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, | 143 | { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, |
| 142 | { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, | 144 | { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, |
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 6277597d3d58..edd34c167a9b 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
| @@ -435,61 +435,16 @@ void vmbus_free_channels(void) | |||
| 435 | } | 435 | } |
| 436 | } | 436 | } |
| 437 | 437 | ||
| 438 | /* | 438 | /* Note: the function can run concurrently for primary/sub channels. */ |
| 439 | * vmbus_process_offer - Process the offer by creating a channel/device | 439 | static void vmbus_add_channel_work(struct work_struct *work) |
| 440 | * associated with this offer | ||
| 441 | */ | ||
| 442 | static void vmbus_process_offer(struct vmbus_channel *newchannel) | ||
| 443 | { | 440 | { |
| 444 | struct vmbus_channel *channel; | 441 | struct vmbus_channel *newchannel = |
| 445 | bool fnew = true; | 442 | container_of(work, struct vmbus_channel, add_channel_work); |
| 443 | struct vmbus_channel *primary_channel = newchannel->primary_channel; | ||
| 446 | unsigned long flags; | 444 | unsigned long flags; |
| 447 | u16 dev_type; | 445 | u16 dev_type; |
| 448 | int ret; | 446 | int ret; |
| 449 | 447 | ||
| 450 | /* Make sure this is a new offer */ | ||
| 451 | mutex_lock(&vmbus_connection.channel_mutex); | ||
| 452 | |||
| 453 | /* | ||
| 454 | * Now that we have acquired the channel_mutex, | ||
| 455 | * we can release the potentially racing rescind thread. | ||
| 456 | */ | ||
| 457 | atomic_dec(&vmbus_connection.offer_in_progress); | ||
| 458 | |||
| 459 | list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { | ||
| 460 | if (!uuid_le_cmp(channel->offermsg.offer.if_type, | ||
| 461 | newchannel->offermsg.offer.if_type) && | ||
| 462 | !uuid_le_cmp(channel->offermsg.offer.if_instance, | ||
| 463 | newchannel->offermsg.offer.if_instance)) { | ||
| 464 | fnew = false; | ||
| 465 | break; | ||
| 466 | } | ||
| 467 | } | ||
| 468 | |||
| 469 | if (fnew) | ||
| 470 | list_add_tail(&newchannel->listentry, | ||
| 471 | &vmbus_connection.chn_list); | ||
| 472 | |||
| 473 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 474 | |||
| 475 | if (!fnew) { | ||
| 476 | /* | ||
| 477 | * Check to see if this is a sub-channel. | ||
| 478 | */ | ||
| 479 | if (newchannel->offermsg.offer.sub_channel_index != 0) { | ||
| 480 | /* | ||
| 481 | * Process the sub-channel. | ||
| 482 | */ | ||
| 483 | newchannel->primary_channel = channel; | ||
| 484 | spin_lock_irqsave(&channel->lock, flags); | ||
| 485 | list_add_tail(&newchannel->sc_list, &channel->sc_list); | ||
| 486 | channel->num_sc++; | ||
| 487 | spin_unlock_irqrestore(&channel->lock, flags); | ||
| 488 | } else { | ||
| 489 | goto err_free_chan; | ||
| 490 | } | ||
| 491 | } | ||
| 492 | |||
| 493 | dev_type = hv_get_dev_type(newchannel); | 448 | dev_type = hv_get_dev_type(newchannel); |
| 494 | 449 | ||
| 495 | init_vp_index(newchannel, dev_type); | 450 | init_vp_index(newchannel, dev_type); |
| @@ -507,27 +462,26 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
| 507 | /* | 462 | /* |
| 508 | * This state is used to indicate a successful open | 463 | * This state is used to indicate a successful open |
| 509 | * so that when we do close the channel normally, we | 464 | * so that when we do close the channel normally, we |
| 510 | * can cleanup properly | 465 | * can cleanup properly. |
| 511 | */ | 466 | */ |
| 512 | newchannel->state = CHANNEL_OPEN_STATE; | 467 | newchannel->state = CHANNEL_OPEN_STATE; |
| 513 | 468 | ||
| 514 | if (!fnew) { | 469 | if (primary_channel != NULL) { |
| 515 | struct hv_device *dev | 470 | /* newchannel is a sub-channel. */ |
| 516 | = newchannel->primary_channel->device_obj; | 471 | struct hv_device *dev = primary_channel->device_obj; |
| 517 | 472 | ||
| 518 | if (vmbus_add_channel_kobj(dev, newchannel)) | 473 | if (vmbus_add_channel_kobj(dev, newchannel)) |
| 519 | goto err_free_chan; | 474 | goto err_deq_chan; |
| 475 | |||
| 476 | if (primary_channel->sc_creation_callback != NULL) | ||
| 477 | primary_channel->sc_creation_callback(newchannel); | ||
| 520 | 478 | ||
| 521 | if (channel->sc_creation_callback != NULL) | ||
| 522 | channel->sc_creation_callback(newchannel); | ||
| 523 | newchannel->probe_done = true; | 479 | newchannel->probe_done = true; |
| 524 | return; | 480 | return; |
| 525 | } | 481 | } |
| 526 | 482 | ||
| 527 | /* | 483 | /* |
| 528 | * Start the process of binding this offer to the driver | 484 | * Start the process of binding the primary channel to the driver |
| 529 | * We need to set the DeviceObject field before calling | ||
| 530 | * vmbus_child_dev_add() | ||
| 531 | */ | 485 | */ |
| 532 | newchannel->device_obj = vmbus_device_create( | 486 | newchannel->device_obj = vmbus_device_create( |
| 533 | &newchannel->offermsg.offer.if_type, | 487 | &newchannel->offermsg.offer.if_type, |
| @@ -556,13 +510,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
| 556 | 510 | ||
| 557 | err_deq_chan: | 511 | err_deq_chan: |
| 558 | mutex_lock(&vmbus_connection.channel_mutex); | 512 | mutex_lock(&vmbus_connection.channel_mutex); |
| 559 | list_del(&newchannel->listentry); | 513 | |
| 514 | /* | ||
| 515 | * We need to set the flag, otherwise | ||
| 516 | * vmbus_onoffer_rescind() can be blocked. | ||
| 517 | */ | ||
| 518 | newchannel->probe_done = true; | ||
| 519 | |||
| 520 | if (primary_channel == NULL) { | ||
| 521 | list_del(&newchannel->listentry); | ||
| 522 | } else { | ||
| 523 | spin_lock_irqsave(&primary_channel->lock, flags); | ||
| 524 | list_del(&newchannel->sc_list); | ||
| 525 | spin_unlock_irqrestore(&primary_channel->lock, flags); | ||
| 526 | } | ||
| 527 | |||
| 560 | mutex_unlock(&vmbus_connection.channel_mutex); | 528 | mutex_unlock(&vmbus_connection.channel_mutex); |
| 561 | 529 | ||
| 562 | if (newchannel->target_cpu != get_cpu()) { | 530 | if (newchannel->target_cpu != get_cpu()) { |
| 563 | put_cpu(); | 531 | put_cpu(); |
| 564 | smp_call_function_single(newchannel->target_cpu, | 532 | smp_call_function_single(newchannel->target_cpu, |
| 565 | percpu_channel_deq, newchannel, true); | 533 | percpu_channel_deq, |
| 534 | newchannel, true); | ||
| 566 | } else { | 535 | } else { |
| 567 | percpu_channel_deq(newchannel); | 536 | percpu_channel_deq(newchannel); |
| 568 | put_cpu(); | 537 | put_cpu(); |
| @@ -570,14 +539,104 @@ err_deq_chan: | |||
| 570 | 539 | ||
| 571 | vmbus_release_relid(newchannel->offermsg.child_relid); | 540 | vmbus_release_relid(newchannel->offermsg.child_relid); |
| 572 | 541 | ||
| 573 | err_free_chan: | ||
| 574 | free_channel(newchannel); | 542 | free_channel(newchannel); |
| 575 | } | 543 | } |
| 576 | 544 | ||
| 577 | /* | 545 | /* |
| 546 | * vmbus_process_offer - Process the offer by creating a channel/device | ||
| 547 | * associated with this offer | ||
| 548 | */ | ||
| 549 | static void vmbus_process_offer(struct vmbus_channel *newchannel) | ||
| 550 | { | ||
| 551 | struct vmbus_channel *channel; | ||
| 552 | struct workqueue_struct *wq; | ||
| 553 | unsigned long flags; | ||
| 554 | bool fnew = true; | ||
| 555 | |||
| 556 | mutex_lock(&vmbus_connection.channel_mutex); | ||
| 557 | |||
| 558 | /* | ||
| 559 | * Now that we have acquired the channel_mutex, | ||
| 560 | * we can release the potentially racing rescind thread. | ||
| 561 | */ | ||
| 562 | atomic_dec(&vmbus_connection.offer_in_progress); | ||
| 563 | |||
| 564 | list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { | ||
| 565 | if (!uuid_le_cmp(channel->offermsg.offer.if_type, | ||
| 566 | newchannel->offermsg.offer.if_type) && | ||
| 567 | !uuid_le_cmp(channel->offermsg.offer.if_instance, | ||
| 568 | newchannel->offermsg.offer.if_instance)) { | ||
| 569 | fnew = false; | ||
| 570 | break; | ||
| 571 | } | ||
| 572 | } | ||
| 573 | |||
| 574 | if (fnew) | ||
| 575 | list_add_tail(&newchannel->listentry, | ||
| 576 | &vmbus_connection.chn_list); | ||
| 577 | else { | ||
| 578 | /* | ||
| 579 | * Check to see if this is a valid sub-channel. | ||
| 580 | */ | ||
| 581 | if (newchannel->offermsg.offer.sub_channel_index == 0) { | ||
| 582 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 583 | /* | ||
| 584 | * Don't call free_channel(), because newchannel->kobj | ||
| 585 | * is not initialized yet. | ||
| 586 | */ | ||
| 587 | kfree(newchannel); | ||
| 588 | WARN_ON_ONCE(1); | ||
| 589 | return; | ||
| 590 | } | ||
| 591 | /* | ||
| 592 | * Process the sub-channel. | ||
| 593 | */ | ||
| 594 | newchannel->primary_channel = channel; | ||
| 595 | spin_lock_irqsave(&channel->lock, flags); | ||
| 596 | list_add_tail(&newchannel->sc_list, &channel->sc_list); | ||
| 597 | spin_unlock_irqrestore(&channel->lock, flags); | ||
| 598 | } | ||
| 599 | |||
| 600 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 601 | |||
| 602 | /* | ||
| 603 | * vmbus_process_offer() mustn't call channel->sc_creation_callback() | ||
| 604 | * directly for sub-channels, because sc_creation_callback() -> | ||
| 605 | * vmbus_open() may never get the host's response to the | ||
| 606 | * OPEN_CHANNEL message (the host may rescind a channel at any time, | ||
| 607 | * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind() | ||
| 608 | * may not wake up the vmbus_open() as it's blocked due to a non-zero | ||
| 609 | * vmbus_connection.offer_in_progress, and finally we have a deadlock. | ||
| 610 | * | ||
| 611 | * The above is also true for primary channels, if the related device | ||
| 612 | * drivers use sync probing mode by default. | ||
| 613 | * | ||
| 614 | * And, usually the handling of primary channels and sub-channels can | ||
| 615 | * depend on each other, so we should offload them to different | ||
| 616 | * workqueues to avoid possible deadlock, e.g. in sync-probing mode, | ||
| 617 | * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() -> | ||
| 618 | * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock | ||
| 619 | * and waits for all the sub-channels to appear, but the latter | ||
| 620 | * can't get the rtnl_lock and this blocks the handling of | ||
| 621 | * sub-channels. | ||
| 622 | */ | ||
| 623 | INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work); | ||
| 624 | wq = fnew ? vmbus_connection.handle_primary_chan_wq : | ||
| 625 | vmbus_connection.handle_sub_chan_wq; | ||
| 626 | queue_work(wq, &newchannel->add_channel_work); | ||
| 627 | } | ||
| 628 | |||
| 629 | /* | ||
| 578 | * We use this state to statically distribute the channel interrupt load. | 630 | * We use this state to statically distribute the channel interrupt load. |
| 579 | */ | 631 | */ |
| 580 | static int next_numa_node_id; | 632 | static int next_numa_node_id; |
| 633 | /* | ||
| 634 | * init_vp_index() accesses global variables like next_numa_node_id, and | ||
| 635 | * it can run concurrently for primary channels and sub-channels: see | ||
| 636 | * vmbus_process_offer(), so we need the lock to protect the global | ||
| 637 | * variables. | ||
| 638 | */ | ||
| 639 | static DEFINE_SPINLOCK(bind_channel_to_cpu_lock); | ||
| 581 | 640 | ||
| 582 | /* | 641 | /* |
| 583 | * Starting with Win8, we can statically distribute the incoming | 642 | * Starting with Win8, we can statically distribute the incoming |
| @@ -613,6 +672,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) | |||
| 613 | return; | 672 | return; |
| 614 | } | 673 | } |
| 615 | 674 | ||
| 675 | spin_lock(&bind_channel_to_cpu_lock); | ||
| 676 | |||
| 616 | /* | 677 | /* |
| 617 | * Based on the channel affinity policy, we will assign the NUMA | 678 | * Based on the channel affinity policy, we will assign the NUMA |
| 618 | * nodes. | 679 | * nodes. |
| @@ -695,6 +756,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) | |||
| 695 | channel->target_cpu = cur_cpu; | 756 | channel->target_cpu = cur_cpu; |
| 696 | channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu); | 757 | channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu); |
| 697 | 758 | ||
| 759 | spin_unlock(&bind_channel_to_cpu_lock); | ||
| 760 | |||
| 698 | free_cpumask_var(available_mask); | 761 | free_cpumask_var(available_mask); |
| 699 | } | 762 | } |
| 700 | 763 | ||
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index f4d08c8ac7f8..4fe117b761ce 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c | |||
| @@ -190,6 +190,20 @@ int vmbus_connect(void) | |||
| 190 | goto cleanup; | 190 | goto cleanup; |
| 191 | } | 191 | } |
| 192 | 192 | ||
| 193 | vmbus_connection.handle_primary_chan_wq = | ||
| 194 | create_workqueue("hv_pri_chan"); | ||
| 195 | if (!vmbus_connection.handle_primary_chan_wq) { | ||
| 196 | ret = -ENOMEM; | ||
| 197 | goto cleanup; | ||
| 198 | } | ||
| 199 | |||
| 200 | vmbus_connection.handle_sub_chan_wq = | ||
| 201 | create_workqueue("hv_sub_chan"); | ||
| 202 | if (!vmbus_connection.handle_sub_chan_wq) { | ||
| 203 | ret = -ENOMEM; | ||
| 204 | goto cleanup; | ||
| 205 | } | ||
| 206 | |||
| 193 | INIT_LIST_HEAD(&vmbus_connection.chn_msg_list); | 207 | INIT_LIST_HEAD(&vmbus_connection.chn_msg_list); |
| 194 | spin_lock_init(&vmbus_connection.channelmsg_lock); | 208 | spin_lock_init(&vmbus_connection.channelmsg_lock); |
| 195 | 209 | ||
| @@ -280,10 +294,14 @@ void vmbus_disconnect(void) | |||
| 280 | */ | 294 | */ |
| 281 | vmbus_initiate_unload(false); | 295 | vmbus_initiate_unload(false); |
| 282 | 296 | ||
| 283 | if (vmbus_connection.work_queue) { | 297 | if (vmbus_connection.handle_sub_chan_wq) |
| 284 | drain_workqueue(vmbus_connection.work_queue); | 298 | destroy_workqueue(vmbus_connection.handle_sub_chan_wq); |
| 299 | |||
| 300 | if (vmbus_connection.handle_primary_chan_wq) | ||
| 301 | destroy_workqueue(vmbus_connection.handle_primary_chan_wq); | ||
| 302 | |||
| 303 | if (vmbus_connection.work_queue) | ||
| 285 | destroy_workqueue(vmbus_connection.work_queue); | 304 | destroy_workqueue(vmbus_connection.work_queue); |
| 286 | } | ||
| 287 | 305 | ||
| 288 | if (vmbus_connection.int_page) { | 306 | if (vmbus_connection.int_page) { |
| 289 | free_pages((unsigned long)vmbus_connection.int_page, 0); | 307 | free_pages((unsigned long)vmbus_connection.int_page, 0); |
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 72eaba3d50fc..87d3d7da78f8 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h | |||
| @@ -335,7 +335,14 @@ struct vmbus_connection { | |||
| 335 | struct list_head chn_list; | 335 | struct list_head chn_list; |
| 336 | struct mutex channel_mutex; | 336 | struct mutex channel_mutex; |
| 337 | 337 | ||
| 338 | /* | ||
| 339 | * An offer message is handled first on the work_queue, and then | ||
| 340 | * is further handled on handle_primary_chan_wq or | ||
| 341 | * handle_sub_chan_wq. | ||
| 342 | */ | ||
| 338 | struct workqueue_struct *work_queue; | 343 | struct workqueue_struct *work_queue; |
| 344 | struct workqueue_struct *handle_primary_chan_wq; | ||
| 345 | struct workqueue_struct *handle_sub_chan_wq; | ||
| 339 | }; | 346 | }; |
| 340 | 347 | ||
| 341 | 348 | ||
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c index 8e60048a33f8..51d34959709b 100644 --- a/drivers/i2c/busses/i2c-axxia.c +++ b/drivers/i2c/busses/i2c-axxia.c | |||
| @@ -74,8 +74,7 @@ | |||
| 74 | MST_STATUS_ND) | 74 | MST_STATUS_ND) |
| 75 | #define MST_STATUS_ERR (MST_STATUS_NAK | \ | 75 | #define MST_STATUS_ERR (MST_STATUS_NAK | \ |
| 76 | MST_STATUS_AL | \ | 76 | MST_STATUS_AL | \ |
| 77 | MST_STATUS_IP | \ | 77 | MST_STATUS_IP) |
| 78 | MST_STATUS_TSS) | ||
| 79 | #define MST_TX_BYTES_XFRD 0x50 | 78 | #define MST_TX_BYTES_XFRD 0x50 |
| 80 | #define MST_RX_BYTES_XFRD 0x54 | 79 | #define MST_RX_BYTES_XFRD 0x54 |
| 81 | #define SCL_HIGH_PERIOD 0x80 | 80 | #define SCL_HIGH_PERIOD 0x80 |
| @@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev) | |||
| 241 | */ | 240 | */ |
| 242 | if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) { | 241 | if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) { |
| 243 | idev->msg_err = -EPROTO; | 242 | idev->msg_err = -EPROTO; |
| 244 | i2c_int_disable(idev, ~0); | 243 | i2c_int_disable(idev, ~MST_STATUS_TSS); |
| 245 | complete(&idev->msg_complete); | 244 | complete(&idev->msg_complete); |
| 246 | break; | 245 | break; |
| 247 | } | 246 | } |
| @@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev) | |||
| 299 | 298 | ||
| 300 | if (status & MST_STATUS_SCC) { | 299 | if (status & MST_STATUS_SCC) { |
| 301 | /* Stop completed */ | 300 | /* Stop completed */ |
| 302 | i2c_int_disable(idev, ~0); | 301 | i2c_int_disable(idev, ~MST_STATUS_TSS); |
| 303 | complete(&idev->msg_complete); | 302 | complete(&idev->msg_complete); |
| 304 | } else if (status & MST_STATUS_SNS) { | 303 | } else if (status & MST_STATUS_SNS) { |
| 305 | /* Transfer done */ | 304 | /* Transfer done */ |
| 306 | i2c_int_disable(idev, ~0); | 305 | i2c_int_disable(idev, ~MST_STATUS_TSS); |
| 307 | if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len) | 306 | if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len) |
| 308 | axxia_i2c_empty_rx_fifo(idev); | 307 | axxia_i2c_empty_rx_fifo(idev); |
| 309 | complete(&idev->msg_complete); | 308 | complete(&idev->msg_complete); |
| 309 | } else if (status & MST_STATUS_TSS) { | ||
| 310 | /* Transfer timeout */ | ||
| 311 | idev->msg_err = -ETIMEDOUT; | ||
| 312 | i2c_int_disable(idev, ~MST_STATUS_TSS); | ||
| 313 | complete(&idev->msg_complete); | ||
| 310 | } else if (unlikely(status & MST_STATUS_ERR)) { | 314 | } else if (unlikely(status & MST_STATUS_ERR)) { |
| 311 | /* Transfer error */ | 315 | /* Transfer error */ |
| 312 | i2c_int_disable(idev, ~0); | 316 | i2c_int_disable(idev, ~0); |
| @@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) | |||
| 339 | u32 rx_xfer, tx_xfer; | 343 | u32 rx_xfer, tx_xfer; |
| 340 | u32 addr_1, addr_2; | 344 | u32 addr_1, addr_2; |
| 341 | unsigned long time_left; | 345 | unsigned long time_left; |
| 346 | unsigned int wt_value; | ||
| 342 | 347 | ||
| 343 | idev->msg = msg; | 348 | idev->msg = msg; |
| 344 | idev->msg_xfrd = 0; | 349 | idev->msg_xfrd = 0; |
| 345 | idev->msg_err = 0; | ||
| 346 | reinit_completion(&idev->msg_complete); | 350 | reinit_completion(&idev->msg_complete); |
| 347 | 351 | ||
| 348 | if (i2c_m_ten(msg)) { | 352 | if (i2c_m_ten(msg)) { |
| @@ -383,9 +387,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) | |||
| 383 | else if (axxia_i2c_fill_tx_fifo(idev) != 0) | 387 | else if (axxia_i2c_fill_tx_fifo(idev) != 0) |
| 384 | int_mask |= MST_STATUS_TFL; | 388 | int_mask |= MST_STATUS_TFL; |
| 385 | 389 | ||
| 390 | wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL)); | ||
| 391 | /* Disable wait timer temporarly */ | ||
| 392 | writel(wt_value, idev->base + WAIT_TIMER_CONTROL); | ||
| 393 | /* Check if timeout error happened */ | ||
| 394 | if (idev->msg_err) | ||
| 395 | goto out; | ||
| 396 | |||
| 386 | /* Start manual mode */ | 397 | /* Start manual mode */ |
| 387 | writel(CMD_MANUAL, idev->base + MST_COMMAND); | 398 | writel(CMD_MANUAL, idev->base + MST_COMMAND); |
| 388 | 399 | ||
| 400 | writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL); | ||
| 401 | |||
| 389 | i2c_int_enable(idev, int_mask); | 402 | i2c_int_enable(idev, int_mask); |
| 390 | 403 | ||
| 391 | time_left = wait_for_completion_timeout(&idev->msg_complete, | 404 | time_left = wait_for_completion_timeout(&idev->msg_complete, |
| @@ -396,13 +409,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) | |||
| 396 | if (readl(idev->base + MST_COMMAND) & CMD_BUSY) | 409 | if (readl(idev->base + MST_COMMAND) & CMD_BUSY) |
| 397 | dev_warn(idev->dev, "busy after xfer\n"); | 410 | dev_warn(idev->dev, "busy after xfer\n"); |
| 398 | 411 | ||
| 399 | if (time_left == 0) | 412 | if (time_left == 0) { |
| 400 | idev->msg_err = -ETIMEDOUT; | 413 | idev->msg_err = -ETIMEDOUT; |
| 401 | |||
| 402 | if (idev->msg_err == -ETIMEDOUT) | ||
| 403 | i2c_recover_bus(&idev->adapter); | 414 | i2c_recover_bus(&idev->adapter); |
| 415 | axxia_i2c_init(idev); | ||
| 416 | } | ||
| 404 | 417 | ||
| 405 | if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO) | 418 | out: |
| 419 | if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO && | ||
| 420 | idev->msg_err != -ETIMEDOUT) | ||
| 406 | axxia_i2c_init(idev); | 421 | axxia_i2c_init(idev); |
| 407 | 422 | ||
| 408 | return idev->msg_err; | 423 | return idev->msg_err; |
| @@ -410,7 +425,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) | |||
| 410 | 425 | ||
| 411 | static int axxia_i2c_stop(struct axxia_i2c_dev *idev) | 426 | static int axxia_i2c_stop(struct axxia_i2c_dev *idev) |
| 412 | { | 427 | { |
| 413 | u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC; | 428 | u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS; |
| 414 | unsigned long time_left; | 429 | unsigned long time_left; |
| 415 | 430 | ||
| 416 | reinit_completion(&idev->msg_complete); | 431 | reinit_completion(&idev->msg_complete); |
| @@ -437,6 +452,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | |||
| 437 | int i; | 452 | int i; |
| 438 | int ret = 0; | 453 | int ret = 0; |
| 439 | 454 | ||
| 455 | idev->msg_err = 0; | ||
| 456 | i2c_int_enable(idev, MST_STATUS_TSS); | ||
| 457 | |||
| 440 | for (i = 0; ret == 0 && i < num; ++i) | 458 | for (i = 0; ret == 0 && i < num; ++i) |
| 441 | ret = axxia_i2c_xfer_msg(idev, &msgs[i]); | 459 | ret = axxia_i2c_xfer_msg(idev, &msgs[i]); |
| 442 | 460 | ||
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index 8822357bca0c..e99c3bb58351 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c | |||
| @@ -89,7 +89,7 @@ static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd) | |||
| 89 | 89 | ||
| 90 | if (time_is_before_jiffies(target)) { | 90 | if (time_is_before_jiffies(target)) { |
| 91 | dev_err(i2cd->dev, "i2c timeout error %x\n", val); | 91 | dev_err(i2cd->dev, "i2c timeout error %x\n", val); |
| 92 | return -ETIME; | 92 | return -ETIMEDOUT; |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | val = readl(i2cd->regs + I2C_MST_CNTL); | 95 | val = readl(i2cd->regs + I2C_MST_CNTL); |
| @@ -97,9 +97,9 @@ static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd) | |||
| 97 | case I2C_MST_CNTL_STATUS_OKAY: | 97 | case I2C_MST_CNTL_STATUS_OKAY: |
| 98 | return 0; | 98 | return 0; |
| 99 | case I2C_MST_CNTL_STATUS_NO_ACK: | 99 | case I2C_MST_CNTL_STATUS_NO_ACK: |
| 100 | return -EIO; | 100 | return -ENXIO; |
| 101 | case I2C_MST_CNTL_STATUS_TIMEOUT: | 101 | case I2C_MST_CNTL_STATUS_TIMEOUT: |
| 102 | return -ETIME; | 102 | return -ETIMEDOUT; |
| 103 | default: | 103 | default: |
| 104 | return 0; | 104 | return 0; |
| 105 | } | 105 | } |
| @@ -218,6 +218,7 @@ stop: | |||
| 218 | 218 | ||
| 219 | static const struct i2c_adapter_quirks gpu_i2c_quirks = { | 219 | static const struct i2c_adapter_quirks gpu_i2c_quirks = { |
| 220 | .max_read_len = 4, | 220 | .max_read_len = 4, |
| 221 | .max_comb_2nd_msg_len = 4, | ||
| 221 | .flags = I2C_AQ_COMB_WRITE_THEN_READ, | 222 | .flags = I2C_AQ_COMB_WRITE_THEN_READ, |
| 222 | }; | 223 | }; |
| 223 | 224 | ||
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 4aa7dde876f3..254e6219e538 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
| @@ -779,6 +779,11 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, | |||
| 779 | 779 | ||
| 780 | pm_runtime_get_sync(dev); | 780 | pm_runtime_get_sync(dev); |
| 781 | 781 | ||
| 782 | /* Check bus state before init otherwise bus busy info will be lost */ | ||
| 783 | ret = rcar_i2c_bus_barrier(priv); | ||
| 784 | if (ret < 0) | ||
| 785 | goto out; | ||
| 786 | |||
| 782 | /* Gen3 needs a reset before allowing RXDMA once */ | 787 | /* Gen3 needs a reset before allowing RXDMA once */ |
| 783 | if (priv->devtype == I2C_RCAR_GEN3) { | 788 | if (priv->devtype == I2C_RCAR_GEN3) { |
| 784 | priv->flags |= ID_P_NO_RXDMA; | 789 | priv->flags |= ID_P_NO_RXDMA; |
| @@ -791,10 +796,6 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, | |||
| 791 | 796 | ||
| 792 | rcar_i2c_init(priv); | 797 | rcar_i2c_init(priv); |
| 793 | 798 | ||
| 794 | ret = rcar_i2c_bus_barrier(priv); | ||
| 795 | if (ret < 0) | ||
| 796 | goto out; | ||
| 797 | |||
| 798 | for (i = 0; i < num; i++) | 799 | for (i = 0; i < num; i++) |
| 799 | rcar_i2c_request_dma(priv, msgs + i); | 800 | rcar_i2c_request_dma(priv, msgs + i); |
| 800 | 801 | ||
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index 7e9a2bbf5ddc..ff3f4553648f 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c | |||
| @@ -367,6 +367,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) | |||
| 367 | { | 367 | { |
| 368 | struct acpi_smbus_cmi *smbus_cmi; | 368 | struct acpi_smbus_cmi *smbus_cmi; |
| 369 | const struct acpi_device_id *id; | 369 | const struct acpi_device_id *id; |
| 370 | int ret; | ||
| 370 | 371 | ||
| 371 | smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL); | 372 | smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL); |
| 372 | if (!smbus_cmi) | 373 | if (!smbus_cmi) |
| @@ -388,8 +389,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) | |||
| 388 | acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1, | 389 | acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1, |
| 389 | acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL); | 390 | acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL); |
| 390 | 391 | ||
| 391 | if (smbus_cmi->cap_info == 0) | 392 | if (smbus_cmi->cap_info == 0) { |
| 393 | ret = -ENODEV; | ||
| 392 | goto err; | 394 | goto err; |
| 395 | } | ||
| 393 | 396 | ||
| 394 | snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name), | 397 | snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name), |
| 395 | "SMBus CMI adapter %s", | 398 | "SMBus CMI adapter %s", |
| @@ -400,7 +403,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) | |||
| 400 | smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; | 403 | smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; |
| 401 | smbus_cmi->adapter.dev.parent = &device->dev; | 404 | smbus_cmi->adapter.dev.parent = &device->dev; |
| 402 | 405 | ||
| 403 | if (i2c_add_adapter(&smbus_cmi->adapter)) { | 406 | ret = i2c_add_adapter(&smbus_cmi->adapter); |
| 407 | if (ret) { | ||
| 404 | dev_err(&device->dev, "Couldn't register adapter!\n"); | 408 | dev_err(&device->dev, "Couldn't register adapter!\n"); |
| 405 | goto err; | 409 | goto err; |
| 406 | } | 410 | } |
| @@ -410,7 +414,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) | |||
| 410 | err: | 414 | err: |
| 411 | kfree(smbus_cmi); | 415 | kfree(smbus_cmi); |
| 412 | device->driver_data = NULL; | 416 | device->driver_data = NULL; |
| 413 | return -EIO; | 417 | return ret; |
| 414 | } | 418 | } |
| 415 | 419 | ||
| 416 | static int acpi_smbus_cmi_remove(struct acpi_device *device) | 420 | static int acpi_smbus_cmi_remove(struct acpi_device *device) |
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index dd384743dbbd..03da4a539a2f 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c | |||
| @@ -173,8 +173,6 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) | |||
| 173 | "interrupt: enabled_irqs=%04x, irq_status=%04x\n", | 173 | "interrupt: enabled_irqs=%04x, irq_status=%04x\n", |
| 174 | priv->enabled_irqs, irq_status); | 174 | priv->enabled_irqs, irq_status); |
| 175 | 175 | ||
| 176 | uniphier_fi2c_clear_irqs(priv, irq_status); | ||
| 177 | |||
| 178 | if (irq_status & UNIPHIER_FI2C_INT_STOP) | 176 | if (irq_status & UNIPHIER_FI2C_INT_STOP) |
| 179 | goto complete; | 177 | goto complete; |
| 180 | 178 | ||
| @@ -214,7 +212,13 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) | |||
| 214 | 212 | ||
| 215 | if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) { | 213 | if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) { |
| 216 | uniphier_fi2c_drain_rxfifo(priv); | 214 | uniphier_fi2c_drain_rxfifo(priv); |
| 217 | if (!priv->len) | 215 | /* |
| 216 | * If the number of bytes to read is multiple of the FIFO size | ||
| 217 | * (msg->len == 8, 16, 24, ...), the INT_RF bit is set a little | ||
| 218 | * earlier than INT_RB. We wait for INT_RB to confirm the | ||
| 219 | * completion of the current message. | ||
| 220 | */ | ||
| 221 | if (!priv->len && (irq_status & UNIPHIER_FI2C_INT_RB)) | ||
| 218 | goto data_done; | 222 | goto data_done; |
| 219 | 223 | ||
| 220 | if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) { | 224 | if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) { |
| @@ -253,12 +257,20 @@ complete: | |||
| 253 | } | 257 | } |
| 254 | 258 | ||
| 255 | handled: | 259 | handled: |
| 260 | /* | ||
| 261 | * This controller makes a pause while any bit of the IRQ status is | ||
| 262 | * asserted. Clear the asserted bit to kick the controller just before | ||
| 263 | * exiting the handler. | ||
| 264 | */ | ||
| 265 | uniphier_fi2c_clear_irqs(priv, irq_status); | ||
| 266 | |||
| 256 | spin_unlock(&priv->lock); | 267 | spin_unlock(&priv->lock); |
| 257 | 268 | ||
| 258 | return IRQ_HANDLED; | 269 | return IRQ_HANDLED; |
| 259 | } | 270 | } |
| 260 | 271 | ||
| 261 | static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr) | 272 | static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr, |
| 273 | bool repeat) | ||
| 262 | { | 274 | { |
| 263 | priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE; | 275 | priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE; |
| 264 | uniphier_fi2c_set_irqs(priv); | 276 | uniphier_fi2c_set_irqs(priv); |
| @@ -268,8 +280,12 @@ static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr) | |||
| 268 | /* set slave address */ | 280 | /* set slave address */ |
| 269 | writel(UNIPHIER_FI2C_DTTX_CMD | addr << 1, | 281 | writel(UNIPHIER_FI2C_DTTX_CMD | addr << 1, |
| 270 | priv->membase + UNIPHIER_FI2C_DTTX); | 282 | priv->membase + UNIPHIER_FI2C_DTTX); |
| 271 | /* first chunk of data */ | 283 | /* |
| 272 | uniphier_fi2c_fill_txfifo(priv, true); | 284 | * First chunk of data. For a repeated START condition, do not write |
| 285 | * data to the TX fifo here to avoid the timing issue. | ||
| 286 | */ | ||
| 287 | if (!repeat) | ||
| 288 | uniphier_fi2c_fill_txfifo(priv, true); | ||
| 273 | } | 289 | } |
| 274 | 290 | ||
| 275 | static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr) | 291 | static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr) |
| @@ -350,7 +366,7 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap, | |||
| 350 | if (is_read) | 366 | if (is_read) |
| 351 | uniphier_fi2c_rx_init(priv, msg->addr); | 367 | uniphier_fi2c_rx_init(priv, msg->addr); |
| 352 | else | 368 | else |
| 353 | uniphier_fi2c_tx_init(priv, msg->addr); | 369 | uniphier_fi2c_tx_init(priv, msg->addr, repeat); |
| 354 | 370 | ||
| 355 | dev_dbg(&adap->dev, "start condition\n"); | 371 | dev_dbg(&adap->dev, "start condition\n"); |
| 356 | /* | 372 | /* |
| @@ -502,9 +518,26 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv) | |||
| 502 | 518 | ||
| 503 | uniphier_fi2c_reset(priv); | 519 | uniphier_fi2c_reset(priv); |
| 504 | 520 | ||
| 521 | /* | ||
| 522 | * Standard-mode: tLOW + tHIGH = 10 us | ||
| 523 | * Fast-mode: tLOW + tHIGH = 2.5 us | ||
| 524 | */ | ||
| 505 | writel(cyc, priv->membase + UNIPHIER_FI2C_CYC); | 525 | writel(cyc, priv->membase + UNIPHIER_FI2C_CYC); |
| 506 | writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL); | 526 | /* |
| 527 | * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us | ||
| 528 | * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us | ||
| 529 | * "tLow/tHIGH = 5/4" meets both. | ||
| 530 | */ | ||
| 531 | writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL); | ||
| 532 | /* | ||
| 533 | * Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us | ||
| 534 | * Fast-mode: tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us | ||
| 535 | */ | ||
| 507 | writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT); | 536 | writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT); |
| 537 | /* | ||
| 538 | * Standard-mode: tSU;DAT = 250 ns | ||
| 539 | * Fast-mode: tSU;DAT = 100 ns | ||
| 540 | */ | ||
| 508 | writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT); | 541 | writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT); |
| 509 | 542 | ||
| 510 | uniphier_fi2c_prepare_operation(priv); | 543 | uniphier_fi2c_prepare_operation(priv); |
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index 454f914ae66d..c488e558aef7 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c | |||
| @@ -320,7 +320,13 @@ static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv) | |||
| 320 | 320 | ||
| 321 | uniphier_i2c_reset(priv, true); | 321 | uniphier_i2c_reset(priv, true); |
| 322 | 322 | ||
| 323 | writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK); | 323 | /* |
| 324 | * Bit30-16: clock cycles of tLOW. | ||
| 325 | * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us | ||
| 326 | * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us | ||
| 327 | * "tLow/tHIGH = 5/4" meets both. | ||
| 328 | */ | ||
| 329 | writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK); | ||
| 324 | 330 | ||
| 325 | uniphier_i2c_reset(priv, false); | 331 | uniphier_i2c_reset(priv, false); |
| 326 | } | 332 | } |
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 45c997430332..4c8c7a620d08 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
| @@ -614,18 +614,7 @@ static int ide_drivers_show(struct seq_file *s, void *p) | |||
| 614 | return 0; | 614 | return 0; |
| 615 | } | 615 | } |
| 616 | 616 | ||
| 617 | static int ide_drivers_open(struct inode *inode, struct file *file) | 617 | DEFINE_SHOW_ATTRIBUTE(ide_drivers); |
| 618 | { | ||
| 619 | return single_open(file, &ide_drivers_show, NULL); | ||
| 620 | } | ||
| 621 | |||
| 622 | static const struct file_operations ide_drivers_operations = { | ||
| 623 | .owner = THIS_MODULE, | ||
| 624 | .open = ide_drivers_open, | ||
| 625 | .read = seq_read, | ||
| 626 | .llseek = seq_lseek, | ||
| 627 | .release = single_release, | ||
| 628 | }; | ||
| 629 | 618 | ||
| 630 | void proc_ide_create(void) | 619 | void proc_ide_create(void) |
| 631 | { | 620 | { |
| @@ -634,7 +623,7 @@ void proc_ide_create(void) | |||
| 634 | if (!proc_ide_root) | 623 | if (!proc_ide_root) |
| 635 | return; | 624 | return; |
| 636 | 625 | ||
| 637 | proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations); | 626 | proc_create("drivers", 0, proc_ide_root, &ide_drivers_fops); |
| 638 | } | 627 | } |
| 639 | 628 | ||
| 640 | void proc_ide_destroy(void) | 629 | void proc_ide_destroy(void) |
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index c5b902b86b44..203ed4adc04a 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c | |||
| @@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) | |||
| 920 | struct device_node *root = of_find_node_by_path("/"); | 920 | struct device_node *root = of_find_node_by_path("/"); |
| 921 | const char *model = of_get_property(root, "model", NULL); | 921 | const char *model = of_get_property(root, "model", NULL); |
| 922 | 922 | ||
| 923 | of_node_put(root); | ||
| 923 | /* Get cable type from device-tree. */ | 924 | /* Get cable type from device-tree. */ |
| 924 | if (cable && !strncmp(cable, "80-", 3)) { | 925 | if (cable && !strncmp(cable, "80-", 3)) { |
| 925 | /* Some drives fail to detect 80c cable in PowerBook */ | 926 | /* Some drives fail to detect 80c cable in PowerBook */ |
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index 25d43c8f1c2a..558de0b9895c 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c | |||
| @@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port, | |||
| 267 | struct net_device *cookie_ndev = cookie; | 267 | struct net_device *cookie_ndev = cookie; |
| 268 | bool match = false; | 268 | bool match = false; |
| 269 | 269 | ||
| 270 | if (!rdma_ndev) | ||
| 271 | return false; | ||
| 272 | |||
| 270 | rcu_read_lock(); | 273 | rcu_read_lock(); |
| 271 | if (netif_is_bond_master(cookie_ndev) && | 274 | if (netif_is_bond_master(cookie_ndev) && |
| 272 | rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)) | 275 | rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)) |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 9b20479dc710..7e6d70936c63 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -12500,7 +12500,8 @@ static int init_cntrs(struct hfi1_devdata *dd) | |||
| 12500 | } | 12500 | } |
| 12501 | 12501 | ||
| 12502 | /* allocate space for the counter values */ | 12502 | /* allocate space for the counter values */ |
| 12503 | dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); | 12503 | dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), |
| 12504 | GFP_KERNEL); | ||
| 12504 | if (!dd->cntrs) | 12505 | if (!dd->cntrs) |
| 12505 | goto bail; | 12506 | goto bail; |
| 12506 | 12507 | ||
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 1401b6ea4a28..2b882347d0c2 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h | |||
| @@ -155,6 +155,8 @@ struct hfi1_ib_stats { | |||
| 155 | extern struct hfi1_ib_stats hfi1_stats; | 155 | extern struct hfi1_ib_stats hfi1_stats; |
| 156 | extern const struct pci_error_handlers hfi1_pci_err_handler; | 156 | extern const struct pci_error_handlers hfi1_pci_err_handler; |
| 157 | 157 | ||
| 158 | extern int num_driver_cntrs; | ||
| 159 | |||
| 158 | /* | 160 | /* |
| 159 | * First-cut criterion for "device is active" is | 161 | * First-cut criterion for "device is active" is |
| 160 | * two thousand dwords combined Tx, Rx traffic per | 162 | * two thousand dwords combined Tx, Rx traffic per |
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 6f3bc4dab858..1a016248039f 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c | |||
| @@ -340,6 +340,13 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send) | |||
| 340 | default: | 340 | default: |
| 341 | break; | 341 | break; |
| 342 | } | 342 | } |
| 343 | |||
| 344 | /* | ||
| 345 | * System latency between send and schedule is large enough that | ||
| 346 | * forcing call_send to true for piothreshold packets is necessary. | ||
| 347 | */ | ||
| 348 | if (wqe->length <= piothreshold) | ||
| 349 | *call_send = true; | ||
| 343 | return 0; | 350 | return 0; |
| 344 | } | 351 | } |
| 345 | 352 | ||
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 48e11e510358..a365089a9305 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c | |||
| @@ -1479,7 +1479,7 @@ static const char * const driver_cntr_names[] = { | |||
| 1479 | static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */ | 1479 | static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */ |
| 1480 | static const char **dev_cntr_names; | 1480 | static const char **dev_cntr_names; |
| 1481 | static const char **port_cntr_names; | 1481 | static const char **port_cntr_names; |
| 1482 | static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); | 1482 | int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); |
| 1483 | static int num_dev_cntrs; | 1483 | static int num_dev_cntrs; |
| 1484 | static int num_port_cntrs; | 1484 | static int num_port_cntrs; |
| 1485 | static int cntr_names_initialized; | 1485 | static int cntr_names_initialized; |
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 61aab7c0c513..45c421c87100 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c | |||
| @@ -1066,7 +1066,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext, | |||
| 1066 | 1066 | ||
| 1067 | err = uverbs_get_flags32(&access, attrs, | 1067 | err = uverbs_get_flags32(&access, attrs, |
| 1068 | MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, | 1068 | MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, |
| 1069 | IB_ACCESS_SUPPORTED); | 1069 | IB_ACCESS_LOCAL_WRITE | |
| 1070 | IB_ACCESS_REMOTE_WRITE | | ||
| 1071 | IB_ACCESS_REMOTE_READ); | ||
| 1070 | if (err) | 1072 | if (err) |
| 1071 | return err; | 1073 | return err; |
| 1072 | 1074 | ||
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 2cc3d69ab6f6..4dc6cc640ce0 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c | |||
| @@ -506,14 +506,13 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr) | |||
| 506 | static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, | 506 | static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, |
| 507 | u64 io_virt, size_t bcnt, u32 *bytes_mapped) | 507 | u64 io_virt, size_t bcnt, u32 *bytes_mapped) |
| 508 | { | 508 | { |
| 509 | int npages = 0, current_seq, page_shift, ret, np; | ||
| 510 | bool implicit = false; | ||
| 509 | struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); | 511 | struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); |
| 510 | u64 access_mask = ODP_READ_ALLOWED_BIT; | 512 | u64 access_mask = ODP_READ_ALLOWED_BIT; |
| 511 | int npages = 0, page_shift, np; | ||
| 512 | u64 start_idx, page_mask; | 513 | u64 start_idx, page_mask; |
| 513 | struct ib_umem_odp *odp; | 514 | struct ib_umem_odp *odp; |
| 514 | int current_seq; | ||
| 515 | size_t size; | 515 | size_t size; |
| 516 | int ret; | ||
| 517 | 516 | ||
| 518 | if (!odp_mr->page_list) { | 517 | if (!odp_mr->page_list) { |
| 519 | odp = implicit_mr_get_data(mr, io_virt, bcnt); | 518 | odp = implicit_mr_get_data(mr, io_virt, bcnt); |
| @@ -521,7 +520,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, | |||
| 521 | if (IS_ERR(odp)) | 520 | if (IS_ERR(odp)) |
| 522 | return PTR_ERR(odp); | 521 | return PTR_ERR(odp); |
| 523 | mr = odp->private; | 522 | mr = odp->private; |
| 524 | 523 | implicit = true; | |
| 525 | } else { | 524 | } else { |
| 526 | odp = odp_mr; | 525 | odp = odp_mr; |
| 527 | } | 526 | } |
| @@ -600,7 +599,7 @@ next_mr: | |||
| 600 | 599 | ||
| 601 | out: | 600 | out: |
| 602 | if (ret == -EAGAIN) { | 601 | if (ret == -EAGAIN) { |
| 603 | if (mr->parent || !odp->dying) { | 602 | if (implicit || !odp->dying) { |
| 604 | unsigned long timeout = | 603 | unsigned long timeout = |
| 605 | msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT); | 604 | msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT); |
| 606 | 605 | ||
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index d4b9db487b16..cfc8b94527b9 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
| @@ -480,18 +480,18 @@ static const u8 xboxone_hori_init[] = { | |||
| 480 | }; | 480 | }; |
| 481 | 481 | ||
| 482 | /* | 482 | /* |
| 483 | * This packet is required for some of the PDP pads to start | 483 | * This packet is required for most (all?) of the PDP pads to start |
| 484 | * sending input reports. These pads include: (0x0e6f:0x02ab), | 484 | * sending input reports. These pads include: (0x0e6f:0x02ab), |
| 485 | * (0x0e6f:0x02a4). | 485 | * (0x0e6f:0x02a4), (0x0e6f:0x02a6). |
| 486 | */ | 486 | */ |
| 487 | static const u8 xboxone_pdp_init1[] = { | 487 | static const u8 xboxone_pdp_init1[] = { |
| 488 | 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14 | 488 | 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14 |
| 489 | }; | 489 | }; |
| 490 | 490 | ||
| 491 | /* | 491 | /* |
| 492 | * This packet is required for some of the PDP pads to start | 492 | * This packet is required for most (all?) of the PDP pads to start |
| 493 | * sending input reports. These pads include: (0x0e6f:0x02ab), | 493 | * sending input reports. These pads include: (0x0e6f:0x02ab), |
| 494 | * (0x0e6f:0x02a4). | 494 | * (0x0e6f:0x02a4), (0x0e6f:0x02a6). |
| 495 | */ | 495 | */ |
| 496 | static const u8 xboxone_pdp_init2[] = { | 496 | static const u8 xboxone_pdp_init2[] = { |
| 497 | 0x06, 0x20, 0x00, 0x02, 0x01, 0x00 | 497 | 0x06, 0x20, 0x00, 0x02, 0x01, 0x00 |
| @@ -527,12 +527,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { | |||
| 527 | XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), | 527 | XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), |
| 528 | XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), | 528 | XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), |
| 529 | XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), | 529 | XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), |
| 530 | XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1), | 530 | XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), |
| 531 | XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), | 531 | XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), |
| 532 | XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1), | ||
| 533 | XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2), | ||
| 534 | XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1), | ||
| 535 | XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2), | ||
| 536 | XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), | 532 | XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), |
| 537 | XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), | 533 | XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), |
| 538 | XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), | 534 | XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), |
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 7e75835e220f..850bb259c20e 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c | |||
| @@ -841,7 +841,7 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra | |||
| 841 | if (param[0] != 3) { | 841 | if (param[0] != 3) { |
| 842 | param[0] = 2; | 842 | param[0] = 2; |
| 843 | if (ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET)) | 843 | if (ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET)) |
| 844 | return 2; | 844 | return 2; |
| 845 | } | 845 | } |
| 846 | 846 | ||
| 847 | ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MBR); | 847 | ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MBR); |
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 81be6f781f0b..d56001181598 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c | |||
| @@ -493,7 +493,8 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev) | |||
| 493 | for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) { | 493 | for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) { |
| 494 | const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i]; | 494 | const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i]; |
| 495 | 495 | ||
| 496 | if (buttons & BIT(map->bit)) | 496 | if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) || |
| 497 | (map->ev_type == EV_SW && (switches & BIT(map->bit)))) | ||
| 497 | input_set_capability(idev, map->ev_type, map->code); | 498 | input_set_capability(idev, map->ev_type, map->code); |
| 498 | } | 499 | } |
| 499 | 500 | ||
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index f51ae09596ef..403452ef00e6 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
| @@ -407,7 +407,7 @@ matrix_keypad_parse_dt(struct device *dev) | |||
| 407 | struct matrix_keypad_platform_data *pdata; | 407 | struct matrix_keypad_platform_data *pdata; |
| 408 | struct device_node *np = dev->of_node; | 408 | struct device_node *np = dev->of_node; |
| 409 | unsigned int *gpios; | 409 | unsigned int *gpios; |
| 410 | int i, nrow, ncol; | 410 | int ret, i, nrow, ncol; |
| 411 | 411 | ||
| 412 | if (!np) { | 412 | if (!np) { |
| 413 | dev_err(dev, "device lacks DT data\n"); | 413 | dev_err(dev, "device lacks DT data\n"); |
| @@ -452,12 +452,19 @@ matrix_keypad_parse_dt(struct device *dev) | |||
| 452 | return ERR_PTR(-ENOMEM); | 452 | return ERR_PTR(-ENOMEM); |
| 453 | } | 453 | } |
| 454 | 454 | ||
| 455 | for (i = 0; i < pdata->num_row_gpios; i++) | 455 | for (i = 0; i < nrow; i++) { |
| 456 | gpios[i] = of_get_named_gpio(np, "row-gpios", i); | 456 | ret = of_get_named_gpio(np, "row-gpios", i); |
| 457 | if (ret < 0) | ||
| 458 | return ERR_PTR(ret); | ||
| 459 | gpios[i] = ret; | ||
| 460 | } | ||
| 457 | 461 | ||
| 458 | for (i = 0; i < pdata->num_col_gpios; i++) | 462 | for (i = 0; i < ncol; i++) { |
| 459 | gpios[pdata->num_row_gpios + i] = | 463 | ret = of_get_named_gpio(np, "col-gpios", i); |
| 460 | of_get_named_gpio(np, "col-gpios", i); | 464 | if (ret < 0) |
| 465 | return ERR_PTR(ret); | ||
| 466 | gpios[nrow + i] = ret; | ||
| 467 | } | ||
| 461 | 468 | ||
| 462 | pdata->row_gpios = gpios; | 469 | pdata->row_gpios = gpios; |
| 463 | pdata->col_gpios = &gpios[pdata->num_row_gpios]; | 470 | pdata->col_gpios = &gpios[pdata->num_row_gpios]; |
| @@ -484,10 +491,8 @@ static int matrix_keypad_probe(struct platform_device *pdev) | |||
| 484 | pdata = dev_get_platdata(&pdev->dev); | 491 | pdata = dev_get_platdata(&pdev->dev); |
| 485 | if (!pdata) { | 492 | if (!pdata) { |
| 486 | pdata = matrix_keypad_parse_dt(&pdev->dev); | 493 | pdata = matrix_keypad_parse_dt(&pdev->dev); |
| 487 | if (IS_ERR(pdata)) { | 494 | if (IS_ERR(pdata)) |
| 488 | dev_err(&pdev->dev, "no platform data defined\n"); | ||
| 489 | return PTR_ERR(pdata); | 495 | return PTR_ERR(pdata); |
| 490 | } | ||
| 491 | } else if (!pdata->keymap_data) { | 496 | } else if (!pdata->keymap_data) { |
| 492 | dev_err(&pdev->dev, "no keymap data defined\n"); | 497 | dev_err(&pdev->dev, "no keymap data defined\n"); |
| 493 | return -EINVAL; | 498 | return -EINVAL; |
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index 46406345742b..a7dc286f406c 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c | |||
| @@ -60,8 +60,18 @@ | |||
| 60 | 60 | ||
| 61 | /* OMAP4 values */ | 61 | /* OMAP4 values */ |
| 62 | #define OMAP4_VAL_IRQDISABLE 0x0 | 62 | #define OMAP4_VAL_IRQDISABLE 0x0 |
| 63 | #define OMAP4_VAL_DEBOUNCINGTIME 0x7 | 63 | |
| 64 | #define OMAP4_VAL_PVT 0x7 | 64 | /* |
| 65 | * Errata i689: If a key is released for a time shorter than debounce time, | ||
| 66 | * the keyboard will idle and never detect the key release. The workaround | ||
| 67 | * is to use at least a 12ms debounce time. See omap5432 TRM chapter | ||
| 68 | * "26.4.6.2 Keyboard Controller Timer" for more information. | ||
| 69 | */ | ||
| 70 | #define OMAP4_KEYPAD_PTV_DIV_128 0x6 | ||
| 71 | #define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \ | ||
| 72 | ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1) | ||
| 73 | #define OMAP4_VAL_DEBOUNCINGTIME_16MS \ | ||
| 74 | OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128) | ||
| 65 | 75 | ||
| 66 | enum { | 76 | enum { |
| 67 | KBD_REVISION_OMAP4 = 0, | 77 | KBD_REVISION_OMAP4 = 0, |
| @@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input) | |||
| 181 | 191 | ||
| 182 | kbd_writel(keypad_data, OMAP4_KBD_CTRL, | 192 | kbd_writel(keypad_data, OMAP4_KBD_CTRL, |
| 183 | OMAP4_DEF_CTRL_NOSOFTMODE | | 193 | OMAP4_DEF_CTRL_NOSOFTMODE | |
| 184 | (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT)); | 194 | (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT)); |
| 185 | kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME, | 195 | kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME, |
| 186 | OMAP4_VAL_DEBOUNCINGTIME); | 196 | OMAP4_VAL_DEBOUNCINGTIME_16MS); |
| 187 | /* clear pending interrupts */ | 197 | /* clear pending interrupts */ |
| 188 | kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, | 198 | kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, |
| 189 | kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)); | 199 | kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)); |
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index b0f9d19b3410..a94b6494e71a 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
| @@ -1348,6 +1348,9 @@ static const struct acpi_device_id elan_acpi_id[] = { | |||
| 1348 | { "ELAN0618", 0 }, | 1348 | { "ELAN0618", 0 }, |
| 1349 | { "ELAN061C", 0 }, | 1349 | { "ELAN061C", 0 }, |
| 1350 | { "ELAN061D", 0 }, | 1350 | { "ELAN061D", 0 }, |
| 1351 | { "ELAN061E", 0 }, | ||
| 1352 | { "ELAN0620", 0 }, | ||
| 1353 | { "ELAN0621", 0 }, | ||
| 1351 | { "ELAN0622", 0 }, | 1354 | { "ELAN0622", 0 }, |
| 1352 | { "ELAN1000", 0 }, | 1355 | { "ELAN1000", 0 }, |
| 1353 | { } | 1356 | { } |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 5e85f3cca867..2bd5bb11c8ba 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
| @@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = { | |||
| 170 | "LEN0048", /* X1 Carbon 3 */ | 170 | "LEN0048", /* X1 Carbon 3 */ |
| 171 | "LEN0046", /* X250 */ | 171 | "LEN0046", /* X250 */ |
| 172 | "LEN004a", /* W541 */ | 172 | "LEN004a", /* W541 */ |
| 173 | "LEN005b", /* P50 */ | ||
| 173 | "LEN0071", /* T480 */ | 174 | "LEN0071", /* T480 */ |
| 174 | "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ | 175 | "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ |
| 175 | "LEN0073", /* X1 Carbon G5 (Elantech) */ | 176 | "LEN0073", /* X1 Carbon G5 (Elantech) */ |
| @@ -177,6 +178,7 @@ static const char * const smbus_pnp_ids[] = { | |||
| 177 | "LEN0096", /* X280 */ | 178 | "LEN0096", /* X280 */ |
| 178 | "LEN0097", /* X280 -> ALPS trackpoint */ | 179 | "LEN0097", /* X280 -> ALPS trackpoint */ |
| 179 | "LEN200f", /* T450s */ | 180 | "LEN200f", /* T450s */ |
| 181 | "SYN3221", /* HP 15-ay000 */ | ||
| 180 | NULL | 182 | NULL |
| 181 | }; | 183 | }; |
| 182 | 184 | ||
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index 47a0e81a2989..a8b9be3e28db 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c | |||
| @@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev, | |||
| 177 | * state because the Enter-UP can trigger a wakeup at once. | 177 | * state because the Enter-UP can trigger a wakeup at once. |
| 178 | */ | 178 | */ |
| 179 | if (!(info & IS_BREAK)) | 179 | if (!(info & IS_BREAK)) |
| 180 | pm_wakeup_event(&hv_dev->device, 0); | 180 | pm_wakeup_hard_event(&hv_dev->device); |
| 181 | 181 | ||
| 182 | break; | 182 | break; |
| 183 | 183 | ||
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c index 02fb11985819..42d3fd7e04d7 100644 --- a/drivers/input/touchscreen/migor_ts.c +++ b/drivers/input/touchscreen/migor_ts.c | |||
| @@ -1,23 +1,10 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 1 | /* | 2 | /* |
| 2 | * Touch Screen driver for Renesas MIGO-R Platform | 3 | * Touch Screen driver for Renesas MIGO-R Platform |
| 3 | * | 4 | * |
| 4 | * Copyright (c) 2008 Magnus Damm | 5 | * Copyright (c) 2008 Magnus Damm |
| 5 | * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>, | 6 | * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>, |
| 6 | * Kenati Technologies Pvt Ltd. | 7 | * Kenati Technologies Pvt Ltd. |
| 7 | * | ||
| 8 | * This file is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public | ||
| 10 | * License as published by the Free Software Foundation; either | ||
| 11 | * version 2 of the License, or (at your option) any later version. | ||
| 12 | * | ||
| 13 | * This file is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 16 | * General Public License for more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public | ||
| 19 | * License along with this library; if not, write to the Free Software | ||
| 20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 21 | */ | 8 | */ |
| 22 | #include <linux/module.h> | 9 | #include <linux/module.h> |
| 23 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c index b71673911aac..11ff32c68025 100644 --- a/drivers/input/touchscreen/st1232.c +++ b/drivers/input/touchscreen/st1232.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * ST1232 Touchscreen Controller Driver | 3 | * ST1232 Touchscreen Controller Driver |
| 3 | * | 4 | * |
| @@ -7,15 +8,6 @@ | |||
| 7 | * Using code from: | 8 | * Using code from: |
| 8 | * - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c | 9 | * - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c |
| 9 | * Copyright (C) 2007 Google, Inc. | 10 | * Copyright (C) 2007 Google, Inc. |
| 10 | * | ||
| 11 | * This software is licensed under the terms of the GNU General Public | ||
| 12 | * License version 2, as published by the Free Software Foundation, and | ||
| 13 | * may be copied, distributed, and modified under those terms. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | */ | 11 | */ |
| 20 | 12 | ||
| 21 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| @@ -295,4 +287,4 @@ module_i2c_driver(st1232_ts_driver); | |||
| 295 | 287 | ||
| 296 | MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>"); | 288 | MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>"); |
| 297 | MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver"); | 289 | MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver"); |
| 298 | MODULE_LICENSE("GPL"); | 290 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 5936de71883f..6fc93834da44 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c | |||
| @@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd, | |||
| 930 | bool dirty_flag; | 930 | bool dirty_flag; |
| 931 | *result = true; | 931 | *result = true; |
| 932 | 932 | ||
| 933 | if (from_cblock(cmd->cache_blocks) == 0) | ||
| 934 | /* Nothing to do */ | ||
| 935 | return 0; | ||
| 936 | |||
| 933 | r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root, | 937 | r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root, |
| 934 | from_cblock(cmd->cache_blocks), &cmd->dirty_cursor); | 938 | from_cblock(cmd->cache_blocks), &cmd->dirty_cursor); |
| 935 | if (r) { | 939 | if (r) { |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 0bd8d498b3b9..dadd9696340c 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t) | |||
| 195 | struct dm_thin_new_mapping; | 195 | struct dm_thin_new_mapping; |
| 196 | 196 | ||
| 197 | /* | 197 | /* |
| 198 | * The pool runs in 4 modes. Ordered in degraded order for comparisons. | 198 | * The pool runs in various modes. Ordered in degraded order for comparisons. |
| 199 | */ | 199 | */ |
| 200 | enum pool_mode { | 200 | enum pool_mode { |
| 201 | PM_WRITE, /* metadata may be changed */ | 201 | PM_WRITE, /* metadata may be changed */ |
| @@ -282,9 +282,38 @@ struct pool { | |||
| 282 | mempool_t mapping_pool; | 282 | mempool_t mapping_pool; |
| 283 | }; | 283 | }; |
| 284 | 284 | ||
| 285 | static enum pool_mode get_pool_mode(struct pool *pool); | ||
| 286 | static void metadata_operation_failed(struct pool *pool, const char *op, int r); | 285 | static void metadata_operation_failed(struct pool *pool, const char *op, int r); |
| 287 | 286 | ||
| 287 | static enum pool_mode get_pool_mode(struct pool *pool) | ||
| 288 | { | ||
| 289 | return pool->pf.mode; | ||
| 290 | } | ||
| 291 | |||
| 292 | static void notify_of_pool_mode_change(struct pool *pool) | ||
| 293 | { | ||
| 294 | const char *descs[] = { | ||
| 295 | "write", | ||
| 296 | "out-of-data-space", | ||
| 297 | "read-only", | ||
| 298 | "read-only", | ||
| 299 | "fail" | ||
| 300 | }; | ||
| 301 | const char *extra_desc = NULL; | ||
| 302 | enum pool_mode mode = get_pool_mode(pool); | ||
| 303 | |||
| 304 | if (mode == PM_OUT_OF_DATA_SPACE) { | ||
| 305 | if (!pool->pf.error_if_no_space) | ||
| 306 | extra_desc = " (queue IO)"; | ||
| 307 | else | ||
| 308 | extra_desc = " (error IO)"; | ||
| 309 | } | ||
| 310 | |||
| 311 | dm_table_event(pool->ti->table); | ||
| 312 | DMINFO("%s: switching pool to %s%s mode", | ||
| 313 | dm_device_name(pool->pool_md), | ||
| 314 | descs[(int)mode], extra_desc ? : ""); | ||
| 315 | } | ||
| 316 | |||
| 288 | /* | 317 | /* |
| 289 | * Target context for a pool. | 318 | * Target context for a pool. |
| 290 | */ | 319 | */ |
| @@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws) | |||
| 2351 | queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); | 2380 | queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); |
| 2352 | } | 2381 | } |
| 2353 | 2382 | ||
| 2354 | static void notify_of_pool_mode_change_to_oods(struct pool *pool); | ||
| 2355 | |||
| 2356 | /* | 2383 | /* |
| 2357 | * We're holding onto IO to allow userland time to react. After the | 2384 | * We're holding onto IO to allow userland time to react. After the |
| 2358 | * timeout either the pool will have been resized (and thus back in | 2385 | * timeout either the pool will have been resized (and thus back in |
| @@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws) | |||
| 2365 | 2392 | ||
| 2366 | if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { | 2393 | if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { |
| 2367 | pool->pf.error_if_no_space = true; | 2394 | pool->pf.error_if_no_space = true; |
| 2368 | notify_of_pool_mode_change_to_oods(pool); | 2395 | notify_of_pool_mode_change(pool); |
| 2369 | error_retry_list_with_code(pool, BLK_STS_NOSPC); | 2396 | error_retry_list_with_code(pool, BLK_STS_NOSPC); |
| 2370 | } | 2397 | } |
| 2371 | } | 2398 | } |
| @@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) | |||
| 2433 | 2460 | ||
| 2434 | /*----------------------------------------------------------------*/ | 2461 | /*----------------------------------------------------------------*/ |
| 2435 | 2462 | ||
| 2436 | static enum pool_mode get_pool_mode(struct pool *pool) | ||
| 2437 | { | ||
| 2438 | return pool->pf.mode; | ||
| 2439 | } | ||
| 2440 | |||
| 2441 | static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) | ||
| 2442 | { | ||
| 2443 | dm_table_event(pool->ti->table); | ||
| 2444 | DMINFO("%s: switching pool to %s mode", | ||
| 2445 | dm_device_name(pool->pool_md), new_mode); | ||
| 2446 | } | ||
| 2447 | |||
| 2448 | static void notify_of_pool_mode_change_to_oods(struct pool *pool) | ||
| 2449 | { | ||
| 2450 | if (!pool->pf.error_if_no_space) | ||
| 2451 | notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)"); | ||
| 2452 | else | ||
| 2453 | notify_of_pool_mode_change(pool, "out-of-data-space (error IO)"); | ||
| 2454 | } | ||
| 2455 | |||
| 2456 | static bool passdown_enabled(struct pool_c *pt) | 2463 | static bool passdown_enabled(struct pool_c *pt) |
| 2457 | { | 2464 | { |
| 2458 | return pt->adjusted_pf.discard_passdown; | 2465 | return pt->adjusted_pf.discard_passdown; |
| @@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
| 2501 | 2508 | ||
| 2502 | switch (new_mode) { | 2509 | switch (new_mode) { |
| 2503 | case PM_FAIL: | 2510 | case PM_FAIL: |
| 2504 | if (old_mode != new_mode) | ||
| 2505 | notify_of_pool_mode_change(pool, "failure"); | ||
| 2506 | dm_pool_metadata_read_only(pool->pmd); | 2511 | dm_pool_metadata_read_only(pool->pmd); |
| 2507 | pool->process_bio = process_bio_fail; | 2512 | pool->process_bio = process_bio_fail; |
| 2508 | pool->process_discard = process_bio_fail; | 2513 | pool->process_discard = process_bio_fail; |
| @@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
| 2516 | 2521 | ||
| 2517 | case PM_OUT_OF_METADATA_SPACE: | 2522 | case PM_OUT_OF_METADATA_SPACE: |
| 2518 | case PM_READ_ONLY: | 2523 | case PM_READ_ONLY: |
| 2519 | if (!is_read_only_pool_mode(old_mode)) | ||
| 2520 | notify_of_pool_mode_change(pool, "read-only"); | ||
| 2521 | dm_pool_metadata_read_only(pool->pmd); | 2524 | dm_pool_metadata_read_only(pool->pmd); |
| 2522 | pool->process_bio = process_bio_read_only; | 2525 | pool->process_bio = process_bio_read_only; |
| 2523 | pool->process_discard = process_bio_success; | 2526 | pool->process_discard = process_bio_success; |
| @@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
| 2538 | * alarming rate. Adjust your low water mark if you're | 2541 | * alarming rate. Adjust your low water mark if you're |
| 2539 | * frequently seeing this mode. | 2542 | * frequently seeing this mode. |
| 2540 | */ | 2543 | */ |
| 2541 | if (old_mode != new_mode) | ||
| 2542 | notify_of_pool_mode_change_to_oods(pool); | ||
| 2543 | pool->out_of_data_space = true; | 2544 | pool->out_of_data_space = true; |
| 2544 | pool->process_bio = process_bio_read_only; | 2545 | pool->process_bio = process_bio_read_only; |
| 2545 | pool->process_discard = process_discard_bio; | 2546 | pool->process_discard = process_discard_bio; |
| @@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
| 2552 | break; | 2553 | break; |
| 2553 | 2554 | ||
| 2554 | case PM_WRITE: | 2555 | case PM_WRITE: |
| 2555 | if (old_mode != new_mode) | ||
| 2556 | notify_of_pool_mode_change(pool, "write"); | ||
| 2557 | if (old_mode == PM_OUT_OF_DATA_SPACE) | 2556 | if (old_mode == PM_OUT_OF_DATA_SPACE) |
| 2558 | cancel_delayed_work_sync(&pool->no_space_timeout); | 2557 | cancel_delayed_work_sync(&pool->no_space_timeout); |
| 2559 | pool->out_of_data_space = false; | 2558 | pool->out_of_data_space = false; |
| @@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
| 2573 | * doesn't cause an unexpected mode transition on resume. | 2572 | * doesn't cause an unexpected mode transition on resume. |
| 2574 | */ | 2573 | */ |
| 2575 | pt->adjusted_pf.mode = new_mode; | 2574 | pt->adjusted_pf.mode = new_mode; |
| 2575 | |||
| 2576 | if (old_mode != new_mode) | ||
| 2577 | notify_of_pool_mode_change(pool); | ||
| 2576 | } | 2578 | } |
| 2577 | 2579 | ||
| 2578 | static void abort_transaction(struct pool *pool) | 2580 | static void abort_transaction(struct pool *pool) |
| @@ -4023,7 +4025,7 @@ static struct target_type pool_target = { | |||
| 4023 | .name = "thin-pool", | 4025 | .name = "thin-pool", |
| 4024 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | | 4026 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | |
| 4025 | DM_TARGET_IMMUTABLE, | 4027 | DM_TARGET_IMMUTABLE, |
| 4026 | .version = {1, 20, 0}, | 4028 | .version = {1, 21, 0}, |
| 4027 | .module = THIS_MODULE, | 4029 | .module = THIS_MODULE, |
| 4028 | .ctr = pool_ctr, | 4030 | .ctr = pool_ctr, |
| 4029 | .dtr = pool_dtr, | 4031 | .dtr = pool_dtr, |
| @@ -4397,7 +4399,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) | |||
| 4397 | 4399 | ||
| 4398 | static struct target_type thin_target = { | 4400 | static struct target_type thin_target = { |
| 4399 | .name = "thin", | 4401 | .name = "thin", |
| 4400 | .version = {1, 20, 0}, | 4402 | .version = {1, 21, 0}, |
| 4401 | .module = THIS_MODULE, | 4403 | .module = THIS_MODULE, |
| 4402 | .ctr = thin_ctr, | 4404 | .ctr = thin_ctr, |
| 4403 | .dtr = thin_dtr, | 4405 | .dtr = thin_dtr, |
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 981154e59461..6af5babe6837 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c | |||
| @@ -20,7 +20,6 @@ struct dmz_bioctx { | |||
| 20 | struct dm_zone *zone; | 20 | struct dm_zone *zone; |
| 21 | struct bio *bio; | 21 | struct bio *bio; |
| 22 | refcount_t ref; | 22 | refcount_t ref; |
| 23 | blk_status_t status; | ||
| 24 | }; | 23 | }; |
| 25 | 24 | ||
| 26 | /* | 25 | /* |
| @@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status) | |||
| 78 | { | 77 | { |
| 79 | struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); | 78 | struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); |
| 80 | 79 | ||
| 81 | if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK) | 80 | if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK) |
| 82 | bioctx->status = status; | 81 | bio->bi_status = status; |
| 83 | bio_endio(bio); | 82 | |
| 83 | if (refcount_dec_and_test(&bioctx->ref)) { | ||
| 84 | struct dm_zone *zone = bioctx->zone; | ||
| 85 | |||
| 86 | if (zone) { | ||
| 87 | if (bio->bi_status != BLK_STS_OK && | ||
| 88 | bio_op(bio) == REQ_OP_WRITE && | ||
| 89 | dmz_is_seq(zone)) | ||
| 90 | set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); | ||
| 91 | dmz_deactivate_zone(zone); | ||
| 92 | } | ||
| 93 | bio_endio(bio); | ||
| 94 | } | ||
| 84 | } | 95 | } |
| 85 | 96 | ||
| 86 | /* | 97 | /* |
| 87 | * Partial clone read BIO completion callback. This terminates the | 98 | * Completion callback for an internally cloned target BIO. This terminates the |
| 88 | * target BIO when there are no more references to its context. | 99 | * target BIO when there are no more references to its context. |
| 89 | */ | 100 | */ |
| 90 | static void dmz_read_bio_end_io(struct bio *bio) | 101 | static void dmz_clone_endio(struct bio *clone) |
| 91 | { | 102 | { |
| 92 | struct dmz_bioctx *bioctx = bio->bi_private; | 103 | struct dmz_bioctx *bioctx = clone->bi_private; |
| 93 | blk_status_t status = bio->bi_status; | 104 | blk_status_t status = clone->bi_status; |
| 94 | 105 | ||
| 95 | bio_put(bio); | 106 | bio_put(clone); |
| 96 | dmz_bio_endio(bioctx->bio, status); | 107 | dmz_bio_endio(bioctx->bio, status); |
| 97 | } | 108 | } |
| 98 | 109 | ||
| 99 | /* | 110 | /* |
| 100 | * Issue a BIO to a zone. The BIO may only partially process the | 111 | * Issue a clone of a target BIO. The clone may only partially process the |
| 101 | * original target BIO. | 112 | * original target BIO. |
| 102 | */ | 113 | */ |
| 103 | static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone, | 114 | static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, |
| 104 | struct bio *bio, sector_t chunk_block, | 115 | struct bio *bio, sector_t chunk_block, |
| 105 | unsigned int nr_blocks) | 116 | unsigned int nr_blocks) |
| 106 | { | 117 | { |
| 107 | struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); | 118 | struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); |
| 108 | sector_t sector; | ||
| 109 | struct bio *clone; | 119 | struct bio *clone; |
| 110 | 120 | ||
| 111 | /* BIO remap sector */ | ||
| 112 | sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); | ||
| 113 | |||
| 114 | /* If the read is not partial, there is no need to clone the BIO */ | ||
| 115 | if (nr_blocks == dmz_bio_blocks(bio)) { | ||
| 116 | /* Setup and submit the BIO */ | ||
| 117 | bio->bi_iter.bi_sector = sector; | ||
| 118 | refcount_inc(&bioctx->ref); | ||
| 119 | generic_make_request(bio); | ||
| 120 | return 0; | ||
| 121 | } | ||
| 122 | |||
| 123 | /* Partial BIO: we need to clone the BIO */ | ||
| 124 | clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set); | 121 | clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set); |
| 125 | if (!clone) | 122 | if (!clone) |
| 126 | return -ENOMEM; | 123 | return -ENOMEM; |
| 127 | 124 | ||
| 128 | /* Setup the clone */ | 125 | bio_set_dev(clone, dmz->dev->bdev); |
| 129 | clone->bi_iter.bi_sector = sector; | 126 | clone->bi_iter.bi_sector = |
| 127 | dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); | ||
| 130 | clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT; | 128 | clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT; |
| 131 | clone->bi_end_io = dmz_read_bio_end_io; | 129 | clone->bi_end_io = dmz_clone_endio; |
| 132 | clone->bi_private = bioctx; | 130 | clone->bi_private = bioctx; |
| 133 | 131 | ||
| 134 | bio_advance(bio, clone->bi_iter.bi_size); | 132 | bio_advance(bio, clone->bi_iter.bi_size); |
| 135 | 133 | ||
| 136 | /* Submit the clone */ | ||
| 137 | refcount_inc(&bioctx->ref); | 134 | refcount_inc(&bioctx->ref); |
| 138 | generic_make_request(clone); | 135 | generic_make_request(clone); |
| 139 | 136 | ||
| 137 | if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) | ||
| 138 | zone->wp_block += nr_blocks; | ||
| 139 | |||
| 140 | return 0; | 140 | return 0; |
| 141 | } | 141 | } |
| 142 | 142 | ||
| @@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone, | |||
| 214 | if (nr_blocks) { | 214 | if (nr_blocks) { |
| 215 | /* Valid blocks found: read them */ | 215 | /* Valid blocks found: read them */ |
| 216 | nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block); | 216 | nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block); |
| 217 | ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks); | 217 | ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks); |
| 218 | if (ret) | 218 | if (ret) |
| 219 | return ret; | 219 | return ret; |
| 220 | chunk_block += nr_blocks; | 220 | chunk_block += nr_blocks; |
| @@ -229,25 +229,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone, | |||
| 229 | } | 229 | } |
| 230 | 230 | ||
| 231 | /* | 231 | /* |
| 232 | * Issue a write BIO to a zone. | ||
| 233 | */ | ||
| 234 | static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone, | ||
| 235 | struct bio *bio, sector_t chunk_block, | ||
| 236 | unsigned int nr_blocks) | ||
| 237 | { | ||
| 238 | struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); | ||
| 239 | |||
| 240 | /* Setup and submit the BIO */ | ||
| 241 | bio_set_dev(bio, dmz->dev->bdev); | ||
| 242 | bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); | ||
| 243 | refcount_inc(&bioctx->ref); | ||
| 244 | generic_make_request(bio); | ||
| 245 | |||
| 246 | if (dmz_is_seq(zone)) | ||
| 247 | zone->wp_block += nr_blocks; | ||
| 248 | } | ||
| 249 | |||
| 250 | /* | ||
| 251 | * Write blocks directly in a data zone, at the write pointer. | 232 | * Write blocks directly in a data zone, at the write pointer. |
| 252 | * If a buffer zone is assigned, invalidate the blocks written | 233 | * If a buffer zone is assigned, invalidate the blocks written |
| 253 | * in place. | 234 | * in place. |
| @@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz, | |||
| 265 | return -EROFS; | 246 | return -EROFS; |
| 266 | 247 | ||
| 267 | /* Submit write */ | 248 | /* Submit write */ |
| 268 | dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks); | 249 | ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks); |
| 250 | if (ret) | ||
| 251 | return ret; | ||
| 269 | 252 | ||
| 270 | /* | 253 | /* |
| 271 | * Validate the blocks in the data zone and invalidate | 254 | * Validate the blocks in the data zone and invalidate |
| @@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz, | |||
| 301 | return -EROFS; | 284 | return -EROFS; |
| 302 | 285 | ||
| 303 | /* Submit write */ | 286 | /* Submit write */ |
| 304 | dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks); | 287 | ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks); |
| 288 | if (ret) | ||
| 289 | return ret; | ||
| 305 | 290 | ||
| 306 | /* | 291 | /* |
| 307 | * Validate the blocks in the buffer zone | 292 | * Validate the blocks in the buffer zone |
| @@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) | |||
| 600 | bioctx->zone = NULL; | 585 | bioctx->zone = NULL; |
| 601 | bioctx->bio = bio; | 586 | bioctx->bio = bio; |
| 602 | refcount_set(&bioctx->ref, 1); | 587 | refcount_set(&bioctx->ref, 1); |
| 603 | bioctx->status = BLK_STS_OK; | ||
| 604 | 588 | ||
| 605 | /* Set the BIO pending in the flush list */ | 589 | /* Set the BIO pending in the flush list */ |
| 606 | if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) { | 590 | if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) { |
| @@ -624,35 +608,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) | |||
| 624 | } | 608 | } |
| 625 | 609 | ||
| 626 | /* | 610 | /* |
| 627 | * Completed target BIO processing. | ||
| 628 | */ | ||
| 629 | static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) | ||
| 630 | { | ||
| 631 | struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); | ||
| 632 | |||
| 633 | if (bioctx->status == BLK_STS_OK && *error) | ||
| 634 | bioctx->status = *error; | ||
| 635 | |||
| 636 | if (!refcount_dec_and_test(&bioctx->ref)) | ||
| 637 | return DM_ENDIO_INCOMPLETE; | ||
| 638 | |||
| 639 | /* Done */ | ||
| 640 | bio->bi_status = bioctx->status; | ||
| 641 | |||
| 642 | if (bioctx->zone) { | ||
| 643 | struct dm_zone *zone = bioctx->zone; | ||
| 644 | |||
| 645 | if (*error && bio_op(bio) == REQ_OP_WRITE) { | ||
| 646 | if (dmz_is_seq(zone)) | ||
| 647 | set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); | ||
| 648 | } | ||
| 649 | dmz_deactivate_zone(zone); | ||
| 650 | } | ||
| 651 | |||
| 652 | return DM_ENDIO_DONE; | ||
| 653 | } | ||
| 654 | |||
| 655 | /* | ||
| 656 | * Get zoned device information. | 611 | * Get zoned device information. |
| 657 | */ | 612 | */ |
| 658 | static int dmz_get_zoned_device(struct dm_target *ti, char *path) | 613 | static int dmz_get_zoned_device(struct dm_target *ti, char *path) |
| @@ -946,7 +901,6 @@ static struct target_type dmz_type = { | |||
| 946 | .ctr = dmz_ctr, | 901 | .ctr = dmz_ctr, |
| 947 | .dtr = dmz_dtr, | 902 | .dtr = dmz_dtr, |
| 948 | .map = dmz_map, | 903 | .map = dmz_map, |
| 949 | .end_io = dmz_end_io, | ||
| 950 | .io_hints = dmz_io_hints, | 904 | .io_hints = dmz_io_hints, |
| 951 | .prepare_ioctl = dmz_prepare_ioctl, | 905 | .prepare_ioctl = dmz_prepare_ioctl, |
| 952 | .postsuspend = dmz_suspend, | 906 | .postsuspend = dmz_suspend, |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c510179a7f84..63a7c416b224 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -1593,6 +1593,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, | |||
| 1593 | return ret; | 1593 | return ret; |
| 1594 | } | 1594 | } |
| 1595 | 1595 | ||
| 1596 | blk_queue_split(md->queue, &bio); | ||
| 1597 | |||
| 1596 | init_clone_info(&ci, md, map, bio); | 1598 | init_clone_info(&ci, md, map, bio); |
| 1597 | 1599 | ||
| 1598 | if (bio->bi_opf & REQ_PREFLUSH) { | 1600 | if (bio->bi_opf & REQ_PREFLUSH) { |
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 8add62a18293..102eb35fcf3f 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig | |||
| @@ -110,6 +110,19 @@ config MEDIA_CONTROLLER_DVB | |||
| 110 | 110 | ||
| 111 | This is currently experimental. | 111 | This is currently experimental. |
| 112 | 112 | ||
| 113 | config MEDIA_CONTROLLER_REQUEST_API | ||
| 114 | bool "Enable Media controller Request API (EXPERIMENTAL)" | ||
| 115 | depends on MEDIA_CONTROLLER && STAGING_MEDIA | ||
| 116 | default n | ||
| 117 | ---help--- | ||
| 118 | DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING. | ||
| 119 | |||
| 120 | This option enables the Request API for the Media controller and V4L2 | ||
| 121 | interfaces. It is currently needed by a few stateless codec drivers. | ||
| 122 | |||
| 123 | There is currently no intention to provide API or ABI stability for | ||
| 124 | this new API as of yet. | ||
| 125 | |||
| 113 | # | 126 | # |
| 114 | # Video4Linux support | 127 | # Video4Linux support |
| 115 | # Only enables if one of the V4L2 types (ATV, webcam, radio) is selected | 128 | # Only enables if one of the V4L2 types (ATV, webcam, radio) is selected |
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 975ff5669f72..8ff8722cb6b1 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c | |||
| @@ -947,7 +947,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) | |||
| 947 | } | 947 | } |
| 948 | atomic_dec(&q->owned_by_drv_count); | 948 | atomic_dec(&q->owned_by_drv_count); |
| 949 | 949 | ||
| 950 | if (vb->req_obj.req) { | 950 | if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) { |
| 951 | /* This is not supported at the moment */ | 951 | /* This is not supported at the moment */ |
| 952 | WARN_ON(state == VB2_BUF_STATE_REQUEUEING); | 952 | WARN_ON(state == VB2_BUF_STATE_REQUEUEING); |
| 953 | media_request_object_unbind(&vb->req_obj); | 953 | media_request_object_unbind(&vb->req_obj); |
| @@ -1359,8 +1359,12 @@ static void vb2_req_release(struct media_request_object *obj) | |||
| 1359 | { | 1359 | { |
| 1360 | struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); | 1360 | struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); |
| 1361 | 1361 | ||
| 1362 | if (vb->state == VB2_BUF_STATE_IN_REQUEST) | 1362 | if (vb->state == VB2_BUF_STATE_IN_REQUEST) { |
| 1363 | vb->state = VB2_BUF_STATE_DEQUEUED; | 1363 | vb->state = VB2_BUF_STATE_DEQUEUED; |
| 1364 | if (vb->request) | ||
| 1365 | media_request_put(vb->request); | ||
| 1366 | vb->request = NULL; | ||
| 1367 | } | ||
| 1364 | } | 1368 | } |
| 1365 | 1369 | ||
| 1366 | static const struct media_request_object_ops vb2_core_req_ops = { | 1370 | static const struct media_request_object_ops vb2_core_req_ops = { |
| @@ -1528,6 +1532,18 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, | |||
| 1528 | return ret; | 1532 | return ret; |
| 1529 | 1533 | ||
| 1530 | vb->state = VB2_BUF_STATE_IN_REQUEST; | 1534 | vb->state = VB2_BUF_STATE_IN_REQUEST; |
| 1535 | |||
| 1536 | /* | ||
| 1537 | * Increment the refcount and store the request. | ||
| 1538 | * The request refcount is decremented again when the | ||
| 1539 | * buffer is dequeued. This is to prevent vb2_buffer_done() | ||
| 1540 | * from freeing the request from interrupt context, which can | ||
| 1541 | * happen if the application closed the request fd after | ||
| 1542 | * queueing the request. | ||
| 1543 | */ | ||
| 1544 | media_request_get(req); | ||
| 1545 | vb->request = req; | ||
| 1546 | |||
| 1531 | /* Fill buffer information for the userspace */ | 1547 | /* Fill buffer information for the userspace */ |
| 1532 | if (pb) { | 1548 | if (pb) { |
| 1533 | call_void_bufop(q, copy_timestamp, vb, pb); | 1549 | call_void_bufop(q, copy_timestamp, vb, pb); |
| @@ -1749,10 +1765,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb) | |||
| 1749 | call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv); | 1765 | call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv); |
| 1750 | vb->planes[i].dbuf_mapped = 0; | 1766 | vb->planes[i].dbuf_mapped = 0; |
| 1751 | } | 1767 | } |
| 1752 | if (vb->req_obj.req) { | ||
| 1753 | media_request_object_unbind(&vb->req_obj); | ||
| 1754 | media_request_object_put(&vb->req_obj); | ||
| 1755 | } | ||
| 1756 | call_void_bufop(q, init_buffer, vb); | 1768 | call_void_bufop(q, init_buffer, vb); |
| 1757 | } | 1769 | } |
| 1758 | 1770 | ||
| @@ -1797,6 +1809,14 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, | |||
| 1797 | /* go back to dequeued state */ | 1809 | /* go back to dequeued state */ |
| 1798 | __vb2_dqbuf(vb); | 1810 | __vb2_dqbuf(vb); |
| 1799 | 1811 | ||
| 1812 | if (WARN_ON(vb->req_obj.req)) { | ||
| 1813 | media_request_object_unbind(&vb->req_obj); | ||
| 1814 | media_request_object_put(&vb->req_obj); | ||
| 1815 | } | ||
| 1816 | if (vb->request) | ||
| 1817 | media_request_put(vb->request); | ||
| 1818 | vb->request = NULL; | ||
| 1819 | |||
| 1800 | dprintk(2, "dqbuf of buffer %d, with state %d\n", | 1820 | dprintk(2, "dqbuf of buffer %d, with state %d\n", |
| 1801 | vb->index, vb->state); | 1821 | vb->index, vb->state); |
| 1802 | 1822 | ||
| @@ -1903,6 +1923,14 @@ static void __vb2_queue_cancel(struct vb2_queue *q) | |||
| 1903 | vb->prepared = false; | 1923 | vb->prepared = false; |
| 1904 | } | 1924 | } |
| 1905 | __vb2_dqbuf(vb); | 1925 | __vb2_dqbuf(vb); |
| 1926 | |||
| 1927 | if (vb->req_obj.req) { | ||
| 1928 | media_request_object_unbind(&vb->req_obj); | ||
| 1929 | media_request_object_put(&vb->req_obj); | ||
| 1930 | } | ||
| 1931 | if (vb->request) | ||
| 1932 | media_request_put(vb->request); | ||
| 1933 | vb->request = NULL; | ||
| 1906 | } | 1934 | } |
| 1907 | } | 1935 | } |
| 1908 | 1936 | ||
| @@ -1940,10 +1968,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type) | |||
| 1940 | if (ret) | 1968 | if (ret) |
| 1941 | return ret; | 1969 | return ret; |
| 1942 | ret = vb2_start_streaming(q); | 1970 | ret = vb2_start_streaming(q); |
| 1943 | if (ret) { | 1971 | if (ret) |
| 1944 | __vb2_queue_cancel(q); | ||
| 1945 | return ret; | 1972 | return ret; |
| 1946 | } | ||
| 1947 | } | 1973 | } |
| 1948 | 1974 | ||
| 1949 | q->streaming = 1; | 1975 | q->streaming = 1; |
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index a17033ab2c22..1d35aeabfd85 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c | |||
| @@ -333,10 +333,10 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b | |||
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev, | 335 | static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev, |
| 336 | struct v4l2_buffer *b, | 336 | struct v4l2_buffer *b, bool is_prepare, |
| 337 | const char *opname, | ||
| 338 | struct media_request **p_req) | 337 | struct media_request **p_req) |
| 339 | { | 338 | { |
| 339 | const char *opname = is_prepare ? "prepare_buf" : "qbuf"; | ||
| 340 | struct media_request *req; | 340 | struct media_request *req; |
| 341 | struct vb2_v4l2_buffer *vbuf; | 341 | struct vb2_v4l2_buffer *vbuf; |
| 342 | struct vb2_buffer *vb; | 342 | struct vb2_buffer *vb; |
| @@ -378,6 +378,9 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md | |||
| 378 | return ret; | 378 | return ret; |
| 379 | } | 379 | } |
| 380 | 380 | ||
| 381 | if (is_prepare) | ||
| 382 | return 0; | ||
| 383 | |||
| 381 | if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) { | 384 | if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) { |
| 382 | if (q->uses_requests) { | 385 | if (q->uses_requests) { |
| 383 | dprintk(1, "%s: queue uses requests\n", opname); | 386 | dprintk(1, "%s: queue uses requests\n", opname); |
| @@ -631,8 +634,10 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps) | |||
| 631 | *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR; | 634 | *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR; |
| 632 | if (q->io_modes & VB2_DMABUF) | 635 | if (q->io_modes & VB2_DMABUF) |
| 633 | *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF; | 636 | *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF; |
| 637 | #ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API | ||
| 634 | if (q->supports_requests) | 638 | if (q->supports_requests) |
| 635 | *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS; | 639 | *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS; |
| 640 | #endif | ||
| 636 | } | 641 | } |
| 637 | 642 | ||
| 638 | int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) | 643 | int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) |
| @@ -657,7 +662,7 @@ int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, | |||
| 657 | if (b->flags & V4L2_BUF_FLAG_REQUEST_FD) | 662 | if (b->flags & V4L2_BUF_FLAG_REQUEST_FD) |
| 658 | return -EINVAL; | 663 | return -EINVAL; |
| 659 | 664 | ||
| 660 | ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL); | 665 | ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL); |
| 661 | 666 | ||
| 662 | return ret ? ret : vb2_core_prepare_buf(q, b->index, b); | 667 | return ret ? ret : vb2_core_prepare_buf(q, b->index, b); |
| 663 | } | 668 | } |
| @@ -729,7 +734,7 @@ int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, | |||
| 729 | return -EBUSY; | 734 | return -EBUSY; |
| 730 | } | 735 | } |
| 731 | 736 | ||
| 732 | ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req); | 737 | ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req); |
| 733 | if (ret) | 738 | if (ret) |
| 734 | return ret; | 739 | return ret; |
| 735 | ret = vb2_core_qbuf(q, b->index, b, req); | 740 | ret = vb2_core_qbuf(q, b->index, b, req); |
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c index 6d4b2eec67b4..29836c1a40e9 100644 --- a/drivers/media/dvb-frontends/dvb-pll.c +++ b/drivers/media/dvb-frontends/dvb-pll.c | |||
| @@ -80,8 +80,8 @@ struct dvb_pll_desc { | |||
| 80 | 80 | ||
| 81 | static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = { | 81 | static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = { |
| 82 | .name = "Thomson dtt7579", | 82 | .name = "Thomson dtt7579", |
| 83 | .min = 177000000, | 83 | .min = 177 * MHz, |
| 84 | .max = 858000000, | 84 | .max = 858 * MHz, |
| 85 | .iffreq= 36166667, | 85 | .iffreq= 36166667, |
| 86 | .sleepdata = (u8[]){ 2, 0xb4, 0x03 }, | 86 | .sleepdata = (u8[]){ 2, 0xb4, 0x03 }, |
| 87 | .count = 4, | 87 | .count = 4, |
| @@ -102,8 +102,8 @@ static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf) | |||
| 102 | 102 | ||
| 103 | static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = { | 103 | static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = { |
| 104 | .name = "Thomson dtt759x", | 104 | .name = "Thomson dtt759x", |
| 105 | .min = 177000000, | 105 | .min = 177 * MHz, |
| 106 | .max = 896000000, | 106 | .max = 896 * MHz, |
| 107 | .set = thomson_dtt759x_bw, | 107 | .set = thomson_dtt759x_bw, |
| 108 | .iffreq= 36166667, | 108 | .iffreq= 36166667, |
| 109 | .sleepdata = (u8[]){ 2, 0x84, 0x03 }, | 109 | .sleepdata = (u8[]){ 2, 0x84, 0x03 }, |
| @@ -126,8 +126,8 @@ static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf) | |||
| 126 | 126 | ||
| 127 | static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = { | 127 | static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = { |
| 128 | .name = "Thomson dtt7520x", | 128 | .name = "Thomson dtt7520x", |
| 129 | .min = 185000000, | 129 | .min = 185 * MHz, |
| 130 | .max = 900000000, | 130 | .max = 900 * MHz, |
| 131 | .set = thomson_dtt7520x_bw, | 131 | .set = thomson_dtt7520x_bw, |
| 132 | .iffreq = 36166667, | 132 | .iffreq = 36166667, |
| 133 | .count = 7, | 133 | .count = 7, |
| @@ -144,8 +144,8 @@ static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = { | |||
| 144 | 144 | ||
| 145 | static const struct dvb_pll_desc dvb_pll_lg_z201 = { | 145 | static const struct dvb_pll_desc dvb_pll_lg_z201 = { |
| 146 | .name = "LG z201", | 146 | .name = "LG z201", |
| 147 | .min = 174000000, | 147 | .min = 174 * MHz, |
| 148 | .max = 862000000, | 148 | .max = 862 * MHz, |
| 149 | .iffreq= 36166667, | 149 | .iffreq= 36166667, |
| 150 | .sleepdata = (u8[]){ 2, 0xbc, 0x03 }, | 150 | .sleepdata = (u8[]){ 2, 0xbc, 0x03 }, |
| 151 | .count = 5, | 151 | .count = 5, |
| @@ -160,8 +160,8 @@ static const struct dvb_pll_desc dvb_pll_lg_z201 = { | |||
| 160 | 160 | ||
| 161 | static const struct dvb_pll_desc dvb_pll_unknown_1 = { | 161 | static const struct dvb_pll_desc dvb_pll_unknown_1 = { |
| 162 | .name = "unknown 1", /* used by dntv live dvb-t */ | 162 | .name = "unknown 1", /* used by dntv live dvb-t */ |
| 163 | .min = 174000000, | 163 | .min = 174 * MHz, |
| 164 | .max = 862000000, | 164 | .max = 862 * MHz, |
| 165 | .iffreq= 36166667, | 165 | .iffreq= 36166667, |
| 166 | .count = 9, | 166 | .count = 9, |
| 167 | .entries = { | 167 | .entries = { |
| @@ -182,8 +182,8 @@ static const struct dvb_pll_desc dvb_pll_unknown_1 = { | |||
| 182 | */ | 182 | */ |
| 183 | static const struct dvb_pll_desc dvb_pll_tua6010xs = { | 183 | static const struct dvb_pll_desc dvb_pll_tua6010xs = { |
| 184 | .name = "Infineon TUA6010XS", | 184 | .name = "Infineon TUA6010XS", |
| 185 | .min = 44250000, | 185 | .min = 44250 * kHz, |
| 186 | .max = 858000000, | 186 | .max = 858 * MHz, |
| 187 | .iffreq= 36125000, | 187 | .iffreq= 36125000, |
| 188 | .count = 3, | 188 | .count = 3, |
| 189 | .entries = { | 189 | .entries = { |
| @@ -196,8 +196,8 @@ static const struct dvb_pll_desc dvb_pll_tua6010xs = { | |||
| 196 | /* Panasonic env57h1xd5 (some Philips PLL ?) */ | 196 | /* Panasonic env57h1xd5 (some Philips PLL ?) */ |
| 197 | static const struct dvb_pll_desc dvb_pll_env57h1xd5 = { | 197 | static const struct dvb_pll_desc dvb_pll_env57h1xd5 = { |
| 198 | .name = "Panasonic ENV57H1XD5", | 198 | .name = "Panasonic ENV57H1XD5", |
| 199 | .min = 44250000, | 199 | .min = 44250 * kHz, |
| 200 | .max = 858000000, | 200 | .max = 858 * MHz, |
| 201 | .iffreq= 36125000, | 201 | .iffreq= 36125000, |
| 202 | .count = 4, | 202 | .count = 4, |
| 203 | .entries = { | 203 | .entries = { |
| @@ -220,8 +220,8 @@ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf) | |||
| 220 | 220 | ||
| 221 | static const struct dvb_pll_desc dvb_pll_tda665x = { | 221 | static const struct dvb_pll_desc dvb_pll_tda665x = { |
| 222 | .name = "Philips TDA6650/TDA6651", | 222 | .name = "Philips TDA6650/TDA6651", |
| 223 | .min = 44250000, | 223 | .min = 44250 * kHz, |
| 224 | .max = 858000000, | 224 | .max = 858 * MHz, |
| 225 | .set = tda665x_bw, | 225 | .set = tda665x_bw, |
| 226 | .iffreq= 36166667, | 226 | .iffreq= 36166667, |
| 227 | .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab }, | 227 | .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab }, |
| @@ -254,8 +254,8 @@ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf) | |||
| 254 | 254 | ||
| 255 | static const struct dvb_pll_desc dvb_pll_tua6034 = { | 255 | static const struct dvb_pll_desc dvb_pll_tua6034 = { |
| 256 | .name = "Infineon TUA6034", | 256 | .name = "Infineon TUA6034", |
| 257 | .min = 44250000, | 257 | .min = 44250 * kHz, |
| 258 | .max = 858000000, | 258 | .max = 858 * MHz, |
| 259 | .iffreq= 36166667, | 259 | .iffreq= 36166667, |
| 260 | .count = 3, | 260 | .count = 3, |
| 261 | .set = tua6034_bw, | 261 | .set = tua6034_bw, |
| @@ -278,8 +278,8 @@ static void tded4_bw(struct dvb_frontend *fe, u8 *buf) | |||
| 278 | 278 | ||
| 279 | static const struct dvb_pll_desc dvb_pll_tded4 = { | 279 | static const struct dvb_pll_desc dvb_pll_tded4 = { |
| 280 | .name = "ALPS TDED4", | 280 | .name = "ALPS TDED4", |
| 281 | .min = 47000000, | 281 | .min = 47 * MHz, |
| 282 | .max = 863000000, | 282 | .max = 863 * MHz, |
| 283 | .iffreq= 36166667, | 283 | .iffreq= 36166667, |
| 284 | .set = tded4_bw, | 284 | .set = tded4_bw, |
| 285 | .count = 4, | 285 | .count = 4, |
| @@ -296,8 +296,8 @@ static const struct dvb_pll_desc dvb_pll_tded4 = { | |||
| 296 | */ | 296 | */ |
| 297 | static const struct dvb_pll_desc dvb_pll_tdhu2 = { | 297 | static const struct dvb_pll_desc dvb_pll_tdhu2 = { |
| 298 | .name = "ALPS TDHU2", | 298 | .name = "ALPS TDHU2", |
| 299 | .min = 54000000, | 299 | .min = 54 * MHz, |
| 300 | .max = 864000000, | 300 | .max = 864 * MHz, |
| 301 | .iffreq= 44000000, | 301 | .iffreq= 44000000, |
| 302 | .count = 4, | 302 | .count = 4, |
| 303 | .entries = { | 303 | .entries = { |
| @@ -313,8 +313,8 @@ static const struct dvb_pll_desc dvb_pll_tdhu2 = { | |||
| 313 | */ | 313 | */ |
| 314 | static const struct dvb_pll_desc dvb_pll_samsung_tbmv = { | 314 | static const struct dvb_pll_desc dvb_pll_samsung_tbmv = { |
| 315 | .name = "Samsung TBMV30111IN / TBMV30712IN1", | 315 | .name = "Samsung TBMV30111IN / TBMV30712IN1", |
| 316 | .min = 54000000, | 316 | .min = 54 * MHz, |
| 317 | .max = 860000000, | 317 | .max = 860 * MHz, |
| 318 | .iffreq= 44000000, | 318 | .iffreq= 44000000, |
| 319 | .count = 6, | 319 | .count = 6, |
| 320 | .entries = { | 320 | .entries = { |
| @@ -332,8 +332,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = { | |||
| 332 | */ | 332 | */ |
| 333 | static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = { | 333 | static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = { |
| 334 | .name = "Philips SD1878", | 334 | .name = "Philips SD1878", |
| 335 | .min = 950000, | 335 | .min = 950 * MHz, |
| 336 | .max = 2150000, | 336 | .max = 2150 * MHz, |
| 337 | .iffreq= 249, /* zero-IF, offset 249 is to round up */ | 337 | .iffreq= 249, /* zero-IF, offset 249 is to round up */ |
| 338 | .count = 4, | 338 | .count = 4, |
| 339 | .entries = { | 339 | .entries = { |
| @@ -398,8 +398,8 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf) | |||
| 398 | 398 | ||
| 399 | static const struct dvb_pll_desc dvb_pll_opera1 = { | 399 | static const struct dvb_pll_desc dvb_pll_opera1 = { |
| 400 | .name = "Opera Tuner", | 400 | .name = "Opera Tuner", |
| 401 | .min = 900000, | 401 | .min = 900 * MHz, |
| 402 | .max = 2250000, | 402 | .max = 2250 * MHz, |
| 403 | .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 }, | 403 | .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 }, |
| 404 | .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 }, | 404 | .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 }, |
| 405 | .iffreq= 0, | 405 | .iffreq= 0, |
| @@ -445,8 +445,8 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf) | |||
| 445 | /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */ | 445 | /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */ |
| 446 | static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { | 446 | static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { |
| 447 | .name = "Samsung DTOS403IH102A", | 447 | .name = "Samsung DTOS403IH102A", |
| 448 | .min = 44250000, | 448 | .min = 44250 * kHz, |
| 449 | .max = 858000000, | 449 | .max = 858 * MHz, |
| 450 | .iffreq = 36125000, | 450 | .iffreq = 36125000, |
| 451 | .count = 8, | 451 | .count = 8, |
| 452 | .set = samsung_dtos403ih102a_set, | 452 | .set = samsung_dtos403ih102a_set, |
| @@ -465,8 +465,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { | |||
| 465 | /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */ | 465 | /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */ |
| 466 | static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { | 466 | static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { |
| 467 | .name = "Samsung TDTC9251DH0", | 467 | .name = "Samsung TDTC9251DH0", |
| 468 | .min = 48000000, | 468 | .min = 48 * MHz, |
| 469 | .max = 863000000, | 469 | .max = 863 * MHz, |
| 470 | .iffreq = 36166667, | 470 | .iffreq = 36166667, |
| 471 | .count = 3, | 471 | .count = 3, |
| 472 | .entries = { | 472 | .entries = { |
| @@ -479,8 +479,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { | |||
| 479 | /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */ | 479 | /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */ |
| 480 | static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { | 480 | static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { |
| 481 | .name = "Samsung TBDU18132", | 481 | .name = "Samsung TBDU18132", |
| 482 | .min = 950000, | 482 | .min = 950 * MHz, |
| 483 | .max = 2150000, /* guesses */ | 483 | .max = 2150 * MHz, /* guesses */ |
| 484 | .iffreq = 0, | 484 | .iffreq = 0, |
| 485 | .count = 2, | 485 | .count = 2, |
| 486 | .entries = { | 486 | .entries = { |
| @@ -500,8 +500,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { | |||
| 500 | /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */ | 500 | /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */ |
| 501 | static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { | 501 | static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { |
| 502 | .name = "Samsung TBMU24112", | 502 | .name = "Samsung TBMU24112", |
| 503 | .min = 950000, | 503 | .min = 950 * MHz, |
| 504 | .max = 2150000, /* guesses */ | 504 | .max = 2150 * MHz, /* guesses */ |
| 505 | .iffreq = 0, | 505 | .iffreq = 0, |
| 506 | .count = 2, | 506 | .count = 2, |
| 507 | .entries = { | 507 | .entries = { |
| @@ -521,8 +521,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { | |||
| 521 | * 822 - 862 1 * 0 0 1 0 0 0 0x88 */ | 521 | * 822 - 862 1 * 0 0 1 0 0 0 0x88 */ |
| 522 | static const struct dvb_pll_desc dvb_pll_alps_tdee4 = { | 522 | static const struct dvb_pll_desc dvb_pll_alps_tdee4 = { |
| 523 | .name = "ALPS TDEE4", | 523 | .name = "ALPS TDEE4", |
| 524 | .min = 47000000, | 524 | .min = 47 * MHz, |
| 525 | .max = 862000000, | 525 | .max = 862 * MHz, |
| 526 | .iffreq = 36125000, | 526 | .iffreq = 36125000, |
| 527 | .count = 4, | 527 | .count = 4, |
| 528 | .entries = { | 528 | .entries = { |
| @@ -537,8 +537,8 @@ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = { | |||
| 537 | /* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */ | 537 | /* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */ |
| 538 | static const struct dvb_pll_desc dvb_pll_tua6034_friio = { | 538 | static const struct dvb_pll_desc dvb_pll_tua6034_friio = { |
| 539 | .name = "Infineon TUA6034 ISDB-T (Friio)", | 539 | .name = "Infineon TUA6034 ISDB-T (Friio)", |
| 540 | .min = 90000000, | 540 | .min = 90 * MHz, |
| 541 | .max = 770000000, | 541 | .max = 770 * MHz, |
| 542 | .iffreq = 57000000, | 542 | .iffreq = 57000000, |
| 543 | .initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 }, | 543 | .initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 }, |
| 544 | .sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b }, | 544 | .sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b }, |
| @@ -553,8 +553,8 @@ static const struct dvb_pll_desc dvb_pll_tua6034_friio = { | |||
| 553 | /* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */ | 553 | /* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */ |
| 554 | static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = { | 554 | static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = { |
| 555 | .name = "Philips TDA6651 ISDB-T (EarthSoft PT1)", | 555 | .name = "Philips TDA6651 ISDB-T (EarthSoft PT1)", |
| 556 | .min = 90000000, | 556 | .min = 90 * MHz, |
| 557 | .max = 770000000, | 557 | .max = 770 * MHz, |
| 558 | .iffreq = 57000000, | 558 | .iffreq = 57000000, |
| 559 | .initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 }, | 559 | .initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 }, |
| 560 | .count = 10, | 560 | .count = 10, |
| @@ -610,9 +610,6 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf, | |||
| 610 | u32 div; | 610 | u32 div; |
| 611 | int i; | 611 | int i; |
| 612 | 612 | ||
| 613 | if (frequency && (frequency < desc->min || frequency > desc->max)) | ||
| 614 | return -EINVAL; | ||
| 615 | |||
| 616 | for (i = 0; i < desc->count; i++) { | 613 | for (i = 0; i < desc->count; i++) { |
| 617 | if (frequency > desc->entries[i].limit) | 614 | if (frequency > desc->entries[i].limit) |
| 618 | continue; | 615 | continue; |
| @@ -799,7 +796,6 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, | |||
| 799 | struct dvb_pll_priv *priv = NULL; | 796 | struct dvb_pll_priv *priv = NULL; |
| 800 | int ret; | 797 | int ret; |
| 801 | const struct dvb_pll_desc *desc; | 798 | const struct dvb_pll_desc *desc; |
| 802 | struct dtv_frontend_properties *c = &fe->dtv_property_cache; | ||
| 803 | 799 | ||
| 804 | b1 = kmalloc(1, GFP_KERNEL); | 800 | b1 = kmalloc(1, GFP_KERNEL); |
| 805 | if (!b1) | 801 | if (!b1) |
| @@ -845,18 +841,12 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, | |||
| 845 | 841 | ||
| 846 | strncpy(fe->ops.tuner_ops.info.name, desc->name, | 842 | strncpy(fe->ops.tuner_ops.info.name, desc->name, |
| 847 | sizeof(fe->ops.tuner_ops.info.name)); | 843 | sizeof(fe->ops.tuner_ops.info.name)); |
| 848 | switch (c->delivery_system) { | 844 | |
| 849 | case SYS_DVBS: | 845 | fe->ops.tuner_ops.info.frequency_min_hz = desc->min; |
| 850 | case SYS_DVBS2: | 846 | fe->ops.tuner_ops.info.frequency_max_hz = desc->max; |
| 851 | case SYS_TURBO: | 847 | |
| 852 | case SYS_ISDBS: | 848 | dprintk("%s tuner, frequency range: %u...%u\n", |
| 853 | fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz; | 849 | desc->name, desc->min, desc->max); |
| 854 | fe->ops.tuner_ops.info.frequency_max_hz = desc->max * kHz; | ||
| 855 | break; | ||
| 856 | default: | ||
| 857 | fe->ops.tuner_ops.info.frequency_min_hz = desc->min; | ||
| 858 | fe->ops.tuner_ops.info.frequency_max_hz = desc->max; | ||
| 859 | } | ||
| 860 | 850 | ||
| 861 | if (!desc->initdata) | 851 | if (!desc->initdata) |
| 862 | fe->ops.tuner_ops.init = NULL; | 852 | fe->ops.tuner_ops.init = NULL; |
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index bed24372e61f..b8ec88612df7 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c | |||
| @@ -381,10 +381,14 @@ static long media_device_get_topology(struct media_device *mdev, void *arg) | |||
| 381 | static long media_device_request_alloc(struct media_device *mdev, | 381 | static long media_device_request_alloc(struct media_device *mdev, |
| 382 | int *alloc_fd) | 382 | int *alloc_fd) |
| 383 | { | 383 | { |
| 384 | #ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API | ||
| 384 | if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue) | 385 | if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue) |
| 385 | return -ENOTTY; | 386 | return -ENOTTY; |
| 386 | 387 | ||
| 387 | return media_request_alloc(mdev, alloc_fd); | 388 | return media_request_alloc(mdev, alloc_fd); |
| 389 | #else | ||
| 390 | return -ENOTTY; | ||
| 391 | #endif | ||
| 388 | } | 392 | } |
| 389 | 393 | ||
| 390 | static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd) | 394 | static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd) |
diff --git a/drivers/media/media-request.c b/drivers/media/media-request.c index 4e9db1fed697..c71a34ae6383 100644 --- a/drivers/media/media-request.c +++ b/drivers/media/media-request.c | |||
| @@ -238,6 +238,9 @@ static const struct file_operations request_fops = { | |||
| 238 | .owner = THIS_MODULE, | 238 | .owner = THIS_MODULE, |
| 239 | .poll = media_request_poll, | 239 | .poll = media_request_poll, |
| 240 | .unlocked_ioctl = media_request_ioctl, | 240 | .unlocked_ioctl = media_request_ioctl, |
| 241 | #ifdef CONFIG_COMPAT | ||
| 242 | .compat_ioctl = media_request_ioctl, | ||
| 243 | #endif /* CONFIG_COMPAT */ | ||
| 241 | .release = media_request_close, | 244 | .release = media_request_close, |
| 242 | }; | 245 | }; |
| 243 | 246 | ||
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index b292cff26c86..13fb69c58967 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c | |||
| @@ -304,7 +304,8 @@ restart: | |||
| 304 | for (; p < p_out + sz; p++) { | 304 | for (; p < p_out + sz; p++) { |
| 305 | u32 copy; | 305 | u32 copy; |
| 306 | 306 | ||
| 307 | p = memchr(p, magic[ctx->comp_magic_cnt], sz); | 307 | p = memchr(p, magic[ctx->comp_magic_cnt], |
| 308 | p_out + sz - p); | ||
| 308 | if (!p) { | 309 | if (!p) { |
| 309 | ctx->comp_magic_cnt = 0; | 310 | ctx->comp_magic_cnt = 0; |
| 310 | break; | 311 | break; |
| @@ -996,11 +997,18 @@ static int vicodec_start_streaming(struct vb2_queue *q, | |||
| 996 | 997 | ||
| 997 | q_data->sequence = 0; | 998 | q_data->sequence = 0; |
| 998 | 999 | ||
| 999 | if (!V4L2_TYPE_IS_OUTPUT(q->type)) | 1000 | if (!V4L2_TYPE_IS_OUTPUT(q->type)) { |
| 1001 | if (!ctx->is_enc) { | ||
| 1002 | state->width = q_data->width; | ||
| 1003 | state->height = q_data->height; | ||
| 1004 | } | ||
| 1000 | return 0; | 1005 | return 0; |
| 1006 | } | ||
| 1001 | 1007 | ||
| 1002 | state->width = q_data->width; | 1008 | if (ctx->is_enc) { |
| 1003 | state->height = q_data->height; | 1009 | state->width = q_data->width; |
| 1010 | state->height = q_data->height; | ||
| 1011 | } | ||
| 1004 | state->ref_frame.width = state->ref_frame.height = 0; | 1012 | state->ref_frame.width = state->ref_frame.height = 0; |
| 1005 | state->ref_frame.luma = kvmalloc(size + 2 * size / chroma_div, | 1013 | state->ref_frame.luma = kvmalloc(size + 2 * size / chroma_div, |
| 1006 | GFP_KERNEL); | 1014 | GFP_KERNEL); |
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c index dcdc80e272c2..9acc709b0740 100644 --- a/drivers/media/platform/vivid/vivid-sdr-cap.c +++ b/drivers/media/platform/vivid/vivid-sdr-cap.c | |||
| @@ -276,8 +276,6 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count) | |||
| 276 | 276 | ||
| 277 | list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) { | 277 | list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) { |
| 278 | list_del(&buf->list); | 278 | list_del(&buf->list); |
| 279 | v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, | ||
| 280 | &dev->ctrl_hdl_sdr_cap); | ||
| 281 | vb2_buffer_done(&buf->vb.vb2_buf, | 279 | vb2_buffer_done(&buf->vb.vb2_buf, |
| 282 | VB2_BUF_STATE_QUEUED); | 280 | VB2_BUF_STATE_QUEUED); |
| 283 | } | 281 | } |
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c index 903cebeb5ce5..d666271bdaed 100644 --- a/drivers/media/platform/vivid/vivid-vbi-cap.c +++ b/drivers/media/platform/vivid/vivid-vbi-cap.c | |||
| @@ -204,8 +204,6 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count) | |||
| 204 | 204 | ||
| 205 | list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) { | 205 | list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) { |
| 206 | list_del(&buf->list); | 206 | list_del(&buf->list); |
| 207 | v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, | ||
| 208 | &dev->ctrl_hdl_vbi_cap); | ||
| 209 | vb2_buffer_done(&buf->vb.vb2_buf, | 207 | vb2_buffer_done(&buf->vb.vb2_buf, |
| 210 | VB2_BUF_STATE_QUEUED); | 208 | VB2_BUF_STATE_QUEUED); |
| 211 | } | 209 | } |
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c index 9357c07e30d6..cd56476902a2 100644 --- a/drivers/media/platform/vivid/vivid-vbi-out.c +++ b/drivers/media/platform/vivid/vivid-vbi-out.c | |||
| @@ -96,8 +96,6 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count) | |||
| 96 | 96 | ||
| 97 | list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) { | 97 | list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) { |
| 98 | list_del(&buf->list); | 98 | list_del(&buf->list); |
| 99 | v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, | ||
| 100 | &dev->ctrl_hdl_vbi_out); | ||
| 101 | vb2_buffer_done(&buf->vb.vb2_buf, | 99 | vb2_buffer_done(&buf->vb.vb2_buf, |
| 102 | VB2_BUF_STATE_QUEUED); | 100 | VB2_BUF_STATE_QUEUED); |
| 103 | } | 101 | } |
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index 9c8e8be81ce3..673772cd17d6 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c | |||
| @@ -243,8 +243,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) | |||
| 243 | 243 | ||
| 244 | list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) { | 244 | list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) { |
| 245 | list_del(&buf->list); | 245 | list_del(&buf->list); |
| 246 | v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, | ||
| 247 | &dev->ctrl_hdl_vid_cap); | ||
| 248 | vb2_buffer_done(&buf->vb.vb2_buf, | 246 | vb2_buffer_done(&buf->vb.vb2_buf, |
| 249 | VB2_BUF_STATE_QUEUED); | 247 | VB2_BUF_STATE_QUEUED); |
| 250 | } | 248 | } |
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index aaf13f03d5d4..628eae154ee7 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c | |||
| @@ -162,8 +162,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count) | |||
| 162 | 162 | ||
| 163 | list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) { | 163 | list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) { |
| 164 | list_del(&buf->list); | 164 | list_del(&buf->list); |
| 165 | v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, | ||
| 166 | &dev->ctrl_hdl_vid_out); | ||
| 167 | vb2_buffer_done(&buf->vb.vb2_buf, | 165 | vb2_buffer_done(&buf->vb.vb2_buf, |
| 168 | VB2_BUF_STATE_QUEUED); | 166 | VB2_BUF_STATE_QUEUED); |
| 169 | } | 167 | } |
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c index 0b18f0bd7419..8b0a26335d70 100644 --- a/drivers/media/platform/vsp1/vsp1_lif.c +++ b/drivers/media/platform/vsp1/vsp1_lif.c | |||
| @@ -95,7 +95,7 @@ static void lif_configure_stream(struct vsp1_entity *entity, | |||
| 95 | format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config, | 95 | format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config, |
| 96 | LIF_PAD_SOURCE); | 96 | LIF_PAD_SOURCE); |
| 97 | 97 | ||
| 98 | switch (entity->vsp1->version & VI6_IP_VERSION_SOC_MASK) { | 98 | switch (entity->vsp1->version & VI6_IP_VERSION_MODEL_MASK) { |
| 99 | case VI6_IP_VERSION_MODEL_VSPD_GEN2: | 99 | case VI6_IP_VERSION_MODEL_VSPD_GEN2: |
| 100 | case VI6_IP_VERSION_MODEL_VSPD_V2H: | 100 | case VI6_IP_VERSION_MODEL_VSPD_V2H: |
| 101 | hbth = 1536; | 101 | hbth = 1536; |
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index fce9d6f4b7c9..3137f5d89d80 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c | |||
| @@ -426,10 +426,10 @@ void gspca_frame_add(struct gspca_dev *gspca_dev, | |||
| 426 | 426 | ||
| 427 | /* append the packet to the frame buffer */ | 427 | /* append the packet to the frame buffer */ |
| 428 | if (len > 0) { | 428 | if (len > 0) { |
| 429 | if (gspca_dev->image_len + len > gspca_dev->pixfmt.sizeimage) { | 429 | if (gspca_dev->image_len + len > PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)) { |
| 430 | gspca_err(gspca_dev, "frame overflow %d > %d\n", | 430 | gspca_err(gspca_dev, "frame overflow %d > %d\n", |
| 431 | gspca_dev->image_len + len, | 431 | gspca_dev->image_len + len, |
| 432 | gspca_dev->pixfmt.sizeimage); | 432 | PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)); |
| 433 | packet_type = DISCARD_PACKET; | 433 | packet_type = DISCARD_PACKET; |
| 434 | } else { | 434 | } else { |
| 435 | /* !! image is NULL only when last pkt is LAST or DISCARD | 435 | /* !! image is NULL only when last pkt is LAST or DISCARD |
| @@ -1297,18 +1297,19 @@ static int gspca_queue_setup(struct vb2_queue *vq, | |||
| 1297 | unsigned int sizes[], struct device *alloc_devs[]) | 1297 | unsigned int sizes[], struct device *alloc_devs[]) |
| 1298 | { | 1298 | { |
| 1299 | struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq); | 1299 | struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq); |
| 1300 | unsigned int size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage); | ||
| 1300 | 1301 | ||
| 1301 | if (*nplanes) | 1302 | if (*nplanes) |
| 1302 | return sizes[0] < gspca_dev->pixfmt.sizeimage ? -EINVAL : 0; | 1303 | return sizes[0] < size ? -EINVAL : 0; |
| 1303 | *nplanes = 1; | 1304 | *nplanes = 1; |
| 1304 | sizes[0] = gspca_dev->pixfmt.sizeimage; | 1305 | sizes[0] = size; |
| 1305 | return 0; | 1306 | return 0; |
| 1306 | } | 1307 | } |
| 1307 | 1308 | ||
| 1308 | static int gspca_buffer_prepare(struct vb2_buffer *vb) | 1309 | static int gspca_buffer_prepare(struct vb2_buffer *vb) |
| 1309 | { | 1310 | { |
| 1310 | struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue); | 1311 | struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue); |
| 1311 | unsigned long size = gspca_dev->pixfmt.sizeimage; | 1312 | unsigned long size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage); |
| 1312 | 1313 | ||
| 1313 | if (vb2_plane_size(vb, 0) < size) { | 1314 | if (vb2_plane_size(vb, 0) < size) { |
| 1314 | gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n", | 1315 | gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n", |
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 5f2b033a7a42..10b8d94edbef 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c | |||
| @@ -1563,7 +1563,7 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx, | |||
| 1563 | u64 offset; | 1563 | u64 offset; |
| 1564 | s64 val; | 1564 | s64 val; |
| 1565 | 1565 | ||
| 1566 | switch (ctrl->type) { | 1566 | switch ((u32)ctrl->type) { |
| 1567 | case V4L2_CTRL_TYPE_INTEGER: | 1567 | case V4L2_CTRL_TYPE_INTEGER: |
| 1568 | return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl); | 1568 | return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl); |
| 1569 | case V4L2_CTRL_TYPE_INTEGER64: | 1569 | case V4L2_CTRL_TYPE_INTEGER64: |
| @@ -2232,7 +2232,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, | |||
| 2232 | is_array = nr_of_dims > 0; | 2232 | is_array = nr_of_dims > 0; |
| 2233 | 2233 | ||
| 2234 | /* Prefill elem_size for all types handled by std_type_ops */ | 2234 | /* Prefill elem_size for all types handled by std_type_ops */ |
| 2235 | switch (type) { | 2235 | switch ((u32)type) { |
| 2236 | case V4L2_CTRL_TYPE_INTEGER64: | 2236 | case V4L2_CTRL_TYPE_INTEGER64: |
| 2237 | elem_size = sizeof(s64); | 2237 | elem_size = sizeof(s64); |
| 2238 | break; | 2238 | break; |
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index 8f9d6964173e..b99a194ce5a4 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c | |||
| @@ -263,6 +263,11 @@ static const struct file_operations fops = { | |||
| 263 | #endif | 263 | #endif |
| 264 | }; | 264 | }; |
| 265 | 265 | ||
| 266 | static void cros_ec_class_release(struct device *dev) | ||
| 267 | { | ||
| 268 | kfree(to_cros_ec_dev(dev)); | ||
| 269 | } | ||
| 270 | |||
| 266 | static void cros_ec_sensors_register(struct cros_ec_dev *ec) | 271 | static void cros_ec_sensors_register(struct cros_ec_dev *ec) |
| 267 | { | 272 | { |
| 268 | /* | 273 | /* |
| @@ -395,7 +400,7 @@ static int ec_device_probe(struct platform_device *pdev) | |||
| 395 | int retval = -ENOMEM; | 400 | int retval = -ENOMEM; |
| 396 | struct device *dev = &pdev->dev; | 401 | struct device *dev = &pdev->dev; |
| 397 | struct cros_ec_platform *ec_platform = dev_get_platdata(dev); | 402 | struct cros_ec_platform *ec_platform = dev_get_platdata(dev); |
| 398 | struct cros_ec_dev *ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL); | 403 | struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL); |
| 399 | 404 | ||
| 400 | if (!ec) | 405 | if (!ec) |
| 401 | return retval; | 406 | return retval; |
| @@ -417,6 +422,7 @@ static int ec_device_probe(struct platform_device *pdev) | |||
| 417 | ec->class_dev.devt = MKDEV(ec_major, pdev->id); | 422 | ec->class_dev.devt = MKDEV(ec_major, pdev->id); |
| 418 | ec->class_dev.class = &cros_class; | 423 | ec->class_dev.class = &cros_class; |
| 419 | ec->class_dev.parent = dev; | 424 | ec->class_dev.parent = dev; |
| 425 | ec->class_dev.release = cros_ec_class_release; | ||
| 420 | 426 | ||
| 421 | retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name); | 427 | retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name); |
| 422 | if (retval) { | 428 | if (retval) { |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index c35b5b08bb33..111934838da2 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
| @@ -472,7 +472,7 @@ out: | |||
| 472 | static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, | 472 | static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, |
| 473 | struct mmc_blk_ioc_data *idata) | 473 | struct mmc_blk_ioc_data *idata) |
| 474 | { | 474 | { |
| 475 | struct mmc_command cmd = {}; | 475 | struct mmc_command cmd = {}, sbc = {}; |
| 476 | struct mmc_data data = {}; | 476 | struct mmc_data data = {}; |
| 477 | struct mmc_request mrq = {}; | 477 | struct mmc_request mrq = {}; |
| 478 | struct scatterlist sg; | 478 | struct scatterlist sg; |
| @@ -550,10 +550,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, | |||
| 550 | } | 550 | } |
| 551 | 551 | ||
| 552 | if (idata->rpmb) { | 552 | if (idata->rpmb) { |
| 553 | err = mmc_set_blockcount(card, data.blocks, | 553 | sbc.opcode = MMC_SET_BLOCK_COUNT; |
| 554 | idata->ic.write_flag & (1 << 31)); | 554 | /* |
| 555 | if (err) | 555 | * We don't do any blockcount validation because the max size |
| 556 | return err; | 556 | * may be increased by a future standard. We just copy the |
| 557 | * 'Reliable Write' bit here. | ||
| 558 | */ | ||
| 559 | sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); | ||
| 560 | sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; | ||
| 561 | mrq.sbc = &sbc; | ||
| 557 | } | 562 | } |
| 558 | 563 | ||
| 559 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && | 564 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && |
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index adf32682f27a..c60a7625b1fa 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c | |||
| @@ -104,6 +104,7 @@ struct mmc_omap_slot { | |||
| 104 | unsigned int vdd; | 104 | unsigned int vdd; |
| 105 | u16 saved_con; | 105 | u16 saved_con; |
| 106 | u16 bus_mode; | 106 | u16 bus_mode; |
| 107 | u16 power_mode; | ||
| 107 | unsigned int fclk_freq; | 108 | unsigned int fclk_freq; |
| 108 | 109 | ||
| 109 | struct tasklet_struct cover_tasklet; | 110 | struct tasklet_struct cover_tasklet; |
| @@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 1157 | struct mmc_omap_slot *slot = mmc_priv(mmc); | 1158 | struct mmc_omap_slot *slot = mmc_priv(mmc); |
| 1158 | struct mmc_omap_host *host = slot->host; | 1159 | struct mmc_omap_host *host = slot->host; |
| 1159 | int i, dsor; | 1160 | int i, dsor; |
| 1160 | int clk_enabled; | 1161 | int clk_enabled, init_stream; |
| 1161 | 1162 | ||
| 1162 | mmc_omap_select_slot(slot, 0); | 1163 | mmc_omap_select_slot(slot, 0); |
| 1163 | 1164 | ||
| @@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 1167 | slot->vdd = ios->vdd; | 1168 | slot->vdd = ios->vdd; |
| 1168 | 1169 | ||
| 1169 | clk_enabled = 0; | 1170 | clk_enabled = 0; |
| 1171 | init_stream = 0; | ||
| 1170 | switch (ios->power_mode) { | 1172 | switch (ios->power_mode) { |
| 1171 | case MMC_POWER_OFF: | 1173 | case MMC_POWER_OFF: |
| 1172 | mmc_omap_set_power(slot, 0, ios->vdd); | 1174 | mmc_omap_set_power(slot, 0, ios->vdd); |
| @@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 1174 | case MMC_POWER_UP: | 1176 | case MMC_POWER_UP: |
| 1175 | /* Cannot touch dsor yet, just power up MMC */ | 1177 | /* Cannot touch dsor yet, just power up MMC */ |
| 1176 | mmc_omap_set_power(slot, 1, ios->vdd); | 1178 | mmc_omap_set_power(slot, 1, ios->vdd); |
| 1179 | slot->power_mode = ios->power_mode; | ||
| 1177 | goto exit; | 1180 | goto exit; |
| 1178 | case MMC_POWER_ON: | 1181 | case MMC_POWER_ON: |
| 1179 | mmc_omap_fclk_enable(host, 1); | 1182 | mmc_omap_fclk_enable(host, 1); |
| 1180 | clk_enabled = 1; | 1183 | clk_enabled = 1; |
| 1181 | dsor |= 1 << 11; | 1184 | dsor |= 1 << 11; |
| 1185 | if (slot->power_mode != MMC_POWER_ON) | ||
| 1186 | init_stream = 1; | ||
| 1182 | break; | 1187 | break; |
| 1183 | } | 1188 | } |
| 1189 | slot->power_mode = ios->power_mode; | ||
| 1184 | 1190 | ||
| 1185 | if (slot->bus_mode != ios->bus_mode) { | 1191 | if (slot->bus_mode != ios->bus_mode) { |
| 1186 | if (slot->pdata->set_bus_mode != NULL) | 1192 | if (slot->pdata->set_bus_mode != NULL) |
| @@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 1196 | for (i = 0; i < 2; i++) | 1202 | for (i = 0; i < 2; i++) |
| 1197 | OMAP_MMC_WRITE(host, CON, dsor); | 1203 | OMAP_MMC_WRITE(host, CON, dsor); |
| 1198 | slot->saved_con = dsor; | 1204 | slot->saved_con = dsor; |
| 1199 | if (ios->power_mode == MMC_POWER_ON) { | 1205 | if (init_stream) { |
| 1200 | /* worst case at 400kHz, 80 cycles makes 200 microsecs */ | 1206 | /* worst case at 400kHz, 80 cycles makes 200 microsecs */ |
| 1201 | int usecs = 250; | 1207 | int usecs = 250; |
| 1202 | 1208 | ||
| @@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) | |||
| 1234 | slot->host = host; | 1240 | slot->host = host; |
| 1235 | slot->mmc = mmc; | 1241 | slot->mmc = mmc; |
| 1236 | slot->id = id; | 1242 | slot->id = id; |
| 1243 | slot->power_mode = MMC_POWER_UNDEFINED; | ||
| 1237 | slot->pdata = &host->pdata->slots[id]; | 1244 | slot->pdata = &host->pdata->slots[id]; |
| 1238 | 1245 | ||
| 1239 | host->slots[id] = slot; | 1246 | host->slots[id] = slot; |
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 88347ce78f23..d264391616f9 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c | |||
| @@ -288,9 +288,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
| 288 | struct device *dev = omap_host->dev; | 288 | struct device *dev = omap_host->dev; |
| 289 | struct mmc_ios *ios = &mmc->ios; | 289 | struct mmc_ios *ios = &mmc->ios; |
| 290 | u32 start_window = 0, max_window = 0; | 290 | u32 start_window = 0, max_window = 0; |
| 291 | bool dcrc_was_enabled = false; | ||
| 291 | u8 cur_match, prev_match = 0; | 292 | u8 cur_match, prev_match = 0; |
| 292 | u32 length = 0, max_len = 0; | 293 | u32 length = 0, max_len = 0; |
| 293 | u32 ier = host->ier; | ||
| 294 | u32 phase_delay = 0; | 294 | u32 phase_delay = 0; |
| 295 | int ret = 0; | 295 | int ret = 0; |
| 296 | u32 reg; | 296 | u32 reg; |
| @@ -317,9 +317,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
| 317 | * during the tuning procedure. So disable it during the | 317 | * during the tuning procedure. So disable it during the |
| 318 | * tuning procedure. | 318 | * tuning procedure. |
| 319 | */ | 319 | */ |
| 320 | ier &= ~SDHCI_INT_DATA_CRC; | 320 | if (host->ier & SDHCI_INT_DATA_CRC) { |
| 321 | sdhci_writel(host, ier, SDHCI_INT_ENABLE); | 321 | host->ier &= ~SDHCI_INT_DATA_CRC; |
| 322 | sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); | 322 | dcrc_was_enabled = true; |
| 323 | } | ||
| 323 | 324 | ||
| 324 | while (phase_delay <= MAX_PHASE_DELAY) { | 325 | while (phase_delay <= MAX_PHASE_DELAY) { |
| 325 | sdhci_omap_set_dll(omap_host, phase_delay); | 326 | sdhci_omap_set_dll(omap_host, phase_delay); |
| @@ -366,6 +367,9 @@ tuning_error: | |||
| 366 | 367 | ||
| 367 | ret: | 368 | ret: |
| 368 | sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); | 369 | sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); |
| 370 | /* Reenable forbidden interrupt */ | ||
| 371 | if (dcrc_was_enabled) | ||
| 372 | host->ier |= SDHCI_INT_DATA_CRC; | ||
| 369 | sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); | 373 | sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); |
| 370 | sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); | 374 | sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); |
| 371 | return ret; | 375 | return ret; |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 99bdae53fa2e..451b08a818a9 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
| @@ -216,8 +216,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask) | |||
| 216 | timeout = ktime_add_ms(ktime_get(), 100); | 216 | timeout = ktime_add_ms(ktime_get(), 100); |
| 217 | 217 | ||
| 218 | /* hw clears the bit when it's done */ | 218 | /* hw clears the bit when it's done */ |
| 219 | while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { | 219 | while (1) { |
| 220 | if (ktime_after(ktime_get(), timeout)) { | 220 | bool timedout = ktime_after(ktime_get(), timeout); |
| 221 | |||
| 222 | if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) | ||
| 223 | break; | ||
| 224 | if (timedout) { | ||
| 221 | pr_err("%s: Reset 0x%x never completed.\n", | 225 | pr_err("%s: Reset 0x%x never completed.\n", |
| 222 | mmc_hostname(host->mmc), (int)mask); | 226 | mmc_hostname(host->mmc), (int)mask); |
| 223 | sdhci_dumpregs(host); | 227 | sdhci_dumpregs(host); |
| @@ -1608,9 +1612,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk) | |||
| 1608 | 1612 | ||
| 1609 | /* Wait max 20 ms */ | 1613 | /* Wait max 20 ms */ |
| 1610 | timeout = ktime_add_ms(ktime_get(), 20); | 1614 | timeout = ktime_add_ms(ktime_get(), 20); |
| 1611 | while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) | 1615 | while (1) { |
| 1612 | & SDHCI_CLOCK_INT_STABLE)) { | 1616 | bool timedout = ktime_after(ktime_get(), timeout); |
| 1613 | if (ktime_after(ktime_get(), timeout)) { | 1617 | |
| 1618 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | ||
| 1619 | if (clk & SDHCI_CLOCK_INT_STABLE) | ||
| 1620 | break; | ||
| 1621 | if (timedout) { | ||
| 1614 | pr_err("%s: Internal clock never stabilised.\n", | 1622 | pr_err("%s: Internal clock never stabilised.\n", |
| 1615 | mmc_hostname(host->mmc)); | 1623 | mmc_hostname(host->mmc)); |
| 1616 | sdhci_dumpregs(host); | 1624 | sdhci_dumpregs(host); |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index f43fb2f958a5..93dfcef8afc4 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
| @@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave) | |||
| 2086 | aggregator->aggregator_identifier); | 2086 | aggregator->aggregator_identifier); |
| 2087 | 2087 | ||
| 2088 | /* Tell the partner that this port is not suitable for aggregation */ | 2088 | /* Tell the partner that this port is not suitable for aggregation */ |
| 2089 | port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION; | ||
| 2090 | port->actor_oper_port_state &= ~AD_STATE_COLLECTING; | ||
| 2091 | port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING; | ||
| 2089 | port->actor_oper_port_state &= ~AD_STATE_AGGREGATION; | 2092 | port->actor_oper_port_state &= ~AD_STATE_AGGREGATION; |
| 2090 | __update_lacpdu_from_port(port); | 2093 | __update_lacpdu_from_port(port); |
| 2091 | ad_lacpdu_send(port); | 2094 | ad_lacpdu_send(port); |
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 65f10fec25b3..0b3e51f248c2 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c | |||
| @@ -116,8 +116,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) | |||
| 116 | /* Reset the switch. */ | 116 | /* Reset the switch. */ |
| 117 | REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, | 117 | REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, |
| 118 | GLOBAL_ATU_CONTROL_SWRESET | | 118 | GLOBAL_ATU_CONTROL_SWRESET | |
| 119 | GLOBAL_ATU_CONTROL_ATUSIZE_1024 | | 119 | GLOBAL_ATU_CONTROL_LEARNDIS); |
| 120 | GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); | ||
| 121 | 120 | ||
| 122 | /* Wait up to one second for reset to complete. */ | 121 | /* Wait up to one second for reset to complete. */ |
| 123 | timeout = jiffies + 1 * HZ; | 122 | timeout = jiffies + 1 * HZ; |
| @@ -142,13 +141,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds) | |||
| 142 | */ | 141 | */ |
| 143 | REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536); | 142 | REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536); |
| 144 | 143 | ||
| 145 | /* Enable automatic address learning, set the address | 144 | /* Disable automatic address learning. |
| 146 | * database size to 1024 entries, and set the default aging | ||
| 147 | * time to 5 minutes. | ||
| 148 | */ | 145 | */ |
| 149 | REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, | 146 | REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, |
| 150 | GLOBAL_ATU_CONTROL_ATUSIZE_1024 | | 147 | GLOBAL_ATU_CONTROL_LEARNDIS); |
| 151 | GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); | ||
| 152 | 148 | ||
| 153 | return 0; | 149 | return 0; |
| 154 | } | 150 | } |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index f02592f43fe3..a7e853fa43c2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | |||
| @@ -674,7 +674,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, | |||
| 674 | 674 | ||
| 675 | rx_stat = (0x0000003CU & rxd_wb->status) >> 2; | 675 | rx_stat = (0x0000003CU & rxd_wb->status) >> 2; |
| 676 | 676 | ||
| 677 | is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); | 677 | is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U; |
| 678 | 678 | ||
| 679 | pkt_type = 0xFFU & (rxd_wb->type >> 4); | 679 | pkt_type = 0xFFU & (rxd_wb->type >> 4); |
| 680 | 680 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d4c300117529..5d21c14853ac 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -5162,6 +5162,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) | |||
| 5162 | cp = le16_to_cpu(resp->alloc_cmpl_rings); | 5162 | cp = le16_to_cpu(resp->alloc_cmpl_rings); |
| 5163 | stats = le16_to_cpu(resp->alloc_stat_ctx); | 5163 | stats = le16_to_cpu(resp->alloc_stat_ctx); |
| 5164 | cp = min_t(u16, cp, stats); | 5164 | cp = min_t(u16, cp, stats); |
| 5165 | hw_resc->resv_irqs = cp; | ||
| 5165 | if (bp->flags & BNXT_FLAG_CHIP_P5) { | 5166 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
| 5166 | int rx = hw_resc->resv_rx_rings; | 5167 | int rx = hw_resc->resv_rx_rings; |
| 5167 | int tx = hw_resc->resv_tx_rings; | 5168 | int tx = hw_resc->resv_tx_rings; |
| @@ -5175,7 +5176,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) | |||
| 5175 | hw_resc->resv_rx_rings = rx; | 5176 | hw_resc->resv_rx_rings = rx; |
| 5176 | hw_resc->resv_tx_rings = tx; | 5177 | hw_resc->resv_tx_rings = tx; |
| 5177 | } | 5178 | } |
| 5178 | cp = le16_to_cpu(resp->alloc_msix); | 5179 | hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); |
| 5179 | hw_resc->resv_hw_ring_grps = rx; | 5180 | hw_resc->resv_hw_ring_grps = rx; |
| 5180 | } | 5181 | } |
| 5181 | hw_resc->resv_cp_rings = cp; | 5182 | hw_resc->resv_cp_rings = cp; |
| @@ -5353,7 +5354,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, | |||
| 5353 | return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); | 5354 | return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); |
| 5354 | } | 5355 | } |
| 5355 | 5356 | ||
| 5356 | static int bnxt_cp_rings_in_use(struct bnxt *bp) | 5357 | static int bnxt_nq_rings_in_use(struct bnxt *bp) |
| 5357 | { | 5358 | { |
| 5358 | int cp = bp->cp_nr_rings; | 5359 | int cp = bp->cp_nr_rings; |
| 5359 | int ulp_msix, ulp_base; | 5360 | int ulp_msix, ulp_base; |
| @@ -5368,10 +5369,22 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp) | |||
| 5368 | return cp; | 5369 | return cp; |
| 5369 | } | 5370 | } |
| 5370 | 5371 | ||
| 5372 | static int bnxt_cp_rings_in_use(struct bnxt *bp) | ||
| 5373 | { | ||
| 5374 | int cp; | ||
| 5375 | |||
| 5376 | if (!(bp->flags & BNXT_FLAG_CHIP_P5)) | ||
| 5377 | return bnxt_nq_rings_in_use(bp); | ||
| 5378 | |||
| 5379 | cp = bp->tx_nr_rings + bp->rx_nr_rings; | ||
| 5380 | return cp; | ||
| 5381 | } | ||
| 5382 | |||
| 5371 | static bool bnxt_need_reserve_rings(struct bnxt *bp) | 5383 | static bool bnxt_need_reserve_rings(struct bnxt *bp) |
| 5372 | { | 5384 | { |
| 5373 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; | 5385 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
| 5374 | int cp = bnxt_cp_rings_in_use(bp); | 5386 | int cp = bnxt_cp_rings_in_use(bp); |
| 5387 | int nq = bnxt_nq_rings_in_use(bp); | ||
| 5375 | int rx = bp->rx_nr_rings; | 5388 | int rx = bp->rx_nr_rings; |
| 5376 | int vnic = 1, grp = rx; | 5389 | int vnic = 1, grp = rx; |
| 5377 | 5390 | ||
| @@ -5387,7 +5400,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) | |||
| 5387 | rx <<= 1; | 5400 | rx <<= 1; |
| 5388 | if (BNXT_NEW_RM(bp) && | 5401 | if (BNXT_NEW_RM(bp) && |
| 5389 | (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || | 5402 | (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || |
| 5390 | hw_resc->resv_vnics != vnic || | 5403 | hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic || |
| 5391 | (hw_resc->resv_hw_ring_grps != grp && | 5404 | (hw_resc->resv_hw_ring_grps != grp && |
| 5392 | !(bp->flags & BNXT_FLAG_CHIP_P5)))) | 5405 | !(bp->flags & BNXT_FLAG_CHIP_P5)))) |
| 5393 | return true; | 5406 | return true; |
| @@ -5397,7 +5410,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) | |||
| 5397 | static int __bnxt_reserve_rings(struct bnxt *bp) | 5410 | static int __bnxt_reserve_rings(struct bnxt *bp) |
| 5398 | { | 5411 | { |
| 5399 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; | 5412 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
| 5400 | int cp = bnxt_cp_rings_in_use(bp); | 5413 | int cp = bnxt_nq_rings_in_use(bp); |
| 5401 | int tx = bp->tx_nr_rings; | 5414 | int tx = bp->tx_nr_rings; |
| 5402 | int rx = bp->rx_nr_rings; | 5415 | int rx = bp->rx_nr_rings; |
| 5403 | int grp, rx_rings, rc; | 5416 | int grp, rx_rings, rc; |
| @@ -5422,7 +5435,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) | |||
| 5422 | tx = hw_resc->resv_tx_rings; | 5435 | tx = hw_resc->resv_tx_rings; |
| 5423 | if (BNXT_NEW_RM(bp)) { | 5436 | if (BNXT_NEW_RM(bp)) { |
| 5424 | rx = hw_resc->resv_rx_rings; | 5437 | rx = hw_resc->resv_rx_rings; |
| 5425 | cp = hw_resc->resv_cp_rings; | 5438 | cp = hw_resc->resv_irqs; |
| 5426 | grp = hw_resc->resv_hw_ring_grps; | 5439 | grp = hw_resc->resv_hw_ring_grps; |
| 5427 | vnic = hw_resc->resv_vnics; | 5440 | vnic = hw_resc->resv_vnics; |
| 5428 | } | 5441 | } |
| @@ -6292,6 +6305,8 @@ hwrm_func_qcaps_exit: | |||
| 6292 | return rc; | 6305 | return rc; |
| 6293 | } | 6306 | } |
| 6294 | 6307 | ||
| 6308 | static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); | ||
| 6309 | |||
| 6295 | static int bnxt_hwrm_func_qcaps(struct bnxt *bp) | 6310 | static int bnxt_hwrm_func_qcaps(struct bnxt *bp) |
| 6296 | { | 6311 | { |
| 6297 | int rc; | 6312 | int rc; |
| @@ -6299,6 +6314,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) | |||
| 6299 | rc = __bnxt_hwrm_func_qcaps(bp); | 6314 | rc = __bnxt_hwrm_func_qcaps(bp); |
| 6300 | if (rc) | 6315 | if (rc) |
| 6301 | return rc; | 6316 | return rc; |
| 6317 | rc = bnxt_hwrm_queue_qportcfg(bp); | ||
| 6318 | if (rc) { | ||
| 6319 | netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); | ||
| 6320 | return rc; | ||
| 6321 | } | ||
| 6302 | if (bp->hwrm_spec_code >= 0x10803) { | 6322 | if (bp->hwrm_spec_code >= 0x10803) { |
| 6303 | rc = bnxt_alloc_ctx_mem(bp); | 6323 | rc = bnxt_alloc_ctx_mem(bp); |
| 6304 | if (rc) | 6324 | if (rc) |
| @@ -7026,7 +7046,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) | |||
| 7026 | 7046 | ||
| 7027 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) | 7047 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) |
| 7028 | { | 7048 | { |
| 7029 | return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp); | 7049 | unsigned int cp = bp->hw_resc.max_cp_rings; |
| 7050 | |||
| 7051 | if (!(bp->flags & BNXT_FLAG_CHIP_P5)) | ||
| 7052 | cp -= bnxt_get_ulp_msix_num(bp); | ||
| 7053 | |||
| 7054 | return cp; | ||
| 7030 | } | 7055 | } |
| 7031 | 7056 | ||
| 7032 | static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) | 7057 | static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) |
| @@ -7048,7 +7073,9 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num) | |||
| 7048 | int total_req = bp->cp_nr_rings + num; | 7073 | int total_req = bp->cp_nr_rings + num; |
| 7049 | int max_idx, avail_msix; | 7074 | int max_idx, avail_msix; |
| 7050 | 7075 | ||
| 7051 | max_idx = min_t(int, bp->total_irqs, max_cp); | 7076 | max_idx = bp->total_irqs; |
| 7077 | if (!(bp->flags & BNXT_FLAG_CHIP_P5)) | ||
| 7078 | max_idx = min_t(int, bp->total_irqs, max_cp); | ||
| 7052 | avail_msix = max_idx - bp->cp_nr_rings; | 7079 | avail_msix = max_idx - bp->cp_nr_rings; |
| 7053 | if (!BNXT_NEW_RM(bp) || avail_msix >= num) | 7080 | if (!BNXT_NEW_RM(bp) || avail_msix >= num) |
| 7054 | return avail_msix; | 7081 | return avail_msix; |
| @@ -7066,7 +7093,7 @@ static int bnxt_get_num_msix(struct bnxt *bp) | |||
| 7066 | if (!BNXT_NEW_RM(bp)) | 7093 | if (!BNXT_NEW_RM(bp)) |
| 7067 | return bnxt_get_max_func_irqs(bp); | 7094 | return bnxt_get_max_func_irqs(bp); |
| 7068 | 7095 | ||
| 7069 | return bnxt_cp_rings_in_use(bp); | 7096 | return bnxt_nq_rings_in_use(bp); |
| 7070 | } | 7097 | } |
| 7071 | 7098 | ||
| 7072 | static int bnxt_init_msix(struct bnxt *bp) | 7099 | static int bnxt_init_msix(struct bnxt *bp) |
| @@ -7794,6 +7821,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) | |||
| 7794 | 7821 | ||
| 7795 | rc = bnxt_hwrm_func_resc_qcaps(bp, true); | 7822 | rc = bnxt_hwrm_func_resc_qcaps(bp, true); |
| 7796 | hw_resc->resv_cp_rings = 0; | 7823 | hw_resc->resv_cp_rings = 0; |
| 7824 | hw_resc->resv_irqs = 0; | ||
| 7797 | hw_resc->resv_tx_rings = 0; | 7825 | hw_resc->resv_tx_rings = 0; |
| 7798 | hw_resc->resv_rx_rings = 0; | 7826 | hw_resc->resv_rx_rings = 0; |
| 7799 | hw_resc->resv_hw_ring_grps = 0; | 7827 | hw_resc->resv_hw_ring_grps = 0; |
| @@ -9799,13 +9827,16 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, | |||
| 9799 | int *max_cp) | 9827 | int *max_cp) |
| 9800 | { | 9828 | { |
| 9801 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; | 9829 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
| 9802 | int max_ring_grps = 0; | 9830 | int max_ring_grps = 0, max_irq; |
| 9803 | 9831 | ||
| 9804 | *max_tx = hw_resc->max_tx_rings; | 9832 | *max_tx = hw_resc->max_tx_rings; |
| 9805 | *max_rx = hw_resc->max_rx_rings; | 9833 | *max_rx = hw_resc->max_rx_rings; |
| 9806 | *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), | 9834 | *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); |
| 9807 | hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp)); | 9835 | max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - |
| 9808 | *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); | 9836 | bnxt_get_ulp_msix_num(bp), |
| 9837 | bnxt_get_max_func_stat_ctxs(bp)); | ||
| 9838 | if (!(bp->flags & BNXT_FLAG_CHIP_P5)) | ||
| 9839 | *max_cp = min_t(int, *max_cp, max_irq); | ||
| 9809 | max_ring_grps = hw_resc->max_hw_ring_grps; | 9840 | max_ring_grps = hw_resc->max_hw_ring_grps; |
| 9810 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { | 9841 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { |
| 9811 | *max_cp -= 1; | 9842 | *max_cp -= 1; |
| @@ -9813,6 +9844,11 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, | |||
| 9813 | } | 9844 | } |
| 9814 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 9845 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
| 9815 | *max_rx >>= 1; | 9846 | *max_rx >>= 1; |
| 9847 | if (bp->flags & BNXT_FLAG_CHIP_P5) { | ||
| 9848 | bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); | ||
| 9849 | /* On P5 chips, max_cp output param should be available NQs */ | ||
| 9850 | *max_cp = max_irq; | ||
| 9851 | } | ||
| 9816 | *max_rx = min_t(int, *max_rx, max_ring_grps); | 9852 | *max_rx = min_t(int, *max_rx, max_ring_grps); |
| 9817 | } | 9853 | } |
| 9818 | 9854 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9e99d4ab3e06..3030931ccaf8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
| @@ -928,6 +928,7 @@ struct bnxt_hw_resc { | |||
| 928 | u16 min_stat_ctxs; | 928 | u16 min_stat_ctxs; |
| 929 | u16 max_stat_ctxs; | 929 | u16 max_stat_ctxs; |
| 930 | u16 max_irqs; | 930 | u16 max_irqs; |
| 931 | u16 resv_irqs; | ||
| 931 | }; | 932 | }; |
| 932 | 933 | ||
| 933 | #if defined(CONFIG_BNXT_SRIOV) | 934 | #if defined(CONFIG_BNXT_SRIOV) |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index b59b382d34f9..0a3097baafde 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | |||
| @@ -168,7 +168,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, | |||
| 168 | if (BNXT_NEW_RM(bp)) { | 168 | if (BNXT_NEW_RM(bp)) { |
| 169 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; | 169 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
| 170 | 170 | ||
| 171 | avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings; | 171 | avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings; |
| 172 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; | 172 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; |
| 173 | } | 173 | } |
| 174 | bnxt_fill_msix_vecs(bp, ent); | 174 | bnxt_fill_msix_vecs(bp, ent); |
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 4c3925af53bc..abe5d0dac851 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | |||
| @@ -111,7 +111,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { | |||
| 111 | "mac_tx_one_collision", | 111 | "mac_tx_one_collision", |
| 112 | "mac_tx_multi_collision", | 112 | "mac_tx_multi_collision", |
| 113 | "mac_tx_max_collision_fail", | 113 | "mac_tx_max_collision_fail", |
| 114 | "mac_tx_max_deferal_fail", | 114 | "mac_tx_max_deferral_fail", |
| 115 | "mac_tx_fifo_err", | 115 | "mac_tx_fifo_err", |
| 116 | "mac_tx_runts", | 116 | "mac_tx_runts", |
| 117 | 117 | ||
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index ea9859e028d4..de61060721c4 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | |||
| @@ -349,13 +349,15 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct, | |||
| 349 | struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; | 349 | struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; |
| 350 | struct sk_buff *skb = sc->ctxptr; | 350 | struct sk_buff *skb = sc->ctxptr; |
| 351 | struct net_device *ndev = skb->dev; | 351 | struct net_device *ndev = skb->dev; |
| 352 | u32 iq_no; | ||
| 352 | 353 | ||
| 353 | dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, | 354 | dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, |
| 354 | sc->datasize, DMA_TO_DEVICE); | 355 | sc->datasize, DMA_TO_DEVICE); |
| 355 | dev_kfree_skb_any(skb); | 356 | dev_kfree_skb_any(skb); |
| 357 | iq_no = sc->iq_no; | ||
| 356 | octeon_free_soft_command(oct, sc); | 358 | octeon_free_soft_command(oct, sc); |
| 357 | 359 | ||
| 358 | if (octnet_iq_is_full(oct, sc->iq_no)) | 360 | if (octnet_iq_is_full(oct, iq_no)) |
| 359 | return; | 361 | return; |
| 360 | 362 | ||
| 361 | if (netif_queue_stopped(ndev)) | 363 | if (netif_queue_stopped(ndev)) |
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index c415ac67cb7b..e80fedb27cee 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c | |||
| @@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) | |||
| 2786 | if (!muram_node) { | 2786 | if (!muram_node) { |
| 2787 | dev_err(&of_dev->dev, "%s: could not find MURAM node\n", | 2787 | dev_err(&of_dev->dev, "%s: could not find MURAM node\n", |
| 2788 | __func__); | 2788 | __func__); |
| 2789 | goto fman_node_put; | 2789 | goto fman_free; |
| 2790 | } | 2790 | } |
| 2791 | 2791 | ||
| 2792 | err = of_address_to_resource(muram_node, 0, | 2792 | err = of_address_to_resource(muram_node, 0, |
| @@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev) | |||
| 2795 | of_node_put(muram_node); | 2795 | of_node_put(muram_node); |
| 2796 | dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n", | 2796 | dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n", |
| 2797 | __func__, err); | 2797 | __func__, err); |
| 2798 | goto fman_node_put; | 2798 | goto fman_free; |
| 2799 | } | 2799 | } |
| 2800 | 2800 | ||
| 2801 | of_node_put(muram_node); | 2801 | of_node_put(muram_node); |
| 2802 | of_node_put(fm_node); | ||
| 2803 | 2802 | ||
| 2804 | err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED, | 2803 | err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED, |
| 2805 | "fman", fman); | 2804 | "fman", fman); |
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h index e2f80cca9bed..0d2de6f67676 100644 --- a/drivers/net/ethernet/ibm/emac/emac.h +++ b/drivers/net/ethernet/ibm/emac/emac.h | |||
| @@ -231,7 +231,7 @@ struct emac_regs { | |||
| 231 | #define EMAC_STACR_PHYE 0x00004000 | 231 | #define EMAC_STACR_PHYE 0x00004000 |
| 232 | #define EMAC_STACR_STAC_MASK 0x00003000 | 232 | #define EMAC_STACR_STAC_MASK 0x00003000 |
| 233 | #define EMAC_STACR_STAC_READ 0x00001000 | 233 | #define EMAC_STACR_STAC_READ 0x00001000 |
| 234 | #define EMAC_STACR_STAC_WRITE 0x00000800 | 234 | #define EMAC_STACR_STAC_WRITE 0x00002000 |
| 235 | #define EMAC_STACR_OPBC_MASK 0x00000C00 | 235 | #define EMAC_STACR_OPBC_MASK 0x00000C00 |
| 236 | #define EMAC_STACR_OPBC_50 0x00000000 | 236 | #define EMAC_STACR_OPBC_50 0x00000000 |
| 237 | #define EMAC_STACR_OPBC_66 0x00000400 | 237 | #define EMAC_STACR_OPBC_66 0x00000400 |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c0203a0d5e3b..ed50b8dee44f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -1859,7 +1859,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
| 1859 | 1859 | ||
| 1860 | if (adapter->reset_reason != VNIC_RESET_FAILOVER && | 1860 | if (adapter->reset_reason != VNIC_RESET_FAILOVER && |
| 1861 | adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) | 1861 | adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) |
| 1862 | netdev_notify_peers(netdev); | 1862 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev); |
| 1863 | 1863 | ||
| 1864 | netif_carrier_on(netdev); | 1864 | netif_carrier_on(netdev); |
| 1865 | 1865 | ||
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 7a37a37e3fb3..125ea99418df 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |||
| @@ -4375,8 +4375,27 @@ static void mvpp2_phylink_validate(struct net_device *dev, | |||
| 4375 | unsigned long *supported, | 4375 | unsigned long *supported, |
| 4376 | struct phylink_link_state *state) | 4376 | struct phylink_link_state *state) |
| 4377 | { | 4377 | { |
| 4378 | struct mvpp2_port *port = netdev_priv(dev); | ||
| 4378 | __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; | 4379 | __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
| 4379 | 4380 | ||
| 4381 | /* Invalid combinations */ | ||
| 4382 | switch (state->interface) { | ||
| 4383 | case PHY_INTERFACE_MODE_10GKR: | ||
| 4384 | case PHY_INTERFACE_MODE_XAUI: | ||
| 4385 | if (port->gop_id != 0) | ||
| 4386 | goto empty_set; | ||
| 4387 | break; | ||
| 4388 | case PHY_INTERFACE_MODE_RGMII: | ||
| 4389 | case PHY_INTERFACE_MODE_RGMII_ID: | ||
| 4390 | case PHY_INTERFACE_MODE_RGMII_RXID: | ||
| 4391 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
| 4392 | if (port->gop_id == 0) | ||
| 4393 | goto empty_set; | ||
| 4394 | break; | ||
| 4395 | default: | ||
| 4396 | break; | ||
| 4397 | } | ||
| 4398 | |||
| 4380 | phylink_set(mask, Autoneg); | 4399 | phylink_set(mask, Autoneg); |
| 4381 | phylink_set_port_modes(mask); | 4400 | phylink_set_port_modes(mask); |
| 4382 | phylink_set(mask, Pause); | 4401 | phylink_set(mask, Pause); |
| @@ -4384,6 +4403,8 @@ static void mvpp2_phylink_validate(struct net_device *dev, | |||
| 4384 | 4403 | ||
| 4385 | switch (state->interface) { | 4404 | switch (state->interface) { |
| 4386 | case PHY_INTERFACE_MODE_10GKR: | 4405 | case PHY_INTERFACE_MODE_10GKR: |
| 4406 | case PHY_INTERFACE_MODE_XAUI: | ||
| 4407 | case PHY_INTERFACE_MODE_NA: | ||
| 4387 | phylink_set(mask, 10000baseCR_Full); | 4408 | phylink_set(mask, 10000baseCR_Full); |
| 4388 | phylink_set(mask, 10000baseSR_Full); | 4409 | phylink_set(mask, 10000baseSR_Full); |
| 4389 | phylink_set(mask, 10000baseLR_Full); | 4410 | phylink_set(mask, 10000baseLR_Full); |
| @@ -4391,7 +4412,11 @@ static void mvpp2_phylink_validate(struct net_device *dev, | |||
| 4391 | phylink_set(mask, 10000baseER_Full); | 4412 | phylink_set(mask, 10000baseER_Full); |
| 4392 | phylink_set(mask, 10000baseKR_Full); | 4413 | phylink_set(mask, 10000baseKR_Full); |
| 4393 | /* Fall-through */ | 4414 | /* Fall-through */ |
| 4394 | default: | 4415 | case PHY_INTERFACE_MODE_RGMII: |
| 4416 | case PHY_INTERFACE_MODE_RGMII_ID: | ||
| 4417 | case PHY_INTERFACE_MODE_RGMII_RXID: | ||
| 4418 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
| 4419 | case PHY_INTERFACE_MODE_SGMII: | ||
| 4395 | phylink_set(mask, 10baseT_Half); | 4420 | phylink_set(mask, 10baseT_Half); |
| 4396 | phylink_set(mask, 10baseT_Full); | 4421 | phylink_set(mask, 10baseT_Full); |
| 4397 | phylink_set(mask, 100baseT_Half); | 4422 | phylink_set(mask, 100baseT_Half); |
| @@ -4403,11 +4428,18 @@ static void mvpp2_phylink_validate(struct net_device *dev, | |||
| 4403 | phylink_set(mask, 1000baseT_Full); | 4428 | phylink_set(mask, 1000baseT_Full); |
| 4404 | phylink_set(mask, 1000baseX_Full); | 4429 | phylink_set(mask, 1000baseX_Full); |
| 4405 | phylink_set(mask, 2500baseX_Full); | 4430 | phylink_set(mask, 2500baseX_Full); |
| 4431 | break; | ||
| 4432 | default: | ||
| 4433 | goto empty_set; | ||
| 4406 | } | 4434 | } |
| 4407 | 4435 | ||
| 4408 | bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); | 4436 | bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); |
| 4409 | bitmap_and(state->advertising, state->advertising, mask, | 4437 | bitmap_and(state->advertising, state->advertising, mask, |
| 4410 | __ETHTOOL_LINK_MODE_MASK_NBITS); | 4438 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
| 4439 | return; | ||
| 4440 | |||
| 4441 | empty_set: | ||
| 4442 | bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); | ||
| 4411 | } | 4443 | } |
| 4412 | 4444 | ||
| 4413 | static void mvpp22_xlg_link_state(struct mvpp2_port *port, | 4445 | static void mvpp22_xlg_link_state(struct mvpp2_port *port, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 36054e6fb9d3..f200b8c420d5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | config MLX4_EN | 5 | config MLX4_EN |
| 6 | tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" | 6 | tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" |
| 7 | depends on MAY_USE_DEVLINK | 7 | depends on MAY_USE_DEVLINK |
| 8 | depends on PCI | 8 | depends on PCI && NETDEVICES && ETHERNET && INET |
| 9 | select MLX4_CORE | 9 | select MLX4_CORE |
| 10 | imply PTP_1588_CLOCK | 10 | imply PTP_1588_CLOCK |
| 11 | ---help--- | 11 | ---help--- |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index f11b45001cad..d290f0787dfb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
| @@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, | |||
| 1084 | 1084 | ||
| 1085 | tx_pause = !!(pause->tx_pause); | 1085 | tx_pause = !!(pause->tx_pause); |
| 1086 | rx_pause = !!(pause->rx_pause); | 1086 | rx_pause = !!(pause->rx_pause); |
| 1087 | rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause); | 1087 | rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp; |
| 1088 | tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause); | 1088 | tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp; |
| 1089 | 1089 | ||
| 1090 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | 1090 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, |
| 1091 | priv->rx_skb_size + ETH_FCS_LEN, | 1091 | priv->rx_skb_size + ETH_FCS_LEN, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index b744cd49a785..6b88881b8e35 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -3493,8 +3493,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
| 3493 | dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; | 3493 | dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; |
| 3494 | } | 3494 | } |
| 3495 | 3495 | ||
| 3496 | /* MTU range: 46 - hw-specific max */ | 3496 | /* MTU range: 68 - hw-specific max */ |
| 3497 | dev->min_mtu = MLX4_EN_MIN_MTU; | 3497 | dev->min_mtu = ETH_MIN_MTU; |
| 3498 | dev->max_mtu = priv->max_mtu; | 3498 | dev->max_mtu = priv->max_mtu; |
| 3499 | 3499 | ||
| 3500 | mdev->pndev[port] = dev; | 3500 | mdev->pndev[port] = dev; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 485d856546c6..8137454e2534 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
| @@ -161,7 +161,6 @@ | |||
| 161 | #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \ | 161 | #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \ |
| 162 | ETH_HLEN + PREAMBLE_LEN) | 162 | ETH_HLEN + PREAMBLE_LEN) |
| 163 | 163 | ||
| 164 | #define MLX4_EN_MIN_MTU 46 | ||
| 165 | /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple | 164 | /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple |
| 166 | * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). | 165 | * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). |
| 167 | */ | 166 | */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 16985ca3248d..624eed345b5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -724,9 +724,9 @@ static u32 mlx5e_get_fcs(const struct sk_buff *skb) | |||
| 724 | return __get_unaligned_cpu32(fcs_bytes); | 724 | return __get_unaligned_cpu32(fcs_bytes); |
| 725 | } | 725 | } |
| 726 | 726 | ||
| 727 | static u8 get_ip_proto(struct sk_buff *skb, __be16 proto) | 727 | static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) |
| 728 | { | 728 | { |
| 729 | void *ip_p = skb->data + sizeof(struct ethhdr); | 729 | void *ip_p = skb->data + network_depth; |
| 730 | 730 | ||
| 731 | return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol : | 731 | return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol : |
| 732 | ((struct ipv6hdr *)ip_p)->nexthdr; | 732 | ((struct ipv6hdr *)ip_p)->nexthdr; |
| @@ -755,7 +755,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
| 755 | goto csum_unnecessary; | 755 | goto csum_unnecessary; |
| 756 | 756 | ||
| 757 | if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { | 757 | if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { |
| 758 | if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP)) | 758 | if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) |
| 759 | goto csum_unnecessary; | 759 | goto csum_unnecessary; |
| 760 | 760 | ||
| 761 | skb->ip_summed = CHECKSUM_COMPLETE; | 761 | skb->ip_summed = CHECKSUM_COMPLETE; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index ad06d9969bc1..5c13674439f1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | |||
| @@ -560,7 +560,7 @@ static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp, | |||
| 560 | 560 | ||
| 561 | mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr, | 561 | mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr, |
| 562 | &mc_entry); | 562 | &mc_entry); |
| 563 | if (WARN_ON(!mc_record)) | 563 | if (!mc_record) |
| 564 | return; | 564 | return; |
| 565 | 565 | ||
| 566 | mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry); | 566 | mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry); |
| @@ -647,7 +647,7 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp, | |||
| 647 | 647 | ||
| 648 | key.fid_index = mlxsw_sp_fid_index(fid); | 648 | key.fid_index = mlxsw_sp_fid_index(fid); |
| 649 | mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key); | 649 | mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key); |
| 650 | if (WARN_ON(!mc_list)) | 650 | if (!mc_list) |
| 651 | return; | 651 | return; |
| 652 | 652 | ||
| 653 | mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list); | 653 | mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 9e9bb57134f2..6ebf99cc3154 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -1275,15 +1275,12 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp, | |||
| 1275 | { | 1275 | { |
| 1276 | u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN; | 1276 | u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN; |
| 1277 | enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt; | 1277 | enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt; |
| 1278 | struct net_device *ipip_ul_dev; | ||
| 1279 | 1278 | ||
| 1280 | if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto) | 1279 | if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto) |
| 1281 | return false; | 1280 | return false; |
| 1282 | 1281 | ||
| 1283 | ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev); | ||
| 1284 | return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip, | 1282 | return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip, |
| 1285 | ul_tb_id, ipip_entry) && | 1283 | ul_tb_id, ipip_entry); |
| 1286 | (!ipip_ul_dev || ipip_ul_dev == ul_dev); | ||
| 1287 | } | 1284 | } |
| 1288 | 1285 | ||
| 1289 | /* Given decap parameters, find the corresponding IPIP entry. */ | 1286 | /* Given decap parameters, find the corresponding IPIP entry. */ |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 739a51f0a366..50080c60a279 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
| @@ -296,7 +296,13 @@ static bool | |||
| 296 | mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port * | 296 | mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port * |
| 297 | bridge_port) | 297 | bridge_port) |
| 298 | { | 298 | { |
| 299 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev); | 299 | struct net_device *dev = bridge_port->dev; |
| 300 | struct mlxsw_sp *mlxsw_sp; | ||
| 301 | |||
| 302 | if (is_vlan_dev(dev)) | ||
| 303 | mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev)); | ||
| 304 | else | ||
| 305 | mlxsw_sp = mlxsw_sp_lower_get(dev); | ||
| 300 | 306 | ||
| 301 | /* In case ports were pulled from out of a bridged LAG, then | 307 | /* In case ports were pulled from out of a bridged LAG, then |
| 302 | * it's possible the reference count isn't zero, yet the bridge | 308 | * it's possible the reference count isn't zero, yet the bridge |
| @@ -2109,7 +2115,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, | |||
| 2109 | 2115 | ||
| 2110 | vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1; | 2116 | vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1; |
| 2111 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); | 2117 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); |
| 2112 | if (WARN_ON(!mlxsw_sp_port_vlan)) | 2118 | if (!mlxsw_sp_port_vlan) |
| 2113 | return; | 2119 | return; |
| 2114 | 2120 | ||
| 2115 | mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); | 2121 | mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); |
| @@ -2134,8 +2140,10 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, | |||
| 2134 | if (!fid) | 2140 | if (!fid) |
| 2135 | return -EINVAL; | 2141 | return -EINVAL; |
| 2136 | 2142 | ||
| 2137 | if (mlxsw_sp_fid_vni_is_set(fid)) | 2143 | if (mlxsw_sp_fid_vni_is_set(fid)) { |
| 2138 | return -EINVAL; | 2144 | err = -EINVAL; |
| 2145 | goto err_vni_exists; | ||
| 2146 | } | ||
| 2139 | 2147 | ||
| 2140 | err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack); | 2148 | err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack); |
| 2141 | if (err) | 2149 | if (err) |
| @@ -2149,6 +2157,7 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, | |||
| 2149 | return 0; | 2157 | return 0; |
| 2150 | 2158 | ||
| 2151 | err_nve_fid_enable: | 2159 | err_nve_fid_enable: |
| 2160 | err_vni_exists: | ||
| 2152 | mlxsw_sp_fid_put(fid); | 2161 | mlxsw_sp_fid_put(fid); |
| 2153 | return err; | 2162 | return err; |
| 2154 | } | 2163 | } |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 29c95423ab64..2f49eb75f3cc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c | |||
| @@ -476,16 +476,16 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, | |||
| 476 | if (err) | 476 | if (err) |
| 477 | goto err_destroy_flow; | 477 | goto err_destroy_flow; |
| 478 | 478 | ||
| 479 | err = nfp_flower_xmit_flow(netdev, flow_pay, | ||
| 480 | NFP_FLOWER_CMSG_TYPE_FLOW_ADD); | ||
| 481 | if (err) | ||
| 482 | goto err_destroy_flow; | ||
| 483 | |||
| 484 | flow_pay->tc_flower_cookie = flow->cookie; | 479 | flow_pay->tc_flower_cookie = flow->cookie; |
| 485 | err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, | 480 | err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, |
| 486 | nfp_flower_table_params); | 481 | nfp_flower_table_params); |
| 487 | if (err) | 482 | if (err) |
| 488 | goto err_destroy_flow; | 483 | goto err_release_metadata; |
| 484 | |||
| 485 | err = nfp_flower_xmit_flow(netdev, flow_pay, | ||
| 486 | NFP_FLOWER_CMSG_TYPE_FLOW_ADD); | ||
| 487 | if (err) | ||
| 488 | goto err_remove_rhash; | ||
| 489 | 489 | ||
| 490 | port->tc_offload_cnt++; | 490 | port->tc_offload_cnt++; |
| 491 | 491 | ||
| @@ -494,6 +494,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, | |||
| 494 | 494 | ||
| 495 | return 0; | 495 | return 0; |
| 496 | 496 | ||
| 497 | err_remove_rhash: | ||
| 498 | WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, | ||
| 499 | &flow_pay->fl_node, | ||
| 500 | nfp_flower_table_params)); | ||
| 501 | err_release_metadata: | ||
| 502 | nfp_modify_flow_metadata(app, flow_pay); | ||
| 497 | err_destroy_flow: | 503 | err_destroy_flow: |
| 498 | kfree(flow_pay->action_data); | 504 | kfree(flow_pay->action_data); |
| 499 | kfree(flow_pay->mask_data); | 505 | kfree(flow_pay->mask_data); |
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 81045dfa1cd8..44f6e4873aad 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
| @@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) | |||
| 571 | struct cp_private *cp; | 571 | struct cp_private *cp; |
| 572 | int handled = 0; | 572 | int handled = 0; |
| 573 | u16 status; | 573 | u16 status; |
| 574 | u16 mask; | ||
| 574 | 575 | ||
| 575 | if (unlikely(dev == NULL)) | 576 | if (unlikely(dev == NULL)) |
| 576 | return IRQ_NONE; | 577 | return IRQ_NONE; |
| @@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) | |||
| 578 | 579 | ||
| 579 | spin_lock(&cp->lock); | 580 | spin_lock(&cp->lock); |
| 580 | 581 | ||
| 582 | mask = cpr16(IntrMask); | ||
| 583 | if (!mask) | ||
| 584 | goto out_unlock; | ||
| 585 | |||
| 581 | status = cpr16(IntrStatus); | 586 | status = cpr16(IntrStatus); |
| 582 | if (!status || (status == 0xFFFF)) | 587 | if (!status || (status == 0xFFFF)) |
| 583 | goto out_unlock; | 588 | goto out_unlock; |
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 6732f5cbde08..7c7cd9d94bcc 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c | |||
| @@ -185,8 +185,8 @@ | |||
| 185 | NETIF_MSG_TX_ERR) | 185 | NETIF_MSG_TX_ERR) |
| 186 | 186 | ||
| 187 | /* Parameter for descriptor */ | 187 | /* Parameter for descriptor */ |
| 188 | #define AVE_NR_TXDESC 32 /* Tx descriptor */ | 188 | #define AVE_NR_TXDESC 64 /* Tx descriptor */ |
| 189 | #define AVE_NR_RXDESC 64 /* Rx descriptor */ | 189 | #define AVE_NR_RXDESC 256 /* Rx descriptor */ |
| 190 | 190 | ||
| 191 | #define AVE_DESC_OFS_CMDSTS 0 | 191 | #define AVE_DESC_OFS_CMDSTS 0 |
| 192 | #define AVE_DESC_OFS_ADDRL 4 | 192 | #define AVE_DESC_OFS_ADDRL 4 |
| @@ -194,6 +194,7 @@ | |||
| 194 | 194 | ||
| 195 | /* Parameter for ethernet frame */ | 195 | /* Parameter for ethernet frame */ |
| 196 | #define AVE_MAX_ETHFRAME 1518 | 196 | #define AVE_MAX_ETHFRAME 1518 |
| 197 | #define AVE_FRAME_HEADROOM 2 | ||
| 197 | 198 | ||
| 198 | /* Parameter for interrupt */ | 199 | /* Parameter for interrupt */ |
| 199 | #define AVE_INTM_COUNT 20 | 200 | #define AVE_INTM_COUNT 20 |
| @@ -576,12 +577,13 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry) | |||
| 576 | 577 | ||
| 577 | skb = priv->rx.desc[entry].skbs; | 578 | skb = priv->rx.desc[entry].skbs; |
| 578 | if (!skb) { | 579 | if (!skb) { |
| 579 | skb = netdev_alloc_skb_ip_align(ndev, | 580 | skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME); |
| 580 | AVE_MAX_ETHFRAME); | ||
| 581 | if (!skb) { | 581 | if (!skb) { |
| 582 | netdev_err(ndev, "can't allocate skb for Rx\n"); | 582 | netdev_err(ndev, "can't allocate skb for Rx\n"); |
| 583 | return -ENOMEM; | 583 | return -ENOMEM; |
| 584 | } | 584 | } |
| 585 | skb->data += AVE_FRAME_HEADROOM; | ||
| 586 | skb->tail += AVE_FRAME_HEADROOM; | ||
| 585 | } | 587 | } |
| 586 | 588 | ||
| 587 | /* set disable to cmdsts */ | 589 | /* set disable to cmdsts */ |
| @@ -594,12 +596,12 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry) | |||
| 594 | * - Rx buffer begins with 2 byte headroom, and data will be put from | 596 | * - Rx buffer begins with 2 byte headroom, and data will be put from |
| 595 | * (buffer + 2). | 597 | * (buffer + 2). |
| 596 | * To satisfy this, specify the address to put back the buffer | 598 | * To satisfy this, specify the address to put back the buffer |
| 597 | * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(), | 599 | * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size |
| 598 | * and expand the map size by NET_IP_ALIGN. | 600 | * by AVE_FRAME_HEADROOM. |
| 599 | */ | 601 | */ |
| 600 | ret = ave_dma_map(ndev, &priv->rx.desc[entry], | 602 | ret = ave_dma_map(ndev, &priv->rx.desc[entry], |
| 601 | skb->data - NET_IP_ALIGN, | 603 | skb->data - AVE_FRAME_HEADROOM, |
| 602 | AVE_MAX_ETHFRAME + NET_IP_ALIGN, | 604 | AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM, |
| 603 | DMA_FROM_DEVICE, &paddr); | 605 | DMA_FROM_DEVICE, &paddr); |
| 604 | if (ret) { | 606 | if (ret) { |
| 605 | netdev_err(ndev, "can't map skb for Rx\n"); | 607 | netdev_err(ndev, "can't map skb for Rx\n"); |
| @@ -1689,9 +1691,10 @@ static int ave_probe(struct platform_device *pdev) | |||
| 1689 | pdev->name, pdev->id); | 1691 | pdev->name, pdev->id); |
| 1690 | 1692 | ||
| 1691 | /* Register as a NAPI supported driver */ | 1693 | /* Register as a NAPI supported driver */ |
| 1692 | netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc); | 1694 | netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, |
| 1695 | NAPI_POLL_WEIGHT); | ||
| 1693 | netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx, | 1696 | netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx, |
| 1694 | priv->tx.ndesc); | 1697 | NAPI_POLL_WEIGHT); |
| 1695 | 1698 | ||
| 1696 | platform_set_drvdata(pdev, ndev); | 1699 | platform_set_drvdata(pdev, ndev); |
| 1697 | 1700 | ||
| @@ -1913,5 +1916,6 @@ static struct platform_driver ave_driver = { | |||
| 1913 | }; | 1916 | }; |
| 1914 | module_platform_driver(ave_driver); | 1917 | module_platform_driver(ave_driver); |
| 1915 | 1918 | ||
| 1919 | MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>"); | ||
| 1916 | MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver"); | 1920 | MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver"); |
| 1917 | MODULE_LICENSE("GPL v2"); | 1921 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 076a8be18d67..5551fead8f66 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -2550,12 +2550,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) | |||
| 2550 | netdev_warn(priv->dev, "PTP init failed\n"); | 2550 | netdev_warn(priv->dev, "PTP init failed\n"); |
| 2551 | } | 2551 | } |
| 2552 | 2552 | ||
| 2553 | #ifdef CONFIG_DEBUG_FS | ||
| 2554 | ret = stmmac_init_fs(dev); | ||
| 2555 | if (ret < 0) | ||
| 2556 | netdev_warn(priv->dev, "%s: failed debugFS registration\n", | ||
| 2557 | __func__); | ||
| 2558 | #endif | ||
| 2559 | priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; | 2553 | priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; |
| 2560 | 2554 | ||
| 2561 | if (priv->use_riwt) { | 2555 | if (priv->use_riwt) { |
| @@ -2756,10 +2750,6 @@ static int stmmac_release(struct net_device *dev) | |||
| 2756 | 2750 | ||
| 2757 | netif_carrier_off(dev); | 2751 | netif_carrier_off(dev); |
| 2758 | 2752 | ||
| 2759 | #ifdef CONFIG_DEBUG_FS | ||
| 2760 | stmmac_exit_fs(dev); | ||
| 2761 | #endif | ||
| 2762 | |||
| 2763 | stmmac_release_ptp(priv); | 2753 | stmmac_release_ptp(priv); |
| 2764 | 2754 | ||
| 2765 | return 0; | 2755 | return 0; |
| @@ -3899,6 +3889,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) | |||
| 3899 | u32 tx_count = priv->plat->tx_queues_to_use; | 3889 | u32 tx_count = priv->plat->tx_queues_to_use; |
| 3900 | u32 queue; | 3890 | u32 queue; |
| 3901 | 3891 | ||
| 3892 | if ((dev->flags & IFF_UP) == 0) | ||
| 3893 | return 0; | ||
| 3894 | |||
| 3902 | for (queue = 0; queue < rx_count; queue++) { | 3895 | for (queue = 0; queue < rx_count; queue++) { |
| 3903 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 3896 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
| 3904 | 3897 | ||
| @@ -4397,6 +4390,13 @@ int stmmac_dvr_probe(struct device *device, | |||
| 4397 | goto error_netdev_register; | 4390 | goto error_netdev_register; |
| 4398 | } | 4391 | } |
| 4399 | 4392 | ||
| 4393 | #ifdef CONFIG_DEBUG_FS | ||
| 4394 | ret = stmmac_init_fs(ndev); | ||
| 4395 | if (ret < 0) | ||
| 4396 | netdev_warn(priv->dev, "%s: failed debugFS registration\n", | ||
| 4397 | __func__); | ||
| 4398 | #endif | ||
| 4399 | |||
| 4400 | return ret; | 4400 | return ret; |
| 4401 | 4401 | ||
| 4402 | error_netdev_register: | 4402 | error_netdev_register: |
| @@ -4432,6 +4432,9 @@ int stmmac_dvr_remove(struct device *dev) | |||
| 4432 | 4432 | ||
| 4433 | netdev_info(priv->dev, "%s: removing driver", __func__); | 4433 | netdev_info(priv->dev, "%s: removing driver", __func__); |
| 4434 | 4434 | ||
| 4435 | #ifdef CONFIG_DEBUG_FS | ||
| 4436 | stmmac_exit_fs(ndev); | ||
| 4437 | #endif | ||
| 4435 | stmmac_stop_all_dma(priv); | 4438 | stmmac_stop_all_dma(priv); |
| 4436 | 4439 | ||
| 4437 | stmmac_mac_set(priv, priv->ioaddr, false); | 4440 | stmmac_mac_set(priv, priv->ioaddr, false); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index fc8d5f1ee1ad..0da3d36b283b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -608,7 +608,7 @@ static int macvlan_open(struct net_device *dev) | |||
| 608 | goto hash_add; | 608 | goto hash_add; |
| 609 | } | 609 | } |
| 610 | 610 | ||
| 611 | err = -EBUSY; | 611 | err = -EADDRINUSE; |
| 612 | if (macvlan_addr_busy(vlan->port, dev->dev_addr)) | 612 | if (macvlan_addr_busy(vlan->port, dev->dev_addr)) |
| 613 | goto out; | 613 | goto out; |
| 614 | 614 | ||
| @@ -706,7 +706,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) | |||
| 706 | } else { | 706 | } else { |
| 707 | /* Rehash and update the device filters */ | 707 | /* Rehash and update the device filters */ |
| 708 | if (macvlan_addr_busy(vlan->port, addr)) | 708 | if (macvlan_addr_busy(vlan->port, addr)) |
| 709 | return -EBUSY; | 709 | return -EADDRINUSE; |
| 710 | 710 | ||
| 711 | if (!macvlan_passthru(port)) { | 711 | if (!macvlan_passthru(port)) { |
| 712 | err = dev_uc_add(lowerdev, addr); | 712 | err = dev_uc_add(lowerdev, addr); |
| @@ -747,6 +747,9 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p) | |||
| 747 | return dev_set_mac_address(vlan->lowerdev, addr); | 747 | return dev_set_mac_address(vlan->lowerdev, addr); |
| 748 | } | 748 | } |
| 749 | 749 | ||
| 750 | if (macvlan_addr_busy(vlan->port, addr->sa_data)) | ||
| 751 | return -EADDRINUSE; | ||
| 752 | |||
| 750 | return macvlan_sync_address(dev, addr->sa_data); | 753 | return macvlan_sync_address(dev, addr->sa_data); |
| 751 | } | 754 | } |
| 752 | 755 | ||
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 23ee3967c166..18e92c19c5ab 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -1880,20 +1880,17 @@ EXPORT_SYMBOL(genphy_loopback); | |||
| 1880 | 1880 | ||
| 1881 | static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) | 1881 | static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) |
| 1882 | { | 1882 | { |
| 1883 | phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES | | ||
| 1884 | PHY_10BT_FEATURES); | ||
| 1885 | |||
| 1886 | switch (max_speed) { | 1883 | switch (max_speed) { |
| 1887 | default: | 1884 | case SPEED_10: |
| 1888 | return -ENOTSUPP; | 1885 | phydev->supported &= ~PHY_100BT_FEATURES; |
| 1889 | case SPEED_1000: | ||
| 1890 | phydev->supported |= PHY_1000BT_FEATURES; | ||
| 1891 | /* fall through */ | 1886 | /* fall through */ |
| 1892 | case SPEED_100: | 1887 | case SPEED_100: |
| 1893 | phydev->supported |= PHY_100BT_FEATURES; | 1888 | phydev->supported &= ~PHY_1000BT_FEATURES; |
| 1894 | /* fall through */ | 1889 | break; |
| 1895 | case SPEED_10: | 1890 | case SPEED_1000: |
| 1896 | phydev->supported |= PHY_10BT_FEATURES; | 1891 | break; |
| 1892 | default: | ||
| 1893 | return -ENOTSUPP; | ||
| 1897 | } | 1894 | } |
| 1898 | 1895 | ||
| 1899 | return 0; | 1896 | return 0; |
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 83060fb349f4..ad9db652874d 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c | |||
| @@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, | |||
| 162 | /* 1000Base-PX or 1000Base-BX10 */ | 162 | /* 1000Base-PX or 1000Base-BX10 */ |
| 163 | if ((id->base.e_base_px || id->base.e_base_bx10) && | 163 | if ((id->base.e_base_px || id->base.e_base_bx10) && |
| 164 | br_min <= 1300 && br_max >= 1200) | 164 | br_min <= 1300 && br_max >= 1200) |
| 165 | phylink_set(support, 1000baseX_Full); | 165 | phylink_set(modes, 1000baseX_Full); |
| 166 | 166 | ||
| 167 | /* For active or passive cables, select the link modes | 167 | /* For active or passive cables, select the link modes |
| 168 | * based on the bit rates and the cable compliance bytes. | 168 | * based on the bit rates and the cable compliance bytes. |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index e244f5d7512a..005020042be9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -2293,9 +2293,9 @@ static void tun_setup(struct net_device *dev) | |||
| 2293 | static int tun_validate(struct nlattr *tb[], struct nlattr *data[], | 2293 | static int tun_validate(struct nlattr *tb[], struct nlattr *data[], |
| 2294 | struct netlink_ext_ack *extack) | 2294 | struct netlink_ext_ack *extack) |
| 2295 | { | 2295 | { |
| 2296 | if (!data) | 2296 | NL_SET_ERR_MSG(extack, |
| 2297 | return 0; | 2297 | "tun/tap creation via rtnetlink is not supported."); |
| 2298 | return -EINVAL; | 2298 | return -EOPNOTSUPP; |
| 2299 | } | 2299 | } |
| 2300 | 2300 | ||
| 2301 | static size_t tun_get_size(const struct net_device *dev) | 2301 | static size_t tun_get_size(const struct net_device *dev) |
| @@ -2385,6 +2385,7 @@ static int tun_xdp_one(struct tun_struct *tun, | |||
| 2385 | struct tun_file *tfile, | 2385 | struct tun_file *tfile, |
| 2386 | struct xdp_buff *xdp, int *flush) | 2386 | struct xdp_buff *xdp, int *flush) |
| 2387 | { | 2387 | { |
| 2388 | unsigned int datasize = xdp->data_end - xdp->data; | ||
| 2388 | struct tun_xdp_hdr *hdr = xdp->data_hard_start; | 2389 | struct tun_xdp_hdr *hdr = xdp->data_hard_start; |
| 2389 | struct virtio_net_hdr *gso = &hdr->gso; | 2390 | struct virtio_net_hdr *gso = &hdr->gso; |
| 2390 | struct tun_pcpu_stats *stats; | 2391 | struct tun_pcpu_stats *stats; |
| @@ -2461,7 +2462,7 @@ build: | |||
| 2461 | stats = get_cpu_ptr(tun->pcpu_stats); | 2462 | stats = get_cpu_ptr(tun->pcpu_stats); |
| 2462 | u64_stats_update_begin(&stats->syncp); | 2463 | u64_stats_update_begin(&stats->syncp); |
| 2463 | stats->rx_packets++; | 2464 | stats->rx_packets++; |
| 2464 | stats->rx_bytes += skb->len; | 2465 | stats->rx_bytes += datasize; |
| 2465 | u64_stats_update_end(&stats->syncp); | 2466 | u64_stats_update_end(&stats->syncp); |
| 2466 | put_cpu_ptr(stats); | 2467 | put_cpu_ptr(stats); |
| 2467 | 2468 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index cecfd77c9f3c..ea672145f6a6 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -365,7 +365,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) | |||
| 365 | static struct sk_buff *page_to_skb(struct virtnet_info *vi, | 365 | static struct sk_buff *page_to_skb(struct virtnet_info *vi, |
| 366 | struct receive_queue *rq, | 366 | struct receive_queue *rq, |
| 367 | struct page *page, unsigned int offset, | 367 | struct page *page, unsigned int offset, |
| 368 | unsigned int len, unsigned int truesize) | 368 | unsigned int len, unsigned int truesize, |
| 369 | bool hdr_valid) | ||
| 369 | { | 370 | { |
| 370 | struct sk_buff *skb; | 371 | struct sk_buff *skb; |
| 371 | struct virtio_net_hdr_mrg_rxbuf *hdr; | 372 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
| @@ -387,7 +388,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, | |||
| 387 | else | 388 | else |
| 388 | hdr_padded_len = sizeof(struct padded_vnet_hdr); | 389 | hdr_padded_len = sizeof(struct padded_vnet_hdr); |
| 389 | 390 | ||
| 390 | memcpy(hdr, p, hdr_len); | 391 | if (hdr_valid) |
| 392 | memcpy(hdr, p, hdr_len); | ||
| 391 | 393 | ||
| 392 | len -= hdr_len; | 394 | len -= hdr_len; |
| 393 | offset += hdr_padded_len; | 395 | offset += hdr_padded_len; |
| @@ -739,7 +741,8 @@ static struct sk_buff *receive_big(struct net_device *dev, | |||
| 739 | struct virtnet_rq_stats *stats) | 741 | struct virtnet_rq_stats *stats) |
| 740 | { | 742 | { |
| 741 | struct page *page = buf; | 743 | struct page *page = buf; |
| 742 | struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); | 744 | struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, |
| 745 | PAGE_SIZE, true); | ||
| 743 | 746 | ||
| 744 | stats->bytes += len - vi->hdr_len; | 747 | stats->bytes += len - vi->hdr_len; |
| 745 | if (unlikely(!skb)) | 748 | if (unlikely(!skb)) |
| @@ -842,7 +845,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
| 842 | rcu_read_unlock(); | 845 | rcu_read_unlock(); |
| 843 | put_page(page); | 846 | put_page(page); |
| 844 | head_skb = page_to_skb(vi, rq, xdp_page, | 847 | head_skb = page_to_skb(vi, rq, xdp_page, |
| 845 | offset, len, PAGE_SIZE); | 848 | offset, len, |
| 849 | PAGE_SIZE, false); | ||
| 846 | return head_skb; | 850 | return head_skb; |
| 847 | } | 851 | } |
| 848 | break; | 852 | break; |
| @@ -898,7 +902,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
| 898 | goto err_skb; | 902 | goto err_skb; |
| 899 | } | 903 | } |
| 900 | 904 | ||
| 901 | head_skb = page_to_skb(vi, rq, page, offset, len, truesize); | 905 | head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog); |
| 902 | curr_skb = head_skb; | 906 | curr_skb = head_skb; |
| 903 | 907 | ||
| 904 | if (unlikely(!curr_skb)) | 908 | if (unlikely(!curr_skb)) |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index aa8058264d5b..d1464e3e1be2 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2884,6 +2884,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2884 | 2884 | ||
| 2885 | wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); | 2885 | wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); |
| 2886 | 2886 | ||
| 2887 | tasklet_hrtimer_init(&data->beacon_timer, | ||
| 2888 | mac80211_hwsim_beacon, | ||
| 2889 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
| 2890 | |||
| 2887 | err = ieee80211_register_hw(hw); | 2891 | err = ieee80211_register_hw(hw); |
| 2888 | if (err < 0) { | 2892 | if (err < 0) { |
| 2889 | pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n", | 2893 | pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n", |
| @@ -2908,10 +2912,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2908 | data->debugfs, | 2912 | data->debugfs, |
| 2909 | data, &hwsim_simulate_radar); | 2913 | data, &hwsim_simulate_radar); |
| 2910 | 2914 | ||
| 2911 | tasklet_hrtimer_init(&data->beacon_timer, | ||
| 2912 | mac80211_hwsim_beacon, | ||
| 2913 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
| 2914 | |||
| 2915 | spin_lock_bh(&hwsim_radio_lock); | 2915 | spin_lock_bh(&hwsim_radio_lock); |
| 2916 | err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, | 2916 | err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, |
| 2917 | hwsim_rht_params); | 2917 | hwsim_rht_params); |
| @@ -3703,16 +3703,16 @@ static int __init init_mac80211_hwsim(void) | |||
| 3703 | if (err) | 3703 | if (err) |
| 3704 | goto out_unregister_pernet; | 3704 | goto out_unregister_pernet; |
| 3705 | 3705 | ||
| 3706 | err = hwsim_init_netlink(); | ||
| 3707 | if (err) | ||
| 3708 | goto out_unregister_driver; | ||
| 3709 | |||
| 3706 | hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); | 3710 | hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); |
| 3707 | if (IS_ERR(hwsim_class)) { | 3711 | if (IS_ERR(hwsim_class)) { |
| 3708 | err = PTR_ERR(hwsim_class); | 3712 | err = PTR_ERR(hwsim_class); |
| 3709 | goto out_unregister_driver; | 3713 | goto out_exit_netlink; |
| 3710 | } | 3714 | } |
| 3711 | 3715 | ||
| 3712 | err = hwsim_init_netlink(); | ||
| 3713 | if (err < 0) | ||
| 3714 | goto out_unregister_driver; | ||
| 3715 | |||
| 3716 | for (i = 0; i < radios; i++) { | 3716 | for (i = 0; i < radios; i++) { |
| 3717 | struct hwsim_new_radio_params param = { 0 }; | 3717 | struct hwsim_new_radio_params param = { 0 }; |
| 3718 | 3718 | ||
| @@ -3818,6 +3818,8 @@ out_free_mon: | |||
| 3818 | free_netdev(hwsim_mon); | 3818 | free_netdev(hwsim_mon); |
| 3819 | out_free_radios: | 3819 | out_free_radios: |
| 3820 | mac80211_hwsim_free(); | 3820 | mac80211_hwsim_free(); |
| 3821 | out_exit_netlink: | ||
| 3822 | hwsim_exit_netlink(); | ||
| 3821 | out_unregister_driver: | 3823 | out_unregister_driver: |
| 3822 | platform_driver_unregister(&mac80211_hwsim_driver); | 3824 | platform_driver_unregister(&mac80211_hwsim_driver); |
| 3823 | out_unregister_pernet: | 3825 | out_unregister_pernet: |
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 182258f64417..d0c621b32f72 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h | |||
| @@ -111,6 +111,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, | |||
| 111 | struct nd_mapping *nd_mapping, resource_size_t *overlap); | 111 | struct nd_mapping *nd_mapping, resource_size_t *overlap); |
| 112 | resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); | 112 | resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); |
| 113 | resource_size_t nd_region_available_dpa(struct nd_region *nd_region); | 113 | resource_size_t nd_region_available_dpa(struct nd_region *nd_region); |
| 114 | int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, | ||
| 115 | resource_size_t size); | ||
| 114 | resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, | 116 | resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, |
| 115 | struct nd_label_id *label_id); | 117 | struct nd_label_id *label_id); |
| 116 | int alias_dpa_busy(struct device *dev, void *data); | 118 | int alias_dpa_busy(struct device *dev, void *data); |
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 24c64090169e..6f22272e8d80 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c | |||
| @@ -649,14 +649,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys) | |||
| 649 | ALIGN_DOWN(phys, nd_pfn->align)); | 649 | ALIGN_DOWN(phys, nd_pfn->align)); |
| 650 | } | 650 | } |
| 651 | 651 | ||
| 652 | /* | ||
| 653 | * Check if pmem collides with 'System RAM', or other regions when | ||
| 654 | * section aligned. Trim it accordingly. | ||
| 655 | */ | ||
| 656 | static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc) | ||
| 657 | { | ||
| 658 | struct nd_namespace_common *ndns = nd_pfn->ndns; | ||
| 659 | struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); | ||
| 660 | struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent); | ||
| 661 | const resource_size_t start = nsio->res.start; | ||
| 662 | const resource_size_t end = start + resource_size(&nsio->res); | ||
| 663 | resource_size_t adjust, size; | ||
| 664 | |||
| 665 | *start_pad = 0; | ||
| 666 | *end_trunc = 0; | ||
| 667 | |||
| 668 | adjust = start - PHYS_SECTION_ALIGN_DOWN(start); | ||
| 669 | size = resource_size(&nsio->res) + adjust; | ||
| 670 | if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM, | ||
| 671 | IORES_DESC_NONE) == REGION_MIXED | ||
| 672 | || nd_region_conflict(nd_region, start - adjust, size)) | ||
| 673 | *start_pad = PHYS_SECTION_ALIGN_UP(start) - start; | ||
| 674 | |||
| 675 | /* Now check that end of the range does not collide. */ | ||
| 676 | adjust = PHYS_SECTION_ALIGN_UP(end) - end; | ||
| 677 | size = resource_size(&nsio->res) + adjust; | ||
| 678 | if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, | ||
| 679 | IORES_DESC_NONE) == REGION_MIXED | ||
| 680 | || !IS_ALIGNED(end, nd_pfn->align) | ||
| 681 | || nd_region_conflict(nd_region, start, size + adjust)) | ||
| 682 | *end_trunc = end - phys_pmem_align_down(nd_pfn, end); | ||
| 683 | } | ||
| 684 | |||
| 652 | static int nd_pfn_init(struct nd_pfn *nd_pfn) | 685 | static int nd_pfn_init(struct nd_pfn *nd_pfn) |
| 653 | { | 686 | { |
| 654 | u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0; | 687 | u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0; |
| 655 | struct nd_namespace_common *ndns = nd_pfn->ndns; | 688 | struct nd_namespace_common *ndns = nd_pfn->ndns; |
| 656 | u32 start_pad = 0, end_trunc = 0; | 689 | struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); |
| 657 | resource_size_t start, size; | 690 | resource_size_t start, size; |
| 658 | struct nd_namespace_io *nsio; | ||
| 659 | struct nd_region *nd_region; | 691 | struct nd_region *nd_region; |
| 692 | u32 start_pad, end_trunc; | ||
| 660 | struct nd_pfn_sb *pfn_sb; | 693 | struct nd_pfn_sb *pfn_sb; |
| 661 | unsigned long npfns; | 694 | unsigned long npfns; |
| 662 | phys_addr_t offset; | 695 | phys_addr_t offset; |
| @@ -688,30 +721,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) | |||
| 688 | 721 | ||
| 689 | memset(pfn_sb, 0, sizeof(*pfn_sb)); | 722 | memset(pfn_sb, 0, sizeof(*pfn_sb)); |
| 690 | 723 | ||
| 691 | /* | 724 | trim_pfn_device(nd_pfn, &start_pad, &end_trunc); |
| 692 | * Check if pmem collides with 'System RAM' when section aligned and | ||
| 693 | * trim it accordingly | ||
| 694 | */ | ||
| 695 | nsio = to_nd_namespace_io(&ndns->dev); | ||
| 696 | start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start); | ||
| 697 | size = resource_size(&nsio->res); | ||
| 698 | if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, | ||
| 699 | IORES_DESC_NONE) == REGION_MIXED) { | ||
| 700 | start = nsio->res.start; | ||
| 701 | start_pad = PHYS_SECTION_ALIGN_UP(start) - start; | ||
| 702 | } | ||
| 703 | |||
| 704 | start = nsio->res.start; | ||
| 705 | size = PHYS_SECTION_ALIGN_UP(start + size) - start; | ||
| 706 | if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, | ||
| 707 | IORES_DESC_NONE) == REGION_MIXED | ||
| 708 | || !IS_ALIGNED(start + resource_size(&nsio->res), | ||
| 709 | nd_pfn->align)) { | ||
| 710 | size = resource_size(&nsio->res); | ||
| 711 | end_trunc = start + size - phys_pmem_align_down(nd_pfn, | ||
| 712 | start + size); | ||
| 713 | } | ||
| 714 | |||
| 715 | if (start_pad + end_trunc) | 725 | if (start_pad + end_trunc) |
| 716 | dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n", | 726 | dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n", |
| 717 | dev_name(&ndns->dev), start_pad + end_trunc); | 727 | dev_name(&ndns->dev), start_pad + end_trunc); |
| @@ -722,7 +732,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) | |||
| 722 | * implementation will limit the pfns advertised through | 732 | * implementation will limit the pfns advertised through |
| 723 | * ->direct_access() to those that are included in the memmap. | 733 | * ->direct_access() to those that are included in the memmap. |
| 724 | */ | 734 | */ |
| 725 | start += start_pad; | 735 | start = nsio->res.start + start_pad; |
| 726 | size = resource_size(&nsio->res); | 736 | size = resource_size(&nsio->res); |
| 727 | npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K) | 737 | npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K) |
| 728 | / PAGE_SIZE); | 738 | / PAGE_SIZE); |
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 174a418cb171..e7377f1028ef 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c | |||
| @@ -1184,6 +1184,47 @@ int nvdimm_has_cache(struct nd_region *nd_region) | |||
| 1184 | } | 1184 | } |
| 1185 | EXPORT_SYMBOL_GPL(nvdimm_has_cache); | 1185 | EXPORT_SYMBOL_GPL(nvdimm_has_cache); |
| 1186 | 1186 | ||
| 1187 | struct conflict_context { | ||
| 1188 | struct nd_region *nd_region; | ||
| 1189 | resource_size_t start, size; | ||
| 1190 | }; | ||
| 1191 | |||
| 1192 | static int region_conflict(struct device *dev, void *data) | ||
| 1193 | { | ||
| 1194 | struct nd_region *nd_region; | ||
| 1195 | struct conflict_context *ctx = data; | ||
| 1196 | resource_size_t res_end, region_end, region_start; | ||
| 1197 | |||
| 1198 | if (!is_memory(dev)) | ||
| 1199 | return 0; | ||
| 1200 | |||
| 1201 | nd_region = to_nd_region(dev); | ||
| 1202 | if (nd_region == ctx->nd_region) | ||
| 1203 | return 0; | ||
| 1204 | |||
| 1205 | res_end = ctx->start + ctx->size; | ||
| 1206 | region_start = nd_region->ndr_start; | ||
| 1207 | region_end = region_start + nd_region->ndr_size; | ||
| 1208 | if (ctx->start >= region_start && ctx->start < region_end) | ||
| 1209 | return -EBUSY; | ||
| 1210 | if (res_end > region_start && res_end <= region_end) | ||
| 1211 | return -EBUSY; | ||
| 1212 | return 0; | ||
| 1213 | } | ||
| 1214 | |||
| 1215 | int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, | ||
| 1216 | resource_size_t size) | ||
| 1217 | { | ||
| 1218 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); | ||
| 1219 | struct conflict_context ctx = { | ||
| 1220 | .nd_region = nd_region, | ||
| 1221 | .start = start, | ||
| 1222 | .size = size, | ||
| 1223 | }; | ||
| 1224 | |||
| 1225 | return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict); | ||
| 1226 | } | ||
| 1227 | |||
| 1187 | void __exit nd_region_devs_exit(void) | 1228 | void __exit nd_region_devs_exit(void) |
| 1188 | { | 1229 | { |
| 1189 | ida_destroy(®ion_ida); | 1230 | ida_destroy(®ion_ida); |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 3cf1b773158e..962012135b62 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -831,6 +831,8 @@ static int nvme_submit_user_cmd(struct request_queue *q, | |||
| 831 | static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) | 831 | static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) |
| 832 | { | 832 | { |
| 833 | struct nvme_ctrl *ctrl = rq->end_io_data; | 833 | struct nvme_ctrl *ctrl = rq->end_io_data; |
| 834 | unsigned long flags; | ||
| 835 | bool startka = false; | ||
| 834 | 836 | ||
| 835 | blk_mq_free_request(rq); | 837 | blk_mq_free_request(rq); |
| 836 | 838 | ||
| @@ -841,7 +843,13 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) | |||
| 841 | return; | 843 | return; |
| 842 | } | 844 | } |
| 843 | 845 | ||
| 844 | schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); | 846 | spin_lock_irqsave(&ctrl->lock, flags); |
| 847 | if (ctrl->state == NVME_CTRL_LIVE || | ||
| 848 | ctrl->state == NVME_CTRL_CONNECTING) | ||
| 849 | startka = true; | ||
| 850 | spin_unlock_irqrestore(&ctrl->lock, flags); | ||
| 851 | if (startka) | ||
| 852 | schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); | ||
| 845 | } | 853 | } |
| 846 | 854 | ||
| 847 | static int nvme_keep_alive(struct nvme_ctrl *ctrl) | 855 | static int nvme_keep_alive(struct nvme_ctrl *ctrl) |
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 3f7971d3706d..583086dd9cb9 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
| @@ -529,6 +529,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) | |||
| 529 | { | 529 | { |
| 530 | struct nvmet_rdma_rsp *rsp = | 530 | struct nvmet_rdma_rsp *rsp = |
| 531 | container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); | 531 | container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); |
| 532 | struct nvmet_rdma_queue *queue = cq->cq_context; | ||
| 532 | 533 | ||
| 533 | nvmet_rdma_release_rsp(rsp); | 534 | nvmet_rdma_release_rsp(rsp); |
| 534 | 535 | ||
| @@ -536,7 +537,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) | |||
| 536 | wc->status != IB_WC_WR_FLUSH_ERR)) { | 537 | wc->status != IB_WC_WR_FLUSH_ERR)) { |
| 537 | pr_err("SEND for CQE 0x%p failed with status %s (%d).\n", | 538 | pr_err("SEND for CQE 0x%p failed with status %s (%d).\n", |
| 538 | wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); | 539 | wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); |
| 539 | nvmet_rdma_error_comp(rsp->queue); | 540 | nvmet_rdma_error_comp(queue); |
| 540 | } | 541 | } |
| 541 | } | 542 | } |
| 542 | 543 | ||
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index dcb29cb76dc6..f78860ce884b 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
| @@ -895,7 +895,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) | |||
| 895 | struct pcie_link_state *link; | 895 | struct pcie_link_state *link; |
| 896 | int blacklist = !!pcie_aspm_sanity_check(pdev); | 896 | int blacklist = !!pcie_aspm_sanity_check(pdev); |
| 897 | 897 | ||
| 898 | if (!aspm_support_enabled || aspm_disabled) | 898 | if (!aspm_support_enabled) |
| 899 | return; | 899 | return; |
| 900 | 900 | ||
| 901 | if (pdev->link_state) | 901 | if (pdev->link_state) |
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 53d449076dee..ea87d739f534 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c | |||
| @@ -191,7 +191,8 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin, | |||
| 191 | case PIN_CONFIG_BIAS_DISABLE: | 191 | case PIN_CONFIG_BIAS_DISABLE: |
| 192 | dev_dbg(pc->dev, "pin %u: disable bias\n", pin); | 192 | dev_dbg(pc->dev, "pin %u: disable bias\n", pin); |
| 193 | 193 | ||
| 194 | meson_calc_reg_and_bit(bank, pin, REG_PULL, ®, &bit); | 194 | meson_calc_reg_and_bit(bank, pin, REG_PULLEN, ®, |
| 195 | &bit); | ||
| 195 | ret = regmap_update_bits(pc->reg_pullen, reg, | 196 | ret = regmap_update_bits(pc->reg_pullen, reg, |
| 196 | BIT(bit), 0); | 197 | BIT(bit), 0); |
| 197 | if (ret) | 198 | if (ret) |
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c index 6838b38555a1..1bfb0ae6b387 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdm660.c +++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c | |||
| @@ -33,7 +33,7 @@ enum { | |||
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | 35 | ||
| 36 | #define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ | 36 | #define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ |
| 37 | { \ | 37 | { \ |
| 38 | .name = "gpio" #id, \ | 38 | .name = "gpio" #id, \ |
| 39 | .pins = gpio##id##_pins, \ | 39 | .pins = gpio##id##_pins, \ |
| @@ -51,11 +51,12 @@ enum { | |||
| 51 | msm_mux_##f9 \ | 51 | msm_mux_##f9 \ |
| 52 | }, \ | 52 | }, \ |
| 53 | .nfuncs = 10, \ | 53 | .nfuncs = 10, \ |
| 54 | .ctl_reg = base + REG_SIZE * id, \ | 54 | .ctl_reg = REG_SIZE * id, \ |
| 55 | .io_reg = base + 0x4 + REG_SIZE * id, \ | 55 | .io_reg = 0x4 + REG_SIZE * id, \ |
| 56 | .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \ | 56 | .intr_cfg_reg = 0x8 + REG_SIZE * id, \ |
| 57 | .intr_status_reg = base + 0xc + REG_SIZE * id, \ | 57 | .intr_status_reg = 0xc + REG_SIZE * id, \ |
| 58 | .intr_target_reg = base + 0x8 + REG_SIZE * id, \ | 58 | .intr_target_reg = 0x8 + REG_SIZE * id, \ |
| 59 | .tile = _tile, \ | ||
| 59 | .mux_bit = 2, \ | 60 | .mux_bit = 2, \ |
| 60 | .pull_bit = 0, \ | 61 | .pull_bit = 0, \ |
| 61 | .drv_bit = 6, \ | 62 | .drv_bit = 6, \ |
| @@ -82,6 +83,7 @@ enum { | |||
| 82 | .intr_cfg_reg = 0, \ | 83 | .intr_cfg_reg = 0, \ |
| 83 | .intr_status_reg = 0, \ | 84 | .intr_status_reg = 0, \ |
| 84 | .intr_target_reg = 0, \ | 85 | .intr_target_reg = 0, \ |
| 86 | .tile = NORTH, \ | ||
| 85 | .mux_bit = -1, \ | 87 | .mux_bit = -1, \ |
| 86 | .pull_bit = pull, \ | 88 | .pull_bit = pull, \ |
| 87 | .drv_bit = drv, \ | 89 | .drv_bit = drv, \ |
| @@ -1397,13 +1399,13 @@ static const struct msm_pingroup sdm660_groups[] = { | |||
| 1397 | PINGROUP(111, SOUTH, _, _, _, _, _, _, _, _, _), | 1399 | PINGROUP(111, SOUTH, _, _, _, _, _, _, _, _, _), |
| 1398 | PINGROUP(112, SOUTH, _, _, _, _, _, _, _, _, _), | 1400 | PINGROUP(112, SOUTH, _, _, _, _, _, _, _, _, _), |
| 1399 | PINGROUP(113, SOUTH, _, _, _, _, _, _, _, _, _), | 1401 | PINGROUP(113, SOUTH, _, _, _, _, _, _, _, _, _), |
| 1400 | SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6), | 1402 | SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6), |
| 1401 | SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3), | 1403 | SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3), |
| 1402 | SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0), | 1404 | SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0), |
| 1403 | SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6), | 1405 | SDC_QDSD_PINGROUP(sdc2_clk, 0x9b000, 14, 6), |
| 1404 | SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3), | 1406 | SDC_QDSD_PINGROUP(sdc2_cmd, 0x9b000, 11, 3), |
| 1405 | SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0), | 1407 | SDC_QDSD_PINGROUP(sdc2_data, 0x9b000, 9, 0), |
| 1406 | SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0), | 1408 | SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0), |
| 1407 | }; | 1409 | }; |
| 1408 | 1410 | ||
| 1409 | static const struct msm_pinctrl_soc_data sdm660_pinctrl = { | 1411 | static const struct msm_pinctrl_soc_data sdm660_pinctrl = { |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c index 6624499eae72..4ada80317a3b 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c | |||
| @@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = { | |||
| 568 | SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11), | 568 | SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11), |
| 569 | SUNXI_FUNCTION(0x0, "gpio_in"), | 569 | SUNXI_FUNCTION(0x0, "gpio_in"), |
| 570 | SUNXI_FUNCTION(0x1, "gpio_out"), | 570 | SUNXI_FUNCTION(0x1, "gpio_out"), |
| 571 | SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PH_EINT11 */ | 571 | SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */ |
| 572 | }; | 572 | }; |
| 573 | 573 | ||
| 574 | static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = { | 574 | static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = { |
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 97b6f197f007..c9c57b4a0b71 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c | |||
| @@ -56,6 +56,7 @@ struct virtio_ccw_device { | |||
| 56 | unsigned int revision; /* Transport revision */ | 56 | unsigned int revision; /* Transport revision */ |
| 57 | wait_queue_head_t wait_q; | 57 | wait_queue_head_t wait_q; |
| 58 | spinlock_t lock; | 58 | spinlock_t lock; |
| 59 | struct mutex io_lock; /* Serializes I/O requests */ | ||
| 59 | struct list_head virtqueues; | 60 | struct list_head virtqueues; |
| 60 | unsigned long indicators; | 61 | unsigned long indicators; |
| 61 | unsigned long indicators2; | 62 | unsigned long indicators2; |
| @@ -296,6 +297,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev, | |||
| 296 | unsigned long flags; | 297 | unsigned long flags; |
| 297 | int flag = intparm & VIRTIO_CCW_INTPARM_MASK; | 298 | int flag = intparm & VIRTIO_CCW_INTPARM_MASK; |
| 298 | 299 | ||
| 300 | mutex_lock(&vcdev->io_lock); | ||
| 299 | do { | 301 | do { |
| 300 | spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); | 302 | spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); |
| 301 | ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0); | 303 | ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0); |
| @@ -308,7 +310,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev, | |||
| 308 | cpu_relax(); | 310 | cpu_relax(); |
| 309 | } while (ret == -EBUSY); | 311 | } while (ret == -EBUSY); |
| 310 | wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0); | 312 | wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0); |
| 311 | return ret ? ret : vcdev->err; | 313 | ret = ret ? ret : vcdev->err; |
| 314 | mutex_unlock(&vcdev->io_lock); | ||
| 315 | return ret; | ||
| 312 | } | 316 | } |
| 313 | 317 | ||
| 314 | static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, | 318 | static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, |
| @@ -828,6 +832,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, | |||
| 828 | int ret; | 832 | int ret; |
| 829 | struct ccw1 *ccw; | 833 | struct ccw1 *ccw; |
| 830 | void *config_area; | 834 | void *config_area; |
| 835 | unsigned long flags; | ||
| 831 | 836 | ||
| 832 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); | 837 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); |
| 833 | if (!ccw) | 838 | if (!ccw) |
| @@ -846,11 +851,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, | |||
| 846 | if (ret) | 851 | if (ret) |
| 847 | goto out_free; | 852 | goto out_free; |
| 848 | 853 | ||
| 854 | spin_lock_irqsave(&vcdev->lock, flags); | ||
| 849 | memcpy(vcdev->config, config_area, offset + len); | 855 | memcpy(vcdev->config, config_area, offset + len); |
| 850 | if (buf) | ||
| 851 | memcpy(buf, &vcdev->config[offset], len); | ||
| 852 | if (vcdev->config_ready < offset + len) | 856 | if (vcdev->config_ready < offset + len) |
| 853 | vcdev->config_ready = offset + len; | 857 | vcdev->config_ready = offset + len; |
| 858 | spin_unlock_irqrestore(&vcdev->lock, flags); | ||
| 859 | if (buf) | ||
| 860 | memcpy(buf, config_area + offset, len); | ||
| 854 | 861 | ||
| 855 | out_free: | 862 | out_free: |
| 856 | kfree(config_area); | 863 | kfree(config_area); |
| @@ -864,6 +871,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, | |||
| 864 | struct virtio_ccw_device *vcdev = to_vc_device(vdev); | 871 | struct virtio_ccw_device *vcdev = to_vc_device(vdev); |
| 865 | struct ccw1 *ccw; | 872 | struct ccw1 *ccw; |
| 866 | void *config_area; | 873 | void *config_area; |
| 874 | unsigned long flags; | ||
| 867 | 875 | ||
| 868 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); | 876 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); |
| 869 | if (!ccw) | 877 | if (!ccw) |
| @@ -876,9 +884,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, | |||
| 876 | /* Make sure we don't overwrite fields. */ | 884 | /* Make sure we don't overwrite fields. */ |
| 877 | if (vcdev->config_ready < offset) | 885 | if (vcdev->config_ready < offset) |
| 878 | virtio_ccw_get_config(vdev, 0, NULL, offset); | 886 | virtio_ccw_get_config(vdev, 0, NULL, offset); |
| 887 | spin_lock_irqsave(&vcdev->lock, flags); | ||
| 879 | memcpy(&vcdev->config[offset], buf, len); | 888 | memcpy(&vcdev->config[offset], buf, len); |
| 880 | /* Write the config area to the host. */ | 889 | /* Write the config area to the host. */ |
| 881 | memcpy(config_area, vcdev->config, sizeof(vcdev->config)); | 890 | memcpy(config_area, vcdev->config, sizeof(vcdev->config)); |
| 891 | spin_unlock_irqrestore(&vcdev->lock, flags); | ||
| 882 | ccw->cmd_code = CCW_CMD_WRITE_CONF; | 892 | ccw->cmd_code = CCW_CMD_WRITE_CONF; |
| 883 | ccw->flags = 0; | 893 | ccw->flags = 0; |
| 884 | ccw->count = offset + len; | 894 | ccw->count = offset + len; |
| @@ -1247,6 +1257,7 @@ static int virtio_ccw_online(struct ccw_device *cdev) | |||
| 1247 | init_waitqueue_head(&vcdev->wait_q); | 1257 | init_waitqueue_head(&vcdev->wait_q); |
| 1248 | INIT_LIST_HEAD(&vcdev->virtqueues); | 1258 | INIT_LIST_HEAD(&vcdev->virtqueues); |
| 1249 | spin_lock_init(&vcdev->lock); | 1259 | spin_lock_init(&vcdev->lock); |
| 1260 | mutex_init(&vcdev->io_lock); | ||
| 1250 | 1261 | ||
| 1251 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 1262 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); |
| 1252 | dev_set_drvdata(&cdev->dev, vcdev); | 1263 | dev_set_drvdata(&cdev->dev, vcdev); |
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 5c8ed7350a04..a36e4cf1841d 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c | |||
| @@ -220,6 +220,7 @@ static int d7s_probe(struct platform_device *op) | |||
| 220 | dev_set_drvdata(&op->dev, p); | 220 | dev_set_drvdata(&op->dev, p); |
| 221 | d7s_device = p; | 221 | d7s_device = p; |
| 222 | err = 0; | 222 | err = 0; |
| 223 | of_node_put(opts); | ||
| 223 | 224 | ||
| 224 | out: | 225 | out: |
| 225 | return err; | 226 | return err; |
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 56e962a01493..b8481927bfe4 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c | |||
| @@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp, | |||
| 910 | for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) { | 910 | for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) { |
| 911 | pchild->mon_type[len] = ENVCTRL_NOMON; | 911 | pchild->mon_type[len] = ENVCTRL_NOMON; |
| 912 | } | 912 | } |
| 913 | of_node_put(root_node); | ||
| 913 | return; | 914 | return; |
| 914 | } | 915 | } |
| 916 | of_node_put(root_node); | ||
| 915 | } | 917 | } |
| 916 | 918 | ||
| 917 | /* Get the monitor channels. */ | 919 | /* Get the monitor channels. */ |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 93c66ebad907..f78d2e5c1471 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
| @@ -2416,8 +2416,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc) | |||
| 2416 | failed: | 2416 | failed: |
| 2417 | ISCSI_DBG_EH(session, | 2417 | ISCSI_DBG_EH(session, |
| 2418 | "failing session reset: Could not log back into " | 2418 | "failing session reset: Could not log back into " |
| 2419 | "%s, %s [age %d]\n", session->targetname, | 2419 | "%s [age %d]\n", session->targetname, |
| 2420 | conn->persistent_address, session->age); | 2420 | session->age); |
| 2421 | spin_unlock_bh(&session->frwd_lock); | 2421 | spin_unlock_bh(&session->frwd_lock); |
| 2422 | mutex_unlock(&session->eh_mutex); | 2422 | mutex_unlock(&session->eh_mutex); |
| 2423 | return FAILED; | 2423 | return FAILED; |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 20fa6785a0e2..68d62d55a3a5 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
| @@ -167,7 +167,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba) | |||
| 167 | sizeof(phba->wwpn)); | 167 | sizeof(phba->wwpn)); |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | phba->sli3_options = 0x0; | 170 | /* |
| 171 | * Clear all option bits except LPFC_SLI3_BG_ENABLED, | ||
| 172 | * which was already set in lpfc_get_cfgparam() | ||
| 173 | */ | ||
| 174 | phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; | ||
| 171 | 175 | ||
| 172 | /* Setup and issue mailbox READ REV command */ | 176 | /* Setup and issue mailbox READ REV command */ |
| 173 | lpfc_read_rev(phba, pmb); | 177 | lpfc_read_rev(phba, pmb); |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 783a1540cfbe..b9e5cd79931a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
| @@ -4965,7 +4965,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) | |||
| 4965 | phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | | 4965 | phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | |
| 4966 | LPFC_SLI3_HBQ_ENABLED | | 4966 | LPFC_SLI3_HBQ_ENABLED | |
| 4967 | LPFC_SLI3_CRP_ENABLED | | 4967 | LPFC_SLI3_CRP_ENABLED | |
| 4968 | LPFC_SLI3_BG_ENABLED | | ||
| 4969 | LPFC_SLI3_DSS_ENABLED); | 4968 | LPFC_SLI3_DSS_ENABLED); |
| 4970 | if (rc != MBX_SUCCESS) { | 4969 | if (rc != MBX_SUCCESS) { |
| 4971 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 4970 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index f03dc03a42c3..8f88348ebe42 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
| @@ -446,7 +446,6 @@ struct storvsc_device { | |||
| 446 | 446 | ||
| 447 | bool destroy; | 447 | bool destroy; |
| 448 | bool drain_notify; | 448 | bool drain_notify; |
| 449 | bool open_sub_channel; | ||
| 450 | atomic_t num_outstanding_req; | 449 | atomic_t num_outstanding_req; |
| 451 | struct Scsi_Host *host; | 450 | struct Scsi_Host *host; |
| 452 | 451 | ||
| @@ -636,33 +635,38 @@ get_in_err: | |||
| 636 | static void handle_sc_creation(struct vmbus_channel *new_sc) | 635 | static void handle_sc_creation(struct vmbus_channel *new_sc) |
| 637 | { | 636 | { |
| 638 | struct hv_device *device = new_sc->primary_channel->device_obj; | 637 | struct hv_device *device = new_sc->primary_channel->device_obj; |
| 638 | struct device *dev = &device->device; | ||
| 639 | struct storvsc_device *stor_device; | 639 | struct storvsc_device *stor_device; |
| 640 | struct vmstorage_channel_properties props; | 640 | struct vmstorage_channel_properties props; |
| 641 | int ret; | ||
| 641 | 642 | ||
| 642 | stor_device = get_out_stor_device(device); | 643 | stor_device = get_out_stor_device(device); |
| 643 | if (!stor_device) | 644 | if (!stor_device) |
| 644 | return; | 645 | return; |
| 645 | 646 | ||
| 646 | if (stor_device->open_sub_channel == false) | ||
| 647 | return; | ||
| 648 | |||
| 649 | memset(&props, 0, sizeof(struct vmstorage_channel_properties)); | 647 | memset(&props, 0, sizeof(struct vmstorage_channel_properties)); |
| 650 | 648 | ||
| 651 | vmbus_open(new_sc, | 649 | ret = vmbus_open(new_sc, |
| 652 | storvsc_ringbuffer_size, | 650 | storvsc_ringbuffer_size, |
| 653 | storvsc_ringbuffer_size, | 651 | storvsc_ringbuffer_size, |
| 654 | (void *)&props, | 652 | (void *)&props, |
| 655 | sizeof(struct vmstorage_channel_properties), | 653 | sizeof(struct vmstorage_channel_properties), |
| 656 | storvsc_on_channel_callback, new_sc); | 654 | storvsc_on_channel_callback, new_sc); |
| 657 | 655 | ||
| 658 | if (new_sc->state == CHANNEL_OPENED_STATE) { | 656 | /* In case vmbus_open() fails, we don't use the sub-channel. */ |
| 659 | stor_device->stor_chns[new_sc->target_cpu] = new_sc; | 657 | if (ret != 0) { |
| 660 | cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus); | 658 | dev_err(dev, "Failed to open sub-channel: err=%d\n", ret); |
| 659 | return; | ||
| 661 | } | 660 | } |
| 661 | |||
| 662 | /* Add the sub-channel to the array of available channels. */ | ||
| 663 | stor_device->stor_chns[new_sc->target_cpu] = new_sc; | ||
| 664 | cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus); | ||
| 662 | } | 665 | } |
| 663 | 666 | ||
| 664 | static void handle_multichannel_storage(struct hv_device *device, int max_chns) | 667 | static void handle_multichannel_storage(struct hv_device *device, int max_chns) |
| 665 | { | 668 | { |
| 669 | struct device *dev = &device->device; | ||
| 666 | struct storvsc_device *stor_device; | 670 | struct storvsc_device *stor_device; |
| 667 | int num_cpus = num_online_cpus(); | 671 | int num_cpus = num_online_cpus(); |
| 668 | int num_sc; | 672 | int num_sc; |
| @@ -679,22 +683,12 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) | |||
| 679 | request = &stor_device->init_request; | 683 | request = &stor_device->init_request; |
| 680 | vstor_packet = &request->vstor_packet; | 684 | vstor_packet = &request->vstor_packet; |
| 681 | 685 | ||
| 682 | stor_device->open_sub_channel = true; | ||
| 683 | /* | 686 | /* |
| 684 | * Establish a handler for dealing with subchannels. | 687 | * Establish a handler for dealing with subchannels. |
| 685 | */ | 688 | */ |
| 686 | vmbus_set_sc_create_callback(device->channel, handle_sc_creation); | 689 | vmbus_set_sc_create_callback(device->channel, handle_sc_creation); |
| 687 | 690 | ||
| 688 | /* | 691 | /* |
| 689 | * Check to see if sub-channels have already been created. This | ||
| 690 | * can happen when this driver is re-loaded after unloading. | ||
| 691 | */ | ||
| 692 | |||
| 693 | if (vmbus_are_subchannels_present(device->channel)) | ||
| 694 | return; | ||
| 695 | |||
| 696 | stor_device->open_sub_channel = false; | ||
| 697 | /* | ||
| 698 | * Request the host to create sub-channels. | 692 | * Request the host to create sub-channels. |
| 699 | */ | 693 | */ |
| 700 | memset(request, 0, sizeof(struct storvsc_cmd_request)); | 694 | memset(request, 0, sizeof(struct storvsc_cmd_request)); |
| @@ -710,23 +704,29 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) | |||
| 710 | VM_PKT_DATA_INBAND, | 704 | VM_PKT_DATA_INBAND, |
| 711 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 705 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
| 712 | 706 | ||
| 713 | if (ret != 0) | 707 | if (ret != 0) { |
| 708 | dev_err(dev, "Failed to create sub-channel: err=%d\n", ret); | ||
| 714 | return; | 709 | return; |
| 710 | } | ||
| 715 | 711 | ||
| 716 | t = wait_for_completion_timeout(&request->wait_event, 10*HZ); | 712 | t = wait_for_completion_timeout(&request->wait_event, 10*HZ); |
| 717 | if (t == 0) | 713 | if (t == 0) { |
| 714 | dev_err(dev, "Failed to create sub-channel: timed out\n"); | ||
| 718 | return; | 715 | return; |
| 716 | } | ||
| 719 | 717 | ||
| 720 | if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || | 718 | if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || |
| 721 | vstor_packet->status != 0) | 719 | vstor_packet->status != 0) { |
| 720 | dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n", | ||
| 721 | vstor_packet->operation, vstor_packet->status); | ||
| 722 | return; | 722 | return; |
| 723 | } | ||
| 723 | 724 | ||
| 724 | /* | 725 | /* |
| 725 | * Now that we created the sub-channels, invoke the check; this | 726 | * We need to do nothing here, because vmbus_process_offer() |
| 726 | * may trigger the callback. | 727 | * invokes channel->sc_creation_callback, which will open and use |
| 728 | * the sub-channel(s). | ||
| 727 | */ | 729 | */ |
| 728 | stor_device->open_sub_channel = true; | ||
| 729 | vmbus_are_subchannels_present(device->channel); | ||
| 730 | } | 730 | } |
| 731 | 731 | ||
| 732 | static void cache_wwn(struct storvsc_device *stor_device, | 732 | static void cache_wwn(struct storvsc_device *stor_device, |
| @@ -1794,7 +1794,6 @@ static int storvsc_probe(struct hv_device *device, | |||
| 1794 | } | 1794 | } |
| 1795 | 1795 | ||
| 1796 | stor_device->destroy = false; | 1796 | stor_device->destroy = false; |
| 1797 | stor_device->open_sub_channel = false; | ||
| 1798 | init_waitqueue_head(&stor_device->waiting_to_drain); | 1797 | init_waitqueue_head(&stor_device->waiting_to_drain); |
| 1799 | stor_device->device = device; | 1798 | stor_device->device = device; |
| 1800 | stor_device->host = host; | 1799 | stor_device->host = host; |
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 6e491023fdd8..0d6b2a88fc8e 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c | |||
| @@ -1202,8 +1202,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) | |||
| 1202 | 1202 | ||
| 1203 | static void pvscsi_release_resources(struct pvscsi_adapter *adapter) | 1203 | static void pvscsi_release_resources(struct pvscsi_adapter *adapter) |
| 1204 | { | 1204 | { |
| 1205 | pvscsi_shutdown_intr(adapter); | ||
| 1206 | |||
| 1207 | if (adapter->workqueue) | 1205 | if (adapter->workqueue) |
| 1208 | destroy_workqueue(adapter->workqueue); | 1206 | destroy_workqueue(adapter->workqueue); |
| 1209 | 1207 | ||
| @@ -1534,6 +1532,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1534 | out_reset_adapter: | 1532 | out_reset_adapter: |
| 1535 | ll_adapter_reset(adapter); | 1533 | ll_adapter_reset(adapter); |
| 1536 | out_release_resources: | 1534 | out_release_resources: |
| 1535 | pvscsi_shutdown_intr(adapter); | ||
| 1537 | pvscsi_release_resources(adapter); | 1536 | pvscsi_release_resources(adapter); |
| 1538 | scsi_host_put(host); | 1537 | scsi_host_put(host); |
| 1539 | out_disable_device: | 1538 | out_disable_device: |
| @@ -1542,6 +1541,7 @@ out_disable_device: | |||
| 1542 | return error; | 1541 | return error; |
| 1543 | 1542 | ||
| 1544 | out_release_resources_and_disable: | 1543 | out_release_resources_and_disable: |
| 1544 | pvscsi_shutdown_intr(adapter); | ||
| 1545 | pvscsi_release_resources(adapter); | 1545 | pvscsi_release_resources(adapter); |
| 1546 | goto out_disable_device; | 1546 | goto out_disable_device; |
| 1547 | } | 1547 | } |
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig index a7a34e89c42d..3252efa422f9 100644 --- a/drivers/staging/media/sunxi/cedrus/Kconfig +++ b/drivers/staging/media/sunxi/cedrus/Kconfig | |||
| @@ -3,6 +3,7 @@ config VIDEO_SUNXI_CEDRUS | |||
| 3 | depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER | 3 | depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER |
| 4 | depends on HAS_DMA | 4 | depends on HAS_DMA |
| 5 | depends on OF | 5 | depends on OF |
| 6 | depends on MEDIA_CONTROLLER_REQUEST_API | ||
| 6 | select SUNXI_SRAM | 7 | select SUNXI_SRAM |
| 7 | select VIDEOBUF2_DMA_CONTIG | 8 | select VIDEOBUF2_DMA_CONTIG |
| 8 | select V4L2_MEM2MEM_DEV | 9 | select V4L2_MEM2MEM_DEV |
diff --git a/drivers/staging/media/sunxi/cedrus/TODO b/drivers/staging/media/sunxi/cedrus/TODO index ec277ece47af..a951b3fd1ea1 100644 --- a/drivers/staging/media/sunxi/cedrus/TODO +++ b/drivers/staging/media/sunxi/cedrus/TODO | |||
| @@ -5,3 +5,8 @@ Before this stateless decoder driver can leave the staging area: | |||
| 5 | * Userspace support for the Request API needs to be reviewed; | 5 | * Userspace support for the Request API needs to be reviewed; |
| 6 | * Another stateless decoder driver should be submitted; | 6 | * Another stateless decoder driver should be submitted; |
| 7 | * At least one stateless encoder driver should be submitted. | 7 | * At least one stateless encoder driver should be submitted. |
| 8 | * When queueing a request containing references to I frames, the | ||
| 9 | refcount of the memory for those I frames needs to be incremented | ||
| 10 | and decremented when the request is completed. This will likely | ||
| 11 | require some help from vb2. The driver should fail the request | ||
| 12 | if the memory/buffer is gone. | ||
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c index 32adbcbe6175..07520a2ce179 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c | |||
| @@ -255,10 +255,10 @@ int cedrus_hw_probe(struct cedrus_dev *dev) | |||
| 255 | 255 | ||
| 256 | res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0); | 256 | res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0); |
| 257 | dev->base = devm_ioremap_resource(dev->dev, res); | 257 | dev->base = devm_ioremap_resource(dev->dev, res); |
| 258 | if (!dev->base) { | 258 | if (IS_ERR(dev->base)) { |
| 259 | v4l2_err(&dev->v4l2_dev, "Failed to map registers\n"); | 259 | v4l2_err(&dev->v4l2_dev, "Failed to map registers\n"); |
| 260 | 260 | ||
| 261 | ret = -ENOMEM; | 261 | ret = PTR_ERR(dev->base); |
| 262 | goto err_sram; | 262 | goto err_sram; |
| 263 | } | 263 | } |
| 264 | 264 | ||
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c index 9d156efbc9ed..4d473f008aa4 100644 --- a/drivers/staging/rtl8712/mlme_linux.c +++ b/drivers/staging/rtl8712/mlme_linux.c | |||
| @@ -146,7 +146,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie) | |||
| 146 | p = buff; | 146 | p = buff; |
| 147 | p += sprintf(p, "ASSOCINFO(ReqIEs="); | 147 | p += sprintf(p, "ASSOCINFO(ReqIEs="); |
| 148 | len = sec_ie[1] + 2; | 148 | len = sec_ie[1] + 2; |
| 149 | len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX - 1; | 149 | len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX; |
| 150 | for (i = 0; i < len; i++) | 150 | for (i = 0; i < len; i++) |
| 151 | p += sprintf(p, "%02x", sec_ie[i]); | 151 | p += sprintf(p, "%02x", sec_ie[i]); |
| 152 | p += sprintf(p, ")"); | 152 | p += sprintf(p, ")"); |
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c index a7374006a9fb..986a1d526918 100644 --- a/drivers/staging/rtl8712/rtl871x_mlme.c +++ b/drivers/staging/rtl8712/rtl871x_mlme.c | |||
| @@ -1346,7 +1346,7 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie, | |||
| 1346 | u8 *out_ie, uint in_len) | 1346 | u8 *out_ie, uint in_len) |
| 1347 | { | 1347 | { |
| 1348 | u8 authmode = 0, match; | 1348 | u8 authmode = 0, match; |
| 1349 | u8 sec_ie[255], uncst_oui[4], bkup_ie[255]; | 1349 | u8 sec_ie[IW_CUSTOM_MAX], uncst_oui[4], bkup_ie[255]; |
| 1350 | u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01}; | 1350 | u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01}; |
| 1351 | uint ielength, cnt, remove_cnt; | 1351 | uint ielength, cnt, remove_cnt; |
| 1352 | int iEntry; | 1352 | int iEntry; |
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 69c7abc0e3a5..8445d516c93d 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c | |||
| @@ -1565,7 +1565,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame) | |||
| 1565 | if (pstat->aid > 0) { | 1565 | if (pstat->aid > 0) { |
| 1566 | DBG_871X(" old AID %d\n", pstat->aid); | 1566 | DBG_871X(" old AID %d\n", pstat->aid); |
| 1567 | } else { | 1567 | } else { |
| 1568 | for (pstat->aid = 1; pstat->aid < NUM_STA; pstat->aid++) | 1568 | for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++) |
| 1569 | if (pstapriv->sta_aid[pstat->aid - 1] == NULL) | 1569 | if (pstapriv->sta_aid[pstat->aid - 1] == NULL) |
| 1570 | break; | 1570 | break; |
| 1571 | 1571 | ||
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c index 92f67d40f2e9..d7105d01859a 100644 --- a/drivers/thermal/armada_thermal.c +++ b/drivers/thermal/armada_thermal.c | |||
| @@ -357,7 +357,7 @@ static int armada_get_temp_legacy(struct thermal_zone_device *thermal, | |||
| 357 | int ret; | 357 | int ret; |
| 358 | 358 | ||
| 359 | /* Valid check */ | 359 | /* Valid check */ |
| 360 | if (armada_is_valid(priv)) { | 360 | if (!armada_is_valid(priv)) { |
| 361 | dev_err(priv->dev, | 361 | dev_err(priv->dev, |
| 362 | "Temperature sensor reading not valid\n"); | 362 | "Temperature sensor reading not valid\n"); |
| 363 | return -EIO; | 363 | return -EIO; |
| @@ -395,7 +395,7 @@ unlock_mutex: | |||
| 395 | return ret; | 395 | return ret; |
| 396 | } | 396 | } |
| 397 | 397 | ||
| 398 | static struct thermal_zone_of_device_ops of_ops = { | 398 | static const struct thermal_zone_of_device_ops of_ops = { |
| 399 | .get_temp = armada_get_temp, | 399 | .get_temp = armada_get_temp, |
| 400 | }; | 400 | }; |
| 401 | 401 | ||
| @@ -526,23 +526,21 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev, | |||
| 526 | 526 | ||
| 527 | /* First memory region points towards the status register */ | 527 | /* First memory region points towards the status register */ |
| 528 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 528 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 529 | if (!res) | ||
| 530 | return -EIO; | ||
| 531 | |||
| 532 | /* | ||
| 533 | * Edit the resource start address and length to map over all the | ||
| 534 | * registers, instead of pointing at them one by one. | ||
| 535 | */ | ||
| 536 | res->start -= data->syscon_status_off; | ||
| 537 | res->end = res->start + max(data->syscon_status_off, | ||
| 538 | max(data->syscon_control0_off, | ||
| 539 | data->syscon_control1_off)) + | ||
| 540 | sizeof(unsigned int) - 1; | ||
| 541 | |||
| 542 | base = devm_ioremap_resource(&pdev->dev, res); | 529 | base = devm_ioremap_resource(&pdev->dev, res); |
| 543 | if (IS_ERR(base)) | 530 | if (IS_ERR(base)) |
| 544 | return PTR_ERR(base); | 531 | return PTR_ERR(base); |
| 545 | 532 | ||
| 533 | /* | ||
| 534 | * Fix up from the old individual DT register specification to | ||
| 535 | * cover all the registers. We do this by adjusting the ioremap() | ||
| 536 | * result, which should be fine as ioremap() deals with pages. | ||
| 537 | * However, validate that we do not cross a page boundary while | ||
| 538 | * making this adjustment. | ||
| 539 | */ | ||
| 540 | if (((unsigned long)base & ~PAGE_MASK) < data->syscon_status_off) | ||
| 541 | return -EINVAL; | ||
| 542 | base -= data->syscon_status_off; | ||
| 543 | |||
| 546 | priv->syscon = devm_regmap_init_mmio(&pdev->dev, base, | 544 | priv->syscon = devm_regmap_init_mmio(&pdev->dev, base, |
| 547 | &armada_thermal_regmap_config); | 545 | &armada_thermal_regmap_config); |
| 548 | if (IS_ERR(priv->syscon)) | 546 | if (IS_ERR(priv->syscon)) |
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 23ad4f9f2143..b9d90f0ed504 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c | |||
| @@ -1,17 +1,8 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 1 | /* | 2 | /* |
| 2 | * Driver for Broadcom BCM2835 SoC temperature sensor | 3 | * Driver for Broadcom BCM2835 SoC temperature sensor |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2016 Martin Sperl | 5 | * Copyright (C) 2016 Martin Sperl |
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | */ | 6 | */ |
| 16 | 7 | ||
| 17 | #include <linux/clk.h> | 8 | #include <linux/clk.h> |
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c index 1919f91fa756..e8b1570cc388 100644 --- a/drivers/thermal/broadcom/brcmstb_thermal.c +++ b/drivers/thermal/broadcom/brcmstb_thermal.c | |||
| @@ -299,7 +299,7 @@ static int brcmstb_set_trips(void *data, int low, int high) | |||
| 299 | return 0; | 299 | return 0; |
| 300 | } | 300 | } |
| 301 | 301 | ||
| 302 | static struct thermal_zone_of_device_ops of_ops = { | 302 | static const struct thermal_zone_of_device_ops of_ops = { |
| 303 | .get_temp = brcmstb_get_temp, | 303 | .get_temp = brcmstb_get_temp, |
| 304 | .set_trips = brcmstb_set_trips, | 304 | .set_trips = brcmstb_set_trips, |
| 305 | }; | 305 | }; |
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index c4111a98f1a7..2d26ae80e202 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c | |||
| @@ -424,7 +424,7 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data) | |||
| 424 | struct platform_device *pdev = data->pdev; | 424 | struct platform_device *pdev = data->pdev; |
| 425 | struct device *dev = &pdev->dev; | 425 | struct device *dev = &pdev->dev; |
| 426 | 426 | ||
| 427 | data->nr_sensors = 2; | 427 | data->nr_sensors = 1; |
| 428 | 428 | ||
| 429 | data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) * | 429 | data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) * |
| 430 | data->nr_sensors, GFP_KERNEL); | 430 | data->nr_sensors, GFP_KERNEL); |
| @@ -589,7 +589,7 @@ static int hisi_thermal_probe(struct platform_device *pdev) | |||
| 589 | return ret; | 589 | return ret; |
| 590 | } | 590 | } |
| 591 | 591 | ||
| 592 | ret = platform_get_irq_byname(pdev, sensor->irq_name); | 592 | ret = platform_get_irq(pdev, 0); |
| 593 | if (ret < 0) | 593 | if (ret < 0) |
| 594 | return ret; | 594 | return ret; |
| 595 | 595 | ||
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c index 47623da0f91b..bbd73c5a4a4e 100644 --- a/drivers/thermal/st/stm_thermal.c +++ b/drivers/thermal/st/stm_thermal.c | |||
| @@ -241,8 +241,8 @@ static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor) | |||
| 241 | sensor->t0 = TS1_T0_VAL1; | 241 | sensor->t0 = TS1_T0_VAL1; |
| 242 | 242 | ||
| 243 | /* Retrieve fmt0 and put it on Hz */ | 243 | /* Retrieve fmt0 and put it on Hz */ |
| 244 | sensor->fmt0 = ADJUST * readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET) | 244 | sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base + |
| 245 | & TS1_FMT0_MASK; | 245 | DTS_T0VALR1_OFFSET) & TS1_FMT0_MASK); |
| 246 | 246 | ||
| 247 | /* Retrieve ramp coefficient */ | 247 | /* Retrieve ramp coefficient */ |
| 248 | sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) & | 248 | sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) & |
| @@ -532,6 +532,10 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor) | |||
| 532 | if (ret) | 532 | if (ret) |
| 533 | return ret; | 533 | return ret; |
| 534 | 534 | ||
| 535 | ret = stm_thermal_read_factory_settings(sensor); | ||
| 536 | if (ret) | ||
| 537 | goto thermal_unprepare; | ||
| 538 | |||
| 535 | ret = stm_thermal_calibration(sensor); | 539 | ret = stm_thermal_calibration(sensor); |
| 536 | if (ret) | 540 | if (ret) |
| 537 | goto thermal_unprepare; | 541 | goto thermal_unprepare; |
| @@ -636,10 +640,6 @@ static int stm_thermal_probe(struct platform_device *pdev) | |||
| 636 | /* Populate sensor */ | 640 | /* Populate sensor */ |
| 637 | sensor->base = base; | 641 | sensor->base = base; |
| 638 | 642 | ||
| 639 | ret = stm_thermal_read_factory_settings(sensor); | ||
| 640 | if (ret) | ||
| 641 | return ret; | ||
| 642 | |||
| 643 | sensor->clk = devm_clk_get(&pdev->dev, "pclk"); | 643 | sensor->clk = devm_clk_get(&pdev->dev, "pclk"); |
| 644 | if (IS_ERR(sensor->clk)) { | 644 | if (IS_ERR(sensor->clk)) { |
| 645 | dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n", | 645 | dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n", |
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index dd5e1cede2b5..c3f933d10295 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c | |||
| @@ -213,17 +213,17 @@ static int mtk8250_probe(struct platform_device *pdev) | |||
| 213 | 213 | ||
| 214 | platform_set_drvdata(pdev, data); | 214 | platform_set_drvdata(pdev, data); |
| 215 | 215 | ||
| 216 | pm_runtime_enable(&pdev->dev); | 216 | err = mtk8250_runtime_resume(&pdev->dev); |
| 217 | if (!pm_runtime_enabled(&pdev->dev)) { | 217 | if (err) |
| 218 | err = mtk8250_runtime_resume(&pdev->dev); | 218 | return err; |
| 219 | if (err) | ||
| 220 | return err; | ||
| 221 | } | ||
| 222 | 219 | ||
| 223 | data->line = serial8250_register_8250_port(&uart); | 220 | data->line = serial8250_register_8250_port(&uart); |
| 224 | if (data->line < 0) | 221 | if (data->line < 0) |
| 225 | return data->line; | 222 | return data->line; |
| 226 | 223 | ||
| 224 | pm_runtime_set_active(&pdev->dev); | ||
| 225 | pm_runtime_enable(&pdev->dev); | ||
| 226 | |||
| 227 | return 0; | 227 | return 0; |
| 228 | } | 228 | } |
| 229 | 229 | ||
| @@ -234,13 +234,11 @@ static int mtk8250_remove(struct platform_device *pdev) | |||
| 234 | pm_runtime_get_sync(&pdev->dev); | 234 | pm_runtime_get_sync(&pdev->dev); |
| 235 | 235 | ||
| 236 | serial8250_unregister_port(data->line); | 236 | serial8250_unregister_port(data->line); |
| 237 | mtk8250_runtime_suspend(&pdev->dev); | ||
| 237 | 238 | ||
| 238 | pm_runtime_disable(&pdev->dev); | 239 | pm_runtime_disable(&pdev->dev); |
| 239 | pm_runtime_put_noidle(&pdev->dev); | 240 | pm_runtime_put_noidle(&pdev->dev); |
| 240 | 241 | ||
| 241 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
| 242 | mtk8250_runtime_suspend(&pdev->dev); | ||
| 243 | |||
| 244 | return 0; | 242 | return 0; |
| 245 | } | 243 | } |
| 246 | 244 | ||
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index baeeeaec3f03..6fb312e7af71 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c | |||
| @@ -233,7 +233,7 @@ static void kgdboc_put_char(u8 chr) | |||
| 233 | static int param_set_kgdboc_var(const char *kmessage, | 233 | static int param_set_kgdboc_var(const char *kmessage, |
| 234 | const struct kernel_param *kp) | 234 | const struct kernel_param *kp) |
| 235 | { | 235 | { |
| 236 | int len = strlen(kmessage); | 236 | size_t len = strlen(kmessage); |
| 237 | 237 | ||
| 238 | if (len >= MAX_CONFIG_LEN) { | 238 | if (len >= MAX_CONFIG_LEN) { |
| 239 | pr_err("config string too long\n"); | 239 | pr_err("config string too long\n"); |
| @@ -254,7 +254,7 @@ static int param_set_kgdboc_var(const char *kmessage, | |||
| 254 | 254 | ||
| 255 | strcpy(config, kmessage); | 255 | strcpy(config, kmessage); |
| 256 | /* Chop out \n char as a result of echo */ | 256 | /* Chop out \n char as a result of echo */ |
| 257 | if (config[len - 1] == '\n') | 257 | if (len && config[len - 1] == '\n') |
| 258 | config[len - 1] = '\0'; | 258 | config[len - 1] = '\0'; |
| 259 | 259 | ||
| 260 | if (configured == 1) | 260 | if (configured == 1) |
diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c index 70a4ea4eaa6e..990376576970 100644 --- a/drivers/tty/serial/suncore.c +++ b/drivers/tty/serial/suncore.c | |||
| @@ -112,6 +112,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp) | |||
| 112 | mode = of_get_property(dp, mode_prop, NULL); | 112 | mode = of_get_property(dp, mode_prop, NULL); |
| 113 | if (!mode) | 113 | if (!mode) |
| 114 | mode = "9600,8,n,1,-"; | 114 | mode = "9600,8,n,1,-"; |
| 115 | of_node_put(dp); | ||
| 115 | } | 116 | } |
| 116 | 117 | ||
| 117 | cflag = CREAD | HUPCL | CLOCAL; | 118 | cflag = CREAD | HUPCL | CLOCAL; |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index ee80dfbd5442..687250ec8032 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -1373,7 +1373,13 @@ err_release_lock: | |||
| 1373 | return ERR_PTR(retval); | 1373 | return ERR_PTR(retval); |
| 1374 | } | 1374 | } |
| 1375 | 1375 | ||
| 1376 | static void tty_free_termios(struct tty_struct *tty) | 1376 | /** |
| 1377 | * tty_save_termios() - save tty termios data in driver table | ||
| 1378 | * @tty: tty whose termios data to save | ||
| 1379 | * | ||
| 1380 | * Locking: Caller guarantees serialisation with tty_init_termios(). | ||
| 1381 | */ | ||
| 1382 | void tty_save_termios(struct tty_struct *tty) | ||
| 1377 | { | 1383 | { |
| 1378 | struct ktermios *tp; | 1384 | struct ktermios *tp; |
| 1379 | int idx = tty->index; | 1385 | int idx = tty->index; |
| @@ -1392,6 +1398,7 @@ static void tty_free_termios(struct tty_struct *tty) | |||
| 1392 | } | 1398 | } |
| 1393 | *tp = tty->termios; | 1399 | *tp = tty->termios; |
| 1394 | } | 1400 | } |
| 1401 | EXPORT_SYMBOL_GPL(tty_save_termios); | ||
| 1395 | 1402 | ||
| 1396 | /** | 1403 | /** |
| 1397 | * tty_flush_works - flush all works of a tty/pty pair | 1404 | * tty_flush_works - flush all works of a tty/pty pair |
| @@ -1491,7 +1498,7 @@ static void release_tty(struct tty_struct *tty, int idx) | |||
| 1491 | WARN_ON(!mutex_is_locked(&tty_mutex)); | 1498 | WARN_ON(!mutex_is_locked(&tty_mutex)); |
| 1492 | if (tty->ops->shutdown) | 1499 | if (tty->ops->shutdown) |
| 1493 | tty->ops->shutdown(tty); | 1500 | tty->ops->shutdown(tty); |
| 1494 | tty_free_termios(tty); | 1501 | tty_save_termios(tty); |
| 1495 | tty_driver_remove_tty(tty->driver, tty); | 1502 | tty_driver_remove_tty(tty->driver, tty); |
| 1496 | tty->port->itty = NULL; | 1503 | tty->port->itty = NULL; |
| 1497 | if (tty->link) | 1504 | if (tty->link) |
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index cb6075096a5b..044c3cbdcfa4 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c | |||
| @@ -633,7 +633,8 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty, | |||
| 633 | if (tty_port_close_start(port, tty, filp) == 0) | 633 | if (tty_port_close_start(port, tty, filp) == 0) |
| 634 | return; | 634 | return; |
| 635 | tty_port_shutdown(port, tty); | 635 | tty_port_shutdown(port, tty); |
| 636 | set_bit(TTY_IO_ERROR, &tty->flags); | 636 | if (!port->console) |
| 637 | set_bit(TTY_IO_ERROR, &tty->flags); | ||
| 637 | tty_port_close_end(port, tty); | 638 | tty_port_close_end(port, tty); |
| 638 | tty_port_tty_set(port, NULL); | 639 | tty_port_tty_set(port, NULL); |
| 639 | } | 640 | } |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 0f9381b69a3b..f76b2e0aba9d 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -2251,7 +2251,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev) | |||
| 2251 | /* descriptor may appear anywhere in config */ | 2251 | /* descriptor may appear anywhere in config */ |
| 2252 | err = __usb_get_extra_descriptor(udev->rawdescriptors[0], | 2252 | err = __usb_get_extra_descriptor(udev->rawdescriptors[0], |
| 2253 | le16_to_cpu(udev->config[0].desc.wTotalLength), | 2253 | le16_to_cpu(udev->config[0].desc.wTotalLength), |
| 2254 | USB_DT_OTG, (void **) &desc); | 2254 | USB_DT_OTG, (void **) &desc, sizeof(*desc)); |
| 2255 | if (err || !(desc->bmAttributes & USB_OTG_HNP)) | 2255 | if (err || !(desc->bmAttributes & USB_OTG_HNP)) |
| 2256 | return 0; | 2256 | return 0; |
| 2257 | 2257 | ||
| @@ -5163,7 +5163,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, | |||
| 5163 | /* Handle notifying userspace about hub over-current events */ | 5163 | /* Handle notifying userspace about hub over-current events */ |
| 5164 | static void port_over_current_notify(struct usb_port *port_dev) | 5164 | static void port_over_current_notify(struct usb_port *port_dev) |
| 5165 | { | 5165 | { |
| 5166 | static char *envp[] = { NULL, NULL, NULL }; | 5166 | char *envp[3]; |
| 5167 | struct device *hub_dev; | 5167 | struct device *hub_dev; |
| 5168 | char *port_dev_path; | 5168 | char *port_dev_path; |
| 5169 | 5169 | ||
| @@ -5187,6 +5187,7 @@ static void port_over_current_notify(struct usb_port *port_dev) | |||
| 5187 | if (!envp[1]) | 5187 | if (!envp[1]) |
| 5188 | goto exit; | 5188 | goto exit; |
| 5189 | 5189 | ||
| 5190 | envp[2] = NULL; | ||
| 5190 | kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp); | 5191 | kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp); |
| 5191 | 5192 | ||
| 5192 | kfree(envp[1]); | 5193 | kfree(envp[1]); |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 0690fcff0ea2..514c5214ddb2 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -333,6 +333,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 333 | /* Midiman M-Audio Keystation 88es */ | 333 | /* Midiman M-Audio Keystation 88es */ |
| 334 | { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME }, | 334 | { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 335 | 335 | ||
| 336 | /* SanDisk Ultra Fit and Ultra Flair */ | ||
| 337 | { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM }, | ||
| 338 | { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, | ||
| 339 | |||
| 336 | /* M-Systems Flash Disk Pioneers */ | 340 | /* M-Systems Flash Disk Pioneers */ |
| 337 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, | 341 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 338 | 342 | ||
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 79d8bd7a612e..4ebfbd737905 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
| @@ -832,14 +832,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number); | |||
| 832 | */ | 832 | */ |
| 833 | 833 | ||
| 834 | int __usb_get_extra_descriptor(char *buffer, unsigned size, | 834 | int __usb_get_extra_descriptor(char *buffer, unsigned size, |
| 835 | unsigned char type, void **ptr) | 835 | unsigned char type, void **ptr, size_t minsize) |
| 836 | { | 836 | { |
| 837 | struct usb_descriptor_header *header; | 837 | struct usb_descriptor_header *header; |
| 838 | 838 | ||
| 839 | while (size >= sizeof(struct usb_descriptor_header)) { | 839 | while (size >= sizeof(struct usb_descriptor_header)) { |
| 840 | header = (struct usb_descriptor_header *)buffer; | 840 | header = (struct usb_descriptor_header *)buffer; |
| 841 | 841 | ||
| 842 | if (header->bLength < 2) { | 842 | if (header->bLength < 2 || header->bLength > size) { |
| 843 | printk(KERN_ERR | 843 | printk(KERN_ERR |
| 844 | "%s: bogus descriptor, type %d length %d\n", | 844 | "%s: bogus descriptor, type %d length %d\n", |
| 845 | usbcore_name, | 845 | usbcore_name, |
| @@ -848,7 +848,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size, | |||
| 848 | return -1; | 848 | return -1; |
| 849 | } | 849 | } |
| 850 | 850 | ||
| 851 | if (header->bDescriptorType == type) { | 851 | if (header->bDescriptorType == type && header->bLength >= minsize) { |
| 852 | *ptr = header; | 852 | *ptr = header; |
| 853 | return 0; | 853 | return 0; |
| 854 | } | 854 | } |
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 684d6f074c3a..09a8ebd95588 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c | |||
| @@ -640,7 +640,7 @@ static int hwahc_security_create(struct hwahc *hwahc) | |||
| 640 | top = itr + itr_size; | 640 | top = itr + itr_size; |
| 641 | result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index], | 641 | result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index], |
| 642 | le16_to_cpu(usb_dev->actconfig->desc.wTotalLength), | 642 | le16_to_cpu(usb_dev->actconfig->desc.wTotalLength), |
| 643 | USB_DT_SECURITY, (void **) &secd); | 643 | USB_DT_SECURITY, (void **) &secd, sizeof(*secd)); |
| 644 | if (result == -1) { | 644 | if (result == -1) { |
| 645 | dev_warn(dev, "BUG? WUSB host has no security descriptors\n"); | 645 | dev_warn(dev, "BUG? WUSB host has no security descriptors\n"); |
| 646 | return 0; | 646 | return 0; |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index a9515265db4d..a9ec7051f286 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -139,6 +139,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 139 | pdev->device == 0x43bb)) | 139 | pdev->device == 0x43bb)) |
| 140 | xhci->quirks |= XHCI_SUSPEND_DELAY; | 140 | xhci->quirks |= XHCI_SUSPEND_DELAY; |
| 141 | 141 | ||
| 142 | if (pdev->vendor == PCI_VENDOR_ID_AMD && | ||
| 143 | (pdev->device == 0x15e0 || pdev->device == 0x15e1)) | ||
| 144 | xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND; | ||
| 145 | |||
| 142 | if (pdev->vendor == PCI_VENDOR_ID_AMD) | 146 | if (pdev->vendor == PCI_VENDOR_ID_AMD) |
| 143 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | 147 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
| 144 | 148 | ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index c928dbbff881..dae3be1b9c8f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -968,6 +968,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) | |||
| 968 | unsigned int delay = XHCI_MAX_HALT_USEC; | 968 | unsigned int delay = XHCI_MAX_HALT_USEC; |
| 969 | struct usb_hcd *hcd = xhci_to_hcd(xhci); | 969 | struct usb_hcd *hcd = xhci_to_hcd(xhci); |
| 970 | u32 command; | 970 | u32 command; |
| 971 | u32 res; | ||
| 971 | 972 | ||
| 972 | if (!hcd->state) | 973 | if (!hcd->state) |
| 973 | return 0; | 974 | return 0; |
| @@ -1021,11 +1022,28 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) | |||
| 1021 | command = readl(&xhci->op_regs->command); | 1022 | command = readl(&xhci->op_regs->command); |
| 1022 | command |= CMD_CSS; | 1023 | command |= CMD_CSS; |
| 1023 | writel(command, &xhci->op_regs->command); | 1024 | writel(command, &xhci->op_regs->command); |
| 1025 | xhci->broken_suspend = 0; | ||
| 1024 | if (xhci_handshake(&xhci->op_regs->status, | 1026 | if (xhci_handshake(&xhci->op_regs->status, |
| 1025 | STS_SAVE, 0, 10 * 1000)) { | 1027 | STS_SAVE, 0, 10 * 1000)) { |
| 1026 | xhci_warn(xhci, "WARN: xHC save state timeout\n"); | 1028 | /* |
| 1027 | spin_unlock_irq(&xhci->lock); | 1029 | * AMD SNPS xHC 3.0 occasionally does not clear the |
| 1028 | return -ETIMEDOUT; | 1030 | * SSS bit of USBSTS and when driver tries to poll |
| 1031 | * to see if the xHC clears BIT(8) which never happens | ||
| 1032 | * and driver assumes that controller is not responding | ||
| 1033 | * and times out. To workaround this, its good to check | ||
| 1034 | * if SRE and HCE bits are not set (as per xhci | ||
| 1035 | * Section 5.4.2) and bypass the timeout. | ||
| 1036 | */ | ||
| 1037 | res = readl(&xhci->op_regs->status); | ||
| 1038 | if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) && | ||
| 1039 | (((res & STS_SRE) == 0) && | ||
| 1040 | ((res & STS_HCE) == 0))) { | ||
| 1041 | xhci->broken_suspend = 1; | ||
| 1042 | } else { | ||
| 1043 | xhci_warn(xhci, "WARN: xHC save state timeout\n"); | ||
| 1044 | spin_unlock_irq(&xhci->lock); | ||
| 1045 | return -ETIMEDOUT; | ||
| 1046 | } | ||
| 1029 | } | 1047 | } |
| 1030 | spin_unlock_irq(&xhci->lock); | 1048 | spin_unlock_irq(&xhci->lock); |
| 1031 | 1049 | ||
| @@ -1078,7 +1096,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
| 1078 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); | 1096 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); |
| 1079 | 1097 | ||
| 1080 | spin_lock_irq(&xhci->lock); | 1098 | spin_lock_irq(&xhci->lock); |
| 1081 | if (xhci->quirks & XHCI_RESET_ON_RESUME) | 1099 | if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) |
| 1082 | hibernated = true; | 1100 | hibernated = true; |
| 1083 | 1101 | ||
| 1084 | if (!hibernated) { | 1102 | if (!hibernated) { |
| @@ -4496,6 +4514,14 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, | |||
| 4496 | { | 4514 | { |
| 4497 | unsigned long long timeout_ns; | 4515 | unsigned long long timeout_ns; |
| 4498 | 4516 | ||
| 4517 | /* Prevent U1 if service interval is shorter than U1 exit latency */ | ||
| 4518 | if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { | ||
| 4519 | if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { | ||
| 4520 | dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); | ||
| 4521 | return USB3_LPM_DISABLED; | ||
| 4522 | } | ||
| 4523 | } | ||
| 4524 | |||
| 4499 | if (xhci->quirks & XHCI_INTEL_HOST) | 4525 | if (xhci->quirks & XHCI_INTEL_HOST) |
| 4500 | timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); | 4526 | timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); |
| 4501 | else | 4527 | else |
| @@ -4552,6 +4578,14 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, | |||
| 4552 | { | 4578 | { |
| 4553 | unsigned long long timeout_ns; | 4579 | unsigned long long timeout_ns; |
| 4554 | 4580 | ||
| 4581 | /* Prevent U2 if service interval is shorter than U2 exit latency */ | ||
| 4582 | if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { | ||
| 4583 | if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { | ||
| 4584 | dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); | ||
| 4585 | return USB3_LPM_DISABLED; | ||
| 4586 | } | ||
| 4587 | } | ||
| 4588 | |||
| 4555 | if (xhci->quirks & XHCI_INTEL_HOST) | 4589 | if (xhci->quirks & XHCI_INTEL_HOST) |
| 4556 | timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); | 4590 | timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); |
| 4557 | else | 4591 | else |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 260b259b72bc..c3515bad5dbb 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
| @@ -1850,6 +1850,7 @@ struct xhci_hcd { | |||
| 1850 | #define XHCI_ZERO_64B_REGS BIT_ULL(32) | 1850 | #define XHCI_ZERO_64B_REGS BIT_ULL(32) |
| 1851 | #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) | 1851 | #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) |
| 1852 | #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) | 1852 | #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) |
| 1853 | #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) | ||
| 1853 | 1854 | ||
| 1854 | unsigned int num_active_eps; | 1855 | unsigned int num_active_eps; |
| 1855 | unsigned int limit_active_eps; | 1856 | unsigned int limit_active_eps; |
| @@ -1879,6 +1880,8 @@ struct xhci_hcd { | |||
| 1879 | void *dbc; | 1880 | void *dbc; |
| 1880 | /* platform-specific data -- must come last */ | 1881 | /* platform-specific data -- must come last */ |
| 1881 | unsigned long priv[0] __aligned(sizeof(s64)); | 1882 | unsigned long priv[0] __aligned(sizeof(s64)); |
| 1883 | /* Broken Suspend flag for SNPS Suspend resume issue */ | ||
| 1884 | u8 broken_suspend; | ||
| 1882 | }; | 1885 | }; |
| 1883 | 1886 | ||
| 1884 | /* Platform specific overrides to generic XHCI hc_driver ops */ | 1887 | /* Platform specific overrides to generic XHCI hc_driver ops */ |
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index 85b48c6ddc7e..39ca31b4de46 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c | |||
| @@ -51,6 +51,7 @@ static const struct usb_device_id appledisplay_table[] = { | |||
| 51 | { APPLEDISPLAY_DEVICE(0x921c) }, | 51 | { APPLEDISPLAY_DEVICE(0x921c) }, |
| 52 | { APPLEDISPLAY_DEVICE(0x921d) }, | 52 | { APPLEDISPLAY_DEVICE(0x921d) }, |
| 53 | { APPLEDISPLAY_DEVICE(0x9222) }, | 53 | { APPLEDISPLAY_DEVICE(0x9222) }, |
| 54 | { APPLEDISPLAY_DEVICE(0x9226) }, | ||
| 54 | { APPLEDISPLAY_DEVICE(0x9236) }, | 55 | { APPLEDISPLAY_DEVICE(0x9236) }, |
| 55 | 56 | ||
| 56 | /* Terminating entry */ | 57 | /* Terminating entry */ |
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 17940589c647..7d289302ff6c 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c | |||
| @@ -101,7 +101,6 @@ static int usb_console_setup(struct console *co, char *options) | |||
| 101 | cflag |= PARENB; | 101 | cflag |= PARENB; |
| 102 | break; | 102 | break; |
| 103 | } | 103 | } |
| 104 | co->cflag = cflag; | ||
| 105 | 104 | ||
| 106 | /* | 105 | /* |
| 107 | * no need to check the index here: if the index is wrong, console | 106 | * no need to check the index here: if the index is wrong, console |
| @@ -164,6 +163,7 @@ static int usb_console_setup(struct console *co, char *options) | |||
| 164 | serial->type->set_termios(tty, port, &dummy); | 163 | serial->type->set_termios(tty, port, &dummy); |
| 165 | 164 | ||
| 166 | tty_port_tty_set(&port->port, NULL); | 165 | tty_port_tty_set(&port->port, NULL); |
| 166 | tty_save_termios(tty); | ||
| 167 | tty_kref_put(tty); | 167 | tty_kref_put(tty); |
| 168 | } | 168 | } |
| 169 | tty_port_set_initialized(&port->port, 1); | 169 | tty_port_set_initialized(&port->port, 1); |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 3a5f81a66d34..6b98d8e3a5bf 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -944,10 +944,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d, | |||
| 944 | if (msg->iova <= vq_msg->iova && | 944 | if (msg->iova <= vq_msg->iova && |
| 945 | msg->iova + msg->size - 1 >= vq_msg->iova && | 945 | msg->iova + msg->size - 1 >= vq_msg->iova && |
| 946 | vq_msg->type == VHOST_IOTLB_MISS) { | 946 | vq_msg->type == VHOST_IOTLB_MISS) { |
| 947 | mutex_lock(&node->vq->mutex); | ||
| 948 | vhost_poll_queue(&node->vq->poll); | 947 | vhost_poll_queue(&node->vq->poll); |
| 949 | mutex_unlock(&node->vq->mutex); | ||
| 950 | |||
| 951 | list_del(&node->node); | 948 | list_del(&node->node); |
| 952 | kfree(node); | 949 | kfree(node); |
| 953 | } | 950 | } |
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 34bc3ab40c6d..98ed5be132c6 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <net/sock.h> | 15 | #include <net/sock.h> |
| 16 | #include <linux/virtio_vsock.h> | 16 | #include <linux/virtio_vsock.h> |
| 17 | #include <linux/vhost.h> | 17 | #include <linux/vhost.h> |
| 18 | #include <linux/hashtable.h> | ||
| 18 | 19 | ||
| 19 | #include <net/af_vsock.h> | 20 | #include <net/af_vsock.h> |
| 20 | #include "vhost.h" | 21 | #include "vhost.h" |
| @@ -27,14 +28,14 @@ enum { | |||
| 27 | 28 | ||
| 28 | /* Used to track all the vhost_vsock instances on the system. */ | 29 | /* Used to track all the vhost_vsock instances on the system. */ |
| 29 | static DEFINE_SPINLOCK(vhost_vsock_lock); | 30 | static DEFINE_SPINLOCK(vhost_vsock_lock); |
| 30 | static LIST_HEAD(vhost_vsock_list); | 31 | static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8); |
| 31 | 32 | ||
| 32 | struct vhost_vsock { | 33 | struct vhost_vsock { |
| 33 | struct vhost_dev dev; | 34 | struct vhost_dev dev; |
| 34 | struct vhost_virtqueue vqs[2]; | 35 | struct vhost_virtqueue vqs[2]; |
| 35 | 36 | ||
| 36 | /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */ | 37 | /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */ |
| 37 | struct list_head list; | 38 | struct hlist_node hash; |
| 38 | 39 | ||
| 39 | struct vhost_work send_pkt_work; | 40 | struct vhost_work send_pkt_work; |
| 40 | spinlock_t send_pkt_list_lock; | 41 | spinlock_t send_pkt_list_lock; |
| @@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void) | |||
| 50 | return VHOST_VSOCK_DEFAULT_HOST_CID; | 51 | return VHOST_VSOCK_DEFAULT_HOST_CID; |
| 51 | } | 52 | } |
| 52 | 53 | ||
| 53 | static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid) | 54 | /* Callers that dereference the return value must hold vhost_vsock_lock or the |
| 55 | * RCU read lock. | ||
| 56 | */ | ||
| 57 | static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) | ||
| 54 | { | 58 | { |
| 55 | struct vhost_vsock *vsock; | 59 | struct vhost_vsock *vsock; |
| 56 | 60 | ||
| 57 | list_for_each_entry(vsock, &vhost_vsock_list, list) { | 61 | hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) { |
| 58 | u32 other_cid = vsock->guest_cid; | 62 | u32 other_cid = vsock->guest_cid; |
| 59 | 63 | ||
| 60 | /* Skip instances that have no CID yet */ | 64 | /* Skip instances that have no CID yet */ |
| @@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid) | |||
| 69 | return NULL; | 73 | return NULL; |
| 70 | } | 74 | } |
| 71 | 75 | ||
| 72 | static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) | ||
| 73 | { | ||
| 74 | struct vhost_vsock *vsock; | ||
| 75 | |||
| 76 | spin_lock_bh(&vhost_vsock_lock); | ||
| 77 | vsock = __vhost_vsock_get(guest_cid); | ||
| 78 | spin_unlock_bh(&vhost_vsock_lock); | ||
| 79 | |||
| 80 | return vsock; | ||
| 81 | } | ||
| 82 | |||
| 83 | static void | 76 | static void |
| 84 | vhost_transport_do_send_pkt(struct vhost_vsock *vsock, | 77 | vhost_transport_do_send_pkt(struct vhost_vsock *vsock, |
| 85 | struct vhost_virtqueue *vq) | 78 | struct vhost_virtqueue *vq) |
| @@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) | |||
| 210 | struct vhost_vsock *vsock; | 203 | struct vhost_vsock *vsock; |
| 211 | int len = pkt->len; | 204 | int len = pkt->len; |
| 212 | 205 | ||
| 206 | rcu_read_lock(); | ||
| 207 | |||
| 213 | /* Find the vhost_vsock according to guest context id */ | 208 | /* Find the vhost_vsock according to guest context id */ |
| 214 | vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); | 209 | vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); |
| 215 | if (!vsock) { | 210 | if (!vsock) { |
| 211 | rcu_read_unlock(); | ||
| 216 | virtio_transport_free_pkt(pkt); | 212 | virtio_transport_free_pkt(pkt); |
| 217 | return -ENODEV; | 213 | return -ENODEV; |
| 218 | } | 214 | } |
| @@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) | |||
| 225 | spin_unlock_bh(&vsock->send_pkt_list_lock); | 221 | spin_unlock_bh(&vsock->send_pkt_list_lock); |
| 226 | 222 | ||
| 227 | vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); | 223 | vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); |
| 224 | |||
| 225 | rcu_read_unlock(); | ||
| 228 | return len; | 226 | return len; |
| 229 | } | 227 | } |
| 230 | 228 | ||
| @@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk) | |||
| 234 | struct vhost_vsock *vsock; | 232 | struct vhost_vsock *vsock; |
| 235 | struct virtio_vsock_pkt *pkt, *n; | 233 | struct virtio_vsock_pkt *pkt, *n; |
| 236 | int cnt = 0; | 234 | int cnt = 0; |
| 235 | int ret = -ENODEV; | ||
| 237 | LIST_HEAD(freeme); | 236 | LIST_HEAD(freeme); |
| 238 | 237 | ||
| 238 | rcu_read_lock(); | ||
| 239 | |||
| 239 | /* Find the vhost_vsock according to guest context id */ | 240 | /* Find the vhost_vsock according to guest context id */ |
| 240 | vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); | 241 | vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); |
| 241 | if (!vsock) | 242 | if (!vsock) |
| 242 | return -ENODEV; | 243 | goto out; |
| 243 | 244 | ||
| 244 | spin_lock_bh(&vsock->send_pkt_list_lock); | 245 | spin_lock_bh(&vsock->send_pkt_list_lock); |
| 245 | list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { | 246 | list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { |
| @@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk) | |||
| 265 | vhost_poll_queue(&tx_vq->poll); | 266 | vhost_poll_queue(&tx_vq->poll); |
| 266 | } | 267 | } |
| 267 | 268 | ||
| 268 | return 0; | 269 | ret = 0; |
| 270 | out: | ||
| 271 | rcu_read_unlock(); | ||
| 272 | return ret; | ||
| 269 | } | 273 | } |
| 270 | 274 | ||
| 271 | static struct virtio_vsock_pkt * | 275 | static struct virtio_vsock_pkt * |
| @@ -533,10 +537,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) | |||
| 533 | spin_lock_init(&vsock->send_pkt_list_lock); | 537 | spin_lock_init(&vsock->send_pkt_list_lock); |
| 534 | INIT_LIST_HEAD(&vsock->send_pkt_list); | 538 | INIT_LIST_HEAD(&vsock->send_pkt_list); |
| 535 | vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work); | 539 | vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work); |
| 536 | |||
| 537 | spin_lock_bh(&vhost_vsock_lock); | ||
| 538 | list_add_tail(&vsock->list, &vhost_vsock_list); | ||
| 539 | spin_unlock_bh(&vhost_vsock_lock); | ||
| 540 | return 0; | 540 | return 0; |
| 541 | 541 | ||
| 542 | out: | 542 | out: |
| @@ -563,13 +563,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk) | |||
| 563 | * executing. | 563 | * executing. |
| 564 | */ | 564 | */ |
| 565 | 565 | ||
| 566 | if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) { | 566 | /* If the peer is still valid, no need to reset connection */ |
| 567 | sock_set_flag(sk, SOCK_DONE); | 567 | if (vhost_vsock_get(vsk->remote_addr.svm_cid)) |
| 568 | vsk->peer_shutdown = SHUTDOWN_MASK; | 568 | return; |
| 569 | sk->sk_state = SS_UNCONNECTED; | 569 | |
| 570 | sk->sk_err = ECONNRESET; | 570 | /* If the close timeout is pending, let it expire. This avoids races |
| 571 | sk->sk_error_report(sk); | 571 | * with the timeout callback. |
| 572 | } | 572 | */ |
| 573 | if (vsk->close_work_scheduled) | ||
| 574 | return; | ||
| 575 | |||
| 576 | sock_set_flag(sk, SOCK_DONE); | ||
| 577 | vsk->peer_shutdown = SHUTDOWN_MASK; | ||
| 578 | sk->sk_state = SS_UNCONNECTED; | ||
| 579 | sk->sk_err = ECONNRESET; | ||
| 580 | sk->sk_error_report(sk); | ||
| 573 | } | 581 | } |
| 574 | 582 | ||
| 575 | static int vhost_vsock_dev_release(struct inode *inode, struct file *file) | 583 | static int vhost_vsock_dev_release(struct inode *inode, struct file *file) |
| @@ -577,9 +585,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file) | |||
| 577 | struct vhost_vsock *vsock = file->private_data; | 585 | struct vhost_vsock *vsock = file->private_data; |
| 578 | 586 | ||
| 579 | spin_lock_bh(&vhost_vsock_lock); | 587 | spin_lock_bh(&vhost_vsock_lock); |
| 580 | list_del(&vsock->list); | 588 | if (vsock->guest_cid) |
| 589 | hash_del_rcu(&vsock->hash); | ||
| 581 | spin_unlock_bh(&vhost_vsock_lock); | 590 | spin_unlock_bh(&vhost_vsock_lock); |
| 582 | 591 | ||
| 592 | /* Wait for other CPUs to finish using vsock */ | ||
| 593 | synchronize_rcu(); | ||
| 594 | |||
| 583 | /* Iterating over all connections for all CIDs to find orphans is | 595 | /* Iterating over all connections for all CIDs to find orphans is |
| 584 | * inefficient. Room for improvement here. */ | 596 | * inefficient. Room for improvement here. */ |
| 585 | vsock_for_each_connected_socket(vhost_vsock_reset_orphans); | 597 | vsock_for_each_connected_socket(vhost_vsock_reset_orphans); |
| @@ -620,12 +632,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) | |||
| 620 | 632 | ||
| 621 | /* Refuse if CID is already in use */ | 633 | /* Refuse if CID is already in use */ |
| 622 | spin_lock_bh(&vhost_vsock_lock); | 634 | spin_lock_bh(&vhost_vsock_lock); |
| 623 | other = __vhost_vsock_get(guest_cid); | 635 | other = vhost_vsock_get(guest_cid); |
| 624 | if (other && other != vsock) { | 636 | if (other && other != vsock) { |
| 625 | spin_unlock_bh(&vhost_vsock_lock); | 637 | spin_unlock_bh(&vhost_vsock_lock); |
| 626 | return -EADDRINUSE; | 638 | return -EADDRINUSE; |
| 627 | } | 639 | } |
| 640 | |||
| 641 | if (vsock->guest_cid) | ||
| 642 | hash_del_rcu(&vsock->hash); | ||
| 643 | |||
| 628 | vsock->guest_cid = guest_cid; | 644 | vsock->guest_cid = guest_cid; |
| 645 | hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid); | ||
| 629 | spin_unlock_bh(&vhost_vsock_lock); | 646 | spin_unlock_bh(&vhost_vsock_lock); |
| 630 | 647 | ||
| 631 | return 0; | 648 | return 0; |
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 678b27063198..f9ef0673a083 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c | |||
| @@ -562,7 +562,30 @@ static int pwm_backlight_probe(struct platform_device *pdev) | |||
| 562 | goto err_alloc; | 562 | goto err_alloc; |
| 563 | } | 563 | } |
| 564 | 564 | ||
| 565 | if (!data->levels) { | 565 | if (data->levels) { |
| 566 | /* | ||
| 567 | * For the DT case, only when brightness levels is defined | ||
| 568 | * data->levels is filled. For the non-DT case, data->levels | ||
| 569 | * can come from platform data, however is not usual. | ||
| 570 | */ | ||
| 571 | for (i = 0; i <= data->max_brightness; i++) { | ||
| 572 | if (data->levels[i] > pb->scale) | ||
| 573 | pb->scale = data->levels[i]; | ||
| 574 | |||
| 575 | pb->levels = data->levels; | ||
| 576 | } | ||
| 577 | } else if (!data->max_brightness) { | ||
| 578 | /* | ||
| 579 | * If no brightness levels are provided and max_brightness is | ||
| 580 | * not set, use the default brightness table. For the DT case, | ||
| 581 | * max_brightness is set to 0 when brightness levels is not | ||
| 582 | * specified. For the non-DT case, max_brightness is usually | ||
| 583 | * set to some value. | ||
| 584 | */ | ||
| 585 | |||
| 586 | /* Get the PWM period (in nanoseconds) */ | ||
| 587 | pwm_get_state(pb->pwm, &state); | ||
| 588 | |||
| 566 | ret = pwm_backlight_brightness_default(&pdev->dev, data, | 589 | ret = pwm_backlight_brightness_default(&pdev->dev, data, |
| 567 | state.period); | 590 | state.period); |
| 568 | if (ret < 0) { | 591 | if (ret < 0) { |
| @@ -570,13 +593,19 @@ static int pwm_backlight_probe(struct platform_device *pdev) | |||
| 570 | "failed to setup default brightness table\n"); | 593 | "failed to setup default brightness table\n"); |
| 571 | goto err_alloc; | 594 | goto err_alloc; |
| 572 | } | 595 | } |
| 573 | } | ||
| 574 | 596 | ||
| 575 | for (i = 0; i <= data->max_brightness; i++) { | 597 | for (i = 0; i <= data->max_brightness; i++) { |
| 576 | if (data->levels[i] > pb->scale) | 598 | if (data->levels[i] > pb->scale) |
| 577 | pb->scale = data->levels[i]; | 599 | pb->scale = data->levels[i]; |
| 578 | 600 | ||
| 579 | pb->levels = data->levels; | 601 | pb->levels = data->levels; |
| 602 | } | ||
| 603 | } else { | ||
| 604 | /* | ||
| 605 | * That only happens for the non-DT case, where platform data | ||
| 606 | * sets the max_brightness value. | ||
| 607 | */ | ||
| 608 | pb->scale = data->max_brightness; | ||
| 580 | } | 609 | } |
| 581 | 610 | ||
| 582 | pb->lth_brightness = data->lth_brightness * (state.period / pb->scale); | 611 | pb->lth_brightness = data->lth_brightness * (state.period / pb->scale); |
| @@ -45,6 +45,7 @@ | |||
| 45 | 45 | ||
| 46 | #include <asm/kmap_types.h> | 46 | #include <asm/kmap_types.h> |
| 47 | #include <linux/uaccess.h> | 47 | #include <linux/uaccess.h> |
| 48 | #include <linux/nospec.h> | ||
| 48 | 49 | ||
| 49 | #include "internal.h" | 50 | #include "internal.h" |
| 50 | 51 | ||
| @@ -1038,6 +1039,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) | |||
| 1038 | if (!table || id >= table->nr) | 1039 | if (!table || id >= table->nr) |
| 1039 | goto out; | 1040 | goto out; |
| 1040 | 1041 | ||
| 1042 | id = array_index_nospec(id, table->nr); | ||
| 1041 | ctx = rcu_dereference(table->table[id]); | 1043 | ctx = rcu_dereference(table->table[id]); |
| 1042 | if (ctx && ctx->user_id == ctx_id) { | 1044 | if (ctx && ctx->user_id == ctx_id) { |
| 1043 | if (percpu_ref_tryget_live(&ctx->users)) | 1045 | if (percpu_ref_tryget_live(&ctx->users)) |
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index efcf89a8ba44..1a4e2b101ef2 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c | |||
| @@ -389,13 +389,11 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info, | |||
| 389 | 389 | ||
| 390 | /* | 390 | /* |
| 391 | * Here we don't really care about alignment since extent allocator can | 391 | * Here we don't really care about alignment since extent allocator can |
| 392 | * handle it. We care more about the size, as if one block group is | 392 | * handle it. We care more about the size. |
| 393 | * larger than maximum size, it's must be some obvious corruption. | ||
| 394 | */ | 393 | */ |
| 395 | if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) { | 394 | if (key->offset == 0) { |
| 396 | block_group_err(fs_info, leaf, slot, | 395 | block_group_err(fs_info, leaf, slot, |
| 397 | "invalid block group size, have %llu expect (0, %llu]", | 396 | "invalid block group size 0"); |
| 398 | key->offset, BTRFS_MAX_DATA_CHUNK_SIZE); | ||
| 399 | return -EUCLEAN; | 397 | return -EUCLEAN; |
| 400 | } | 398 | } |
| 401 | 399 | ||
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index b5ecd6f50360..4e9a7cc488da 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
| @@ -563,8 +563,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) | |||
| 563 | seq_puts(m, ",noacl"); | 563 | seq_puts(m, ",noacl"); |
| 564 | #endif | 564 | #endif |
| 565 | 565 | ||
| 566 | if (fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) | 566 | if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0) |
| 567 | seq_puts(m, ",nocopyfrom"); | 567 | seq_puts(m, ",copyfrom"); |
| 568 | 568 | ||
| 569 | if (fsopt->mds_namespace) | 569 | if (fsopt->mds_namespace) |
| 570 | seq_show_option(m, "mds_namespace", fsopt->mds_namespace); | 570 | seq_show_option(m, "mds_namespace", fsopt->mds_namespace); |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index c005a5400f2e..79a265ba9200 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
| @@ -42,7 +42,9 @@ | |||
| 42 | #define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */ | 42 | #define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */ |
| 43 | #define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */ | 43 | #define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */ |
| 44 | 44 | ||
| 45 | #define CEPH_MOUNT_OPT_DEFAULT CEPH_MOUNT_OPT_DCACHE | 45 | #define CEPH_MOUNT_OPT_DEFAULT \ |
| 46 | (CEPH_MOUNT_OPT_DCACHE | \ | ||
| 47 | CEPH_MOUNT_OPT_NOCOPYFROM) | ||
| 46 | 48 | ||
| 47 | #define ceph_set_mount_opt(fsc, opt) \ | 49 | #define ceph_set_mount_opt(fsc, opt) \ |
| 48 | (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt; | 50 | (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt; |
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index abcd78e332fe..85dadb93c992 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
| @@ -133,7 +133,7 @@ config CIFS_XATTR | |||
| 133 | 133 | ||
| 134 | config CIFS_POSIX | 134 | config CIFS_POSIX |
| 135 | bool "CIFS POSIX Extensions" | 135 | bool "CIFS POSIX Extensions" |
| 136 | depends on CIFS_XATTR | 136 | depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR |
| 137 | help | 137 | help |
| 138 | Enabling this option will cause the cifs client to attempt to | 138 | Enabling this option will cause the cifs client to attempt to |
| 139 | negotiate a newer dialect with servers, such as Samba 3.0.5 | 139 | negotiate a newer dialect with servers, such as Samba 3.0.5 |
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 3713d22b95a7..907e85d65bb4 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
| @@ -174,7 +174,7 @@ cifs_bp_rename_retry: | |||
| 174 | 174 | ||
| 175 | cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath); | 175 | cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath); |
| 176 | memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1); | 176 | memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1); |
| 177 | full_path[dfsplen] = '\\'; | 177 | full_path[dfsplen] = dirsep; |
| 178 | for (i = 0; i < pplen-1; i++) | 178 | for (i = 0; i < pplen-1; i++) |
| 179 | if (full_path[dfsplen+1+i] == '/') | 179 | if (full_path[dfsplen+1+i] == '/') |
| 180 | full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb); | 180 | full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb); |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 74c33d5fafc8..c9bc56b1baac 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -2541,14 +2541,13 @@ static int | |||
| 2541 | cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list, | 2541 | cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list, |
| 2542 | struct cifs_aio_ctx *ctx) | 2542 | struct cifs_aio_ctx *ctx) |
| 2543 | { | 2543 | { |
| 2544 | int wait_retry = 0; | ||
| 2545 | unsigned int wsize, credits; | 2544 | unsigned int wsize, credits; |
| 2546 | int rc; | 2545 | int rc; |
| 2547 | struct TCP_Server_Info *server = | 2546 | struct TCP_Server_Info *server = |
| 2548 | tlink_tcon(wdata->cfile->tlink)->ses->server; | 2547 | tlink_tcon(wdata->cfile->tlink)->ses->server; |
| 2549 | 2548 | ||
| 2550 | /* | 2549 | /* |
| 2551 | * Try to resend this wdata, waiting for credits up to 3 seconds. | 2550 | * Wait for credits to resend this wdata. |
| 2552 | * Note: we are attempting to resend the whole wdata not in segments | 2551 | * Note: we are attempting to resend the whole wdata not in segments |
| 2553 | */ | 2552 | */ |
| 2554 | do { | 2553 | do { |
| @@ -2556,19 +2555,13 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list, | |||
| 2556 | server, wdata->bytes, &wsize, &credits); | 2555 | server, wdata->bytes, &wsize, &credits); |
| 2557 | 2556 | ||
| 2558 | if (rc) | 2557 | if (rc) |
| 2559 | break; | 2558 | goto out; |
| 2560 | 2559 | ||
| 2561 | if (wsize < wdata->bytes) { | 2560 | if (wsize < wdata->bytes) { |
| 2562 | add_credits_and_wake_if(server, credits, 0); | 2561 | add_credits_and_wake_if(server, credits, 0); |
| 2563 | msleep(1000); | 2562 | msleep(1000); |
| 2564 | wait_retry++; | ||
| 2565 | } | 2563 | } |
| 2566 | } while (wsize < wdata->bytes && wait_retry < 3); | 2564 | } while (wsize < wdata->bytes); |
| 2567 | |||
| 2568 | if (wsize < wdata->bytes) { | ||
| 2569 | rc = -EBUSY; | ||
| 2570 | goto out; | ||
| 2571 | } | ||
| 2572 | 2565 | ||
| 2573 | rc = -EAGAIN; | 2566 | rc = -EAGAIN; |
| 2574 | while (rc == -EAGAIN) { | 2567 | while (rc == -EAGAIN) { |
| @@ -3234,14 +3227,13 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata, | |||
| 3234 | struct list_head *rdata_list, | 3227 | struct list_head *rdata_list, |
| 3235 | struct cifs_aio_ctx *ctx) | 3228 | struct cifs_aio_ctx *ctx) |
| 3236 | { | 3229 | { |
| 3237 | int wait_retry = 0; | ||
| 3238 | unsigned int rsize, credits; | 3230 | unsigned int rsize, credits; |
| 3239 | int rc; | 3231 | int rc; |
| 3240 | struct TCP_Server_Info *server = | 3232 | struct TCP_Server_Info *server = |
| 3241 | tlink_tcon(rdata->cfile->tlink)->ses->server; | 3233 | tlink_tcon(rdata->cfile->tlink)->ses->server; |
| 3242 | 3234 | ||
| 3243 | /* | 3235 | /* |
| 3244 | * Try to resend this rdata, waiting for credits up to 3 seconds. | 3236 | * Wait for credits to resend this rdata. |
| 3245 | * Note: we are attempting to resend the whole rdata not in segments | 3237 | * Note: we are attempting to resend the whole rdata not in segments |
| 3246 | */ | 3238 | */ |
| 3247 | do { | 3239 | do { |
| @@ -3249,24 +3241,13 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata, | |||
| 3249 | &rsize, &credits); | 3241 | &rsize, &credits); |
| 3250 | 3242 | ||
| 3251 | if (rc) | 3243 | if (rc) |
| 3252 | break; | 3244 | goto out; |
| 3253 | 3245 | ||
| 3254 | if (rsize < rdata->bytes) { | 3246 | if (rsize < rdata->bytes) { |
| 3255 | add_credits_and_wake_if(server, credits, 0); | 3247 | add_credits_and_wake_if(server, credits, 0); |
| 3256 | msleep(1000); | 3248 | msleep(1000); |
| 3257 | wait_retry++; | ||
| 3258 | } | 3249 | } |
| 3259 | } while (rsize < rdata->bytes && wait_retry < 3); | 3250 | } while (rsize < rdata->bytes); |
| 3260 | |||
| 3261 | /* | ||
| 3262 | * If we can't find enough credits to send this rdata | ||
| 3263 | * release the rdata and return failure, this will pass | ||
| 3264 | * whatever I/O amount we have finished to VFS. | ||
| 3265 | */ | ||
| 3266 | if (rsize < rdata->bytes) { | ||
| 3267 | rc = -EBUSY; | ||
| 3268 | goto out; | ||
| 3269 | } | ||
| 3270 | 3251 | ||
| 3271 | rc = -EAGAIN; | 3252 | rc = -EAGAIN; |
| 3272 | while (rc == -EAGAIN) { | 3253 | while (rc == -EAGAIN) { |
| @@ -232,6 +232,34 @@ static void *get_unlocked_entry(struct xa_state *xas) | |||
| 232 | } | 232 | } |
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | /* | ||
| 236 | * The only thing keeping the address space around is the i_pages lock | ||
| 237 | * (it's cycled in clear_inode() after removing the entries from i_pages) | ||
| 238 | * After we call xas_unlock_irq(), we cannot touch xas->xa. | ||
| 239 | */ | ||
| 240 | static void wait_entry_unlocked(struct xa_state *xas, void *entry) | ||
| 241 | { | ||
| 242 | struct wait_exceptional_entry_queue ewait; | ||
| 243 | wait_queue_head_t *wq; | ||
| 244 | |||
| 245 | init_wait(&ewait.wait); | ||
| 246 | ewait.wait.func = wake_exceptional_entry_func; | ||
| 247 | |||
| 248 | wq = dax_entry_waitqueue(xas, entry, &ewait.key); | ||
| 249 | prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); | ||
| 250 | xas_unlock_irq(xas); | ||
| 251 | schedule(); | ||
| 252 | finish_wait(wq, &ewait.wait); | ||
| 253 | |||
| 254 | /* | ||
| 255 | * Entry lock waits are exclusive. Wake up the next waiter since | ||
| 256 | * we aren't sure we will acquire the entry lock and thus wake | ||
| 257 | * the next waiter up on unlock. | ||
| 258 | */ | ||
| 259 | if (waitqueue_active(wq)) | ||
| 260 | __wake_up(wq, TASK_NORMAL, 1, &ewait.key); | ||
| 261 | } | ||
| 262 | |||
| 235 | static void put_unlocked_entry(struct xa_state *xas, void *entry) | 263 | static void put_unlocked_entry(struct xa_state *xas, void *entry) |
| 236 | { | 264 | { |
| 237 | /* If we were the only waiter woken, wake the next one */ | 265 | /* If we were the only waiter woken, wake the next one */ |
| @@ -351,21 +379,21 @@ static struct page *dax_busy_page(void *entry) | |||
| 351 | * @page: The page whose entry we want to lock | 379 | * @page: The page whose entry we want to lock |
| 352 | * | 380 | * |
| 353 | * Context: Process context. | 381 | * Context: Process context. |
| 354 | * Return: %true if the entry was locked or does not need to be locked. | 382 | * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could |
| 383 | * not be locked. | ||
| 355 | */ | 384 | */ |
| 356 | bool dax_lock_mapping_entry(struct page *page) | 385 | dax_entry_t dax_lock_page(struct page *page) |
| 357 | { | 386 | { |
| 358 | XA_STATE(xas, NULL, 0); | 387 | XA_STATE(xas, NULL, 0); |
| 359 | void *entry; | 388 | void *entry; |
| 360 | bool locked; | ||
| 361 | 389 | ||
| 362 | /* Ensure page->mapping isn't freed while we look at it */ | 390 | /* Ensure page->mapping isn't freed while we look at it */ |
| 363 | rcu_read_lock(); | 391 | rcu_read_lock(); |
| 364 | for (;;) { | 392 | for (;;) { |
| 365 | struct address_space *mapping = READ_ONCE(page->mapping); | 393 | struct address_space *mapping = READ_ONCE(page->mapping); |
| 366 | 394 | ||
| 367 | locked = false; | 395 | entry = NULL; |
| 368 | if (!dax_mapping(mapping)) | 396 | if (!mapping || !dax_mapping(mapping)) |
| 369 | break; | 397 | break; |
| 370 | 398 | ||
| 371 | /* | 399 | /* |
| @@ -375,7 +403,7 @@ bool dax_lock_mapping_entry(struct page *page) | |||
| 375 | * otherwise we would not have a valid pfn_to_page() | 403 | * otherwise we would not have a valid pfn_to_page() |
| 376 | * translation. | 404 | * translation. |
| 377 | */ | 405 | */ |
| 378 | locked = true; | 406 | entry = (void *)~0UL; |
| 379 | if (S_ISCHR(mapping->host->i_mode)) | 407 | if (S_ISCHR(mapping->host->i_mode)) |
| 380 | break; | 408 | break; |
| 381 | 409 | ||
| @@ -389,9 +417,7 @@ bool dax_lock_mapping_entry(struct page *page) | |||
| 389 | entry = xas_load(&xas); | 417 | entry = xas_load(&xas); |
| 390 | if (dax_is_locked(entry)) { | 418 | if (dax_is_locked(entry)) { |
| 391 | rcu_read_unlock(); | 419 | rcu_read_unlock(); |
| 392 | entry = get_unlocked_entry(&xas); | 420 | wait_entry_unlocked(&xas, entry); |
| 393 | xas_unlock_irq(&xas); | ||
| 394 | put_unlocked_entry(&xas, entry); | ||
| 395 | rcu_read_lock(); | 421 | rcu_read_lock(); |
| 396 | continue; | 422 | continue; |
| 397 | } | 423 | } |
| @@ -400,23 +426,18 @@ bool dax_lock_mapping_entry(struct page *page) | |||
| 400 | break; | 426 | break; |
| 401 | } | 427 | } |
| 402 | rcu_read_unlock(); | 428 | rcu_read_unlock(); |
| 403 | return locked; | 429 | return (dax_entry_t)entry; |
| 404 | } | 430 | } |
| 405 | 431 | ||
| 406 | void dax_unlock_mapping_entry(struct page *page) | 432 | void dax_unlock_page(struct page *page, dax_entry_t cookie) |
| 407 | { | 433 | { |
| 408 | struct address_space *mapping = page->mapping; | 434 | struct address_space *mapping = page->mapping; |
| 409 | XA_STATE(xas, &mapping->i_pages, page->index); | 435 | XA_STATE(xas, &mapping->i_pages, page->index); |
| 410 | void *entry; | ||
| 411 | 436 | ||
| 412 | if (S_ISCHR(mapping->host->i_mode)) | 437 | if (S_ISCHR(mapping->host->i_mode)) |
| 413 | return; | 438 | return; |
| 414 | 439 | ||
| 415 | rcu_read_lock(); | 440 | dax_unlock_entry(&xas, (void *)cookie); |
| 416 | entry = xas_load(&xas); | ||
| 417 | rcu_read_unlock(); | ||
| 418 | entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry)); | ||
| 419 | dax_unlock_entry(&xas, entry); | ||
| 420 | } | 441 | } |
| 421 | 442 | ||
| 422 | /* | 443 | /* |
| @@ -62,7 +62,6 @@ | |||
| 62 | #include <linux/oom.h> | 62 | #include <linux/oom.h> |
| 63 | #include <linux/compat.h> | 63 | #include <linux/compat.h> |
| 64 | #include <linux/vmalloc.h> | 64 | #include <linux/vmalloc.h> |
| 65 | #include <linux/freezer.h> | ||
| 66 | 65 | ||
| 67 | #include <linux/uaccess.h> | 66 | #include <linux/uaccess.h> |
| 68 | #include <asm/mmu_context.h> | 67 | #include <asm/mmu_context.h> |
| @@ -1084,7 +1083,7 @@ static int de_thread(struct task_struct *tsk) | |||
| 1084 | while (sig->notify_count) { | 1083 | while (sig->notify_count) { |
| 1085 | __set_current_state(TASK_KILLABLE); | 1084 | __set_current_state(TASK_KILLABLE); |
| 1086 | spin_unlock_irq(lock); | 1085 | spin_unlock_irq(lock); |
| 1087 | freezable_schedule(); | 1086 | schedule(); |
| 1088 | if (unlikely(__fatal_signal_pending(tsk))) | 1087 | if (unlikely(__fatal_signal_pending(tsk))) |
| 1089 | goto killed; | 1088 | goto killed; |
| 1090 | spin_lock_irq(lock); | 1089 | spin_lock_irq(lock); |
| @@ -1112,7 +1111,7 @@ static int de_thread(struct task_struct *tsk) | |||
| 1112 | __set_current_state(TASK_KILLABLE); | 1111 | __set_current_state(TASK_KILLABLE); |
| 1113 | write_unlock_irq(&tasklist_lock); | 1112 | write_unlock_irq(&tasklist_lock); |
| 1114 | cgroup_threadgroup_change_end(tsk); | 1113 | cgroup_threadgroup_change_end(tsk); |
| 1115 | freezable_schedule(); | 1114 | schedule(); |
| 1116 | if (unlikely(__fatal_signal_pending(tsk))) | 1115 | if (unlikely(__fatal_signal_pending(tsk))) |
| 1117 | goto killed; | 1116 | goto killed; |
| 1118 | } | 1117 | } |
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 47395b0c3b35..e909678afa2d 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
| @@ -1119,8 +1119,10 @@ static int fuse_permission(struct inode *inode, int mask) | |||
| 1119 | if (fc->default_permissions || | 1119 | if (fc->default_permissions || |
| 1120 | ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { | 1120 | ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { |
| 1121 | struct fuse_inode *fi = get_fuse_inode(inode); | 1121 | struct fuse_inode *fi = get_fuse_inode(inode); |
| 1122 | u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID; | ||
| 1122 | 1123 | ||
| 1123 | if (time_before64(fi->i_time, get_jiffies_64())) { | 1124 | if (perm_mask & READ_ONCE(fi->inval_mask) || |
| 1125 | time_before64(fi->i_time, get_jiffies_64())) { | ||
| 1124 | refreshed = true; | 1126 | refreshed = true; |
| 1125 | 1127 | ||
| 1126 | err = fuse_perm_getattr(inode, mask); | 1128 | err = fuse_perm_getattr(inode, mask); |
| @@ -1241,7 +1243,7 @@ static int fuse_dir_open(struct inode *inode, struct file *file) | |||
| 1241 | 1243 | ||
| 1242 | static int fuse_dir_release(struct inode *inode, struct file *file) | 1244 | static int fuse_dir_release(struct inode *inode, struct file *file) |
| 1243 | { | 1245 | { |
| 1244 | fuse_release_common(file, FUSE_RELEASEDIR); | 1246 | fuse_release_common(file, true); |
| 1245 | 1247 | ||
| 1246 | return 0; | 1248 | return 0; |
| 1247 | } | 1249 | } |
| @@ -1249,7 +1251,25 @@ static int fuse_dir_release(struct inode *inode, struct file *file) | |||
| 1249 | static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end, | 1251 | static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end, |
| 1250 | int datasync) | 1252 | int datasync) |
| 1251 | { | 1253 | { |
| 1252 | return fuse_fsync_common(file, start, end, datasync, 1); | 1254 | struct inode *inode = file->f_mapping->host; |
| 1255 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
| 1256 | int err; | ||
| 1257 | |||
| 1258 | if (is_bad_inode(inode)) | ||
| 1259 | return -EIO; | ||
| 1260 | |||
| 1261 | if (fc->no_fsyncdir) | ||
| 1262 | return 0; | ||
| 1263 | |||
| 1264 | inode_lock(inode); | ||
| 1265 | err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR); | ||
| 1266 | if (err == -ENOSYS) { | ||
| 1267 | fc->no_fsyncdir = 1; | ||
| 1268 | err = 0; | ||
| 1269 | } | ||
| 1270 | inode_unlock(inode); | ||
| 1271 | |||
| 1272 | return err; | ||
| 1253 | } | 1273 | } |
| 1254 | 1274 | ||
| 1255 | static long fuse_dir_ioctl(struct file *file, unsigned int cmd, | 1275 | static long fuse_dir_ioctl(struct file *file, unsigned int cmd, |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index b52f9baaa3e7..ffaffe18352a 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
| @@ -89,12 +89,12 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) | |||
| 89 | iput(req->misc.release.inode); | 89 | iput(req->misc.release.inode); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | static void fuse_file_put(struct fuse_file *ff, bool sync) | 92 | static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir) |
| 93 | { | 93 | { |
| 94 | if (refcount_dec_and_test(&ff->count)) { | 94 | if (refcount_dec_and_test(&ff->count)) { |
| 95 | struct fuse_req *req = ff->reserved_req; | 95 | struct fuse_req *req = ff->reserved_req; |
| 96 | 96 | ||
| 97 | if (ff->fc->no_open) { | 97 | if (ff->fc->no_open && !isdir) { |
| 98 | /* | 98 | /* |
| 99 | * Drop the release request when client does not | 99 | * Drop the release request when client does not |
| 100 | * implement 'open' | 100 | * implement 'open' |
| @@ -247,10 +247,11 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) | |||
| 247 | req->in.args[0].value = inarg; | 247 | req->in.args[0].value = inarg; |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | void fuse_release_common(struct file *file, int opcode) | 250 | void fuse_release_common(struct file *file, bool isdir) |
| 251 | { | 251 | { |
| 252 | struct fuse_file *ff = file->private_data; | 252 | struct fuse_file *ff = file->private_data; |
| 253 | struct fuse_req *req = ff->reserved_req; | 253 | struct fuse_req *req = ff->reserved_req; |
| 254 | int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; | ||
| 254 | 255 | ||
| 255 | fuse_prepare_release(ff, file->f_flags, opcode); | 256 | fuse_prepare_release(ff, file->f_flags, opcode); |
| 256 | 257 | ||
| @@ -272,7 +273,7 @@ void fuse_release_common(struct file *file, int opcode) | |||
| 272 | * synchronous RELEASE is allowed (and desirable) in this case | 273 | * synchronous RELEASE is allowed (and desirable) in this case |
| 273 | * because the server can be trusted not to screw up. | 274 | * because the server can be trusted not to screw up. |
| 274 | */ | 275 | */ |
| 275 | fuse_file_put(ff, ff->fc->destroy_req != NULL); | 276 | fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir); |
| 276 | } | 277 | } |
| 277 | 278 | ||
| 278 | static int fuse_open(struct inode *inode, struct file *file) | 279 | static int fuse_open(struct inode *inode, struct file *file) |
| @@ -288,7 +289,7 @@ static int fuse_release(struct inode *inode, struct file *file) | |||
| 288 | if (fc->writeback_cache) | 289 | if (fc->writeback_cache) |
| 289 | write_inode_now(inode, 1); | 290 | write_inode_now(inode, 1); |
| 290 | 291 | ||
| 291 | fuse_release_common(file, FUSE_RELEASE); | 292 | fuse_release_common(file, false); |
| 292 | 293 | ||
| 293 | /* return value is ignored by VFS */ | 294 | /* return value is ignored by VFS */ |
| 294 | return 0; | 295 | return 0; |
| @@ -302,7 +303,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags) | |||
| 302 | * iput(NULL) is a no-op and since the refcount is 1 and everything's | 303 | * iput(NULL) is a no-op and since the refcount is 1 and everything's |
| 303 | * synchronous, we are fine with not doing igrab() here" | 304 | * synchronous, we are fine with not doing igrab() here" |
| 304 | */ | 305 | */ |
| 305 | fuse_file_put(ff, true); | 306 | fuse_file_put(ff, true, false); |
| 306 | } | 307 | } |
| 307 | EXPORT_SYMBOL_GPL(fuse_sync_release); | 308 | EXPORT_SYMBOL_GPL(fuse_sync_release); |
| 308 | 309 | ||
| @@ -441,13 +442,30 @@ static int fuse_flush(struct file *file, fl_owner_t id) | |||
| 441 | } | 442 | } |
| 442 | 443 | ||
| 443 | int fuse_fsync_common(struct file *file, loff_t start, loff_t end, | 444 | int fuse_fsync_common(struct file *file, loff_t start, loff_t end, |
| 444 | int datasync, int isdir) | 445 | int datasync, int opcode) |
| 445 | { | 446 | { |
| 446 | struct inode *inode = file->f_mapping->host; | 447 | struct inode *inode = file->f_mapping->host; |
| 447 | struct fuse_conn *fc = get_fuse_conn(inode); | 448 | struct fuse_conn *fc = get_fuse_conn(inode); |
| 448 | struct fuse_file *ff = file->private_data; | 449 | struct fuse_file *ff = file->private_data; |
| 449 | FUSE_ARGS(args); | 450 | FUSE_ARGS(args); |
| 450 | struct fuse_fsync_in inarg; | 451 | struct fuse_fsync_in inarg; |
| 452 | |||
| 453 | memset(&inarg, 0, sizeof(inarg)); | ||
| 454 | inarg.fh = ff->fh; | ||
| 455 | inarg.fsync_flags = datasync ? 1 : 0; | ||
| 456 | args.in.h.opcode = opcode; | ||
| 457 | args.in.h.nodeid = get_node_id(inode); | ||
| 458 | args.in.numargs = 1; | ||
| 459 | args.in.args[0].size = sizeof(inarg); | ||
| 460 | args.in.args[0].value = &inarg; | ||
| 461 | return fuse_simple_request(fc, &args); | ||
| 462 | } | ||
| 463 | |||
| 464 | static int fuse_fsync(struct file *file, loff_t start, loff_t end, | ||
| 465 | int datasync) | ||
| 466 | { | ||
| 467 | struct inode *inode = file->f_mapping->host; | ||
| 468 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
| 451 | int err; | 469 | int err; |
| 452 | 470 | ||
| 453 | if (is_bad_inode(inode)) | 471 | if (is_bad_inode(inode)) |
| @@ -479,34 +497,18 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end, | |||
| 479 | if (err) | 497 | if (err) |
| 480 | goto out; | 498 | goto out; |
| 481 | 499 | ||
| 482 | if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) | 500 | if (fc->no_fsync) |
| 483 | goto out; | 501 | goto out; |
| 484 | 502 | ||
| 485 | memset(&inarg, 0, sizeof(inarg)); | 503 | err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC); |
| 486 | inarg.fh = ff->fh; | ||
| 487 | inarg.fsync_flags = datasync ? 1 : 0; | ||
| 488 | args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC; | ||
| 489 | args.in.h.nodeid = get_node_id(inode); | ||
| 490 | args.in.numargs = 1; | ||
| 491 | args.in.args[0].size = sizeof(inarg); | ||
| 492 | args.in.args[0].value = &inarg; | ||
| 493 | err = fuse_simple_request(fc, &args); | ||
| 494 | if (err == -ENOSYS) { | 504 | if (err == -ENOSYS) { |
| 495 | if (isdir) | 505 | fc->no_fsync = 1; |
| 496 | fc->no_fsyncdir = 1; | ||
| 497 | else | ||
| 498 | fc->no_fsync = 1; | ||
| 499 | err = 0; | 506 | err = 0; |
| 500 | } | 507 | } |
| 501 | out: | 508 | out: |
| 502 | inode_unlock(inode); | 509 | inode_unlock(inode); |
| 503 | return err; | ||
| 504 | } | ||
| 505 | 510 | ||
| 506 | static int fuse_fsync(struct file *file, loff_t start, loff_t end, | 511 | return err; |
| 507 | int datasync) | ||
| 508 | { | ||
| 509 | return fuse_fsync_common(file, start, end, datasync, 0); | ||
| 510 | } | 512 | } |
| 511 | 513 | ||
| 512 | void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, | 514 | void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, |
| @@ -807,7 +809,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) | |||
| 807 | put_page(page); | 809 | put_page(page); |
| 808 | } | 810 | } |
| 809 | if (req->ff) | 811 | if (req->ff) |
| 810 | fuse_file_put(req->ff, false); | 812 | fuse_file_put(req->ff, false, false); |
| 811 | } | 813 | } |
| 812 | 814 | ||
| 813 | static void fuse_send_readpages(struct fuse_req *req, struct file *file) | 815 | static void fuse_send_readpages(struct fuse_req *req, struct file *file) |
| @@ -1460,7 +1462,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) | |||
| 1460 | __free_page(req->pages[i]); | 1462 | __free_page(req->pages[i]); |
| 1461 | 1463 | ||
| 1462 | if (req->ff) | 1464 | if (req->ff) |
| 1463 | fuse_file_put(req->ff, false); | 1465 | fuse_file_put(req->ff, false, false); |
| 1464 | } | 1466 | } |
| 1465 | 1467 | ||
| 1466 | static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) | 1468 | static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) |
| @@ -1619,7 +1621,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 1619 | ff = __fuse_write_file_get(fc, fi); | 1621 | ff = __fuse_write_file_get(fc, fi); |
| 1620 | err = fuse_flush_times(inode, ff); | 1622 | err = fuse_flush_times(inode, ff); |
| 1621 | if (ff) | 1623 | if (ff) |
| 1622 | fuse_file_put(ff, 0); | 1624 | fuse_file_put(ff, false, false); |
| 1623 | 1625 | ||
| 1624 | return err; | 1626 | return err; |
| 1625 | } | 1627 | } |
| @@ -1940,7 +1942,7 @@ static int fuse_writepages(struct address_space *mapping, | |||
| 1940 | err = 0; | 1942 | err = 0; |
| 1941 | } | 1943 | } |
| 1942 | if (data.ff) | 1944 | if (data.ff) |
| 1943 | fuse_file_put(data.ff, false); | 1945 | fuse_file_put(data.ff, false, false); |
| 1944 | 1946 | ||
| 1945 | kfree(data.orig_pages); | 1947 | kfree(data.orig_pages); |
| 1946 | out: | 1948 | out: |
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index e9f712e81c7d..2f2c92e6f8cb 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
| @@ -822,13 +822,13 @@ void fuse_sync_release(struct fuse_file *ff, int flags); | |||
| 822 | /** | 822 | /** |
| 823 | * Send RELEASE or RELEASEDIR request | 823 | * Send RELEASE or RELEASEDIR request |
| 824 | */ | 824 | */ |
| 825 | void fuse_release_common(struct file *file, int opcode); | 825 | void fuse_release_common(struct file *file, bool isdir); |
| 826 | 826 | ||
| 827 | /** | 827 | /** |
| 828 | * Send FSYNC or FSYNCDIR request | 828 | * Send FSYNC or FSYNCDIR request |
| 829 | */ | 829 | */ |
| 830 | int fuse_fsync_common(struct file *file, loff_t start, loff_t end, | 830 | int fuse_fsync_common(struct file *file, loff_t start, loff_t end, |
| 831 | int datasync, int isdir); | 831 | int datasync, int opcode); |
| 832 | 832 | ||
| 833 | /** | 833 | /** |
| 834 | * Notify poll wakeup | 834 | * Notify poll wakeup |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 0b94b23b02d4..568abed20eb2 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
| @@ -115,7 +115,7 @@ static void fuse_i_callback(struct rcu_head *head) | |||
| 115 | static void fuse_destroy_inode(struct inode *inode) | 115 | static void fuse_destroy_inode(struct inode *inode) |
| 116 | { | 116 | { |
| 117 | struct fuse_inode *fi = get_fuse_inode(inode); | 117 | struct fuse_inode *fi = get_fuse_inode(inode); |
| 118 | if (S_ISREG(inode->i_mode)) { | 118 | if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) { |
| 119 | WARN_ON(!list_empty(&fi->write_files)); | 119 | WARN_ON(!list_empty(&fi->write_files)); |
| 120 | WARN_ON(!list_empty(&fi->queued_writes)); | 120 | WARN_ON(!list_empty(&fi->queued_writes)); |
| 121 | } | 121 | } |
| @@ -1068,6 +1068,7 @@ void fuse_dev_free(struct fuse_dev *fud) | |||
| 1068 | 1068 | ||
| 1069 | fuse_conn_put(fc); | 1069 | fuse_conn_put(fc); |
| 1070 | } | 1070 | } |
| 1071 | kfree(fud->pq.processing); | ||
| 1071 | kfree(fud); | 1072 | kfree(fud); |
| 1072 | } | 1073 | } |
| 1073 | EXPORT_SYMBOL_GPL(fuse_dev_free); | 1074 | EXPORT_SYMBOL_GPL(fuse_dev_free); |
diff --git a/fs/iomap.c b/fs/iomap.c index 3ffb776fbebe..5bc172f3dfe8 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
| @@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page) | |||
| 116 | atomic_set(&iop->read_count, 0); | 116 | atomic_set(&iop->read_count, 0); |
| 117 | atomic_set(&iop->write_count, 0); | 117 | atomic_set(&iop->write_count, 0); |
| 118 | bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); | 118 | bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); |
| 119 | |||
| 120 | /* | ||
| 121 | * migrate_page_move_mapping() assumes that pages with private data have | ||
| 122 | * their count elevated by 1. | ||
| 123 | */ | ||
| 124 | get_page(page); | ||
| 119 | set_page_private(page, (unsigned long)iop); | 125 | set_page_private(page, (unsigned long)iop); |
| 120 | SetPagePrivate(page); | 126 | SetPagePrivate(page); |
| 121 | return iop; | 127 | return iop; |
| @@ -132,6 +138,7 @@ iomap_page_release(struct page *page) | |||
| 132 | WARN_ON_ONCE(atomic_read(&iop->write_count)); | 138 | WARN_ON_ONCE(atomic_read(&iop->write_count)); |
| 133 | ClearPagePrivate(page); | 139 | ClearPagePrivate(page); |
| 134 | set_page_private(page, 0); | 140 | set_page_private(page, 0); |
| 141 | put_page(page); | ||
| 135 | kfree(iop); | 142 | kfree(iop); |
| 136 | } | 143 | } |
| 137 | 144 | ||
| @@ -1877,15 +1884,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
| 1877 | dio->wait_for_completion = true; | 1884 | dio->wait_for_completion = true; |
| 1878 | ret = 0; | 1885 | ret = 0; |
| 1879 | } | 1886 | } |
| 1880 | |||
| 1881 | /* | ||
| 1882 | * Splicing to pipes can fail on a full pipe. We have to | ||
| 1883 | * swallow this to make it look like a short IO | ||
| 1884 | * otherwise the higher splice layers will completely | ||
| 1885 | * mishandle the error and stop moving data. | ||
| 1886 | */ | ||
| 1887 | if (ret == -EFAULT) | ||
| 1888 | ret = 0; | ||
| 1889 | break; | 1887 | break; |
| 1890 | } | 1888 | } |
| 1891 | pos += ret; | 1889 | pos += ret; |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index aa12c3063bae..33824a0a57bf 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
| @@ -98,8 +98,11 @@ struct nfs_direct_req { | |||
| 98 | struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */ | 98 | struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */ |
| 99 | struct work_struct work; | 99 | struct work_struct work; |
| 100 | int flags; | 100 | int flags; |
| 101 | /* for write */ | ||
| 101 | #define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */ | 102 | #define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */ |
| 102 | #define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */ | 103 | #define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */ |
| 104 | /* for read */ | ||
| 105 | #define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */ | ||
| 103 | struct nfs_writeverf verf; /* unstable write verifier */ | 106 | struct nfs_writeverf verf; /* unstable write verifier */ |
| 104 | }; | 107 | }; |
| 105 | 108 | ||
| @@ -412,7 +415,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) | |||
| 412 | struct nfs_page *req = nfs_list_entry(hdr->pages.next); | 415 | struct nfs_page *req = nfs_list_entry(hdr->pages.next); |
| 413 | struct page *page = req->wb_page; | 416 | struct page *page = req->wb_page; |
| 414 | 417 | ||
| 415 | if (!PageCompound(page) && bytes < hdr->good_bytes) | 418 | if (!PageCompound(page) && bytes < hdr->good_bytes && |
| 419 | (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY)) | ||
| 416 | set_page_dirty(page); | 420 | set_page_dirty(page); |
| 417 | bytes += req->wb_bytes; | 421 | bytes += req->wb_bytes; |
| 418 | nfs_list_remove_request(req); | 422 | nfs_list_remove_request(req); |
| @@ -587,6 +591,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) | |||
| 587 | if (!is_sync_kiocb(iocb)) | 591 | if (!is_sync_kiocb(iocb)) |
| 588 | dreq->iocb = iocb; | 592 | dreq->iocb = iocb; |
| 589 | 593 | ||
| 594 | if (iter_is_iovec(iter)) | ||
| 595 | dreq->flags = NFS_ODIRECT_SHOULD_DIRTY; | ||
| 596 | |||
| 590 | nfs_start_io_direct(inode); | 597 | nfs_start_io_direct(inode); |
| 591 | 598 | ||
| 592 | NFS_I(inode)->read_io += count; | 599 | NFS_I(inode)->read_io += count; |
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 74b36ed883ca..310d7500f665 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c | |||
| @@ -1733,7 +1733,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr) | |||
| 1733 | if (fh) | 1733 | if (fh) |
| 1734 | hdr->args.fh = fh; | 1734 | hdr->args.fh = fh; |
| 1735 | 1735 | ||
| 1736 | if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid)) | 1736 | if (vers == 4 && |
| 1737 | !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid)) | ||
| 1737 | goto out_failed; | 1738 | goto out_failed; |
| 1738 | 1739 | ||
| 1739 | /* | 1740 | /* |
| @@ -1798,7 +1799,8 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) | |||
| 1798 | if (fh) | 1799 | if (fh) |
| 1799 | hdr->args.fh = fh; | 1800 | hdr->args.fh = fh; |
| 1800 | 1801 | ||
| 1801 | if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid)) | 1802 | if (vers == 4 && |
| 1803 | !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid)) | ||
| 1802 | goto out_failed; | 1804 | goto out_failed; |
| 1803 | 1805 | ||
| 1804 | /* | 1806 | /* |
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index c6289147c787..82c129bfe58d 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c | |||
| @@ -651,6 +651,18 @@ static int ovl_symlink(struct inode *dir, struct dentry *dentry, | |||
| 651 | return ovl_create_object(dentry, S_IFLNK, 0, link); | 651 | return ovl_create_object(dentry, S_IFLNK, 0, link); |
| 652 | } | 652 | } |
| 653 | 653 | ||
| 654 | static int ovl_set_link_redirect(struct dentry *dentry) | ||
| 655 | { | ||
| 656 | const struct cred *old_cred; | ||
| 657 | int err; | ||
| 658 | |||
| 659 | old_cred = ovl_override_creds(dentry->d_sb); | ||
| 660 | err = ovl_set_redirect(dentry, false); | ||
| 661 | revert_creds(old_cred); | ||
| 662 | |||
| 663 | return err; | ||
| 664 | } | ||
| 665 | |||
| 654 | static int ovl_link(struct dentry *old, struct inode *newdir, | 666 | static int ovl_link(struct dentry *old, struct inode *newdir, |
| 655 | struct dentry *new) | 667 | struct dentry *new) |
| 656 | { | 668 | { |
| @@ -670,7 +682,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir, | |||
| 670 | goto out_drop_write; | 682 | goto out_drop_write; |
| 671 | 683 | ||
| 672 | if (ovl_is_metacopy_dentry(old)) { | 684 | if (ovl_is_metacopy_dentry(old)) { |
| 673 | err = ovl_set_redirect(old, false); | 685 | err = ovl_set_link_redirect(old); |
| 674 | if (err) | 686 | if (err) |
| 675 | goto out_drop_write; | 687 | goto out_drop_write; |
| 676 | } | 688 | } |
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index 8fa37cd7818a..54e5d17d7f3e 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c | |||
| @@ -754,9 +754,8 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb, | |||
| 754 | goto out; | 754 | goto out; |
| 755 | } | 755 | } |
| 756 | 756 | ||
| 757 | /* Otherwise, get a connected non-upper dir or disconnected non-dir */ | 757 | /* Find origin.dentry again with ovl_acceptable() layer check */ |
| 758 | if (d_is_dir(origin.dentry) && | 758 | if (d_is_dir(origin.dentry)) { |
| 759 | (origin.dentry->d_flags & DCACHE_DISCONNECTED)) { | ||
| 760 | dput(origin.dentry); | 759 | dput(origin.dentry); |
| 761 | origin.dentry = NULL; | 760 | origin.dentry = NULL; |
| 762 | err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack); | 761 | err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack); |
| @@ -769,6 +768,7 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb, | |||
| 769 | goto out_err; | 768 | goto out_err; |
| 770 | } | 769 | } |
| 771 | 770 | ||
| 771 | /* Get a connected non-upper dir or disconnected non-dir */ | ||
| 772 | dentry = ovl_get_dentry(sb, NULL, &origin, index); | 772 | dentry = ovl_get_dentry(sb, NULL, &origin, index); |
| 773 | 773 | ||
| 774 | out: | 774 | out: |
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 6bcc9dedc342..3b7ed5d2279c 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c | |||
| @@ -286,22 +286,13 @@ int ovl_permission(struct inode *inode, int mask) | |||
| 286 | if (err) | 286 | if (err) |
| 287 | return err; | 287 | return err; |
| 288 | 288 | ||
| 289 | /* No need to do any access on underlying for special files */ | 289 | old_cred = ovl_override_creds(inode->i_sb); |
| 290 | if (special_file(realinode->i_mode)) | 290 | if (!upperinode && |
| 291 | return 0; | 291 | !special_file(realinode->i_mode) && mask & MAY_WRITE) { |
| 292 | |||
| 293 | /* No need to access underlying for execute */ | ||
| 294 | mask &= ~MAY_EXEC; | ||
| 295 | if ((mask & (MAY_READ | MAY_WRITE)) == 0) | ||
| 296 | return 0; | ||
| 297 | |||
| 298 | /* Lower files get copied up, so turn write access into read */ | ||
| 299 | if (!upperinode && mask & MAY_WRITE) { | ||
| 300 | mask &= ~(MAY_WRITE | MAY_APPEND); | 292 | mask &= ~(MAY_WRITE | MAY_APPEND); |
| 293 | /* Make sure mounter can read file for copy up later */ | ||
| 301 | mask |= MAY_READ; | 294 | mask |= MAY_READ; |
| 302 | } | 295 | } |
| 303 | |||
| 304 | old_cred = ovl_override_creds(inode->i_sb); | ||
| 305 | err = inode_permission(realinode, mask); | 296 | err = inode_permission(realinode, mask); |
| 306 | revert_creds(old_cred); | 297 | revert_creds(old_cred); |
| 307 | 298 | ||
diff --git a/fs/read_write.c b/fs/read_write.c index 4dae0399c75a..58f30537c47a 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
| @@ -1956,7 +1956,7 @@ loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, | |||
| 1956 | struct inode *inode_out = file_inode(file_out); | 1956 | struct inode *inode_out = file_inode(file_out); |
| 1957 | loff_t ret; | 1957 | loff_t ret; |
| 1958 | 1958 | ||
| 1959 | WARN_ON_ONCE(remap_flags); | 1959 | WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP); |
| 1960 | 1960 | ||
| 1961 | if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) | 1961 | if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) |
| 1962 | return -EISDIR; | 1962 | return -EISDIR; |
diff --git a/fs/splice.c b/fs/splice.c index 3553f1956508..de2ede048473 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
| @@ -945,11 +945,16 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, | |||
| 945 | sd->flags &= ~SPLICE_F_NONBLOCK; | 945 | sd->flags &= ~SPLICE_F_NONBLOCK; |
| 946 | more = sd->flags & SPLICE_F_MORE; | 946 | more = sd->flags & SPLICE_F_MORE; |
| 947 | 947 | ||
| 948 | WARN_ON_ONCE(pipe->nrbufs != 0); | ||
| 949 | |||
| 948 | while (len) { | 950 | while (len) { |
| 949 | size_t read_len; | 951 | size_t read_len; |
| 950 | loff_t pos = sd->pos, prev_pos = pos; | 952 | loff_t pos = sd->pos, prev_pos = pos; |
| 951 | 953 | ||
| 952 | ret = do_splice_to(in, &pos, pipe, len, flags); | 954 | /* Don't try to read more the pipe has space for. */ |
| 955 | read_len = min_t(size_t, len, | ||
| 956 | (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT); | ||
| 957 | ret = do_splice_to(in, &pos, pipe, read_len, flags); | ||
| 953 | if (unlikely(ret <= 0)) | 958 | if (unlikely(ret <= 0)) |
| 954 | goto out_release; | 959 | goto out_release; |
| 955 | 960 | ||
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index cd58939dc977..7a85e609fc27 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
| @@ -1566,7 +1566,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, | |||
| 1566 | cond_resched(); | 1566 | cond_resched(); |
| 1567 | 1567 | ||
| 1568 | BUG_ON(!vma_can_userfault(vma)); | 1568 | BUG_ON(!vma_can_userfault(vma)); |
| 1569 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); | ||
| 1570 | 1569 | ||
| 1571 | /* | 1570 | /* |
| 1572 | * Nothing to do: this vma is already registered into this | 1571 | * Nothing to do: this vma is already registered into this |
| @@ -1575,6 +1574,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, | |||
| 1575 | if (!vma->vm_userfaultfd_ctx.ctx) | 1574 | if (!vma->vm_userfaultfd_ctx.ctx) |
| 1576 | goto skip; | 1575 | goto skip; |
| 1577 | 1576 | ||
| 1577 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); | ||
| 1578 | |||
| 1578 | if (vma->vm_start > start) | 1579 | if (vma->vm_start > start) |
| 1579 | start = vma->vm_start; | 1580 | start = vma->vm_start; |
| 1580 | vma_end = min(end, vma->vm_end); | 1581 | vma_end = min(end, vma->vm_end); |
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 34c6d7bd4d18..bbdae2b4559f 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c | |||
| @@ -330,7 +330,7 @@ xfs_btree_sblock_verify_crc( | |||
| 330 | 330 | ||
| 331 | if (xfs_sb_version_hascrc(&mp->m_sb)) { | 331 | if (xfs_sb_version_hascrc(&mp->m_sb)) { |
| 332 | if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn))) | 332 | if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn))) |
| 333 | return __this_address; | 333 | return false; |
| 334 | return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF); | 334 | return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF); |
| 335 | } | 335 | } |
| 336 | 336 | ||
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 404e581f1ea1..1ee8c5539fa4 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
| @@ -1126,9 +1126,9 @@ xfs_free_file_space( | |||
| 1126 | * page could be mmap'd and iomap_zero_range doesn't do that for us. | 1126 | * page could be mmap'd and iomap_zero_range doesn't do that for us. |
| 1127 | * Writeback of the eof page will do this, albeit clumsily. | 1127 | * Writeback of the eof page will do this, albeit clumsily. |
| 1128 | */ | 1128 | */ |
| 1129 | if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) { | 1129 | if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) { |
| 1130 | error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, | 1130 | error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, |
| 1131 | (offset + len) & ~PAGE_MASK, LLONG_MAX); | 1131 | round_down(offset + len, PAGE_SIZE), LLONG_MAX); |
| 1132 | } | 1132 | } |
| 1133 | 1133 | ||
| 1134 | return error; | 1134 | return error; |
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c index 73a1d77ec187..3091e4bc04ef 100644 --- a/fs/xfs/xfs_qm_bhv.c +++ b/fs/xfs/xfs_qm_bhv.c | |||
| @@ -40,7 +40,7 @@ xfs_fill_statvfs_from_dquot( | |||
| 40 | statp->f_files = limit; | 40 | statp->f_files = limit; |
| 41 | statp->f_ffree = | 41 | statp->f_ffree = |
| 42 | (statp->f_files > dqp->q_res_icount) ? | 42 | (statp->f_files > dqp->q_res_icount) ? |
| 43 | (statp->f_ffree - dqp->q_res_icount) : 0; | 43 | (statp->f_files - dqp->q_res_icount) : 0; |
| 44 | } | 44 | } |
| 45 | } | 45 | } |
| 46 | 46 | ||
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h index 827e4d3bbc7a..8cc7b09c1bc7 100644 --- a/include/asm-generic/fixmap.h +++ b/include/asm-generic/fixmap.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #define __ASM_GENERIC_FIXMAP_H | 16 | #define __ASM_GENERIC_FIXMAP_H |
| 17 | 17 | ||
| 18 | #include <linux/bug.h> | 18 | #include <linux/bug.h> |
| 19 | #include <linux/mm_types.h> | ||
| 19 | 20 | ||
| 20 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | 21 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) |
| 21 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | 22 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) |
diff --git a/include/linux/dax.h b/include/linux/dax.h index 450b28db9533..0dd316a74a29 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h | |||
| @@ -7,6 +7,8 @@ | |||
| 7 | #include <linux/radix-tree.h> | 7 | #include <linux/radix-tree.h> |
| 8 | #include <asm/pgtable.h> | 8 | #include <asm/pgtable.h> |
| 9 | 9 | ||
| 10 | typedef unsigned long dax_entry_t; | ||
| 11 | |||
| 10 | struct iomap_ops; | 12 | struct iomap_ops; |
| 11 | struct dax_device; | 13 | struct dax_device; |
| 12 | struct dax_operations { | 14 | struct dax_operations { |
| @@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping, | |||
| 88 | struct block_device *bdev, struct writeback_control *wbc); | 90 | struct block_device *bdev, struct writeback_control *wbc); |
| 89 | 91 | ||
| 90 | struct page *dax_layout_busy_page(struct address_space *mapping); | 92 | struct page *dax_layout_busy_page(struct address_space *mapping); |
| 91 | bool dax_lock_mapping_entry(struct page *page); | 93 | dax_entry_t dax_lock_page(struct page *page); |
| 92 | void dax_unlock_mapping_entry(struct page *page); | 94 | void dax_unlock_page(struct page *page, dax_entry_t cookie); |
| 93 | #else | 95 | #else |
| 94 | static inline bool bdev_dax_supported(struct block_device *bdev, | 96 | static inline bool bdev_dax_supported(struct block_device *bdev, |
| 95 | int blocksize) | 97 | int blocksize) |
| @@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping, | |||
| 122 | return -EOPNOTSUPP; | 124 | return -EOPNOTSUPP; |
| 123 | } | 125 | } |
| 124 | 126 | ||
| 125 | static inline bool dax_lock_mapping_entry(struct page *page) | 127 | static inline dax_entry_t dax_lock_page(struct page *page) |
| 126 | { | 128 | { |
| 127 | if (IS_DAX(page->mapping->host)) | 129 | if (IS_DAX(page->mapping->host)) |
| 128 | return true; | 130 | return ~0UL; |
| 129 | return false; | 131 | return 0; |
| 130 | } | 132 | } |
| 131 | 133 | ||
| 132 | static inline void dax_unlock_mapping_entry(struct page *page) | 134 | static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) |
| 133 | { | 135 | { |
| 134 | } | 136 | } |
| 135 | #endif | 137 | #endif |
diff --git a/include/linux/filter.h b/include/linux/filter.h index 448dcc448f1f..795ff0b869bb 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
| @@ -449,6 +449,13 @@ struct sock_reuseport; | |||
| 449 | offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 | 449 | offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 |
| 450 | #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ | 450 | #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ |
| 451 | offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 | 451 | offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 |
| 452 | #if BITS_PER_LONG == 64 | ||
| 453 | # define bpf_ctx_range_ptr(TYPE, MEMBER) \ | ||
| 454 | offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 | ||
| 455 | #else | ||
| 456 | # define bpf_ctx_range_ptr(TYPE, MEMBER) \ | ||
| 457 | offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1 | ||
| 458 | #endif /* BITS_PER_LONG == 64 */ | ||
| 452 | 459 | ||
| 453 | #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ | 460 | #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ |
| 454 | ({ \ | 461 | ({ \ |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 76f8db0b0e71..0705164f928c 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) | |||
| 510 | } | 510 | } |
| 511 | extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, | 511 | extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, |
| 512 | struct vm_area_struct *vma, unsigned long addr, | 512 | struct vm_area_struct *vma, unsigned long addr, |
| 513 | int node); | 513 | int node, bool hugepage); |
| 514 | #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ | ||
| 515 | alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) | ||
| 514 | #else | 516 | #else |
| 515 | #define alloc_pages(gfp_mask, order) \ | 517 | #define alloc_pages(gfp_mask, order) \ |
| 516 | alloc_pages_node(numa_node_id(), gfp_mask, order) | 518 | alloc_pages_node(numa_node_id(), gfp_mask, order) |
| 517 | #define alloc_pages_vma(gfp_mask, order, vma, addr, node)\ | 519 | #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ |
| 520 | alloc_pages(gfp_mask, order) | ||
| 521 | #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ | ||
| 518 | alloc_pages(gfp_mask, order) | 522 | alloc_pages(gfp_mask, order) |
| 519 | #endif | 523 | #endif |
| 520 | #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) | 524 | #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) |
| 521 | #define alloc_page_vma(gfp_mask, vma, addr) \ | 525 | #define alloc_page_vma(gfp_mask, vma, addr) \ |
| 522 | alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) | 526 | alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) |
| 523 | #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ | 527 | #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ |
| 524 | alloc_pages_vma(gfp_mask, 0, vma, addr, node) | 528 | alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) |
| 525 | 529 | ||
| 526 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); | 530 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); |
| 527 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); | 531 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index b3e24368930a..14131b6fae68 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
| @@ -905,6 +905,13 @@ struct vmbus_channel { | |||
| 905 | 905 | ||
| 906 | bool probe_done; | 906 | bool probe_done; |
| 907 | 907 | ||
| 908 | /* | ||
| 909 | * We must offload the handling of the primary/sub channels | ||
| 910 | * from the single-threaded vmbus_connection.work_queue to | ||
| 911 | * two different workqueue, otherwise we can block | ||
| 912 | * vmbus_connection.work_queue and hang: see vmbus_process_offer(). | ||
| 913 | */ | ||
| 914 | struct work_struct add_channel_work; | ||
| 908 | }; | 915 | }; |
| 909 | 916 | ||
| 910 | static inline bool is_hvsock_channel(const struct vmbus_channel *c) | 917 | static inline bool is_hvsock_channel(const struct vmbus_channel *c) |
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index bac395f1d00a..5228c62af416 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h | |||
| @@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, | |||
| 139 | struct mempolicy *get_task_policy(struct task_struct *p); | 139 | struct mempolicy *get_task_policy(struct task_struct *p); |
| 140 | struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, | 140 | struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, |
| 141 | unsigned long addr); | 141 | unsigned long addr); |
| 142 | struct mempolicy *get_vma_policy(struct vm_area_struct *vma, | ||
| 143 | unsigned long addr); | ||
| 144 | bool vma_policy_mof(struct vm_area_struct *vma); | 142 | bool vma_policy_mof(struct vm_area_struct *vma); |
| 145 | 143 | ||
| 146 | extern void numa_default_policy(void); | 144 | extern void numa_default_policy(void); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5ed8f6292a53..2c471a2c43fa 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -206,6 +206,11 @@ struct page { | |||
| 206 | #endif | 206 | #endif |
| 207 | } _struct_page_alignment; | 207 | } _struct_page_alignment; |
| 208 | 208 | ||
| 209 | /* | ||
| 210 | * Used for sizing the vmemmap region on some architectures | ||
| 211 | */ | ||
| 212 | #define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page))) | ||
| 213 | |||
| 209 | #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) | 214 | #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) |
| 210 | #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) | 215 | #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) |
| 211 | 216 | ||
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 847705a6d0ec..db023a92f3a4 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -783,6 +783,12 @@ void memory_present(int nid, unsigned long start, unsigned long end); | |||
| 783 | static inline void memory_present(int nid, unsigned long start, unsigned long end) {} | 783 | static inline void memory_present(int nid, unsigned long start, unsigned long end) {} |
| 784 | #endif | 784 | #endif |
| 785 | 785 | ||
| 786 | #if defined(CONFIG_SPARSEMEM) | ||
| 787 | void memblocks_present(void); | ||
| 788 | #else | ||
| 789 | static inline void memblocks_present(void) {} | ||
| 790 | #endif | ||
| 791 | |||
| 786 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES | 792 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES |
| 787 | int local_memory_node(int node_id); | 793 | int local_memory_node(int node_id); |
| 788 | #else | 794 | #else |
diff --git a/include/linux/sfp.h b/include/linux/sfp.h index d37518e89db2..d9d9de3fcf8e 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h | |||
| @@ -224,7 +224,7 @@ struct sfp_eeprom_ext { | |||
| 224 | * | 224 | * |
| 225 | * See the SFF-8472 specification and related documents for the definition | 225 | * See the SFF-8472 specification and related documents for the definition |
| 226 | * of these structure members. This can be obtained from | 226 | * of these structure members. This can be obtained from |
| 227 | * ftp://ftp.seagate.com/sff | 227 | * https://www.snia.org/technology-communities/sff/specifications |
| 228 | */ | 228 | */ |
| 229 | struct sfp_eeprom_id { | 229 | struct sfp_eeprom_id { |
| 230 | struct sfp_eeprom_base base; | 230 | struct sfp_eeprom_base base; |
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 43106ffa6788..2ec128060239 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h | |||
| @@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) | |||
| 72 | buf->head[0].iov_base = start; | 72 | buf->head[0].iov_base = start; |
| 73 | buf->head[0].iov_len = len; | 73 | buf->head[0].iov_len = len; |
| 74 | buf->tail[0].iov_len = 0; | 74 | buf->tail[0].iov_len = 0; |
| 75 | buf->bvec = NULL; | ||
| 76 | buf->pages = NULL; | 75 | buf->pages = NULL; |
| 77 | buf->page_len = 0; | 76 | buf->page_len = 0; |
| 78 | buf->flags = 0; | 77 | buf->flags = 0; |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 414db2bce715..392138fe59b6 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
| @@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); | |||
| 556 | extern void tty_release_struct(struct tty_struct *tty, int idx); | 556 | extern void tty_release_struct(struct tty_struct *tty, int idx); |
| 557 | extern int tty_release(struct inode *inode, struct file *filp); | 557 | extern int tty_release(struct inode *inode, struct file *filp); |
| 558 | extern void tty_init_termios(struct tty_struct *tty); | 558 | extern void tty_init_termios(struct tty_struct *tty); |
| 559 | extern void tty_save_termios(struct tty_struct *tty); | ||
| 559 | extern int tty_standard_install(struct tty_driver *driver, | 560 | extern int tty_standard_install(struct tty_driver *driver, |
| 560 | struct tty_struct *tty); | 561 | struct tty_struct *tty); |
| 561 | 562 | ||
diff --git a/include/linux/usb.h b/include/linux/usb.h index 4cdd515a4385..5e49e82c4368 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
| @@ -407,11 +407,11 @@ struct usb_host_bos { | |||
| 407 | }; | 407 | }; |
| 408 | 408 | ||
| 409 | int __usb_get_extra_descriptor(char *buffer, unsigned size, | 409 | int __usb_get_extra_descriptor(char *buffer, unsigned size, |
| 410 | unsigned char type, void **ptr); | 410 | unsigned char type, void **ptr, size_t min); |
| 411 | #define usb_get_extra_descriptor(ifpoint, type, ptr) \ | 411 | #define usb_get_extra_descriptor(ifpoint, type, ptr) \ |
| 412 | __usb_get_extra_descriptor((ifpoint)->extra, \ | 412 | __usb_get_extra_descriptor((ifpoint)->extra, \ |
| 413 | (ifpoint)->extralen, \ | 413 | (ifpoint)->extralen, \ |
| 414 | type, (void **)ptr) | 414 | type, (void **)ptr, sizeof(**(ptr))) |
| 415 | 415 | ||
| 416 | /* ----------------------------------------------------------------------- */ | 416 | /* ----------------------------------------------------------------------- */ |
| 417 | 417 | ||
diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 564892e19f8c..f492e21c4aa2 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h | |||
| @@ -554,6 +554,60 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, | |||
| 554 | } | 554 | } |
| 555 | 555 | ||
| 556 | /** | 556 | /** |
| 557 | * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray. | ||
| 558 | * @xa: XArray. | ||
| 559 | * @index: Index into array. | ||
| 560 | * @old: Old value to test against. | ||
| 561 | * @entry: New value to place in array. | ||
| 562 | * @gfp: Memory allocation flags. | ||
| 563 | * | ||
| 564 | * This function is like calling xa_cmpxchg() except it disables softirqs | ||
| 565 | * while holding the array lock. | ||
| 566 | * | ||
| 567 | * Context: Any context. Takes and releases the xa_lock while | ||
| 568 | * disabling softirqs. May sleep if the @gfp flags permit. | ||
| 569 | * Return: The old value at this index or xa_err() if an error happened. | ||
| 570 | */ | ||
| 571 | static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, | ||
| 572 | void *old, void *entry, gfp_t gfp) | ||
| 573 | { | ||
| 574 | void *curr; | ||
| 575 | |||
| 576 | xa_lock_bh(xa); | ||
| 577 | curr = __xa_cmpxchg(xa, index, old, entry, gfp); | ||
| 578 | xa_unlock_bh(xa); | ||
| 579 | |||
| 580 | return curr; | ||
| 581 | } | ||
| 582 | |||
| 583 | /** | ||
| 584 | * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray. | ||
| 585 | * @xa: XArray. | ||
| 586 | * @index: Index into array. | ||
| 587 | * @old: Old value to test against. | ||
| 588 | * @entry: New value to place in array. | ||
| 589 | * @gfp: Memory allocation flags. | ||
| 590 | * | ||
| 591 | * This function is like calling xa_cmpxchg() except it disables interrupts | ||
| 592 | * while holding the array lock. | ||
| 593 | * | ||
| 594 | * Context: Process context. Takes and releases the xa_lock while | ||
| 595 | * disabling interrupts. May sleep if the @gfp flags permit. | ||
| 596 | * Return: The old value at this index or xa_err() if an error happened. | ||
| 597 | */ | ||
| 598 | static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, | ||
| 599 | void *old, void *entry, gfp_t gfp) | ||
| 600 | { | ||
| 601 | void *curr; | ||
| 602 | |||
| 603 | xa_lock_irq(xa); | ||
| 604 | curr = __xa_cmpxchg(xa, index, old, entry, gfp); | ||
| 605 | xa_unlock_irq(xa); | ||
| 606 | |||
| 607 | return curr; | ||
| 608 | } | ||
| 609 | |||
| 610 | /** | ||
| 557 | * xa_insert() - Store this entry in the XArray unless another entry is | 611 | * xa_insert() - Store this entry in the XArray unless another entry is |
| 558 | * already present. | 612 | * already present. |
| 559 | * @xa: XArray. | 613 | * @xa: XArray. |
diff --git a/include/media/media-request.h b/include/media/media-request.h index 0ce75c35131f..bd36d7431698 100644 --- a/include/media/media-request.h +++ b/include/media/media-request.h | |||
| @@ -68,7 +68,7 @@ struct media_request { | |||
| 68 | unsigned int access_count; | 68 | unsigned int access_count; |
| 69 | struct list_head objects; | 69 | struct list_head objects; |
| 70 | unsigned int num_incomplete_objects; | 70 | unsigned int num_incomplete_objects; |
| 71 | struct wait_queue_head poll_wait; | 71 | wait_queue_head_t poll_wait; |
| 72 | spinlock_t lock; | 72 | spinlock_t lock; |
| 73 | }; | 73 | }; |
| 74 | 74 | ||
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h new file mode 100644 index 000000000000..d21f40edc09e --- /dev/null +++ b/include/media/mpeg2-ctrls.h | |||
| @@ -0,0 +1,86 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * These are the MPEG2 state controls for use with stateless MPEG-2 | ||
| 4 | * codec drivers. | ||
| 5 | * | ||
| 6 | * It turns out that these structs are not stable yet and will undergo | ||
| 7 | * more changes. So keep them private until they are stable and ready to | ||
| 8 | * become part of the official public API. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _MPEG2_CTRLS_H_ | ||
| 12 | #define _MPEG2_CTRLS_H_ | ||
| 13 | |||
| 14 | #define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250) | ||
| 15 | #define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251) | ||
| 16 | |||
| 17 | /* enum v4l2_ctrl_type type values */ | ||
| 18 | #define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103 | ||
| 19 | #define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104 | ||
| 20 | |||
| 21 | #define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 | ||
| 22 | #define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 | ||
| 23 | #define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 | ||
| 24 | #define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 | ||
| 25 | |||
| 26 | struct v4l2_mpeg2_sequence { | ||
| 27 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ | ||
| 28 | __u16 horizontal_size; | ||
| 29 | __u16 vertical_size; | ||
| 30 | __u32 vbv_buffer_size; | ||
| 31 | |||
| 32 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ | ||
| 33 | __u8 profile_and_level_indication; | ||
| 34 | __u8 progressive_sequence; | ||
| 35 | __u8 chroma_format; | ||
| 36 | __u8 pad; | ||
| 37 | }; | ||
| 38 | |||
| 39 | struct v4l2_mpeg2_picture { | ||
| 40 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ | ||
| 41 | __u8 picture_coding_type; | ||
| 42 | |||
| 43 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ | ||
| 44 | __u8 f_code[2][2]; | ||
| 45 | __u8 intra_dc_precision; | ||
| 46 | __u8 picture_structure; | ||
| 47 | __u8 top_field_first; | ||
| 48 | __u8 frame_pred_frame_dct; | ||
| 49 | __u8 concealment_motion_vectors; | ||
| 50 | __u8 q_scale_type; | ||
| 51 | __u8 intra_vlc_format; | ||
| 52 | __u8 alternate_scan; | ||
| 53 | __u8 repeat_first_field; | ||
| 54 | __u8 progressive_frame; | ||
| 55 | __u8 pad; | ||
| 56 | }; | ||
| 57 | |||
| 58 | struct v4l2_ctrl_mpeg2_slice_params { | ||
| 59 | __u32 bit_size; | ||
| 60 | __u32 data_bit_offset; | ||
| 61 | |||
| 62 | struct v4l2_mpeg2_sequence sequence; | ||
| 63 | struct v4l2_mpeg2_picture picture; | ||
| 64 | |||
| 65 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ | ||
| 66 | __u8 quantiser_scale_code; | ||
| 67 | |||
| 68 | __u8 backward_ref_index; | ||
| 69 | __u8 forward_ref_index; | ||
| 70 | __u8 pad; | ||
| 71 | }; | ||
| 72 | |||
| 73 | struct v4l2_ctrl_mpeg2_quantization { | ||
| 74 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */ | ||
| 75 | __u8 load_intra_quantiser_matrix; | ||
| 76 | __u8 load_non_intra_quantiser_matrix; | ||
| 77 | __u8 load_chroma_intra_quantiser_matrix; | ||
| 78 | __u8 load_chroma_non_intra_quantiser_matrix; | ||
| 79 | |||
| 80 | __u8 intra_quantiser_matrix[64]; | ||
| 81 | __u8 non_intra_quantiser_matrix[64]; | ||
| 82 | __u8 chroma_intra_quantiser_matrix[64]; | ||
| 83 | __u8 chroma_non_intra_quantiser_matrix[64]; | ||
| 84 | }; | ||
| 85 | |||
| 86 | #endif | ||
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 83ce0593b275..d63cf227b0ab 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h | |||
| @@ -22,6 +22,12 @@ | |||
| 22 | #include <linux/videodev2.h> | 22 | #include <linux/videodev2.h> |
| 23 | #include <media/media-request.h> | 23 | #include <media/media-request.h> |
| 24 | 24 | ||
| 25 | /* | ||
| 26 | * Include the mpeg2 stateless codec compound control definitions. | ||
| 27 | * This will move to the public headers once this API is fully stable. | ||
| 28 | */ | ||
| 29 | #include <media/mpeg2-ctrls.h> | ||
| 30 | |||
| 25 | /* forward references */ | 31 | /* forward references */ |
| 26 | struct file; | 32 | struct file; |
| 27 | struct v4l2_ctrl_handler; | 33 | struct v4l2_ctrl_handler; |
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index e86981d615ae..4a737b2c610b 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h | |||
| @@ -239,6 +239,7 @@ struct vb2_queue; | |||
| 239 | * @num_planes: number of planes in the buffer | 239 | * @num_planes: number of planes in the buffer |
| 240 | * on an internal driver queue. | 240 | * on an internal driver queue. |
| 241 | * @timestamp: frame timestamp in ns. | 241 | * @timestamp: frame timestamp in ns. |
| 242 | * @request: the request this buffer is associated with. | ||
| 242 | * @req_obj: used to bind this buffer to a request. This | 243 | * @req_obj: used to bind this buffer to a request. This |
| 243 | * request object has a refcount. | 244 | * request object has a refcount. |
| 244 | */ | 245 | */ |
| @@ -249,6 +250,7 @@ struct vb2_buffer { | |||
| 249 | unsigned int memory; | 250 | unsigned int memory; |
| 250 | unsigned int num_planes; | 251 | unsigned int num_planes; |
| 251 | u64 timestamp; | 252 | u64 timestamp; |
| 253 | struct media_request *request; | ||
| 252 | struct media_request_object req_obj; | 254 | struct media_request_object req_obj; |
| 253 | 255 | ||
| 254 | /* private: internal use only | 256 | /* private: internal use only |
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f58b384aa6c9..665990c7dec8 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h | |||
| @@ -454,6 +454,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) | |||
| 454 | 454 | ||
| 455 | static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) | 455 | static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) |
| 456 | { | 456 | { |
| 457 | unsigned int hh_alen = 0; | ||
| 457 | unsigned int seq; | 458 | unsigned int seq; |
| 458 | unsigned int hh_len; | 459 | unsigned int hh_len; |
| 459 | 460 | ||
| @@ -461,16 +462,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb | |||
| 461 | seq = read_seqbegin(&hh->hh_lock); | 462 | seq = read_seqbegin(&hh->hh_lock); |
| 462 | hh_len = hh->hh_len; | 463 | hh_len = hh->hh_len; |
| 463 | if (likely(hh_len <= HH_DATA_MOD)) { | 464 | if (likely(hh_len <= HH_DATA_MOD)) { |
| 464 | /* this is inlined by gcc */ | 465 | hh_alen = HH_DATA_MOD; |
| 465 | memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD); | 466 | |
| 467 | /* skb_push() would proceed silently if we have room for | ||
| 468 | * the unaligned size but not for the aligned size: | ||
| 469 | * check headroom explicitly. | ||
| 470 | */ | ||
| 471 | if (likely(skb_headroom(skb) >= HH_DATA_MOD)) { | ||
| 472 | /* this is inlined by gcc */ | ||
| 473 | memcpy(skb->data - HH_DATA_MOD, hh->hh_data, | ||
| 474 | HH_DATA_MOD); | ||
| 475 | } | ||
| 466 | } else { | 476 | } else { |
| 467 | unsigned int hh_alen = HH_DATA_ALIGN(hh_len); | 477 | hh_alen = HH_DATA_ALIGN(hh_len); |
| 468 | 478 | ||
| 469 | memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); | 479 | if (likely(skb_headroom(skb) >= hh_alen)) { |
| 480 | memcpy(skb->data - hh_alen, hh->hh_data, | ||
| 481 | hh_alen); | ||
| 482 | } | ||
| 470 | } | 483 | } |
| 471 | } while (read_seqretry(&hh->hh_lock, seq)); | 484 | } while (read_seqretry(&hh->hh_lock, seq)); |
| 472 | 485 | ||
| 473 | skb_push(skb, hh_len); | 486 | if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) { |
| 487 | kfree_skb(skb); | ||
| 488 | return NET_XMIT_DROP; | ||
| 489 | } | ||
| 490 | |||
| 491 | __skb_push(skb, hh_len); | ||
| 474 | return dev_queue_xmit(skb); | 492 | return dev_queue_xmit(skb); |
| 475 | } | 493 | } |
| 476 | 494 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index ab9242e51d9e..2abbc15824af 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
| @@ -620,4 +620,9 @@ static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) | |||
| 620 | return false; | 620 | return false; |
| 621 | } | 621 | } |
| 622 | 622 | ||
| 623 | static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize) | ||
| 624 | { | ||
| 625 | return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize); | ||
| 626 | } | ||
| 627 | |||
| 623 | #endif /* __net_sctp_h__ */ | 628 | #endif /* __net_sctp_h__ */ |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index a11f93790476..feada358d872 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
| @@ -2075,6 +2075,8 @@ struct sctp_association { | |||
| 2075 | 2075 | ||
| 2076 | __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1]; | 2076 | __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1]; |
| 2077 | __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1]; | 2077 | __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1]; |
| 2078 | |||
| 2079 | struct rcu_head rcu; | ||
| 2078 | }; | 2080 | }; |
| 2079 | 2081 | ||
| 2080 | 2082 | ||
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h index 2dd37cada7c0..888a833d3b00 100644 --- a/include/sound/pcm_params.h +++ b/include/sound/pcm_params.h | |||
| @@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i) | |||
| 254 | static inline int snd_interval_single(const struct snd_interval *i) | 254 | static inline int snd_interval_single(const struct snd_interval *i) |
| 255 | { | 255 | { |
| 256 | return (i->min == i->max || | 256 | return (i->min == i->max || |
| 257 | (i->min + 1 == i->max && i->openmax)); | 257 | (i->min + 1 == i->max && (i->openmin || i->openmax))); |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | static inline int snd_interval_value(const struct snd_interval *i) | 260 | static inline int snd_interval_value(const struct snd_interval *i) |
| 261 | { | 261 | { |
| 262 | if (i->openmin && !i->openmax) | ||
| 263 | return i->max; | ||
| 262 | return i->min; | 264 | return i->min; |
| 263 | } | 265 | } |
| 264 | 266 | ||
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 538546edbfbd..c7f3321fbe43 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h | |||
| @@ -760,8 +760,10 @@ __SYSCALL(__NR_rseq, sys_rseq) | |||
| 760 | #define __NR_ftruncate __NR3264_ftruncate | 760 | #define __NR_ftruncate __NR3264_ftruncate |
| 761 | #define __NR_lseek __NR3264_lseek | 761 | #define __NR_lseek __NR3264_lseek |
| 762 | #define __NR_sendfile __NR3264_sendfile | 762 | #define __NR_sendfile __NR3264_sendfile |
| 763 | #if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64) | ||
| 763 | #define __NR_newfstatat __NR3264_fstatat | 764 | #define __NR_newfstatat __NR3264_fstatat |
| 764 | #define __NR_fstat __NR3264_fstat | 765 | #define __NR_fstat __NR3264_fstat |
| 766 | #endif | ||
| 765 | #define __NR_mmap __NR3264_mmap | 767 | #define __NR_mmap __NR3264_mmap |
| 766 | #define __NR_fadvise64 __NR3264_fadvise64 | 768 | #define __NR_fadvise64 __NR3264_fadvise64 |
| 767 | #ifdef __NR3264_stat | 769 | #ifdef __NR3264_stat |
| @@ -776,8 +778,10 @@ __SYSCALL(__NR_rseq, sys_rseq) | |||
| 776 | #define __NR_ftruncate64 __NR3264_ftruncate | 778 | #define __NR_ftruncate64 __NR3264_ftruncate |
| 777 | #define __NR_llseek __NR3264_lseek | 779 | #define __NR_llseek __NR3264_lseek |
| 778 | #define __NR_sendfile64 __NR3264_sendfile | 780 | #define __NR_sendfile64 __NR3264_sendfile |
| 781 | #if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64) | ||
| 779 | #define __NR_fstatat64 __NR3264_fstatat | 782 | #define __NR_fstatat64 __NR3264_fstatat |
| 780 | #define __NR_fstat64 __NR3264_fstat | 783 | #define __NR_fstat64 __NR3264_fstat |
| 784 | #endif | ||
| 781 | #define __NR_mmap2 __NR3264_mmap | 785 | #define __NR_mmap2 __NR3264_mmap |
| 782 | #define __NR_fadvise64_64 __NR3264_fadvise64 | 786 | #define __NR_fadvise64_64 __NR3264_fadvise64 |
| 783 | #ifdef __NR3264_stat | 787 | #ifdef __NR3264_stat |
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 852dc17ab47a..72c453a8bf50 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
| @@ -2170,7 +2170,7 @@ union bpf_attr { | |||
| 2170 | * Return | 2170 | * Return |
| 2171 | * 0 on success, or a negative error in case of failure. | 2171 | * 0 on success, or a negative error in case of failure. |
| 2172 | * | 2172 | * |
| 2173 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) | 2173 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2174 | * Description | 2174 | * Description |
| 2175 | * Look for TCP socket matching *tuple*, optionally in a child | 2175 | * Look for TCP socket matching *tuple*, optionally in a child |
| 2176 | * network namespace *netns*. The return value must be checked, | 2176 | * network namespace *netns*. The return value must be checked, |
| @@ -2187,12 +2187,14 @@ union bpf_attr { | |||
| 2187 | * **sizeof**\ (*tuple*\ **->ipv6**) | 2187 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2188 | * Look for an IPv6 socket. | 2188 | * Look for an IPv6 socket. |
| 2189 | * | 2189 | * |
| 2190 | * If the *netns* is zero, then the socket lookup table in the | 2190 | * If the *netns* is a negative signed 32-bit integer, then the |
| 2191 | * netns associated with the *ctx* will be used. For the TC hooks, | 2191 | * socket lookup table in the netns associated with the *ctx* will |
| 2192 | * this in the netns of the device in the skb. For socket hooks, | 2192 | * will be used. For the TC hooks, this is the netns of the device |
| 2193 | * this in the netns of the socket. If *netns* is non-zero, then | 2193 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2194 | * it specifies the ID of the netns relative to the netns | 2194 | * If *netns* is any other signed 32-bit value greater than or |
| 2195 | * associated with the *ctx*. | 2195 | * equal to zero then it specifies the ID of the netns relative to |
| 2196 | * the netns associated with the *ctx*. *netns* values beyond the | ||
| 2197 | * range of 32-bit integers are reserved for future use. | ||
| 2196 | * | 2198 | * |
| 2197 | * All values for *flags* are reserved for future usage, and must | 2199 | * All values for *flags* are reserved for future usage, and must |
| 2198 | * be left at zero. | 2200 | * be left at zero. |
| @@ -2201,8 +2203,10 @@ union bpf_attr { | |||
| 2201 | * **CONFIG_NET** configuration option. | 2203 | * **CONFIG_NET** configuration option. |
| 2202 | * Return | 2204 | * Return |
| 2203 | * Pointer to *struct bpf_sock*, or NULL in case of failure. | 2205 | * Pointer to *struct bpf_sock*, or NULL in case of failure. |
| 2206 | * For sockets with reuseport option, the *struct bpf_sock* | ||
| 2207 | * result is from reuse->socks[] using the hash of the tuple. | ||
| 2204 | * | 2208 | * |
| 2205 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) | 2209 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2206 | * Description | 2210 | * Description |
| 2207 | * Look for UDP socket matching *tuple*, optionally in a child | 2211 | * Look for UDP socket matching *tuple*, optionally in a child |
| 2208 | * network namespace *netns*. The return value must be checked, | 2212 | * network namespace *netns*. The return value must be checked, |
| @@ -2219,12 +2223,14 @@ union bpf_attr { | |||
| 2219 | * **sizeof**\ (*tuple*\ **->ipv6**) | 2223 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2220 | * Look for an IPv6 socket. | 2224 | * Look for an IPv6 socket. |
| 2221 | * | 2225 | * |
| 2222 | * If the *netns* is zero, then the socket lookup table in the | 2226 | * If the *netns* is a negative signed 32-bit integer, then the |
| 2223 | * netns associated with the *ctx* will be used. For the TC hooks, | 2227 | * socket lookup table in the netns associated with the *ctx* will |
| 2224 | * this in the netns of the device in the skb. For socket hooks, | 2228 | * will be used. For the TC hooks, this is the netns of the device |
| 2225 | * this in the netns of the socket. If *netns* is non-zero, then | 2229 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2226 | * it specifies the ID of the netns relative to the netns | 2230 | * If *netns* is any other signed 32-bit value greater than or |
| 2227 | * associated with the *ctx*. | 2231 | * equal to zero then it specifies the ID of the netns relative to |
| 2232 | * the netns associated with the *ctx*. *netns* values beyond the | ||
| 2233 | * range of 32-bit integers are reserved for future use. | ||
| 2228 | * | 2234 | * |
| 2229 | * All values for *flags* are reserved for future usage, and must | 2235 | * All values for *flags* are reserved for future usage, and must |
| 2230 | * be left at zero. | 2236 | * be left at zero. |
| @@ -2233,6 +2239,8 @@ union bpf_attr { | |||
| 2233 | * **CONFIG_NET** configuration option. | 2239 | * **CONFIG_NET** configuration option. |
| 2234 | * Return | 2240 | * Return |
| 2235 | * Pointer to *struct bpf_sock*, or NULL in case of failure. | 2241 | * Pointer to *struct bpf_sock*, or NULL in case of failure. |
| 2242 | * For sockets with reuseport option, the *struct bpf_sock* | ||
| 2243 | * result is from reuse->socks[] using the hash of the tuple. | ||
| 2236 | * | 2244 | * |
| 2237 | * int bpf_sk_release(struct bpf_sock *sk) | 2245 | * int bpf_sk_release(struct bpf_sock *sk) |
| 2238 | * Description | 2246 | * Description |
| @@ -2405,6 +2413,9 @@ enum bpf_func_id { | |||
| 2405 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ | 2413 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ |
| 2406 | #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) | 2414 | #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) |
| 2407 | 2415 | ||
| 2416 | /* Current network namespace */ | ||
| 2417 | #define BPF_F_CURRENT_NETNS (-1L) | ||
| 2418 | |||
| 2408 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ | 2419 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ |
| 2409 | enum bpf_adj_room_mode { | 2420 | enum bpf_adj_room_mode { |
| 2410 | BPF_ADJ_ROOM_NET, | 2421 | BPF_ADJ_ROOM_NET, |
| @@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode { | |||
| 2422 | BPF_LWT_ENCAP_SEG6_INLINE | 2433 | BPF_LWT_ENCAP_SEG6_INLINE |
| 2423 | }; | 2434 | }; |
| 2424 | 2435 | ||
| 2436 | #define __bpf_md_ptr(type, name) \ | ||
| 2437 | union { \ | ||
| 2438 | type name; \ | ||
| 2439 | __u64 :64; \ | ||
| 2440 | } __attribute__((aligned(8))) | ||
| 2441 | |||
| 2425 | /* user accessible mirror of in-kernel sk_buff. | 2442 | /* user accessible mirror of in-kernel sk_buff. |
| 2426 | * new fields can only be added to the end of this structure | 2443 | * new fields can only be added to the end of this structure |
| 2427 | */ | 2444 | */ |
| @@ -2456,7 +2473,7 @@ struct __sk_buff { | |||
| 2456 | /* ... here. */ | 2473 | /* ... here. */ |
| 2457 | 2474 | ||
| 2458 | __u32 data_meta; | 2475 | __u32 data_meta; |
| 2459 | struct bpf_flow_keys *flow_keys; | 2476 | __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); |
| 2460 | }; | 2477 | }; |
| 2461 | 2478 | ||
| 2462 | struct bpf_tunnel_key { | 2479 | struct bpf_tunnel_key { |
| @@ -2572,8 +2589,8 @@ enum sk_action { | |||
| 2572 | * be added to the end of this structure | 2589 | * be added to the end of this structure |
| 2573 | */ | 2590 | */ |
| 2574 | struct sk_msg_md { | 2591 | struct sk_msg_md { |
| 2575 | void *data; | 2592 | __bpf_md_ptr(void *, data); |
| 2576 | void *data_end; | 2593 | __bpf_md_ptr(void *, data_end); |
| 2577 | 2594 | ||
| 2578 | __u32 family; | 2595 | __u32 family; |
| 2579 | __u32 remote_ip4; /* Stored in network byte order */ | 2596 | __u32 remote_ip4; /* Stored in network byte order */ |
| @@ -2589,8 +2606,9 @@ struct sk_reuseport_md { | |||
| 2589 | * Start of directly accessible data. It begins from | 2606 | * Start of directly accessible data. It begins from |
| 2590 | * the tcp/udp header. | 2607 | * the tcp/udp header. |
| 2591 | */ | 2608 | */ |
| 2592 | void *data; | 2609 | __bpf_md_ptr(void *, data); |
| 2593 | void *data_end; /* End of directly accessible data */ | 2610 | /* End of directly accessible data */ |
| 2611 | __bpf_md_ptr(void *, data_end); | ||
| 2594 | /* | 2612 | /* |
| 2595 | * Total length of packet (starting from the tcp/udp header). | 2613 | * Total length of packet (starting from the tcp/udp header). |
| 2596 | * Note that the directly accessible bytes (data_end - data) | 2614 | * Note that the directly accessible bytes (data_end - data) |
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 3eb5a4c3d60a..ae366b87426a 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h | |||
| @@ -752,6 +752,15 @@ | |||
| 752 | 752 | ||
| 753 | #define ABS_MISC 0x28 | 753 | #define ABS_MISC 0x28 |
| 754 | 754 | ||
| 755 | /* | ||
| 756 | * 0x2e is reserved and should not be used in input drivers. | ||
| 757 | * It was used by HID as ABS_MISC+6 and userspace needs to detect if | ||
| 758 | * the next ABS_* event is correct or is just ABS_MISC + n. | ||
| 759 | * We define here ABS_RESERVED so userspace can rely on it and detect | ||
| 760 | * the situation described above. | ||
| 761 | */ | ||
| 762 | #define ABS_RESERVED 0x2e | ||
| 763 | |||
| 755 | #define ABS_MT_SLOT 0x2f /* MT slot being modified */ | 764 | #define ABS_MT_SLOT 0x2f /* MT slot being modified */ |
| 756 | #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ | 765 | #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ |
| 757 | #define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ | 766 | #define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ |
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 998983a6e6b7..3dcfc6148f99 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h | |||
| @@ -404,9 +404,6 @@ enum v4l2_mpeg_video_multi_slice_mode { | |||
| 404 | #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228) | 404 | #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228) |
| 405 | #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229) | 405 | #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229) |
| 406 | 406 | ||
| 407 | #define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250) | ||
| 408 | #define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251) | ||
| 409 | |||
| 410 | #define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300) | 407 | #define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300) |
| 411 | #define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301) | 408 | #define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301) |
| 412 | #define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302) | 409 | #define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302) |
| @@ -1097,69 +1094,4 @@ enum v4l2_detect_md_mode { | |||
| 1097 | #define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3) | 1094 | #define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3) |
| 1098 | #define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4) | 1095 | #define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4) |
| 1099 | 1096 | ||
| 1100 | #define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 | ||
| 1101 | #define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 | ||
| 1102 | #define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 | ||
| 1103 | #define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 | ||
| 1104 | |||
| 1105 | struct v4l2_mpeg2_sequence { | ||
| 1106 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ | ||
| 1107 | __u16 horizontal_size; | ||
| 1108 | __u16 vertical_size; | ||
| 1109 | __u32 vbv_buffer_size; | ||
| 1110 | |||
| 1111 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ | ||
| 1112 | __u8 profile_and_level_indication; | ||
| 1113 | __u8 progressive_sequence; | ||
| 1114 | __u8 chroma_format; | ||
| 1115 | __u8 pad; | ||
| 1116 | }; | ||
| 1117 | |||
| 1118 | struct v4l2_mpeg2_picture { | ||
| 1119 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ | ||
| 1120 | __u8 picture_coding_type; | ||
| 1121 | |||
| 1122 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ | ||
| 1123 | __u8 f_code[2][2]; | ||
| 1124 | __u8 intra_dc_precision; | ||
| 1125 | __u8 picture_structure; | ||
| 1126 | __u8 top_field_first; | ||
| 1127 | __u8 frame_pred_frame_dct; | ||
| 1128 | __u8 concealment_motion_vectors; | ||
| 1129 | __u8 q_scale_type; | ||
| 1130 | __u8 intra_vlc_format; | ||
| 1131 | __u8 alternate_scan; | ||
| 1132 | __u8 repeat_first_field; | ||
| 1133 | __u8 progressive_frame; | ||
| 1134 | __u8 pad; | ||
| 1135 | }; | ||
| 1136 | |||
| 1137 | struct v4l2_ctrl_mpeg2_slice_params { | ||
| 1138 | __u32 bit_size; | ||
| 1139 | __u32 data_bit_offset; | ||
| 1140 | |||
| 1141 | struct v4l2_mpeg2_sequence sequence; | ||
| 1142 | struct v4l2_mpeg2_picture picture; | ||
| 1143 | |||
| 1144 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ | ||
| 1145 | __u8 quantiser_scale_code; | ||
| 1146 | |||
| 1147 | __u8 backward_ref_index; | ||
| 1148 | __u8 forward_ref_index; | ||
| 1149 | __u8 pad; | ||
| 1150 | }; | ||
| 1151 | |||
| 1152 | struct v4l2_ctrl_mpeg2_quantization { | ||
| 1153 | /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */ | ||
| 1154 | __u8 load_intra_quantiser_matrix; | ||
| 1155 | __u8 load_non_intra_quantiser_matrix; | ||
| 1156 | __u8 load_chroma_intra_quantiser_matrix; | ||
| 1157 | __u8 load_chroma_non_intra_quantiser_matrix; | ||
| 1158 | |||
| 1159 | __u8 intra_quantiser_matrix[64]; | ||
| 1160 | __u8 non_intra_quantiser_matrix[64]; | ||
| 1161 | __u8 chroma_intra_quantiser_matrix[64]; | ||
| 1162 | __u8 chroma_non_intra_quantiser_matrix[64]; | ||
| 1163 | }; | ||
| 1164 | |||
| 1165 | #endif | 1097 | #endif |
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index c8e8ff810190..2ba2ad0e23fb 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h | |||
| @@ -1622,8 +1622,6 @@ struct v4l2_ext_control { | |||
| 1622 | __u8 __user *p_u8; | 1622 | __u8 __user *p_u8; |
| 1623 | __u16 __user *p_u16; | 1623 | __u16 __user *p_u16; |
| 1624 | __u32 __user *p_u32; | 1624 | __u32 __user *p_u32; |
| 1625 | struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params; | ||
| 1626 | struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization; | ||
| 1627 | void __user *ptr; | 1625 | void __user *ptr; |
| 1628 | }; | 1626 | }; |
| 1629 | } __attribute__ ((packed)); | 1627 | } __attribute__ ((packed)); |
| @@ -1669,8 +1667,6 @@ enum v4l2_ctrl_type { | |||
| 1669 | V4L2_CTRL_TYPE_U8 = 0x0100, | 1667 | V4L2_CTRL_TYPE_U8 = 0x0100, |
| 1670 | V4L2_CTRL_TYPE_U16 = 0x0101, | 1668 | V4L2_CTRL_TYPE_U16 = 0x0101, |
| 1671 | V4L2_CTRL_TYPE_U32 = 0x0102, | 1669 | V4L2_CTRL_TYPE_U32 = 0x0102, |
| 1672 | V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103, | ||
| 1673 | V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104, | ||
| 1674 | }; | 1670 | }; |
| 1675 | 1671 | ||
| 1676 | /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ | 1672 | /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ |
diff --git a/init/Kconfig b/init/Kconfig index cf5b5a0dcbc2..ed9352513c32 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -515,8 +515,8 @@ config PSI_DEFAULT_DISABLED | |||
| 515 | depends on PSI | 515 | depends on PSI |
| 516 | help | 516 | help |
| 517 | If set, pressure stall information tracking will be disabled | 517 | If set, pressure stall information tracking will be disabled |
| 518 | per default but can be enabled through passing psi_enable=1 | 518 | per default but can be enabled through passing psi=1 on the |
| 519 | on the kernel commandline during boot. | 519 | kernel commandline during boot. |
| 520 | 520 | ||
| 521 | endmenu # "CPU/Task time and stats accounting" | 521 | endmenu # "CPU/Task time and stats accounting" |
| 522 | 522 | ||
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ee4c82667d65..4da543d6bea2 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <uapi/linux/types.h> | 5 | #include <uapi/linux/types.h> |
| 6 | #include <linux/seq_file.h> | 6 | #include <linux/seq_file.h> |
| 7 | #include <linux/compiler.h> | 7 | #include <linux/compiler.h> |
| 8 | #include <linux/ctype.h> | ||
| 8 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
| 9 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
| 10 | #include <linux/anon_inodes.h> | 11 | #include <linux/anon_inodes.h> |
| @@ -426,6 +427,30 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset) | |||
| 426 | offset < btf->hdr.str_len; | 427 | offset < btf->hdr.str_len; |
| 427 | } | 428 | } |
| 428 | 429 | ||
| 430 | /* Only C-style identifier is permitted. This can be relaxed if | ||
| 431 | * necessary. | ||
| 432 | */ | ||
| 433 | static bool btf_name_valid_identifier(const struct btf *btf, u32 offset) | ||
| 434 | { | ||
| 435 | /* offset must be valid */ | ||
| 436 | const char *src = &btf->strings[offset]; | ||
| 437 | const char *src_limit; | ||
| 438 | |||
| 439 | if (!isalpha(*src) && *src != '_') | ||
| 440 | return false; | ||
| 441 | |||
| 442 | /* set a limit on identifier length */ | ||
| 443 | src_limit = src + KSYM_NAME_LEN; | ||
| 444 | src++; | ||
| 445 | while (*src && src < src_limit) { | ||
| 446 | if (!isalnum(*src) && *src != '_') | ||
| 447 | return false; | ||
| 448 | src++; | ||
| 449 | } | ||
| 450 | |||
| 451 | return !*src; | ||
| 452 | } | ||
| 453 | |||
| 429 | static const char *btf_name_by_offset(const struct btf *btf, u32 offset) | 454 | static const char *btf_name_by_offset(const struct btf *btf, u32 offset) |
| 430 | { | 455 | { |
| 431 | if (!offset) | 456 | if (!offset) |
| @@ -1143,6 +1168,22 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env, | |||
| 1143 | return -EINVAL; | 1168 | return -EINVAL; |
| 1144 | } | 1169 | } |
| 1145 | 1170 | ||
| 1171 | /* typedef type must have a valid name, and other ref types, | ||
| 1172 | * volatile, const, restrict, should have a null name. | ||
| 1173 | */ | ||
| 1174 | if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { | ||
| 1175 | if (!t->name_off || | ||
| 1176 | !btf_name_valid_identifier(env->btf, t->name_off)) { | ||
| 1177 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1178 | return -EINVAL; | ||
| 1179 | } | ||
| 1180 | } else { | ||
| 1181 | if (t->name_off) { | ||
| 1182 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1183 | return -EINVAL; | ||
| 1184 | } | ||
| 1185 | } | ||
| 1186 | |||
| 1146 | btf_verifier_log_type(env, t, NULL); | 1187 | btf_verifier_log_type(env, t, NULL); |
| 1147 | 1188 | ||
| 1148 | return 0; | 1189 | return 0; |
| @@ -1300,6 +1341,13 @@ static s32 btf_fwd_check_meta(struct btf_verifier_env *env, | |||
| 1300 | return -EINVAL; | 1341 | return -EINVAL; |
| 1301 | } | 1342 | } |
| 1302 | 1343 | ||
| 1344 | /* fwd type must have a valid name */ | ||
| 1345 | if (!t->name_off || | ||
| 1346 | !btf_name_valid_identifier(env->btf, t->name_off)) { | ||
| 1347 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1348 | return -EINVAL; | ||
| 1349 | } | ||
| 1350 | |||
| 1303 | btf_verifier_log_type(env, t, NULL); | 1351 | btf_verifier_log_type(env, t, NULL); |
| 1304 | 1352 | ||
| 1305 | return 0; | 1353 | return 0; |
| @@ -1356,6 +1404,12 @@ static s32 btf_array_check_meta(struct btf_verifier_env *env, | |||
| 1356 | return -EINVAL; | 1404 | return -EINVAL; |
| 1357 | } | 1405 | } |
| 1358 | 1406 | ||
| 1407 | /* array type should not have a name */ | ||
| 1408 | if (t->name_off) { | ||
| 1409 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1410 | return -EINVAL; | ||
| 1411 | } | ||
| 1412 | |||
| 1359 | if (btf_type_vlen(t)) { | 1413 | if (btf_type_vlen(t)) { |
| 1360 | btf_verifier_log_type(env, t, "vlen != 0"); | 1414 | btf_verifier_log_type(env, t, "vlen != 0"); |
| 1361 | return -EINVAL; | 1415 | return -EINVAL; |
| @@ -1532,6 +1586,13 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env, | |||
| 1532 | return -EINVAL; | 1586 | return -EINVAL; |
| 1533 | } | 1587 | } |
| 1534 | 1588 | ||
| 1589 | /* struct type either no name or a valid one */ | ||
| 1590 | if (t->name_off && | ||
| 1591 | !btf_name_valid_identifier(env->btf, t->name_off)) { | ||
| 1592 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1593 | return -EINVAL; | ||
| 1594 | } | ||
| 1595 | |||
| 1535 | btf_verifier_log_type(env, t, NULL); | 1596 | btf_verifier_log_type(env, t, NULL); |
| 1536 | 1597 | ||
| 1537 | last_offset = 0; | 1598 | last_offset = 0; |
| @@ -1543,6 +1604,12 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env, | |||
| 1543 | return -EINVAL; | 1604 | return -EINVAL; |
| 1544 | } | 1605 | } |
| 1545 | 1606 | ||
| 1607 | /* struct member either no name or a valid one */ | ||
| 1608 | if (member->name_off && | ||
| 1609 | !btf_name_valid_identifier(btf, member->name_off)) { | ||
| 1610 | btf_verifier_log_member(env, t, member, "Invalid name"); | ||
| 1611 | return -EINVAL; | ||
| 1612 | } | ||
| 1546 | /* A member cannot be in type void */ | 1613 | /* A member cannot be in type void */ |
| 1547 | if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { | 1614 | if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { |
| 1548 | btf_verifier_log_member(env, t, member, | 1615 | btf_verifier_log_member(env, t, member, |
| @@ -1730,6 +1797,13 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env, | |||
| 1730 | return -EINVAL; | 1797 | return -EINVAL; |
| 1731 | } | 1798 | } |
| 1732 | 1799 | ||
| 1800 | /* enum type either no name or a valid one */ | ||
| 1801 | if (t->name_off && | ||
| 1802 | !btf_name_valid_identifier(env->btf, t->name_off)) { | ||
| 1803 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1804 | return -EINVAL; | ||
| 1805 | } | ||
| 1806 | |||
| 1733 | btf_verifier_log_type(env, t, NULL); | 1807 | btf_verifier_log_type(env, t, NULL); |
| 1734 | 1808 | ||
| 1735 | for (i = 0; i < nr_enums; i++) { | 1809 | for (i = 0; i < nr_enums; i++) { |
| @@ -1739,6 +1813,14 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env, | |||
| 1739 | return -EINVAL; | 1813 | return -EINVAL; |
| 1740 | } | 1814 | } |
| 1741 | 1815 | ||
| 1816 | /* enum member must have a valid name */ | ||
| 1817 | if (!enums[i].name_off || | ||
| 1818 | !btf_name_valid_identifier(btf, enums[i].name_off)) { | ||
| 1819 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1820 | return -EINVAL; | ||
| 1821 | } | ||
| 1822 | |||
| 1823 | |||
| 1742 | btf_verifier_log(env, "\t%s val=%d\n", | 1824 | btf_verifier_log(env, "\t%s val=%d\n", |
| 1743 | btf_name_by_offset(btf, enums[i].name_off), | 1825 | btf_name_by_offset(btf, enums[i].name_off), |
| 1744 | enums[i].val); | 1826 | enums[i].val); |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6dd419550aba..fc760d00a38c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
| @@ -175,6 +175,7 @@ struct bpf_verifier_stack_elem { | |||
| 175 | 175 | ||
| 176 | #define BPF_COMPLEXITY_LIMIT_INSNS 131072 | 176 | #define BPF_COMPLEXITY_LIMIT_INSNS 131072 |
| 177 | #define BPF_COMPLEXITY_LIMIT_STACK 1024 | 177 | #define BPF_COMPLEXITY_LIMIT_STACK 1024 |
| 178 | #define BPF_COMPLEXITY_LIMIT_STATES 64 | ||
| 178 | 179 | ||
| 179 | #define BPF_MAP_PTR_UNPRIV 1UL | 180 | #define BPF_MAP_PTR_UNPRIV 1UL |
| 180 | #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ | 181 | #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ |
| @@ -3751,6 +3752,79 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, | |||
| 3751 | } | 3752 | } |
| 3752 | } | 3753 | } |
| 3753 | 3754 | ||
| 3755 | /* compute branch direction of the expression "if (reg opcode val) goto target;" | ||
| 3756 | * and return: | ||
| 3757 | * 1 - branch will be taken and "goto target" will be executed | ||
| 3758 | * 0 - branch will not be taken and fall-through to next insn | ||
| 3759 | * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10] | ||
| 3760 | */ | ||
| 3761 | static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) | ||
| 3762 | { | ||
| 3763 | if (__is_pointer_value(false, reg)) | ||
| 3764 | return -1; | ||
| 3765 | |||
| 3766 | switch (opcode) { | ||
| 3767 | case BPF_JEQ: | ||
| 3768 | if (tnum_is_const(reg->var_off)) | ||
| 3769 | return !!tnum_equals_const(reg->var_off, val); | ||
| 3770 | break; | ||
| 3771 | case BPF_JNE: | ||
| 3772 | if (tnum_is_const(reg->var_off)) | ||
| 3773 | return !tnum_equals_const(reg->var_off, val); | ||
| 3774 | break; | ||
| 3775 | case BPF_JGT: | ||
| 3776 | if (reg->umin_value > val) | ||
| 3777 | return 1; | ||
| 3778 | else if (reg->umax_value <= val) | ||
| 3779 | return 0; | ||
| 3780 | break; | ||
| 3781 | case BPF_JSGT: | ||
| 3782 | if (reg->smin_value > (s64)val) | ||
| 3783 | return 1; | ||
| 3784 | else if (reg->smax_value < (s64)val) | ||
| 3785 | return 0; | ||
| 3786 | break; | ||
| 3787 | case BPF_JLT: | ||
| 3788 | if (reg->umax_value < val) | ||
| 3789 | return 1; | ||
| 3790 | else if (reg->umin_value >= val) | ||
| 3791 | return 0; | ||
| 3792 | break; | ||
| 3793 | case BPF_JSLT: | ||
| 3794 | if (reg->smax_value < (s64)val) | ||
| 3795 | return 1; | ||
| 3796 | else if (reg->smin_value >= (s64)val) | ||
| 3797 | return 0; | ||
| 3798 | break; | ||
| 3799 | case BPF_JGE: | ||
| 3800 | if (reg->umin_value >= val) | ||
| 3801 | return 1; | ||
| 3802 | else if (reg->umax_value < val) | ||
| 3803 | return 0; | ||
| 3804 | break; | ||
| 3805 | case BPF_JSGE: | ||
| 3806 | if (reg->smin_value >= (s64)val) | ||
| 3807 | return 1; | ||
| 3808 | else if (reg->smax_value < (s64)val) | ||
| 3809 | return 0; | ||
| 3810 | break; | ||
| 3811 | case BPF_JLE: | ||
| 3812 | if (reg->umax_value <= val) | ||
| 3813 | return 1; | ||
| 3814 | else if (reg->umin_value > val) | ||
| 3815 | return 0; | ||
| 3816 | break; | ||
| 3817 | case BPF_JSLE: | ||
| 3818 | if (reg->smax_value <= (s64)val) | ||
| 3819 | return 1; | ||
| 3820 | else if (reg->smin_value > (s64)val) | ||
| 3821 | return 0; | ||
| 3822 | break; | ||
| 3823 | } | ||
| 3824 | |||
| 3825 | return -1; | ||
| 3826 | } | ||
| 3827 | |||
| 3754 | /* Adjusts the register min/max values in the case that the dst_reg is the | 3828 | /* Adjusts the register min/max values in the case that the dst_reg is the |
| 3755 | * variable register that we are working on, and src_reg is a constant or we're | 3829 | * variable register that we are working on, and src_reg is a constant or we're |
| 3756 | * simply doing a BPF_K check. | 3830 | * simply doing a BPF_K check. |
| @@ -4152,21 +4226,15 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, | |||
| 4152 | 4226 | ||
| 4153 | dst_reg = ®s[insn->dst_reg]; | 4227 | dst_reg = ®s[insn->dst_reg]; |
| 4154 | 4228 | ||
| 4155 | /* detect if R == 0 where R was initialized to zero earlier */ | 4229 | if (BPF_SRC(insn->code) == BPF_K) { |
| 4156 | if (BPF_SRC(insn->code) == BPF_K && | 4230 | int pred = is_branch_taken(dst_reg, insn->imm, opcode); |
| 4157 | (opcode == BPF_JEQ || opcode == BPF_JNE) && | 4231 | |
| 4158 | dst_reg->type == SCALAR_VALUE && | 4232 | if (pred == 1) { |
| 4159 | tnum_is_const(dst_reg->var_off)) { | 4233 | /* only follow the goto, ignore fall-through */ |
| 4160 | if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) || | ||
| 4161 | (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) { | ||
| 4162 | /* if (imm == imm) goto pc+off; | ||
| 4163 | * only follow the goto, ignore fall-through | ||
| 4164 | */ | ||
| 4165 | *insn_idx += insn->off; | 4234 | *insn_idx += insn->off; |
| 4166 | return 0; | 4235 | return 0; |
| 4167 | } else { | 4236 | } else if (pred == 0) { |
| 4168 | /* if (imm != imm) goto pc+off; | 4237 | /* only follow fall-through branch, since |
| 4169 | * only follow fall-through branch, since | ||
| 4170 | * that's where the program will go | 4238 | * that's where the program will go |
| 4171 | */ | 4239 | */ |
| 4172 | return 0; | 4240 | return 0; |
| @@ -4980,7 +5048,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) | |||
| 4980 | struct bpf_verifier_state_list *new_sl; | 5048 | struct bpf_verifier_state_list *new_sl; |
| 4981 | struct bpf_verifier_state_list *sl; | 5049 | struct bpf_verifier_state_list *sl; |
| 4982 | struct bpf_verifier_state *cur = env->cur_state, *new; | 5050 | struct bpf_verifier_state *cur = env->cur_state, *new; |
| 4983 | int i, j, err; | 5051 | int i, j, err, states_cnt = 0; |
| 4984 | 5052 | ||
| 4985 | sl = env->explored_states[insn_idx]; | 5053 | sl = env->explored_states[insn_idx]; |
| 4986 | if (!sl) | 5054 | if (!sl) |
| @@ -5007,8 +5075,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) | |||
| 5007 | return 1; | 5075 | return 1; |
| 5008 | } | 5076 | } |
| 5009 | sl = sl->next; | 5077 | sl = sl->next; |
| 5078 | states_cnt++; | ||
| 5010 | } | 5079 | } |
| 5011 | 5080 | ||
| 5081 | if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) | ||
| 5082 | return 0; | ||
| 5083 | |||
| 5012 | /* there were no equivalent states, remember current one. | 5084 | /* there were no equivalent states, remember current one. |
| 5013 | * technically the current state is not proven to be safe yet, | 5085 | * technically the current state is not proven to be safe yet, |
| 5014 | * but it will either reach outer most bpf_exit (which means it's safe) | 5086 | * but it will either reach outer most bpf_exit (which means it's safe) |
| @@ -5148,6 +5220,9 @@ static int do_check(struct bpf_verifier_env *env) | |||
| 5148 | goto process_bpf_exit; | 5220 | goto process_bpf_exit; |
| 5149 | } | 5221 | } |
| 5150 | 5222 | ||
| 5223 | if (signal_pending(current)) | ||
| 5224 | return -EAGAIN; | ||
| 5225 | |||
| 5151 | if (need_resched()) | 5226 | if (need_resched()) |
| 5152 | cond_resched(); | 5227 | cond_resched(); |
| 5153 | 5228 | ||
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 322e97bbb437..abbd8da9ac21 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
| @@ -572,7 +572,9 @@ static void put_uprobe(struct uprobe *uprobe) | |||
| 572 | * gets called, we don't get a chance to remove uprobe from | 572 | * gets called, we don't get a chance to remove uprobe from |
| 573 | * delayed_uprobe_list from remove_breakpoint(). Do it here. | 573 | * delayed_uprobe_list from remove_breakpoint(). Do it here. |
| 574 | */ | 574 | */ |
| 575 | mutex_lock(&delayed_uprobe_lock); | ||
| 575 | delayed_uprobe_remove(uprobe, NULL); | 576 | delayed_uprobe_remove(uprobe, NULL); |
| 577 | mutex_unlock(&delayed_uprobe_lock); | ||
| 576 | kfree(uprobe); | 578 | kfree(uprobe); |
| 577 | } | 579 | } |
| 578 | } | 580 | } |
diff --git a/kernel/stackleak.c b/kernel/stackleak.c index 08cb57eed389..b193a59fc05b 100644 --- a/kernel/stackleak.c +++ b/kernel/stackleak.c | |||
| @@ -104,7 +104,7 @@ asmlinkage void notrace stackleak_erase(void) | |||
| 104 | } | 104 | } |
| 105 | NOKPROBE_SYMBOL(stackleak_erase); | 105 | NOKPROBE_SYMBOL(stackleak_erase); |
| 106 | 106 | ||
| 107 | void __used stackleak_track_stack(void) | 107 | void __used notrace stackleak_track_stack(void) |
| 108 | { | 108 | { |
| 109 | /* | 109 | /* |
| 110 | * N.B. stackleak_erase() fills the kernel stack with the poison value, | 110 | * N.B. stackleak_erase() fills the kernel stack with the poison value, |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 77734451cb05..e23eb9fc77aa 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -5460,6 +5460,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops) | |||
| 5460 | if (ops->flags & FTRACE_OPS_FL_ENABLED) | 5460 | if (ops->flags & FTRACE_OPS_FL_ENABLED) |
| 5461 | ftrace_shutdown(ops, 0); | 5461 | ftrace_shutdown(ops, 0); |
| 5462 | ops->flags |= FTRACE_OPS_FL_DELETED; | 5462 | ops->flags |= FTRACE_OPS_FL_DELETED; |
| 5463 | ftrace_free_filter(ops); | ||
| 5463 | mutex_unlock(&ftrace_lock); | 5464 | mutex_unlock(&ftrace_lock); |
| 5464 | } | 5465 | } |
| 5465 | 5466 | ||
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 84a65173b1e9..5574e862de8d 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
| @@ -570,11 +570,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, | |||
| 570 | } | 570 | } |
| 571 | } | 571 | } |
| 572 | 572 | ||
| 573 | kfree(op_stack); | ||
| 574 | kfree(inverts); | ||
| 573 | return prog; | 575 | return prog; |
| 574 | out_free: | 576 | out_free: |
| 575 | kfree(op_stack); | 577 | kfree(op_stack); |
| 576 | kfree(prog_stack); | ||
| 577 | kfree(inverts); | 578 | kfree(inverts); |
| 579 | kfree(prog_stack); | ||
| 578 | return ERR_PTR(ret); | 580 | return ERR_PTR(ret); |
| 579 | } | 581 | } |
| 580 | 582 | ||
| @@ -1718,6 +1720,7 @@ static int create_filter(struct trace_event_call *call, | |||
| 1718 | err = process_preds(call, filter_string, *filterp, pe); | 1720 | err = process_preds(call, filter_string, *filterp, pe); |
| 1719 | if (err && set_str) | 1721 | if (err && set_str) |
| 1720 | append_filter_err(pe, *filterp); | 1722 | append_filter_err(pe, *filterp); |
| 1723 | create_filter_finish(pe); | ||
| 1721 | 1724 | ||
| 1722 | return err; | 1725 | return err; |
| 1723 | } | 1726 | } |
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 2152d1e530cb..cd12ecb66eb9 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
| @@ -732,8 +732,10 @@ int set_trigger_filter(char *filter_str, | |||
| 732 | 732 | ||
| 733 | /* The filter is for the 'trigger' event, not the triggered event */ | 733 | /* The filter is for the 'trigger' event, not the triggered event */ |
| 734 | ret = create_event_filter(file->event_call, filter_str, false, &filter); | 734 | ret = create_event_filter(file->event_call, filter_str, false, &filter); |
| 735 | if (ret) | 735 | /* |
| 736 | goto out; | 736 | * If create_event_filter() fails, filter still needs to be freed. |
| 737 | * Which the calling code will do with data->filter. | ||
| 738 | */ | ||
| 737 | assign: | 739 | assign: |
| 738 | tmp = rcu_access_pointer(data->filter); | 740 | tmp = rcu_access_pointer(data->filter); |
| 739 | 741 | ||
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 1106bb6aa01e..14d51548bea6 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -784,11 +784,11 @@ void *__radix_tree_lookup(const struct radix_tree_root *root, | |||
| 784 | while (radix_tree_is_internal_node(node)) { | 784 | while (radix_tree_is_internal_node(node)) { |
| 785 | unsigned offset; | 785 | unsigned offset; |
| 786 | 786 | ||
| 787 | if (node == RADIX_TREE_RETRY) | ||
| 788 | goto restart; | ||
| 789 | parent = entry_to_node(node); | 787 | parent = entry_to_node(node); |
| 790 | offset = radix_tree_descend(parent, &node, index); | 788 | offset = radix_tree_descend(parent, &node, index); |
| 791 | slot = parent->slots + offset; | 789 | slot = parent->slots + offset; |
| 790 | if (node == RADIX_TREE_RETRY) | ||
| 791 | goto restart; | ||
| 792 | if (parent->shift == 0) | 792 | if (parent->shift == 0) |
| 793 | break; | 793 | break; |
| 794 | } | 794 | } |
diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 0598e86af8fc..4676c0a1eeca 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c | |||
| @@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { } | |||
| 28 | } while (0) | 28 | } while (0) |
| 29 | #endif | 29 | #endif |
| 30 | 30 | ||
| 31 | static void *xa_mk_index(unsigned long index) | ||
| 32 | { | ||
| 33 | return xa_mk_value(index & LONG_MAX); | ||
| 34 | } | ||
| 35 | |||
| 31 | static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) | 36 | static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) |
| 32 | { | 37 | { |
| 33 | return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp); | 38 | return xa_store(xa, index, xa_mk_index(index), gfp); |
| 34 | } | 39 | } |
| 35 | 40 | ||
| 36 | static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) | 41 | static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) |
| 37 | { | 42 | { |
| 38 | u32 id = 0; | 43 | u32 id = 0; |
| 39 | 44 | ||
| 40 | XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX), | 45 | XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index), |
| 41 | gfp) != 0); | 46 | gfp) != 0); |
| 42 | XA_BUG_ON(xa, id != index); | 47 | XA_BUG_ON(xa, id != index); |
| 43 | } | 48 | } |
| 44 | 49 | ||
| 45 | static void xa_erase_index(struct xarray *xa, unsigned long index) | 50 | static void xa_erase_index(struct xarray *xa, unsigned long index) |
| 46 | { | 51 | { |
| 47 | XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX)); | 52 | XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); |
| 48 | XA_BUG_ON(xa, xa_load(xa, index) != NULL); | 53 | XA_BUG_ON(xa, xa_load(xa, index) != NULL); |
| 49 | } | 54 | } |
| 50 | 55 | ||
| @@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa) | |||
| 118 | 123 | ||
| 119 | xas_set(&xas, 0); | 124 | xas_set(&xas, 0); |
| 120 | xas_for_each(&xas, entry, ULONG_MAX) { | 125 | xas_for_each(&xas, entry, ULONG_MAX) { |
| 121 | xas_store(&xas, xa_mk_value(xas.xa_index)); | 126 | xas_store(&xas, xa_mk_index(xas.xa_index)); |
| 122 | } | 127 | } |
| 123 | xas_unlock(&xas); | 128 | xas_unlock(&xas); |
| 124 | 129 | ||
| @@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) | |||
| 196 | XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); | 201 | XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); |
| 197 | xa_set_mark(xa, index + 2, XA_MARK_1); | 202 | xa_set_mark(xa, index + 2, XA_MARK_1); |
| 198 | XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); | 203 | XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); |
| 199 | xa_store_order(xa, index, order, xa_mk_value(index), | 204 | xa_store_order(xa, index, order, xa_mk_index(index), |
| 200 | GFP_KERNEL); | 205 | GFP_KERNEL); |
| 201 | for (i = base; i < next; i++) { | 206 | for (i = base; i < next; i++) { |
| 202 | XA_STATE(xas, xa, i); | 207 | XA_STATE(xas, xa, i); |
| @@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa) | |||
| 405 | xas_set(&xas, j); | 410 | xas_set(&xas, j); |
| 406 | do { | 411 | do { |
| 407 | xas_lock(&xas); | 412 | xas_lock(&xas); |
| 408 | xas_store(&xas, xa_mk_value(j)); | 413 | xas_store(&xas, xa_mk_index(j)); |
| 409 | xas_unlock(&xas); | 414 | xas_unlock(&xas); |
| 410 | } while (xas_nomem(&xas, GFP_KERNEL)); | 415 | } while (xas_nomem(&xas, GFP_KERNEL)); |
| 411 | } | 416 | } |
| @@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa) | |||
| 423 | xas_set(&xas, 0); | 428 | xas_set(&xas, 0); |
| 424 | j = i; | 429 | j = i; |
| 425 | xas_for_each(&xas, entry, ULONG_MAX) { | 430 | xas_for_each(&xas, entry, ULONG_MAX) { |
| 426 | XA_BUG_ON(xa, entry != xa_mk_value(j)); | 431 | XA_BUG_ON(xa, entry != xa_mk_index(j)); |
| 427 | xas_store(&xas, NULL); | 432 | xas_store(&xas, NULL); |
| 428 | j++; | 433 | j++; |
| 429 | } | 434 | } |
| @@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, | |||
| 440 | unsigned long min = index & ~((1UL << order) - 1); | 445 | unsigned long min = index & ~((1UL << order) - 1); |
| 441 | unsigned long max = min + (1UL << order); | 446 | unsigned long max = min + (1UL << order); |
| 442 | 447 | ||
| 443 | xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL); | 448 | xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); |
| 444 | XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index)); | 449 | XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index)); |
| 445 | XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index)); | 450 | XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index)); |
| 446 | XA_BUG_ON(xa, xa_load(xa, max) != NULL); | 451 | XA_BUG_ON(xa, xa_load(xa, max) != NULL); |
| 447 | XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); | 452 | XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); |
| 448 | 453 | ||
| 449 | xas_lock(&xas); | 454 | xas_lock(&xas); |
| 450 | XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index)); | 455 | XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index)); |
| 451 | xas_unlock(&xas); | 456 | xas_unlock(&xas); |
| 452 | XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min)); | 457 | XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min)); |
| 453 | XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min)); | 458 | XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min)); |
| 454 | XA_BUG_ON(xa, xa_load(xa, max) != NULL); | 459 | XA_BUG_ON(xa, xa_load(xa, max) != NULL); |
| 455 | XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); | 460 | XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); |
| 456 | 461 | ||
| @@ -471,6 +476,32 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, | |||
| 471 | xas_unlock(&xas); | 476 | xas_unlock(&xas); |
| 472 | XA_BUG_ON(xa, !xa_empty(xa)); | 477 | XA_BUG_ON(xa, !xa_empty(xa)); |
| 473 | } | 478 | } |
| 479 | |||
| 480 | static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, | ||
| 481 | unsigned int order) | ||
| 482 | { | ||
| 483 | XA_STATE(xas, xa, 0); | ||
| 484 | void *entry; | ||
| 485 | int n = 0; | ||
| 486 | |||
| 487 | xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); | ||
| 488 | |||
| 489 | xas_lock(&xas); | ||
| 490 | xas_for_each(&xas, entry, ULONG_MAX) { | ||
| 491 | XA_BUG_ON(xa, entry != xa_mk_index(index)); | ||
| 492 | n++; | ||
| 493 | } | ||
| 494 | XA_BUG_ON(xa, n != 1); | ||
| 495 | xas_set(&xas, index + 1); | ||
| 496 | xas_for_each(&xas, entry, ULONG_MAX) { | ||
| 497 | XA_BUG_ON(xa, entry != xa_mk_index(index)); | ||
| 498 | n++; | ||
| 499 | } | ||
| 500 | XA_BUG_ON(xa, n != 2); | ||
| 501 | xas_unlock(&xas); | ||
| 502 | |||
| 503 | xa_destroy(xa); | ||
| 504 | } | ||
| 474 | #endif | 505 | #endif |
| 475 | 506 | ||
| 476 | static noinline void check_multi_store(struct xarray *xa) | 507 | static noinline void check_multi_store(struct xarray *xa) |
| @@ -523,15 +554,15 @@ static noinline void check_multi_store(struct xarray *xa) | |||
| 523 | 554 | ||
| 524 | for (i = 0; i < max_order; i++) { | 555 | for (i = 0; i < max_order; i++) { |
| 525 | for (j = 0; j < max_order; j++) { | 556 | for (j = 0; j < max_order; j++) { |
| 526 | xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL); | 557 | xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL); |
| 527 | xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL); | 558 | xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL); |
| 528 | 559 | ||
| 529 | for (k = 0; k < max_order; k++) { | 560 | for (k = 0; k < max_order; k++) { |
| 530 | void *entry = xa_load(xa, (1UL << k) - 1); | 561 | void *entry = xa_load(xa, (1UL << k) - 1); |
| 531 | if ((i < k) && (j < k)) | 562 | if ((i < k) && (j < k)) |
| 532 | XA_BUG_ON(xa, entry != NULL); | 563 | XA_BUG_ON(xa, entry != NULL); |
| 533 | else | 564 | else |
| 534 | XA_BUG_ON(xa, entry != xa_mk_value(j)); | 565 | XA_BUG_ON(xa, entry != xa_mk_index(j)); |
| 535 | } | 566 | } |
| 536 | 567 | ||
| 537 | xa_erase(xa, 0); | 568 | xa_erase(xa, 0); |
| @@ -545,6 +576,11 @@ static noinline void check_multi_store(struct xarray *xa) | |||
| 545 | check_multi_store_1(xa, (1UL << i) + 1, i); | 576 | check_multi_store_1(xa, (1UL << i) + 1, i); |
| 546 | } | 577 | } |
| 547 | check_multi_store_2(xa, 4095, 9); | 578 | check_multi_store_2(xa, 4095, 9); |
| 579 | |||
| 580 | for (i = 1; i < 20; i++) { | ||
| 581 | check_multi_store_3(xa, 0, i); | ||
| 582 | check_multi_store_3(xa, 1UL << i, i); | ||
| 583 | } | ||
| 548 | #endif | 584 | #endif |
| 549 | } | 585 | } |
| 550 | 586 | ||
| @@ -587,16 +623,25 @@ static noinline void check_xa_alloc(void) | |||
| 587 | xa_destroy(&xa0); | 623 | xa_destroy(&xa0); |
| 588 | 624 | ||
| 589 | id = 0xfffffffeU; | 625 | id = 0xfffffffeU; |
| 590 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), | 626 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), |
| 591 | GFP_KERNEL) != 0); | 627 | GFP_KERNEL) != 0); |
| 592 | XA_BUG_ON(&xa0, id != 0xfffffffeU); | 628 | XA_BUG_ON(&xa0, id != 0xfffffffeU); |
| 593 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), | 629 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), |
| 594 | GFP_KERNEL) != 0); | 630 | GFP_KERNEL) != 0); |
| 595 | XA_BUG_ON(&xa0, id != 0xffffffffU); | 631 | XA_BUG_ON(&xa0, id != 0xffffffffU); |
| 596 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), | 632 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), |
| 597 | GFP_KERNEL) != -ENOSPC); | 633 | GFP_KERNEL) != -ENOSPC); |
| 598 | XA_BUG_ON(&xa0, id != 0xffffffffU); | 634 | XA_BUG_ON(&xa0, id != 0xffffffffU); |
| 599 | xa_destroy(&xa0); | 635 | xa_destroy(&xa0); |
| 636 | |||
| 637 | id = 10; | ||
| 638 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), | ||
| 639 | GFP_KERNEL) != -ENOSPC); | ||
| 640 | XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0); | ||
| 641 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), | ||
| 642 | GFP_KERNEL) != -ENOSPC); | ||
| 643 | xa_erase_index(&xa0, 3); | ||
| 644 | XA_BUG_ON(&xa0, !xa_empty(&xa0)); | ||
| 600 | } | 645 | } |
| 601 | 646 | ||
| 602 | static noinline void __check_store_iter(struct xarray *xa, unsigned long start, | 647 | static noinline void __check_store_iter(struct xarray *xa, unsigned long start, |
| @@ -610,11 +655,11 @@ retry: | |||
| 610 | xas_lock(&xas); | 655 | xas_lock(&xas); |
| 611 | xas_for_each_conflict(&xas, entry) { | 656 | xas_for_each_conflict(&xas, entry) { |
| 612 | XA_BUG_ON(xa, !xa_is_value(entry)); | 657 | XA_BUG_ON(xa, !xa_is_value(entry)); |
| 613 | XA_BUG_ON(xa, entry < xa_mk_value(start)); | 658 | XA_BUG_ON(xa, entry < xa_mk_index(start)); |
| 614 | XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1)); | 659 | XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1)); |
| 615 | count++; | 660 | count++; |
| 616 | } | 661 | } |
| 617 | xas_store(&xas, xa_mk_value(start)); | 662 | xas_store(&xas, xa_mk_index(start)); |
| 618 | xas_unlock(&xas); | 663 | xas_unlock(&xas); |
| 619 | if (xas_nomem(&xas, GFP_KERNEL)) { | 664 | if (xas_nomem(&xas, GFP_KERNEL)) { |
| 620 | count = 0; | 665 | count = 0; |
| @@ -622,9 +667,9 @@ retry: | |||
| 622 | } | 667 | } |
| 623 | XA_BUG_ON(xa, xas_error(&xas)); | 668 | XA_BUG_ON(xa, xas_error(&xas)); |
| 624 | XA_BUG_ON(xa, count != present); | 669 | XA_BUG_ON(xa, count != present); |
| 625 | XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start)); | 670 | XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); |
| 626 | XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != | 671 | XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != |
| 627 | xa_mk_value(start)); | 672 | xa_mk_index(start)); |
| 628 | xa_erase_index(xa, start); | 673 | xa_erase_index(xa, start); |
| 629 | } | 674 | } |
| 630 | 675 | ||
| @@ -703,7 +748,7 @@ static noinline void check_multi_find_2(struct xarray *xa) | |||
| 703 | for (j = 0; j < index; j++) { | 748 | for (j = 0; j < index; j++) { |
| 704 | XA_STATE(xas, xa, j + index); | 749 | XA_STATE(xas, xa, j + index); |
| 705 | xa_store_index(xa, index - 1, GFP_KERNEL); | 750 | xa_store_index(xa, index - 1, GFP_KERNEL); |
| 706 | xa_store_order(xa, index, i, xa_mk_value(index), | 751 | xa_store_order(xa, index, i, xa_mk_index(index), |
| 707 | GFP_KERNEL); | 752 | GFP_KERNEL); |
| 708 | rcu_read_lock(); | 753 | rcu_read_lock(); |
| 709 | xas_for_each(&xas, entry, ULONG_MAX) { | 754 | xas_for_each(&xas, entry, ULONG_MAX) { |
| @@ -778,7 +823,7 @@ static noinline void check_find_2(struct xarray *xa) | |||
| 778 | j = 0; | 823 | j = 0; |
| 779 | index = 0; | 824 | index = 0; |
| 780 | xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { | 825 | xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { |
| 781 | XA_BUG_ON(xa, xa_mk_value(index) != entry); | 826 | XA_BUG_ON(xa, xa_mk_index(index) != entry); |
| 782 | XA_BUG_ON(xa, index != j++); | 827 | XA_BUG_ON(xa, index != j++); |
| 783 | } | 828 | } |
| 784 | } | 829 | } |
| @@ -786,10 +831,34 @@ static noinline void check_find_2(struct xarray *xa) | |||
| 786 | xa_destroy(xa); | 831 | xa_destroy(xa); |
| 787 | } | 832 | } |
| 788 | 833 | ||
| 834 | static noinline void check_find_3(struct xarray *xa) | ||
| 835 | { | ||
| 836 | XA_STATE(xas, xa, 0); | ||
| 837 | unsigned long i, j, k; | ||
| 838 | void *entry; | ||
| 839 | |||
| 840 | for (i = 0; i < 100; i++) { | ||
| 841 | for (j = 0; j < 100; j++) { | ||
| 842 | for (k = 0; k < 100; k++) { | ||
| 843 | xas_set(&xas, j); | ||
| 844 | xas_for_each_marked(&xas, entry, k, XA_MARK_0) | ||
| 845 | ; | ||
| 846 | if (j > k) | ||
| 847 | XA_BUG_ON(xa, | ||
| 848 | xas.xa_node != XAS_RESTART); | ||
| 849 | } | ||
| 850 | } | ||
| 851 | xa_store_index(xa, i, GFP_KERNEL); | ||
| 852 | xa_set_mark(xa, i, XA_MARK_0); | ||
| 853 | } | ||
| 854 | xa_destroy(xa); | ||
| 855 | } | ||
| 856 | |||
| 789 | static noinline void check_find(struct xarray *xa) | 857 | static noinline void check_find(struct xarray *xa) |
| 790 | { | 858 | { |
| 791 | check_find_1(xa); | 859 | check_find_1(xa); |
| 792 | check_find_2(xa); | 860 | check_find_2(xa); |
| 861 | check_find_3(xa); | ||
| 793 | check_multi_find(xa); | 862 | check_multi_find(xa); |
| 794 | check_multi_find_2(xa); | 863 | check_multi_find_2(xa); |
| 795 | } | 864 | } |
| @@ -829,11 +898,11 @@ static noinline void check_find_entry(struct xarray *xa) | |||
| 829 | for (index = 0; index < (1UL << (order + 5)); | 898 | for (index = 0; index < (1UL << (order + 5)); |
| 830 | index += (1UL << order)) { | 899 | index += (1UL << order)) { |
| 831 | xa_store_order(xa, index, order, | 900 | xa_store_order(xa, index, order, |
| 832 | xa_mk_value(index), GFP_KERNEL); | 901 | xa_mk_index(index), GFP_KERNEL); |
| 833 | XA_BUG_ON(xa, xa_load(xa, index) != | 902 | XA_BUG_ON(xa, xa_load(xa, index) != |
| 834 | xa_mk_value(index)); | 903 | xa_mk_index(index)); |
| 835 | XA_BUG_ON(xa, xa_find_entry(xa, | 904 | XA_BUG_ON(xa, xa_find_entry(xa, |
| 836 | xa_mk_value(index)) != index); | 905 | xa_mk_index(index)) != index); |
| 837 | } | 906 | } |
| 838 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); | 907 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); |
| 839 | xa_destroy(xa); | 908 | xa_destroy(xa); |
| @@ -844,7 +913,7 @@ static noinline void check_find_entry(struct xarray *xa) | |||
| 844 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); | 913 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); |
| 845 | xa_store_index(xa, ULONG_MAX, GFP_KERNEL); | 914 | xa_store_index(xa, ULONG_MAX, GFP_KERNEL); |
| 846 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); | 915 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); |
| 847 | XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1); | 916 | XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); |
| 848 | xa_erase_index(xa, ULONG_MAX); | 917 | xa_erase_index(xa, ULONG_MAX); |
| 849 | XA_BUG_ON(xa, !xa_empty(xa)); | 918 | XA_BUG_ON(xa, !xa_empty(xa)); |
| 850 | } | 919 | } |
| @@ -864,7 +933,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx) | |||
| 864 | XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); | 933 | XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); |
| 865 | XA_BUG_ON(xa, xas.xa_index != i); | 934 | XA_BUG_ON(xa, xas.xa_index != i); |
| 866 | if (i == 0 || i == idx) | 935 | if (i == 0 || i == idx) |
| 867 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 936 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 868 | else | 937 | else |
| 869 | XA_BUG_ON(xa, entry != NULL); | 938 | XA_BUG_ON(xa, entry != NULL); |
| 870 | } | 939 | } |
| @@ -878,7 +947,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx) | |||
| 878 | XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); | 947 | XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); |
| 879 | XA_BUG_ON(xa, xas.xa_index != i); | 948 | XA_BUG_ON(xa, xas.xa_index != i); |
| 880 | if (i == 0 || i == idx) | 949 | if (i == 0 || i == idx) |
| 881 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 950 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 882 | else | 951 | else |
| 883 | XA_BUG_ON(xa, entry != NULL); | 952 | XA_BUG_ON(xa, entry != NULL); |
| 884 | } while (i > 0); | 953 | } while (i > 0); |
| @@ -909,7 +978,7 @@ static noinline void check_move(struct xarray *xa) | |||
| 909 | do { | 978 | do { |
| 910 | void *entry = xas_prev(&xas); | 979 | void *entry = xas_prev(&xas); |
| 911 | i--; | 980 | i--; |
| 912 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 981 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 913 | XA_BUG_ON(xa, i != xas.xa_index); | 982 | XA_BUG_ON(xa, i != xas.xa_index); |
| 914 | } while (i != 0); | 983 | } while (i != 0); |
| 915 | 984 | ||
| @@ -918,7 +987,7 @@ static noinline void check_move(struct xarray *xa) | |||
| 918 | 987 | ||
| 919 | do { | 988 | do { |
| 920 | void *entry = xas_next(&xas); | 989 | void *entry = xas_next(&xas); |
| 921 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 990 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 922 | XA_BUG_ON(xa, i != xas.xa_index); | 991 | XA_BUG_ON(xa, i != xas.xa_index); |
| 923 | i++; | 992 | i++; |
| 924 | } while (i < (1 << 16)); | 993 | } while (i < (1 << 16)); |
| @@ -934,7 +1003,7 @@ static noinline void check_move(struct xarray *xa) | |||
| 934 | void *entry = xas_prev(&xas); | 1003 | void *entry = xas_prev(&xas); |
| 935 | i--; | 1004 | i--; |
| 936 | if ((i < (1 << 8)) || (i >= (1 << 15))) | 1005 | if ((i < (1 << 8)) || (i >= (1 << 15))) |
| 937 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 1006 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 938 | else | 1007 | else |
| 939 | XA_BUG_ON(xa, entry != NULL); | 1008 | XA_BUG_ON(xa, entry != NULL); |
| 940 | XA_BUG_ON(xa, i != xas.xa_index); | 1009 | XA_BUG_ON(xa, i != xas.xa_index); |
| @@ -946,7 +1015,7 @@ static noinline void check_move(struct xarray *xa) | |||
| 946 | do { | 1015 | do { |
| 947 | void *entry = xas_next(&xas); | 1016 | void *entry = xas_next(&xas); |
| 948 | if ((i < (1 << 8)) || (i >= (1 << 15))) | 1017 | if ((i < (1 << 8)) || (i >= (1 << 15))) |
| 949 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 1018 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 950 | else | 1019 | else |
| 951 | XA_BUG_ON(xa, entry != NULL); | 1020 | XA_BUG_ON(xa, entry != NULL); |
| 952 | XA_BUG_ON(xa, i != xas.xa_index); | 1021 | XA_BUG_ON(xa, i != xas.xa_index); |
| @@ -976,7 +1045,7 @@ static noinline void xa_store_many_order(struct xarray *xa, | |||
| 976 | if (xas_error(&xas)) | 1045 | if (xas_error(&xas)) |
| 977 | goto unlock; | 1046 | goto unlock; |
| 978 | for (i = 0; i < (1U << order); i++) { | 1047 | for (i = 0; i < (1U << order); i++) { |
| 979 | XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i))); | 1048 | XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i))); |
| 980 | xas_next(&xas); | 1049 | xas_next(&xas); |
| 981 | } | 1050 | } |
| 982 | unlock: | 1051 | unlock: |
| @@ -1031,9 +1100,9 @@ static noinline void check_create_range_4(struct xarray *xa, | |||
| 1031 | if (xas_error(&xas)) | 1100 | if (xas_error(&xas)) |
| 1032 | goto unlock; | 1101 | goto unlock; |
| 1033 | for (i = 0; i < (1UL << order); i++) { | 1102 | for (i = 0; i < (1UL << order); i++) { |
| 1034 | void *old = xas_store(&xas, xa_mk_value(base + i)); | 1103 | void *old = xas_store(&xas, xa_mk_index(base + i)); |
| 1035 | if (xas.xa_index == index) | 1104 | if (xas.xa_index == index) |
| 1036 | XA_BUG_ON(xa, old != xa_mk_value(base + i)); | 1105 | XA_BUG_ON(xa, old != xa_mk_index(base + i)); |
| 1037 | else | 1106 | else |
| 1038 | XA_BUG_ON(xa, old != NULL); | 1107 | XA_BUG_ON(xa, old != NULL); |
| 1039 | xas_next(&xas); | 1108 | xas_next(&xas); |
| @@ -1085,10 +1154,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first, | |||
| 1085 | unsigned long last) | 1154 | unsigned long last) |
| 1086 | { | 1155 | { |
| 1087 | #ifdef CONFIG_XARRAY_MULTI | 1156 | #ifdef CONFIG_XARRAY_MULTI |
| 1088 | xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL); | 1157 | xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); |
| 1089 | 1158 | ||
| 1090 | XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first)); | 1159 | XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first)); |
| 1091 | XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first)); | 1160 | XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first)); |
| 1092 | XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); | 1161 | XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); |
| 1093 | XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); | 1162 | XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); |
| 1094 | 1163 | ||
| @@ -1195,7 +1264,7 @@ static noinline void check_account(struct xarray *xa) | |||
| 1195 | XA_BUG_ON(xa, xas.xa_node->nr_values != 0); | 1264 | XA_BUG_ON(xa, xas.xa_node->nr_values != 0); |
| 1196 | rcu_read_unlock(); | 1265 | rcu_read_unlock(); |
| 1197 | 1266 | ||
| 1198 | xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order), | 1267 | xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order), |
| 1199 | GFP_KERNEL); | 1268 | GFP_KERNEL); |
| 1200 | XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); | 1269 | XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); |
| 1201 | 1270 | ||
diff --git a/lib/xarray.c b/lib/xarray.c index bbacca576593..5f3f9311de89 100644 --- a/lib/xarray.c +++ b/lib/xarray.c | |||
| @@ -1131,7 +1131,7 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) | |||
| 1131 | entry = xa_head(xas->xa); | 1131 | entry = xa_head(xas->xa); |
| 1132 | xas->xa_node = NULL; | 1132 | xas->xa_node = NULL; |
| 1133 | if (xas->xa_index > max_index(entry)) | 1133 | if (xas->xa_index > max_index(entry)) |
| 1134 | goto bounds; | 1134 | goto out; |
| 1135 | if (!xa_is_node(entry)) { | 1135 | if (!xa_is_node(entry)) { |
| 1136 | if (xa_marked(xas->xa, mark)) | 1136 | if (xa_marked(xas->xa, mark)) |
| 1137 | return entry; | 1137 | return entry; |
| @@ -1180,11 +1180,9 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) | |||
| 1180 | } | 1180 | } |
| 1181 | 1181 | ||
| 1182 | out: | 1182 | out: |
| 1183 | if (!max) | 1183 | if (xas->xa_index > max) |
| 1184 | goto max; | 1184 | goto max; |
| 1185 | bounds: | 1185 | return set_bounds(xas); |
| 1186 | xas->xa_node = XAS_BOUNDS; | ||
| 1187 | return NULL; | ||
| 1188 | max: | 1186 | max: |
| 1189 | xas->xa_node = XAS_RESTART; | 1187 | xas->xa_node = XAS_RESTART; |
| 1190 | return NULL; | 1188 | return NULL; |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 622cced74fd9..5da55b38b1b7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -629,40 +629,30 @@ release: | |||
| 629 | * available | 629 | * available |
| 630 | * never: never stall for any thp allocation | 630 | * never: never stall for any thp allocation |
| 631 | */ | 631 | */ |
| 632 | static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr) | 632 | static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) |
| 633 | { | 633 | { |
| 634 | const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); | 634 | const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); |
| 635 | gfp_t this_node = 0; | ||
| 636 | |||
| 637 | #ifdef CONFIG_NUMA | ||
| 638 | struct mempolicy *pol; | ||
| 639 | /* | ||
| 640 | * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not | ||
| 641 | * specified, to express a general desire to stay on the current | ||
| 642 | * node for optimistic allocation attempts. If the defrag mode | ||
| 643 | * and/or madvise hint requires the direct reclaim then we prefer | ||
| 644 | * to fallback to other node rather than node reclaim because that | ||
| 645 | * can lead to excessive reclaim even though there is free memory | ||
| 646 | * on other nodes. We expect that NUMA preferences are specified | ||
| 647 | * by memory policies. | ||
| 648 | */ | ||
| 649 | pol = get_vma_policy(vma, addr); | ||
| 650 | if (pol->mode != MPOL_BIND) | ||
| 651 | this_node = __GFP_THISNODE; | ||
| 652 | mpol_cond_put(pol); | ||
| 653 | #endif | ||
| 654 | 635 | ||
| 636 | /* Always do synchronous compaction */ | ||
| 655 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) | 637 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) |
| 656 | return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); | 638 | return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); |
| 639 | |||
| 640 | /* Kick kcompactd and fail quickly */ | ||
| 657 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) | 641 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) |
| 658 | return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node; | 642 | return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; |
| 643 | |||
| 644 | /* Synchronous compaction if madvised, otherwise kick kcompactd */ | ||
| 659 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) | 645 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) |
| 660 | return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : | 646 | return GFP_TRANSHUGE_LIGHT | |
| 661 | __GFP_KSWAPD_RECLAIM | this_node); | 647 | (vma_madvised ? __GFP_DIRECT_RECLAIM : |
| 648 | __GFP_KSWAPD_RECLAIM); | ||
| 649 | |||
| 650 | /* Only do synchronous compaction if madvised */ | ||
| 662 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) | 651 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) |
| 663 | return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : | 652 | return GFP_TRANSHUGE_LIGHT | |
| 664 | this_node); | 653 | (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); |
| 665 | return GFP_TRANSHUGE_LIGHT | this_node; | 654 | |
| 655 | return GFP_TRANSHUGE_LIGHT; | ||
| 666 | } | 656 | } |
| 667 | 657 | ||
| 668 | /* Caller must hold page table lock. */ | 658 | /* Caller must hold page table lock. */ |
| @@ -734,8 +724,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) | |||
| 734 | pte_free(vma->vm_mm, pgtable); | 724 | pte_free(vma->vm_mm, pgtable); |
| 735 | return ret; | 725 | return ret; |
| 736 | } | 726 | } |
| 737 | gfp = alloc_hugepage_direct_gfpmask(vma, haddr); | 727 | gfp = alloc_hugepage_direct_gfpmask(vma); |
| 738 | page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id()); | 728 | page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); |
| 739 | if (unlikely(!page)) { | 729 | if (unlikely(!page)) { |
| 740 | count_vm_event(THP_FAULT_FALLBACK); | 730 | count_vm_event(THP_FAULT_FALLBACK); |
| 741 | return VM_FAULT_FALLBACK; | 731 | return VM_FAULT_FALLBACK; |
| @@ -1305,9 +1295,8 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) | |||
| 1305 | alloc: | 1295 | alloc: |
| 1306 | if (transparent_hugepage_enabled(vma) && | 1296 | if (transparent_hugepage_enabled(vma) && |
| 1307 | !transparent_hugepage_debug_cow()) { | 1297 | !transparent_hugepage_debug_cow()) { |
| 1308 | huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr); | 1298 | huge_gfp = alloc_hugepage_direct_gfpmask(vma); |
| 1309 | new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma, | 1299 | new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); |
| 1310 | haddr, numa_node_id()); | ||
| 1311 | } else | 1300 | } else |
| 1312 | new_page = NULL; | 1301 | new_page = NULL; |
| 1313 | 1302 | ||
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 705a3e9cc910..a80832487981 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -1248,10 +1248,11 @@ void free_huge_page(struct page *page) | |||
| 1248 | (struct hugepage_subpool *)page_private(page); | 1248 | (struct hugepage_subpool *)page_private(page); |
| 1249 | bool restore_reserve; | 1249 | bool restore_reserve; |
| 1250 | 1250 | ||
| 1251 | set_page_private(page, 0); | ||
| 1252 | page->mapping = NULL; | ||
| 1253 | VM_BUG_ON_PAGE(page_count(page), page); | 1251 | VM_BUG_ON_PAGE(page_count(page), page); |
| 1254 | VM_BUG_ON_PAGE(page_mapcount(page), page); | 1252 | VM_BUG_ON_PAGE(page_mapcount(page), page); |
| 1253 | |||
| 1254 | set_page_private(page, 0); | ||
| 1255 | page->mapping = NULL; | ||
| 1255 | restore_reserve = PagePrivate(page); | 1256 | restore_reserve = PagePrivate(page); |
| 1256 | ClearPagePrivate(page); | 1257 | ClearPagePrivate(page); |
| 1257 | 1258 | ||
diff --git a/mm/memblock.c b/mm/memblock.c index 9a2d5ae81ae1..81ae63ca78d0 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
| @@ -1727,7 +1727,7 @@ static int __init_memblock memblock_search(struct memblock_type *type, phys_addr | |||
| 1727 | return -1; | 1727 | return -1; |
| 1728 | } | 1728 | } |
| 1729 | 1729 | ||
| 1730 | bool __init memblock_is_reserved(phys_addr_t addr) | 1730 | bool __init_memblock memblock_is_reserved(phys_addr_t addr) |
| 1731 | { | 1731 | { |
| 1732 | return memblock_search(&memblock.reserved, addr) != -1; | 1732 | return memblock_search(&memblock.reserved, addr) != -1; |
| 1733 | } | 1733 | } |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 0cd3de3550f0..7c72f2a95785 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
| @@ -1161,6 +1161,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, | |||
| 1161 | LIST_HEAD(tokill); | 1161 | LIST_HEAD(tokill); |
| 1162 | int rc = -EBUSY; | 1162 | int rc = -EBUSY; |
| 1163 | loff_t start; | 1163 | loff_t start; |
| 1164 | dax_entry_t cookie; | ||
| 1164 | 1165 | ||
| 1165 | /* | 1166 | /* |
| 1166 | * Prevent the inode from being freed while we are interrogating | 1167 | * Prevent the inode from being freed while we are interrogating |
| @@ -1169,7 +1170,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, | |||
| 1169 | * also prevents changes to the mapping of this pfn until | 1170 | * also prevents changes to the mapping of this pfn until |
| 1170 | * poison signaling is complete. | 1171 | * poison signaling is complete. |
| 1171 | */ | 1172 | */ |
| 1172 | if (!dax_lock_mapping_entry(page)) | 1173 | cookie = dax_lock_page(page); |
| 1174 | if (!cookie) | ||
| 1173 | goto out; | 1175 | goto out; |
| 1174 | 1176 | ||
| 1175 | if (hwpoison_filter(page)) { | 1177 | if (hwpoison_filter(page)) { |
| @@ -1220,7 +1222,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, | |||
| 1220 | kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags); | 1222 | kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags); |
| 1221 | rc = 0; | 1223 | rc = 0; |
| 1222 | unlock: | 1224 | unlock: |
| 1223 | dax_unlock_mapping_entry(page); | 1225 | dax_unlock_page(page, cookie); |
| 1224 | out: | 1226 | out: |
| 1225 | /* drop pgmap ref acquired in caller */ | 1227 | /* drop pgmap ref acquired in caller */ |
| 1226 | put_dev_pagemap(pgmap); | 1228 | put_dev_pagemap(pgmap); |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5837a067124d..d4496d9d34f5 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -1116,8 +1116,8 @@ static struct page *new_page(struct page *page, unsigned long start) | |||
| 1116 | } else if (PageTransHuge(page)) { | 1116 | } else if (PageTransHuge(page)) { |
| 1117 | struct page *thp; | 1117 | struct page *thp; |
| 1118 | 1118 | ||
| 1119 | thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, | 1119 | thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, |
| 1120 | address, numa_node_id()); | 1120 | HPAGE_PMD_ORDER); |
| 1121 | if (!thp) | 1121 | if (!thp) |
| 1122 | return NULL; | 1122 | return NULL; |
| 1123 | prep_transhuge_page(thp); | 1123 | prep_transhuge_page(thp); |
| @@ -1662,7 +1662,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, | |||
| 1662 | * freeing by another task. It is the caller's responsibility to free the | 1662 | * freeing by another task. It is the caller's responsibility to free the |
| 1663 | * extra reference for shared policies. | 1663 | * extra reference for shared policies. |
| 1664 | */ | 1664 | */ |
| 1665 | struct mempolicy *get_vma_policy(struct vm_area_struct *vma, | 1665 | static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, |
| 1666 | unsigned long addr) | 1666 | unsigned long addr) |
| 1667 | { | 1667 | { |
| 1668 | struct mempolicy *pol = __get_vma_policy(vma, addr); | 1668 | struct mempolicy *pol = __get_vma_policy(vma, addr); |
| @@ -2011,6 +2011,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, | |||
| 2011 | * @vma: Pointer to VMA or NULL if not available. | 2011 | * @vma: Pointer to VMA or NULL if not available. |
| 2012 | * @addr: Virtual Address of the allocation. Must be inside the VMA. | 2012 | * @addr: Virtual Address of the allocation. Must be inside the VMA. |
| 2013 | * @node: Which node to prefer for allocation (modulo policy). | 2013 | * @node: Which node to prefer for allocation (modulo policy). |
| 2014 | * @hugepage: for hugepages try only the preferred node if possible | ||
| 2014 | * | 2015 | * |
| 2015 | * This function allocates a page from the kernel page pool and applies | 2016 | * This function allocates a page from the kernel page pool and applies |
| 2016 | * a NUMA policy associated with the VMA or the current process. | 2017 | * a NUMA policy associated with the VMA or the current process. |
| @@ -2021,7 +2022,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, | |||
| 2021 | */ | 2022 | */ |
| 2022 | struct page * | 2023 | struct page * |
| 2023 | alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, | 2024 | alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, |
| 2024 | unsigned long addr, int node) | 2025 | unsigned long addr, int node, bool hugepage) |
| 2025 | { | 2026 | { |
| 2026 | struct mempolicy *pol; | 2027 | struct mempolicy *pol; |
| 2027 | struct page *page; | 2028 | struct page *page; |
| @@ -2039,6 +2040,31 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, | |||
| 2039 | goto out; | 2040 | goto out; |
| 2040 | } | 2041 | } |
| 2041 | 2042 | ||
| 2043 | if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { | ||
| 2044 | int hpage_node = node; | ||
| 2045 | |||
| 2046 | /* | ||
| 2047 | * For hugepage allocation and non-interleave policy which | ||
| 2048 | * allows the current node (or other explicitly preferred | ||
| 2049 | * node) we only try to allocate from the current/preferred | ||
| 2050 | * node and don't fall back to other nodes, as the cost of | ||
| 2051 | * remote accesses would likely offset THP benefits. | ||
| 2052 | * | ||
| 2053 | * If the policy is interleave, or does not allow the current | ||
| 2054 | * node in its nodemask, we allocate the standard way. | ||
| 2055 | */ | ||
| 2056 | if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) | ||
| 2057 | hpage_node = pol->v.preferred_node; | ||
| 2058 | |||
| 2059 | nmask = policy_nodemask(gfp, pol); | ||
| 2060 | if (!nmask || node_isset(hpage_node, *nmask)) { | ||
| 2061 | mpol_cond_put(pol); | ||
| 2062 | page = __alloc_pages_node(hpage_node, | ||
| 2063 | gfp | __GFP_THISNODE, order); | ||
| 2064 | goto out; | ||
| 2065 | } | ||
| 2066 | } | ||
| 2067 | |||
| 2042 | nmask = policy_nodemask(gfp, pol); | 2068 | nmask = policy_nodemask(gfp, pol); |
| 2043 | preferred_nid = policy_node(gfp, pol, node); | 2069 | preferred_nid = policy_node(gfp, pol, node); |
| 2044 | page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); | 2070 | page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); |
diff --git a/mm/shmem.c b/mm/shmem.c index cddc72ac44d8..5d07e0b1352f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -661,9 +661,7 @@ static int shmem_free_swap(struct address_space *mapping, | |||
| 661 | { | 661 | { |
| 662 | void *old; | 662 | void *old; |
| 663 | 663 | ||
| 664 | xa_lock_irq(&mapping->i_pages); | 664 | old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); |
| 665 | old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0); | ||
| 666 | xa_unlock_irq(&mapping->i_pages); | ||
| 667 | if (old != radswap) | 665 | if (old != radswap) |
| 668 | return -ENOENT; | 666 | return -ENOENT; |
| 669 | free_swap_and_cache(radix_to_swp_entry(radswap)); | 667 | free_swap_and_cache(radix_to_swp_entry(radswap)); |
| @@ -1439,7 +1437,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp, | |||
| 1439 | 1437 | ||
| 1440 | shmem_pseudo_vma_init(&pvma, info, hindex); | 1438 | shmem_pseudo_vma_init(&pvma, info, hindex); |
| 1441 | page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, | 1439 | page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, |
| 1442 | HPAGE_PMD_ORDER, &pvma, 0, numa_node_id()); | 1440 | HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); |
| 1443 | shmem_pseudo_vma_destroy(&pvma); | 1441 | shmem_pseudo_vma_destroy(&pvma); |
| 1444 | if (page) | 1442 | if (page) |
| 1445 | prep_transhuge_page(page); | 1443 | prep_transhuge_page(page); |
diff --git a/mm/sparse.c b/mm/sparse.c index 33307fc05c4d..3abc8cc50201 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
| @@ -240,6 +240,22 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) | |||
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | /* | 242 | /* |
| 243 | * Mark all memblocks as present using memory_present(). This is a | ||
| 244 | * convenience function that is useful for a number of arches | ||
| 245 | * to mark all of the system's memory as present during initialization. | ||
| 246 | */ | ||
| 247 | void __init memblocks_present(void) | ||
| 248 | { | ||
| 249 | struct memblock_region *reg; | ||
| 250 | |||
| 251 | for_each_memblock(memory, reg) { | ||
| 252 | memory_present(memblock_get_region_node(reg), | ||
| 253 | memblock_region_memory_base_pfn(reg), | ||
| 254 | memblock_region_memory_end_pfn(reg)); | ||
| 255 | } | ||
| 256 | } | ||
| 257 | |||
| 258 | /* | ||
| 243 | * Subtle, we encode the real pfn into the mem_map such that | 259 | * Subtle, we encode the real pfn into the mem_map such that |
| 244 | * the identity pfn - section_mem_map will return the actual | 260 | * the identity pfn - section_mem_map will return the actual |
| 245 | * physical page frame number. | 261 | * physical page frame number. |
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index c89c22c49015..25001913d03b 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c | |||
| @@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, | |||
| 28 | return ret; | 28 | return ret; |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) | 31 | static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret, |
| 32 | u32 *time) | ||
| 32 | { | 33 | { |
| 33 | struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; | 34 | struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; |
| 34 | enum bpf_cgroup_storage_type stype; | 35 | enum bpf_cgroup_storage_type stype; |
| 35 | u64 time_start, time_spent = 0; | 36 | u64 time_start, time_spent = 0; |
| 36 | u32 ret = 0, i; | 37 | u32 i; |
| 37 | 38 | ||
| 38 | for_each_cgroup_storage_type(stype) { | 39 | for_each_cgroup_storage_type(stype) { |
| 39 | storage[stype] = bpf_cgroup_storage_alloc(prog, stype); | 40 | storage[stype] = bpf_cgroup_storage_alloc(prog, stype); |
| @@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) | |||
| 49 | repeat = 1; | 50 | repeat = 1; |
| 50 | time_start = ktime_get_ns(); | 51 | time_start = ktime_get_ns(); |
| 51 | for (i = 0; i < repeat; i++) { | 52 | for (i = 0; i < repeat; i++) { |
| 52 | ret = bpf_test_run_one(prog, ctx, storage); | 53 | *ret = bpf_test_run_one(prog, ctx, storage); |
| 53 | if (need_resched()) { | 54 | if (need_resched()) { |
| 54 | if (signal_pending(current)) | 55 | if (signal_pending(current)) |
| 55 | break; | 56 | break; |
| @@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) | |||
| 65 | for_each_cgroup_storage_type(stype) | 66 | for_each_cgroup_storage_type(stype) |
| 66 | bpf_cgroup_storage_free(storage[stype]); | 67 | bpf_cgroup_storage_free(storage[stype]); |
| 67 | 68 | ||
| 68 | return ret; | 69 | return 0; |
| 69 | } | 70 | } |
| 70 | 71 | ||
| 71 | static int bpf_test_finish(const union bpf_attr *kattr, | 72 | static int bpf_test_finish(const union bpf_attr *kattr, |
| @@ -165,7 +166,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, | |||
| 165 | __skb_push(skb, hh_len); | 166 | __skb_push(skb, hh_len); |
| 166 | if (is_direct_pkt_access) | 167 | if (is_direct_pkt_access) |
| 167 | bpf_compute_data_pointers(skb); | 168 | bpf_compute_data_pointers(skb); |
| 168 | retval = bpf_test_run(prog, skb, repeat, &duration); | 169 | ret = bpf_test_run(prog, skb, repeat, &retval, &duration); |
| 170 | if (ret) { | ||
| 171 | kfree_skb(skb); | ||
| 172 | kfree(sk); | ||
| 173 | return ret; | ||
| 174 | } | ||
| 169 | if (!is_l2) { | 175 | if (!is_l2) { |
| 170 | if (skb_headroom(skb) < hh_len) { | 176 | if (skb_headroom(skb) < hh_len) { |
| 171 | int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); | 177 | int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); |
| @@ -212,11 +218,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, | |||
| 212 | rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); | 218 | rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); |
| 213 | xdp.rxq = &rxqueue->xdp_rxq; | 219 | xdp.rxq = &rxqueue->xdp_rxq; |
| 214 | 220 | ||
| 215 | retval = bpf_test_run(prog, &xdp, repeat, &duration); | 221 | ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration); |
| 222 | if (ret) | ||
| 223 | goto out; | ||
| 216 | if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN || | 224 | if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN || |
| 217 | xdp.data_end != xdp.data + size) | 225 | xdp.data_end != xdp.data + size) |
| 218 | size = xdp.data_end - xdp.data; | 226 | size = xdp.data_end - xdp.data; |
| 219 | ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); | 227 | ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); |
| 228 | out: | ||
| 220 | kfree(data); | 229 | kfree(data); |
| 221 | return ret; | 230 | return ret; |
| 222 | } | 231 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index ddc551f24ba2..722d50dbf8a4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2175,6 +2175,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev, | |||
| 2175 | return active; | 2175 | return active; |
| 2176 | } | 2176 | } |
| 2177 | 2177 | ||
| 2178 | static void reset_xps_maps(struct net_device *dev, | ||
| 2179 | struct xps_dev_maps *dev_maps, | ||
| 2180 | bool is_rxqs_map) | ||
| 2181 | { | ||
| 2182 | if (is_rxqs_map) { | ||
| 2183 | static_key_slow_dec_cpuslocked(&xps_rxqs_needed); | ||
| 2184 | RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); | ||
| 2185 | } else { | ||
| 2186 | RCU_INIT_POINTER(dev->xps_cpus_map, NULL); | ||
| 2187 | } | ||
| 2188 | static_key_slow_dec_cpuslocked(&xps_needed); | ||
| 2189 | kfree_rcu(dev_maps, rcu); | ||
| 2190 | } | ||
| 2191 | |||
| 2178 | static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, | 2192 | static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, |
| 2179 | struct xps_dev_maps *dev_maps, unsigned int nr_ids, | 2193 | struct xps_dev_maps *dev_maps, unsigned int nr_ids, |
| 2180 | u16 offset, u16 count, bool is_rxqs_map) | 2194 | u16 offset, u16 count, bool is_rxqs_map) |
| @@ -2186,18 +2200,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, | |||
| 2186 | j < nr_ids;) | 2200 | j < nr_ids;) |
| 2187 | active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, | 2201 | active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, |
| 2188 | count); | 2202 | count); |
| 2189 | if (!active) { | 2203 | if (!active) |
| 2190 | if (is_rxqs_map) { | 2204 | reset_xps_maps(dev, dev_maps, is_rxqs_map); |
| 2191 | RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); | ||
| 2192 | } else { | ||
| 2193 | RCU_INIT_POINTER(dev->xps_cpus_map, NULL); | ||
| 2194 | 2205 | ||
| 2195 | for (i = offset + (count - 1); count--; i--) | 2206 | if (!is_rxqs_map) { |
| 2196 | netdev_queue_numa_node_write( | 2207 | for (i = offset + (count - 1); count--; i--) { |
| 2197 | netdev_get_tx_queue(dev, i), | 2208 | netdev_queue_numa_node_write( |
| 2198 | NUMA_NO_NODE); | 2209 | netdev_get_tx_queue(dev, i), |
| 2210 | NUMA_NO_NODE); | ||
| 2199 | } | 2211 | } |
| 2200 | kfree_rcu(dev_maps, rcu); | ||
| 2201 | } | 2212 | } |
| 2202 | } | 2213 | } |
| 2203 | 2214 | ||
| @@ -2234,10 +2245,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset, | |||
| 2234 | false); | 2245 | false); |
| 2235 | 2246 | ||
| 2236 | out_no_maps: | 2247 | out_no_maps: |
| 2237 | if (static_key_enabled(&xps_rxqs_needed)) | ||
| 2238 | static_key_slow_dec_cpuslocked(&xps_rxqs_needed); | ||
| 2239 | |||
| 2240 | static_key_slow_dec_cpuslocked(&xps_needed); | ||
| 2241 | mutex_unlock(&xps_map_mutex); | 2248 | mutex_unlock(&xps_map_mutex); |
| 2242 | cpus_read_unlock(); | 2249 | cpus_read_unlock(); |
| 2243 | } | 2250 | } |
| @@ -2355,9 +2362,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, | |||
| 2355 | if (!new_dev_maps) | 2362 | if (!new_dev_maps) |
| 2356 | goto out_no_new_maps; | 2363 | goto out_no_new_maps; |
| 2357 | 2364 | ||
| 2358 | static_key_slow_inc_cpuslocked(&xps_needed); | 2365 | if (!dev_maps) { |
| 2359 | if (is_rxqs_map) | 2366 | /* Increment static keys at most once per type */ |
| 2360 | static_key_slow_inc_cpuslocked(&xps_rxqs_needed); | 2367 | static_key_slow_inc_cpuslocked(&xps_needed); |
| 2368 | if (is_rxqs_map) | ||
| 2369 | static_key_slow_inc_cpuslocked(&xps_rxqs_needed); | ||
| 2370 | } | ||
| 2361 | 2371 | ||
| 2362 | for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), | 2372 | for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), |
| 2363 | j < nr_ids;) { | 2373 | j < nr_ids;) { |
| @@ -2455,13 +2465,8 @@ out_no_new_maps: | |||
| 2455 | } | 2465 | } |
| 2456 | 2466 | ||
| 2457 | /* free map if not active */ | 2467 | /* free map if not active */ |
| 2458 | if (!active) { | 2468 | if (!active) |
| 2459 | if (is_rxqs_map) | 2469 | reset_xps_maps(dev, dev_maps, is_rxqs_map); |
| 2460 | RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); | ||
| 2461 | else | ||
| 2462 | RCU_INIT_POINTER(dev->xps_cpus_map, NULL); | ||
| 2463 | kfree_rcu(dev_maps, rcu); | ||
| 2464 | } | ||
| 2465 | 2470 | ||
| 2466 | out_no_maps: | 2471 | out_no_maps: |
| 2467 | mutex_unlock(&xps_map_mutex); | 2472 | mutex_unlock(&xps_map_mutex); |
| @@ -5009,7 +5014,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo | |||
| 5009 | struct net_device *orig_dev = skb->dev; | 5014 | struct net_device *orig_dev = skb->dev; |
| 5010 | struct packet_type *pt_prev = NULL; | 5015 | struct packet_type *pt_prev = NULL; |
| 5011 | 5016 | ||
| 5012 | list_del(&skb->list); | 5017 | skb_list_del_init(skb); |
| 5013 | __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); | 5018 | __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); |
| 5014 | if (!pt_prev) | 5019 | if (!pt_prev) |
| 5015 | continue; | 5020 | continue; |
| @@ -5165,7 +5170,7 @@ static void netif_receive_skb_list_internal(struct list_head *head) | |||
| 5165 | INIT_LIST_HEAD(&sublist); | 5170 | INIT_LIST_HEAD(&sublist); |
| 5166 | list_for_each_entry_safe(skb, next, head, list) { | 5171 | list_for_each_entry_safe(skb, next, head, list) { |
| 5167 | net_timestamp_check(netdev_tstamp_prequeue, skb); | 5172 | net_timestamp_check(netdev_tstamp_prequeue, skb); |
| 5168 | list_del(&skb->list); | 5173 | skb_list_del_init(skb); |
| 5169 | if (!skb_defer_rx_timestamp(skb)) | 5174 | if (!skb_defer_rx_timestamp(skb)) |
| 5170 | list_add_tail(&skb->list, &sublist); | 5175 | list_add_tail(&skb->list, &sublist); |
| 5171 | } | 5176 | } |
| @@ -5176,7 +5181,7 @@ static void netif_receive_skb_list_internal(struct list_head *head) | |||
| 5176 | rcu_read_lock(); | 5181 | rcu_read_lock(); |
| 5177 | list_for_each_entry_safe(skb, next, head, list) { | 5182 | list_for_each_entry_safe(skb, next, head, list) { |
| 5178 | xdp_prog = rcu_dereference(skb->dev->xdp_prog); | 5183 | xdp_prog = rcu_dereference(skb->dev->xdp_prog); |
| 5179 | list_del(&skb->list); | 5184 | skb_list_del_init(skb); |
| 5180 | if (do_xdp_generic(xdp_prog, skb) == XDP_PASS) | 5185 | if (do_xdp_generic(xdp_prog, skb) == XDP_PASS) |
| 5181 | list_add_tail(&skb->list, &sublist); | 5186 | list_add_tail(&skb->list, &sublist); |
| 5182 | } | 5187 | } |
| @@ -5195,7 +5200,7 @@ static void netif_receive_skb_list_internal(struct list_head *head) | |||
| 5195 | 5200 | ||
| 5196 | if (cpu >= 0) { | 5201 | if (cpu >= 0) { |
| 5197 | /* Will be handled, remove from list */ | 5202 | /* Will be handled, remove from list */ |
| 5198 | list_del(&skb->list); | 5203 | skb_list_del_init(skb); |
| 5199 | enqueue_to_backlog(skb, cpu, &rflow->last_qtail); | 5204 | enqueue_to_backlog(skb, cpu, &rflow->last_qtail); |
| 5200 | } | 5205 | } |
| 5201 | } | 5206 | } |
| @@ -6204,8 +6209,8 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, | |||
| 6204 | napi->skb = NULL; | 6209 | napi->skb = NULL; |
| 6205 | napi->poll = poll; | 6210 | napi->poll = poll; |
| 6206 | if (weight > NAPI_POLL_WEIGHT) | 6211 | if (weight > NAPI_POLL_WEIGHT) |
| 6207 | pr_err_once("netif_napi_add() called with weight %d on device %s\n", | 6212 | netdev_err_once(dev, "%s() called with weight %d\n", __func__, |
| 6208 | weight, dev->name); | 6213 | weight); |
| 6209 | napi->weight = weight; | 6214 | napi->weight = weight; |
| 6210 | list_add(&napi->dev_list, &dev->napi_list); | 6215 | list_add(&napi->dev_list, &dev->napi_list); |
| 6211 | napi->dev = dev; | 6216 | napi->dev = dev; |
diff --git a/net/core/filter.c b/net/core/filter.c index 9a1327eb25fa..8d2c629501e2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -4890,22 +4890,23 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, | |||
| 4890 | struct net *net; | 4890 | struct net *net; |
| 4891 | 4891 | ||
| 4892 | family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6; | 4892 | family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6; |
| 4893 | if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags)) | 4893 | if (unlikely(family == AF_UNSPEC || flags || |
| 4894 | !((s32)netns_id < 0 || netns_id <= S32_MAX))) | ||
| 4894 | goto out; | 4895 | goto out; |
| 4895 | 4896 | ||
| 4896 | if (skb->dev) | 4897 | if (skb->dev) |
| 4897 | caller_net = dev_net(skb->dev); | 4898 | caller_net = dev_net(skb->dev); |
| 4898 | else | 4899 | else |
| 4899 | caller_net = sock_net(skb->sk); | 4900 | caller_net = sock_net(skb->sk); |
| 4900 | if (netns_id) { | 4901 | if ((s32)netns_id < 0) { |
| 4902 | net = caller_net; | ||
| 4903 | sk = sk_lookup(net, tuple, skb, family, proto); | ||
| 4904 | } else { | ||
| 4901 | net = get_net_ns_by_id(caller_net, netns_id); | 4905 | net = get_net_ns_by_id(caller_net, netns_id); |
| 4902 | if (unlikely(!net)) | 4906 | if (unlikely(!net)) |
| 4903 | goto out; | 4907 | goto out; |
| 4904 | sk = sk_lookup(net, tuple, skb, family, proto); | 4908 | sk = sk_lookup(net, tuple, skb, family, proto); |
| 4905 | put_net(net); | 4909 | put_net(net); |
| 4906 | } else { | ||
| 4907 | net = caller_net; | ||
| 4908 | sk = sk_lookup(net, tuple, skb, family, proto); | ||
| 4909 | } | 4910 | } |
| 4910 | 4911 | ||
| 4911 | if (sk) | 4912 | if (sk) |
| @@ -5435,8 +5436,8 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type | |||
| 5435 | if (size != size_default) | 5436 | if (size != size_default) |
| 5436 | return false; | 5437 | return false; |
| 5437 | break; | 5438 | break; |
| 5438 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5439 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5439 | if (size != sizeof(struct bpf_flow_keys *)) | 5440 | if (size != sizeof(__u64)) |
| 5440 | return false; | 5441 | return false; |
| 5441 | break; | 5442 | break; |
| 5442 | default: | 5443 | default: |
| @@ -5464,7 +5465,7 @@ static bool sk_filter_is_valid_access(int off, int size, | |||
| 5464 | case bpf_ctx_range(struct __sk_buff, data): | 5465 | case bpf_ctx_range(struct __sk_buff, data): |
| 5465 | case bpf_ctx_range(struct __sk_buff, data_meta): | 5466 | case bpf_ctx_range(struct __sk_buff, data_meta): |
| 5466 | case bpf_ctx_range(struct __sk_buff, data_end): | 5467 | case bpf_ctx_range(struct __sk_buff, data_end): |
| 5467 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5468 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5468 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): | 5469 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): |
| 5469 | return false; | 5470 | return false; |
| 5470 | } | 5471 | } |
| @@ -5489,7 +5490,7 @@ static bool cg_skb_is_valid_access(int off, int size, | |||
| 5489 | switch (off) { | 5490 | switch (off) { |
| 5490 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 5491 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
| 5491 | case bpf_ctx_range(struct __sk_buff, data_meta): | 5492 | case bpf_ctx_range(struct __sk_buff, data_meta): |
| 5492 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5493 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5493 | return false; | 5494 | return false; |
| 5494 | case bpf_ctx_range(struct __sk_buff, data): | 5495 | case bpf_ctx_range(struct __sk_buff, data): |
| 5495 | case bpf_ctx_range(struct __sk_buff, data_end): | 5496 | case bpf_ctx_range(struct __sk_buff, data_end): |
| @@ -5530,7 +5531,7 @@ static bool lwt_is_valid_access(int off, int size, | |||
| 5530 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 5531 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
| 5531 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): | 5532 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): |
| 5532 | case bpf_ctx_range(struct __sk_buff, data_meta): | 5533 | case bpf_ctx_range(struct __sk_buff, data_meta): |
| 5533 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5534 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5534 | return false; | 5535 | return false; |
| 5535 | } | 5536 | } |
| 5536 | 5537 | ||
| @@ -5756,7 +5757,7 @@ static bool tc_cls_act_is_valid_access(int off, int size, | |||
| 5756 | case bpf_ctx_range(struct __sk_buff, data_end): | 5757 | case bpf_ctx_range(struct __sk_buff, data_end): |
| 5757 | info->reg_type = PTR_TO_PACKET_END; | 5758 | info->reg_type = PTR_TO_PACKET_END; |
| 5758 | break; | 5759 | break; |
| 5759 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5760 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5760 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): | 5761 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): |
| 5761 | return false; | 5762 | return false; |
| 5762 | } | 5763 | } |
| @@ -5958,7 +5959,7 @@ static bool sk_skb_is_valid_access(int off, int size, | |||
| 5958 | switch (off) { | 5959 | switch (off) { |
| 5959 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 5960 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
| 5960 | case bpf_ctx_range(struct __sk_buff, data_meta): | 5961 | case bpf_ctx_range(struct __sk_buff, data_meta): |
| 5961 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5962 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5962 | return false; | 5963 | return false; |
| 5963 | } | 5964 | } |
| 5964 | 5965 | ||
| @@ -6039,7 +6040,7 @@ static bool flow_dissector_is_valid_access(int off, int size, | |||
| 6039 | case bpf_ctx_range(struct __sk_buff, data_end): | 6040 | case bpf_ctx_range(struct __sk_buff, data_end): |
| 6040 | info->reg_type = PTR_TO_PACKET_END; | 6041 | info->reg_type = PTR_TO_PACKET_END; |
| 6041 | break; | 6042 | break; |
| 6042 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 6043 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 6043 | info->reg_type = PTR_TO_FLOW_KEYS; | 6044 | info->reg_type = PTR_TO_FLOW_KEYS; |
| 6044 | break; | 6045 | break; |
| 6045 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 6046 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 33d9227a8b80..7819f7804eeb 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -3800,6 +3800,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb, | |||
| 3800 | { | 3800 | { |
| 3801 | int err; | 3801 | int err; |
| 3802 | 3802 | ||
| 3803 | if (dev->type != ARPHRD_ETHER) | ||
| 3804 | return -EINVAL; | ||
| 3805 | |||
| 3803 | netif_addr_lock_bh(dev); | 3806 | netif_addr_lock_bh(dev); |
| 3804 | err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); | 3807 | err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); |
| 3805 | if (err) | 3808 | if (err) |
diff --git a/net/dsa/master.c b/net/dsa/master.c index c90ee3227dea..5e8c9bef78bd 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c | |||
| @@ -158,8 +158,31 @@ static void dsa_master_ethtool_teardown(struct net_device *dev) | |||
| 158 | cpu_dp->orig_ethtool_ops = NULL; | 158 | cpu_dp->orig_ethtool_ops = NULL; |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | static ssize_t tagging_show(struct device *d, struct device_attribute *attr, | ||
| 162 | char *buf) | ||
| 163 | { | ||
| 164 | struct net_device *dev = to_net_dev(d); | ||
| 165 | struct dsa_port *cpu_dp = dev->dsa_ptr; | ||
| 166 | |||
| 167 | return sprintf(buf, "%s\n", | ||
| 168 | dsa_tag_protocol_to_str(cpu_dp->tag_ops)); | ||
| 169 | } | ||
| 170 | static DEVICE_ATTR_RO(tagging); | ||
| 171 | |||
| 172 | static struct attribute *dsa_slave_attrs[] = { | ||
| 173 | &dev_attr_tagging.attr, | ||
| 174 | NULL | ||
| 175 | }; | ||
| 176 | |||
| 177 | static const struct attribute_group dsa_group = { | ||
| 178 | .name = "dsa", | ||
| 179 | .attrs = dsa_slave_attrs, | ||
| 180 | }; | ||
| 181 | |||
| 161 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | 182 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) |
| 162 | { | 183 | { |
| 184 | int ret; | ||
| 185 | |||
| 163 | /* If we use a tagging format that doesn't have an ethertype | 186 | /* If we use a tagging format that doesn't have an ethertype |
| 164 | * field, make sure that all packets from this point on get | 187 | * field, make sure that all packets from this point on get |
| 165 | * sent to the tag format's receive function. | 188 | * sent to the tag format's receive function. |
| @@ -168,11 +191,20 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | |||
| 168 | 191 | ||
| 169 | dev->dsa_ptr = cpu_dp; | 192 | dev->dsa_ptr = cpu_dp; |
| 170 | 193 | ||
| 171 | return dsa_master_ethtool_setup(dev); | 194 | ret = dsa_master_ethtool_setup(dev); |
| 195 | if (ret) | ||
| 196 | return ret; | ||
| 197 | |||
| 198 | ret = sysfs_create_group(&dev->dev.kobj, &dsa_group); | ||
| 199 | if (ret) | ||
| 200 | dsa_master_ethtool_teardown(dev); | ||
| 201 | |||
| 202 | return ret; | ||
| 172 | } | 203 | } |
| 173 | 204 | ||
| 174 | void dsa_master_teardown(struct net_device *dev) | 205 | void dsa_master_teardown(struct net_device *dev) |
| 175 | { | 206 | { |
| 207 | sysfs_remove_group(&dev->dev.kobj, &dsa_group); | ||
| 176 | dsa_master_ethtool_teardown(dev); | 208 | dsa_master_ethtool_teardown(dev); |
| 177 | 209 | ||
| 178 | dev->dsa_ptr = NULL; | 210 | dev->dsa_ptr = NULL; |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 7d0c19e7edcf..aec78f5aca72 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
| @@ -1058,27 +1058,6 @@ static struct device_type dsa_type = { | |||
| 1058 | .name = "dsa", | 1058 | .name = "dsa", |
| 1059 | }; | 1059 | }; |
| 1060 | 1060 | ||
| 1061 | static ssize_t tagging_show(struct device *d, struct device_attribute *attr, | ||
| 1062 | char *buf) | ||
| 1063 | { | ||
| 1064 | struct net_device *dev = to_net_dev(d); | ||
| 1065 | struct dsa_port *dp = dsa_slave_to_port(dev); | ||
| 1066 | |||
| 1067 | return sprintf(buf, "%s\n", | ||
| 1068 | dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops)); | ||
| 1069 | } | ||
| 1070 | static DEVICE_ATTR_RO(tagging); | ||
| 1071 | |||
| 1072 | static struct attribute *dsa_slave_attrs[] = { | ||
| 1073 | &dev_attr_tagging.attr, | ||
| 1074 | NULL | ||
| 1075 | }; | ||
| 1076 | |||
| 1077 | static const struct attribute_group dsa_group = { | ||
| 1078 | .name = "dsa", | ||
| 1079 | .attrs = dsa_slave_attrs, | ||
| 1080 | }; | ||
| 1081 | |||
| 1082 | static void dsa_slave_phylink_validate(struct net_device *dev, | 1061 | static void dsa_slave_phylink_validate(struct net_device *dev, |
| 1083 | unsigned long *supported, | 1062 | unsigned long *supported, |
| 1084 | struct phylink_link_state *state) | 1063 | struct phylink_link_state *state) |
| @@ -1374,14 +1353,8 @@ int dsa_slave_create(struct dsa_port *port) | |||
| 1374 | goto out_phy; | 1353 | goto out_phy; |
| 1375 | } | 1354 | } |
| 1376 | 1355 | ||
| 1377 | ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group); | ||
| 1378 | if (ret) | ||
| 1379 | goto out_unreg; | ||
| 1380 | |||
| 1381 | return 0; | 1356 | return 0; |
| 1382 | 1357 | ||
| 1383 | out_unreg: | ||
| 1384 | unregister_netdev(slave_dev); | ||
| 1385 | out_phy: | 1358 | out_phy: |
| 1386 | rtnl_lock(); | 1359 | rtnl_lock(); |
| 1387 | phylink_disconnect_phy(p->dp->pl); | 1360 | phylink_disconnect_phy(p->dp->pl); |
| @@ -1405,7 +1378,6 @@ void dsa_slave_destroy(struct net_device *slave_dev) | |||
| 1405 | rtnl_unlock(); | 1378 | rtnl_unlock(); |
| 1406 | 1379 | ||
| 1407 | dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER); | 1380 | dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER); |
| 1408 | sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group); | ||
| 1409 | unregister_netdev(slave_dev); | 1381 | unregister_netdev(slave_dev); |
| 1410 | phylink_destroy(dp->pl); | 1382 | phylink_destroy(dp->pl); |
| 1411 | free_percpu(p->stats64); | 1383 | free_percpu(p->stats64); |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index d6ee343fdb86..aa0b22697998 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
| @@ -515,6 +515,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, | |||
| 515 | struct rb_node *rbn; | 515 | struct rb_node *rbn; |
| 516 | int len; | 516 | int len; |
| 517 | int ihlen; | 517 | int ihlen; |
| 518 | int delta; | ||
| 518 | int err; | 519 | int err; |
| 519 | u8 ecn; | 520 | u8 ecn; |
| 520 | 521 | ||
| @@ -556,10 +557,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, | |||
| 556 | if (len > 65535) | 557 | if (len > 65535) |
| 557 | goto out_oversize; | 558 | goto out_oversize; |
| 558 | 559 | ||
| 560 | delta = - head->truesize; | ||
| 561 | |||
| 559 | /* Head of list must not be cloned. */ | 562 | /* Head of list must not be cloned. */ |
| 560 | if (skb_unclone(head, GFP_ATOMIC)) | 563 | if (skb_unclone(head, GFP_ATOMIC)) |
| 561 | goto out_nomem; | 564 | goto out_nomem; |
| 562 | 565 | ||
| 566 | delta += head->truesize; | ||
| 567 | if (delta) | ||
| 568 | add_frag_mem_limit(qp->q.net, delta); | ||
| 569 | |||
| 563 | /* If the first fragment is fragmented itself, we split | 570 | /* If the first fragment is fragmented itself, we split |
| 564 | * it to two chunks: the first with data and paged part | 571 | * it to two chunks: the first with data and paged part |
| 565 | * and the second, holding only fragments. */ | 572 | * and the second, holding only fragments. */ |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 35a786c0aaa0..e609b08c9df4 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
| @@ -547,7 +547,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk, | |||
| 547 | list_for_each_entry_safe(skb, next, head, list) { | 547 | list_for_each_entry_safe(skb, next, head, list) { |
| 548 | struct dst_entry *dst; | 548 | struct dst_entry *dst; |
| 549 | 549 | ||
| 550 | list_del(&skb->list); | 550 | skb_list_del_init(skb); |
| 551 | /* if ingress device is enslaved to an L3 master device pass the | 551 | /* if ingress device is enslaved to an L3 master device pass the |
| 552 | * skb to its handler for processing | 552 | * skb to its handler for processing |
| 553 | */ | 553 | */ |
| @@ -594,7 +594,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt, | |||
| 594 | struct net_device *dev = skb->dev; | 594 | struct net_device *dev = skb->dev; |
| 595 | struct net *net = dev_net(dev); | 595 | struct net *net = dev_net(dev); |
| 596 | 596 | ||
| 597 | list_del(&skb->list); | 597 | skb_list_del_init(skb); |
| 598 | skb = ip_rcv_core(skb, net); | 598 | skb = ip_rcv_core(skb, net); |
| 599 | if (skb == NULL) | 599 | if (skb == NULL) |
| 600 | continue; | 600 | continue; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3f510cad0b3e..d1676d8a6ed7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -1904,7 +1904,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue, | |||
| 1904 | * This algorithm is from John Heffner. | 1904 | * This algorithm is from John Heffner. |
| 1905 | */ | 1905 | */ |
| 1906 | static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, | 1906 | static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, |
| 1907 | bool *is_cwnd_limited, u32 max_segs) | 1907 | bool *is_cwnd_limited, |
| 1908 | bool *is_rwnd_limited, | ||
| 1909 | u32 max_segs) | ||
| 1908 | { | 1910 | { |
| 1909 | const struct inet_connection_sock *icsk = inet_csk(sk); | 1911 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 1910 | u32 age, send_win, cong_win, limit, in_flight; | 1912 | u32 age, send_win, cong_win, limit, in_flight; |
| @@ -1912,9 +1914,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, | |||
| 1912 | struct sk_buff *head; | 1914 | struct sk_buff *head; |
| 1913 | int win_divisor; | 1915 | int win_divisor; |
| 1914 | 1916 | ||
| 1915 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) | ||
| 1916 | goto send_now; | ||
| 1917 | |||
| 1918 | if (icsk->icsk_ca_state >= TCP_CA_Recovery) | 1917 | if (icsk->icsk_ca_state >= TCP_CA_Recovery) |
| 1919 | goto send_now; | 1918 | goto send_now; |
| 1920 | 1919 | ||
| @@ -1973,10 +1972,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, | |||
| 1973 | if (age < (tp->srtt_us >> 4)) | 1972 | if (age < (tp->srtt_us >> 4)) |
| 1974 | goto send_now; | 1973 | goto send_now; |
| 1975 | 1974 | ||
| 1976 | /* Ok, it looks like it is advisable to defer. */ | 1975 | /* Ok, it looks like it is advisable to defer. |
| 1976 | * Three cases are tracked : | ||
| 1977 | * 1) We are cwnd-limited | ||
| 1978 | * 2) We are rwnd-limited | ||
| 1979 | * 3) We are application limited. | ||
| 1980 | */ | ||
| 1981 | if (cong_win < send_win) { | ||
| 1982 | if (cong_win <= skb->len) { | ||
| 1983 | *is_cwnd_limited = true; | ||
| 1984 | return true; | ||
| 1985 | } | ||
| 1986 | } else { | ||
| 1987 | if (send_win <= skb->len) { | ||
| 1988 | *is_rwnd_limited = true; | ||
| 1989 | return true; | ||
| 1990 | } | ||
| 1991 | } | ||
| 1977 | 1992 | ||
| 1978 | if (cong_win < send_win && cong_win <= skb->len) | 1993 | /* If this packet won't get more data, do not wait. */ |
| 1979 | *is_cwnd_limited = true; | 1994 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
| 1995 | goto send_now; | ||
| 1980 | 1996 | ||
| 1981 | return true; | 1997 | return true; |
| 1982 | 1998 | ||
| @@ -2356,7 +2372,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
| 2356 | } else { | 2372 | } else { |
| 2357 | if (!push_one && | 2373 | if (!push_one && |
| 2358 | tcp_tso_should_defer(sk, skb, &is_cwnd_limited, | 2374 | tcp_tso_should_defer(sk, skb, &is_cwnd_limited, |
| 2359 | max_segs)) | 2375 | &is_rwnd_limited, max_segs)) |
| 2360 | break; | 2376 | break; |
| 2361 | } | 2377 | } |
| 2362 | 2378 | ||
| @@ -2494,15 +2510,18 @@ void tcp_send_loss_probe(struct sock *sk) | |||
| 2494 | goto rearm_timer; | 2510 | goto rearm_timer; |
| 2495 | } | 2511 | } |
| 2496 | skb = skb_rb_last(&sk->tcp_rtx_queue); | 2512 | skb = skb_rb_last(&sk->tcp_rtx_queue); |
| 2513 | if (unlikely(!skb)) { | ||
| 2514 | WARN_ONCE(tp->packets_out, | ||
| 2515 | "invalid inflight: %u state %u cwnd %u mss %d\n", | ||
| 2516 | tp->packets_out, sk->sk_state, tp->snd_cwnd, mss); | ||
| 2517 | inet_csk(sk)->icsk_pending = 0; | ||
| 2518 | return; | ||
| 2519 | } | ||
| 2497 | 2520 | ||
| 2498 | /* At most one outstanding TLP retransmission. */ | 2521 | /* At most one outstanding TLP retransmission. */ |
| 2499 | if (tp->tlp_high_seq) | 2522 | if (tp->tlp_high_seq) |
| 2500 | goto rearm_timer; | 2523 | goto rearm_timer; |
| 2501 | 2524 | ||
| 2502 | /* Retransmit last segment. */ | ||
| 2503 | if (WARN_ON(!skb)) | ||
| 2504 | goto rearm_timer; | ||
| 2505 | |||
| 2506 | if (skb_still_in_host_queue(sk, skb)) | 2525 | if (skb_still_in_host_queue(sk, skb)) |
| 2507 | goto rearm_timer; | 2526 | goto rearm_timer; |
| 2508 | 2527 | ||
| @@ -2920,7 +2939,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) | |||
| 2920 | TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; | 2939 | TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; |
| 2921 | trace_tcp_retransmit_skb(sk, skb); | 2940 | trace_tcp_retransmit_skb(sk, skb); |
| 2922 | } else if (err != -EBUSY) { | 2941 | } else if (err != -EBUSY) { |
| 2923 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); | 2942 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs); |
| 2924 | } | 2943 | } |
| 2925 | return err; | 2944 | return err; |
| 2926 | } | 2945 | } |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 091c53925e4d..f87dbc78b6bc 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
| @@ -378,7 +378,7 @@ static void tcp_probe_timer(struct sock *sk) | |||
| 378 | return; | 378 | return; |
| 379 | } | 379 | } |
| 380 | 380 | ||
| 381 | if (icsk->icsk_probes_out > max_probes) { | 381 | if (icsk->icsk_probes_out >= max_probes) { |
| 382 | abort: tcp_write_err(sk); | 382 | abort: tcp_write_err(sk); |
| 383 | } else { | 383 | } else { |
| 384 | /* Only send another probe if we didn't close things up. */ | 384 | /* Only send another probe if we didn't close things up. */ |
| @@ -484,11 +484,12 @@ void tcp_retransmit_timer(struct sock *sk) | |||
| 484 | goto out_reset_timer; | 484 | goto out_reset_timer; |
| 485 | } | 485 | } |
| 486 | 486 | ||
| 487 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS); | ||
| 487 | if (tcp_write_timeout(sk)) | 488 | if (tcp_write_timeout(sk)) |
| 488 | goto out; | 489 | goto out; |
| 489 | 490 | ||
| 490 | if (icsk->icsk_retransmits == 0) { | 491 | if (icsk->icsk_retransmits == 0) { |
| 491 | int mib_idx; | 492 | int mib_idx = 0; |
| 492 | 493 | ||
| 493 | if (icsk->icsk_ca_state == TCP_CA_Recovery) { | 494 | if (icsk->icsk_ca_state == TCP_CA_Recovery) { |
| 494 | if (tcp_is_sack(tp)) | 495 | if (tcp_is_sack(tp)) |
| @@ -503,10 +504,9 @@ void tcp_retransmit_timer(struct sock *sk) | |||
| 503 | mib_idx = LINUX_MIB_TCPSACKFAILURES; | 504 | mib_idx = LINUX_MIB_TCPSACKFAILURES; |
| 504 | else | 505 | else |
| 505 | mib_idx = LINUX_MIB_TCPRENOFAILURES; | 506 | mib_idx = LINUX_MIB_TCPRENOFAILURES; |
| 506 | } else { | ||
| 507 | mib_idx = LINUX_MIB_TCPTIMEOUTS; | ||
| 508 | } | 507 | } |
| 509 | __NET_INC_STATS(sock_net(sk), mib_idx); | 508 | if (mib_idx) |
| 509 | __NET_INC_STATS(sock_net(sk), mib_idx); | ||
| 510 | } | 510 | } |
| 511 | 511 | ||
| 512 | tcp_enter_loss(sk); | 512 | tcp_enter_loss(sk); |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 96577e742afd..c1d85830c906 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
| @@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk, | |||
| 95 | list_for_each_entry_safe(skb, next, head, list) { | 95 | list_for_each_entry_safe(skb, next, head, list) { |
| 96 | struct dst_entry *dst; | 96 | struct dst_entry *dst; |
| 97 | 97 | ||
| 98 | list_del(&skb->list); | 98 | skb_list_del_init(skb); |
| 99 | /* if ingress device is enslaved to an L3 master device pass the | 99 | /* if ingress device is enslaved to an L3 master device pass the |
| 100 | * skb to its handler for processing | 100 | * skb to its handler for processing |
| 101 | */ | 101 | */ |
| @@ -296,7 +296,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, | |||
| 296 | struct net_device *dev = skb->dev; | 296 | struct net_device *dev = skb->dev; |
| 297 | struct net *net = dev_net(dev); | 297 | struct net *net = dev_net(dev); |
| 298 | 298 | ||
| 299 | list_del(&skb->list); | 299 | skb_list_del_init(skb); |
| 300 | skb = ip6_rcv_core(skb, dev, net); | 300 | skb = ip6_rcv_core(skb, dev, net); |
| 301 | if (skb == NULL) | 301 | if (skb == NULL) |
| 302 | continue; | 302 | continue; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 827a3f5ff3bb..fcd3c66ded16 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | |||
| 195 | const struct ipv6_pinfo *np = inet6_sk(sk); | 195 | const struct ipv6_pinfo *np = inet6_sk(sk); |
| 196 | struct in6_addr *first_hop = &fl6->daddr; | 196 | struct in6_addr *first_hop = &fl6->daddr; |
| 197 | struct dst_entry *dst = skb_dst(skb); | 197 | struct dst_entry *dst = skb_dst(skb); |
| 198 | unsigned int head_room; | ||
| 198 | struct ipv6hdr *hdr; | 199 | struct ipv6hdr *hdr; |
| 199 | u8 proto = fl6->flowi6_proto; | 200 | u8 proto = fl6->flowi6_proto; |
| 200 | int seg_len = skb->len; | 201 | int seg_len = skb->len; |
| 201 | int hlimit = -1; | 202 | int hlimit = -1; |
| 202 | u32 mtu; | 203 | u32 mtu; |
| 203 | 204 | ||
| 204 | if (opt) { | 205 | head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); |
| 205 | unsigned int head_room; | 206 | if (opt) |
| 207 | head_room += opt->opt_nflen + opt->opt_flen; | ||
| 206 | 208 | ||
| 207 | /* First: exthdrs may take lots of space (~8K for now) | 209 | if (unlikely(skb_headroom(skb) < head_room)) { |
| 208 | MAX_HEADER is not enough. | 210 | struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); |
| 209 | */ | 211 | if (!skb2) { |
| 210 | head_room = opt->opt_nflen + opt->opt_flen; | 212 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
| 211 | seg_len += head_room; | 213 | IPSTATS_MIB_OUTDISCARDS); |
| 212 | head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); | 214 | kfree_skb(skb); |
| 213 | 215 | return -ENOBUFS; | |
| 214 | if (skb_headroom(skb) < head_room) { | ||
| 215 | struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); | ||
| 216 | if (!skb2) { | ||
| 217 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | ||
| 218 | IPSTATS_MIB_OUTDISCARDS); | ||
| 219 | kfree_skb(skb); | ||
| 220 | return -ENOBUFS; | ||
| 221 | } | ||
| 222 | if (skb->sk) | ||
| 223 | skb_set_owner_w(skb2, skb->sk); | ||
| 224 | consume_skb(skb); | ||
| 225 | skb = skb2; | ||
| 226 | } | 216 | } |
| 217 | if (skb->sk) | ||
| 218 | skb_set_owner_w(skb2, skb->sk); | ||
| 219 | consume_skb(skb); | ||
| 220 | skb = skb2; | ||
| 221 | } | ||
| 222 | |||
| 223 | if (opt) { | ||
| 224 | seg_len += opt->opt_nflen + opt->opt_flen; | ||
| 225 | |||
| 227 | if (opt->opt_flen) | 226 | if (opt->opt_flen) |
| 228 | ipv6_push_frag_opts(skb, opt, &proto); | 227 | ipv6_push_frag_opts(skb, opt, &proto); |
| 228 | |||
| 229 | if (opt->opt_nflen) | 229 | if (opt->opt_nflen) |
| 230 | ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop, | 230 | ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop, |
| 231 | &fl6->saddr); | 231 | &fl6->saddr); |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index d219979c3e52..181da2c40f9a 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
| @@ -341,7 +341,7 @@ static bool | |||
| 341 | nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) | 341 | nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) |
| 342 | { | 342 | { |
| 343 | struct sk_buff *fp, *head = fq->q.fragments; | 343 | struct sk_buff *fp, *head = fq->q.fragments; |
| 344 | int payload_len; | 344 | int payload_len, delta; |
| 345 | u8 ecn; | 345 | u8 ecn; |
| 346 | 346 | ||
| 347 | inet_frag_kill(&fq->q); | 347 | inet_frag_kill(&fq->q); |
| @@ -363,10 +363,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic | |||
| 363 | return false; | 363 | return false; |
| 364 | } | 364 | } |
| 365 | 365 | ||
| 366 | delta = - head->truesize; | ||
| 367 | |||
| 366 | /* Head of list must not be cloned. */ | 368 | /* Head of list must not be cloned. */ |
| 367 | if (skb_unclone(head, GFP_ATOMIC)) | 369 | if (skb_unclone(head, GFP_ATOMIC)) |
| 368 | return false; | 370 | return false; |
| 369 | 371 | ||
| 372 | delta += head->truesize; | ||
| 373 | if (delta) | ||
| 374 | add_frag_mem_limit(fq->q.net, delta); | ||
| 375 | |||
| 370 | /* If the first fragment is fragmented itself, we split | 376 | /* If the first fragment is fragmented itself, we split |
| 371 | * it to two chunks: the first with data and paged part | 377 | * it to two chunks: the first with data and paged part |
| 372 | * and the second, holding only fragments. */ | 378 | * and the second, holding only fragments. */ |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 5c3c92713096..aa26c45486d9 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
| @@ -281,7 +281,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 281 | { | 281 | { |
| 282 | struct net *net = container_of(fq->q.net, struct net, ipv6.frags); | 282 | struct net *net = container_of(fq->q.net, struct net, ipv6.frags); |
| 283 | struct sk_buff *fp, *head = fq->q.fragments; | 283 | struct sk_buff *fp, *head = fq->q.fragments; |
| 284 | int payload_len; | 284 | int payload_len, delta; |
| 285 | unsigned int nhoff; | 285 | unsigned int nhoff; |
| 286 | int sum_truesize; | 286 | int sum_truesize; |
| 287 | u8 ecn; | 287 | u8 ecn; |
| @@ -322,10 +322,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 322 | if (payload_len > IPV6_MAXPLEN) | 322 | if (payload_len > IPV6_MAXPLEN) |
| 323 | goto out_oversize; | 323 | goto out_oversize; |
| 324 | 324 | ||
| 325 | delta = - head->truesize; | ||
| 326 | |||
| 325 | /* Head of list must not be cloned. */ | 327 | /* Head of list must not be cloned. */ |
| 326 | if (skb_unclone(head, GFP_ATOMIC)) | 328 | if (skb_unclone(head, GFP_ATOMIC)) |
| 327 | goto out_oom; | 329 | goto out_oom; |
| 328 | 330 | ||
| 331 | delta += head->truesize; | ||
| 332 | if (delta) | ||
| 333 | add_frag_mem_limit(fq->q.net, delta); | ||
| 334 | |||
| 329 | /* If the first fragment is fragmented itself, we split | 335 | /* If the first fragment is fragmented itself, we split |
| 330 | * it to two chunks: the first with data and paged part | 336 | * it to two chunks: the first with data and paged part |
| 331 | * and the second, holding only fragments. */ | 337 | * and the second, holding only fragments. */ |
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index a8854dd3e9c5..8181ee7e1e27 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c | |||
| @@ -347,6 +347,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
| 347 | struct ipv6hdr *hdr = ipv6_hdr(skb); | 347 | struct ipv6hdr *hdr = ipv6_hdr(skb); |
| 348 | struct flowi6 fl6; | 348 | struct flowi6 fl6; |
| 349 | 349 | ||
| 350 | memset(&fl6, 0, sizeof(fl6)); | ||
| 350 | fl6.daddr = hdr->daddr; | 351 | fl6.daddr = hdr->daddr; |
| 351 | fl6.saddr = hdr->saddr; | 352 | fl6.saddr = hdr->saddr; |
| 352 | fl6.flowlabel = ip6_flowinfo(hdr); | 353 | fl6.flowlabel = ip6_flowinfo(hdr); |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 51622333d460..818aa0060349 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
| @@ -2891,7 +2891,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) | |||
| 2891 | 2891 | ||
| 2892 | len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len + | 2892 | len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len + |
| 2893 | beacon->proberesp_ies_len + beacon->assocresp_ies_len + | 2893 | beacon->proberesp_ies_len + beacon->assocresp_ies_len + |
| 2894 | beacon->probe_resp_len; | 2894 | beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len; |
| 2895 | 2895 | ||
| 2896 | new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL); | 2896 | new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL); |
| 2897 | if (!new_beacon) | 2897 | if (!new_beacon) |
| @@ -2934,8 +2934,9 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) | |||
| 2934 | memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); | 2934 | memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); |
| 2935 | pos += beacon->probe_resp_len; | 2935 | pos += beacon->probe_resp_len; |
| 2936 | } | 2936 | } |
| 2937 | if (beacon->ftm_responder) | 2937 | |
| 2938 | new_beacon->ftm_responder = beacon->ftm_responder; | 2938 | /* might copy -1, meaning no changes requested */ |
| 2939 | new_beacon->ftm_responder = beacon->ftm_responder; | ||
| 2939 | if (beacon->lci) { | 2940 | if (beacon->lci) { |
| 2940 | new_beacon->lci_len = beacon->lci_len; | 2941 | new_beacon->lci_len = beacon->lci_len; |
| 2941 | new_beacon->lci = pos; | 2942 | new_beacon->lci = pos; |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 5836ddeac9e3..5f3c81e705c7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
| @@ -1015,6 +1015,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
| 1015 | if (local->open_count == 0) | 1015 | if (local->open_count == 0) |
| 1016 | ieee80211_clear_tx_pending(local); | 1016 | ieee80211_clear_tx_pending(local); |
| 1017 | 1017 | ||
| 1018 | sdata->vif.bss_conf.beacon_int = 0; | ||
| 1019 | |||
| 1018 | /* | 1020 | /* |
| 1019 | * If the interface goes down while suspended, presumably because | 1021 | * If the interface goes down while suspended, presumably because |
| 1020 | * the device was unplugged and that happens before our resume, | 1022 | * the device was unplugged and that happens before our resume, |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index d2bc8d57c87e..bcf5ffc1567a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
| @@ -2766,6 +2766,7 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata, | |||
| 2766 | { | 2766 | { |
| 2767 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2767 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
| 2768 | struct sta_info *sta; | 2768 | struct sta_info *sta; |
| 2769 | bool result = true; | ||
| 2769 | 2770 | ||
| 2770 | sdata_info(sdata, "authenticated\n"); | 2771 | sdata_info(sdata, "authenticated\n"); |
| 2771 | ifmgd->auth_data->done = true; | 2772 | ifmgd->auth_data->done = true; |
| @@ -2778,15 +2779,18 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata, | |||
| 2778 | sta = sta_info_get(sdata, bssid); | 2779 | sta = sta_info_get(sdata, bssid); |
| 2779 | if (!sta) { | 2780 | if (!sta) { |
| 2780 | WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid); | 2781 | WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid); |
| 2781 | return false; | 2782 | result = false; |
| 2783 | goto out; | ||
| 2782 | } | 2784 | } |
| 2783 | if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { | 2785 | if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { |
| 2784 | sdata_info(sdata, "failed moving %pM to auth\n", bssid); | 2786 | sdata_info(sdata, "failed moving %pM to auth\n", bssid); |
| 2785 | return false; | 2787 | result = false; |
| 2788 | goto out; | ||
| 2786 | } | 2789 | } |
| 2787 | mutex_unlock(&sdata->local->sta_mtx); | ||
| 2788 | 2790 | ||
| 2789 | return true; | 2791 | out: |
| 2792 | mutex_unlock(&sdata->local->sta_mtx); | ||
| 2793 | return result; | ||
| 2790 | } | 2794 | } |
| 2791 | 2795 | ||
| 2792 | static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | 2796 | static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 3bd3b5769797..428f7ad5f9b5 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -1403,6 +1403,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) | |||
| 1403 | return RX_CONTINUE; | 1403 | return RX_CONTINUE; |
| 1404 | 1404 | ||
| 1405 | if (ieee80211_is_ctl(hdr->frame_control) || | 1405 | if (ieee80211_is_ctl(hdr->frame_control) || |
| 1406 | ieee80211_is_nullfunc(hdr->frame_control) || | ||
| 1406 | ieee80211_is_qos_nullfunc(hdr->frame_control) || | 1407 | ieee80211_is_qos_nullfunc(hdr->frame_control) || |
| 1407 | is_multicast_ether_addr(hdr->addr1)) | 1408 | is_multicast_ether_addr(hdr->addr1)) |
| 1408 | return RX_CONTINUE; | 1409 | return RX_CONTINUE; |
| @@ -3063,7 +3064,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
| 3063 | cfg80211_sta_opmode_change_notify(sdata->dev, | 3064 | cfg80211_sta_opmode_change_notify(sdata->dev, |
| 3064 | rx->sta->addr, | 3065 | rx->sta->addr, |
| 3065 | &sta_opmode, | 3066 | &sta_opmode, |
| 3066 | GFP_KERNEL); | 3067 | GFP_ATOMIC); |
| 3067 | goto handled; | 3068 | goto handled; |
| 3068 | } | 3069 | } |
| 3069 | case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { | 3070 | case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { |
| @@ -3100,7 +3101,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
| 3100 | cfg80211_sta_opmode_change_notify(sdata->dev, | 3101 | cfg80211_sta_opmode_change_notify(sdata->dev, |
| 3101 | rx->sta->addr, | 3102 | rx->sta->addr, |
| 3102 | &sta_opmode, | 3103 | &sta_opmode, |
| 3103 | GFP_KERNEL); | 3104 | GFP_ATOMIC); |
| 3104 | goto handled; | 3105 | goto handled; |
| 3105 | } | 3106 | } |
| 3106 | default: | 3107 | default: |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index aa4afbf0abaf..a794ca729000 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
| @@ -964,6 +964,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, | |||
| 964 | /* Track when last TDLS packet was ACKed */ | 964 | /* Track when last TDLS packet was ACKed */ |
| 965 | if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) | 965 | if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) |
| 966 | sta->status_stats.last_tdls_pkt_time = jiffies; | 966 | sta->status_stats.last_tdls_pkt_time = jiffies; |
| 967 | } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) { | ||
| 968 | return; | ||
| 967 | } else { | 969 | } else { |
| 968 | ieee80211_lost_packet(sta, info); | 970 | ieee80211_lost_packet(sta, info); |
| 969 | } | 971 | } |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e0ccee23fbcd..1f536ba573b4 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -439,8 +439,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
| 439 | if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL)) | 439 | if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL)) |
| 440 | info->hw_queue = tx->sdata->vif.cab_queue; | 440 | info->hw_queue = tx->sdata->vif.cab_queue; |
| 441 | 441 | ||
| 442 | /* no stations in PS mode */ | 442 | /* no stations in PS mode and no buffered packets */ |
| 443 | if (!atomic_read(&ps->num_sta_ps)) | 443 | if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf)) |
| 444 | return TX_CONTINUE; | 444 | return TX_CONTINUE; |
| 445 | 445 | ||
| 446 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; | 446 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index a4660c48ff01..cd94f925495a 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
| @@ -1166,7 +1166,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, | |||
| 1166 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 1166 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
| 1167 | if (err) { | 1167 | if (err) { |
| 1168 | net_warn_ratelimited("openvswitch: zone: %u " | 1168 | net_warn_ratelimited("openvswitch: zone: %u " |
| 1169 | "execeeds conntrack limit\n", | 1169 | "exceeds conntrack limit\n", |
| 1170 | info->zone.id); | 1170 | info->zone.id); |
| 1171 | return err; | 1171 | return err; |
| 1172 | } | 1172 | } |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 37c9b8f0e10f..ec8ec55e0fe8 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -85,7 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 85 | int ovr, int bind, bool rtnl_held, | 85 | int ovr, int bind, bool rtnl_held, |
| 86 | struct netlink_ext_ack *extack) | 86 | struct netlink_ext_ack *extack) |
| 87 | { | 87 | { |
| 88 | int ret = 0, err; | 88 | int ret = 0, tcfp_result = TC_ACT_OK, err, size; |
| 89 | struct nlattr *tb[TCA_POLICE_MAX + 1]; | 89 | struct nlattr *tb[TCA_POLICE_MAX + 1]; |
| 90 | struct tc_police *parm; | 90 | struct tc_police *parm; |
| 91 | struct tcf_police *police; | 91 | struct tcf_police *police; |
| @@ -93,7 +93,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 93 | struct tc_action_net *tn = net_generic(net, police_net_id); | 93 | struct tc_action_net *tn = net_generic(net, police_net_id); |
| 94 | struct tcf_police_params *new; | 94 | struct tcf_police_params *new; |
| 95 | bool exists = false; | 95 | bool exists = false; |
| 96 | int size; | ||
| 97 | 96 | ||
| 98 | if (nla == NULL) | 97 | if (nla == NULL) |
| 99 | return -EINVAL; | 98 | return -EINVAL; |
| @@ -160,6 +159,16 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 160 | goto failure; | 159 | goto failure; |
| 161 | } | 160 | } |
| 162 | 161 | ||
| 162 | if (tb[TCA_POLICE_RESULT]) { | ||
| 163 | tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); | ||
| 164 | if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) { | ||
| 165 | NL_SET_ERR_MSG(extack, | ||
| 166 | "goto chain not allowed on fallback"); | ||
| 167 | err = -EINVAL; | ||
| 168 | goto failure; | ||
| 169 | } | ||
| 170 | } | ||
| 171 | |||
| 163 | new = kzalloc(sizeof(*new), GFP_KERNEL); | 172 | new = kzalloc(sizeof(*new), GFP_KERNEL); |
| 164 | if (unlikely(!new)) { | 173 | if (unlikely(!new)) { |
| 165 | err = -ENOMEM; | 174 | err = -ENOMEM; |
| @@ -167,6 +176,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 167 | } | 176 | } |
| 168 | 177 | ||
| 169 | /* No failure allowed after this point */ | 178 | /* No failure allowed after this point */ |
| 179 | new->tcfp_result = tcfp_result; | ||
| 170 | new->tcfp_mtu = parm->mtu; | 180 | new->tcfp_mtu = parm->mtu; |
| 171 | if (!new->tcfp_mtu) { | 181 | if (!new->tcfp_mtu) { |
| 172 | new->tcfp_mtu = ~0; | 182 | new->tcfp_mtu = ~0; |
| @@ -196,16 +206,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 196 | if (tb[TCA_POLICE_AVRATE]) | 206 | if (tb[TCA_POLICE_AVRATE]) |
| 197 | new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); | 207 | new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); |
| 198 | 208 | ||
| 199 | if (tb[TCA_POLICE_RESULT]) { | ||
| 200 | new->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); | ||
| 201 | if (TC_ACT_EXT_CMP(new->tcfp_result, TC_ACT_GOTO_CHAIN)) { | ||
| 202 | NL_SET_ERR_MSG(extack, | ||
| 203 | "goto chain not allowed on fallback"); | ||
| 204 | err = -EINVAL; | ||
| 205 | goto failure; | ||
| 206 | } | ||
| 207 | } | ||
| 208 | |||
| 209 | spin_lock_bh(&police->tcf_lock); | 209 | spin_lock_bh(&police->tcf_lock); |
| 210 | spin_lock_bh(&police->tcfp_lock); | 210 | spin_lock_bh(&police->tcfp_lock); |
| 211 | police->tcfp_t_c = ktime_get_ns(); | 211 | police->tcfp_t_c = ktime_get_ns(); |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index c6c327874abc..71312d7bd8f4 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
| @@ -1238,18 +1238,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
| 1238 | if (err) | 1238 | if (err) |
| 1239 | goto errout_idr; | 1239 | goto errout_idr; |
| 1240 | 1240 | ||
| 1241 | if (!tc_skip_sw(fnew->flags)) { | 1241 | if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) { |
| 1242 | if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) { | 1242 | err = -EEXIST; |
| 1243 | err = -EEXIST; | 1243 | goto errout_mask; |
| 1244 | goto errout_mask; | ||
| 1245 | } | ||
| 1246 | |||
| 1247 | err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node, | ||
| 1248 | fnew->mask->filter_ht_params); | ||
| 1249 | if (err) | ||
| 1250 | goto errout_mask; | ||
| 1251 | } | 1244 | } |
| 1252 | 1245 | ||
| 1246 | err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node, | ||
| 1247 | fnew->mask->filter_ht_params); | ||
| 1248 | if (err) | ||
| 1249 | goto errout_mask; | ||
| 1250 | |||
| 1253 | if (!tc_skip_hw(fnew->flags)) { | 1251 | if (!tc_skip_hw(fnew->flags)) { |
| 1254 | err = fl_hw_replace_filter(tp, fnew, extack); | 1252 | err = fl_hw_replace_filter(tp, fnew, extack); |
| 1255 | if (err) | 1253 | if (err) |
| @@ -1303,9 +1301,8 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last, | |||
| 1303 | struct cls_fl_head *head = rtnl_dereference(tp->root); | 1301 | struct cls_fl_head *head = rtnl_dereference(tp->root); |
| 1304 | struct cls_fl_filter *f = arg; | 1302 | struct cls_fl_filter *f = arg; |
| 1305 | 1303 | ||
| 1306 | if (!tc_skip_sw(f->flags)) | 1304 | rhashtable_remove_fast(&f->mask->ht, &f->ht_node, |
| 1307 | rhashtable_remove_fast(&f->mask->ht, &f->ht_node, | 1305 | f->mask->filter_ht_params); |
| 1308 | f->mask->filter_ht_params); | ||
| 1309 | __fl_delete(tp, f, extack); | 1306 | __fl_delete(tp, f, extack); |
| 1310 | *last = list_empty(&head->masks); | 1307 | *last = list_empty(&head->masks); |
| 1311 | return 0; | 1308 | return 0; |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 2c38e3d07924..22cd46a60057 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
| @@ -431,6 +431,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 431 | int count = 1; | 431 | int count = 1; |
| 432 | int rc = NET_XMIT_SUCCESS; | 432 | int rc = NET_XMIT_SUCCESS; |
| 433 | 433 | ||
| 434 | /* Do not fool qdisc_drop_all() */ | ||
| 435 | skb->prev = NULL; | ||
| 436 | |||
| 434 | /* Random duplication */ | 437 | /* Random duplication */ |
| 435 | if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) | 438 | if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) |
| 436 | ++count; | 439 | ++count; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 6a28b96e779e..914750b819b2 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -118,9 +118,6 @@ static struct sctp_association *sctp_association_init( | |||
| 118 | asoc->flowlabel = sp->flowlabel; | 118 | asoc->flowlabel = sp->flowlabel; |
| 119 | asoc->dscp = sp->dscp; | 119 | asoc->dscp = sp->dscp; |
| 120 | 120 | ||
| 121 | /* Initialize default path MTU. */ | ||
| 122 | asoc->pathmtu = sp->pathmtu; | ||
| 123 | |||
| 124 | /* Set association default SACK delay */ | 121 | /* Set association default SACK delay */ |
| 125 | asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); | 122 | asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); |
| 126 | asoc->sackfreq = sp->sackfreq; | 123 | asoc->sackfreq = sp->sackfreq; |
| @@ -252,6 +249,10 @@ static struct sctp_association *sctp_association_init( | |||
| 252 | 0, gfp)) | 249 | 0, gfp)) |
| 253 | goto fail_init; | 250 | goto fail_init; |
| 254 | 251 | ||
| 252 | /* Initialize default path MTU. */ | ||
| 253 | asoc->pathmtu = sp->pathmtu; | ||
| 254 | sctp_assoc_update_frag_point(asoc); | ||
| 255 | |||
| 255 | /* Assume that peer would support both address types unless we are | 256 | /* Assume that peer would support both address types unless we are |
| 256 | * told otherwise. | 257 | * told otherwise. |
| 257 | */ | 258 | */ |
| @@ -434,7 +435,7 @@ static void sctp_association_destroy(struct sctp_association *asoc) | |||
| 434 | 435 | ||
| 435 | WARN_ON(atomic_read(&asoc->rmem_alloc)); | 436 | WARN_ON(atomic_read(&asoc->rmem_alloc)); |
| 436 | 437 | ||
| 437 | kfree(asoc); | 438 | kfree_rcu(asoc, rcu); |
| 438 | SCTP_DBG_OBJCNT_DEC(assoc); | 439 | SCTP_DBG_OBJCNT_DEC(assoc); |
| 439 | } | 440 | } |
| 440 | 441 | ||
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index ce8087846f05..d2048de86e7c 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c | |||
| @@ -191,6 +191,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, | |||
| 191 | * the packet | 191 | * the packet |
| 192 | */ | 192 | */ |
| 193 | max_data = asoc->frag_point; | 193 | max_data = asoc->frag_point; |
| 194 | if (unlikely(!max_data)) { | ||
| 195 | max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk), | ||
| 196 | sctp_datachk_len(&asoc->stream)); | ||
| 197 | pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%Zu)", | ||
| 198 | __func__, asoc, max_data); | ||
| 199 | } | ||
| 194 | 200 | ||
| 195 | /* If the the peer requested that we authenticate DATA chunks | 201 | /* If the the peer requested that we authenticate DATA chunks |
| 196 | * we need to account for bundling of the AUTH chunks along with | 202 | * we need to account for bundling of the AUTH chunks along with |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 4a4fd1971255..f4ac6c592e13 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
| @@ -2462,6 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, | |||
| 2462 | asoc->c.sinit_max_instreams, gfp)) | 2462 | asoc->c.sinit_max_instreams, gfp)) |
| 2463 | goto clean_up; | 2463 | goto clean_up; |
| 2464 | 2464 | ||
| 2465 | /* Update frag_point when stream_interleave may get changed. */ | ||
| 2466 | sctp_assoc_update_frag_point(asoc); | ||
| 2467 | |||
| 2465 | if (!asoc->temp && sctp_assoc_set_id(asoc, gfp)) | 2468 | if (!asoc->temp && sctp_assoc_set_id(asoc, gfp)) |
| 2466 | goto clean_up; | 2469 | goto clean_up; |
| 2467 | 2470 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index bf618d1b41fd..b8cebd5a87e5 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -3324,8 +3324,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned | |||
| 3324 | __u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) : | 3324 | __u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) : |
| 3325 | sizeof(struct sctp_data_chunk); | 3325 | sizeof(struct sctp_data_chunk); |
| 3326 | 3326 | ||
| 3327 | min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, | 3327 | min_len = sctp_min_frag_point(sp, datasize); |
| 3328 | datasize); | ||
| 3329 | max_len = SCTP_MAX_CHUNK_LEN - datasize; | 3328 | max_len = SCTP_MAX_CHUNK_LEN - datasize; |
| 3330 | 3329 | ||
| 3331 | if (val < min_len || val > max_len) | 3330 | if (val < min_len || val > max_len) |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 5d3f252659f1..ba765473d1f0 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -1791,6 +1791,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp) | |||
| 1791 | for (i=0; i < rqstp->rq_enc_pages_num; i++) | 1791 | for (i=0; i < rqstp->rq_enc_pages_num; i++) |
| 1792 | __free_page(rqstp->rq_enc_pages[i]); | 1792 | __free_page(rqstp->rq_enc_pages[i]); |
| 1793 | kfree(rqstp->rq_enc_pages); | 1793 | kfree(rqstp->rq_enc_pages); |
| 1794 | rqstp->rq_release_snd_buf = NULL; | ||
| 1794 | } | 1795 | } |
| 1795 | 1796 | ||
| 1796 | static int | 1797 | static int |
| @@ -1799,6 +1800,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp) | |||
| 1799 | struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; | 1800 | struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; |
| 1800 | int first, last, i; | 1801 | int first, last, i; |
| 1801 | 1802 | ||
| 1803 | if (rqstp->rq_release_snd_buf) | ||
| 1804 | rqstp->rq_release_snd_buf(rqstp); | ||
| 1805 | |||
| 1802 | if (snd_buf->page_len == 0) { | 1806 | if (snd_buf->page_len == 0) { |
| 1803 | rqstp->rq_enc_pages_num = 0; | 1807 | rqstp->rq_enc_pages_num = 0; |
| 1804 | return 0; | 1808 | return 0; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index ae3b8145da35..c6782aa47525 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -1915,6 +1915,13 @@ call_connect_status(struct rpc_task *task) | |||
| 1915 | struct rpc_clnt *clnt = task->tk_client; | 1915 | struct rpc_clnt *clnt = task->tk_client; |
| 1916 | int status = task->tk_status; | 1916 | int status = task->tk_status; |
| 1917 | 1917 | ||
| 1918 | /* Check if the task was already transmitted */ | ||
| 1919 | if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { | ||
| 1920 | xprt_end_transmit(task); | ||
| 1921 | task->tk_action = call_transmit_status; | ||
| 1922 | return; | ||
| 1923 | } | ||
| 1924 | |||
| 1918 | dprint_status(task); | 1925 | dprint_status(task); |
| 1919 | 1926 | ||
| 1920 | trace_rpc_connect_status(task); | 1927 | trace_rpc_connect_status(task); |
| @@ -2302,6 +2309,7 @@ out_retry: | |||
| 2302 | task->tk_status = 0; | 2309 | task->tk_status = 0; |
| 2303 | /* Note: rpc_verify_header() may have freed the RPC slot */ | 2310 | /* Note: rpc_verify_header() may have freed the RPC slot */ |
| 2304 | if (task->tk_rqstp == req) { | 2311 | if (task->tk_rqstp == req) { |
| 2312 | xdr_free_bvec(&req->rq_rcv_buf); | ||
| 2305 | req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0; | 2313 | req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0; |
| 2306 | if (task->tk_client->cl_discrtry) | 2314 | if (task->tk_client->cl_discrtry) |
| 2307 | xprt_conditional_disconnect(req->rq_xprt, | 2315 | xprt_conditional_disconnect(req->rq_xprt, |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 86bea4520c4d..ce927002862a 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -826,8 +826,15 @@ void xprt_connect(struct rpc_task *task) | |||
| 826 | return; | 826 | return; |
| 827 | if (xprt_test_and_set_connecting(xprt)) | 827 | if (xprt_test_and_set_connecting(xprt)) |
| 828 | return; | 828 | return; |
| 829 | xprt->stat.connect_start = jiffies; | 829 | /* Race breaker */ |
| 830 | xprt->ops->connect(xprt, task); | 830 | if (!xprt_connected(xprt)) { |
| 831 | xprt->stat.connect_start = jiffies; | ||
| 832 | xprt->ops->connect(xprt, task); | ||
| 833 | } else { | ||
| 834 | xprt_clear_connecting(xprt); | ||
| 835 | task->tk_status = 0; | ||
| 836 | rpc_wake_up_queued_task(&xprt->pending, task); | ||
| 837 | } | ||
| 831 | } | 838 | } |
| 832 | xprt_release_write(xprt, task); | 839 | xprt_release_write(xprt, task); |
| 833 | } | 840 | } |
| @@ -1623,6 +1630,8 @@ xprt_request_init(struct rpc_task *task) | |||
| 1623 | req->rq_snd_buf.buflen = 0; | 1630 | req->rq_snd_buf.buflen = 0; |
| 1624 | req->rq_rcv_buf.len = 0; | 1631 | req->rq_rcv_buf.len = 0; |
| 1625 | req->rq_rcv_buf.buflen = 0; | 1632 | req->rq_rcv_buf.buflen = 0; |
| 1633 | req->rq_snd_buf.bvec = NULL; | ||
| 1634 | req->rq_rcv_buf.bvec = NULL; | ||
| 1626 | req->rq_release_snd_buf = NULL; | 1635 | req->rq_release_snd_buf = NULL; |
| 1627 | xprt_reset_majortimeo(req); | 1636 | xprt_reset_majortimeo(req); |
| 1628 | dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, | 1637 | dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index ae77c71c1f64..8a5e823e0b33 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -330,18 +330,16 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp) | |||
| 330 | { | 330 | { |
| 331 | size_t i,n; | 331 | size_t i,n; |
| 332 | 332 | ||
| 333 | if (!(buf->flags & XDRBUF_SPARSE_PAGES)) | 333 | if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES)) |
| 334 | return want; | 334 | return want; |
| 335 | if (want > buf->page_len) | ||
| 336 | want = buf->page_len; | ||
| 337 | n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT; | 335 | n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 338 | for (i = 0; i < n; i++) { | 336 | for (i = 0; i < n; i++) { |
| 339 | if (buf->pages[i]) | 337 | if (buf->pages[i]) |
| 340 | continue; | 338 | continue; |
| 341 | buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp); | 339 | buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp); |
| 342 | if (!buf->pages[i]) { | 340 | if (!buf->pages[i]) { |
| 343 | buf->page_len = (i * PAGE_SIZE) - buf->page_base; | 341 | i *= PAGE_SIZE; |
| 344 | return buf->page_len; | 342 | return i > buf->page_base ? i - buf->page_base : 0; |
| 345 | } | 343 | } |
| 346 | } | 344 | } |
| 347 | return want; | 345 | return want; |
| @@ -378,8 +376,8 @@ static ssize_t | |||
| 378 | xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, | 376 | xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, |
| 379 | size_t count) | 377 | size_t count) |
| 380 | { | 378 | { |
| 381 | struct kvec kvec = { 0 }; | 379 | iov_iter_discard(&msg->msg_iter, READ, count); |
| 382 | return xs_read_kvec(sock, msg, flags | MSG_TRUNC, &kvec, count, 0); | 380 | return sock_recvmsg(sock, msg, flags); |
| 383 | } | 381 | } |
| 384 | 382 | ||
| 385 | static ssize_t | 383 | static ssize_t |
| @@ -398,16 +396,17 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
| 398 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) | 396 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) |
| 399 | goto out; | 397 | goto out; |
| 400 | if (ret != want) | 398 | if (ret != want) |
| 401 | goto eagain; | 399 | goto out; |
| 402 | seek = 0; | 400 | seek = 0; |
| 403 | } else { | 401 | } else { |
| 404 | seek -= buf->head[0].iov_len; | 402 | seek -= buf->head[0].iov_len; |
| 405 | offset += buf->head[0].iov_len; | 403 | offset += buf->head[0].iov_len; |
| 406 | } | 404 | } |
| 407 | if (seek < buf->page_len) { | 405 | |
| 408 | want = xs_alloc_sparse_pages(buf, | 406 | want = xs_alloc_sparse_pages(buf, |
| 409 | min_t(size_t, count - offset, buf->page_len), | 407 | min_t(size_t, count - offset, buf->page_len), |
| 410 | GFP_NOWAIT); | 408 | GFP_NOWAIT); |
| 409 | if (seek < want) { | ||
| 411 | ret = xs_read_bvec(sock, msg, flags, buf->bvec, | 410 | ret = xs_read_bvec(sock, msg, flags, buf->bvec, |
| 412 | xdr_buf_pagecount(buf), | 411 | xdr_buf_pagecount(buf), |
| 413 | want + buf->page_base, | 412 | want + buf->page_base, |
| @@ -418,12 +417,13 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
| 418 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) | 417 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) |
| 419 | goto out; | 418 | goto out; |
| 420 | if (ret != want) | 419 | if (ret != want) |
| 421 | goto eagain; | 420 | goto out; |
| 422 | seek = 0; | 421 | seek = 0; |
| 423 | } else { | 422 | } else { |
| 424 | seek -= buf->page_len; | 423 | seek -= want; |
| 425 | offset += buf->page_len; | 424 | offset += want; |
| 426 | } | 425 | } |
| 426 | |||
| 427 | if (seek < buf->tail[0].iov_len) { | 427 | if (seek < buf->tail[0].iov_len) { |
| 428 | want = min_t(size_t, count - offset, buf->tail[0].iov_len); | 428 | want = min_t(size_t, count - offset, buf->tail[0].iov_len); |
| 429 | ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek); | 429 | ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek); |
| @@ -433,17 +433,13 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
| 433 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) | 433 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) |
| 434 | goto out; | 434 | goto out; |
| 435 | if (ret != want) | 435 | if (ret != want) |
| 436 | goto eagain; | 436 | goto out; |
| 437 | } else | 437 | } else |
| 438 | offset += buf->tail[0].iov_len; | 438 | offset += buf->tail[0].iov_len; |
| 439 | ret = -EMSGSIZE; | 439 | ret = -EMSGSIZE; |
| 440 | msg->msg_flags |= MSG_TRUNC; | ||
| 441 | out: | 440 | out: |
| 442 | *read = offset - seek_init; | 441 | *read = offset - seek_init; |
| 443 | return ret; | 442 | return ret; |
| 444 | eagain: | ||
| 445 | ret = -EAGAIN; | ||
| 446 | goto out; | ||
| 447 | sock_err: | 443 | sock_err: |
| 448 | offset += seek; | 444 | offset += seek; |
| 449 | goto out; | 445 | goto out; |
| @@ -486,19 +482,20 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg, | |||
| 486 | if (transport->recv.offset == transport->recv.len) { | 482 | if (transport->recv.offset == transport->recv.len) { |
| 487 | if (xs_read_stream_request_done(transport)) | 483 | if (xs_read_stream_request_done(transport)) |
| 488 | msg->msg_flags |= MSG_EOR; | 484 | msg->msg_flags |= MSG_EOR; |
| 489 | return transport->recv.copied; | 485 | return read; |
| 490 | } | 486 | } |
| 491 | 487 | ||
| 492 | switch (ret) { | 488 | switch (ret) { |
| 489 | default: | ||
| 490 | break; | ||
| 491 | case -EFAULT: | ||
| 493 | case -EMSGSIZE: | 492 | case -EMSGSIZE: |
| 494 | return transport->recv.copied; | 493 | msg->msg_flags |= MSG_TRUNC; |
| 494 | return read; | ||
| 495 | case 0: | 495 | case 0: |
| 496 | return -ESHUTDOWN; | 496 | return -ESHUTDOWN; |
| 497 | default: | ||
| 498 | if (ret < 0) | ||
| 499 | return ret; | ||
| 500 | } | 497 | } |
| 501 | return -EAGAIN; | 498 | return ret < 0 ? ret : read; |
| 502 | } | 499 | } |
| 503 | 500 | ||
| 504 | static size_t | 501 | static size_t |
| @@ -537,7 +534,7 @@ xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags) | |||
| 537 | 534 | ||
| 538 | ret = xs_read_stream_request(transport, msg, flags, req); | 535 | ret = xs_read_stream_request(transport, msg, flags, req); |
| 539 | if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) | 536 | if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) |
| 540 | xprt_complete_bc_request(req, ret); | 537 | xprt_complete_bc_request(req, transport->recv.copied); |
| 541 | 538 | ||
| 542 | return ret; | 539 | return ret; |
| 543 | } | 540 | } |
| @@ -570,7 +567,7 @@ xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags) | |||
| 570 | 567 | ||
| 571 | spin_lock(&xprt->queue_lock); | 568 | spin_lock(&xprt->queue_lock); |
| 572 | if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) | 569 | if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) |
| 573 | xprt_complete_rqst(req->rq_task, ret); | 570 | xprt_complete_rqst(req->rq_task, transport->recv.copied); |
| 574 | xprt_unpin_rqst(req); | 571 | xprt_unpin_rqst(req); |
| 575 | out: | 572 | out: |
| 576 | spin_unlock(&xprt->queue_lock); | 573 | spin_unlock(&xprt->queue_lock); |
| @@ -591,10 +588,8 @@ xs_read_stream(struct sock_xprt *transport, int flags) | |||
| 591 | if (ret <= 0) | 588 | if (ret <= 0) |
| 592 | goto out_err; | 589 | goto out_err; |
| 593 | transport->recv.offset = ret; | 590 | transport->recv.offset = ret; |
| 594 | if (ret != want) { | 591 | if (transport->recv.offset != want) |
| 595 | ret = -EAGAIN; | 592 | return transport->recv.offset; |
| 596 | goto out_err; | ||
| 597 | } | ||
| 598 | transport->recv.len = be32_to_cpu(transport->recv.fraghdr) & | 593 | transport->recv.len = be32_to_cpu(transport->recv.fraghdr) & |
| 599 | RPC_FRAGMENT_SIZE_MASK; | 594 | RPC_FRAGMENT_SIZE_MASK; |
| 600 | transport->recv.offset -= sizeof(transport->recv.fraghdr); | 595 | transport->recv.offset -= sizeof(transport->recv.fraghdr); |
| @@ -602,6 +597,9 @@ xs_read_stream(struct sock_xprt *transport, int flags) | |||
| 602 | } | 597 | } |
| 603 | 598 | ||
| 604 | switch (be32_to_cpu(transport->recv.calldir)) { | 599 | switch (be32_to_cpu(transport->recv.calldir)) { |
| 600 | default: | ||
| 601 | msg.msg_flags |= MSG_TRUNC; | ||
| 602 | break; | ||
| 605 | case RPC_CALL: | 603 | case RPC_CALL: |
| 606 | ret = xs_read_stream_call(transport, &msg, flags); | 604 | ret = xs_read_stream_call(transport, &msg, flags); |
| 607 | break; | 605 | break; |
| @@ -616,6 +614,9 @@ xs_read_stream(struct sock_xprt *transport, int flags) | |||
| 616 | goto out_err; | 614 | goto out_err; |
| 617 | read += ret; | 615 | read += ret; |
| 618 | if (transport->recv.offset < transport->recv.len) { | 616 | if (transport->recv.offset < transport->recv.len) { |
| 617 | if (!(msg.msg_flags & MSG_TRUNC)) | ||
| 618 | return read; | ||
| 619 | msg.msg_flags = 0; | ||
| 619 | ret = xs_read_discard(transport->sock, &msg, flags, | 620 | ret = xs_read_discard(transport->sock, &msg, flags, |
| 620 | transport->recv.len - transport->recv.offset); | 621 | transport->recv.len - transport->recv.offset); |
| 621 | if (ret <= 0) | 622 | if (ret <= 0) |
| @@ -623,7 +624,7 @@ xs_read_stream(struct sock_xprt *transport, int flags) | |||
| 623 | transport->recv.offset += ret; | 624 | transport->recv.offset += ret; |
| 624 | read += ret; | 625 | read += ret; |
| 625 | if (transport->recv.offset != transport->recv.len) | 626 | if (transport->recv.offset != transport->recv.len) |
| 626 | return -EAGAIN; | 627 | return read; |
| 627 | } | 628 | } |
| 628 | if (xs_read_stream_request_done(transport)) { | 629 | if (xs_read_stream_request_done(transport)) { |
| 629 | trace_xs_stream_read_request(transport); | 630 | trace_xs_stream_read_request(transport); |
| @@ -633,13 +634,7 @@ xs_read_stream(struct sock_xprt *transport, int flags) | |||
| 633 | transport->recv.len = 0; | 634 | transport->recv.len = 0; |
| 634 | return read; | 635 | return read; |
| 635 | out_err: | 636 | out_err: |
| 636 | switch (ret) { | 637 | return ret != 0 ? ret : -ESHUTDOWN; |
| 637 | case 0: | ||
| 638 | case -ESHUTDOWN: | ||
| 639 | xprt_force_disconnect(&transport->xprt); | ||
| 640 | return -ESHUTDOWN; | ||
| 641 | } | ||
| 642 | return ret; | ||
| 643 | } | 638 | } |
| 644 | 639 | ||
| 645 | static void xs_stream_data_receive(struct sock_xprt *transport) | 640 | static void xs_stream_data_receive(struct sock_xprt *transport) |
| @@ -648,12 +643,12 @@ static void xs_stream_data_receive(struct sock_xprt *transport) | |||
| 648 | ssize_t ret = 0; | 643 | ssize_t ret = 0; |
| 649 | 644 | ||
| 650 | mutex_lock(&transport->recv_mutex); | 645 | mutex_lock(&transport->recv_mutex); |
| 646 | clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); | ||
| 651 | if (transport->sock == NULL) | 647 | if (transport->sock == NULL) |
| 652 | goto out; | 648 | goto out; |
| 653 | clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); | ||
| 654 | for (;;) { | 649 | for (;;) { |
| 655 | ret = xs_read_stream(transport, MSG_DONTWAIT); | 650 | ret = xs_read_stream(transport, MSG_DONTWAIT); |
| 656 | if (ret <= 0) | 651 | if (ret < 0) |
| 657 | break; | 652 | break; |
| 658 | read += ret; | 653 | read += ret; |
| 659 | cond_resched(); | 654 | cond_resched(); |
| @@ -1345,10 +1340,10 @@ static void xs_udp_data_receive(struct sock_xprt *transport) | |||
| 1345 | int err; | 1340 | int err; |
| 1346 | 1341 | ||
| 1347 | mutex_lock(&transport->recv_mutex); | 1342 | mutex_lock(&transport->recv_mutex); |
| 1343 | clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); | ||
| 1348 | sk = transport->inet; | 1344 | sk = transport->inet; |
| 1349 | if (sk == NULL) | 1345 | if (sk == NULL) |
| 1350 | goto out; | 1346 | goto out; |
| 1351 | clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); | ||
| 1352 | for (;;) { | 1347 | for (;;) { |
| 1353 | skb = skb_recv_udp(sk, 0, 1, &err); | 1348 | skb = skb_recv_udp(sk, 0, 1, &err); |
| 1354 | if (skb == NULL) | 1349 | if (skb == NULL) |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 12b3edf70a7b..1615e503f8e3 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
| @@ -272,11 +272,11 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, | |||
| 272 | 272 | ||
| 273 | p1 = (u8*)(ht_capa); | 273 | p1 = (u8*)(ht_capa); |
| 274 | p2 = (u8*)(ht_capa_mask); | 274 | p2 = (u8*)(ht_capa_mask); |
| 275 | for (i = 0; i<sizeof(*ht_capa); i++) | 275 | for (i = 0; i < sizeof(*ht_capa); i++) |
| 276 | p1[i] &= p2[i]; | 276 | p1[i] &= p2[i]; |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | /* Do a logical ht_capa &= ht_capa_mask. */ | 279 | /* Do a logical vht_capa &= vht_capa_mask. */ |
| 280 | void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, | 280 | void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, |
| 281 | const struct ieee80211_vht_cap *vht_capa_mask) | 281 | const struct ieee80211_vht_cap *vht_capa_mask) |
| 282 | { | 282 | { |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 744b5851bbf9..8d763725498c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -7870,6 +7870,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) | |||
| 7870 | } | 7870 | } |
| 7871 | 7871 | ||
| 7872 | memset(¶ms, 0, sizeof(params)); | 7872 | memset(¶ms, 0, sizeof(params)); |
| 7873 | params.beacon_csa.ftm_responder = -1; | ||
| 7873 | 7874 | ||
| 7874 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || | 7875 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || |
| 7875 | !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]) | 7876 | !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]) |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index d536b07582f8..f741d8376a46 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
| @@ -642,11 +642,15 @@ static bool cfg80211_is_all_idle(void) | |||
| 642 | * All devices must be idle as otherwise if you are actively | 642 | * All devices must be idle as otherwise if you are actively |
| 643 | * scanning some new beacon hints could be learned and would | 643 | * scanning some new beacon hints could be learned and would |
| 644 | * count as new regulatory hints. | 644 | * count as new regulatory hints. |
| 645 | * Also if there is any other active beaconing interface we | ||
| 646 | * need not issue a disconnect hint and reset any info such | ||
| 647 | * as chan dfs state, etc. | ||
| 645 | */ | 648 | */ |
| 646 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { | 649 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { |
| 647 | list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { | 650 | list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { |
| 648 | wdev_lock(wdev); | 651 | wdev_lock(wdev); |
| 649 | if (wdev->conn || wdev->current_bss) | 652 | if (wdev->conn || wdev->current_bss || |
| 653 | cfg80211_beaconing_iface_active(wdev)) | ||
| 650 | is_all_idle = false; | 654 | is_all_idle = false; |
| 651 | wdev_unlock(wdev); | 655 | wdev_unlock(wdev); |
| 652 | } | 656 | } |
| @@ -1171,6 +1175,8 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
| 1171 | 1175 | ||
| 1172 | cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, | 1176 | cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, |
| 1173 | rdev->wiphy.ht_capa_mod_mask); | 1177 | rdev->wiphy.ht_capa_mod_mask); |
| 1178 | cfg80211_oper_and_vht_capa(&connect->vht_capa_mask, | ||
| 1179 | rdev->wiphy.vht_capa_mod_mask); | ||
| 1174 | 1180 | ||
| 1175 | if (connkeys && connkeys->def >= 0) { | 1181 | if (connkeys && connkeys->def >= 0) { |
| 1176 | int idx; | 1182 | int idx; |
diff --git a/net/wireless/util.c b/net/wireless/util.c index ef14d80ca03e..d473bd135da8 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
| @@ -1421,6 +1421,8 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, | |||
| 1421 | ies[pos + ext], | 1421 | ies[pos + ext], |
| 1422 | ext == 2)) | 1422 | ext == 2)) |
| 1423 | pos = skip_ie(ies, ielen, pos); | 1423 | pos = skip_ie(ies, ielen, pos); |
| 1424 | else | ||
| 1425 | break; | ||
| 1424 | } | 1426 | } |
| 1425 | } else { | 1427 | } else { |
| 1426 | pos = skip_ie(ies, ielen, pos); | 1428 | pos = skip_ie(ies, ielen, pos); |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d49aa79b7997..5121729b8b63 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
| @@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb, | |||
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | len = *skb->data; | 102 | len = *skb->data; |
| 103 | needed = 1 + (len >> 4) + (len & 0x0f); | 103 | needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2; |
| 104 | 104 | ||
| 105 | if (!pskb_may_pull(skb, needed)) { | 105 | if (!pskb_may_pull(skb, needed)) { |
| 106 | /* packet is too short to hold the addresses it claims | 106 | /* packet is too short to hold the addresses it claims |
| @@ -288,7 +288,7 @@ static struct sock *x25_find_listener(struct x25_address *addr, | |||
| 288 | sk_for_each(s, &x25_list) | 288 | sk_for_each(s, &x25_list) |
| 289 | if ((!strcmp(addr->x25_addr, | 289 | if ((!strcmp(addr->x25_addr, |
| 290 | x25_sk(s)->source_addr.x25_addr) || | 290 | x25_sk(s)->source_addr.x25_addr) || |
| 291 | !strcmp(addr->x25_addr, | 291 | !strcmp(x25_sk(s)->source_addr.x25_addr, |
| 292 | null_x25_address.x25_addr)) && | 292 | null_x25_address.x25_addr)) && |
| 293 | s->sk_state == TCP_LISTEN) { | 293 | s->sk_state == TCP_LISTEN) { |
| 294 | /* | 294 | /* |
| @@ -688,11 +688,15 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
| 688 | goto out; | 688 | goto out; |
| 689 | } | 689 | } |
| 690 | 690 | ||
| 691 | len = strlen(addr->sx25_addr.x25_addr); | 691 | /* check for the null_x25_address */ |
| 692 | for (i = 0; i < len; i++) { | 692 | if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) { |
| 693 | if (!isdigit(addr->sx25_addr.x25_addr[i])) { | 693 | |
| 694 | rc = -EINVAL; | 694 | len = strlen(addr->sx25_addr.x25_addr); |
| 695 | goto out; | 695 | for (i = 0; i < len; i++) { |
| 696 | if (!isdigit(addr->sx25_addr.x25_addr[i])) { | ||
| 697 | rc = -EINVAL; | ||
| 698 | goto out; | ||
| 699 | } | ||
| 696 | } | 700 | } |
| 697 | } | 701 | } |
| 698 | 702 | ||
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 3c12cae32001..afb26221d8a8 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
| @@ -142,6 +142,15 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
| 142 | sk->sk_state_change(sk); | 142 | sk->sk_state_change(sk); |
| 143 | break; | 143 | break; |
| 144 | } | 144 | } |
| 145 | case X25_CALL_REQUEST: | ||
| 146 | /* call collision */ | ||
| 147 | x25->causediag.cause = 0x01; | ||
| 148 | x25->causediag.diagnostic = 0x48; | ||
| 149 | |||
| 150 | x25_write_internal(sk, X25_CLEAR_REQUEST); | ||
| 151 | x25_disconnect(sk, EISCONN, 0x01, 0x48); | ||
| 152 | break; | ||
| 153 | |||
| 145 | case X25_CLEAR_REQUEST: | 154 | case X25_CLEAR_REQUEST: |
| 146 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) | 155 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) |
| 147 | goto out_clear; | 156 | goto out_clear; |
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl index 8081b6cf67d2..34414c6efad6 100755 --- a/scripts/checkstack.pl +++ b/scripts/checkstack.pl | |||
| @@ -47,8 +47,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre); | |||
| 47 | $xs = "[0-9a-f ]"; # hex character or space | 47 | $xs = "[0-9a-f ]"; # hex character or space |
| 48 | $funcre = qr/^$x* <(.*)>:$/; | 48 | $funcre = qr/^$x* <(.*)>:$/; |
| 49 | if ($arch eq 'aarch64') { | 49 | if ($arch eq 'aarch64') { |
| 50 | #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]! | 50 | #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]! |
| 51 | $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o; | 51 | $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o; |
| 52 | } elsif ($arch eq 'arm') { | 52 | } elsif ($arch eq 'arm') { |
| 53 | #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64 | 53 | #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64 |
| 54 | $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o; | 54 | $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o; |
diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c index 2f48da98b5d4..dbd37460c573 100644 --- a/scripts/gcc-plugins/stackleak_plugin.c +++ b/scripts/gcc-plugins/stackleak_plugin.c | |||
| @@ -363,10 +363,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, | |||
| 363 | PASS_POS_INSERT_BEFORE); | 363 | PASS_POS_INSERT_BEFORE); |
| 364 | 364 | ||
| 365 | /* | 365 | /* |
| 366 | * The stackleak_cleanup pass should be executed after the | 366 | * The stackleak_cleanup pass should be executed before the "*free_cfg" |
| 367 | * "reload" pass, when the stack frame size is final. | 367 | * pass. It's the moment when the stack frame size is already final, |
| 368 | * function prologues and epilogues are generated, and the | ||
| 369 | * machine-dependent code transformations are not done. | ||
| 368 | */ | 370 | */ |
| 369 | PASS_INFO(stackleak_cleanup, "reload", 1, PASS_POS_INSERT_AFTER); | 371 | PASS_INFO(stackleak_cleanup, "*free_cfg", 1, PASS_POS_INSERT_BEFORE); |
| 370 | 372 | ||
| 371 | if (!plugin_default_version_check(version, &gcc_version)) { | 373 | if (!plugin_default_version_check(version, &gcc_version)) { |
| 372 | error(G_("incompatible gcc/plugin versions")); | 374 | error(G_("incompatible gcc/plugin versions")); |
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py index 5056fb3b897d..e559c6294c39 100755 --- a/scripts/spdxcheck.py +++ b/scripts/spdxcheck.py | |||
| @@ -168,6 +168,7 @@ class id_parser(object): | |||
| 168 | self.curline = 0 | 168 | self.curline = 0 |
| 169 | try: | 169 | try: |
| 170 | for line in fd: | 170 | for line in fd: |
| 171 | line = line.decode(locale.getpreferredencoding(False), errors='ignore') | ||
| 171 | self.curline += 1 | 172 | self.curline += 1 |
| 172 | if self.curline > maxlines: | 173 | if self.curline > maxlines: |
| 173 | break | 174 | break |
| @@ -249,12 +250,13 @@ if __name__ == '__main__': | |||
| 249 | 250 | ||
| 250 | try: | 251 | try: |
| 251 | if len(args.path) and args.path[0] == '-': | 252 | if len(args.path) and args.path[0] == '-': |
| 252 | parser.parse_lines(sys.stdin, args.maxlines, '-') | 253 | stdin = os.fdopen(sys.stdin.fileno(), 'rb') |
| 254 | parser.parse_lines(stdin, args.maxlines, '-') | ||
| 253 | else: | 255 | else: |
| 254 | if args.path: | 256 | if args.path: |
| 255 | for p in args.path: | 257 | for p in args.path: |
| 256 | if os.path.isfile(p): | 258 | if os.path.isfile(p): |
| 257 | parser.parse_lines(open(p), args.maxlines, p) | 259 | parser.parse_lines(open(p, 'rb'), args.maxlines, p) |
| 258 | elif os.path.isdir(p): | 260 | elif os.path.isdir(p): |
| 259 | scan_git_subtree(repo.head.reference.commit.tree, p) | 261 | scan_git_subtree(repo.head.reference.commit.tree, p) |
| 260 | else: | 262 | else: |
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 66c90f486af9..818dff1de545 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <sound/timer.h> | 36 | #include <sound/timer.h> |
| 37 | #include <sound/minors.h> | 37 | #include <sound/minors.h> |
| 38 | #include <linux/uio.h> | 38 | #include <linux/uio.h> |
| 39 | #include <linux/delay.h> | ||
| 39 | 40 | ||
| 40 | #include "pcm_local.h" | 41 | #include "pcm_local.h" |
| 41 | 42 | ||
| @@ -91,12 +92,12 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem); | |||
| 91 | * and this may lead to a deadlock when the code path takes read sem | 92 | * and this may lead to a deadlock when the code path takes read sem |
| 92 | * twice (e.g. one in snd_pcm_action_nonatomic() and another in | 93 | * twice (e.g. one in snd_pcm_action_nonatomic() and another in |
| 93 | * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to | 94 | * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to |
| 94 | * spin until it gets the lock. | 95 | * sleep until all the readers are completed without blocking by writer. |
| 95 | */ | 96 | */ |
| 96 | static inline void down_write_nonblock(struct rw_semaphore *lock) | 97 | static inline void down_write_nonfifo(struct rw_semaphore *lock) |
| 97 | { | 98 | { |
| 98 | while (!down_write_trylock(lock)) | 99 | while (!down_write_trylock(lock)) |
| 99 | cond_resched(); | 100 | msleep(1); |
| 100 | } | 101 | } |
| 101 | 102 | ||
| 102 | #define PCM_LOCK_DEFAULT 0 | 103 | #define PCM_LOCK_DEFAULT 0 |
| @@ -1967,7 +1968,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) | |||
| 1967 | res = -ENOMEM; | 1968 | res = -ENOMEM; |
| 1968 | goto _nolock; | 1969 | goto _nolock; |
| 1969 | } | 1970 | } |
| 1970 | down_write_nonblock(&snd_pcm_link_rwsem); | 1971 | down_write_nonfifo(&snd_pcm_link_rwsem); |
| 1971 | write_lock_irq(&snd_pcm_link_rwlock); | 1972 | write_lock_irq(&snd_pcm_link_rwlock); |
| 1972 | if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || | 1973 | if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || |
| 1973 | substream->runtime->status->state != substream1->runtime->status->state || | 1974 | substream->runtime->status->state != substream1->runtime->status->state || |
| @@ -2014,7 +2015,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream) | |||
| 2014 | struct snd_pcm_substream *s; | 2015 | struct snd_pcm_substream *s; |
| 2015 | int res = 0; | 2016 | int res = 0; |
| 2016 | 2017 | ||
| 2017 | down_write_nonblock(&snd_pcm_link_rwsem); | 2018 | down_write_nonfifo(&snd_pcm_link_rwsem); |
| 2018 | write_lock_irq(&snd_pcm_link_rwlock); | 2019 | write_lock_irq(&snd_pcm_link_rwlock); |
| 2019 | if (!snd_pcm_stream_linked(substream)) { | 2020 | if (!snd_pcm_stream_linked(substream)) { |
| 2020 | res = -EALREADY; | 2021 | res = -EALREADY; |
| @@ -2369,7 +2370,8 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream) | |||
| 2369 | 2370 | ||
| 2370 | static void pcm_release_private(struct snd_pcm_substream *substream) | 2371 | static void pcm_release_private(struct snd_pcm_substream *substream) |
| 2371 | { | 2372 | { |
| 2372 | snd_pcm_unlink(substream); | 2373 | if (snd_pcm_stream_linked(substream)) |
| 2374 | snd_pcm_unlink(substream); | ||
| 2373 | } | 2375 | } |
| 2374 | 2376 | ||
| 2375 | void snd_pcm_release_substream(struct snd_pcm_substream *substream) | 2377 | void snd_pcm_release_substream(struct snd_pcm_substream *substream) |
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c index 64c3cb0fb926..654a50319198 100644 --- a/sound/firewire/fireface/ff-protocol-ff400.c +++ b/sound/firewire/fireface/ff-protocol-ff400.c | |||
| @@ -30,7 +30,7 @@ static int ff400_get_clock(struct snd_ff *ff, unsigned int *rate, | |||
| 30 | int err; | 30 | int err; |
| 31 | 31 | ||
| 32 | err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST, | 32 | err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST, |
| 33 | FF400_SYNC_STATUS, ®, sizeof(reg), 0); | 33 | FF400_CLOCK_CONFIG, ®, sizeof(reg), 0); |
| 34 | if (err < 0) | 34 | if (err < 0) |
| 35 | return err; | 35 | return err; |
| 36 | data = le32_to_cpu(reg); | 36 | data = le32_to_cpu(reg); |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 0bbdf1a01e76..76f03abd15ab 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -2498,6 +2498,10 @@ static const struct pci_device_id azx_ids[] = { | |||
| 2498 | /* AMD Hudson */ | 2498 | /* AMD Hudson */ |
| 2499 | { PCI_DEVICE(0x1022, 0x780d), | 2499 | { PCI_DEVICE(0x1022, 0x780d), |
| 2500 | .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, | 2500 | .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, |
| 2501 | /* AMD Stoney */ | ||
| 2502 | { PCI_DEVICE(0x1022, 0x157a), | ||
| 2503 | .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB | | ||
| 2504 | AZX_DCAPS_PM_RUNTIME }, | ||
| 2501 | /* AMD Raven */ | 2505 | /* AMD Raven */ |
| 2502 | { PCI_DEVICE(0x1022, 0x15e3), | 2506 | { PCI_DEVICE(0x1022, 0x15e3), |
| 2503 | .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB | | 2507 | .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB | |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 06f93032d0cc..15021c839372 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -4988,9 +4988,18 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, | |||
| 4988 | { 0x19, 0x21a11010 }, /* dock mic */ | 4988 | { 0x19, 0x21a11010 }, /* dock mic */ |
| 4989 | { } | 4989 | { } |
| 4990 | }; | 4990 | }; |
| 4991 | /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise | ||
| 4992 | * the speaker output becomes too low by some reason on Thinkpads with | ||
| 4993 | * ALC298 codec | ||
| 4994 | */ | ||
| 4995 | static hda_nid_t preferred_pairs[] = { | ||
| 4996 | 0x14, 0x03, 0x17, 0x02, 0x21, 0x02, | ||
| 4997 | 0 | ||
| 4998 | }; | ||
| 4991 | struct alc_spec *spec = codec->spec; | 4999 | struct alc_spec *spec = codec->spec; |
| 4992 | 5000 | ||
| 4993 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { | 5001 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { |
| 5002 | spec->gen.preferred_dacs = preferred_pairs; | ||
| 4994 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; | 5003 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; |
| 4995 | snd_hda_apply_pincfgs(codec, pincfgs); | 5004 | snd_hda_apply_pincfgs(codec, pincfgs); |
| 4996 | } else if (action == HDA_FIXUP_ACT_INIT) { | 5005 | } else if (action == HDA_FIXUP_ACT_INIT) { |
| @@ -5510,6 +5519,10 @@ enum { | |||
| 5510 | ALC221_FIXUP_HP_HEADSET_MIC, | 5519 | ALC221_FIXUP_HP_HEADSET_MIC, |
| 5511 | ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, | 5520 | ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, |
| 5512 | ALC295_FIXUP_HP_AUTO_MUTE, | 5521 | ALC295_FIXUP_HP_AUTO_MUTE, |
| 5522 | ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE, | ||
| 5523 | ALC294_FIXUP_ASUS_MIC, | ||
| 5524 | ALC294_FIXUP_ASUS_HEADSET_MIC, | ||
| 5525 | ALC294_FIXUP_ASUS_SPK, | ||
| 5513 | }; | 5526 | }; |
| 5514 | 5527 | ||
| 5515 | static const struct hda_fixup alc269_fixups[] = { | 5528 | static const struct hda_fixup alc269_fixups[] = { |
| @@ -6382,11 +6395,52 @@ static const struct hda_fixup alc269_fixups[] = { | |||
| 6382 | [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = { | 6395 | [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = { |
| 6383 | .type = HDA_FIXUP_FUNC, | 6396 | .type = HDA_FIXUP_FUNC, |
| 6384 | .v.func = alc285_fixup_invalidate_dacs, | 6397 | .v.func = alc285_fixup_invalidate_dacs, |
| 6398 | .chained = true, | ||
| 6399 | .chain_id = ALC269_FIXUP_THINKPAD_ACPI | ||
| 6385 | }, | 6400 | }, |
| 6386 | [ALC295_FIXUP_HP_AUTO_MUTE] = { | 6401 | [ALC295_FIXUP_HP_AUTO_MUTE] = { |
| 6387 | .type = HDA_FIXUP_FUNC, | 6402 | .type = HDA_FIXUP_FUNC, |
| 6388 | .v.func = alc_fixup_auto_mute_via_amp, | 6403 | .v.func = alc_fixup_auto_mute_via_amp, |
| 6389 | }, | 6404 | }, |
| 6405 | [ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE] = { | ||
| 6406 | .type = HDA_FIXUP_PINS, | ||
| 6407 | .v.pins = (const struct hda_pintbl[]) { | ||
| 6408 | { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ | ||
| 6409 | { } | ||
| 6410 | }, | ||
| 6411 | .chained = true, | ||
| 6412 | .chain_id = ALC269_FIXUP_HEADSET_MIC | ||
| 6413 | }, | ||
| 6414 | [ALC294_FIXUP_ASUS_MIC] = { | ||
| 6415 | .type = HDA_FIXUP_PINS, | ||
| 6416 | .v.pins = (const struct hda_pintbl[]) { | ||
| 6417 | { 0x13, 0x90a60160 }, /* use as internal mic */ | ||
| 6418 | { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */ | ||
| 6419 | { } | ||
| 6420 | }, | ||
| 6421 | .chained = true, | ||
| 6422 | .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC | ||
| 6423 | }, | ||
| 6424 | [ALC294_FIXUP_ASUS_HEADSET_MIC] = { | ||
| 6425 | .type = HDA_FIXUP_PINS, | ||
| 6426 | .v.pins = (const struct hda_pintbl[]) { | ||
| 6427 | { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */ | ||
| 6428 | { } | ||
| 6429 | }, | ||
| 6430 | .chained = true, | ||
| 6431 | .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC | ||
| 6432 | }, | ||
| 6433 | [ALC294_FIXUP_ASUS_SPK] = { | ||
| 6434 | .type = HDA_FIXUP_VERBS, | ||
| 6435 | .v.verbs = (const struct hda_verb[]) { | ||
| 6436 | /* Set EAPD high */ | ||
| 6437 | { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 }, | ||
| 6438 | { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 }, | ||
| 6439 | { } | ||
| 6440 | }, | ||
| 6441 | .chained = true, | ||
| 6442 | .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC | ||
| 6443 | }, | ||
| 6390 | }; | 6444 | }; |
| 6391 | 6445 | ||
| 6392 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { | 6446 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
| @@ -6401,7 +6455,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 6401 | SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), | 6455 | SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), |
| 6402 | SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), | 6456 | SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), |
| 6403 | SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), | 6457 | SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), |
| 6458 | SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), | ||
| 6404 | SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), | 6459 | SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), |
| 6460 | SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), | ||
| 6461 | SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), | ||
| 6462 | SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), | ||
| 6405 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), | 6463 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), |
| 6406 | SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), | 6464 | SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), |
| 6407 | SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), | 6465 | SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), |
| @@ -6525,6 +6583,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 6525 | SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), | 6583 | SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), |
| 6526 | SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), | 6584 | SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), |
| 6527 | SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), | 6585 | SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), |
| 6586 | SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK), | ||
| 6528 | SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), | 6587 | SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), |
| 6529 | SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), | 6588 | SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), |
| 6530 | SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), | 6589 | SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), |
| @@ -7065,6 +7124,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
| 7065 | {0x14, 0x90170110}, | 7124 | {0x14, 0x90170110}, |
| 7066 | {0x19, 0x04a11040}, | 7125 | {0x19, 0x04a11040}, |
| 7067 | {0x21, 0x04211020}), | 7126 | {0x21, 0x04211020}), |
| 7127 | SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE, | ||
| 7128 | {0x12, 0x90a60130}, | ||
| 7129 | {0x17, 0x90170110}, | ||
| 7130 | {0x21, 0x02211020}), | ||
| 7068 | SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, | 7131 | SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, |
| 7069 | {0x12, 0x90a60120}, | 7132 | {0x12, 0x90a60120}, |
| 7070 | {0x14, 0x90170110}, | 7133 | {0x14, 0x90170110}, |
| @@ -7128,6 +7191,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
| 7128 | SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, | 7191 | SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, |
| 7129 | ALC292_STANDARD_PINS, | 7192 | ALC292_STANDARD_PINS, |
| 7130 | {0x13, 0x90a60140}), | 7193 | {0x13, 0x90a60140}), |
| 7194 | SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC, | ||
| 7195 | {0x14, 0x90170110}, | ||
| 7196 | {0x1b, 0x90a70130}, | ||
| 7197 | {0x21, 0x04211020}), | ||
| 7198 | SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK, | ||
| 7199 | {0x12, 0x90a60130}, | ||
| 7200 | {0x17, 0x90170110}, | ||
| 7201 | {0x21, 0x04211020}), | ||
| 7131 | SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, | 7202 | SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, |
| 7132 | ALC295_STANDARD_PINS, | 7203 | ALC295_STANDARD_PINS, |
| 7133 | {0x17, 0x21014020}, | 7204 | {0x17, 0x21014020}, |
| @@ -7200,6 +7271,37 @@ static void alc269_fill_coef(struct hda_codec *codec) | |||
| 7200 | alc_update_coef_idx(codec, 0x4, 0, 1<<11); | 7271 | alc_update_coef_idx(codec, 0x4, 0, 1<<11); |
| 7201 | } | 7272 | } |
| 7202 | 7273 | ||
| 7274 | static void alc294_hp_init(struct hda_codec *codec) | ||
| 7275 | { | ||
| 7276 | struct alc_spec *spec = codec->spec; | ||
| 7277 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | ||
| 7278 | int i, val; | ||
| 7279 | |||
| 7280 | if (!hp_pin) | ||
| 7281 | return; | ||
| 7282 | |||
| 7283 | snd_hda_codec_write(codec, hp_pin, 0, | ||
| 7284 | AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); | ||
| 7285 | |||
| 7286 | msleep(100); | ||
| 7287 | |||
| 7288 | snd_hda_codec_write(codec, hp_pin, 0, | ||
| 7289 | AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); | ||
| 7290 | |||
| 7291 | alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */ | ||
| 7292 | alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */ | ||
| 7293 | |||
| 7294 | /* Wait for depop procedure finish */ | ||
| 7295 | val = alc_read_coefex_idx(codec, 0x58, 0x01); | ||
| 7296 | for (i = 0; i < 20 && val & 0x0080; i++) { | ||
| 7297 | msleep(50); | ||
| 7298 | val = alc_read_coefex_idx(codec, 0x58, 0x01); | ||
| 7299 | } | ||
| 7300 | /* Set HP depop to auto mode */ | ||
| 7301 | alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b); | ||
| 7302 | msleep(50); | ||
| 7303 | } | ||
| 7304 | |||
| 7203 | /* | 7305 | /* |
| 7204 | */ | 7306 | */ |
| 7205 | static int patch_alc269(struct hda_codec *codec) | 7307 | static int patch_alc269(struct hda_codec *codec) |
| @@ -7325,6 +7427,7 @@ static int patch_alc269(struct hda_codec *codec) | |||
| 7325 | spec->codec_variant = ALC269_TYPE_ALC294; | 7427 | spec->codec_variant = ALC269_TYPE_ALC294; |
| 7326 | spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ | 7428 | spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ |
| 7327 | alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ | 7429 | alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ |
| 7430 | alc294_hp_init(codec); | ||
| 7328 | break; | 7431 | break; |
| 7329 | case 0x10ec0300: | 7432 | case 0x10ec0300: |
| 7330 | spec->codec_variant = ALC269_TYPE_ALC300; | 7433 | spec->codec_variant = ALC269_TYPE_ALC300; |
| @@ -7336,6 +7439,7 @@ static int patch_alc269(struct hda_codec *codec) | |||
| 7336 | spec->codec_variant = ALC269_TYPE_ALC700; | 7439 | spec->codec_variant = ALC269_TYPE_ALC700; |
| 7337 | spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ | 7440 | spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ |
| 7338 | alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ | 7441 | alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ |
| 7442 | alc294_hp_init(codec); | ||
| 7339 | break; | 7443 | break; |
| 7340 | 7444 | ||
| 7341 | } | 7445 | } |
diff --git a/sound/usb/card.c b/sound/usb/card.c index 2bfe4e80a6b9..a105947eaf55 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
| @@ -682,9 +682,12 @@ static int usb_audio_probe(struct usb_interface *intf, | |||
| 682 | 682 | ||
| 683 | __error: | 683 | __error: |
| 684 | if (chip) { | 684 | if (chip) { |
| 685 | /* chip->active is inside the chip->card object, | ||
| 686 | * decrement before memory is possibly returned. | ||
| 687 | */ | ||
| 688 | atomic_dec(&chip->active); | ||
| 685 | if (!chip->num_interfaces) | 689 | if (!chip->num_interfaces) |
| 686 | snd_card_free(chip->card); | 690 | snd_card_free(chip->card); |
| 687 | atomic_dec(&chip->active); | ||
| 688 | } | 691 | } |
| 689 | mutex_unlock(®ister_mutex); | 692 | mutex_unlock(®ister_mutex); |
| 690 | return err; | 693 | return err; |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 8a945ece9869..6623cafc94f2 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
| @@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, | |||
| 1373 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; | 1373 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; |
| 1374 | break; | 1374 | break; |
| 1375 | 1375 | ||
| 1376 | case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */ | ||
| 1376 | case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ | 1377 | case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ |
| 1377 | case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ | 1378 | case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ |
| 1378 | case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */ | 1379 | case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */ |
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 55bc512a1831..e4e6e2b3fd84 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c | |||
| @@ -32,7 +32,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw, | |||
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, | 34 | static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, |
| 35 | const void *data) | 35 | __u8 bit_offset, const void *data) |
| 36 | { | 36 | { |
| 37 | int actual_type_id; | 37 | int actual_type_id; |
| 38 | 38 | ||
| @@ -40,7 +40,7 @@ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, | |||
| 40 | if (actual_type_id < 0) | 40 | if (actual_type_id < 0) |
| 41 | return actual_type_id; | 41 | return actual_type_id; |
| 42 | 42 | ||
| 43 | return btf_dumper_do_type(d, actual_type_id, 0, data); | 43 | return btf_dumper_do_type(d, actual_type_id, bit_offset, data); |
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | static void btf_dumper_enum(const void *data, json_writer_t *jw) | 46 | static void btf_dumper_enum(const void *data, json_writer_t *jw) |
| @@ -237,7 +237,7 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id, | |||
| 237 | case BTF_KIND_VOLATILE: | 237 | case BTF_KIND_VOLATILE: |
| 238 | case BTF_KIND_CONST: | 238 | case BTF_KIND_CONST: |
| 239 | case BTF_KIND_RESTRICT: | 239 | case BTF_KIND_RESTRICT: |
| 240 | return btf_dumper_modifier(d, type_id, data); | 240 | return btf_dumper_modifier(d, type_id, bit_offset, data); |
| 241 | default: | 241 | default: |
| 242 | jsonw_printf(d->jw, "(unsupported-kind"); | 242 | jsonw_printf(d->jw, "(unsupported-kind"); |
| 243 | return -EINVAL; | 243 | return -EINVAL; |
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 852dc17ab47a..72c453a8bf50 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
| @@ -2170,7 +2170,7 @@ union bpf_attr { | |||
| 2170 | * Return | 2170 | * Return |
| 2171 | * 0 on success, or a negative error in case of failure. | 2171 | * 0 on success, or a negative error in case of failure. |
| 2172 | * | 2172 | * |
| 2173 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) | 2173 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2174 | * Description | 2174 | * Description |
| 2175 | * Look for TCP socket matching *tuple*, optionally in a child | 2175 | * Look for TCP socket matching *tuple*, optionally in a child |
| 2176 | * network namespace *netns*. The return value must be checked, | 2176 | * network namespace *netns*. The return value must be checked, |
| @@ -2187,12 +2187,14 @@ union bpf_attr { | |||
| 2187 | * **sizeof**\ (*tuple*\ **->ipv6**) | 2187 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2188 | * Look for an IPv6 socket. | 2188 | * Look for an IPv6 socket. |
| 2189 | * | 2189 | * |
| 2190 | * If the *netns* is zero, then the socket lookup table in the | 2190 | * If the *netns* is a negative signed 32-bit integer, then the |
| 2191 | * netns associated with the *ctx* will be used. For the TC hooks, | 2191 | * socket lookup table in the netns associated with the *ctx* will |
| 2192 | * this in the netns of the device in the skb. For socket hooks, | 2192 | * will be used. For the TC hooks, this is the netns of the device |
| 2193 | * this in the netns of the socket. If *netns* is non-zero, then | 2193 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2194 | * it specifies the ID of the netns relative to the netns | 2194 | * If *netns* is any other signed 32-bit value greater than or |
| 2195 | * associated with the *ctx*. | 2195 | * equal to zero then it specifies the ID of the netns relative to |
| 2196 | * the netns associated with the *ctx*. *netns* values beyond the | ||
| 2197 | * range of 32-bit integers are reserved for future use. | ||
| 2196 | * | 2198 | * |
| 2197 | * All values for *flags* are reserved for future usage, and must | 2199 | * All values for *flags* are reserved for future usage, and must |
| 2198 | * be left at zero. | 2200 | * be left at zero. |
| @@ -2201,8 +2203,10 @@ union bpf_attr { | |||
| 2201 | * **CONFIG_NET** configuration option. | 2203 | * **CONFIG_NET** configuration option. |
| 2202 | * Return | 2204 | * Return |
| 2203 | * Pointer to *struct bpf_sock*, or NULL in case of failure. | 2205 | * Pointer to *struct bpf_sock*, or NULL in case of failure. |
| 2206 | * For sockets with reuseport option, the *struct bpf_sock* | ||
| 2207 | * result is from reuse->socks[] using the hash of the tuple. | ||
| 2204 | * | 2208 | * |
| 2205 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) | 2209 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2206 | * Description | 2210 | * Description |
| 2207 | * Look for UDP socket matching *tuple*, optionally in a child | 2211 | * Look for UDP socket matching *tuple*, optionally in a child |
| 2208 | * network namespace *netns*. The return value must be checked, | 2212 | * network namespace *netns*. The return value must be checked, |
| @@ -2219,12 +2223,14 @@ union bpf_attr { | |||
| 2219 | * **sizeof**\ (*tuple*\ **->ipv6**) | 2223 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2220 | * Look for an IPv6 socket. | 2224 | * Look for an IPv6 socket. |
| 2221 | * | 2225 | * |
| 2222 | * If the *netns* is zero, then the socket lookup table in the | 2226 | * If the *netns* is a negative signed 32-bit integer, then the |
| 2223 | * netns associated with the *ctx* will be used. For the TC hooks, | 2227 | * socket lookup table in the netns associated with the *ctx* will |
| 2224 | * this in the netns of the device in the skb. For socket hooks, | 2228 | * will be used. For the TC hooks, this is the netns of the device |
| 2225 | * this in the netns of the socket. If *netns* is non-zero, then | 2229 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2226 | * it specifies the ID of the netns relative to the netns | 2230 | * If *netns* is any other signed 32-bit value greater than or |
| 2227 | * associated with the *ctx*. | 2231 | * equal to zero then it specifies the ID of the netns relative to |
| 2232 | * the netns associated with the *ctx*. *netns* values beyond the | ||
| 2233 | * range of 32-bit integers are reserved for future use. | ||
| 2228 | * | 2234 | * |
| 2229 | * All values for *flags* are reserved for future usage, and must | 2235 | * All values for *flags* are reserved for future usage, and must |
| 2230 | * be left at zero. | 2236 | * be left at zero. |
| @@ -2233,6 +2239,8 @@ union bpf_attr { | |||
| 2233 | * **CONFIG_NET** configuration option. | 2239 | * **CONFIG_NET** configuration option. |
| 2234 | * Return | 2240 | * Return |
| 2235 | * Pointer to *struct bpf_sock*, or NULL in case of failure. | 2241 | * Pointer to *struct bpf_sock*, or NULL in case of failure. |
| 2242 | * For sockets with reuseport option, the *struct bpf_sock* | ||
| 2243 | * result is from reuse->socks[] using the hash of the tuple. | ||
| 2236 | * | 2244 | * |
| 2237 | * int bpf_sk_release(struct bpf_sock *sk) | 2245 | * int bpf_sk_release(struct bpf_sock *sk) |
| 2238 | * Description | 2246 | * Description |
| @@ -2405,6 +2413,9 @@ enum bpf_func_id { | |||
| 2405 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ | 2413 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ |
| 2406 | #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) | 2414 | #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) |
| 2407 | 2415 | ||
| 2416 | /* Current network namespace */ | ||
| 2417 | #define BPF_F_CURRENT_NETNS (-1L) | ||
| 2418 | |||
| 2408 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ | 2419 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ |
| 2409 | enum bpf_adj_room_mode { | 2420 | enum bpf_adj_room_mode { |
| 2410 | BPF_ADJ_ROOM_NET, | 2421 | BPF_ADJ_ROOM_NET, |
| @@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode { | |||
| 2422 | BPF_LWT_ENCAP_SEG6_INLINE | 2433 | BPF_LWT_ENCAP_SEG6_INLINE |
| 2423 | }; | 2434 | }; |
| 2424 | 2435 | ||
| 2436 | #define __bpf_md_ptr(type, name) \ | ||
| 2437 | union { \ | ||
| 2438 | type name; \ | ||
| 2439 | __u64 :64; \ | ||
| 2440 | } __attribute__((aligned(8))) | ||
| 2441 | |||
| 2425 | /* user accessible mirror of in-kernel sk_buff. | 2442 | /* user accessible mirror of in-kernel sk_buff. |
| 2426 | * new fields can only be added to the end of this structure | 2443 | * new fields can only be added to the end of this structure |
| 2427 | */ | 2444 | */ |
| @@ -2456,7 +2473,7 @@ struct __sk_buff { | |||
| 2456 | /* ... here. */ | 2473 | /* ... here. */ |
| 2457 | 2474 | ||
| 2458 | __u32 data_meta; | 2475 | __u32 data_meta; |
| 2459 | struct bpf_flow_keys *flow_keys; | 2476 | __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); |
| 2460 | }; | 2477 | }; |
| 2461 | 2478 | ||
| 2462 | struct bpf_tunnel_key { | 2479 | struct bpf_tunnel_key { |
| @@ -2572,8 +2589,8 @@ enum sk_action { | |||
| 2572 | * be added to the end of this structure | 2589 | * be added to the end of this structure |
| 2573 | */ | 2590 | */ |
| 2574 | struct sk_msg_md { | 2591 | struct sk_msg_md { |
| 2575 | void *data; | 2592 | __bpf_md_ptr(void *, data); |
| 2576 | void *data_end; | 2593 | __bpf_md_ptr(void *, data_end); |
| 2577 | 2594 | ||
| 2578 | __u32 family; | 2595 | __u32 family; |
| 2579 | __u32 remote_ip4; /* Stored in network byte order */ | 2596 | __u32 remote_ip4; /* Stored in network byte order */ |
| @@ -2589,8 +2606,9 @@ struct sk_reuseport_md { | |||
| 2589 | * Start of directly accessible data. It begins from | 2606 | * Start of directly accessible data. It begins from |
| 2590 | * the tcp/udp header. | 2607 | * the tcp/udp header. |
| 2591 | */ | 2608 | */ |
| 2592 | void *data; | 2609 | __bpf_md_ptr(void *, data); |
| 2593 | void *data_end; /* End of directly accessible data */ | 2610 | /* End of directly accessible data */ |
| 2611 | __bpf_md_ptr(void *, data_end); | ||
| 2594 | /* | 2612 | /* |
| 2595 | * Total length of packet (starting from the tcp/udp header). | 2613 | * Total length of packet (starting from the tcp/udp header). |
| 2596 | * Note that the directly accessible bytes (data_end - data) | 2614 | * Note that the directly accessible bytes (data_end - data) |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 01ec04bf91b5..6c16ac36d482 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
| 16 | #include <linux/workqueue.h> | 16 | #include <linux/workqueue.h> |
| 17 | #include <linux/libnvdimm.h> | 17 | #include <linux/libnvdimm.h> |
| 18 | #include <linux/genalloc.h> | ||
| 18 | #include <linux/vmalloc.h> | 19 | #include <linux/vmalloc.h> |
| 19 | #include <linux/device.h> | 20 | #include <linux/device.h> |
| 20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| @@ -215,6 +216,8 @@ struct nfit_test { | |||
| 215 | 216 | ||
| 216 | static struct workqueue_struct *nfit_wq; | 217 | static struct workqueue_struct *nfit_wq; |
| 217 | 218 | ||
| 219 | static struct gen_pool *nfit_pool; | ||
| 220 | |||
| 218 | static struct nfit_test *to_nfit_test(struct device *dev) | 221 | static struct nfit_test *to_nfit_test(struct device *dev) |
| 219 | { | 222 | { |
| 220 | struct platform_device *pdev = to_platform_device(dev); | 223 | struct platform_device *pdev = to_platform_device(dev); |
| @@ -1132,6 +1135,9 @@ static void release_nfit_res(void *data) | |||
| 1132 | list_del(&nfit_res->list); | 1135 | list_del(&nfit_res->list); |
| 1133 | spin_unlock(&nfit_test_lock); | 1136 | spin_unlock(&nfit_test_lock); |
| 1134 | 1137 | ||
| 1138 | if (resource_size(&nfit_res->res) >= DIMM_SIZE) | ||
| 1139 | gen_pool_free(nfit_pool, nfit_res->res.start, | ||
| 1140 | resource_size(&nfit_res->res)); | ||
| 1135 | vfree(nfit_res->buf); | 1141 | vfree(nfit_res->buf); |
| 1136 | kfree(nfit_res); | 1142 | kfree(nfit_res); |
| 1137 | } | 1143 | } |
| @@ -1144,7 +1150,7 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma, | |||
| 1144 | GFP_KERNEL); | 1150 | GFP_KERNEL); |
| 1145 | int rc; | 1151 | int rc; |
| 1146 | 1152 | ||
| 1147 | if (!buf || !nfit_res) | 1153 | if (!buf || !nfit_res || !*dma) |
| 1148 | goto err; | 1154 | goto err; |
| 1149 | rc = devm_add_action(dev, release_nfit_res, nfit_res); | 1155 | rc = devm_add_action(dev, release_nfit_res, nfit_res); |
| 1150 | if (rc) | 1156 | if (rc) |
| @@ -1164,6 +1170,8 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma, | |||
| 1164 | 1170 | ||
| 1165 | return nfit_res->buf; | 1171 | return nfit_res->buf; |
| 1166 | err: | 1172 | err: |
| 1173 | if (*dma && size >= DIMM_SIZE) | ||
| 1174 | gen_pool_free(nfit_pool, *dma, size); | ||
| 1167 | if (buf) | 1175 | if (buf) |
| 1168 | vfree(buf); | 1176 | vfree(buf); |
| 1169 | kfree(nfit_res); | 1177 | kfree(nfit_res); |
| @@ -1172,9 +1180,16 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma, | |||
| 1172 | 1180 | ||
| 1173 | static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma) | 1181 | static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma) |
| 1174 | { | 1182 | { |
| 1183 | struct genpool_data_align data = { | ||
| 1184 | .align = SZ_128M, | ||
| 1185 | }; | ||
| 1175 | void *buf = vmalloc(size); | 1186 | void *buf = vmalloc(size); |
| 1176 | 1187 | ||
| 1177 | *dma = (unsigned long) buf; | 1188 | if (size >= DIMM_SIZE) |
| 1189 | *dma = gen_pool_alloc_algo(nfit_pool, size, | ||
| 1190 | gen_pool_first_fit_align, &data); | ||
| 1191 | else | ||
| 1192 | *dma = (unsigned long) buf; | ||
| 1178 | return __test_alloc(t, size, dma, buf); | 1193 | return __test_alloc(t, size, dma, buf); |
| 1179 | } | 1194 | } |
| 1180 | 1195 | ||
| @@ -2839,6 +2854,17 @@ static __init int nfit_test_init(void) | |||
| 2839 | goto err_register; | 2854 | goto err_register; |
| 2840 | } | 2855 | } |
| 2841 | 2856 | ||
| 2857 | nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE); | ||
| 2858 | if (!nfit_pool) { | ||
| 2859 | rc = -ENOMEM; | ||
| 2860 | goto err_register; | ||
| 2861 | } | ||
| 2862 | |||
| 2863 | if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) { | ||
| 2864 | rc = -ENOMEM; | ||
| 2865 | goto err_register; | ||
| 2866 | } | ||
| 2867 | |||
| 2842 | for (i = 0; i < NUM_NFITS; i++) { | 2868 | for (i = 0; i < NUM_NFITS; i++) { |
| 2843 | struct nfit_test *nfit_test; | 2869 | struct nfit_test *nfit_test; |
| 2844 | struct platform_device *pdev; | 2870 | struct platform_device *pdev; |
| @@ -2894,6 +2920,9 @@ static __init int nfit_test_init(void) | |||
| 2894 | return 0; | 2920 | return 0; |
| 2895 | 2921 | ||
| 2896 | err_register: | 2922 | err_register: |
| 2923 | if (nfit_pool) | ||
| 2924 | gen_pool_destroy(nfit_pool); | ||
| 2925 | |||
| 2897 | destroy_workqueue(nfit_wq); | 2926 | destroy_workqueue(nfit_wq); |
| 2898 | for (i = 0; i < NUM_NFITS; i++) | 2927 | for (i = 0; i < NUM_NFITS; i++) |
| 2899 | if (instances[i]) | 2928 | if (instances[i]) |
| @@ -2917,6 +2946,8 @@ static __exit void nfit_test_exit(void) | |||
| 2917 | platform_driver_unregister(&nfit_test_driver); | 2946 | platform_driver_unregister(&nfit_test_driver); |
| 2918 | nfit_test_teardown(); | 2947 | nfit_test_teardown(); |
| 2919 | 2948 | ||
| 2949 | gen_pool_destroy(nfit_pool); | ||
| 2950 | |||
| 2920 | for (i = 0; i < NUM_NFITS; i++) | 2951 | for (i = 0; i < NUM_NFITS; i++) |
| 2921 | put_device(&instances[i]->pdev.dev); | 2952 | put_device(&instances[i]->pdev.dev); |
| 2922 | class_destroy(nfit_test_dimm); | 2953 | class_destroy(nfit_test_dimm); |
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index acf1afa01c5b..397d6b612502 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile | |||
| @@ -7,6 +7,7 @@ LDLIBS+= -lpthread -lurcu | |||
| 7 | TARGETS = main idr-test multiorder xarray | 7 | TARGETS = main idr-test multiorder xarray |
| 8 | CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o | 8 | CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o |
| 9 | OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ | 9 | OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ |
| 10 | regression4.o \ | ||
| 10 | tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o | 11 | tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o |
| 11 | 12 | ||
| 12 | ifndef SHIFT | 13 | ifndef SHIFT |
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index 77a44c54998f..7a22d6e3732e 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c | |||
| @@ -308,6 +308,7 @@ int main(int argc, char **argv) | |||
| 308 | regression1_test(); | 308 | regression1_test(); |
| 309 | regression2_test(); | 309 | regression2_test(); |
| 310 | regression3_test(); | 310 | regression3_test(); |
| 311 | regression4_test(); | ||
| 311 | iteration_test(0, 10 + 90 * long_run); | 312 | iteration_test(0, 10 + 90 * long_run); |
| 312 | iteration_test(7, 10 + 90 * long_run); | 313 | iteration_test(7, 10 + 90 * long_run); |
| 313 | single_thread_tests(long_run); | 314 | single_thread_tests(long_run); |
diff --git a/tools/testing/radix-tree/regression.h b/tools/testing/radix-tree/regression.h index 3c8a1584e9ee..135145af18b7 100644 --- a/tools/testing/radix-tree/regression.h +++ b/tools/testing/radix-tree/regression.h | |||
| @@ -5,5 +5,6 @@ | |||
| 5 | void regression1_test(void); | 5 | void regression1_test(void); |
| 6 | void regression2_test(void); | 6 | void regression2_test(void); |
| 7 | void regression3_test(void); | 7 | void regression3_test(void); |
| 8 | void regression4_test(void); | ||
| 8 | 9 | ||
| 9 | #endif | 10 | #endif |
diff --git a/tools/testing/radix-tree/regression4.c b/tools/testing/radix-tree/regression4.c new file mode 100644 index 000000000000..cf4e5aba6b08 --- /dev/null +++ b/tools/testing/radix-tree/regression4.c | |||
| @@ -0,0 +1,79 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | #include <linux/kernel.h> | ||
| 3 | #include <linux/gfp.h> | ||
| 4 | #include <linux/slab.h> | ||
| 5 | #include <linux/radix-tree.h> | ||
| 6 | #include <linux/rcupdate.h> | ||
| 7 | #include <stdlib.h> | ||
| 8 | #include <pthread.h> | ||
| 9 | #include <stdio.h> | ||
| 10 | #include <assert.h> | ||
| 11 | |||
| 12 | #include "regression.h" | ||
| 13 | |||
| 14 | static pthread_barrier_t worker_barrier; | ||
| 15 | static int obj0, obj1; | ||
| 16 | static RADIX_TREE(mt_tree, GFP_KERNEL); | ||
| 17 | |||
| 18 | static void *reader_fn(void *arg) | ||
| 19 | { | ||
| 20 | int i; | ||
| 21 | void *entry; | ||
| 22 | |||
| 23 | rcu_register_thread(); | ||
| 24 | pthread_barrier_wait(&worker_barrier); | ||
| 25 | |||
| 26 | for (i = 0; i < 1000000; i++) { | ||
| 27 | rcu_read_lock(); | ||
| 28 | entry = radix_tree_lookup(&mt_tree, 0); | ||
| 29 | rcu_read_unlock(); | ||
| 30 | if (entry != &obj0) { | ||
| 31 | printf("iteration %d bad entry = %p\n", i, entry); | ||
| 32 | abort(); | ||
| 33 | } | ||
| 34 | } | ||
| 35 | |||
| 36 | rcu_unregister_thread(); | ||
| 37 | |||
| 38 | return NULL; | ||
| 39 | } | ||
| 40 | |||
| 41 | static void *writer_fn(void *arg) | ||
| 42 | { | ||
| 43 | int i; | ||
| 44 | |||
| 45 | rcu_register_thread(); | ||
| 46 | pthread_barrier_wait(&worker_barrier); | ||
| 47 | |||
| 48 | for (i = 0; i < 1000000; i++) { | ||
| 49 | radix_tree_insert(&mt_tree, 1, &obj1); | ||
| 50 | radix_tree_delete(&mt_tree, 1); | ||
| 51 | } | ||
| 52 | |||
| 53 | rcu_unregister_thread(); | ||
| 54 | |||
| 55 | return NULL; | ||
| 56 | } | ||
| 57 | |||
| 58 | void regression4_test(void) | ||
| 59 | { | ||
| 60 | pthread_t reader, writer; | ||
| 61 | |||
| 62 | printv(1, "regression test 4 starting\n"); | ||
| 63 | |||
| 64 | radix_tree_insert(&mt_tree, 0, &obj0); | ||
| 65 | pthread_barrier_init(&worker_barrier, NULL, 2); | ||
| 66 | |||
| 67 | if (pthread_create(&reader, NULL, reader_fn, NULL) || | ||
| 68 | pthread_create(&writer, NULL, writer_fn, NULL)) { | ||
| 69 | perror("pthread_create"); | ||
| 70 | exit(1); | ||
| 71 | } | ||
| 72 | |||
| 73 | if (pthread_join(reader, NULL) || pthread_join(writer, NULL)) { | ||
| 74 | perror("pthread_join"); | ||
| 75 | exit(1); | ||
| 76 | } | ||
| 77 | |||
| 78 | printv(1, "regression test 4 passed\n"); | ||
| 79 | } | ||
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 686e57ce40f4..efb6c13ab0de 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h | |||
| @@ -154,12 +154,12 @@ static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = | |||
| 154 | (void *) BPF_FUNC_skb_ancestor_cgroup_id; | 154 | (void *) BPF_FUNC_skb_ancestor_cgroup_id; |
| 155 | static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, | 155 | static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, |
| 156 | struct bpf_sock_tuple *tuple, | 156 | struct bpf_sock_tuple *tuple, |
| 157 | int size, unsigned int netns_id, | 157 | int size, unsigned long long netns_id, |
| 158 | unsigned long long flags) = | 158 | unsigned long long flags) = |
| 159 | (void *) BPF_FUNC_sk_lookup_tcp; | 159 | (void *) BPF_FUNC_sk_lookup_tcp; |
| 160 | static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, | 160 | static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, |
| 161 | struct bpf_sock_tuple *tuple, | 161 | struct bpf_sock_tuple *tuple, |
| 162 | int size, unsigned int netns_id, | 162 | int size, unsigned long long netns_id, |
| 163 | unsigned long long flags) = | 163 | unsigned long long flags) = |
| 164 | (void *) BPF_FUNC_sk_lookup_udp; | 164 | (void *) BPF_FUNC_sk_lookup_udp; |
| 165 | static int (*bpf_sk_release)(struct bpf_sock *sk) = | 165 | static int (*bpf_sk_release)(struct bpf_sock *sk) = |
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index f42b3396d622..38e1cbaaffdb 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
| @@ -432,11 +432,11 @@ static struct btf_raw_test raw_tests[] = { | |||
| 432 | /* const void* */ /* [3] */ | 432 | /* const void* */ /* [3] */ |
| 433 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), | 433 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), |
| 434 | /* typedef const void * const_void_ptr */ | 434 | /* typedef const void * const_void_ptr */ |
| 435 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3), | 435 | BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */ |
| 436 | /* struct A { */ /* [4] */ | 436 | /* struct A { */ /* [5] */ |
| 437 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)), | 437 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)), |
| 438 | /* const_void_ptr m; */ | 438 | /* const_void_ptr m; */ |
| 439 | BTF_MEMBER_ENC(NAME_TBD, 3, 0), | 439 | BTF_MEMBER_ENC(NAME_TBD, 4, 0), |
| 440 | /* } */ | 440 | /* } */ |
| 441 | BTF_END_RAW, | 441 | BTF_END_RAW, |
| 442 | }, | 442 | }, |
| @@ -494,10 +494,10 @@ static struct btf_raw_test raw_tests[] = { | |||
| 494 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), | 494 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), |
| 495 | /* const void* */ /* [3] */ | 495 | /* const void* */ /* [3] */ |
| 496 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), | 496 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), |
| 497 | /* typedef const void * const_void_ptr */ /* [4] */ | 497 | /* typedef const void * const_void_ptr */ |
| 498 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3), | 498 | BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */ |
| 499 | /* const_void_ptr[4] */ /* [5] */ | 499 | /* const_void_ptr[4] */ |
| 500 | BTF_TYPE_ARRAY_ENC(3, 1, 4), | 500 | BTF_TYPE_ARRAY_ENC(4, 1, 4), /* [5] */ |
| 501 | BTF_END_RAW, | 501 | BTF_END_RAW, |
| 502 | }, | 502 | }, |
| 503 | .str_sec = "\0const_void_ptr", | 503 | .str_sec = "\0const_void_ptr", |
| @@ -1293,6 +1293,367 @@ static struct btf_raw_test raw_tests[] = { | |||
| 1293 | }, | 1293 | }, |
| 1294 | 1294 | ||
| 1295 | { | 1295 | { |
| 1296 | .descr = "typedef (invalid name, name_off = 0)", | ||
| 1297 | .raw_types = { | ||
| 1298 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1299 | BTF_TYPEDEF_ENC(0, 1), /* [2] */ | ||
| 1300 | BTF_END_RAW, | ||
| 1301 | }, | ||
| 1302 | .str_sec = "\0__int", | ||
| 1303 | .str_sec_size = sizeof("\0__int"), | ||
| 1304 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1305 | .map_name = "typedef_check_btf", | ||
| 1306 | .key_size = sizeof(int), | ||
| 1307 | .value_size = sizeof(int), | ||
| 1308 | .key_type_id = 1, | ||
| 1309 | .value_type_id = 1, | ||
| 1310 | .max_entries = 4, | ||
| 1311 | .btf_load_err = true, | ||
| 1312 | .err_str = "Invalid name", | ||
| 1313 | }, | ||
| 1314 | |||
| 1315 | { | ||
| 1316 | .descr = "typedef (invalid name, invalid identifier)", | ||
| 1317 | .raw_types = { | ||
| 1318 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1319 | BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */ | ||
| 1320 | BTF_END_RAW, | ||
| 1321 | }, | ||
| 1322 | .str_sec = "\0__!int", | ||
| 1323 | .str_sec_size = sizeof("\0__!int"), | ||
| 1324 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1325 | .map_name = "typedef_check_btf", | ||
| 1326 | .key_size = sizeof(int), | ||
| 1327 | .value_size = sizeof(int), | ||
| 1328 | .key_type_id = 1, | ||
| 1329 | .value_type_id = 1, | ||
| 1330 | .max_entries = 4, | ||
| 1331 | .btf_load_err = true, | ||
| 1332 | .err_str = "Invalid name", | ||
| 1333 | }, | ||
| 1334 | |||
| 1335 | { | ||
| 1336 | .descr = "ptr type (invalid name, name_off <> 0)", | ||
| 1337 | .raw_types = { | ||
| 1338 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1339 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1340 | BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */ | ||
| 1341 | BTF_END_RAW, | ||
| 1342 | }, | ||
| 1343 | .str_sec = "\0__int", | ||
| 1344 | .str_sec_size = sizeof("\0__int"), | ||
| 1345 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1346 | .map_name = "ptr_type_check_btf", | ||
| 1347 | .key_size = sizeof(int), | ||
| 1348 | .value_size = sizeof(int), | ||
| 1349 | .key_type_id = 1, | ||
| 1350 | .value_type_id = 1, | ||
| 1351 | .max_entries = 4, | ||
| 1352 | .btf_load_err = true, | ||
| 1353 | .err_str = "Invalid name", | ||
| 1354 | }, | ||
| 1355 | |||
| 1356 | { | ||
| 1357 | .descr = "volatile type (invalid name, name_off <> 0)", | ||
| 1358 | .raw_types = { | ||
| 1359 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1360 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1361 | BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */ | ||
| 1362 | BTF_END_RAW, | ||
| 1363 | }, | ||
| 1364 | .str_sec = "\0__int", | ||
| 1365 | .str_sec_size = sizeof("\0__int"), | ||
| 1366 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1367 | .map_name = "volatile_type_check_btf", | ||
| 1368 | .key_size = sizeof(int), | ||
| 1369 | .value_size = sizeof(int), | ||
| 1370 | .key_type_id = 1, | ||
| 1371 | .value_type_id = 1, | ||
| 1372 | .max_entries = 4, | ||
| 1373 | .btf_load_err = true, | ||
| 1374 | .err_str = "Invalid name", | ||
| 1375 | }, | ||
| 1376 | |||
| 1377 | { | ||
| 1378 | .descr = "const type (invalid name, name_off <> 0)", | ||
| 1379 | .raw_types = { | ||
| 1380 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1381 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1382 | BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1), /* [2] */ | ||
| 1383 | BTF_END_RAW, | ||
| 1384 | }, | ||
| 1385 | .str_sec = "\0__int", | ||
| 1386 | .str_sec_size = sizeof("\0__int"), | ||
| 1387 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1388 | .map_name = "const_type_check_btf", | ||
| 1389 | .key_size = sizeof(int), | ||
| 1390 | .value_size = sizeof(int), | ||
| 1391 | .key_type_id = 1, | ||
| 1392 | .value_type_id = 1, | ||
| 1393 | .max_entries = 4, | ||
| 1394 | .btf_load_err = true, | ||
| 1395 | .err_str = "Invalid name", | ||
| 1396 | }, | ||
| 1397 | |||
| 1398 | { | ||
| 1399 | .descr = "restrict type (invalid name, name_off <> 0)", | ||
| 1400 | .raw_types = { | ||
| 1401 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1402 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */ | ||
| 1403 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1404 | BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */ | ||
| 1405 | BTF_END_RAW, | ||
| 1406 | }, | ||
| 1407 | .str_sec = "\0__int", | ||
| 1408 | .str_sec_size = sizeof("\0__int"), | ||
| 1409 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1410 | .map_name = "restrict_type_check_btf", | ||
| 1411 | .key_size = sizeof(int), | ||
| 1412 | .value_size = sizeof(int), | ||
| 1413 | .key_type_id = 1, | ||
| 1414 | .value_type_id = 1, | ||
| 1415 | .max_entries = 4, | ||
| 1416 | .btf_load_err = true, | ||
| 1417 | .err_str = "Invalid name", | ||
| 1418 | }, | ||
| 1419 | |||
| 1420 | { | ||
| 1421 | .descr = "fwd type (invalid name, name_off = 0)", | ||
| 1422 | .raw_types = { | ||
| 1423 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1424 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */ | ||
| 1425 | BTF_END_RAW, | ||
| 1426 | }, | ||
| 1427 | .str_sec = "\0__skb", | ||
| 1428 | .str_sec_size = sizeof("\0__skb"), | ||
| 1429 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1430 | .map_name = "fwd_type_check_btf", | ||
| 1431 | .key_size = sizeof(int), | ||
| 1432 | .value_size = sizeof(int), | ||
| 1433 | .key_type_id = 1, | ||
| 1434 | .value_type_id = 1, | ||
| 1435 | .max_entries = 4, | ||
| 1436 | .btf_load_err = true, | ||
| 1437 | .err_str = "Invalid name", | ||
| 1438 | }, | ||
| 1439 | |||
| 1440 | { | ||
| 1441 | .descr = "fwd type (invalid name, invalid identifier)", | ||
| 1442 | .raw_types = { | ||
| 1443 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1444 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1445 | BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */ | ||
| 1446 | BTF_END_RAW, | ||
| 1447 | }, | ||
| 1448 | .str_sec = "\0__!skb", | ||
| 1449 | .str_sec_size = sizeof("\0__!skb"), | ||
| 1450 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1451 | .map_name = "fwd_type_check_btf", | ||
| 1452 | .key_size = sizeof(int), | ||
| 1453 | .value_size = sizeof(int), | ||
| 1454 | .key_type_id = 1, | ||
| 1455 | .value_type_id = 1, | ||
| 1456 | .max_entries = 4, | ||
| 1457 | .btf_load_err = true, | ||
| 1458 | .err_str = "Invalid name", | ||
| 1459 | }, | ||
| 1460 | |||
| 1461 | { | ||
| 1462 | .descr = "array type (invalid name, name_off <> 0)", | ||
| 1463 | .raw_types = { | ||
| 1464 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1465 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1466 | BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), /* [2] */ | ||
| 1467 | BTF_ARRAY_ENC(1, 1, 4), | ||
| 1468 | BTF_END_RAW, | ||
| 1469 | }, | ||
| 1470 | .str_sec = "\0__skb", | ||
| 1471 | .str_sec_size = sizeof("\0__skb"), | ||
| 1472 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1473 | .map_name = "array_type_check_btf", | ||
| 1474 | .key_size = sizeof(int), | ||
| 1475 | .value_size = sizeof(int), | ||
| 1476 | .key_type_id = 1, | ||
| 1477 | .value_type_id = 1, | ||
| 1478 | .max_entries = 4, | ||
| 1479 | .btf_load_err = true, | ||
| 1480 | .err_str = "Invalid name", | ||
| 1481 | }, | ||
| 1482 | |||
| 1483 | { | ||
| 1484 | .descr = "struct type (name_off = 0)", | ||
| 1485 | .raw_types = { | ||
| 1486 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1487 | BTF_TYPE_ENC(0, | ||
| 1488 | BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ | ||
| 1489 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
| 1490 | BTF_END_RAW, | ||
| 1491 | }, | ||
| 1492 | .str_sec = "\0A", | ||
| 1493 | .str_sec_size = sizeof("\0A"), | ||
| 1494 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1495 | .map_name = "struct_type_check_btf", | ||
| 1496 | .key_size = sizeof(int), | ||
| 1497 | .value_size = sizeof(int), | ||
| 1498 | .key_type_id = 1, | ||
| 1499 | .value_type_id = 1, | ||
| 1500 | .max_entries = 4, | ||
| 1501 | }, | ||
| 1502 | |||
| 1503 | { | ||
| 1504 | .descr = "struct type (invalid name, invalid identifier)", | ||
| 1505 | .raw_types = { | ||
| 1506 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1507 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1508 | BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ | ||
| 1509 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
| 1510 | BTF_END_RAW, | ||
| 1511 | }, | ||
| 1512 | .str_sec = "\0A!\0B", | ||
| 1513 | .str_sec_size = sizeof("\0A!\0B"), | ||
| 1514 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1515 | .map_name = "struct_type_check_btf", | ||
| 1516 | .key_size = sizeof(int), | ||
| 1517 | .value_size = sizeof(int), | ||
| 1518 | .key_type_id = 1, | ||
| 1519 | .value_type_id = 1, | ||
| 1520 | .max_entries = 4, | ||
| 1521 | .btf_load_err = true, | ||
| 1522 | .err_str = "Invalid name", | ||
| 1523 | }, | ||
| 1524 | |||
| 1525 | { | ||
| 1526 | .descr = "struct member (name_off = 0)", | ||
| 1527 | .raw_types = { | ||
| 1528 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1529 | BTF_TYPE_ENC(0, | ||
| 1530 | BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ | ||
| 1531 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
| 1532 | BTF_END_RAW, | ||
| 1533 | }, | ||
| 1534 | .str_sec = "\0A", | ||
| 1535 | .str_sec_size = sizeof("\0A"), | ||
| 1536 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1537 | .map_name = "struct_type_check_btf", | ||
| 1538 | .key_size = sizeof(int), | ||
| 1539 | .value_size = sizeof(int), | ||
| 1540 | .key_type_id = 1, | ||
| 1541 | .value_type_id = 1, | ||
| 1542 | .max_entries = 4, | ||
| 1543 | }, | ||
| 1544 | |||
| 1545 | { | ||
| 1546 | .descr = "struct member (invalid name, invalid identifier)", | ||
| 1547 | .raw_types = { | ||
| 1548 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1549 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1550 | BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ | ||
| 1551 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
| 1552 | BTF_END_RAW, | ||
| 1553 | }, | ||
| 1554 | .str_sec = "\0A\0B*", | ||
| 1555 | .str_sec_size = sizeof("\0A\0B*"), | ||
| 1556 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1557 | .map_name = "struct_type_check_btf", | ||
| 1558 | .key_size = sizeof(int), | ||
| 1559 | .value_size = sizeof(int), | ||
| 1560 | .key_type_id = 1, | ||
| 1561 | .value_type_id = 1, | ||
| 1562 | .max_entries = 4, | ||
| 1563 | .btf_load_err = true, | ||
| 1564 | .err_str = "Invalid name", | ||
| 1565 | }, | ||
| 1566 | |||
| 1567 | { | ||
| 1568 | .descr = "enum type (name_off = 0)", | ||
| 1569 | .raw_types = { | ||
| 1570 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1571 | BTF_TYPE_ENC(0, | ||
| 1572 | BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), | ||
| 1573 | sizeof(int)), /* [2] */ | ||
| 1574 | BTF_ENUM_ENC(NAME_TBD, 0), | ||
| 1575 | BTF_END_RAW, | ||
| 1576 | }, | ||
| 1577 | .str_sec = "\0A\0B", | ||
| 1578 | .str_sec_size = sizeof("\0A\0B"), | ||
| 1579 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1580 | .map_name = "enum_type_check_btf", | ||
| 1581 | .key_size = sizeof(int), | ||
| 1582 | .value_size = sizeof(int), | ||
| 1583 | .key_type_id = 1, | ||
| 1584 | .value_type_id = 1, | ||
| 1585 | .max_entries = 4, | ||
| 1586 | }, | ||
| 1587 | |||
| 1588 | { | ||
| 1589 | .descr = "enum type (invalid name, invalid identifier)", | ||
| 1590 | .raw_types = { | ||
| 1591 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1592 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1593 | BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), | ||
| 1594 | sizeof(int)), /* [2] */ | ||
| 1595 | BTF_ENUM_ENC(NAME_TBD, 0), | ||
| 1596 | BTF_END_RAW, | ||
| 1597 | }, | ||
| 1598 | .str_sec = "\0A!\0B", | ||
| 1599 | .str_sec_size = sizeof("\0A!\0B"), | ||
| 1600 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1601 | .map_name = "enum_type_check_btf", | ||
| 1602 | .key_size = sizeof(int), | ||
| 1603 | .value_size = sizeof(int), | ||
| 1604 | .key_type_id = 1, | ||
| 1605 | .value_type_id = 1, | ||
| 1606 | .max_entries = 4, | ||
| 1607 | .btf_load_err = true, | ||
| 1608 | .err_str = "Invalid name", | ||
| 1609 | }, | ||
| 1610 | |||
| 1611 | { | ||
| 1612 | .descr = "enum member (invalid name, name_off = 0)", | ||
| 1613 | .raw_types = { | ||
| 1614 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1615 | BTF_TYPE_ENC(0, | ||
| 1616 | BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), | ||
| 1617 | sizeof(int)), /* [2] */ | ||
| 1618 | BTF_ENUM_ENC(0, 0), | ||
| 1619 | BTF_END_RAW, | ||
| 1620 | }, | ||
| 1621 | .str_sec = "", | ||
| 1622 | .str_sec_size = sizeof(""), | ||
| 1623 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1624 | .map_name = "enum_type_check_btf", | ||
| 1625 | .key_size = sizeof(int), | ||
| 1626 | .value_size = sizeof(int), | ||
| 1627 | .key_type_id = 1, | ||
| 1628 | .value_type_id = 1, | ||
| 1629 | .max_entries = 4, | ||
| 1630 | .btf_load_err = true, | ||
| 1631 | .err_str = "Invalid name", | ||
| 1632 | }, | ||
| 1633 | |||
| 1634 | { | ||
| 1635 | .descr = "enum member (invalid name, invalid identifier)", | ||
| 1636 | .raw_types = { | ||
| 1637 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1638 | BTF_TYPE_ENC(0, | ||
| 1639 | BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), | ||
| 1640 | sizeof(int)), /* [2] */ | ||
| 1641 | BTF_ENUM_ENC(NAME_TBD, 0), | ||
| 1642 | BTF_END_RAW, | ||
| 1643 | }, | ||
| 1644 | .str_sec = "\0A!", | ||
| 1645 | .str_sec_size = sizeof("\0A!"), | ||
| 1646 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1647 | .map_name = "enum_type_check_btf", | ||
| 1648 | .key_size = sizeof(int), | ||
| 1649 | .value_size = sizeof(int), | ||
| 1650 | .key_type_id = 1, | ||
| 1651 | .value_type_id = 1, | ||
| 1652 | .max_entries = 4, | ||
| 1653 | .btf_load_err = true, | ||
| 1654 | .err_str = "Invalid name", | ||
| 1655 | }, | ||
| 1656 | { | ||
| 1296 | .descr = "arraymap invalid btf key (a bit field)", | 1657 | .descr = "arraymap invalid btf key (a bit field)", |
| 1297 | .raw_types = { | 1658 | .raw_types = { |
| 1298 | /* int */ /* [1] */ | 1659 | /* int */ /* [1] */ |
diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/test_sk_lookup_kern.c index b745bdc08c2b..e21cd736c196 100644 --- a/tools/testing/selftests/bpf/test_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/test_sk_lookup_kern.c | |||
| @@ -72,7 +72,7 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb) | |||
| 72 | return TC_ACT_SHOT; | 72 | return TC_ACT_SHOT; |
| 73 | 73 | ||
| 74 | tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6); | 74 | tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6); |
| 75 | sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0); | 75 | sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0); |
| 76 | if (sk) | 76 | if (sk) |
| 77 | bpf_sk_release(sk); | 77 | bpf_sk_release(sk); |
| 78 | return sk ? TC_ACT_OK : TC_ACT_UNSPEC; | 78 | return sk ? TC_ACT_OK : TC_ACT_UNSPEC; |
| @@ -84,7 +84,7 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb) | |||
| 84 | struct bpf_sock_tuple tuple = {}; | 84 | struct bpf_sock_tuple tuple = {}; |
| 85 | struct bpf_sock *sk; | 85 | struct bpf_sock *sk; |
| 86 | 86 | ||
| 87 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 87 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 88 | if (sk) | 88 | if (sk) |
| 89 | bpf_sk_release(sk); | 89 | bpf_sk_release(sk); |
| 90 | return 0; | 90 | return 0; |
| @@ -97,7 +97,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb) | |||
| 97 | struct bpf_sock *sk; | 97 | struct bpf_sock *sk; |
| 98 | __u32 family = 0; | 98 | __u32 family = 0; |
| 99 | 99 | ||
| 100 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 100 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 101 | if (sk) { | 101 | if (sk) { |
| 102 | bpf_sk_release(sk); | 102 | bpf_sk_release(sk); |
| 103 | family = sk->family; | 103 | family = sk->family; |
| @@ -112,7 +112,7 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb) | |||
| 112 | struct bpf_sock *sk; | 112 | struct bpf_sock *sk; |
| 113 | __u32 family; | 113 | __u32 family; |
| 114 | 114 | ||
| 115 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 115 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 116 | if (sk) { | 116 | if (sk) { |
| 117 | sk += 1; | 117 | sk += 1; |
| 118 | bpf_sk_release(sk); | 118 | bpf_sk_release(sk); |
| @@ -127,7 +127,7 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb) | |||
| 127 | struct bpf_sock *sk; | 127 | struct bpf_sock *sk; |
| 128 | __u32 family; | 128 | __u32 family; |
| 129 | 129 | ||
| 130 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 130 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 131 | sk += 1; | 131 | sk += 1; |
| 132 | if (sk) | 132 | if (sk) |
| 133 | bpf_sk_release(sk); | 133 | bpf_sk_release(sk); |
| @@ -139,7 +139,7 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb) | |||
| 139 | { | 139 | { |
| 140 | struct bpf_sock_tuple tuple = {}; | 140 | struct bpf_sock_tuple tuple = {}; |
| 141 | 141 | ||
| 142 | bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 142 | bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 143 | return 0; | 143 | return 0; |
| 144 | } | 144 | } |
| 145 | 145 | ||
| @@ -149,7 +149,7 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb) | |||
| 149 | struct bpf_sock_tuple tuple = {}; | 149 | struct bpf_sock_tuple tuple = {}; |
| 150 | struct bpf_sock *sk; | 150 | struct bpf_sock *sk; |
| 151 | 151 | ||
| 152 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 152 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 153 | bpf_sk_release(sk); | 153 | bpf_sk_release(sk); |
| 154 | bpf_sk_release(sk); | 154 | bpf_sk_release(sk); |
| 155 | return 0; | 155 | return 0; |
| @@ -161,7 +161,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb) | |||
| 161 | struct bpf_sock_tuple tuple = {}; | 161 | struct bpf_sock_tuple tuple = {}; |
| 162 | struct bpf_sock *sk; | 162 | struct bpf_sock *sk; |
| 163 | 163 | ||
| 164 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 164 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 165 | bpf_sk_release(sk); | 165 | bpf_sk_release(sk); |
| 166 | return 0; | 166 | return 0; |
| 167 | } | 167 | } |
| @@ -169,7 +169,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb) | |||
| 169 | void lookup_no_release(struct __sk_buff *skb) | 169 | void lookup_no_release(struct __sk_buff *skb) |
| 170 | { | 170 | { |
| 171 | struct bpf_sock_tuple tuple = {}; | 171 | struct bpf_sock_tuple tuple = {}; |
| 172 | bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 172 | bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | SEC("fail_no_release_subcall") | 175 | SEC("fail_no_release_subcall") |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 550b7e46bf4a..df6f751cc1e8 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
| @@ -8576,7 +8576,7 @@ static struct bpf_test tests[] = { | |||
| 8576 | BPF_JMP_IMM(BPF_JA, 0, 0, -7), | 8576 | BPF_JMP_IMM(BPF_JA, 0, 0, -7), |
| 8577 | }, | 8577 | }, |
| 8578 | .fixup_map_hash_8b = { 4 }, | 8578 | .fixup_map_hash_8b = { 4 }, |
| 8579 | .errstr = "R0 invalid mem access 'inv'", | 8579 | .errstr = "unbounded min value", |
| 8580 | .result = REJECT, | 8580 | .result = REJECT, |
| 8581 | }, | 8581 | }, |
| 8582 | { | 8582 | { |
| @@ -10547,7 +10547,7 @@ static struct bpf_test tests[] = { | |||
| 10547 | "check deducing bounds from const, 5", | 10547 | "check deducing bounds from const, 5", |
| 10548 | .insns = { | 10548 | .insns = { |
| 10549 | BPF_MOV64_IMM(BPF_REG_0, 0), | 10549 | BPF_MOV64_IMM(BPF_REG_0, 0), |
| 10550 | BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), | 10550 | BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1), |
| 10551 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), | 10551 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), |
| 10552 | BPF_EXIT_INSN(), | 10552 | BPF_EXIT_INSN(), |
| 10553 | }, | 10553 | }, |
| @@ -14230,7 +14230,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, | |||
| 14230 | 14230 | ||
| 14231 | reject_from_alignment = fd_prog < 0 && | 14231 | reject_from_alignment = fd_prog < 0 && |
| 14232 | (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) && | 14232 | (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) && |
| 14233 | strstr(bpf_vlog, "Unknown alignment."); | 14233 | strstr(bpf_vlog, "misaligned"); |
| 14234 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | 14234 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
| 14235 | if (reject_from_alignment) { | 14235 | if (reject_from_alignment) { |
| 14236 | printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n", | 14236 | printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n", |
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index e1473234968d..c9a2abf8be1b 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
| @@ -2731,9 +2731,14 @@ TEST(syscall_restart) | |||
| 2731 | ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); | 2731 | ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); |
| 2732 | ASSERT_EQ(true, WIFSTOPPED(status)); | 2732 | ASSERT_EQ(true, WIFSTOPPED(status)); |
| 2733 | ASSERT_EQ(SIGSTOP, WSTOPSIG(status)); | 2733 | ASSERT_EQ(SIGSTOP, WSTOPSIG(status)); |
| 2734 | /* Verify signal delivery came from parent now. */ | ||
| 2735 | ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); | 2734 | ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); |
| 2736 | EXPECT_EQ(getpid(), info.si_pid); | 2735 | /* |
| 2736 | * There is no siginfo on SIGSTOP any more, so we can't verify | ||
| 2737 | * signal delivery came from parent now (getpid() == info.si_pid). | ||
| 2738 | * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com | ||
| 2739 | * At least verify the SIGSTOP via PTRACE_GETSIGINFO. | ||
| 2740 | */ | ||
| 2741 | EXPECT_EQ(SIGSTOP, info.si_signo); | ||
| 2737 | 2742 | ||
| 2738 | /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */ | 2743 | /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */ |
| 2739 | ASSERT_EQ(0, kill(child_pid, SIGCONT)); | 2744 | ASSERT_EQ(0, kill(child_pid, SIGCONT)); |
