850 files changed, 9329 insertions, 9252 deletions
| @@ -156,6 +156,8 @@ Morten Welinder <welinder@darter.rentec.com> | |||
| 156 | Morten Welinder <welinder@troll.com> | 156 | Morten Welinder <welinder@troll.com> |
| 157 | Mythri P K <mythripk@ti.com> | 157 | Mythri P K <mythripk@ti.com> |
| 158 | Nguyen Anh Quynh <aquynh@gmail.com> | 158 | Nguyen Anh Quynh <aquynh@gmail.com> |
| 159 | Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org> | ||
| 160 | Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org> | ||
| 159 | Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> | 161 | Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> |
| 160 | Patrick Mochel <mochel@digitalimplant.org> | 162 | Patrick Mochel <mochel@digitalimplant.org> |
| 161 | Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com> | 163 | Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com> |
| @@ -224,3 +226,5 @@ Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com> | |||
| 224 | Yusuke Goda <goda.yusuke@renesas.com> | 226 | Yusuke Goda <goda.yusuke@renesas.com> |
| 225 | Gustavo Padovan <gustavo@las.ic.unicamp.br> | 227 | Gustavo Padovan <gustavo@las.ic.unicamp.br> |
| 226 | Gustavo Padovan <padovan@profusion.mobi> | 228 | Gustavo Padovan <padovan@profusion.mobi> |
| 229 | Changbin Du <changbin.du@intel.com> <changbin.du@intel.com> | ||
| 230 | Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com> | ||
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt index b8ca28b60215..7e71c9c1d8e9 100644 --- a/Documentation/accounting/psi.txt +++ b/Documentation/accounting/psi.txt | |||
| @@ -56,12 +56,12 @@ situation from a state where some tasks are stalled but the CPU is | |||
| 56 | still doing productive work. As such, time spent in this subset of the | 56 | still doing productive work. As such, time spent in this subset of the |
| 57 | stall state is tracked separately and exported in the "full" averages. | 57 | stall state is tracked separately and exported in the "full" averages. |
| 58 | 58 | ||
| 59 | The ratios are tracked as recent trends over ten, sixty, and three | 59 | The ratios (in %) are tracked as recent trends over ten, sixty, and |
| 60 | hundred second windows, which gives insight into short term events as | 60 | three hundred second windows, which gives insight into short term events |
| 61 | well as medium and long term trends. The total absolute stall time is | 61 | as well as medium and long term trends. The total absolute stall time |
| 62 | tracked and exported as well, to allow detection of latency spikes | 62 | (in us) is tracked and exported as well, to allow detection of latency |
| 63 | which wouldn't necessarily make a dent in the time averages, or to | 63 | spikes which wouldn't necessarily make a dent in the time averages, |
| 64 | average trends over custom time frames. | 64 | or to average trends over custom time frames. |
| 65 | 65 | ||
| 66 | Cgroup2 interface | 66 | Cgroup2 interface |
| 67 | ================= | 67 | ================= |
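The clarified units above (averages in %, totals in us) describe the files under /proc/pressure/ documented elsewhere in psi.txt. As an illustration only, not part of the change itself, and assuming the standard "some"/"full" line format from the rest of that document, a minimal C reader of one pressure file could look like this::

    /* Minimal sketch: dump one pressure file so the "%" averages and "us"
     * totals described above can be seen.  Path and line format are taken
     * from the existing psi documentation, not from this hunk. */
    #include <stdio.h>

    int main(void)
    {
            /* Lines look like:
             *   some avg10=0.12 avg60=0.34 avg300=0.00 total=123456
             *   full avg10=0.00 avg60=0.00 avg300=0.00 total=7890
             */
            char line[256];
            FILE *f = fopen("/proc/pressure/memory", "r");

            if (!f) {
                    perror("/proc/pressure/memory");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }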
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index 9a60a5d60e38..7313d354f20e 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst | |||
| @@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()`` | |||
| 148 | for the type. The maximum value of ``BTF_INT_BITS()`` is 128. | 148 | for the type. The maximum value of ``BTF_INT_BITS()`` is 128. |
| 149 | 149 | ||
| 150 | The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values | 150 | The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values |
| 151 | for this int. For example, a bitfield struct member has: * btf member bit | 151 | for this int. For example, a bitfield struct member has: |
| 152 | offset 100 from the start of the structure, * btf member pointing to an int | 152 | * btf member bit offset 100 from the start of the structure, |
| 153 | type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4`` | 153 | * btf member pointing to an int type, |
| 154 | * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4`` | ||
| 154 | 155 | ||
| 155 | Then in the struct memory layout, this member will occupy ``4`` bits starting | 156 | Then in the struct memory layout, this member will occupy ``4`` bits starting |
| 156 | from bits ``100 + 2 = 102``. | 157 | from bits ``100 + 2 = 102``. |
| 157 | 158 | ||
| 158 | Alternatively, the bitfield struct member can be the following to access the | 159 | Alternatively, the bitfield struct member can be the following to access the |
| 159 | same bits as the above: | 160 | same bits as the above: |
| 160 | |||
| 161 | * btf member bit offset 102, | 161 | * btf member bit offset 102, |
| 162 | * btf member pointing to an int type, | 162 | * btf member pointing to an int type, |
| 163 | * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4`` | 163 | * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4`` |
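To make the arithmetic in the rewrapped bullet list concrete, here is a small standalone C sketch (illustration only, not part of the change) that reproduces the "100 + 2 = 102" calculation for the first encoding described above::

    /* The member's struct-relative bit offset plus the int type's
     * BTF_INT_OFFSET() gives the first occupied bit; BTF_INT_BITS()
     * gives the width. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int member_bit_offset = 100; /* from the struct member */
            unsigned int int_offset = 2;          /* BTF_INT_OFFSET() of the int */
            unsigned int int_bits = 4;            /* BTF_INT_BITS() of the int */
            unsigned int first = member_bit_offset + int_offset;

            printf("bitfield occupies bits %u..%u\n",
                   first, first + int_bits - 1);  /* prints 102..105 */
            return 0;
    }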
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml index 365dcf384d73..82dd7582e945 100644 --- a/Documentation/devicetree/bindings/arm/cpus.yaml +++ b/Documentation/devicetree/bindings/arm/cpus.yaml | |||
| @@ -228,7 +228,7 @@ patternProperties: | |||
| 228 | - renesas,r9a06g032-smp | 228 | - renesas,r9a06g032-smp |
| 229 | - rockchip,rk3036-smp | 229 | - rockchip,rk3036-smp |
| 230 | - rockchip,rk3066-smp | 230 | - rockchip,rk3066-smp |
| 231 | - socionext,milbeaut-m10v-smp | 231 | - socionext,milbeaut-m10v-smp |
| 232 | - ste,dbx500-smp | 232 | - ste,dbx500-smp |
| 233 | 233 | ||
| 234 | cpu-release-addr: | 234 | cpu-release-addr: |
diff --git a/Documentation/devicetree/bindings/hwmon/adc128d818.txt b/Documentation/devicetree/bindings/hwmon/adc128d818.txt index 08bab0e94d25..d0ae46d7bac3 100644 --- a/Documentation/devicetree/bindings/hwmon/adc128d818.txt +++ b/Documentation/devicetree/bindings/hwmon/adc128d818.txt | |||
| @@ -26,7 +26,7 @@ Required node properties: | |||
| 26 | 26 | ||
| 27 | Optional node properties: | 27 | Optional node properties: |
| 28 | 28 | ||
| 29 | - ti,mode: Operation mode (see above). | 29 | - ti,mode: Operation mode (u8) (see above). |
| 30 | 30 | ||
| 31 | 31 | ||
| 32 | Example (operation mode 2): | 32 | Example (operation mode 2): |
| @@ -34,5 +34,5 @@ Example (operation mode 2): | |||
| 34 | adc128d818@1d { | 34 | adc128d818@1d { |
| 35 | compatible = "ti,adc128d818"; | 35 | compatible = "ti,adc128d818"; |
| 36 | reg = <0x1d>; | 36 | reg = <0x1d>; |
| 37 | ti,mode = <2>; | 37 | ti,mode = /bits/ 8 <2>; |
| 38 | }; | 38 | }; |
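The binding update above narrows "ti,mode" to a single byte, which is why the example now uses a /bits/ 8 cell. As a hedged sketch, where the function name and surrounding probe context are hypothetical and only of_property_read_u8() is an existing kernel helper, a driver could read the property like this::

    #include <linux/of.h>

    /* Read the u8 "ti,mode" cell shown in the example above; fall back to
     * operation mode 0 when the optional property is absent. */
    static u8 example_read_ti_mode(const struct device_node *np)
    {
            u8 mode = 0;

            of_property_read_u8(np, "ti,mode", &mode);
            return mode;
    }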
diff --git a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt index dcc8390e0d24..dcc8390e0d24 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt | |||
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt index ee4c32454198..ee4c32454198 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt | |||
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt index bd81a482634f..bd81a482634f 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt | |||
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt index 49df0053347a..49df0053347a 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt | |||
diff --git a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt index 94a425eaa6c7..94a425eaa6c7 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt | |||
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt index bbcb255c3150..93a7469e70d4 100644 --- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt +++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt | |||
| @@ -12,10 +12,15 @@ Required properties: | |||
| 12 | Subnodes: | 12 | Subnodes: |
| 13 | 13 | ||
| 14 | The integrated switch subnode should be specified according to the binding | 14 | The integrated switch subnode should be specified according to the binding |
| 15 | described in dsa/dsa.txt. As the QCA8K switches do not have a N:N mapping of | 15 | described in dsa/dsa.txt. If the QCA8K switch is connected to a SoC's external |
| 16 | port and PHY id, each subnode describing a port needs to have a valid phandle | 16 | mdio-bus, each subnode describing a port needs to have a valid phandle |
| 17 | referencing the internal PHY connected to it. The CPU port of this switch is | 17 | referencing the internal PHY it is connected to. This is because there's no |
| 18 | always port 0. | 18 | N:N mapping of port and PHY id. |
| 19 | |||
| 20 | Don't use mixed external and internal mdio-bus configurations, as this is | ||
| 21 | not supported by the hardware. | ||
| 22 | |||
| 23 | The CPU port of this switch is always port 0. | ||
| 19 | 24 | ||
| 20 | A CPU port node has the following optional node: | 25 | A CPU port node has the following optional node: |
| 21 | 26 | ||
| @@ -31,8 +36,9 @@ For QCA8K the 'fixed-link' sub-node supports only the following properties: | |||
| 31 | - 'full-duplex' (boolean, optional), to indicate that full duplex is | 36 | - 'full-duplex' (boolean, optional), to indicate that full duplex is |
| 32 | used. When absent, half duplex is assumed. | 37 | used. When absent, half duplex is assumed. |
| 33 | 38 | ||
| 34 | Example: | 39 | Examples: |
| 35 | 40 | ||
| 41 | for the external mdio-bus configuration: | ||
| 36 | 42 | ||
| 37 | &mdio0 { | 43 | &mdio0 { |
| 38 | phy_port1: phy@0 { | 44 | phy_port1: phy@0 { |
| @@ -55,12 +61,12 @@ Example: | |||
| 55 | reg = <4>; | 61 | reg = <4>; |
| 56 | }; | 62 | }; |
| 57 | 63 | ||
| 58 | switch0@0 { | 64 | switch@10 { |
| 59 | compatible = "qca,qca8337"; | 65 | compatible = "qca,qca8337"; |
| 60 | #address-cells = <1>; | 66 | #address-cells = <1>; |
| 61 | #size-cells = <0>; | 67 | #size-cells = <0>; |
| 62 | 68 | ||
| 63 | reg = <0>; | 69 | reg = <0x10>; |
| 64 | 70 | ||
| 65 | ports { | 71 | ports { |
| 66 | #address-cells = <1>; | 72 | #address-cells = <1>; |
| @@ -108,3 +114,56 @@ Example: | |||
| 108 | }; | 114 | }; |
| 109 | }; | 115 | }; |
| 110 | }; | 116 | }; |
| 117 | |||
| 118 | for the internal master mdio-bus configuration: | ||
| 119 | |||
| 120 | &mdio0 { | ||
| 121 | switch@10 { | ||
| 122 | compatible = "qca,qca8337"; | ||
| 123 | #address-cells = <1>; | ||
| 124 | #size-cells = <0>; | ||
| 125 | |||
| 126 | reg = <0x10>; | ||
| 127 | |||
| 128 | ports { | ||
| 129 | #address-cells = <1>; | ||
| 130 | #size-cells = <0>; | ||
| 131 | |||
| 132 | port@0 { | ||
| 133 | reg = <0>; | ||
| 134 | label = "cpu"; | ||
| 135 | ethernet = <&gmac1>; | ||
| 136 | phy-mode = "rgmii"; | ||
| 137 | fixed-link { | ||
| 138 | speed = 1000; | ||
| 139 | full-duplex; | ||
| 140 | }; | ||
| 141 | }; | ||
| 142 | |||
| 143 | port@1 { | ||
| 144 | reg = <1>; | ||
| 145 | label = "lan1"; | ||
| 146 | }; | ||
| 147 | |||
| 148 | port@2 { | ||
| 149 | reg = <2>; | ||
| 150 | label = "lan2"; | ||
| 151 | }; | ||
| 152 | |||
| 153 | port@3 { | ||
| 154 | reg = <3>; | ||
| 155 | label = "lan3"; | ||
| 156 | }; | ||
| 157 | |||
| 158 | port@4 { | ||
| 159 | reg = <4>; | ||
| 160 | label = "lan4"; | ||
| 161 | }; | ||
| 162 | |||
| 163 | port@5 { | ||
| 164 | reg = <5>; | ||
| 165 | label = "wan"; | ||
| 166 | }; | ||
| 167 | }; | ||
| 168 | }; | ||
| 169 | }; | ||
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt index 742cb470595b..bcfb13194f16 100644 --- a/Documentation/devicetree/bindings/serial/mtk-uart.txt +++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt | |||
| @@ -16,6 +16,7 @@ Required properties: | |||
| 16 | * "mediatek,mt8127-uart" for MT8127 compatible UARTS | 16 | * "mediatek,mt8127-uart" for MT8127 compatible UARTS |
| 17 | * "mediatek,mt8135-uart" for MT8135 compatible UARTS | 17 | * "mediatek,mt8135-uart" for MT8135 compatible UARTS |
| 18 | * "mediatek,mt8173-uart" for MT8173 compatible UARTS | 18 | * "mediatek,mt8173-uart" for MT8173 compatible UARTS |
| 19 | * "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS | ||
| 19 | * "mediatek,mt6577-uart" for MT6577 and all of the above | 20 | * "mediatek,mt6577-uart" for MT6577 and all of the above |
| 20 | 21 | ||
| 21 | - reg: The base address of the UART register bank. | 22 | - reg: The base address of the UART register bank. |
diff --git a/Documentation/filesystems/mount_api.txt b/Documentation/filesystems/mount_api.txt index 944d1965e917..00ff0cfccfa7 100644 --- a/Documentation/filesystems/mount_api.txt +++ b/Documentation/filesystems/mount_api.txt | |||
| @@ -12,11 +12,13 @@ CONTENTS | |||
| 12 | 12 | ||
| 13 | (4) Filesystem context security. | 13 | (4) Filesystem context security. |
| 14 | 14 | ||
| 15 | (5) VFS filesystem context operations. | 15 | (5) VFS filesystem context API. |
| 16 | 16 | ||
| 17 | (6) Parameter description. | 17 | (6) Superblock creation helpers. |
| 18 | 18 | ||
| 19 | (7) Parameter helper functions. | 19 | (7) Parameter description. |
| 20 | |||
| 21 | (8) Parameter helper functions. | ||
| 20 | 22 | ||
| 21 | 23 | ||
| 22 | ======== | 24 | ======== |
| @@ -41,12 +43,15 @@ The creation of new mounts is now to be done in a multistep process: | |||
| 41 | 43 | ||
| 42 | (7) Destroy the context. | 44 | (7) Destroy the context. |
| 43 | 45 | ||
| 44 | To support this, the file_system_type struct gains a new field: | 46 | To support this, the file_system_type struct gains two new fields: |
| 45 | 47 | ||
| 46 | int (*init_fs_context)(struct fs_context *fc); | 48 | int (*init_fs_context)(struct fs_context *fc); |
| 49 | const struct fs_parameter_description *parameters; | ||
| 47 | 50 | ||
| 48 | which is invoked to set up the filesystem-specific parts of a filesystem | 51 | The first is invoked to set up the filesystem-specific parts of a filesystem |
| 49 | context, including the additional space. | 52 | context, including the additional space, and the second points to the |
| 53 | parameter description for validation at registration time and querying by a | ||
| 54 | future system call. | ||
| 50 | 55 | ||
| 51 | Note that security initialisation is done *after* the filesystem is called so | 56 | Note that security initialisation is done *after* the filesystem is called so |
| 52 | that the namespaces may be adjusted first. | 57 | that the namespaces may be adjusted first. |
| @@ -73,9 +78,9 @@ context. This is represented by the fs_context structure: | |||
| 73 | void *s_fs_info; | 78 | void *s_fs_info; |
| 74 | unsigned int sb_flags; | 79 | unsigned int sb_flags; |
| 75 | unsigned int sb_flags_mask; | 80 | unsigned int sb_flags_mask; |
| 81 | unsigned int s_iflags; | ||
| 82 | unsigned int lsm_flags; | ||
| 76 | enum fs_context_purpose purpose:8; | 83 | enum fs_context_purpose purpose:8; |
| 77 | bool sloppy:1; | ||
| 78 | bool silent:1; | ||
| 79 | ... | 84 | ... |
| 80 | }; | 85 | }; |
| 81 | 86 | ||
| @@ -141,6 +146,10 @@ The fs_context fields are as follows: | |||
| 141 | 146 | ||
| 142 | Which bits SB_* flags are to be set/cleared in super_block::s_flags. | 147 | Which bits SB_* flags are to be set/cleared in super_block::s_flags. |
| 143 | 148 | ||
| 149 | (*) unsigned int s_iflags | ||
| 150 | |||
| 151 | These will be bitwise-OR'd with s->s_iflags when a superblock is created. | ||
| 152 | |||
| 144 | (*) enum fs_context_purpose | 153 | (*) enum fs_context_purpose |
| 145 | 154 | ||
| 146 | This indicates the purpose for which the context is intended. The | 155 | This indicates the purpose for which the context is intended. The |
| @@ -150,17 +159,6 @@ The fs_context fields are as follows: | |||
| 150 | FS_CONTEXT_FOR_SUBMOUNT -- New automatic submount of extant mount | 159 | FS_CONTEXT_FOR_SUBMOUNT -- New automatic submount of extant mount |
| 151 | FS_CONTEXT_FOR_RECONFIGURE -- Change an existing mount | 160 | FS_CONTEXT_FOR_RECONFIGURE -- Change an existing mount |
| 152 | 161 | ||
| 153 | (*) bool sloppy | ||
| 154 | (*) bool silent | ||
| 155 | |||
| 156 | These are set if the sloppy or silent mount options are given. | ||
| 157 | |||
| 158 | [NOTE] sloppy is probably unnecessary when userspace passes over one | ||
| 159 | option at a time since the error can just be ignored if userspace deems it | ||
| 160 | to be unimportant. | ||
| 161 | |||
| 162 | [NOTE] silent is probably redundant with sb_flags & SB_SILENT. | ||
| 163 | |||
| 164 | The mount context is created by calling vfs_new_fs_context() or | 162 | The mount context is created by calling vfs_new_fs_context() or |
| 165 | vfs_dup_fs_context() and is destroyed with put_fs_context(). Note that the | 163 | vfs_dup_fs_context() and is destroyed with put_fs_context(). Note that the |
| 166 | structure is not refcounted. | 164 | structure is not refcounted. |
| @@ -342,28 +340,47 @@ number of operations used by the new mount code for this purpose: | |||
| 342 | It should return 0 on success or a negative error code on failure. | 340 | It should return 0 on success or a negative error code on failure. |
| 343 | 341 | ||
| 344 | 342 | ||
| 345 | ================================= | 343 | ========================== |
| 346 | VFS FILESYSTEM CONTEXT OPERATIONS | 344 | VFS FILESYSTEM CONTEXT API |
| 347 | ================================= | 345 | ========================== |
| 348 | 346 | ||
| 349 | There are four operations for creating a filesystem context and | 347 | There are four operations for creating a filesystem context and one for |
| 350 | one for destroying a context: | 348 | destroying a context: |
| 351 | 349 | ||
| 352 | (*) struct fs_context *vfs_new_fs_context(struct file_system_type *fs_type, | 350 | (*) struct fs_context *fs_context_for_mount( |
| 353 | struct dentry *reference, | 351 | struct file_system_type *fs_type, |
| 354 | unsigned int sb_flags, | 352 | unsigned int sb_flags); |
| 355 | unsigned int sb_flags_mask, | ||
| 356 | enum fs_context_purpose purpose); | ||
| 357 | 353 | ||
| 358 | Create a filesystem context for a given filesystem type and purpose. This | 354 | Allocate a filesystem context for the purpose of setting up a new mount, |
| 359 | allocates the filesystem context, sets the superblock flags, initialises | 355 | whether that be with a new superblock or sharing an existing one. This |
| 360 | the security and calls fs_type->init_fs_context() to initialise the | 356 | sets the superblock flags, initialises the security and calls |
| 361 | filesystem private data. | 357 | fs_type->init_fs_context() to initialise the filesystem private data. |
| 362 | 358 | ||
| 363 | reference can be NULL or it may indicate the root dentry of a superblock | 359 | fs_type specifies the filesystem type that will manage the context and |
| 364 | that is going to be reconfigured (FS_CONTEXT_FOR_RECONFIGURE) or | 360 | sb_flags presets the superblock flags stored therein. |
| 365 | the automount point that triggered a submount (FS_CONTEXT_FOR_SUBMOUNT). | 361 | |
| 366 | This is provided as a source of namespace information. | 362 | (*) struct fs_context *fs_context_for_reconfigure( |
| 363 | struct dentry *dentry, | ||
| 364 | unsigned int sb_flags, | ||
| 365 | unsigned int sb_flags_mask); | ||
| 366 | |||
| 367 | Allocate a filesystem context for the purpose of reconfiguring an | ||
| 368 | existing superblock. dentry provides a reference to the superblock to be | ||
| 369 | configured. sb_flags and sb_flags_mask indicate which superblock flags | ||
| 370 | need changing and to what. | ||
| 371 | |||
| 372 | (*) struct fs_context *fs_context_for_submount( | ||
| 373 | struct file_system_type *fs_type, | ||
| 374 | struct dentry *reference); | ||
| 375 | |||
| 376 | Allocate a filesystem context for the purpose of creating a new mount for | ||
| 377 | an automount point or other derived superblock. fs_type specifies the | ||
| 378 | filesystem type that will manage the context and the reference dentry | ||
| 379 | supplies the parameters. Namespaces are also propagated from the | ||
| 380 | reference dentry's superblock. | ||
| 381 | |||
| 382 | Note that it's not a requirement that the reference dentry be of the same | ||
| 383 | filesystem type as fs_type. | ||
| 367 | 384 | ||
| 368 | (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc); | 385 | (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc); |
| 369 | 386 | ||
| @@ -390,20 +407,6 @@ context pointer or a negative error code. | |||
| 390 | For the remaining operations, if an error occurs, a negative error code will be | 407 | For the remaining operations, if an error occurs, a negative error code will be |
| 391 | returned. | 408 | returned. |
| 392 | 409 | ||
| 393 | (*) int vfs_get_tree(struct fs_context *fc); | ||
| 394 | |||
| 395 | Get or create the mountable root and superblock, using the parameters in | ||
| 396 | the filesystem context to select/configure the superblock. This invokes | ||
| 397 | the ->validate() op and then the ->get_tree() op. | ||
| 398 | |||
| 399 | [NOTE] ->validate() could perhaps be rolled into ->get_tree() and | ||
| 400 | ->reconfigure(). | ||
| 401 | |||
| 402 | (*) struct vfsmount *vfs_create_mount(struct fs_context *fc); | ||
| 403 | |||
| 404 | Create a mount given the parameters in the specified filesystem context. | ||
| 405 | Note that this does not attach the mount to anything. | ||
| 406 | |||
| 407 | (*) int vfs_parse_fs_param(struct fs_context *fc, | 410 | (*) int vfs_parse_fs_param(struct fs_context *fc, |
| 408 | struct fs_parameter *param); | 411 | struct fs_parameter *param); |
| 409 | 412 | ||
| @@ -432,17 +435,80 @@ returned. | |||
| 432 | clear the pointer, but then becomes responsible for disposing of the | 435 | clear the pointer, but then becomes responsible for disposing of the |
| 433 | object. | 436 | object. |
| 434 | 437 | ||
| 435 | (*) int vfs_parse_fs_string(struct fs_context *fc, char *key, | 438 | (*) int vfs_parse_fs_string(struct fs_context *fc, const char *key, |
| 436 | const char *value, size_t v_size); | 439 | const char *value, size_t v_size); |
| 437 | 440 | ||
| 438 | A wrapper around vfs_parse_fs_param() that just passes a constant string. | 441 | A wrapper around vfs_parse_fs_param() that copies the value string it is |
| 442 | passed. | ||
| 439 | 443 | ||
| 440 | (*) int generic_parse_monolithic(struct fs_context *fc, void *data); | 444 | (*) int generic_parse_monolithic(struct fs_context *fc, void *data); |
| 441 | 445 | ||
| 442 | Parse a sys_mount() data page, assuming the form to be a text list | 446 | Parse a sys_mount() data page, assuming the form to be a text list |
| 443 | consisting of key[=val] options separated by commas. Each item in the | 447 | consisting of key[=val] options separated by commas. Each item in the |
| 444 | list is passed to vfs_mount_option(). This is the default when the | 448 | list is passed to vfs_mount_option(). This is the default when the |
| 445 | ->parse_monolithic() operation is NULL. | 449 | ->parse_monolithic() method is NULL. |
| 450 | |||
| 451 | (*) int vfs_get_tree(struct fs_context *fc); | ||
| 452 | |||
| 453 | Get or create the mountable root and superblock, using the parameters in | ||
| 454 | the filesystem context to select/configure the superblock. This invokes | ||
| 455 | the ->get_tree() method. | ||
| 456 | |||
| 457 | (*) struct vfsmount *vfs_create_mount(struct fs_context *fc); | ||
| 458 | |||
| 459 | Create a mount given the parameters in the specified filesystem context. | ||
| 460 | Note that this does not attach the mount to anything. | ||
| 461 | |||
| 462 | |||
| 463 | =========================== | ||
| 464 | SUPERBLOCK CREATION HELPERS | ||
| 465 | =========================== | ||
| 466 | |||
| 467 | A number of VFS helpers are available for use by filesystems for the creation | ||
| 468 | or looking up of superblocks. | ||
| 469 | |||
| 470 | (*) struct super_block * | ||
| 471 | sget_fc(struct fs_context *fc, | ||
| 472 | int (*test)(struct super_block *sb, struct fs_context *fc), | ||
| 473 | int (*set)(struct super_block *sb, struct fs_context *fc)); | ||
| 474 | |||
| 475 | This is the core routine. If test is non-NULL, it searches for an | ||
| 476 | existing superblock matching the criteria held in the fs_context, using | ||
| 477 | the test function to match them. If no match is found, a new superblock | ||
| 478 | is created and the set function is called to set it up. | ||
| 479 | |||
| 480 | Prior to the set function being called, fc->s_fs_info will be transferred | ||
| 481 | to sb->s_fs_info - and fc->s_fs_info will be cleared if set returns | ||
| 482 | success (ie. 0). | ||
| 483 | |||
| 484 | The following helpers all wrap sget_fc(): | ||
| 485 | |||
| 486 | (*) int vfs_get_super(struct fs_context *fc, | ||
| 487 | enum vfs_get_super_keying keying, | ||
| 488 | int (*fill_super)(struct super_block *sb, | ||
| 489 | struct fs_context *fc)) | ||
| 490 | |||
| 491 | This creates/looks up a deviceless superblock. The keying indicates how | ||
| 492 | many superblocks of this type may exist and in what manner they may be | ||
| 493 | shared: | ||
| 494 | |||
| 495 | (1) vfs_get_single_super | ||
| 496 | |||
| 497 | Only one such superblock may exist in the system. Any further | ||
| 498 | attempt to get a new superblock gets this one (and any parameter | ||
| 499 | differences are ignored). | ||
| 500 | |||
| 501 | (2) vfs_get_keyed_super | ||
| 502 | |||
| 503 | Multiple superblocks of this type may exist and they're keyed on | ||
| 504 | their s_fs_info pointer (for example this may refer to a | ||
| 505 | namespace). | ||
| 506 | |||
| 507 | (3) vfs_get_independent_super | ||
| 508 | |||
| 509 | Multiple independent superblocks of this type may exist. This | ||
| 510 | function never matches an existing one and always creates a new | ||
| 511 | one. | ||
| 446 | 512 | ||
| 447 | 513 | ||
| 448 | ===================== | 514 | ===================== |
| @@ -454,35 +520,22 @@ There's a core description struct that links everything together: | |||
| 454 | 520 | ||
| 455 | struct fs_parameter_description { | 521 | struct fs_parameter_description { |
| 456 | const char name[16]; | 522 | const char name[16]; |
| 457 | u8 nr_params; | ||
| 458 | u8 nr_alt_keys; | ||
| 459 | u8 nr_enums; | ||
| 460 | bool ignore_unknown; | ||
| 461 | bool no_source; | ||
| 462 | const char *const *keys; | ||
| 463 | const struct constant_table *alt_keys; | ||
| 464 | const struct fs_parameter_spec *specs; | 523 | const struct fs_parameter_spec *specs; |
| 465 | const struct fs_parameter_enum *enums; | 524 | const struct fs_parameter_enum *enums; |
| 466 | }; | 525 | }; |
| 467 | 526 | ||
| 468 | For example: | 527 | For example: |
| 469 | 528 | ||
| 470 | enum afs_param { | 529 | enum { |
| 471 | Opt_autocell, | 530 | Opt_autocell, |
| 472 | Opt_bar, | 531 | Opt_bar, |
| 473 | Opt_dyn, | 532 | Opt_dyn, |
| 474 | Opt_foo, | 533 | Opt_foo, |
| 475 | Opt_source, | 534 | Opt_source, |
| 476 | nr__afs_params | ||
| 477 | }; | 535 | }; |
| 478 | 536 | ||
| 479 | static const struct fs_parameter_description afs_fs_parameters = { | 537 | static const struct fs_parameter_description afs_fs_parameters = { |
| 480 | .name = "kAFS", | 538 | .name = "kAFS", |
| 481 | .nr_params = nr__afs_params, | ||
| 482 | .nr_alt_keys = ARRAY_SIZE(afs_param_alt_keys), | ||
| 483 | .nr_enums = ARRAY_SIZE(afs_param_enums), | ||
| 484 | .keys = afs_param_keys, | ||
| 485 | .alt_keys = afs_param_alt_keys, | ||
| 486 | .specs = afs_param_specs, | 539 | .specs = afs_param_specs, |
| 487 | .enums = afs_param_enums, | 540 | .enums = afs_param_enums, |
| 488 | }; | 541 | }; |
| @@ -494,28 +547,24 @@ The members are as follows: | |||
| 494 | The name to be used in error messages generated by the parse helper | 547 | The name to be used in error messages generated by the parse helper |
| 495 | functions. | 548 | functions. |
| 496 | 549 | ||
| 497 | (2) u8 nr_params; | 550 | (2) const struct fs_parameter_specification *specs; |
| 498 | |||
| 499 | The number of discrete parameter identifiers. This indicates the number | ||
| 500 | of elements in the ->types[] array and also limits the values that may be | ||
| 501 | used in the values that the ->keys[] array maps to. | ||
| 502 | |||
| 503 | It is expected that, for example, two parameters that are related, say | ||
| 504 | "acl" and "noacl" with have the same ID, but will be flagged to indicate | ||
| 505 | that one is the inverse of the other. The value can then be picked out | ||
| 506 | from the parse result. | ||
| 507 | 551 | ||
| 508 | (3) const struct fs_parameter_specification *specs; | 552 | Table of parameter specifications, terminated with a null entry, where the |
| 553 | entries are of type: | ||
| 509 | 554 | ||
| 510 | Table of parameter specifications, where the entries are of type: | 555 | struct fs_parameter_spec { |
| 511 | 556 | const char *name; | |
| 512 | struct fs_parameter_type { | 557 | u8 opt; |
| 513 | enum fs_parameter_spec type:8; | 558 | enum fs_parameter_type type:8; |
| 514 | u8 flags; | 559 | unsigned short flags; |
| 515 | }; | 560 | }; |
| 516 | 561 | ||
| 517 | and the parameter identifier is the index to the array. 'type' indicates | 562 | The 'name' field is a string to match exactly to the parameter key (no |
| 518 | the desired value type and must be one of: | 563 | wildcards, patterns and no case-independence) and 'opt' is the value that |
| 564 | will be returned by the fs_parser() function in the case of a successful | ||
| 565 | match. | ||
| 566 | |||
| 567 | The 'type' field indicates the desired value type and must be one of: | ||
| 519 | 568 | ||
| 520 | TYPE NAME EXPECTED VALUE RESULT IN | 569 | TYPE NAME EXPECTED VALUE RESULT IN |
| 521 | ======================= ======================= ===================== | 570 | ======================= ======================= ===================== |
| @@ -525,85 +574,65 @@ The members are as follows: | |||
| 525 | fs_param_is_u32_octal 32-bit octal int result->uint_32 | 574 | fs_param_is_u32_octal 32-bit octal int result->uint_32 |
| 526 | fs_param_is_u32_hex 32-bit hex int result->uint_32 | 575 | fs_param_is_u32_hex 32-bit hex int result->uint_32 |
| 527 | fs_param_is_s32 32-bit signed int result->int_32 | 576 | fs_param_is_s32 32-bit signed int result->int_32 |
| 577 | fs_param_is_u64 64-bit unsigned int result->uint_64 | ||
| 528 | fs_param_is_enum Enum value name result->uint_32 | 578 | fs_param_is_enum Enum value name result->uint_32 |
| 529 | fs_param_is_string Arbitrary string param->string | 579 | fs_param_is_string Arbitrary string param->string |
| 530 | fs_param_is_blob Binary blob param->blob | 580 | fs_param_is_blob Binary blob param->blob |
| 531 | fs_param_is_blockdev Blockdev path * Needs lookup | 581 | fs_param_is_blockdev Blockdev path * Needs lookup |
| 532 | fs_param_is_path Path * Needs lookup | 582 | fs_param_is_path Path * Needs lookup |
| 533 | fs_param_is_fd File descriptor param->file | 583 | fs_param_is_fd File descriptor result->int_32 |
| 534 | |||
| 535 | And each parameter can be qualified with 'flags': | ||
| 536 | |||
| 537 | fs_param_v_optional The value is optional | ||
| 538 | fs_param_neg_with_no If key name is prefixed with "no", it is false | ||
| 539 | fs_param_neg_with_empty If value is "", it is false | ||
| 540 | fs_param_deprecated The parameter is deprecated. | ||
| 541 | |||
| 542 | For example: | ||
| 543 | |||
| 544 | static const struct fs_parameter_spec afs_param_specs[nr__afs_params] = { | ||
| 545 | [Opt_autocell] = { fs_param_is flag }, | ||
| 546 | [Opt_bar] = { fs_param_is_enum }, | ||
| 547 | [Opt_dyn] = { fs_param_is flag }, | ||
| 548 | [Opt_foo] = { fs_param_is_bool, fs_param_neg_with_no }, | ||
| 549 | [Opt_source] = { fs_param_is_string }, | ||
| 550 | }; | ||
| 551 | 584 | ||
| 552 | Note that if the value is of fs_param_is_bool type, fs_parse() will try | 585 | Note that if the value is of fs_param_is_bool type, fs_parse() will try |
| 553 | to match any string value against "0", "1", "no", "yes", "false", "true". | 586 | to match any string value against "0", "1", "no", "yes", "false", "true". |
| 554 | 587 | ||
| 555 | [!] NOTE that the table must be sorted according to primary key name so | 588 | Each parameter can also be qualified with 'flags': |
| 556 | that ->keys[] is also sorted. | ||
| 557 | |||
| 558 | (4) const char *const *keys; | ||
| 559 | |||
| 560 | Table of primary key names for the parameters. There must be one entry | ||
| 561 | per defined parameter. The table is optional if ->nr_params is 0. The | ||
| 562 | table is just an array of names e.g.: | ||
| 563 | 589 | ||
| 564 | static const char *const afs_param_keys[nr__afs_params] = { | 590 | fs_param_v_optional The value is optional |
| 565 | [Opt_autocell] = "autocell", | 591 | fs_param_neg_with_no result->negated set if key is prefixed with "no" |
| 566 | [Opt_bar] = "bar", | 592 | fs_param_neg_with_empty result->negated set if value is "" |
| 567 | [Opt_dyn] = "dyn", | 593 | fs_param_deprecated The parameter is deprecated. |
| 568 | [Opt_foo] = "foo", | ||
| 569 | [Opt_source] = "source", | ||
| 570 | }; | ||
| 571 | |||
| 572 | [!] NOTE that the table must be sorted such that the table can be searched | ||
| 573 | with bsearch() using strcmp(). This means that the Opt_* values must | ||
| 574 | correspond to the entries in this table. | ||
| 575 | |||
| 576 | (5) const struct constant_table *alt_keys; | ||
| 577 | u8 nr_alt_keys; | ||
| 578 | |||
| 579 | Table of additional key names and their mappings to parameter ID plus the | ||
| 580 | number of elements in the table. This is optional. The table is just an | ||
| 581 | array of { name, integer } pairs, e.g.: | ||
| 582 | 594 | ||
| 583 | static const struct constant_table afs_param_keys[] = { | 595 | These are wrapped with a number of convenience wrappers: |
| 584 | { "baz", Opt_bar }, | 596 | |
| 585 | { "dynamic", Opt_dyn }, | 597 | MACRO SPECIFIES |
| 598 | ======================= =============================================== | ||
| 599 | fsparam_flag() fs_param_is_flag | ||
| 600 | fsparam_flag_no() fs_param_is_flag, fs_param_neg_with_no | ||
| 601 | fsparam_bool() fs_param_is_bool | ||
| 602 | fsparam_u32() fs_param_is_u32 | ||
| 603 | fsparam_u32oct() fs_param_is_u32_octal | ||
| 604 | fsparam_u32hex() fs_param_is_u32_hex | ||
| 605 | fsparam_s32() fs_param_is_s32 | ||
| 606 | fsparam_u64() fs_param_is_u64 | ||
| 607 | fsparam_enum() fs_param_is_enum | ||
| 608 | fsparam_string() fs_param_is_string | ||
| 609 | fsparam_blob() fs_param_is_blob | ||
| 610 | fsparam_bdev() fs_param_is_blockdev | ||
| 611 | fsparam_path() fs_param_is_path | ||
| 612 | fsparam_fd() fs_param_is_fd | ||
| 613 | |||
| 614 | all of which take two arguments, name string and option number - for | ||
| 615 | example: | ||
| 616 | |||
| 617 | static const struct fs_parameter_spec afs_param_specs[] = { | ||
| 618 | fsparam_flag ("autocell", Opt_autocell), | ||
| 619 | fsparam_flag ("dyn", Opt_dyn), | ||
| 620 | fsparam_string ("source", Opt_source), | ||
| 621 | fsparam_flag_no ("foo", Opt_foo), | ||
| 622 | {} | ||
| 586 | }; | 623 | }; |
| 587 | 624 | ||
| 588 | [!] NOTE that the table must be sorted such that strcmp() can be used with | 625 | An additional macro, __fsparam(), is provided that takes an extra pair |
| 589 | bsearch() to search the entries. | 626 | of arguments to specify the type and the flags for anything that doesn't |
| 590 | 627 | match one of the above macros. | |
| 591 | The parameter ID can also be fs_param_key_removed to indicate that a | ||
| 592 | deprecated parameter has been removed and that an error will be given. | ||
| 593 | This differs from fs_param_deprecated where the parameter may still have | ||
| 594 | an effect. | ||
| 595 | |||
| 596 | Further, the behaviour of the parameter may differ when an alternate name | ||
| 597 | is used (for instance with NFS, "v3", "v4.2", etc. are alternate names). | ||
| 598 | 628 | ||
| 599 | (6) const struct fs_parameter_enum *enums; | 629 | (6) const struct fs_parameter_enum *enums; |
| 600 | u8 nr_enums; | ||
| 601 | 630 | ||
| 602 | Table of enum value names to integer mappings and the number of elements | 631 | Table of enum value names to integer mappings, terminated with a null |
| 603 | stored therein. This is of type: | 632 | entry. This is of type: |
| 604 | 633 | ||
| 605 | struct fs_parameter_enum { | 634 | struct fs_parameter_enum { |
| 606 | u8 param_id; | 635 | u8 opt; |
| 607 | char name[14]; | 636 | char name[14]; |
| 608 | u8 value; | 637 | u8 value; |
| 609 | }; | 638 | }; |
| @@ -621,11 +650,6 @@ The members are as follows: | |||
| 621 | try to look the value up in the enum table and the result will be stored | 650 | try to look the value up in the enum table and the result will be stored |
| 622 | in the parse result. | 651 | in the parse result. |
| 623 | 652 | ||
| 624 | (7) bool no_source; | ||
| 625 | |||
| 626 | If this is set, fs_parse() will ignore any "source" parameter and not | ||
| 627 | pass it to the filesystem. | ||
| 628 | |||
| 629 | The parser should be pointed to by the parser pointer in the file_system_type | 653 | The parser should be pointed to by the parser pointer in the file_system_type |
| 630 | struct as this will provide validation on registration (if | 654 | struct as this will provide validation on registration (if |
| 631 | CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from | 655 | CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from |
| @@ -650,9 +674,8 @@ process the parameters it is given. | |||
| 650 | int value; | 674 | int value; |
| 651 | }; | 675 | }; |
| 652 | 676 | ||
| 653 | and it must be sorted such that it can be searched using bsearch() using | 677 | If a match is found, the corresponding value is returned. If a match |
| 654 | strcmp(). If a match is found, the corresponding value is returned. If a | 678 | isn't found, the not_found value is returned instead. |
| 655 | match isn't found, the not_found value is returned instead. | ||
| 656 | 679 | ||
| 657 | (*) bool validate_constant_table(const struct constant_table *tbl, | 680 | (*) bool validate_constant_table(const struct constant_table *tbl, |
| 658 | size_t tbl_size, | 681 | size_t tbl_size, |
| @@ -665,36 +688,36 @@ process the parameters it is given. | |||
| 665 | should just be set to lie inside the low-to-high range. | 688 | should just be set to lie inside the low-to-high range. |
| 666 | 689 | ||
| 667 | If all is good, true is returned. If the table is invalid, errors are | 690 | If all is good, true is returned. If the table is invalid, errors are |
| 668 | logged to dmesg, the stack is dumped and false is returned. | 691 | logged to dmesg and false is returned. |
| 692 | |||
| 693 | (*) bool fs_validate_description(const struct fs_parameter_description *desc); | ||
| 694 | |||
| 695 | This performs some validation checks on a parameter description. It | ||
| 696 | returns true if the description is good and false if it is not. It will | ||
| 697 | log errors to dmesg if validation fails. | ||
| 669 | 698 | ||
| 670 | (*) int fs_parse(struct fs_context *fc, | 699 | (*) int fs_parse(struct fs_context *fc, |
| 671 | const struct fs_param_parser *parser, | 700 | const struct fs_parameter_description *desc, |
| 672 | struct fs_parameter *param, | 701 | struct fs_parameter *param, |
| 673 | struct fs_param_parse_result *result); | 702 | struct fs_parse_result *result); |
| 674 | 703 | ||
| 675 | This is the main interpreter of parameters. It uses the parameter | 704 | This is the main interpreter of parameters. It uses the parameter |
| 676 | description (parser) to look up the name of the parameter to use and to | 705 | description to look up a parameter by key name and to convert that to an |
| 677 | convert that to a parameter ID (stored in result->key). | 706 | option number (which it returns). |
| 678 | 707 | ||
| 679 | If successful, and if the parameter type indicates the result is a | 708 | If successful, and if the parameter type indicates the result is a |
| 680 | boolean, integer or enum type, the value is converted by this function and | 709 | boolean, integer or enum type, the value is converted by this function and |
| 681 | the result stored in result->{boolean,int_32,uint_32}. | 710 | the result stored in result->{boolean,int_32,uint_32,uint_64}. |
| 682 | 711 | ||
| 683 | If a match isn't initially made, the key is prefixed with "no" and no | 712 | If a match isn't initially made, the key is prefixed with "no" and no |
| 684 | value is present then an attempt will be made to look up the key with the | 713 | value is present then an attempt will be made to look up the key with the |
| 685 | prefix removed. If this matches a parameter for which the type has flag | 714 | prefix removed. If this matches a parameter for which the type has flag |
| 686 | fs_param_neg_with_no set, then a match will be made and the value will be | 715 | fs_param_neg_with_no set, then a match will be made and result->negated |
| 687 | set to false/0/NULL. | 716 | will be set to true. |
| 688 | |||
| 689 | If the parameter is successfully matched and, optionally, parsed | ||
| 690 | correctly, 1 is returned. If the parameter isn't matched and | ||
| 691 | parser->ignore_unknown is set, then 0 is returned. Otherwise -EINVAL is | ||
| 692 | returned. | ||
| 693 | |||
| 694 | (*) bool fs_validate_description(const struct fs_parameter_description *desc); | ||
| 695 | 717 | ||
| 696 | This is validates the parameter description. It returns true if the | 718 | If the parameter isn't matched, -ENOPARAM will be returned; if the |
| 697 | description is good and false if it is not. | 719 | parameter is matched, but the value is erroneous, -EINVAL will be |
| 720 | returned; otherwise the parameter's option number will be returned. | ||
| 698 | 721 | ||
| 699 | (*) int fs_lookup_param(struct fs_context *fc, | 722 | (*) int fs_lookup_param(struct fs_context *fc, |
| 700 | struct fs_parameter *value, | 723 | struct fs_parameter *value, |
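Tying the rewritten sections together, the sketch below shows how a filesystem might plug the new-style parameter description and fs_parse() into its ->parse_param() method. It is an illustration only: "myfs" and its options are hypothetical, while the structures, fsparam_*() macros and return-value conventions are the ones documented in the hunks above::

    #include <linux/fs_context.h>
    #include <linux/fs_parser.h>

    enum { Opt_ro_fallback, Opt_cachesize, Opt_source };

    static const struct fs_parameter_spec myfs_param_specs[] = {
            fsparam_flag_no("ro_fallback", Opt_ro_fallback),
            fsparam_u32("cachesize",       Opt_cachesize),
            fsparam_string("source",       Opt_source),
            {}
    };

    static const struct fs_parameter_description myfs_fs_parameters = {
            .name  = "myfs",
            .specs = myfs_param_specs,
    };

    static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
    {
            struct fs_parse_result result;
            int opt;

            /* Returns the option number, -ENOPARAM for an unknown key or
             * -EINVAL for a bad value, as described above. */
            opt = fs_parse(fc, &myfs_fs_parameters, param, &result);
            if (opt < 0)
                    return opt;

            switch (opt) {
            case Opt_ro_fallback:
                    /* result.negated is set when "noro_fallback" was given */
                    break;
            case Opt_cachesize:
                    /* result.uint_32 holds the parsed 32-bit value */
                    break;
            case Opt_source:
                    /* param->string holds the value string */
                    break;
            }
            return 0;
    }

The same description would also be pointed to by the new file_system_type->parameters field so that it can be validated at registration time, as noted earlier in the document.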
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 index d1ee484a787d..ee9984f35868 100644 --- a/Documentation/i2c/busses/i2c-i801 +++ b/Documentation/i2c/busses/i2c-i801 | |||
| @@ -36,6 +36,7 @@ Supported adapters: | |||
| 36 | * Intel Cannon Lake (PCH) | 36 | * Intel Cannon Lake (PCH) |
| 37 | * Intel Cedar Fork (PCH) | 37 | * Intel Cedar Fork (PCH) |
| 38 | * Intel Ice Lake (PCH) | 38 | * Intel Ice Lake (PCH) |
| 39 | * Intel Comet Lake (PCH) | ||
| 39 | Datasheets: Publicly available at the Intel website | 40 | Datasheets: Publicly available at the Intel website |
| 40 | 41 | ||
| 41 | On Intel Patsburg and later chipsets, both the normal host SMBus controller | 42 | On Intel Patsburg and later chipsets, both the normal host SMBus controller |
diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt index f79934225d8d..ca983328976b 100644 --- a/Documentation/lzo.txt +++ b/Documentation/lzo.txt | |||
| @@ -102,9 +102,11 @@ Byte sequences | |||
| 102 | dictionary which is empty, and that it will always be | 102 | dictionary which is empty, and that it will always be |
| 103 | invalid at this place. | 103 | invalid at this place. |
| 104 | 104 | ||
| 105 | 17 : bitstream version. If the first byte is 17, the next byte | 105 | 17 : bitstream version. If the first byte is 17, and compressed |
| 106 | gives the bitstream version (version 1 only). If the first byte | 106 | stream length is at least 5 bytes (length of shortest possible |
| 107 | is not 17, the bitstream version is 0. | 107 | versioned bitstream), the next byte gives the bitstream version |
| 108 | (version 1 only). | ||
| 109 | Otherwise, the bitstream version is 0. | ||
| 108 | 110 | ||
| 109 | 18..21 : copy 0..3 literals | 111 | 18..21 : copy 0..3 literals |
| 110 | state = (byte - 17) = 0..3 [ copy <state> literals ] | 112 | state = (byte - 17) = 0..3 [ copy <state> literals ] |
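The clarified rule above translates directly into code. The following helper is a sketch only (it is not part of the LZO sources); it restates the check that the version byte is meaningful only when the stream is at least 5 bytes long::

    #include <stddef.h>

    /* Return the bitstream version implied by the first bytes of a
     * compressed stream, per the rule described above. */
    static int lzo_bitstream_version(const unsigned char *in, size_t in_len)
    {
            if (in_len >= 5 && in[0] == 17)
                    return in[1];   /* versioned bitstream (version 1 only) */
            return 0;               /* anything else is version 0 */
    }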
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/networking/bpf_flow_dissector.rst new file mode 100644 index 000000000000..b375ae2ec2c4 --- /dev/null +++ b/Documentation/networking/bpf_flow_dissector.rst | |||
| @@ -0,0 +1,126 @@ | |||
| 1 | .. SPDX-License-Identifier: GPL-2.0 | ||
| 2 | |||
| 3 | ================== | ||
| 4 | BPF Flow Dissector | ||
| 5 | ================== | ||
| 6 | |||
| 7 | Overview | ||
| 8 | ======== | ||
| 9 | |||
| 10 | Flow dissector is a routine that parses metadata out of the packets. It's | ||
| 11 | used in various places in the networking subsystem (RFS, flow hash, etc.). | ||
| 12 | |||
| 13 | BPF flow dissector is an attempt to reimplement C-based flow dissector logic | ||
| 14 | in BPF to gain all the benefits of the BPF verifier (namely, limits on the | ||
| 15 | number of instructions and tail calls). | ||
| 16 | |||
| 17 | API | ||
| 18 | === | ||
| 19 | |||
| 20 | BPF flow dissector programs operate on an ``__sk_buff``. However, only a | ||
| 21 | limited set of fields is allowed: ``data``, ``data_end`` and ``flow_keys``. | ||
| 22 | ``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input | ||
| 23 | and output arguments. | ||
| 24 | |||
| 25 | The inputs are: | ||
| 26 | * ``nhoff`` - initial offset of the networking header | ||
| 27 | * ``thoff`` - initial offset of the transport header, initialized to nhoff | ||
| 28 | * ``n_proto`` - L3 protocol type, parsed out of L2 header | ||
| 29 | |||
| 30 | Flow dissector BPF program should fill out the rest of the ``struct | ||
| 31 | bpf_flow_keys`` fields. Input arguments ``nhoff/thoff/n_proto`` should also | ||
| 32 | be adjusted accordingly. | ||
| 33 | |||
| 34 | The return code of the BPF program is either BPF_OK to indicate successful | ||
| 35 | dissection, or BPF_DROP to indicate parsing error. | ||
| 36 | |||
| 37 | __sk_buff->data | ||
| 38 | =============== | ||
| 39 | |||
| 40 | In the VLAN-less case, this is what the initial state of the BPF flow | ||
| 41 | dissector looks like:: | ||
| 42 | |||
| 43 | +------+------+------------+-----------+ | ||
| 44 | | DMAC | SMAC | ETHER_TYPE | L3_HEADER | | ||
| 45 | +------+------+------------+-----------+ | ||
| 46 | ^ | ||
| 47 | | | ||
| 48 | +-- flow dissector starts here | ||
| 49 | |||
| 50 | |||
| 51 | .. code:: c | ||
| 52 | |||
| 53 | skb->data + flow_keys->nhoff point to the first byte of L3_HEADER | ||
| 54 | flow_keys->thoff = nhoff | ||
| 55 | flow_keys->n_proto = ETHER_TYPE | ||
| 56 | |||
| 57 | In case of VLAN, flow dissector can be called with the two different states. | ||
| 58 | |||
| 59 | Pre-VLAN parsing:: | ||
| 60 | |||
| 61 | +------+------+------+-----+-----------+-----------+ | ||
| 62 | | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER | | ||
| 63 | +------+------+------+-----+-----------+-----------+ | ||
| 64 | ^ | ||
| 65 | | | ||
| 66 | +-- flow dissector starts here | ||
| 67 | |||
| 68 | .. code:: c | ||
| 69 | |||
| 70 | skb->data + flow_keys->nhoff point to the first byte of TCI | ||
| 71 | flow_keys->thoff = nhoff | ||
| 72 | flow_keys->n_proto = TPID | ||
| 73 | |||
| 74 | Please note that TPID can be 802.1AD and, hence, BPF program would | ||
| 75 | have to parse VLAN information twice for double tagged packets. | ||
| 76 | |||
| 77 | |||
| 78 | Post-VLAN parsing:: | ||
| 79 | |||
| 80 | +------+------+------+-----+-----------+-----------+ | ||
| 81 | | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER | | ||
| 82 | +------+------+------+-----+-----------+-----------+ | ||
| 83 | ^ | ||
| 84 | | | ||
| 85 | +-- flow dissector starts here | ||
| 86 | |||
| 87 | .. code:: c | ||
| 88 | |||
| 89 | skb->data + flow_keys->nhoff point to the first byte of L3_HEADER | ||
| 90 | flow_keys->thoff = nhoff | ||
| 91 | flow_keys->n_proto = ETHER_TYPE | ||
| 92 | |||
| 93 | In this case VLAN information has been processed before the flow dissector | ||
| 94 | and BPF flow dissector is not required to handle it. | ||
| 95 | |||
| 96 | |||
| 97 | The takeaway here is as follows: BPF flow dissector program can be called with | ||
| 98 | the optional VLAN header and should gracefully handle both cases: when single | ||
| 99 | or double VLAN is present and when it is not present. The same program | ||
| 100 | can be called in either case and must be written carefully to | ||
| 101 | handle both. | ||
| 102 | |||
| 103 | |||
| 104 | Reference Implementation | ||
| 105 | ======================== | ||
| 106 | |||
| 107 | See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference | ||
| 108 | implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]`` | ||
| 109 | for the loader. bpftool can be used to load a BPF flow dissector program as well. | ||
| 110 | |||
| 111 | The reference implementation is organized as follows: | ||
| 112 | * ``jmp_table`` map that contains sub-programs for each supported L3 protocol | ||
| 113 | * ``_dissect`` routine - entry point; it does input ``n_proto`` parsing and | ||
| 114 | does ``bpf_tail_call`` to the appropriate L3 handler | ||
| 115 | |||
| 116 | Since BPF at this point doesn't support looping (or any jumping back), | ||
| 117 | jmp_table is used instead to handle multiple levels of encapsulation (and | ||
| 118 | IPv6 options). | ||
| 119 | |||
| 120 | |||
| 121 | Current Limitations | ||
| 122 | =================== | ||
| 123 | BPF flow dissector doesn't support exporting all the metadata that the in-kernel | ||
| 124 | C-based implementation can export. A notable example is single VLAN (802.1Q) | ||
| 125 | and double VLAN (802.1AD) tags. Please refer to the ``struct bpf_flow_keys`` | ||
| 126 | for the set of information that can currently be exported from the BPF context. | ||
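As a companion to the new document, here is a deliberately minimal flow dissector sketch. It is not the reference implementation (see bpf_flow.c for that); it assumes the selftests' helper headers and "flow_dissector" section naming, handles only the plain non-VLAN IPv4 case described in the first diagram, and drops everything else::

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include "bpf_helpers.h"
    #include "bpf_endian.h"

    SEC("flow_dissector")
    int dissect(struct __sk_buff *skb)
    {
            struct bpf_flow_keys *keys = skb->flow_keys;
            void *data = (void *)(long)skb->data;
            void *data_end = (void *)(long)skb->data_end;
            struct iphdr *iph;

            /* Only the VLAN-less IPv4 layout shown above is handled here. */
            if (keys->n_proto != bpf_htons(ETH_P_IP))
                    return BPF_DROP;

            iph = data + keys->nhoff;
            if ((void *)(iph + 1) > data_end)
                    return BPF_DROP;

            /* Fill out the output fields and move thoff past the L3 header. */
            keys->addr_proto = ETH_P_IP;
            keys->ipv4_src = iph->saddr;
            keys->ipv4_dst = iph->daddr;
            keys->ip_proto = iph->protocol;
            keys->thoff += iph->ihl * 4;

            return BPF_OK;
    }

    char _license[] SEC("license") = "GPL";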
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 5449149be496..984e68f9e026 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst | |||
| @@ -9,6 +9,7 @@ Contents: | |||
| 9 | netdev-FAQ | 9 | netdev-FAQ |
| 10 | af_xdp | 10 | af_xdp |
| 11 | batman-adv | 11 | batman-adv |
| 12 | bpf_flow_dissector | ||
| 12 | can | 13 | can |
| 13 | can_ucan_protocol | 14 | can_ucan_protocol |
| 14 | device_drivers/freescale/dpaa2/index | 15 | device_drivers/freescale/dpaa2/index |
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst index 18c1415e7bfa..ace56204dd03 100644 --- a/Documentation/networking/msg_zerocopy.rst +++ b/Documentation/networking/msg_zerocopy.rst | |||
| @@ -50,7 +50,7 @@ the excellent reporting over at LWN.net or read the original code. | |||
| 50 | 50 | ||
| 51 | patchset | 51 | patchset |
| 52 | [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY | 52 | [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY |
| 53 | http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com | 53 | https://lkml.kernel.org/netdev/20170803202945.70750-1-willemdebruijn.kernel@gmail.com |
| 54 | 54 | ||
| 55 | 55 | ||
| 56 | Interface | 56 | Interface |
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst index 0ac5fa77f501..8c7a713cf657 100644 --- a/Documentation/networking/netdev-FAQ.rst +++ b/Documentation/networking/netdev-FAQ.rst | |||
| @@ -131,6 +131,19 @@ it to the maintainer to figure out what is the most recent and current | |||
| 131 | version that should be applied. If there is any doubt, the maintainer | 131 | version that should be applied. If there is any doubt, the maintainer |
| 132 | will reply and ask what should be done. | 132 | will reply and ask what should be done. |
| 133 | 133 | ||
| 134 | Q: I made changes to only a few patches in a patch series; should I resend only those changes? | ||
| 135 | ---------------------------------------------------------------------------------------------------- | ||
| 136 | A: No, please resend the entire patch series and make sure you do number your | ||
| 137 | patches such that it is clear this is the latest and greatest set of patches | ||
| 138 | that can be applied. | ||
| 139 | |||
| 140 | Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do? | ||
| 141 | ------------------------------------------------------------------------------------------------------------------------------------------- | ||
| 142 | A: There is no revert possible; once it is pushed out, it stays like that. | ||
| 143 | Please send incremental versions on top of what has been merged in order to fix | ||
| 144 | the patches so they end up the way they would have looked if your latest patch | ||
| 145 | series had been merged. | ||
| 146 | |||
| 134 | Q: How can I tell what patches are queued up for backporting to the various stable releases? | 147 | Q: How can I tell what patches are queued up for backporting to the various stable releases? |
| 135 | -------------------------------------------------------------------------------------------- | 148 | -------------------------------------------------------------------------------------------- |
| 136 | A: Normally Greg Kroah-Hartman collects stable commits himself, but for | 149 | A: Normally Greg Kroah-Hartman collects stable commits himself, but for |
diff --git a/Documentation/networking/nf_flowtable.txt b/Documentation/networking/nf_flowtable.txt index 54128c50d508..ca2136c76042 100644 --- a/Documentation/networking/nf_flowtable.txt +++ b/Documentation/networking/nf_flowtable.txt | |||
| @@ -44,10 +44,10 @@ including the Netfilter hooks and the flowtable fastpath bypass. | |||
| 44 | / \ / \ |Routing | / \ | 44 | / \ / \ |Routing | / \ |
| 45 | --> ingress ---> prerouting ---> |decision| | postrouting |--> neigh_xmit | 45 | --> ingress ---> prerouting ---> |decision| | postrouting |--> neigh_xmit |
| 46 | \_________/ \__________/ ---------- \____________/ ^ | 46 | \_________/ \__________/ ---------- \____________/ ^ |
| 47 | | ^ | | ^ | | 47 | | ^ | ^ | |
| 48 | flowtable | | ____\/___ | | | 48 | flowtable | ____\/___ | | |
| 49 | | | | / \ | | | 49 | | | / \ | | |
| 50 | __\/___ | --------->| forward |------------ | | 50 | __\/___ | | forward |------------ | |
| 51 | |-----| | \_________/ | | 51 | |-----| | \_________/ | |
| 52 | |-----| | 'flow offload' rule | | 52 | |-----| | 'flow offload' rule | |
| 53 | |-----| | adds entry to | | 53 | |-----| | adds entry to | |
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst index 52b026be028f..38a4edc4522b 100644 --- a/Documentation/networking/snmp_counter.rst +++ b/Documentation/networking/snmp_counter.rst | |||
| @@ -413,7 +413,7 @@ algorithm. | |||
| 413 | .. _F-RTO: https://tools.ietf.org/html/rfc5682 | 413 | .. _F-RTO: https://tools.ietf.org/html/rfc5682 |
| 414 | 414 | ||
| 415 | TCP Fast Path | 415 | TCP Fast Path |
| 416 | ============ | 416 | ============= |
| 417 | When kernel receives a TCP packet, it has two paths to handler the | 417 | When kernel receives a TCP packet, it has two paths to handler the |
| 418 | packet, one is fast path, another is slow path. The comment in kernel | 418 | packet, one is fast path, another is slow path. The comment in kernel |
| 419 | code provides a good explanation of them, I pasted them below:: | 419 | code provides a good explanation of them, I pasted them below:: |
| @@ -681,6 +681,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a | |||
| 681 | DSACK to the sender. | 681 | DSACK to the sender. |
| 682 | 682 | ||
| 683 | * TcpExtTCPDSACKRecv | 683 | * TcpExtTCPDSACKRecv |
| 684 | |||
| 684 | The TCP stack receives a DSACK, which indicates an acknowledged | 685 | The TCP stack receives a DSACK, which indicates an acknowledged |
| 685 | duplicate packet is received. | 686 | duplicate packet is received. |
| 686 | 687 | ||
| @@ -690,7 +691,7 @@ The TCP stack receives a DSACK, which indicate an out of order | |||
| 690 | duplicate packet is received. | 691 | duplicate packet is received. |
| 691 | 692 | ||
| 692 | invalid SACK and DSACK | 693 | invalid SACK and DSACK |
| 693 | ==================== | 694 | ====================== |
| 694 | When a SACK (or DSACK) block is invalid, a corresponding counter would | 695 | When a SACK (or DSACK) block is invalid, a corresponding counter would |
| 695 | be updated. The validation method is based on the start/end sequence | 696 | be updated. The validation method is based on the start/end sequence |
| 696 | number of the SACK block. For more details, please refer to the comment | 697 | number of the SACK block. For more details, please refer to the comment |
| @@ -704,11 +705,13 @@ explaination: | |||
| 704 | .. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32 | 705 | .. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32 |
| 705 | 706 | ||
| 706 | * TcpExtTCPSACKDiscard | 707 | * TcpExtTCPSACKDiscard |
| 708 | |||
| 707 | This counter indicates how many SACK blocks are invalid. If the invalid | 709 | This counter indicates how many SACK blocks are invalid. If the invalid |
| 708 | SACK block is caused by ACK recording, the TCP stack will only ignore | 710 | SACK block is caused by ACK recording, the TCP stack will only ignore |
| 709 | it and won't update this counter. | 711 | it and won't update this counter. |
| 710 | 712 | ||
| 711 | * TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo | 713 | * TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo |
| 714 | |||
| 712 | When a DSACK block is invalid, one of these two counters would be | 715 | When a DSACK block is invalid, one of these two counters would be |
| 713 | updated. Which counter will be updated depends on the undo_marker flag | 716 | updated. Which counter will be updated depends on the undo_marker flag |
| 714 | of the TCP socket. If the undo_marker is not set, the TCP stack isn't | 717 | of the TCP socket. If the undo_marker is not set, the TCP stack isn't |
| @@ -719,7 +722,7 @@ will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld | |||
| 719 | will be updated. As implied in its name, it might be an old packet. | 722 | will be updated. As implied in its name, it might be an old packet. |
| 720 | 723 | ||
| 721 | SACK shift | 724 | SACK shift |
| 722 | ========= | 725 | ========== |
| 723 | The Linux networking stack stores data in the sk_buff struct (skb for | 726 | The Linux networking stack stores data in the sk_buff struct (skb for |
| 724 | short). If a SACK block crosses multiple skbs, the TCP stack will try | 727 | short). If a SACK block crosses multiple skbs, the TCP stack will try |
| 725 | to re-arrange the data in these skbs. E.g. if a SACK block acknowledges seq | 728 | to re-arrange the data in these skbs. E.g. if a SACK block acknowledges seq |
| @@ -730,12 +733,15 @@ seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be | |||
| 730 | discarded; this operation is 'merge'. | 733 | discarded; this operation is 'merge'. |
| 731 | 734 | ||
| 732 | * TcpExtTCPSackShifted | 735 | * TcpExtTCPSackShifted |
| 736 | |||
| 733 | A skb is shifted | 737 | A skb is shifted |
| 734 | 738 | ||
| 735 | * TcpExtTCPSackMerged | 739 | * TcpExtTCPSackMerged |
| 740 | |||
| 736 | A skb is merged | 741 | A skb is merged |
| 737 | 742 | ||
| 738 | * TcpExtTCPSackShiftFallback | 743 | * TcpExtTCPSackShiftFallback |
| 744 | |||
| 739 | A skb should be shifted or merged, but the TCP stack doesn't do it for | 745 | A skb should be shifted or merged, but the TCP stack doesn't do it for |
| 740 | some reason. | 746 | some reason. |
| 741 | 747 | ||
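As an aside to the counter descriptions patched above (not part of the diff), the TcpExt counters are exported through /proc/net/netstat. A minimal C sketch for dumping them, assuming the usual pairing of a "TcpExt:" header line with a "TcpExt:" value line::

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char names[4096], values[4096];
          FILE *f = fopen("/proc/net/netstat", "r");

          if (!f)
                  return 1;

          /* the file alternates one header line and one value line per prefix */
          while (fgets(names, sizeof(names), f) &&
                 fgets(values, sizeof(values), f)) {
                  char *n, *v, *sn, *sv;

                  if (strncmp(names, "TcpExt:", 7))
                          continue;

                  n = strtok_r(names + 7, " \n", &sn);
                  v = strtok_r(values + 7, " \n", &sv);
                  while (n && v) {
                          printf("%s = %s\n", n, v);
                          n = strtok_r(NULL, " \n", &sn);
                          v = strtok_r(NULL, " \n", &sv);
                  }
          }
          fclose(f);
          return 0;
  }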
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 7de9eee73fcd..67068c47c591 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt | |||
| @@ -5,25 +5,32 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation | |||
| 5 | ---------------------- | 5 | ---------------------- |
| 6 | 6 | ||
| 7 | The kvm API is a set of ioctls that are issued to control various aspects | 7 | The kvm API is a set of ioctls that are issued to control various aspects |
| 8 | of a virtual machine. The ioctls belong to three classes | 8 | of a virtual machine. The ioctls belong to three classes: |
| 9 | 9 | ||
| 10 | - System ioctls: These query and set global attributes which affect the | 10 | - System ioctls: These query and set global attributes which affect the |
| 11 | whole kvm subsystem. In addition a system ioctl is used to create | 11 | whole kvm subsystem. In addition a system ioctl is used to create |
| 12 | virtual machines | 12 | virtual machines. |
| 13 | 13 | ||
| 14 | - VM ioctls: These query and set attributes that affect an entire virtual | 14 | - VM ioctls: These query and set attributes that affect an entire virtual |
| 15 | machine, for example memory layout. In addition a VM ioctl is used to | 15 | machine, for example memory layout. In addition a VM ioctl is used to |
| 16 | create virtual cpus (vcpus). | 16 | create virtual cpus (vcpus) and devices. |
| 17 | 17 | ||
| 18 | Only run VM ioctls from the same process (address space) that was used | 18 | VM ioctls must be issued from the same process (address space) that was |
| 19 | to create the VM. | 19 | used to create the VM. |
| 20 | 20 | ||
| 21 | - vcpu ioctls: These query and set attributes that control the operation | 21 | - vcpu ioctls: These query and set attributes that control the operation |
| 22 | of a single virtual cpu. | 22 | of a single virtual cpu. |
| 23 | 23 | ||
| 24 | Only run vcpu ioctls from the same thread that was used to create the | 24 | vcpu ioctls should be issued from the same thread that was used to create |
| 25 | vcpu. | 25 | the vcpu, except for asynchronous vcpu ioctls that are marked as such in |
| 26 | the documentation. Otherwise, the first ioctl after switching threads | ||
| 27 | could see a performance impact. | ||
| 26 | 28 | ||
| 29 | - device ioctls: These query and set attributes that control the operation | ||
| 30 | of a single device. | ||
| 31 | |||
| 32 | device ioctls must be issued from the same process (address space) that | ||
| 33 | was used to create the VM. | ||
| 27 | 34 | ||
| 28 | 2. File descriptors | 35 | 2. File descriptors |
| 29 | ------------------- | 36 | ------------------- |
| @@ -32,17 +39,34 @@ The kvm API is centered around file descriptors. An initial | |||
| 32 | open("/dev/kvm") obtains a handle to the kvm subsystem; this handle | 39 | open("/dev/kvm") obtains a handle to the kvm subsystem; this handle |
| 33 | can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this | 40 | can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this |
| 34 | handle will create a VM file descriptor which can be used to issue VM | 41 | handle will create a VM file descriptor which can be used to issue VM |
| 35 | ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu | 42 | ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will |
| 36 | and return a file descriptor pointing to it. Finally, ioctls on a vcpu | 43 | create a virtual cpu or device and return a file descriptor pointing to |
| 37 | fd can be used to control the vcpu, including the important task of | 44 | the new resource. Finally, ioctls on a vcpu or device fd can be used |
| 38 | actually running guest code. | 45 | to control the vcpu or device. For vcpus, this includes the important |
| 46 | task of actually running guest code. | ||
| 39 | 47 | ||
| 40 | In general file descriptors can be migrated among processes by means | 48 | In general file descriptors can be migrated among processes by means |
| 41 | of fork() and the SCM_RIGHTS facility of unix domain socket. These | 49 | of fork() and the SCM_RIGHTS facility of unix domain socket. These |
| 42 | kinds of tricks are explicitly not supported by kvm. While they will | 50 | kinds of tricks are explicitly not supported by kvm. While they will |
| 43 | not cause harm to the host, their actual behavior is not guaranteed by | 51 | not cause harm to the host, their actual behavior is not guaranteed by |
| 44 | the API. The only supported use is one virtual machine per process, | 52 | the API. See "General description" for details on the ioctl usage |
| 45 | and one vcpu per thread. | 53 | model that is supported by KVM. |
| 54 | |||
| 55 | It is important to note that although VM ioctls may only be issued from | ||
| 56 | the process that created the VM, a VM's lifecycle is associated with its | ||
| 57 | file descriptor, not its creator (process). In other words, the VM and | ||
| 58 | its resources, *including the associated address space*, are not freed | ||
| 59 | until the last reference to the VM's file descriptor has been released. | ||
| 60 | For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will | ||
| 61 | not be freed until both the parent (original) process and its child have | ||
| 62 | put their references to the VM's file descriptor. | ||
| 63 | |||
| 64 | Because a VM's resources are not freed until the last reference to its | ||
| 65 | file descriptor is released, creating additional references to a VM via | ||
| 66 | via fork(), dup(), etc... without careful consideration is strongly | ||
| 67 | discouraged and may have unwanted side effects, e.g. memory allocated | ||
| 68 | by and on behalf of the VM's process may not be freed/unaccounted when | ||
| 69 | the VM is shut down. | ||
| 46 | 70 | ||
| 47 | 71 | ||
| 48 | It is important to note that although VM ioctls may only be issued from | 72 | It is important to note that although VM ioctls may only be issued from |
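As an illustration of the fd hierarchy documented in this hunk (not part of the diff; error handling omitted), a minimal userspace sketch:

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int main(void)
  {
          /* system fd: system ioctls such as KVM_CREATE_VM */
          int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

          /* VM fd: VM ioctls such as KVM_CREATE_VCPU, issued from this process */
          int vm = ioctl(kvm, KVM_CREATE_VM, 0);

          /* vcpu fd: vcpu ioctls, normally issued from the creating thread */
          int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

          return vcpu < 0;
  }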
| @@ -515,11 +539,15 @@ c) KVM_INTERRUPT_SET_LEVEL | |||
| 515 | Note that any value for 'irq' other than the ones stated above is invalid | 539 | Note that any value for 'irq' other than the ones stated above is invalid |
| 516 | and incurs unexpected behavior. | 540 | and incurs unexpected behavior. |
| 517 | 541 | ||
| 542 | This is an asynchronous vcpu ioctl and can be invoked from any thread. | ||
| 543 | |||
| 518 | MIPS: | 544 | MIPS: |
| 519 | 545 | ||
| 520 | Queues an external interrupt to be injected into the virtual CPU. A negative | 546 | Queues an external interrupt to be injected into the virtual CPU. A negative |
| 521 | interrupt number dequeues the interrupt. | 547 | interrupt number dequeues the interrupt. |
| 522 | 548 | ||
| 549 | This is an asynchronous vcpu ioctl and can be invoked from any thread. | ||
| 550 | |||
| 523 | 551 | ||
| 524 | 4.17 KVM_DEBUG_GUEST | 552 | 4.17 KVM_DEBUG_GUEST |
| 525 | 553 | ||
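For illustration only (not part of the diff), an asynchronous vcpu ioctl such as KVM_INTERRUPT can be wrapped like the sketch below; vcpu_fd and irq are placeholders supplied by the caller:

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Queue external interrupt 'irq' on the vcpu behind vcpu_fd.  As an
   * asynchronous vcpu ioctl, it may be issued from any thread. */
  static int queue_irq(int vcpu_fd, unsigned int irq)
  {
          struct kvm_interrupt intr = { .irq = irq };

          return ioctl(vcpu_fd, KVM_INTERRUPT, &intr);
  }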
| @@ -1086,14 +1114,12 @@ struct kvm_userspace_memory_region { | |||
| 1086 | #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) | 1114 | #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) |
| 1087 | #define KVM_MEM_READONLY (1UL << 1) | 1115 | #define KVM_MEM_READONLY (1UL << 1) |
| 1088 | 1116 | ||
| 1089 | This ioctl allows the user to create or modify a guest physical memory | 1117 | This ioctl allows the user to create, modify or delete a guest physical |
| 1090 | slot. When changing an existing slot, it may be moved in the guest | 1118 | memory slot. Bits 0-15 of "slot" specify the slot id and this value |
| 1091 | physical memory space, or its flags may be modified. It may not be | 1119 | should be less than the maximum number of user memory slots supported per |
| 1092 | resized. Slots may not overlap in guest physical address space. | 1120 | VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS, |
| 1093 | Bits 0-15 of "slot" specifies the slot id and this value should be | 1121 | if this capability is supported by the architecture. Slots may not |
| 1094 | less than the maximum number of user memory slots supported per VM. | 1122 | overlap in guest physical address space. |
| 1095 | The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS, | ||
| 1096 | if this capability is supported by the architecture. | ||
| 1097 | 1123 | ||
| 1098 | If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot" | 1124 | If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot" |
| 1099 | specify the address space which is being modified. They must be | 1125 | specify the address space which is being modified. They must be |
| @@ -1102,6 +1128,10 @@ KVM_CAP_MULTI_ADDRESS_SPACE capability. Slots in separate address spaces | |||
| 1102 | are unrelated; the restriction on overlapping slots only applies within | 1128 | are unrelated; the restriction on overlapping slots only applies within |
| 1103 | each address space. | 1129 | each address space. |
| 1104 | 1130 | ||
| 1131 | Deleting a slot is done by passing zero for memory_size. When changing | ||
| 1132 | an existing slot, it may be moved in the guest physical memory space, | ||
| 1133 | or its flags may be modified, but it may not be resized. | ||
| 1134 | |||
| 1105 | Memory for the region is taken starting at the address denoted by the | 1135 | Memory for the region is taken starting at the address denoted by the |
| 1106 | field userspace_addr, which must point at user addressable memory for | 1136 | field userspace_addr, which must point at user addressable memory for |
| 1107 | the entire memory slot size. Any object may back this memory, including | 1137 | the entire memory slot size. Any object may back this memory, including |
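The slot-deletion case spelled out above can be sketched as follows (illustration only, not part of the diff; vm_fd is assumed to come from KVM_CREATE_VM):

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Delete a previously registered memory slot by passing a zero
   * memory_size, as described above.  Error handling omitted. */
  static int delete_memslot(int vm_fd, unsigned int slot)
  {
          struct kvm_userspace_memory_region region = {
                  .slot = slot,
                  .memory_size = 0,
          };

          return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
  }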
| @@ -2493,7 +2523,7 @@ KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm, | |||
| 2493 | machine checks needing further payload are not | 2523 | machine checks needing further payload are not |
| 2494 | supported by this ioctl) | 2524 | supported by this ioctl) |
| 2495 | 2525 | ||
| 2496 | Note that the vcpu ioctl is asynchronous to vcpu execution. | 2526 | This is an asynchronous vcpu ioctl and can be invoked from any thread. |
| 2497 | 2527 | ||
| 2498 | 4.78 KVM_PPC_GET_HTAB_FD | 2528 | 4.78 KVM_PPC_GET_HTAB_FD |
| 2499 | 2529 | ||
| @@ -3042,8 +3072,7 @@ KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg | |||
| 3042 | KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall | 3072 | KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall |
| 3043 | KVM_S390_MCHK - machine check interrupt; parameters in .mchk | 3073 | KVM_S390_MCHK - machine check interrupt; parameters in .mchk |
| 3044 | 3074 | ||
| 3045 | 3075 | This is an asynchronous vcpu ioctl and can be invoked from any thread. | |
| 3046 | Note that the vcpu ioctl is asynchronous to vcpu execution. | ||
| 3047 | 3076 | ||
| 3048 | 4.94 KVM_S390_GET_IRQ_STATE | 3077 | 4.94 KVM_S390_GET_IRQ_STATE |
| 3049 | 3078 | ||
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt index f365102c80f5..2efe0efc516e 100644 --- a/Documentation/virtual/kvm/mmu.txt +++ b/Documentation/virtual/kvm/mmu.txt | |||
| @@ -142,7 +142,7 @@ Shadow pages contain the following information: | |||
| 142 | If clear, this page corresponds to a guest page table denoted by the gfn | 142 | If clear, this page corresponds to a guest page table denoted by the gfn |
| 143 | field. | 143 | field. |
| 144 | role.quadrant: | 144 | role.quadrant: |
| 145 | When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit | 145 | When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit |
| 146 | sptes. That means a guest page table contains more ptes than the host, | 146 | sptes. That means a guest page table contains more ptes than the host, |
| 147 | so multiple shadow pages are needed to shadow one guest page. | 147 | so multiple shadow pages are needed to shadow one guest page. |
| 148 | For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the | 148 | For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the |
| @@ -158,9 +158,9 @@ Shadow pages contain the following information: | |||
| 158 | The page is invalid and should not be used. It is a root page that is | 158 | The page is invalid and should not be used. It is a root page that is |
| 159 | currently pinned (by a cpu hardware register pointing to it); once it is | 159 | currently pinned (by a cpu hardware register pointing to it); once it is |
| 160 | unpinned it will be destroyed. | 160 | unpinned it will be destroyed. |
| 161 | role.cr4_pae: | 161 | role.gpte_is_8_bytes: |
| 162 | Contains the value of cr4.pae for which the page is valid (e.g. whether | 162 | Reflects the size of the guest PTE for which the page is valid, i.e. '1' |
| 163 | 32-bit or 64-bit gptes are in use). | 163 | if 64-bit gptes are in use, '0' if 32-bit gptes are in use. |
| 164 | role.nxe: | 164 | role.nxe: |
| 165 | Contains the value of efer.nxe for which the page is valid. | 165 | Contains the value of efer.nxe for which the page is valid. |
| 166 | role.cr0_wp: | 166 | role.cr0_wp: |
| @@ -173,6 +173,9 @@ Shadow pages contain the following information: | |||
| 173 | Contains the value of cr4.smap && !cr0.wp for which the page is valid | 173 | Contains the value of cr4.smap && !cr0.wp for which the page is valid |
| 174 | (pages for which this is true are different from other pages; see the | 174 | (pages for which this is true are different from other pages; see the |
| 175 | treatment of cr0.wp=0 below). | 175 | treatment of cr0.wp=0 below). |
| 176 | role.ept_sp: | ||
| 177 | This is a virtual flag to denote a shadowed nested EPT page. ept_sp | ||
| 178 | is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination. | ||
| 176 | role.smm: | 179 | role.smm: |
| 177 | Is 1 if the page is valid in system management mode. This field | 180 | Is 1 if the page is valid in system management mode. This field |
| 178 | determines which of the kvm_memslots array was used to build this | 181 | determines which of the kvm_memslots array was used to build this |
diff --git a/MAINTAINERS b/MAINTAINERS index 3e5a5d263f29..27b0de13506c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1893,14 +1893,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git | |||
| 1893 | ARM/NUVOTON NPCM ARCHITECTURE | 1893 | ARM/NUVOTON NPCM ARCHITECTURE |
| 1894 | M: Avi Fishman <avifishman70@gmail.com> | 1894 | M: Avi Fishman <avifishman70@gmail.com> |
| 1895 | M: Tomer Maimon <tmaimon77@gmail.com> | 1895 | M: Tomer Maimon <tmaimon77@gmail.com> |
| 1896 | M: Tali Perry <tali.perry1@gmail.com> | ||
| 1896 | R: Patrick Venture <venture@google.com> | 1897 | R: Patrick Venture <venture@google.com> |
| 1897 | R: Nancy Yuen <yuenn@google.com> | 1898 | R: Nancy Yuen <yuenn@google.com> |
| 1898 | R: Brendan Higgins <brendanhiggins@google.com> | 1899 | R: Benjamin Fair <benjaminfair@google.com> |
| 1899 | L: openbmc@lists.ozlabs.org (moderated for non-subscribers) | 1900 | L: openbmc@lists.ozlabs.org (moderated for non-subscribers) |
| 1900 | S: Supported | 1901 | S: Supported |
| 1901 | F: arch/arm/mach-npcm/ | 1902 | F: arch/arm/mach-npcm/ |
| 1902 | F: arch/arm/boot/dts/nuvoton-npcm* | 1903 | F: arch/arm/boot/dts/nuvoton-npcm* |
| 1903 | F: include/dt-bindings/clock/nuvoton,npcm7xx-clks.h | 1904 | F: include/dt-bindings/clock/nuvoton,npcm7xx-clock.h |
| 1904 | F: drivers/*/*npcm* | 1905 | F: drivers/*/*npcm* |
| 1905 | F: Documentation/devicetree/bindings/*/*npcm* | 1906 | F: Documentation/devicetree/bindings/*/*npcm* |
| 1906 | F: Documentation/devicetree/bindings/*/*/*npcm* | 1907 | F: Documentation/devicetree/bindings/*/*/*npcm* |
| @@ -2356,7 +2357,7 @@ F: arch/arm/mm/cache-uniphier.c | |||
| 2356 | F: arch/arm64/boot/dts/socionext/uniphier* | 2357 | F: arch/arm64/boot/dts/socionext/uniphier* |
| 2357 | F: drivers/bus/uniphier-system-bus.c | 2358 | F: drivers/bus/uniphier-system-bus.c |
| 2358 | F: drivers/clk/uniphier/ | 2359 | F: drivers/clk/uniphier/ |
| 2359 | F: drivers/dmaengine/uniphier-mdmac.c | 2360 | F: drivers/dma/uniphier-mdmac.c |
| 2360 | F: drivers/gpio/gpio-uniphier.c | 2361 | F: drivers/gpio/gpio-uniphier.c |
| 2361 | F: drivers/i2c/busses/i2c-uniphier* | 2362 | F: drivers/i2c/busses/i2c-uniphier* |
| 2362 | F: drivers/irqchip/irq-uniphier-aidet.c | 2363 | F: drivers/irqchip/irq-uniphier-aidet.c |
| @@ -4129,7 +4130,7 @@ F: drivers/cpuidle/* | |||
| 4129 | F: include/linux/cpuidle.h | 4130 | F: include/linux/cpuidle.h |
| 4130 | 4131 | ||
| 4131 | CRAMFS FILESYSTEM | 4132 | CRAMFS FILESYSTEM |
| 4132 | M: Nicolas Pitre <nico@linaro.org> | 4133 | M: Nicolas Pitre <nico@fluxnic.net> |
| 4133 | S: Maintained | 4134 | S: Maintained |
| 4134 | F: Documentation/filesystems/cramfs.txt | 4135 | F: Documentation/filesystems/cramfs.txt |
| 4135 | F: fs/cramfs/ | 4136 | F: fs/cramfs/ |
| @@ -5833,7 +5834,7 @@ L: netdev@vger.kernel.org | |||
| 5833 | S: Maintained | 5834 | S: Maintained |
| 5834 | F: Documentation/ABI/testing/sysfs-bus-mdio | 5835 | F: Documentation/ABI/testing/sysfs-bus-mdio |
| 5835 | F: Documentation/devicetree/bindings/net/mdio* | 5836 | F: Documentation/devicetree/bindings/net/mdio* |
| 5836 | F: Documentation/networking/phy.txt | 5837 | F: Documentation/networking/phy.rst |
| 5837 | F: drivers/net/phy/ | 5838 | F: drivers/net/phy/ |
| 5838 | F: drivers/of/of_mdio.c | 5839 | F: drivers/of/of_mdio.c |
| 5839 | F: drivers/of/of_net.c | 5840 | F: drivers/of/of_net.c |
| @@ -6408,7 +6409,6 @@ L: linux-kernel@vger.kernel.org | |||
| 6408 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core | 6409 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core |
| 6409 | S: Maintained | 6410 | S: Maintained |
| 6410 | F: kernel/futex.c | 6411 | F: kernel/futex.c |
| 6411 | F: kernel/futex_compat.c | ||
| 6412 | F: include/asm-generic/futex.h | 6412 | F: include/asm-generic/futex.h |
| 6413 | F: include/linux/futex.h | 6413 | F: include/linux/futex.h |
| 6414 | F: include/uapi/linux/futex.h | 6414 | F: include/uapi/linux/futex.h |
| @@ -7516,7 +7516,7 @@ F: include/net/mac802154.h | |||
| 7516 | F: include/net/af_ieee802154.h | 7516 | F: include/net/af_ieee802154.h |
| 7517 | F: include/net/cfg802154.h | 7517 | F: include/net/cfg802154.h |
| 7518 | F: include/net/ieee802154_netdev.h | 7518 | F: include/net/ieee802154_netdev.h |
| 7519 | F: Documentation/networking/ieee802154.txt | 7519 | F: Documentation/networking/ieee802154.rst |
| 7520 | 7520 | ||
| 7521 | IFE PROTOCOL | 7521 | IFE PROTOCOL |
| 7522 | M: Yotam Gigi <yotam.gi@gmail.com> | 7522 | M: Yotam Gigi <yotam.gi@gmail.com> |
| @@ -13982,7 +13982,7 @@ F: drivers/media/rc/serial_ir.c | |||
| 13982 | SFC NETWORK DRIVER | 13982 | SFC NETWORK DRIVER |
| 13983 | M: Solarflare linux maintainers <linux-net-drivers@solarflare.com> | 13983 | M: Solarflare linux maintainers <linux-net-drivers@solarflare.com> |
| 13984 | M: Edward Cree <ecree@solarflare.com> | 13984 | M: Edward Cree <ecree@solarflare.com> |
| 13985 | M: Bert Kenward <bkenward@solarflare.com> | 13985 | M: Martin Habets <mhabets@solarflare.com> |
| 13986 | L: netdev@vger.kernel.org | 13986 | L: netdev@vger.kernel.org |
| 13987 | S: Supported | 13987 | S: Supported |
| 13988 | F: drivers/net/ethernet/sfc/ | 13988 | F: drivers/net/ethernet/sfc/ |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | VERSION = 5 | 2 | VERSION = 5 |
| 3 | PATCHLEVEL = 1 | 3 | PATCHLEVEL = 1 |
| 4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
| 5 | EXTRAVERSION = -rc2 | 5 | EXTRAVERSION = -rc4 |
| 6 | NAME = Shy Crocodile | 6 | NAME = Shy Crocodile |
| 7 | 7 | ||
| 8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
| @@ -31,26 +31,12 @@ _all: | |||
| 31 | # descending is started. They are now explicitly listed as the | 31 | # descending is started. They are now explicitly listed as the |
| 32 | # prepare rule. | 32 | # prepare rule. |
| 33 | 33 | ||
| 34 | # Ugly workaround for Debian make-kpkg: | 34 | ifneq ($(sub_make_done),1) |
| 35 | # make-kpkg directly includes the top Makefile of Linux kernel. In such a case, | ||
| 36 | # skip sub-make to support debian_* targets in ruleset/kernel_version.mk, but | ||
| 37 | # displays warning to discourage such abusage. | ||
| 38 | ifneq ($(word 2, $(MAKEFILE_LIST)),) | ||
| 39 | $(warning Do not include top Makefile of Linux Kernel) | ||
| 40 | sub-make-done := 1 | ||
| 41 | MAKEFLAGS += -rR | ||
| 42 | endif | ||
| 43 | |||
| 44 | ifneq ($(sub-make-done),1) | ||
| 45 | 35 | ||
| 46 | # Do not use make's built-in rules and variables | 36 | # Do not use make's built-in rules and variables |
| 47 | # (this increases performance and avoids hard-to-debug behaviour) | 37 | # (this increases performance and avoids hard-to-debug behaviour) |
| 48 | MAKEFLAGS += -rR | 38 | MAKEFLAGS += -rR |
| 49 | 39 | ||
| 50 | # 'MAKEFLAGS += -rR' does not become immediately effective for old | ||
| 51 | # GNU Make versions. Cancel implicit rules for this Makefile. | ||
| 52 | $(lastword $(MAKEFILE_LIST)): ; | ||
| 53 | |||
| 54 | # Avoid funny character set dependencies | 40 | # Avoid funny character set dependencies |
| 55 | unexport LC_ALL | 41 | unexport LC_ALL |
| 56 | LC_COLLATE=C | 42 | LC_COLLATE=C |
| @@ -153,6 +139,7 @@ $(if $(KBUILD_OUTPUT),, \ | |||
| 153 | # 'sub-make' below. | 139 | # 'sub-make' below. |
| 154 | MAKEFLAGS += --include-dir=$(CURDIR) | 140 | MAKEFLAGS += --include-dir=$(CURDIR) |
| 155 | 141 | ||
| 142 | need-sub-make := 1 | ||
| 156 | else | 143 | else |
| 157 | 144 | ||
| 158 | # Do not print "Entering directory ..." at all for in-tree build. | 145 | # Do not print "Entering directory ..." at all for in-tree build. |
| @@ -160,6 +147,18 @@ MAKEFLAGS += --no-print-directory | |||
| 160 | 147 | ||
| 161 | endif # ifneq ($(KBUILD_OUTPUT),) | 148 | endif # ifneq ($(KBUILD_OUTPUT),) |
| 162 | 149 | ||
| 150 | ifneq ($(filter 3.%,$(MAKE_VERSION)),) | ||
| 151 | # 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x | ||
| 152 | # We need to invoke sub-make to avoid implicit rules in the top Makefile. | ||
| 153 | need-sub-make := 1 | ||
| 154 | # Cancel implicit rules for this Makefile. | ||
| 155 | $(lastword $(MAKEFILE_LIST)): ; | ||
| 156 | endif | ||
| 157 | |||
| 158 | export sub_make_done := 1 | ||
| 159 | |||
| 160 | ifeq ($(need-sub-make),1) | ||
| 161 | |||
| 163 | PHONY += $(MAKECMDGOALS) sub-make | 162 | PHONY += $(MAKECMDGOALS) sub-make |
| 164 | 163 | ||
| 165 | $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make | 164 | $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make |
| @@ -167,12 +166,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make | |||
| 167 | 166 | ||
| 168 | # Invoke a second make in the output directory, passing relevant variables | 167 | # Invoke a second make in the output directory, passing relevant variables |
| 169 | sub-make: | 168 | sub-make: |
| 170 | $(Q)$(MAKE) sub-make-done=1 \ | 169 | $(Q)$(MAKE) \ |
| 171 | $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \ | 170 | $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \ |
| 172 | -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS)) | 171 | -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS)) |
| 173 | 172 | ||
| 174 | else # sub-make-done | 173 | endif # need-sub-make |
| 174 | endif # sub_make_done | ||
| 175 | |||
| 175 | # We process the rest of the Makefile if this is the final invocation of make | 176 | # We process the rest of the Makefile if this is the final invocation of make |
| 177 | ifeq ($(need-sub-make),) | ||
| 176 | 178 | ||
| 177 | # Do not print "Entering directory ...", | 179 | # Do not print "Entering directory ...", |
| 178 | # but we want to display it when entering to the output directory | 180 | # but we want to display it when entering to the output directory |
| @@ -497,7 +499,8 @@ outputmakefile: | |||
| 497 | ifneq ($(KBUILD_SRC),) | 499 | ifneq ($(KBUILD_SRC),) |
| 498 | $(Q)ln -fsn $(srctree) source | 500 | $(Q)ln -fsn $(srctree) source |
| 499 | $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) | 501 | $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) |
| 500 | $(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore | 502 | $(Q)test -e .gitignore || \ |
| 503 | { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore | ||
| 501 | endif | 504 | endif |
| 502 | 505 | ||
| 503 | ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) | 506 | ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) |
| @@ -677,7 +680,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) | |||
| 677 | KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) | 680 | KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) |
| 678 | 681 | ||
| 679 | ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE | 682 | ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE |
| 680 | KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) | 683 | KBUILD_CFLAGS += -Os |
| 681 | else | 684 | else |
| 682 | KBUILD_CFLAGS += -O2 | 685 | KBUILD_CFLAGS += -O2 |
| 683 | endif | 686 | endif |
| @@ -950,9 +953,11 @@ mod_sign_cmd = true | |||
| 950 | endif | 953 | endif |
| 951 | export mod_sign_cmd | 954 | export mod_sign_cmd |
| 952 | 955 | ||
| 956 | HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) | ||
| 957 | |||
| 953 | ifdef CONFIG_STACK_VALIDATION | 958 | ifdef CONFIG_STACK_VALIDATION |
| 954 | has_libelf := $(call try-run,\ | 959 | has_libelf := $(call try-run,\ |
| 955 | echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0) | 960 | echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) |
| 956 | ifeq ($(has_libelf),1) | 961 | ifeq ($(has_libelf),1) |
| 957 | objtool_target := tools/objtool FORCE | 962 | objtool_target := tools/objtool FORCE |
| 958 | else | 963 | else |
| @@ -1757,7 +1762,7 @@ existing-targets := $(wildcard $(sort $(targets))) | |||
| 1757 | 1762 | ||
| 1758 | endif # ifeq ($(config-targets),1) | 1763 | endif # ifeq ($(config-targets),1) |
| 1759 | endif # ifeq ($(mixed-targets),1) | 1764 | endif # ifeq ($(mixed-targets),1) |
| 1760 | endif # sub-make-done | 1765 | endif # need-sub-make |
| 1761 | 1766 | ||
| 1762 | PHONY += FORCE | 1767 | PHONY += FORCE |
| 1763 | FORCE: | 1768 | FORCE: |
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index dc0ab28baca1..70b783333965 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild | |||
| @@ -6,6 +6,7 @@ generic-y += exec.h | |||
| 6 | generic-y += export.h | 6 | generic-y += export.h |
| 7 | generic-y += fb.h | 7 | generic-y += fb.h |
| 8 | generic-y += irq_work.h | 8 | generic-y += irq_work.h |
| 9 | generic-y += kvm_para.h | ||
| 9 | generic-y += mcs_spinlock.h | 10 | generic-y += mcs_spinlock.h |
| 10 | generic-y += mm-arch-hooks.h | 11 | generic-y += mm-arch-hooks.h |
| 11 | generic-y += preempt.h | 12 | generic-y += preempt.h |
diff --git a/arch/alpha/include/uapi/asm/kvm_para.h b/arch/alpha/include/uapi/asm/kvm_para.h deleted file mode 100644 index baacc4996d18..000000000000 --- a/arch/alpha/include/uapi/asm/kvm_para.h +++ /dev/null | |||
| @@ -1,2 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
| 2 | #include <asm-generic/kvm_para.h> | ||
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index b41f8881ecc8..decc306a3b52 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild | |||
| @@ -11,6 +11,7 @@ generic-y += hardirq.h | |||
| 11 | generic-y += hw_irq.h | 11 | generic-y += hw_irq.h |
| 12 | generic-y += irq_regs.h | 12 | generic-y += irq_regs.h |
| 13 | generic-y += irq_work.h | 13 | generic-y += irq_work.h |
| 14 | generic-y += kvm_para.h | ||
| 14 | generic-y += local.h | 15 | generic-y += local.h |
| 15 | generic-y += local64.h | 16 | generic-y += local64.h |
| 16 | generic-y += mcs_spinlock.h | 17 | generic-y += mcs_spinlock.h |
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h index 29de09804306..c7a4201ed62b 100644 --- a/arch/arc/include/asm/syscall.h +++ b/arch/arc/include/asm/syscall.h | |||
| @@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
| 55 | */ | 55 | */ |
| 56 | static inline void | 56 | static inline void |
| 57 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 57 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 58 | unsigned int i, unsigned int n, unsigned long *args) | 58 | unsigned long *args) |
| 59 | { | 59 | { |
| 60 | unsigned long *inside_ptregs = &(regs->r0); | 60 | unsigned long *inside_ptregs = &(regs->r0); |
| 61 | inside_ptregs -= i; | 61 | unsigned int n = 6; |
| 62 | 62 | unsigned int i = 0; | |
| 63 | BUG_ON((i + n) > 6); | ||
| 64 | 63 | ||
| 65 | while (n--) { | 64 | while (n--) { |
| 66 | args[i++] = (*inside_ptregs); | 65 | args[i++] = (*inside_ptregs); |
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/arc/include/uapi/asm/Kbuild +++ b/arch/arc/include/uapi/asm/Kbuild | |||
| @@ -1,2 +1 @@ | |||
| 1 | generic-y += kvm_para.h | ||
| 2 | generic-y += ucontext.h | generic-y += ucontext.h | |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 054ead960f98..850b4805e2d1 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -596,6 +596,7 @@ config ARCH_DAVINCI | |||
| 596 | select HAVE_IDE | 596 | select HAVE_IDE |
| 597 | select PM_GENERIC_DOMAINS if PM | 597 | select PM_GENERIC_DOMAINS if PM |
| 598 | select PM_GENERIC_DOMAINS_OF if PM && OF | 598 | select PM_GENERIC_DOMAINS_OF if PM && OF |
| 599 | select REGMAP_MMIO | ||
| 599 | select RESET_CONTROLLER | 600 | select RESET_CONTROLLER |
| 600 | select SPARSE_IRQ | 601 | select SPARSE_IRQ |
| 601 | select USE_OF | 602 | select USE_OF |
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index dce5be5df97b..edcff79879e7 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts | |||
| @@ -57,6 +57,24 @@ | |||
| 57 | enable-active-high; | 57 | enable-active-high; |
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | /* TPS79501 */ | ||
| 61 | v1_8d_reg: fixedregulator-v1_8d { | ||
| 62 | compatible = "regulator-fixed"; | ||
| 63 | regulator-name = "v1_8d"; | ||
| 64 | vin-supply = <&vbat>; | ||
| 65 | regulator-min-microvolt = <1800000>; | ||
| 66 | regulator-max-microvolt = <1800000>; | ||
| 67 | }; | ||
| 68 | |||
| 69 | /* TPS79501 */ | ||
| 70 | v3_3d_reg: fixedregulator-v3_3d { | ||
| 71 | compatible = "regulator-fixed"; | ||
| 72 | regulator-name = "v3_3d"; | ||
| 73 | vin-supply = <&vbat>; | ||
| 74 | regulator-min-microvolt = <3300000>; | ||
| 75 | regulator-max-microvolt = <3300000>; | ||
| 76 | }; | ||
| 77 | |||
| 60 | matrix_keypad: matrix_keypad0 { | 78 | matrix_keypad: matrix_keypad0 { |
| 61 | compatible = "gpio-matrix-keypad"; | 79 | compatible = "gpio-matrix-keypad"; |
| 62 | debounce-delay-ms = <5>; | 80 | debounce-delay-ms = <5>; |
| @@ -499,10 +517,10 @@ | |||
| 499 | status = "okay"; | 517 | status = "okay"; |
| 500 | 518 | ||
| 501 | /* Regulators */ | 519 | /* Regulators */ |
| 502 | AVDD-supply = <&vaux2_reg>; | 520 | AVDD-supply = <&v3_3d_reg>; |
| 503 | IOVDD-supply = <&vaux2_reg>; | 521 | IOVDD-supply = <&v3_3d_reg>; |
| 504 | DRVDD-supply = <&vaux2_reg>; | 522 | DRVDD-supply = <&v3_3d_reg>; |
| 505 | DVDD-supply = <&vbat>; | 523 | DVDD-supply = <&v1_8d_reg>; |
| 506 | }; | 524 | }; |
| 507 | }; | 525 | }; |
| 508 | 526 | ||
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index b128998097ce..2c2d8b5b8cf5 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts | |||
| @@ -73,6 +73,24 @@ | |||
| 73 | enable-active-high; | 73 | enable-active-high; |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | /* TPS79518 */ | ||
| 77 | v1_8d_reg: fixedregulator-v1_8d { | ||
| 78 | compatible = "regulator-fixed"; | ||
| 79 | regulator-name = "v1_8d"; | ||
| 80 | vin-supply = <&vbat>; | ||
| 81 | regulator-min-microvolt = <1800000>; | ||
| 82 | regulator-max-microvolt = <1800000>; | ||
| 83 | }; | ||
| 84 | |||
| 85 | /* TPS78633 */ | ||
| 86 | v3_3d_reg: fixedregulator-v3_3d { | ||
| 87 | compatible = "regulator-fixed"; | ||
| 88 | regulator-name = "v3_3d"; | ||
| 89 | vin-supply = <&vbat>; | ||
| 90 | regulator-min-microvolt = <3300000>; | ||
| 91 | regulator-max-microvolt = <3300000>; | ||
| 92 | }; | ||
| 93 | |||
| 76 | leds { | 94 | leds { |
| 77 | pinctrl-names = "default"; | 95 | pinctrl-names = "default"; |
| 78 | pinctrl-0 = <&user_leds_s0>; | 96 | pinctrl-0 = <&user_leds_s0>; |
| @@ -501,10 +519,10 @@ | |||
| 501 | status = "okay"; | 519 | status = "okay"; |
| 502 | 520 | ||
| 503 | /* Regulators */ | 521 | /* Regulators */ |
| 504 | AVDD-supply = <&vaux2_reg>; | 522 | AVDD-supply = <&v3_3d_reg>; |
| 505 | IOVDD-supply = <&vaux2_reg>; | 523 | IOVDD-supply = <&v3_3d_reg>; |
| 506 | DRVDD-supply = <&vaux2_reg>; | 524 | DRVDD-supply = <&v3_3d_reg>; |
| 507 | DVDD-supply = <&vbat>; | 525 | DVDD-supply = <&v1_8d_reg>; |
| 508 | }; | 526 | }; |
| 509 | }; | 527 | }; |
| 510 | 528 | ||
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index f459ec316a22..ca6d9f02a800 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi | |||
| @@ -1762,7 +1762,7 @@ | |||
| 1762 | reg = <0xcc000 0x4>; | 1762 | reg = <0xcc000 0x4>; |
| 1763 | reg-names = "rev"; | 1763 | reg-names = "rev"; |
| 1764 | /* Domains (P, C): per_pwrdm, l4ls_clkdm */ | 1764 | /* Domains (P, C): per_pwrdm, l4ls_clkdm */ |
| 1765 | clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>; | 1765 | clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>; |
| 1766 | clock-names = "fck"; | 1766 | clock-names = "fck"; |
| 1767 | #address-cells = <1>; | 1767 | #address-cells = <1>; |
| 1768 | #size-cells = <1>; | 1768 | #size-cells = <1>; |
| @@ -1785,7 +1785,7 @@ | |||
| 1785 | reg = <0xd0000 0x4>; | 1785 | reg = <0xd0000 0x4>; |
| 1786 | reg-names = "rev"; | 1786 | reg-names = "rev"; |
| 1787 | /* Domains (P, C): per_pwrdm, l4ls_clkdm */ | 1787 | /* Domains (P, C): per_pwrdm, l4ls_clkdm */ |
| 1788 | clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>; | 1788 | clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>; |
| 1789 | clock-names = "fck"; | 1789 | clock-names = "fck"; |
| 1790 | #address-cells = <1>; | 1790 | #address-cells = <1>; |
| 1791 | #size-cells = <1>; | 1791 | #size-cells = <1>; |
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts index 5641d162dfdb..28e7513ce617 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts | |||
| @@ -93,7 +93,7 @@ | |||
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | &hdmi { | 95 | &hdmi { |
| 96 | hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; | 96 | hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>; |
| 97 | }; | 97 | }; |
| 98 | 98 | ||
| 99 | &pwm { | 99 | &pwm { |
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi index b715ab0fa1ff..e8d800fec637 100644 --- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi +++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | |||
| @@ -114,9 +114,9 @@ | |||
| 114 | reg = <2>; | 114 | reg = <2>; |
| 115 | }; | 115 | }; |
| 116 | 116 | ||
| 117 | switch@0 { | 117 | switch@10 { |
| 118 | compatible = "qca,qca8334"; | 118 | compatible = "qca,qca8334"; |
| 119 | reg = <0>; | 119 | reg = <10>; |
| 120 | 120 | ||
| 121 | switch_ports: ports { | 121 | switch_ports: ports { |
| 122 | #address-cells = <1>; | 122 | #address-cells = <1>; |
| @@ -125,7 +125,7 @@ | |||
| 125 | ethphy0: port@0 { | 125 | ethphy0: port@0 { |
| 126 | reg = <0>; | 126 | reg = <0>; |
| 127 | label = "cpu"; | 127 | label = "cpu"; |
| 128 | phy-mode = "rgmii"; | 128 | phy-mode = "rgmii-id"; |
| 129 | ethernet = <&fec>; | 129 | ethernet = <&fec>; |
| 130 | 130 | ||
| 131 | fixed-link { | 131 | fixed-link { |
diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi index 1d1b4bd0670f..a4217f564a53 100644 --- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi +++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi | |||
| @@ -264,7 +264,7 @@ | |||
| 264 | pinctrl-2 = <&pinctrl_usdhc3_200mhz>; | 264 | pinctrl-2 = <&pinctrl_usdhc3_200mhz>; |
| 265 | vmcc-supply = <®_sd3_vmmc>; | 265 | vmcc-supply = <®_sd3_vmmc>; |
| 266 | cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; | 266 | cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; |
| 267 | bus-witdh = <4>; | 267 | bus-width = <4>; |
| 268 | no-1-8-v; | 268 | no-1-8-v; |
| 269 | status = "okay"; | 269 | status = "okay"; |
| 270 | }; | 270 | }; |
| @@ -275,7 +275,7 @@ | |||
| 275 | pinctrl-1 = <&pinctrl_usdhc4_100mhz>; | 275 | pinctrl-1 = <&pinctrl_usdhc4_100mhz>; |
| 276 | pinctrl-2 = <&pinctrl_usdhc4_200mhz>; | 276 | pinctrl-2 = <&pinctrl_usdhc4_200mhz>; |
| 277 | vmcc-supply = <®_sd4_vmmc>; | 277 | vmcc-supply = <®_sd4_vmmc>; |
| 278 | bus-witdh = <8>; | 278 | bus-width = <8>; |
| 279 | no-1-8-v; | 279 | no-1-8-v; |
| 280 | non-removable; | 280 | non-removable; |
| 281 | status = "okay"; | 281 | status = "okay"; |
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index 433bf09a1954..027df06c5dc7 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi | |||
| @@ -91,6 +91,7 @@ | |||
| 91 | pinctrl-0 = <&pinctrl_enet>; | 91 | pinctrl-0 = <&pinctrl_enet>; |
| 92 | phy-handle = <ðphy>; | 92 | phy-handle = <ðphy>; |
| 93 | phy-mode = "rgmii"; | 93 | phy-mode = "rgmii"; |
| 94 | phy-reset-duration = <10>; /* in msecs */ | ||
| 94 | phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; | 95 | phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; |
| 95 | phy-supply = <&vdd_eth_io_reg>; | 96 | phy-supply = <&vdd_eth_io_reg>; |
| 96 | status = "disabled"; | 97 | status = "disabled"; |
diff --git a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h index f6fb6783c193..54cfe72295aa 100644 --- a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h +++ b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | /* | 2 | /* |
| 3 | * Copyright (C) 2016 Freescale Semiconductor, Inc. | 3 | * Copyright (C) 2016 Freescale Semiconductor, Inc. |
| 4 | * Copyright (C) 2017 NXP | 4 | * Copyright (C) 2017 NXP |
diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi index aa107ee41b8b..ef653c3209bc 100644 --- a/arch/arm/boot/dts/rk3288-tinker.dtsi +++ b/arch/arm/boot/dts/rk3288-tinker.dtsi | |||
| @@ -254,6 +254,7 @@ | |||
| 254 | }; | 254 | }; |
| 255 | 255 | ||
| 256 | vccio_sd: LDO_REG5 { | 256 | vccio_sd: LDO_REG5 { |
| 257 | regulator-boot-on; | ||
| 257 | regulator-min-microvolt = <1800000>; | 258 | regulator-min-microvolt = <1800000>; |
| 258 | regulator-max-microvolt = <3300000>; | 259 | regulator-max-microvolt = <3300000>; |
| 259 | regulator-name = "vccio_sd"; | 260 | regulator-name = "vccio_sd"; |
| @@ -430,7 +431,7 @@ | |||
| 430 | bus-width = <4>; | 431 | bus-width = <4>; |
| 431 | cap-mmc-highspeed; | 432 | cap-mmc-highspeed; |
| 432 | cap-sd-highspeed; | 433 | cap-sd-highspeed; |
| 433 | card-detect-delay = <200>; | 434 | broken-cd; |
| 434 | disable-wp; /* wp not hooked up */ | 435 | disable-wp; /* wp not hooked up */ |
| 435 | pinctrl-names = "default"; | 436 | pinctrl-names = "default"; |
| 436 | pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; | 437 | pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; |
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi index 0bc2409f6903..192dbc089ade 100644 --- a/arch/arm/boot/dts/rk3288-veyron.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron.dtsi | |||
| @@ -25,8 +25,6 @@ | |||
| 25 | 25 | ||
| 26 | gpio_keys: gpio-keys { | 26 | gpio_keys: gpio-keys { |
| 27 | compatible = "gpio-keys"; | 27 | compatible = "gpio-keys"; |
| 28 | #address-cells = <1>; | ||
| 29 | #size-cells = <0>; | ||
| 30 | 28 | ||
| 31 | pinctrl-names = "default"; | 29 | pinctrl-names = "default"; |
| 32 | pinctrl-0 = <&pwr_key_l>; | 30 | pinctrl-0 = <&pwr_key_l>; |
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index ca7d52daa8fb..a024d1e7e74c 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi | |||
| @@ -70,7 +70,7 @@ | |||
| 70 | compatible = "arm,cortex-a12"; | 70 | compatible = "arm,cortex-a12"; |
| 71 | reg = <0x501>; | 71 | reg = <0x501>; |
| 72 | resets = <&cru SRST_CORE1>; | 72 | resets = <&cru SRST_CORE1>; |
| 73 | operating-points = <&cpu_opp_table>; | 73 | operating-points-v2 = <&cpu_opp_table>; |
| 74 | #cooling-cells = <2>; /* min followed by max */ | 74 | #cooling-cells = <2>; /* min followed by max */ |
| 75 | clock-latency = <40000>; | 75 | clock-latency = <40000>; |
| 76 | clocks = <&cru ARMCLK>; | 76 | clocks = <&cru ARMCLK>; |
| @@ -80,7 +80,7 @@ | |||
| 80 | compatible = "arm,cortex-a12"; | 80 | compatible = "arm,cortex-a12"; |
| 81 | reg = <0x502>; | 81 | reg = <0x502>; |
| 82 | resets = <&cru SRST_CORE2>; | 82 | resets = <&cru SRST_CORE2>; |
| 83 | operating-points = <&cpu_opp_table>; | 83 | operating-points-v2 = <&cpu_opp_table>; |
| 84 | #cooling-cells = <2>; /* min followed by max */ | 84 | #cooling-cells = <2>; /* min followed by max */ |
| 85 | clock-latency = <40000>; | 85 | clock-latency = <40000>; |
| 86 | clocks = <&cru ARMCLK>; | 86 | clocks = <&cru ARMCLK>; |
| @@ -90,7 +90,7 @@ | |||
| 90 | compatible = "arm,cortex-a12"; | 90 | compatible = "arm,cortex-a12"; |
| 91 | reg = <0x503>; | 91 | reg = <0x503>; |
| 92 | resets = <&cru SRST_CORE3>; | 92 | resets = <&cru SRST_CORE3>; |
| 93 | operating-points = <&cpu_opp_table>; | 93 | operating-points-v2 = <&cpu_opp_table>; |
| 94 | #cooling-cells = <2>; /* min followed by max */ | 94 | #cooling-cells = <2>; /* min followed by max */ |
| 95 | clock-latency = <40000>; | 95 | clock-latency = <40000>; |
| 96 | clocks = <&cru ARMCLK>; | 96 | clocks = <&cru ARMCLK>; |
| @@ -1119,8 +1119,6 @@ | |||
| 1119 | clock-names = "ref", "pclk"; | 1119 | clock-names = "ref", "pclk"; |
| 1120 | power-domains = <&power RK3288_PD_VIO>; | 1120 | power-domains = <&power RK3288_PD_VIO>; |
| 1121 | rockchip,grf = <&grf>; | 1121 | rockchip,grf = <&grf>; |
| 1122 | #address-cells = <1>; | ||
| 1123 | #size-cells = <0>; | ||
| 1124 | status = "disabled"; | 1122 | status = "disabled"; |
| 1125 | 1123 | ||
| 1126 | ports { | 1124 | ports { |
| @@ -1282,27 +1280,27 @@ | |||
| 1282 | gpu_opp_table: gpu-opp-table { | 1280 | gpu_opp_table: gpu-opp-table { |
| 1283 | compatible = "operating-points-v2"; | 1281 | compatible = "operating-points-v2"; |
| 1284 | 1282 | ||
| 1285 | opp@100000000 { | 1283 | opp-100000000 { |
| 1286 | opp-hz = /bits/ 64 <100000000>; | 1284 | opp-hz = /bits/ 64 <100000000>; |
| 1287 | opp-microvolt = <950000>; | 1285 | opp-microvolt = <950000>; |
| 1288 | }; | 1286 | }; |
| 1289 | opp@200000000 { | 1287 | opp-200000000 { |
| 1290 | opp-hz = /bits/ 64 <200000000>; | 1288 | opp-hz = /bits/ 64 <200000000>; |
| 1291 | opp-microvolt = <950000>; | 1289 | opp-microvolt = <950000>; |
| 1292 | }; | 1290 | }; |
| 1293 | opp@300000000 { | 1291 | opp-300000000 { |
| 1294 | opp-hz = /bits/ 64 <300000000>; | 1292 | opp-hz = /bits/ 64 <300000000>; |
| 1295 | opp-microvolt = <1000000>; | 1293 | opp-microvolt = <1000000>; |
| 1296 | }; | 1294 | }; |
| 1297 | opp@400000000 { | 1295 | opp-400000000 { |
| 1298 | opp-hz = /bits/ 64 <400000000>; | 1296 | opp-hz = /bits/ 64 <400000000>; |
| 1299 | opp-microvolt = <1100000>; | 1297 | opp-microvolt = <1100000>; |
| 1300 | }; | 1298 | }; |
| 1301 | opp@500000000 { | 1299 | opp-500000000 { |
| 1302 | opp-hz = /bits/ 64 <500000000>; | 1300 | opp-hz = /bits/ 64 <500000000>; |
| 1303 | opp-microvolt = <1200000>; | 1301 | opp-microvolt = <1200000>; |
| 1304 | }; | 1302 | }; |
| 1305 | opp@600000000 { | 1303 | opp-600000000 { |
| 1306 | opp-hz = /bits/ 64 <600000000>; | 1304 | opp-hz = /bits/ 64 <600000000>; |
| 1307 | opp-microvolt = <1250000>; | 1305 | opp-microvolt = <1250000>; |
| 1308 | }; | 1306 | }; |
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h index 1c01a6f843d8..28a2e45752fe 100644 --- a/arch/arm/boot/dts/sama5d2-pinfunc.h +++ b/arch/arm/boot/dts/sama5d2-pinfunc.h | |||
| @@ -518,7 +518,7 @@ | |||
| 518 | #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0) | 518 | #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0) |
| 519 | #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3) | 519 | #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3) |
| 520 | #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1) | 520 | #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1) |
| 521 | #define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1) | 521 | #define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1) |
| 522 | #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2) | 522 | #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2) |
| 523 | #define PIN_PC10 74 | 523 | #define PIN_PC10 74 |
| 524 | #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0) | 524 | #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0) |
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig index 8661dd9b064a..b37f8e675e40 100644 --- a/arch/arm/configs/imx_v4_v5_defconfig +++ b/arch/arm/configs/imx_v4_v5_defconfig | |||
| @@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y | |||
| 170 | # CONFIG_IOMMU_SUPPORT is not set | 170 | # CONFIG_IOMMU_SUPPORT is not set |
| 171 | CONFIG_IIO=y | 171 | CONFIG_IIO=y |
| 172 | CONFIG_FSL_MX25_ADC=y | 172 | CONFIG_FSL_MX25_ADC=y |
| 173 | CONFIG_PWM=y | ||
| 174 | CONFIG_PWM_IMX1=y | ||
| 175 | CONFIG_PWM_IMX27=y | ||
| 173 | CONFIG_EXT4_FS=y | 176 | CONFIG_EXT4_FS=y |
| 174 | # CONFIG_DNOTIFY is not set | 177 | # CONFIG_DNOTIFY is not set |
| 175 | CONFIG_VFAT_FS=y | 178 | CONFIG_VFAT_FS=y |
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 5586a5074a96..50fb01d70b10 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig | |||
| @@ -398,7 +398,7 @@ CONFIG_MAG3110=y | |||
| 398 | CONFIG_MPL3115=y | 398 | CONFIG_MPL3115=y |
| 399 | CONFIG_PWM=y | 399 | CONFIG_PWM=y |
| 400 | CONFIG_PWM_FSL_FTM=y | 400 | CONFIG_PWM_FSL_FTM=y |
| 401 | CONFIG_PWM_IMX=y | 401 | CONFIG_PWM_IMX27=y |
| 402 | CONFIG_NVMEM_IMX_OCOTP=y | 402 | CONFIG_NVMEM_IMX_OCOTP=y |
| 403 | CONFIG_NVMEM_VF610_OCOTP=y | 403 | CONFIG_NVMEM_VF610_OCOTP=y |
| 404 | CONFIG_TEE=y | 404 | CONFIG_TEE=y |
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 2de96a180166..31de4ab93005 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h | |||
| @@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, | |||
| 381 | return ret; | 381 | return ret; |
| 382 | } | 382 | } |
| 383 | 383 | ||
| 384 | static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, | ||
| 385 | const void *data, unsigned long len) | ||
| 386 | { | ||
| 387 | int srcu_idx = srcu_read_lock(&kvm->srcu); | ||
| 388 | int ret = kvm_write_guest(kvm, gpa, data, len); | ||
| 389 | |||
| 390 | srcu_read_unlock(&kvm->srcu, srcu_idx); | ||
| 391 | |||
| 392 | return ret; | ||
| 393 | } | ||
| 394 | |||
| 384 | static inline void *kvm_get_hyp_vector(void) | 395 | static inline void *kvm_get_hyp_vector(void) |
| 385 | { | 396 | { |
| 386 | switch(read_cpuid_part()) { | 397 | switch(read_cpuid_part()) { |
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index de2089501b8b..9e11dce55e06 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h | |||
| @@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm) | |||
| 75 | 75 | ||
| 76 | #define S2_PMD_MASK PMD_MASK | 76 | #define S2_PMD_MASK PMD_MASK |
| 77 | #define S2_PMD_SIZE PMD_SIZE | 77 | #define S2_PMD_SIZE PMD_SIZE |
| 78 | #define S2_PUD_MASK PUD_MASK | ||
| 79 | #define S2_PUD_SIZE PUD_SIZE | ||
| 78 | 80 | ||
| 79 | static inline bool kvm_stage2_has_pmd(struct kvm *kvm) | 81 | static inline bool kvm_stage2_has_pmd(struct kvm *kvm) |
| 80 | { | 82 | { |
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index 06dea6bce293..080ce70cab12 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h | |||
| @@ -55,53 +55,22 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 55 | 55 | ||
| 56 | static inline void syscall_get_arguments(struct task_struct *task, | 56 | static inline void syscall_get_arguments(struct task_struct *task, |
| 57 | struct pt_regs *regs, | 57 | struct pt_regs *regs, |
| 58 | unsigned int i, unsigned int n, | ||
| 59 | unsigned long *args) | 58 | unsigned long *args) |
| 60 | { | 59 | { |
| 61 | if (n == 0) | 60 | args[0] = regs->ARM_ORIG_r0; |
| 62 | return; | 61 | args++; |
| 63 | 62 | ||
| 64 | if (i + n > SYSCALL_MAX_ARGS) { | 63 | memcpy(args, ®s->ARM_r0 + 1, 5 * sizeof(args[0])); |
| 65 | unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; | ||
| 66 | unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; | ||
| 67 | pr_warn("%s called with max args %d, handling only %d\n", | ||
| 68 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 69 | memset(args_bad, 0, n_bad * sizeof(args[0])); | ||
| 70 | n = SYSCALL_MAX_ARGS - i; | ||
| 71 | } | ||
| 72 | |||
| 73 | if (i == 0) { | ||
| 74 | args[0] = regs->ARM_ORIG_r0; | ||
| 75 | args++; | ||
| 76 | i++; | ||
| 77 | n--; | ||
| 78 | } | ||
| 79 | |||
| 80 | memcpy(args, ®s->ARM_r0 + i, n * sizeof(args[0])); | ||
| 81 | } | 64 | } |
| 82 | 65 | ||
| 83 | static inline void syscall_set_arguments(struct task_struct *task, | 66 | static inline void syscall_set_arguments(struct task_struct *task, |
| 84 | struct pt_regs *regs, | 67 | struct pt_regs *regs, |
| 85 | unsigned int i, unsigned int n, | ||
| 86 | const unsigned long *args) | 68 | const unsigned long *args) |
| 87 | { | 69 | { |
| 88 | if (n == 0) | 70 | regs->ARM_ORIG_r0 = args[0]; |
| 89 | return; | 71 | args++; |
| 90 | 72 | ||
| 91 | if (i + n > SYSCALL_MAX_ARGS) { | 73 | memcpy(®s->ARM_r0 + 1, args, 5 * sizeof(args[0])); |
| 92 | pr_warn("%s called with max args %d, handling only %d\n", | ||
| 93 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 94 | n = SYSCALL_MAX_ARGS - i; | ||
| 95 | } | ||
| 96 | |||
| 97 | if (i == 0) { | ||
| 98 | regs->ARM_ORIG_r0 = args[0]; | ||
| 99 | args++; | ||
| 100 | i++; | ||
| 101 | n--; | ||
| 102 | } | ||
| 103 | |||
| 104 | memcpy(®s->ARM_r0 + i, args, n * sizeof(args[0])); | ||
| 105 | } | 74 | } |
| 106 | 75 | ||
| 107 | static inline int syscall_get_arch(void) | 76 | static inline int syscall_get_arch(void) |
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild index 23b4464c0995..ce8573157774 100644 --- a/arch/arm/include/uapi/asm/Kbuild +++ b/arch/arm/include/uapi/asm/Kbuild | |||
| @@ -3,3 +3,4 @@ | |||
| 3 | generated-y += unistd-common.h | 3 | generated-y += unistd-common.h |
| 4 | generated-y += unistd-oabi.h | 4 | generated-y += unistd-oabi.h |
| 5 | generated-y += unistd-eabi.h | 5 | generated-y += unistd-eabi.h |
| 6 | generic-y += kvm_para.h | ||
diff --git a/arch/arm/include/uapi/asm/kvm_para.h b/arch/arm/include/uapi/asm/kvm_para.h deleted file mode 100644 index baacc4996d18..000000000000 --- a/arch/arm/include/uapi/asm/kvm_para.h +++ /dev/null | |||
| @@ -1,2 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
| 2 | #include <asm-generic/kvm_para.h> | ||
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 51e808adb00c..2a757dcaa1a5 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c | |||
| @@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void) | |||
| 591 | 591 | ||
| 592 | np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam"); | 592 | np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam"); |
| 593 | if (!np) | 593 | if (!np) |
| 594 | goto securam_fail; | 594 | goto securam_fail_no_ref_dev; |
| 595 | 595 | ||
| 596 | pdev = of_find_device_by_node(np); | 596 | pdev = of_find_device_by_node(np); |
| 597 | of_node_put(np); | 597 | of_node_put(np); |
| 598 | if (!pdev) { | 598 | if (!pdev) { |
| 599 | pr_warn("%s: failed to find securam device!\n", __func__); | 599 | pr_warn("%s: failed to find securam device!\n", __func__); |
| 600 | goto securam_fail; | 600 | goto securam_fail_no_ref_dev; |
| 601 | } | 601 | } |
| 602 | 602 | ||
| 603 | sram_pool = gen_pool_get(&pdev->dev, NULL); | 603 | sram_pool = gen_pool_get(&pdev->dev, NULL); |
| @@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void) | |||
| 620 | return 0; | 620 | return 0; |
| 621 | 621 | ||
| 622 | securam_fail: | 622 | securam_fail: |
| 623 | put_device(&pdev->dev); | ||
| 624 | securam_fail_no_ref_dev: | ||
| 623 | iounmap(pm_data.sfrbu); | 625 | iounmap(pm_data.sfrbu); |
| 624 | pm_data.sfrbu = NULL; | 626 | pm_data.sfrbu = NULL; |
| 625 | return ret; | 627 | return ret; |
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c index bfeb25aaf9a2..326e870d7123 100644 --- a/arch/arm/mach-imx/cpuidle-imx6q.c +++ b/arch/arm/mach-imx/cpuidle-imx6q.c | |||
| @@ -16,30 +16,23 @@ | |||
| 16 | #include "cpuidle.h" | 16 | #include "cpuidle.h" |
| 17 | #include "hardware.h" | 17 | #include "hardware.h" |
| 18 | 18 | ||
| 19 | static atomic_t master = ATOMIC_INIT(0); | 19 | static int num_idle_cpus = 0; |
| 20 | static DEFINE_SPINLOCK(master_lock); | 20 | static DEFINE_SPINLOCK(cpuidle_lock); |
| 21 | 21 | ||
| 22 | static int imx6q_enter_wait(struct cpuidle_device *dev, | 22 | static int imx6q_enter_wait(struct cpuidle_device *dev, |
| 23 | struct cpuidle_driver *drv, int index) | 23 | struct cpuidle_driver *drv, int index) |
| 24 | { | 24 | { |
| 25 | if (atomic_inc_return(&master) == num_online_cpus()) { | 25 | spin_lock(&cpuidle_lock); |
| 26 | /* | 26 | if (++num_idle_cpus == num_online_cpus()) |
| 27 | * With this lock, we prevent other cpu to exit and enter | ||
| 28 | * this function again and become the master. | ||
| 29 | */ | ||
| 30 | if (!spin_trylock(&master_lock)) | ||
| 31 | goto idle; | ||
| 32 | imx6_set_lpm(WAIT_UNCLOCKED); | 27 | imx6_set_lpm(WAIT_UNCLOCKED); |
| 33 | cpu_do_idle(); | 28 | spin_unlock(&cpuidle_lock); |
| 34 | imx6_set_lpm(WAIT_CLOCKED); | ||
| 35 | spin_unlock(&master_lock); | ||
| 36 | goto done; | ||
| 37 | } | ||
| 38 | 29 | ||
| 39 | idle: | ||
| 40 | cpu_do_idle(); | 30 | cpu_do_idle(); |
| 41 | done: | 31 | |
| 42 | atomic_dec(&master); | 32 | spin_lock(&cpuidle_lock); |
| 33 | if (num_idle_cpus-- == num_online_cpus()) | ||
| 34 | imx6_set_lpm(WAIT_CLOCKED); | ||
| 35 | spin_unlock(&cpuidle_lock); | ||
| 43 | 36 | ||
| 44 | return index; | 37 | return index; |
| 45 | } | 38 | } |
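The imx6q change replaces the atomic counter plus trylock "master" dance with a plain counter protected by one spinlock: the last CPU to enter idle switches the SoC to WAIT_UNCLOCKED, and the first CPU to leave idle switches it back to WAIT_CLOCKED. A user-space sketch of that last-in/first-out pattern, using pthreads in place of the kernel spinlock and hypothetical enter/exit_low_power() hooks (build with -pthread):

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t idle_lock = PTHREAD_MUTEX_INITIALIZER;
static int num_idle;

/* Hypothetical hooks standing in for imx6_set_lpm(WAIT_UNCLOCKED/WAIT_CLOCKED). */
static void enter_low_power(void) { puts("enter low power"); }
static void exit_low_power(void)  { puts("exit low power"); }

static void cpu_enter_idle(void)
{
	pthread_mutex_lock(&idle_lock);
	if (++num_idle == NCPUS)          /* last CPU in: drop to low power */
		enter_low_power();
	pthread_mutex_unlock(&idle_lock);

	/* ... the CPU would sit in its idle state here ... */

	pthread_mutex_lock(&idle_lock);
	if (num_idle-- == NCPUS)          /* first CPU out: restore full power */
		exit_low_power();
	pthread_mutex_unlock(&idle_lock);
}

static void *cpu_thread(void *arg)
{
	(void)arg;
	cpu_enter_idle();
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (int i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, NULL);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}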
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c index c7169c2f94c4..08c7892866c2 100644 --- a/arch/arm/mach-imx/mach-imx51.c +++ b/arch/arm/mach-imx/mach-imx51.c | |||
| @@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void) | |||
| 59 | return; | 59 | return; |
| 60 | 60 | ||
| 61 | m4if_base = of_iomap(np, 0); | 61 | m4if_base = of_iomap(np, 0); |
| 62 | of_node_put(np); | ||
| 62 | if (!m4if_base) { | 63 | if (!m4if_base) { |
| 63 | pr_err("Unable to map M4IF registers\n"); | 64 | pr_err("Unable to map M4IF registers\n"); |
| 64 | return; | 65 | return; |
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 53c316f7301e..fe4932fda01d 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c | |||
| @@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = { | |||
| 300 | } | 300 | } |
| 301 | }; | 301 | }; |
| 302 | 302 | ||
| 303 | static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64); | 303 | static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32); |
| 304 | static struct iop_adma_platform_data iop13xx_adma_0_data = { | 304 | static struct iop_adma_platform_data iop13xx_adma_0_data = { |
| 305 | .hw_id = 0, | 305 | .hw_id = 0, |
| 306 | .pool_size = PAGE_SIZE, | 306 | .pool_size = PAGE_SIZE, |
| @@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = { | |||
| 324 | .resource = iop13xx_adma_0_resources, | 324 | .resource = iop13xx_adma_0_resources, |
| 325 | .dev = { | 325 | .dev = { |
| 326 | .dma_mask = &iop13xx_adma_dmamask, | 326 | .dma_mask = &iop13xx_adma_dmamask, |
| 327 | .coherent_dma_mask = DMA_BIT_MASK(64), | 327 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 328 | .platform_data = (void *) &iop13xx_adma_0_data, | 328 | .platform_data = (void *) &iop13xx_adma_0_data, |
| 329 | }, | 329 | }, |
| 330 | }; | 330 | }; |
| @@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = { | |||
| 336 | .resource = iop13xx_adma_1_resources, | 336 | .resource = iop13xx_adma_1_resources, |
| 337 | .dev = { | 337 | .dev = { |
| 338 | .dma_mask = &iop13xx_adma_dmamask, | 338 | .dma_mask = &iop13xx_adma_dmamask, |
| 339 | .coherent_dma_mask = DMA_BIT_MASK(64), | 339 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 340 | .platform_data = (void *) &iop13xx_adma_1_data, | 340 | .platform_data = (void *) &iop13xx_adma_1_data, |
| 341 | }, | 341 | }, |
| 342 | }; | 342 | }; |
| @@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = { | |||
| 348 | .resource = iop13xx_adma_2_resources, | 348 | .resource = iop13xx_adma_2_resources, |
| 349 | .dev = { | 349 | .dev = { |
| 350 | .dma_mask = &iop13xx_adma_dmamask, | 350 | .dma_mask = &iop13xx_adma_dmamask, |
| 351 | .coherent_dma_mask = DMA_BIT_MASK(64), | 351 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 352 | .platform_data = (void *) &iop13xx_adma_2_data, | 352 | .platform_data = (void *) &iop13xx_adma_2_data, |
| 353 | }, | 353 | }, |
| 354 | }; | 354 | }; |
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c index db511ec2b1df..116feb6b261e 100644 --- a/arch/arm/mach-iop13xx/tpmi.c +++ b/arch/arm/mach-iop13xx/tpmi.c | |||
| @@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = { | |||
| 152 | } | 152 | } |
| 153 | }; | 153 | }; |
| 154 | 154 | ||
| 155 | u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64); | 155 | u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32); |
| 156 | static struct platform_device iop13xx_tpmi_0_device = { | 156 | static struct platform_device iop13xx_tpmi_0_device = { |
| 157 | .name = "iop-tpmi", | 157 | .name = "iop-tpmi", |
| 158 | .id = 0, | 158 | .id = 0, |
| @@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = { | |||
| 160 | .resource = iop13xx_tpmi_0_resources, | 160 | .resource = iop13xx_tpmi_0_resources, |
| 161 | .dev = { | 161 | .dev = { |
| 162 | .dma_mask = &iop13xx_tpmi_mask, | 162 | .dma_mask = &iop13xx_tpmi_mask, |
| 163 | .coherent_dma_mask = DMA_BIT_MASK(64), | 163 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 164 | }, | 164 | }, |
| 165 | }; | 165 | }; |
| 166 | 166 | ||
| @@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = { | |||
| 171 | .resource = iop13xx_tpmi_1_resources, | 171 | .resource = iop13xx_tpmi_1_resources, |
| 172 | .dev = { | 172 | .dev = { |
| 173 | .dma_mask = &iop13xx_tpmi_mask, | 173 | .dma_mask = &iop13xx_tpmi_mask, |
| 174 | .coherent_dma_mask = DMA_BIT_MASK(64), | 174 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 175 | }, | 175 | }, |
| 176 | }; | 176 | }; |
| 177 | 177 | ||
| @@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = { | |||
| 182 | .resource = iop13xx_tpmi_2_resources, | 182 | .resource = iop13xx_tpmi_2_resources, |
| 183 | .dev = { | 183 | .dev = { |
| 184 | .dma_mask = &iop13xx_tpmi_mask, | 184 | .dma_mask = &iop13xx_tpmi_mask, |
| 185 | .coherent_dma_mask = DMA_BIT_MASK(64), | 185 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 186 | }, | 186 | }, |
| 187 | }; | 187 | }; |
| 188 | 188 | ||
| @@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = { | |||
| 193 | .resource = iop13xx_tpmi_3_resources, | 193 | .resource = iop13xx_tpmi_3_resources, |
| 194 | .dev = { | 194 | .dev = { |
| 195 | .dma_mask = &iop13xx_tpmi_mask, | 195 | .dma_mask = &iop13xx_tpmi_mask, |
| 196 | .coherent_dma_mask = DMA_BIT_MASK(64), | 196 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 197 | }, | 197 | }, |
| 198 | }; | 198 | }; |
| 199 | 199 | ||
diff --git a/arch/arm/mach-milbeaut/platsmp.c b/arch/arm/mach-milbeaut/platsmp.c index 591543c81399..3ea880f5fcb7 100644 --- a/arch/arm/mach-milbeaut/platsmp.c +++ b/arch/arm/mach-milbeaut/platsmp.c | |||
| @@ -65,6 +65,7 @@ static void m10v_smp_init(unsigned int max_cpus) | |||
| 65 | writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4); | 65 | writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 68 | static void m10v_cpu_die(unsigned int l_cpu) | 69 | static void m10v_cpu_die(unsigned int l_cpu) |
| 69 | { | 70 | { |
| 70 | gic_cpu_if_down(0); | 71 | gic_cpu_if_down(0); |
| @@ -83,12 +84,15 @@ static int m10v_cpu_kill(unsigned int l_cpu) | |||
| 83 | 84 | ||
| 84 | return 1; | 85 | return 1; |
| 85 | } | 86 | } |
| 87 | #endif | ||
| 86 | 88 | ||
| 87 | static struct smp_operations m10v_smp_ops __initdata = { | 89 | static struct smp_operations m10v_smp_ops __initdata = { |
| 88 | .smp_prepare_cpus = m10v_smp_init, | 90 | .smp_prepare_cpus = m10v_smp_init, |
| 89 | .smp_boot_secondary = m10v_boot_secondary, | 91 | .smp_boot_secondary = m10v_boot_secondary, |
| 92 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 90 | .cpu_die = m10v_cpu_die, | 93 | .cpu_die = m10v_cpu_die, |
| 91 | .cpu_kill = m10v_cpu_kill, | 94 | .cpu_kill = m10v_cpu_kill, |
| 95 | #endif | ||
| 92 | }; | 96 | }; |
| 93 | CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops); | 97 | CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops); |
| 94 | 98 | ||
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index be30c3c061b4..1b15d593837e 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c | |||
| @@ -182,6 +182,7 @@ static struct resource latch1_resources[] = { | |||
| 182 | 182 | ||
| 183 | static struct bgpio_pdata latch1_pdata = { | 183 | static struct bgpio_pdata latch1_pdata = { |
| 184 | .label = LATCH1_LABEL, | 184 | .label = LATCH1_LABEL, |
| 185 | .base = -1, | ||
| 185 | .ngpio = LATCH1_NGPIO, | 186 | .ngpio = LATCH1_NGPIO, |
| 186 | }; | 187 | }; |
| 187 | 188 | ||
| @@ -219,6 +220,7 @@ static struct resource latch2_resources[] = { | |||
| 219 | 220 | ||
| 220 | static struct bgpio_pdata latch2_pdata = { | 221 | static struct bgpio_pdata latch2_pdata = { |
| 221 | .label = LATCH2_LABEL, | 222 | .label = LATCH2_LABEL, |
| 223 | .base = -1, | ||
| 222 | .ngpio = LATCH2_NGPIO, | 224 | .ngpio = LATCH2_NGPIO, |
| 223 | }; | 225 | }; |
| 224 | 226 | ||
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 1444b4b4bd9f..439e143cad7b 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
| @@ -250,8 +250,10 @@ static int __init omapdss_init_of(void) | |||
| 250 | if (!node) | 250 | if (!node) |
| 251 | return 0; | 251 | return 0; |
| 252 | 252 | ||
| 253 | if (!of_device_is_available(node)) | 253 | if (!of_device_is_available(node)) { |
| 254 | of_node_put(node); | ||
| 254 | return 0; | 255 | return 0; |
| 256 | } | ||
| 255 | 257 | ||
| 256 | pdev = of_find_device_by_node(node); | 258 | pdev = of_find_device_by_node(node); |
| 257 | 259 | ||
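Both the imx51 and omap2 display hunks are about dropping the of_node reference on every path: of_node_put() right after of_iomap() in the first case, and before the early "not available" return in the second. A small sketch of the rule — every successful lookup is paired with exactly one put, including early exits — using a hypothetical refcounted node type in place of struct device_node:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for a refcounted device-tree node. */
struct node { int refs; bool available; };

static struct node *node_find(struct node *n) { n->refs++; return n; }
static void node_put(struct node *n)          { n->refs--; }

static int init(struct node *n)
{
	struct node *np = node_find(n);

	if (!np->available) {
		node_put(np);             /* early exit still drops the ref */
		return 0;
	}

	/* ... use np here ... */

	node_put(np);                     /* normal exit drops it too */
	return 0;
}

int main(void)
{
	struct node a = { .refs = 0, .available = false };
	struct node b = { .refs = 0, .available = true };

	init(&a);
	init(&b);
	printf("leaked refs: %d %d\n", a.refs, b.refs);  /* expect 0 0 */
	return 0;
}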
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index a4d1f8de3b5b..d9612221e484 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c | |||
| @@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = { | |||
| 143 | .resource = iop3xx_dma_0_resources, | 143 | .resource = iop3xx_dma_0_resources, |
| 144 | .dev = { | 144 | .dev = { |
| 145 | .dma_mask = &iop3xx_adma_dmamask, | 145 | .dma_mask = &iop3xx_adma_dmamask, |
| 146 | .coherent_dma_mask = DMA_BIT_MASK(64), | 146 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 147 | .platform_data = (void *) &iop3xx_dma_0_data, | 147 | .platform_data = (void *) &iop3xx_dma_0_data, |
| 148 | }, | 148 | }, |
| 149 | }; | 149 | }; |
| @@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = { | |||
| 155 | .resource = iop3xx_dma_1_resources, | 155 | .resource = iop3xx_dma_1_resources, |
| 156 | .dev = { | 156 | .dev = { |
| 157 | .dma_mask = &iop3xx_adma_dmamask, | 157 | .dma_mask = &iop3xx_adma_dmamask, |
| 158 | .coherent_dma_mask = DMA_BIT_MASK(64), | 158 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 159 | .platform_data = (void *) &iop3xx_dma_1_data, | 159 | .platform_data = (void *) &iop3xx_dma_1_data, |
| 160 | }, | 160 | }, |
| 161 | }; | 161 | }; |
| @@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = { | |||
| 167 | .resource = iop3xx_aau_resources, | 167 | .resource = iop3xx_aau_resources, |
| 168 | .dev = { | 168 | .dev = { |
| 169 | .dma_mask = &iop3xx_adma_dmamask, | 169 | .dma_mask = &iop3xx_adma_dmamask, |
| 170 | .coherent_dma_mask = DMA_BIT_MASK(64), | 170 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 171 | .platform_data = (void *) &iop3xx_aau_data, | 171 | .platform_data = (void *) &iop3xx_aau_data, |
| 172 | }, | 172 | }, |
| 173 | }; | 173 | }; |
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index a6c81ce00f52..8647cb80a93b 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c | |||
| @@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = { | |||
| 622 | .resource = orion_xor0_shared_resources, | 622 | .resource = orion_xor0_shared_resources, |
| 623 | .dev = { | 623 | .dev = { |
| 624 | .dma_mask = &orion_xor_dmamask, | 624 | .dma_mask = &orion_xor_dmamask, |
| 625 | .coherent_dma_mask = DMA_BIT_MASK(64), | 625 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 626 | .platform_data = &orion_xor0_pdata, | 626 | .platform_data = &orion_xor0_pdata, |
| 627 | }, | 627 | }, |
| 628 | }; | 628 | }; |
| @@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = { | |||
| 683 | .resource = orion_xor1_shared_resources, | 683 | .resource = orion_xor1_shared_resources, |
| 684 | .dev = { | 684 | .dev = { |
| 685 | .dma_mask = &orion_xor_dmamask, | 685 | .dma_mask = &orion_xor_dmamask, |
| 686 | .coherent_dma_mask = DMA_BIT_MASK(64), | 686 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 687 | .platform_data = &orion_xor1_pdata, | 687 | .platform_data = &orion_xor1_pdata, |
| 688 | }, | 688 | }, |
| 689 | }; | 689 | }; |
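The iop13xx, plat-iop and plat-orion hunks all narrow both dma_mask and coherent_dma_mask from DMA_BIT_MASK(64) to DMA_BIT_MASK(32), apparently because a 64-bit mask is not usable on these 32-bit platforms. As a quick reference, the sketch below mirrors the kernel's DMA_BIT_MASK() macro (as defined in include/linux/dma-mapping.h) and shows how an address is checked against a mask; the addr_fits_mask() helper is an illustration, not a kernel API:

#include <stdio.h>
#include <inttypes.h>

/* Mirrors the kernel's DMA_BIT_MASK() from include/linux/dma-mapping.h. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static int addr_fits_mask(uint64_t addr, uint64_t mask)
{
	return (addr & ~mask) == 0;       /* address must not exceed the mask */
}

int main(void)
{
	uint64_t mask32 = DMA_BIT_MASK(32);
	uint64_t mask64 = DMA_BIT_MASK(64);

	printf("DMA_BIT_MASK(32) = 0x%" PRIx64 "\n", mask32);
	printf("DMA_BIT_MASK(64) = 0x%" PRIx64 "\n", mask64);
	printf("0x100000000 fits 32-bit mask? %d\n",
	       addr_fits_mask(0x100000000ULL, mask32));
	return 0;
}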
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 70498a033cf5..b5ca9c50876d 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms | |||
| @@ -27,6 +27,7 @@ config ARCH_BCM2835 | |||
| 27 | bool "Broadcom BCM2835 family" | 27 | bool "Broadcom BCM2835 family" |
| 28 | select TIMER_OF | 28 | select TIMER_OF |
| 29 | select GPIOLIB | 29 | select GPIOLIB |
| 30 | select MFD_CORE | ||
| 30 | select PINCTRL | 31 | select PINCTRL |
| 31 | select PINCTRL_BCM2835 | 32 | select PINCTRL_BCM2835 |
| 32 | select ARM_AMBA | 33 | select ARM_AMBA |
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index 7c649f6b14cb..cd7c76e58b09 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | |||
| @@ -162,6 +162,7 @@ | |||
| 162 | rx-fifo-depth = <16384>; | 162 | rx-fifo-depth = <16384>; |
| 163 | snps,multicast-filter-bins = <256>; | 163 | snps,multicast-filter-bins = <256>; |
| 164 | iommus = <&smmu 1>; | 164 | iommus = <&smmu 1>; |
| 165 | altr,sysmgr-syscon = <&sysmgr 0x44 0>; | ||
| 165 | status = "disabled"; | 166 | status = "disabled"; |
| 166 | }; | 167 | }; |
| 167 | 168 | ||
| @@ -179,6 +180,7 @@ | |||
| 179 | rx-fifo-depth = <16384>; | 180 | rx-fifo-depth = <16384>; |
| 180 | snps,multicast-filter-bins = <256>; | 181 | snps,multicast-filter-bins = <256>; |
| 181 | iommus = <&smmu 2>; | 182 | iommus = <&smmu 2>; |
| 183 | altr,sysmgr-syscon = <&sysmgr 0x48 0>; | ||
| 182 | status = "disabled"; | 184 | status = "disabled"; |
| 183 | }; | 185 | }; |
| 184 | 186 | ||
| @@ -196,6 +198,7 @@ | |||
| 196 | rx-fifo-depth = <16384>; | 198 | rx-fifo-depth = <16384>; |
| 197 | snps,multicast-filter-bins = <256>; | 199 | snps,multicast-filter-bins = <256>; |
| 198 | iommus = <&smmu 3>; | 200 | iommus = <&smmu 3>; |
| 201 | altr,sysmgr-syscon = <&sysmgr 0x4c 0>; | ||
| 199 | status = "disabled"; | 202 | status = "disabled"; |
| 200 | }; | 203 | }; |
| 201 | 204 | ||
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index bb2045be8814..97aeb946ed5e 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi | |||
| @@ -321,7 +321,6 @@ | |||
| 321 | nvidia,default-trim = <0x9>; | 321 | nvidia,default-trim = <0x9>; |
| 322 | nvidia,dqs-trim = <63>; | 322 | nvidia,dqs-trim = <63>; |
| 323 | mmc-hs400-1_8v; | 323 | mmc-hs400-1_8v; |
| 324 | supports-cqe; | ||
| 325 | status = "disabled"; | 324 | status = "disabled"; |
| 326 | }; | 325 | }; |
| 327 | 326 | ||
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi index 61a0afb74e63..1ea684af99c4 100644 --- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | /* | 2 | /* |
| 3 | * Device Tree Source for the RZ/G2E (R8A774C0) SoC | 3 | * Device Tree Source for the RZ/G2E (R8A774C0) SoC |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2018 Renesas Electronics Corp. | 5 | * Copyright (C) 2018-2019 Renesas Electronics Corp. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include <dt-bindings/clock/r8a774c0-cpg-mssr.h> | 8 | #include <dt-bindings/clock/r8a774c0-cpg-mssr.h> |
| @@ -1150,9 +1150,8 @@ | |||
| 1150 | <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, | 1150 | <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, |
| 1151 | <&scif_clk>; | 1151 | <&scif_clk>; |
| 1152 | clock-names = "fck", "brg_int", "scif_clk"; | 1152 | clock-names = "fck", "brg_int", "scif_clk"; |
| 1153 | dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, | 1153 | dmas = <&dmac0 0x5b>, <&dmac0 0x5a>; |
| 1154 | <&dmac2 0x5b>, <&dmac2 0x5a>; | 1154 | dma-names = "tx", "rx"; |
| 1155 | dma-names = "tx", "rx", "tx", "rx"; | ||
| 1156 | power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; | 1155 | power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; |
| 1157 | resets = <&cpg 202>; | 1156 | resets = <&cpg 202>; |
| 1158 | status = "disabled"; | 1157 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi index a69faa60ea4d..d2ad665fe2d9 100644 --- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | /* | 2 | /* |
| 3 | * Device Tree Source for the R-Car E3 (R8A77990) SoC | 3 | * Device Tree Source for the R-Car E3 (R8A77990) SoC |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2018 Renesas Electronics Corp. | 5 | * Copyright (C) 2018-2019 Renesas Electronics Corp. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include <dt-bindings/clock/r8a77990-cpg-mssr.h> | 8 | #include <dt-bindings/clock/r8a77990-cpg-mssr.h> |
| @@ -1067,9 +1067,8 @@ | |||
| 1067 | <&cpg CPG_CORE R8A77990_CLK_S3D1C>, | 1067 | <&cpg CPG_CORE R8A77990_CLK_S3D1C>, |
| 1068 | <&scif_clk>; | 1068 | <&scif_clk>; |
| 1069 | clock-names = "fck", "brg_int", "scif_clk"; | 1069 | clock-names = "fck", "brg_int", "scif_clk"; |
| 1070 | dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, | 1070 | dmas = <&dmac0 0x5b>, <&dmac0 0x5a>; |
| 1071 | <&dmac2 0x5b>, <&dmac2 0x5a>; | 1071 | dma-names = "tx", "rx"; |
| 1072 | dma-names = "tx", "rx", "tx", "rx"; | ||
| 1073 | power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; | 1072 | power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; |
| 1074 | resets = <&cpg 202>; | 1073 | resets = <&cpg 202>; |
| 1075 | status = "disabled"; | 1074 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts index 33c44e857247..0e34354b2092 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts | |||
| @@ -108,8 +108,8 @@ | |||
| 108 | snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; | 108 | snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; |
| 109 | snps,reset-active-low; | 109 | snps,reset-active-low; |
| 110 | snps,reset-delays-us = <0 10000 50000>; | 110 | snps,reset-delays-us = <0 10000 50000>; |
| 111 | tx_delay = <0x25>; | 111 | tx_delay = <0x24>; |
| 112 | rx_delay = <0x11>; | 112 | rx_delay = <0x18>; |
| 113 | status = "okay"; | 113 | status = "okay"; |
| 114 | }; | 114 | }; |
| 115 | 115 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index 2157a528276b..79b4d1d4b5d6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | |||
| @@ -46,8 +46,7 @@ | |||
| 46 | 46 | ||
| 47 | vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator { | 47 | vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator { |
| 48 | compatible = "regulator-fixed"; | 48 | compatible = "regulator-fixed"; |
| 49 | enable-active-high; | 49 | gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>; |
| 50 | gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>; | ||
| 51 | pinctrl-names = "default"; | 50 | pinctrl-names = "default"; |
| 52 | pinctrl-0 = <&usb20_host_drv>; | 51 | pinctrl-0 = <&usb20_host_drv>; |
| 53 | regulator-name = "vcc_host1_5v"; | 52 | regulator-name = "vcc_host1_5v"; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 84f14b132e8f..dabef1a21649 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi | |||
| @@ -1445,11 +1445,11 @@ | |||
| 1445 | 1445 | ||
| 1446 | sdmmc0 { | 1446 | sdmmc0 { |
| 1447 | sdmmc0_clk: sdmmc0-clk { | 1447 | sdmmc0_clk: sdmmc0-clk { |
| 1448 | rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>; | 1448 | rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>; |
| 1449 | }; | 1449 | }; |
| 1450 | 1450 | ||
| 1451 | sdmmc0_cmd: sdmmc0-cmd { | 1451 | sdmmc0_cmd: sdmmc0-cmd { |
| 1452 | rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>; | 1452 | rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>; |
| 1453 | }; | 1453 | }; |
| 1454 | 1454 | ||
| 1455 | sdmmc0_dectn: sdmmc0-dectn { | 1455 | sdmmc0_dectn: sdmmc0-dectn { |
| @@ -1461,14 +1461,14 @@ | |||
| 1461 | }; | 1461 | }; |
| 1462 | 1462 | ||
| 1463 | sdmmc0_bus1: sdmmc0-bus1 { | 1463 | sdmmc0_bus1: sdmmc0-bus1 { |
| 1464 | rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>; | 1464 | rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>; |
| 1465 | }; | 1465 | }; |
| 1466 | 1466 | ||
| 1467 | sdmmc0_bus4: sdmmc0-bus4 { | 1467 | sdmmc0_bus4: sdmmc0-bus4 { |
| 1468 | rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>, | 1468 | rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>, |
| 1469 | <1 RK_PA1 1 &pcfg_pull_up_4ma>, | 1469 | <1 RK_PA1 1 &pcfg_pull_up_8ma>, |
| 1470 | <1 RK_PA2 1 &pcfg_pull_up_4ma>, | 1470 | <1 RK_PA2 1 &pcfg_pull_up_8ma>, |
| 1471 | <1 RK_PA3 1 &pcfg_pull_up_4ma>; | 1471 | <1 RK_PA3 1 &pcfg_pull_up_8ma>; |
| 1472 | }; | 1472 | }; |
| 1473 | 1473 | ||
| 1474 | sdmmc0_gpio: sdmmc0-gpio { | 1474 | sdmmc0_gpio: sdmmc0-gpio { |
| @@ -1642,50 +1642,50 @@ | |||
| 1642 | rgmiim1_pins: rgmiim1-pins { | 1642 | rgmiim1_pins: rgmiim1-pins { |
| 1643 | rockchip,pins = | 1643 | rockchip,pins = |
| 1644 | /* mac_txclk */ | 1644 | /* mac_txclk */ |
| 1645 | <1 RK_PB4 2 &pcfg_pull_none_12ma>, | 1645 | <1 RK_PB4 2 &pcfg_pull_none_8ma>, |
| 1646 | /* mac_rxclk */ | 1646 | /* mac_rxclk */ |
| 1647 | <1 RK_PB5 2 &pcfg_pull_none_2ma>, | 1647 | <1 RK_PB5 2 &pcfg_pull_none_4ma>, |
| 1648 | /* mac_mdio */ | 1648 | /* mac_mdio */ |
| 1649 | <1 RK_PC3 2 &pcfg_pull_none_2ma>, | 1649 | <1 RK_PC3 2 &pcfg_pull_none_4ma>, |
| 1650 | /* mac_txen */ | 1650 | /* mac_txen */ |
| 1651 | <1 RK_PD1 2 &pcfg_pull_none_12ma>, | 1651 | <1 RK_PD1 2 &pcfg_pull_none_8ma>, |
| 1652 | /* mac_clk */ | 1652 | /* mac_clk */ |
| 1653 | <1 RK_PC5 2 &pcfg_pull_none_2ma>, | 1653 | <1 RK_PC5 2 &pcfg_pull_none_4ma>, |
| 1654 | /* mac_rxdv */ | 1654 | /* mac_rxdv */ |
| 1655 | <1 RK_PC6 2 &pcfg_pull_none_2ma>, | 1655 | <1 RK_PC6 2 &pcfg_pull_none_4ma>, |
| 1656 | /* mac_mdc */ | 1656 | /* mac_mdc */ |
| 1657 | <1 RK_PC7 2 &pcfg_pull_none_2ma>, | 1657 | <1 RK_PC7 2 &pcfg_pull_none_4ma>, |
| 1658 | /* mac_rxd1 */ | 1658 | /* mac_rxd1 */ |
| 1659 | <1 RK_PB2 2 &pcfg_pull_none_2ma>, | 1659 | <1 RK_PB2 2 &pcfg_pull_none_4ma>, |
| 1660 | /* mac_rxd0 */ | 1660 | /* mac_rxd0 */ |
| 1661 | <1 RK_PB3 2 &pcfg_pull_none_2ma>, | 1661 | <1 RK_PB3 2 &pcfg_pull_none_4ma>, |
| 1662 | /* mac_txd1 */ | 1662 | /* mac_txd1 */ |
| 1663 | <1 RK_PB0 2 &pcfg_pull_none_12ma>, | 1663 | <1 RK_PB0 2 &pcfg_pull_none_8ma>, |
| 1664 | /* mac_txd0 */ | 1664 | /* mac_txd0 */ |
| 1665 | <1 RK_PB1 2 &pcfg_pull_none_12ma>, | 1665 | <1 RK_PB1 2 &pcfg_pull_none_8ma>, |
| 1666 | /* mac_rxd3 */ | 1666 | /* mac_rxd3 */ |
| 1667 | <1 RK_PB6 2 &pcfg_pull_none_2ma>, | 1667 | <1 RK_PB6 2 &pcfg_pull_none_4ma>, |
| 1668 | /* mac_rxd2 */ | 1668 | /* mac_rxd2 */ |
| 1669 | <1 RK_PB7 2 &pcfg_pull_none_2ma>, | 1669 | <1 RK_PB7 2 &pcfg_pull_none_4ma>, |
| 1670 | /* mac_txd3 */ | 1670 | /* mac_txd3 */ |
| 1671 | <1 RK_PC0 2 &pcfg_pull_none_12ma>, | 1671 | <1 RK_PC0 2 &pcfg_pull_none_8ma>, |
| 1672 | /* mac_txd2 */ | 1672 | /* mac_txd2 */ |
| 1673 | <1 RK_PC1 2 &pcfg_pull_none_12ma>, | 1673 | <1 RK_PC1 2 &pcfg_pull_none_8ma>, |
| 1674 | 1674 | ||
| 1675 | /* mac_txclk */ | 1675 | /* mac_txclk */ |
| 1676 | <0 RK_PB0 1 &pcfg_pull_none>, | 1676 | <0 RK_PB0 1 &pcfg_pull_none_8ma>, |
| 1677 | /* mac_txen */ | 1677 | /* mac_txen */ |
| 1678 | <0 RK_PB4 1 &pcfg_pull_none>, | 1678 | <0 RK_PB4 1 &pcfg_pull_none_8ma>, |
| 1679 | /* mac_clk */ | 1679 | /* mac_clk */ |
| 1680 | <0 RK_PD0 1 &pcfg_pull_none>, | 1680 | <0 RK_PD0 1 &pcfg_pull_none_4ma>, |
| 1681 | /* mac_txd1 */ | 1681 | /* mac_txd1 */ |
| 1682 | <0 RK_PC0 1 &pcfg_pull_none>, | 1682 | <0 RK_PC0 1 &pcfg_pull_none_8ma>, |
| 1683 | /* mac_txd0 */ | 1683 | /* mac_txd0 */ |
| 1684 | <0 RK_PC1 1 &pcfg_pull_none>, | 1684 | <0 RK_PC1 1 &pcfg_pull_none_8ma>, |
| 1685 | /* mac_txd3 */ | 1685 | /* mac_txd3 */ |
| 1686 | <0 RK_PC7 1 &pcfg_pull_none>, | 1686 | <0 RK_PC7 1 &pcfg_pull_none_8ma>, |
| 1687 | /* mac_txd2 */ | 1687 | /* mac_txd2 */ |
| 1688 | <0 RK_PC6 1 &pcfg_pull_none>; | 1688 | <0 RK_PC6 1 &pcfg_pull_none_8ma>; |
| 1689 | }; | 1689 | }; |
| 1690 | 1690 | ||
| 1691 | rmiim1_pins: rmiim1-pins { | 1691 | rmiim1_pins: rmiim1-pins { |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts index 4a543f2117d4..844eac939a97 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts | |||
| @@ -158,6 +158,7 @@ | |||
| 158 | }; | 158 | }; |
| 159 | 159 | ||
| 160 | &hdmi { | 160 | &hdmi { |
| 161 | ddc-i2c-bus = <&i2c3>; | ||
| 161 | pinctrl-names = "default"; | 162 | pinctrl-names = "default"; |
| 162 | pinctrl-0 = <&hdmi_cec>; | 163 | pinctrl-0 = <&hdmi_cec>; |
| 163 | status = "okay"; | 164 | status = "okay"; |
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index b0742a16c6c9..ebeefcf835e8 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
| @@ -445,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, | |||
| 445 | return ret; | 445 | return ret; |
| 446 | } | 446 | } |
| 447 | 447 | ||
| 448 | static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, | ||
| 449 | const void *data, unsigned long len) | ||
| 450 | { | ||
| 451 | int srcu_idx = srcu_read_lock(&kvm->srcu); | ||
| 452 | int ret = kvm_write_guest(kvm, gpa, data, len); | ||
| 453 | |||
| 454 | srcu_read_unlock(&kvm->srcu, srcu_idx); | ||
| 455 | |||
| 456 | return ret; | ||
| 457 | } | ||
| 458 | |||
| 448 | #ifdef CONFIG_KVM_INDIRECT_VECTORS | 459 | #ifdef CONFIG_KVM_INDIRECT_VECTORS |
| 449 | /* | 460 | /* |
| 450 | * EL2 vectors can be mapped and rerouted in a number of ways, | 461 | * EL2 vectors can be mapped and rerouted in a number of ways, |
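The new kvm_write_guest_lock() helper mirrors the kvm_read_guest_lock() shown in the same hunk: take the SRCU read side around the guest access and release it afterwards, so callers don't repeat the lock/unlock boilerplate. A generic user-space sketch of that "wrap an operation in a read-side guard" pattern, using a pthread rwlock as a rough stand-in for SRCU and a byte buffer for guest memory (all names here are illustrative, not KVM APIs):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t guard = PTHREAD_RWLOCK_INITIALIZER;
static char guest_mem[64];                /* stand-in for guest memory */

/* Raw access: caller is expected to hold the read-side guard. */
static int write_guest(size_t off, const void *data, size_t len)
{
	if (off + len > sizeof(guest_mem))
		return -1;
	memcpy(guest_mem + off, data, len);
	return 0;
}

/* Convenience wrapper: takes and drops the guard around the access. */
static int write_guest_lock(size_t off, const void *data, size_t len)
{
	pthread_rwlock_rdlock(&guard);
	int ret = write_guest(off, data, len);
	pthread_rwlock_unlock(&guard);
	return ret;
}

int main(void)
{
	const char msg[] = "hello";

	printf("ret = %d, mem = %s\n",
	       write_guest_lock(0, msg, sizeof(msg)), guest_mem);
	return 0;
}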
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index ad8be16a39c9..a179df3674a1 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h | |||
| @@ -65,52 +65,22 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 65 | 65 | ||
| 66 | static inline void syscall_get_arguments(struct task_struct *task, | 66 | static inline void syscall_get_arguments(struct task_struct *task, |
| 67 | struct pt_regs *regs, | 67 | struct pt_regs *regs, |
| 68 | unsigned int i, unsigned int n, | ||
| 69 | unsigned long *args) | 68 | unsigned long *args) |
| 70 | { | 69 | { |
| 71 | if (n == 0) | 70 | args[0] = regs->orig_x0; |
| 72 | return; | 71 | args++; |
| 73 | 72 | ||
| 74 | if (i + n > SYSCALL_MAX_ARGS) { | 73 | memcpy(args, ®s->regs[1], 5 * sizeof(args[0])); |
| 75 | unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; | ||
| 76 | unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; | ||
| 77 | pr_warning("%s called with max args %d, handling only %d\n", | ||
| 78 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 79 | memset(args_bad, 0, n_bad * sizeof(args[0])); | ||
| 80 | } | ||
| 81 | |||
| 82 | if (i == 0) { | ||
| 83 | args[0] = regs->orig_x0; | ||
| 84 | args++; | ||
| 85 | i++; | ||
| 86 | n--; | ||
| 87 | } | ||
| 88 | |||
| 89 | memcpy(args, ®s->regs[i], n * sizeof(args[0])); | ||
| 90 | } | 74 | } |
| 91 | 75 | ||
| 92 | static inline void syscall_set_arguments(struct task_struct *task, | 76 | static inline void syscall_set_arguments(struct task_struct *task, |
| 93 | struct pt_regs *regs, | 77 | struct pt_regs *regs, |
| 94 | unsigned int i, unsigned int n, | ||
| 95 | const unsigned long *args) | 78 | const unsigned long *args) |
| 96 | { | 79 | { |
| 97 | if (n == 0) | 80 | regs->orig_x0 = args[0]; |
| 98 | return; | 81 | args++; |
| 99 | 82 | ||
| 100 | if (i + n > SYSCALL_MAX_ARGS) { | 83 | memcpy(®s->regs[1], args, 5 * sizeof(args[0])); |
| 101 | pr_warning("%s called with max args %d, handling only %d\n", | ||
| 102 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 103 | n = SYSCALL_MAX_ARGS - i; | ||
| 104 | } | ||
| 105 | |||
| 106 | if (i == 0) { | ||
| 107 | regs->orig_x0 = args[0]; | ||
| 108 | args++; | ||
| 109 | i++; | ||
| 110 | n--; | ||
| 111 | } | ||
| 112 | |||
| 113 | memcpy(®s->regs[i], args, n * sizeof(args[0])); | ||
| 114 | } | 84 | } |
| 115 | 85 | ||
| 116 | /* | 86 | /* |
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 5ba4465e44f0..ea94cf8f9dc6 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c | |||
| @@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info) | |||
| 94 | unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); | 94 | unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); |
| 95 | unsigned long high = low + SDEI_STACK_SIZE; | 95 | unsigned long high = low + SDEI_STACK_SIZE; |
| 96 | 96 | ||
| 97 | if (!low) | ||
| 98 | return false; | ||
| 99 | |||
| 97 | if (sp < low || sp >= high) | 100 | if (sp < low || sp >= high) |
| 98 | return false; | 101 | return false; |
| 99 | 102 | ||
| @@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info) | |||
| 111 | unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); | 114 | unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); |
| 112 | unsigned long high = low + SDEI_STACK_SIZE; | 115 | unsigned long high = low + SDEI_STACK_SIZE; |
| 113 | 116 | ||
| 117 | if (!low) | ||
| 118 | return false; | ||
| 119 | |||
| 114 | if (sp < low || sp >= high) | 120 | if (sp < low || sp >= high) |
| 115 | return false; | 121 | return false; |
| 116 | 122 | ||
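The SDEI hunks add a "!low" check so that a NULL per-cpu stack base — presumably the case when the SDEI stacks were never allocated — is rejected before the sp range comparison, instead of letting addresses near zero match. A tiny sketch of the guarded range test with a hypothetical stack descriptor:

#include <stdio.h>
#include <stdbool.h>

#define STACK_SIZE 0x1000UL

/* Returns true only if sp lies on an actually allocated stack. */
static bool on_stack(unsigned long sp, unsigned long base)
{
	unsigned long low = base;
	unsigned long high = low + STACK_SIZE;

	if (!low)                         /* stack never allocated */
		return false;

	return sp >= low && sp < high;
}

int main(void)
{
	printf("%d\n", on_stack(0x10, 0));           /* 0: no stack allocated */
	printf("%d\n", on_stack(0x2010, 0x2000));    /* 1: inside the stack */
	printf("%d\n", on_stack(0x3000, 0x2000));    /* 0: one past the end */
	return 0;
}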
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f8482fe5a190..413d566405d1 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
| @@ -217,7 +217,7 @@ static void __init request_standard_resources(void) | |||
| 217 | 217 | ||
| 218 | num_standard_resources = memblock.memory.cnt; | 218 | num_standard_resources = memblock.memory.cnt; |
| 219 | res_size = num_standard_resources * sizeof(*standard_resources); | 219 | res_size = num_standard_resources * sizeof(*standard_resources); |
| 220 | standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES); | 220 | standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES); |
| 221 | if (!standard_resources) | 221 | if (!standard_resources) |
| 222 | panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); | 222 | panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); |
| 223 | 223 | ||
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f16a5f8ff2b4..e2a0500cd7a2 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c | |||
| @@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
| 123 | int ret = -EINVAL; | 123 | int ret = -EINVAL; |
| 124 | bool loaded; | 124 | bool loaded; |
| 125 | 125 | ||
| 126 | /* Reset PMU outside of the non-preemptible section */ | ||
| 127 | kvm_pmu_vcpu_reset(vcpu); | ||
| 128 | |||
| 126 | preempt_disable(); | 129 | preempt_disable(); |
| 127 | loaded = (vcpu->cpu != -1); | 130 | loaded = (vcpu->cpu != -1); |
| 128 | if (loaded) | 131 | if (loaded) |
| @@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
| 170 | vcpu->arch.reset_state.reset = false; | 173 | vcpu->arch.reset_state.reset = false; |
| 171 | } | 174 | } |
| 172 | 175 | ||
| 173 | /* Reset PMU */ | ||
| 174 | kvm_pmu_vcpu_reset(vcpu); | ||
| 175 | |||
| 176 | /* Default workaround setup is enabled (if supported) */ | 176 | /* Default workaround setup is enabled (if supported) */ |
| 177 | if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) | 177 | if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) |
| 178 | vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; | 178 | vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; |
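The kvm/reset.c hunk moves the PMU reset ahead of preempt_disable(), presumably because it can end up in code that may sleep, which is not allowed once the non-preemptible section has begun. A toy user-space sketch of that ordering constraint, modelling the atomic window with a flag and an assertion (the helper names are illustrative only):

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>

static bool in_atomic;                    /* models the non-preemptible window */

static void might_sleep_work(const char *what)
{
	assert(!in_atomic);               /* sleepable work is illegal here */
	printf("%s (may sleep)\n", what);
}

static void atomic_section(void)
{
	in_atomic = true;
	printf("non-preemptible work\n");
	in_atomic = false;
}

int main(void)
{
	/* Fixed ordering: do the sleepable reset first, then the atomic part. */
	might_sleep_work("reset PMU");
	atomic_section();
	return 0;
}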
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 63b4a1705182..249c9f6f26dc 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild | |||
| @@ -19,6 +19,7 @@ generic-y += irq_work.h | |||
| 19 | generic-y += kdebug.h | 19 | generic-y += kdebug.h |
| 20 | generic-y += kmap_types.h | 20 | generic-y += kmap_types.h |
| 21 | generic-y += kprobes.h | 21 | generic-y += kprobes.h |
| 22 | generic-y += kvm_para.h | ||
| 22 | generic-y += local.h | 23 | generic-y += local.h |
| 23 | generic-y += mcs_spinlock.h | 24 | generic-y += mcs_spinlock.h |
| 24 | generic-y += mm-arch-hooks.h | 25 | generic-y += mm-arch-hooks.h |
diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h index ae2be315ee9c..15ba8599858e 100644 --- a/arch/c6x/include/asm/syscall.h +++ b/arch/c6x/include/asm/syscall.h | |||
| @@ -46,78 +46,27 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | static inline void syscall_get_arguments(struct task_struct *task, | 48 | static inline void syscall_get_arguments(struct task_struct *task, |
| 49 | struct pt_regs *regs, unsigned int i, | 49 | struct pt_regs *regs, |
| 50 | unsigned int n, unsigned long *args) | 50 | unsigned long *args) |
| 51 | { | 51 | { |
| 52 | switch (i) { | 52 | *args++ = regs->a4; |
| 53 | case 0: | 53 | *args++ = regs->b4; |
| 54 | if (!n--) | 54 | *args++ = regs->a6; |
| 55 | break; | 55 | *args++ = regs->b6; |
| 56 | *args++ = regs->a4; | 56 | *args++ = regs->a8; |
| 57 | case 1: | 57 | *args = regs->b8; |
| 58 | if (!n--) | ||
| 59 | break; | ||
| 60 | *args++ = regs->b4; | ||
| 61 | case 2: | ||
| 62 | if (!n--) | ||
| 63 | break; | ||
| 64 | *args++ = regs->a6; | ||
| 65 | case 3: | ||
| 66 | if (!n--) | ||
| 67 | break; | ||
| 68 | *args++ = regs->b6; | ||
| 69 | case 4: | ||
| 70 | if (!n--) | ||
| 71 | break; | ||
| 72 | *args++ = regs->a8; | ||
| 73 | case 5: | ||
| 74 | if (!n--) | ||
| 75 | break; | ||
| 76 | *args++ = regs->b8; | ||
| 77 | case 6: | ||
| 78 | if (!n--) | ||
| 79 | break; | ||
| 80 | default: | ||
| 81 | BUG(); | ||
| 82 | } | ||
| 83 | } | 58 | } |
| 84 | 59 | ||
| 85 | static inline void syscall_set_arguments(struct task_struct *task, | 60 | static inline void syscall_set_arguments(struct task_struct *task, |
| 86 | struct pt_regs *regs, | 61 | struct pt_regs *regs, |
| 87 | unsigned int i, unsigned int n, | ||
| 88 | const unsigned long *args) | 62 | const unsigned long *args) |
| 89 | { | 63 | { |
| 90 | switch (i) { | 64 | regs->a4 = *args++; |
| 91 | case 0: | 65 | regs->b4 = *args++; |
| 92 | if (!n--) | 66 | regs->a6 = *args++; |
| 93 | break; | 67 | regs->b6 = *args++; |
| 94 | regs->a4 = *args++; | 68 | regs->a8 = *args++; |
| 95 | case 1: | 69 | regs->a9 = *args; |
| 96 | if (!n--) | ||
| 97 | break; | ||
| 98 | regs->b4 = *args++; | ||
| 99 | case 2: | ||
| 100 | if (!n--) | ||
| 101 | break; | ||
| 102 | regs->a6 = *args++; | ||
| 103 | case 3: | ||
| 104 | if (!n--) | ||
| 105 | break; | ||
| 106 | regs->b6 = *args++; | ||
| 107 | case 4: | ||
| 108 | if (!n--) | ||
| 109 | break; | ||
| 110 | regs->a8 = *args++; | ||
| 111 | case 5: | ||
| 112 | if (!n--) | ||
| 113 | break; | ||
| 114 | regs->a9 = *args++; | ||
| 115 | case 6: | ||
| 116 | if (!n) | ||
| 117 | break; | ||
| 118 | default: | ||
| 119 | BUG(); | ||
| 120 | } | ||
| 121 | } | 70 | } |
| 122 | 71 | ||
| 123 | #endif /* __ASM_C6X_SYSCALLS_H */ | 72 | #endif /* __ASM_C6X_SYSCALLS_H */ |
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/c6x/include/uapi/asm/Kbuild +++ b/arch/c6x/include/uapi/asm/Kbuild | |||
| @@ -1,2 +1 @@ | |||
| 1 | generic-y += kvm_para.h | ||
| 2 | generic-y += ucontext.h | generic-y += ucontext.h | |
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h index d637445737b7..bda0a446c63e 100644 --- a/arch/csky/include/asm/syscall.h +++ b/arch/csky/include/asm/syscall.h | |||
| @@ -43,30 +43,20 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
| 43 | 43 | ||
| 44 | static inline void | 44 | static inline void |
| 45 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 45 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 46 | unsigned int i, unsigned int n, unsigned long *args) | 46 | unsigned long *args) |
| 47 | { | 47 | { |
| 48 | BUG_ON(i + n > 6); | 48 | args[0] = regs->orig_a0; |
| 49 | if (i == 0) { | 49 | args++; |
| 50 | args[0] = regs->orig_a0; | 50 | memcpy(args, ®s->a1, 5 * sizeof(args[0])); |
| 51 | args++; | ||
| 52 | i++; | ||
| 53 | n--; | ||
| 54 | } | ||
| 55 | memcpy(args, ®s->a1 + i * sizeof(regs->a1), n * sizeof(args[0])); | ||
| 56 | } | 51 | } |
| 57 | 52 | ||
| 58 | static inline void | 53 | static inline void |
| 59 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | 54 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, |
| 60 | unsigned int i, unsigned int n, const unsigned long *args) | 55 | const unsigned long *args) |
| 61 | { | 56 | { |
| 62 | BUG_ON(i + n > 6); | 57 | regs->orig_a0 = args[0]; |
| 63 | if (i == 0) { | 58 | args++; |
| 64 | regs->orig_a0 = args[0]; | 59 | memcpy(®s->a1, args, 5 * sizeof(regs->a1)); |
| 65 | args++; | ||
| 66 | i++; | ||
| 67 | n--; | ||
| 68 | } | ||
| 69 | memcpy(®s->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); | ||
| 70 | } | 60 | } |
| 71 | 61 | ||
| 72 | static inline int | 62 | static inline int |
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 3e7c8ecf151e..e3dead402e5f 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild | |||
| @@ -23,6 +23,7 @@ generic-y += irq_work.h | |||
| 23 | generic-y += kdebug.h | 23 | generic-y += kdebug.h |
| 24 | generic-y += kmap_types.h | 24 | generic-y += kmap_types.h |
| 25 | generic-y += kprobes.h | 25 | generic-y += kprobes.h |
| 26 | generic-y += kvm_para.h | ||
| 26 | generic-y += linkage.h | 27 | generic-y += linkage.h |
| 27 | generic-y += local.h | 28 | generic-y += local.h |
| 28 | generic-y += local64.h | 29 | generic-y += local64.h |
diff --git a/arch/h8300/include/asm/syscall.h b/arch/h8300/include/asm/syscall.h index 924990401237..ddd483c6ca95 100644 --- a/arch/h8300/include/asm/syscall.h +++ b/arch/h8300/include/asm/syscall.h | |||
| @@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs) | |||
| 17 | 17 | ||
| 18 | static inline void | 18 | static inline void |
| 19 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 19 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 20 | unsigned int i, unsigned int n, unsigned long *args) | 20 | unsigned long *args) |
| 21 | { | 21 | { |
| 22 | BUG_ON(i + n > 6); | 22 | *args++ = regs->er1; |
| 23 | 23 | *args++ = regs->er2; | |
| 24 | while (n > 0) { | 24 | *args++ = regs->er3; |
| 25 | switch (i) { | 25 | *args++ = regs->er4; |
| 26 | case 0: | 26 | *args++ = regs->er5; |
| 27 | *args++ = regs->er1; | 27 | *args = regs->er6; |
| 28 | break; | ||
| 29 | case 1: | ||
| 30 | *args++ = regs->er2; | ||
| 31 | break; | ||
| 32 | case 2: | ||
| 33 | *args++ = regs->er3; | ||
| 34 | break; | ||
| 35 | case 3: | ||
| 36 | *args++ = regs->er4; | ||
| 37 | break; | ||
| 38 | case 4: | ||
| 39 | *args++ = regs->er5; | ||
| 40 | break; | ||
| 41 | case 5: | ||
| 42 | *args++ = regs->er6; | ||
| 43 | break; | ||
| 44 | } | ||
| 45 | i++; | ||
| 46 | n--; | ||
| 47 | } | ||
| 48 | } | 28 | } |
| 49 | 29 | ||
| 50 | 30 | ||
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/h8300/include/uapi/asm/Kbuild +++ b/arch/h8300/include/uapi/asm/Kbuild | |||
| @@ -1,2 +1 @@ | |||
| 1 | generic-y += kvm_para.h | ||
| 2 | generic-y += ucontext.h | generic-y += ucontext.h | |
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index b25fd42aa0f4..d046e8ccdf78 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild | |||
| @@ -19,6 +19,7 @@ generic-y += irq_work.h | |||
| 19 | generic-y += kdebug.h | 19 | generic-y += kdebug.h |
| 20 | generic-y += kmap_types.h | 20 | generic-y += kmap_types.h |
| 21 | generic-y += kprobes.h | 21 | generic-y += kprobes.h |
| 22 | generic-y += kvm_para.h | ||
| 22 | generic-y += local.h | 23 | generic-y += local.h |
| 23 | generic-y += local64.h | 24 | generic-y += local64.h |
| 24 | generic-y += mcs_spinlock.h | 25 | generic-y += mcs_spinlock.h |
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h index 4af9c7b6f13a..ae3a1e24fabd 100644 --- a/arch/hexagon/include/asm/syscall.h +++ b/arch/hexagon/include/asm/syscall.h | |||
| @@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task, | |||
| 37 | 37 | ||
| 38 | static inline void syscall_get_arguments(struct task_struct *task, | 38 | static inline void syscall_get_arguments(struct task_struct *task, |
| 39 | struct pt_regs *regs, | 39 | struct pt_regs *regs, |
| 40 | unsigned int i, unsigned int n, | ||
| 41 | unsigned long *args) | 40 | unsigned long *args) |
| 42 | { | 41 | { |
| 43 | BUG_ON(i + n > 6); | 42 | memcpy(args, &(®s->r00)[0], 6 * sizeof(args[0])); |
| 44 | memcpy(args, &(®s->r00)[i], n * sizeof(args[0])); | ||
| 45 | } | 43 | } |
| 46 | #endif | 44 | #endif |
diff --git a/arch/hexagon/include/uapi/asm/kvm_para.h b/arch/hexagon/include/uapi/asm/kvm_para.h deleted file mode 100644 index baacc4996d18..000000000000 --- a/arch/hexagon/include/uapi/asm/kvm_para.h +++ /dev/null | |||
| @@ -1,2 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
| 2 | #include <asm-generic/kvm_para.h> | ||
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 43e21fe3499c..11f191689c9e 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild | |||
| @@ -2,6 +2,7 @@ generated-y += syscall_table.h | |||
| 2 | generic-y += compat.h | 2 | generic-y += compat.h |
| 3 | generic-y += exec.h | 3 | generic-y += exec.h |
| 4 | generic-y += irq_work.h | 4 | generic-y += irq_work.h |
| 5 | generic-y += kvm_para.h | ||
| 5 | generic-y += mcs_spinlock.h | 6 | generic-y += mcs_spinlock.h |
| 6 | generic-y += mm-arch-hooks.h | 7 | generic-y += mm-arch-hooks.h |
| 7 | generic-y += preempt.h | 8 | generic-y += preempt.h |
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index 1d0b875fec44..0d9e7fab4a79 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h | |||
| @@ -59,26 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | extern void ia64_syscall_get_set_arguments(struct task_struct *task, | 61 | extern void ia64_syscall_get_set_arguments(struct task_struct *task, |
| 62 | struct pt_regs *regs, unsigned int i, unsigned int n, | 62 | struct pt_regs *regs, unsigned long *args, int rw); |
| 63 | unsigned long *args, int rw); | ||
| 64 | static inline void syscall_get_arguments(struct task_struct *task, | 63 | static inline void syscall_get_arguments(struct task_struct *task, |
| 65 | struct pt_regs *regs, | 64 | struct pt_regs *regs, |
| 66 | unsigned int i, unsigned int n, | ||
| 67 | unsigned long *args) | 65 | unsigned long *args) |
| 68 | { | 66 | { |
| 69 | BUG_ON(i + n > 6); | 67 | ia64_syscall_get_set_arguments(task, regs, args, 0); |
| 70 | |||
| 71 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); | ||
| 72 | } | 68 | } |
| 73 | 69 | ||
| 74 | static inline void syscall_set_arguments(struct task_struct *task, | 70 | static inline void syscall_set_arguments(struct task_struct *task, |
| 75 | struct pt_regs *regs, | 71 | struct pt_regs *regs, |
| 76 | unsigned int i, unsigned int n, | ||
| 77 | unsigned long *args) | 72 | unsigned long *args) |
| 78 | { | 73 | { |
| 79 | BUG_ON(i + n > 6); | 74 | ia64_syscall_get_set_arguments(task, regs, args, 1); |
| 80 | |||
| 81 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); | ||
| 82 | } | 75 | } |
| 83 | 76 | ||
| 84 | static inline int syscall_get_arch(void) | 77 | static inline int syscall_get_arch(void) |
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild index 20018cb883a9..62a9522af51e 100644 --- a/arch/ia64/include/uapi/asm/Kbuild +++ b/arch/ia64/include/uapi/asm/Kbuild | |||
| @@ -1,2 +1 @@ | |||
| 1 | generated-y += unistd_64.h | generated-y += unistd_64.h | |
| 2 | generic-y += kvm_para.h | ||
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 6d50ede0ed69..bf9c24d9ce84 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
| @@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) | |||
| 2179 | } | 2179 | } |
| 2180 | 2180 | ||
| 2181 | void ia64_syscall_get_set_arguments(struct task_struct *task, | 2181 | void ia64_syscall_get_set_arguments(struct task_struct *task, |
| 2182 | struct pt_regs *regs, unsigned int i, unsigned int n, | 2182 | struct pt_regs *regs, unsigned long *args, int rw) |
| 2183 | unsigned long *args, int rw) | ||
| 2184 | { | 2183 | { |
| 2185 | struct syscall_get_set_args data = { | 2184 | struct syscall_get_set_args data = { |
| 2186 | .i = i, | 2185 | .i = 0, |
| 2187 | .n = n, | 2186 | .n = 6, |
| 2188 | .args = args, | 2187 | .args = args, |
| 2189 | .regs = regs, | 2188 | .regs = regs, |
| 2190 | .rw = rw, | 2189 | .rw = rw, |
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 95f8f631c4df..2c359d9e80f6 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild | |||
| @@ -13,6 +13,7 @@ generic-y += irq_work.h | |||
| 13 | generic-y += kdebug.h | 13 | generic-y += kdebug.h |
| 14 | generic-y += kmap_types.h | 14 | generic-y += kmap_types.h |
| 15 | generic-y += kprobes.h | 15 | generic-y += kprobes.h |
| 16 | generic-y += kvm_para.h | ||
| 16 | generic-y += local.h | 17 | generic-y += local.h |
| 17 | generic-y += local64.h | 18 | generic-y += local64.h |
| 18 | generic-y += mcs_spinlock.h | 19 | generic-y += mcs_spinlock.h |
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index 8a7ad40be463..7417847dc438 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild | |||
| @@ -1,2 +1 @@ | |||
| 1 | generated-y += unistd_32.h | generated-y += unistd_32.h | |
| 2 | generic-y += kvm_para.h | ||
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 791cc8d54d0a..1a8285c3f693 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild | |||
| @@ -17,6 +17,7 @@ generic-y += irq_work.h | |||
| 17 | generic-y += kdebug.h | 17 | generic-y += kdebug.h |
| 18 | generic-y += kmap_types.h | 18 | generic-y += kmap_types.h |
| 19 | generic-y += kprobes.h | 19 | generic-y += kprobes.h |
| 20 | generic-y += kvm_para.h | ||
| 20 | generic-y += linkage.h | 21 | generic-y += linkage.h |
| 21 | generic-y += local.h | 22 | generic-y += local.h |
| 22 | generic-y += local64.h | 23 | generic-y += local64.h |
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h index 220decd605a4..833d3a53dab3 100644 --- a/arch/microblaze/include/asm/syscall.h +++ b/arch/microblaze/include/asm/syscall.h | |||
| @@ -82,18 +82,22 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs, | |||
| 82 | 82 | ||
| 83 | static inline void syscall_get_arguments(struct task_struct *task, | 83 | static inline void syscall_get_arguments(struct task_struct *task, |
| 84 | struct pt_regs *regs, | 84 | struct pt_regs *regs, |
| 85 | unsigned int i, unsigned int n, | ||
| 86 | unsigned long *args) | 85 | unsigned long *args) |
| 87 | { | 86 | { |
| 87 | unsigned int i = 0; | ||
| 88 | unsigned int n = 6; | ||
| 89 | |||
| 88 | while (n--) | 90 | while (n--) |
| 89 | *args++ = microblaze_get_syscall_arg(regs, i++); | 91 | *args++ = microblaze_get_syscall_arg(regs, i++); |
| 90 | } | 92 | } |
| 91 | 93 | ||
| 92 | static inline void syscall_set_arguments(struct task_struct *task, | 94 | static inline void syscall_set_arguments(struct task_struct *task, |
| 93 | struct pt_regs *regs, | 95 | struct pt_regs *regs, |
| 94 | unsigned int i, unsigned int n, | ||
| 95 | const unsigned long *args) | 96 | const unsigned long *args) |
| 96 | { | 97 | { |
| 98 | unsigned int i = 0; | ||
| 99 | unsigned int n = 6; | ||
| 100 | |||
| 97 | while (n--) | 101 | while (n--) |
| 98 | microblaze_set_syscall_arg(regs, i++, *args++); | 102 | microblaze_set_syscall_arg(regs, i++, *args++); |
| 99 | } | 103 | } |
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index 3ce84fbb2678..13f59631c576 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild | |||
| @@ -1,3 +1,2 @@ | |||
| 1 | generated-y += unistd_32.h | 1 | generated-y += unistd_32.h |
| 2 | generic-y += kvm_para.h | ||
| 3 | generic-y += ucontext.h | 2 | generic-y += ucontext.h |
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 6cf8ffb5367e..a2b4748655df 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h | |||
| @@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 116 | 116 | ||
| 117 | static inline void syscall_get_arguments(struct task_struct *task, | 117 | static inline void syscall_get_arguments(struct task_struct *task, |
| 118 | struct pt_regs *regs, | 118 | struct pt_regs *regs, |
| 119 | unsigned int i, unsigned int n, | ||
| 120 | unsigned long *args) | 119 | unsigned long *args) |
| 121 | { | 120 | { |
| 121 | unsigned int i = 0; | ||
| 122 | unsigned int n = 6; | ||
| 122 | int ret; | 123 | int ret; |
| 123 | 124 | ||
| 124 | /* O32 ABI syscall() */ | 125 | /* O32 ABI syscall() */ |
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 0057c910bc2f..3a62f80958e1 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
| @@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) | |||
| 1419 | 1419 | ||
| 1420 | sd.nr = syscall; | 1420 | sd.nr = syscall; |
| 1421 | sd.arch = syscall_get_arch(); | 1421 | sd.arch = syscall_get_arch(); |
| 1422 | syscall_get_arguments(current, regs, 0, 6, args); | 1422 | syscall_get_arguments(current, regs, args); |
| 1423 | for (i = 0; i < 6; i++) | 1423 | for (i = 0; i < 6; i++) |
| 1424 | sd.args[i] = args[i]; | 1424 | sd.args[i] = args[i]; |
| 1425 | sd.instruction_pointer = KSTK_EIP(current); | 1425 | sd.instruction_pointer = KSTK_EIP(current); |
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h index f7e5e86765fe..671ebd357496 100644 --- a/arch/nds32/include/asm/syscall.h +++ b/arch/nds32/include/asm/syscall.h | |||
| @@ -108,81 +108,41 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
| 108 | * syscall_get_arguments - extract system call parameter values | 108 | * syscall_get_arguments - extract system call parameter values |
| 109 | * @task: task of interest, must be blocked | 109 | * @task: task of interest, must be blocked |
| 110 | * @regs: task_pt_regs() of @task | 110 | * @regs: task_pt_regs() of @task |
| 111 | * @i: argument index [0,5] | ||
| 112 | * @n: number of arguments; n+i must be [1,6]. | ||
| 113 | * @args: array filled with argument values | 111 | * @args: array filled with argument values |
| 114 | * | 112 | * |
| 115 | * Fetches @n arguments to the system call starting with the @i'th argument | 113 | * Fetches 6 arguments to the system call (from 0 through 5). The first |
| 116 | * (from 0 through 5). Argument @i is stored in @args[0], and so on. | 114 | * argument is stored in @args[0], and so on. |
| 117 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 118 | * | 115 | * |
| 119 | * It's only valid to call this when @task is stopped for tracing on | 116 | * It's only valid to call this when @task is stopped for tracing on |
| 120 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 117 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
| 121 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 122 | * taking up to 6 arguments. | ||
| 123 | */ | 118 | */ |
| 124 | #define SYSCALL_MAX_ARGS 6 | 119 | #define SYSCALL_MAX_ARGS 6 |
| 125 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 120 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 126 | unsigned int i, unsigned int n, unsigned long *args) | 121 | unsigned long *args) |
| 127 | { | 122 | { |
| 128 | if (n == 0) | 123 | args[0] = regs->orig_r0; |
| 129 | return; | 124 | args++; |
| 130 | if (i + n > SYSCALL_MAX_ARGS) { | 125 | memcpy(args, ®s->uregs[0] + 1, 5 * sizeof(args[0])); |
| 131 | unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; | ||
| 132 | unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; | ||
| 133 | pr_warning("%s called with max args %d, handling only %d\n", | ||
| 134 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 135 | memset(args_bad, 0, n_bad * sizeof(args[0])); | ||
| 136 | memset(args_bad, 0, n_bad * sizeof(args[0])); | ||
| 137 | } | ||
| 138 | |||
| 139 | if (i == 0) { | ||
| 140 | args[0] = regs->orig_r0; | ||
| 141 | args++; | ||
| 142 | i++; | ||
| 143 | n--; | ||
| 144 | } | ||
| 145 | |||
| 146 | memcpy(args, ®s->uregs[0] + i, n * sizeof(args[0])); | ||
| 147 | } | 126 | } |
| 148 | 127 | ||
| 149 | /** | 128 | /** |
| 150 | * syscall_set_arguments - change system call parameter value | 129 | * syscall_set_arguments - change system call parameter value |
| 151 | * @task: task of interest, must be in system call entry tracing | 130 | * @task: task of interest, must be in system call entry tracing |
| 152 | * @regs: task_pt_regs() of @task | 131 | * @regs: task_pt_regs() of @task |
| 153 | * @i: argument index [0,5] | ||
| 154 | * @n: number of arguments; n+i must be [1,6]. | ||
| 155 | * @args: array of argument values to store | 132 | * @args: array of argument values to store |
| 156 | * | 133 | * |
| 157 | * Changes @n arguments to the system call starting with the @i'th argument. | 134 | * Changes 6 arguments to the system call. The first argument gets value |
| 158 | * Argument @i gets value @args[0], and so on. | 135 | * @args[0], and so on. |
| 159 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 160 | * | 136 | * |
| 161 | * It's only valid to call this when @task is stopped for tracing on | 137 | * It's only valid to call this when @task is stopped for tracing on |
| 162 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 138 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
| 163 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 164 | * taking up to 6 arguments. | ||
| 165 | */ | 139 | */ |
| 166 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | 140 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, |
| 167 | unsigned int i, unsigned int n, | ||
| 168 | const unsigned long *args) | 141 | const unsigned long *args) |
| 169 | { | 142 | { |
| 170 | if (n == 0) | 143 | regs->orig_r0 = args[0]; |
| 171 | return; | 144 | args++; |
| 172 | |||
| 173 | if (i + n > SYSCALL_MAX_ARGS) { | ||
| 174 | pr_warn("%s called with max args %d, handling only %d\n", | ||
| 175 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 176 | n = SYSCALL_MAX_ARGS - i; | ||
| 177 | } | ||
| 178 | |||
| 179 | if (i == 0) { | ||
| 180 | regs->orig_r0 = args[0]; | ||
| 181 | args++; | ||
| 182 | i++; | ||
| 183 | n--; | ||
| 184 | } | ||
| 185 | 145 | ||
| 186 | memcpy(®s->uregs[0] + i, args, n * sizeof(args[0])); | 146 | memcpy(®s->uregs[0] + 1, args, 5 * sizeof(args[0])); |
| 187 | } | 147 | } |
| 188 | #endif /* _ASM_NDS32_SYSCALL_H */ | 148 | #endif /* _ASM_NDS32_SYSCALL_H */ |
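Every syscall.h conversion in this series follows the same pattern: the @i/@n window is gone and the helpers always transfer all six arguments. A minimal caller sketch (hypothetical tracer code, not part of this patch) of the simplified interface:

	unsigned long args[6];	/* must have room for all six arguments */

	/* The reworked helper fills args[0]..args[5] unconditionally;
	 * partial fetches are no longer possible. */
	syscall_get_arguments(task, task_pt_regs(task), args);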
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 8fde4fa2c34f..88a667d12aaa 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild | |||
| @@ -23,6 +23,7 @@ generic-y += irq_work.h | |||
| 23 | generic-y += kdebug.h | 23 | generic-y += kdebug.h |
| 24 | generic-y += kmap_types.h | 24 | generic-y += kmap_types.h |
| 25 | generic-y += kprobes.h | 25 | generic-y += kprobes.h |
| 26 | generic-y += kvm_para.h | ||
| 26 | generic-y += local.h | 27 | generic-y += local.h |
| 27 | generic-y += mcs_spinlock.h | 28 | generic-y += mcs_spinlock.h |
| 28 | generic-y += mm-arch-hooks.h | 29 | generic-y += mm-arch-hooks.h |
diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h index 9de220854c4a..d7624ed06efb 100644 --- a/arch/nios2/include/asm/syscall.h +++ b/arch/nios2/include/asm/syscall.h | |||
| @@ -58,81 +58,25 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | static inline void syscall_get_arguments(struct task_struct *task, | 60 | static inline void syscall_get_arguments(struct task_struct *task, |
| 61 | struct pt_regs *regs, unsigned int i, unsigned int n, | 61 | struct pt_regs *regs, unsigned long *args) |
| 62 | unsigned long *args) | ||
| 63 | { | 62 | { |
| 64 | BUG_ON(i + n > 6); | 63 | *args++ = regs->r4; |
| 65 | 64 | *args++ = regs->r5; | |
| 66 | switch (i) { | 65 | *args++ = regs->r6; |
| 67 | case 0: | 66 | *args++ = regs->r7; |
| 68 | if (!n--) | 67 | *args++ = regs->r8; |
| 69 | break; | 68 | *args = regs->r9; |
| 70 | *args++ = regs->r4; | ||
| 71 | case 1: | ||
| 72 | if (!n--) | ||
| 73 | break; | ||
| 74 | *args++ = regs->r5; | ||
| 75 | case 2: | ||
| 76 | if (!n--) | ||
| 77 | break; | ||
| 78 | *args++ = regs->r6; | ||
| 79 | case 3: | ||
| 80 | if (!n--) | ||
| 81 | break; | ||
| 82 | *args++ = regs->r7; | ||
| 83 | case 4: | ||
| 84 | if (!n--) | ||
| 85 | break; | ||
| 86 | *args++ = regs->r8; | ||
| 87 | case 5: | ||
| 88 | if (!n--) | ||
| 89 | break; | ||
| 90 | *args++ = regs->r9; | ||
| 91 | case 6: | ||
| 92 | if (!n--) | ||
| 93 | break; | ||
| 94 | default: | ||
| 95 | BUG(); | ||
| 96 | } | ||
| 97 | } | 69 | } |
| 98 | 70 | ||
| 99 | static inline void syscall_set_arguments(struct task_struct *task, | 71 | static inline void syscall_set_arguments(struct task_struct *task, |
| 100 | struct pt_regs *regs, unsigned int i, unsigned int n, | 72 | struct pt_regs *regs, const unsigned long *args) |
| 101 | const unsigned long *args) | ||
| 102 | { | 73 | { |
| 103 | BUG_ON(i + n > 6); | 74 | regs->r4 = *args++; |
| 104 | 75 | regs->r5 = *args++; | |
| 105 | switch (i) { | 76 | regs->r6 = *args++; |
| 106 | case 0: | 77 | regs->r7 = *args++; |
| 107 | if (!n--) | 78 | regs->r8 = *args++; |
| 108 | break; | 79 | regs->r9 = *args; |
| 109 | regs->r4 = *args++; | ||
| 110 | case 1: | ||
| 111 | if (!n--) | ||
| 112 | break; | ||
| 113 | regs->r5 = *args++; | ||
| 114 | case 2: | ||
| 115 | if (!n--) | ||
| 116 | break; | ||
| 117 | regs->r6 = *args++; | ||
| 118 | case 3: | ||
| 119 | if (!n--) | ||
| 120 | break; | ||
| 121 | regs->r7 = *args++; | ||
| 122 | case 4: | ||
| 123 | if (!n--) | ||
| 124 | break; | ||
| 125 | regs->r8 = *args++; | ||
| 126 | case 5: | ||
| 127 | if (!n--) | ||
| 128 | break; | ||
| 129 | regs->r9 = *args++; | ||
| 130 | case 6: | ||
| 131 | if (!n) | ||
| 132 | break; | ||
| 133 | default: | ||
| 134 | BUG(); | ||
| 135 | } | ||
| 136 | } | 80 | } |
| 137 | 81 | ||
| 138 | #endif | 82 | #endif |
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/nios2/include/uapi/asm/Kbuild +++ b/arch/nios2/include/uapi/asm/Kbuild | |||
| @@ -1,2 +1 @@ | |||
| 1 | generic-y += kvm_para.h | ||
| 2 | generic-y += ucontext.h | generic-y += ucontext.h | |
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 5a73e2956ac4..22aa97136c01 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild | |||
| @@ -20,6 +20,7 @@ generic-y += irq_work.h | |||
| 20 | generic-y += kdebug.h | 20 | generic-y += kdebug.h |
| 21 | generic-y += kmap_types.h | 21 | generic-y += kmap_types.h |
| 22 | generic-y += kprobes.h | 22 | generic-y += kprobes.h |
| 23 | generic-y += kvm_para.h | ||
| 23 | generic-y += local.h | 24 | generic-y += local.h |
| 24 | generic-y += mcs_spinlock.h | 25 | generic-y += mcs_spinlock.h |
| 25 | generic-y += mm-arch-hooks.h | 26 | generic-y += mm-arch-hooks.h |
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h index 2db9f1cf0694..b4ff07c1baed 100644 --- a/arch/openrisc/include/asm/syscall.h +++ b/arch/openrisc/include/asm/syscall.h | |||
| @@ -56,20 +56,16 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
| 56 | 56 | ||
| 57 | static inline void | 57 | static inline void |
| 58 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 58 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 59 | unsigned int i, unsigned int n, unsigned long *args) | 59 | unsigned long *args) |
| 60 | { | 60 | { |
| 61 | BUG_ON(i + n > 6); | 61 | memcpy(args, ®s->gpr[3], 6 * sizeof(args[0])); |
| 62 | |||
| 63 | memcpy(args, ®s->gpr[3 + i], n * sizeof(args[0])); | ||
| 64 | } | 62 | } |
| 65 | 63 | ||
| 66 | static inline void | 64 | static inline void |
| 67 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | 65 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, |
| 68 | unsigned int i, unsigned int n, const unsigned long *args) | 66 | const unsigned long *args) |
| 69 | { | 67 | { |
| 70 | BUG_ON(i + n > 6); | 68 | memcpy(®s->gpr[3], args, 6 * sizeof(args[0])); |
| 71 | |||
| 72 | memcpy(®s->gpr[3 + i], args, n * sizeof(args[0])); | ||
| 73 | } | 69 | } |
| 74 | 70 | ||
| 75 | static inline int syscall_get_arch(void) | 71 | static inline int syscall_get_arch(void) |
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/openrisc/include/uapi/asm/Kbuild +++ b/arch/openrisc/include/uapi/asm/Kbuild | |||
| @@ -1,2 +1 @@ | |||
| 1 | generic-y += kvm_para.h | ||
| 2 | generic-y += ucontext.h | generic-y += ucontext.h | |
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 6f49e77d82a2..9bcd0c903dbb 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild | |||
| @@ -11,6 +11,7 @@ generic-y += irq_regs.h | |||
| 11 | generic-y += irq_work.h | 11 | generic-y += irq_work.h |
| 12 | generic-y += kdebug.h | 12 | generic-y += kdebug.h |
| 13 | generic-y += kprobes.h | 13 | generic-y += kprobes.h |
| 14 | generic-y += kvm_para.h | ||
| 14 | generic-y += local.h | 15 | generic-y += local.h |
| 15 | generic-y += local64.h | 16 | generic-y += local64.h |
| 16 | generic-y += mcs_spinlock.h | 17 | generic-y += mcs_spinlock.h |
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 2a27b275ab09..9ff033d261ab 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h | |||
| @@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *); | |||
| 22 | 22 | ||
| 23 | static inline unsigned long regs_return_value(struct pt_regs *regs) | 23 | static inline unsigned long regs_return_value(struct pt_regs *regs) |
| 24 | { | 24 | { |
| 25 | return regs->gr[20]; | 25 | return regs->gr[28]; |
| 26 | } | 26 | } |
| 27 | 27 | ||
| 28 | static inline void instruction_pointer_set(struct pt_regs *regs, | 28 | static inline void instruction_pointer_set(struct pt_regs *regs, |
| 29 | unsigned long val) | 29 | unsigned long val) |
| 30 | { | 30 | { |
| 31 | regs->iaoq[0] = val; | 31 | regs->iaoq[0] = val; |
| 32 | regs->iaoq[1] = val + 4; | ||
| 32 | } | 33 | } |
| 33 | 34 | ||
| 34 | /* Query offset/name of register from its name/offset */ | 35 | /* Query offset/name of register from its name/offset */ |
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h index 8bff1a58c97f..62a6d477fae0 100644 --- a/arch/parisc/include/asm/syscall.h +++ b/arch/parisc/include/asm/syscall.h | |||
| @@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk, | |||
| 18 | } | 18 | } |
| 19 | 19 | ||
| 20 | static inline void syscall_get_arguments(struct task_struct *tsk, | 20 | static inline void syscall_get_arguments(struct task_struct *tsk, |
| 21 | struct pt_regs *regs, unsigned int i, | 21 | struct pt_regs *regs, |
| 22 | unsigned int n, unsigned long *args) | 22 | unsigned long *args) |
| 23 | { | 23 | { |
| 24 | BUG_ON(i); | 24 | args[5] = regs->gr[21]; |
| 25 | 25 | args[4] = regs->gr[22]; | |
| 26 | switch (n) { | 26 | args[3] = regs->gr[23]; |
| 27 | case 6: | 27 | args[2] = regs->gr[24]; |
| 28 | args[5] = regs->gr[21]; | 28 | args[1] = regs->gr[25]; |
| 29 | case 5: | 29 | args[0] = regs->gr[26]; |
| 30 | args[4] = regs->gr[22]; | ||
| 31 | case 4: | ||
| 32 | args[3] = regs->gr[23]; | ||
| 33 | case 3: | ||
| 34 | args[2] = regs->gr[24]; | ||
| 35 | case 2: | ||
| 36 | args[1] = regs->gr[25]; | ||
| 37 | case 1: | ||
| 38 | args[0] = regs->gr[26]; | ||
| 39 | case 0: | ||
| 40 | break; | ||
| 41 | default: | ||
| 42 | BUG(); | ||
| 43 | } | ||
| 44 | } | 30 | } |
| 45 | 31 | ||
| 46 | static inline long syscall_get_return_value(struct task_struct *task, | 32 | static inline long syscall_get_return_value(struct task_struct *task, |
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild index 22fdbd08cdc8..2bd5b392277c 100644 --- a/arch/parisc/include/uapi/asm/Kbuild +++ b/arch/parisc/include/uapi/asm/Kbuild | |||
| @@ -1,3 +1,2 @@ | |||
| 1 | generated-y += unistd_32.h | 1 | generated-y += unistd_32.h |
| 2 | generated-y += unistd_64.h | 2 | generated-y += unistd_64.h |
| 3 | generic-y += kvm_para.h | ||
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index eb39e7e380d7..841db71958cd 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c | |||
| @@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void) | |||
| 210 | 210 | ||
| 211 | static int __init parisc_idle_init(void) | 211 | static int __init parisc_idle_init(void) |
| 212 | { | 212 | { |
| 213 | const char *marker; | ||
| 214 | |||
| 215 | /* check QEMU/SeaBIOS marker in PAGE0 */ | ||
| 216 | marker = (char *) &PAGE0->pad0; | ||
| 217 | running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); | ||
| 218 | |||
| 219 | if (!running_on_qemu) | 213 | if (!running_on_qemu) |
| 220 | cpu_idle_poll_ctrl(1); | 214 | cpu_idle_poll_ctrl(1); |
| 221 | 215 | ||
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 15dd9e21be7e..d908058d05c1 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
| @@ -397,6 +397,9 @@ void __init start_parisc(void) | |||
| 397 | int ret, cpunum; | 397 | int ret, cpunum; |
| 398 | struct pdc_coproc_cfg coproc_cfg; | 398 | struct pdc_coproc_cfg coproc_cfg; |
| 399 | 399 | ||
| 400 | /* check QEMU/SeaBIOS marker in PAGE0 */ | ||
| 401 | running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0); | ||
| 402 | |||
| 400 | cpunum = smp_processor_id(); | 403 | cpunum = smp_processor_id(); |
| 401 | 404 | ||
| 402 | init_cpu_topology(); | 405 | init_cpu_topology(); |
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index c5698a523bb1..23f7ed796f38 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
| @@ -302,6 +302,7 @@ | |||
| 302 | /* Misc instructions for BPF compiler */ | 302 | /* Misc instructions for BPF compiler */ |
| 303 | #define PPC_INST_LBZ 0x88000000 | 303 | #define PPC_INST_LBZ 0x88000000 |
| 304 | #define PPC_INST_LD 0xe8000000 | 304 | #define PPC_INST_LD 0xe8000000 |
| 305 | #define PPC_INST_LDX 0x7c00002a | ||
| 305 | #define PPC_INST_LHZ 0xa0000000 | 306 | #define PPC_INST_LHZ 0xa0000000 |
| 306 | #define PPC_INST_LWZ 0x80000000 | 307 | #define PPC_INST_LWZ 0x80000000 |
| 307 | #define PPC_INST_LHBRX 0x7c00062c | 308 | #define PPC_INST_LHBRX 0x7c00062c |
| @@ -309,6 +310,7 @@ | |||
| 309 | #define PPC_INST_STB 0x98000000 | 310 | #define PPC_INST_STB 0x98000000 |
| 310 | #define PPC_INST_STH 0xb0000000 | 311 | #define PPC_INST_STH 0xb0000000 |
| 311 | #define PPC_INST_STD 0xf8000000 | 312 | #define PPC_INST_STD 0xf8000000 |
| 313 | #define PPC_INST_STDX 0x7c00012a | ||
| 312 | #define PPC_INST_STDU 0xf8000001 | 314 | #define PPC_INST_STDU 0xf8000001 |
| 313 | #define PPC_INST_STW 0x90000000 | 315 | #define PPC_INST_STW 0x90000000 |
| 314 | #define PPC_INST_STWU 0x94000000 | 316 | #define PPC_INST_STWU 0x94000000 |
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 1a0e7a8b1c81..1243045bad2d 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h | |||
| @@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 65 | 65 | ||
| 66 | static inline void syscall_get_arguments(struct task_struct *task, | 66 | static inline void syscall_get_arguments(struct task_struct *task, |
| 67 | struct pt_regs *regs, | 67 | struct pt_regs *regs, |
| 68 | unsigned int i, unsigned int n, | ||
| 69 | unsigned long *args) | 68 | unsigned long *args) |
| 70 | { | 69 | { |
| 71 | unsigned long val, mask = -1UL; | 70 | unsigned long val, mask = -1UL; |
| 72 | 71 | unsigned int n = 6; | |
| 73 | BUG_ON(i + n > 6); | ||
| 74 | 72 | ||
| 75 | #ifdef CONFIG_COMPAT | 73 | #ifdef CONFIG_COMPAT |
| 76 | if (test_tsk_thread_flag(task, TIF_32BIT)) | 74 | if (test_tsk_thread_flag(task, TIF_32BIT)) |
| 77 | mask = 0xffffffff; | 75 | mask = 0xffffffff; |
| 78 | #endif | 76 | #endif |
| 79 | while (n--) { | 77 | while (n--) { |
| 80 | if (n == 0 && i == 0) | 78 | if (n == 0) |
| 81 | val = regs->orig_gpr3; | 79 | val = regs->orig_gpr3; |
| 82 | else | 80 | else |
| 83 | val = regs->gpr[3 + i + n]; | 81 | val = regs->gpr[3 + n]; |
| 84 | 82 | ||
| 85 | args[n] = val & mask; | 83 | args[n] = val & mask; |
| 86 | } | 84 | } |
| @@ -88,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
| 88 | 86 | ||
| 89 | static inline void syscall_set_arguments(struct task_struct *task, | 87 | static inline void syscall_set_arguments(struct task_struct *task, |
| 90 | struct pt_regs *regs, | 88 | struct pt_regs *regs, |
| 91 | unsigned int i, unsigned int n, | ||
| 92 | const unsigned long *args) | 89 | const unsigned long *args) |
| 93 | { | 90 | { |
| 94 | BUG_ON(i + n > 6); | 91 | memcpy(®s->gpr[3], args, 6 * sizeof(args[0])); |
| 95 | memcpy(®s->gpr[3 + i], args, n * sizeof(args[0])); | ||
| 96 | 92 | ||
| 97 | /* Also copy the first argument into orig_gpr3 */ | 93 | /* Also copy the first argument into orig_gpr3 */ |
| 98 | if (i == 0 && n > 0) | 94 | regs->orig_gpr3 = args[0]; |
| 99 | regs->orig_gpr3 = args[0]; | ||
| 100 | } | 95 | } |
| 101 | 96 | ||
| 102 | static inline int syscall_get_arch(void) | 97 | static inline int syscall_get_arch(void) |
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 683b5b3805bd..cd381e2291df 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
| 23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| 24 | #include <linux/export.h> | 24 | #include <linux/export.h> |
| 25 | #include <linux/kmemleak.h> | ||
| 25 | #include <linux/kvm_para.h> | 26 | #include <linux/kvm_para.h> |
| 26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 27 | #include <linux/of.h> | 28 | #include <linux/of.h> |
| @@ -712,6 +713,12 @@ static void kvm_use_magic_page(void) | |||
| 712 | 713 | ||
| 713 | static __init void kvm_free_tmp(void) | 714 | static __init void kvm_free_tmp(void) |
| 714 | { | 715 | { |
| 716 | /* | ||
| 717 | * Inform kmemleak about the hole in the .bss section since the | ||
| 718 | * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y. | ||
| 719 | */ | ||
| 720 | kmemleak_free_part(&kvm_tmp[kvm_tmp_index], | ||
| 721 | ARRAY_SIZE(kvm_tmp) - kvm_tmp_index); | ||
| 715 | free_reserved_area(&kvm_tmp[kvm_tmp_index], | 722 | free_reserved_area(&kvm_tmp[kvm_tmp_index], |
| 716 | &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); | 723 | &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); |
| 717 | } | 724 | } |
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S index 844d8e774492..b7f6f6e0b6e8 100644 --- a/arch/powerpc/lib/memcmp_64.S +++ b/arch/powerpc/lib/memcmp_64.S | |||
| @@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp) | |||
| 215 | beq .Lzero | 215 | beq .Lzero |
| 216 | 216 | ||
| 217 | .Lcmp_rest_lt8bytes: | 217 | .Lcmp_rest_lt8bytes: |
| 218 | /* Here we have only less than 8 bytes to compare with. at least s1 | 218 | /* |
| 219 | * Address is aligned with 8 bytes. | 219 | * Here we have less than 8 bytes to compare. At least s1 is aligned to |
| 220 | * The next double words are load and shift right with appropriate | 220 | * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a |
| 221 | * bits. | 221 | * page boundary, otherwise we might read past the end of the buffer and |
| 222 | * trigger a page fault. We use 4K as the conservative minimum page | ||
| 223 | * size. If we detect that case we go to the byte-by-byte loop. | ||
| 224 | * | ||
| 225 | * Otherwise the next double word is loaded from s1 and s2, and shifted | ||
| 226 | * right to compare the appropriate bits. | ||
| 222 | */ | 227 | */ |
| 228 | clrldi r6,r4,(64-12) // r6 = r4 & 0xfff | ||
| 229 | cmpdi r6,0xff8 | ||
| 230 | bgt .Lshort | ||
| 231 | |||
| 223 | subfic r6,r5,8 | 232 | subfic r6,r5,8 |
| 224 | slwi r6,r6,3 | 233 | slwi r6,r6,3 |
| 225 | LD rA,0,r3 | 234 | LD rA,0,r3 |
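The guard added above is a page-offset check; a C sketch of the same condition (assuming the 4K minimum page size stated in the comment):

	/* Loading 8 bytes from s2 is only safe when s2..s2+7 stay within
	 * one 4K page, i.e. the page offset of s2 is at most 0xff8. */
	static inline int can_load_8_bytes(const void *s2)
	{
		return ((unsigned long)s2 & 0xfff) <= 0xff8;
	}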
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 549e9490ff2a..dcac37745b05 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h | |||
| @@ -51,6 +51,8 @@ | |||
| 51 | #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) | 51 | #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) |
| 52 | #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ | 52 | #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ |
| 53 | ___PPC_RA(base) | ((i) & 0xfffc)) | 53 | ___PPC_RA(base) | ((i) & 0xfffc)) |
| 54 | #define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \ | ||
| 55 | ___PPC_RA(base) | ___PPC_RB(b)) | ||
| 54 | #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ | 56 | #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ |
| 55 | ___PPC_RA(base) | ((i) & 0xfffc)) | 57 | ___PPC_RA(base) | ((i) & 0xfffc)) |
| 56 | #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \ | 58 | #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \ |
| @@ -65,7 +67,9 @@ | |||
| 65 | #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ | 67 | #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ |
| 66 | ___PPC_RA(base) | IMM_L(i)) | 68 | ___PPC_RA(base) | IMM_L(i)) |
| 67 | #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ | 69 | #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ |
| 68 | ___PPC_RA(base) | IMM_L(i)) | 70 | ___PPC_RA(base) | ((i) & 0xfffc)) |
| 71 | #define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \ | ||
| 72 | ___PPC_RA(base) | ___PPC_RB(b)) | ||
| 69 | #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ | 73 | #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ |
| 70 | ___PPC_RA(base) | IMM_L(i)) | 74 | ___PPC_RA(base) | IMM_L(i)) |
| 71 | #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ | 75 | #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ |
| @@ -85,17 +89,6 @@ | |||
| 85 | ___PPC_RA(a) | ___PPC_RB(b)) | 89 | ___PPC_RA(a) | ___PPC_RB(b)) |
| 86 | #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \ | 90 | #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \ |
| 87 | ___PPC_RA(a) | ___PPC_RB(b)) | 91 | ___PPC_RA(a) | ___PPC_RB(b)) |
| 88 | |||
| 89 | #ifdef CONFIG_PPC64 | ||
| 90 | #define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0) | ||
| 91 | #define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0) | ||
| 92 | #define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) | ||
| 93 | #else | ||
| 94 | #define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) | ||
| 95 | #define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) | ||
| 96 | #define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) | ||
| 97 | #endif | ||
| 98 | |||
| 99 | #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) | 92 | #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) |
| 100 | #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) | 93 | #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) |
| 101 | #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \ | 94 | #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \ |
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h index dc50a8d4b3b9..21744d8aa053 100644 --- a/arch/powerpc/net/bpf_jit32.h +++ b/arch/powerpc/net/bpf_jit32.h | |||
| @@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); | |||
| 122 | #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) | 122 | #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) |
| 123 | #endif | 123 | #endif |
| 124 | 124 | ||
| 125 | #define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) | ||
| 126 | #define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) | ||
| 127 | #define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) | ||
| 128 | |||
| 125 | #define SEEN_DATAREF 0x10000 /* might call external helpers */ | 129 | #define SEEN_DATAREF 0x10000 /* might call external helpers */ |
| 126 | #define SEEN_XREG 0x20000 /* X reg is used */ | 130 | #define SEEN_XREG 0x20000 /* X reg is used */ |
| 127 | #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary | 131 | #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary |
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h index 3609be4692b3..47f441f351a6 100644 --- a/arch/powerpc/net/bpf_jit64.h +++ b/arch/powerpc/net/bpf_jit64.h | |||
| @@ -68,6 +68,26 @@ static const int b2p[] = { | |||
| 68 | /* PPC NVR range -- update this if we ever use NVRs below r27 */ | 68 | /* PPC NVR range -- update this if we ever use NVRs below r27 */ |
| 69 | #define BPF_PPC_NVR_MIN 27 | 69 | #define BPF_PPC_NVR_MIN 27 |
| 70 | 70 | ||
| 71 | /* | ||
| 72 | * WARNING: These can use TMP_REG_2 if the offset is not at word boundary, | ||
| 73 | * so ensure that it isn't in use already. | ||
| 74 | */ | ||
| 75 | #define PPC_BPF_LL(r, base, i) do { \ | ||
| 76 | if ((i) % 4) { \ | ||
| 77 | PPC_LI(b2p[TMP_REG_2], (i)); \ | ||
| 78 | PPC_LDX(r, base, b2p[TMP_REG_2]); \ | ||
| 79 | } else \ | ||
| 80 | PPC_LD(r, base, i); \ | ||
| 81 | } while(0) | ||
| 82 | #define PPC_BPF_STL(r, base, i) do { \ | ||
| 83 | if ((i) % 4) { \ | ||
| 84 | PPC_LI(b2p[TMP_REG_2], (i)); \ | ||
| 85 | PPC_STDX(r, base, b2p[TMP_REG_2]); \ | ||
| 86 | } else \ | ||
| 87 | PPC_STD(r, base, i); \ | ||
| 88 | } while(0) | ||
| 89 | #define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) | ||
| 90 | |||
| 71 | #define SEEN_FUNC 0x1000 /* might call external helpers */ | 91 | #define SEEN_FUNC 0x1000 /* might call external helpers */ |
| 72 | #define SEEN_STACK 0x2000 /* uses BPF stack */ | 92 | #define SEEN_STACK 0x2000 /* uses BPF stack */ |
| 73 | #define SEEN_TAILCALL 0x4000 /* uses tail calls */ | 93 | #define SEEN_TAILCALL 0x4000 /* uses tail calls */ |
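The TMP_REG_2 fallback exists because ld/std are DS-form instructions whose displacement only encodes multiples of 4 (hence PPC_LD() now masks the immediate with 0xfffc). A small sketch of the test the macros apply:

	/* Offsets that are not a multiple of 4 cannot be encoded in the
	 * DS-form displacement, so the JIT moves them into TMP_REG_2 and
	 * uses the indexed ldx/stdx forms instead. */
	static inline int needs_indexed_form(int off)
	{
		return (off % 4) != 0;
	}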
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 4194d3cfb60c..21a1dcd4b156 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c | |||
| @@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 | |||
| 252 | * if (tail_call_cnt > MAX_TAIL_CALL_CNT) | 252 | * if (tail_call_cnt > MAX_TAIL_CALL_CNT) |
| 253 | * goto out; | 253 | * goto out; |
| 254 | */ | 254 | */ |
| 255 | PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); | 255 | PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); |
| 256 | PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT); | 256 | PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT); |
| 257 | PPC_BCC(COND_GT, out); | 257 | PPC_BCC(COND_GT, out); |
| 258 | 258 | ||
| @@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 | |||
| 265 | /* prog = array->ptrs[index]; */ | 265 | /* prog = array->ptrs[index]; */ |
| 266 | PPC_MULI(b2p[TMP_REG_1], b2p_index, 8); | 266 | PPC_MULI(b2p[TMP_REG_1], b2p_index, 8); |
| 267 | PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array); | 267 | PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array); |
| 268 | PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); | 268 | PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); |
| 269 | 269 | ||
| 270 | /* | 270 | /* |
| 271 | * if (prog == NULL) | 271 | * if (prog == NULL) |
| @@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 | |||
| 275 | PPC_BCC(COND_EQ, out); | 275 | PPC_BCC(COND_EQ, out); |
| 276 | 276 | ||
| 277 | /* goto *(prog->bpf_func + prologue_size); */ | 277 | /* goto *(prog->bpf_func + prologue_size); */ |
| 278 | PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); | 278 | PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); |
| 279 | #ifdef PPC64_ELF_ABI_v1 | 279 | #ifdef PPC64_ELF_ABI_v1 |
| 280 | /* skip past the function descriptor */ | 280 | /* skip past the function descriptor */ |
| 281 | PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], | 281 | PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], |
| @@ -606,7 +606,7 @@ bpf_alu32_trunc: | |||
| 606 | * the instructions generated will remain the | 606 | * the instructions generated will remain the |
| 607 | * same across all passes | 607 | * same across all passes |
| 608 | */ | 608 | */ |
| 609 | PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx)); | 609 | PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx)); |
| 610 | PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)); | 610 | PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)); |
| 611 | PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]); | 611 | PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]); |
| 612 | break; | 612 | break; |
| @@ -662,7 +662,7 @@ emit_clear: | |||
| 662 | PPC_LI32(b2p[TMP_REG_1], imm); | 662 | PPC_LI32(b2p[TMP_REG_1], imm); |
| 663 | src_reg = b2p[TMP_REG_1]; | 663 | src_reg = b2p[TMP_REG_1]; |
| 664 | } | 664 | } |
| 665 | PPC_STD(src_reg, dst_reg, off); | 665 | PPC_BPF_STL(src_reg, dst_reg, off); |
| 666 | break; | 666 | break; |
| 667 | 667 | ||
| 668 | /* | 668 | /* |
| @@ -709,7 +709,7 @@ emit_clear: | |||
| 709 | break; | 709 | break; |
| 710 | /* dst = *(u64 *)(ul) (src + off) */ | 710 | /* dst = *(u64 *)(ul) (src + off) */ |
| 711 | case BPF_LDX | BPF_MEM | BPF_DW: | 711 | case BPF_LDX | BPF_MEM | BPF_DW: |
| 712 | PPC_LD(dst_reg, src_reg, off); | 712 | PPC_BPF_LL(dst_reg, src_reg, off); |
| 713 | break; | 713 | break; |
| 714 | 714 | ||
| 715 | /* | 715 | /* |
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c index 6ed22127391b..921f12182f3e 100644 --- a/arch/powerpc/platforms/pseries/pseries_energy.c +++ b/arch/powerpc/platforms/pseries/pseries_energy.c | |||
| @@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu) | |||
| 77 | 77 | ||
| 78 | ret = drc.drc_index_start + (thread_index * drc.sequential_inc); | 78 | ret = drc.drc_index_start + (thread_index * drc.sequential_inc); |
| 79 | } else { | 79 | } else { |
| 80 | const __be32 *indexes; | 80 | u32 nr_drc_indexes, thread_drc_index; |
| 81 | |||
| 82 | indexes = of_get_property(dn, "ibm,drc-indexes", NULL); | ||
| 83 | if (indexes == NULL) | ||
| 84 | goto err_of_node_put; | ||
| 85 | 81 | ||
| 86 | /* | 82 | /* |
| 87 | * The first element indexes[0] is the number of drc_indexes | 83 | * The first element of the ibm,drc-indexes array is the |
| 88 | * returned in the list. Hence thread_index+1 will get the | 84 | * number of drc_indexes returned in the list. Hence |
| 89 | * drc_index corresponding to core number thread_index. | 85 | * thread_index+1 will get the drc_index corresponding |
| 86 | * to core number thread_index. | ||
| 90 | */ | 87 | */ |
| 91 | ret = indexes[thread_index + 1]; | 88 | rc = of_property_read_u32_index(dn, "ibm,drc-indexes", |
| 89 | 0, &nr_drc_indexes); | ||
| 90 | if (rc) | ||
| 91 | goto err_of_node_put; | ||
| 92 | |||
| 93 | WARN_ON_ONCE(thread_index > nr_drc_indexes); | ||
| 94 | rc = of_property_read_u32_index(dn, "ibm,drc-indexes", | ||
| 95 | thread_index + 1, | ||
| 96 | &thread_drc_index); | ||
| 97 | if (rc) | ||
| 98 | goto err_of_node_put; | ||
| 99 | |||
| 100 | ret = thread_drc_index; | ||
| 92 | } | 101 | } |
| 93 | 102 | ||
| 94 | rc = 0; | 103 | rc = 0; |
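Besides the bounds check, of_property_read_u32_index() also handles cell endianness, which the removed raw indexing of of_get_property() did not (the motivation is an assumption here, not stated in the hunk). A sketch of reading the element count with the accessor (hypothetical debug helper):

	u32 nr_indexes;

	/* of_property_read_u32_index() does the be32_to_cpu() conversion,
	 * so the value is correct on little-endian kernels as well. */
	if (!of_property_read_u32_index(dn, "ibm,drc-indexes", 0, &nr_indexes))
		pr_debug("ibm,drc-indexes holds %u entries\n", nr_indexes);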
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index d97d52772789..452dcfd7e5dd 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
| @@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs, | |||
| 550 | "UE", | 550 | "UE", |
| 551 | "SLB", | 551 | "SLB", |
| 552 | "ERAT", | 552 | "ERAT", |
| 553 | "Unknown", | ||
| 553 | "TLB", | 554 | "TLB", |
| 554 | "D-Cache", | 555 | "D-Cache", |
| 555 | "Unknown", | 556 | "Unknown", |
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h index 57afe604b495..c207f6634b91 100644 --- a/arch/riscv/include/asm/fixmap.h +++ b/arch/riscv/include/asm/fixmap.h | |||
| @@ -26,7 +26,7 @@ enum fixed_addresses { | |||
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | #define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) | 28 | #define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) |
| 29 | #define FIXADDR_TOP (PAGE_OFFSET) | 29 | #define FIXADDR_TOP (VMALLOC_START) |
| 30 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | 30 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) |
| 31 | 31 | ||
| 32 | #define FIXMAP_PAGE_IO PAGE_KERNEL | 32 | #define FIXMAP_PAGE_IO PAGE_KERNEL |
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index bba3da6ef157..a3d5273ded7c 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h | |||
| @@ -72,32 +72,20 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 72 | 72 | ||
| 73 | static inline void syscall_get_arguments(struct task_struct *task, | 73 | static inline void syscall_get_arguments(struct task_struct *task, |
| 74 | struct pt_regs *regs, | 74 | struct pt_regs *regs, |
| 75 | unsigned int i, unsigned int n, | ||
| 76 | unsigned long *args) | 75 | unsigned long *args) |
| 77 | { | 76 | { |
| 78 | BUG_ON(i + n > 6); | 77 | args[0] = regs->orig_a0; |
| 79 | if (i == 0) { | 78 | args++; |
| 80 | args[0] = regs->orig_a0; | 79 | memcpy(args, ®s->a1, 5 * sizeof(args[0])); |
| 81 | args++; | ||
| 82 | i++; | ||
| 83 | n--; | ||
| 84 | } | ||
| 85 | memcpy(args, ®s->a1 + i * sizeof(regs->a1), n * sizeof(args[0])); | ||
| 86 | } | 80 | } |
| 87 | 81 | ||
| 88 | static inline void syscall_set_arguments(struct task_struct *task, | 82 | static inline void syscall_set_arguments(struct task_struct *task, |
| 89 | struct pt_regs *regs, | 83 | struct pt_regs *regs, |
| 90 | unsigned int i, unsigned int n, | ||
| 91 | const unsigned long *args) | 84 | const unsigned long *args) |
| 92 | { | 85 | { |
| 93 | BUG_ON(i + n > 6); | 86 | regs->orig_a0 = args[0]; |
| 94 | if (i == 0) { | 87 | args++; |
| 95 | regs->orig_a0 = args[0]; | 88 | memcpy(®s->a1, args, 5 * sizeof(regs->a1)); |
| 96 | args++; | ||
| 97 | i++; | ||
| 98 | n--; | ||
| 99 | } | ||
| 100 | memcpy(®s->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); | ||
| 101 | } | 89 | } |
| 102 | 90 | ||
| 103 | static inline int syscall_get_arch(void) | 91 | static inline int syscall_get_arch(void) |
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index a00168b980d2..fb53a8089e76 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h | |||
| @@ -300,7 +300,7 @@ do { \ | |||
| 300 | " .balign 4\n" \ | 300 | " .balign 4\n" \ |
| 301 | "4:\n" \ | 301 | "4:\n" \ |
| 302 | " li %0, %6\n" \ | 302 | " li %0, %6\n" \ |
| 303 | " jump 2b, %1\n" \ | 303 | " jump 3b, %1\n" \ |
| 304 | " .previous\n" \ | 304 | " .previous\n" \ |
| 305 | " .section __ex_table,\"a\"\n" \ | 305 | " .section __ex_table,\"a\"\n" \ |
| 306 | " .balign " RISCV_SZPTR "\n" \ | 306 | " .balign " RISCV_SZPTR "\n" \ |
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index f13f7f276639..598568168d35 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | ifdef CONFIG_FTRACE | 5 | ifdef CONFIG_FTRACE |
| 6 | CFLAGS_REMOVE_ftrace.o = -pg | 6 | CFLAGS_REMOVE_ftrace.o = -pg |
| 7 | CFLAGS_REMOVE_setup.o = -pg | ||
| 8 | endif | 7 | endif |
| 9 | 8 | ||
| 10 | extra-y += head.o | 9 | extra-y += head.o |
| @@ -29,8 +28,6 @@ obj-y += vdso.o | |||
| 29 | obj-y += cacheinfo.o | 28 | obj-y += cacheinfo.o |
| 30 | obj-y += vdso/ | 29 | obj-y += vdso/ |
| 31 | 30 | ||
| 32 | CFLAGS_setup.o := -mcmodel=medany | ||
| 33 | |||
| 34 | obj-$(CONFIG_FPU) += fpu.o | 31 | obj-$(CONFIG_FPU) += fpu.o |
| 35 | obj-$(CONFIG_SMP) += smpboot.o | 32 | obj-$(CONFIG_SMP) += smpboot.o |
| 36 | obj-$(CONFIG_SMP) += smp.o | 33 | obj-$(CONFIG_SMP) += smp.o |
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 7dd308129b40..2872edce894d 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c | |||
| @@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, | |||
| 141 | { | 141 | { |
| 142 | s32 hi20; | 142 | s32 hi20; |
| 143 | 143 | ||
| 144 | if (IS_ENABLED(CMODEL_MEDLOW)) { | 144 | if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) { |
| 145 | pr_err( | 145 | pr_err( |
| 146 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", | 146 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", |
| 147 | me->name, (long long)v, location); | 147 | me->name, (long long)v, location); |
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index ecb654f6a79e..540a331d1376 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c | |||
| @@ -48,14 +48,6 @@ struct screen_info screen_info = { | |||
| 48 | }; | 48 | }; |
| 49 | #endif | 49 | #endif |
| 50 | 50 | ||
| 51 | unsigned long va_pa_offset; | ||
| 52 | EXPORT_SYMBOL(va_pa_offset); | ||
| 53 | unsigned long pfn_base; | ||
| 54 | EXPORT_SYMBOL(pfn_base); | ||
| 55 | |||
| 56 | unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; | ||
| 57 | EXPORT_SYMBOL(empty_zero_page); | ||
| 58 | |||
| 59 | /* The lucky hart to first increment this variable will boot the other cores */ | 51 | /* The lucky hart to first increment this variable will boot the other cores */ |
| 60 | atomic_t hart_lottery; | 52 | atomic_t hart_lottery; |
| 61 | unsigned long boot_cpu_hartid; | 53 | unsigned long boot_cpu_hartid; |
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index eb22ab49b3e0..b68aac701803 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile | |||
| @@ -1,3 +1,9 @@ | |||
| 1 | |||
| 2 | CFLAGS_init.o := -mcmodel=medany | ||
| 3 | ifdef CONFIG_FTRACE | ||
| 4 | CFLAGS_REMOVE_init.o = -pg | ||
| 5 | endif | ||
| 6 | |||
| 1 | obj-y += init.o | 7 | obj-y += init.o |
| 2 | obj-y += fault.o | 8 | obj-y += fault.o |
| 3 | obj-y += extable.o | 9 | obj-y += extable.o |
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index b379a75ac6a6..5fd8c922e1c2 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c | |||
| @@ -25,6 +25,10 @@ | |||
| 25 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |
| 26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
| 27 | 27 | ||
| 28 | unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] | ||
| 29 | __page_aligned_bss; | ||
| 30 | EXPORT_SYMBOL(empty_zero_page); | ||
| 31 | |||
| 28 | static void __init zone_sizes_init(void) | 32 | static void __init zone_sizes_init(void) |
| 29 | { | 33 | { |
| 30 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; | 34 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; |
| @@ -143,6 +147,11 @@ void __init setup_bootmem(void) | |||
| 143 | } | 147 | } |
| 144 | } | 148 | } |
| 145 | 149 | ||
| 150 | unsigned long va_pa_offset; | ||
| 151 | EXPORT_SYMBOL(va_pa_offset); | ||
| 152 | unsigned long pfn_base; | ||
| 153 | EXPORT_SYMBOL(pfn_base); | ||
| 154 | |||
| 146 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; | 155 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; |
| 147 | pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); | 156 | pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); |
| 148 | 157 | ||
| @@ -172,6 +181,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) | |||
| 172 | } | 181 | } |
| 173 | } | 182 | } |
| 174 | 183 | ||
| 184 | /* | ||
| 185 | * setup_vm() is called from head.S with MMU-off. | ||
| 186 | * | ||
| 187 | * Following requirements should be honoured for setup_vm() to work | ||
| 188 | * correctly: | ||
| 189 | * 1) It should use PC-relative addressing for accessing kernel symbols. | ||
| 190 | * To achieve this we always use GCC cmodel=medany. | ||
| 191 | * 2) The compiler instrumentation for FTRACE will not work for setup_vm() | ||
| 192 | * so disable compiler instrumentation when FTRACE is enabled. | ||
| 193 | * | ||
| 194 | * Currently, the above requirements are honoured by using custom CFLAGS | ||
| 195 | * for init.o in mm/Makefile. | ||
| 196 | */ | ||
| 197 | |||
| 198 | #ifndef __riscv_cmodel_medany | ||
| 199 | #error "setup_vm() is called from head.S before relocate so it should " | ||
| 200 | "not use absolute addressing." | ||
| 201 | #endif | ||
| 202 | |||
| 175 | asmlinkage void __init setup_vm(void) | 203 | asmlinkage void __init setup_vm(void) |
| 176 | { | 204 | { |
| 177 | extern char _start; | 205 | extern char _start; |
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index 1a6a7092d942..e94a0a28b5eb 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h | |||
| @@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid, | |||
| 360 | return reg1; | 360 | return reg1; |
| 361 | } | 361 | } |
| 362 | 362 | ||
| 363 | /* | ||
| 364 | * Interface to tell the AP bus code that a configuration | ||
| 365 | * change has happened. The bus code should at least do | ||
| 366 | * an ap bus resource rescan. | ||
| 367 | */ | ||
| 368 | #if IS_ENABLED(CONFIG_ZCRYPT) | ||
| 369 | void ap_bus_cfg_chg(void); | ||
| 370 | #else | ||
| 371 | static inline void ap_bus_cfg_chg(void){}; | ||
| 372 | #endif | ||
| 373 | |||
| 363 | #endif /* _ASM_S390_AP_H_ */ | 374 | #endif /* _ASM_S390_AP_H_ */ |
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 7d22a474a040..f74639a05f0f 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h | |||
| @@ -252,11 +252,14 @@ do { \ | |||
| 252 | 252 | ||
| 253 | /* | 253 | /* |
| 254 | * Cache aliasing on the latest machines calls for a mapping granularity | 254 | * Cache aliasing on the latest machines calls for a mapping granularity |
| 255 | * of 512KB. For 64-bit processes use a 512KB alignment and a randomization | 255 | * of 512KB for the anonymous mapping base. For 64-bit processes use a |
| 256 | * of up to 1GB. For 31-bit processes the virtual address space is limited, | 256 | * 512KB alignment and a randomization of up to 1GB. For 31-bit processes |
| 257 | * use no alignment and limit the randomization to 8MB. | 257 | * the virtual address space is limited, use no alignment and limit the |
| 258 | * randomization to 8MB. | ||
| 259 | * For the additional randomization of the program break use 32MB for | ||
| 260 | * 64-bit and 8MB for 31-bit. | ||
| 258 | */ | 261 | */ |
| 259 | #define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL) | 262 | #define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL) |
| 260 | #define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) | 263 | #define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) |
| 261 | #define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL) | 264 | #define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL) |
| 262 | #define STACK_RND_MASK MMAP_RND_MASK | 265 | #define STACK_RND_MASK MMAP_RND_MASK |
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index cc0947e08b6f..5b9f10b1e55d 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h | |||
| @@ -91,52 +91,53 @@ struct lowcore { | |||
| 91 | __u64 hardirq_timer; /* 0x02e8 */ | 91 | __u64 hardirq_timer; /* 0x02e8 */ |
| 92 | __u64 softirq_timer; /* 0x02f0 */ | 92 | __u64 softirq_timer; /* 0x02f0 */ |
| 93 | __u64 steal_timer; /* 0x02f8 */ | 93 | __u64 steal_timer; /* 0x02f8 */ |
| 94 | __u64 last_update_timer; /* 0x0300 */ | 94 | __u64 avg_steal_timer; /* 0x0300 */ |
| 95 | __u64 last_update_clock; /* 0x0308 */ | 95 | __u64 last_update_timer; /* 0x0308 */ |
| 96 | __u64 int_clock; /* 0x0310 */ | 96 | __u64 last_update_clock; /* 0x0310 */ |
| 97 | __u64 mcck_clock; /* 0x0318 */ | 97 | __u64 int_clock; /* 0x0318*/ |
| 98 | __u64 clock_comparator; /* 0x0320 */ | 98 | __u64 mcck_clock; /* 0x0320 */ |
| 99 | __u64 boot_clock[2]; /* 0x0328 */ | 99 | __u64 clock_comparator; /* 0x0328 */ |
| 100 | __u64 boot_clock[2]; /* 0x0330 */ | ||
| 100 | 101 | ||
| 101 | /* Current process. */ | 102 | /* Current process. */ |
| 102 | __u64 current_task; /* 0x0338 */ | 103 | __u64 current_task; /* 0x0340 */ |
| 103 | __u64 kernel_stack; /* 0x0340 */ | 104 | __u64 kernel_stack; /* 0x0348 */ |
| 104 | 105 | ||
| 105 | /* Interrupt, DAT-off and restartstack. */ | 106 | /* Interrupt, DAT-off and restartstack. */ |
| 106 | __u64 async_stack; /* 0x0348 */ | 107 | __u64 async_stack; /* 0x0350 */ |
| 107 | __u64 nodat_stack; /* 0x0350 */ | 108 | __u64 nodat_stack; /* 0x0358 */ |
| 108 | __u64 restart_stack; /* 0x0358 */ | 109 | __u64 restart_stack; /* 0x0360 */ |
| 109 | 110 | ||
| 110 | /* Restart function and parameter. */ | 111 | /* Restart function and parameter. */ |
| 111 | __u64 restart_fn; /* 0x0360 */ | 112 | __u64 restart_fn; /* 0x0368 */ |
| 112 | __u64 restart_data; /* 0x0368 */ | 113 | __u64 restart_data; /* 0x0370 */ |
| 113 | __u64 restart_source; /* 0x0370 */ | 114 | __u64 restart_source; /* 0x0378 */ |
| 114 | 115 | ||
| 115 | /* Address space pointer. */ | 116 | /* Address space pointer. */ |
| 116 | __u64 kernel_asce; /* 0x0378 */ | 117 | __u64 kernel_asce; /* 0x0380 */ |
| 117 | __u64 user_asce; /* 0x0380 */ | 118 | __u64 user_asce; /* 0x0388 */ |
| 118 | __u64 vdso_asce; /* 0x0388 */ | 119 | __u64 vdso_asce; /* 0x0390 */ |
| 119 | 120 | ||
| 120 | /* | 121 | /* |
| 121 | * The lpp and current_pid fields form a | 122 | * The lpp and current_pid fields form a |
| 122 | * 64-bit value that is set as program | 123 | * 64-bit value that is set as program |
| 123 | * parameter with the LPP instruction. | 124 | * parameter with the LPP instruction. |
| 124 | */ | 125 | */ |
| 125 | __u32 lpp; /* 0x0390 */ | 126 | __u32 lpp; /* 0x0398 */ |
| 126 | __u32 current_pid; /* 0x0394 */ | 127 | __u32 current_pid; /* 0x039c */ |
| 127 | 128 | ||
| 128 | /* SMP info area */ | 129 | /* SMP info area */ |
| 129 | __u32 cpu_nr; /* 0x0398 */ | 130 | __u32 cpu_nr; /* 0x03a0 */ |
| 130 | __u32 softirq_pending; /* 0x039c */ | 131 | __u32 softirq_pending; /* 0x03a4 */ |
| 131 | __u32 preempt_count; /* 0x03a0 */ | 132 | __u32 preempt_count; /* 0x03a8 */ |
| 132 | __u32 spinlock_lockval; /* 0x03a4 */ | 133 | __u32 spinlock_lockval; /* 0x03ac */ |
| 133 | __u32 spinlock_index; /* 0x03a8 */ | 134 | __u32 spinlock_index; /* 0x03b0 */ |
| 134 | __u32 fpu_flags; /* 0x03ac */ | 135 | __u32 fpu_flags; /* 0x03b4 */ |
| 135 | __u64 percpu_offset; /* 0x03b0 */ | 136 | __u64 percpu_offset; /* 0x03b8 */ |
| 136 | __u64 vdso_per_cpu_data; /* 0x03b8 */ | 137 | __u64 vdso_per_cpu_data; /* 0x03c0 */ |
| 137 | __u64 machine_flags; /* 0x03c0 */ | 138 | __u64 machine_flags; /* 0x03c8 */ |
| 138 | __u64 gmap; /* 0x03c8 */ | 139 | __u64 gmap; /* 0x03d0 */ |
| 139 | __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */ | 140 | __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */ |
| 140 | 141 | ||
| 141 | /* br %r1 trampoline */ | 142 | /* br %r1 trampoline */ |
| 142 | __u16 br_r1_trampoline; /* 0x0400 */ | 143 | __u16 br_r1_trampoline; /* 0x0400 */ |
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 96f9a9151fde..59c3e91f2cdb 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h | |||
| @@ -56,40 +56,32 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 56 | 56 | ||
| 57 | static inline void syscall_get_arguments(struct task_struct *task, | 57 | static inline void syscall_get_arguments(struct task_struct *task, |
| 58 | struct pt_regs *regs, | 58 | struct pt_regs *regs, |
| 59 | unsigned int i, unsigned int n, | ||
| 60 | unsigned long *args) | 59 | unsigned long *args) |
| 61 | { | 60 | { |
| 62 | unsigned long mask = -1UL; | 61 | unsigned long mask = -1UL; |
| 62 | unsigned int n = 6; | ||
| 63 | 63 | ||
| 64 | /* | ||
| 65 | * No arguments for this syscall, there's nothing to do. | ||
| 66 | */ | ||
| 67 | if (!n) | ||
| 68 | return; | ||
| 69 | |||
| 70 | BUG_ON(i + n > 6); | ||
| 71 | #ifdef CONFIG_COMPAT | 64 | #ifdef CONFIG_COMPAT |
| 72 | if (test_tsk_thread_flag(task, TIF_31BIT)) | 65 | if (test_tsk_thread_flag(task, TIF_31BIT)) |
| 73 | mask = 0xffffffff; | 66 | mask = 0xffffffff; |
| 74 | #endif | 67 | #endif |
| 75 | while (n-- > 0) | 68 | while (n-- > 0) |
| 76 | if (i + n > 0) | 69 | if (n > 0) |
| 77 | args[n] = regs->gprs[2 + i + n] & mask; | 70 | args[n] = regs->gprs[2 + n] & mask; |
| 78 | if (i == 0) | 71 | |
| 79 | args[0] = regs->orig_gpr2 & mask; | 72 | args[0] = regs->orig_gpr2 & mask; |
| 80 | } | 73 | } |
| 81 | 74 | ||
| 82 | static inline void syscall_set_arguments(struct task_struct *task, | 75 | static inline void syscall_set_arguments(struct task_struct *task, |
| 83 | struct pt_regs *regs, | 76 | struct pt_regs *regs, |
| 84 | unsigned int i, unsigned int n, | ||
| 85 | const unsigned long *args) | 77 | const unsigned long *args) |
| 86 | { | 78 | { |
| 87 | BUG_ON(i + n > 6); | 79 | unsigned int n = 6; |
| 80 | |||
| 88 | while (n-- > 0) | 81 | while (n-- > 0) |
| 89 | if (i + n > 0) | 82 | if (n > 0) |
| 90 | regs->gprs[2 + i + n] = args[n]; | 83 | regs->gprs[2 + n] = args[n]; |
| 91 | if (i == 0) | 84 | regs->orig_gpr2 = args[0]; |
| 92 | regs->orig_gpr2 = args[0]; | ||
| 93 | } | 85 | } |
| 94 | 86 | ||
| 95 | static inline int syscall_get_arch(void) | 87 | static inline int syscall_get_arch(void) |
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c index c6fad208c2fa..b6854812d2ed 100644 --- a/arch/s390/kernel/perf_cpum_cf_diag.c +++ b/arch/s390/kernel/perf_cpum_cf_diag.c | |||
| @@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event) | |||
| 196 | */ | 196 | */ |
| 197 | static int __hw_perf_event_init(struct perf_event *event) | 197 | static int __hw_perf_event_init(struct perf_event *event) |
| 198 | { | 198 | { |
| 199 | struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); | ||
| 200 | struct perf_event_attr *attr = &event->attr; | 199 | struct perf_event_attr *attr = &event->attr; |
| 200 | struct cpu_cf_events *cpuhw; | ||
| 201 | enum cpumf_ctr_set i; | 201 | enum cpumf_ctr_set i; |
| 202 | int err = 0; | 202 | int err = 0; |
| 203 | 203 | ||
| 204 | debug_sprintf_event(cf_diag_dbg, 5, | 204 | debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__, |
| 205 | "%s event %p cpu %d authorized %#x\n", __func__, | 205 | event, event->cpu); |
| 206 | event, event->cpu, cpuhw->info.auth_ctl); | ||
| 207 | 206 | ||
| 208 | event->hw.config = attr->config; | 207 | event->hw.config = attr->config; |
| 209 | event->hw.config_base = 0; | 208 | event->hw.config_base = 0; |
| 210 | local64_set(&event->count, 0); | ||
| 211 | 209 | ||
| 212 | /* Add all authorized counter sets to config_base */ | 210 | /* Add all authorized counter sets to config_base. The |
| 211 | * hardware init function is either called per-cpu or just once | ||
| 212 | * for all CPUs (event->cpu == -1). This depends on whether | ||
| 213 | * counting is started for all CPUs or on a per-workload basis where | ||
| 214 | * the perf event moves from one CPU to another CPU. | ||
| 215 | * Checking the authorization on any CPU is fine as the hardware | ||
| 216 | * applies the same authorization settings to all CPUs. | ||
| 217 | */ | ||
| 218 | cpuhw = &get_cpu_var(cpu_cf_events); | ||
| 213 | for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) | 219 | for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) |
| 214 | if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i]) | 220 | if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i]) |
| 215 | event->hw.config_base |= cpumf_ctr_ctl[i]; | 221 | event->hw.config_base |= cpumf_ctr_ctl[i]; |
| 222 | put_cpu_var(cpu_cf_events); | ||
| 216 | 223 | ||
| 217 | /* No authorized counter sets, nothing to count/sample */ | 224 | /* No authorized counter sets, nothing to count/sample */ |
| 218 | if (!event->hw.config_base) { | 225 | if (!event->hw.config_base) { |
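The get_cpu_var()/put_cpu_var() pair is what makes the per-CPU read above safe: it disables preemption around the access. A stripped-down sketch of the pattern:

	struct cpu_cf_events *cpuhw;

	cpuhw = &get_cpu_var(cpu_cf_events);	/* disables preemption */
	/* ... read cpuhw->info.auth_ctl on whichever CPU this runs on ... */
	put_cpu_var(cpu_cf_events);		/* re-enables preemption */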
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 3fe1c77c361b..bd197baf1dc3 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
| @@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) | |||
| 266 | lc->percpu_offset = __per_cpu_offset[cpu]; | 266 | lc->percpu_offset = __per_cpu_offset[cpu]; |
| 267 | lc->kernel_asce = S390_lowcore.kernel_asce; | 267 | lc->kernel_asce = S390_lowcore.kernel_asce; |
| 268 | lc->machine_flags = S390_lowcore.machine_flags; | 268 | lc->machine_flags = S390_lowcore.machine_flags; |
| 269 | lc->user_timer = lc->system_timer = lc->steal_timer = 0; | 269 | lc->user_timer = lc->system_timer = |
| 270 | lc->steal_timer = lc->avg_steal_timer = 0; | ||
| 270 | __ctl_store(lc->cregs_save_area, 0, 15); | 271 | __ctl_store(lc->cregs_save_area, 0, 15); |
| 271 | save_access_regs((unsigned int *) lc->access_regs_save_area); | 272 | save_access_regs((unsigned int *) lc->access_regs_save_area); |
| 272 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, | 273 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 98f850e00008..a69a0911ed0e 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
| @@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime, | |||
| 124 | */ | 124 | */ |
| 125 | static int do_account_vtime(struct task_struct *tsk) | 125 | static int do_account_vtime(struct task_struct *tsk) |
| 126 | { | 126 | { |
| 127 | u64 timer, clock, user, guest, system, hardirq, softirq, steal; | 127 | u64 timer, clock, user, guest, system, hardirq, softirq; |
| 128 | 128 | ||
| 129 | timer = S390_lowcore.last_update_timer; | 129 | timer = S390_lowcore.last_update_timer; |
| 130 | clock = S390_lowcore.last_update_clock; | 130 | clock = S390_lowcore.last_update_clock; |
| @@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk) | |||
| 182 | if (softirq) | 182 | if (softirq) |
| 183 | account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ); | 183 | account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ); |
| 184 | 184 | ||
| 185 | steal = S390_lowcore.steal_timer; | ||
| 186 | if ((s64) steal > 0) { | ||
| 187 | S390_lowcore.steal_timer = 0; | ||
| 188 | account_steal_time(cputime_to_nsecs(steal)); | ||
| 189 | } | ||
| 190 | |||
| 191 | return virt_timer_forward(user + guest + system + hardirq + softirq); | 185 | return virt_timer_forward(user + guest + system + hardirq + softirq); |
| 192 | } | 186 | } |
| 193 | 187 | ||
| @@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev) | |||
| 213 | */ | 207 | */ |
| 214 | void vtime_flush(struct task_struct *tsk) | 208 | void vtime_flush(struct task_struct *tsk) |
| 215 | { | 209 | { |
| 210 | u64 steal, avg_steal; | ||
| 211 | |||
| 216 | if (do_account_vtime(tsk)) | 212 | if (do_account_vtime(tsk)) |
| 217 | virt_timer_expire(); | 213 | virt_timer_expire(); |
| 214 | |||
| 215 | steal = S390_lowcore.steal_timer; | ||
| 216 | avg_steal = S390_lowcore.avg_steal_timer / 2; | ||
| 217 | if ((s64) steal > 0) { | ||
| 218 | S390_lowcore.steal_timer = 0; | ||
| 219 | account_steal_time(steal); | ||
| 220 | avg_steal += steal; | ||
| 221 | } | ||
| 222 | S390_lowcore.avg_steal_timer = avg_steal; | ||
| 218 | } | 223 | } |
| 219 | 224 | ||
| 220 | /* | 225 | /* |
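The new avg_steal_timer is a decaying accumulator: each flush halves the previous value and adds the steal time observed since the last flush. A minimal sketch of the arithmetic (units aside):

	static u64 update_avg_steal(u64 avg, u64 steal)
	{
		avg /= 2;			/* decay the running value */
		if ((s64) steal > 0)
			avg += steal;		/* add newly observed steal time */
		return avg;
	}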
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c index 958f46da3a79..d91065e81a4e 100644 --- a/arch/sh/boards/of-generic.c +++ b/arch/sh/boards/of-generic.c | |||
| @@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = { | |||
| 164 | 164 | ||
| 165 | struct sh_clk_ops; | 165 | struct sh_clk_ops; |
| 166 | 166 | ||
| 167 | void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) | 167 | void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx) |
| 168 | { | 168 | { |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | void __init plat_irq_setup(void) | 171 | void __init __weak plat_irq_setup(void) |
| 172 | { | 172 | { |
| 173 | } | 173 | } |
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index a6ef3fee5f85..7bf2cb680d32 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild | |||
| @@ -9,6 +9,7 @@ generic-y += emergency-restart.h | |||
| 9 | generic-y += exec.h | 9 | generic-y += exec.h |
| 10 | generic-y += irq_regs.h | 10 | generic-y += irq_regs.h |
| 11 | generic-y += irq_work.h | 11 | generic-y += irq_work.h |
| 12 | generic-y += kvm_para.h | ||
| 12 | generic-y += local.h | 13 | generic-y += local.h |
| 13 | generic-y += local64.h | 14 | generic-y += local64.h |
| 14 | generic-y += mcs_spinlock.h | 15 | generic-y += mcs_spinlock.h |
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 6e118799831c..8c9d7e5e5dcc 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h | |||
| @@ -48,51 +48,28 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 48 | 48 | ||
| 49 | static inline void syscall_get_arguments(struct task_struct *task, | 49 | static inline void syscall_get_arguments(struct task_struct *task, |
| 50 | struct pt_regs *regs, | 50 | struct pt_regs *regs, |
| 51 | unsigned int i, unsigned int n, | ||
| 52 | unsigned long *args) | 51 | unsigned long *args) |
| 53 | { | 52 | { |
| 54 | /* | ||
| 55 | * Do this simply for now. If we need to start supporting | ||
| 56 | * fetching arguments from arbitrary indices, this will need some | ||
| 57 | * extra logic. Presently there are no in-tree users that depend | ||
| 58 | * on this behaviour. | ||
| 59 | */ | ||
| 60 | BUG_ON(i); | ||
| 61 | 53 | ||
| 62 | /* Argument pattern is: R4, R5, R6, R7, R0, R1 */ | 54 | /* Argument pattern is: R4, R5, R6, R7, R0, R1 */ |
| 63 | switch (n) { | 55 | args[5] = regs->regs[1]; |
| 64 | case 6: args[5] = regs->regs[1]; | 56 | args[4] = regs->regs[0]; |
| 65 | case 5: args[4] = regs->regs[0]; | 57 | args[3] = regs->regs[7]; |
| 66 | case 4: args[3] = regs->regs[7]; | 58 | args[2] = regs->regs[6]; |
| 67 | case 3: args[2] = regs->regs[6]; | 59 | args[1] = regs->regs[5]; |
| 68 | case 2: args[1] = regs->regs[5]; | 60 | args[0] = regs->regs[4]; |
| 69 | case 1: args[0] = regs->regs[4]; | ||
| 70 | case 0: | ||
| 71 | break; | ||
| 72 | default: | ||
| 73 | BUG(); | ||
| 74 | } | ||
| 75 | } | 61 | } |
| 76 | 62 | ||
| 77 | static inline void syscall_set_arguments(struct task_struct *task, | 63 | static inline void syscall_set_arguments(struct task_struct *task, |
| 78 | struct pt_regs *regs, | 64 | struct pt_regs *regs, |
| 79 | unsigned int i, unsigned int n, | ||
| 80 | const unsigned long *args) | 65 | const unsigned long *args) |
| 81 | { | 66 | { |
| 82 | /* Same note as above applies */ | 67 | regs->regs[1] = args[5]; |
| 83 | BUG_ON(i); | 68 | regs->regs[0] = args[4]; |
| 84 | 69 | regs->regs[7] = args[3]; | |
| 85 | switch (n) { | 70 | regs->regs[6] = args[2]; |
| 86 | case 6: regs->regs[1] = args[5]; | 71 | regs->regs[5] = args[1]; |
| 87 | case 5: regs->regs[0] = args[4]; | 72 | regs->regs[4] = args[0]; |
| 88 | case 4: regs->regs[7] = args[3]; | ||
| 89 | case 3: regs->regs[6] = args[2]; | ||
| 90 | case 2: regs->regs[5] = args[1]; | ||
| 91 | case 1: regs->regs[4] = args[0]; | ||
| 92 | break; | ||
| 93 | default: | ||
| 94 | BUG(); | ||
| 95 | } | ||
| 96 | } | 73 | } |
| 97 | 74 | ||
| 98 | static inline int syscall_get_arch(void) | 75 | static inline int syscall_get_arch(void) |
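This hunk, and the matching sparc, um, and x86 hunks further down, change the syscall_get_arguments()/syscall_set_arguments() contract: the (i, n) partial-range parameters are gone and all six arguments are always copied. A hypothetical caller under the new contract (sketch only; dump_syscall_args() is not part of the patch and assumes ordinary kernel headers):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/* The generic side now always passes a six-entry array and the arch
 * hook fills every slot, so no per-index bounds checking is needed. */
static void dump_syscall_args(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	unsigned int i;

	syscall_get_arguments(task, regs, args);
	for (i = 0; i < 6; i++)
		pr_info("arg%u = %#lx\n", i, args[i]);
}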
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index 43882580c7f9..22fad97da066 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h | |||
| @@ -47,20 +47,16 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 47 | 47 | ||
| 48 | static inline void syscall_get_arguments(struct task_struct *task, | 48 | static inline void syscall_get_arguments(struct task_struct *task, |
| 49 | struct pt_regs *regs, | 49 | struct pt_regs *regs, |
| 50 | unsigned int i, unsigned int n, | ||
| 51 | unsigned long *args) | 50 | unsigned long *args) |
| 52 | { | 51 | { |
| 53 | BUG_ON(i + n > 6); | 52 | memcpy(args, ®s->regs[2], 6 * sizeof(args[0])); |
| 54 | memcpy(args, ®s->regs[2 + i], n * sizeof(args[0])); | ||
| 55 | } | 53 | } |
| 56 | 54 | ||
| 57 | static inline void syscall_set_arguments(struct task_struct *task, | 55 | static inline void syscall_set_arguments(struct task_struct *task, |
| 58 | struct pt_regs *regs, | 56 | struct pt_regs *regs, |
| 59 | unsigned int i, unsigned int n, | ||
| 60 | const unsigned long *args) | 57 | const unsigned long *args) |
| 61 | { | 58 | { |
| 62 | BUG_ON(i + n > 6); | 59 | memcpy(®s->regs[2], args, 6 * sizeof(args[0])); |
| 63 | memcpy(®s->regs[2 + i], args, n * sizeof(args[0])); | ||
| 64 | } | 60 | } |
| 65 | 61 | ||
| 66 | static inline int syscall_get_arch(void) | 62 | static inline int syscall_get_arch(void) |
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild index ecfbd40924dd..b8812c74c1de 100644 --- a/arch/sh/include/uapi/asm/Kbuild +++ b/arch/sh/include/uapi/asm/Kbuild | |||
| @@ -1,5 +1,4 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | 2 | ||
| 3 | generated-y += unistd_32.h | 3 | generated-y += unistd_32.h |
| 4 | generic-y += kvm_para.h | ||
| 5 | generic-y += ucontext.h | 4 | generic-y += ucontext.h |
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index b82f64e28f55..a22cfd5c0ee8 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild | |||
| @@ -9,6 +9,7 @@ generic-y += exec.h | |||
| 9 | generic-y += export.h | 9 | generic-y += export.h |
| 10 | generic-y += irq_regs.h | 10 | generic-y += irq_regs.h |
| 11 | generic-y += irq_work.h | 11 | generic-y += irq_work.h |
| 12 | generic-y += kvm_para.h | ||
| 12 | generic-y += linkage.h | 13 | generic-y += linkage.h |
| 13 | generic-y += local.h | 14 | generic-y += local.h |
| 14 | generic-y += local64.h | 15 | generic-y += local64.h |
diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h index 053989e3f6a6..4d075434e816 100644 --- a/arch/sparc/include/asm/syscall.h +++ b/arch/sparc/include/asm/syscall.h | |||
| @@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 96 | 96 | ||
| 97 | static inline void syscall_get_arguments(struct task_struct *task, | 97 | static inline void syscall_get_arguments(struct task_struct *task, |
| 98 | struct pt_regs *regs, | 98 | struct pt_regs *regs, |
| 99 | unsigned int i, unsigned int n, | ||
| 100 | unsigned long *args) | 99 | unsigned long *args) |
| 101 | { | 100 | { |
| 102 | int zero_extend = 0; | 101 | int zero_extend = 0; |
| 103 | unsigned int j; | 102 | unsigned int j; |
| 103 | unsigned int n = 6; | ||
| 104 | 104 | ||
| 105 | #ifdef CONFIG_SPARC64 | 105 | #ifdef CONFIG_SPARC64 |
| 106 | if (test_tsk_thread_flag(task, TIF_32BIT)) | 106 | if (test_tsk_thread_flag(task, TIF_32BIT)) |
| @@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
| 108 | #endif | 108 | #endif |
| 109 | 109 | ||
| 110 | for (j = 0; j < n; j++) { | 110 | for (j = 0; j < n; j++) { |
| 111 | unsigned long val = regs->u_regs[UREG_I0 + i + j]; | 111 | unsigned long val = regs->u_regs[UREG_I0 + j]; |
| 112 | 112 | ||
| 113 | if (zero_extend) | 113 | if (zero_extend) |
| 114 | args[j] = (u32) val; | 114 | args[j] = (u32) val; |
| @@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
| 119 | 119 | ||
| 120 | static inline void syscall_set_arguments(struct task_struct *task, | 120 | static inline void syscall_set_arguments(struct task_struct *task, |
| 121 | struct pt_regs *regs, | 121 | struct pt_regs *regs, |
| 122 | unsigned int i, unsigned int n, | ||
| 123 | const unsigned long *args) | 122 | const unsigned long *args) |
| 124 | { | 123 | { |
| 125 | unsigned int j; | 124 | unsigned int i; |
| 126 | 125 | ||
| 127 | for (j = 0; j < n; j++) | 126 | for (i = 0; i < 6; i++) |
| 128 | regs->u_regs[UREG_I0 + i + j] = args[j]; | 127 | regs->u_regs[UREG_I0 + i] = args[i]; |
| 129 | } | 128 | } |
| 130 | 129 | ||
| 131 | static inline int syscall_get_arch(void) | 130 | static inline int syscall_get_arch(void) |
diff --git a/arch/sparc/include/uapi/asm/kvm_para.h b/arch/sparc/include/uapi/asm/kvm_para.h deleted file mode 100644 index baacc4996d18..000000000000 --- a/arch/sparc/include/uapi/asm/kvm_para.h +++ /dev/null | |||
| @@ -1,2 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
| 2 | #include <asm-generic/kvm_para.h> | ||
diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h index 9fb9cf8cd39a..98e50c50c12e 100644 --- a/arch/um/include/asm/syscall-generic.h +++ b/arch/um/include/asm/syscall-generic.h | |||
| @@ -53,84 +53,30 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 53 | 53 | ||
| 54 | static inline void syscall_get_arguments(struct task_struct *task, | 54 | static inline void syscall_get_arguments(struct task_struct *task, |
| 55 | struct pt_regs *regs, | 55 | struct pt_regs *regs, |
| 56 | unsigned int i, unsigned int n, | ||
| 57 | unsigned long *args) | 56 | unsigned long *args) |
| 58 | { | 57 | { |
| 59 | const struct uml_pt_regs *r = ®s->regs; | 58 | const struct uml_pt_regs *r = ®s->regs; |
| 60 | 59 | ||
| 61 | switch (i) { | 60 | *args++ = UPT_SYSCALL_ARG1(r); |
| 62 | case 0: | 61 | *args++ = UPT_SYSCALL_ARG2(r); |
| 63 | if (!n--) | 62 | *args++ = UPT_SYSCALL_ARG3(r); |
| 64 | break; | 63 | *args++ = UPT_SYSCALL_ARG4(r); |
| 65 | *args++ = UPT_SYSCALL_ARG1(r); | 64 | *args++ = UPT_SYSCALL_ARG5(r); |
| 66 | case 1: | 65 | *args = UPT_SYSCALL_ARG6(r); |
| 67 | if (!n--) | ||
| 68 | break; | ||
| 69 | *args++ = UPT_SYSCALL_ARG2(r); | ||
| 70 | case 2: | ||
| 71 | if (!n--) | ||
| 72 | break; | ||
| 73 | *args++ = UPT_SYSCALL_ARG3(r); | ||
| 74 | case 3: | ||
| 75 | if (!n--) | ||
| 76 | break; | ||
| 77 | *args++ = UPT_SYSCALL_ARG4(r); | ||
| 78 | case 4: | ||
| 79 | if (!n--) | ||
| 80 | break; | ||
| 81 | *args++ = UPT_SYSCALL_ARG5(r); | ||
| 82 | case 5: | ||
| 83 | if (!n--) | ||
| 84 | break; | ||
| 85 | *args++ = UPT_SYSCALL_ARG6(r); | ||
| 86 | case 6: | ||
| 87 | if (!n--) | ||
| 88 | break; | ||
| 89 | default: | ||
| 90 | BUG(); | ||
| 91 | break; | ||
| 92 | } | ||
| 93 | } | 66 | } |
| 94 | 67 | ||
| 95 | static inline void syscall_set_arguments(struct task_struct *task, | 68 | static inline void syscall_set_arguments(struct task_struct *task, |
| 96 | struct pt_regs *regs, | 69 | struct pt_regs *regs, |
| 97 | unsigned int i, unsigned int n, | ||
| 98 | const unsigned long *args) | 70 | const unsigned long *args) |
| 99 | { | 71 | { |
| 100 | struct uml_pt_regs *r = ®s->regs; | 72 | struct uml_pt_regs *r = ®s->regs; |
| 101 | 73 | ||
| 102 | switch (i) { | 74 | UPT_SYSCALL_ARG1(r) = *args++; |
| 103 | case 0: | 75 | UPT_SYSCALL_ARG2(r) = *args++; |
| 104 | if (!n--) | 76 | UPT_SYSCALL_ARG3(r) = *args++; |
| 105 | break; | 77 | UPT_SYSCALL_ARG4(r) = *args++; |
| 106 | UPT_SYSCALL_ARG1(r) = *args++; | 78 | UPT_SYSCALL_ARG5(r) = *args++; |
| 107 | case 1: | 79 | UPT_SYSCALL_ARG6(r) = *args; |
| 108 | if (!n--) | ||
| 109 | break; | ||
| 110 | UPT_SYSCALL_ARG2(r) = *args++; | ||
| 111 | case 2: | ||
| 112 | if (!n--) | ||
| 113 | break; | ||
| 114 | UPT_SYSCALL_ARG3(r) = *args++; | ||
| 115 | case 3: | ||
| 116 | if (!n--) | ||
| 117 | break; | ||
| 118 | UPT_SYSCALL_ARG4(r) = *args++; | ||
| 119 | case 4: | ||
| 120 | if (!n--) | ||
| 121 | break; | ||
| 122 | UPT_SYSCALL_ARG5(r) = *args++; | ||
| 123 | case 5: | ||
| 124 | if (!n--) | ||
| 125 | break; | ||
| 126 | UPT_SYSCALL_ARG6(r) = *args++; | ||
| 127 | case 6: | ||
| 128 | if (!n--) | ||
| 129 | break; | ||
| 130 | default: | ||
| 131 | BUG(); | ||
| 132 | break; | ||
| 133 | } | ||
| 134 | } | 80 | } |
| 135 | 81 | ||
| 136 | /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */ | 82 | /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */ |
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1d1544b6ca74..d77d953c04c1 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild | |||
| @@ -18,6 +18,7 @@ generic-y += irq_work.h | |||
| 18 | generic-y += kdebug.h | 18 | generic-y += kdebug.h |
| 19 | generic-y += kmap_types.h | 19 | generic-y += kmap_types.h |
| 20 | generic-y += kprobes.h | 20 | generic-y += kprobes.h |
| 21 | generic-y += kvm_para.h | ||
| 21 | generic-y += local.h | 22 | generic-y += local.h |
| 22 | generic-y += mcs_spinlock.h | 23 | generic-y += mcs_spinlock.h |
| 23 | generic-y += mm-arch-hooks.h | 24 | generic-y += mm-arch-hooks.h |
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/unicore32/include/uapi/asm/Kbuild +++ b/arch/unicore32/include/uapi/asm/Kbuild | |||
| @@ -1,2 +1 @@ | |||
| 1 | generic-y += kvm_para.h | ||
| 2 | generic-y += ucontext.h | 1 | generic-y += ucontext.h |

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c1f9b3cf437c..5ad92419be19 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -2217,14 +2217,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING | |||
| 2217 | If unsure, leave at the default value. | 2217 | If unsure, leave at the default value. |
| 2218 | 2218 | ||
| 2219 | config HOTPLUG_CPU | 2219 | config HOTPLUG_CPU |
| 2220 | bool "Support for hot-pluggable CPUs" | 2220 | def_bool y |
| 2221 | depends on SMP | 2221 | depends on SMP |
| 2222 | ---help--- | ||
| 2223 | Say Y here to allow turning CPUs off and on. CPUs can be | ||
| 2224 | controlled through /sys/devices/system/cpu. | ||
| 2225 | ( Note: power management support will enable this option | ||
| 2226 | automatically on SMP systems. ) | ||
| 2227 | Say N if you want to disable CPU hotplug. | ||
| 2228 | 2222 | ||
| 2229 | config BOOTPARAM_HOTPLUG_CPU0 | 2223 | config BOOTPARAM_HOTPLUG_CPU0 |
| 2230 | bool "Set default setting of cpu0_hotpluggable" | 2224 | bool "Set default setting of cpu0_hotpluggable" |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 2d8b9d8ca4f8..a587805c6687 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
| @@ -219,8 +219,12 @@ ifdef CONFIG_RETPOLINE | |||
| 219 | # Additionally, avoid generating expensive indirect jumps which | 219 | # Additionally, avoid generating expensive indirect jumps which |
| 220 | # are subject to retpolines for small number of switch cases. | 220 | # are subject to retpolines for small number of switch cases. |
| 221 | # clang turns off jump table generation by default when under | 221 | # clang turns off jump table generation by default when under |
| 222 | # retpoline builds, however, gcc does not for x86. | 222 | # retpoline builds, however, gcc does not for x86. This has |
| 223 | KBUILD_CFLAGS += $(call cc-option,--param=case-values-threshold=20) | 223 | # only been fixed starting from gcc stable version 8.4.0 and |
| 224 | # onwards, but not for older ones. See gcc bug #86952. | ||
| 225 | ifndef CONFIG_CC_IS_CLANG | ||
| 226 | KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables) | ||
| 227 | endif | ||
| 224 | endif | 228 | endif |
| 225 | 229 | ||
| 226 | archscripts: scripts_basic | 230 | archscripts: scripts_basic |
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index fd13655e0f9b..d2f184165934 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h | |||
| @@ -120,8 +120,6 @@ static inline void console_init(void) | |||
| 120 | 120 | ||
| 121 | void set_sev_encryption_mask(void); | 121 | void set_sev_encryption_mask(void); |
| 122 | 122 | ||
| 123 | #endif | ||
| 124 | |||
| 125 | /* acpi.c */ | 123 | /* acpi.c */ |
| 126 | #ifdef CONFIG_ACPI | 124 | #ifdef CONFIG_ACPI |
| 127 | acpi_physical_address get_rsdp_addr(void); | 125 | acpi_physical_address get_rsdp_addr(void); |
| @@ -135,3 +133,5 @@ int count_immovable_mem_regions(void); | |||
| 135 | #else | 133 | #else |
| 136 | static inline int count_immovable_mem_regions(void) { return 0; } | 134 | static inline int count_immovable_mem_regions(void) { return 0; } |
| 137 | #endif | 135 | #endif |
| 136 | |||
| 137 | #endif /* BOOT_COMPRESSED_MISC_H */ | ||
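The misc.h change moves the header guard's #endif to the end of the file so the later ACPI and immovable-memory declarations are covered too. For reference, the intended shape (a sketch; contents elided):

#ifndef BOOT_COMPRESSED_MISC_H
#define BOOT_COMPRESSED_MISC_H

/* ... every declaration, including the conditionally compiled ones,
 * sits inside the guard ... */

#endif /* BOOT_COMPRESSED_MISC_H */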
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index ce95b8cbd229..0e56ff7e4848 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
| @@ -112,8 +112,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; | |||
| 112 | test_cpu_cap(c, bit)) | 112 | test_cpu_cap(c, bit)) |
| 113 | 113 | ||
| 114 | #define this_cpu_has(bit) \ | 114 | #define this_cpu_has(bit) \ |
| 115 | (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ | 115 | (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ |
| 116 | x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability)) | 116 | x86_this_cpu_test_bit(bit, \ |
| 117 | (unsigned long __percpu *)&cpu_info.x86_capability)) | ||
| 117 | 118 | ||
| 118 | /* | 119 | /* |
| 119 | * This macro is for detection of features which need kernel | 120 | * This macro is for detection of features which need kernel |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a5db4475e72d..159b5988292f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -253,14 +253,14 @@ struct kvm_mmu_memory_cache { | |||
| 253 | * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used | 253 | * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used |
| 254 | * by indirect shadow page can not be more than 15 bits. | 254 | * by indirect shadow page can not be more than 15 bits. |
| 255 | * | 255 | * |
| 256 | * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access, | 256 | * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access, |
| 257 | * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp. | 257 | * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp. |
| 258 | */ | 258 | */ |
| 259 | union kvm_mmu_page_role { | 259 | union kvm_mmu_page_role { |
| 260 | u32 word; | 260 | u32 word; |
| 261 | struct { | 261 | struct { |
| 262 | unsigned level:4; | 262 | unsigned level:4; |
| 263 | unsigned cr4_pae:1; | 263 | unsigned gpte_is_8_bytes:1; |
| 264 | unsigned quadrant:2; | 264 | unsigned quadrant:2; |
| 265 | unsigned direct:1; | 265 | unsigned direct:1; |
| 266 | unsigned access:3; | 266 | unsigned access:3; |
| @@ -350,6 +350,7 @@ struct kvm_mmu_page { | |||
| 350 | }; | 350 | }; |
| 351 | 351 | ||
| 352 | struct kvm_pio_request { | 352 | struct kvm_pio_request { |
| 353 | unsigned long linear_rip; | ||
| 353 | unsigned long count; | 354 | unsigned long count; |
| 354 | int in; | 355 | int in; |
| 355 | int port; | 356 | int port; |
| @@ -568,6 +569,7 @@ struct kvm_vcpu_arch { | |||
| 568 | bool tpr_access_reporting; | 569 | bool tpr_access_reporting; |
| 569 | u64 ia32_xss; | 570 | u64 ia32_xss; |
| 570 | u64 microcode_version; | 571 | u64 microcode_version; |
| 572 | u64 arch_capabilities; | ||
| 571 | 573 | ||
| 572 | /* | 574 | /* |
| 573 | * Paging state of the vcpu | 575 | * Paging state of the vcpu |
| @@ -1192,6 +1194,8 @@ struct kvm_x86_ops { | |||
| 1192 | int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu, | 1194 | int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu, |
| 1193 | uint16_t *vmcs_version); | 1195 | uint16_t *vmcs_version); |
| 1194 | uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu); | 1196 | uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu); |
| 1197 | |||
| 1198 | bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu); | ||
| 1195 | }; | 1199 | }; |
| 1196 | 1200 | ||
| 1197 | struct kvm_arch_async_pf { | 1201 | struct kvm_arch_async_pf { |
| @@ -1252,7 +1256,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, | |||
| 1252 | gfn_t gfn_offset, unsigned long mask); | 1256 | gfn_t gfn_offset, unsigned long mask); |
| 1253 | void kvm_mmu_zap_all(struct kvm *kvm); | 1257 | void kvm_mmu_zap_all(struct kvm *kvm); |
| 1254 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); | 1258 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); |
| 1255 | unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); | 1259 | unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm); |
| 1256 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); | 1260 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); |
| 1257 | 1261 | ||
| 1258 | int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); | 1262 | int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); |
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index 63b3393bd98e..c53682303c9c 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h | |||
| @@ -77,7 +77,11 @@ static inline size_t real_mode_size_needed(void) | |||
| 77 | return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE); | 77 | return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE); |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | void set_real_mode_mem(phys_addr_t mem, size_t size); | 80 | static inline void set_real_mode_mem(phys_addr_t mem) |
| 81 | { | ||
| 82 | real_mode_header = (struct real_mode_header *) __va(mem); | ||
| 83 | } | ||
| 84 | |||
| 81 | void reserve_real_mode(void); | 85 | void reserve_real_mode(void); |
| 82 | 86 | ||
| 83 | #endif /* __ASSEMBLY__ */ | 87 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index d653139857af..4c305471ec33 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h | |||
| @@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 91 | 91 | ||
| 92 | static inline void syscall_get_arguments(struct task_struct *task, | 92 | static inline void syscall_get_arguments(struct task_struct *task, |
| 93 | struct pt_regs *regs, | 93 | struct pt_regs *regs, |
| 94 | unsigned int i, unsigned int n, | ||
| 95 | unsigned long *args) | 94 | unsigned long *args) |
| 96 | { | 95 | { |
| 97 | BUG_ON(i + n > 6); | 96 | memcpy(args, ®s->bx, 6 * sizeof(args[0])); |
| 98 | memcpy(args, ®s->bx + i, n * sizeof(args[0])); | ||
| 99 | } | 97 | } |
| 100 | 98 | ||
| 101 | static inline void syscall_set_arguments(struct task_struct *task, | 99 | static inline void syscall_set_arguments(struct task_struct *task, |
| @@ -116,124 +114,50 @@ static inline int syscall_get_arch(void) | |||
| 116 | 114 | ||
| 117 | static inline void syscall_get_arguments(struct task_struct *task, | 115 | static inline void syscall_get_arguments(struct task_struct *task, |
| 118 | struct pt_regs *regs, | 116 | struct pt_regs *regs, |
| 119 | unsigned int i, unsigned int n, | ||
| 120 | unsigned long *args) | 117 | unsigned long *args) |
| 121 | { | 118 | { |
| 122 | # ifdef CONFIG_IA32_EMULATION | 119 | # ifdef CONFIG_IA32_EMULATION |
| 123 | if (task->thread_info.status & TS_COMPAT) | 120 | if (task->thread_info.status & TS_COMPAT) { |
| 124 | switch (i) { | 121 | *args++ = regs->bx; |
| 125 | case 0: | 122 | *args++ = regs->cx; |
| 126 | if (!n--) break; | 123 | *args++ = regs->dx; |
| 127 | *args++ = regs->bx; | 124 | *args++ = regs->si; |
| 128 | case 1: | 125 | *args++ = regs->di; |
| 129 | if (!n--) break; | 126 | *args = regs->bp; |
| 130 | *args++ = regs->cx; | 127 | } else |
| 131 | case 2: | ||
| 132 | if (!n--) break; | ||
| 133 | *args++ = regs->dx; | ||
| 134 | case 3: | ||
| 135 | if (!n--) break; | ||
| 136 | *args++ = regs->si; | ||
| 137 | case 4: | ||
| 138 | if (!n--) break; | ||
| 139 | *args++ = regs->di; | ||
| 140 | case 5: | ||
| 141 | if (!n--) break; | ||
| 142 | *args++ = regs->bp; | ||
| 143 | case 6: | ||
| 144 | if (!n--) break; | ||
| 145 | default: | ||
| 146 | BUG(); | ||
| 147 | break; | ||
| 148 | } | ||
| 149 | else | ||
| 150 | # endif | 128 | # endif |
| 151 | switch (i) { | 129 | { |
| 152 | case 0: | 130 | *args++ = regs->di; |
| 153 | if (!n--) break; | 131 | *args++ = regs->si; |
| 154 | *args++ = regs->di; | 132 | *args++ = regs->dx; |
| 155 | case 1: | 133 | *args++ = regs->r10; |
| 156 | if (!n--) break; | 134 | *args++ = regs->r8; |
| 157 | *args++ = regs->si; | 135 | *args = regs->r9; |
| 158 | case 2: | 136 | } |
| 159 | if (!n--) break; | ||
| 160 | *args++ = regs->dx; | ||
| 161 | case 3: | ||
| 162 | if (!n--) break; | ||
| 163 | *args++ = regs->r10; | ||
| 164 | case 4: | ||
| 165 | if (!n--) break; | ||
| 166 | *args++ = regs->r8; | ||
| 167 | case 5: | ||
| 168 | if (!n--) break; | ||
| 169 | *args++ = regs->r9; | ||
| 170 | case 6: | ||
| 171 | if (!n--) break; | ||
| 172 | default: | ||
| 173 | BUG(); | ||
| 174 | break; | ||
| 175 | } | ||
| 176 | } | 137 | } |
| 177 | 138 | ||
| 178 | static inline void syscall_set_arguments(struct task_struct *task, | 139 | static inline void syscall_set_arguments(struct task_struct *task, |
| 179 | struct pt_regs *regs, | 140 | struct pt_regs *regs, |
| 180 | unsigned int i, unsigned int n, | ||
| 181 | const unsigned long *args) | 141 | const unsigned long *args) |
| 182 | { | 142 | { |
| 183 | # ifdef CONFIG_IA32_EMULATION | 143 | # ifdef CONFIG_IA32_EMULATION |
| 184 | if (task->thread_info.status & TS_COMPAT) | 144 | if (task->thread_info.status & TS_COMPAT) { |
| 185 | switch (i) { | 145 | regs->bx = *args++; |
| 186 | case 0: | 146 | regs->cx = *args++; |
| 187 | if (!n--) break; | 147 | regs->dx = *args++; |
| 188 | regs->bx = *args++; | 148 | regs->si = *args++; |
| 189 | case 1: | 149 | regs->di = *args++; |
| 190 | if (!n--) break; | 150 | regs->bp = *args; |
| 191 | regs->cx = *args++; | 151 | } else |
| 192 | case 2: | ||
| 193 | if (!n--) break; | ||
| 194 | regs->dx = *args++; | ||
| 195 | case 3: | ||
| 196 | if (!n--) break; | ||
| 197 | regs->si = *args++; | ||
| 198 | case 4: | ||
| 199 | if (!n--) break; | ||
| 200 | regs->di = *args++; | ||
| 201 | case 5: | ||
| 202 | if (!n--) break; | ||
| 203 | regs->bp = *args++; | ||
| 204 | case 6: | ||
| 205 | if (!n--) break; | ||
| 206 | default: | ||
| 207 | BUG(); | ||
| 208 | break; | ||
| 209 | } | ||
| 210 | else | ||
| 211 | # endif | 152 | # endif |
| 212 | switch (i) { | 153 | { |
| 213 | case 0: | 154 | regs->di = *args++; |
| 214 | if (!n--) break; | 155 | regs->si = *args++; |
| 215 | regs->di = *args++; | 156 | regs->dx = *args++; |
| 216 | case 1: | 157 | regs->r10 = *args++; |
| 217 | if (!n--) break; | 158 | regs->r8 = *args++; |
| 218 | regs->si = *args++; | 159 | regs->r9 = *args; |
| 219 | case 2: | 160 | } |
| 220 | if (!n--) break; | ||
| 221 | regs->dx = *args++; | ||
| 222 | case 3: | ||
| 223 | if (!n--) break; | ||
| 224 | regs->r10 = *args++; | ||
| 225 | case 4: | ||
| 226 | if (!n--) break; | ||
| 227 | regs->r8 = *args++; | ||
| 228 | case 5: | ||
| 229 | if (!n--) break; | ||
| 230 | regs->r9 = *args++; | ||
| 231 | case 6: | ||
| 232 | if (!n--) break; | ||
| 233 | default: | ||
| 234 | BUG(); | ||
| 235 | break; | ||
| 236 | } | ||
| 237 | } | 161 | } |
| 238 | 162 | ||
| 239 | static inline int syscall_get_arch(void) | 163 | static inline int syscall_get_arch(void) |
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index de6f0d59a24f..2863c2026655 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
| @@ -206,6 +206,9 @@ xen_single_call(unsigned int call, | |||
| 206 | __HYPERCALL_DECLS; | 206 | __HYPERCALL_DECLS; |
| 207 | __HYPERCALL_5ARG(a1, a2, a3, a4, a5); | 207 | __HYPERCALL_5ARG(a1, a2, a3, a4, a5); |
| 208 | 208 | ||
| 209 | if (call >= PAGE_SIZE / sizeof(hypercall_page[0])) | ||
| 210 | return -EINVAL; | ||
| 211 | |||
| 209 | asm volatile(CALL_NOSPEC | 212 | asm volatile(CALL_NOSPEC |
| 210 | : __HYPERCALL_5PARAM | 213 | : __HYPERCALL_5PARAM |
| 211 | : [thunk_target] "a" (&hypercall_page[call]) | 214 | : [thunk_target] "a" (&hypercall_page[call]) |
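The added bound rejects hypercall numbers that would index past the single hypercall page before the indirect CALL_NOSPEC is issued. Rough arithmetic behind the limit (stand-alone sketch; the 32-byte stub size is an assumption about sizeof(hypercall_page[0]), not taken from this hunk):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define STUB_SIZE 32u	/* assumed size of one hypercall stub */

int main(void)
{
	/* One page of fixed-size stubs, so only call numbers below this
	 * count can be dispatched; everything else now gets -EINVAL. */
	unsigned int nr_entries = PAGE_SIZE / STUB_SIZE;

	printf("hypercall page holds %u entries (valid calls: 0..%u)\n",
	       nr_entries, nr_entries - 1);
	return 0;
}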
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index f33f11f69078..1573a0a6b525 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c | |||
| @@ -501,11 +501,8 @@ out_unlock: | |||
| 501 | void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) | 501 | void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) |
| 502 | { | 502 | { |
| 503 | unsigned long delay = msecs_to_jiffies(delay_ms); | 503 | unsigned long delay = msecs_to_jiffies(delay_ms); |
| 504 | struct rdt_resource *r; | ||
| 505 | int cpu; | 504 | int cpu; |
| 506 | 505 | ||
| 507 | r = &rdt_resources_all[RDT_RESOURCE_L3]; | ||
| 508 | |||
| 509 | cpu = cpumask_any(&dom->cpu_mask); | 506 | cpu = cpumask_any(&dom->cpu_mask); |
| 510 | dom->cqm_work_cpu = cpu; | 507 | dom->cqm_work_cpu = cpu; |
| 511 | 508 | ||
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 27c43525a05f..421899f6ad7b 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c | |||
| @@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, | |||
| 526 | new_config.enable = 0; | 526 | new_config.enable = 0; |
| 527 | stimer->config.as_uint64 = new_config.as_uint64; | 527 | stimer->config.as_uint64 = new_config.as_uint64; |
| 528 | 528 | ||
| 529 | stimer_mark_pending(stimer, false); | 529 | if (stimer->config.enable) |
| 530 | stimer_mark_pending(stimer, false); | ||
| 531 | |||
| 530 | return 0; | 532 | return 0; |
| 531 | } | 533 | } |
| 532 | 534 | ||
| @@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, | |||
| 542 | stimer->config.enable = 0; | 544 | stimer->config.enable = 0; |
| 543 | else if (stimer->config.auto_enable) | 545 | else if (stimer->config.auto_enable) |
| 544 | stimer->config.enable = 1; | 546 | stimer->config.enable = 1; |
| 545 | stimer_mark_pending(stimer, false); | 547 | |
| 548 | if (stimer->config.enable) | ||
| 549 | stimer_mark_pending(stimer, false); | ||
| 550 | |||
| 546 | return 0; | 551 | return 0; |
| 547 | } | 552 | } |
| 548 | 553 | ||
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7837ab001d80..eee455a8a612 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -182,7 +182,7 @@ struct kvm_shadow_walk_iterator { | |||
| 182 | 182 | ||
| 183 | static const union kvm_mmu_page_role mmu_base_role_mask = { | 183 | static const union kvm_mmu_page_role mmu_base_role_mask = { |
| 184 | .cr0_wp = 1, | 184 | .cr0_wp = 1, |
| 185 | .cr4_pae = 1, | 185 | .gpte_is_8_bytes = 1, |
| 186 | .nxe = 1, | 186 | .nxe = 1, |
| 187 | .smep_andnot_wp = 1, | 187 | .smep_andnot_wp = 1, |
| 188 | .smap_andnot_wp = 1, | 188 | .smap_andnot_wp = 1, |
| @@ -2205,6 +2205,7 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, | |||
| 2205 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, | 2205 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 2206 | struct list_head *invalid_list); | 2206 | struct list_head *invalid_list); |
| 2207 | 2207 | ||
| 2208 | |||
| 2208 | #define for_each_valid_sp(_kvm, _sp, _gfn) \ | 2209 | #define for_each_valid_sp(_kvm, _sp, _gfn) \ |
| 2209 | hlist_for_each_entry(_sp, \ | 2210 | hlist_for_each_entry(_sp, \ |
| 2210 | &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ | 2211 | &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ |
| @@ -2215,12 +2216,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm, | |||
| 2215 | for_each_valid_sp(_kvm, _sp, _gfn) \ | 2216 | for_each_valid_sp(_kvm, _sp, _gfn) \ |
| 2216 | if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else | 2217 | if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else |
| 2217 | 2218 | ||
| 2219 | static inline bool is_ept_sp(struct kvm_mmu_page *sp) | ||
| 2220 | { | ||
| 2221 | return sp->role.cr0_wp && sp->role.smap_andnot_wp; | ||
| 2222 | } | ||
| 2223 | |||
| 2218 | /* @sp->gfn should be write-protected at the call site */ | 2224 | /* @sp->gfn should be write-protected at the call site */ |
| 2219 | static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | 2225 | static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
| 2220 | struct list_head *invalid_list) | 2226 | struct list_head *invalid_list) |
| 2221 | { | 2227 | { |
| 2222 | if (sp->role.cr4_pae != !!is_pae(vcpu) | 2228 | if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) || |
| 2223 | || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { | 2229 | vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { |
| 2224 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); | 2230 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); |
| 2225 | return false; | 2231 | return false; |
| 2226 | } | 2232 | } |
| @@ -2423,7 +2429,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | |||
| 2423 | role.level = level; | 2429 | role.level = level; |
| 2424 | role.direct = direct; | 2430 | role.direct = direct; |
| 2425 | if (role.direct) | 2431 | if (role.direct) |
| 2426 | role.cr4_pae = 0; | 2432 | role.gpte_is_8_bytes = true; |
| 2427 | role.access = access; | 2433 | role.access = access; |
| 2428 | if (!vcpu->arch.mmu->direct_map | 2434 | if (!vcpu->arch.mmu->direct_map |
| 2429 | && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { | 2435 | && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { |
| @@ -4794,7 +4800,6 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, | |||
| 4794 | 4800 | ||
| 4795 | role.base.access = ACC_ALL; | 4801 | role.base.access = ACC_ALL; |
| 4796 | role.base.nxe = !!is_nx(vcpu); | 4802 | role.base.nxe = !!is_nx(vcpu); |
| 4797 | role.base.cr4_pae = !!is_pae(vcpu); | ||
| 4798 | role.base.cr0_wp = is_write_protection(vcpu); | 4803 | role.base.cr0_wp = is_write_protection(vcpu); |
| 4799 | role.base.smm = is_smm(vcpu); | 4804 | role.base.smm = is_smm(vcpu); |
| 4800 | role.base.guest_mode = is_guest_mode(vcpu); | 4805 | role.base.guest_mode = is_guest_mode(vcpu); |
| @@ -4815,6 +4820,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) | |||
| 4815 | role.base.ad_disabled = (shadow_accessed_mask == 0); | 4820 | role.base.ad_disabled = (shadow_accessed_mask == 0); |
| 4816 | role.base.level = kvm_x86_ops->get_tdp_level(vcpu); | 4821 | role.base.level = kvm_x86_ops->get_tdp_level(vcpu); |
| 4817 | role.base.direct = true; | 4822 | role.base.direct = true; |
| 4823 | role.base.gpte_is_8_bytes = true; | ||
| 4818 | 4824 | ||
| 4819 | return role; | 4825 | return role; |
| 4820 | } | 4826 | } |
| @@ -4879,6 +4885,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) | |||
| 4879 | role.base.smap_andnot_wp = role.ext.cr4_smap && | 4885 | role.base.smap_andnot_wp = role.ext.cr4_smap && |
| 4880 | !is_write_protection(vcpu); | 4886 | !is_write_protection(vcpu); |
| 4881 | role.base.direct = !is_paging(vcpu); | 4887 | role.base.direct = !is_paging(vcpu); |
| 4888 | role.base.gpte_is_8_bytes = !!is_pae(vcpu); | ||
| 4882 | 4889 | ||
| 4883 | if (!is_long_mode(vcpu)) | 4890 | if (!is_long_mode(vcpu)) |
| 4884 | role.base.level = PT32E_ROOT_LEVEL; | 4891 | role.base.level = PT32E_ROOT_LEVEL; |
| @@ -4918,18 +4925,26 @@ static union kvm_mmu_role | |||
| 4918 | kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, | 4925 | kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, |
| 4919 | bool execonly) | 4926 | bool execonly) |
| 4920 | { | 4927 | { |
| 4921 | union kvm_mmu_role role; | 4928 | union kvm_mmu_role role = {0}; |
| 4922 | 4929 | ||
| 4923 | /* Base role is inherited from root_mmu */ | 4930 | /* SMM flag is inherited from root_mmu */ |
| 4924 | role.base.word = vcpu->arch.root_mmu.mmu_role.base.word; | 4931 | role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm; |
| 4925 | role.ext = kvm_calc_mmu_role_ext(vcpu); | ||
| 4926 | 4932 | ||
| 4927 | role.base.level = PT64_ROOT_4LEVEL; | 4933 | role.base.level = PT64_ROOT_4LEVEL; |
| 4934 | role.base.gpte_is_8_bytes = true; | ||
| 4928 | role.base.direct = false; | 4935 | role.base.direct = false; |
| 4929 | role.base.ad_disabled = !accessed_dirty; | 4936 | role.base.ad_disabled = !accessed_dirty; |
| 4930 | role.base.guest_mode = true; | 4937 | role.base.guest_mode = true; |
| 4931 | role.base.access = ACC_ALL; | 4938 | role.base.access = ACC_ALL; |
| 4932 | 4939 | ||
| 4940 | /* | ||
| 4941 | * WP=1 and NOT_WP=1 is an impossible combination, use WP and the | ||
| 4942 | * SMAP variation to denote shadow EPT entries. | ||
| 4943 | */ | ||
| 4944 | role.base.cr0_wp = true; | ||
| 4945 | role.base.smap_andnot_wp = true; | ||
| 4946 | |||
| 4947 | role.ext = kvm_calc_mmu_role_ext(vcpu); | ||
| 4933 | role.ext.execonly = execonly; | 4948 | role.ext.execonly = execonly; |
| 4934 | 4949 | ||
| 4935 | return role; | 4950 | return role; |
| @@ -5179,7 +5194,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, | |||
| 5179 | gpa, bytes, sp->role.word); | 5194 | gpa, bytes, sp->role.word); |
| 5180 | 5195 | ||
| 5181 | offset = offset_in_page(gpa); | 5196 | offset = offset_in_page(gpa); |
| 5182 | pte_size = sp->role.cr4_pae ? 8 : 4; | 5197 | pte_size = sp->role.gpte_is_8_bytes ? 8 : 4; |
| 5183 | 5198 | ||
| 5184 | /* | 5199 | /* |
| 5185 | * Sometimes, the OS only writes the last one bytes to update status | 5200 | * Sometimes, the OS only writes the last one bytes to update status |
| @@ -5203,7 +5218,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) | |||
| 5203 | page_offset = offset_in_page(gpa); | 5218 | page_offset = offset_in_page(gpa); |
| 5204 | level = sp->role.level; | 5219 | level = sp->role.level; |
| 5205 | *nspte = 1; | 5220 | *nspte = 1; |
| 5206 | if (!sp->role.cr4_pae) { | 5221 | if (!sp->role.gpte_is_8_bytes) { |
| 5207 | page_offset <<= 1; /* 32->64 */ | 5222 | page_offset <<= 1; /* 32->64 */ |
| 5208 | /* | 5223 | /* |
| 5209 | * A 32-bit pde maps 4MB while the shadow pdes map | 5224 | * A 32-bit pde maps 4MB while the shadow pdes map |
| @@ -5393,10 +5408,12 @@ emulate: | |||
| 5393 | * This can happen if a guest gets a page-fault on data access but the HW | 5408 | * This can happen if a guest gets a page-fault on data access but the HW |
| 5394 | * table walker is not able to read the instruction page (e.g instruction | 5409 | * table walker is not able to read the instruction page (e.g instruction |
| 5395 | * page is not present in memory). In those cases we simply restart the | 5410 | * page is not present in memory). In those cases we simply restart the |
| 5396 | * guest. | 5411 | * guest, with the exception of AMD Erratum 1096 which is unrecoverable. |
| 5397 | */ | 5412 | */ |
| 5398 | if (unlikely(insn && !insn_len)) | 5413 | if (unlikely(insn && !insn_len)) { |
| 5399 | return 1; | 5414 | if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu)) |
| 5415 | return 1; | ||
| 5416 | } | ||
| 5400 | 5417 | ||
| 5401 | er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); | 5418 | er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); |
| 5402 | 5419 | ||
| @@ -5509,7 +5526,9 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, | |||
| 5509 | 5526 | ||
| 5510 | if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { | 5527 | if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { |
| 5511 | if (flush && lock_flush_tlb) { | 5528 | if (flush && lock_flush_tlb) { |
| 5512 | kvm_flush_remote_tlbs(kvm); | 5529 | kvm_flush_remote_tlbs_with_address(kvm, |
| 5530 | start_gfn, | ||
| 5531 | iterator.gfn - start_gfn + 1); | ||
| 5513 | flush = false; | 5532 | flush = false; |
| 5514 | } | 5533 | } |
| 5515 | cond_resched_lock(&kvm->mmu_lock); | 5534 | cond_resched_lock(&kvm->mmu_lock); |
| @@ -5517,7 +5536,8 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, | |||
| 5517 | } | 5536 | } |
| 5518 | 5537 | ||
| 5519 | if (flush && lock_flush_tlb) { | 5538 | if (flush && lock_flush_tlb) { |
| 5520 | kvm_flush_remote_tlbs(kvm); | 5539 | kvm_flush_remote_tlbs_with_address(kvm, start_gfn, |
| 5540 | end_gfn - start_gfn + 1); | ||
| 5521 | flush = false; | 5541 | flush = false; |
| 5522 | } | 5542 | } |
| 5523 | 5543 | ||
| @@ -6011,7 +6031,7 @@ out: | |||
| 6011 | /* | 6031 | /* |
| 6012 | * Calculate mmu pages needed for kvm. | 6032 | * Calculate mmu pages needed for kvm. |
| 6013 | */ | 6033 | */ |
| 6014 | unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) | 6034 | unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) |
| 6015 | { | 6035 | { |
| 6016 | unsigned int nr_mmu_pages; | 6036 | unsigned int nr_mmu_pages; |
| 6017 | unsigned int nr_pages = 0; | 6037 | unsigned int nr_pages = 0; |
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 9f6c855a0043..dd30dccd2ad5 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h | |||
| @@ -29,10 +29,10 @@ | |||
| 29 | \ | 29 | \ |
| 30 | role.word = __entry->role; \ | 30 | role.word = __entry->role; \ |
| 31 | \ | 31 | \ |
| 32 | trace_seq_printf(p, "sp gfn %llx l%u%s q%u%s %s%s" \ | 32 | trace_seq_printf(p, "sp gfn %llx l%u %u-byte q%u%s %s%s" \ |
| 33 | " %snxe %sad root %u %s%c", \ | 33 | " %snxe %sad root %u %s%c", \ |
| 34 | __entry->gfn, role.level, \ | 34 | __entry->gfn, role.level, \ |
| 35 | role.cr4_pae ? " pae" : "", \ | 35 | role.gpte_is_8_bytes ? 8 : 4, \ |
| 36 | role.quadrant, \ | 36 | role.quadrant, \ |
| 37 | role.direct ? " direct" : "", \ | 37 | role.direct ? " direct" : "", \ |
| 38 | access_str[role.access], \ | 38 | access_str[role.access], \ |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b5b128a0a051..e0a791c3d4fc 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -6422,11 +6422,11 @@ e_free: | |||
| 6422 | return ret; | 6422 | return ret; |
| 6423 | } | 6423 | } |
| 6424 | 6424 | ||
| 6425 | static int get_num_contig_pages(int idx, struct page **inpages, | 6425 | static unsigned long get_num_contig_pages(unsigned long idx, |
| 6426 | unsigned long npages) | 6426 | struct page **inpages, unsigned long npages) |
| 6427 | { | 6427 | { |
| 6428 | unsigned long paddr, next_paddr; | 6428 | unsigned long paddr, next_paddr; |
| 6429 | int i = idx + 1, pages = 1; | 6429 | unsigned long i = idx + 1, pages = 1; |
| 6430 | 6430 | ||
| 6431 | /* find the number of contiguous pages starting from idx */ | 6431 | /* find the number of contiguous pages starting from idx */ |
| 6432 | paddr = __sme_page_pa(inpages[idx]); | 6432 | paddr = __sme_page_pa(inpages[idx]); |
| @@ -6445,12 +6445,12 @@ static int get_num_contig_pages(int idx, struct page **inpages, | |||
| 6445 | 6445 | ||
| 6446 | static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) | 6446 | static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 6447 | { | 6447 | { |
| 6448 | unsigned long vaddr, vaddr_end, next_vaddr, npages, size; | 6448 | unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i; |
| 6449 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; | 6449 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
| 6450 | struct kvm_sev_launch_update_data params; | 6450 | struct kvm_sev_launch_update_data params; |
| 6451 | struct sev_data_launch_update_data *data; | 6451 | struct sev_data_launch_update_data *data; |
| 6452 | struct page **inpages; | 6452 | struct page **inpages; |
| 6453 | int i, ret, pages; | 6453 | int ret; |
| 6454 | 6454 | ||
| 6455 | if (!sev_guest(kvm)) | 6455 | if (!sev_guest(kvm)) |
| 6456 | return -ENOTTY; | 6456 | return -ENOTTY; |
| @@ -6799,7 +6799,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | |||
| 6799 | struct page **src_p, **dst_p; | 6799 | struct page **src_p, **dst_p; |
| 6800 | struct kvm_sev_dbg debug; | 6800 | struct kvm_sev_dbg debug; |
| 6801 | unsigned long n; | 6801 | unsigned long n; |
| 6802 | int ret, size; | 6802 | unsigned int size; |
| 6803 | int ret; | ||
| 6803 | 6804 | ||
| 6804 | if (!sev_guest(kvm)) | 6805 | if (!sev_guest(kvm)) |
| 6805 | return -ENOTTY; | 6806 | return -ENOTTY; |
| @@ -6807,6 +6808,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | |||
| 6807 | if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) | 6808 | if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) |
| 6808 | return -EFAULT; | 6809 | return -EFAULT; |
| 6809 | 6810 | ||
| 6811 | if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr) | ||
| 6812 | return -EINVAL; | ||
| 6813 | if (!debug.dst_uaddr) | ||
| 6814 | return -EINVAL; | ||
| 6815 | |||
| 6810 | vaddr = debug.src_uaddr; | 6816 | vaddr = debug.src_uaddr; |
| 6811 | size = debug.len; | 6817 | size = debug.len; |
| 6812 | vaddr_end = vaddr + size; | 6818 | vaddr_end = vaddr + size; |
| @@ -6857,8 +6863,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | |||
| 6857 | dst_vaddr, | 6863 | dst_vaddr, |
| 6858 | len, &argp->error); | 6864 | len, &argp->error); |
| 6859 | 6865 | ||
| 6860 | sev_unpin_memory(kvm, src_p, 1); | 6866 | sev_unpin_memory(kvm, src_p, n); |
| 6861 | sev_unpin_memory(kvm, dst_p, 1); | 6867 | sev_unpin_memory(kvm, dst_p, n); |
| 6862 | 6868 | ||
| 6863 | if (ret) | 6869 | if (ret) |
| 6864 | goto err; | 6870 | goto err; |
| @@ -7098,6 +7104,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu, | |||
| 7098 | return -ENODEV; | 7104 | return -ENODEV; |
| 7099 | } | 7105 | } |
| 7100 | 7106 | ||
| 7107 | static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) | ||
| 7108 | { | ||
| 7109 | bool is_user, smap; | ||
| 7110 | |||
| 7111 | is_user = svm_get_cpl(vcpu) == 3; | ||
| 7112 | smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); | ||
| 7113 | |||
| 7114 | /* | ||
| 7115 | * Detect and workaround Errata 1096 Fam_17h_00_0Fh | ||
| 7116 | * | ||
| 7117 | * In a non-SEV guest, the hypervisor is able to read guest | ||
| 7118 | * memory and decode the faulting instruction when insn_len is | ||
| 7119 | * zero, so we return true to indicate that decoding is possible. | ||
| 7120 | * | ||
| 7121 | * But in an SEV guest, guest memory is encrypted with a | ||
| 7122 | * guest-specific key, so the hypervisor cannot decode the | ||
| 7123 | * instruction and the workaround is not possible. Print an | ||
| 7124 | * error and request that the guest be killed. | ||
| 7125 | */ | ||
| 7126 | if (is_user && smap) { | ||
| 7127 | if (!sev_guest(vcpu->kvm)) | ||
| 7128 | return true; | ||
| 7129 | |||
| 7130 | pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n"); | ||
| 7131 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); | ||
| 7132 | } | ||
| 7133 | |||
| 7134 | return false; | ||
| 7135 | } | ||
| 7136 | |||
| 7101 | static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | 7137 | static struct kvm_x86_ops svm_x86_ops __ro_after_init = { |
| 7102 | .cpu_has_kvm_support = has_svm, | 7138 | .cpu_has_kvm_support = has_svm, |
| 7103 | .disabled_by_bios = is_disabled, | 7139 | .disabled_by_bios = is_disabled, |
| @@ -7231,6 +7267,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | |||
| 7231 | 7267 | ||
| 7232 | .nested_enable_evmcs = nested_enable_evmcs, | 7268 | .nested_enable_evmcs = nested_enable_evmcs, |
| 7233 | .nested_get_evmcs_version = nested_get_evmcs_version, | 7269 | .nested_get_evmcs_version = nested_get_evmcs_version, |
| 7270 | |||
| 7271 | .need_emulation_on_page_fault = svm_need_emulation_on_page_fault, | ||
| 7234 | }; | 7272 | }; |
| 7235 | 7273 | ||
| 7236 | static int __init svm_init(void) | 7274 | static int __init svm_init(void) |
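The new sev_dbg_crypt() checks use the usual unsigned-wraparound idiom: if src_uaddr + len overflows, the sum compares less than src_uaddr and the request is rejected. A stand-alone illustration of the idiom (names are illustrative, not from the patch):

#include <stdbool.h>
#include <stdio.h>

/* Rejects len == 0 and any range whose end wraps past the top of the
 * address space, mirroring the "uaddr + len < uaddr" test above. */
static bool range_ok(unsigned long uaddr, unsigned long len)
{
	return len != 0 && uaddr + len >= uaddr;
}

int main(void)
{
	printf("%d\n", range_ok(0x1000, 0x100));          /* 1: fine         */
	printf("%d\n", range_ok(0x1000, 0));              /* 0: empty range  */
	printf("%d\n", range_ok((unsigned long)-16, 32)); /* 0: wraps around */
	return 0;
}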
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f24a2c225070..7ec9bb1dd723 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c | |||
| @@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, | |||
| 500 | } | 500 | } |
| 501 | } | 501 | } |
| 502 | 502 | ||
| 503 | static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) { | ||
| 504 | int msr; | ||
| 505 | |||
| 506 | for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { | ||
| 507 | unsigned word = msr / BITS_PER_LONG; | ||
| 508 | |||
| 509 | msr_bitmap[word] = ~0; | ||
| 510 | msr_bitmap[word + (0x800 / sizeof(long))] = ~0; | ||
| 511 | } | ||
| 512 | } | ||
| 513 | |||
| 503 | /* | 514 | /* |
| 504 | * Merge L0's and L1's MSR bitmap, return false to indicate that | 515 | * Merge L0's and L1's MSR bitmap, return false to indicate that |
| 505 | * we do not use the hardware. | 516 | * we do not use the hardware. |
| @@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, | |||
| 541 | return false; | 552 | return false; |
| 542 | 553 | ||
| 543 | msr_bitmap_l1 = (unsigned long *)kmap(page); | 554 | msr_bitmap_l1 = (unsigned long *)kmap(page); |
| 544 | if (nested_cpu_has_apic_reg_virt(vmcs12)) { | ||
| 545 | /* | ||
| 546 | * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it | ||
| 547 | * just lets the processor take the value from the virtual-APIC page; | ||
| 548 | * take those 256 bits directly from the L1 bitmap. | ||
| 549 | */ | ||
| 550 | for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { | ||
| 551 | unsigned word = msr / BITS_PER_LONG; | ||
| 552 | msr_bitmap_l0[word] = msr_bitmap_l1[word]; | ||
| 553 | msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; | ||
| 554 | } | ||
| 555 | } else { | ||
| 556 | for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { | ||
| 557 | unsigned word = msr / BITS_PER_LONG; | ||
| 558 | msr_bitmap_l0[word] = ~0; | ||
| 559 | msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; | ||
| 560 | } | ||
| 561 | } | ||
| 562 | 555 | ||
| 563 | nested_vmx_disable_intercept_for_msr( | 556 | /* |
| 564 | msr_bitmap_l1, msr_bitmap_l0, | 557 | * To keep the control flow simple, pay eight 8-byte writes (sixteen |
| 565 | X2APIC_MSR(APIC_TASKPRI), | 558 | * 4-byte writes on 32-bit systems) up front to enable intercepts for |
| 566 | MSR_TYPE_W); | 559 | * the x2APIC MSR range and selectively disable them below. |
| 560 | */ | ||
| 561 | enable_x2apic_msr_intercepts(msr_bitmap_l0); | ||
| 562 | |||
| 563 | if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { | ||
| 564 | if (nested_cpu_has_apic_reg_virt(vmcs12)) { | ||
| 565 | /* | ||
| 566 | * L0 need not intercept reads for MSRs between 0x800 | ||
| 567 | * and 0x8ff, it just lets the processor take the value | ||
| 568 | * from the virtual-APIC page; take those 256 bits | ||
| 569 | * directly from the L1 bitmap. | ||
| 570 | */ | ||
| 571 | for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { | ||
| 572 | unsigned word = msr / BITS_PER_LONG; | ||
| 573 | |||
| 574 | msr_bitmap_l0[word] = msr_bitmap_l1[word]; | ||
| 575 | } | ||
| 576 | } | ||
| 567 | 577 | ||
| 568 | if (nested_cpu_has_vid(vmcs12)) { | ||
| 569 | nested_vmx_disable_intercept_for_msr( | ||
| 570 | msr_bitmap_l1, msr_bitmap_l0, | ||
| 571 | X2APIC_MSR(APIC_EOI), | ||
| 572 | MSR_TYPE_W); | ||
| 573 | nested_vmx_disable_intercept_for_msr( | 578 | nested_vmx_disable_intercept_for_msr( |
| 574 | msr_bitmap_l1, msr_bitmap_l0, | 579 | msr_bitmap_l1, msr_bitmap_l0, |
| 575 | X2APIC_MSR(APIC_SELF_IPI), | 580 | X2APIC_MSR(APIC_TASKPRI), |
| 576 | MSR_TYPE_W); | 581 | MSR_TYPE_R | MSR_TYPE_W); |
| 582 | |||
| 583 | if (nested_cpu_has_vid(vmcs12)) { | ||
| 584 | nested_vmx_disable_intercept_for_msr( | ||
| 585 | msr_bitmap_l1, msr_bitmap_l0, | ||
| 586 | X2APIC_MSR(APIC_EOI), | ||
| 587 | MSR_TYPE_W); | ||
| 588 | nested_vmx_disable_intercept_for_msr( | ||
| 589 | msr_bitmap_l1, msr_bitmap_l0, | ||
| 590 | X2APIC_MSR(APIC_SELF_IPI), | ||
| 591 | MSR_TYPE_W); | ||
| 592 | } | ||
| 577 | } | 593 | } |
| 578 | 594 | ||
| 579 | if (spec_ctrl) | 595 | if (spec_ctrl) |
| @@ -2585,6 +2601,11 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu, | |||
| 2585 | !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || | 2601 | !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || |
| 2586 | !nested_cr3_valid(vcpu, vmcs12->host_cr3)) | 2602 | !nested_cr3_valid(vcpu, vmcs12->host_cr3)) |
| 2587 | return -EINVAL; | 2603 | return -EINVAL; |
| 2604 | |||
| 2605 | if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) || | ||
| 2606 | is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)) | ||
| 2607 | return -EINVAL; | ||
| 2608 | |||
| 2588 | /* | 2609 | /* |
| 2589 | * If the load IA32_EFER VM-exit control is 1, bits reserved in the | 2610 | * If the load IA32_EFER VM-exit control is 1, bits reserved in the |
| 2590 | * IA32_EFER MSR must be 0 in the field for that register. In addition, | 2611 | * IA32_EFER MSR must be 0 in the field for that register. In addition, |
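The x2APIC bitmap rework indexes the VMX MSR bitmap directly: for the low MSR range (which includes 0x800-0x8ff), read-intercept bits start at byte offset 0 and the matching write-intercept bits at byte offset 0x800, hence the word + 0x800 / sizeof(long) addressing. A small sketch of probing those bits (helpers are illustrative and assume the usual 4 KiB bitmap layout and kernel bitops):

#include <linux/bitops.h>
#include <linux/types.h>

/* True if a read/write of the given low-range MSR is intercepted. */
static bool x2apic_read_intercepted(const unsigned long *msr_bitmap, u32 msr)
{
	return test_bit(msr & 0x1fff, msr_bitmap);	/* read-low half */
}

static bool x2apic_write_intercepted(const unsigned long *msr_bitmap, u32 msr)
{
	return test_bit(msr & 0x1fff,
			msr_bitmap + 0x800 / sizeof(long));	/* write-low half */
}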
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c73375e01ab8..ab432a930ae8 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c | |||
| @@ -1683,12 +1683,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 1683 | 1683 | ||
| 1684 | msr_info->data = to_vmx(vcpu)->spec_ctrl; | 1684 | msr_info->data = to_vmx(vcpu)->spec_ctrl; |
| 1685 | break; | 1685 | break; |
| 1686 | case MSR_IA32_ARCH_CAPABILITIES: | ||
| 1687 | if (!msr_info->host_initiated && | ||
| 1688 | !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) | ||
| 1689 | return 1; | ||
| 1690 | msr_info->data = to_vmx(vcpu)->arch_capabilities; | ||
| 1691 | break; | ||
| 1692 | case MSR_IA32_SYSENTER_CS: | 1686 | case MSR_IA32_SYSENTER_CS: |
| 1693 | msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); | 1687 | msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); |
| 1694 | break; | 1688 | break; |
| @@ -1895,11 +1889,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 1895 | vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, | 1889 | vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, |
| 1896 | MSR_TYPE_W); | 1890 | MSR_TYPE_W); |
| 1897 | break; | 1891 | break; |
| 1898 | case MSR_IA32_ARCH_CAPABILITIES: | ||
| 1899 | if (!msr_info->host_initiated) | ||
| 1900 | return 1; | ||
| 1901 | vmx->arch_capabilities = data; | ||
| 1902 | break; | ||
| 1903 | case MSR_IA32_CR_PAT: | 1892 | case MSR_IA32_CR_PAT: |
| 1904 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { | 1893 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { |
| 1905 | if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) | 1894 | if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) |
| @@ -4088,8 +4077,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
| 4088 | ++vmx->nmsrs; | 4077 | ++vmx->nmsrs; |
| 4089 | } | 4078 | } |
| 4090 | 4079 | ||
| 4091 | vmx->arch_capabilities = kvm_get_arch_capabilities(); | ||
| 4092 | |||
| 4093 | vm_exit_controls_init(vmx, vmx_vmexit_ctrl()); | 4080 | vm_exit_controls_init(vmx, vmx_vmexit_ctrl()); |
| 4094 | 4081 | ||
| 4095 | /* 22.2.1, 20.8.1 */ | 4082 | /* 22.2.1, 20.8.1 */ |
| @@ -7409,6 +7396,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) | |||
| 7409 | return 0; | 7396 | return 0; |
| 7410 | } | 7397 | } |
| 7411 | 7398 | ||
| 7399 | static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) | ||
| 7400 | { | ||
| 7401 | return 0; | ||
| 7402 | } | ||
| 7403 | |||
| 7412 | static __init int hardware_setup(void) | 7404 | static __init int hardware_setup(void) |
| 7413 | { | 7405 | { |
| 7414 | unsigned long host_bndcfgs; | 7406 | unsigned long host_bndcfgs; |
| @@ -7711,6 +7703,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
| 7711 | .set_nested_state = NULL, | 7703 | .set_nested_state = NULL, |
| 7712 | .get_vmcs12_pages = NULL, | 7704 | .get_vmcs12_pages = NULL, |
| 7713 | .nested_enable_evmcs = NULL, | 7705 | .nested_enable_evmcs = NULL, |
| 7706 | .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault, | ||
| 7714 | }; | 7707 | }; |
| 7715 | 7708 | ||
| 7716 | static void vmx_cleanup_l1d_flush(void) | 7709 | static void vmx_cleanup_l1d_flush(void) |
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 1554cb45b393..a1e00d0a2482 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h | |||
| @@ -190,7 +190,6 @@ struct vcpu_vmx { | |||
| 190 | u64 msr_guest_kernel_gs_base; | 190 | u64 msr_guest_kernel_gs_base; |
| 191 | #endif | 191 | #endif |
| 192 | 192 | ||
| 193 | u64 arch_capabilities; | ||
| 194 | u64 spec_ctrl; | 193 | u64 spec_ctrl; |
| 195 | 194 | ||
| 196 | u32 vm_entry_controls_shadow; | 195 | u32 vm_entry_controls_shadow; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 65e4559eef2f..099b851dabaf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -1125,7 +1125,7 @@ static u32 msrs_to_save[] = { | |||
| 1125 | #endif | 1125 | #endif |
| 1126 | MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, | 1126 | MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, |
| 1127 | MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, | 1127 | MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, |
| 1128 | MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES, | 1128 | MSR_IA32_SPEC_CTRL, |
| 1129 | MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, | 1129 | MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, |
| 1130 | MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, | 1130 | MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, |
| 1131 | MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, | 1131 | MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, |
| @@ -1158,6 +1158,7 @@ static u32 emulated_msrs[] = { | |||
| 1158 | 1158 | ||
| 1159 | MSR_IA32_TSC_ADJUST, | 1159 | MSR_IA32_TSC_ADJUST, |
| 1160 | MSR_IA32_TSCDEADLINE, | 1160 | MSR_IA32_TSCDEADLINE, |
| 1161 | MSR_IA32_ARCH_CAPABILITIES, | ||
| 1161 | MSR_IA32_MISC_ENABLE, | 1162 | MSR_IA32_MISC_ENABLE, |
| 1162 | MSR_IA32_MCG_STATUS, | 1163 | MSR_IA32_MCG_STATUS, |
| 1163 | MSR_IA32_MCG_CTL, | 1164 | MSR_IA32_MCG_CTL, |
| @@ -2443,6 +2444,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 2443 | if (msr_info->host_initiated) | 2444 | if (msr_info->host_initiated) |
| 2444 | vcpu->arch.microcode_version = data; | 2445 | vcpu->arch.microcode_version = data; |
| 2445 | break; | 2446 | break; |
| 2447 | case MSR_IA32_ARCH_CAPABILITIES: | ||
| 2448 | if (!msr_info->host_initiated) | ||
| 2449 | return 1; | ||
| 2450 | vcpu->arch.arch_capabilities = data; | ||
| 2451 | break; | ||
| 2446 | case MSR_EFER: | 2452 | case MSR_EFER: |
| 2447 | return set_efer(vcpu, data); | 2453 | return set_efer(vcpu, data); |
| 2448 | case MSR_K7_HWCR: | 2454 | case MSR_K7_HWCR: |
| @@ -2747,6 +2753,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 2747 | case MSR_IA32_UCODE_REV: | 2753 | case MSR_IA32_UCODE_REV: |
| 2748 | msr_info->data = vcpu->arch.microcode_version; | 2754 | msr_info->data = vcpu->arch.microcode_version; |
| 2749 | break; | 2755 | break; |
| 2756 | case MSR_IA32_ARCH_CAPABILITIES: | ||
| 2757 | if (!msr_info->host_initiated && | ||
| 2758 | !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) | ||
| 2759 | return 1; | ||
| 2760 | msr_info->data = vcpu->arch.arch_capabilities; | ||
| 2761 | break; | ||
| 2750 | case MSR_IA32_TSC: | 2762 | case MSR_IA32_TSC: |
| 2751 | msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; | 2763 | msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; |
| 2752 | break; | 2764 | break; |
| @@ -6523,14 +6535,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, | |||
| 6523 | } | 6535 | } |
| 6524 | EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); | 6536 | EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); |
| 6525 | 6537 | ||
| 6538 | static int complete_fast_pio_out(struct kvm_vcpu *vcpu) | ||
| 6539 | { | ||
| 6540 | vcpu->arch.pio.count = 0; | ||
| 6541 | |||
| 6542 | if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) | ||
| 6543 | return 1; | ||
| 6544 | |||
| 6545 | return kvm_skip_emulated_instruction(vcpu); | ||
| 6546 | } | ||
| 6547 | |||
| 6526 | static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, | 6548 | static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, |
| 6527 | unsigned short port) | 6549 | unsigned short port) |
| 6528 | { | 6550 | { |
| 6529 | unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); | 6551 | unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); |
| 6530 | int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, | 6552 | int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, |
| 6531 | size, port, &val, 1); | 6553 | size, port, &val, 1); |
| 6532 | /* do not return to emulator after return from userspace */ | 6554 | |
| 6533 | vcpu->arch.pio.count = 0; | 6555 | if (!ret) { |
| 6556 | vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); | ||
| 6557 | vcpu->arch.complete_userspace_io = complete_fast_pio_out; | ||
| 6558 | } | ||
| 6534 | return ret; | 6559 | return ret; |
| 6535 | } | 6560 | } |
| 6536 | 6561 | ||
| @@ -6541,6 +6566,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu) | |||
| 6541 | /* We should only ever be called with arch.pio.count equal to 1 */ | 6566 | /* We should only ever be called with arch.pio.count equal to 1 */ |
| 6542 | BUG_ON(vcpu->arch.pio.count != 1); | 6567 | BUG_ON(vcpu->arch.pio.count != 1); |
| 6543 | 6568 | ||
| 6569 | if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { | ||
| 6570 | vcpu->arch.pio.count = 0; | ||
| 6571 | return 1; | ||
| 6572 | } | ||
| 6573 | |||
| 6544 | /* For size less than 4 we merge, else we zero extend */ | 6574 | /* For size less than 4 we merge, else we zero extend */ |
| 6545 | val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) | 6575 | val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) |
| 6546 | : 0; | 6576 | : 0; |
| @@ -6553,7 +6583,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu) | |||
| 6553 | vcpu->arch.pio.port, &val, 1); | 6583 | vcpu->arch.pio.port, &val, 1); |
| 6554 | kvm_register_write(vcpu, VCPU_REGS_RAX, val); | 6584 | kvm_register_write(vcpu, VCPU_REGS_RAX, val); |
| 6555 | 6585 | ||
| 6556 | return 1; | 6586 | return kvm_skip_emulated_instruction(vcpu); |
| 6557 | } | 6587 | } |
| 6558 | 6588 | ||
| 6559 | static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, | 6589 | static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, |
| @@ -6572,6 +6602,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, | |||
| 6572 | return ret; | 6602 | return ret; |
| 6573 | } | 6603 | } |
| 6574 | 6604 | ||
| 6605 | vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); | ||
| 6575 | vcpu->arch.complete_userspace_io = complete_fast_pio_in; | 6606 | vcpu->arch.complete_userspace_io = complete_fast_pio_in; |
| 6576 | 6607 | ||
| 6577 | return 0; | 6608 | return 0; |
| @@ -6579,16 +6610,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, | |||
| 6579 | 6610 | ||
| 6580 | int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) | 6611 | int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) |
| 6581 | { | 6612 | { |
| 6582 | int ret = kvm_skip_emulated_instruction(vcpu); | 6613 | int ret; |
| 6583 | 6614 | ||
| 6584 | /* | ||
| 6585 | * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered | ||
| 6586 | * KVM_EXIT_DEBUG here. | ||
| 6587 | */ | ||
| 6588 | if (in) | 6615 | if (in) |
| 6589 | return kvm_fast_pio_in(vcpu, size, port) && ret; | 6616 | ret = kvm_fast_pio_in(vcpu, size, port); |
| 6590 | else | 6617 | else |
| 6591 | return kvm_fast_pio_out(vcpu, size, port) && ret; | 6618 | ret = kvm_fast_pio_out(vcpu, size, port); |
| 6619 | return ret && kvm_skip_emulated_instruction(vcpu); | ||
| 6592 | } | 6620 | } |
| 6593 | EXPORT_SYMBOL_GPL(kvm_fast_pio); | 6621 | EXPORT_SYMBOL_GPL(kvm_fast_pio); |
| 6594 | 6622 | ||
| @@ -8733,6 +8761,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
| 8733 | 8761 | ||
| 8734 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 8762 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
| 8735 | { | 8763 | { |
| 8764 | vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); | ||
| 8736 | vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; | 8765 | vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; |
| 8737 | kvm_vcpu_mtrr_init(vcpu); | 8766 | kvm_vcpu_mtrr_init(vcpu); |
| 8738 | vcpu_load(vcpu); | 8767 | vcpu_load(vcpu); |
| @@ -9429,13 +9458,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
| 9429 | const struct kvm_memory_slot *new, | 9458 | const struct kvm_memory_slot *new, |
| 9430 | enum kvm_mr_change change) | 9459 | enum kvm_mr_change change) |
| 9431 | { | 9460 | { |
| 9432 | int nr_mmu_pages = 0; | ||
| 9433 | |||
| 9434 | if (!kvm->arch.n_requested_mmu_pages) | 9461 | if (!kvm->arch.n_requested_mmu_pages) |
| 9435 | nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); | 9462 | kvm_mmu_change_mmu_pages(kvm, |
| 9436 | 9463 | kvm_mmu_calculate_default_mmu_pages(kvm)); | |
| 9437 | if (nr_mmu_pages) | ||
| 9438 | kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); | ||
| 9439 | 9464 | ||
| 9440 | /* | 9465 | /* |
| 9441 | * Dirty logging tracks sptes in 4k granularity, meaning that large | 9466 | * Dirty logging tracks sptes in 4k granularity, meaning that large |
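The x86.c hunks above move IA32_ARCH_CAPABILITIES into the emulated MSR list, accept writes only when they are host-initiated, and seed each vCPU's value from kvm_get_arch_capabilities() in kvm_arch_vcpu_setup(). A minimal userspace sketch of such a host-initiated write follows; it is not part of this series. The vcpu_fd, the caps value, and the set_arch_capabilities() helper name are assumptions for illustration, while KVM_SET_MSRS and the kvm_msrs/kvm_msr_entry layout are the existing KVM ioctl ABI.

#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#define MSR_IA32_ARCH_CAPABILITIES 0x10a        /* architectural MSR index */

/* Hypothetical helper: vcpu_fd is assumed to come from KVM_CREATE_VCPU. */
static int set_arch_capabilities(int vcpu_fd, uint64_t caps)
{
        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
        } msrs;

        memset(&msrs, 0, sizeof(msrs));
        msrs.hdr.nmsrs = 1;
        msrs.entry.index = MSR_IA32_ARCH_CAPABILITIES;
        msrs.entry.data = caps;

        /* Host-initiated write; returns the number of MSRs set (1) on success. */
        return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
}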
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index db3165714521..dc726e07d8ba 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c | |||
| @@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len) | |||
| 230 | /* Can we access it for direct reading/writing? Must be RAM: */ | 230 | /* Can we access it for direct reading/writing? Must be RAM: */ |
| 231 | int valid_phys_addr_range(phys_addr_t addr, size_t count) | 231 | int valid_phys_addr_range(phys_addr_t addr, size_t count) |
| 232 | { | 232 | { |
| 233 | return addr + count <= __pa(high_memory); | 233 | return addr + count - 1 <= __pa(high_memory - 1); |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | /* Can we access it through mmap? Must be a valid physical address: */ | 236 | /* Can we access it through mmap? Must be a valid physical address: */ |
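The one-line change above turns an exclusive bound, addr + count <= __pa(high_memory), into an inclusive one that only ever forms the address of a byte that actually exists. The standalone toy below uses a deliberately tiny 16-bit "physical" space and made-up names to show why computing the one-past-the-end address is fragile when populated memory reaches the top of the address space; it illustrates the inclusive-bound arithmetic only, not the exact in-tree failure mode.

#include <stdint.h>
#include <stdio.h>

static const uint16_t last_valid   = 0xffff;    /* stand-in for __pa(high_memory - 1) */
static const uint16_t one_past_end = 0x0000;    /* stand-in for __pa(high_memory), wrapped */

static int old_check(uint16_t addr, uint16_t count)
{
        return (uint16_t)(addr + count) <= one_past_end;        /* rejects every range */
}

static int new_check(uint16_t addr, uint16_t count)
{
        return (uint16_t)(addr + count - 1) <= last_valid;      /* compares the last byte */
}

int main(void)
{
        /* A plainly valid 16-byte read: the old form fails, the new form passes. */
        printf("old: %d, new: %d\n", old_check(0x1000, 16), new_check(0x1000, 16));
        return 0;
}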
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 458a0e2bcc57..a25a9fd987a9 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c | |||
| @@ -449,7 +449,7 @@ void __init efi_free_boot_services(void) | |||
| 449 | */ | 449 | */ |
| 450 | rm_size = real_mode_size_needed(); | 450 | rm_size = real_mode_size_needed(); |
| 451 | if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) { | 451 | if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) { |
| 452 | set_real_mode_mem(start, rm_size); | 452 | set_real_mode_mem(start); |
| 453 | start += rm_size; | 453 | start += rm_size; |
| 454 | size -= rm_size; | 454 | size -= rm_size; |
| 455 | } | 455 | } |
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index d10105825d57..7dce39c8c034 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c | |||
| @@ -15,15 +15,6 @@ u32 *trampoline_cr4_features; | |||
| 15 | /* Hold the pgd entry used on booting additional CPUs */ | 15 | /* Hold the pgd entry used on booting additional CPUs */ |
| 16 | pgd_t trampoline_pgd_entry; | 16 | pgd_t trampoline_pgd_entry; |
| 17 | 17 | ||
| 18 | void __init set_real_mode_mem(phys_addr_t mem, size_t size) | ||
| 19 | { | ||
| 20 | void *base = __va(mem); | ||
| 21 | |||
| 22 | real_mode_header = (struct real_mode_header *) base; | ||
| 23 | printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", | ||
| 24 | base, (unsigned long long)mem, size); | ||
| 25 | } | ||
| 26 | |||
| 27 | void __init reserve_real_mode(void) | 18 | void __init reserve_real_mode(void) |
| 28 | { | 19 | { |
| 29 | phys_addr_t mem; | 20 | phys_addr_t mem; |
| @@ -42,7 +33,7 @@ void __init reserve_real_mode(void) | |||
| 42 | } | 33 | } |
| 43 | 34 | ||
| 44 | memblock_reserve(mem, size); | 35 | memblock_reserve(mem, size); |
| 45 | set_real_mode_mem(mem, size); | 36 | set_real_mode_mem(mem); |
| 46 | } | 37 | } |
| 47 | 38 | ||
| 48 | static void __init setup_real_mode(void) | 39 | static void __init setup_real_mode(void) |
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 42b6cb3d16f7..3843198e03d4 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild | |||
| @@ -15,6 +15,7 @@ generic-y += irq_work.h | |||
| 15 | generic-y += kdebug.h | 15 | generic-y += kdebug.h |
| 16 | generic-y += kmap_types.h | 16 | generic-y += kmap_types.h |
| 17 | generic-y += kprobes.h | 17 | generic-y += kprobes.h |
| 18 | generic-y += kvm_para.h | ||
| 18 | generic-y += local.h | 19 | generic-y += local.h |
| 19 | generic-y += local64.h | 20 | generic-y += local64.h |
| 20 | generic-y += mcs_spinlock.h | 21 | generic-y += mcs_spinlock.h |
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index f7dd895b2353..0c14018d1c26 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h | |||
| @@ -187,15 +187,18 @@ struct thread_struct { | |||
| 187 | 187 | ||
| 188 | /* Clearing a0 terminates the backtrace. */ | 188 | /* Clearing a0 terminates the backtrace. */ |
| 189 | #define start_thread(regs, new_pc, new_sp) \ | 189 | #define start_thread(regs, new_pc, new_sp) \ |
| 190 | memset(regs, 0, sizeof(*regs)); \ | 190 | do { \ |
| 191 | regs->pc = new_pc; \ | 191 | memset((regs), 0, sizeof(*(regs))); \ |
| 192 | regs->ps = USER_PS_VALUE; \ | 192 | (regs)->pc = (new_pc); \ |
| 193 | regs->areg[1] = new_sp; \ | 193 | (regs)->ps = USER_PS_VALUE; \ |
| 194 | regs->areg[0] = 0; \ | 194 | (regs)->areg[1] = (new_sp); \ |
| 195 | regs->wmask = 1; \ | 195 | (regs)->areg[0] = 0; \ |
| 196 | regs->depc = 0; \ | 196 | (regs)->wmask = 1; \ |
| 197 | regs->windowbase = 0; \ | 197 | (regs)->depc = 0; \ |
| 198 | regs->windowstart = 1; | 198 | (regs)->windowbase = 0; \ |
| 199 | (regs)->windowstart = 1; \ | ||
| 200 | (regs)->syscall = NO_SYSCALL; \ | ||
| 201 | } while (0) | ||
| 199 | 202 | ||
| 200 | /* Forward declaration */ | 203 | /* Forward declaration */ |
| 201 | struct task_struct; | 204 | struct task_struct; |
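Besides parenthesizing the macro arguments and initializing the syscall field to NO_SYSCALL, the hunk above wraps start_thread() in do { ... } while (0). The reduced, hypothetical example below shows what goes wrong when a multi-statement macro is left unwrapped and used as the body of an if (an unwrapped version also breaks if/else pairing outright).

#include <stdio.h>

#define BAD_INIT(x)     (x) = 0; (x) += 1
#define GOOD_INIT(x)    do { (x) = 0; (x) += 1; } while (0)

int main(void)
{
        int cond = 0, a = 42, b = 42;

        if (cond)
                BAD_INIT(a);    /* only "(a) = 0" is guarded; "(a) += 1" always runs */

        if (cond)
                GOOD_INIT(b);   /* the whole body is guarded, as intended */

        printf("a = %d, b = %d\n", a, b);       /* prints: a = 43, b = 42 */
        return 0;
}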
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index a168bf81c7f4..91dc06d58060 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h | |||
| @@ -59,45 +59,24 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 59 | 59 | ||
| 60 | static inline void syscall_get_arguments(struct task_struct *task, | 60 | static inline void syscall_get_arguments(struct task_struct *task, |
| 61 | struct pt_regs *regs, | 61 | struct pt_regs *regs, |
| 62 | unsigned int i, unsigned int n, | ||
| 63 | unsigned long *args) | 62 | unsigned long *args) |
| 64 | { | 63 | { |
| 65 | static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; | 64 | static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; |
| 66 | unsigned int j; | 65 | unsigned int i; |
| 67 | 66 | ||
| 68 | if (n == 0) | 67 | for (i = 0; i < 6; ++i) |
| 69 | return; | 68 | args[i] = regs->areg[reg[i]]; |
| 70 | |||
| 71 | WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS); | ||
| 72 | |||
| 73 | for (j = 0; j < n; ++j) { | ||
| 74 | if (i + j < SYSCALL_MAX_ARGS) | ||
| 75 | args[j] = regs->areg[reg[i + j]]; | ||
| 76 | else | ||
| 77 | args[j] = 0; | ||
| 78 | } | ||
| 79 | } | 69 | } |
| 80 | 70 | ||
| 81 | static inline void syscall_set_arguments(struct task_struct *task, | 71 | static inline void syscall_set_arguments(struct task_struct *task, |
| 82 | struct pt_regs *regs, | 72 | struct pt_regs *regs, |
| 83 | unsigned int i, unsigned int n, | ||
| 84 | const unsigned long *args) | 73 | const unsigned long *args) |
| 85 | { | 74 | { |
| 86 | static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; | 75 | static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; |
| 87 | unsigned int j; | 76 | unsigned int i; |
| 88 | |||
| 89 | if (n == 0) | ||
| 90 | return; | ||
| 91 | |||
| 92 | if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) { | ||
| 93 | if (i < SYSCALL_MAX_ARGS) | ||
| 94 | n = SYSCALL_MAX_ARGS - i; | ||
| 95 | else | ||
| 96 | return; | ||
| 97 | } | ||
| 98 | 77 | ||
| 99 | for (j = 0; j < n; ++j) | 78 | for (i = 0; i < 6; ++i) |
| 100 | regs->areg[reg[i + j]] = args[j]; | 79 | regs->areg[reg[i]] = args[i]; |
| 101 | } | 80 | } |
| 102 | 81 | ||
| 103 | asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); | 82 | asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); |
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild index 8a7ad40be463..7417847dc438 100644 --- a/arch/xtensa/include/uapi/asm/Kbuild +++ b/arch/xtensa/include/uapi/asm/Kbuild | |||
| @@ -1,2 +1 @@ | |||
| 1 | generated-y += unistd_32.h | generated-y += unistd_32.h | |
| 2 | generic-y += kvm_para.h | ||
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index e50f5124dc6f..e54af8b7e0f8 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S | |||
| @@ -1860,6 +1860,8 @@ ENTRY(system_call) | |||
| 1860 | l32i a7, a2, PT_SYSCALL | 1860 | l32i a7, a2, PT_SYSCALL |
| 1861 | 1861 | ||
| 1862 | 1: | 1862 | 1: |
| 1863 | s32i a7, a1, 4 | ||
| 1864 | |||
| 1863 | /* syscall = sys_call_table[syscall_nr] */ | 1865 | /* syscall = sys_call_table[syscall_nr] */ |
| 1864 | 1866 | ||
| 1865 | movi a4, sys_call_table | 1867 | movi a4, sys_call_table |
| @@ -1893,8 +1895,12 @@ ENTRY(system_call) | |||
| 1893 | retw | 1895 | retw |
| 1894 | 1896 | ||
| 1895 | 1: | 1897 | 1: |
| 1898 | l32i a4, a1, 4 | ||
| 1899 | l32i a3, a2, PT_SYSCALL | ||
| 1900 | s32i a4, a2, PT_SYSCALL | ||
| 1896 | mov a6, a2 | 1901 | mov a6, a2 |
| 1897 | call4 do_syscall_trace_leave | 1902 | call4 do_syscall_trace_leave |
| 1903 | s32i a3, a2, PT_SYSCALL | ||
| 1898 | retw | 1904 | retw |
| 1899 | 1905 | ||
| 1900 | ENDPROC(system_call) | 1906 | ENDPROC(system_call) |
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c index 174c11f13bba..b9f82510c650 100644 --- a/arch/xtensa/kernel/stacktrace.c +++ b/arch/xtensa/kernel/stacktrace.c | |||
| @@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data) | |||
| 253 | return 1; | 253 | return 1; |
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | /* | ||
| 257 | * level == 0 is for the return address from the caller of this function, | ||
| 258 | * not from this function itself. | ||
| 259 | */ | ||
| 256 | unsigned long return_address(unsigned level) | 260 | unsigned long return_address(unsigned level) |
| 257 | { | 261 | { |
| 258 | struct return_addr_data r = { | 262 | struct return_addr_data r = { |
| 259 | .skip = level + 1, | 263 | .skip = level, |
| 260 | }; | 264 | }; |
| 261 | walk_stackframe(stack_pointer(NULL), return_address_cb, &r); | 265 | walk_stackframe(stack_pointer(NULL), return_address_cb, &r); |
| 262 | return r.addr; | 266 | return r.addr; |
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 2fb7d1172228..03678c4afc39 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c | |||
| @@ -33,7 +33,7 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) | |||
| 33 | 33 | ||
| 34 | pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE); | 34 | pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE); |
| 35 | if (!pte) | 35 | if (!pte) |
| 36 | panic("%s: Failed to allocate %zu bytes align=%lx\n", | 36 | panic("%s: Failed to allocate %lu bytes align=%lx\n", |
| 37 | __func__, n_pages * sizeof(pte_t), PAGE_SIZE); | 37 | __func__, n_pages * sizeof(pte_t), PAGE_SIZE); |
| 38 | 38 | ||
| 39 | for (i = 0; i < n_pages; ++i) | 39 | for (i = 0; i < n_pages; ++i) |
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 4c592496a16a..fac188dd78fa 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c | |||
| @@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd) | |||
| 674 | * at least two nodes. | 674 | * at least two nodes. |
| 675 | */ | 675 | */ |
| 676 | return !(varied_queue_weights || multiple_classes_busy | 676 | return !(varied_queue_weights || multiple_classes_busy |
| 677 | #ifdef BFQ_GROUP_IOSCHED_ENABLED | 677 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 678 | || bfqd->num_groups_with_pending_reqs > 0 | 678 | || bfqd->num_groups_with_pending_reqs > 0 |
| 679 | #endif | 679 | #endif |
| 680 | ); | 680 | ); |
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 63311d1ff1ed..a11bef75483d 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c | |||
| @@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity, | |||
| 1012 | entity->on_st = true; | 1012 | entity->on_st = true; |
| 1013 | } | 1013 | } |
| 1014 | 1014 | ||
| 1015 | #ifdef BFQ_GROUP_IOSCHED_ENABLED | 1015 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 1016 | if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */ | 1016 | if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */ |
| 1017 | struct bfq_group *bfqg = | 1017 | struct bfq_group *bfqg = |
| 1018 | container_of(entity, struct bfq_group, entity); | 1018 | container_of(entity, struct bfq_group, entity); |
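Both bfq hunks above replace a preprocessor guard that does not match any generated Kconfig macro (BFQ_GROUP_IOSCHED_ENABLED) with the real symbol CONFIG_BFQ_GROUP_IOSCHED; with the wrong name, the group-scheduling checks were silently compiled out. A standalone model of that failure mode:

#include <stdio.h>

#define CONFIG_BFQ_GROUP_IOSCHED 1      /* pretend the Kconfig option is enabled */

int main(void)
{
        int group_checks = 0;

#ifdef BFQ_GROUP_IOSCHED_ENABLED        /* misspelled guard: never defined */
        group_checks = 1;
#endif
#ifdef CONFIG_BFQ_GROUP_IOSCHED         /* correct symbol: block is compiled in */
        group_checks = 2;
#endif
        printf("group_checks = %d\n", group_checks);    /* prints 2 */
        return 0;
}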
diff --git a/block/blk-core.c b/block/blk-core.c index 4673ebe42255..a55389ba8779 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -1245,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q, | |||
| 1245 | */ | 1245 | */ |
| 1246 | blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) | 1246 | blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) |
| 1247 | { | 1247 | { |
| 1248 | blk_qc_t unused; | ||
| 1249 | |||
| 1250 | if (blk_cloned_rq_check_limits(q, rq)) | 1248 | if (blk_cloned_rq_check_limits(q, rq)) |
| 1251 | return BLK_STS_IOERR; | 1249 | return BLK_STS_IOERR; |
| 1252 | 1250 | ||
| @@ -1262,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * | |||
| 1262 | * bypass a potential scheduler on the bottom device for | 1260 | * bypass a potential scheduler on the bottom device for |
| 1263 | * insert. | 1261 | * insert. |
| 1264 | */ | 1262 | */ |
| 1265 | return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true); | 1263 | return blk_mq_request_issue_directly(rq, true); |
| 1266 | } | 1264 | } |
| 1267 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); | 1265 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); |
| 1268 | 1266 | ||
diff --git a/block/blk-flush.c b/block/blk-flush.c index 6e0f2d97fc6d..d95f94892015 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
| @@ -220,7 +220,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) | |||
| 220 | blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); | 220 | blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); |
| 221 | flush_rq->tag = -1; | 221 | flush_rq->tag = -1; |
| 222 | } else { | 222 | } else { |
| 223 | blk_mq_put_driver_tag_hctx(hctx, flush_rq); | 223 | blk_mq_put_driver_tag(flush_rq); |
| 224 | flush_rq->internal_tag = -1; | 224 | flush_rq->internal_tag = -1; |
| 225 | } | 225 | } |
| 226 | 226 | ||
| @@ -324,7 +324,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error) | |||
| 324 | 324 | ||
| 325 | if (q->elevator) { | 325 | if (q->elevator) { |
| 326 | WARN_ON(rq->tag < 0); | 326 | WARN_ON(rq->tag < 0); |
| 327 | blk_mq_put_driver_tag_hctx(hctx, rq); | 327 | blk_mq_put_driver_tag(rq); |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | /* | 330 | /* |
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 40905539afed..aa6bc5c02643 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c | |||
| @@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, | |||
| 423 | * busy in case of 'none' scheduler, and this way may save | 423 | * busy in case of 'none' scheduler, and this way may save |
| 424 | * us one extra enqueue & dequeue to sw queue. | 424 | * us one extra enqueue & dequeue to sw queue. |
| 425 | */ | 425 | */ |
| 426 | if (!hctx->dispatch_busy && !e && !run_queue_async) | 426 | if (!hctx->dispatch_busy && !e && !run_queue_async) { |
| 427 | blk_mq_try_issue_list_directly(hctx, list); | 427 | blk_mq_try_issue_list_directly(hctx, list); |
| 428 | else | 428 | if (list_empty(list)) |
| 429 | blk_mq_insert_requests(hctx, ctx, list); | 429 | return; |
| 430 | } | ||
| 431 | blk_mq_insert_requests(hctx, ctx, list); | ||
| 430 | } | 432 | } |
| 431 | 433 | ||
| 432 | blk_mq_run_hw_queue(hctx, run_queue_async); | 434 | blk_mq_run_hw_queue(hctx, run_queue_async); |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 70b210a308c4..a9354835cf51 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq) | |||
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | /* | 61 | /* |
| 62 | * Check if any of the ctx's have pending work in this hardware queue | 62 | * Check if any of the ctx, dispatch list or elevator |
| 63 | * have pending work in this hardware queue. | ||
| 63 | */ | 64 | */ |
| 64 | static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) | 65 | static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) |
| 65 | { | 66 | { |
| @@ -1071,7 +1072,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, | |||
| 1071 | hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); | 1072 | hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); |
| 1072 | 1073 | ||
| 1073 | spin_lock(&hctx->dispatch_wait_lock); | 1074 | spin_lock(&hctx->dispatch_wait_lock); |
| 1074 | list_del_init(&wait->entry); | 1075 | if (!list_empty(&wait->entry)) { |
| 1076 | struct sbitmap_queue *sbq; | ||
| 1077 | |||
| 1078 | list_del_init(&wait->entry); | ||
| 1079 | sbq = &hctx->tags->bitmap_tags; | ||
| 1080 | atomic_dec(&sbq->ws_active); | ||
| 1081 | } | ||
| 1075 | spin_unlock(&hctx->dispatch_wait_lock); | 1082 | spin_unlock(&hctx->dispatch_wait_lock); |
| 1076 | 1083 | ||
| 1077 | blk_mq_run_hw_queue(hctx, true); | 1084 | blk_mq_run_hw_queue(hctx, true); |
| @@ -1087,6 +1094,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, | |||
| 1087 | static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, | 1094 | static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, |
| 1088 | struct request *rq) | 1095 | struct request *rq) |
| 1089 | { | 1096 | { |
| 1097 | struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; | ||
| 1090 | struct wait_queue_head *wq; | 1098 | struct wait_queue_head *wq; |
| 1091 | wait_queue_entry_t *wait; | 1099 | wait_queue_entry_t *wait; |
| 1092 | bool ret; | 1100 | bool ret; |
| @@ -1109,7 +1117,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, | |||
| 1109 | if (!list_empty_careful(&wait->entry)) | 1117 | if (!list_empty_careful(&wait->entry)) |
| 1110 | return false; | 1118 | return false; |
| 1111 | 1119 | ||
| 1112 | wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait; | 1120 | wq = &bt_wait_ptr(sbq, hctx)->wait; |
| 1113 | 1121 | ||
| 1114 | spin_lock_irq(&wq->lock); | 1122 | spin_lock_irq(&wq->lock); |
| 1115 | spin_lock(&hctx->dispatch_wait_lock); | 1123 | spin_lock(&hctx->dispatch_wait_lock); |
| @@ -1119,6 +1127,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, | |||
| 1119 | return false; | 1127 | return false; |
| 1120 | } | 1128 | } |
| 1121 | 1129 | ||
| 1130 | atomic_inc(&sbq->ws_active); | ||
| 1122 | wait->flags &= ~WQ_FLAG_EXCLUSIVE; | 1131 | wait->flags &= ~WQ_FLAG_EXCLUSIVE; |
| 1123 | __add_wait_queue(wq, wait); | 1132 | __add_wait_queue(wq, wait); |
| 1124 | 1133 | ||
| @@ -1139,6 +1148,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, | |||
| 1139 | * someone else gets the wakeup. | 1148 | * someone else gets the wakeup. |
| 1140 | */ | 1149 | */ |
| 1141 | list_del_init(&wait->entry); | 1150 | list_del_init(&wait->entry); |
| 1151 | atomic_dec(&sbq->ws_active); | ||
| 1142 | spin_unlock(&hctx->dispatch_wait_lock); | 1152 | spin_unlock(&hctx->dispatch_wait_lock); |
| 1143 | spin_unlock_irq(&wq->lock); | 1153 | spin_unlock_irq(&wq->lock); |
| 1144 | 1154 | ||
| @@ -1701,11 +1711,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) | |||
| 1701 | unsigned int depth; | 1711 | unsigned int depth; |
| 1702 | 1712 | ||
| 1703 | list_splice_init(&plug->mq_list, &list); | 1713 | list_splice_init(&plug->mq_list, &list); |
| 1704 | plug->rq_count = 0; | ||
| 1705 | 1714 | ||
| 1706 | if (plug->rq_count > 2 && plug->multiple_queues) | 1715 | if (plug->rq_count > 2 && plug->multiple_queues) |
| 1707 | list_sort(NULL, &list, plug_rq_cmp); | 1716 | list_sort(NULL, &list, plug_rq_cmp); |
| 1708 | 1717 | ||
| 1718 | plug->rq_count = 0; | ||
| 1719 | |||
| 1709 | this_q = NULL; | 1720 | this_q = NULL; |
| 1710 | this_hctx = NULL; | 1721 | this_hctx = NULL; |
| 1711 | this_ctx = NULL; | 1722 | this_ctx = NULL; |
| @@ -1790,74 +1801,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, | |||
| 1790 | return ret; | 1801 | return ret; |
| 1791 | } | 1802 | } |
| 1792 | 1803 | ||
| 1793 | blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, | 1804 | static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, |
| 1794 | struct request *rq, | 1805 | struct request *rq, |
| 1795 | blk_qc_t *cookie, | 1806 | blk_qc_t *cookie, |
| 1796 | bool bypass, bool last) | 1807 | bool bypass_insert, bool last) |
| 1797 | { | 1808 | { |
| 1798 | struct request_queue *q = rq->q; | 1809 | struct request_queue *q = rq->q; |
| 1799 | bool run_queue = true; | 1810 | bool run_queue = true; |
| 1800 | blk_status_t ret = BLK_STS_RESOURCE; | ||
| 1801 | int srcu_idx; | ||
| 1802 | bool force = false; | ||
| 1803 | 1811 | ||
| 1804 | hctx_lock(hctx, &srcu_idx); | ||
| 1805 | /* | 1812 | /* |
| 1806 | * hctx_lock is needed before checking quiesced flag. | 1813 | * RCU or SRCU read lock is needed before checking quiesced flag. |
| 1807 | * | 1814 | * |
| 1808 | * When queue is stopped or quiesced, ignore 'bypass', insert | 1815 | * When queue is stopped or quiesced, ignore 'bypass_insert' from |
| 1809 | * and return BLK_STS_OK to caller, and avoid driver to try to | 1816 | * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, |
| 1810 | * dispatch again. | 1817 | * and avoid driver to try to dispatch again. |
| 1811 | */ | 1818 | */ |
| 1812 | if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) { | 1819 | if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { |
| 1813 | run_queue = false; | 1820 | run_queue = false; |
| 1814 | bypass = false; | 1821 | bypass_insert = false; |
| 1815 | goto out_unlock; | 1822 | goto insert; |
| 1816 | } | 1823 | } |
| 1817 | 1824 | ||
| 1818 | if (unlikely(q->elevator && !bypass)) | 1825 | if (q->elevator && !bypass_insert) |
| 1819 | goto out_unlock; | 1826 | goto insert; |
| 1820 | 1827 | ||
| 1821 | if (!blk_mq_get_dispatch_budget(hctx)) | 1828 | if (!blk_mq_get_dispatch_budget(hctx)) |
| 1822 | goto out_unlock; | 1829 | goto insert; |
| 1823 | 1830 | ||
| 1824 | if (!blk_mq_get_driver_tag(rq)) { | 1831 | if (!blk_mq_get_driver_tag(rq)) { |
| 1825 | blk_mq_put_dispatch_budget(hctx); | 1832 | blk_mq_put_dispatch_budget(hctx); |
| 1826 | goto out_unlock; | 1833 | goto insert; |
| 1827 | } | 1834 | } |
| 1828 | 1835 | ||
| 1829 | /* | 1836 | return __blk_mq_issue_directly(hctx, rq, cookie, last); |
| 1830 | * Always add a request that has been through | 1837 | insert: |
| 1831 | *.queue_rq() to the hardware dispatch list. | 1838 | if (bypass_insert) |
| 1832 | */ | 1839 | return BLK_STS_RESOURCE; |
| 1833 | force = true; | 1840 | |
| 1834 | ret = __blk_mq_issue_directly(hctx, rq, cookie, last); | 1841 | blk_mq_request_bypass_insert(rq, run_queue); |
| 1835 | out_unlock: | 1842 | return BLK_STS_OK; |
| 1843 | } | ||
| 1844 | |||
| 1845 | static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, | ||
| 1846 | struct request *rq, blk_qc_t *cookie) | ||
| 1847 | { | ||
| 1848 | blk_status_t ret; | ||
| 1849 | int srcu_idx; | ||
| 1850 | |||
| 1851 | might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); | ||
| 1852 | |||
| 1853 | hctx_lock(hctx, &srcu_idx); | ||
| 1854 | |||
| 1855 | ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); | ||
| 1856 | if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) | ||
| 1857 | blk_mq_request_bypass_insert(rq, true); | ||
| 1858 | else if (ret != BLK_STS_OK) | ||
| 1859 | blk_mq_end_request(rq, ret); | ||
| 1860 | |||
| 1861 | hctx_unlock(hctx, srcu_idx); | ||
| 1862 | } | ||
| 1863 | |||
| 1864 | blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) | ||
| 1865 | { | ||
| 1866 | blk_status_t ret; | ||
| 1867 | int srcu_idx; | ||
| 1868 | blk_qc_t unused_cookie; | ||
| 1869 | struct blk_mq_hw_ctx *hctx = rq->mq_hctx; | ||
| 1870 | |||
| 1871 | hctx_lock(hctx, &srcu_idx); | ||
| 1872 | ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); | ||
| 1836 | hctx_unlock(hctx, srcu_idx); | 1873 | hctx_unlock(hctx, srcu_idx); |
| 1837 | switch (ret) { | ||
| 1838 | case BLK_STS_OK: | ||
| 1839 | break; | ||
| 1840 | case BLK_STS_DEV_RESOURCE: | ||
| 1841 | case BLK_STS_RESOURCE: | ||
| 1842 | if (force) { | ||
| 1843 | blk_mq_request_bypass_insert(rq, run_queue); | ||
| 1844 | /* | ||
| 1845 | * We have to return BLK_STS_OK for the DM | ||
| 1846 | * to avoid livelock. Otherwise, we return | ||
| 1847 | * the real result to indicate whether the | ||
| 1848 | * request is direct-issued successfully. | ||
| 1849 | */ | ||
| 1850 | ret = bypass ? BLK_STS_OK : ret; | ||
| 1851 | } else if (!bypass) { | ||
| 1852 | blk_mq_sched_insert_request(rq, false, | ||
| 1853 | run_queue, false); | ||
| 1854 | } | ||
| 1855 | break; | ||
| 1856 | default: | ||
| 1857 | if (!bypass) | ||
| 1858 | blk_mq_end_request(rq, ret); | ||
| 1859 | break; | ||
| 1860 | } | ||
| 1861 | 1874 | ||
| 1862 | return ret; | 1875 | return ret; |
| 1863 | } | 1876 | } |
| @@ -1865,20 +1878,22 @@ out_unlock: | |||
| 1865 | void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, | 1878 | void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, |
| 1866 | struct list_head *list) | 1879 | struct list_head *list) |
| 1867 | { | 1880 | { |
| 1868 | blk_qc_t unused; | ||
| 1869 | blk_status_t ret = BLK_STS_OK; | ||
| 1870 | |||
| 1871 | while (!list_empty(list)) { | 1881 | while (!list_empty(list)) { |
| 1882 | blk_status_t ret; | ||
| 1872 | struct request *rq = list_first_entry(list, struct request, | 1883 | struct request *rq = list_first_entry(list, struct request, |
| 1873 | queuelist); | 1884 | queuelist); |
| 1874 | 1885 | ||
| 1875 | list_del_init(&rq->queuelist); | 1886 | list_del_init(&rq->queuelist); |
| 1876 | if (ret == BLK_STS_OK) | 1887 | ret = blk_mq_request_issue_directly(rq, list_empty(list)); |
| 1877 | ret = blk_mq_try_issue_directly(hctx, rq, &unused, | 1888 | if (ret != BLK_STS_OK) { |
| 1878 | false, | 1889 | if (ret == BLK_STS_RESOURCE || |
| 1890 | ret == BLK_STS_DEV_RESOURCE) { | ||
| 1891 | blk_mq_request_bypass_insert(rq, | ||
| 1879 | list_empty(list)); | 1892 | list_empty(list)); |
| 1880 | else | 1893 | break; |
| 1881 | blk_mq_sched_insert_request(rq, false, true, false); | 1894 | } |
| 1895 | blk_mq_end_request(rq, ret); | ||
| 1896 | } | ||
| 1882 | } | 1897 | } |
| 1883 | 1898 | ||
| 1884 | /* | 1899 | /* |
| @@ -1886,7 +1901,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, | |||
| 1886 | * the driver there was more coming, but that turned out to | 1901 | * the driver there was more coming, but that turned out to |
| 1887 | * be a lie. | 1902 | * be a lie. |
| 1888 | */ | 1903 | */ |
| 1889 | if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs) | 1904 | if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs) |
| 1890 | hctx->queue->mq_ops->commit_rqs(hctx); | 1905 | hctx->queue->mq_ops->commit_rqs(hctx); |
| 1891 | } | 1906 | } |
| 1892 | 1907 | ||
| @@ -1993,19 +2008,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1993 | plug->rq_count--; | 2008 | plug->rq_count--; |
| 1994 | } | 2009 | } |
| 1995 | blk_add_rq_to_plug(plug, rq); | 2010 | blk_add_rq_to_plug(plug, rq); |
| 2011 | trace_block_plug(q); | ||
| 1996 | 2012 | ||
| 1997 | blk_mq_put_ctx(data.ctx); | 2013 | blk_mq_put_ctx(data.ctx); |
| 1998 | 2014 | ||
| 1999 | if (same_queue_rq) { | 2015 | if (same_queue_rq) { |
| 2000 | data.hctx = same_queue_rq->mq_hctx; | 2016 | data.hctx = same_queue_rq->mq_hctx; |
| 2017 | trace_block_unplug(q, 1, true); | ||
| 2001 | blk_mq_try_issue_directly(data.hctx, same_queue_rq, | 2018 | blk_mq_try_issue_directly(data.hctx, same_queue_rq, |
| 2002 | &cookie, false, true); | 2019 | &cookie); |
| 2003 | } | 2020 | } |
| 2004 | } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator && | 2021 | } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator && |
| 2005 | !data.hctx->dispatch_busy)) { | 2022 | !data.hctx->dispatch_busy)) { |
| 2006 | blk_mq_put_ctx(data.ctx); | 2023 | blk_mq_put_ctx(data.ctx); |
| 2007 | blk_mq_bio_to_request(rq, bio); | 2024 | blk_mq_bio_to_request(rq, bio); |
| 2008 | blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true); | 2025 | blk_mq_try_issue_directly(data.hctx, rq, &cookie); |
| 2009 | } else { | 2026 | } else { |
| 2010 | blk_mq_put_ctx(data.ctx); | 2027 | blk_mq_put_ctx(data.ctx); |
| 2011 | blk_mq_bio_to_request(rq, bio); | 2028 | blk_mq_bio_to_request(rq, bio); |
| @@ -2322,7 +2339,7 @@ static int blk_mq_init_hctx(struct request_queue *q, | |||
| 2322 | return 0; | 2339 | return 0; |
| 2323 | 2340 | ||
| 2324 | free_fq: | 2341 | free_fq: |
| 2325 | kfree(hctx->fq); | 2342 | blk_free_flush_queue(hctx->fq); |
| 2326 | exit_hctx: | 2343 | exit_hctx: |
| 2327 | if (set->ops->exit_hctx) | 2344 | if (set->ops->exit_hctx) |
| 2328 | set->ops->exit_hctx(hctx, hctx_idx); | 2345 | set->ops->exit_hctx(hctx, hctx_idx); |
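Among the blk-mq.c hunks, the blk_mq_flush_plug_list() change moves the plug->rq_count reset below the "more than two plugged requests" test; with the reset first, the test could never be true and the plugged list was never sorted. A plain C model of that ordering bug, with trimmed-down, hypothetical field names:

#include <stdio.h>

struct plug { int rq_count; int multiple_queues; };

static int should_sort_buggy(struct plug *p)
{
        p->rq_count = 0;                                /* reset too early */
        return p->rq_count > 2 && p->multiple_queues;   /* always false */
}

static int should_sort_fixed(struct plug *p)
{
        int sort = p->rq_count > 2 && p->multiple_queues;

        p->rq_count = 0;                                /* reset after the test */
        return sort;
}

int main(void)
{
        struct plug a = { .rq_count = 4, .multiple_queues = 1 };
        struct plug b = { .rq_count = 4, .multiple_queues = 1 };

        printf("buggy: %d, fixed: %d\n", should_sort_buggy(&a), should_sort_fixed(&b));
        return 0;
}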
diff --git a/block/blk-mq.h b/block/blk-mq.h index 0ed8e5a8729f..423ea88ab6fb 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
| @@ -70,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue); | |||
| 70 | void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, | 70 | void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, |
| 71 | struct list_head *list); | 71 | struct list_head *list); |
| 72 | 72 | ||
| 73 | blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, | 73 | /* Used by blk_insert_cloned_request() to issue request directly */ |
| 74 | struct request *rq, | 74 | blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last); |
| 75 | blk_qc_t *cookie, | ||
| 76 | bool bypass, bool last); | ||
| 77 | void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, | 75 | void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, |
| 78 | struct list_head *list); | 76 | struct list_head *list); |
| 79 | 77 | ||
| @@ -224,15 +222,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, | |||
| 224 | } | 222 | } |
| 225 | } | 223 | } |
| 226 | 224 | ||
| 227 | static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx, | ||
| 228 | struct request *rq) | ||
| 229 | { | ||
| 230 | if (rq->tag == -1 || rq->internal_tag == -1) | ||
| 231 | return; | ||
| 232 | |||
| 233 | __blk_mq_put_driver_tag(hctx, rq); | ||
| 234 | } | ||
| 235 | |||
| 236 | static inline void blk_mq_put_driver_tag(struct request *rq) | 225 | static inline void blk_mq_put_driver_tag(struct request *rq) |
| 237 | { | 226 | { |
| 238 | if (rq->tag == -1 || rq->internal_tag == -1) | 227 | if (rq->tag == -1 || rq->internal_tag == -1) |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 62d3aa74277b..5e9d7348c16f 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
| @@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
| 81 | 81 | ||
| 82 | ACPI_FUNCTION_TRACE(ev_enable_gpe); | 82 | ACPI_FUNCTION_TRACE(ev_enable_gpe); |
| 83 | 83 | ||
| 84 | /* Enable the requested GPE */ | 84 | /* Clear the GPE status */ |
| 85 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
| 86 | if (ACPI_FAILURE(status)) | ||
| 87 | return_ACPI_STATUS(status); | ||
| 85 | 88 | ||
| 89 | /* Enable the requested GPE */ | ||
| 86 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); | 90 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); |
| 87 | return_ACPI_STATUS(status); | 91 | return_ACPI_STATUS(status); |
| 88 | } | 92 | } |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 6ecbbabf1233..eec263c9019e 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -1043,9 +1043,6 @@ void __init acpi_early_init(void) | |||
| 1043 | 1043 | ||
| 1044 | acpi_permanent_mmap = true; | 1044 | acpi_permanent_mmap = true; |
| 1045 | 1045 | ||
| 1046 | /* Initialize debug output. Linux does not use ACPICA defaults */ | ||
| 1047 | acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; | ||
| 1048 | |||
| 1049 | #ifdef CONFIG_X86 | 1046 | #ifdef CONFIG_X86 |
| 1050 | /* | 1047 | /* |
| 1051 | * If the machine falls into the DMI check table, | 1048 | * If the machine falls into the DMI check table, |
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 1b207fca1420..d4244e7d0e38 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c | |||
| @@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) | |||
| 1150 | cpc_read(cpunum, nominal_reg, &nom); | 1150 | cpc_read(cpunum, nominal_reg, &nom); |
| 1151 | perf_caps->nominal_perf = nom; | 1151 | perf_caps->nominal_perf = nom; |
| 1152 | 1152 | ||
| 1153 | cpc_read(cpunum, guaranteed_reg, &guaranteed); | 1153 | if (guaranteed_reg->type != ACPI_TYPE_BUFFER || |
| 1154 | perf_caps->guaranteed_perf = guaranteed; | 1154 | IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) { |
| 1155 | perf_caps->guaranteed_perf = 0; | ||
| 1156 | } else { | ||
| 1157 | cpc_read(cpunum, guaranteed_reg, &guaranteed); | ||
| 1158 | perf_caps->guaranteed_perf = guaranteed; | ||
| 1159 | } | ||
| 1155 | 1160 | ||
| 1156 | cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear); | 1161 | cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear); |
| 1157 | perf_caps->lowest_nonlinear_perf = min_nonlinear; | 1162 | perf_caps->lowest_nonlinear_perf = min_nonlinear; |
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 8685882da64c..4b9c7ca492e6 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
| @@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc, | |||
| 2057 | size_t object_size = 0; | 2057 | size_t object_size = 0; |
| 2058 | 2058 | ||
| 2059 | read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); | 2059 | read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); |
| 2060 | if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) | 2060 | if (offset > buffer->data_size || read_size < sizeof(*hdr) || |
| 2061 | !IS_ALIGNED(offset, sizeof(u32))) | ||
| 2061 | return 0; | 2062 | return 0; |
| 2062 | binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, | 2063 | binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, |
| 2063 | offset, read_size); | 2064 | offset, read_size); |
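The binder_get_object() hunk adds an explicit offset > buffer->data_size test. Because the sizes are unsigned, data_size - offset underflows to a huge value for an out-of-range offset, so the following min_t() clamp no longer limits anything. The standalone toy below, with hypothetical numbers, shows the effect.

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t data_size = 128, offset = 200, object_size = 24;
        size_t read_size = data_size - offset;          /* wraps to a value near SIZE_MAX */

        if (read_size > object_size)
                read_size = object_size;                /* min_t() equivalent */

        /* Prints 24: the oversized offset was not rejected by the clamp alone. */
        printf("read_size = %zu\n", read_size);
        return 0;
}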
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 6389467670a0..195f120c4e8c 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c | |||
| @@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
| 927 | 927 | ||
| 928 | index = page - alloc->pages; | 928 | index = page - alloc->pages; |
| 929 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; | 929 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; |
| 930 | |||
| 931 | mm = alloc->vma_vm_mm; | ||
| 932 | if (!mmget_not_zero(mm)) | ||
| 933 | goto err_mmget; | ||
| 934 | if (!down_write_trylock(&mm->mmap_sem)) | ||
| 935 | goto err_down_write_mmap_sem_failed; | ||
| 930 | vma = binder_alloc_get_vma(alloc); | 936 | vma = binder_alloc_get_vma(alloc); |
| 931 | if (vma) { | ||
| 932 | if (!mmget_not_zero(alloc->vma_vm_mm)) | ||
| 933 | goto err_mmget; | ||
| 934 | mm = alloc->vma_vm_mm; | ||
| 935 | if (!down_read_trylock(&mm->mmap_sem)) | ||
| 936 | goto err_down_write_mmap_sem_failed; | ||
| 937 | } | ||
| 938 | 937 | ||
| 939 | list_lru_isolate(lru, item); | 938 | list_lru_isolate(lru, item); |
| 940 | spin_unlock(lock); | 939 | spin_unlock(lock); |
| @@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
| 945 | zap_page_range(vma, page_addr, PAGE_SIZE); | 944 | zap_page_range(vma, page_addr, PAGE_SIZE); |
| 946 | 945 | ||
| 947 | trace_binder_unmap_user_end(alloc, index); | 946 | trace_binder_unmap_user_end(alloc, index); |
| 948 | |||
| 949 | up_read(&mm->mmap_sem); | ||
| 950 | mmput(mm); | ||
| 951 | } | 947 | } |
| 948 | up_write(&mm->mmap_sem); | ||
| 949 | mmput(mm); | ||
| 952 | 950 | ||
| 953 | trace_binder_unmap_kernel_start(alloc, index); | 951 | trace_binder_unmap_kernel_start(alloc, index); |
| 954 | 952 | ||
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index b3ed8f9953a8..173e6f2dd9af 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c | |||
| @@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev) | |||
| 52 | /* Per the spec, only slot type and drawer type ODD can be supported */ | 52 | /* Per the spec, only slot type and drawer type ODD can be supported */ |
| 53 | static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) | 53 | static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) |
| 54 | { | 54 | { |
| 55 | char buf[16]; | 55 | char *buf; |
| 56 | unsigned int ret; | 56 | unsigned int ret; |
| 57 | struct rm_feature_desc *desc = (void *)(buf + 8); | 57 | struct rm_feature_desc *desc; |
| 58 | struct ata_taskfile tf; | 58 | struct ata_taskfile tf; |
| 59 | static const char cdb[] = { GPCMD_GET_CONFIGURATION, | 59 | static const char cdb[] = { GPCMD_GET_CONFIGURATION, |
| 60 | 2, /* only 1 feature descriptor requested */ | 60 | 2, /* only 1 feature descriptor requested */ |
| 61 | 0, 3, /* 3, removable medium feature */ | 61 | 0, 3, /* 3, removable medium feature */ |
| 62 | 0, 0, 0,/* reserved */ | 62 | 0, 0, 0,/* reserved */ |
| 63 | 0, sizeof(buf), | 63 | 0, 16, |
| 64 | 0, 0, 0, | 64 | 0, 0, 0, |
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | buf = kzalloc(16, GFP_KERNEL); | ||
| 68 | if (!buf) | ||
| 69 | return ODD_MECH_TYPE_UNSUPPORTED; | ||
| 70 | desc = (void *)(buf + 8); | ||
| 71 | |||
| 67 | ata_tf_init(dev, &tf); | 72 | ata_tf_init(dev, &tf); |
| 68 | tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | 73 | tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
| 69 | tf.command = ATA_CMD_PACKET; | 74 | tf.command = ATA_CMD_PACKET; |
| 70 | tf.protocol = ATAPI_PROT_PIO; | 75 | tf.protocol = ATAPI_PROT_PIO; |
| 71 | tf.lbam = sizeof(buf); | 76 | tf.lbam = 16; |
| 72 | 77 | ||
| 73 | ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, | 78 | ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, |
| 74 | buf, sizeof(buf), 0); | 79 | buf, 16, 0); |
| 75 | if (ret) | 80 | if (ret) { |
| 81 | kfree(buf); | ||
| 76 | return ODD_MECH_TYPE_UNSUPPORTED; | 82 | return ODD_MECH_TYPE_UNSUPPORTED; |
| 83 | } | ||
| 77 | 84 | ||
| 78 | if (be16_to_cpu(desc->feature_code) != 3) | 85 | if (be16_to_cpu(desc->feature_code) != 3) { |
| 86 | kfree(buf); | ||
| 79 | return ODD_MECH_TYPE_UNSUPPORTED; | 87 | return ODD_MECH_TYPE_UNSUPPORTED; |
| 88 | } | ||
| 80 | 89 | ||
| 81 | if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) | 90 | if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) { |
| 91 | kfree(buf); | ||
| 82 | return ODD_MECH_TYPE_SLOT; | 92 | return ODD_MECH_TYPE_SLOT; |
| 83 | else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1) | 93 | } else if (desc->mech_type == 1 && desc->load == 0 && |
| 94 | desc->eject == 1) { | ||
| 95 | kfree(buf); | ||
| 84 | return ODD_MECH_TYPE_DRAWER; | 96 | return ODD_MECH_TYPE_DRAWER; |
| 85 | else | 97 | } else { |
| 98 | kfree(buf); | ||
| 86 | return ODD_MECH_TYPE_UNSUPPORTED; | 99 | return ODD_MECH_TYPE_UNSUPPORTED; |
| 100 | } | ||
| 87 | } | 101 | } |
| 88 | 102 | ||
| 89 | /* Test if ODD is zero power ready by sense code */ | 103 | /* Test if ODD is zero power ready by sense code */ |
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 417a9f15c116..d7ac09c092f2 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c | |||
| @@ -1748,6 +1748,11 @@ static int __init null_init(void) | |||
| 1748 | return -EINVAL; | 1748 | return -EINVAL; |
| 1749 | } | 1749 | } |
| 1750 | 1750 | ||
| 1751 | if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { | ||
| 1752 | pr_err("null_blk: invalid home_node value\n"); | ||
| 1753 | g_home_node = NUMA_NO_NODE; | ||
| 1754 | } | ||
| 1755 | |||
| 1751 | if (g_queue_mode == NULL_Q_RQ) { | 1756 | if (g_queue_mode == NULL_Q_RQ) { |
| 1752 | pr_err("null_blk: legacy IO path no longer available\n"); | 1757 | pr_err("null_blk: legacy IO path no longer available\n"); |
| 1753 | return -EINVAL; | 1758 | return -EINVAL; |
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 377a694dc228..6d415b20fb70 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
| @@ -314,6 +314,7 @@ static void pcd_init_units(void) | |||
| 314 | disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops, | 314 | disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops, |
| 315 | 1, BLK_MQ_F_SHOULD_MERGE); | 315 | 1, BLK_MQ_F_SHOULD_MERGE); |
| 316 | if (IS_ERR(disk->queue)) { | 316 | if (IS_ERR(disk->queue)) { |
| 317 | put_disk(disk); | ||
| 317 | disk->queue = NULL; | 318 | disk->queue = NULL; |
| 318 | continue; | 319 | continue; |
| 319 | } | 320 | } |
| @@ -750,6 +751,8 @@ static int pcd_detect(void) | |||
| 750 | 751 | ||
| 751 | printk("%s: No CD-ROM drive found\n", name); | 752 | printk("%s: No CD-ROM drive found\n", name); |
| 752 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { | 753 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { |
| 754 | if (!cd->disk) | ||
| 755 | continue; | ||
| 753 | blk_cleanup_queue(cd->disk->queue); | 756 | blk_cleanup_queue(cd->disk->queue); |
| 754 | cd->disk->queue = NULL; | 757 | cd->disk->queue = NULL; |
| 755 | blk_mq_free_tag_set(&cd->tag_set); | 758 | blk_mq_free_tag_set(&cd->tag_set); |
| @@ -1010,8 +1013,14 @@ static int __init pcd_init(void) | |||
| 1010 | pcd_probe_capabilities(); | 1013 | pcd_probe_capabilities(); |
| 1011 | 1014 | ||
| 1012 | if (register_blkdev(major, name)) { | 1015 | if (register_blkdev(major, name)) { |
| 1013 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) | 1016 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { |
| 1017 | if (!cd->disk) | ||
| 1018 | continue; | ||
| 1019 | |||
| 1020 | blk_cleanup_queue(cd->disk->queue); | ||
| 1021 | blk_mq_free_tag_set(&cd->tag_set); | ||
| 1014 | put_disk(cd->disk); | 1022 | put_disk(cd->disk); |
| 1023 | } | ||
| 1015 | return -EBUSY; | 1024 | return -EBUSY; |
| 1016 | } | 1025 | } |
| 1017 | 1026 | ||
| @@ -1032,6 +1041,9 @@ static void __exit pcd_exit(void) | |||
| 1032 | int unit; | 1041 | int unit; |
| 1033 | 1042 | ||
| 1034 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { | 1043 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { |
| 1044 | if (!cd->disk) | ||
| 1045 | continue; | ||
| 1046 | |||
| 1035 | if (cd->present) { | 1047 | if (cd->present) { |
| 1036 | del_gendisk(cd->disk); | 1048 | del_gendisk(cd->disk); |
| 1037 | pi_release(cd->pi); | 1049 | pi_release(cd->pi); |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 103b617cdc31..35e6e271b219 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
| @@ -762,6 +762,8 @@ static int pf_detect(void) | |||
| 762 | 762 | ||
| 763 | printk("%s: No ATAPI disk detected\n", name); | 763 | printk("%s: No ATAPI disk detected\n", name); |
| 764 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { | 764 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { |
| 765 | if (!pf->disk) | ||
| 766 | continue; | ||
| 765 | blk_cleanup_queue(pf->disk->queue); | 767 | blk_cleanup_queue(pf->disk->queue); |
| 766 | pf->disk->queue = NULL; | 768 | pf->disk->queue = NULL; |
| 767 | blk_mq_free_tag_set(&pf->tag_set); | 769 | blk_mq_free_tag_set(&pf->tag_set); |
| @@ -1029,8 +1031,13 @@ static int __init pf_init(void) | |||
| 1029 | pf_busy = 0; | 1031 | pf_busy = 0; |
| 1030 | 1032 | ||
| 1031 | if (register_blkdev(major, name)) { | 1033 | if (register_blkdev(major, name)) { |
| 1032 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) | 1034 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { |
| 1035 | if (!pf->disk) | ||
| 1036 | continue; | ||
| 1037 | blk_cleanup_queue(pf->disk->queue); | ||
| 1038 | blk_mq_free_tag_set(&pf->tag_set); | ||
| 1033 | put_disk(pf->disk); | 1039 | put_disk(pf->disk); |
| 1040 | } | ||
| 1034 | return -EBUSY; | 1041 | return -EBUSY; |
| 1035 | } | 1042 | } |
| 1036 | 1043 | ||
| @@ -1051,6 +1058,9 @@ static void __exit pf_exit(void) | |||
| 1051 | int unit; | 1058 | int unit; |
| 1052 | unregister_blkdev(major, name); | 1059 | unregister_blkdev(major, name); |
| 1053 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { | 1060 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { |
| 1061 | if (!pf->disk) | ||
| 1062 | continue; | ||
| 1063 | |||
| 1054 | if (pf->present) | 1064 | if (pf->present) |
| 1055 | del_gendisk(pf->disk); | 1065 | del_gendisk(pf->disk); |
| 1056 | 1066 | ||
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 87ccef4bd69e..32a21b8d1d85 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
| @@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace) | |||
| 1090 | return 0; | 1090 | return 0; |
| 1091 | 1091 | ||
| 1092 | err_read: | 1092 | err_read: |
| 1093 | /* prevent double queue cleanup */ | ||
| 1094 | ace->gd->queue = NULL; | ||
| 1093 | put_disk(ace->gd); | 1095 | put_disk(ace->gd); |
| 1094 | err_alloc_disk: | 1096 | err_alloc_disk: |
| 1095 | blk_cleanup_queue(ace->queue); | 1097 | blk_cleanup_queue(ace->queue); |
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index e7a5f1d1c314..399cad7daae7 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
| @@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev, | |||
| 290 | struct zram *zram = dev_to_zram(dev); | 290 | struct zram *zram = dev_to_zram(dev); |
| 291 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; | 291 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
| 292 | int index; | 292 | int index; |
| 293 | char mode_buf[8]; | ||
| 294 | ssize_t sz; | ||
| 295 | 293 | ||
| 296 | sz = strscpy(mode_buf, buf, sizeof(mode_buf)); | 294 | if (!sysfs_streq(buf, "all")) |
| 297 | if (sz <= 0) | ||
| 298 | return -EINVAL; | ||
| 299 | |||
| 300 | /* ignore trailing new line */ | ||
| 301 | if (mode_buf[sz - 1] == '\n') | ||
| 302 | mode_buf[sz - 1] = 0x00; | ||
| 303 | |||
| 304 | if (strcmp(mode_buf, "all")) | ||
| 305 | return -EINVAL; | 295 | return -EINVAL; |
| 306 | 296 | ||
| 307 | down_read(&zram->init_lock); | 297 | down_read(&zram->init_lock); |
| @@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev, | |||
| 635 | struct bio bio; | 625 | struct bio bio; |
| 636 | struct bio_vec bio_vec; | 626 | struct bio_vec bio_vec; |
| 637 | struct page *page; | 627 | struct page *page; |
| 638 | ssize_t ret, sz; | 628 | ssize_t ret; |
| 639 | char mode_buf[8]; | 629 | int mode; |
| 640 | int mode = -1; | ||
| 641 | unsigned long blk_idx = 0; | 630 | unsigned long blk_idx = 0; |
| 642 | 631 | ||
| 643 | sz = strscpy(mode_buf, buf, sizeof(mode_buf)); | 632 | if (sysfs_streq(buf, "idle")) |
| 644 | if (sz <= 0) | ||
| 645 | return -EINVAL; | ||
| 646 | |||
| 647 | /* ignore trailing newline */ | ||
| 648 | if (mode_buf[sz - 1] == '\n') | ||
| 649 | mode_buf[sz - 1] = 0x00; | ||
| 650 | |||
| 651 | if (!strcmp(mode_buf, "idle")) | ||
| 652 | mode = IDLE_WRITEBACK; | 633 | mode = IDLE_WRITEBACK; |
| 653 | else if (!strcmp(mode_buf, "huge")) | 634 | else if (sysfs_streq(buf, "huge")) |
| 654 | mode = HUGE_WRITEBACK; | 635 | mode = HUGE_WRITEBACK; |
| 655 | 636 | else | |
| 656 | if (mode == -1) | ||
| 657 | return -EINVAL; | 637 | return -EINVAL; |
| 658 | 638 | ||
| 659 | down_read(&zram->init_lock); | 639 | down_read(&zram->init_lock); |
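Both zram hunks drop the local copy-and-trim of the sysfs input in favor of sysfs_streq(), which compares like strcmp() but ignores a single trailing newline, so "echo idle > writeback" still matches. Below is a userspace approximation of that helper for reference; it is not the kernel implementation.

#include <stdio.h>
#include <string.h>

/* Model of sysfs_streq(): equality that tolerates one trailing '\n' on either side. */
static int sysfs_streq_model(const char *s1, const char *s2)
{
        size_t n1 = strlen(s1), n2 = strlen(s2);

        if (n1 && s1[n1 - 1] == '\n')
                n1--;
        if (n2 && s2[n2 - 1] == '\n')
                n2--;
        return n1 == n2 && !strncmp(s1, s2, n1);
}

int main(void)
{
        printf("%d %d %d\n",
               sysfs_streq_model("idle\n", "idle"),     /* 1: typical sysfs write */
               sysfs_streq_model("idle", "idle"),       /* 1 */
               sysfs_streq_model("idlex", "idle"));     /* 0 */
        return 0;
}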
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 72866a004f07..466ebd84ad17 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
| @@ -348,7 +348,7 @@ config XILINX_HWICAP | |||
| 348 | 348 | ||
| 349 | config R3964 | 349 | config R3964 |
| 350 | tristate "Siemens R3964 line discipline" | 350 | tristate "Siemens R3964 line discipline" |
| 351 | depends on TTY | 351 | depends on TTY && BROKEN |
| 352 | ---help--- | 352 | ---help--- |
| 353 | This driver allows synchronous communication with devices using the | 353 | This driver allows synchronous communication with devices using the |
| 354 | Siemens R3964 packet protocol. Unless you are dealing with special | 354 | Siemens R3964 packet protocol. Unless you are dealing with special |
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index d8b77133a83a..f824563fc28d 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c | |||
| @@ -37,8 +37,8 @@ | |||
| 37 | * | 37 | * |
| 38 | * Returns size of the event. If it is an invalid event, returns 0. | 38 | * Returns size of the event. If it is an invalid event, returns 0. |
| 39 | */ | 39 | */ |
| 40 | static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event, | 40 | static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event, |
| 41 | struct tcg_pcr_event *event_header) | 41 | struct tcg_pcr_event *event_header) |
| 42 | { | 42 | { |
| 43 | struct tcg_efi_specid_event_head *efispecid; | 43 | struct tcg_efi_specid_event_head *efispecid; |
| 44 | struct tcg_event_field *event_field; | 44 | struct tcg_event_field *event_field; |
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 8856cce5a23b..817ae09a369e 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c | |||
| @@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait) | |||
| 233 | __poll_t mask = 0; | 233 | __poll_t mask = 0; |
| 234 | 234 | ||
| 235 | poll_wait(file, &priv->async_wait, wait); | 235 | poll_wait(file, &priv->async_wait, wait); |
| 236 | mutex_lock(&priv->buffer_mutex); | ||
| 236 | 237 | ||
| 237 | if (!priv->response_read || priv->response_length) | 238 | /* |
| 239 | * The response_length indicates if there is still response | ||
| 240 | * (or part of it) to be consumed. Partial reads decrease it | ||
| 241 | * by the number of bytes read, and write resets it to zero. | ||
| 242 | */ | ||
| 243 | if (priv->response_length) | ||
| 238 | mask = EPOLLIN | EPOLLRDNORM; | 244 | mask = EPOLLIN | EPOLLRDNORM; |
| 239 | else | 245 | else |
| 240 | mask = EPOLLOUT | EPOLLWRNORM; | 246 | mask = EPOLLOUT | EPOLLWRNORM; |
| 241 | 247 | ||
| 248 | mutex_unlock(&priv->buffer_mutex); | ||
| 242 | return mask; | 249 | return mask; |
| 243 | } | 250 | } |
| 244 | 251 | ||
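The tpm_common_poll() hunk above samples response_length under buffer_mutex because the read and write paths update it under the same lock. A minimal userspace analogue of that pattern, assuming nothing about the driver beyond what the hunk shows (struct and macro names are illustrative):

#include <pthread.h>
#include <stdio.h>

/* The readiness predicate (response_length) is shared with the
 * read/write paths, so the poll-style helper samples it under the
 * same mutex those paths hold.
 */
struct dev_state {
        pthread_mutex_t buffer_mutex;
        size_t response_length; /* response bytes not yet consumed */
};

#define POLLIN_BIT  0x1
#define POLLOUT_BIT 0x4

static unsigned int poll_mask(struct dev_state *s)
{
        unsigned int mask;

        pthread_mutex_lock(&s->buffer_mutex);
        if (s->response_length)
                mask = POLLIN_BIT;      /* data ready to be read */
        else
                mask = POLLOUT_BIT;     /* ready for a new command */
        pthread_mutex_unlock(&s->buffer_mutex);

        return mask;
}

int main(void)
{
        static struct dev_state s = {
                .buffer_mutex = PTHREAD_MUTEX_INITIALIZER,
                .response_length = 0,
        };

        printf("mask=%#x\n", poll_mask(&s));    /* POLLOUT: nothing pending */
        s.response_length = 32;
        printf("mask=%#x\n", poll_mask(&s));    /* POLLIN: response pending */
        return 0;
}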
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 83ece5639f86..ae1030c9b086 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c | |||
| @@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev) | |||
| 402 | if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) | 402 | if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) |
| 403 | return 0; | 403 | return 0; |
| 404 | 404 | ||
| 405 | if (chip->flags & TPM_CHIP_FLAG_TPM2) { | 405 | if (!tpm_chip_start(chip)) { |
| 406 | mutex_lock(&chip->tpm_mutex); | 406 | if (chip->flags & TPM_CHIP_FLAG_TPM2) |
| 407 | if (!tpm_chip_start(chip)) { | ||
| 408 | tpm2_shutdown(chip, TPM2_SU_STATE); | 407 | tpm2_shutdown(chip, TPM2_SU_STATE); |
| 409 | tpm_chip_stop(chip); | 408 | else |
| 410 | } | 409 | rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); |
| 411 | mutex_unlock(&chip->tpm_mutex); | 410 | |
| 412 | } else { | 411 | tpm_chip_stop(chip); |
| 413 | rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); | ||
| 414 | } | 412 | } |
| 415 | 413 | ||
| 416 | return rc; | 414 | return rc; |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e22f0dbaebb1..2986119dd31f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu) | |||
| 385 | if (ret) | 385 | if (ret) |
| 386 | return ret; | 386 | return ret; |
| 387 | 387 | ||
| 388 | return cppc_perf.guaranteed_perf; | 388 | if (cppc_perf.guaranteed_perf) |
| 389 | return cppc_perf.guaranteed_perf; | ||
| 390 | |||
| 391 | return cppc_perf.nominal_perf; | ||
| 389 | } | 392 | } |
| 390 | 393 | ||
| 391 | #else /* CONFIG_ACPI_CPPC_LIB */ | 394 | #else /* CONFIG_ACPI_CPPC_LIB */ |
| @@ -2593,6 +2596,9 @@ static int __init intel_pstate_init(void) | |||
| 2593 | const struct x86_cpu_id *id; | 2596 | const struct x86_cpu_id *id; |
| 2594 | int rc; | 2597 | int rc; |
| 2595 | 2598 | ||
| 2599 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | ||
| 2600 | return -ENODEV; | ||
| 2601 | |||
| 2596 | if (no_load) | 2602 | if (no_load) |
| 2597 | return -ENODEV; | 2603 | return -ENODEV; |
| 2598 | 2604 | ||
| @@ -2608,7 +2614,7 @@ static int __init intel_pstate_init(void) | |||
| 2608 | } else { | 2614 | } else { |
| 2609 | id = x86_match_cpu(intel_pstate_cpu_ids); | 2615 | id = x86_match_cpu(intel_pstate_cpu_ids); |
| 2610 | if (!id) { | 2616 | if (!id) { |
| 2611 | pr_info("CPU ID not supported\n"); | 2617 | pr_info("CPU model not supported\n"); |
| 2612 | return -ENODEV; | 2618 | return -ENODEV; |
| 2613 | } | 2619 | } |
| 2614 | 2620 | ||
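The intel_pstate hunk above falls back to the nominal performance level whenever the firmware reports a guaranteed level of zero. A minimal sketch of that fallback, with field names mirroring the hunk but the code being illustrative only:

#include <stdio.h>

struct cppc_perf_caps {
        unsigned int guaranteed_perf;
        unsigned int nominal_perf;
};

/* Prefer the guaranteed level; if it is unpopulated (zero), use the
 * nominal level instead.
 */
static unsigned int cppc_guaranteed(const struct cppc_perf_caps *caps)
{
        if (caps->guaranteed_perf)
                return caps->guaranteed_perf;

        return caps->nominal_perf;
}

int main(void)
{
        struct cppc_perf_caps a = { .guaranteed_perf = 24, .nominal_perf = 28 };
        struct cppc_perf_caps b = { .guaranteed_perf = 0,  .nominal_perf = 28 };

        printf("%u %u\n", cppc_guaranteed(&a), cppc_guaranteed(&b)); /* 24 28 */
        return 0;
}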
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 3f49427766b8..2b51e0718c9f 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c | |||
| @@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) | |||
| 189 | 189 | ||
| 190 | clk_put(priv->clk); | 190 | clk_put(priv->clk); |
| 191 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); | 191 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); |
| 192 | kfree(priv); | ||
| 193 | dev_pm_opp_remove_all_dynamic(priv->cpu_dev); | 192 | dev_pm_opp_remove_all_dynamic(priv->cpu_dev); |
| 193 | kfree(priv); | ||
| 194 | 194 | ||
| 195 | return 0; | 195 | return 0; |
| 196 | } | 196 | } |
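The scpi-cpufreq hunk is a use-after-free fix: the old order read priv->cpu_dev after kfree(priv). The general rule is that tear-down must consume every field of an allocation before freeing it. A small sketch of the corrected ordering, with stand-in names rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

struct device {
        const char *name;
};

struct priv {
        struct device *cpu_dev; /* handle still needed during cleanup */
};

static void remove_opp_entries(struct device *dev)
{
        printf("removing OPP entries for %s\n", dev->name);
}

static void driver_exit(struct priv *priv)
{
        /* Correct order: last use of priv->cpu_dev ... */
        remove_opp_entries(priv->cpu_dev);
        /* ... then release the allocation that holds it. */
        free(priv);
}

int main(void)
{
        static struct device cpu0 = { .name = "cpu0" };
        struct priv *priv = malloc(sizeof(*priv));

        if (!priv)
                return 1;
        priv->cpu_dev = &cpu0;
        driver_exit(priv);
        return 0;
}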
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index b1eadc6652b5..7205d9f4029e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 865 | if (ret) | 865 | if (ret) |
| 866 | goto unmap_ctx; | 866 | goto unmap_ctx; |
| 867 | 867 | ||
| 868 | if (mapped_nents) { | 868 | if (mapped_nents) |
| 869 | sg_to_sec4_sg_last(req->src, mapped_nents, | 869 | sg_to_sec4_sg_last(req->src, mapped_nents, |
| 870 | edesc->sec4_sg + sec4_sg_src_index, | 870 | edesc->sec4_sg + sec4_sg_src_index, |
| 871 | 0); | 871 | 0); |
| 872 | if (*next_buflen) | 872 | else |
| 873 | scatterwalk_map_and_copy(next_buf, req->src, | ||
| 874 | to_hash - *buflen, | ||
| 875 | *next_buflen, 0); | ||
| 876 | } else { | ||
| 877 | sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - | 873 | sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - |
| 878 | 1); | 874 | 1); |
| 879 | } | ||
| 880 | 875 | ||
| 876 | if (*next_buflen) | ||
| 877 | scatterwalk_map_and_copy(next_buf, req->src, | ||
| 878 | to_hash - *buflen, | ||
| 879 | *next_buflen, 0); | ||
| 881 | desc = edesc->hw_desc; | 880 | desc = edesc->hw_desc; |
| 882 | 881 | ||
| 883 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 882 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 4e0eede599a8..ac0301b69593 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c | |||
| @@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev) | |||
| 1578 | 1578 | ||
| 1579 | dmadev->nr_channels = nr_channels; | 1579 | dmadev->nr_channels = nr_channels; |
| 1580 | dmadev->nr_requests = nr_requests; | 1580 | dmadev->nr_requests = nr_requests; |
| 1581 | ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", | 1581 | device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", |
| 1582 | dmadev->ahb_addr_masks, | 1582 | dmadev->ahb_addr_masks, |
| 1583 | count); | 1583 | count); |
| 1584 | if (ret) | ||
| 1585 | return ret; | ||
| 1586 | dmadev->nr_ahb_addr_masks = count; | 1584 | dmadev->nr_ahb_addr_masks = count; |
| 1587 | 1585 | ||
| 1588 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1586 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c index 91b90c0cea73..12acdac85820 100644 --- a/drivers/gpio/gpio-adnp.c +++ b/drivers/gpio/gpio-adnp.c | |||
| @@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
| 132 | if (err < 0) | 132 | if (err < 0) |
| 133 | goto out; | 133 | goto out; |
| 134 | 134 | ||
| 135 | if (err & BIT(pos)) | 135 | if (value & BIT(pos)) { |
| 136 | err = -EACCES; | 136 | err = -EPERM; |
| 137 | goto out; | ||
| 138 | } | ||
| 137 | 139 | ||
| 138 | err = 0; | 140 | err = 0; |
| 139 | 141 | ||
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 854bce4fb9e7..217507002dbc 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c | |||
| @@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev) | |||
| 1224 | 1224 | ||
| 1225 | gpio->offset_timer = | 1225 | gpio->offset_timer = |
| 1226 | devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL); | 1226 | devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL); |
| 1227 | if (!gpio->offset_timer) | ||
| 1228 | return -ENOMEM; | ||
| 1227 | 1229 | ||
| 1228 | return aspeed_gpio_setup_irqs(gpio, pdev); | 1230 | return aspeed_gpio_setup_irqs(gpio, pdev); |
| 1229 | } | 1231 | } |
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index 0ecd2369c2ca..a09d2f9ebacc 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c | |||
| @@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev) | |||
| 148 | mutex_init(&exar_gpio->lock); | 148 | mutex_init(&exar_gpio->lock); |
| 149 | 149 | ||
| 150 | index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); | 150 | index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); |
| 151 | if (index < 0) | ||
| 152 | goto err_destroy; | ||
| 151 | 153 | ||
| 152 | sprintf(exar_gpio->name, "exar_gpio%d", index); | 154 | sprintf(exar_gpio->name, "exar_gpio%d", index); |
| 153 | exar_gpio->gpio_chip.label = exar_gpio->name; | 155 | exar_gpio->gpio_chip.label = exar_gpio->name; |
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 154d959e8993..b6a4efce7c92 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c | |||
| @@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file, | |||
| 204 | struct gpio_mockup_chip *chip; | 204 | struct gpio_mockup_chip *chip; |
| 205 | struct seq_file *sfile; | 205 | struct seq_file *sfile; |
| 206 | struct gpio_chip *gc; | 206 | struct gpio_chip *gc; |
| 207 | int val, cnt; | ||
| 207 | char buf[3]; | 208 | char buf[3]; |
| 208 | int val, rv; | ||
| 209 | 209 | ||
| 210 | if (*ppos != 0) | 210 | if (*ppos != 0) |
| 211 | return 0; | 211 | return 0; |
| @@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file, | |||
| 216 | gc = &chip->gc; | 216 | gc = &chip->gc; |
| 217 | 217 | ||
| 218 | val = gpio_mockup_get(gc, priv->offset); | 218 | val = gpio_mockup_get(gc, priv->offset); |
| 219 | snprintf(buf, sizeof(buf), "%d\n", val); | 219 | cnt = snprintf(buf, sizeof(buf), "%d\n", val); |
| 220 | 220 | ||
| 221 | rv = copy_to_user(usr_buf, buf, sizeof(buf)); | 221 | return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt); |
| 222 | if (rv) | ||
| 223 | return rv; | ||
| 224 | |||
| 225 | return sizeof(buf) - 1; | ||
| 226 | } | 222 | } |
| 227 | 223 | ||
| 228 | static ssize_t gpio_mockup_debugfs_write(struct file *file, | 224 | static ssize_t gpio_mockup_debugfs_write(struct file *file, |
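The gpio-mockup hunk replaces a hand-rolled copy_to_user() with simple_read_from_buffer(), which clamps the copy to the caller's count, honours and advances *ppos, and returns the number of bytes copied. The sketch below models those semantics in userspace; it is an illustration of the behaviour, not the kernel implementation.

#include <stdio.h>
#include <string.h>

/* Copy at most 'count' bytes from 'from' starting at *ppos, advance
 * *ppos, and return the bytes copied (0 once the buffer is consumed).
 */
static long read_from_buffer(char *to, size_t count, long *ppos,
                             const char *from, size_t available)
{
        long pos = *ppos;
        size_t n;

        if (pos < 0)
                return -1;
        if ((size_t)pos >= available || !count)
                return 0;
        n = available - (size_t)pos;
        if (n > count)
                n = count;
        memcpy(to, from + pos, n);
        *ppos = pos + (long)n;
        return (long)n;
}

int main(void)
{
        char kbuf[3];
        char ubuf[8] = { 0 };
        long ppos = 0;
        int cnt = snprintf(kbuf, sizeof(kbuf), "%d\n", 1); /* "1\n", cnt == 2 */

        /* First read returns both bytes, second returns 0 (EOF). */
        printf("%ld ", read_from_buffer(ubuf, sizeof(ubuf), &ppos, kbuf, cnt));
        printf("%ld\n", read_from_buffer(ubuf, sizeof(ubuf), &ppos, kbuf, cnt));
        return 0;
}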
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 8b9c3ab70f6e..6a3ec575a404 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
| @@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np, | |||
| 120 | * to determine if the flags should have inverted semantics. | 120 | * to determine if the flags should have inverted semantics. |
| 121 | */ | 121 | */ |
| 122 | if (IS_ENABLED(CONFIG_SPI_MASTER) && | 122 | if (IS_ENABLED(CONFIG_SPI_MASTER) && |
| 123 | of_property_read_bool(np, "cs-gpios")) { | 123 | of_property_read_bool(np, "cs-gpios") && |
| 124 | !strcmp(propname, "cs-gpios")) { | ||
| 124 | struct device_node *child; | 125 | struct device_node *child; |
| 125 | u32 cs; | 126 | u32 cs; |
| 126 | int ret; | 127 | int ret; |
| @@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np, | |||
| 142 | * conflict and the "spi-cs-high" flag will | 143 | * conflict and the "spi-cs-high" flag will |
| 143 | * take precedence. | 144 | * take precedence. |
| 144 | */ | 145 | */ |
| 145 | if (of_property_read_bool(np, "spi-cs-high")) { | 146 | if (of_property_read_bool(child, "spi-cs-high")) { |
| 146 | if (*flags & OF_GPIO_ACTIVE_LOW) { | 147 | if (*flags & OF_GPIO_ACTIVE_LOW) { |
| 147 | pr_warn("%s GPIO handle specifies active low - ignored\n", | 148 | pr_warn("%s GPIO handle specifies active low - ignored\n", |
| 148 | of_node_full_name(np)); | 149 | of_node_full_name(child)); |
| 149 | *flags &= ~OF_GPIO_ACTIVE_LOW; | 150 | *flags &= ~OF_GPIO_ACTIVE_LOW; |
| 150 | } | 151 | } |
| 151 | } else { | 152 | } else { |
| 152 | if (!(*flags & OF_GPIO_ACTIVE_LOW)) | 153 | if (!(*flags & OF_GPIO_ACTIVE_LOW)) |
| 153 | pr_info("%s enforce active low on chipselect handle\n", | 154 | pr_info("%s enforce active low on chipselect handle\n", |
| 154 | of_node_full_name(np)); | 155 | of_node_full_name(child)); |
| 155 | *flags |= OF_GPIO_ACTIVE_LOW; | 156 | *flags |= OF_GPIO_ACTIVE_LOW; |
| 156 | } | 157 | } |
| 157 | break; | 158 | break; |
| @@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip) | |||
| 717 | 718 | ||
| 718 | of_node_get(chip->of_node); | 719 | of_node_get(chip->of_node); |
| 719 | 720 | ||
| 720 | return of_gpiochip_scan_gpios(chip); | 721 | status = of_gpiochip_scan_gpios(chip); |
| 722 | if (status) { | ||
| 723 | of_node_put(chip->of_node); | ||
| 724 | gpiochip_remove_pin_ranges(chip); | ||
| 725 | } | ||
| 726 | |||
| 727 | return status; | ||
| 721 | } | 728 | } |
| 722 | 729 | ||
| 723 | void of_gpiochip_remove(struct gpio_chip *chip) | 730 | void of_gpiochip_remove(struct gpio_chip *chip) |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 144af0733581..0495bf1d480a 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -2776,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) | |||
| 2776 | } | 2776 | } |
| 2777 | 2777 | ||
| 2778 | config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce); | 2778 | config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce); |
| 2779 | return gpio_set_config(chip, gpio_chip_hwgpio(desc), config); | 2779 | return chip->set_config(chip, gpio_chip_hwgpio(desc), config); |
| 2780 | } | 2780 | } |
| 2781 | EXPORT_SYMBOL_GPL(gpiod_set_debounce); | 2781 | EXPORT_SYMBOL_GPL(gpiod_set_debounce); |
| 2782 | 2782 | ||
| @@ -2813,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) | |||
| 2813 | packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE, | 2813 | packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE, |
| 2814 | !transitory); | 2814 | !transitory); |
| 2815 | gpio = gpio_chip_hwgpio(desc); | 2815 | gpio = gpio_chip_hwgpio(desc); |
| 2816 | rc = gpio_set_config(chip, gpio, packed); | 2816 | rc = chip->set_config(chip, gpio, packed); |
| 2817 | if (rc == -ENOTSUPP) { | 2817 | if (rc == -ENOTSUPP) { |
| 2818 | dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n", | 2818 | dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n", |
| 2819 | gpio); | 2819 | gpio); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4f8fb4ecde34..ac0d646a7b74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -3625,6 +3625,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, | |||
| 3625 | struct pci_dev *pdev = adev->pdev; | 3625 | struct pci_dev *pdev = adev->pdev; |
| 3626 | enum pci_bus_speed cur_speed; | 3626 | enum pci_bus_speed cur_speed; |
| 3627 | enum pcie_link_width cur_width; | 3627 | enum pcie_link_width cur_width; |
| 3628 | u32 ret = 1; | ||
| 3628 | 3629 | ||
| 3629 | *speed = PCI_SPEED_UNKNOWN; | 3630 | *speed = PCI_SPEED_UNKNOWN; |
| 3630 | *width = PCIE_LNK_WIDTH_UNKNOWN; | 3631 | *width = PCIE_LNK_WIDTH_UNKNOWN; |
| @@ -3632,6 +3633,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, | |||
| 3632 | while (pdev) { | 3633 | while (pdev) { |
| 3633 | cur_speed = pcie_get_speed_cap(pdev); | 3634 | cur_speed = pcie_get_speed_cap(pdev); |
| 3634 | cur_width = pcie_get_width_cap(pdev); | 3635 | cur_width = pcie_get_width_cap(pdev); |
| 3636 | ret = pcie_bandwidth_available(adev->pdev, NULL, | ||
| 3637 | NULL, &cur_width); | ||
| 3638 | if (!ret) | ||
| 3639 | cur_width = PCIE_LNK_WIDTH_RESRV; | ||
| 3635 | 3640 | ||
| 3636 | if (cur_speed != PCI_SPEED_UNKNOWN) { | 3641 | if (cur_speed != PCI_SPEED_UNKNOWN) { |
| 3637 | if (*speed == PCI_SPEED_UNKNOWN) | 3642 | if (*speed == PCI_SPEED_UNKNOWN) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d0309e8c9d12..a11db2b1a63f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | |||
| @@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) | |||
| 2405 | /* disable CG */ | 2405 | /* disable CG */ |
| 2406 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); | 2406 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); |
| 2407 | 2407 | ||
| 2408 | adev->gfx.rlc.funcs->reset(adev); | ||
| 2409 | |||
| 2410 | gfx_v9_0_init_pg(adev); | 2408 | gfx_v9_0_init_pg(adev); |
| 2411 | 2409 | ||
| 2412 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { | 2410 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index fb27783d7a54..81127f7d6ed1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
| @@ -5429,9 +5429,11 @@ static void get_freesync_config_for_crtc( | |||
| 5429 | struct amdgpu_dm_connector *aconnector = | 5429 | struct amdgpu_dm_connector *aconnector = |
| 5430 | to_amdgpu_dm_connector(new_con_state->base.connector); | 5430 | to_amdgpu_dm_connector(new_con_state->base.connector); |
| 5431 | struct drm_display_mode *mode = &new_crtc_state->base.mode; | 5431 | struct drm_display_mode *mode = &new_crtc_state->base.mode; |
| 5432 | int vrefresh = drm_mode_vrefresh(mode); | ||
| 5432 | 5433 | ||
| 5433 | new_crtc_state->vrr_supported = new_con_state->freesync_capable && | 5434 | new_crtc_state->vrr_supported = new_con_state->freesync_capable && |
| 5434 | aconnector->min_vfreq <= drm_mode_vrefresh(mode); | 5435 | vrefresh >= aconnector->min_vfreq && |
| 5436 | vrefresh <= aconnector->max_vfreq; | ||
| 5435 | 5437 | ||
| 5436 | if (new_crtc_state->vrr_supported) { | 5438 | if (new_crtc_state->vrr_supported) { |
| 5437 | new_crtc_state->stream->ignore_msa_timing_param = true; | 5439 | new_crtc_state->stream->ignore_msa_timing_param = true; |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 4eba3c4800b6..ea18e9c2d8ce 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
| @@ -2660,12 +2660,18 @@ void core_link_enable_stream( | |||
| 2660 | void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) | 2660 | void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) |
| 2661 | { | 2661 | { |
| 2662 | struct dc *core_dc = pipe_ctx->stream->ctx->dc; | 2662 | struct dc *core_dc = pipe_ctx->stream->ctx->dc; |
| 2663 | struct dc_stream_state *stream = pipe_ctx->stream; | ||
| 2663 | 2664 | ||
| 2664 | core_dc->hwss.blank_stream(pipe_ctx); | 2665 | core_dc->hwss.blank_stream(pipe_ctx); |
| 2665 | 2666 | ||
| 2666 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) | 2667 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) |
| 2667 | deallocate_mst_payload(pipe_ctx); | 2668 | deallocate_mst_payload(pipe_ctx); |
| 2668 | 2669 | ||
| 2670 | if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) | ||
| 2671 | dal_ddc_service_write_scdc_data( | ||
| 2672 | stream->link->ddc, 0, | ||
| 2673 | stream->timing.flags.LTE_340MCSC_SCRAMBLE); | ||
| 2674 | |||
| 2669 | core_dc->hwss.disable_stream(pipe_ctx, option); | 2675 | core_dc->hwss.disable_stream(pipe_ctx, option); |
| 2670 | 2676 | ||
| 2671 | disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); | 2677 | disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 9aa7bec1b5fe..23b5b94a4939 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | |||
| @@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) | |||
| 91 | * MP0CLK DS | 91 | * MP0CLK DS |
| 92 | */ | 92 | */ |
| 93 | data->registry_data.disallowed_features = 0xE0041C00; | 93 | data->registry_data.disallowed_features = 0xE0041C00; |
| 94 | /* ECC feature should be disabled on old SMUs */ | ||
| 95 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); | ||
| 96 | hwmgr->smu_version = smum_get_argument(hwmgr); | ||
| 97 | if (hwmgr->smu_version < 0x282100) | ||
| 98 | data->registry_data.disallowed_features |= FEATURE_ECC_MASK; | ||
| 99 | |||
| 94 | data->registry_data.od_state_in_dc_support = 0; | 100 | data->registry_data.od_state_in_dc_support = 0; |
| 95 | data->registry_data.thermal_support = 1; | 101 | data->registry_data.thermal_support = 1; |
| 96 | data->registry_data.skip_baco_hardware = 0; | 102 | data->registry_data.skip_baco_hardware = 0; |
| @@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
| 357 | data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; | 363 | data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; |
| 358 | data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; | 364 | data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; |
| 359 | data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; | 365 | data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; |
| 366 | data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT; | ||
| 360 | 367 | ||
| 361 | for (i = 0; i < GNLD_FEATURES_MAX; i++) { | 368 | for (i = 0; i < GNLD_FEATURES_MAX; i++) { |
| 362 | data->smu_features[i].smu_feature_bitmap = | 369 | data->smu_features[i].smu_feature_bitmap = |
| @@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) | |||
| 3020 | "FCLK_DS", | 3027 | "FCLK_DS", |
| 3021 | "MP1CLK_DS", | 3028 | "MP1CLK_DS", |
| 3022 | "MP0CLK_DS", | 3029 | "MP0CLK_DS", |
| 3023 | "XGMI"}; | 3030 | "XGMI", |
| 3031 | "ECC"}; | ||
| 3024 | static const char *output_title[] = { | 3032 | static const char *output_title[] = { |
| 3025 | "FEATURES", | 3033 | "FEATURES", |
| 3026 | "BITMASK", | 3034 | "BITMASK", |
| @@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) | |||
| 3462 | struct vega20_single_dpm_table *dpm_table; | 3470 | struct vega20_single_dpm_table *dpm_table; |
| 3463 | bool vblank_too_short = false; | 3471 | bool vblank_too_short = false; |
| 3464 | bool disable_mclk_switching; | 3472 | bool disable_mclk_switching; |
| 3473 | bool disable_fclk_switching; | ||
| 3465 | uint32_t i, latency; | 3474 | uint32_t i, latency; |
| 3466 | 3475 | ||
| 3467 | disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && | 3476 | disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && |
| @@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) | |||
| 3537 | if (hwmgr->display_config->nb_pstate_switch_disable) | 3546 | if (hwmgr->display_config->nb_pstate_switch_disable) |
| 3538 | dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; | 3547 | dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; |
| 3539 | 3548 | ||
| 3549 | if ((disable_mclk_switching && | ||
| 3550 | (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) || | ||
| 3551 | hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value) | ||
| 3552 | disable_fclk_switching = true; | ||
| 3553 | else | ||
| 3554 | disable_fclk_switching = false; | ||
| 3555 | |||
| 3540 | /* fclk */ | 3556 | /* fclk */ |
| 3541 | dpm_table = &(data->dpm_table.fclk_table); | 3557 | dpm_table = &(data->dpm_table.fclk_table); |
| 3542 | dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; | 3558 | dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; |
| 3543 | dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; | 3559 | dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; |
| 3544 | dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; | 3560 | dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; |
| 3545 | dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; | 3561 | dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; |
| 3546 | if (hwmgr->display_config->nb_pstate_switch_disable) | 3562 | if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching) |
| 3547 | dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; | 3563 | dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; |
| 3548 | 3564 | ||
| 3549 | /* vclk */ | 3565 | /* vclk */ |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index a5bc758ae097..ac2a3118a0ae 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | |||
| @@ -80,6 +80,7 @@ enum { | |||
| 80 | GNLD_DS_MP1CLK, | 80 | GNLD_DS_MP1CLK, |
| 81 | GNLD_DS_MP0CLK, | 81 | GNLD_DS_MP0CLK, |
| 82 | GNLD_XGMI, | 82 | GNLD_XGMI, |
| 83 | GNLD_ECC, | ||
| 83 | 84 | ||
| 84 | GNLD_FEATURES_MAX | 85 | GNLD_FEATURES_MAX |
| 85 | }; | 86 | }; |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 63d5cf691549..195c4ae67058 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | |||
| @@ -99,7 +99,7 @@ | |||
| 99 | #define FEATURE_DS_MP1CLK_BIT 30 | 99 | #define FEATURE_DS_MP1CLK_BIT 30 |
| 100 | #define FEATURE_DS_MP0CLK_BIT 31 | 100 | #define FEATURE_DS_MP0CLK_BIT 31 |
| 101 | #define FEATURE_XGMI_BIT 32 | 101 | #define FEATURE_XGMI_BIT 32 |
| 102 | #define FEATURE_SPARE_33_BIT 33 | 102 | #define FEATURE_ECC_BIT 33 |
| 103 | #define FEATURE_SPARE_34_BIT 34 | 103 | #define FEATURE_SPARE_34_BIT 34 |
| 104 | #define FEATURE_SPARE_35_BIT 35 | 104 | #define FEATURE_SPARE_35_BIT 35 |
| 105 | #define FEATURE_SPARE_36_BIT 36 | 105 | #define FEATURE_SPARE_36_BIT 36 |
| @@ -165,7 +165,8 @@ | |||
| 165 | #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) | 165 | #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) |
| 166 | #define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) | 166 | #define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) |
| 167 | #define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) | 167 | #define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) |
| 168 | #define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT ) | 168 | #define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT ) |
| 169 | #define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT ) | ||
| 169 | 170 | ||
| 170 | #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 | 171 | #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 |
| 171 | #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 | 172 | #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 |
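The smu11 header hunk switches the XGMI and ECC masks to 1ULL because those feature bits are 32 and 33: shifting a plain (typically 32-bit) int literal by 32 or more is undefined behaviour, so the mask has to be built from a 64-bit constant. A quick, well-defined illustration (macro name local to this sketch):

#include <inttypes.h>
#include <stdio.h>

#define FEATURE_BIT_MASK(bit)   (1ULL << (bit))

int main(void)
{
        uint64_t xgmi = FEATURE_BIT_MASK(32);
        uint64_t ecc  = FEATURE_BIT_MASK(33);

        /* 0x100000000 and 0x200000000: both beyond the 32-bit range,
         * which is why "1 << bit" could not represent them.
         */
        printf("0x%" PRIx64 " 0x%" PRIx64 "\n", xgmi, ecc);
        return 0;
}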
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 381581b01d48..05bbc2b622fc 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev) | |||
| 376 | synchronize_srcu(&drm_unplug_srcu); | 376 | synchronize_srcu(&drm_unplug_srcu); |
| 377 | 377 | ||
| 378 | drm_dev_unregister(dev); | 378 | drm_dev_unregister(dev); |
| 379 | 379 | drm_dev_put(dev); | |
| 380 | mutex_lock(&drm_global_mutex); | ||
| 381 | if (dev->open_count == 0) | ||
| 382 | drm_dev_put(dev); | ||
| 383 | mutex_unlock(&drm_global_mutex); | ||
| 384 | } | 380 | } |
| 385 | EXPORT_SYMBOL(drm_dev_unplug); | 381 | EXPORT_SYMBOL(drm_dev_unplug); |
| 386 | 382 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0e9349ff2d16..af2ab640cadb 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, | |||
| 1963 | best_depth = fmt->depth; | 1963 | best_depth = fmt->depth; |
| 1964 | } | 1964 | } |
| 1965 | } | 1965 | } |
| 1966 | if (sizes.surface_depth != best_depth) { | 1966 | if (sizes.surface_depth != best_depth && best_depth) { |
| 1967 | DRM_INFO("requested bpp %d, scaled depth down to %d", | 1967 | DRM_INFO("requested bpp %d, scaled depth down to %d", |
| 1968 | sizes.surface_bpp, best_depth); | 1968 | sizes.surface_bpp, best_depth); |
| 1969 | sizes.surface_depth = best_depth; | 1969 | sizes.surface_depth = best_depth; |
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 83a5bbca6e7e..7caa3c7ed978 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c | |||
| @@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp) | |||
| 489 | 489 | ||
| 490 | drm_close_helper(filp); | 490 | drm_close_helper(filp); |
| 491 | 491 | ||
| 492 | if (!--dev->open_count) { | 492 | if (!--dev->open_count) |
| 493 | drm_lastclose(dev); | 493 | drm_lastclose(dev); |
| 494 | if (drm_dev_is_unplugged(dev)) | 494 | |
| 495 | drm_put_dev(dev); | ||
| 496 | } | ||
| 497 | mutex_unlock(&drm_global_mutex); | 495 | mutex_unlock(&drm_global_mutex); |
| 498 | 496 | ||
| 499 | drm_minor_release(minor); | 497 | drm_minor_release(minor); |
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 35b4ec3f7618..3592d04c33b2 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
| @@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s, | |||
| 1441 | } | 1441 | } |
| 1442 | 1442 | ||
| 1443 | if (index_mode) { | 1443 | if (index_mode) { |
| 1444 | if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) { | 1444 | if (guest_gma >= I915_GTT_PAGE_SIZE) { |
| 1445 | ret = -EFAULT; | 1445 | ret = -EFAULT; |
| 1446 | goto err; | 1446 | goto err; |
| 1447 | } | 1447 | } |
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 035479e273be..e3f9caa7839f 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c | |||
| @@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) | |||
| 448 | /** | 448 | /** |
| 449 | * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU | 449 | * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU |
| 450 | * @vgpu: a vGPU | 450 | * @vgpu: a vGPU |
| 451 | * @conncted: link state | 451 | * @connected: link state |
| 452 | * | 452 | * |
| 453 | * This function is used to trigger hotplug interrupt for vGPU | 453 | * This function is used to trigger hotplug interrupt for vGPU |
| 454 | * | 454 | * |
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 3e7e2b80c857..5d887f7cc0d5 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c | |||
| @@ -238,9 +238,6 @@ static int vgpu_get_plane_info(struct drm_device *dev, | |||
| 238 | default: | 238 | default: |
| 239 | gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); | 239 | gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); |
| 240 | } | 240 | } |
| 241 | |||
| 242 | info->size = (((p.stride * p.height * p.bpp) / 8) + | ||
| 243 | (PAGE_SIZE - 1)) >> PAGE_SHIFT; | ||
| 244 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { | 241 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { |
| 245 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); | 242 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); |
| 246 | if (ret) | 243 | if (ret) |
| @@ -262,14 +259,13 @@ static int vgpu_get_plane_info(struct drm_device *dev, | |||
| 262 | info->x_hot = UINT_MAX; | 259 | info->x_hot = UINT_MAX; |
| 263 | info->y_hot = UINT_MAX; | 260 | info->y_hot = UINT_MAX; |
| 264 | } | 261 | } |
| 265 | |||
| 266 | info->size = (((info->stride * c.height * c.bpp) / 8) | ||
| 267 | + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | ||
| 268 | } else { | 262 | } else { |
| 269 | gvt_vgpu_err("invalid plane id:%d\n", plane_id); | 263 | gvt_vgpu_err("invalid plane id:%d\n", plane_id); |
| 270 | return -EINVAL; | 264 | return -EINVAL; |
| 271 | } | 265 | } |
| 272 | 266 | ||
| 267 | info->size = (info->stride * info->height + PAGE_SIZE - 1) | ||
| 268 | >> PAGE_SHIFT; | ||
| 273 | if (info->size == 0) { | 269 | if (info->size == 0) { |
| 274 | gvt_vgpu_err("fb size is zero\n"); | 270 | gvt_vgpu_err("fb size is zero\n"); |
| 275 | return -EINVAL; | 271 | return -EINVAL; |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index c7103dd2d8d5..cf133ef03873 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
| @@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, | |||
| 1882 | } | 1882 | } |
| 1883 | 1883 | ||
| 1884 | list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); | 1884 | list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); |
| 1885 | |||
| 1886 | mutex_lock(&gvt->gtt.ppgtt_mm_lock); | ||
| 1885 | list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); | 1887 | list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); |
| 1888 | mutex_unlock(&gvt->gtt.ppgtt_mm_lock); | ||
| 1889 | |||
| 1886 | return mm; | 1890 | return mm; |
| 1887 | } | 1891 | } |
| 1888 | 1892 | ||
| @@ -1942,7 +1946,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) | |||
| 1942 | */ | 1946 | */ |
| 1943 | void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) | 1947 | void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) |
| 1944 | { | 1948 | { |
| 1945 | atomic_dec(&mm->pincount); | 1949 | atomic_dec_if_positive(&mm->pincount); |
| 1946 | } | 1950 | } |
| 1947 | 1951 | ||
| 1948 | /** | 1952 | /** |
| @@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm) | |||
| 1967 | if (ret) | 1971 | if (ret) |
| 1968 | return ret; | 1972 | return ret; |
| 1969 | 1973 | ||
| 1974 | mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); | ||
| 1970 | list_move_tail(&mm->ppgtt_mm.lru_list, | 1975 | list_move_tail(&mm->ppgtt_mm.lru_list, |
| 1971 | &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); | 1976 | &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); |
| 1972 | 1977 | mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); | |
| 1973 | } | 1978 | } |
| 1974 | 1979 | ||
| 1975 | return 0; | 1980 | return 0; |
| @@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt) | |||
| 1980 | struct intel_vgpu_mm *mm; | 1985 | struct intel_vgpu_mm *mm; |
| 1981 | struct list_head *pos, *n; | 1986 | struct list_head *pos, *n; |
| 1982 | 1987 | ||
| 1988 | mutex_lock(&gvt->gtt.ppgtt_mm_lock); | ||
| 1989 | |||
| 1983 | list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { | 1990 | list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { |
| 1984 | mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); | 1991 | mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); |
| 1985 | 1992 | ||
| @@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt) | |||
| 1987 | continue; | 1994 | continue; |
| 1988 | 1995 | ||
| 1989 | list_del_init(&mm->ppgtt_mm.lru_list); | 1996 | list_del_init(&mm->ppgtt_mm.lru_list); |
| 1997 | mutex_unlock(&gvt->gtt.ppgtt_mm_lock); | ||
| 1990 | invalidate_ppgtt_mm(mm); | 1998 | invalidate_ppgtt_mm(mm); |
| 1991 | return 1; | 1999 | return 1; |
| 1992 | } | 2000 | } |
| 2001 | mutex_unlock(&gvt->gtt.ppgtt_mm_lock); | ||
| 1993 | return 0; | 2002 | return 0; |
| 1994 | } | 2003 | } |
| 1995 | 2004 | ||
| @@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
| 2659 | } | 2668 | } |
| 2660 | } | 2669 | } |
| 2661 | INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); | 2670 | INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); |
| 2671 | mutex_init(&gvt->gtt.ppgtt_mm_lock); | ||
| 2662 | return 0; | 2672 | return 0; |
| 2663 | } | 2673 | } |
| 2664 | 2674 | ||
| @@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) | |||
| 2699 | list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { | 2709 | list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { |
| 2700 | mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); | 2710 | mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); |
| 2701 | if (mm->type == INTEL_GVT_MM_PPGTT) { | 2711 | if (mm->type == INTEL_GVT_MM_PPGTT) { |
| 2712 | mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock); | ||
| 2702 | list_del_init(&mm->ppgtt_mm.lru_list); | 2713 | list_del_init(&mm->ppgtt_mm.lru_list); |
| 2714 | mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock); | ||
| 2703 | if (mm->ppgtt_mm.shadowed) | 2715 | if (mm->ppgtt_mm.shadowed) |
| 2704 | invalidate_ppgtt_mm(mm); | 2716 | invalidate_ppgtt_mm(mm); |
| 2705 | } | 2717 | } |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index d8cb04cc946d..edb610dc5d86 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h | |||
| @@ -88,6 +88,7 @@ struct intel_gvt_gtt { | |||
| 88 | void (*mm_free_page_table)(struct intel_vgpu_mm *mm); | 88 | void (*mm_free_page_table)(struct intel_vgpu_mm *mm); |
| 89 | struct list_head oos_page_use_list_head; | 89 | struct list_head oos_page_use_list_head; |
| 90 | struct list_head oos_page_free_list_head; | 90 | struct list_head oos_page_free_list_head; |
| 91 | struct mutex ppgtt_mm_lock; | ||
| 91 | struct list_head ppgtt_mm_lru_list_head; | 92 | struct list_head ppgtt_mm_lru_list_head; |
| 92 | 93 | ||
| 93 | struct page *scratch_page; | 94 | struct page *scratch_page; |
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 7d84cfb9051a..7902fb162d09 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c | |||
| @@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { | |||
| 132 | 132 | ||
| 133 | {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ | 133 | {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ |
| 134 | {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ | 134 | {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ |
| 135 | {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */ | ||
| 135 | 136 | ||
| 136 | {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ | 137 | {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ |
| 137 | {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ | 138 | {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 1bb8f936fdaa..05b953793316 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
| @@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, | |||
| 346 | int i = 0; | 346 | int i = 0; |
| 347 | 347 | ||
| 348 | if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) | 348 | if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) |
| 349 | return -1; | 349 | return -EINVAL; |
| 350 | 350 | ||
| 351 | if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { | 351 | if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { |
| 352 | px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0]; | 352 | px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0]; |
| @@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) | |||
| 410 | if (workload->shadow) | 410 | if (workload->shadow) |
| 411 | return 0; | 411 | return 0; |
| 412 | 412 | ||
| 413 | ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); | ||
| 414 | if (ret < 0) { | ||
| 415 | gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); | ||
| 416 | return ret; | ||
| 417 | } | ||
| 418 | |||
| 419 | /* pin shadow context by gvt even the shadow context will be pinned | 413 | /* pin shadow context by gvt even the shadow context will be pinned |
| 420 | * when i915 alloc request. That is because gvt will update the guest | 414 | * when i915 alloc request. That is because gvt will update the guest |
| 421 | * context from shadow context when workload is completed, and at that | 415 | * context from shadow context when workload is completed, and at that |
| @@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
| 678 | { | 672 | { |
| 679 | struct intel_vgpu *vgpu = workload->vgpu; | 673 | struct intel_vgpu *vgpu = workload->vgpu; |
| 680 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 674 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
| 675 | struct intel_vgpu_submission *s = &vgpu->submission; | ||
| 676 | struct i915_gem_context *shadow_ctx = s->shadow_ctx; | ||
| 677 | struct i915_request *rq; | ||
| 681 | int ring_id = workload->ring_id; | 678 | int ring_id = workload->ring_id; |
| 682 | int ret; | 679 | int ret; |
| 683 | 680 | ||
| @@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
| 687 | mutex_lock(&vgpu->vgpu_lock); | 684 | mutex_lock(&vgpu->vgpu_lock); |
| 688 | mutex_lock(&dev_priv->drm.struct_mutex); | 685 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 689 | 686 | ||
| 687 | ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); | ||
| 688 | if (ret < 0) { | ||
| 689 | gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); | ||
| 690 | goto err_req; | ||
| 691 | } | ||
| 692 | |||
| 690 | ret = intel_gvt_workload_req_alloc(workload); | 693 | ret = intel_gvt_workload_req_alloc(workload); |
| 691 | if (ret) | 694 | if (ret) |
| 692 | goto err_req; | 695 | goto err_req; |
| @@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
| 703 | 706 | ||
| 704 | ret = prepare_workload(workload); | 707 | ret = prepare_workload(workload); |
| 705 | out: | 708 | out: |
| 709 | if (ret) { | ||
| 710 | /* We might still need to add request with | ||
| 711 | * clean ctx to retire it properly.. | ||
| 712 | */ | ||
| 713 | rq = fetch_and_zero(&workload->req); | ||
| 714 | i915_request_put(rq); | ||
| 715 | } | ||
| 716 | |||
| 706 | if (!IS_ERR_OR_NULL(workload->req)) { | 717 | if (!IS_ERR_OR_NULL(workload->req)) { |
| 707 | gvt_dbg_sched("ring id %d submit workload to i915 %p\n", | 718 | gvt_dbg_sched("ring id %d submit workload to i915 %p\n", |
| 708 | ring_id, workload->req); | 719 | ring_id, workload->req); |
| @@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload( | |||
| 739 | goto out; | 750 | goto out; |
| 740 | } | 751 | } |
| 741 | 752 | ||
| 742 | if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) | 753 | if (!scheduler->current_vgpu->active || |
| 754 | list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) | ||
| 743 | goto out; | 755 | goto out; |
| 744 | 756 | ||
| 745 | /* | 757 | /* |
| @@ -1474,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, | |||
| 1474 | intel_runtime_pm_put_unchecked(dev_priv); | 1486 | intel_runtime_pm_put_unchecked(dev_priv); |
| 1475 | } | 1487 | } |
| 1476 | 1488 | ||
| 1477 | if (ret && (vgpu_is_vm_unhealthy(ret))) { | 1489 | if (ret) { |
| 1478 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); | 1490 | if (vgpu_is_vm_unhealthy(ret)) |
| 1491 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); | ||
| 1479 | intel_vgpu_destroy_workload(workload); | 1492 | intel_vgpu_destroy_workload(workload); |
| 1480 | return ERR_PTR(ret); | 1493 | return ERR_PTR(ret); |
| 1481 | } | 1494 | } |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0bd890c04fe4..f6f6e5b78e97 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) | |||
| 4830 | ret = drm_modeset_lock(&dev->mode_config.connection_mutex, | 4830 | ret = drm_modeset_lock(&dev->mode_config.connection_mutex, |
| 4831 | &ctx); | 4831 | &ctx); |
| 4832 | if (ret) { | 4832 | if (ret) { |
| 4833 | ret = -EINTR; | 4833 | if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) { |
| 4834 | try_again = true; | ||
| 4835 | continue; | ||
| 4836 | } | ||
| 4834 | break; | 4837 | break; |
| 4835 | } | 4838 | } |
| 4836 | crtc = connector->state->crtc; | 4839 | crtc = connector->state->crtc; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9adc7bb9e69c..a67a63b5aa84 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void) | |||
| 2346 | INTEL_DEVID(dev_priv) == 0x5915 || \ | 2346 | INTEL_DEVID(dev_priv) == 0x5915 || \ |
| 2347 | INTEL_DEVID(dev_priv) == 0x591E) | 2347 | INTEL_DEVID(dev_priv) == 0x591E) |
| 2348 | #define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ | 2348 | #define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ |
| 2349 | INTEL_DEVID(dev_priv) == 0x87C0) | 2349 | INTEL_DEVID(dev_priv) == 0x87C0 || \ |
| 2350 | INTEL_DEVID(dev_priv) == 0x87CA) | ||
| 2350 | #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ | 2351 | #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ |
| 2351 | INTEL_INFO(dev_priv)->gt == 2) | 2352 | INTEL_INFO(dev_priv)->gt == 2) |
| 2352 | #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ | 2353 | #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 638a586469f9..047855dd8c6b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -2863,7 +2863,7 @@ enum i915_power_well_id { | |||
| 2863 | #define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) | 2863 | #define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) |
| 2864 | #define GEN11_GT_VDBOX_DISABLE_MASK 0xff | 2864 | #define GEN11_GT_VDBOX_DISABLE_MASK 0xff |
| 2865 | #define GEN11_GT_VEBOX_DISABLE_SHIFT 16 | 2865 | #define GEN11_GT_VEBOX_DISABLE_SHIFT 16 |
| 2866 | #define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT) | 2866 | #define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT) |
| 2867 | 2867 | ||
| 2868 | #define GEN11_EU_DISABLE _MMIO(0x9134) | 2868 | #define GEN11_EU_DISABLE _MMIO(0x9134) |
| 2869 | #define GEN11_EU_DIS_MASK 0xFF | 2869 | #define GEN11_EU_DIS_MASK 0xFF |
| @@ -9243,7 +9243,7 @@ enum skl_power_gate { | |||
| 9243 | #define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ | 9243 | #define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ |
| 9244 | _TRANS_DDI_FUNC_CTL2_A) | 9244 | _TRANS_DDI_FUNC_CTL2_A) |
| 9245 | #define PORT_SYNC_MODE_ENABLE (1 << 4) | 9245 | #define PORT_SYNC_MODE_ENABLE (1 << 4) |
| 9246 | #define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0) | 9246 | #define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0) |
| 9247 | #define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) | 9247 | #define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) |
| 9248 | #define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 | 9248 | #define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 |
| 9249 | 9249 | ||
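The i915_reg.h hunk fixes a one-character macro bug: "(x) < 0" yields a boolean 0/1, while the intended "(x) << 0" places the value in the low bits of the register field. A tiny demonstration, with macro names local to this sketch:

#include <stdio.h>

#define MASTER_SELECT_BROKEN(x) ((x) < 0)
#define MASTER_SELECT_FIXED(x)  ((x) << 0)

int main(void)
{
        int transcoder = 5;

        printf("broken=%d fixed=%d\n",
               MASTER_SELECT_BROKEN(transcoder),  /* 0: boolean result */
               MASTER_SELECT_FIXED(transcoder));  /* 5: actual field value */
        return 0;
}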
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 32dce7176f63..b9b0ea4e2404 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c | |||
| @@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg) | |||
| 455 | struct i915_gem_context *ctx; | 455 | struct i915_gem_context *ctx; |
| 456 | 456 | ||
| 457 | ctx = live_context(i915, file); | 457 | ctx = live_context(i915, file); |
| 458 | if (!ctx) | 458 | if (IS_ERR(ctx)) |
| 459 | break; | 459 | break; |
| 460 | 460 | ||
| 461 | /* We will need some GGTT space for the rq's context */ | 461 | /* We will need some GGTT space for the rq's context */ |
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 2281ed3eb774..8a4ebcb6405c 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c | |||
| @@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) | |||
| 337 | 337 | ||
| 338 | ret = drm_dev_register(drm, 0); | 338 | ret = drm_dev_register(drm, 0); |
| 339 | if (ret) | 339 | if (ret) |
| 340 | goto free_drm; | 340 | goto uninstall_irq; |
| 341 | 341 | ||
| 342 | drm_fbdev_generic_setup(drm, 32); | 342 | drm_fbdev_generic_setup(drm, 32); |
| 343 | 343 | ||
| 344 | return 0; | 344 | return 0; |
| 345 | 345 | ||
| 346 | uninstall_irq: | ||
| 347 | drm_irq_uninstall(drm); | ||
| 346 | free_drm: | 348 | free_drm: |
| 347 | drm_dev_put(drm); | 349 | drm_dev_put(drm); |
| 348 | 350 | ||
| @@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev) | |||
| 356 | 358 | ||
| 357 | static void meson_drv_unbind(struct device *dev) | 359 | static void meson_drv_unbind(struct device *dev) |
| 358 | { | 360 | { |
| 359 | struct drm_device *drm = dev_get_drvdata(dev); | 361 | struct meson_drm *priv = dev_get_drvdata(dev); |
| 360 | struct meson_drm *priv = drm->dev_private; | 362 | struct drm_device *drm = priv->drm; |
| 361 | 363 | ||
| 362 | if (priv->canvas) { | 364 | if (priv->canvas) { |
| 363 | meson_canvas_free(priv->canvas, priv->canvas_id_osd1); | 365 | meson_canvas_free(priv->canvas, priv->canvas_id_osd1); |
| @@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev) | |||
| 367 | } | 369 | } |
| 368 | 370 | ||
| 369 | drm_dev_unregister(drm); | 371 | drm_dev_unregister(drm); |
| 372 | drm_irq_uninstall(drm); | ||
| 370 | drm_kms_helper_poll_fini(drm); | 373 | drm_kms_helper_poll_fini(drm); |
| 371 | drm_mode_config_cleanup(drm); | 374 | drm_mode_config_cleanup(drm); |
| 372 | drm_dev_put(drm); | 375 | drm_dev_put(drm); |
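The meson_drv hunk extends the usual acquire-in-order, release-in-reverse goto unwinding: a drm_dev_register() failure now jumps to a new uninstall_irq label before the device reference is dropped. A compact sketch of that shape, using stand-in functions rather than the real DRM calls:

#include <stdio.h>

static int irq_install(void)     { printf("irq installed\n");   return 0; }
static void irq_uninstall(void)  { printf("irq uninstalled\n"); }
static int dev_register(void)    { printf("register failed\n"); return -1; } /* simulated failure */
static void dev_put(void)        { printf("device released\n"); }

static int bind_master(void)
{
        int ret;

        ret = irq_install();
        if (ret)
                goto free_drm;

        ret = dev_register();
        if (ret)
                goto uninstall_irq;

        return 0;

uninstall_irq:
        /* undo the IRQ setup before dropping the device reference */
        irq_uninstall();
free_drm:
        dev_put();
        return ret;
}

int main(void)
{
        return bind_master() ? 1 : 0;
}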
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index e28814f4ea6c..563953ec6ad0 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c | |||
| @@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector, | |||
| 569 | DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); | 569 | DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); |
| 570 | 570 | ||
| 571 | /* If sink max TMDS clock, we reject the mode */ | 571 | /* If sink max TMDS clock, we reject the mode */ |
| 572 | if (mode->clock > connector->display_info.max_tmds_clock) | 572 | if (connector->display_info.max_tmds_clock && |
| 573 | mode->clock > connector->display_info.max_tmds_clock) | ||
| 573 | return MODE_BAD; | 574 | return MODE_BAD; |
| 574 | 575 | ||
| 575 | /* Check against non-VIC supported modes */ | 576 | /* Check against non-VIC supported modes */ |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index c7d4c6073ea5..0d4ade9d4722 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
| @@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop) | |||
| 541 | clk_disable(vop->hclk); | 541 | clk_disable(vop->hclk); |
| 542 | } | 542 | } |
| 543 | 543 | ||
| 544 | static void vop_win_disable(struct vop *vop, const struct vop_win_data *win) | ||
| 545 | { | ||
| 546 | if (win->phy->scl && win->phy->scl->ext) { | ||
| 547 | VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE); | ||
| 548 | VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE); | ||
| 549 | VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE); | ||
| 550 | VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE); | ||
| 551 | } | ||
| 552 | |||
| 553 | VOP_WIN_SET(vop, win, enable, 0); | ||
| 554 | } | ||
| 555 | |||
| 544 | static int vop_enable(struct drm_crtc *crtc) | 556 | static int vop_enable(struct drm_crtc *crtc) |
| 545 | { | 557 | { |
| 546 | struct vop *vop = to_vop(crtc); | 558 | struct vop *vop = to_vop(crtc); |
| @@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc) | |||
| 586 | struct vop_win *vop_win = &vop->win[i]; | 598 | struct vop_win *vop_win = &vop->win[i]; |
| 587 | const struct vop_win_data *win = vop_win->data; | 599 | const struct vop_win_data *win = vop_win->data; |
| 588 | 600 | ||
| 589 | VOP_WIN_SET(vop, win, enable, 0); | 601 | vop_win_disable(vop, win); |
| 590 | } | 602 | } |
| 591 | spin_unlock(&vop->reg_lock); | 603 | spin_unlock(&vop->reg_lock); |
| 592 | 604 | ||
| @@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane, | |||
| 735 | 747 | ||
| 736 | spin_lock(&vop->reg_lock); | 748 | spin_lock(&vop->reg_lock); |
| 737 | 749 | ||
| 738 | VOP_WIN_SET(vop, win, enable, 0); | 750 | vop_win_disable(vop, win); |
| 739 | 751 | ||
| 740 | spin_unlock(&vop->reg_lock); | 752 | spin_unlock(&vop->reg_lock); |
| 741 | } | 753 | } |
| @@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop) | |||
| 1622 | int channel = i * 2 + 1; | 1634 | int channel = i * 2 + 1; |
| 1623 | 1635 | ||
| 1624 | VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel); | 1636 | VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel); |
| 1625 | VOP_WIN_SET(vop, win, enable, 0); | 1637 | vop_win_disable(vop, win); |
| 1626 | VOP_WIN_SET(vop, win, gate, 1); | 1638 | VOP_WIN_SET(vop, win, gate, 1); |
| 1627 | } | 1639 | } |
| 1628 | 1640 | ||
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index ba9b3cfb8c3d..b3436c2aed68 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c | |||
| @@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane, | |||
| 378 | static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, | 378 | static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, |
| 379 | struct drm_plane_state *old_state) | 379 | struct drm_plane_state *old_state) |
| 380 | { | 380 | { |
| 381 | struct tegra_dc *dc = to_tegra_dc(old_state->crtc); | ||
| 382 | struct tegra_plane *p = to_tegra_plane(plane); | 381 | struct tegra_plane *p = to_tegra_plane(plane); |
| 382 | struct tegra_dc *dc; | ||
| 383 | u32 value; | 383 | u32 value; |
| 384 | 384 | ||
| 385 | /* rien ne va plus */ | 385 | /* rien ne va plus */ |
| 386 | if (!old_state || !old_state->crtc) | 386 | if (!old_state || !old_state->crtc) |
| 387 | return; | 387 | return; |
| 388 | 388 | ||
| 389 | dc = to_tegra_dc(old_state->crtc); | ||
| 390 | |||
| 389 | /* | 391 | /* |
| 390 | * XXX Legacy helpers seem to sometimes call ->atomic_disable() even | 392 | * XXX Legacy helpers seem to sometimes call ->atomic_disable() even |
| 391 | * on planes that are already disabled. Make sure we fallback to the | 393 | * on planes that are already disabled. Make sure we fallback to the |
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index 39bfed9623de..982ce37ecde1 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c | |||
| @@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic) | |||
| 106 | if (vic->booted) | 106 | if (vic->booted) |
| 107 | return 0; | 107 | return 0; |
| 108 | 108 | ||
| 109 | #ifdef CONFIG_IOMMU_API | ||
| 109 | if (vic->config->supports_sid) { | 110 | if (vic->config->supports_sid) { |
| 110 | struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); | 111 | struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); |
| 111 | u32 value; | 112 | u32 value; |
| @@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic) | |||
| 121 | vic_writel(vic, value, VIC_THI_STREAMID1); | 122 | vic_writel(vic, value, VIC_THI_STREAMID1); |
| 122 | } | 123 | } |
| 123 | } | 124 | } |
| 125 | #endif | ||
| 124 | 126 | ||
| 125 | /* setup clockgating registers */ | 127 | /* setup clockgating registers */ |
| 126 | vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) | | 128 | vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) | |
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index 66885c24590f..c1bd5e3d9e4a 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c | |||
| @@ -18,18 +18,19 @@ | |||
| 18 | #include "udl_connector.h" | 18 | #include "udl_connector.h" |
| 19 | #include "udl_drv.h" | 19 | #include "udl_drv.h" |
| 20 | 20 | ||
| 21 | static bool udl_get_edid_block(struct udl_device *udl, int block_idx, | 21 | static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, |
| 22 | u8 *buff) | 22 | size_t len) |
| 23 | { | 23 | { |
| 24 | int ret, i; | 24 | int ret, i; |
| 25 | u8 *read_buff; | 25 | u8 *read_buff; |
| 26 | struct udl_device *udl = data; | ||
| 26 | 27 | ||
| 27 | read_buff = kmalloc(2, GFP_KERNEL); | 28 | read_buff = kmalloc(2, GFP_KERNEL); |
| 28 | if (!read_buff) | 29 | if (!read_buff) |
| 29 | return false; | 30 | return -1; |
| 30 | 31 | ||
| 31 | for (i = 0; i < EDID_LENGTH; i++) { | 32 | for (i = 0; i < len; i++) { |
| 32 | int bval = (i + block_idx * EDID_LENGTH) << 8; | 33 | int bval = (i + block * EDID_LENGTH) << 8; |
| 33 | ret = usb_control_msg(udl->udev, | 34 | ret = usb_control_msg(udl->udev, |
| 34 | usb_rcvctrlpipe(udl->udev, 0), | 35 | usb_rcvctrlpipe(udl->udev, 0), |
| 35 | (0x02), (0x80 | (0x02 << 5)), bval, | 36 | (0x02), (0x80 | (0x02 << 5)), bval, |
| @@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx, | |||
| 37 | if (ret < 1) { | 38 | if (ret < 1) { |
| 38 | DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); | 39 | DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); |
| 39 | kfree(read_buff); | 40 | kfree(read_buff); |
| 40 | return false; | 41 | return -1; |
| 41 | } | 42 | } |
| 42 | buff[i] = read_buff[1]; | 43 | buf[i] = read_buff[1]; |
| 43 | } | 44 | } |
| 44 | 45 | ||
| 45 | kfree(read_buff); | 46 | kfree(read_buff); |
| 46 | return true; | 47 | return 0; |
| 47 | } | ||
| 48 | |||
| 49 | static bool udl_get_edid(struct udl_device *udl, u8 **result_buff, | ||
| 50 | int *result_buff_size) | ||
| 51 | { | ||
| 52 | int i, extensions; | ||
| 53 | u8 *block_buff = NULL, *buff_ptr; | ||
| 54 | |||
| 55 | block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL); | ||
| 56 | if (block_buff == NULL) | ||
| 57 | return false; | ||
| 58 | |||
| 59 | if (udl_get_edid_block(udl, 0, block_buff) && | ||
| 60 | memchr_inv(block_buff, 0, EDID_LENGTH)) { | ||
| 61 | extensions = ((struct edid *)block_buff)->extensions; | ||
| 62 | if (extensions > 0) { | ||
| 63 | /* we have to read all extensions one by one */ | ||
| 64 | *result_buff_size = EDID_LENGTH * (extensions + 1); | ||
| 65 | *result_buff = kmalloc(*result_buff_size, GFP_KERNEL); | ||
| 66 | buff_ptr = *result_buff; | ||
| 67 | if (buff_ptr == NULL) { | ||
| 68 | kfree(block_buff); | ||
| 69 | return false; | ||
| 70 | } | ||
| 71 | memcpy(buff_ptr, block_buff, EDID_LENGTH); | ||
| 72 | kfree(block_buff); | ||
| 73 | buff_ptr += EDID_LENGTH; | ||
| 74 | for (i = 1; i < extensions; ++i) { | ||
| 75 | if (udl_get_edid_block(udl, i, buff_ptr)) { | ||
| 76 | buff_ptr += EDID_LENGTH; | ||
| 77 | } else { | ||
| 78 | kfree(*result_buff); | ||
| 79 | *result_buff = NULL; | ||
| 80 | return false; | ||
| 81 | } | ||
| 82 | } | ||
| 83 | return true; | ||
| 84 | } | ||
| 85 | /* we have only base edid block */ | ||
| 86 | *result_buff = block_buff; | ||
| 87 | *result_buff_size = EDID_LENGTH; | ||
| 88 | return true; | ||
| 89 | } | ||
| 90 | |||
| 91 | kfree(block_buff); | ||
| 92 | |||
| 93 | return false; | ||
| 94 | } | 48 | } |
| 95 | 49 | ||
| 96 | static int udl_get_modes(struct drm_connector *connector) | 50 | static int udl_get_modes(struct drm_connector *connector) |
| @@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector, | |||
| 122 | static enum drm_connector_status | 76 | static enum drm_connector_status |
| 123 | udl_detect(struct drm_connector *connector, bool force) | 77 | udl_detect(struct drm_connector *connector, bool force) |
| 124 | { | 78 | { |
| 125 | u8 *edid_buff = NULL; | ||
| 126 | int edid_buff_size = 0; | ||
| 127 | struct udl_device *udl = connector->dev->dev_private; | 79 | struct udl_device *udl = connector->dev->dev_private; |
| 128 | struct udl_drm_connector *udl_connector = | 80 | struct udl_drm_connector *udl_connector = |
| 129 | container_of(connector, | 81 | container_of(connector, |
| @@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force) | |||
| 136 | udl_connector->edid = NULL; | 88 | udl_connector->edid = NULL; |
| 137 | } | 89 | } |
| 138 | 90 | ||
| 139 | 91 | udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl); | |
| 140 | if (!udl_get_edid(udl, &edid_buff, &edid_buff_size)) | 92 | if (!udl_connector->edid) |
| 141 | return connector_status_disconnected; | 93 | return connector_status_disconnected; |
| 142 | 94 | ||
| 143 | udl_connector->edid = (struct edid *)edid_buff; | ||
| 144 | |||
| 145 | return connector_status_connected; | 95 | return connector_status_connected; |
| 146 | } | 96 | } |
| 147 | 97 | ||
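For reference, the udl change above replaces the driver's hand-rolled EDID fetch with drm_do_get_edid(), which reads the base block, checks the extension count itself, and calls the supplied callback once per block. A minimal sketch of that callback wiring, assuming a hypothetical my_dev / my_read_edid_bytes() transfer helper rather than the real udl USB request:

/* Sketch only: my_dev and my_read_edid_bytes() are illustrative placeholders. */
static int my_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
        struct my_dev *mydev = data;
        unsigned int offset = block * EDID_LENGTH;  /* byte offset of this block */

        /* Return 0 on success or a negative value on error; drm_do_get_edid()
         * aborts and returns NULL if any block cannot be read. */
        return my_read_edid_bytes(mydev, offset, buf, len);
}

static enum drm_connector_status my_detect(struct drm_connector *connector, bool force)
{
        struct my_dev *mydev = connector->dev->dev_private;

        /* The caller owns the returned EDID and must kfree() it later. */
        mydev->edid = drm_do_get_edid(connector, my_get_edid_block, mydev);
        if (!mydev->edid)
                return connector_status_disconnected;

        return connector_status_connected;
}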
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 5930facd6d2d..11a8f99ba18c 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c | |||
| @@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, | |||
| 191 | ret = drm_gem_handle_create(file, &obj->base, handle); | 191 | ret = drm_gem_handle_create(file, &obj->base, handle); |
| 192 | drm_gem_object_put_unlocked(&obj->base); | 192 | drm_gem_object_put_unlocked(&obj->base); |
| 193 | if (ret) | 193 | if (ret) |
| 194 | goto err; | 194 | return ERR_PTR(ret); |
| 195 | 195 | ||
| 196 | return &obj->base; | 196 | return &obj->base; |
| 197 | |||
| 198 | err: | ||
| 199 | __vgem_gem_destroy(obj); | ||
| 200 | return ERR_PTR(ret); | ||
| 201 | } | 197 | } |
| 202 | 198 | ||
| 203 | static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, | 199 | static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, |
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 138b0bb325cf..69048e73377d 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c | |||
| @@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev, | |||
| 111 | 111 | ||
| 112 | ret = drm_gem_handle_create(file, &obj->gem, handle); | 112 | ret = drm_gem_handle_create(file, &obj->gem, handle); |
| 113 | drm_gem_object_put_unlocked(&obj->gem); | 113 | drm_gem_object_put_unlocked(&obj->gem); |
| 114 | if (ret) { | 114 | if (ret) |
| 115 | drm_gem_object_release(&obj->gem); | ||
| 116 | kfree(obj); | ||
| 117 | return ERR_PTR(ret); | 115 | return ERR_PTR(ret); |
| 118 | } | ||
| 119 | 116 | ||
| 120 | return &obj->gem; | 117 | return &obj->gem; |
| 121 | } | 118 | } |
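Both GEM hunks above lean on the same reference-counting rule: drm_gem_handle_create() takes its own reference for the handle, and the drm_gem_object_put_unlocked() that follows drops the caller's creation reference, so when handle creation fails that put already frees the object and any extra destroy/kfree would be a double free. A hedged sketch of the create-and-publish shape, with my_gem_object_init() standing in for the driver-specific initialisation:

        struct my_gem_object *obj;
        int ret;

        obj = my_gem_object_init(dev, size);      /* returns obj holding one reference */
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        ret = drm_gem_handle_create(file, &obj->base, handle);
        /* Drop the creation reference: the handle (if it was created) now keeps
         * the object alive; if it was not, this put frees the object. */
        drm_gem_object_put_unlocked(&obj->base);
        if (ret)
                return ERR_PTR(ret);

        return &obj->base;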
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 6ca8d322b487..4ca0cdfa6b33 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
| @@ -150,6 +150,7 @@ config HID_ASUS | |||
| 150 | tristate "Asus" | 150 | tristate "Asus" |
| 151 | depends on LEDS_CLASS | 151 | depends on LEDS_CLASS |
| 152 | depends on ASUS_WMI || ASUS_WMI=n | 152 | depends on ASUS_WMI || ASUS_WMI=n |
| 153 | select POWER_SUPPLY | ||
| 153 | ---help--- | 154 | ---help--- |
| 154 | Support for Asus notebook built-in keyboard and touchpad via i2c, and | 155 | Support for Asus notebook built-in keyboard and touchpad via i2c, and |
| 155 | the Asus Republic of Gamers laptop keyboard special keys. | 156 | the Asus Republic of Gamers laptop keyboard special keys. |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9993b692598f..860e21ec6a49 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n) | |||
| 1301 | u32 hid_field_extract(const struct hid_device *hid, u8 *report, | 1301 | u32 hid_field_extract(const struct hid_device *hid, u8 *report, |
| 1302 | unsigned offset, unsigned n) | 1302 | unsigned offset, unsigned n) |
| 1303 | { | 1303 | { |
| 1304 | if (n > 32) { | 1304 | if (n > 256) { |
| 1305 | hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", | 1305 | hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n", |
| 1306 | n, current->comm); | 1306 | n, current->comm); |
| 1307 | n = 32; | 1307 | n = 256; |
| 1308 | } | 1308 | } |
| 1309 | 1309 | ||
| 1310 | return __extract(report, offset, n); | 1310 | return __extract(report, offset, n); |
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index ac9fda1b5a72..1384e57182af 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c | |||
| @@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p) | |||
| 1060 | seq_printf(f, "\n\n"); | 1060 | seq_printf(f, "\n\n"); |
| 1061 | 1061 | ||
| 1062 | /* dump parsed data and input mappings */ | 1062 | /* dump parsed data and input mappings */ |
| 1063 | if (down_interruptible(&hdev->driver_input_lock)) | ||
| 1064 | return 0; | ||
| 1065 | |||
| 1063 | hid_dump_device(hdev, f); | 1066 | hid_dump_device(hdev, f); |
| 1064 | seq_printf(f, "\n"); | 1067 | seq_printf(f, "\n"); |
| 1065 | hid_dump_input_mapping(hdev, f); | 1068 | hid_dump_input_mapping(hdev, f); |
| 1066 | 1069 | ||
| 1070 | up(&hdev->driver_input_lock); | ||
| 1071 | |||
| 1067 | return 0; | 1072 | return 0; |
| 1068 | } | 1073 | } |
| 1069 | 1074 | ||
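The hid-debug change serialises the rdesc dump against driver input setup and teardown using the device's driver_input_lock semaphore. The general pattern for such a lock-guarded debugfs show is roughly the following sketch, with my_device and my_dump_state() as placeholders:

static int my_debug_show(struct seq_file *f, void *p)
{
        struct my_device *mydev = f->private;

        /* Give up (printing nothing) rather than block uninterruptibly
         * if a signal arrives while waiting for the lock. */
        if (down_interruptible(&mydev->lock))
                return 0;

        my_dump_state(mydev, f);        /* placeholder dump helper */

        up(&mydev->lock);
        return 0;
}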
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b6d93f4ad037..adce58f24f76 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -1083,6 +1083,7 @@ | |||
| 1083 | #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 | 1083 | #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 |
| 1084 | #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 | 1084 | #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 |
| 1085 | #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 | 1085 | #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 |
| 1086 | #define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e | ||
| 1086 | 1087 | ||
| 1087 | #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 | 1088 | #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 |
| 1088 | #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 | 1089 | #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 |
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index b10b1922c5bd..1fce0076e7dc 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c | |||
| @@ -998,6 +998,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
| 998 | case 0x1b8: map_key_clear(KEY_VIDEO); break; | 998 | case 0x1b8: map_key_clear(KEY_VIDEO); break; |
| 999 | case 0x1bc: map_key_clear(KEY_MESSENGER); break; | 999 | case 0x1bc: map_key_clear(KEY_MESSENGER); break; |
| 1000 | case 0x1bd: map_key_clear(KEY_INFO); break; | 1000 | case 0x1bd: map_key_clear(KEY_INFO); break; |
| 1001 | case 0x1cb: map_key_clear(KEY_ASSISTANT); break; | ||
| 1001 | case 0x201: map_key_clear(KEY_NEW); break; | 1002 | case 0x201: map_key_clear(KEY_NEW); break; |
| 1002 | case 0x202: map_key_clear(KEY_OPEN); break; | 1003 | case 0x202: map_key_clear(KEY_OPEN); break; |
| 1003 | case 0x203: map_key_clear(KEY_CLOSE); break; | 1004 | case 0x203: map_key_clear(KEY_CLOSE); break; |
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 15ed6177a7a3..199cc256e9d9 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c | |||
| @@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) | |||
| 2111 | kfree(data); | 2111 | kfree(data); |
| 2112 | return -ENOMEM; | 2112 | return -ENOMEM; |
| 2113 | } | 2113 | } |
| 2114 | data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); | ||
| 2115 | if (!data->wq) { | ||
| 2116 | kfree(data->effect_ids); | ||
| 2117 | kfree(data); | ||
| 2118 | return -ENOMEM; | ||
| 2119 | } | ||
| 2120 | |||
| 2114 | data->hidpp = hidpp; | 2121 | data->hidpp = hidpp; |
| 2115 | data->feature_index = feature_index; | 2122 | data->feature_index = feature_index; |
| 2116 | data->version = version; | 2123 | data->version = version; |
| @@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) | |||
| 2155 | /* ignore boost value at response.fap.params[2] */ | 2162 | /* ignore boost value at response.fap.params[2] */ |
| 2156 | 2163 | ||
| 2157 | /* init the hardware command queue */ | 2164 | /* init the hardware command queue */ |
| 2158 | data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); | ||
| 2159 | atomic_set(&data->workqueue_size, 0); | 2165 | atomic_set(&data->workqueue_size, 0); |
| 2160 | 2166 | ||
| 2161 | /* initialize with zero autocenter to get wheel in usable state */ | 2167 | /* initialize with zero autocenter to get wheel in usable state */ |
| @@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) | |||
| 2608 | input_report_rel(mydata->input, REL_Y, v); | 2614 | input_report_rel(mydata->input, REL_Y, v); |
| 2609 | 2615 | ||
| 2610 | v = hid_snto32(data[6], 8); | 2616 | v = hid_snto32(data[6], 8); |
| 2611 | hidpp_scroll_counter_handle_scroll( | 2617 | if (v != 0) |
| 2612 | &hidpp->vertical_wheel_counter, v); | 2618 | hidpp_scroll_counter_handle_scroll( |
| 2619 | &hidpp->vertical_wheel_counter, v); | ||
| 2613 | 2620 | ||
| 2614 | input_sync(mydata->input); | 2621 | input_sync(mydata->input); |
| 2615 | } | 2622 | } |
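The hidpp change also illustrates that create_singlethread_workqueue() can return NULL and must be checked before anything depends on the queue; on failure, the allocations made so far are released in reverse order. In outline (names hypothetical):

        data->wq = create_singlethread_workqueue("my-ff-sendqueue");
        if (!data->wq) {
                kfree(data->effect_ids);        /* undo earlier allocations */
                kfree(data);
                return -ENOMEM;
        }

        /* ... and on the teardown path, once no more work can be queued: */
        destroy_workqueue(data->wq);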
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 953908f2267c..77ffba48cc73 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c | |||
| @@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
| 715 | { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, | 715 | { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, |
| 716 | { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, | 716 | { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, |
| 717 | { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, | 717 | { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, |
| 718 | { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, | ||
| 719 | { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, | 718 | { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, |
| 720 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, | 719 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, |
| 721 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, | 720 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, |
| @@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
| 855 | { } | 854 | { } |
| 856 | }; | 855 | }; |
| 857 | 856 | ||
| 858 | /** | 857 | /* |
| 859 | * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer | 858 | * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer |
| 860 | * | 859 | * |
| 861 | * There are composite devices for which we want to ignore only a certain | 860 | * There are composite devices for which we want to ignore only a certain |
| @@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev) | |||
| 996 | if (hdev->product == 0x0401 && | 995 | if (hdev->product == 0x0401 && |
| 997 | strncmp(hdev->name, "ELAN0800", 8) != 0) | 996 | strncmp(hdev->name, "ELAN0800", 8) != 0) |
| 998 | return true; | 997 | return true; |
| 998 | /* Same with product id 0x0400 */ | ||
| 999 | if (hdev->product == 0x0400 && | ||
| 1000 | strncmp(hdev->name, "QTEC0001", 8) != 0) | ||
| 1001 | return true; | ||
| 999 | break; | 1002 | break; |
| 1000 | } | 1003 | } |
| 1001 | 1004 | ||
| @@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev) | |||
| 1042 | } | 1045 | } |
| 1043 | 1046 | ||
| 1044 | if (bl_entry != NULL) | 1047 | if (bl_entry != NULL) |
| 1045 | dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n", | 1048 | dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n", |
| 1046 | bl_entry->driver_data, bl_entry->vendor, | 1049 | bl_entry->driver_data, bl_entry->vendor, |
| 1047 | bl_entry->product); | 1050 | bl_entry->product); |
| 1048 | 1051 | ||
| @@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev) | |||
| 1209 | quirks |= bl_entry->driver_data; | 1212 | quirks |= bl_entry->driver_data; |
| 1210 | 1213 | ||
| 1211 | if (quirks) | 1214 | if (quirks) |
| 1212 | dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n", | 1215 | dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n", |
| 1213 | quirks, hdev->vendor, hdev->product); | 1216 | quirks, hdev->vendor, hdev->product); |
| 1214 | return quirks; | 1217 | return quirks; |
| 1215 | } | 1218 | } |
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 8141cadfca0e..8dae0f9b819e 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c | |||
| @@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam) | |||
| 499 | static int steam_register(struct steam_device *steam) | 499 | static int steam_register(struct steam_device *steam) |
| 500 | { | 500 | { |
| 501 | int ret; | 501 | int ret; |
| 502 | bool client_opened; | ||
| 502 | 503 | ||
| 503 | /* | 504 | /* |
| 504 | * This function can be called several times in a row with the | 505 | * This function can be called several times in a row with the |
| @@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam) | |||
| 511 | * Unlikely, but getting the serial could fail, and it is not so | 512 | * Unlikely, but getting the serial could fail, and it is not so |
| 512 | * important, so make up a serial number and go on. | 513 | * important, so make up a serial number and go on. |
| 513 | */ | 514 | */ |
| 515 | mutex_lock(&steam->mutex); | ||
| 514 | if (steam_get_serial(steam) < 0) | 516 | if (steam_get_serial(steam) < 0) |
| 515 | strlcpy(steam->serial_no, "XXXXXXXXXX", | 517 | strlcpy(steam->serial_no, "XXXXXXXXXX", |
| 516 | sizeof(steam->serial_no)); | 518 | sizeof(steam->serial_no)); |
| 519 | mutex_unlock(&steam->mutex); | ||
| 517 | 520 | ||
| 518 | hid_info(steam->hdev, "Steam Controller '%s' connected", | 521 | hid_info(steam->hdev, "Steam Controller '%s' connected", |
| 519 | steam->serial_no); | 522 | steam->serial_no); |
| @@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam) | |||
| 528 | } | 531 | } |
| 529 | 532 | ||
| 530 | mutex_lock(&steam->mutex); | 533 | mutex_lock(&steam->mutex); |
| 531 | if (!steam->client_opened) { | 534 | client_opened = steam->client_opened; |
| 535 | if (!client_opened) | ||
| 532 | steam_set_lizard_mode(steam, lizard_mode); | 536 | steam_set_lizard_mode(steam, lizard_mode); |
| 537 | mutex_unlock(&steam->mutex); | ||
| 538 | |||
| 539 | if (!client_opened) | ||
| 533 | ret = steam_input_register(steam); | 540 | ret = steam_input_register(steam); |
| 534 | } else { | 541 | else |
| 535 | ret = 0; | 542 | ret = 0; |
| 536 | } | ||
| 537 | mutex_unlock(&steam->mutex); | ||
| 538 | 543 | ||
| 539 | return ret; | 544 | return ret; |
| 540 | } | 545 | } |
| @@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev) | |||
| 630 | { | 635 | { |
| 631 | struct steam_device *steam = hdev->driver_data; | 636 | struct steam_device *steam = hdev->driver_data; |
| 632 | 637 | ||
| 638 | unsigned long flags; | ||
| 639 | bool connected; | ||
| 640 | |||
| 641 | spin_lock_irqsave(&steam->lock, flags); | ||
| 642 | connected = steam->connected; | ||
| 643 | spin_unlock_irqrestore(&steam->lock, flags); | ||
| 644 | |||
| 633 | mutex_lock(&steam->mutex); | 645 | mutex_lock(&steam->mutex); |
| 634 | steam->client_opened = false; | 646 | steam->client_opened = false; |
| 647 | if (connected) | ||
| 648 | steam_set_lizard_mode(steam, lizard_mode); | ||
| 635 | mutex_unlock(&steam->mutex); | 649 | mutex_unlock(&steam->mutex); |
| 636 | 650 | ||
| 637 | if (steam->connected) { | 651 | if (connected) |
| 638 | steam_set_lizard_mode(steam, lizard_mode); | ||
| 639 | steam_input_register(steam); | 652 | steam_input_register(steam); |
| 640 | } | ||
| 641 | } | 653 | } |
| 642 | 654 | ||
| 643 | static int steam_client_ll_raw_request(struct hid_device *hdev, | 655 | static int steam_client_ll_raw_request(struct hid_device *hdev, |
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 7710d9f957da..0187c9f8fc22 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c | |||
| @@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params, | |||
| 735 | goto cleanup; | 735 | goto cleanup; |
| 736 | } | 736 | } |
| 737 | rc = usb_string(udev, 201, ver_ptr, ver_len); | 737 | rc = usb_string(udev, 201, ver_ptr, ver_len); |
| 738 | if (ver_ptr == NULL) { | ||
| 739 | rc = -ENOMEM; | ||
| 740 | goto cleanup; | ||
| 741 | } | ||
| 742 | if (rc == -EPIPE) { | 738 | if (rc == -EPIPE) { |
| 743 | *ver_ptr = '\0'; | 739 | *ver_ptr = '\0'; |
| 744 | } else if (rc < 0) { | 740 | } else if (rc < 0) { |
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 90164fed08d3..4d1f24ee249c 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c | |||
| @@ -184,6 +184,8 @@ static const struct i2c_hid_quirks { | |||
| 184 | I2C_HID_QUIRK_NO_RUNTIME_PM }, | 184 | I2C_HID_QUIRK_NO_RUNTIME_PM }, |
| 185 | { USB_VENDOR_ID_ELAN, HID_ANY_ID, | 185 | { USB_VENDOR_ID_ELAN, HID_ANY_ID, |
| 186 | I2C_HID_QUIRK_BOGUS_IRQ }, | 186 | I2C_HID_QUIRK_BOGUS_IRQ }, |
| 187 | { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E, | ||
| 188 | I2C_HID_QUIRK_NO_RUNTIME_PM }, | ||
| 187 | { 0, 0 } | 189 | { 0, 0 } |
| 188 | }; | 190 | }; |
| 189 | 191 | ||
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 6f929bfa9fcd..d0f1dfe2bcbb 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
| @@ -1759,6 +1759,7 @@ config SENSORS_VT8231 | |||
| 1759 | config SENSORS_W83773G | 1759 | config SENSORS_W83773G |
| 1760 | tristate "Nuvoton W83773G" | 1760 | tristate "Nuvoton W83773G" |
| 1761 | depends on I2C | 1761 | depends on I2C |
| 1762 | select REGMAP_I2C | ||
| 1762 | help | 1763 | help |
| 1763 | If you say yes here you get support for the Nuvoton W83773G hardware | 1764 | If you say yes here you get support for the Nuvoton W83773G hardware |
| 1764 | monitoring chip. | 1765 | monitoring chip. |
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index e4f9f7ce92fa..f9abeeeead9e 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c | |||
| @@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = { | |||
| 640 | }; | 640 | }; |
| 641 | 641 | ||
| 642 | static const u32 ntc_temp_config[] = { | 642 | static const u32 ntc_temp_config[] = { |
| 643 | HWMON_T_INPUT, HWMON_T_TYPE, | 643 | HWMON_T_INPUT | HWMON_T_TYPE, |
| 644 | 0 | 644 | 0 |
| 645 | }; | 645 | }; |
| 646 | 646 | ||
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index b91a80abf724..4679acb4918e 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c | |||
| @@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ) | |||
| 890 | s++; | 890 | s++; |
| 891 | } | 891 | } |
| 892 | } | 892 | } |
| 893 | |||
| 894 | s = (sensors->power.num_sensors * 4) + 1; | ||
| 893 | } else { | 895 | } else { |
| 894 | for (i = 0; i < sensors->power.num_sensors; ++i) { | 896 | for (i = 0; i < sensors->power.num_sensors; ++i) { |
| 895 | s = i + 1; | 897 | s = i + 1; |
| @@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ) | |||
| 918 | show_power, NULL, 3, i); | 920 | show_power, NULL, 3, i); |
| 919 | attr++; | 921 | attr++; |
| 920 | } | 922 | } |
| 921 | } | ||
| 922 | 923 | ||
| 923 | if (sensors->caps.num_sensors >= 1) { | ||
| 924 | s = sensors->power.num_sensors + 1; | 924 | s = sensors->power.num_sensors + 1; |
| 925 | } | ||
| 925 | 926 | ||
| 927 | if (sensors->caps.num_sensors >= 1) { | ||
| 926 | snprintf(attr->name, sizeof(attr->name), "power%d_label", s); | 928 | snprintf(attr->name, sizeof(attr->name), "power%d_label", s); |
| 927 | attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, | 929 | attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, |
| 928 | 0, 0); | 930 | 0, 0); |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index f2c681971201..f8979abb9a19 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
| @@ -131,6 +131,7 @@ config I2C_I801 | |||
| 131 | Cannon Lake (PCH) | 131 | Cannon Lake (PCH) |
| 132 | Cedar Fork (PCH) | 132 | Cedar Fork (PCH) |
| 133 | Ice Lake (PCH) | 133 | Ice Lake (PCH) |
| 134 | Comet Lake (PCH) | ||
| 134 | 135 | ||
| 135 | This driver can also be built as a module. If so, the module | 136 | This driver can also be built as a module. If so, the module |
| 136 | will be called i2c-i801. | 137 | will be called i2c-i801. |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index c91e145ef5a5..679c6c41f64b 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -71,6 +71,7 @@ | |||
| 71 | * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes | 71 | * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes |
| 72 | * Cedar Fork (PCH) 0x18df 32 hard yes yes yes | 72 | * Cedar Fork (PCH) 0x18df 32 hard yes yes yes |
| 73 | * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes | 73 | * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes |
| 74 | * Comet Lake (PCH) 0x02a3 32 hard yes yes yes | ||
| 74 | * | 75 | * |
| 75 | * Features supported by this driver: | 76 | * Features supported by this driver: |
| 76 | * Software PEC no | 77 | * Software PEC no |
| @@ -240,6 +241,7 @@ | |||
| 240 | #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 | 241 | #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 |
| 241 | #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 | 242 | #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 |
| 242 | #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 | 243 | #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 |
| 244 | #define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3 | ||
| 243 | 245 | ||
| 244 | struct i801_mux_config { | 246 | struct i801_mux_config { |
| 245 | char *gpio_chip; | 247 | char *gpio_chip; |
| @@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = { | |||
| 1038 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, | 1040 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, |
| 1039 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, | 1041 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, |
| 1040 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, | 1042 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, |
| 1043 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) }, | ||
| 1041 | { 0, } | 1044 | { 0, } |
| 1042 | }; | 1045 | }; |
| 1043 | 1046 | ||
| @@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1534 | case PCI_DEVICE_ID_INTEL_DNV_SMBUS: | 1537 | case PCI_DEVICE_ID_INTEL_DNV_SMBUS: |
| 1535 | case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: | 1538 | case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: |
| 1536 | case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS: | 1539 | case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS: |
| 1540 | case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS: | ||
| 1537 | priv->features |= FEATURE_I2C_BLOCK_READ; | 1541 | priv->features |= FEATURE_I2C_BLOCK_READ; |
| 1538 | priv->features |= FEATURE_IRQ; | 1542 | priv->features |= FEATURE_IRQ; |
| 1539 | priv->features |= FEATURE_SMBUS_PEC; | 1543 | priv->features |= FEATURE_SMBUS_PEC; |
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 42fed40198a0..c0c3043b5d61 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
| @@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev) | |||
| 1169 | /* Init DMA config if supported */ | 1169 | /* Init DMA config if supported */ |
| 1170 | ret = i2c_imx_dma_request(i2c_imx, phy_addr); | 1170 | ret = i2c_imx_dma_request(i2c_imx, phy_addr); |
| 1171 | if (ret < 0) | 1171 | if (ret < 0) |
| 1172 | goto clk_notifier_unregister; | 1172 | goto del_adapter; |
| 1173 | 1173 | ||
| 1174 | dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); | 1174 | dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); |
| 1175 | return 0; /* Return OK */ | 1175 | return 0; /* Return OK */ |
| 1176 | 1176 | ||
| 1177 | del_adapter: | ||
| 1178 | i2c_del_adapter(&i2c_imx->adapter); | ||
| 1177 | clk_notifier_unregister: | 1179 | clk_notifier_unregister: |
| 1178 | clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); | 1180 | clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); |
| 1179 | rpm_disable: | 1181 | rpm_disable: |
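The i2c-imx fix is the usual goto-ladder rule for probe(): a step that fails must unwind every step that already succeeded, in reverse order, which is why the new DMA failure path needs its own label that also deletes the adapter. A compressed sketch of that shape with illustrative step names:

static int my_probe(struct platform_device *pdev)
{
        int ret;

        ret = step_a(pdev);             /* e.g. prepare a clock */
        if (ret)
                return ret;

        ret = step_b(pdev);             /* e.g. add the I2C adapter */
        if (ret)
                goto undo_a;

        ret = step_c(pdev);             /* e.g. request DMA */
        if (ret)
                goto undo_b;            /* must undo step_b too, not just step_a */

        return 0;

undo_b:
        undo_step_b(pdev);
undo_a:
        undo_step_a(pdev);
        return ret;
}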
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 21cb088d6687..f7cdd2ab7f11 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -3169,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev, | |||
| 3169 | return; | 3169 | return; |
| 3170 | 3170 | ||
| 3171 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { | 3171 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { |
| 3172 | int type, prot = 0; | ||
| 3172 | size_t length; | 3173 | size_t length; |
| 3173 | int prot = 0; | ||
| 3174 | 3174 | ||
| 3175 | if (devid < entry->devid_start || devid > entry->devid_end) | 3175 | if (devid < entry->devid_start || devid > entry->devid_end) |
| 3176 | continue; | 3176 | continue; |
| 3177 | 3177 | ||
| 3178 | type = IOMMU_RESV_DIRECT; | ||
| 3178 | length = entry->address_end - entry->address_start; | 3179 | length = entry->address_end - entry->address_start; |
| 3179 | if (entry->prot & IOMMU_PROT_IR) | 3180 | if (entry->prot & IOMMU_PROT_IR) |
| 3180 | prot |= IOMMU_READ; | 3181 | prot |= IOMMU_READ; |
| 3181 | if (entry->prot & IOMMU_PROT_IW) | 3182 | if (entry->prot & IOMMU_PROT_IW) |
| 3182 | prot |= IOMMU_WRITE; | 3183 | prot |= IOMMU_WRITE; |
| 3184 | if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) | ||
| 3185 | /* Exclusion range */ | ||
| 3186 | type = IOMMU_RESV_RESERVED; | ||
| 3183 | 3187 | ||
| 3184 | region = iommu_alloc_resv_region(entry->address_start, | 3188 | region = iommu_alloc_resv_region(entry->address_start, |
| 3185 | length, prot, | 3189 | length, prot, type); |
| 3186 | IOMMU_RESV_DIRECT); | ||
| 3187 | if (!region) { | 3190 | if (!region) { |
| 3188 | dev_err(dev, "Out of memory allocating dm-regions\n"); | 3191 | dev_err(dev, "Out of memory allocating dm-regions\n"); |
| 3189 | return; | 3192 | return; |
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index f773792d77fd..1b1378619fc9 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
| @@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m) | |||
| 2013 | if (e == NULL) | 2013 | if (e == NULL) |
| 2014 | return -ENOMEM; | 2014 | return -ENOMEM; |
| 2015 | 2015 | ||
| 2016 | if (m->flags & IVMD_FLAG_EXCL_RANGE) | ||
| 2017 | init_exclusion_range(m); | ||
| 2018 | |||
| 2016 | switch (m->type) { | 2019 | switch (m->type) { |
| 2017 | default: | 2020 | default: |
| 2018 | kfree(e); | 2021 | kfree(e); |
| @@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table) | |||
| 2059 | 2062 | ||
| 2060 | while (p < end) { | 2063 | while (p < end) { |
| 2061 | m = (struct ivmd_header *)p; | 2064 | m = (struct ivmd_header *)p; |
| 2062 | if (m->flags & IVMD_FLAG_EXCL_RANGE) | 2065 | if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) |
| 2063 | init_exclusion_range(m); | ||
| 2064 | else if (m->flags & IVMD_FLAG_UNITY_MAP) | ||
| 2065 | init_unity_map_range(m); | 2066 | init_unity_map_range(m); |
| 2066 | 2067 | ||
| 2067 | p += m->length; | 2068 | p += m->length; |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index eae0741f72dc..87965e4d9647 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
| @@ -374,6 +374,8 @@ | |||
| 374 | #define IOMMU_PROT_IR 0x01 | 374 | #define IOMMU_PROT_IR 0x01 |
| 375 | #define IOMMU_PROT_IW 0x02 | 375 | #define IOMMU_PROT_IW 0x02 |
| 376 | 376 | ||
| 377 | #define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2) | ||
| 378 | |||
| 377 | /* IOMMU capabilities */ | 379 | /* IOMMU capabilities */ |
| 378 | #define IOMMU_CAP_IOTLB 24 | 380 | #define IOMMU_CAP_IOTLB 24 |
| 379 | #define IOMMU_CAP_NPCACHE 26 | 381 | #define IOMMU_CAP_NPCACHE 26 |
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index f101afc315ab..9a8a8870e267 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
| @@ -160,6 +160,14 @@ | |||
| 160 | 160 | ||
| 161 | #define ARM_V7S_TCR_PD1 BIT(5) | 161 | #define ARM_V7S_TCR_PD1 BIT(5) |
| 162 | 162 | ||
| 163 | #ifdef CONFIG_ZONE_DMA32 | ||
| 164 | #define ARM_V7S_TABLE_GFP_DMA GFP_DMA32 | ||
| 165 | #define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32 | ||
| 166 | #else | ||
| 167 | #define ARM_V7S_TABLE_GFP_DMA GFP_DMA | ||
| 168 | #define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA | ||
| 169 | #endif | ||
| 170 | |||
| 163 | typedef u32 arm_v7s_iopte; | 171 | typedef u32 arm_v7s_iopte; |
| 164 | 172 | ||
| 165 | static bool selftest_running; | 173 | static bool selftest_running; |
| @@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, | |||
| 197 | void *table = NULL; | 205 | void *table = NULL; |
| 198 | 206 | ||
| 199 | if (lvl == 1) | 207 | if (lvl == 1) |
| 200 | table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); | 208 | table = (void *)__get_free_pages( |
| 209 | __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size)); | ||
| 201 | else if (lvl == 2) | 210 | else if (lvl == 2) |
| 202 | table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); | 211 | table = kmem_cache_zalloc(data->l2_tables, gfp); |
| 203 | phys = virt_to_phys(table); | 212 | phys = virt_to_phys(table); |
| 204 | if (phys != (arm_v7s_iopte)phys) | 213 | if (phys != (arm_v7s_iopte)phys) { |
| 205 | /* Doesn't fit in PTE */ | 214 | /* Doesn't fit in PTE */ |
| 215 | dev_err(dev, "Page table does not fit in PTE: %pa", &phys); | ||
| 206 | goto out_free; | 216 | goto out_free; |
| 217 | } | ||
| 207 | if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { | 218 | if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { |
| 208 | dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); | 219 | dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); |
| 209 | if (dma_mapping_error(dev, dma)) | 220 | if (dma_mapping_error(dev, dma)) |
| @@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, | |||
| 733 | data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", | 744 | data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", |
| 734 | ARM_V7S_TABLE_SIZE(2), | 745 | ARM_V7S_TABLE_SIZE(2), |
| 735 | ARM_V7S_TABLE_SIZE(2), | 746 | ARM_V7S_TABLE_SIZE(2), |
| 736 | SLAB_CACHE_DMA, NULL); | 747 | ARM_V7S_TABLE_SLAB_FLAGS, NULL); |
| 737 | if (!data->l2_tables) | 748 | if (!data->l2_tables) |
| 738 | goto out_free_data; | 749 | goto out_free_data; |
| 739 | 750 | ||
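The io-pgtable-arm-v7s change keeps its 32-bit-addressable page tables out of slab GFP_DMA allocations by preferring ZONE_DMA32 when the kernel has one: level-1 tables come from the page allocator with GFP_DMA32 and the level-2 cache is created with SLAB_CACHE_DMA32, falling back to GFP_DMA/SLAB_CACHE_DMA otherwise, with the physical address still checked against the 32-bit PTE format. A trimmed sketch of that selection, with my_* names standing in for the driver's own:

#ifdef CONFIG_ZONE_DMA32
#define MY_TABLE_GFP_DMA        GFP_DMA32
#define MY_TABLE_SLAB_FLAGS     SLAB_CACHE_DMA32
#else
#define MY_TABLE_GFP_DMA        GFP_DMA
#define MY_TABLE_SLAB_FLAGS     SLAB_CACHE_DMA
#endif

        /* Level-1 table: whole pages from the low zone. */
        table = (void *)__get_free_pages(__GFP_ZERO | MY_TABLE_GFP_DMA,
                                         get_order(size));

        /* Level-2 tables: the cache itself allocates from the low zone,
         * so callers no longer pass GFP_DMA per allocation. */
        l2_cache = kmem_cache_create("my_l2_tables", l2_size, l2_size,
                                     MY_TABLE_SLAB_FLAGS, NULL);

        /* Either way, the result must still fit the 32-bit PTE format. */
        if (virt_to_phys(table) != (u32)virt_to_phys(table))
                goto out_free;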
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 33a982e33716..109de67d5d72 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
| @@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) | |||
| 1105 | 1105 | ||
| 1106 | dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); | 1106 | dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); |
| 1107 | if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { | 1107 | if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { |
| 1108 | dev_warn(dev, | ||
| 1109 | "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA", | ||
| 1110 | iommu_def_domain_type); | ||
| 1111 | dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); | 1108 | dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); |
| 1109 | if (dom) { | ||
| 1110 | dev_warn(dev, | ||
| 1111 | "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA", | ||
| 1112 | iommu_def_domain_type); | ||
| 1113 | } | ||
| 1112 | } | 1114 | } |
| 1113 | 1115 | ||
| 1114 | group->default_domain = dom; | 1116 | group->default_domain = dom; |
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 4d85645c87f7..0928fd1f0e0c 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c | |||
| @@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev, | |||
| 4365 | if (m->clock2) | 4365 | if (m->clock2) |
| 4366 | test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip); | 4366 | test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip); |
| 4367 | 4367 | ||
| 4368 | if (ent->device == 0xB410) { | 4368 | if (ent->vendor == PCI_VENDOR_ID_DIGIUM && |
| 4369 | ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) { | ||
| 4369 | test_and_set_bit(HFC_CHIP_B410P, &hc->chip); | 4370 | test_and_set_bit(HFC_CHIP_B410P, &hc->chip); |
| 4370 | test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip); | 4371 | test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip); |
| 4371 | test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip); | 4372 | test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip); |
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 7fea18b0c15d..7cb4d685a1f1 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c | |||
| @@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client, | |||
| 513 | const struct i2c_device_id *id) | 513 | const struct i2c_device_id *id) |
| 514 | { | 514 | { |
| 515 | int devid; | 515 | int devid; |
| 516 | const struct of_device_id *of_id; | ||
| 516 | struct pca9532_data *data = i2c_get_clientdata(client); | 517 | struct pca9532_data *data = i2c_get_clientdata(client); |
| 517 | struct pca9532_platform_data *pca9532_pdata = | 518 | struct pca9532_platform_data *pca9532_pdata = |
| 518 | dev_get_platdata(&client->dev); | 519 | dev_get_platdata(&client->dev); |
| @@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client, | |||
| 528 | dev_err(&client->dev, "no platform data\n"); | 529 | dev_err(&client->dev, "no platform data\n"); |
| 529 | return -EINVAL; | 530 | return -EINVAL; |
| 530 | } | 531 | } |
| 531 | devid = (int)(uintptr_t)of_match_device( | 532 | of_id = of_match_device(of_pca9532_leds_match, |
| 532 | of_pca9532_leds_match, &client->dev)->data; | 533 | &client->dev); |
| 534 | if (unlikely(!of_id)) | ||
| 535 | return -EINVAL; | ||
| 536 | devid = (int)(uintptr_t) of_id->data; | ||
| 533 | } else { | 537 | } else { |
| 534 | devid = id->driver_data; | 538 | devid = id->driver_data; |
| 535 | } | 539 | } |
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index 3dd3ed46d473..136f86a1627d 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c | |||
| @@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev, | |||
| 122 | trigger_data->net_dev = NULL; | 122 | trigger_data->net_dev = NULL; |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | strncpy(trigger_data->device_name, buf, size); | 125 | memcpy(trigger_data->device_name, buf, size); |
| 126 | trigger_data->device_name[size] = 0; | ||
| 126 | if (size > 0 && trigger_data->device_name[size - 1] == '\n') | 127 | if (size > 0 && trigger_data->device_name[size - 1] == '\n') |
| 127 | trigger_data->device_name[size - 1] = 0; | 128 | trigger_data->device_name[size - 1] = 0; |
| 128 | 129 | ||
| @@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb, | |||
| 301 | container_of(nb, struct led_netdev_data, notifier); | 302 | container_of(nb, struct led_netdev_data, notifier); |
| 302 | 303 | ||
| 303 | if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE | 304 | if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE |
| 304 | && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER | 305 | && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER) |
| 305 | && evt != NETDEV_CHANGENAME) | ||
| 306 | return NOTIFY_DONE; | 306 | return NOTIFY_DONE; |
| 307 | 307 | ||
| 308 | if (strcmp(dev->name, trigger_data->device_name)) | 308 | if (!(dev == trigger_data->net_dev || |
| 309 | (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name)))) | ||
| 309 | return NOTIFY_DONE; | 310 | return NOTIFY_DONE; |
| 310 | 311 | ||
| 311 | cancel_delayed_work_sync(&trigger_data->work); | 312 | cancel_delayed_work_sync(&trigger_data->work); |
| @@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb, | |||
| 320 | dev_hold(dev); | 321 | dev_hold(dev); |
| 321 | trigger_data->net_dev = dev; | 322 | trigger_data->net_dev = dev; |
| 322 | break; | 323 | break; |
| 323 | case NETDEV_CHANGENAME: | ||
| 324 | case NETDEV_UNREGISTER: | 324 | case NETDEV_UNREGISTER: |
| 325 | if (trigger_data->net_dev) { | 325 | dev_put(trigger_data->net_dev); |
| 326 | dev_put(trigger_data->net_dev); | 326 | trigger_data->net_dev = NULL; |
| 327 | trigger_data->net_dev = NULL; | ||
| 328 | } | ||
| 329 | break; | 327 | break; |
| 330 | case NETDEV_UP: | 328 | case NETDEV_UP: |
| 331 | case NETDEV_CHANGE: | 329 | case NETDEV_CHANGE: |
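The device_name_store() change works because sysfs store buffers are not guaranteed to be NUL-terminated and strncpy() does not add a terminator when the input fills the destination; copying with memcpy() and terminating explicitly avoids both problems. Assuming size has already been validated against sizeof(dst) - 1, the safe shape is:

        memcpy(dst, buf, size);
        dst[size] = '\0';                        /* always terminate */
        if (size > 0 && dst[size - 1] == '\n')   /* strip a trailing newline */
                dst[size - 1] = '\0';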
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 95c6d86ab5e8..c4ef1fceead6 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h | |||
| @@ -115,6 +115,7 @@ struct mapped_device { | |||
| 115 | struct srcu_struct io_barrier; | 115 | struct srcu_struct io_barrier; |
| 116 | }; | 116 | }; |
| 117 | 117 | ||
| 118 | void disable_discard(struct mapped_device *md); | ||
| 118 | void disable_write_same(struct mapped_device *md); | 119 | void disable_write_same(struct mapped_device *md); |
| 119 | void disable_write_zeroes(struct mapped_device *md); | 120 | void disable_write_zeroes(struct mapped_device *md); |
| 120 | 121 | ||
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c index b53f30f16b4d..4b76f84424c3 100644 --- a/drivers/md/dm-init.c +++ b/drivers/md/dm-init.c | |||
| @@ -36,7 +36,7 @@ struct dm_device { | |||
| 36 | struct list_head list; | 36 | struct list_head list; |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | const char *dm_allowed_targets[] __initconst = { | 39 | const char * const dm_allowed_targets[] __initconst = { |
| 40 | "crypt", | 40 | "crypt", |
| 41 | "delay", | 41 | "delay", |
| 42 | "linear", | 42 | "linear", |
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index d57d997a52c8..7c678f50aaa3 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
| @@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig | |||
| 913 | static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) | 913 | static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) |
| 914 | { | 914 | { |
| 915 | return range1->logical_sector < range2->logical_sector + range2->n_sectors && | 915 | return range1->logical_sector < range2->logical_sector + range2->n_sectors && |
| 916 | range2->logical_sector + range2->n_sectors > range2->logical_sector; | 916 | range1->logical_sector + range1->n_sectors > range2->logical_sector; |
| 917 | } | 917 | } |
| 918 | 918 | ||
| 919 | static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) | 919 | static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) |
| @@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity | |||
| 959 | struct dm_integrity_range *last_range = | 959 | struct dm_integrity_range *last_range = |
| 960 | list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); | 960 | list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); |
| 961 | struct task_struct *last_range_task; | 961 | struct task_struct *last_range_task; |
| 962 | if (!ranges_overlap(range, last_range)) | ||
| 963 | break; | ||
| 964 | last_range_task = last_range->task; | 962 | last_range_task = last_range->task; |
| 965 | list_del(&last_range->wait_entry); | 963 | list_del(&last_range->wait_entry); |
| 966 | if (!add_new_range(ic, last_range, false)) { | 964 | if (!add_new_range(ic, last_range, false)) { |
| @@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 3185 | journal_watermark = val; | 3183 | journal_watermark = val; |
| 3186 | else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) | 3184 | else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) |
| 3187 | sync_msec = val; | 3185 | sync_msec = val; |
| 3188 | else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) { | 3186 | else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) { |
| 3189 | if (ic->meta_dev) { | 3187 | if (ic->meta_dev) { |
| 3190 | dm_put_device(ti, ic->meta_dev); | 3188 | dm_put_device(ti, ic->meta_dev); |
| 3191 | ic->meta_dev = NULL; | 3189 | ic->meta_dev = NULL; |
| @@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 3204 | goto bad; | 3202 | goto bad; |
| 3205 | } | 3203 | } |
| 3206 | ic->sectors_per_block = val >> SECTOR_SHIFT; | 3204 | ic->sectors_per_block = val >> SECTOR_SHIFT; |
| 3207 | } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { | 3205 | } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { |
| 3208 | r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, | 3206 | r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, |
| 3209 | "Invalid internal_hash argument"); | 3207 | "Invalid internal_hash argument"); |
| 3210 | if (r) | 3208 | if (r) |
| 3211 | goto bad; | 3209 | goto bad; |
| 3212 | } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { | 3210 | } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { |
| 3213 | r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, | 3211 | r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, |
| 3214 | "Invalid journal_crypt argument"); | 3212 | "Invalid journal_crypt argument"); |
| 3215 | if (r) | 3213 | if (r) |
| 3216 | goto bad; | 3214 | goto bad; |
| 3217 | } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { | 3215 | } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { |
| 3218 | r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, | 3216 | r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, |
| 3219 | "Invalid journal_mac argument"); | 3217 | "Invalid journal_mac argument"); |
| 3220 | if (r) | 3218 | if (r) |
| @@ -3616,7 +3614,7 @@ static struct target_type integrity_target = { | |||
| 3616 | .io_hints = dm_integrity_io_hints, | 3614 | .io_hints = dm_integrity_io_hints, |
| 3617 | }; | 3615 | }; |
| 3618 | 3616 | ||
| 3619 | int __init dm_integrity_init(void) | 3617 | static int __init dm_integrity_init(void) |
| 3620 | { | 3618 | { |
| 3621 | int r; | 3619 | int r; |
| 3622 | 3620 | ||
| @@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void) | |||
| 3635 | return r; | 3633 | return r; |
| 3636 | } | 3634 | } |
| 3637 | 3635 | ||
| 3638 | void dm_integrity_exit(void) | 3636 | static void __exit dm_integrity_exit(void) |
| 3639 | { | 3637 | { |
| 3640 | dm_unregister_target(&integrity_target); | 3638 | dm_unregister_target(&integrity_target); |
| 3641 | kmem_cache_destroy(journal_io_cache); | 3639 | kmem_cache_destroy(journal_io_cache); |
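The dm-integrity option parsing now uses strncmp() rather than memcmp() for its prefix checks; the difference is that strncmp() stops at the argument's terminating NUL, so an option string shorter than the prefix cannot cause a read past its end. A standalone illustration of the prefix test:

#include <stdbool.h>
#include <string.h>

/* True if 'arg' begins with 'prefix'.  strncmp() stops at the NUL in 'arg',
 * so a short argument such as "meta" is compared safely, whereas
 * memcmp(arg, prefix, strlen(prefix)) could read beyond the end of 'arg'. */
static bool has_prefix(const char *arg, const char *prefix)
{
        return strncmp(arg, prefix, strlen(prefix)) == 0;
}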
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 09773636602d..b66745bd08bb 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
| @@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) | |||
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | if (unlikely(error == BLK_STS_TARGET)) { | 224 | if (unlikely(error == BLK_STS_TARGET)) { |
| 225 | if (req_op(clone) == REQ_OP_WRITE_SAME && | 225 | if (req_op(clone) == REQ_OP_DISCARD && |
| 226 | !clone->q->limits.max_write_same_sectors) | 226 | !clone->q->limits.max_discard_sectors) |
| 227 | disable_discard(tio->md); | ||
| 228 | else if (req_op(clone) == REQ_OP_WRITE_SAME && | ||
| 229 | !clone->q->limits.max_write_same_sectors) | ||
| 227 | disable_write_same(tio->md); | 230 | disable_write_same(tio->md); |
| 228 | if (req_op(clone) == REQ_OP_WRITE_ZEROES && | 231 | else if (req_op(clone) == REQ_OP_WRITE_ZEROES && |
| 229 | !clone->q->limits.max_write_zeroes_sectors) | 232 | !clone->q->limits.max_write_zeroes_sectors) |
| 230 | disable_write_zeroes(tio->md); | 233 | disable_write_zeroes(tio->md); |
| 231 | } | 234 | } |
| 232 | 235 | ||
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ba9481f1bf3c..cde3b49b2a91 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
| @@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t) | |||
| 1844 | return true; | 1844 | return true; |
| 1845 | } | 1845 | } |
| 1846 | 1846 | ||
| 1847 | static int device_requires_stable_pages(struct dm_target *ti, | ||
| 1848 | struct dm_dev *dev, sector_t start, | ||
| 1849 | sector_t len, void *data) | ||
| 1850 | { | ||
| 1851 | struct request_queue *q = bdev_get_queue(dev->bdev); | ||
| 1852 | |||
| 1853 | return q && bdi_cap_stable_pages_required(q->backing_dev_info); | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | /* | ||
| 1857 | * If any underlying device requires stable pages, a table must require | ||
| 1858 | * them as well. Only targets that support iterate_devices are considered: | ||
| 1859 | * don't want error, zero, etc to require stable pages. | ||
| 1860 | */ | ||
| 1861 | static bool dm_table_requires_stable_pages(struct dm_table *t) | ||
| 1862 | { | ||
| 1863 | struct dm_target *ti; | ||
| 1864 | unsigned i; | ||
| 1865 | |||
| 1866 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | ||
| 1867 | ti = dm_table_get_target(t, i); | ||
| 1868 | |||
| 1869 | if (ti->type->iterate_devices && | ||
| 1870 | ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) | ||
| 1871 | return true; | ||
| 1872 | } | ||
| 1873 | |||
| 1874 | return false; | ||
| 1875 | } | ||
| 1876 | |||
| 1847 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | 1877 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
| 1848 | struct queue_limits *limits) | 1878 | struct queue_limits *limits) |
| 1849 | { | 1879 | { |
| @@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | |||
| 1897 | dm_table_verify_integrity(t); | 1927 | dm_table_verify_integrity(t); |
| 1898 | 1928 | ||
| 1899 | /* | 1929 | /* |
| 1930 | * Some devices don't use blk_integrity but still want stable pages | ||
| 1931 | * because they do their own checksumming. | ||
| 1932 | */ | ||
| 1933 | if (dm_table_requires_stable_pages(t)) | ||
| 1934 | q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; | ||
| 1935 | else | ||
| 1936 | q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; | ||
| 1937 | |||
| 1938 | /* | ||
| 1900 | * Determine whether or not this queue's I/O timings contribute | 1939 | * Determine whether or not this queue's I/O timings contribute |
| 1901 | * to the entropy pool, Only request-based targets use this. | 1940 | * to the entropy pool, Only request-based targets use this. |
| 1902 | * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not | 1941 | * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not |
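The stable-pages hunk follows device-mapper's standard recipe for deriving a table-wide property: a small per-device callback answers the question for one underlying device, and ti->type->iterate_devices() ORs those answers across every target that supports iteration. The same shape works for any boolean capability, roughly as below (my_queue_has_property() is a placeholder predicate):

static int device_has_property(struct dm_target *ti, struct dm_dev *dev,
                               sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && my_queue_has_property(q);
}

static bool dm_table_has_property(struct dm_table *t)
{
        unsigned int i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                struct dm_target *ti = dm_table_get_target(t, i);

                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, device_has_property, NULL))
                        return true;
        }

        return false;
}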
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 68d24056d0b1..043f0761e4a0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error) | |||
| 945 | } | 945 | } |
| 946 | } | 946 | } |
| 947 | 947 | ||
| 948 | void disable_discard(struct mapped_device *md) | ||
| 949 | { | ||
| 950 | struct queue_limits *limits = dm_get_queue_limits(md); | ||
| 951 | |||
| 952 | /* device doesn't really support DISCARD, disable it */ | ||
| 953 | limits->max_discard_sectors = 0; | ||
| 954 | blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); | ||
| 955 | } | ||
| 956 | |||
| 948 | void disable_write_same(struct mapped_device *md) | 957 | void disable_write_same(struct mapped_device *md) |
| 949 | { | 958 | { |
| 950 | struct queue_limits *limits = dm_get_queue_limits(md); | 959 | struct queue_limits *limits = dm_get_queue_limits(md); |
| @@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio) | |||
| 970 | dm_endio_fn endio = tio->ti->type->end_io; | 979 | dm_endio_fn endio = tio->ti->type->end_io; |
| 971 | 980 | ||
| 972 | if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { | 981 | if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { |
| 973 | if (bio_op(bio) == REQ_OP_WRITE_SAME && | 982 | if (bio_op(bio) == REQ_OP_DISCARD && |
| 974 | !bio->bi_disk->queue->limits.max_write_same_sectors) | 983 | !bio->bi_disk->queue->limits.max_discard_sectors) |
| 984 | disable_discard(md); | ||
| 985 | else if (bio_op(bio) == REQ_OP_WRITE_SAME && | ||
| 986 | !bio->bi_disk->queue->limits.max_write_same_sectors) | ||
| 975 | disable_write_same(md); | 987 | disable_write_same(md); |
| 976 | if (bio_op(bio) == REQ_OP_WRITE_ZEROES && | 988 | else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && |
| 977 | !bio->bi_disk->queue->limits.max_write_zeroes_sectors) | 989 | !bio->bi_disk->queue->limits.max_write_zeroes_sectors) |
| 978 | disable_write_zeroes(md); | 990 | disable_write_zeroes(md); |
| 979 | } | 991 | } |
| 980 | 992 | ||
| @@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) | |||
| 1042 | return -EINVAL; | 1054 | return -EINVAL; |
| 1043 | } | 1055 | } |
| 1044 | 1056 | ||
| 1045 | /* | 1057 | ti->max_io_len = (uint32_t) len; |
| 1046 | * BIO based queue uses its own splitting. When multipage bvecs | ||
| 1047 | * is switched on, size of the incoming bio may be too big to | ||
| 1048 | * be handled in some targets, such as crypt. | ||
| 1049 | * | ||
| 1050 | * When these targets are ready for the big bio, we can remove | ||
| 1051 | * the limit. | ||
| 1052 | */ | ||
| 1053 | ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE); | ||
| 1054 | 1058 | ||
| 1055 | return 0; | 1059 | return 0; |
| 1056 | } | 1060 | } |
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 0ce2d8dfc5f1..26ad6468d13a 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
| @@ -1246,7 +1246,7 @@ config MFD_STA2X11 | |||
| 1246 | 1246 | ||
| 1247 | config MFD_SUN6I_PRCM | 1247 | config MFD_SUN6I_PRCM |
| 1248 | bool "Allwinner A31 PRCM controller" | 1248 | bool "Allwinner A31 PRCM controller" |
| 1249 | depends on ARCH_SUNXI | 1249 | depends on ARCH_SUNXI || COMPILE_TEST |
| 1250 | select MFD_CORE | 1250 | select MFD_CORE |
| 1251 | help | 1251 | help |
| 1252 | Support for the PRCM (Power/Reset/Clock Management) unit available | 1252 | Support for the PRCM (Power/Reset/Clock Management) unit available |
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index 69df27769c21..43ac71691fe4 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c | |||
| @@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = { | |||
| 53 | static const struct mfd_cell sprd_pmic_devs[] = { | 53 | static const struct mfd_cell sprd_pmic_devs[] = { |
| 54 | { | 54 | { |
| 55 | .name = "sc27xx-wdt", | 55 | .name = "sc27xx-wdt", |
| 56 | .of_compatible = "sprd,sc27xx-wdt", | 56 | .of_compatible = "sprd,sc2731-wdt", |
| 57 | }, { | 57 | }, { |
| 58 | .name = "sc27xx-rtc", | 58 | .name = "sc27xx-rtc", |
| 59 | .of_compatible = "sprd,sc27xx-rtc", | 59 | .of_compatible = "sprd,sc2731-rtc", |
| 60 | }, { | 60 | }, { |
| 61 | .name = "sc27xx-charger", | 61 | .name = "sc27xx-charger", |
| 62 | .of_compatible = "sprd,sc27xx-charger", | 62 | .of_compatible = "sprd,sc2731-charger", |
| 63 | }, { | 63 | }, { |
| 64 | .name = "sc27xx-chg-timer", | 64 | .name = "sc27xx-chg-timer", |
| 65 | .of_compatible = "sprd,sc27xx-chg-timer", | 65 | .of_compatible = "sprd,sc2731-chg-timer", |
| 66 | }, { | 66 | }, { |
| 67 | .name = "sc27xx-fast-chg", | 67 | .name = "sc27xx-fast-chg", |
| 68 | .of_compatible = "sprd,sc27xx-fast-chg", | 68 | .of_compatible = "sprd,sc2731-fast-chg", |
| 69 | }, { | 69 | }, { |
| 70 | .name = "sc27xx-chg-wdt", | 70 | .name = "sc27xx-chg-wdt", |
| 71 | .of_compatible = "sprd,sc27xx-chg-wdt", | 71 | .of_compatible = "sprd,sc2731-chg-wdt", |
| 72 | }, { | 72 | }, { |
| 73 | .name = "sc27xx-typec", | 73 | .name = "sc27xx-typec", |
| 74 | .of_compatible = "sprd,sc27xx-typec", | 74 | .of_compatible = "sprd,sc2731-typec", |
| 75 | }, { | 75 | }, { |
| 76 | .name = "sc27xx-flash", | 76 | .name = "sc27xx-flash", |
| 77 | .of_compatible = "sprd,sc27xx-flash", | 77 | .of_compatible = "sprd,sc2731-flash", |
| 78 | }, { | 78 | }, { |
| 79 | .name = "sc27xx-eic", | 79 | .name = "sc27xx-eic", |
| 80 | .of_compatible = "sprd,sc27xx-eic", | 80 | .of_compatible = "sprd,sc2731-eic", |
| 81 | }, { | 81 | }, { |
| 82 | .name = "sc27xx-efuse", | 82 | .name = "sc27xx-efuse", |
| 83 | .of_compatible = "sprd,sc27xx-efuse", | 83 | .of_compatible = "sprd,sc2731-efuse", |
| 84 | }, { | 84 | }, { |
| 85 | .name = "sc27xx-thermal", | 85 | .name = "sc27xx-thermal", |
| 86 | .of_compatible = "sprd,sc27xx-thermal", | 86 | .of_compatible = "sprd,sc2731-thermal", |
| 87 | }, { | 87 | }, { |
| 88 | .name = "sc27xx-adc", | 88 | .name = "sc27xx-adc", |
| 89 | .of_compatible = "sprd,sc27xx-adc", | 89 | .of_compatible = "sprd,sc2731-adc", |
| 90 | }, { | 90 | }, { |
| 91 | .name = "sc27xx-audio-codec", | 91 | .name = "sc27xx-audio-codec", |
| 92 | .of_compatible = "sprd,sc27xx-audio-codec", | 92 | .of_compatible = "sprd,sc2731-audio-codec", |
| 93 | }, { | 93 | }, { |
| 94 | .name = "sc27xx-regulator", | 94 | .name = "sc27xx-regulator", |
| 95 | .of_compatible = "sprd,sc27xx-regulator", | 95 | .of_compatible = "sprd,sc2731-regulator", |
| 96 | }, { | 96 | }, { |
| 97 | .name = "sc27xx-vibrator", | 97 | .name = "sc27xx-vibrator", |
| 98 | .of_compatible = "sprd,sc27xx-vibrator", | 98 | .of_compatible = "sprd,sc2731-vibrator", |
| 99 | }, { | 99 | }, { |
| 100 | .name = "sc27xx-keypad-led", | 100 | .name = "sc27xx-keypad-led", |
| 101 | .of_compatible = "sprd,sc27xx-keypad-led", | 101 | .of_compatible = "sprd,sc2731-keypad-led", |
| 102 | }, { | 102 | }, { |
| 103 | .name = "sc27xx-bltc", | 103 | .name = "sc27xx-bltc", |
| 104 | .of_compatible = "sprd,sc27xx-bltc", | 104 | .of_compatible = "sprd,sc2731-bltc", |
| 105 | }, { | 105 | }, { |
| 106 | .name = "sc27xx-fgu", | 106 | .name = "sc27xx-fgu", |
| 107 | .of_compatible = "sprd,sc27xx-fgu", | 107 | .of_compatible = "sprd,sc2731-fgu", |
| 108 | }, { | 108 | }, { |
| 109 | .name = "sc27xx-7sreset", | 109 | .name = "sc27xx-7sreset", |
| 110 | .of_compatible = "sprd,sc27xx-7sreset", | 110 | .of_compatible = "sprd,sc2731-7sreset", |
| 111 | }, { | 111 | }, { |
| 112 | .name = "sc27xx-poweroff", | 112 | .name = "sc27xx-poweroff", |
| 113 | .of_compatible = "sprd,sc27xx-poweroff", | 113 | .of_compatible = "sprd,sc2731-poweroff", |
| 114 | }, { | 114 | }, { |
| 115 | .name = "sc27xx-syscon", | 115 | .name = "sc27xx-syscon", |
| 116 | .of_compatible = "sprd,sc27xx-syscon", | 116 | .of_compatible = "sprd,sc2731-syscon", |
| 117 | }, | 117 | }, |
| 118 | }; | 118 | }; |
| 119 | 119 | ||
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 299016bc46d9..104477b512a2 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c | |||
| @@ -1245,6 +1245,28 @@ free: | |||
| 1245 | return status; | 1245 | return status; |
| 1246 | } | 1246 | } |
| 1247 | 1247 | ||
| 1248 | static int __maybe_unused twl_suspend(struct device *dev) | ||
| 1249 | { | ||
| 1250 | struct i2c_client *client = to_i2c_client(dev); | ||
| 1251 | |||
| 1252 | if (client->irq) | ||
| 1253 | disable_irq(client->irq); | ||
| 1254 | |||
| 1255 | return 0; | ||
| 1256 | } | ||
| 1257 | |||
| 1258 | static int __maybe_unused twl_resume(struct device *dev) | ||
| 1259 | { | ||
| 1260 | struct i2c_client *client = to_i2c_client(dev); | ||
| 1261 | |||
| 1262 | if (client->irq) | ||
| 1263 | enable_irq(client->irq); | ||
| 1264 | |||
| 1265 | return 0; | ||
| 1266 | } | ||
| 1267 | |||
| 1268 | static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume); | ||
| 1269 | |||
| 1248 | static const struct i2c_device_id twl_ids[] = { | 1270 | static const struct i2c_device_id twl_ids[] = { |
| 1249 | { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ | 1271 | { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ |
| 1250 | { "twl5030", 0 }, /* T2 updated */ | 1272 | { "twl5030", 0 }, /* T2 updated */ |
| @@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = { | |||
| 1262 | /* One Client Driver , 4 Clients */ | 1284 | /* One Client Driver , 4 Clients */ |
| 1263 | static struct i2c_driver twl_driver = { | 1285 | static struct i2c_driver twl_driver = { |
| 1264 | .driver.name = DRIVER_NAME, | 1286 | .driver.name = DRIVER_NAME, |
| 1287 | .driver.pm = &twl_dev_pm_ops, | ||
| 1265 | .id_table = twl_ids, | 1288 | .id_table = twl_ids, |
| 1266 | .probe = twl_probe, | 1289 | .probe = twl_probe, |
| 1267 | .remove = twl_remove, | 1290 | .remove = twl_remove, |
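The twl-core hunk above is the standard dev_pm_ops recipe: mask the client interrupt on suspend, unmask it on resume, and hang the ops off the i2c_driver. A rough, generic sketch of that pattern for a hypothetical I2C client (driver and function names here are illustrative, not taken from the patch):

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

/* Only referenced when CONFIG_PM_SLEEP=y, hence __maybe_unused. */
static int __maybe_unused foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (client->irq)
		disable_irq(client->irq);	/* keep the IRQ quiet while asleep */

	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (client->irq)
		enable_irq(client->irq);

	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
	/* .probe, .remove, .id_table as in any i2c client driver */
};

SIMPLE_DEV_PM_OPS() only fills in the system-sleep callbacks when CONFIG_PM_SLEEP is enabled, which is why marking the functions __maybe_unused is preferred over wrapping them in #ifdef.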
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index 3525236ed8d9..19c84214a7ea 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c | |||
| @@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref) | |||
| 179 | 179 | ||
| 180 | /* We also need to update CI for internal queues */ | 180 | /* We also need to update CI for internal queues */ |
| 181 | if (cs->submitted) { | 181 | if (cs->submitted) { |
| 182 | int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt); | ||
| 183 | |||
| 184 | WARN_ONCE((cs_cnt < 0), | ||
| 185 | "hl%d: error in CS active cnt %d\n", | ||
| 186 | hdev->id, cs_cnt); | ||
| 187 | |||
| 182 | hl_int_hw_queue_update_ci(cs); | 188 | hl_int_hw_queue_update_ci(cs); |
| 183 | 189 | ||
| 184 | spin_lock(&hdev->hw_queues_mirror_lock); | 190 | spin_lock(&hdev->hw_queues_mirror_lock); |
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c index a53c12aff6ad..974a87789bd8 100644 --- a/drivers/misc/habanalabs/debugfs.c +++ b/drivers/misc/habanalabs/debugfs.c | |||
| @@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data) | |||
| 232 | struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; | 232 | struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; |
| 233 | enum vm_type_t *vm_type; | 233 | enum vm_type_t *vm_type; |
| 234 | bool once = true; | 234 | bool once = true; |
| 235 | u64 j; | ||
| 235 | int i; | 236 | int i; |
| 236 | 237 | ||
| 237 | if (!dev_entry->hdev->mmu_enable) | 238 | if (!dev_entry->hdev->mmu_enable) |
| @@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data) | |||
| 260 | } else { | 261 | } else { |
| 261 | phys_pg_pack = hnode->ptr; | 262 | phys_pg_pack = hnode->ptr; |
| 262 | seq_printf(s, | 263 | seq_printf(s, |
| 263 | " 0x%-14llx %-10u %-4u\n", | 264 | " 0x%-14llx %-10llu %-4u\n", |
| 264 | hnode->vaddr, phys_pg_pack->total_size, | 265 | hnode->vaddr, phys_pg_pack->total_size, |
| 265 | phys_pg_pack->handle); | 266 | phys_pg_pack->handle); |
| 266 | } | 267 | } |
| @@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data) | |||
| 282 | phys_pg_pack->page_size); | 283 | phys_pg_pack->page_size); |
| 283 | seq_puts(s, " physical address\n"); | 284 | seq_puts(s, " physical address\n"); |
| 284 | seq_puts(s, "---------------------\n"); | 285 | seq_puts(s, "---------------------\n"); |
| 285 | for (i = 0 ; i < phys_pg_pack->npages ; i++) { | 286 | for (j = 0 ; j < phys_pg_pack->npages ; j++) { |
| 286 | seq_printf(s, " 0x%-14llx\n", | 287 | seq_printf(s, " 0x%-14llx\n", |
| 287 | phys_pg_pack->pages[i]); | 288 | phys_pg_pack->pages[j]); |
| 288 | } | 289 | } |
| 289 | } | 290 | } |
| 290 | spin_unlock(&vm->idr_lock); | 291 | spin_unlock(&vm->idr_lock); |
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index de46aa6ed154..77d51be66c7e 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c | |||
| @@ -11,6 +11,8 @@ | |||
| 11 | #include <linux/sched/signal.h> | 11 | #include <linux/sched/signal.h> |
| 12 | #include <linux/hwmon.h> | 12 | #include <linux/hwmon.h> |
| 13 | 13 | ||
| 14 | #define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10) | ||
| 15 | |||
| 14 | bool hl_device_disabled_or_in_reset(struct hl_device *hdev) | 16 | bool hl_device_disabled_or_in_reset(struct hl_device *hdev) |
| 15 | { | 17 | { |
| 16 | if ((hdev->disabled) || (atomic_read(&hdev->in_reset))) | 18 | if ((hdev->disabled) || (atomic_read(&hdev->in_reset))) |
| @@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev) | |||
| 216 | spin_lock_init(&hdev->hw_queues_mirror_lock); | 218 | spin_lock_init(&hdev->hw_queues_mirror_lock); |
| 217 | atomic_set(&hdev->in_reset, 0); | 219 | atomic_set(&hdev->in_reset, 0); |
| 218 | atomic_set(&hdev->fd_open_cnt, 0); | 220 | atomic_set(&hdev->fd_open_cnt, 0); |
| 221 | atomic_set(&hdev->cs_active_cnt, 0); | ||
| 219 | 222 | ||
| 220 | return 0; | 223 | return 0; |
| 221 | 224 | ||
| @@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev) | |||
| 413 | 416 | ||
| 414 | pci_save_state(hdev->pdev); | 417 | pci_save_state(hdev->pdev); |
| 415 | 418 | ||
| 419 | /* Block future CS/VM/JOB completion operations */ | ||
| 420 | rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); | ||
| 421 | if (rc) { | ||
| 422 | dev_err(hdev->dev, "Can't suspend while in reset\n"); | ||
| 423 | return -EIO; | ||
| 424 | } | ||
| 425 | |||
| 426 | /* This blocks all other stuff that is not blocked by in_reset */ | ||
| 427 | hdev->disabled = true; | ||
| 428 | |||
| 429 | /* | ||
| 430 | * Flush anyone who is inside the critical section of enqueueing | ||
| 431 | * jobs to the H/W. | ||
| 432 | */ | ||
| 433 | hdev->asic_funcs->hw_queues_lock(hdev); | ||
| 434 | hdev->asic_funcs->hw_queues_unlock(hdev); | ||
| 435 | |||
| 436 | /* Flush processes that are sending message to CPU */ | ||
| 437 | mutex_lock(&hdev->send_cpu_message_lock); | ||
| 438 | mutex_unlock(&hdev->send_cpu_message_lock); | ||
| 439 | |||
| 416 | rc = hdev->asic_funcs->suspend(hdev); | 440 | rc = hdev->asic_funcs->suspend(hdev); |
| 417 | if (rc) | 441 | if (rc) |
| 418 | dev_err(hdev->dev, | 442 | dev_err(hdev->dev, |
| @@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev) | |||
| 440 | 464 | ||
| 441 | pci_set_power_state(hdev->pdev, PCI_D0); | 465 | pci_set_power_state(hdev->pdev, PCI_D0); |
| 442 | pci_restore_state(hdev->pdev); | 466 | pci_restore_state(hdev->pdev); |
| 443 | rc = pci_enable_device(hdev->pdev); | 467 | rc = pci_enable_device_mem(hdev->pdev); |
| 444 | if (rc) { | 468 | if (rc) { |
| 445 | dev_err(hdev->dev, | 469 | dev_err(hdev->dev, |
| 446 | "Failed to enable PCI device in resume\n"); | 470 | "Failed to enable PCI device in resume\n"); |
| 447 | return rc; | 471 | return rc; |
| 448 | } | 472 | } |
| 449 | 473 | ||
| 474 | pci_set_master(hdev->pdev); | ||
| 475 | |||
| 450 | rc = hdev->asic_funcs->resume(hdev); | 476 | rc = hdev->asic_funcs->resume(hdev); |
| 451 | if (rc) { | 477 | if (rc) { |
| 452 | dev_err(hdev->dev, | 478 | dev_err(hdev->dev, "Failed to resume device after suspend\n"); |
| 453 | "Failed to enable PCI access from device CPU\n"); | 479 | goto disable_device; |
| 454 | return rc; | 480 | } |
| 481 | |||
| 482 | |||
| 483 | hdev->disabled = false; | ||
| 484 | atomic_set(&hdev->in_reset, 0); | ||
| 485 | |||
| 486 | rc = hl_device_reset(hdev, true, false); | ||
| 487 | if (rc) { | ||
| 488 | dev_err(hdev->dev, "Failed to reset device during resume\n"); | ||
| 489 | goto disable_device; | ||
| 455 | } | 490 | } |
| 456 | 491 | ||
| 457 | return 0; | 492 | return 0; |
| 493 | |||
| 494 | disable_device: | ||
| 495 | pci_clear_master(hdev->pdev); | ||
| 496 | pci_disable_device(hdev->pdev); | ||
| 497 | |||
| 498 | return rc; | ||
| 458 | } | 499 | } |
| 459 | 500 | ||
| 460 | static void hl_device_hard_reset_pending(struct work_struct *work) | 501 | static void hl_device_hard_reset_pending(struct work_struct *work) |
| @@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work) | |||
| 462 | struct hl_device_reset_work *device_reset_work = | 503 | struct hl_device_reset_work *device_reset_work = |
| 463 | container_of(work, struct hl_device_reset_work, reset_work); | 504 | container_of(work, struct hl_device_reset_work, reset_work); |
| 464 | struct hl_device *hdev = device_reset_work->hdev; | 505 | struct hl_device *hdev = device_reset_work->hdev; |
| 465 | u16 pending_cnt = HL_PENDING_RESET_PER_SEC; | 506 | u16 pending_total, pending_cnt; |
| 466 | struct task_struct *task = NULL; | 507 | struct task_struct *task = NULL; |
| 467 | 508 | ||
| 509 | if (hdev->pldm) | ||
| 510 | pending_total = HL_PLDM_PENDING_RESET_PER_SEC; | ||
| 511 | else | ||
| 512 | pending_total = HL_PENDING_RESET_PER_SEC; | ||
| 513 | |||
| 514 | pending_cnt = pending_total; | ||
| 515 | |||
| 468 | /* Flush all processes that are inside hl_open */ | 516 | /* Flush all processes that are inside hl_open */ |
| 469 | mutex_lock(&hdev->fd_open_cnt_lock); | 517 | mutex_lock(&hdev->fd_open_cnt_lock); |
| 470 | 518 | ||
| @@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work) | |||
| 489 | } | 537 | } |
| 490 | } | 538 | } |
| 491 | 539 | ||
| 540 | pending_cnt = pending_total; | ||
| 541 | |||
| 542 | while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) { | ||
| 543 | |||
| 544 | pending_cnt--; | ||
| 545 | |||
| 546 | ssleep(1); | ||
| 547 | } | ||
| 548 | |||
| 549 | if (atomic_read(&hdev->fd_open_cnt)) | ||
| 550 | dev_crit(hdev->dev, | ||
| 551 | "Going to hard reset with open user contexts\n"); | ||
| 552 | |||
| 492 | mutex_unlock(&hdev->fd_open_cnt_lock); | 553 | mutex_unlock(&hdev->fd_open_cnt_lock); |
| 493 | 554 | ||
| 494 | hl_device_reset(hdev, true, true); | 555 | hl_device_reset(hdev, true, true); |
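A note on the suspend flow added above: the back-to-back hw_queues_lock()/hw_queues_unlock() and mutex_lock()/mutex_unlock() pairs are not protecting any data, they act as drain barriers. Once the disabled and in_reset flags are set, taking and immediately releasing the lock that submitters hold guarantees that anyone who entered the critical section before the flags were set has already left it. A generic sketch of the idea with made-up names (not the habanalabs API):

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(submit_lock);
static bool stopped;

static void push_to_hw(void)
{
	/* hypothetical H/W enqueue */
}

/* Submitters check the stop flag inside the lock they already hold. */
static int submit_one(void)
{
	int rc = 0;

	mutex_lock(&submit_lock);
	if (stopped)
		rc = -EBUSY;
	else
		push_to_hw();
	mutex_unlock(&submit_lock);

	return rc;
}

static void quiesce_submissions(void)
{
	stopped = true;			/* new submitters will bail out */

	/* Barrier only: wait out anyone already inside the section. */
	mutex_lock(&submit_lock);
	mutex_unlock(&submit_lock);
}

After quiesce_submissions() returns, nothing new reaches the hardware: late submitters either raced in before the barrier (and were waited out) or see the flag and back off.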
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 238dd57c541b..ea979ebd62fb 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c | |||
| @@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev) | |||
| 1201 | return retval; | 1201 | return retval; |
| 1202 | } | 1202 | } |
| 1203 | 1203 | ||
| 1204 | static void goya_resume_external_queues(struct hl_device *hdev) | ||
| 1205 | { | ||
| 1206 | WREG32(mmDMA_QM_0_GLBL_CFG1, 0); | ||
| 1207 | WREG32(mmDMA_QM_1_GLBL_CFG1, 0); | ||
| 1208 | WREG32(mmDMA_QM_2_GLBL_CFG1, 0); | ||
| 1209 | WREG32(mmDMA_QM_3_GLBL_CFG1, 0); | ||
| 1210 | WREG32(mmDMA_QM_4_GLBL_CFG1, 0); | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | /* | 1204 | /* |
| 1214 | * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU | 1205 | * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU |
| 1215 | * | 1206 | * |
| @@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev) | |||
| 2178 | return retval; | 2169 | return retval; |
| 2179 | } | 2170 | } |
| 2180 | 2171 | ||
| 2181 | static void goya_resume_internal_queues(struct hl_device *hdev) | ||
| 2182 | { | ||
| 2183 | WREG32(mmMME_QM_GLBL_CFG1, 0); | ||
| 2184 | WREG32(mmMME_CMDQ_GLBL_CFG1, 0); | ||
| 2185 | |||
| 2186 | WREG32(mmTPC0_QM_GLBL_CFG1, 0); | ||
| 2187 | WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0); | ||
| 2188 | |||
| 2189 | WREG32(mmTPC1_QM_GLBL_CFG1, 0); | ||
| 2190 | WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0); | ||
| 2191 | |||
| 2192 | WREG32(mmTPC2_QM_GLBL_CFG1, 0); | ||
| 2193 | WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0); | ||
| 2194 | |||
| 2195 | WREG32(mmTPC3_QM_GLBL_CFG1, 0); | ||
| 2196 | WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0); | ||
| 2197 | |||
| 2198 | WREG32(mmTPC4_QM_GLBL_CFG1, 0); | ||
| 2199 | WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0); | ||
| 2200 | |||
| 2201 | WREG32(mmTPC5_QM_GLBL_CFG1, 0); | ||
| 2202 | WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0); | ||
| 2203 | |||
| 2204 | WREG32(mmTPC6_QM_GLBL_CFG1, 0); | ||
| 2205 | WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0); | ||
| 2206 | |||
| 2207 | WREG32(mmTPC7_QM_GLBL_CFG1, 0); | ||
| 2208 | WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0); | ||
| 2209 | } | ||
| 2210 | |||
| 2211 | static void goya_dma_stall(struct hl_device *hdev) | 2172 | static void goya_dma_stall(struct hl_device *hdev) |
| 2212 | { | 2173 | { |
| 2213 | WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); | 2174 | WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); |
| @@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev) | |||
| 2905 | { | 2866 | { |
| 2906 | int rc; | 2867 | int rc; |
| 2907 | 2868 | ||
| 2908 | rc = goya_stop_internal_queues(hdev); | ||
| 2909 | |||
| 2910 | if (rc) { | ||
| 2911 | dev_err(hdev->dev, "failed to stop internal queues\n"); | ||
| 2912 | return rc; | ||
| 2913 | } | ||
| 2914 | |||
| 2915 | rc = goya_stop_external_queues(hdev); | ||
| 2916 | |||
| 2917 | if (rc) { | ||
| 2918 | dev_err(hdev->dev, "failed to stop external queues\n"); | ||
| 2919 | return rc; | ||
| 2920 | } | ||
| 2921 | |||
| 2922 | rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); | 2869 | rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); |
| 2923 | if (rc) | 2870 | if (rc) |
| 2924 | dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); | 2871 | dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); |
| @@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev) | |||
| 2928 | 2875 | ||
| 2929 | int goya_resume(struct hl_device *hdev) | 2876 | int goya_resume(struct hl_device *hdev) |
| 2930 | { | 2877 | { |
| 2931 | int rc; | 2878 | return goya_init_iatu(hdev); |
| 2932 | |||
| 2933 | goya_resume_external_queues(hdev); | ||
| 2934 | goya_resume_internal_queues(hdev); | ||
| 2935 | |||
| 2936 | rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS); | ||
| 2937 | if (rc) | ||
| 2938 | dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); | ||
| 2939 | return rc; | ||
| 2940 | } | 2879 | } |
| 2941 | 2880 | ||
| 2942 | static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, | 2881 | static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, |
| @@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, | |||
| 3070 | 3009 | ||
| 3071 | *dma_handle = hdev->asic_prop.sram_base_address; | 3010 | *dma_handle = hdev->asic_prop.sram_base_address; |
| 3072 | 3011 | ||
| 3073 | base = hdev->pcie_bar[SRAM_CFG_BAR_ID]; | 3012 | base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID]; |
| 3074 | 3013 | ||
| 3075 | switch (queue_id) { | 3014 | switch (queue_id) { |
| 3076 | case GOYA_QUEUE_ID_MME: | 3015 | case GOYA_QUEUE_ID_MME: |
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index a7c95e9f9b9a..a8ee52c880cd 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h | |||
| @@ -793,11 +793,11 @@ struct hl_vm_hash_node { | |||
| 793 | * struct hl_vm_phys_pg_pack - physical page pack. | 793 | * struct hl_vm_phys_pg_pack - physical page pack. |
| 794 | * @vm_type: describes the type of the virtual area descriptor. | 794 | * @vm_type: describes the type of the virtual area descriptor. |
| 795 | * @pages: the physical page array. | 795 | * @pages: the physical page array. |
| 796 | * @npages: num physical pages in the pack. | ||
| 797 | * @total_size: total size of all the pages in this list. | ||
| 796 | * @mapping_cnt: number of shared mappings. | 798 | * @mapping_cnt: number of shared mappings. |
| 797 | * @asid: the context related to this list. | 799 | * @asid: the context related to this list. |
| 798 | * @npages: num physical pages in the pack. | ||
| 799 | * @page_size: size of each page in the pack. | 800 | * @page_size: size of each page in the pack. |
| 800 | * @total_size: total size of all the pages in this list. | ||
| 801 | * @flags: HL_MEM_* flags related to this list. | 801 | * @flags: HL_MEM_* flags related to this list. |
| 802 | * @handle: the provided handle related to this list. | 802 | * @handle: the provided handle related to this list. |
| 803 | * @offset: offset from the first page. | 803 | * @offset: offset from the first page. |
| @@ -807,11 +807,11 @@ struct hl_vm_hash_node { | |||
| 807 | struct hl_vm_phys_pg_pack { | 807 | struct hl_vm_phys_pg_pack { |
| 808 | enum vm_type_t vm_type; /* must be first */ | 808 | enum vm_type_t vm_type; /* must be first */ |
| 809 | u64 *pages; | 809 | u64 *pages; |
| 810 | u64 npages; | ||
| 811 | u64 total_size; | ||
| 810 | atomic_t mapping_cnt; | 812 | atomic_t mapping_cnt; |
| 811 | u32 asid; | 813 | u32 asid; |
| 812 | u32 npages; | ||
| 813 | u32 page_size; | 814 | u32 page_size; |
| 814 | u32 total_size; | ||
| 815 | u32 flags; | 815 | u32 flags; |
| 816 | u32 handle; | 816 | u32 handle; |
| 817 | u32 offset; | 817 | u32 offset; |
| @@ -1056,13 +1056,15 @@ struct hl_device_reset_work { | |||
| 1056 | * @cb_pool_lock: protects the CB pool. | 1056 | * @cb_pool_lock: protects the CB pool. |
| 1057 | * @user_ctx: current user context executing. | 1057 | * @user_ctx: current user context executing. |
| 1058 | * @dram_used_mem: current DRAM memory consumption. | 1058 | * @dram_used_mem: current DRAM memory consumption. |
| 1059 | * @in_reset: is device in reset flow. | ||
| 1060 | * @curr_pll_profile: current PLL profile. | ||
| 1061 | * @fd_open_cnt: number of open user processes. | ||
| 1062 | * @timeout_jiffies: device CS timeout value. | 1059 | * @timeout_jiffies: device CS timeout value. |
| 1063 | * @max_power: the max power of the device, as configured by the sysadmin. This | 1060 | * @max_power: the max power of the device, as configured by the sysadmin. This |
| 1064 | * value is saved so in case of hard-reset, KMD will restore this | 1061 | * value is saved so in case of hard-reset, KMD will restore this |
| 1065 | * value and update the F/W after the re-initialization | 1062 | * value and update the F/W after the re-initialization |
| 1063 | * @in_reset: is device in reset flow. | ||
| 1064 | * @curr_pll_profile: current PLL profile. | ||
| 1065 | * @fd_open_cnt: number of open user processes. | ||
| 1066 | * @cs_active_cnt: number of active command submissions on this device (active | ||
| 1067 | * means already in H/W queues) | ||
| 1066 | * @major: habanalabs KMD major. | 1068 | * @major: habanalabs KMD major. |
| 1067 | * @high_pll: high PLL profile frequency. | 1069 | * @high_pll: high PLL profile frequency. |
| 1068 | * @soft_reset_cnt: number of soft reset since KMD loading. | 1070 | * @soft_reset_cnt: number of soft reset since KMD loading. |
| @@ -1128,11 +1130,12 @@ struct hl_device { | |||
| 1128 | struct hl_ctx *user_ctx; | 1130 | struct hl_ctx *user_ctx; |
| 1129 | 1131 | ||
| 1130 | atomic64_t dram_used_mem; | 1132 | atomic64_t dram_used_mem; |
| 1133 | u64 timeout_jiffies; | ||
| 1134 | u64 max_power; | ||
| 1131 | atomic_t in_reset; | 1135 | atomic_t in_reset; |
| 1132 | atomic_t curr_pll_profile; | 1136 | atomic_t curr_pll_profile; |
| 1133 | atomic_t fd_open_cnt; | 1137 | atomic_t fd_open_cnt; |
| 1134 | u64 timeout_jiffies; | 1138 | atomic_t cs_active_cnt; |
| 1135 | u64 max_power; | ||
| 1136 | u32 major; | 1139 | u32 major; |
| 1137 | u32 high_pll; | 1140 | u32 high_pll; |
| 1138 | u32 soft_reset_cnt; | 1141 | u32 soft_reset_cnt; |
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c index 67bece26417c..ef3bb6951360 100644 --- a/drivers/misc/habanalabs/hw_queue.c +++ b/drivers/misc/habanalabs/hw_queue.c | |||
| @@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) | |||
| 370 | spin_unlock(&hdev->hw_queues_mirror_lock); | 370 | spin_unlock(&hdev->hw_queues_mirror_lock); |
| 371 | } | 371 | } |
| 372 | 372 | ||
| 373 | list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) { | 373 | atomic_inc(&hdev->cs_active_cnt); |
| 374 | |||
| 375 | list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) | ||
| 374 | if (job->ext_queue) | 376 | if (job->ext_queue) |
| 375 | ext_hw_queue_schedule_job(job); | 377 | ext_hw_queue_schedule_job(job); |
| 376 | else | 378 | else |
| 377 | int_hw_queue_schedule_job(job); | 379 | int_hw_queue_schedule_job(job); |
| 378 | } | ||
| 379 | 380 | ||
| 380 | cs->submitted = true; | 381 | cs->submitted = true; |
| 381 | 382 | ||
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c index 3a12fd1a5274..ce1fda40a8b8 100644 --- a/drivers/misc/habanalabs/memory.c +++ b/drivers/misc/habanalabs/memory.c | |||
| @@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, | |||
| 56 | struct hl_device *hdev = ctx->hdev; | 56 | struct hl_device *hdev = ctx->hdev; |
| 57 | struct hl_vm *vm = &hdev->vm; | 57 | struct hl_vm *vm = &hdev->vm; |
| 58 | struct hl_vm_phys_pg_pack *phys_pg_pack; | 58 | struct hl_vm_phys_pg_pack *phys_pg_pack; |
| 59 | u64 paddr = 0; | 59 | u64 paddr = 0, total_size, num_pgs, i; |
| 60 | u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift; | 60 | u32 num_curr_pgs, page_size, page_shift; |
| 61 | int handle, rc, i; | 61 | int handle, rc; |
| 62 | bool contiguous; | 62 | bool contiguous; |
| 63 | 63 | ||
| 64 | num_curr_pgs = 0; | 64 | num_curr_pgs = 0; |
| @@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, | |||
| 73 | paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size); | 73 | paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size); |
| 74 | if (!paddr) { | 74 | if (!paddr) { |
| 75 | dev_err(hdev->dev, | 75 | dev_err(hdev->dev, |
| 76 | "failed to allocate %u huge contiguous pages\n", | 76 | "failed to allocate %llu huge contiguous pages\n", |
| 77 | num_pgs); | 77 | num_pgs); |
| 78 | return -ENOMEM; | 78 | return -ENOMEM; |
| 79 | } | 79 | } |
| @@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, | |||
| 93 | phys_pg_pack->flags = args->flags; | 93 | phys_pg_pack->flags = args->flags; |
| 94 | phys_pg_pack->contiguous = contiguous; | 94 | phys_pg_pack->contiguous = contiguous; |
| 95 | 95 | ||
| 96 | phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL); | 96 | phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL); |
| 97 | if (!phys_pg_pack->pages) { | 97 | if (!phys_pg_pack->pages) { |
| 98 | rc = -ENOMEM; | 98 | rc = -ENOMEM; |
| 99 | goto pages_arr_err; | 99 | goto pages_arr_err; |
| @@ -148,7 +148,7 @@ page_err: | |||
| 148 | gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], | 148 | gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], |
| 149 | page_size); | 149 | page_size); |
| 150 | 150 | ||
| 151 | kfree(phys_pg_pack->pages); | 151 | kvfree(phys_pg_pack->pages); |
| 152 | pages_arr_err: | 152 | pages_arr_err: |
| 153 | kfree(phys_pg_pack); | 153 | kfree(phys_pg_pack); |
| 154 | pages_pack_err: | 154 | pages_pack_err: |
| @@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev, | |||
| 267 | struct hl_vm_phys_pg_pack *phys_pg_pack) | 267 | struct hl_vm_phys_pg_pack *phys_pg_pack) |
| 268 | { | 268 | { |
| 269 | struct hl_vm *vm = &hdev->vm; | 269 | struct hl_vm *vm = &hdev->vm; |
| 270 | int i; | 270 | u64 i; |
| 271 | 271 | ||
| 272 | if (!phys_pg_pack->created_from_userptr) { | 272 | if (!phys_pg_pack->created_from_userptr) { |
| 273 | if (phys_pg_pack->contiguous) { | 273 | if (phys_pg_pack->contiguous) { |
| @@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev, | |||
| 288 | } | 288 | } |
| 289 | } | 289 | } |
| 290 | 290 | ||
| 291 | kfree(phys_pg_pack->pages); | 291 | kvfree(phys_pg_pack->pages); |
| 292 | kfree(phys_pg_pack); | 292 | kfree(phys_pg_pack); |
| 293 | } | 293 | } |
| 294 | 294 | ||
| @@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev, | |||
| 519 | * - Return the start address of the virtual block | 519 | * - Return the start address of the virtual block |
| 520 | */ | 520 | */ |
| 521 | static u64 get_va_block(struct hl_device *hdev, | 521 | static u64 get_va_block(struct hl_device *hdev, |
| 522 | struct hl_va_range *va_range, u32 size, u64 hint_addr, | 522 | struct hl_va_range *va_range, u64 size, u64 hint_addr, |
| 523 | bool is_userptr) | 523 | bool is_userptr) |
| 524 | { | 524 | { |
| 525 | struct hl_vm_va_block *va_block, *new_va_block = NULL; | 525 | struct hl_vm_va_block *va_block, *new_va_block = NULL; |
| @@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev, | |||
| 577 | } | 577 | } |
| 578 | 578 | ||
| 579 | if (!new_va_block) { | 579 | if (!new_va_block) { |
| 580 | dev_err(hdev->dev, "no available va block for size %u\n", size); | 580 | dev_err(hdev->dev, "no available va block for size %llu\n", |
| 581 | size); | ||
| 581 | goto out; | 582 | goto out; |
| 582 | } | 583 | } |
| 583 | 584 | ||
| @@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, | |||
| 648 | struct hl_vm_phys_pg_pack *phys_pg_pack; | 649 | struct hl_vm_phys_pg_pack *phys_pg_pack; |
| 649 | struct scatterlist *sg; | 650 | struct scatterlist *sg; |
| 650 | dma_addr_t dma_addr; | 651 | dma_addr_t dma_addr; |
| 651 | u64 page_mask; | 652 | u64 page_mask, total_npages; |
| 652 | u32 npages, total_npages, page_size = PAGE_SIZE; | 653 | u32 npages, page_size = PAGE_SIZE; |
| 653 | bool first = true, is_huge_page_opt = true; | 654 | bool first = true, is_huge_page_opt = true; |
| 654 | int rc, i, j; | 655 | int rc, i, j; |
| 655 | 656 | ||
| @@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, | |||
| 691 | 692 | ||
| 692 | page_mask = ~(((u64) page_size) - 1); | 693 | page_mask = ~(((u64) page_size) - 1); |
| 693 | 694 | ||
| 694 | phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL); | 695 | phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64), |
| 696 | GFP_KERNEL); | ||
| 695 | if (!phys_pg_pack->pages) { | 697 | if (!phys_pg_pack->pages) { |
| 696 | rc = -ENOMEM; | 698 | rc = -ENOMEM; |
| 697 | goto page_pack_arr_mem_err; | 699 | goto page_pack_arr_mem_err; |
| @@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, | |||
| 750 | struct hl_vm_phys_pg_pack *phys_pg_pack) | 752 | struct hl_vm_phys_pg_pack *phys_pg_pack) |
| 751 | { | 753 | { |
| 752 | struct hl_device *hdev = ctx->hdev; | 754 | struct hl_device *hdev = ctx->hdev; |
| 753 | u64 next_vaddr = vaddr, paddr; | 755 | u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i; |
| 754 | u32 page_size = phys_pg_pack->page_size; | 756 | u32 page_size = phys_pg_pack->page_size; |
| 755 | int i, rc = 0, mapped_pg_cnt = 0; | 757 | int rc = 0; |
| 756 | 758 | ||
| 757 | for (i = 0 ; i < phys_pg_pack->npages ; i++) { | 759 | for (i = 0 ; i < phys_pg_pack->npages ; i++) { |
| 758 | paddr = phys_pg_pack->pages[i]; | 760 | paddr = phys_pg_pack->pages[i]; |
| @@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, | |||
| 764 | rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size); | 766 | rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size); |
| 765 | if (rc) { | 767 | if (rc) { |
| 766 | dev_err(hdev->dev, | 768 | dev_err(hdev->dev, |
| 767 | "map failed for handle %u, npages: %d, mapped: %d", | 769 | "map failed for handle %u, npages: %llu, mapped: %llu", |
| 768 | phys_pg_pack->handle, phys_pg_pack->npages, | 770 | phys_pg_pack->handle, phys_pg_pack->npages, |
| 769 | mapped_pg_cnt); | 771 | mapped_pg_cnt); |
| 770 | goto err; | 772 | goto err; |
| @@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) | |||
| 985 | struct hl_vm_hash_node *hnode = NULL; | 987 | struct hl_vm_hash_node *hnode = NULL; |
| 986 | struct hl_userptr *userptr = NULL; | 988 | struct hl_userptr *userptr = NULL; |
| 987 | enum vm_type_t *vm_type; | 989 | enum vm_type_t *vm_type; |
| 988 | u64 next_vaddr; | 990 | u64 next_vaddr, i; |
| 989 | u32 page_size; | 991 | u32 page_size; |
| 990 | bool is_userptr; | 992 | bool is_userptr; |
| 991 | int i, rc; | 993 | int rc; |
| 992 | 994 | ||
| 993 | /* protect from double entrance */ | 995 | /* protect from double entrance */ |
| 994 | mutex_lock(&ctx->mem_hash_lock); | 996 | mutex_lock(&ctx->mem_hash_lock); |
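Most of the memory.c changes are plain type widenings, but the allocator switch is worth a comment: the page arrays are sized by a user-controlled page count, and kcalloc() must satisfy the whole request from physically contiguous memory, which becomes unreliable for large mappings. kvmalloc_array() tries kmalloc first and transparently falls back to vmalloc, at the cost of having to free with kvfree(), as the later hunks do. A small sketch of the pairing (helper names are illustrative):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

static u64 *alloc_page_array(u64 npages)
{
	/*
	 * Physically contiguous when small enough, vmalloc-backed otherwise.
	 * __GFP_ZERO keeps the zero-initialisation kcalloc() used to give.
	 */
	return kvmalloc_array(npages, sizeof(u64), GFP_KERNEL | __GFP_ZERO);
}

static void free_page_array(u64 *pages)
{
	kvfree(pages);		/* correct for either backing store */
}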
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c index 2f2e99cb2743..3a5a2cec8305 100644 --- a/drivers/misc/habanalabs/mmu.c +++ b/drivers/misc/habanalabs/mmu.c | |||
| @@ -832,7 +832,7 @@ err: | |||
| 832 | int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size) | 832 | int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size) |
| 833 | { | 833 | { |
| 834 | struct hl_device *hdev = ctx->hdev; | 834 | struct hl_device *hdev = ctx->hdev; |
| 835 | u64 real_virt_addr; | 835 | u64 real_virt_addr, real_phys_addr; |
| 836 | u32 real_page_size, npages; | 836 | u32 real_page_size, npages; |
| 837 | int i, rc, mapped_cnt = 0; | 837 | int i, rc, mapped_cnt = 0; |
| 838 | 838 | ||
| @@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size) | |||
| 857 | 857 | ||
| 858 | npages = page_size / real_page_size; | 858 | npages = page_size / real_page_size; |
| 859 | real_virt_addr = virt_addr; | 859 | real_virt_addr = virt_addr; |
| 860 | real_phys_addr = phys_addr; | ||
| 860 | 861 | ||
| 861 | for (i = 0 ; i < npages ; i++) { | 862 | for (i = 0 ; i < npages ; i++) { |
| 862 | rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr, | 863 | rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr, |
| 863 | real_page_size); | 864 | real_page_size); |
| 864 | if (rc) | 865 | if (rc) |
| 865 | goto err; | 866 | goto err; |
| 866 | 867 | ||
| 867 | real_virt_addr += real_page_size; | 868 | real_virt_addr += real_page_size; |
| 869 | real_phys_addr += real_page_size; | ||
| 868 | mapped_cnt++; | 870 | mapped_cnt++; |
| 869 | } | 871 | } |
| 870 | 872 | ||
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 72428b6bfc47..7b7286b4d81e 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
| @@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
| 1876 | continue; | 1876 | continue; |
| 1877 | } | 1877 | } |
| 1878 | 1878 | ||
| 1879 | if (time_after(jiffies, timeo) && !chip_ready(map, adr)) | 1879 | /* |
| 1880 | * We check "time_after" and "!chip_good" before checking "chip_good" to avoid | ||
| 1881 | * a spurious failure caused by scheduling delays. | ||
| 1882 | */ | ||
| 1883 | if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) | ||
| 1880 | break; | 1884 | break; |
| 1881 | 1885 | ||
| 1882 | if (chip_good(map, adr, datum)) { | 1886 | if (chip_good(map, adr, datum)) { |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 5e4ca082cfcd..7a96d168efc4 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -216,8 +216,8 @@ config GENEVE | |||
| 216 | 216 | ||
| 217 | config GTP | 217 | config GTP |
| 218 | tristate "GPRS Tunneling Protocol datapath (GTP-U)" | 218 | tristate "GPRS Tunneling Protocol datapath (GTP-U)" |
| 219 | depends on INET && NET_UDP_TUNNEL | 219 | depends on INET |
| 220 | select NET_IP_TUNNEL | 220 | select NET_UDP_TUNNEL |
| 221 | ---help--- | 221 | ---help--- |
| 222 | This allows one to create gtp virtual interfaces that provide | 222 | This allows one to create gtp virtual interfaces that provide |
| 223 | the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol | 223 | the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol |
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 2f120b2ffef0..4985268e2273 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c | |||
| @@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count); | |||
| 55 | 55 | ||
| 56 | static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) | 56 | static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) |
| 57 | { | 57 | { |
| 58 | return sprintf(buf, "%pM\n", slave->perm_hwaddr); | 58 | return sprintf(buf, "%*phC\n", |
| 59 | slave->dev->addr_len, | ||
| 60 | slave->perm_hwaddr); | ||
| 59 | } | 61 | } |
| 60 | static SLAVE_ATTR_RO(perm_hwaddr); | 62 | static SLAVE_ATTR_RO(perm_hwaddr); |
| 61 | 63 | ||
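The bonding change swaps the fixed six-byte "%pM" specifier for "%*phC", the variable-length hex dump extension that takes the byte count from the argument list and prints colon-separated hex, so slaves whose hardware addresses are longer than ETH_ALEN (InfiniBand, for example) are shown in full. A hedged usage sketch, not the bonding code itself:

#include <linux/kernel.h>
#include <linux/types.h>

/* buf is a sysfs page; hwaddr holds addr_len bytes. */
static ssize_t format_hwaddr(char *buf, const u8 *hwaddr, int addr_len)
{
	/* "%*phC" prints addr_len bytes as aa:bb:cc:... (up to 64 bytes). */
	return scnprintf(buf, PAGE_SIZE, "%*phC\n", addr_len, hwaddr);
}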
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index dce84a2a65c7..c44b2822e4dd 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c | |||
| @@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
| 427 | return 0; | 427 | return 0; |
| 428 | 428 | ||
| 429 | lane = mv88e6390x_serdes_get_lane(chip, port); | 429 | lane = mv88e6390x_serdes_get_lane(chip, port); |
| 430 | if (lane < 0) | 430 | if (lane < 0 && lane != -ENODEV) |
| 431 | return lane; | 431 | return lane; |
| 432 | 432 | ||
| 433 | if (chip->ports[port].serdes_irq) { | 433 | if (lane >= 0) { |
| 434 | err = mv88e6390_serdes_irq_disable(chip, port, lane); | 434 | if (chip->ports[port].serdes_irq) { |
| 435 | err = mv88e6390_serdes_irq_disable(chip, port, lane); | ||
| 436 | if (err) | ||
| 437 | return err; | ||
| 438 | } | ||
| 439 | |||
| 440 | err = mv88e6390x_serdes_power(chip, port, false); | ||
| 435 | if (err) | 441 | if (err) |
| 436 | return err; | 442 | return err; |
| 437 | } | 443 | } |
| 438 | 444 | ||
| 439 | err = mv88e6390x_serdes_power(chip, port, false); | 445 | chip->ports[port].cmode = 0; |
| 440 | if (err) | ||
| 441 | return err; | ||
| 442 | 446 | ||
| 443 | if (cmode) { | 447 | if (cmode) { |
| 444 | err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); | 448 | err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); |
| @@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
| 452 | if (err) | 456 | if (err) |
| 453 | return err; | 457 | return err; |
| 454 | 458 | ||
| 459 | chip->ports[port].cmode = cmode; | ||
| 460 | |||
| 461 | lane = mv88e6390x_serdes_get_lane(chip, port); | ||
| 462 | if (lane < 0) | ||
| 463 | return lane; | ||
| 464 | |||
| 455 | err = mv88e6390x_serdes_power(chip, port, true); | 465 | err = mv88e6390x_serdes_power(chip, port, true); |
| 456 | if (err) | 466 | if (err) |
| 457 | return err; | 467 | return err; |
| @@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
| 463 | } | 473 | } |
| 464 | } | 474 | } |
| 465 | 475 | ||
| 466 | chip->ports[port].cmode = cmode; | ||
| 467 | |||
| 468 | return 0; | 476 | return 0; |
| 469 | } | 477 | } |
| 470 | 478 | ||
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 576b37d12a63..c4fa400efdcc 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c | |||
| @@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) | |||
| 481 | qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); | 481 | qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); |
| 482 | } | 482 | } |
| 483 | 483 | ||
| 484 | static u32 | ||
| 485 | qca8k_port_to_phy(int port) | ||
| 486 | { | ||
| 487 | /* From Andrew Lunn: | ||
| 488 | * Port 0 has no internal phy. | ||
| 489 | * Port 1 has an internal PHY at MDIO address 0. | ||
| 490 | * Port 2 has an internal PHY at MDIO address 1. | ||
| 491 | * ... | ||
| 492 | * Port 5 has an internal PHY at MDIO address 4. | ||
| 493 | * Port 6 has no internal PHY. | ||
| 494 | */ | ||
| 495 | |||
| 496 | return port - 1; | ||
| 497 | } | ||
| 498 | |||
| 499 | static int | ||
| 500 | qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data) | ||
| 501 | { | ||
| 502 | u32 phy, val; | ||
| 503 | |||
| 504 | if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) | ||
| 505 | return -EINVAL; | ||
| 506 | |||
| 507 | /* the caller is responsible for not passing bad ports, | ||
| 508 | * but we still want to make out-of-range accesses impossible. | ||
| 509 | */ | ||
| 510 | phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; | ||
| 511 | val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | | ||
| 512 | QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | | ||
| 513 | QCA8K_MDIO_MASTER_REG_ADDR(regnum) | | ||
| 514 | QCA8K_MDIO_MASTER_DATA(data); | ||
| 515 | |||
| 516 | qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); | ||
| 517 | |||
| 518 | return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, | ||
| 519 | QCA8K_MDIO_MASTER_BUSY); | ||
| 520 | } | ||
| 521 | |||
| 522 | static int | ||
| 523 | qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum) | ||
| 524 | { | ||
| 525 | u32 phy, val; | ||
| 526 | |||
| 527 | if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) | ||
| 528 | return -EINVAL; | ||
| 529 | |||
| 530 | /* the caller is responsible for not passing bad ports, | ||
| 531 | * but we still want to make out-of-range accesses impossible. | ||
| 532 | */ | ||
| 533 | phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; | ||
| 534 | val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | | ||
| 535 | QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | | ||
| 536 | QCA8K_MDIO_MASTER_REG_ADDR(regnum); | ||
| 537 | |||
| 538 | qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); | ||
| 539 | |||
| 540 | if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, | ||
| 541 | QCA8K_MDIO_MASTER_BUSY)) | ||
| 542 | return -ETIMEDOUT; | ||
| 543 | |||
| 544 | val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) & | ||
| 545 | QCA8K_MDIO_MASTER_DATA_MASK); | ||
| 546 | |||
| 547 | return val; | ||
| 548 | } | ||
| 549 | |||
| 550 | static int | ||
| 551 | qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) | ||
| 552 | { | ||
| 553 | struct qca8k_priv *priv = ds->priv; | ||
| 554 | |||
| 555 | return qca8k_mdio_write(priv, port, regnum, data); | ||
| 556 | } | ||
| 557 | |||
| 558 | static int | ||
| 559 | qca8k_phy_read(struct dsa_switch *ds, int port, int regnum) | ||
| 560 | { | ||
| 561 | struct qca8k_priv *priv = ds->priv; | ||
| 562 | int ret; | ||
| 563 | |||
| 564 | ret = qca8k_mdio_read(priv, port, regnum); | ||
| 565 | |||
| 566 | if (ret < 0) | ||
| 567 | return 0xffff; | ||
| 568 | |||
| 569 | return ret; | ||
| 570 | } | ||
| 571 | |||
| 572 | static int | ||
| 573 | qca8k_setup_mdio_bus(struct qca8k_priv *priv) | ||
| 574 | { | ||
| 575 | u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; | ||
| 576 | struct device_node *ports, *port; | ||
| 577 | int err; | ||
| 578 | |||
| 579 | ports = of_get_child_by_name(priv->dev->of_node, "ports"); | ||
| 580 | if (!ports) | ||
| 581 | return -EINVAL; | ||
| 582 | |||
| 583 | for_each_available_child_of_node(ports, port) { | ||
| 584 | err = of_property_read_u32(port, "reg", ®); | ||
| 585 | if (err) | ||
| 586 | return err; | ||
| 587 | |||
| 588 | if (!dsa_is_user_port(priv->ds, reg)) | ||
| 589 | continue; | ||
| 590 | |||
| 591 | if (of_property_read_bool(port, "phy-handle")) | ||
| 592 | external_mdio_mask |= BIT(reg); | ||
| 593 | else | ||
| 594 | internal_mdio_mask |= BIT(reg); | ||
| 595 | } | ||
| 596 | |||
| 597 | if (!external_mdio_mask && !internal_mdio_mask) { | ||
| 598 | dev_err(priv->dev, "no PHYs are defined.\n"); | ||
| 599 | return -EINVAL; | ||
| 600 | } | ||
| 601 | |||
| 602 | /* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through | ||
| 603 | * the MDIO_MASTER register, also _disconnects_ the external MDC | ||
| 604 | * passthrough to the internal PHYs. It's not possible to use both | ||
| 605 | * configurations at the same time! | ||
| 606 | * | ||
| 607 | * Because this came up during the review process: | ||
| 608 | * If the external mdio-bus driver is capable of magically disabling | ||
| 609 | * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's | ||
| 610 | * accessors for the time being, it would be possible to pull this | ||
| 611 | * off. | ||
| 612 | */ | ||
| 613 | if (!!external_mdio_mask && !!internal_mdio_mask) { | ||
| 614 | dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n"); | ||
| 615 | return -EINVAL; | ||
| 616 | } | ||
| 617 | |||
| 618 | if (external_mdio_mask) { | ||
| 619 | /* Make sure to disable the internal mdio bus in case | ||
| 620 | * a dt-overlay and driver reload changed the configuration | ||
| 621 | */ | ||
| 622 | |||
| 623 | qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, | ||
| 624 | QCA8K_MDIO_MASTER_EN); | ||
| 625 | return 0; | ||
| 626 | } | ||
| 627 | |||
| 628 | priv->ops.phy_read = qca8k_phy_read; | ||
| 629 | priv->ops.phy_write = qca8k_phy_write; | ||
| 630 | return 0; | ||
| 631 | } | ||
| 632 | |||
| 484 | static int | 633 | static int |
| 485 | qca8k_setup(struct dsa_switch *ds) | 634 | qca8k_setup(struct dsa_switch *ds) |
| 486 | { | 635 | { |
| @@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds) | |||
| 502 | if (IS_ERR(priv->regmap)) | 651 | if (IS_ERR(priv->regmap)) |
| 503 | pr_warn("regmap initialization failed"); | 652 | pr_warn("regmap initialization failed"); |
| 504 | 653 | ||
| 654 | ret = qca8k_setup_mdio_bus(priv); | ||
| 655 | if (ret) | ||
| 656 | return ret; | ||
| 657 | |||
| 505 | /* Initialize CPU port pad mode (xMII type, delays...) */ | 658 | /* Initialize CPU port pad mode (xMII type, delays...) */ |
| 506 | phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); | 659 | phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); |
| 507 | if (phy_mode < 0) { | 660 | if (phy_mode < 0) { |
| @@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) | |||
| 624 | qca8k_port_set_status(priv, port, 1); | 777 | qca8k_port_set_status(priv, port, 1); |
| 625 | } | 778 | } |
| 626 | 779 | ||
| 627 | static int | ||
| 628 | qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) | ||
| 629 | { | ||
| 630 | struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; | ||
| 631 | |||
| 632 | return mdiobus_read(priv->bus, phy, regnum); | ||
| 633 | } | ||
| 634 | |||
| 635 | static int | ||
| 636 | qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) | ||
| 637 | { | ||
| 638 | struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; | ||
| 639 | |||
| 640 | return mdiobus_write(priv->bus, phy, regnum, val); | ||
| 641 | } | ||
| 642 | |||
| 643 | static void | 780 | static void |
| 644 | qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) | 781 | qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) |
| 645 | { | 782 | { |
| @@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = { | |||
| 879 | .setup = qca8k_setup, | 1016 | .setup = qca8k_setup, |
| 880 | .adjust_link = qca8k_adjust_link, | 1017 | .adjust_link = qca8k_adjust_link, |
| 881 | .get_strings = qca8k_get_strings, | 1018 | .get_strings = qca8k_get_strings, |
| 882 | .phy_read = qca8k_phy_read, | ||
| 883 | .phy_write = qca8k_phy_write, | ||
| 884 | .get_ethtool_stats = qca8k_get_ethtool_stats, | 1019 | .get_ethtool_stats = qca8k_get_ethtool_stats, |
| 885 | .get_sset_count = qca8k_get_sset_count, | 1020 | .get_sset_count = qca8k_get_sset_count, |
| 886 | .get_mac_eee = qca8k_get_mac_eee, | 1021 | .get_mac_eee = qca8k_get_mac_eee, |
| @@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev) | |||
| 923 | return -ENOMEM; | 1058 | return -ENOMEM; |
| 924 | 1059 | ||
| 925 | priv->ds->priv = priv; | 1060 | priv->ds->priv = priv; |
| 926 | priv->ds->ops = &qca8k_switch_ops; | 1061 | priv->ops = qca8k_switch_ops; |
| 1062 | priv->ds->ops = &priv->ops; | ||
| 927 | mutex_init(&priv->reg_mutex); | 1063 | mutex_init(&priv->reg_mutex); |
| 928 | dev_set_drvdata(&mdiodev->dev, priv); | 1064 | dev_set_drvdata(&mdiodev->dev, priv); |
| 929 | 1065 | ||
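The probe-time part of the qca8k change is the generic pattern: dsa_switch_ops used to be a single shared const table, so .phy_read/.phy_write could not be installed per device. Copying the template into priv->ops and pointing ds->ops at that copy lets qca8k_setup_mdio_bus() decide for each switch whether the internal-MDIO accessors get wired up. A sketch of that per-instance ops override, with invented types and names:

#include <linux/types.h>

struct widget;

struct widget_ops {
	int (*read)(struct widget *w, int reg);
	int (*write)(struct widget *w, int reg, u16 val);
};

/* Stubs standing in for the bus-specific accessors. */
static int widget_internal_read(struct widget *w, int reg) { return 0; }
static int widget_internal_write(struct widget *w, int reg, u16 val) { return 0; }

/* Shared template: accessors left NULL so the core falls back to a generic path. */
static const struct widget_ops widget_template_ops = { };

struct widget {
	struct widget_ops ops;			/* writable per-instance copy */
	const struct widget_ops *active_ops;
};

static void widget_setup(struct widget *w, bool internal_bus)
{
	w->ops = widget_template_ops;		/* start from the template */
	if (internal_bus) {
		w->ops.read  = widget_internal_read;
		w->ops.write = widget_internal_write;
	}
	w->active_ops = &w->ops;		/* the core now uses this instance's copy */
}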
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index d146e54c8a6c..249fd62268e5 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h | |||
| @@ -49,6 +49,18 @@ | |||
| 49 | #define QCA8K_MIB_FLUSH BIT(24) | 49 | #define QCA8K_MIB_FLUSH BIT(24) |
| 50 | #define QCA8K_MIB_CPU_KEEP BIT(20) | 50 | #define QCA8K_MIB_CPU_KEEP BIT(20) |
| 51 | #define QCA8K_MIB_BUSY BIT(17) | 51 | #define QCA8K_MIB_BUSY BIT(17) |
| 52 | #define QCA8K_MDIO_MASTER_CTRL 0x3c | ||
| 53 | #define QCA8K_MDIO_MASTER_BUSY BIT(31) | ||
| 54 | #define QCA8K_MDIO_MASTER_EN BIT(30) | ||
| 55 | #define QCA8K_MDIO_MASTER_READ BIT(27) | ||
| 56 | #define QCA8K_MDIO_MASTER_WRITE 0 | ||
| 57 | #define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) | ||
| 58 | #define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21) | ||
| 59 | #define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16) | ||
| 60 | #define QCA8K_MDIO_MASTER_DATA(x) (x) | ||
| 61 | #define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) | ||
| 62 | #define QCA8K_MDIO_MASTER_MAX_PORTS 5 | ||
| 63 | #define QCA8K_MDIO_MASTER_MAX_REG 32 | ||
| 52 | #define QCA8K_GOL_MAC_ADDR0 0x60 | 64 | #define QCA8K_GOL_MAC_ADDR0 0x60 |
| 53 | #define QCA8K_GOL_MAC_ADDR1 0x64 | 65 | #define QCA8K_GOL_MAC_ADDR1 0x64 |
| 54 | #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) | 66 | #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) |
| @@ -169,6 +181,7 @@ struct qca8k_priv { | |||
| 169 | struct dsa_switch *ds; | 181 | struct dsa_switch *ds; |
| 170 | struct mutex reg_mutex; | 182 | struct mutex reg_mutex; |
| 171 | struct device *dev; | 183 | struct device *dev; |
| 184 | struct dsa_switch_ops ops; | ||
| 172 | }; | 185 | }; |
| 173 | 186 | ||
| 174 | struct qca8k_mib_desc { | 187 | struct qca8k_mib_desc { |
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 808abb6b3671..b15752267c8d 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c | |||
| @@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev) | |||
| 1521 | static void set_rx_mode(struct net_device *dev) | 1521 | static void set_rx_mode(struct net_device *dev) |
| 1522 | { | 1522 | { |
| 1523 | int ioaddr = dev->base_addr; | 1523 | int ioaddr = dev->base_addr; |
| 1524 | short new_mode; | 1524 | unsigned short new_mode; |
| 1525 | 1525 | ||
| 1526 | if (dev->flags & IFF_PROMISC) { | 1526 | if (dev->flags & IFF_PROMISC) { |
| 1527 | if (corkscrew_debug > 3) | 1527 | if (corkscrew_debug > 3) |
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c index 342ae08ec3c2..d60a86aa8aa8 100644 --- a/drivers/net/ethernet/8390/mac8390.c +++ b/drivers/net/ethernet/8390/mac8390.c | |||
| @@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count, | |||
| 153 | static void dayna_block_output(struct net_device *dev, int count, | 153 | static void dayna_block_output(struct net_device *dev, int count, |
| 154 | const unsigned char *buf, int start_page); | 154 | const unsigned char *buf, int start_page); |
| 155 | 155 | ||
| 156 | #define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) | ||
| 157 | |||
| 158 | /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ | 156 | /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ |
| 159 | static void slow_sane_get_8390_hdr(struct net_device *dev, | 157 | static void slow_sane_get_8390_hdr(struct net_device *dev, |
| 160 | struct e8390_pkt_hdr *hdr, int ring_page); | 158 | struct e8390_pkt_hdr *hdr, int ring_page); |
| @@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres) | |||
| 233 | 231 | ||
| 234 | static enum mac8390_access mac8390_testio(unsigned long membase) | 232 | static enum mac8390_access mac8390_testio(unsigned long membase) |
| 235 | { | 233 | { |
| 236 | unsigned long outdata = 0xA5A0B5B0; | 234 | u32 outdata = 0xA5A0B5B0; |
| 237 | unsigned long indata = 0x00000000; | 235 | u32 indata = 0; |
| 236 | |||
| 238 | /* Try writing 32 bits */ | 237 | /* Try writing 32 bits */ |
| 239 | memcpy_toio((void __iomem *)membase, &outdata, 4); | 238 | nubus_writel(outdata, membase); |
| 240 | /* Now compare them */ | 239 | /* Now read it back */ |
| 241 | if (memcmp_withio(&outdata, membase, 4) == 0) | 240 | indata = nubus_readl(membase); |
| 241 | if (outdata == indata) | ||
| 242 | return ACCESS_32; | 242 | return ACCESS_32; |
| 243 | |||
| 244 | outdata = 0xC5C0D5D0; | ||
| 245 | indata = 0; | ||
| 246 | |||
| 243 | /* Write 16 bit output */ | 247 | /* Write 16 bit output */ |
| 244 | word_memcpy_tocard(membase, &outdata, 4); | 248 | word_memcpy_tocard(membase, &outdata, 4); |
| 245 | /* Now read it back */ | 249 | /* Now read it back */ |
| 246 | word_memcpy_fromcard(&indata, membase, 4); | 250 | word_memcpy_fromcard(&indata, membase, 4); |
| 247 | if (outdata == indata) | 251 | if (outdata == indata) |
| 248 | return ACCESS_16; | 252 | return ACCESS_16; |
| 253 | |||
| 249 | return ACCESS_UNKNOWN; | 254 | return ACCESS_UNKNOWN; |
| 250 | } | 255 | } |
| 251 | 256 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 74550ccc7a20..e2ffb159cbe2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |||
| @@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self, | |||
| 186 | } | 186 | } |
| 187 | if (buff->is_ip_cso) { | 187 | if (buff->is_ip_cso) { |
| 188 | __skb_incr_checksum_unnecessary(skb); | 188 | __skb_incr_checksum_unnecessary(skb); |
| 189 | if (buff->is_udp_cso || buff->is_tcp_cso) | ||
| 190 | __skb_incr_checksum_unnecessary(skb); | ||
| 191 | } else { | 189 | } else { |
| 192 | skb->ip_summed = CHECKSUM_NONE; | 190 | skb->ip_summed = CHECKSUM_NONE; |
| 193 | } | 191 | } |
| 192 | |||
| 193 | if (buff->is_udp_cso || buff->is_tcp_cso) | ||
| 194 | __skb_incr_checksum_unnecessary(skb); | ||
| 194 | } | 195 | } |
| 195 | 196 | ||
| 196 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) | 197 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0bb9d7b3a2b6..4c586ba4364b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, | |||
| 1133 | tpa_info = &rxr->rx_tpa[agg_id]; | 1133 | tpa_info = &rxr->rx_tpa[agg_id]; |
| 1134 | 1134 | ||
| 1135 | if (unlikely(cons != rxr->rx_next_cons)) { | 1135 | if (unlikely(cons != rxr->rx_next_cons)) { |
| 1136 | netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", | ||
| 1137 | cons, rxr->rx_next_cons); | ||
| 1136 | bnxt_sched_reset(bp, rxr); | 1138 | bnxt_sched_reset(bp, rxr); |
| 1137 | return; | 1139 | return; |
| 1138 | } | 1140 | } |
| @@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, | |||
| 1585 | } | 1587 | } |
| 1586 | 1588 | ||
| 1587 | cons = rxcmp->rx_cmp_opaque; | 1589 | cons = rxcmp->rx_cmp_opaque; |
| 1588 | rx_buf = &rxr->rx_buf_ring[cons]; | ||
| 1589 | data = rx_buf->data; | ||
| 1590 | data_ptr = rx_buf->data_ptr; | ||
| 1591 | if (unlikely(cons != rxr->rx_next_cons)) { | 1590 | if (unlikely(cons != rxr->rx_next_cons)) { |
| 1592 | int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); | 1591 | int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); |
| 1593 | 1592 | ||
| 1593 | netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", | ||
| 1594 | cons, rxr->rx_next_cons); | ||
| 1594 | bnxt_sched_reset(bp, rxr); | 1595 | bnxt_sched_reset(bp, rxr); |
| 1595 | return rc1; | 1596 | return rc1; |
| 1596 | } | 1597 | } |
| 1598 | rx_buf = &rxr->rx_buf_ring[cons]; | ||
| 1599 | data = rx_buf->data; | ||
| 1600 | data_ptr = rx_buf->data_ptr; | ||
| 1597 | prefetch(data_ptr); | 1601 | prefetch(data_ptr); |
| 1598 | 1602 | ||
| 1599 | misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); | 1603 | misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); |
| @@ -1610,11 +1614,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, | |||
| 1610 | 1614 | ||
| 1611 | rx_buf->data = NULL; | 1615 | rx_buf->data = NULL; |
| 1612 | if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { | 1616 | if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { |
| 1617 | u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); | ||
| 1618 | |||
| 1613 | bnxt_reuse_rx_data(rxr, cons, data); | 1619 | bnxt_reuse_rx_data(rxr, cons, data); |
| 1614 | if (agg_bufs) | 1620 | if (agg_bufs) |
| 1615 | bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); | 1621 | bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); |
| 1616 | 1622 | ||
| 1617 | rc = -EIO; | 1623 | rc = -EIO; |
| 1624 | if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { | ||
| 1625 | netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); | ||
| 1626 | bnxt_sched_reset(bp, rxr); | ||
| 1627 | } | ||
| 1618 | goto next_rx; | 1628 | goto next_rx; |
| 1619 | } | 1629 | } |
| 1620 | 1630 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 328373e0578f..060a6f386104 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp) | |||
| 4283 | pci_set_power_state(tp->pdev, PCI_D3hot); | 4283 | pci_set_power_state(tp->pdev, PCI_D3hot); |
| 4284 | } | 4284 | } |
| 4285 | 4285 | ||
| 4286 | static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) | 4286 | static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) |
| 4287 | { | 4287 | { |
| 4288 | switch (val & MII_TG3_AUX_STAT_SPDMASK) { | 4288 | switch (val & MII_TG3_AUX_STAT_SPDMASK) { |
| 4289 | case MII_TG3_AUX_STAT_10HALF: | 4289 | case MII_TG3_AUX_STAT_10HALF: |
| @@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) | |||
| 4787 | bool current_link_up; | 4787 | bool current_link_up; |
| 4788 | u32 bmsr, val; | 4788 | u32 bmsr, val; |
| 4789 | u32 lcl_adv, rmt_adv; | 4789 | u32 lcl_adv, rmt_adv; |
| 4790 | u16 current_speed; | 4790 | u32 current_speed; |
| 4791 | u8 current_duplex; | 4791 | u8 current_duplex; |
| 4792 | int i, err; | 4792 | int i, err; |
| 4793 | 4793 | ||
| @@ -5719,7 +5719,7 @@ out: | |||
| 5719 | static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) | 5719 | static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) |
| 5720 | { | 5720 | { |
| 5721 | u32 orig_pause_cfg; | 5721 | u32 orig_pause_cfg; |
| 5722 | u16 orig_active_speed; | 5722 | u32 orig_active_speed; |
| 5723 | u8 orig_active_duplex; | 5723 | u8 orig_active_duplex; |
| 5724 | u32 mac_status; | 5724 | u32 mac_status; |
| 5725 | bool current_link_up; | 5725 | bool current_link_up; |
| @@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) | |||
| 5823 | { | 5823 | { |
| 5824 | int err = 0; | 5824 | int err = 0; |
| 5825 | u32 bmsr, bmcr; | 5825 | u32 bmsr, bmcr; |
| 5826 | u16 current_speed = SPEED_UNKNOWN; | 5826 | u32 current_speed = SPEED_UNKNOWN; |
| 5827 | u8 current_duplex = DUPLEX_UNKNOWN; | 5827 | u8 current_duplex = DUPLEX_UNKNOWN; |
| 5828 | bool current_link_up = false; | 5828 | bool current_link_up = false; |
| 5829 | u32 local_adv, remote_adv, sgsr; | 5829 | u32 local_adv, remote_adv, sgsr; |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index a772a33b685c..6953d0546acb 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
| @@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info { | |||
| 2873 | struct tg3_link_config { | 2873 | struct tg3_link_config { |
| 2874 | /* Describes what we're trying to get. */ | 2874 | /* Describes what we're trying to get. */ |
| 2875 | u32 advertising; | 2875 | u32 advertising; |
| 2876 | u16 speed; | 2876 | u32 speed; |
| 2877 | u8 duplex; | 2877 | u8 duplex; |
| 2878 | u8 autoneg; | 2878 | u8 autoneg; |
| 2879 | u8 flowctrl; | 2879 | u8 flowctrl; |
| @@ -2882,7 +2882,7 @@ struct tg3_link_config { | |||
| 2882 | u8 active_flowctrl; | 2882 | u8 active_flowctrl; |
| 2883 | 2883 | ||
| 2884 | u8 active_duplex; | 2884 | u8 active_duplex; |
| 2885 | u16 active_speed; | 2885 | u32 active_speed; |
| 2886 | u32 rmt_adv; | 2886 | u32 rmt_adv; |
| 2887 | }; | 2887 | }; |
| 2888 | 2888 | ||
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index ad099fd01b45..3da2795e2486 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
| @@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue) | |||
| 898 | 898 | ||
| 899 | /* First, update TX stats if needed */ | 899 | /* First, update TX stats if needed */ |
| 900 | if (skb) { | 900 | if (skb) { |
| 901 | if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { | 901 | if (unlikely(skb_shinfo(skb)->tx_flags & |
| 902 | SKBTX_HW_TSTAMP) && | ||
| 903 | gem_ptp_do_txstamp(queue, skb, desc) == 0) { | ||
| 902 | /* skb now belongs to timestamp buffer | 904 | /* skb now belongs to timestamp buffer |
| 903 | * and will be removed later | 905 | * and will be removed later |
| 904 | */ | 906 | */ |
| @@ -3370,14 +3372,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, | |||
| 3370 | *hclk = devm_clk_get(&pdev->dev, "hclk"); | 3372 | *hclk = devm_clk_get(&pdev->dev, "hclk"); |
| 3371 | } | 3373 | } |
| 3372 | 3374 | ||
| 3373 | if (IS_ERR(*pclk)) { | 3375 | if (IS_ERR_OR_NULL(*pclk)) { |
| 3374 | err = PTR_ERR(*pclk); | 3376 | err = PTR_ERR(*pclk); |
| 3377 | if (!err) | ||
| 3378 | err = -ENODEV; | ||
| 3379 | |||
| 3375 | dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); | 3380 | dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); |
| 3376 | return err; | 3381 | return err; |
| 3377 | } | 3382 | } |
| 3378 | 3383 | ||
| 3379 | if (IS_ERR(*hclk)) { | 3384 | if (IS_ERR_OR_NULL(*hclk)) { |
| 3380 | err = PTR_ERR(*hclk); | 3385 | err = PTR_ERR(*hclk); |
| 3386 | if (!err) | ||
| 3387 | err = -ENODEV; | ||
| 3388 | |||
| 3381 | dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); | 3389 | dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); |
| 3382 | return err; | 3390 | return err; |
| 3383 | } | 3391 | } |
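
The macb hunks above switch the clock checks from IS_ERR() to IS_ERR_OR_NULL() and map a NULL clock to -ENODEV, since PTR_ERR(NULL) evaluates to 0 and the error path would otherwise return success. A minimal sketch of that pattern, assuming kernel context (linux/clk.h, linux/err.h); the foo_* names are placeholders, not taken from the driver:

    /* Sketch only: the IS_ERR_OR_NULL + PTR_ERR fallback used above. */
    static int foo_get_clock(struct device *dev, struct clk **clk)
    {
        *clk = devm_clk_get(dev, "foo_clk");
        if (IS_ERR_OR_NULL(*clk)) {
            int err = PTR_ERR(*clk);    /* 0 when *clk is NULL */

            if (!err)
                err = -ENODEV;          /* don't report a NULL clock as success */
            dev_err(dev, "failed to get foo_clk (%d)\n", err);
            return err;
        }
        return 0;
    }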
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index aa2be4807191..28eac9056211 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
| @@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev) | |||
| 1328 | struct nicvf_cq_poll *cq_poll = NULL; | 1328 | struct nicvf_cq_poll *cq_poll = NULL; |
| 1329 | union nic_mbx mbx = {}; | 1329 | union nic_mbx mbx = {}; |
| 1330 | 1330 | ||
| 1331 | cancel_delayed_work_sync(&nic->link_change_work); | ||
| 1332 | |||
| 1333 | /* wait till all queued set_rx_mode tasks completes */ | 1331 | /* wait till all queued set_rx_mode tasks completes */ |
| 1334 | drain_workqueue(nic->nicvf_rx_mode_wq); | 1332 | if (nic->nicvf_rx_mode_wq) { |
| 1333 | cancel_delayed_work_sync(&nic->link_change_work); | ||
| 1334 | drain_workqueue(nic->nicvf_rx_mode_wq); | ||
| 1335 | } | ||
| 1335 | 1336 | ||
| 1336 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; | 1337 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; |
| 1337 | nicvf_send_msg_to_pf(nic, &mbx); | 1338 | nicvf_send_msg_to_pf(nic, &mbx); |
| @@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev) | |||
| 1452 | struct nicvf_cq_poll *cq_poll = NULL; | 1453 | struct nicvf_cq_poll *cq_poll = NULL; |
| 1453 | 1454 | ||
| 1454 | /* wait till all queued set_rx_mode tasks completes if any */ | 1455 | /* wait till all queued set_rx_mode tasks completes if any */ |
| 1455 | drain_workqueue(nic->nicvf_rx_mode_wq); | 1456 | if (nic->nicvf_rx_mode_wq) |
| 1457 | drain_workqueue(nic->nicvf_rx_mode_wq); | ||
| 1456 | 1458 | ||
| 1457 | netif_carrier_off(netdev); | 1459 | netif_carrier_off(netdev); |
| 1458 | 1460 | ||
| @@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev) | |||
| 1550 | /* Send VF config done msg to PF */ | 1552 | /* Send VF config done msg to PF */ |
| 1551 | nicvf_send_cfg_done(nic); | 1553 | nicvf_send_cfg_done(nic); |
| 1552 | 1554 | ||
| 1553 | INIT_DELAYED_WORK(&nic->link_change_work, | 1555 | if (nic->nicvf_rx_mode_wq) { |
| 1554 | nicvf_link_status_check_task); | 1556 | INIT_DELAYED_WORK(&nic->link_change_work, |
| 1555 | queue_delayed_work(nic->nicvf_rx_mode_wq, | 1557 | nicvf_link_status_check_task); |
| 1556 | &nic->link_change_work, 0); | 1558 | queue_delayed_work(nic->nicvf_rx_mode_wq, |
| 1559 | &nic->link_change_work, 0); | ||
| 1560 | } | ||
| 1557 | 1561 | ||
| 1558 | return 0; | 1562 | return 0; |
| 1559 | cleanup: | 1563 | cleanup: |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 5b4d3badcb73..e246f9733bb8 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
| @@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic, | |||
| 105 | /* Check if page can be recycled */ | 105 | /* Check if page can be recycled */ |
| 106 | if (page) { | 106 | if (page) { |
| 107 | ref_count = page_ref_count(page); | 107 | ref_count = page_ref_count(page); |
| 108 | /* Check if this page has been used once i.e 'put_page' | 108 | /* This page can be recycled if internal ref_count and page's |
| 109 | * called after packet transmission i.e internal ref_count | 109 | * ref_count are equal, indicating that the page has been used |
| 110 | * and page's ref_count are equal i.e page can be recycled. | 110 | * once for packet transmission. For non-XDP mode, internal |
| 111 | * ref_count is always '1'. | ||
| 111 | */ | 112 | */ |
| 112 | if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) | 113 | if (rbdr->is_xdp) { |
| 113 | pgcache->ref_count--; | 114 | if (ref_count == pgcache->ref_count) |
| 114 | else | 115 | pgcache->ref_count--; |
| 115 | page = NULL; | 116 | else |
| 116 | 117 | page = NULL; | |
| 117 | /* In non-XDP mode, page's ref_count needs to be '1' for it | 118 | } else if (ref_count != 1) { |
| 118 | * to be recycled. | ||
| 119 | */ | ||
| 120 | if (!rbdr->is_xdp && (ref_count != 1)) | ||
| 121 | page = NULL; | 119 | page = NULL; |
| 120 | } | ||
| 122 | } | 121 | } |
| 123 | 122 | ||
| 124 | if (!page) { | 123 | if (!page) { |
| @@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) | |||
| 365 | while (head < rbdr->pgcnt) { | 364 | while (head < rbdr->pgcnt) { |
| 366 | pgcache = &rbdr->pgcache[head]; | 365 | pgcache = &rbdr->pgcache[head]; |
| 367 | if (pgcache->page && page_ref_count(pgcache->page) != 0) { | 366 | if (pgcache->page && page_ref_count(pgcache->page) != 0) { |
| 368 | if (!rbdr->is_xdp) { | 367 | if (rbdr->is_xdp) { |
| 369 | put_page(pgcache->page); | 368 | page_ref_sub(pgcache->page, |
| 370 | continue; | 369 | pgcache->ref_count - 1); |
| 371 | } | 370 | } |
| 372 | page_ref_sub(pgcache->page, pgcache->ref_count - 1); | ||
| 373 | put_page(pgcache->page); | 371 | put_page(pgcache->page); |
| 374 | } | 372 | } |
| 375 | head++; | 373 | head++; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 3130b43bba52..02959035ed3f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | |||
| @@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset) | |||
| 2620 | } | 2620 | } |
| 2621 | 2621 | ||
| 2622 | /* should never happen! */ | 2622 | /* should never happen! */ |
| 2623 | BUG_ON(1); | 2623 | BUG(); |
| 2624 | return NULL; | 2624 | return NULL; |
| 2625 | } | 2625 | } |
| 2626 | 2626 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 88773ca58e6b..b3da81e90132 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
| @@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter, | |||
| 476 | break; | 476 | break; |
| 477 | 477 | ||
| 478 | default: | 478 | default: |
| 479 | BUG_ON(1); | 479 | BUG(); |
| 480 | } | 480 | } |
| 481 | 481 | ||
| 482 | return buf_size; | 482 | return buf_size; |
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 74849be5f004..e2919005ead3 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | |||
| @@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, | |||
| 354 | ppmax = max; | 354 | ppmax = max; |
| 355 | 355 | ||
| 356 | /* pool size must be multiple of unsigned long */ | 356 | /* pool size must be multiple of unsigned long */ |
| 357 | bmap = BITS_TO_LONGS(ppmax); | 357 | bmap = ppmax / BITS_PER_TYPE(unsigned long); |
| 358 | if (!bmap) | ||
| 359 | return NULL; | ||
| 360 | |||
| 358 | ppmax = (bmap * sizeof(unsigned long)) << 3; | 361 | ppmax = (bmap * sizeof(unsigned long)) << 3; |
| 359 | 362 | ||
| 360 | alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; | 363 | alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; |
| @@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, | |||
| 402 | if (reserve_factor) { | 405 | if (reserve_factor) { |
| 403 | ppmax_pool = ppmax / reserve_factor; | 406 | ppmax_pool = ppmax / reserve_factor; |
| 404 | pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); | 407 | pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); |
| 408 | if (!pool) { | ||
| 409 | ppmax_pool = 0; | ||
| 410 | reserve_factor = 0; | ||
| 411 | } | ||
| 405 | 412 | ||
| 406 | pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", | 413 | pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", |
| 407 | ndev->name, ppmax, ppmax_pool, pool_index_max); | 414 | ndev->name, ppmax, ppmax_pool, pool_index_max); |
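
The libcxgb_ppm hunks above change the per-cpu pool sizing from BITS_TO_LONGS(ppmax), which rounds up, to ppmax / BITS_PER_TYPE(unsigned long), which rounds down and can be zero, and teach the caller to drop the per-cpu pool when the allocation is skipped. A small runnable comparison of the two sizings (64-bit long assumed):

    #include <stdio.h>

    #define BITS_PER_TYPE(t)  (sizeof(t) * 8)
    #define BITS_TO_LONGS(n)  (((n) + BITS_PER_TYPE(long) - 1) / BITS_PER_TYPE(long))

    int main(void)
    {
        unsigned int ppmax = 40;    /* fewer entries than bits in one long */

        /* Old sizing rounded up: 40 entries still claimed a whole long. */
        printf("round up:   %zu longs\n", (size_t)BITS_TO_LONGS(ppmax));
        /* New sizing rounds down to whole longs; 0 now means "no pool". */
        printf("round down: %zu longs\n",
               (size_t)(ppmax / BITS_PER_TYPE(unsigned long)));
        return 0;
    }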
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 2ba49e959c3f..dc339dc1adb2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | |||
| @@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) | |||
| 815 | */ | 815 | */ |
| 816 | queue_mapping = skb_get_queue_mapping(skb); | 816 | queue_mapping = skb_get_queue_mapping(skb); |
| 817 | fq = &priv->fq[queue_mapping]; | 817 | fq = &priv->fq[queue_mapping]; |
| 818 | |||
| 819 | fd_len = dpaa2_fd_get_len(&fd); | ||
| 820 | nq = netdev_get_tx_queue(net_dev, queue_mapping); | ||
| 821 | netdev_tx_sent_queue(nq, fd_len); | ||
| 822 | |||
| 823 | /* Everything that happens after this enqueues might race with | ||
| 824 | * the Tx confirmation callback for this frame | ||
| 825 | */ | ||
| 818 | for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { | 826 | for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { |
| 819 | err = priv->enqueue(priv, fq, &fd, 0); | 827 | err = priv->enqueue(priv, fq, &fd, 0); |
| 820 | if (err != -EBUSY) | 828 | if (err != -EBUSY) |
| @@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) | |||
| 825 | percpu_stats->tx_errors++; | 833 | percpu_stats->tx_errors++; |
| 826 | /* Clean up everything, including freeing the skb */ | 834 | /* Clean up everything, including freeing the skb */ |
| 827 | free_tx_fd(priv, fq, &fd, false); | 835 | free_tx_fd(priv, fq, &fd, false); |
| 836 | netdev_tx_completed_queue(nq, 1, fd_len); | ||
| 828 | } else { | 837 | } else { |
| 829 | fd_len = dpaa2_fd_get_len(&fd); | ||
| 830 | percpu_stats->tx_packets++; | 838 | percpu_stats->tx_packets++; |
| 831 | percpu_stats->tx_bytes += fd_len; | 839 | percpu_stats->tx_bytes += fd_len; |
| 832 | |||
| 833 | nq = netdev_get_tx_queue(net_dev, queue_mapping); | ||
| 834 | netdev_tx_sent_queue(nq, fd_len); | ||
| 835 | } | 840 | } |
| 836 | 841 | ||
| 837 | return NETDEV_TX_OK; | 842 | return NETDEV_TX_OK; |
| @@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, | |||
| 1817 | dpaa2_fd_set_format(&fd, dpaa2_fd_single); | 1822 | dpaa2_fd_set_format(&fd, dpaa2_fd_single); |
| 1818 | dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); | 1823 | dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); |
| 1819 | 1824 | ||
| 1820 | fq = &priv->fq[smp_processor_id()]; | 1825 | fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)]; |
| 1821 | for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { | 1826 | for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { |
| 1822 | err = priv->enqueue(priv, fq, &fd, 0); | 1827 | err = priv->enqueue(priv, fq, &fd, 0); |
| 1823 | if (err != -EBUSY) | 1828 | if (err != -EBUSY) |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 79d03f8ee7b1..c7fa97a7e1f4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c | |||
| @@ -150,7 +150,6 @@ out_buffer_fail: | |||
| 150 | /* free desc along with its attached buffer */ | 150 | /* free desc along with its attached buffer */ |
| 151 | static void hnae_free_desc(struct hnae_ring *ring) | 151 | static void hnae_free_desc(struct hnae_ring *ring) |
| 152 | { | 152 | { |
| 153 | hnae_free_buffers(ring); | ||
| 154 | dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, | 153 | dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, |
| 155 | ring->desc_num * sizeof(ring->desc[0]), | 154 | ring->desc_num * sizeof(ring->desc[0]), |
| 156 | ring_to_dma_dir(ring)); | 155 | ring_to_dma_dir(ring)); |
| @@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring) | |||
| 183 | /* fini ring, also free the buffer for the ring */ | 182 | /* fini ring, also free the buffer for the ring */ |
| 184 | static void hnae_fini_ring(struct hnae_ring *ring) | 183 | static void hnae_fini_ring(struct hnae_ring *ring) |
| 185 | { | 184 | { |
| 185 | if (is_rx_ring(ring)) | ||
| 186 | hnae_free_buffers(ring); | ||
| 187 | |||
| 186 | hnae_free_desc(ring); | 188 | hnae_free_desc(ring); |
| 187 | kfree(ring->desc_cb); | 189 | kfree(ring->desc_cb); |
| 188 | ring->desc_cb = NULL; | 190 | ring->desc_cb = NULL; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 08a750fb60c4..d6fb83437230 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
| @@ -357,7 +357,7 @@ struct hnae_buf_ops { | |||
| 357 | }; | 357 | }; |
| 358 | 358 | ||
| 359 | struct hnae_queue { | 359 | struct hnae_queue { |
| 360 | void __iomem *io_base; | 360 | u8 __iomem *io_base; |
| 361 | phys_addr_t phy_base; | 361 | phys_addr_t phy_base; |
| 362 | struct hnae_ae_dev *dev; /* the device who use this queue */ | 362 | struct hnae_ae_dev *dev; /* the device who use this queue */ |
| 363 | struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; | 363 | struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a97228c93831..6c0507921623 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | |||
| @@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn) | |||
| 370 | static void hns_mac_param_get(struct mac_params *param, | 370 | static void hns_mac_param_get(struct mac_params *param, |
| 371 | struct hns_mac_cb *mac_cb) | 371 | struct hns_mac_cb *mac_cb) |
| 372 | { | 372 | { |
| 373 | param->vaddr = (void *)mac_cb->vaddr; | 373 | param->vaddr = mac_cb->vaddr; |
| 374 | param->mac_mode = hns_get_enet_interface(mac_cb); | 374 | param->mac_mode = hns_get_enet_interface(mac_cb); |
| 375 | ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); | 375 | ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); |
| 376 | param->mac_id = mac_cb->mac_id; | 376 | param->mac_id = mac_cb->mac_id; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index fbc75341bef7..22589799f1a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | |||
| @@ -187,7 +187,7 @@ struct mac_statistics { | |||
| 187 | /*mac para struct ,mac get param from nic or dsaf when initialize*/ | 187 | /*mac para struct ,mac get param from nic or dsaf when initialize*/ |
| 188 | struct mac_params { | 188 | struct mac_params { |
| 189 | char addr[ETH_ALEN]; | 189 | char addr[ETH_ALEN]; |
| 190 | void *vaddr; /*virtual address*/ | 190 | u8 __iomem *vaddr; /*virtual address*/ |
| 191 | struct device *dev; | 191 | struct device *dev; |
| 192 | u8 mac_id; | 192 | u8 mac_id; |
| 193 | /**< Ethernet operation mode (MAC-PHY interface and speed) */ | 193 | /**< Ethernet operation mode (MAC-PHY interface and speed) */ |
| @@ -402,7 +402,7 @@ struct mac_driver { | |||
| 402 | enum mac_mode mac_mode; | 402 | enum mac_mode mac_mode; |
| 403 | u8 mac_id; | 403 | u8 mac_id; |
| 404 | struct hns_mac_cb *mac_cb; | 404 | struct hns_mac_cb *mac_cb; |
| 405 | void __iomem *io_base; | 405 | u8 __iomem *io_base; |
| 406 | unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ | 406 | unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ |
| 407 | unsigned int virt_dev_num; | 407 | unsigned int virt_dev_num; |
| 408 | struct device *dev; | 408 | struct device *dev; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ac55db065f16..61eea6ac846f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
| @@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key( | |||
| 1602 | DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); | 1602 | DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); |
| 1603 | dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, | 1603 | dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, |
| 1604 | DSAF_TBL_TCAM_KEY_PORT_S, port); | 1604 | DSAF_TBL_TCAM_KEY_PORT_S, port); |
| 1605 | |||
| 1606 | mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan); | ||
| 1607 | } | 1605 | } |
| 1608 | 1606 | ||
| 1609 | /** | 1607 | /** |
| @@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry( | |||
| 1663 | /* default config dvc to 0 */ | 1661 | /* default config dvc to 0 */ |
| 1664 | mac_data.tbl_ucast_dvc = 0; | 1662 | mac_data.tbl_ucast_dvc = 0; |
| 1665 | mac_data.tbl_ucast_out_port = mac_entry->port_num; | 1663 | mac_data.tbl_ucast_out_port = mac_entry->port_num; |
| 1666 | tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); | 1664 | tcam_data.tbl_tcam_data_high = mac_key.high.val; |
| 1667 | tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); | 1665 | tcam_data.tbl_tcam_data_low = mac_key.low.val; |
| 1668 | 1666 | ||
| 1669 | hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); | 1667 | hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); |
| 1670 | 1668 | ||
| @@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 1786 | 0xff, | 1784 | 0xff, |
| 1787 | mc_mask); | 1785 | mc_mask); |
| 1788 | 1786 | ||
| 1789 | mask_key.high.val = le32_to_cpu(mask_key.high.val); | ||
| 1790 | mask_key.low.val = le32_to_cpu(mask_key.low.val); | ||
| 1791 | |||
| 1792 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); | 1787 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); |
| 1793 | } | 1788 | } |
| 1794 | 1789 | ||
| @@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 1840 | dsaf_dev->ae_dev.name, mac_key.high.val, | 1835 | dsaf_dev->ae_dev.name, mac_key.high.val, |
| 1841 | mac_key.low.val, entry_index); | 1836 | mac_key.low.val, entry_index); |
| 1842 | 1837 | ||
| 1843 | tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); | 1838 | tcam_data.tbl_tcam_data_high = mac_key.high.val; |
| 1844 | tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); | 1839 | tcam_data.tbl_tcam_data_low = mac_key.low.val; |
| 1845 | 1840 | ||
| 1846 | /* config mc entry with mask */ | 1841 | /* config mc entry with mask */ |
| 1847 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, | 1842 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, |
| @@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 1956 | /* config key mask */ | 1951 | /* config key mask */ |
| 1957 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); | 1952 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); |
| 1958 | 1953 | ||
| 1959 | mask_key.high.val = le32_to_cpu(mask_key.high.val); | ||
| 1960 | mask_key.low.val = le32_to_cpu(mask_key.low.val); | ||
| 1961 | |||
| 1962 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); | 1954 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); |
| 1963 | } | 1955 | } |
| 1964 | 1956 | ||
| @@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 2012 | soft_mac_entry += entry_index; | 2004 | soft_mac_entry += entry_index; |
| 2013 | soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; | 2005 | soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; |
| 2014 | } else { /* not zero, just del port, update */ | 2006 | } else { /* not zero, just del port, update */ |
| 2015 | tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); | 2007 | tcam_data.tbl_tcam_data_high = mac_key.high.val; |
| 2016 | tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); | 2008 | tcam_data.tbl_tcam_data_low = mac_key.low.val; |
| 2017 | 2009 | ||
| 2018 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, | 2010 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, |
| 2019 | &tcam_data, | 2011 | &tcam_data, |
| @@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void) | |||
| 2750 | return DSAF_DUMP_REGS_NUM; | 2742 | return DSAF_DUMP_REGS_NUM; |
| 2751 | } | 2743 | } |
| 2752 | 2744 | ||
| 2745 | static int hns_dsaf_get_port_id(u8 port) | ||
| 2746 | { | ||
| 2747 | if (port < DSAF_SERVICE_NW_NUM) | ||
| 2748 | return port; | ||
| 2749 | |||
| 2750 | if (port >= DSAF_BASE_INNER_PORT_NUM) | ||
| 2751 | return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; | ||
| 2752 | |||
| 2753 | return -EINVAL; | ||
| 2754 | } | ||
| 2755 | |||
| 2753 | static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) | 2756 | static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) |
| 2754 | { | 2757 | { |
| 2755 | struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; | 2758 | struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; |
| @@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) | |||
| 2815 | memset(&temp_key, 0x0, sizeof(temp_key)); | 2818 | memset(&temp_key, 0x0, sizeof(temp_key)); |
| 2816 | mask_entry.addr[0] = 0x01; | 2819 | mask_entry.addr[0] = 0x01; |
| 2817 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, | 2820 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, |
| 2818 | port, mask_entry.addr); | 2821 | 0xf, mask_entry.addr); |
| 2819 | tbl_tcam_mcast.tbl_mcast_item_vld = 1; | 2822 | tbl_tcam_mcast.tbl_mcast_item_vld = 1; |
| 2820 | tbl_tcam_mcast.tbl_mcast_old_en = 0; | 2823 | tbl_tcam_mcast.tbl_mcast_old_en = 0; |
| 2821 | 2824 | ||
| 2822 | if (port < DSAF_SERVICE_NW_NUM) { | 2825 | /* set MAC port to handle multicast */ |
| 2823 | mskid = port; | 2826 | mskid = hns_dsaf_get_port_id(port); |
| 2824 | } else if (port >= DSAF_BASE_INNER_PORT_NUM) { | 2827 | if (mskid == -EINVAL) { |
| 2825 | mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; | ||
| 2826 | } else { | ||
| 2827 | dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", | 2828 | dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", |
| 2828 | dsaf_dev->ae_dev.name, port, | 2829 | dsaf_dev->ae_dev.name, port, |
| 2829 | mask_key.high.val, mask_key.low.val); | 2830 | mask_key.high.val, mask_key.low.val); |
| 2830 | return; | 2831 | return; |
| 2831 | } | 2832 | } |
| 2833 | dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], | ||
| 2834 | mskid % 32, 1); | ||
| 2832 | 2835 | ||
| 2836 | /* set pool bit map to handle multicast */ | ||
| 2837 | mskid = hns_dsaf_get_port_id(port_num); | ||
| 2838 | if (mskid == -EINVAL) { | ||
| 2839 | dev_err(dsaf_dev->dev, | ||
| 2840 | "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n", | ||
| 2841 | dsaf_dev->ae_dev.name, port_num, | ||
| 2842 | mask_key.high.val, mask_key.low.val); | ||
| 2843 | return; | ||
| 2844 | } | ||
| 2833 | dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], | 2845 | dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], |
| 2834 | mskid % 32, 1); | 2846 | mskid % 32, 1); |
| 2847 | |||
| 2835 | memcpy(&temp_key, &mask_key, sizeof(mask_key)); | 2848 | memcpy(&temp_key, &mask_key, sizeof(mask_key)); |
| 2836 | hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, | 2849 | hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, |
| 2837 | (struct dsaf_tbl_tcam_data *)(&mask_key), | 2850 | (struct dsaf_tbl_tcam_data *)(&mask_key), |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 0e1cd99831a6..76cc8887e1a8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | |||
| @@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 467 | u8 mac_id, u8 port_num); | 467 | u8 mac_id, u8 port_num); |
| 468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); | 468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); |
| 469 | 469 | ||
| 470 | int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); | ||
| 471 | |||
| 470 | #endif /* __HNS_DSAF_MAIN_H__ */ | 472 | #endif /* __HNS_DSAF_MAIN_H__ */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 16294cd3c954..19b94879691f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | |||
| @@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) | |||
| 670 | dsaf_set_field(origin, 1ull << 10, 10, en); | 670 | dsaf_set_field(origin, 1ull << 10, 10, en); |
| 671 | dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); | 671 | dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); |
| 672 | } else { | 672 | } else { |
| 673 | u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + | 673 | u8 __iomem *base_addr = mac_cb->serdes_vaddr + |
| 674 | (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); | 674 | (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); |
| 675 | dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); | 675 | dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); |
| 676 | } | 676 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 3d07c8a7639d..17c019106e6e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | |||
| @@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, | |||
| 61 | } | 61 | } |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | static void __iomem * | 64 | static u8 __iomem * |
| 65 | hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) | 65 | hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) |
| 66 | { | 66 | { |
| 67 | return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; | 67 | return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; |
| @@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) | |||
| 111 | dsaf_dev->ppe_common[comm_index] = NULL; | 111 | dsaf_dev->ppe_common[comm_index] = NULL; |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, | 114 | static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, |
| 115 | int ppe_idx) | 115 | int ppe_idx) |
| 116 | { | 116 | { |
| 117 | return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; | 117 | return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; |
| 118 | } | 118 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index f670e63a5a01..110c6e8222c7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | |||
| @@ -80,7 +80,7 @@ struct hns_ppe_cb { | |||
| 80 | struct hns_ppe_hw_stats hw_stats; | 80 | struct hns_ppe_hw_stats hw_stats; |
| 81 | 81 | ||
| 82 | u8 index; /* index in a ppe common device */ | 82 | u8 index; /* index in a ppe common device */ |
| 83 | void __iomem *io_base; | 83 | u8 __iomem *io_base; |
| 84 | int virq; | 84 | int virq; |
| 85 | u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ | 85 | u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ |
| 86 | u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ | 86 | u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ |
| @@ -89,7 +89,7 @@ struct hns_ppe_cb { | |||
| 89 | struct ppe_common_cb { | 89 | struct ppe_common_cb { |
| 90 | struct device *dev; | 90 | struct device *dev; |
| 91 | struct dsaf_device *dsaf_dev; | 91 | struct dsaf_device *dsaf_dev; |
| 92 | void __iomem *io_base; | 92 | u8 __iomem *io_base; |
| 93 | 93 | ||
| 94 | enum ppe_common_mode ppe_mode; | 94 | enum ppe_common_mode ppe_mode; |
| 95 | 95 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6bf346c11b25..ac3518ca4d7b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | |||
| @@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) | |||
| 458 | mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; | 458 | mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; |
| 459 | } else { | 459 | } else { |
| 460 | ring = &q->tx_ring; | 460 | ring = &q->tx_ring; |
| 461 | ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + | 461 | ring->io_base = ring_pair_cb->q.io_base + |
| 462 | HNS_RCB_TX_REG_OFFSET; | 462 | HNS_RCB_TX_REG_OFFSET; |
| 463 | irq_idx = HNS_RCB_IRQ_IDX_TX; | 463 | irq_idx = HNS_RCB_IRQ_IDX_TX; |
| 464 | mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : | 464 | mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : |
| @@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) | |||
| 764 | } | 764 | } |
| 765 | } | 765 | } |
| 766 | 766 | ||
| 767 | static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) | 767 | static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) |
| 768 | { | 768 | { |
| 769 | struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; | 769 | struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; |
| 770 | 770 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index b9733b0b8482..b9e7f11f0896 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
| @@ -1018,7 +1018,7 @@ | |||
| 1018 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 | 1018 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 |
| 1019 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 | 1019 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 |
| 1020 | 1020 | ||
| 1021 | static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) | 1021 | static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value) |
| 1022 | { | 1022 | { |
| 1023 | writel(value, base + reg); | 1023 | writel(value, base + reg); |
| 1024 | } | 1024 | } |
| @@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val) | |||
| 1053 | #define dsaf_set_bit(origin, shift, val) \ | 1053 | #define dsaf_set_bit(origin, shift, val) \ |
| 1054 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) | 1054 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) |
| 1055 | 1055 | ||
| 1056 | static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, | 1056 | static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, |
| 1057 | u32 shift, u32 val) | 1057 | u32 shift, u32 val) |
| 1058 | { | 1058 | { |
| 1059 | u32 origin = dsaf_read_reg(base, reg); | 1059 | u32 origin = dsaf_read_reg(base, reg); |
| @@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, | |||
| 1073 | #define dsaf_get_bit(origin, shift) \ | 1073 | #define dsaf_get_bit(origin, shift) \ |
| 1074 | dsaf_get_field((origin), (1ull << (shift)), (shift)) | 1074 | dsaf_get_field((origin), (1ull << (shift)), (shift)) |
| 1075 | 1075 | ||
| 1076 | static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, | 1076 | static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, |
| 1077 | u32 shift) | 1077 | u32 shift) |
| 1078 | { | 1078 | { |
| 1079 | u32 origin; | 1079 | u32 origin; |
| @@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, | |||
| 1089 | dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) | 1089 | dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) |
| 1090 | 1090 | ||
| 1091 | #define dsaf_write_b(addr, data)\ | 1091 | #define dsaf_write_b(addr, data)\ |
| 1092 | writeb((data), (__iomem unsigned char *)(addr)) | 1092 | writeb((data), (__iomem u8 *)(addr)) |
| 1093 | #define dsaf_read_b(addr)\ | 1093 | #define dsaf_read_b(addr)\ |
| 1094 | readb((__iomem unsigned char *)(addr)) | 1094 | readb((__iomem u8 *)(addr)) |
| 1095 | 1095 | ||
| 1096 | #define hns_mac_reg_read64(drv, offset) \ | 1096 | #define hns_mac_reg_read64(drv, offset) \ |
| 1097 | readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset)))) | 1097 | readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset)))) |
| 1098 | 1098 | ||
| 1099 | #endif /* _DSAF_REG_H */ | 1099 | #endif /* _DSAF_REG_H */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index ba4316910dea..a60f207768fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | |||
| @@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) | |||
| 129 | dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); | 129 | dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); |
| 130 | dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); | 130 | dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); |
| 131 | dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); | 131 | dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); |
| 132 | dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); | 132 | dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | /** | 135 | /** |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 60e7d7ae3787..4cd86ba1f050 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
| @@ -29,9 +29,6 @@ | |||
| 29 | 29 | ||
| 30 | #define SERVICE_TIMER_HZ (1 * HZ) | 30 | #define SERVICE_TIMER_HZ (1 * HZ) |
| 31 | 31 | ||
| 32 | #define NIC_TX_CLEAN_MAX_NUM 256 | ||
| 33 | #define NIC_RX_CLEAN_MAX_NUM 64 | ||
| 34 | |||
| 35 | #define RCB_IRQ_NOT_INITED 0 | 32 | #define RCB_IRQ_NOT_INITED 0 |
| 36 | #define RCB_IRQ_INITED 1 | 33 | #define RCB_IRQ_INITED 1 |
| 37 | #define HNS_BUFFER_SIZE_2048 2048 | 34 | #define HNS_BUFFER_SIZE_2048 2048 |
| @@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, | |||
| 376 | wmb(); /* commit all data before submit */ | 373 | wmb(); /* commit all data before submit */ |
| 377 | assert(skb->queue_mapping < priv->ae_handle->q_num); | 374 | assert(skb->queue_mapping < priv->ae_handle->q_num); |
| 378 | hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); | 375 | hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); |
| 379 | ring->stats.tx_pkts++; | ||
| 380 | ring->stats.tx_bytes += skb->len; | ||
| 381 | 376 | ||
| 382 | return NETDEV_TX_OK; | 377 | return NETDEV_TX_OK; |
| 383 | 378 | ||
| @@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, | |||
| 999 | /* issue prefetch for next Tx descriptor */ | 994 | /* issue prefetch for next Tx descriptor */ |
| 1000 | prefetch(&ring->desc_cb[ring->next_to_clean]); | 995 | prefetch(&ring->desc_cb[ring->next_to_clean]); |
| 1001 | } | 996 | } |
| 997 | /* update tx ring statistics. */ | ||
| 998 | ring->stats.tx_pkts += pkts; | ||
| 999 | ring->stats.tx_bytes += bytes; | ||
| 1002 | 1000 | ||
| 1003 | NETIF_TX_UNLOCK(ring); | 1001 | NETIF_TX_UNLOCK(ring); |
| 1004 | 1002 | ||
| @@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |||
| 2152 | hns_nic_tx_fini_pro_v2; | 2150 | hns_nic_tx_fini_pro_v2; |
| 2153 | 2151 | ||
| 2154 | netif_napi_add(priv->netdev, &rd->napi, | 2152 | netif_napi_add(priv->netdev, &rd->napi, |
| 2155 | hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); | 2153 | hns_nic_common_poll, NAPI_POLL_WEIGHT); |
| 2156 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; | 2154 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; |
| 2157 | } | 2155 | } |
| 2158 | for (i = h->q_num; i < h->q_num * 2; i++) { | 2156 | for (i = h->q_num; i < h->q_num * 2; i++) { |
| @@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |||
| 2165 | hns_nic_rx_fini_pro_v2; | 2163 | hns_nic_rx_fini_pro_v2; |
| 2166 | 2164 | ||
| 2167 | netif_napi_add(priv->netdev, &rd->napi, | 2165 | netif_napi_add(priv->netdev, &rd->napi, |
| 2168 | hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); | 2166 | hns_nic_common_poll, NAPI_POLL_WEIGHT); |
| 2169 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; | 2167 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; |
| 2170 | } | 2168 | } |
| 2171 | 2169 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1c1f17ec6be2..162cb9afa0e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include "hns3_enet.h" | 22 | #include "hns3_enet.h" |
| 23 | 23 | ||
| 24 | #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) | 24 | #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) |
| 25 | #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) | ||
| 25 | 26 | ||
| 26 | static void hns3_clear_all_ring(struct hnae3_handle *h); | 27 | static void hns3_clear_all_ring(struct hnae3_handle *h); |
| 27 | static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); | 28 | static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); |
| @@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, | |||
| 1079 | 1080 | ||
| 1080 | desc_cb->length = size; | 1081 | desc_cb->length = size; |
| 1081 | 1082 | ||
| 1082 | frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; | 1083 | frag_buf_num = hns3_tx_bd_count(size); |
| 1083 | sizeoflast = size & HNS3_TX_LAST_SIZE_M; | 1084 | sizeoflast = size & HNS3_TX_LAST_SIZE_M; |
| 1084 | sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; | 1085 | sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; |
| 1085 | 1086 | ||
| @@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, | |||
| 1124 | int i; | 1125 | int i; |
| 1125 | 1126 | ||
| 1126 | size = skb_headlen(skb); | 1127 | size = skb_headlen(skb); |
| 1127 | buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; | 1128 | buf_num = hns3_tx_bd_count(size); |
| 1128 | 1129 | ||
| 1129 | frag_num = skb_shinfo(skb)->nr_frags; | 1130 | frag_num = skb_shinfo(skb)->nr_frags; |
| 1130 | for (i = 0; i < frag_num; i++) { | 1131 | for (i = 0; i < frag_num; i++) { |
| 1131 | frag = &skb_shinfo(skb)->frags[i]; | 1132 | frag = &skb_shinfo(skb)->frags[i]; |
| 1132 | size = skb_frag_size(frag); | 1133 | size = skb_frag_size(frag); |
| 1133 | bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >> | 1134 | bdnum_for_frag = hns3_tx_bd_count(size); |
| 1134 | HNS3_MAX_BD_SIZE_OFFSET; | ||
| 1135 | if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) | 1135 | if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) |
| 1136 | return -ENOMEM; | 1136 | return -ENOMEM; |
| 1137 | 1137 | ||
| @@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, | |||
| 1139 | } | 1139 | } |
| 1140 | 1140 | ||
| 1141 | if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { | 1141 | if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { |
| 1142 | buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >> | 1142 | buf_num = hns3_tx_bd_count(skb->len); |
| 1143 | HNS3_MAX_BD_SIZE_OFFSET; | ||
| 1144 | if (ring_space(ring) < buf_num) | 1143 | if (ring_space(ring) < buf_num) |
| 1145 | return -EBUSY; | 1144 | return -EBUSY; |
| 1146 | /* manual split the send packet */ | 1145 | /* manual split the send packet */ |
| @@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, | |||
| 1169 | buf_num = skb_shinfo(skb)->nr_frags + 1; | 1168 | buf_num = skb_shinfo(skb)->nr_frags + 1; |
| 1170 | 1169 | ||
| 1171 | if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { | 1170 | if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { |
| 1172 | buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; | 1171 | buf_num = hns3_tx_bd_count(skb->len); |
| 1173 | if (ring_space(ring) < buf_num) | 1172 | if (ring_space(ring) < buf_num) |
| 1174 | return -EBUSY; | 1173 | return -EBUSY; |
| 1175 | /* manual split the send packet */ | 1174 | /* manual split the send packet */ |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 1db0bd41d209..75669cd0c311 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | |||
| @@ -193,7 +193,6 @@ enum hns3_nic_state { | |||
| 193 | #define HNS3_VECTOR_INITED 1 | 193 | #define HNS3_VECTOR_INITED 1 |
| 194 | 194 | ||
| 195 | #define HNS3_MAX_BD_SIZE 65535 | 195 | #define HNS3_MAX_BD_SIZE 65535 |
| 196 | #define HNS3_MAX_BD_SIZE_OFFSET 16 | ||
| 197 | #define HNS3_MAX_BD_PER_FRAG 8 | 196 | #define HNS3_MAX_BD_PER_FRAG 8 |
| 198 | #define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS | 197 | #define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS |
| 199 | 198 | ||
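
The hns3 hunks above replace the open-coded (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET with a hns3_tx_bd_count() macro built on DIV_ROUND_UP. Because HNS3_MAX_BD_SIZE is 65535, shifting by 16 divides by 65536 and undercounts buffer descriptors for lengths just past a multiple of 65535, while DIV_ROUND_UP divides by the real BD size. A small runnable check of the arithmetic:

    #include <stdio.h>

    #define HNS3_MAX_BD_SIZE        65535
    #define HNS3_MAX_BD_SIZE_OFFSET 16
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int size = 65536;  /* needs two 65535-byte descriptors */

        unsigned int old_cnt = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
        unsigned int new_cnt = DIV_ROUND_UP(size, HNS3_MAX_BD_SIZE);

        printf("old: %u BDs, new: %u BDs\n", old_cnt, new_cnt);  /* old: 1, new: 2 */
        return 0;
    }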
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index fffe8c1c45d3..0fb61d440d3b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | # Makefile for the HISILICON network device drivers. | 3 | # Makefile for the HISILICON network device drivers. |
| 4 | # | 4 | # |
| 5 | 5 | ||
| 6 | ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 | 6 | ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 |
| 7 | 7 | ||
| 8 | obj-$(CONFIG_HNS3_HCLGE) += hclge.o | 8 | obj-$(CONFIG_HNS3_HCLGE) += hclge.o |
| 9 | hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o | 9 | hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index fb93bbd35845..6193f8fa7cf3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | # Makefile for the HISILICON network device drivers. | 3 | # Makefile for the HISILICON network device drivers. |
| 4 | # | 4 | # |
| 5 | 5 | ||
| 6 | ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 | 6 | ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 |
| 7 | 7 | ||
| 8 | obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o | 8 | obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o |
| 9 | hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file | 9 | hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file |
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index baf5cc251f32..8b8a7d00e8e0 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c | |||
| @@ -39,7 +39,7 @@ struct hns_mdio_sc_reg { | |||
| 39 | }; | 39 | }; |
| 40 | 40 | ||
| 41 | struct hns_mdio_device { | 41 | struct hns_mdio_device { |
| 42 | void *vbase; /* mdio reg base address */ | 42 | u8 __iomem *vbase; /* mdio reg base address */ |
| 43 | struct regmap *subctrl_vbase; | 43 | struct regmap *subctrl_vbase; |
| 44 | struct hns_mdio_sc_reg sc_reg; | 44 | struct hns_mdio_sc_reg sc_reg; |
| 45 | }; | 45 | }; |
| @@ -96,21 +96,17 @@ enum mdio_c45_op_seq { | |||
| 96 | #define MDIO_SC_CLK_ST 0x531C | 96 | #define MDIO_SC_CLK_ST 0x531C |
| 97 | #define MDIO_SC_RESET_ST 0x5A1C | 97 | #define MDIO_SC_RESET_ST 0x5A1C |
| 98 | 98 | ||
| 99 | static void mdio_write_reg(void *base, u32 reg, u32 value) | 99 | static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value) |
| 100 | { | 100 | { |
| 101 | u8 __iomem *reg_addr = (u8 __iomem *)base; | 101 | writel_relaxed(value, base + reg); |
| 102 | |||
| 103 | writel_relaxed(value, reg_addr + reg); | ||
| 104 | } | 102 | } |
| 105 | 103 | ||
| 106 | #define MDIO_WRITE_REG(a, reg, value) \ | 104 | #define MDIO_WRITE_REG(a, reg, value) \ |
| 107 | mdio_write_reg((a)->vbase, (reg), (value)) | 105 | mdio_write_reg((a)->vbase, (reg), (value)) |
| 108 | 106 | ||
| 109 | static u32 mdio_read_reg(void *base, u32 reg) | 107 | static u32 mdio_read_reg(u8 __iomem *base, u32 reg) |
| 110 | { | 108 | { |
| 111 | u8 __iomem *reg_addr = (u8 __iomem *)base; | 109 | return readl_relaxed(base + reg); |
| 112 | |||
| 113 | return readl_relaxed(reg_addr + reg); | ||
| 114 | } | 110 | } |
| 115 | 111 | ||
| 116 | #define mdio_set_field(origin, mask, shift, val) \ | 112 | #define mdio_set_field(origin, mask, shift, val) \ |
| @@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg) | |||
| 121 | 117 | ||
| 122 | #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) | 118 | #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) |
| 123 | 119 | ||
| 124 | static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | 120 | static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift, |
| 125 | u32 val) | 121 | u32 val) |
| 126 | { | 122 | { |
| 127 | u32 origin = mdio_read_reg(base, reg); | 123 | u32 origin = mdio_read_reg(base, reg); |
| @@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | |||
| 133 | #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ | 129 | #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ |
| 134 | mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) | 130 | mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) |
| 135 | 131 | ||
| 136 | static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) | 132 | static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift) |
| 137 | { | 133 | { |
| 138 | u32 origin; | 134 | u32 origin; |
| 139 | 135 | ||
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 3baabdc89726..90b62c1412c8 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
| @@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev, | |||
| 3160 | 3160 | ||
| 3161 | if (ehea_add_adapter_mr(adapter)) { | 3161 | if (ehea_add_adapter_mr(adapter)) { |
| 3162 | pr_err("creating MR failed\n"); | 3162 | pr_err("creating MR failed\n"); |
| 3163 | of_node_put(eth_dn); | ||
| 3163 | return -EIO; | 3164 | return -EIO; |
| 3164 | } | 3165 | } |
| 3165 | 3166 | ||
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5ecbb1adcf3b..51cfe95f3e24 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, | |||
| 1885 | */ | 1885 | */ |
| 1886 | adapter->state = VNIC_PROBED; | 1886 | adapter->state = VNIC_PROBED; |
| 1887 | 1887 | ||
| 1888 | reinit_completion(&adapter->init_done); | ||
| 1888 | rc = init_crq_queue(adapter); | 1889 | rc = init_crq_queue(adapter); |
| 1889 | if (rc) { | 1890 | if (rc) { |
| 1890 | netdev_err(adapter->netdev, | 1891 | netdev_err(adapter->netdev, |
| @@ -4625,7 +4626,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) | |||
| 4625 | old_num_rx_queues = adapter->req_rx_queues; | 4626 | old_num_rx_queues = adapter->req_rx_queues; |
| 4626 | old_num_tx_queues = adapter->req_tx_queues; | 4627 | old_num_tx_queues = adapter->req_tx_queues; |
| 4627 | 4628 | ||
| 4628 | init_completion(&adapter->init_done); | 4629 | reinit_completion(&adapter->init_done); |
| 4629 | adapter->init_done_rc = 0; | 4630 | adapter->init_done_rc = 0; |
| 4630 | ibmvnic_send_crq_init(adapter); | 4631 | ibmvnic_send_crq_init(adapter); |
| 4631 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { | 4632 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
| @@ -4680,7 +4681,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) | |||
| 4680 | 4681 | ||
| 4681 | adapter->from_passive_init = false; | 4682 | adapter->from_passive_init = false; |
| 4682 | 4683 | ||
| 4683 | init_completion(&adapter->init_done); | ||
| 4684 | adapter->init_done_rc = 0; | 4684 | adapter->init_done_rc = 0; |
| 4685 | ibmvnic_send_crq_init(adapter); | 4685 | ibmvnic_send_crq_init(adapter); |
| 4686 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { | 4686 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
| @@ -4759,6 +4759,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 4759 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); | 4759 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); |
| 4760 | INIT_LIST_HEAD(&adapter->rwi_list); | 4760 | INIT_LIST_HEAD(&adapter->rwi_list); |
| 4761 | spin_lock_init(&adapter->rwi_lock); | 4761 | spin_lock_init(&adapter->rwi_lock); |
| 4762 | init_completion(&adapter->init_done); | ||
| 4762 | adapter->resetting = false; | 4763 | adapter->resetting = false; |
| 4763 | 4764 | ||
| 4764 | adapter->mac_change_pending = false; | 4765 | adapter->mac_change_pending = false; |
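
The ibmvnic hunks above move init_completion(&adapter->init_done) to probe time and switch the reset and re-init paths to reinit_completion(). Re-arming an already-initialised completion with reinit_completion() only resets its counter, whereas calling init_completion() again also re-initialises the internal wait queue; initialising once and re-arming before each reuse is the usual pattern when a completion lives across resets. A minimal sketch with illustrative names, kernel context assumed (linux/completion.h):

    /* Sketch of the init-once / reinit-per-use completion pattern above.
     * Names are illustrative, not taken from the driver.
     */
    struct foo_adapter {
        struct completion init_done;
    };

    static void foo_probe_init(struct foo_adapter *a)
    {
        init_completion(&a->init_done);          /* once, at probe */
    }

    static int foo_wait_for_init(struct foo_adapter *a, unsigned long timeout)
    {
        reinit_completion(&a->init_done);        /* re-arm before each use */
        /* ... trigger the asynchronous (re)initialisation here ... */
        if (!wait_for_completion_timeout(&a->init_done, timeout))
            return -ETIMEDOUT;
        return 0;
    }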
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5a0419421511..ecef949f3baa 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c | |||
| @@ -41,6 +41,8 @@ static int __init fm10k_init_module(void) | |||
| 41 | /* create driver workqueue */ | 41 | /* create driver workqueue */ |
| 42 | fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, | 42 | fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, |
| 43 | fm10k_driver_name); | 43 | fm10k_driver_name); |
| 44 | if (!fm10k_workqueue) | ||
| 45 | return -ENOMEM; | ||
| 44 | 46 | ||
| 45 | fm10k_dbg_init(); | 47 | fm10k_dbg_init(); |
| 46 | 48 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d684998ba2b0..d3cc3427caad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h | |||
| @@ -790,6 +790,8 @@ struct i40e_vsi { | |||
| 790 | 790 | ||
| 791 | /* VSI specific handlers */ | 791 | /* VSI specific handlers */ |
| 792 | irqreturn_t (*irq_handler)(int irq, void *data); | 792 | irqreturn_t (*irq_handler)(int irq, void *data); |
| 793 | |||
| 794 | unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ | ||
| 793 | } ____cacheline_internodealigned_in_smp; | 795 | } ____cacheline_internodealigned_in_smp; |
| 794 | 796 | ||
| 795 | struct i40e_netdev_priv { | 797 | struct i40e_netdev_priv { |
| @@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) | |||
| 1096 | return !!vsi->xdp_prog; | 1098 | return !!vsi->xdp_prog; |
| 1097 | } | 1099 | } |
| 1098 | 1100 | ||
| 1099 | static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) | ||
| 1100 | { | ||
| 1101 | bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); | ||
| 1102 | int qid = ring->queue_index; | ||
| 1103 | |||
| 1104 | if (ring_is_xdp(ring)) | ||
| 1105 | qid -= ring->vsi->alloc_queue_pairs; | ||
| 1106 | |||
| 1107 | if (!xdp_on) | ||
| 1108 | return NULL; | ||
| 1109 | |||
| 1110 | return xdp_get_umem_from_qid(ring->vsi->netdev, qid); | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); | 1101 | int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); |
| 1114 | int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); | 1102 | int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); |
| 1115 | int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, | 1103 | int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4c885801fa26..7874d0ec7fb0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
| @@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
| 2573 | return -EOPNOTSUPP; | 2573 | return -EOPNOTSUPP; |
| 2574 | 2574 | ||
| 2575 | /* only magic packet is supported */ | 2575 | /* only magic packet is supported */ |
| 2576 | if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) | 2576 | if (wol->wolopts & ~WAKE_MAGIC) |
| 2577 | | (wol->wolopts != WAKE_FILTER)) | ||
| 2578 | return -EOPNOTSUPP; | 2577 | return -EOPNOTSUPP; |
| 2579 | 2578 | ||
| 2580 | /* is this a new value? */ | 2579 | /* is this a new value? */ |
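The ethtool hunk replaces a condition built from '&&' and a bitwise '|' on comparisons, which could never reject WAKE_FILTER correctly, with a single mask test: any bit outside WAKE_MAGIC is unsupported. A compilable sketch of that check; the WAKE_* values mirror include/uapi/linux/ethtool.h but are redefined here so the example stands alone:

#include <stdio.h>

#define WAKE_MAGIC  (1 << 5)
#define WAKE_FILTER (1 << 7)

/* Accept "no wake" or "magic packet only"; reject any other bit. */
static int check_wol(unsigned int wolopts)
{
	if (wolopts & ~WAKE_MAGIC)
		return -1;	/* -EOPNOTSUPP in the driver */
	return 0;
}

int main(void)
{
	printf("%d\n", check_wol(0));				/*  0: nothing requested */
	printf("%d\n", check_wol(WAKE_MAGIC));			/*  0: supported */
	printf("%d\n", check_wol(WAKE_FILTER));			/* -1: unsupported bit */
	printf("%d\n", check_wol(WAKE_MAGIC | WAKE_FILTER));	/* -1: mixed is rejected too */
	return 0;
}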
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index da62218eb70a..b1c265012c8a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) | |||
| 3064 | } | 3064 | } |
| 3065 | 3065 | ||
| 3066 | /** | 3066 | /** |
| 3067 | * i40e_xsk_umem - Retrieve the AF_XDP ZC UMEM if XDP and ZC are enabled | ||
| 3068 | * @ring: The Tx or Rx ring | ||
| 3069 | * | ||
| 3070 | * Returns the UMEM or NULL. | ||
| 3071 | **/ | ||
| 3072 | static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) | ||
| 3073 | { | ||
| 3074 | bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); | ||
| 3075 | int qid = ring->queue_index; | ||
| 3076 | |||
| 3077 | if (ring_is_xdp(ring)) | ||
| 3078 | qid -= ring->vsi->alloc_queue_pairs; | ||
| 3079 | |||
| 3080 | if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) | ||
| 3081 | return NULL; | ||
| 3082 | |||
| 3083 | return xdp_get_umem_from_qid(ring->vsi->netdev, qid); | ||
| 3084 | } | ||
| 3085 | |||
| 3086 | /** | ||
| 3067 | * i40e_configure_tx_ring - Configure a transmit ring context and rest | 3087 | * i40e_configure_tx_ring - Configure a transmit ring context and rest |
| 3068 | * @ring: The Tx ring to configure | 3088 | * @ring: The Tx ring to configure |
| 3069 | * | 3089 | * |
| @@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
| 10064 | hash_init(vsi->mac_filter_hash); | 10084 | hash_init(vsi->mac_filter_hash); |
| 10065 | vsi->irqs_ready = false; | 10085 | vsi->irqs_ready = false; |
| 10066 | 10086 | ||
| 10087 | if (type == I40E_VSI_MAIN) { | ||
| 10088 | vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); | ||
| 10089 | if (!vsi->af_xdp_zc_qps) | ||
| 10090 | goto err_rings; | ||
| 10091 | } | ||
| 10092 | |||
| 10067 | ret = i40e_set_num_rings_in_vsi(vsi); | 10093 | ret = i40e_set_num_rings_in_vsi(vsi); |
| 10068 | if (ret) | 10094 | if (ret) |
| 10069 | goto err_rings; | 10095 | goto err_rings; |
| @@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
| 10082 | goto unlock_pf; | 10108 | goto unlock_pf; |
| 10083 | 10109 | ||
| 10084 | err_rings: | 10110 | err_rings: |
| 10111 | bitmap_free(vsi->af_xdp_zc_qps); | ||
| 10085 | pf->next_vsi = i - 1; | 10112 | pf->next_vsi = i - 1; |
| 10086 | kfree(vsi); | 10113 | kfree(vsi); |
| 10087 | unlock_pf: | 10114 | unlock_pf: |
| @@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) | |||
| 10162 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); | 10189 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); |
| 10163 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); | 10190 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); |
| 10164 | 10191 | ||
| 10192 | bitmap_free(vsi->af_xdp_zc_qps); | ||
| 10165 | i40e_vsi_free_arrays(vsi, true); | 10193 | i40e_vsi_free_arrays(vsi, true); |
| 10166 | i40e_clear_rss_config_user(vsi); | 10194 | i40e_clear_rss_config_user(vsi); |
| 10167 | 10195 | ||
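The i40e_main.c hunk allocates the zero-copy bitmap only for the main VSI and frees it both on the error path and in i40e_vsi_clear(). The sketch below shows the same allocate/unwind/teardown shape in plain C, with calloc()/free() standing in for bitmap_zalloc()/bitmap_free(); struct names and sizes are illustrative:

#include <stdlib.h>

struct vsi {
	unsigned long *zc_qps;	/* NULL for VSI types that never use it */
	int *rings;
};

/* Allocate per-VSI state; on any failure, unwind what was already
 * allocated (bitmap_free()/kfree() in the driver, free() here). */
static struct vsi *vsi_alloc(int is_main, unsigned int nqps, unsigned int nrings)
{
	struct vsi *vsi = calloc(1, sizeof(*vsi));

	if (!vsi)
		return NULL;

	if (is_main) {
		/* one bit per queue pair; assumes 64-bit longs for brevity */
		vsi->zc_qps = calloc((nqps + 63) / 64, sizeof(unsigned long));
		if (!vsi->zc_qps)
			goto err;
	}

	vsi->rings = calloc(nrings, sizeof(*vsi->rings));
	if (!vsi->rings)
		goto err;

	return vsi;

err:
	free(vsi->zc_qps);	/* free(NULL) is a no-op, like bitmap_free(NULL) */
	free(vsi);
	return NULL;
}

/* Teardown mirrors the allocation, as the i40e_vsi_clear() hunk does. */
static void vsi_clear(struct vsi *vsi)
{
	if (!vsi)
		return;
	free(vsi->zc_qps);
	free(vsi->rings);
	free(vsi);
}

int main(void)
{
	struct vsi *vsi = vsi_alloc(1, 128, 16);

	vsi_clear(vsi);
	return 0;
}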
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 5fb4353c742b..31575c0bb884 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c | |||
| @@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) | |||
| 146 | static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) | 146 | static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) |
| 147 | { | 147 | { |
| 148 | struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); | 148 | struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); |
| 149 | struct timespec64 now; | 149 | struct timespec64 now, then; |
| 150 | 150 | ||
| 151 | then = ns_to_timespec64(delta); | ||
| 151 | mutex_lock(&pf->tmreg_lock); | 152 | mutex_lock(&pf->tmreg_lock); |
| 152 | 153 | ||
| 153 | i40e_ptp_read(pf, &now, NULL); | 154 | i40e_ptp_read(pf, &now, NULL); |
| 154 | timespec64_add_ns(&now, delta); | 155 | now = timespec64_add(now, then); |
| 155 | i40e_ptp_write(pf, (const struct timespec64 *)&now); | 156 | i40e_ptp_write(pf, (const struct timespec64 *)&now); |
| 156 | 157 | ||
| 157 | mutex_unlock(&pf->tmreg_lock); | 158 | mutex_unlock(&pf->tmreg_lock); |
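The PTP hunk matters because timespec64_add_ns() takes an unsigned nanosecond count, so a negative clock adjustment would be misapplied; converting the signed delta with ns_to_timespec64() and adding with timespec64_add() handles both signs. A userspace approximation of those two helpers, assuming the usual normalization to a non-negative tv_nsec:

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Convert a signed nanosecond delta to a normalized timespec
 * (tv_nsec always in [0, NSEC_PER_SEC)), roughly what
 * ns_to_timespec64() does in the kernel. */
static struct timespec ns_to_ts(long long ns)
{
	struct timespec ts;

	ts.tv_sec = ns / NSEC_PER_SEC;
	ts.tv_nsec = ns % NSEC_PER_SEC;
	if (ts.tv_nsec < 0) {		/* pull the borrow out of the nsec field */
		ts.tv_sec -= 1;
		ts.tv_nsec += NSEC_PER_SEC;
	}
	return ts;
}

/* Component-wise add with carry, like timespec64_add(). */
static struct timespec ts_add(struct timespec a, struct timespec b)
{
	struct timespec r;

	r.tv_sec = a.tv_sec + b.tv_sec;
	r.tv_nsec = a.tv_nsec + b.tv_nsec;
	if (r.tv_nsec >= NSEC_PER_SEC) {
		r.tv_sec += 1;
		r.tv_nsec -= NSEC_PER_SEC;
	}
	return r;
}

int main(void)
{
	struct timespec now = { .tv_sec = 100, .tv_nsec = 200 };
	struct timespec adj = ts_add(now, ns_to_ts(-500));	/* negative delta */

	/* Expect 99.999999700: the 500 ns step backwards is applied cleanly. */
	printf("%lld.%09ld\n", (long long)adj.tv_sec, adj.tv_nsec);
	return 0;
}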
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index b5c182e688e3..1b17486543ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c | |||
| @@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, | |||
| 102 | if (err) | 102 | if (err) |
| 103 | return err; | 103 | return err; |
| 104 | 104 | ||
| 105 | set_bit(qid, vsi->af_xdp_zc_qps); | ||
| 106 | |||
| 105 | if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); | 107 | if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); |
| 106 | 108 | ||
| 107 | if (if_running) { | 109 | if (if_running) { |
| @@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) | |||
| 148 | return err; | 150 | return err; |
| 149 | } | 151 | } |
| 150 | 152 | ||
| 153 | clear_bit(qid, vsi->af_xdp_zc_qps); | ||
| 151 | i40e_xsk_umem_dma_unmap(vsi, umem); | 154 | i40e_xsk_umem_dma_unmap(vsi, umem); |
| 152 | 155 | ||
| 153 | if (if_running) { | 156 | if (if_running) { |
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 01fcfc6f3415..d2e2c50ce257 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h | |||
| @@ -194,6 +194,8 @@ | |||
| 194 | /* enable link status from external LINK_0 and LINK_1 pins */ | 194 | /* enable link status from external LINK_0 and LINK_1 pins */ |
| 195 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ | 195 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ |
| 196 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ | 196 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ |
| 197 | #define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ | ||
| 198 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ | ||
| 197 | #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ | 199 | #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ |
| 198 | #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ | 200 | #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ |
| 199 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ | 201 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 69b230c53fed..3269d8e94744 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
| @@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
| 8740 | struct e1000_hw *hw = &adapter->hw; | 8740 | struct e1000_hw *hw = &adapter->hw; |
| 8741 | u32 ctrl, rctl, status; | 8741 | u32 ctrl, rctl, status; |
| 8742 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; | 8742 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; |
| 8743 | #ifdef CONFIG_PM | 8743 | bool wake; |
| 8744 | int retval = 0; | ||
| 8745 | #endif | ||
| 8746 | 8744 | ||
| 8747 | rtnl_lock(); | 8745 | rtnl_lock(); |
| 8748 | netif_device_detach(netdev); | 8746 | netif_device_detach(netdev); |
| @@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
| 8755 | igb_clear_interrupt_scheme(adapter); | 8753 | igb_clear_interrupt_scheme(adapter); |
| 8756 | rtnl_unlock(); | 8754 | rtnl_unlock(); |
| 8757 | 8755 | ||
| 8758 | #ifdef CONFIG_PM | ||
| 8759 | if (!runtime) { | ||
| 8760 | retval = pci_save_state(pdev); | ||
| 8761 | if (retval) | ||
| 8762 | return retval; | ||
| 8763 | } | ||
| 8764 | #endif | ||
| 8765 | |||
| 8766 | status = rd32(E1000_STATUS); | 8756 | status = rd32(E1000_STATUS); |
| 8767 | if (status & E1000_STATUS_LU) | 8757 | if (status & E1000_STATUS_LU) |
| 8768 | wufc &= ~E1000_WUFC_LNKC; | 8758 | wufc &= ~E1000_WUFC_LNKC; |
| @@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
| 8779 | } | 8769 | } |
| 8780 | 8770 | ||
| 8781 | ctrl = rd32(E1000_CTRL); | 8771 | ctrl = rd32(E1000_CTRL); |
| 8782 | /* advertise wake from D3Cold */ | ||
| 8783 | #define E1000_CTRL_ADVD3WUC 0x00100000 | ||
| 8784 | /* phy power management enable */ | ||
| 8785 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 | ||
| 8786 | ctrl |= E1000_CTRL_ADVD3WUC; | 8772 | ctrl |= E1000_CTRL_ADVD3WUC; |
| 8787 | wr32(E1000_CTRL, ctrl); | 8773 | wr32(E1000_CTRL, ctrl); |
| 8788 | 8774 | ||
| @@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
| 8796 | wr32(E1000_WUFC, 0); | 8782 | wr32(E1000_WUFC, 0); |
| 8797 | } | 8783 | } |
| 8798 | 8784 | ||
| 8799 | *enable_wake = wufc || adapter->en_mng_pt; | 8785 | wake = wufc || adapter->en_mng_pt; |
| 8800 | if (!*enable_wake) | 8786 | if (!wake) |
| 8801 | igb_power_down_link(adapter); | 8787 | igb_power_down_link(adapter); |
| 8802 | else | 8788 | else |
| 8803 | igb_power_up_link(adapter); | 8789 | igb_power_up_link(adapter); |
| 8804 | 8790 | ||
| 8791 | if (enable_wake) | ||
| 8792 | *enable_wake = wake; | ||
| 8793 | |||
| 8805 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 8794 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
| 8806 | * would have already happened in close and is redundant. | 8795 | * would have already happened in close and is redundant. |
| 8807 | */ | 8796 | */ |
| @@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev) | |||
| 8844 | 8833 | ||
| 8845 | static int __maybe_unused igb_suspend(struct device *dev) | 8834 | static int __maybe_unused igb_suspend(struct device *dev) |
| 8846 | { | 8835 | { |
| 8847 | int retval; | 8836 | return __igb_shutdown(to_pci_dev(dev), NULL, 0); |
| 8848 | bool wake; | ||
| 8849 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 8850 | |||
| 8851 | retval = __igb_shutdown(pdev, &wake, 0); | ||
| 8852 | if (retval) | ||
| 8853 | return retval; | ||
| 8854 | |||
| 8855 | if (wake) { | ||
| 8856 | pci_prepare_to_sleep(pdev); | ||
| 8857 | } else { | ||
| 8858 | pci_wake_from_d3(pdev, false); | ||
| 8859 | pci_set_power_state(pdev, PCI_D3hot); | ||
| 8860 | } | ||
| 8861 | |||
| 8862 | return 0; | ||
| 8863 | } | 8837 | } |
| 8864 | 8838 | ||
| 8865 | static int __maybe_unused igb_resume(struct device *dev) | 8839 | static int __maybe_unused igb_resume(struct device *dev) |
| @@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev) | |||
| 8930 | 8904 | ||
| 8931 | static int __maybe_unused igb_runtime_suspend(struct device *dev) | 8905 | static int __maybe_unused igb_runtime_suspend(struct device *dev) |
| 8932 | { | 8906 | { |
| 8933 | struct pci_dev *pdev = to_pci_dev(dev); | 8907 | return __igb_shutdown(to_pci_dev(dev), NULL, 1); |
| 8934 | int retval; | ||
| 8935 | bool wake; | ||
| 8936 | |||
| 8937 | retval = __igb_shutdown(pdev, &wake, 1); | ||
| 8938 | if (retval) | ||
| 8939 | return retval; | ||
| 8940 | |||
| 8941 | if (wake) { | ||
| 8942 | pci_prepare_to_sleep(pdev); | ||
| 8943 | } else { | ||
| 8944 | pci_wake_from_d3(pdev, false); | ||
| 8945 | pci_set_power_state(pdev, PCI_D3hot); | ||
| 8946 | } | ||
| 8947 | |||
| 8948 | return 0; | ||
| 8949 | } | 8908 | } |
| 8950 | 8909 | ||
| 8951 | static int __maybe_unused igb_runtime_resume(struct device *dev) | 8910 | static int __maybe_unused igb_runtime_resume(struct device *dev) |
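The igb change turns enable_wake into an optional out-parameter: __igb_shutdown() computes the wake decision itself and writes it back only when the caller passed a non-NULL pointer, which lets the suspend paths collapse to one-line wrappers. A sketch of that optional-out-parameter convention; the function names are made up for the example:

#include <stdbool.h>
#include <stdio.h>

/* Compute a result and report it through an optional out-parameter:
 * callers that do not care simply pass NULL. */
static int shutdown_example(bool *enable_wake, bool runtime)
{
	bool wake = !runtime;	/* stand-in for the real wake decision */

	/* ... device quiescing would happen here ... */

	if (enable_wake)	/* only dereference when the caller asked */
		*enable_wake = wake;

	return 0;
}

int main(void)
{
	bool wake;

	shutdown_example(&wake, false);	/* shutdown path wants the value */
	printf("wake=%d\n", wake);

	shutdown_example(NULL, true);	/* suspend path does not */
	return 0;
}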
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index cc4907f9ff02..2fb97967961c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | |||
| @@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) | |||
| 905 | struct pci_dev *pdev = adapter->pdev; | 905 | struct pci_dev *pdev = adapter->pdev; |
| 906 | struct device *dev = &adapter->netdev->dev; | 906 | struct device *dev = &adapter->netdev->dev; |
| 907 | struct mii_bus *bus; | 907 | struct mii_bus *bus; |
| 908 | int err = -ENODEV; | ||
| 908 | 909 | ||
| 909 | adapter->mii_bus = devm_mdiobus_alloc(dev); | 910 | bus = devm_mdiobus_alloc(dev); |
| 910 | if (!adapter->mii_bus) | 911 | if (!bus) |
| 911 | return -ENOMEM; | 912 | return -ENOMEM; |
| 912 | 913 | ||
| 913 | bus = adapter->mii_bus; | ||
| 914 | |||
| 915 | switch (hw->device_id) { | 914 | switch (hw->device_id) { |
| 916 | /* C3000 SoCs */ | 915 | /* C3000 SoCs */ |
| 917 | case IXGBE_DEV_ID_X550EM_A_KR: | 916 | case IXGBE_DEV_ID_X550EM_A_KR: |
| @@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) | |||
| 949 | */ | 948 | */ |
| 950 | hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; | 949 | hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; |
| 951 | 950 | ||
| 952 | return mdiobus_register(bus); | 951 | err = mdiobus_register(bus); |
| 952 | if (!err) { | ||
| 953 | adapter->mii_bus = bus; | ||
| 954 | return 0; | ||
| 955 | } | ||
| 953 | 956 | ||
| 954 | ixgbe_no_mii_bus: | 957 | ixgbe_no_mii_bus: |
| 955 | devm_mdiobus_free(dev, bus); | 958 | devm_mdiobus_free(dev, bus); |
| 956 | adapter->mii_bus = NULL; | 959 | return err; |
| 957 | return -ENODEV; | ||
| 958 | } | 960 | } |
| 959 | 961 | ||
| 960 | /** | 962 | /** |
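The ixgbe hunk delays storing the bus in adapter->mii_bus until mdiobus_register() succeeds, so no other code can observe a half-initialized bus and the error path never has to null the field out again. A userspace sketch of this publish-on-success pattern; register_bus() is a stand-in, not a real API:

#include <stdio.h>
#include <stdlib.h>

struct mii_bus_stub { int id; };
struct adapter { struct mii_bus_stub *mii_bus; };

/* Stand-in for mdiobus_register(); flip 'fail' to exercise the error path. */
static int register_bus(struct mii_bus_stub *bus, int fail)
{
	(void)bus;
	return fail ? -1 : 0;
}

static int bus_init(struct adapter *ad, int fail)
{
	struct mii_bus_stub *bus = calloc(1, sizeof(*bus));
	int err;

	if (!bus)
		return -1;

	err = register_bus(bus, fail);
	if (!err) {
		ad->mii_bus = bus;	/* publish only once it is usable */
		return 0;
	}

	free(bus);			/* error path never touches ad->mii_bus */
	return err;
}

int main(void)
{
	struct adapter ad = { 0 };
	int err = bus_init(&ad, 0);

	printf("err=%d bus=%p\n", err, (void *)ad.mii_bus);
	return 0;
}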
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 122927f3a600..d5e5afbdca6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c | |||
| @@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, | |||
| 96 | if (!eproto) | 96 | if (!eproto) |
| 97 | return -EINVAL; | 97 | return -EINVAL; |
| 98 | 98 | ||
| 99 | if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet)) | ||
| 100 | return -EOPNOTSUPP; | ||
| 101 | |||
| 102 | err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); | 99 | err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); |
| 103 | if (err) | 100 | if (err) |
| 104 | return err; | 101 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index eac245a93f91..4ab0d030b544 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | |||
| @@ -122,7 +122,9 @@ out: | |||
| 122 | return err; | 122 | return err; |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ | 125 | /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) |
| 126 | * minimum speed value is 40Gbps | ||
| 127 | */ | ||
| 126 | static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | 128 | static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) |
| 127 | { | 129 | { |
| 128 | u32 speed; | 130 | u32 speed; |
| @@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | |||
| 130 | int err; | 132 | int err; |
| 131 | 133 | ||
| 132 | err = mlx5e_port_linkspeed(priv->mdev, &speed); | 134 | err = mlx5e_port_linkspeed(priv->mdev, &speed); |
| 133 | if (err) { | 135 | if (err) |
| 134 | mlx5_core_warn(priv->mdev, "cannot get port speed\n"); | 136 | speed = SPEED_40000; |
| 135 | return 0; | 137 | speed = max_t(u32, speed, SPEED_40000); |
| 136 | } | ||
| 137 | 138 | ||
| 138 | xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; | 139 | xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; |
| 139 | 140 | ||
| @@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | |||
| 142 | } | 143 | } |
| 143 | 144 | ||
| 144 | static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | 145 | static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, |
| 145 | u32 xoff, unsigned int mtu) | 146 | u32 xoff, unsigned int max_mtu) |
| 146 | { | 147 | { |
| 147 | int i; | 148 | int i; |
| 148 | 149 | ||
| @@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | |||
| 154 | } | 155 | } |
| 155 | 156 | ||
| 156 | if (port_buffer->buffer[i].size < | 157 | if (port_buffer->buffer[i].size < |
| 157 | (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) | 158 | (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) |
| 158 | return -ENOMEM; | 159 | return -ENOMEM; |
| 159 | 160 | ||
| 160 | port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; | 161 | port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; |
| 161 | port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; | 162 | port_buffer->buffer[i].xon = |
| 163 | port_buffer->buffer[i].xoff - max_mtu; | ||
| 162 | } | 164 | } |
| 163 | 165 | ||
| 164 | return 0; | 166 | return 0; |
| @@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | |||
| 166 | 168 | ||
| 167 | /** | 169 | /** |
| 168 | * update_buffer_lossy() | 170 | * update_buffer_lossy() |
| 169 | * mtu: device's MTU | 171 | * max_mtu: netdev's max_mtu |
| 170 | * pfc_en: <input> current pfc configuration | 172 | * pfc_en: <input> current pfc configuration |
| 171 | * buffer: <input> current prio to buffer mapping | 173 | * buffer: <input> current prio to buffer mapping |
| 172 | * xoff: <input> xoff value | 174 | * xoff: <input> xoff value |
| @@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | |||
| 183 | * Return 0 if no error. | 185 | * Return 0 if no error. |
| 184 | * Set change to true if buffer configuration is modified. | 186 | * Set change to true if buffer configuration is modified. |
| 185 | */ | 187 | */ |
| 186 | static int update_buffer_lossy(unsigned int mtu, | 188 | static int update_buffer_lossy(unsigned int max_mtu, |
| 187 | u8 pfc_en, u8 *buffer, u32 xoff, | 189 | u8 pfc_en, u8 *buffer, u32 xoff, |
| 188 | struct mlx5e_port_buffer *port_buffer, | 190 | struct mlx5e_port_buffer *port_buffer, |
| 189 | bool *change) | 191 | bool *change) |
| @@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu, | |||
| 220 | } | 222 | } |
| 221 | 223 | ||
| 222 | if (changed) { | 224 | if (changed) { |
| 223 | err = update_xoff_threshold(port_buffer, xoff, mtu); | 225 | err = update_xoff_threshold(port_buffer, xoff, max_mtu); |
| 224 | if (err) | 226 | if (err) |
| 225 | return err; | 227 | return err; |
| 226 | 228 | ||
| @@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu, | |||
| 230 | return 0; | 232 | return 0; |
| 231 | } | 233 | } |
| 232 | 234 | ||
| 235 | #define MINIMUM_MAX_MTU 9216 | ||
| 233 | int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | 236 | int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, |
| 234 | u32 change, unsigned int mtu, | 237 | u32 change, unsigned int mtu, |
| 235 | struct ieee_pfc *pfc, | 238 | struct ieee_pfc *pfc, |
| @@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 241 | bool update_prio2buffer = false; | 244 | bool update_prio2buffer = false; |
| 242 | u8 buffer[MLX5E_MAX_PRIORITY]; | 245 | u8 buffer[MLX5E_MAX_PRIORITY]; |
| 243 | bool update_buffer = false; | 246 | bool update_buffer = false; |
| 247 | unsigned int max_mtu; | ||
| 244 | u32 total_used = 0; | 248 | u32 total_used = 0; |
| 245 | u8 curr_pfc_en; | 249 | u8 curr_pfc_en; |
| 246 | int err; | 250 | int err; |
| 247 | int i; | 251 | int i; |
| 248 | 252 | ||
| 249 | mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); | 253 | mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); |
| 254 | max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU); | ||
| 250 | 255 | ||
| 251 | err = mlx5e_port_query_buffer(priv, &port_buffer); | 256 | err = mlx5e_port_query_buffer(priv, &port_buffer); |
| 252 | if (err) | 257 | if (err) |
| @@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 254 | 259 | ||
| 255 | if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { | 260 | if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { |
| 256 | update_buffer = true; | 261 | update_buffer = true; |
| 257 | err = update_xoff_threshold(&port_buffer, xoff, mtu); | 262 | err = update_xoff_threshold(&port_buffer, xoff, max_mtu); |
| 258 | if (err) | 263 | if (err) |
| 259 | return err; | 264 | return err; |
| 260 | } | 265 | } |
| @@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 264 | if (err) | 269 | if (err) |
| 265 | return err; | 270 | return err; |
| 266 | 271 | ||
| 267 | err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, | 272 | err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, |
| 268 | &port_buffer, &update_buffer); | 273 | &port_buffer, &update_buffer); |
| 269 | if (err) | 274 | if (err) |
| 270 | return err; | 275 | return err; |
| @@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 276 | if (err) | 281 | if (err) |
| 277 | return err; | 282 | return err; |
| 278 | 283 | ||
| 279 | err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, | 284 | err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, |
| 280 | &port_buffer, &update_buffer); | 285 | xoff, &port_buffer, &update_buffer); |
| 281 | if (err) | 286 | if (err) |
| 282 | return err; | 287 | return err; |
| 283 | } | 288 | } |
| @@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 301 | return -EINVAL; | 306 | return -EINVAL; |
| 302 | 307 | ||
| 303 | update_buffer = true; | 308 | update_buffer = true; |
| 304 | err = update_xoff_threshold(&port_buffer, xoff, mtu); | 309 | err = update_xoff_threshold(&port_buffer, xoff, max_mtu); |
| 305 | if (err) | 310 | if (err) |
| 306 | return err; | 311 | return err; |
| 307 | } | 312 | } |
| @@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 309 | /* Need to update buffer configuration if xoff value is changed */ | 314 | /* Need to update buffer configuration if xoff value is changed */ |
| 310 | if (!update_buffer && xoff != priv->dcbx.xoff) { | 315 | if (!update_buffer && xoff != priv->dcbx.xoff) { |
| 311 | update_buffer = true; | 316 | update_buffer = true; |
| 312 | err = update_xoff_threshold(&port_buffer, xoff, mtu); | 317 | err = update_xoff_threshold(&port_buffer, xoff, max_mtu); |
| 313 | if (err) | 318 | if (err) |
| 314 | return err; | 319 | return err; |
| 315 | } | 320 | } |
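The port-buffer comment gives xoff = (301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B]; the code keeps this in integer arithmetic (216 * len / 100, speed in Mbps divided by 1000, 272 * mtu / 100) and, after this patch, clamps the speed at 40 Gbps and uses the netdev's max_mtu with a 9216-byte floor. A standalone reproduction of that arithmetic with one worked value:

#include <stdio.h>

#define SPEED_40000	40000	/* Mbps, as in the ethtool SPEED_* values */
#define MIN_MAX_MTU	9216

/* Integer form of: xoff = (301 + 2.16*len[m]) * speed[Gbps] + 2.72*MTU[B] */
static unsigned int calc_xoff(unsigned int speed_mbps, unsigned int cable_len_m,
			      unsigned int mtu)
{
	if (speed_mbps < SPEED_40000)	/* floor applied by the patch */
		speed_mbps = SPEED_40000;

	return (301 + 216 * cable_len_m / 100) * speed_mbps / 1000 +
	       272 * mtu / 100;
}

int main(void)
{
	unsigned int max_mtu = MIN_MAX_MTU;	/* patch uses max(netdev->max_mtu, 9216) */

	/* 100 Gbps link, 7 m cable, 9216-byte MTU:
	 * (301 + 15) * 100000 / 1000 + 272 * 9216 / 100
	 *   = 31600 + 25067 = 56667 bytes of headroom */
	printf("xoff = %u\n", calc_xoff(100000, 7, max_mtu));
	return 0;
}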
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 3078491cc0d0..1539cf3de5dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c | |||
| @@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, | |||
| 45 | if (err) | 45 | if (err) |
| 46 | return err; | 46 | return err; |
| 47 | 47 | ||
| 48 | mutex_lock(&mdev->mlx5e_res.td.list_lock); | ||
| 48 | list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); | 49 | list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); |
| 50 | mutex_unlock(&mdev->mlx5e_res.td.list_lock); | ||
| 49 | 51 | ||
| 50 | return 0; | 52 | return 0; |
| 51 | } | 53 | } |
| @@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, | |||
| 53 | void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, | 55 | void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, |
| 54 | struct mlx5e_tir *tir) | 56 | struct mlx5e_tir *tir) |
| 55 | { | 57 | { |
| 58 | mutex_lock(&mdev->mlx5e_res.td.list_lock); | ||
| 56 | mlx5_core_destroy_tir(mdev, tir->tirn); | 59 | mlx5_core_destroy_tir(mdev, tir->tirn); |
| 57 | list_del(&tir->list); | 60 | list_del(&tir->list); |
| 61 | mutex_unlock(&mdev->mlx5e_res.td.list_lock); | ||
| 58 | } | 62 | } |
| 59 | 63 | ||
| 60 | static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, | 64 | static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, |
| @@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) | |||
| 114 | } | 118 | } |
| 115 | 119 | ||
| 116 | INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); | 120 | INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); |
| 121 | mutex_init(&mdev->mlx5e_res.td.list_lock); | ||
| 117 | 122 | ||
| 118 | return 0; | 123 | return 0; |
| 119 | 124 | ||
| @@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) | |||
| 141 | { | 146 | { |
| 142 | struct mlx5_core_dev *mdev = priv->mdev; | 147 | struct mlx5_core_dev *mdev = priv->mdev; |
| 143 | struct mlx5e_tir *tir; | 148 | struct mlx5e_tir *tir; |
| 144 | int err = -ENOMEM; | 149 | int err = 0; |
| 145 | u32 tirn = 0; | 150 | u32 tirn = 0; |
| 146 | int inlen; | 151 | int inlen; |
| 147 | void *in; | 152 | void *in; |
| 148 | 153 | ||
| 149 | inlen = MLX5_ST_SZ_BYTES(modify_tir_in); | 154 | inlen = MLX5_ST_SZ_BYTES(modify_tir_in); |
| 150 | in = kvzalloc(inlen, GFP_KERNEL); | 155 | in = kvzalloc(inlen, GFP_KERNEL); |
| 151 | if (!in) | 156 | if (!in) { |
| 157 | err = -ENOMEM; | ||
| 152 | goto out; | 158 | goto out; |
| 159 | } | ||
| 153 | 160 | ||
| 154 | if (enable_uc_lb) | 161 | if (enable_uc_lb) |
| 155 | MLX5_SET(modify_tir_in, in, ctx.self_lb_block, | 162 | MLX5_SET(modify_tir_in, in, ctx.self_lb_block, |
| @@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) | |||
| 157 | 164 | ||
| 158 | MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); | 165 | MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); |
| 159 | 166 | ||
| 167 | mutex_lock(&mdev->mlx5e_res.td.list_lock); | ||
| 160 | list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { | 168 | list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { |
| 161 | tirn = tir->tirn; | 169 | tirn = tir->tirn; |
| 162 | err = mlx5_core_modify_tir(mdev, tirn, in, inlen); | 170 | err = mlx5_core_modify_tir(mdev, tirn, in, inlen); |
| @@ -168,6 +176,7 @@ out: | |||
| 168 | kvfree(in); | 176 | kvfree(in); |
| 169 | if (err) | 177 | if (err) |
| 170 | netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); | 178 | netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); |
| 179 | mutex_unlock(&mdev->mlx5e_res.td.list_lock); | ||
| 171 | 180 | ||
| 172 | return err; | 181 | return err; |
| 173 | } | 182 | } |
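The en_common.c hunk introduces list_lock so that adding, destroying and refreshing TIRs all serialize on the same mutex around tirs_list. A pthread sketch of the same discipline (build with -pthread); the singly linked list and tir/tirn names are simplified stand-ins for the kernel list API:

#include <pthread.h>
#include <stdio.h>

struct tir {
	int tirn;
	struct tir *next;
};

static struct tir *tirs_list;				/* shared list head */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void tir_add(struct tir *t)
{
	pthread_mutex_lock(&list_lock);
	t->next = tirs_list;				/* list_add() in the driver */
	tirs_list = t;
	pthread_mutex_unlock(&list_lock);
}

static void tirs_refresh(void)
{
	struct tir *t;

	pthread_mutex_lock(&list_lock);			/* walk under the same lock */
	for (t = tirs_list; t; t = t->next)
		printf("refresh tir %d\n", t->tirn);
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct tir a = { .tirn = 1 }, b = { .tirn = 2 };

	tir_add(&a);
	tir_add(&b);
	tirs_refresh();
	return 0;
}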
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a0987cc5fe4a..5efce4a3ff79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
| @@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, | |||
| 603 | __ETHTOOL_LINK_MODE_MASK_NBITS); | 603 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
| 604 | } | 604 | } |
| 605 | 605 | ||
| 606 | static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, | 606 | static void ptys2ethtool_adver_link(unsigned long *advertising_modes, |
| 607 | unsigned long *advertising_modes, | 607 | u32 eth_proto_cap, bool ext) |
| 608 | u32 eth_proto_cap) | ||
| 609 | { | 608 | { |
| 610 | unsigned long proto_cap = eth_proto_cap; | 609 | unsigned long proto_cap = eth_proto_cap; |
| 611 | struct ptys2ethtool_config *table; | 610 | struct ptys2ethtool_config *table; |
| 612 | u32 max_size; | 611 | u32 max_size; |
| 613 | int proto; | 612 | int proto; |
| 614 | 613 | ||
| 615 | mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); | 614 | table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; |
| 615 | max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) : | ||
| 616 | ARRAY_SIZE(ptys2legacy_ethtool_table); | ||
| 617 | |||
| 616 | for_each_set_bit(proto, &proto_cap, max_size) | 618 | for_each_set_bit(proto, &proto_cap, max_size) |
| 617 | bitmap_or(advertising_modes, advertising_modes, | 619 | bitmap_or(advertising_modes, advertising_modes, |
| 618 | table[proto].advertised, | 620 | table[proto].advertised, |
| @@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap, | |||
| 794 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); | 796 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); |
| 795 | } | 797 | } |
| 796 | 798 | ||
| 797 | static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, | 799 | static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause, |
| 798 | u8 tx_pause, u8 rx_pause, | 800 | struct ethtool_link_ksettings *link_ksettings, |
| 799 | struct ethtool_link_ksettings *link_ksettings) | 801 | bool ext) |
| 800 | { | 802 | { |
| 801 | unsigned long *advertising = link_ksettings->link_modes.advertising; | 803 | unsigned long *advertising = link_ksettings->link_modes.advertising; |
| 802 | ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); | 804 | ptys2ethtool_adver_link(advertising, eth_proto_cap, ext); |
| 803 | 805 | ||
| 804 | if (rx_pause) | 806 | if (rx_pause) |
| 805 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); | 807 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); |
| @@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, | |||
| 854 | struct ethtool_link_ksettings *link_ksettings) | 856 | struct ethtool_link_ksettings *link_ksettings) |
| 855 | { | 857 | { |
| 856 | unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; | 858 | unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; |
| 859 | bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); | ||
| 857 | 860 | ||
| 858 | ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); | 861 | ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); |
| 859 | } | 862 | } |
| 860 | 863 | ||
| 861 | int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | 864 | int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, |
| @@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
| 872 | u8 an_disable_admin; | 875 | u8 an_disable_admin; |
| 873 | u8 an_status; | 876 | u8 an_status; |
| 874 | u8 connector_type; | 877 | u8 connector_type; |
| 878 | bool admin_ext; | ||
| 875 | bool ext; | 879 | bool ext; |
| 876 | int err; | 880 | int err; |
| 877 | 881 | ||
| @@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
| 886 | eth_proto_capability); | 890 | eth_proto_capability); |
| 887 | eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, | 891 | eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, |
| 888 | eth_proto_admin); | 892 | eth_proto_admin); |
| 893 | /* Fields: eth_proto_admin and ext_eth_proto_admin are | ||
| 894 | * mutually exclusive. Hence try reading legacy advertising | ||
| 895 | * when extended advertising is zero. | ||
| 896 | * admin_ext indicates how eth_proto_admin should be | ||
| 897 | * interpreted | ||
| 898 | */ | ||
| 899 | admin_ext = ext; | ||
| 900 | if (ext && !eth_proto_admin) { | ||
| 901 | eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false, | ||
| 902 | eth_proto_admin); | ||
| 903 | admin_ext = false; | ||
| 904 | } | ||
| 905 | |||
| 889 | eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, | 906 | eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, |
| 890 | eth_proto_oper); | 907 | eth_proto_oper); |
| 891 | eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); | 908 | eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); |
| @@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
| 899 | ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); | 916 | ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); |
| 900 | 917 | ||
| 901 | get_supported(mdev, eth_proto_cap, link_ksettings); | 918 | get_supported(mdev, eth_proto_cap, link_ksettings); |
| 902 | get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); | 919 | get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, |
| 920 | admin_ext); | ||
| 903 | get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); | 921 | get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); |
| 904 | 922 | ||
| 905 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; | 923 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; |
| @@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, | |||
| 997 | 1015 | ||
| 998 | #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) | 1016 | #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) |
| 999 | 1017 | ||
| 1000 | ext_requested = (link_ksettings->link_modes.advertising[0] > | 1018 | ext_requested = !!(link_ksettings->link_modes.advertising[0] > |
| 1001 | MLX5E_PTYS_EXT); | 1019 | MLX5E_PTYS_EXT || |
| 1020 | link_ksettings->link_modes.advertising[1]); | ||
| 1002 | ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); | 1021 | ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); |
| 1003 | 1022 | ext_requested &= ext_supported; | |
| 1004 | /*when ptys_extended_ethernet is set legacy link modes are deprecated */ | ||
| 1005 | if (ext_requested != ext_supported) | ||
| 1006 | return -EPROTONOSUPPORT; | ||
| 1007 | 1023 | ||
| 1008 | speed = link_ksettings->base.speed; | 1024 | speed = link_ksettings->base.speed; |
| 1009 | ethtool2ptys_adver_func = ext_requested ? | 1025 | ethtool2ptys_adver_func = ext_requested ? |
| 1010 | mlx5e_ethtool2ptys_ext_adver_link : | 1026 | mlx5e_ethtool2ptys_ext_adver_link : |
| 1011 | mlx5e_ethtool2ptys_adver_link; | 1027 | mlx5e_ethtool2ptys_adver_link; |
| 1012 | err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); | 1028 | err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto); |
| 1013 | if (err) { | 1029 | if (err) { |
| 1014 | netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", | 1030 | netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", |
| 1015 | __func__, err); | 1031 | __func__, err); |
| @@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, | |||
| 1037 | if (!an_changes && link_modes == eproto.admin) | 1053 | if (!an_changes && link_modes == eproto.admin) |
| 1038 | goto out; | 1054 | goto out; |
| 1039 | 1055 | ||
| 1040 | mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); | 1056 | mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested); |
| 1041 | mlx5_toggle_port_link(mdev); | 1057 | mlx5_toggle_port_link(mdev); |
| 1042 | 1058 | ||
| 1043 | out: | 1059 | out: |
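In the ethtool hunks, ptys2ethtool_adver_link() now receives an explicit ext flag and picks the extended or legacy translation table itself, and get_link_ksettings() falls back to the legacy admin field when the extended one reads as zero. A compact sketch of that table selection and fallback; the mode tables here are invented examples, not the driver's:

#include <stdbool.h>
#include <stdio.h>

static const char * const legacy_table[] = { "1000baseT", "10GbaseT" };
static const char * const ext_table[]    = { "25GbaseCR", "50GbaseKR", "100GbaseCR2" };

/* Translate a protocol capability bitmap into readable modes, choosing
 * the table by the 'ext' flag rather than re-querying the device. */
static void print_adver(unsigned long proto_cap, bool ext)
{
	const char * const *table = ext ? ext_table : legacy_table;
	unsigned int max = ext ? 3 : 2;
	unsigned int i;

	for (i = 0; i < max; i++)		/* for_each_set_bit() in the driver */
		if (proto_cap & (1UL << i))
			printf("%s\n", table[i]);
}

int main(void)
{
	unsigned long ext_admin = 0;		/* extended field is empty ...      */
	unsigned long legacy_admin = 0x3;	/* ... so fall back to legacy modes */
	bool admin_ext = true;

	if (admin_ext && !ext_admin)		/* the fallback added by the patch */
		admin_ext = false;

	print_adver(admin_ext ? ext_admin : legacy_admin, admin_ext);
	return 0;
}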
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b4967a0ff8c7..d75dc44eb2ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, | |||
| 2158 | return true; | 2158 | return true; |
| 2159 | } | 2159 | } |
| 2160 | 2160 | ||
| 2161 | struct ip_ttl_word { | ||
| 2162 | __u8 ttl; | ||
| 2163 | __u8 protocol; | ||
| 2164 | __sum16 check; | ||
| 2165 | }; | ||
| 2166 | |||
| 2167 | struct ipv6_hoplimit_word { | ||
| 2168 | __be16 payload_len; | ||
| 2169 | __u8 nexthdr; | ||
| 2170 | __u8 hop_limit; | ||
| 2171 | }; | ||
| 2172 | |||
| 2173 | static bool is_action_keys_supported(const struct flow_action_entry *act) | ||
| 2174 | { | ||
| 2175 | u32 mask, offset; | ||
| 2176 | u8 htype; | ||
| 2177 | |||
| 2178 | htype = act->mangle.htype; | ||
| 2179 | offset = act->mangle.offset; | ||
| 2180 | mask = ~act->mangle.mask; | ||
| 2181 | /* For IPv4 & IPv6 header check 4 byte word, | ||
| 2182 | * to determine that modified fields | ||
| 2183 | * are NOT ttl & hop_limit only. | ||
| 2184 | */ | ||
| 2185 | if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) { | ||
| 2186 | struct ip_ttl_word *ttl_word = | ||
| 2187 | (struct ip_ttl_word *)&mask; | ||
| 2188 | |||
| 2189 | if (offset != offsetof(struct iphdr, ttl) || | ||
| 2190 | ttl_word->protocol || | ||
| 2191 | ttl_word->check) { | ||
| 2192 | return true; | ||
| 2193 | } | ||
| 2194 | } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { | ||
| 2195 | struct ipv6_hoplimit_word *hoplimit_word = | ||
| 2196 | (struct ipv6_hoplimit_word *)&mask; | ||
| 2197 | |||
| 2198 | if (offset != offsetof(struct ipv6hdr, payload_len) || | ||
| 2199 | hoplimit_word->payload_len || | ||
| 2200 | hoplimit_word->nexthdr) { | ||
| 2201 | return true; | ||
| 2202 | } | ||
| 2203 | } | ||
| 2204 | return false; | ||
| 2205 | } | ||
| 2206 | |||
| 2161 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | 2207 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, |
| 2162 | struct flow_action *flow_action, | 2208 | struct flow_action *flow_action, |
| 2163 | u32 actions, | 2209 | u32 actions, |
| @@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
| 2165 | { | 2211 | { |
| 2166 | const struct flow_action_entry *act; | 2212 | const struct flow_action_entry *act; |
| 2167 | bool modify_ip_header; | 2213 | bool modify_ip_header; |
| 2168 | u8 htype, ip_proto; | ||
| 2169 | void *headers_v; | 2214 | void *headers_v; |
| 2170 | u16 ethertype; | 2215 | u16 ethertype; |
| 2216 | u8 ip_proto; | ||
| 2171 | int i; | 2217 | int i; |
| 2172 | 2218 | ||
| 2173 | if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) | 2219 | if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) |
| @@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
| 2187 | act->id != FLOW_ACTION_ADD) | 2233 | act->id != FLOW_ACTION_ADD) |
| 2188 | continue; | 2234 | continue; |
| 2189 | 2235 | ||
| 2190 | htype = act->mangle.htype; | 2236 | if (is_action_keys_supported(act)) { |
| 2191 | if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 || | ||
| 2192 | htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { | ||
| 2193 | modify_ip_header = true; | 2237 | modify_ip_header = true; |
| 2194 | break; | 2238 | break; |
| 2195 | } | 2239 | } |
| @@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, | |||
| 2340 | return 0; | 2384 | return 0; |
| 2341 | } | 2385 | } |
| 2342 | 2386 | ||
| 2343 | static inline int cmp_encap_info(struct ip_tunnel_key *a, | 2387 | struct encap_key { |
| 2344 | struct ip_tunnel_key *b) | 2388 | struct ip_tunnel_key *ip_tun_key; |
| 2389 | int tunnel_type; | ||
| 2390 | }; | ||
| 2391 | |||
| 2392 | static inline int cmp_encap_info(struct encap_key *a, | ||
| 2393 | struct encap_key *b) | ||
| 2345 | { | 2394 | { |
| 2346 | return memcmp(a, b, sizeof(*a)); | 2395 | return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || |
| 2396 | a->tunnel_type != b->tunnel_type; | ||
| 2347 | } | 2397 | } |
| 2348 | 2398 | ||
| 2349 | static inline int hash_encap_info(struct ip_tunnel_key *key) | 2399 | static inline int hash_encap_info(struct encap_key *key) |
| 2350 | { | 2400 | { |
| 2351 | return jhash(key, sizeof(*key), 0); | 2401 | return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), |
| 2402 | key->tunnel_type); | ||
| 2352 | } | 2403 | } |
| 2353 | 2404 | ||
| 2354 | 2405 | ||
| @@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, | |||
| 2379 | struct mlx5_esw_flow_attr *attr = flow->esw_attr; | 2430 | struct mlx5_esw_flow_attr *attr = flow->esw_attr; |
| 2380 | struct mlx5e_tc_flow_parse_attr *parse_attr; | 2431 | struct mlx5e_tc_flow_parse_attr *parse_attr; |
| 2381 | struct ip_tunnel_info *tun_info; | 2432 | struct ip_tunnel_info *tun_info; |
| 2382 | struct ip_tunnel_key *key; | 2433 | struct encap_key key, e_key; |
| 2383 | struct mlx5e_encap_entry *e; | 2434 | struct mlx5e_encap_entry *e; |
| 2384 | unsigned short family; | 2435 | unsigned short family; |
| 2385 | uintptr_t hash_key; | 2436 | uintptr_t hash_key; |
| @@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, | |||
| 2389 | parse_attr = attr->parse_attr; | 2440 | parse_attr = attr->parse_attr; |
| 2390 | tun_info = &parse_attr->tun_info[out_index]; | 2441 | tun_info = &parse_attr->tun_info[out_index]; |
| 2391 | family = ip_tunnel_info_af(tun_info); | 2442 | family = ip_tunnel_info_af(tun_info); |
| 2392 | key = &tun_info->key; | 2443 | key.ip_tun_key = &tun_info->key; |
| 2444 | key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev); | ||
| 2393 | 2445 | ||
| 2394 | hash_key = hash_encap_info(key); | 2446 | hash_key = hash_encap_info(&key); |
| 2395 | 2447 | ||
| 2396 | hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, | 2448 | hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, |
| 2397 | encap_hlist, hash_key) { | 2449 | encap_hlist, hash_key) { |
| 2398 | if (!cmp_encap_info(&e->tun_info.key, key)) { | 2450 | e_key.ip_tun_key = &e->tun_info.key; |
| 2451 | e_key.tunnel_type = e->tunnel_type; | ||
| 2452 | if (!cmp_encap_info(&e_key, &key)) { | ||
| 2399 | found = true; | 2453 | found = true; |
| 2400 | break; | 2454 | break; |
| 2401 | } | 2455 | } |
| @@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, | |||
| 2657 | 2711 | ||
| 2658 | if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || | 2712 | if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || |
| 2659 | hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { | 2713 | hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { |
| 2660 | err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, | 2714 | err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB, |
| 2661 | parse_attr, hdrs, extack); | 2715 | parse_attr, hdrs, extack); |
| 2662 | if (err) | 2716 | if (err) |
| 2663 | return err; | 2717 | return err; |
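is_action_keys_supported() inverts the 32-bit pedit mask and overlays a small struct on it so that, within the 4-byte word holding ttl (or hop_limit), a rewrite is accepted only when the neighbouring fields stay untouched; the encap changes additionally fold the tunnel type into the hash and compare key. A userspace sketch of the mask-overlay check, using memcpy instead of the kernel's pointer cast and treating the field offset as already validated:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Layout of the 4-byte IPv4 word that contains ttl, protocol and the
 * header checksum (mirrors the struct used by the patch). */
struct ip_ttl_word {
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
};

/* A pedit mask of all-ones means "byte untouched"; inverting it makes
 * any non-zero byte mark a field the action wants to rewrite. */
static bool touches_more_than_ttl(uint32_t pedit_mask)
{
	uint32_t inv = ~pedit_mask;
	struct ip_ttl_word w;

	memcpy(&w, &inv, sizeof(w));	/* overlay without aliasing tricks */
	return w.protocol || w.check;	/* ttl-only rewrites are allowed */
}

int main(void)
{
	uint32_t ttl_only, ttl_and_proto;
	uint8_t m1[4] = { 0x00, 0xff, 0xff, 0xff };	/* only ttl rewritten */
	uint8_t m2[4] = { 0x00, 0x00, 0xff, 0xff };	/* ttl and protocol rewritten */

	memcpy(&ttl_only, m1, 4);
	memcpy(&ttl_and_proto, m2, 4);

	printf("%d\n", touches_more_than_ttl(ttl_only));	/* 0: supported */
	printf("%d\n", touches_more_than_ttl(ttl_and_proto));	/* 1: rejected */
	return 0;
}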
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ecd2c747f726..8a67fd197b79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, | |||
| 105 | opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); | 105 | opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); |
| 106 | MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); | 106 | MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); |
| 107 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); | 107 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); |
| 108 | if (vport) | 108 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); |
| 109 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); | ||
| 110 | nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, | 109 | nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, |
| 111 | in, nic_vport_context); | 110 | in, nic_vport_context); |
| 112 | 111 | ||
| @@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, | |||
| 134 | MLX5_SET(modify_esw_vport_context_in, in, opcode, | 133 | MLX5_SET(modify_esw_vport_context_in, in, opcode, |
| 135 | MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); | 134 | MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); |
| 136 | MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); | 135 | MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); |
| 137 | if (vport) | 136 | MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); |
| 138 | MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); | ||
| 139 | return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); | 137 | return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); |
| 140 | } | 138 | } |
| 141 | 139 | ||
| @@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) | |||
| 431 | { | 429 | { |
| 432 | int err; | 430 | int err; |
| 433 | 431 | ||
| 432 | memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); | ||
| 433 | |||
| 434 | err = esw_create_legacy_vepa_table(esw); | 434 | err = esw_create_legacy_vepa_table(esw); |
| 435 | if (err) | 435 | if (err) |
| 436 | return err; | 436 | return err; |
| @@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, | |||
| 2157 | 2157 | ||
| 2158 | /* Star rule to forward all traffic to uplink vport */ | 2158 | /* Star rule to forward all traffic to uplink vport */ |
| 2159 | memset(spec, 0, sizeof(*spec)); | 2159 | memset(spec, 0, sizeof(*spec)); |
| 2160 | memset(&dest, 0, sizeof(dest)); | ||
| 2160 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; | 2161 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
| 2161 | dest.vport.num = MLX5_VPORT_UPLINK; | 2162 | dest.vport.num = MLX5_VPORT_UPLINK; |
| 2162 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; | 2163 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f2260391be5b..9b2d78ee22b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
| @@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) | |||
| 1611 | { | 1611 | { |
| 1612 | int err; | 1612 | int err; |
| 1613 | 1613 | ||
| 1614 | memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); | ||
| 1614 | mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); | 1615 | mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); |
| 1615 | 1616 | ||
| 1616 | err = esw_create_offloads_fdb_tables(esw, nvports); | 1617 | err = esw_create_offloads_fdb_tables(esw, nvports); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index 5cf5f2a9d51f..8de64e88c670 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | |||
| @@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, | |||
| 217 | void *cmd; | 217 | void *cmd; |
| 218 | int ret; | 218 | int ret; |
| 219 | 219 | ||
| 220 | rcu_read_lock(); | ||
| 221 | flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); | ||
| 222 | rcu_read_unlock(); | ||
| 223 | |||
| 224 | if (!flow) { | ||
| 225 | WARN_ONCE(1, "Received NULL pointer for handle\n"); | ||
| 226 | return -EINVAL; | ||
| 227 | } | ||
| 228 | |||
| 220 | buf = kzalloc(size, GFP_ATOMIC); | 229 | buf = kzalloc(size, GFP_ATOMIC); |
| 221 | if (!buf) | 230 | if (!buf) |
| 222 | return -ENOMEM; | 231 | return -ENOMEM; |
| 223 | 232 | ||
| 224 | cmd = (buf + 1); | 233 | cmd = (buf + 1); |
| 225 | 234 | ||
| 226 | rcu_read_lock(); | ||
| 227 | flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); | ||
| 228 | rcu_read_unlock(); | ||
| 229 | mlx5_fpga_tls_flow_to_cmd(flow, cmd); | 235 | mlx5_fpga_tls_flow_to_cmd(flow, cmd); |
| 230 | 236 | ||
| 231 | MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); | 237 | MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); |
| @@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, | |||
| 238 | buf->complete = mlx_tls_kfree_complete; | 244 | buf->complete = mlx_tls_kfree_complete; |
| 239 | 245 | ||
| 240 | ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); | 246 | ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); |
| 247 | if (ret < 0) | ||
| 248 | kfree(buf); | ||
| 241 | 249 | ||
| 242 | return ret; | 250 | return ret; |
| 243 | } | 251 | } |
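The fpga/tls hunk reorders mlx5_fpga_tls_resync_rx(): look the flow up first, so an unknown handle fails before anything is allocated and the later dereference can no longer hit NULL, and free the command buffer when the send itself returns an error. A sketch of that validate-then-allocate ordering with stand-in helpers:

#include <stdio.h>
#include <stdlib.h>

static int dummy_flow;

/* Stand-ins for idr_find() and the async send; purely illustrative. */
static void *find_flow(unsigned int handle)
{
	return handle == 1 ? &dummy_flow : NULL;
}

static int send_cmd(void *buf)
{
	(void)buf;
	return -1;	/* pretend the send failed */
}

static int resync_rx(unsigned int handle)
{
	void *flow, *buf;
	int ret;

	flow = find_flow(handle);	/* validate first ... */
	if (!flow)
		return -1;		/* ... nothing allocated yet, nothing to leak */

	buf = calloc(1, 64);		/* ... then allocate the command buffer */
	if (!buf)
		return -1;

	ret = send_cmd(buf);
	if (ret < 0)
		free(buf);		/* the send owns the buffer only on success */

	return ret;
}

int main(void)
{
	printf("%d\n", resync_rx(2));	/* unknown handle: fails before allocating */
	printf("%d\n", resync_rx(1));	/* known handle: send fails, buffer freed */
	return 0;
}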
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 70cc906a102b..76716419370d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = { | |||
| 164 | .size = 8, | 164 | .size = 8, |
| 165 | .limit = 4 | 165 | .limit = 4 |
| 166 | }, | 166 | }, |
| 167 | .mr_cache[16] = { | ||
| 168 | .size = 8, | ||
| 169 | .limit = 4 | ||
| 170 | }, | ||
| 171 | .mr_cache[17] = { | ||
| 172 | .size = 8, | ||
| 173 | .limit = 4 | ||
| 174 | }, | ||
| 175 | .mr_cache[18] = { | ||
| 176 | .size = 8, | ||
| 177 | .limit = 4 | ||
| 178 | }, | ||
| 179 | .mr_cache[19] = { | ||
| 180 | .size = 4, | ||
| 181 | .limit = 2 | ||
| 182 | }, | ||
| 183 | .mr_cache[20] = { | ||
| 184 | .size = 4, | ||
| 185 | .limit = 2 | ||
| 186 | }, | ||
| 187 | }, | 167 | }, |
| 188 | }; | 168 | }; |
| 189 | 169 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 7a15e932ed2f..c1c1965d7acc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c | |||
| @@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, | |||
| 113 | return 0; | 113 | return 0; |
| 114 | default: | 114 | default: |
| 115 | /* Do not consider thresholds for zero temperature. */ | 115 | /* Do not consider thresholds for zero temperature. */ |
| 116 | if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) { | 116 | if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) { |
| 117 | *temp = 0; | 117 | *temp = 0; |
| 118 | return 0; | 118 | return 0; |
| 119 | } | 119 | } |
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index bd6e9014bc74..7849119d407a 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c | |||
| @@ -142,6 +142,12 @@ struct ks8851_net { | |||
| 142 | 142 | ||
| 143 | static int msg_enable; | 143 | static int msg_enable; |
| 144 | 144 | ||
| 145 | /* SPI frame opcodes */ | ||
| 146 | #define KS_SPIOP_RD (0x00) | ||
| 147 | #define KS_SPIOP_WR (0x40) | ||
| 148 | #define KS_SPIOP_RXFIFO (0x80) | ||
| 149 | #define KS_SPIOP_TXFIFO (0xC0) | ||
| 150 | |||
| 145 | /* shift for byte-enable data */ | 151 | /* shift for byte-enable data */ |
| 146 | #define BYTE_EN(_x) ((_x) << 2) | 152 | #define BYTE_EN(_x) ((_x) << 2) |
| 147 | 153 | ||
| @@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
| 535 | /* set dma read address */ | 541 | /* set dma read address */ |
| 536 | ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); | 542 | ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); |
| 537 | 543 | ||
| 538 | /* start the packet dma process, and set auto-dequeue rx */ | 544 | /* start DMA access */ |
| 539 | ks8851_wrreg16(ks, KS_RXQCR, | 545 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); |
| 540 | ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); | ||
| 541 | 546 | ||
| 542 | if (rxlen > 4) { | 547 | if (rxlen > 4) { |
| 543 | unsigned int rxalign; | 548 | unsigned int rxalign; |
| @@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
| 568 | } | 573 | } |
| 569 | } | 574 | } |
| 570 | 575 | ||
| 571 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); | 576 | /* end DMA access and dequeue packet */ |
| 577 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF); | ||
| 572 | } | 578 | } |
| 573 | } | 579 | } |
| 574 | 580 | ||
| @@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work) | |||
| 785 | static int ks8851_net_open(struct net_device *dev) | 791 | static int ks8851_net_open(struct net_device *dev) |
| 786 | { | 792 | { |
| 787 | struct ks8851_net *ks = netdev_priv(dev); | 793 | struct ks8851_net *ks = netdev_priv(dev); |
| 794 | int ret; | ||
| 795 | |||
| 796 | ret = request_threaded_irq(dev->irq, NULL, ks8851_irq, | ||
| 797 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, | ||
| 798 | dev->name, ks); | ||
| 799 | if (ret < 0) { | ||
| 800 | netdev_err(dev, "failed to get irq\n"); | ||
| 801 | return ret; | ||
| 802 | } | ||
| 788 | 803 | ||
| 789 | /* lock the card, even if we may not actually be doing anything | 804 | /* lock the card, even if we may not actually be doing anything |
| 790 | * else at the moment */ | 805 | * else at the moment */ |
| @@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev) | |||
| 849 | netif_dbg(ks, ifup, ks->netdev, "network device up\n"); | 864 | netif_dbg(ks, ifup, ks->netdev, "network device up\n"); |
| 850 | 865 | ||
| 851 | mutex_unlock(&ks->lock); | 866 | mutex_unlock(&ks->lock); |
| 867 | mii_check_link(&ks->mii); | ||
| 852 | return 0; | 868 | return 0; |
| 853 | } | 869 | } |
| 854 | 870 | ||
| @@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev) | |||
| 899 | dev_kfree_skb(txb); | 915 | dev_kfree_skb(txb); |
| 900 | } | 916 | } |
| 901 | 917 | ||
| 918 | free_irq(dev->irq, ks); | ||
| 919 | |||
| 902 | return 0; | 920 | return 0; |
| 903 | } | 921 | } |
| 904 | 922 | ||
| @@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi) | |||
| 1508 | 1526 | ||
| 1509 | spi_set_drvdata(spi, ks); | 1527 | spi_set_drvdata(spi, ks); |
| 1510 | 1528 | ||
| 1529 | netif_carrier_off(ks->netdev); | ||
| 1511 | ndev->if_port = IF_PORT_100BASET; | 1530 | ndev->if_port = IF_PORT_100BASET; |
| 1512 | ndev->netdev_ops = &ks8851_netdev_ops; | 1531 | ndev->netdev_ops = &ks8851_netdev_ops; |
| 1513 | ndev->irq = spi->irq; | 1532 | ndev->irq = spi->irq; |
| @@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi) | |||
| 1529 | ks8851_read_selftest(ks); | 1548 | ks8851_read_selftest(ks); |
| 1530 | ks8851_init_mac(ks); | 1549 | ks8851_init_mac(ks); |
| 1531 | 1550 | ||
| 1532 | ret = request_threaded_irq(spi->irq, NULL, ks8851_irq, | ||
| 1533 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, | ||
| 1534 | ndev->name, ks); | ||
| 1535 | if (ret < 0) { | ||
| 1536 | dev_err(&spi->dev, "failed to get irq\n"); | ||
| 1537 | goto err_irq; | ||
| 1538 | } | ||
| 1539 | |||
| 1540 | ret = register_netdev(ndev); | 1551 | ret = register_netdev(ndev); |
| 1541 | if (ret) { | 1552 | if (ret) { |
| 1542 | dev_err(&spi->dev, "failed to register network device\n"); | 1553 | dev_err(&spi->dev, "failed to register network device\n"); |
| @@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi) | |||
| 1549 | 1560 | ||
| 1550 | return 0; | 1561 | return 0; |
| 1551 | 1562 | ||
| 1552 | |||
| 1553 | err_netdev: | 1563 | err_netdev: |
| 1554 | free_irq(ndev->irq, ks); | 1564 | err_id: |
| 1555 | |||
| 1556 | err_irq: | ||
| 1557 | if (gpio_is_valid(gpio)) | 1565 | if (gpio_is_valid(gpio)) |
| 1558 | gpio_set_value(gpio, 0); | 1566 | gpio_set_value(gpio, 0); |
| 1559 | err_id: | ||
| 1560 | regulator_disable(ks->vdd_reg); | 1567 | regulator_disable(ks->vdd_reg); |
| 1561 | err_reg: | 1568 | err_reg: |
| 1562 | regulator_disable(ks->vdd_io); | 1569 | regulator_disable(ks->vdd_io); |
| @@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi) | |||
| 1574 | dev_info(&spi->dev, "remove\n"); | 1581 | dev_info(&spi->dev, "remove\n"); |
| 1575 | 1582 | ||
| 1576 | unregister_netdev(priv->netdev); | 1583 | unregister_netdev(priv->netdev); |
| 1577 | free_irq(spi->irq, priv); | ||
| 1578 | if (gpio_is_valid(priv->gpio)) | 1584 | if (gpio_is_valid(priv->gpio)) |
| 1579 | gpio_set_value(priv->gpio, 0); | 1585 | gpio_set_value(priv->gpio, 0); |
| 1580 | regulator_disable(priv->vdd_reg); | 1586 | regulator_disable(priv->vdd_reg); |
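The ks8851 changes move request_threaded_irq() from probe into ndo_open() and the matching free_irq() into ndo_stop(), so the interrupt line is held only while the interface is up and the probe error unwind gets simpler. A small sketch of the acquire-in-open / release-in-stop pairing; acquire_irq() and release_irq() are placeholders, not driver functions:

#include <stdbool.h>
#include <stdio.h>

static bool irq_held;

/* Stand-ins for request_threaded_irq()/free_irq(). */
static int acquire_irq(void)  { irq_held = true;  return 0; }
static void release_irq(void) { irq_held = false; }

/* ndo_open(): take the IRQ first and fail the open if that fails,
 * so the rest of the bring-up never runs without an interrupt. */
static int net_open(void)
{
	if (acquire_irq() < 0)
		return -1;
	/* ... power up, program MAC, start queues ... */
	return 0;
}

/* ndo_stop(): tear down in reverse order and give the IRQ back,
 * instead of keeping it from probe() all the way to remove(). */
static int net_stop(void)
{
	/* ... stop queues, power down ... */
	release_irq();
	return 0;
}

int main(void)
{
	net_open();
	printf("irq held while up: %d\n", irq_held);
	net_stop();
	printf("irq held after stop: %d\n", irq_held);
	return 0;
}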
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h index 852256ef1f22..23da1e3ee429 100644 --- a/drivers/net/ethernet/micrel/ks8851.h +++ b/drivers/net/ethernet/micrel/ks8851.h | |||
| @@ -11,9 +11,15 @@ | |||
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #define KS_CCR 0x08 | 13 | #define KS_CCR 0x08 |
| 14 | #define CCR_LE (1 << 10) /* KSZ8851-16MLL */ | ||
| 14 | #define CCR_EEPROM (1 << 9) | 15 | #define CCR_EEPROM (1 << 9) |
| 15 | #define CCR_SPI (1 << 8) | 16 | #define CCR_SPI (1 << 8) /* KSZ8851SNL */ |
| 16 | #define CCR_32PIN (1 << 0) | 17 | #define CCR_8BIT (1 << 7) /* KSZ8851-16MLL */ |
| 18 | #define CCR_16BIT (1 << 6) /* KSZ8851-16MLL */ | ||
| 19 | #define CCR_32BIT (1 << 5) /* KSZ8851-16MLL */ | ||
| 20 | #define CCR_SHARED (1 << 4) /* KSZ8851-16MLL */ | ||
| 21 | #define CCR_48PIN (1 << 1) /* KSZ8851-16MLL */ | ||
| 22 | #define CCR_32PIN (1 << 0) /* KSZ8851SNL */ | ||
| 17 | 23 | ||
| 18 | /* MAC address registers */ | 24 | /* MAC address registers */ |
| 19 | #define KS_MAR(_m) (0x15 - (_m)) | 25 | #define KS_MAR(_m) (0x15 - (_m)) |
| @@ -112,13 +118,13 @@ | |||
| 112 | #define RXCR1_RXE (1 << 0) | 118 | #define RXCR1_RXE (1 << 0) |
| 113 | 119 | ||
| 114 | #define KS_RXCR2 0x76 | 120 | #define KS_RXCR2 0x76 |
| 115 | #define RXCR2_SRDBL_MASK (0x7 << 5) | 121 | #define RXCR2_SRDBL_MASK (0x7 << 5) /* KSZ8851SNL */ |
| 116 | #define RXCR2_SRDBL_SHIFT (5) | 122 | #define RXCR2_SRDBL_SHIFT (5) /* KSZ8851SNL */ |
| 117 | #define RXCR2_SRDBL_4B (0x0 << 5) | 123 | #define RXCR2_SRDBL_4B (0x0 << 5) /* KSZ8851SNL */ |
| 118 | #define RXCR2_SRDBL_8B (0x1 << 5) | 124 | #define RXCR2_SRDBL_8B (0x1 << 5) /* KSZ8851SNL */ |
| 119 | #define RXCR2_SRDBL_16B (0x2 << 5) | 125 | #define RXCR2_SRDBL_16B (0x2 << 5) /* KSZ8851SNL */ |
| 120 | #define RXCR2_SRDBL_32B (0x3 << 5) | 126 | #define RXCR2_SRDBL_32B (0x3 << 5) /* KSZ8851SNL */ |
| 121 | #define RXCR2_SRDBL_FRAME (0x4 << 5) | 127 | #define RXCR2_SRDBL_FRAME (0x4 << 5) /* KSZ8851SNL */ |
| 122 | #define RXCR2_IUFFP (1 << 4) | 128 | #define RXCR2_IUFFP (1 << 4) |
| 123 | #define RXCR2_RXIUFCEZ (1 << 3) | 129 | #define RXCR2_RXIUFCEZ (1 << 3) |
| 124 | #define RXCR2_UDPLFE (1 << 2) | 130 | #define RXCR2_UDPLFE (1 << 2) |
| @@ -143,8 +149,10 @@ | |||
| 143 | #define RXFSHR_RXCE (1 << 0) | 149 | #define RXFSHR_RXCE (1 << 0) |
| 144 | 150 | ||
| 145 | #define KS_RXFHBCR 0x7E | 151 | #define KS_RXFHBCR 0x7E |
| 152 | #define RXFHBCR_CNT_MASK (0xfff << 0) | ||
| 153 | |||
| 146 | #define KS_TXQCR 0x80 | 154 | #define KS_TXQCR 0x80 |
| 147 | #define TXQCR_AETFE (1 << 2) | 155 | #define TXQCR_AETFE (1 << 2) /* KSZ8851SNL */ |
| 148 | #define TXQCR_TXQMAM (1 << 1) | 156 | #define TXQCR_TXQMAM (1 << 1) |
| 149 | #define TXQCR_METFE (1 << 0) | 157 | #define TXQCR_METFE (1 << 0) |
| 150 | 158 | ||
| @@ -167,6 +175,10 @@ | |||
| 167 | 175 | ||
| 168 | #define KS_RXFDPR 0x86 | 176 | #define KS_RXFDPR 0x86 |
| 169 | #define RXFDPR_RXFPAI (1 << 14) | 177 | #define RXFDPR_RXFPAI (1 << 14) |
| 178 | #define RXFDPR_WST (1 << 12) /* KSZ8851-16MLL */ | ||
| 179 | #define RXFDPR_EMS (1 << 11) /* KSZ8851-16MLL */ | ||
| 180 | #define RXFDPR_RXFP_MASK (0x7ff << 0) | ||
| 181 | #define RXFDPR_RXFP_SHIFT (0) | ||
| 170 | 182 | ||
| 171 | #define KS_RXDTTR 0x8C | 183 | #define KS_RXDTTR 0x8C |
| 172 | #define KS_RXDBCTR 0x8E | 184 | #define KS_RXDBCTR 0x8E |
| @@ -184,7 +196,7 @@ | |||
| 184 | #define IRQ_RXMPDI (1 << 4) | 196 | #define IRQ_RXMPDI (1 << 4) |
| 185 | #define IRQ_LDI (1 << 3) | 197 | #define IRQ_LDI (1 << 3) |
| 186 | #define IRQ_EDI (1 << 2) | 198 | #define IRQ_EDI (1 << 2) |
| 187 | #define IRQ_SPIBEI (1 << 1) | 199 | #define IRQ_SPIBEI (1 << 1) /* KSZ8851SNL */ |
| 188 | #define IRQ_DEDI (1 << 0) | 200 | #define IRQ_DEDI (1 << 0) |
| 189 | 201 | ||
| 190 | #define KS_RXFCTR 0x9C | 202 | #define KS_RXFCTR 0x9C |
| @@ -257,42 +269,37 @@ | |||
| 257 | #define KS_P1ANLPR 0xEE | 269 | #define KS_P1ANLPR 0xEE |
| 258 | 270 | ||
| 259 | #define KS_P1SCLMD 0xF4 | 271 | #define KS_P1SCLMD 0xF4 |
| 260 | #define P1SCLMD_LEDOFF (1 << 15) | ||
| 261 | #define P1SCLMD_TXIDS (1 << 14) | ||
| 262 | #define P1SCLMD_RESTARTAN (1 << 13) | ||
| 263 | #define P1SCLMD_DISAUTOMDIX (1 << 10) | ||
| 264 | #define P1SCLMD_FORCEMDIX (1 << 9) | ||
| 265 | #define P1SCLMD_AUTONEGEN (1 << 7) | ||
| 266 | #define P1SCLMD_FORCE100 (1 << 6) | ||
| 267 | #define P1SCLMD_FORCEFDX (1 << 5) | ||
| 268 | #define P1SCLMD_ADV_FLOW (1 << 4) | ||
| 269 | #define P1SCLMD_ADV_100BT_FDX (1 << 3) | ||
| 270 | #define P1SCLMD_ADV_100BT_HDX (1 << 2) | ||
| 271 | #define P1SCLMD_ADV_10BT_FDX (1 << 1) | ||
| 272 | #define P1SCLMD_ADV_10BT_HDX (1 << 0) | ||
| 273 | 272 | ||
| 274 | #define KS_P1CR 0xF6 | 273 | #define KS_P1CR 0xF6 |
| 275 | #define P1CR_HP_MDIX (1 << 15) | 274 | #define P1CR_LEDOFF (1 << 15) |
| 276 | #define P1CR_REV_POL (1 << 13) | 275 | #define P1CR_TXIDS (1 << 14) |
| 277 | #define P1CR_OP_100M (1 << 10) | 276 | #define P1CR_RESTARTAN (1 << 13) |
| 278 | #define P1CR_OP_FDX (1 << 9) | 277 | #define P1CR_DISAUTOMDIX (1 << 10) |
| 279 | #define P1CR_OP_MDI (1 << 7) | 278 | #define P1CR_FORCEMDIX (1 << 9) |
| 280 | #define P1CR_AN_DONE (1 << 6) | 279 | #define P1CR_AUTONEGEN (1 << 7) |
| 281 | #define P1CR_LINK_GOOD (1 << 5) | 280 | #define P1CR_FORCE100 (1 << 6) |
| 282 | #define P1CR_PNTR_FLOW (1 << 4) | 281 | #define P1CR_FORCEFDX (1 << 5) |
| 283 | #define P1CR_PNTR_100BT_FDX (1 << 3) | 282 | #define P1CR_ADV_FLOW (1 << 4) |
| 284 | #define P1CR_PNTR_100BT_HDX (1 << 2) | 283 | #define P1CR_ADV_100BT_FDX (1 << 3) |
| 285 | #define P1CR_PNTR_10BT_FDX (1 << 1) | 284 | #define P1CR_ADV_100BT_HDX (1 << 2) |
| 286 | #define P1CR_PNTR_10BT_HDX (1 << 0) | 285 | #define P1CR_ADV_10BT_FDX (1 << 1) |
| 286 | #define P1CR_ADV_10BT_HDX (1 << 0) | ||
| 287 | |||
| 288 | #define KS_P1SR 0xF8 | ||
| 289 | #define P1SR_HP_MDIX (1 << 15) | ||
| 290 | #define P1SR_REV_POL (1 << 13) | ||
| 291 | #define P1SR_OP_100M (1 << 10) | ||
| 292 | #define P1SR_OP_FDX (1 << 9) | ||
| 293 | #define P1SR_OP_MDI (1 << 7) | ||
| 294 | #define P1SR_AN_DONE (1 << 6) | ||
| 295 | #define P1SR_LINK_GOOD (1 << 5) | ||
| 296 | #define P1SR_PNTR_FLOW (1 << 4) | ||
| 297 | #define P1SR_PNTR_100BT_FDX (1 << 3) | ||
| 298 | #define P1SR_PNTR_100BT_HDX (1 << 2) | ||
| 299 | #define P1SR_PNTR_10BT_FDX (1 << 1) | ||
| 300 | #define P1SR_PNTR_10BT_HDX (1 << 0) | ||
| 287 | 301 | ||
| 288 | /* TX Frame control */ | 302 | /* TX Frame control */ |
| 289 | |||
| 290 | #define TXFR_TXIC (1 << 15) | 303 | #define TXFR_TXIC (1 << 15) |
| 291 | #define TXFR_TXFID_MASK (0x3f << 0) | 304 | #define TXFR_TXFID_MASK (0x3f << 0) |
| 292 | #define TXFR_TXFID_SHIFT (0) | 305 | #define TXFR_TXFID_SHIFT (0) |
| 293 | |||
| 294 | /* SPI frame opcodes */ | ||
| 295 | #define KS_SPIOP_RD (0x00) | ||
| 296 | #define KS_SPIOP_WR (0x40) | ||
| 297 | #define KS_SPIOP_RXFIFO (0x80) | ||
| 298 | #define KS_SPIOP_TXFIFO (0xC0) | ||
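The header changes above annotate each bit with the chip it applies to (KSZ8851SNL on SPI versus KSZ8851-16MLL on the parallel bus) and untangle the old definitions into a control register (KS_P1CR) and a status register (KS_P1SR), so both drivers can include one shared ks8851.h. An illustrative use of the renamed status bits, assuming the SPI driver's existing ks8851_rdreg16() accessor:

```c
#include "ks8851.h"

/* Illustrative helper, not part of the patch: link state now comes from
 * the Port 1 status register rather than the control register. */
static bool ks8851_link_good(struct ks8851_net *ks)
{
	return ks8851_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD;
}
```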
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 35f8c9ef204d..c946841c0a06 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c | |||
| @@ -40,6 +40,8 @@ | |||
| 40 | #include <linux/of_device.h> | 40 | #include <linux/of_device.h> |
| 41 | #include <linux/of_net.h> | 41 | #include <linux/of_net.h> |
| 42 | 42 | ||
| 43 | #include "ks8851.h" | ||
| 44 | |||
| 43 | #define DRV_NAME "ks8851_mll" | 45 | #define DRV_NAME "ks8851_mll" |
| 44 | 46 | ||
| 45 | static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; | 47 | static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; |
| @@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; | |||
| 48 | #define TX_BUF_SIZE 2000 | 50 | #define TX_BUF_SIZE 2000 |
| 49 | #define RX_BUF_SIZE 2000 | 51 | #define RX_BUF_SIZE 2000 |
| 50 | 52 | ||
| 51 | #define KS_CCR 0x08 | ||
| 52 | #define CCR_EEPROM (1 << 9) | ||
| 53 | #define CCR_SPI (1 << 8) | ||
| 54 | #define CCR_8BIT (1 << 7) | ||
| 55 | #define CCR_16BIT (1 << 6) | ||
| 56 | #define CCR_32BIT (1 << 5) | ||
| 57 | #define CCR_SHARED (1 << 4) | ||
| 58 | #define CCR_32PIN (1 << 0) | ||
| 59 | |||
| 60 | /* MAC address registers */ | ||
| 61 | #define KS_MARL 0x10 | ||
| 62 | #define KS_MARM 0x12 | ||
| 63 | #define KS_MARH 0x14 | ||
| 64 | |||
| 65 | #define KS_OBCR 0x20 | ||
| 66 | #define OBCR_ODS_16MA (1 << 6) | ||
| 67 | |||
| 68 | #define KS_EEPCR 0x22 | ||
| 69 | #define EEPCR_EESA (1 << 4) | ||
| 70 | #define EEPCR_EESB (1 << 3) | ||
| 71 | #define EEPCR_EEDO (1 << 2) | ||
| 72 | #define EEPCR_EESCK (1 << 1) | ||
| 73 | #define EEPCR_EECS (1 << 0) | ||
| 74 | |||
| 75 | #define KS_MBIR 0x24 | ||
| 76 | #define MBIR_TXMBF (1 << 12) | ||
| 77 | #define MBIR_TXMBFA (1 << 11) | ||
| 78 | #define MBIR_RXMBF (1 << 4) | ||
| 79 | #define MBIR_RXMBFA (1 << 3) | ||
| 80 | |||
| 81 | #define KS_GRR 0x26 | ||
| 82 | #define GRR_QMU (1 << 1) | ||
| 83 | #define GRR_GSR (1 << 0) | ||
| 84 | |||
| 85 | #define KS_WFCR 0x2A | ||
| 86 | #define WFCR_MPRXE (1 << 7) | ||
| 87 | #define WFCR_WF3E (1 << 3) | ||
| 88 | #define WFCR_WF2E (1 << 2) | ||
| 89 | #define WFCR_WF1E (1 << 1) | ||
| 90 | #define WFCR_WF0E (1 << 0) | ||
| 91 | |||
| 92 | #define KS_WF0CRC0 0x30 | ||
| 93 | #define KS_WF0CRC1 0x32 | ||
| 94 | #define KS_WF0BM0 0x34 | ||
| 95 | #define KS_WF0BM1 0x36 | ||
| 96 | #define KS_WF0BM2 0x38 | ||
| 97 | #define KS_WF0BM3 0x3A | ||
| 98 | |||
| 99 | #define KS_WF1CRC0 0x40 | ||
| 100 | #define KS_WF1CRC1 0x42 | ||
| 101 | #define KS_WF1BM0 0x44 | ||
| 102 | #define KS_WF1BM1 0x46 | ||
| 103 | #define KS_WF1BM2 0x48 | ||
| 104 | #define KS_WF1BM3 0x4A | ||
| 105 | |||
| 106 | #define KS_WF2CRC0 0x50 | ||
| 107 | #define KS_WF2CRC1 0x52 | ||
| 108 | #define KS_WF2BM0 0x54 | ||
| 109 | #define KS_WF2BM1 0x56 | ||
| 110 | #define KS_WF2BM2 0x58 | ||
| 111 | #define KS_WF2BM3 0x5A | ||
| 112 | |||
| 113 | #define KS_WF3CRC0 0x60 | ||
| 114 | #define KS_WF3CRC1 0x62 | ||
| 115 | #define KS_WF3BM0 0x64 | ||
| 116 | #define KS_WF3BM1 0x66 | ||
| 117 | #define KS_WF3BM2 0x68 | ||
| 118 | #define KS_WF3BM3 0x6A | ||
| 119 | |||
| 120 | #define KS_TXCR 0x70 | ||
| 121 | #define TXCR_TCGICMP (1 << 8) | ||
| 122 | #define TXCR_TCGUDP (1 << 7) | ||
| 123 | #define TXCR_TCGTCP (1 << 6) | ||
| 124 | #define TXCR_TCGIP (1 << 5) | ||
| 125 | #define TXCR_FTXQ (1 << 4) | ||
| 126 | #define TXCR_TXFCE (1 << 3) | ||
| 127 | #define TXCR_TXPE (1 << 2) | ||
| 128 | #define TXCR_TXCRC (1 << 1) | ||
| 129 | #define TXCR_TXE (1 << 0) | ||
| 130 | |||
| 131 | #define KS_TXSR 0x72 | ||
| 132 | #define TXSR_TXLC (1 << 13) | ||
| 133 | #define TXSR_TXMC (1 << 12) | ||
| 134 | #define TXSR_TXFID_MASK (0x3f << 0) | ||
| 135 | #define TXSR_TXFID_SHIFT (0) | ||
| 136 | #define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f) | ||
| 137 | |||
| 138 | |||
| 139 | #define KS_RXCR1 0x74 | ||
| 140 | #define RXCR1_FRXQ (1 << 15) | ||
| 141 | #define RXCR1_RXUDPFCC (1 << 14) | ||
| 142 | #define RXCR1_RXTCPFCC (1 << 13) | ||
| 143 | #define RXCR1_RXIPFCC (1 << 12) | ||
| 144 | #define RXCR1_RXPAFMA (1 << 11) | ||
| 145 | #define RXCR1_RXFCE (1 << 10) | ||
| 146 | #define RXCR1_RXEFE (1 << 9) | ||
| 147 | #define RXCR1_RXMAFMA (1 << 8) | ||
| 148 | #define RXCR1_RXBE (1 << 7) | ||
| 149 | #define RXCR1_RXME (1 << 6) | ||
| 150 | #define RXCR1_RXUE (1 << 5) | ||
| 151 | #define RXCR1_RXAE (1 << 4) | ||
| 152 | #define RXCR1_RXINVF (1 << 1) | ||
| 153 | #define RXCR1_RXE (1 << 0) | ||
| 154 | #define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \ | 53 | #define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \ |
| 155 | RXCR1_RXMAFMA | RXCR1_RXPAFMA) | 54 | RXCR1_RXMAFMA | RXCR1_RXPAFMA) |
| 156 | |||
| 157 | #define KS_RXCR2 0x76 | ||
| 158 | #define RXCR2_SRDBL_MASK (0x7 << 5) | ||
| 159 | #define RXCR2_SRDBL_SHIFT (5) | ||
| 160 | #define RXCR2_SRDBL_4B (0x0 << 5) | ||
| 161 | #define RXCR2_SRDBL_8B (0x1 << 5) | ||
| 162 | #define RXCR2_SRDBL_16B (0x2 << 5) | ||
| 163 | #define RXCR2_SRDBL_32B (0x3 << 5) | ||
| 164 | /* #define RXCR2_SRDBL_FRAME (0x4 << 5) */ | ||
| 165 | #define RXCR2_IUFFP (1 << 4) | ||
| 166 | #define RXCR2_RXIUFCEZ (1 << 3) | ||
| 167 | #define RXCR2_UDPLFE (1 << 2) | ||
| 168 | #define RXCR2_RXICMPFCC (1 << 1) | ||
| 169 | #define RXCR2_RXSAF (1 << 0) | ||
| 170 | |||
| 171 | #define KS_TXMIR 0x78 | ||
| 172 | |||
| 173 | #define KS_RXFHSR 0x7C | ||
| 174 | #define RXFSHR_RXFV (1 << 15) | ||
| 175 | #define RXFSHR_RXICMPFCS (1 << 13) | ||
| 176 | #define RXFSHR_RXIPFCS (1 << 12) | ||
| 177 | #define RXFSHR_RXTCPFCS (1 << 11) | ||
| 178 | #define RXFSHR_RXUDPFCS (1 << 10) | ||
| 179 | #define RXFSHR_RXBF (1 << 7) | ||
| 180 | #define RXFSHR_RXMF (1 << 6) | ||
| 181 | #define RXFSHR_RXUF (1 << 5) | ||
| 182 | #define RXFSHR_RXMR (1 << 4) | ||
| 183 | #define RXFSHR_RXFT (1 << 3) | ||
| 184 | #define RXFSHR_RXFTL (1 << 2) | ||
| 185 | #define RXFSHR_RXRF (1 << 1) | ||
| 186 | #define RXFSHR_RXCE (1 << 0) | ||
| 187 | #define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\ | ||
| 188 | RXFSHR_RXFTL | RXFSHR_RXMR |\ | ||
| 189 | RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\ | ||
| 190 | RXFSHR_RXTCPFCS) | ||
| 191 | #define KS_RXFHBCR 0x7E | ||
| 192 | #define RXFHBCR_CNT_MASK 0x0FFF | ||
| 193 | |||
| 194 | #define KS_TXQCR 0x80 | ||
| 195 | #define TXQCR_AETFE (1 << 2) | ||
| 196 | #define TXQCR_TXQMAM (1 << 1) | ||
| 197 | #define TXQCR_METFE (1 << 0) | ||
| 198 | |||
| 199 | #define KS_RXQCR 0x82 | ||
| 200 | #define RXQCR_RXDTTS (1 << 12) | ||
| 201 | #define RXQCR_RXDBCTS (1 << 11) | ||
| 202 | #define RXQCR_RXFCTS (1 << 10) | ||
| 203 | #define RXQCR_RXIPHTOE (1 << 9) | ||
| 204 | #define RXQCR_RXDTTE (1 << 7) | ||
| 205 | #define RXQCR_RXDBCTE (1 << 6) | ||
| 206 | #define RXQCR_RXFCTE (1 << 5) | ||
| 207 | #define RXQCR_ADRFE (1 << 4) | ||
| 208 | #define RXQCR_SDA (1 << 3) | ||
| 209 | #define RXQCR_RRXEF (1 << 0) | ||
| 210 | #define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE) | 55 | #define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE) |
| 211 | 56 | ||
| 212 | #define KS_TXFDPR 0x84 | ||
| 213 | #define TXFDPR_TXFPAI (1 << 14) | ||
| 214 | #define TXFDPR_TXFP_MASK (0x7ff << 0) | ||
| 215 | #define TXFDPR_TXFP_SHIFT (0) | ||
| 216 | |||
| 217 | #define KS_RXFDPR 0x86 | ||
| 218 | #define RXFDPR_RXFPAI (1 << 14) | ||
| 219 | |||
| 220 | #define KS_RXDTTR 0x8C | ||
| 221 | #define KS_RXDBCTR 0x8E | ||
| 222 | |||
| 223 | #define KS_IER 0x90 | ||
| 224 | #define KS_ISR 0x92 | ||
| 225 | #define IRQ_LCI (1 << 15) | ||
| 226 | #define IRQ_TXI (1 << 14) | ||
| 227 | #define IRQ_RXI (1 << 13) | ||
| 228 | #define IRQ_RXOI (1 << 11) | ||
| 229 | #define IRQ_TXPSI (1 << 9) | ||
| 230 | #define IRQ_RXPSI (1 << 8) | ||
| 231 | #define IRQ_TXSAI (1 << 6) | ||
| 232 | #define IRQ_RXWFDI (1 << 5) | ||
| 233 | #define IRQ_RXMPDI (1 << 4) | ||
| 234 | #define IRQ_LDI (1 << 3) | ||
| 235 | #define IRQ_EDI (1 << 2) | ||
| 236 | #define IRQ_SPIBEI (1 << 1) | ||
| 237 | #define IRQ_DEDI (1 << 0) | ||
| 238 | |||
| 239 | #define KS_RXFCTR 0x9C | ||
| 240 | #define RXFCTR_THRESHOLD_MASK 0x00FF | ||
| 241 | |||
| 242 | #define KS_RXFC 0x9D | ||
| 243 | #define RXFCTR_RXFC_MASK (0xff << 8) | ||
| 244 | #define RXFCTR_RXFC_SHIFT (8) | ||
| 245 | #define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff) | ||
| 246 | #define RXFCTR_RXFCT_MASK (0xff << 0) | ||
| 247 | #define RXFCTR_RXFCT_SHIFT (0) | ||
| 248 | |||
| 249 | #define KS_TXNTFSR 0x9E | ||
| 250 | |||
| 251 | #define KS_MAHTR0 0xA0 | ||
| 252 | #define KS_MAHTR1 0xA2 | ||
| 253 | #define KS_MAHTR2 0xA4 | ||
| 254 | #define KS_MAHTR3 0xA6 | ||
| 255 | |||
| 256 | #define KS_FCLWR 0xB0 | ||
| 257 | #define KS_FCHWR 0xB2 | ||
| 258 | #define KS_FCOWR 0xB4 | ||
| 259 | |||
| 260 | #define KS_CIDER 0xC0 | ||
| 261 | #define CIDER_ID 0x8870 | ||
| 262 | #define CIDER_REV_MASK (0x7 << 1) | ||
| 263 | #define CIDER_REV_SHIFT (1) | ||
| 264 | #define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7) | ||
| 265 | |||
| 266 | #define KS_CGCR 0xC6 | ||
| 267 | #define KS_IACR 0xC8 | ||
| 268 | #define IACR_RDEN (1 << 12) | ||
| 269 | #define IACR_TSEL_MASK (0x3 << 10) | ||
| 270 | #define IACR_TSEL_SHIFT (10) | ||
| 271 | #define IACR_TSEL_MIB (0x3 << 10) | ||
| 272 | #define IACR_ADDR_MASK (0x1f << 0) | ||
| 273 | #define IACR_ADDR_SHIFT (0) | ||
| 274 | |||
| 275 | #define KS_IADLR 0xD0 | ||
| 276 | #define KS_IAHDR 0xD2 | ||
| 277 | |||
| 278 | #define KS_PMECR 0xD4 | ||
| 279 | #define PMECR_PME_DELAY (1 << 14) | ||
| 280 | #define PMECR_PME_POL (1 << 12) | ||
| 281 | #define PMECR_WOL_WAKEUP (1 << 11) | ||
| 282 | #define PMECR_WOL_MAGICPKT (1 << 10) | ||
| 283 | #define PMECR_WOL_LINKUP (1 << 9) | ||
| 284 | #define PMECR_WOL_ENERGY (1 << 8) | ||
| 285 | #define PMECR_AUTO_WAKE_EN (1 << 7) | ||
| 286 | #define PMECR_WAKEUP_NORMAL (1 << 6) | ||
| 287 | #define PMECR_WKEVT_MASK (0xf << 2) | ||
| 288 | #define PMECR_WKEVT_SHIFT (2) | ||
| 289 | #define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf) | ||
| 290 | #define PMECR_WKEVT_ENERGY (0x1 << 2) | ||
| 291 | #define PMECR_WKEVT_LINK (0x2 << 2) | ||
| 292 | #define PMECR_WKEVT_MAGICPKT (0x4 << 2) | ||
| 293 | #define PMECR_WKEVT_FRAME (0x8 << 2) | ||
| 294 | #define PMECR_PM_MASK (0x3 << 0) | ||
| 295 | #define PMECR_PM_SHIFT (0) | ||
| 296 | #define PMECR_PM_NORMAL (0x0 << 0) | ||
| 297 | #define PMECR_PM_ENERGY (0x1 << 0) | ||
| 298 | #define PMECR_PM_SOFTDOWN (0x2 << 0) | ||
| 299 | #define PMECR_PM_POWERSAVE (0x3 << 0) | ||
| 300 | |||
| 301 | /* Standard MII PHY data */ | ||
| 302 | #define KS_P1MBCR 0xE4 | ||
| 303 | #define P1MBCR_FORCE_FDX (1 << 8) | ||
| 304 | |||
| 305 | #define KS_P1MBSR 0xE6 | ||
| 306 | #define P1MBSR_AN_COMPLETE (1 << 5) | ||
| 307 | #define P1MBSR_AN_CAPABLE (1 << 3) | ||
| 308 | #define P1MBSR_LINK_UP (1 << 2) | ||
| 309 | |||
| 310 | #define KS_PHY1ILR 0xE8 | ||
| 311 | #define KS_PHY1IHR 0xEA | ||
| 312 | #define KS_P1ANAR 0xEC | ||
| 313 | #define KS_P1ANLPR 0xEE | ||
| 314 | |||
| 315 | #define KS_P1SCLMD 0xF4 | ||
| 316 | #define P1SCLMD_LEDOFF (1 << 15) | ||
| 317 | #define P1SCLMD_TXIDS (1 << 14) | ||
| 318 | #define P1SCLMD_RESTARTAN (1 << 13) | ||
| 319 | #define P1SCLMD_DISAUTOMDIX (1 << 10) | ||
| 320 | #define P1SCLMD_FORCEMDIX (1 << 9) | ||
| 321 | #define P1SCLMD_AUTONEGEN (1 << 7) | ||
| 322 | #define P1SCLMD_FORCE100 (1 << 6) | ||
| 323 | #define P1SCLMD_FORCEFDX (1 << 5) | ||
| 324 | #define P1SCLMD_ADV_FLOW (1 << 4) | ||
| 325 | #define P1SCLMD_ADV_100BT_FDX (1 << 3) | ||
| 326 | #define P1SCLMD_ADV_100BT_HDX (1 << 2) | ||
| 327 | #define P1SCLMD_ADV_10BT_FDX (1 << 1) | ||
| 328 | #define P1SCLMD_ADV_10BT_HDX (1 << 0) | ||
| 329 | |||
| 330 | #define KS_P1CR 0xF6 | ||
| 331 | #define P1CR_HP_MDIX (1 << 15) | ||
| 332 | #define P1CR_REV_POL (1 << 13) | ||
| 333 | #define P1CR_OP_100M (1 << 10) | ||
| 334 | #define P1CR_OP_FDX (1 << 9) | ||
| 335 | #define P1CR_OP_MDI (1 << 7) | ||
| 336 | #define P1CR_AN_DONE (1 << 6) | ||
| 337 | #define P1CR_LINK_GOOD (1 << 5) | ||
| 338 | #define P1CR_PNTR_FLOW (1 << 4) | ||
| 339 | #define P1CR_PNTR_100BT_FDX (1 << 3) | ||
| 340 | #define P1CR_PNTR_100BT_HDX (1 << 2) | ||
| 341 | #define P1CR_PNTR_10BT_FDX (1 << 1) | ||
| 342 | #define P1CR_PNTR_10BT_HDX (1 << 0) | ||
| 343 | |||
| 344 | /* TX Frame control */ | ||
| 345 | |||
| 346 | #define TXFR_TXIC (1 << 15) | ||
| 347 | #define TXFR_TXFID_MASK (0x3f << 0) | ||
| 348 | #define TXFR_TXFID_SHIFT (0) | ||
| 349 | |||
| 350 | #define KS_P1SR 0xF8 | ||
| 351 | #define P1SR_HP_MDIX (1 << 15) | ||
| 352 | #define P1SR_REV_POL (1 << 13) | ||
| 353 | #define P1SR_OP_100M (1 << 10) | ||
| 354 | #define P1SR_OP_FDX (1 << 9) | ||
| 355 | #define P1SR_OP_MDI (1 << 7) | ||
| 356 | #define P1SR_AN_DONE (1 << 6) | ||
| 357 | #define P1SR_LINK_GOOD (1 << 5) | ||
| 358 | #define P1SR_PNTR_FLOW (1 << 4) | ||
| 359 | #define P1SR_PNTR_100BT_FDX (1 << 3) | ||
| 360 | #define P1SR_PNTR_100BT_HDX (1 << 2) | ||
| 361 | #define P1SR_PNTR_10BT_FDX (1 << 1) | ||
| 362 | #define P1SR_PNTR_10BT_HDX (1 << 0) | ||
| 363 | |||
| 364 | #define ENUM_BUS_NONE 0 | 57 | #define ENUM_BUS_NONE 0 |
| 365 | #define ENUM_BUS_8BIT 1 | 58 | #define ENUM_BUS_8BIT 1 |
| 366 | #define ENUM_BUS_16BIT 2 | 59 | #define ENUM_BUS_16BIT 2 |
| @@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks) | |||
| 1475 | ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); | 1168 | ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); |
| 1476 | 1169 | ||
| 1477 | /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */ | 1170 | /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */ |
| 1478 | ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK); | 1171 | ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK); |
| 1479 | 1172 | ||
| 1480 | /* Setup RxQ Command Control (RXQCR) */ | 1173 | /* Setup RxQ Command Control (RXQCR) */ |
| 1481 | ks->rc_rxqcr = RXQCR_CMD_CNTL; | 1174 | ks->rc_rxqcr = RXQCR_CMD_CNTL; |
| @@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks) | |||
| 1488 | */ | 1181 | */ |
| 1489 | 1182 | ||
| 1490 | w = ks_rdreg16(ks, KS_P1MBCR); | 1183 | w = ks_rdreg16(ks, KS_P1MBCR); |
| 1491 | w &= ~P1MBCR_FORCE_FDX; | 1184 | w &= ~BMCR_FULLDPLX; |
| 1492 | ks_wrreg16(ks, KS_P1MBCR, w); | 1185 | ks_wrreg16(ks, KS_P1MBCR, w); |
| 1493 | 1186 | ||
| 1494 | w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; | 1187 | w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; |
| @@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev) | |||
| 1629 | ks_setup_int(ks); | 1322 | ks_setup_int(ks); |
| 1630 | 1323 | ||
| 1631 | data = ks_rdreg16(ks, KS_OBCR); | 1324 | data = ks_rdreg16(ks, KS_OBCR); |
| 1632 | ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); | 1325 | ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA); |
| 1633 | 1326 | ||
| 1634 | /* overwriting the default MAC address */ | 1327 | /* overwriting the default MAC address */ |
| 1635 | if (pdev->dev.of_node) { | 1328 | if (pdev->dev.of_node) { |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index eeda4ed98333..e336f6ee94f5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c | |||
| @@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, | |||
| 48 | 48 | ||
| 49 | tmp_push_vlan_tci = | 49 | tmp_push_vlan_tci = |
| 50 | FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | | 50 | FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | |
| 51 | FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | | 51 | FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid); |
| 52 | NFP_FL_PUSH_VLAN_CFI; | ||
| 53 | push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); | 52 | push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); |
| 54 | } | 53 | } |
| 55 | 54 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 4fcaf11ed56e..0ed51e79db00 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) | 26 | #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) |
| 27 | 27 | ||
| 28 | #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) | 28 | #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) |
| 29 | #define NFP_FLOWER_MASK_VLAN_CFI BIT(12) | 29 | #define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12) |
| 30 | #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) | 30 | #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) |
| 31 | 31 | ||
| 32 | #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) | 32 | #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) |
| @@ -82,7 +82,6 @@ | |||
| 82 | #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) | 82 | #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) |
| 83 | 83 | ||
| 84 | #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) | 84 | #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) |
| 85 | #define NFP_FL_PUSH_VLAN_CFI BIT(12) | ||
| 86 | #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) | 85 | #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) |
| 87 | 86 | ||
| 88 | #define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) | 87 | #define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index e03c8ef2c28c..9b8b843d0340 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c | |||
| @@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, | |||
| 30 | 30 | ||
| 31 | flow_rule_match_vlan(rule, &match); | 31 | flow_rule_match_vlan(rule, &match); |
| 32 | /* Populate the tci field. */ | 32 | /* Populate the tci field. */ |
| 33 | if (match.key->vlan_id || match.key->vlan_priority) { | 33 | tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; |
| 34 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, | 34 | tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, |
| 35 | match.key->vlan_priority) | | 35 | match.key->vlan_priority) | |
| 36 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, | 36 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, |
| 37 | match.key->vlan_id) | | 37 | match.key->vlan_id); |
| 38 | NFP_FLOWER_MASK_VLAN_CFI; | 38 | ext->tci = cpu_to_be16(tmp_tci); |
| 39 | ext->tci = cpu_to_be16(tmp_tci); | 39 | |
| 40 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, | 40 | tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; |
| 41 | match.mask->vlan_priority) | | 41 | tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, |
| 42 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, | 42 | match.mask->vlan_priority) | |
| 43 | match.mask->vlan_id) | | 43 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, |
| 44 | NFP_FLOWER_MASK_VLAN_CFI; | 44 | match.mask->vlan_id); |
| 45 | msk->tci = cpu_to_be16(tmp_tci); | 45 | msk->tci = cpu_to_be16(tmp_tci); |
| 46 | } | ||
| 47 | } | 46 | } |
| 48 | } | 47 | } |
| 49 | 48 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d2c803bb4e56..94d228c04496 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | |||
| @@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
| 195 | ret = dev_queue_xmit(skb); | 195 | ret = dev_queue_xmit(skb); |
| 196 | nfp_repr_inc_tx_stats(netdev, len, ret); | 196 | nfp_repr_inc_tx_stats(netdev, len, ret); |
| 197 | 197 | ||
| 198 | return ret; | 198 | return NETDEV_TX_OK; |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | static int nfp_repr_stop(struct net_device *netdev) | 201 | static int nfp_repr_stop(struct net_device *netdev) |
| @@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, | |||
| 383 | netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | 383 | netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); |
| 384 | netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; | 384 | netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; |
| 385 | 385 | ||
| 386 | netdev->priv_flags |= IFF_NO_QUEUE; | 386 | netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; |
| 387 | netdev->features |= NETIF_F_LLTX; | 387 | netdev->features |= NETIF_F_LLTX; |
| 388 | 388 | ||
| 389 | if (nfp_app_has_tc(app)) { | 389 | if (nfp_app_has_tc(app)) { |
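nfp_repr_xmit() above now always reports NETDEV_TX_OK: once dev_queue_xmit() has been called the skb is consumed, so its NET_XMIT_* return value is only useful for statistics and must not be propagated to the stack as a netdev_tx_t. A hedged sketch of that convention (the stats handling here is simplified; the real driver uses per-cpu counters via nfp_repr_inc_tx_stats()):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: hand the skb to the lower device and swallow the NET_XMIT_*
 * result; the packet cannot be requeued once dev_queue_xmit() ran. */
static netdev_tx_t example_repr_xmit(struct sk_buff *skb,
				     struct net_device *netdev)
{
	unsigned int len = skb->len;	/* remember length before xmit */
	int ret;

	/* skb->dev is assumed to already point at the lower device */
	ret = dev_queue_xmit(skb);
	if (ret != NET_XMIT_SUCCESS && ret != NET_XMIT_CN) {
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += len;
	}

	return NETDEV_TX_OK;	/* never propagate NET_XMIT_* upward */
}
```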
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 0c443ea98479..374a4d4371f9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
| @@ -497,7 +497,7 @@ struct qlcnic_hardware_context { | |||
| 497 | u16 board_type; | 497 | u16 board_type; |
| 498 | u16 supported_type; | 498 | u16 supported_type; |
| 499 | 499 | ||
| 500 | u16 link_speed; | 500 | u32 link_speed; |
| 501 | u16 link_duplex; | 501 | u16 link_duplex; |
| 502 | u16 link_autoneg; | 502 | u16 link_autoneg; |
| 503 | u16 module_type; | 503 | u16 module_type; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 3b0adda7cc9c..a4cd6f2cfb86 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
| @@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) | |||
| 1048 | 1048 | ||
| 1049 | for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { | 1049 | for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { |
| 1050 | skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); | 1050 | skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); |
| 1051 | if (!skb) | ||
| 1052 | break; | ||
| 1051 | qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); | 1053 | qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); |
| 1052 | skb_put(skb, QLCNIC_ILB_PKT_SIZE); | 1054 | skb_put(skb, QLCNIC_ILB_PKT_SIZE); |
| 1053 | adapter->ahw->diag_cnt = 0; | 1055 | adapter->ahw->diag_cnt = 0; |
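The qlcnic loopback-test fix above is a plain NULL check: if netdev_alloc_skb() fails mid-test, the loop stops instead of dereferencing a NULL pointer. A short hedged sketch of the same allocate-check-fill pattern:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: never touch the buffer before checking the allocation. */
static struct sk_buff *example_alloc_test_pkt(struct net_device *ndev,
					      unsigned int size)
{
	struct sk_buff *skb = netdev_alloc_skb(ndev, size);

	if (!skb)
		return NULL;

	skb_put(skb, size);	/* expand the data area to 'size' bytes */
	return skb;
}
```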
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index cfb67b746595..58e0ca9093d3 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c | |||
| @@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev) | |||
| 482 | write_reg_high(ioaddr, IMR, ISRh_RxErr); | 482 | write_reg_high(ioaddr, IMR, ISRh_RxErr); |
| 483 | 483 | ||
| 484 | lp->tx_unit_busy = 0; | 484 | lp->tx_unit_busy = 0; |
| 485 | lp->pac_cnt_in_tx_buf = 0; | 485 | lp->pac_cnt_in_tx_buf = 0; |
| 486 | lp->saved_tx_size = 0; | 486 | lp->saved_tx_size = 0; |
| 487 | } | 487 | } |
| 488 | 488 | ||
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index c29dde064078..ed651dde6ef9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/pm_runtime.h> | 28 | #include <linux/pm_runtime.h> |
| 29 | #include <linux/firmware.h> | 29 | #include <linux/firmware.h> |
| 30 | #include <linux/prefetch.h> | 30 | #include <linux/prefetch.h> |
| 31 | #include <linux/pci-aspm.h> | ||
| 31 | #include <linux/ipv6.h> | 32 | #include <linux/ipv6.h> |
| 32 | #include <net/ip6_checksum.h> | 33 | #include <net/ip6_checksum.h> |
| 33 | 34 | ||
| @@ -678,6 +679,7 @@ struct rtl8169_private { | |||
| 678 | struct work_struct work; | 679 | struct work_struct work; |
| 679 | } wk; | 680 | } wk; |
| 680 | 681 | ||
| 682 | unsigned irq_enabled:1; | ||
| 681 | unsigned supports_gmii:1; | 683 | unsigned supports_gmii:1; |
| 682 | dma_addr_t counters_phys_addr; | 684 | dma_addr_t counters_phys_addr; |
| 683 | struct rtl8169_counters *counters; | 685 | struct rtl8169_counters *counters; |
| @@ -1293,6 +1295,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) | |||
| 1293 | static void rtl_irq_disable(struct rtl8169_private *tp) | 1295 | static void rtl_irq_disable(struct rtl8169_private *tp) |
| 1294 | { | 1296 | { |
| 1295 | RTL_W16(tp, IntrMask, 0); | 1297 | RTL_W16(tp, IntrMask, 0); |
| 1298 | tp->irq_enabled = 0; | ||
| 1296 | } | 1299 | } |
| 1297 | 1300 | ||
| 1298 | #define RTL_EVENT_NAPI_RX (RxOK | RxErr) | 1301 | #define RTL_EVENT_NAPI_RX (RxOK | RxErr) |
| @@ -1301,6 +1304,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp) | |||
| 1301 | 1304 | ||
| 1302 | static void rtl_irq_enable(struct rtl8169_private *tp) | 1305 | static void rtl_irq_enable(struct rtl8169_private *tp) |
| 1303 | { | 1306 | { |
| 1307 | tp->irq_enabled = 1; | ||
| 1304 | RTL_W16(tp, IntrMask, tp->irq_mask); | 1308 | RTL_W16(tp, IntrMask, tp->irq_mask); |
| 1305 | } | 1309 | } |
| 1306 | 1310 | ||
| @@ -5457,7 +5461,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) | |||
| 5457 | tp->cp_cmd |= PktCntrDisable | INTT_1; | 5461 | tp->cp_cmd |= PktCntrDisable | INTT_1; |
| 5458 | RTL_W16(tp, CPlusCmd, tp->cp_cmd); | 5462 | RTL_W16(tp, CPlusCmd, tp->cp_cmd); |
| 5459 | 5463 | ||
| 5460 | RTL_W16(tp, IntrMitigate, 0x5151); | 5464 | RTL_W16(tp, IntrMitigate, 0x5100); |
| 5461 | 5465 | ||
| 5462 | /* Work around for RxFIFO overflow. */ | 5466 | /* Work around for RxFIFO overflow. */ |
| 5463 | if (tp->mac_version == RTL_GIGA_MAC_VER_11) { | 5467 | if (tp->mac_version == RTL_GIGA_MAC_VER_11) { |
| @@ -6520,9 +6524,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) | |||
| 6520 | { | 6524 | { |
| 6521 | struct rtl8169_private *tp = dev_instance; | 6525 | struct rtl8169_private *tp = dev_instance; |
| 6522 | u16 status = RTL_R16(tp, IntrStatus); | 6526 | u16 status = RTL_R16(tp, IntrStatus); |
| 6523 | u16 irq_mask = RTL_R16(tp, IntrMask); | ||
| 6524 | 6527 | ||
| 6525 | if (status == 0xffff || !(status & irq_mask)) | 6528 | if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask)) |
| 6526 | return IRQ_NONE; | 6529 | return IRQ_NONE; |
| 6527 | 6530 | ||
| 6528 | if (unlikely(status & SYSErr)) { | 6531 | if (unlikely(status & SYSErr)) { |
| @@ -6540,7 +6543,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) | |||
| 6540 | set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); | 6543 | set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); |
| 6541 | } | 6544 | } |
| 6542 | 6545 | ||
| 6543 | if (status & RTL_EVENT_NAPI) { | 6546 | if (status & (RTL_EVENT_NAPI | LinkChg)) { |
| 6544 | rtl_irq_disable(tp); | 6547 | rtl_irq_disable(tp); |
| 6545 | napi_schedule_irqoff(&tp->napi); | 6548 | napi_schedule_irqoff(&tp->napi); |
| 6546 | } | 6549 | } |
| @@ -7350,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7350 | if (rc) | 7353 | if (rc) |
| 7351 | return rc; | 7354 | return rc; |
| 7352 | 7355 | ||
| 7356 | /* Disable ASPM completely as that cause random device stop working | ||
| 7357 | * problems as well as full system hangs for some PCIe devices users. | ||
| 7358 | */ | ||
| 7359 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); | ||
| 7360 | |||
| 7353 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 7361 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
| 7354 | rc = pcim_enable_device(pdev); | 7362 | rc = pcim_enable_device(pdev); |
| 7355 | if (rc < 0) { | 7363 | if (rc < 0) { |
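Two independent r8169 fixes are visible above: a software irq_enabled flag lets the (possibly shared) interrupt handler return IRQ_NONE while the driver has masked its own sources, and ASPM L0s/L1 is switched off at probe time because some boards stall or hang with link power management active. A hedged sketch of the ASPM work-around as an early probe step:

```c
#include <linux/pci.h>
#include <linux/pci-aspm.h>

/* Sketch: disable ASPM before enabling the device, as the patch does. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);

	return pcim_enable_device(pdev);	/* continue with normal probe */
}
```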
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 6073387511f8..67f9bb6e941b 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c | |||
| @@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev) | |||
| 730 | status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); | 730 | status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); |
| 731 | 731 | ||
| 732 | /* Link ON & Not select default PHY & not ghost PHY */ | 732 | /* Link ON & Not select default PHY & not ghost PHY */ |
| 733 | if ((status & MII_STAT_LINK) && !default_phy && | 733 | if ((status & MII_STAT_LINK) && !default_phy && |
| 734 | (phy->phy_types != UNKNOWN)) | 734 | (phy->phy_types != UNKNOWN)) { |
| 735 | default_phy = phy; | 735 | default_phy = phy; |
| 736 | else { | 736 | } else { |
| 737 | status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL); | 737 | status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL); |
| 738 | mdio_write(net_dev, phy->phy_addr, MII_CONTROL, | 738 | mdio_write(net_dev, phy->phy_addr, MII_CONTROL, |
| 739 | status | MII_CNTL_AUTO | MII_CNTL_ISOLATE); | 739 | status | MII_CNTL_AUTO | MII_CNTL_ISOLATE); |
| @@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev) | |||
| 741 | phy_home = phy; | 741 | phy_home = phy; |
| 742 | else if(phy->phy_types == LAN) | 742 | else if(phy->phy_types == LAN) |
| 743 | phy_lan = phy; | 743 | phy_lan = phy; |
| 744 | } | 744 | } |
| 745 | } | 745 | } |
| 746 | 746 | ||
| 747 | if (!default_phy && phy_home) | 747 | if (!default_phy && phy_home) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 40d6356a7e73..3dfb07a78952 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h | |||
| @@ -29,11 +29,13 @@ | |||
| 29 | /* Specific functions used for Ring mode */ | 29 | /* Specific functions used for Ring mode */ |
| 30 | 30 | ||
| 31 | /* Enhanced descriptors */ | 31 | /* Enhanced descriptors */ |
| 32 | static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) | 32 | static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end, |
| 33 | int bfsize) | ||
| 33 | { | 34 | { |
| 34 | p->des1 |= cpu_to_le32((BUF_SIZE_8KiB | 35 | if (bfsize == BUF_SIZE_16KiB) |
| 35 | << ERDES1_BUFFER2_SIZE_SHIFT) | 36 | p->des1 |= cpu_to_le32((BUF_SIZE_8KiB |
| 36 | & ERDES1_BUFFER2_SIZE_MASK); | 37 | << ERDES1_BUFFER2_SIZE_SHIFT) |
| 38 | & ERDES1_BUFFER2_SIZE_MASK); | ||
| 37 | 39 | ||
| 38 | if (end) | 40 | if (end) |
| 39 | p->des1 |= cpu_to_le32(ERDES1_END_RING); | 41 | p->des1 |= cpu_to_le32(ERDES1_END_RING); |
| @@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) | |||
| 59 | } | 61 | } |
| 60 | 62 | ||
| 61 | /* Normal descriptors */ | 63 | /* Normal descriptors */ |
| 62 | static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) | 64 | static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize) |
| 63 | { | 65 | { |
| 64 | p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) | 66 | if (bfsize >= BUF_SIZE_2KiB) { |
| 65 | << RDES1_BUFFER2_SIZE_SHIFT) | 67 | int bfsize2; |
| 66 | & RDES1_BUFFER2_SIZE_MASK); | 68 | |
| 69 | bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1); | ||
| 70 | p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT) | ||
| 71 | & RDES1_BUFFER2_SIZE_MASK); | ||
| 72 | } | ||
| 67 | 73 | ||
| 68 | if (end) | 74 | if (end) |
| 69 | p->des1 |= cpu_to_le32(RDES1_END_RING); | 75 | p->des1 |= cpu_to_le32(RDES1_END_RING); |
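The ring-mode init helpers above now receive the real DMA buffer size: buffer 2 of an enhanced descriptor is only programmed to 8 KiB when the overall buffer is 16 KiB, and a normal descriptor splits anything above 2 KiB - 1 across its two buffers. A stand-alone arithmetic sketch of the normal-descriptor split, in plain C so the numbers can be checked directly:

```c
#include <stdio.h>

#define BUF_SIZE_2KiB	2048

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Sketch of the split introduced for normal descriptors: buffer 1 takes
 * up to 2 KiB - 1 bytes, the remainder (if any) goes into buffer 2. */
static void ndesc_split(int bfsize, int *bfsize1, int *bfsize2)
{
	*bfsize1 = MIN(bfsize, BUF_SIZE_2KiB - 1);
	*bfsize2 = 0;
	if (bfsize >= BUF_SIZE_2KiB)
		*bfsize2 = MIN(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
}

int main(void)
{
	int b1, b2;

	ndesc_split(1536, &b1, &b2);	/* typical MTU-sized buffer */
	printf("bfsize=1536 -> buf1=%d buf2=%d\n", b1, b2);	/* 1536, 0 */

	ndesc_split(4096, &b1, &b2);	/* jumbo buffer */
	printf("bfsize=4096 -> buf1=%d buf2=%d\n", b1, b2);	/* 2047, 2047 */
	return 0;
}
```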
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 7fbb6a4dbf51..e061e9f5fad7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
| @@ -296,7 +296,7 @@ exit: | |||
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 298 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
| 299 | int mode, int end) | 299 | int mode, int end, int bfsize) |
| 300 | { | 300 | { |
| 301 | dwmac4_set_rx_owner(p, disable_rx_ic); | 301 | dwmac4_set_rx_owner(p, disable_rx_ic); |
| 302 | } | 302 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 1d858fdec997..98fa471da7c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | |||
| @@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, | |||
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 125 | static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
| 126 | int mode, int end) | 126 | int mode, int end, int bfsize) |
| 127 | { | 127 | { |
| 128 | dwxgmac2_set_rx_owner(p, disable_rx_ic); | 128 | dwxgmac2_set_rx_owner(p, disable_rx_ic); |
| 129 | } | 129 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 5ef91a790f9d..5202d6ad7919 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c | |||
| @@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 201 | if (unlikely(rdes0 & RDES0_OWN)) | 201 | if (unlikely(rdes0 & RDES0_OWN)) |
| 202 | return dma_own; | 202 | return dma_own; |
| 203 | 203 | ||
| 204 | if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { | ||
| 205 | stats->rx_length_errors++; | ||
| 206 | return discard_frame; | ||
| 207 | } | ||
| 208 | |||
| 204 | if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { | 209 | if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { |
| 205 | if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { | 210 | if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { |
| 206 | x->rx_desc++; | 211 | x->rx_desc++; |
| @@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 231 | * It doesn't match with the information reported into the databook. | 236 | * It doesn't match with the information reported into the databook. |
| 232 | * At any rate, we need to understand if the CSUM hw computation is ok | 237 | * At any rate, we need to understand if the CSUM hw computation is ok |
| 233 | * and report this info to the upper layers. */ | 238 | * and report this info to the upper layers. */ |
| 234 | ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), | 239 | if (likely(ret == good_frame)) |
| 235 | !!(rdes0 & RDES0_FRAME_TYPE), | 240 | ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), |
| 236 | !!(rdes0 & ERDES0_RX_MAC_ADDR)); | 241 | !!(rdes0 & RDES0_FRAME_TYPE), |
| 242 | !!(rdes0 & ERDES0_RX_MAC_ADDR)); | ||
| 237 | 243 | ||
| 238 | if (unlikely(rdes0 & RDES0_DRIBBLING)) | 244 | if (unlikely(rdes0 & RDES0_DRIBBLING)) |
| 239 | x->dribbling_bit++; | 245 | x->dribbling_bit++; |
| @@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 259 | } | 265 | } |
| 260 | 266 | ||
| 261 | static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 267 | static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
| 262 | int mode, int end) | 268 | int mode, int end, int bfsize) |
| 263 | { | 269 | { |
| 270 | int bfsize1; | ||
| 271 | |||
| 264 | p->des0 |= cpu_to_le32(RDES0_OWN); | 272 | p->des0 |= cpu_to_le32(RDES0_OWN); |
| 265 | p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); | 273 | |
| 274 | bfsize1 = min(bfsize, BUF_SIZE_8KiB); | ||
| 275 | p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK); | ||
| 266 | 276 | ||
| 267 | if (mode == STMMAC_CHAIN_MODE) | 277 | if (mode == STMMAC_CHAIN_MODE) |
| 268 | ehn_desc_rx_set_on_chain(p); | 278 | ehn_desc_rx_set_on_chain(p); |
| 269 | else | 279 | else |
| 270 | ehn_desc_rx_set_on_ring(p, end); | 280 | ehn_desc_rx_set_on_ring(p, end, bfsize); |
| 271 | 281 | ||
| 272 | if (disable_rx_ic) | 282 | if (disable_rx_ic) |
| 273 | p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); | 283 | p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 92b8944f26e3..5bb00234d961 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h | |||
| @@ -33,7 +33,7 @@ struct dma_extended_desc; | |||
| 33 | struct stmmac_desc_ops { | 33 | struct stmmac_desc_ops { |
| 34 | /* DMA RX descriptor ring initialization */ | 34 | /* DMA RX descriptor ring initialization */ |
| 35 | void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, | 35 | void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, |
| 36 | int end); | 36 | int end, int bfsize); |
| 37 | /* DMA TX descriptor ring initialization */ | 37 | /* DMA TX descriptor ring initialization */ |
| 38 | void (*init_tx_desc)(struct dma_desc *p, int mode, int end); | 38 | void (*init_tx_desc)(struct dma_desc *p, int mode, int end); |
| 39 | /* Invoked by the xmit function to prepare the tx descriptor */ | 39 | /* Invoked by the xmit function to prepare the tx descriptor */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index de65bb29feba..b7dd4e3c760d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c | |||
| @@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 91 | return dma_own; | 91 | return dma_own; |
| 92 | 92 | ||
| 93 | if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { | 93 | if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { |
| 94 | pr_warn("%s: Oversized frame spanned multiple buffers\n", | ||
| 95 | __func__); | ||
| 96 | stats->rx_length_errors++; | 94 | stats->rx_length_errors++; |
| 97 | return discard_frame; | 95 | return discard_frame; |
| 98 | } | 96 | } |
| @@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 135 | } | 133 | } |
| 136 | 134 | ||
| 137 | static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, | 135 | static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, |
| 138 | int end) | 136 | int end, int bfsize) |
| 139 | { | 137 | { |
| 138 | int bfsize1; | ||
| 139 | |||
| 140 | p->des0 |= cpu_to_le32(RDES0_OWN); | 140 | p->des0 |= cpu_to_le32(RDES0_OWN); |
| 141 | p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); | 141 | |
| 142 | bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1); | ||
| 143 | p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK); | ||
| 142 | 144 | ||
| 143 | if (mode == STMMAC_CHAIN_MODE) | 145 | if (mode == STMMAC_CHAIN_MODE) |
| 144 | ndesc_rx_set_on_chain(p, end); | 146 | ndesc_rx_set_on_chain(p, end); |
| 145 | else | 147 | else |
| 146 | ndesc_rx_set_on_ring(p, end); | 148 | ndesc_rx_set_on_ring(p, end, bfsize); |
| 147 | 149 | ||
| 148 | if (disable_rx_ic) | 150 | if (disable_rx_ic) |
| 149 | p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); | 151 | p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index d8c5bc412219..4d9bcb4d0378 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
| @@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
| 59 | 59 | ||
| 60 | desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); | 60 | desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); |
| 61 | stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, | 61 | stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, |
| 62 | STMMAC_RING_MODE, 1, false, skb->len); | 62 | STMMAC_RING_MODE, 0, false, skb->len); |
| 63 | tx_q->tx_skbuff[entry] = NULL; | 63 | tx_q->tx_skbuff[entry] = NULL; |
| 64 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); | 64 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
| 65 | 65 | ||
| @@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
| 79 | 79 | ||
| 80 | desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); | 80 | desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); |
| 81 | stmmac_prepare_tx_desc(priv, desc, 0, len, csum, | 81 | stmmac_prepare_tx_desc(priv, desc, 0, len, csum, |
| 82 | STMMAC_RING_MODE, 1, true, skb->len); | 82 | STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb), |
| 83 | skb->len); | ||
| 83 | } else { | 84 | } else { |
| 84 | des2 = dma_map_single(priv->device, skb->data, | 85 | des2 = dma_map_single(priv->device, skb->data, |
| 85 | nopaged_len, DMA_TO_DEVICE); | 86 | nopaged_len, DMA_TO_DEVICE); |
| @@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
| 91 | tx_q->tx_skbuff_dma[entry].is_jumbo = true; | 92 | tx_q->tx_skbuff_dma[entry].is_jumbo = true; |
| 92 | desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); | 93 | desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); |
| 93 | stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, | 94 | stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, |
| 94 | STMMAC_RING_MODE, 1, true, skb->len); | 95 | STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb), |
| 96 | skb->len); | ||
| 95 | } | 97 | } |
| 96 | 98 | ||
| 97 | tx_q->cur_tx = entry; | 99 | tx_q->cur_tx = entry; |
| @@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc) | |||
| 111 | 113 | ||
| 112 | static void refill_desc3(void *priv_ptr, struct dma_desc *p) | 114 | static void refill_desc3(void *priv_ptr, struct dma_desc *p) |
| 113 | { | 115 | { |
| 114 | struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; | 116 | struct stmmac_rx_queue *rx_q = priv_ptr; |
| 117 | struct stmmac_priv *priv = rx_q->priv_data; | ||
| 115 | 118 | ||
| 116 | /* Fill DES3 in case of RING mode */ | 119 | /* Fill DES3 in case of RING mode */ |
| 117 | if (priv->dma_buf_sz >= BUF_SIZE_8KiB) | 120 | if (priv->dma_buf_sz == BUF_SIZE_16KiB) |
| 118 | p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); | 121 | p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); |
| 119 | } | 122 | } |
| 120 | 123 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 97c5e1aad88f..a26e36dbb5df 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) | |||
| 1136 | if (priv->extend_desc) | 1136 | if (priv->extend_desc) |
| 1137 | stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, | 1137 | stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, |
| 1138 | priv->use_riwt, priv->mode, | 1138 | priv->use_riwt, priv->mode, |
| 1139 | (i == DMA_RX_SIZE - 1)); | 1139 | (i == DMA_RX_SIZE - 1), |
| 1140 | priv->dma_buf_sz); | ||
| 1140 | else | 1141 | else |
| 1141 | stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], | 1142 | stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], |
| 1142 | priv->use_riwt, priv->mode, | 1143 | priv->use_riwt, priv->mode, |
| 1143 | (i == DMA_RX_SIZE - 1)); | 1144 | (i == DMA_RX_SIZE - 1), |
| 1145 | priv->dma_buf_sz); | ||
| 1144 | } | 1146 | } |
| 1145 | 1147 | ||
| 1146 | /** | 1148 | /** |
| @@ -3216,14 +3218,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3216 | stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, | 3218 | stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, |
| 3217 | csum_insertion, priv->mode, 1, last_segment, | 3219 | csum_insertion, priv->mode, 1, last_segment, |
| 3218 | skb->len); | 3220 | skb->len); |
| 3219 | 3221 | } else { | |
| 3220 | /* The own bit must be the latest setting done when prepare the | 3222 | stmmac_set_tx_owner(priv, first); |
| 3221 | * descriptor and then barrier is needed to make sure that | ||
| 3222 | * all is coherent before granting the DMA engine. | ||
| 3223 | */ | ||
| 3224 | wmb(); | ||
| 3225 | } | 3223 | } |
| 3226 | 3224 | ||
| 3225 | /* The own bit must be the latest setting done when prepare the | ||
| 3226 | * descriptor and then barrier is needed to make sure that | ||
| 3227 | * all is coherent before granting the DMA engine. | ||
| 3228 | */ | ||
| 3229 | wmb(); | ||
| 3230 | |||
| 3227 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); | 3231 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
| 3228 | 3232 | ||
| 3229 | stmmac_enable_dma_transmission(priv, priv->ioaddr); | 3233 | stmmac_enable_dma_transmission(priv, priv->ioaddr); |
| @@ -3350,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3350 | { | 3354 | { |
| 3351 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 3355 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
| 3352 | struct stmmac_channel *ch = &priv->channel[queue]; | 3356 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 3353 | unsigned int entry = rx_q->cur_rx; | 3357 | unsigned int next_entry = rx_q->cur_rx; |
| 3354 | int coe = priv->hw->rx_csum; | 3358 | int coe = priv->hw->rx_csum; |
| 3355 | unsigned int next_entry; | ||
| 3356 | unsigned int count = 0; | 3359 | unsigned int count = 0; |
| 3357 | bool xmac; | 3360 | bool xmac; |
| 3358 | 3361 | ||
| @@ -3370,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3370 | stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); | 3373 | stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); |
| 3371 | } | 3374 | } |
| 3372 | while (count < limit) { | 3375 | while (count < limit) { |
| 3373 | int status; | 3376 | int entry, status; |
| 3374 | struct dma_desc *p; | 3377 | struct dma_desc *p; |
| 3375 | struct dma_desc *np; | 3378 | struct dma_desc *np; |
| 3376 | 3379 | ||
| 3380 | entry = next_entry; | ||
| 3381 | |||
| 3377 | if (priv->extend_desc) | 3382 | if (priv->extend_desc) |
| 3378 | p = (struct dma_desc *)(rx_q->dma_erx + entry); | 3383 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
| 3379 | else | 3384 | else |
| @@ -3429,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3429 | * ignored | 3434 | * ignored |
| 3430 | */ | 3435 | */ |
| 3431 | if (frame_len > priv->dma_buf_sz) { | 3436 | if (frame_len > priv->dma_buf_sz) { |
| 3432 | netdev_err(priv->dev, | 3437 | if (net_ratelimit()) |
| 3433 | "len %d larger than size (%d)\n", | 3438 | netdev_err(priv->dev, |
| 3434 | frame_len, priv->dma_buf_sz); | 3439 | "len %d larger than size (%d)\n", |
| 3440 | frame_len, priv->dma_buf_sz); | ||
| 3435 | priv->dev->stats.rx_length_errors++; | 3441 | priv->dev->stats.rx_length_errors++; |
| 3436 | break; | 3442 | continue; |
| 3437 | } | 3443 | } |
| 3438 | 3444 | ||
| 3439 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 | 3445 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
| @@ -3468,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3468 | dev_warn(priv->device, | 3474 | dev_warn(priv->device, |
| 3469 | "packet dropped\n"); | 3475 | "packet dropped\n"); |
| 3470 | priv->dev->stats.rx_dropped++; | 3476 | priv->dev->stats.rx_dropped++; |
| 3471 | break; | 3477 | continue; |
| 3472 | } | 3478 | } |
| 3473 | 3479 | ||
| 3474 | dma_sync_single_for_cpu(priv->device, | 3480 | dma_sync_single_for_cpu(priv->device, |
| @@ -3488,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3488 | } else { | 3494 | } else { |
| 3489 | skb = rx_q->rx_skbuff[entry]; | 3495 | skb = rx_q->rx_skbuff[entry]; |
| 3490 | if (unlikely(!skb)) { | 3496 | if (unlikely(!skb)) { |
| 3491 | netdev_err(priv->dev, | 3497 | if (net_ratelimit()) |
| 3492 | "%s: Inconsistent Rx chain\n", | 3498 | netdev_err(priv->dev, |
| 3493 | priv->dev->name); | 3499 | "%s: Inconsistent Rx chain\n", |
| 3500 | priv->dev->name); | ||
| 3494 | priv->dev->stats.rx_dropped++; | 3501 | priv->dev->stats.rx_dropped++; |
| 3495 | break; | 3502 | continue; |
| 3496 | } | 3503 | } |
| 3497 | prefetch(skb->data - NET_IP_ALIGN); | 3504 | prefetch(skb->data - NET_IP_ALIGN); |
| 3498 | rx_q->rx_skbuff[entry] = NULL; | 3505 | rx_q->rx_skbuff[entry] = NULL; |
| @@ -3527,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3527 | priv->dev->stats.rx_packets++; | 3534 | priv->dev->stats.rx_packets++; |
| 3528 | priv->dev->stats.rx_bytes += frame_len; | 3535 | priv->dev->stats.rx_bytes += frame_len; |
| 3529 | } | 3536 | } |
| 3530 | entry = next_entry; | ||
| 3531 | } | 3537 | } |
| 3532 | 3538 | ||
| 3533 | stmmac_rx_refill(priv, queue); | 3539 | stmmac_rx_refill(priv, queue); |
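The stmmac_rx() changes above do three related things: the ring index is advanced at the top of the loop via next_entry, the error paths continue with the next descriptor instead of breaking out (which used to leave the ring position stale), and the error messages are rate-limited. A brief sketch of the net_ratelimit() idiom used for the oversized-frame case:

```c
#include <linux/net.h>
#include <linux/netdevice.h>

/* Sketch: count the error unconditionally, but only log it when
 * net_ratelimit() allows, so a burst of bad frames cannot flood dmesg. */
static void report_oversized(struct net_device *dev, int frame_len, int bufsz)
{
	if (net_ratelimit())
		netdev_err(dev, "len %d larger than size (%d)\n",
			   frame_len, bufsz);
	dev->stats.rx_length_errors++;
}
```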
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 5174d318901e..0a920c5936b2 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c | |||
| @@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, | |||
| 3657 | 3657 | ||
| 3658 | ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, | 3658 | ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, |
| 3659 | gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); | 3659 | gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); |
| 3660 | if (ret) | 3660 | if (ret) { |
| 3661 | of_node_put(interfaces); | ||
| 3661 | return ret; | 3662 | return ret; |
| 3663 | } | ||
| 3662 | 3664 | ||
| 3663 | ret = netcp_txpipe_open(&gbe_dev->tx_pipe); | 3665 | ret = netcp_txpipe_open(&gbe_dev->tx_pipe); |
| 3664 | if (ret) | 3666 | if (ret) { |
| 3667 | of_node_put(interfaces); | ||
| 3665 | return ret; | 3668 | return ret; |
| 3669 | } | ||
| 3666 | 3670 | ||
| 3667 | /* Create network interfaces */ | 3671 | /* Create network interfaces */ |
| 3668 | INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); | 3672 | INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index ec7e7ec24ff9..4041c75997ba 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
| @@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev) | |||
| 1575 | ret = of_address_to_resource(np, 0, &dmares); | 1575 | ret = of_address_to_resource(np, 0, &dmares); |
| 1576 | if (ret) { | 1576 | if (ret) { |
| 1577 | dev_err(&pdev->dev, "unable to get DMA resource\n"); | 1577 | dev_err(&pdev->dev, "unable to get DMA resource\n"); |
| 1578 | of_node_put(np); | ||
| 1578 | goto free_netdev; | 1579 | goto free_netdev; |
| 1579 | } | 1580 | } |
| 1580 | lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); | 1581 | lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); |
| 1581 | if (IS_ERR(lp->dma_regs)) { | 1582 | if (IS_ERR(lp->dma_regs)) { |
| 1582 | dev_err(&pdev->dev, "could not map DMA regs\n"); | 1583 | dev_err(&pdev->dev, "could not map DMA regs\n"); |
| 1583 | ret = PTR_ERR(lp->dma_regs); | 1584 | ret = PTR_ERR(lp->dma_regs); |
| 1585 | of_node_put(np); | ||
| 1584 | goto free_netdev; | 1586 | goto free_netdev; |
| 1585 | } | 1587 | } |
| 1586 | lp->rx_irq = irq_of_parse_and_map(np, 1); | 1588 | lp->rx_irq = irq_of_parse_and_map(np, 1); |
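The netcp and axienet hunks above fix the same reference leak: a device-tree node looked up earlier must be dropped with of_node_put() on every error return, not just on the success path. A hedged sketch of the rule (the phandle name is illustrative, and the real axienet code keeps the node around a little longer for IRQ parsing):

```c
#include <linux/of.h>
#include <linux/of_address.h>

/* Sketch: every exit after the phandle lookup releases the reference. */
static int example_get_dma_resource(struct device_node *parent,
				    struct resource *res)
{
	struct device_node *np;
	int ret;

	np = of_parse_phandle(parent, "axistream-connected", 0);
	if (!np)
		return -ENODEV;

	ret = of_address_to_resource(np, 0, res);
	of_node_put(np);	/* dropped on success and on failure alike */
	return ret;
}
```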
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index e859ae2e42d5..49f41b64077b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
| @@ -987,6 +987,7 @@ struct netvsc_device { | |||
| 987 | 987 | ||
| 988 | wait_queue_head_t wait_drain; | 988 | wait_queue_head_t wait_drain; |
| 989 | bool destroy; | 989 | bool destroy; |
| 990 | bool tx_disable; /* if true, do not wake up queue again */ | ||
| 990 | 991 | ||
| 991 | /* Receive buffer allocated by us but manages by NetVSP */ | 992 | /* Receive buffer allocated by us but manages by NetVSP */ |
| 992 | void *recv_buf; | 993 | void *recv_buf; |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 813d195bbd57..e0dce373cdd9 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
| @@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void) | |||
| 110 | 110 | ||
| 111 | init_waitqueue_head(&net_device->wait_drain); | 111 | init_waitqueue_head(&net_device->wait_drain); |
| 112 | net_device->destroy = false; | 112 | net_device->destroy = false; |
| 113 | net_device->tx_disable = false; | ||
| 113 | 114 | ||
| 114 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; | 115 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
| 115 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | 116 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
| @@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, | |||
| 719 | } else { | 720 | } else { |
| 720 | struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); | 721 | struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); |
| 721 | 722 | ||
| 722 | if (netif_tx_queue_stopped(txq) && | 723 | if (netif_tx_queue_stopped(txq) && !net_device->tx_disable && |
| 723 | (hv_get_avail_to_write_percent(&channel->outbound) > | 724 | (hv_get_avail_to_write_percent(&channel->outbound) > |
| 724 | RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { | 725 | RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { |
| 725 | netif_tx_wake_queue(txq); | 726 | netif_tx_wake_queue(txq); |
| @@ -874,7 +875,8 @@ static inline int netvsc_send_pkt( | |||
| 874 | } else if (ret == -EAGAIN) { | 875 | } else if (ret == -EAGAIN) { |
| 875 | netif_tx_stop_queue(txq); | 876 | netif_tx_stop_queue(txq); |
| 876 | ndev_ctx->eth_stats.stop_queue++; | 877 | ndev_ctx->eth_stats.stop_queue++; |
| 877 | if (atomic_read(&nvchan->queue_sends) < 1) { | 878 | if (atomic_read(&nvchan->queue_sends) < 1 && |
| 879 | !net_device->tx_disable) { | ||
| 878 | netif_tx_wake_queue(txq); | 880 | netif_tx_wake_queue(txq); |
| 879 | ndev_ctx->eth_stats.wake_queue++; | 881 | ndev_ctx->eth_stats.wake_queue++; |
| 880 | ret = -ENOSPC; | 882 | ret = -ENOSPC; |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index cf4897043e83..b20fb0fb595b 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net) | |||
| 109 | rcu_read_unlock(); | 109 | rcu_read_unlock(); |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | static void netvsc_tx_enable(struct netvsc_device *nvscdev, | ||
| 113 | struct net_device *ndev) | ||
| 114 | { | ||
| 115 | nvscdev->tx_disable = false; | ||
| 116 | virt_wmb(); /* ensure queue wake up mechanism is on */ | ||
| 117 | |||
| 118 | netif_tx_wake_all_queues(ndev); | ||
| 119 | } | ||
| 120 | |||
| 112 | static int netvsc_open(struct net_device *net) | 121 | static int netvsc_open(struct net_device *net) |
| 113 | { | 122 | { |
| 114 | struct net_device_context *ndev_ctx = netdev_priv(net); | 123 | struct net_device_context *ndev_ctx = netdev_priv(net); |
| @@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net) | |||
| 129 | rdev = nvdev->extension; | 138 | rdev = nvdev->extension; |
| 130 | if (!rdev->link_state) { | 139 | if (!rdev->link_state) { |
| 131 | netif_carrier_on(net); | 140 | netif_carrier_on(net); |
| 132 | netif_tx_wake_all_queues(net); | 141 | netvsc_tx_enable(nvdev, net); |
| 133 | } | 142 | } |
| 134 | 143 | ||
| 135 | if (vf_netdev) { | 144 | if (vf_netdev) { |
| @@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev) | |||
| 184 | } | 193 | } |
| 185 | } | 194 | } |
| 186 | 195 | ||
| 196 | static void netvsc_tx_disable(struct netvsc_device *nvscdev, | ||
| 197 | struct net_device *ndev) | ||
| 198 | { | ||
| 199 | if (nvscdev) { | ||
| 200 | nvscdev->tx_disable = true; | ||
| 201 | virt_wmb(); /* ensure txq will not wake up after stop */ | ||
| 202 | } | ||
| 203 | |||
| 204 | netif_tx_disable(ndev); | ||
| 205 | } | ||
| 206 | |||
| 187 | static int netvsc_close(struct net_device *net) | 207 | static int netvsc_close(struct net_device *net) |
| 188 | { | 208 | { |
| 189 | struct net_device_context *net_device_ctx = netdev_priv(net); | 209 | struct net_device_context *net_device_ctx = netdev_priv(net); |
| @@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net) | |||
| 192 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | 212 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); |
| 193 | int ret; | 213 | int ret; |
| 194 | 214 | ||
| 195 | netif_tx_disable(net); | 215 | netvsc_tx_disable(nvdev, net); |
| 196 | 216 | ||
| 197 | /* No need to close rndis filter if it is removed already */ | 217 | /* No need to close rndis filter if it is removed already */ |
| 198 | if (!nvdev) | 218 | if (!nvdev) |
| @@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev, | |||
| 920 | 940 | ||
| 921 | /* If device was up (receiving) then shutdown */ | 941 | /* If device was up (receiving) then shutdown */ |
| 922 | if (netif_running(ndev)) { | 942 | if (netif_running(ndev)) { |
| 923 | netif_tx_disable(ndev); | 943 | netvsc_tx_disable(nvdev, ndev); |
| 924 | 944 | ||
| 925 | ret = rndis_filter_close(nvdev); | 945 | ret = rndis_filter_close(nvdev); |
| 926 | if (ret) { | 946 | if (ret) { |
| @@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w) | |||
| 1908 | if (rdev->link_state) { | 1928 | if (rdev->link_state) { |
| 1909 | rdev->link_state = false; | 1929 | rdev->link_state = false; |
| 1910 | netif_carrier_on(net); | 1930 | netif_carrier_on(net); |
| 1911 | netif_tx_wake_all_queues(net); | 1931 | netvsc_tx_enable(net_device, net); |
| 1912 | } else { | 1932 | } else { |
| 1913 | notify = true; | 1933 | notify = true; |
| 1914 | } | 1934 | } |
| @@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w) | |||
| 1918 | if (!rdev->link_state) { | 1938 | if (!rdev->link_state) { |
| 1919 | rdev->link_state = true; | 1939 | rdev->link_state = true; |
| 1920 | netif_carrier_off(net); | 1940 | netif_carrier_off(net); |
| 1921 | netif_tx_stop_all_queues(net); | 1941 | netvsc_tx_disable(net_device, net); |
| 1922 | } | 1942 | } |
| 1923 | kfree(event); | 1943 | kfree(event); |
| 1924 | break; | 1944 | break; |
| @@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w) | |||
| 1927 | if (!rdev->link_state) { | 1947 | if (!rdev->link_state) { |
| 1928 | rdev->link_state = true; | 1948 | rdev->link_state = true; |
| 1929 | netif_carrier_off(net); | 1949 | netif_carrier_off(net); |
| 1930 | netif_tx_stop_all_queues(net); | 1950 | netvsc_tx_disable(net_device, net); |
| 1931 | event->event = RNDIS_STATUS_MEDIA_CONNECT; | 1951 | event->event = RNDIS_STATUS_MEDIA_CONNECT; |
| 1932 | spin_lock_irqsave(&ndev_ctx->lock, flags); | 1952 | spin_lock_irqsave(&ndev_ctx->lock, flags); |
| 1933 | list_add(&event->list, &ndev_ctx->reconfig_events); | 1953 | list_add(&event->list, &ndev_ctx->reconfig_events); |
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index cd1d8faccca5..cd6b95e673a5 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c | |||
| @@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi) | |||
| 1268 | INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); | 1268 | INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); |
| 1269 | lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev), | 1269 | lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev), |
| 1270 | WQ_MEM_RECLAIM); | 1270 | WQ_MEM_RECLAIM); |
| 1271 | if (unlikely(!lp->wqueue)) { | ||
| 1272 | ret = -ENOMEM; | ||
| 1273 | goto err_hw_init; | ||
| 1274 | } | ||
| 1271 | 1275 | ||
| 1272 | ret = adf7242_hw_init(lp); | 1276 | ret = adf7242_hw_init(lp); |
| 1273 | if (ret) | 1277 | if (ret) |
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index b6743f03dce0..3b88846de31b 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c | |||
| @@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
| 324 | goto out_err; | 324 | goto out_err; |
| 325 | } | 325 | } |
| 326 | 326 | ||
| 327 | genlmsg_reply(skb, info); | 327 | res = genlmsg_reply(skb, info); |
| 328 | break; | 328 | break; |
| 329 | } | 329 | } |
| 330 | 330 | ||
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 071869db44cf..520657945b82 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
| @@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE | |||
| 7 | help | 7 | help |
| 8 | MDIO devices and driver infrastructure code. | 8 | MDIO devices and driver infrastructure code. |
| 9 | 9 | ||
| 10 | if MDIO_DEVICE | ||
| 11 | |||
| 10 | config MDIO_BUS | 12 | config MDIO_BUS |
| 11 | tristate | 13 | tristate |
| 12 | default m if PHYLIB=m | 14 | default m if PHYLIB=m |
| @@ -179,6 +181,7 @@ config MDIO_XGENE | |||
| 179 | APM X-Gene SoC's. | 181 | APM X-Gene SoC's. |
| 180 | 182 | ||
| 181 | endif | 183 | endif |
| 184 | endif | ||
| 182 | 185 | ||
| 183 | config PHYLINK | 186 | config PHYLINK |
| 184 | tristate | 187 | tristate |
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 9605d4fe540b..cb86a3e90c7d 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c | |||
| @@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev) | |||
| 323 | 323 | ||
| 324 | bcm54xx_phydsp_config(phydev); | 324 | bcm54xx_phydsp_config(phydev); |
| 325 | 325 | ||
| 326 | /* Encode link speed into LED1 and LED3 pair (green/amber). | ||
| 327 | * Also flash these two LEDs on activity. This means configuring | ||
| 328 | * them for MULTICOLOR and encoding link/activity into them. | ||
| 329 | */ | ||
| 330 | val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) | | ||
| 331 | BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1); | ||
| 332 | bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val); | ||
| 333 | |||
| 334 | val = BCM_LED_MULTICOLOR_IN_PHASE | | ||
| 335 | BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) | | ||
| 336 | BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT); | ||
| 337 | bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val); | ||
| 338 | |||
| 326 | return 0; | 339 | return 0; |
| 327 | } | 340 | } |
| 328 | 341 | ||
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index bbd8c22067f3..97d45bd5b38e 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c | |||
| @@ -15,6 +15,8 @@ | |||
| 15 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
| 16 | 16 | ||
| 17 | #define DP83822_PHY_ID 0x2000a240 | 17 | #define DP83822_PHY_ID 0x2000a240 |
| 18 | #define DP83825I_PHY_ID 0x2000a150 | ||
| 19 | |||
| 18 | #define DP83822_DEVADDR 0x1f | 20 | #define DP83822_DEVADDR 0x1f |
| 19 | 21 | ||
| 20 | #define MII_DP83822_PHYSCR 0x11 | 22 | #define MII_DP83822_PHYSCR 0x11 |
| @@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev) | |||
| 304 | return 0; | 306 | return 0; |
| 305 | } | 307 | } |
| 306 | 308 | ||
| 309 | #define DP83822_PHY_DRIVER(_id, _name) \ | ||
| 310 | { \ | ||
| 311 | PHY_ID_MATCH_MODEL(_id), \ | ||
| 312 | .name = (_name), \ | ||
| 313 | .features = PHY_BASIC_FEATURES, \ | ||
| 314 | .soft_reset = dp83822_phy_reset, \ | ||
| 315 | .config_init = dp83822_config_init, \ | ||
| 316 | .get_wol = dp83822_get_wol, \ | ||
| 317 | .set_wol = dp83822_set_wol, \ | ||
| 318 | .ack_interrupt = dp83822_ack_interrupt, \ | ||
| 319 | .config_intr = dp83822_config_intr, \ | ||
| 320 | .suspend = dp83822_suspend, \ | ||
| 321 | .resume = dp83822_resume, \ | ||
| 322 | } | ||
| 323 | |||
| 307 | static struct phy_driver dp83822_driver[] = { | 324 | static struct phy_driver dp83822_driver[] = { |
| 308 | { | 325 | DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"), |
| 309 | .phy_id = DP83822_PHY_ID, | 326 | DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"), |
| 310 | .phy_id_mask = 0xfffffff0, | ||
| 311 | .name = "TI DP83822", | ||
| 312 | .features = PHY_BASIC_FEATURES, | ||
| 313 | .config_init = dp83822_config_init, | ||
| 314 | .soft_reset = dp83822_phy_reset, | ||
| 315 | .get_wol = dp83822_get_wol, | ||
| 316 | .set_wol = dp83822_set_wol, | ||
| 317 | .ack_interrupt = dp83822_ack_interrupt, | ||
| 318 | .config_intr = dp83822_config_intr, | ||
| 319 | .suspend = dp83822_suspend, | ||
| 320 | .resume = dp83822_resume, | ||
| 321 | }, | ||
| 322 | }; | 327 | }; |
| 323 | module_phy_driver(dp83822_driver); | 328 | module_phy_driver(dp83822_driver); |
| 324 | 329 | ||
| 325 | static struct mdio_device_id __maybe_unused dp83822_tbl[] = { | 330 | static struct mdio_device_id __maybe_unused dp83822_tbl[] = { |
| 326 | { DP83822_PHY_ID, 0xfffffff0 }, | 331 | { DP83822_PHY_ID, 0xfffffff0 }, |
| 332 | { DP83825I_PHY_ID, 0xfffffff0 }, | ||
| 327 | { }, | 333 | { }, |
| 328 | }; | 334 | }; |
| 329 | MODULE_DEVICE_TABLE(mdio, dp83822_tbl); | 335 | MODULE_DEVICE_TABLE(mdio, dp83822_tbl); |
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index a238388eb1a5..0eec2913c289 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c | |||
| @@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev) | |||
| 201 | static int meson_gxl_config_intr(struct phy_device *phydev) | 201 | static int meson_gxl_config_intr(struct phy_device *phydev) |
| 202 | { | 202 | { |
| 203 | u16 val; | 203 | u16 val; |
| 204 | int ret; | ||
| 204 | 205 | ||
| 205 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { | 206 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { |
| 206 | val = INTSRC_ANEG_PR | 207 | val = INTSRC_ANEG_PR |
| @@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev) | |||
| 213 | val = 0; | 214 | val = 0; |
| 214 | } | 215 | } |
| 215 | 216 | ||
| 217 | /* Ack any pending IRQ */ | ||
| 218 | ret = meson_gxl_ack_interrupt(phydev); | ||
| 219 | if (ret) | ||
| 220 | return ret; | ||
| 221 | |||
| 216 | return phy_write(phydev, INTSRC_MASK, val); | 222 | return phy_write(phydev, INTSRC_MASK, val); |
| 217 | } | 223 | } |
| 218 | 224 | ||
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 49fdd1ee798e..77068c545de0 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev) | |||
| 1831 | { | 1831 | { |
| 1832 | int ret; | 1832 | int ret; |
| 1833 | 1833 | ||
| 1834 | ret = phy_write(phydev, MII_BMCR, BMCR_RESET); | 1834 | ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET); |
| 1835 | if (ret < 0) | 1835 | if (ret < 0) |
| 1836 | return ret; | 1836 | return ret; |
| 1837 | 1837 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 1d68921723dc..e9ca1c088d0b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1763 | int skb_xdp = 1; | 1763 | int skb_xdp = 1; |
| 1764 | bool frags = tun_napi_frags_enabled(tfile); | 1764 | bool frags = tun_napi_frags_enabled(tfile); |
| 1765 | 1765 | ||
| 1766 | if (!(tun->dev->flags & IFF_UP)) | ||
| 1767 | return -EIO; | ||
| 1768 | |||
| 1769 | if (!(tun->flags & IFF_NO_PI)) { | 1766 | if (!(tun->flags & IFF_NO_PI)) { |
| 1770 | if (len < sizeof(pi)) | 1767 | if (len < sizeof(pi)) |
| 1771 | return -EINVAL; | 1768 | return -EINVAL; |
| @@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1867 | err = skb_copy_datagram_from_iter(skb, 0, from, len); | 1864 | err = skb_copy_datagram_from_iter(skb, 0, from, len); |
| 1868 | 1865 | ||
| 1869 | if (err) { | 1866 | if (err) { |
| 1867 | err = -EFAULT; | ||
| 1868 | drop: | ||
| 1870 | this_cpu_inc(tun->pcpu_stats->rx_dropped); | 1869 | this_cpu_inc(tun->pcpu_stats->rx_dropped); |
| 1871 | kfree_skb(skb); | 1870 | kfree_skb(skb); |
| 1872 | if (frags) { | 1871 | if (frags) { |
| @@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1874 | mutex_unlock(&tfile->napi_mutex); | 1873 | mutex_unlock(&tfile->napi_mutex); |
| 1875 | } | 1874 | } |
| 1876 | 1875 | ||
| 1877 | return -EFAULT; | 1876 | return err; |
| 1878 | } | 1877 | } |
| 1879 | } | 1878 | } |
| 1880 | 1879 | ||
| @@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1958 | !tfile->detached) | 1957 | !tfile->detached) |
| 1959 | rxhash = __skb_get_hash_symmetric(skb); | 1958 | rxhash = __skb_get_hash_symmetric(skb); |
| 1960 | 1959 | ||
| 1960 | rcu_read_lock(); | ||
| 1961 | if (unlikely(!(tun->dev->flags & IFF_UP))) { | ||
| 1962 | err = -EIO; | ||
| 1963 | rcu_read_unlock(); | ||
| 1964 | goto drop; | ||
| 1965 | } | ||
| 1966 | |||
| 1961 | if (frags) { | 1967 | if (frags) { |
| 1962 | /* Exercise flow dissector code path. */ | 1968 | /* Exercise flow dissector code path. */ |
| 1963 | u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); | 1969 | u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); |
| @@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1965 | if (unlikely(headlen > skb_headlen(skb))) { | 1971 | if (unlikely(headlen > skb_headlen(skb))) { |
| 1966 | this_cpu_inc(tun->pcpu_stats->rx_dropped); | 1972 | this_cpu_inc(tun->pcpu_stats->rx_dropped); |
| 1967 | napi_free_frags(&tfile->napi); | 1973 | napi_free_frags(&tfile->napi); |
| 1974 | rcu_read_unlock(); | ||
| 1968 | mutex_unlock(&tfile->napi_mutex); | 1975 | mutex_unlock(&tfile->napi_mutex); |
| 1969 | WARN_ON(1); | 1976 | WARN_ON(1); |
| 1970 | return -ENOMEM; | 1977 | return -ENOMEM; |
| @@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1992 | } else { | 1999 | } else { |
| 1993 | netif_rx_ni(skb); | 2000 | netif_rx_ni(skb); |
| 1994 | } | 2001 | } |
| 2002 | rcu_read_unlock(); | ||
| 1995 | 2003 | ||
| 1996 | stats = get_cpu_ptr(tun->pcpu_stats); | 2004 | stats = get_cpu_ptr(tun->pcpu_stats); |
| 1997 | u64_stats_update_begin(&stats->syncp); | 2005 | u64_stats_update_begin(&stats->syncp); |
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 820a2fe7d027..aff995be2a31 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c | |||
| @@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = { | |||
| 1301 | .tx_fixup = aqc111_tx_fixup, | 1301 | .tx_fixup = aqc111_tx_fixup, |
| 1302 | }; | 1302 | }; |
| 1303 | 1303 | ||
| 1304 | static const struct driver_info qnap_info = { | ||
| 1305 | .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter", | ||
| 1306 | .bind = aqc111_bind, | ||
| 1307 | .unbind = aqc111_unbind, | ||
| 1308 | .status = aqc111_status, | ||
| 1309 | .link_reset = aqc111_link_reset, | ||
| 1310 | .reset = aqc111_reset, | ||
| 1311 | .stop = aqc111_stop, | ||
| 1312 | .flags = FLAG_ETHER | FLAG_FRAMING_AX | | ||
| 1313 | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, | ||
| 1314 | .rx_fixup = aqc111_rx_fixup, | ||
| 1315 | .tx_fixup = aqc111_tx_fixup, | ||
| 1316 | }; | ||
| 1317 | |||
| 1304 | static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) | 1318 | static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) |
| 1305 | { | 1319 | { |
| 1306 | struct usbnet *dev = usb_get_intfdata(intf); | 1320 | struct usbnet *dev = usb_get_intfdata(intf); |
| @@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = { | |||
| 1455 | {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, | 1469 | {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, |
| 1456 | {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, | 1470 | {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, |
| 1457 | {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, | 1471 | {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, |
| 1472 | {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)}, | ||
| 1458 | { },/* END */ | 1473 | { },/* END */ |
| 1459 | }; | 1474 | }; |
| 1460 | MODULE_DEVICE_TABLE(usb, products); | 1475 | MODULE_DEVICE_TABLE(usb, products); |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 5512a1038721..3e9b2c319e45 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
| @@ -851,6 +851,14 @@ static const struct usb_device_id products[] = { | |||
| 851 | .driver_info = 0, | 851 | .driver_info = 0, |
| 852 | }, | 852 | }, |
| 853 | 853 | ||
| 854 | /* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */ | ||
| 855 | { | ||
| 856 | USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM, | ||
| 857 | USB_CDC_SUBCLASS_ETHERNET, | ||
| 858 | USB_CDC_PROTO_NONE), | ||
| 859 | .driver_info = 0, | ||
| 860 | }, | ||
| 861 | |||
| 854 | /* WHITELIST!!! | 862 | /* WHITELIST!!! |
| 855 | * | 863 | * |
| 856 | * CDC Ether uses two interfaces, not necessarily consecutive. | 864 | * CDC Ether uses two interfaces, not necessarily consecutive. |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 74bebbdb4b15..9195f3476b1d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = { | |||
| 1203 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ | 1203 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ |
| 1204 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ | 1204 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ |
| 1205 | {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ | 1205 | {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ |
| 1206 | {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ | ||
| 1206 | {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ | 1207 | {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ |
| 1207 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ | 1208 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ |
| 1208 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 1209 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7c1430ed0244..cd15c32b2e43 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
| @@ -1273,9 +1273,14 @@ static void vrf_setup(struct net_device *dev) | |||
| 1273 | 1273 | ||
| 1274 | /* default to no qdisc; user can add if desired */ | 1274 | /* default to no qdisc; user can add if desired */ |
| 1275 | dev->priv_flags |= IFF_NO_QUEUE; | 1275 | dev->priv_flags |= IFF_NO_QUEUE; |
| 1276 | dev->priv_flags |= IFF_NO_RX_HANDLER; | ||
| 1276 | 1277 | ||
| 1277 | dev->min_mtu = 0; | 1278 | /* VRF devices do not care about MTU, but if the MTU is set |
| 1278 | dev->max_mtu = 0; | 1279 | * too low then the ipv4 and ipv6 protocols are disabled |
| 1280 | * which breaks networking. | ||
| 1281 | */ | ||
| 1282 | dev->min_mtu = IPV6_MIN_MTU; | ||
| 1283 | dev->max_mtu = ETH_MAX_MTU; | ||
| 1279 | } | 1284 | } |
| 1280 | 1285 | ||
| 1281 | static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], | 1286 | static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 077f1b9f2761..d76dfed8d9bb 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -4335,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) | |||
| 4335 | /* If vxlan->dev is in the same netns, it has already been added | 4335 | /* If vxlan->dev is in the same netns, it has already been added |
| 4336 | * to the list by the previous loop. | 4336 | * to the list by the previous loop. |
| 4337 | */ | 4337 | */ |
| 4338 | if (!net_eq(dev_net(vxlan->dev), net)) { | 4338 | if (!net_eq(dev_net(vxlan->dev), net)) |
| 4339 | gro_cells_destroy(&vxlan->gro_cells); | ||
| 4340 | unregister_netdevice_queue(vxlan->dev, head); | 4339 | unregister_netdevice_queue(vxlan->dev, head); |
| 4341 | } | ||
| 4342 | } | 4340 | } |
| 4343 | 4341 | ||
| 4344 | for (h = 0; h < PORT_HASH_SIZE; ++h) | 4342 | for (h = 0; h < PORT_HASH_SIZE; ++h) |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index e9822a3ec373..94132cfd1f56 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | |||
| @@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, | |||
| 460 | static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, | 460 | static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, |
| 461 | struct cfg80211_pmsr_result *res) | 461 | struct cfg80211_pmsr_result *res) |
| 462 | { | 462 | { |
| 463 | s64 rtt_avg = res->ftm.rtt_avg * 100; | 463 | s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666); |
| 464 | |||
| 465 | do_div(rtt_avg, 6666); | ||
| 466 | 464 | ||
| 467 | IWL_DEBUG_INFO(mvm, "entry %d\n", index); | 465 | IWL_DEBUG_INFO(mvm, "entry %d\n", index); |
| 468 | IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); | 466 | IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); |
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 6eedc0ec7661..76629b98c78d 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c | |||
| @@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, | |||
| 130 | static void | 130 | static void |
| 131 | mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) | 131 | mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) |
| 132 | { | 132 | { |
| 133 | iowrite32(q->desc_dma, &q->regs->desc_base); | ||
| 134 | iowrite32(q->ndesc, &q->regs->ring_size); | ||
| 133 | q->head = ioread32(&q->regs->dma_idx); | 135 | q->head = ioread32(&q->regs->dma_idx); |
| 134 | q->tail = q->head; | 136 | q->tail = q->head; |
| 135 | iowrite32(q->head, &q->regs->cpu_idx); | 137 | iowrite32(q->head, &q->regs->cpu_idx); |
| @@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) | |||
| 180 | else | 182 | else |
| 181 | mt76_dma_sync_idx(dev, q); | 183 | mt76_dma_sync_idx(dev, q); |
| 182 | 184 | ||
| 183 | wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; | 185 | wake = wake && q->stopped && |
| 186 | qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; | ||
| 187 | if (wake) | ||
| 188 | q->stopped = false; | ||
| 184 | 189 | ||
| 185 | if (!q->queued) | 190 | if (!q->queued) |
| 186 | wake_up(&dev->tx_wait); | 191 | wake_up(&dev->tx_wait); |
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index a033745adb2f..316167404729 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c | |||
| @@ -679,19 +679,15 @@ out: | |||
| 679 | return ret; | 679 | return ret; |
| 680 | } | 680 | } |
| 681 | 681 | ||
| 682 | static void | 682 | void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, |
| 683 | mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, | 683 | struct ieee80211_sta *sta) |
| 684 | struct ieee80211_sta *sta) | ||
| 685 | { | 684 | { |
| 686 | struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; | 685 | struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; |
| 687 | int idx = wcid->idx; | 686 | int i, idx = wcid->idx; |
| 688 | int i; | ||
| 689 | 687 | ||
| 690 | rcu_assign_pointer(dev->wcid[idx], NULL); | 688 | rcu_assign_pointer(dev->wcid[idx], NULL); |
| 691 | synchronize_rcu(); | 689 | synchronize_rcu(); |
| 692 | 690 | ||
| 693 | mutex_lock(&dev->mutex); | ||
| 694 | |||
| 695 | if (dev->drv->sta_remove) | 691 | if (dev->drv->sta_remove) |
| 696 | dev->drv->sta_remove(dev, vif, sta); | 692 | dev->drv->sta_remove(dev, vif, sta); |
| 697 | 693 | ||
| @@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, | |||
| 699 | for (i = 0; i < ARRAY_SIZE(sta->txq); i++) | 695 | for (i = 0; i < ARRAY_SIZE(sta->txq); i++) |
| 700 | mt76_txq_remove(dev, sta->txq[i]); | 696 | mt76_txq_remove(dev, sta->txq[i]); |
| 701 | mt76_wcid_free(dev->wcid_mask, idx); | 697 | mt76_wcid_free(dev->wcid_mask, idx); |
| 698 | } | ||
| 699 | EXPORT_SYMBOL_GPL(__mt76_sta_remove); | ||
| 702 | 700 | ||
| 701 | static void | ||
| 702 | mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, | ||
| 703 | struct ieee80211_sta *sta) | ||
| 704 | { | ||
| 705 | mutex_lock(&dev->mutex); | ||
| 706 | __mt76_sta_remove(dev, vif, sta); | ||
| 703 | mutex_unlock(&dev->mutex); | 707 | mutex_unlock(&dev->mutex); |
| 704 | } | 708 | } |
| 705 | 709 | ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 5dfb0601f101..bcbfd3c4a44b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h | |||
| @@ -126,6 +126,7 @@ struct mt76_queue { | |||
| 126 | int ndesc; | 126 | int ndesc; |
| 127 | int queued; | 127 | int queued; |
| 128 | int buf_size; | 128 | int buf_size; |
| 129 | bool stopped; | ||
| 129 | 130 | ||
| 130 | u8 buf_offset; | 131 | u8 buf_offset; |
| 131 | u8 hw_idx; | 132 | u8 hw_idx; |
| @@ -143,6 +144,7 @@ struct mt76_mcu_ops { | |||
| 143 | const struct mt76_reg_pair *rp, int len); | 144 | const struct mt76_reg_pair *rp, int len); |
| 144 | int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, | 145 | int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, |
| 145 | struct mt76_reg_pair *rp, int len); | 146 | struct mt76_reg_pair *rp, int len); |
| 147 | int (*mcu_restart)(struct mt76_dev *dev); | ||
| 146 | }; | 148 | }; |
| 147 | 149 | ||
| 148 | struct mt76_queue_ops { | 150 | struct mt76_queue_ops { |
| @@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
| 693 | struct ieee80211_sta *sta, | 695 | struct ieee80211_sta *sta, |
| 694 | enum ieee80211_sta_state old_state, | 696 | enum ieee80211_sta_state old_state, |
| 695 | enum ieee80211_sta_state new_state); | 697 | enum ieee80211_sta_state new_state); |
| 698 | void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, | ||
| 699 | struct ieee80211_sta *sta); | ||
| 696 | 700 | ||
| 697 | struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); | 701 | struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); |
| 698 | 702 | ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c index afcd86f735b4..4dcb465095d1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c | |||
| @@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg) | |||
| 135 | 135 | ||
| 136 | out: | 136 | out: |
| 137 | mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); | 137 | mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); |
| 138 | if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > | 138 | if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask)) |
| 139 | __sw_hweight8(dev->beacon_mask)) | ||
| 140 | dev->beacon_check++; | 139 | dev->beacon_check++; |
| 141 | } | 140 | } |
| 142 | 141 | ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index d69e82c66ab2..b3ae0aaea62a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c | |||
| @@ -27,12 +27,16 @@ static void | |||
| 27 | mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) | 27 | mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) |
| 28 | { | 28 | { |
| 29 | __le32 *txd = (__le32 *)skb->data; | 29 | __le32 *txd = (__le32 *)skb->data; |
| 30 | struct ieee80211_hdr *hdr; | ||
| 31 | struct ieee80211_sta *sta; | ||
| 30 | struct mt7603_sta *msta; | 32 | struct mt7603_sta *msta; |
| 31 | struct mt76_wcid *wcid; | 33 | struct mt76_wcid *wcid; |
| 34 | void *priv; | ||
| 32 | int idx; | 35 | int idx; |
| 33 | u32 val; | 36 | u32 val; |
| 37 | u8 tid; | ||
| 34 | 38 | ||
| 35 | if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr)) | 39 | if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr)) |
| 36 | goto free; | 40 | goto free; |
| 37 | 41 | ||
| 38 | val = le32_to_cpu(txd[1]); | 42 | val = le32_to_cpu(txd[1]); |
| @@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) | |||
| 46 | if (!wcid) | 50 | if (!wcid) |
| 47 | goto free; | 51 | goto free; |
| 48 | 52 | ||
| 49 | msta = container_of(wcid, struct mt7603_sta, wcid); | 53 | priv = msta = container_of(wcid, struct mt7603_sta, wcid); |
| 50 | val = le32_to_cpu(txd[0]); | 54 | val = le32_to_cpu(txd[0]); |
| 51 | skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val)); | 55 | skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val)); |
| 52 | 56 | ||
| 57 | val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX); | ||
| 58 | val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT); | ||
| 59 | txd[0] = cpu_to_le32(val); | ||
| 60 | |||
| 61 | sta = container_of(priv, struct ieee80211_sta, drv_priv); | ||
| 62 | hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE]; | ||
| 63 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; | ||
| 64 | ieee80211_sta_set_buffered(sta, tid, true); | ||
| 65 | |||
| 53 | spin_lock_bh(&dev->ps_lock); | 66 | spin_lock_bh(&dev->ps_lock); |
| 54 | __skb_queue_tail(&msta->psq, skb); | 67 | __skb_queue_tail(&msta->psq, skb); |
| 55 | if (skb_queue_len(&msta->psq) >= 64) { | 68 | if (skb_queue_len(&msta->psq) >= 64) { |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index 15cc8f33b34d..d54dda67d036 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c | |||
| @@ -112,7 +112,7 @@ static void | |||
| 112 | mt7603_phy_init(struct mt7603_dev *dev) | 112 | mt7603_phy_init(struct mt7603_dev *dev) |
| 113 | { | 113 | { |
| 114 | int rx_chains = dev->mt76.antenna_mask; | 114 | int rx_chains = dev->mt76.antenna_mask; |
| 115 | int tx_chains = __sw_hweight8(rx_chains) - 1; | 115 | int tx_chains = hweight8(rx_chains) - 1; |
| 116 | 116 | ||
| 117 | mt76_rmw(dev, MT_WF_RMAC_RMCR, | 117 | mt76_rmw(dev, MT_WF_RMAC_RMCR, |
| 118 | (MT_WF_RMAC_RMCR_SMPS_MODE | | 118 | (MT_WF_RMAC_RMCR_SMPS_MODE | |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 0a0115861b51..5e31d7da96fc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c | |||
| @@ -1072,7 +1072,7 @@ out: | |||
| 1072 | case MT_PHY_TYPE_HT: | 1072 | case MT_PHY_TYPE_HT: |
| 1073 | final_rate_flags |= IEEE80211_TX_RC_MCS; | 1073 | final_rate_flags |= IEEE80211_TX_RC_MCS; |
| 1074 | final_rate &= GENMASK(5, 0); | 1074 | final_rate &= GENMASK(5, 0); |
| 1075 | if (i > 15) | 1075 | if (final_rate > 15) |
| 1076 | return false; | 1076 | return false; |
| 1077 | break; | 1077 | break; |
| 1078 | default: | 1078 | default: |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index b10775ed92e6..cc0fe0933b2d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
| 6 | #include <linux/module.h> | 6 | #include <linux/module.h> |
| 7 | #include "mt7603.h" | 7 | #include "mt7603.h" |
| 8 | #include "mac.h" | ||
| 8 | #include "eeprom.h" | 9 | #include "eeprom.h" |
| 9 | 10 | ||
| 10 | static int | 11 | static int |
| @@ -386,6 +387,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) | |||
| 386 | } | 387 | } |
| 387 | 388 | ||
| 388 | static void | 389 | static void |
| 390 | mt7603_ps_set_more_data(struct sk_buff *skb) | ||
| 391 | { | ||
| 392 | struct ieee80211_hdr *hdr; | ||
| 393 | |||
| 394 | hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE]; | ||
| 395 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); | ||
| 396 | } | ||
| 397 | |||
| 398 | static void | ||
| 389 | mt7603_release_buffered_frames(struct ieee80211_hw *hw, | 399 | mt7603_release_buffered_frames(struct ieee80211_hw *hw, |
| 390 | struct ieee80211_sta *sta, | 400 | struct ieee80211_sta *sta, |
| 391 | u16 tids, int nframes, | 401 | u16 tids, int nframes, |
| @@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw, | |||
| 399 | 409 | ||
| 400 | __skb_queue_head_init(&list); | 410 | __skb_queue_head_init(&list); |
| 401 | 411 | ||
| 412 | mt7603_wtbl_set_ps(dev, msta, false); | ||
| 413 | |||
| 402 | spin_lock_bh(&dev->ps_lock); | 414 | spin_lock_bh(&dev->ps_lock); |
| 403 | skb_queue_walk_safe(&msta->psq, skb, tmp) { | 415 | skb_queue_walk_safe(&msta->psq, skb, tmp) { |
| 404 | if (!nframes) | 416 | if (!nframes) |
| @@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw, | |||
| 409 | 421 | ||
| 410 | skb_set_queue_mapping(skb, MT_TXQ_PSD); | 422 | skb_set_queue_mapping(skb, MT_TXQ_PSD); |
| 411 | __skb_unlink(skb, &msta->psq); | 423 | __skb_unlink(skb, &msta->psq); |
| 424 | mt7603_ps_set_more_data(skb); | ||
| 412 | __skb_queue_tail(&list, skb); | 425 | __skb_queue_tail(&list, skb); |
| 413 | nframes--; | 426 | nframes--; |
| 414 | } | 427 | } |
| 415 | spin_unlock_bh(&dev->ps_lock); | 428 | spin_unlock_bh(&dev->ps_lock); |
| 416 | 429 | ||
| 430 | if (!skb_queue_empty(&list)) | ||
| 431 | ieee80211_sta_eosp(sta); | ||
| 432 | |||
| 417 | mt7603_ps_tx_list(dev, &list); | 433 | mt7603_ps_tx_list(dev, &list); |
| 418 | 434 | ||
| 419 | if (nframes) | 435 | if (nframes) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c index 4b0713f1fd5e..d06905ea8cc6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c | |||
| @@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev) | |||
| 433 | { | 433 | { |
| 434 | struct cfg80211_chan_def *chandef = &dev->mt76.chandef; | 434 | struct cfg80211_chan_def *chandef = &dev->mt76.chandef; |
| 435 | struct ieee80211_hw *hw = mt76_hw(dev); | 435 | struct ieee80211_hw *hw = mt76_hw(dev); |
| 436 | int n_chains = __sw_hweight8(dev->mt76.antenna_mask); | 436 | int n_chains = hweight8(dev->mt76.antenna_mask); |
| 437 | struct { | 437 | struct { |
| 438 | u8 control_chan; | 438 | u8 control_chan; |
| 439 | u8 center_chan; | 439 | u8 center_chan; |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c index e13fea80d970..b920be1f5718 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c | |||
| @@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev) | |||
| 23 | } | 23 | } |
| 24 | 24 | ||
| 25 | mem_base = devm_ioremap_resource(&pdev->dev, res); | 25 | mem_base = devm_ioremap_resource(&pdev->dev, res); |
| 26 | if (!mem_base) { | 26 | if (IS_ERR(mem_base)) { |
| 27 | dev_err(&pdev->dev, "Failed to get memory resource\n"); | 27 | dev_err(&pdev->dev, "Failed to get memory resource\n"); |
| 28 | return -EINVAL; | 28 | return PTR_ERR(mem_base); |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, | 31 | mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h index 0290ba5869a5..736f81752b5b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h | |||
| @@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = { | |||
| 46 | { MT_MM20_PROT_CFG, 0x01742004 }, | 46 | { MT_MM20_PROT_CFG, 0x01742004 }, |
| 47 | { MT_MM40_PROT_CFG, 0x03f42084 }, | 47 | { MT_MM40_PROT_CFG, 0x03f42084 }, |
| 48 | { MT_TXOP_CTRL_CFG, 0x0000583f }, | 48 | { MT_TXOP_CTRL_CFG, 0x0000583f }, |
| 49 | { MT_TX_RTS_CFG, 0x00092b20 }, | 49 | { MT_TX_RTS_CFG, 0x00ffff20 }, |
| 50 | { MT_EXP_ACK_TIME, 0x002400ca }, | 50 | { MT_EXP_ACK_TIME, 0x002400ca }, |
| 51 | { MT_TXOP_HLDR_ET, 0x00000002 }, | 51 | { MT_TXOP_HLDR_ET, 0x00000002 }, |
| 52 | { MT_XIFS_TIME_CFG, 0x33a41010 }, | 52 | { MT_XIFS_TIME_CFG, 0x33a41010 }, |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 91718647da02..e5a06f74a6f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | |||
| @@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, | |||
| 229 | struct usb_device *usb_dev = interface_to_usbdev(usb_intf); | 229 | struct usb_device *usb_dev = interface_to_usbdev(usb_intf); |
| 230 | struct mt76x02_dev *dev; | 230 | struct mt76x02_dev *dev; |
| 231 | struct mt76_dev *mdev; | 231 | struct mt76_dev *mdev; |
| 232 | u32 asic_rev, mac_rev; | 232 | u32 mac_rev; |
| 233 | int ret; | 233 | int ret; |
| 234 | 234 | ||
| 235 | mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops, | 235 | mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops, |
| @@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, | |||
| 262 | goto err; | 262 | goto err; |
| 263 | } | 263 | } |
| 264 | 264 | ||
| 265 | asic_rev = mt76_rr(dev, MT_ASIC_VERSION); | 265 | mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); |
| 266 | mac_rev = mt76_rr(dev, MT_MAC_CSR0); | 266 | mac_rev = mt76_rr(dev, MT_MAC_CSR0); |
| 267 | dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n", | 267 | dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n", |
| 268 | asic_rev, mac_rev); | 268 | mdev->rev, mac_rev); |
| 269 | if (!is_mt76x0(dev)) { | ||
| 270 | ret = -ENODEV; | ||
| 271 | goto err; | ||
| 272 | } | ||
| 269 | 273 | ||
| 270 | /* Note: vendor driver skips this check for MT76X0U */ | 274 | /* Note: vendor driver skips this check for MT76X0U */ |
| 271 | if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) | 275 | if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 6915cce5def9..07061eb4d1e1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h | |||
| @@ -51,6 +51,7 @@ struct mt76x02_calibration { | |||
| 51 | u16 false_cca; | 51 | u16 false_cca; |
| 52 | s8 avg_rssi_all; | 52 | s8 avg_rssi_all; |
| 53 | s8 agc_gain_adjust; | 53 | s8 agc_gain_adjust; |
| 54 | s8 agc_lowest_gain; | ||
| 54 | s8 low_gain; | 55 | s8 low_gain; |
| 55 | 56 | ||
| 56 | s8 temp_vco; | 57 | s8 temp_vco; |
| @@ -114,8 +115,11 @@ struct mt76x02_dev { | |||
| 114 | struct mt76x02_dfs_pattern_detector dfs_pd; | 115 | struct mt76x02_dfs_pattern_detector dfs_pd; |
| 115 | 116 | ||
| 116 | /* edcca monitor */ | 117 | /* edcca monitor */ |
| 118 | unsigned long ed_trigger_timeout; | ||
| 117 | bool ed_tx_blocked; | 119 | bool ed_tx_blocked; |
| 118 | bool ed_monitor; | 120 | bool ed_monitor; |
| 121 | u8 ed_monitor_enabled; | ||
| 122 | u8 ed_monitor_learning; | ||
| 119 | u8 ed_trigger; | 123 | u8 ed_trigger; |
| 120 | u8 ed_silent; | 124 | u8 ed_silent; |
| 121 | ktime_t ed_time; | 125 | ktime_t ed_time; |
| @@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev); | |||
| 188 | 192 | ||
| 189 | void mt76x02_init_debugfs(struct mt76x02_dev *dev); | 193 | void mt76x02_init_debugfs(struct mt76x02_dev *dev); |
| 190 | 194 | ||
| 195 | static inline bool is_mt76x0(struct mt76x02_dev *dev) | ||
| 196 | { | ||
| 197 | return mt76_chip(&dev->mt76) == 0x7610 || | ||
| 198 | mt76_chip(&dev->mt76) == 0x7630 || | ||
| 199 | mt76_chip(&dev->mt76) == 0x7650; | ||
| 200 | } | ||
| 201 | |||
| 191 | static inline bool is_mt76x2(struct mt76x02_dev *dev) | 202 | static inline bool is_mt76x2(struct mt76x02_dev *dev) |
| 192 | { | 203 | { |
| 193 | return mt76_chip(&dev->mt76) == 0x7612 || | 204 | return mt76_chip(&dev->mt76) == 0x7612 || |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c index 7580c5c986ff..b1d6fd4861e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c | |||
| @@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data) | |||
| 116 | return 0; | 116 | return 0; |
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | static int | ||
| 120 | mt76_edcca_set(void *data, u64 val) | ||
| 121 | { | ||
| 122 | struct mt76x02_dev *dev = data; | ||
| 123 | enum nl80211_dfs_regions region = dev->dfs_pd.region; | ||
| 124 | |||
| 125 | dev->ed_monitor_enabled = !!val; | ||
| 126 | dev->ed_monitor = dev->ed_monitor_enabled && | ||
| 127 | region == NL80211_DFS_ETSI; | ||
| 128 | mt76x02_edcca_init(dev, true); | ||
| 129 | |||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | static int | ||
| 134 | mt76_edcca_get(void *data, u64 *val) | ||
| 135 | { | ||
| 136 | struct mt76x02_dev *dev = data; | ||
| 137 | |||
| 138 | *val = dev->ed_monitor_enabled; | ||
| 139 | return 0; | ||
| 140 | } | ||
| 141 | |||
| 142 | DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set, | ||
| 143 | "%lld\n"); | ||
| 144 | |||
| 119 | void mt76x02_init_debugfs(struct mt76x02_dev *dev) | 145 | void mt76x02_init_debugfs(struct mt76x02_dev *dev) |
| 120 | { | 146 | { |
| 121 | struct dentry *dir; | 147 | struct dentry *dir; |
| @@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev) | |||
| 127 | debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp); | 153 | debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp); |
| 128 | debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); | 154 | debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); |
| 129 | 155 | ||
| 156 | debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca); | ||
| 130 | debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); | 157 | debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); |
| 131 | debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); | 158 | debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); |
| 132 | debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, | 159 | debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c index e4649103efd4..17d12d212d1b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c | |||
| @@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev, | |||
| 885 | if (dfs_pd->region != region) { | 885 | if (dfs_pd->region != region) { |
| 886 | tasklet_disable(&dfs_pd->dfs_tasklet); | 886 | tasklet_disable(&dfs_pd->dfs_tasklet); |
| 887 | 887 | ||
| 888 | dev->ed_monitor = region == NL80211_DFS_ETSI; | 888 | dev->ed_monitor = dev->ed_monitor_enabled && |
| 889 | region == NL80211_DFS_ETSI; | ||
| 889 | mt76x02_edcca_init(dev, true); | 890 | mt76x02_edcca_init(dev, true); |
| 890 | 891 | ||
| 891 | dfs_pd->region = region; | 892 | dfs_pd->region = region; |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 91ff6598eccf..9ed231abe916 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | |||
| @@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, | |||
| 67 | } | 67 | } |
| 68 | EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); | 68 | EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); |
| 69 | 69 | ||
| 70 | void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx, | ||
| 71 | struct ieee80211_key_conf *key) | ||
| 72 | { | ||
| 73 | enum mt76x02_cipher_type cipher; | ||
| 74 | u8 key_data[32]; | ||
| 75 | u32 iv, eiv; | ||
| 76 | u64 pn; | ||
| 77 | |||
| 78 | cipher = mt76x02_mac_get_key_info(key, key_data); | ||
| 79 | iv = mt76_rr(dev, MT_WCID_IV(idx)); | ||
| 80 | eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4); | ||
| 81 | |||
| 82 | pn = (u64)eiv << 16; | ||
| 83 | if (cipher == MT_CIPHER_TKIP) { | ||
| 84 | pn |= (iv >> 16) & 0xff; | ||
| 85 | pn |= (iv & 0xff) << 8; | ||
| 86 | } else if (cipher >= MT_CIPHER_AES_CCMP) { | ||
| 87 | pn |= iv & 0xffff; | ||
| 88 | } else { | ||
| 89 | return; | ||
| 90 | } | ||
| 91 | |||
| 92 | atomic64_set(&key->tx_pn, pn); | ||
| 93 | } | ||
| 94 | |||
| 95 | |||
| 70 | int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, | 96 | int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, |
| 71 | struct ieee80211_key_conf *key) | 97 | struct ieee80211_key_conf *key) |
| 72 | { | 98 | { |
| 73 | enum mt76x02_cipher_type cipher; | 99 | enum mt76x02_cipher_type cipher; |
| 74 | u8 key_data[32]; | 100 | u8 key_data[32]; |
| 75 | u8 iv_data[8]; | 101 | u8 iv_data[8]; |
| 102 | u64 pn; | ||
| 76 | 103 | ||
| 77 | cipher = mt76x02_mac_get_key_info(key, key_data); | 104 | cipher = mt76x02_mac_get_key_info(key, key_data); |
| 78 | if (cipher == MT_CIPHER_NONE && key) | 105 | if (cipher == MT_CIPHER_NONE && key) |
| @@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, | |||
| 85 | if (key) { | 112 | if (key) { |
| 86 | mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, | 113 | mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, |
| 87 | !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); | 114 | !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); |
| 115 | |||
| 116 | pn = atomic64_read(&key->tx_pn); | ||
| 117 | |||
| 88 | iv_data[3] = key->keyidx << 6; | 118 | iv_data[3] = key->keyidx << 6; |
| 89 | if (cipher >= MT_CIPHER_TKIP) | 119 | if (cipher >= MT_CIPHER_TKIP) { |
| 90 | iv_data[3] |= 0x20; | 120 | iv_data[3] |= 0x20; |
| 121 | put_unaligned_le32(pn >> 16, &iv_data[4]); | ||
| 122 | } | ||
| 123 | |||
| 124 | if (cipher == MT_CIPHER_TKIP) { | ||
| 125 | iv_data[0] = (pn >> 8) & 0xff; | ||
| 126 | iv_data[1] = (iv_data[0] | 0x20) & 0x7f; | ||
| 127 | iv_data[2] = pn & 0xff; | ||
| 128 | } else if (cipher >= MT_CIPHER_AES_CCMP) { | ||
| 129 | put_unaligned_le16((pn & 0xffff), &iv_data[0]); | ||
| 130 | } | ||
| 91 | } | 131 | } |
| 92 | 132 | ||
| 93 | mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); | 133 | mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); |
| @@ -920,6 +960,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable) | |||
| 920 | } | 960 | } |
| 921 | } | 961 | } |
| 922 | mt76x02_edcca_tx_enable(dev, true); | 962 | mt76x02_edcca_tx_enable(dev, true); |
| 963 | dev->ed_monitor_learning = true; | ||
| 923 | 964 | ||
| 924 | /* clear previous CCA timer value */ | 965 | /* clear previous CCA timer value */ |
| 925 | mt76_rr(dev, MT_ED_CCA_TIMER); | 966 | mt76_rr(dev, MT_ED_CCA_TIMER); |
| @@ -929,6 +970,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init); | |||
| 929 | 970 | ||
| 930 | #define MT_EDCCA_TH 92 | 971 | #define MT_EDCCA_TH 92 |
| 931 | #define MT_EDCCA_BLOCK_TH 2 | 972 | #define MT_EDCCA_BLOCK_TH 2 |
| 973 | #define MT_EDCCA_LEARN_TH 50 | ||
| 974 | #define MT_EDCCA_LEARN_CCA 180 | ||
| 975 | #define MT_EDCCA_LEARN_TIMEOUT (20 * HZ) | ||
| 976 | |||
| 932 | static void mt76x02_edcca_check(struct mt76x02_dev *dev) | 977 | static void mt76x02_edcca_check(struct mt76x02_dev *dev) |
| 933 | { | 978 | { |
| 934 | ktime_t cur_time; | 979 | ktime_t cur_time; |
| @@ -951,11 +996,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev) | |||
| 951 | dev->ed_trigger = 0; | 996 | dev->ed_trigger = 0; |
| 952 | } | 997 | } |
| 953 | 998 | ||
| 954 | if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && | 999 | if (dev->cal.agc_lowest_gain && |
| 955 | !dev->ed_tx_blocked) | 1000 | dev->cal.false_cca > MT_EDCCA_LEARN_CCA && |
| 1001 | dev->ed_trigger > MT_EDCCA_LEARN_TH) { | ||
| 1002 | dev->ed_monitor_learning = false; | ||
| 1003 | dev->ed_trigger_timeout = jiffies + 20 * HZ; | ||
| 1004 | } else if (!dev->ed_monitor_learning && | ||
| 1005 | time_is_after_jiffies(dev->ed_trigger_timeout)) { | ||
| 1006 | dev->ed_monitor_learning = true; | ||
| 1007 | mt76x02_edcca_tx_enable(dev, true); | ||
| 1008 | } | ||
| 1009 | |||
| 1010 | if (dev->ed_monitor_learning) | ||
| 1011 | return; | ||
| 1012 | |||
| 1013 | if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked) | ||
| 956 | mt76x02_edcca_tx_enable(dev, false); | 1014 | mt76x02_edcca_tx_enable(dev, false); |
| 957 | else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && | 1015 | else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked) |
| 958 | dev->ed_tx_blocked) | ||
| 959 | mt76x02_edcca_tx_enable(dev, true); | 1016 | mt76x02_edcca_tx_enable(dev, true); |
| 960 | } | 1017 | } |
| 961 | 1018 | ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h index 6b1f25d2f64c..caeeef96c42f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h | |||
| @@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, | |||
| 177 | u8 key_idx, struct ieee80211_key_conf *key); | 177 | u8 key_idx, struct ieee80211_key_conf *key); |
| 178 | int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, | 178 | int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, |
| 179 | struct ieee80211_key_conf *key); | 179 | struct ieee80211_key_conf *key); |
| 180 | void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx, | ||
| 181 | struct ieee80211_key_conf *key); | ||
| 180 | void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx, | 182 | void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx, |
| 181 | u8 *mac); | 183 | u8 *mac); |
| 182 | void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop); | 184 | void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 1229f19f2b02..daaed1220147 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/irq.h> | 19 | #include <linux/irq.h> |
| 20 | 20 | ||
| 21 | #include "mt76x02.h" | 21 | #include "mt76x02.h" |
| 22 | #include "mt76x02_mcu.h" | ||
| 22 | #include "mt76x02_trace.h" | 23 | #include "mt76x02_trace.h" |
| 23 | 24 | ||
| 24 | struct beacon_bc_data { | 25 | struct beacon_bc_data { |
| @@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev) | |||
| 418 | return i < 4; | 419 | return i < 4; |
| 419 | } | 420 | } |
| 420 | 421 | ||
| 422 | static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | ||
| 423 | struct ieee80211_sta *sta, | ||
| 424 | struct ieee80211_key_conf *key, void *data) | ||
| 425 | { | ||
| 426 | struct mt76x02_dev *dev = hw->priv; | ||
| 427 | struct mt76_wcid *wcid; | ||
| 428 | |||
| 429 | if (!sta) | ||
| 430 | return; | ||
| 431 | |||
| 432 | wcid = (struct mt76_wcid *) sta->drv_priv; | ||
| 433 | |||
| 434 | if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv) | ||
| 435 | return; | ||
| 436 | |||
| 437 | mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key); | ||
| 438 | } | ||
| 439 | |||
| 440 | static void mt76x02_reset_state(struct mt76x02_dev *dev) | ||
| 441 | { | ||
| 442 | int i; | ||
| 443 | |||
| 444 | lockdep_assert_held(&dev->mt76.mutex); | ||
| 445 | |||
| 446 | clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); | ||
| 447 | |||
| 448 | rcu_read_lock(); | ||
| 449 | ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL); | ||
| 450 | rcu_read_unlock(); | ||
| 451 | |||
| 452 | for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) { | ||
| 453 | struct ieee80211_sta *sta; | ||
| 454 | struct ieee80211_vif *vif; | ||
| 455 | struct mt76x02_sta *msta; | ||
| 456 | struct mt76_wcid *wcid; | ||
| 457 | void *priv; | ||
| 458 | |||
| 459 | wcid = rcu_dereference_protected(dev->mt76.wcid[i], | ||
| 460 | lockdep_is_held(&dev->mt76.mutex)); | ||
| 461 | if (!wcid) | ||
| 462 | continue; | ||
| 463 | |||
| 464 | priv = msta = container_of(wcid, struct mt76x02_sta, wcid); | ||
| 465 | sta = container_of(priv, struct ieee80211_sta, drv_priv); | ||
| 466 | |||
| 467 | priv = msta->vif; | ||
| 468 | vif = container_of(priv, struct ieee80211_vif, drv_priv); | ||
| 469 | |||
| 470 | __mt76_sta_remove(&dev->mt76, vif, sta); | ||
| 471 | memset(msta, 0, sizeof(*msta)); | ||
| 472 | } | ||
| 473 | |||
| 474 | dev->vif_mask = 0; | ||
| 475 | dev->beacon_mask = 0; | ||
| 476 | } | ||
| 477 | |||
| 421 | static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) | 478 | static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) |
| 422 | { | 479 | { |
| 423 | u32 mask = dev->mt76.mmio.irqmask; | 480 | u32 mask = dev->mt76.mmio.irqmask; |
| 481 | bool restart = dev->mt76.mcu_ops->mcu_restart; | ||
| 424 | int i; | 482 | int i; |
| 425 | 483 | ||
| 426 | ieee80211_stop_queues(dev->mt76.hw); | 484 | ieee80211_stop_queues(dev->mt76.hw); |
| @@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) | |||
| 434 | 492 | ||
| 435 | mutex_lock(&dev->mt76.mutex); | 493 | mutex_lock(&dev->mt76.mutex); |
| 436 | 494 | ||
| 495 | if (restart) | ||
| 496 | mt76x02_reset_state(dev); | ||
| 497 | |||
| 437 | if (dev->beacon_mask) | 498 | if (dev->beacon_mask) |
| 438 | mt76_clear(dev, MT_BEACON_TIME_CFG, | 499 | mt76_clear(dev, MT_BEACON_TIME_CFG, |
| 439 | MT_BEACON_TIME_CFG_BEACON_TX | | 500 | MT_BEACON_TIME_CFG_BEACON_TX | |
| @@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) | |||
| 452 | /* let fw reset DMA */ | 513 | /* let fw reset DMA */ |
| 453 | mt76_set(dev, 0x734, 0x3); | 514 | mt76_set(dev, 0x734, 0x3); |
| 454 | 515 | ||
| 516 | if (restart) | ||
| 517 | dev->mt76.mcu_ops->mcu_restart(&dev->mt76); | ||
| 518 | |||
| 455 | for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) | 519 | for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) |
| 456 | mt76_queue_tx_cleanup(dev, i, true); | 520 | mt76_queue_tx_cleanup(dev, i, true); |
| 457 | 521 | ||
| 458 | for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) | 522 | for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) |
| 459 | mt76_queue_rx_reset(dev, i); | 523 | mt76_queue_rx_reset(dev, i); |
| 460 | 524 | ||
| 461 | mt76_wr(dev, MT_MAC_SYS_CTRL, | 525 | mt76x02_mac_start(dev); |
| 462 | MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); | 526 | |
| 463 | mt76_set(dev, MT_WPDMA_GLO_CFG, | ||
| 464 | MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN); | ||
| 465 | if (dev->ed_monitor) | 527 | if (dev->ed_monitor) |
| 466 | mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); | 528 | mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); |
| 467 | 529 | ||
| 468 | if (dev->beacon_mask) | 530 | if (dev->beacon_mask && !restart) |
| 469 | mt76_set(dev, MT_BEACON_TIME_CFG, | 531 | mt76_set(dev, MT_BEACON_TIME_CFG, |
| 470 | MT_BEACON_TIME_CFG_BEACON_TX | | 532 | MT_BEACON_TIME_CFG_BEACON_TX | |
| 471 | MT_BEACON_TIME_CFG_TBTT_EN); | 533 | MT_BEACON_TIME_CFG_TBTT_EN); |
| @@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) | |||
| 486 | napi_schedule(&dev->mt76.napi[i]); | 548 | napi_schedule(&dev->mt76.napi[i]); |
| 487 | } | 549 | } |
| 488 | 550 | ||
| 489 | ieee80211_wake_queues(dev->mt76.hw); | 551 | if (restart) { |
| 490 | 552 | mt76x02_mcu_function_select(dev, Q_SELECT, 1); | |
| 491 | mt76_txq_schedule_all(&dev->mt76); | 553 | ieee80211_restart_hw(dev->mt76.hw); |
| 554 | } else { | ||
| 555 | ieee80211_wake_queues(dev->mt76.hw); | ||
| 556 | mt76_txq_schedule_all(&dev->mt76); | ||
| 557 | } | ||
| 492 | } | 558 | } |
| 493 | 559 | ||
| 494 | static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) | 560 | static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c index a020c757ba5c..a54b63a96eae 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c | |||
| @@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev) | |||
| 194 | ret = true; | 194 | ret = true; |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit; | ||
| 198 | |||
| 197 | return ret; | 199 | return ret; |
| 198 | } | 200 | } |
| 199 | EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain); | 201 | EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 43f07461c8d3..6fb52b596d42 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | |||
| @@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, | |||
| 85 | 85 | ||
| 86 | mt76x02_insert_hdr_pad(skb); | 86 | mt76x02_insert_hdr_pad(skb); |
| 87 | 87 | ||
| 88 | txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); | 88 | txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi)); |
| 89 | mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); | 89 | mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); |
| 90 | skb_push(skb, sizeof(struct mt76x02_txwi)); | ||
| 90 | 91 | ||
| 91 | pid = mt76_tx_status_skb_add(mdev, wcid, skb); | 92 | pid = mt76_tx_status_skb_add(mdev, wcid, skb); |
| 92 | txwi->pktid = pid; | 93 | txwi->pktid = pid; |
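This usb_core.c change composes the TXWI in the skb headroom before calling skb_push(), presumably so that the length handed to mt76x02_mac_write_txwi() still describes the payload alone. Below is a self-contained sketch of the same reserve-then-push idea; pkt, txdesc and prepend_desc are made-up stand-ins for skb, the TXWI and skb_push(), not the mt76 implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct txdesc {                         /* stand-in for the hardware descriptor */
        uint16_t len;
        uint16_t flags;
};

struct pkt {
        uint8_t  buf[256];
        uint8_t *data;                  /* current start of the frame */
        size_t   len;                   /* frame length, descriptor not included */
};

/* write the descriptor into the headroom first, then extend the frame over it,
 * so the length recorded in the descriptor stays payload-only */
static void prepend_desc(struct pkt *p, uint16_t flags)
{
        struct txdesc *d = (struct txdesc *)(p->data - sizeof(*d));

        d->len = (uint16_t)p->len;
        d->flags = flags;

        p->data -= sizeof(*d);          /* the skb_push() equivalent */
        p->len += sizeof(*d);
}

int main(void)
{
        struct pkt p;

        p.data = p.buf + 64;            /* 64 bytes of headroom */
        memcpy(p.data, "payload", 7);
        p.len = 7;

        prepend_desc(&p, 0x1);
        printf("frame %zu bytes, descriptor says %u payload bytes\n",
               p.len, ((struct txdesc *)p.data)->len);
        return 0;
}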
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index a48c261b0c63..cd072ac614f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c | |||
| @@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, | |||
| 237 | struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; | 237 | struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; |
| 238 | int idx = 0; | 238 | int idx = 0; |
| 239 | 239 | ||
| 240 | memset(msta, 0, sizeof(*msta)); | ||
| 241 | |||
| 240 | idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); | 242 | idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); |
| 241 | if (idx < 0) | 243 | if (idx < 0) |
| 242 | return -ENOSPC; | 244 | return -ENOSPC; |
| @@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, | |||
| 274 | struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; | 276 | struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; |
| 275 | struct mt76_txq *mtxq; | 277 | struct mt76_txq *mtxq; |
| 276 | 278 | ||
| 279 | memset(mvif, 0, sizeof(*mvif)); | ||
| 280 | |||
| 277 | mvif->idx = idx; | 281 | mvif->idx = idx; |
| 278 | mvif->group_wcid.idx = MT_VIF_WCID(idx); | 282 | mvif->group_wcid.idx = MT_VIF_WCID(idx); |
| 279 | mvif->group_wcid.hw_key_idx = -1; | 283 | mvif->group_wcid.hw_key_idx = -1; |
| @@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) | |||
| 289 | struct mt76x02_dev *dev = hw->priv; | 293 | struct mt76x02_dev *dev = hw->priv; |
| 290 | unsigned int idx = 0; | 294 | unsigned int idx = 0; |
| 291 | 295 | ||
| 296 | /* Allow changing the address in HW when creating the first interface. */ ||
| 297 | if (!dev->vif_mask && | ||
| 298 | (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) || | ||
| 299 | memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1))) | ||
| 300 | mt76x02_mac_setaddr(dev, vif->addr); | ||
| 301 | |||
| 292 | if (vif->addr[0] & BIT(1)) | 302 | if (vif->addr[0] & BIT(1)) |
| 293 | idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); | 303 | idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); |
| 294 | 304 | ||
| @@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) | |||
| 311 | if (dev->vif_mask & BIT(idx)) | 321 | if (dev->vif_mask & BIT(idx)) |
| 312 | return -EBUSY; | 322 | return -EBUSY; |
| 313 | 323 | ||
| 314 | /* Allow to change address in HW if we create first interface. */ | ||
| 315 | if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr)) | ||
| 316 | mt76x02_mac_setaddr(dev, vif->addr); | ||
| 317 | |||
| 318 | dev->vif_mask |= BIT(idx); | 324 | dev->vif_mask |= BIT(idx); |
| 319 | 325 | ||
| 320 | mt76x02_vif_init(dev, vif, idx); | 326 | mt76x02_vif_init(dev, vif, idx); |
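The add_interface change above compares the requested address against the one programmed into the hardware while ignoring the bits of byte 0 that the driver derives per-interface slots from (GENMASK(4, 1), i.e. 0x1e), and reprograms the MAC only when something outside those bits differs. A sketch of that comparison under those assumptions; the helper name and the sample addresses are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN   6
#define SLOT_BITS  0x1eu                /* bits 1..4 of byte 0, GENMASK(4, 1) */

/* true if the address differs in anything the hardware must be reprogrammed for */
static bool needs_hw_addr_update(const uint8_t *vif_addr, const uint8_t *hw_addr)
{
        if ((vif_addr[0] ^ hw_addr[0]) & ~SLOT_BITS)
                return true;            /* byte 0 differs outside the slot bits */
        return memcmp(vif_addr + 1, hw_addr + 1, ETH_ALEN - 1) != 0;
}

int main(void)
{
        const uint8_t hw[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        const uint8_t va[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        const uint8_t vb[ETH_ALEN] = { 0x40, 0x11, 0x22, 0x33, 0x44, 0x55 };

        /* 0: only a slot bit differs; 1: a non-slot bit differs */
        printf("%d %d\n", needs_hw_addr_update(va, hw), needs_hw_addr_update(vb, hw));
        return 0;
}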
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index f8534362e2c8..a30ef2c5a9db 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c | |||
| @@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev) | |||
| 106 | { MT_TX_SW_CFG1, 0x00010000 }, | 106 | { MT_TX_SW_CFG1, 0x00010000 }, |
| 107 | { MT_TX_SW_CFG2, 0x00000000 }, | 107 | { MT_TX_SW_CFG2, 0x00000000 }, |
| 108 | { MT_TXOP_CTRL_CFG, 0x0400583f }, | 108 | { MT_TXOP_CTRL_CFG, 0x0400583f }, |
| 109 | { MT_TX_RTS_CFG, 0x00100020 }, | 109 | { MT_TX_RTS_CFG, 0x00ffff20 }, |
| 110 | { MT_TX_TIMEOUT_CFG, 0x000a2290 }, | 110 | { MT_TX_TIMEOUT_CFG, 0x000a2290 }, |
| 111 | { MT_TX_RETRY_CFG, 0x47f01f0f }, | 111 | { MT_TX_RETRY_CFG, 0x47f01f0f }, |
| 112 | { MT_EXP_ACK_TIME, 0x002c00dc }, | 112 | { MT_EXP_ACK_TIME, 0x002c00dc }, |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index 6c619f1c65c9..d7abe3d73bad 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h | |||
| @@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, | |||
| 71 | 71 | ||
| 72 | void mt76x2_cleanup(struct mt76x02_dev *dev); | 72 | void mt76x2_cleanup(struct mt76x02_dev *dev); |
| 73 | 73 | ||
| 74 | int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard); | ||
| 74 | void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable); | 75 | void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable); |
| 75 | void mt76x2_init_txpower(struct mt76x02_dev *dev, | 76 | void mt76x2_init_txpower(struct mt76x02_dev *dev, |
| 76 | struct ieee80211_supported_band *sband); | 77 | struct ieee80211_supported_band *sband); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 984d9c4c2e1a..d3927a13e92e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c | |||
| @@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev) | |||
| 77 | } | 77 | } |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) | 80 | int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) |
| 81 | { | 81 | { |
| 82 | const u8 *macaddr = dev->mt76.macaddr; | 82 | const u8 *macaddr = dev->mt76.macaddr; |
| 83 | u32 val; | 83 | u32 val; |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c index 03e24ae7f66c..605dc66ae83b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c | |||
| @@ -165,9 +165,30 @@ error: | |||
| 165 | return -ENOENT; | 165 | return -ENOENT; |
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | static int | ||
| 169 | mt76pci_mcu_restart(struct mt76_dev *mdev) | ||
| 170 | { | ||
| 171 | struct mt76x02_dev *dev; | ||
| 172 | int ret; | ||
| 173 | |||
| 174 | dev = container_of(mdev, struct mt76x02_dev, mt76); | ||
| 175 | |||
| 176 | mt76x02_mcu_cleanup(dev); | ||
| 177 | mt76x2_mac_reset(dev, true); | ||
| 178 | |||
| 179 | ret = mt76pci_load_firmware(dev); | ||
| 180 | if (ret) | ||
| 181 | return ret; | ||
| 182 | |||
| 183 | mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); | ||
| 184 | |||
| 185 | return 0; | ||
| 186 | } | ||
| 187 | |||
| 168 | int mt76x2_mcu_init(struct mt76x02_dev *dev) | 188 | int mt76x2_mcu_init(struct mt76x02_dev *dev) |
| 169 | { | 189 | { |
| 170 | static const struct mt76_mcu_ops mt76x2_mcu_ops = { | 190 | static const struct mt76_mcu_ops mt76x2_mcu_ops = { |
| 191 | .mcu_restart = mt76pci_mcu_restart, | ||
| 171 | .mcu_send_msg = mt76x02_mcu_msg_send, | 192 | .mcu_send_msg = mt76x02_mcu_msg_send, |
| 172 | }; | 193 | }; |
| 173 | int ret; | 194 | int ret; |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c index 1848e8ab2e21..769a9b972044 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c | |||
| @@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev) | |||
| 260 | gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; | 260 | gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; |
| 261 | gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; | 261 | gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; |
| 262 | 262 | ||
| 263 | if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) | 263 | val = 0x1836 << 16; |
| 264 | if (!mt76x2_has_ext_lna(dev) && | ||
| 265 | dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) | ||
| 264 | val = 0x1e42 << 16; | 266 | val = 0x1e42 << 16; |
| 265 | else | 267 | |
| 266 | val = 0x1836 << 16; | 268 | if (mt76x2_has_ext_lna(dev) && |
| 269 | dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ && | ||
| 270 | dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40) | ||
| 271 | val = 0x0f36 << 16; | ||
| 267 | 272 | ||
| 268 | val |= 0xf8; | 273 | val |= 0xf8; |
| 269 | 274 | ||
| @@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) | |||
| 280 | { | 285 | { |
| 281 | u8 *gain = dev->cal.agc_gain_init; | 286 | u8 *gain = dev->cal.agc_gain_init; |
| 282 | u8 low_gain_delta, gain_delta; | 287 | u8 low_gain_delta, gain_delta; |
| 288 | u32 agc_35, agc_37; | ||
| 283 | bool gain_change; | 289 | bool gain_change; |
| 284 | int low_gain; | 290 | int low_gain; |
| 285 | u32 val; | 291 | u32 val; |
| @@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) | |||
| 318 | else | 324 | else |
| 319 | low_gain_delta = 14; | 325 | low_gain_delta = 14; |
| 320 | 326 | ||
| 327 | agc_37 = 0x2121262c; | ||
| 328 | if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) | ||
| 329 | agc_35 = 0x11111516; | ||
| 330 | else if (low_gain == 2) | ||
| 331 | agc_35 = agc_37 = 0x08080808; | ||
| 332 | else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) | ||
| 333 | agc_35 = 0x10101014; | ||
| 334 | else | ||
| 335 | agc_35 = 0x11111116; | ||
| 336 | |||
| 321 | if (low_gain == 2) { | 337 | if (low_gain == 2) { |
| 322 | mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); | 338 | mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); |
| 323 | mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); | 339 | mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); |
| @@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) | |||
| 326 | dev->cal.agc_gain_adjust = 0; | 342 | dev->cal.agc_gain_adjust = 0; |
| 327 | } else { | 343 | } else { |
| 328 | mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); | 344 | mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); |
| 329 | if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) | ||
| 330 | mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014); | ||
| 331 | else | ||
| 332 | mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116); | ||
| 333 | mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C); | ||
| 334 | gain_delta = 0; | 345 | gain_delta = 0; |
| 335 | dev->cal.agc_gain_adjust = low_gain_delta; | 346 | dev->cal.agc_gain_adjust = low_gain_delta; |
| 336 | } | 347 | } |
| 337 | 348 | ||
| 349 | mt76_wr(dev, MT_BBP(AGC, 35), agc_35); | ||
| 350 | mt76_wr(dev, MT_BBP(AGC, 37), agc_37); | ||
| 351 | |||
| 338 | dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; | 352 | dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; |
| 339 | dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; | 353 | dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; |
| 340 | mt76x2_phy_set_gain_val(dev); | 354 | mt76x2_phy_set_gain_val(dev); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index ddb6b2c48e01..ac0f13d46299 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c | |||
| @@ -21,11 +21,10 @@ | |||
| 21 | #include "mt76x2u.h" | 21 | #include "mt76x2u.h" |
| 22 | 22 | ||
| 23 | static const struct usb_device_id mt76x2u_device_table[] = { | 23 | static const struct usb_device_id mt76x2u_device_table[] = { |
| 24 | { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */ | ||
| 25 | { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */ | 24 | { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */ |
| 26 | { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */ | 25 | { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */ |
| 27 | { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */ | 26 | { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */ |
| 28 | { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */ | 27 | { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */ |
| 29 | { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ | 28 | { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ |
| 30 | { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ | 29 | { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ |
| 31 | { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ | 30 | { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ |
| @@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf, | |||
| 66 | 65 | ||
| 67 | mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); | 66 | mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); |
| 68 | dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); | 67 | dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); |
| 68 | if (!is_mt76x2(dev)) { | ||
| 69 | err = -ENODEV; | ||
| 70 | goto err; | ||
| 71 | } | ||
| 69 | 72 | ||
| 70 | err = mt76x2u_register_device(dev); | 73 | err = mt76x2u_register_device(dev); |
| 71 | if (err < 0) | 74 | if (err < 0) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c index 5e84b4535cb1..3b82345756ea 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c | |||
| @@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev) | |||
| 93 | mt76_wr(dev, MT_TX_LINK_CFG, 0x1020); | 93 | mt76_wr(dev, MT_TX_LINK_CFG, 0x1020); |
| 94 | mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13); | 94 | mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13); |
| 95 | mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00); | 95 | mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00); |
| 96 | mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20); | ||
| 97 | 96 | ||
| 98 | mt76_wr(dev, MT_WMM_AIFSN, 0x2273); | 97 | mt76_wr(dev, MT_WMM_AIFSN, 0x2273); |
| 99 | mt76_wr(dev, MT_WMM_CWMIN, 0x2344); | 98 | mt76_wr(dev, MT_WMM_CWMIN, 0x2344); |
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 5a349fe3e576..2585df512335 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c | |||
| @@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, | |||
| 289 | dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta); | 289 | dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta); |
| 290 | dev->queue_ops->kick(dev, q); | 290 | dev->queue_ops->kick(dev, q); |
| 291 | 291 | ||
| 292 | if (q->queued > q->ndesc - 8) | 292 | if (q->queued > q->ndesc - 8 && !q->stopped) { |
| 293 | ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); | 293 | ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); |
| 294 | q->stopped = true; | ||
| 295 | } | ||
| 296 | |||
| 294 | spin_unlock_bh(&q->lock); | 297 | spin_unlock_bh(&q->lock); |
| 295 | } | 298 | } |
| 296 | EXPORT_SYMBOL_GPL(mt76_tx); | 299 | EXPORT_SYMBOL_GPL(mt76_tx); |
| @@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, | |||
| 374 | if (last_skb) { | 377 | if (last_skb) { |
| 375 | mt76_queue_ps_skb(dev, sta, last_skb, true); | 378 | mt76_queue_ps_skb(dev, sta, last_skb, true); |
| 376 | dev->queue_ops->kick(dev, hwq); | 379 | dev->queue_ops->kick(dev, hwq); |
| 380 | } else { | ||
| 381 | ieee80211_sta_eosp(sta); | ||
| 377 | } | 382 | } |
| 383 | |||
| 378 | spin_unlock_bh(&hwq->lock); | 384 | spin_unlock_bh(&hwq->lock); |
| 379 | } | 385 | } |
| 380 | EXPORT_SYMBOL_GPL(mt76_release_buffered_frames); | 386 | EXPORT_SYMBOL_GPL(mt76_release_buffered_frames); |
| @@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) | |||
| 577 | struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; | 583 | struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; |
| 578 | struct mt76_queue *hwq = mtxq->hwq; | 584 | struct mt76_queue *hwq = mtxq->hwq; |
| 579 | 585 | ||
| 586 | if (!test_bit(MT76_STATE_RUNNING, &dev->state)) | ||
| 587 | return; | ||
| 588 | |||
| 580 | spin_lock_bh(&hwq->lock); | 589 | spin_lock_bh(&hwq->lock); |
| 581 | if (list_empty(&mtxq->list)) | 590 | if (list_empty(&mtxq->list)) |
| 582 | list_add_tail(&mtxq->list, &hwq->swq); | 591 | list_add_tail(&mtxq->list, &hwq->swq); |
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index ae6ada370597..4c1abd492405 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c | |||
| @@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data) | |||
| 655 | spin_lock_bh(&q->lock); | 655 | spin_lock_bh(&q->lock); |
| 656 | } | 656 | } |
| 657 | mt76_txq_schedule(dev, q); | 657 | mt76_txq_schedule(dev, q); |
| 658 | wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; | 658 | |
| 659 | wake = q->stopped && q->queued < q->ndesc - 8; | ||
| 660 | if (wake) | ||
| 661 | q->stopped = false; | ||
| 662 | |||
| 659 | if (!q->queued) | 663 | if (!q->queued) |
| 660 | wake_up(&dev->tx_wait); | 664 | wake_up(&dev->tx_wait); |
| 661 | 665 | ||
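Together, the tx.c and usb.c hunks pair a producer-side stop (stop the mac80211 queue once the ring comes within eight descriptors of full, remembering that in q->stopped) with a consumer-side wake that fires only when the flag is set and the ring has drained back below the same watermark, so a queue this code did not stop is never woken. A compact userspace sketch of that flow-control pattern, with invented names, sizes and thresholds:

#include <stdbool.h>
#include <stdio.h>

#define NDESC     128
#define HEADROOM  8                     /* stop when fewer than 8 free slots remain */

struct txq {
        int  queued;
        bool stopped;
};

/* producer: enqueue one frame, stop exactly once when the ring gets tight */
static void txq_enqueue(struct txq *q)
{
        q->queued++;
        if (q->queued > NDESC - HEADROOM && !q->stopped) {
                q->stopped = true;
                printf("stop at %d queued\n", q->queued);
        }
}

/* consumer: complete one frame, wake only if this code had stopped the queue */
static void txq_complete(struct txq *q)
{
        if (q->queued > 0)
                q->queued--;
        if (q->stopped && q->queued < NDESC - HEADROOM) {
                q->stopped = false;
                printf("wake at %d queued\n", q->queued);
        }
}

int main(void)
{
        struct txq q = { 0, false };
        int i;

        for (i = 0; i < NDESC - 4; i++) /* fill past the stop watermark */
                txq_enqueue(&q);
        while (q.queued)                /* drain and observe a single wake */
                txq_complete(&q);
        return 0;
}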
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index d8b7863f7926..6ae7f14dc9bf 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c | |||
| @@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf, | |||
| 303 | mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); | 303 | mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); |
| 304 | dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", | 304 | dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", |
| 305 | asic_rev, mac_rev); | 305 | asic_rev, mac_rev); |
| 306 | if ((asic_rev >> 16) != 0x7601) { | ||
| 307 | ret = -ENODEV; | ||
| 308 | goto err; | ||
| 309 | } | ||
| 306 | 310 | ||
| 307 | /* Note: vendor driver skips this check for MT7601U */ | 311 | /* Note: vendor driver skips this check for MT7601U */ |
| 308 | if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) | 312 | if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) |
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 2839bb70badf..f0716f6ce41f 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c | |||
| @@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state) | |||
| 404 | static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, | 404 | static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, |
| 405 | struct nvme_ns *ns) | 405 | struct nvme_ns *ns) |
| 406 | { | 406 | { |
| 407 | enum nvme_ana_state old; | ||
| 408 | |||
| 409 | mutex_lock(&ns->head->lock); | 407 | mutex_lock(&ns->head->lock); |
| 410 | old = ns->ana_state; | ||
| 411 | ns->ana_grpid = le32_to_cpu(desc->grpid); | 408 | ns->ana_grpid = le32_to_cpu(desc->grpid); |
| 412 | ns->ana_state = desc->state; | 409 | ns->ana_state = desc->state; |
| 413 | clear_bit(NVME_NS_ANA_PENDING, &ns->flags); | 410 | clear_bit(NVME_NS_ANA_PENDING, &ns->flags); |
| 414 | 411 | ||
| 415 | if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old)) | 412 | if (nvme_state_is_live(ns->ana_state)) |
| 416 | nvme_mpath_set_live(ns); | 413 | nvme_mpath_set_live(ns); |
| 417 | mutex_unlock(&ns->head->lock); | 414 | mutex_unlock(&ns->head->lock); |
| 418 | } | 415 | } |
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index e7e08889865e..68c49dd67210 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c | |||
| @@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, | |||
| 627 | return ret; | 627 | return ret; |
| 628 | } | 628 | } |
| 629 | 629 | ||
| 630 | static inline void nvme_tcp_end_request(struct request *rq, __le16 status) | 630 | static inline void nvme_tcp_end_request(struct request *rq, u16 status) |
| 631 | { | 631 | { |
| 632 | union nvme_result res = {}; | 632 | union nvme_result res = {}; |
| 633 | 633 | ||
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 2d73b66e3686..b3e765a95af8 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
| @@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns) | |||
| 509 | 509 | ||
| 510 | ret = nvmet_p2pmem_ns_enable(ns); | 510 | ret = nvmet_p2pmem_ns_enable(ns); |
| 511 | if (ret) | 511 | if (ret) |
| 512 | goto out_unlock; | 512 | goto out_dev_disable; |
| 513 | 513 | ||
| 514 | list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) | 514 | list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) |
| 515 | nvmet_p2pmem_ns_add_p2p(ctrl, ns); | 515 | nvmet_p2pmem_ns_add_p2p(ctrl, ns); |
| @@ -550,7 +550,7 @@ out_unlock: | |||
| 550 | out_dev_put: | 550 | out_dev_put: |
| 551 | list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) | 551 | list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) |
| 552 | pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); | 552 | pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); |
| 553 | 553 | out_dev_disable: | |
| 554 | nvmet_ns_dev_disable(ns); | 554 | nvmet_ns_dev_disable(ns); |
| 555 | goto out_unlock; | 555 | goto out_unlock; |
| 556 | } | 556 | } |
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 3e43212d3c1c..bc6ebb51b0bf 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c | |||
| @@ -75,11 +75,11 @@ err: | |||
| 75 | return ret; | 75 | return ret; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter) | 78 | static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg) |
| 79 | { | 79 | { |
| 80 | bv->bv_page = sg_page_iter_page(iter); | 80 | bv->bv_page = sg_page(sg); |
| 81 | bv->bv_offset = iter->sg->offset; | 81 | bv->bv_offset = sg->offset; |
| 82 | bv->bv_len = PAGE_SIZE - iter->sg->offset; | 82 | bv->bv_len = sg->length; |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, | 85 | static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, |
| @@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2) | |||
| 128 | 128 | ||
| 129 | static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) | 129 | static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) |
| 130 | { | 130 | { |
| 131 | ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); | 131 | ssize_t nr_bvec = req->sg_cnt; |
| 132 | struct sg_page_iter sg_pg_iter; | ||
| 133 | unsigned long bv_cnt = 0; | 132 | unsigned long bv_cnt = 0; |
| 134 | bool is_sync = false; | 133 | bool is_sync = false; |
| 135 | size_t len = 0, total_len = 0; | 134 | size_t len = 0, total_len = 0; |
| 136 | ssize_t ret = 0; | 135 | ssize_t ret = 0; |
| 137 | loff_t pos; | 136 | loff_t pos; |
| 138 | 137 | int i; | |
| 138 | struct scatterlist *sg; | ||
| 139 | 139 | ||
| 140 | if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC) | 140 | if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC) |
| 141 | is_sync = true; | 141 | is_sync = true; |
| @@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) | |||
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | memset(&req->f.iocb, 0, sizeof(struct kiocb)); | 149 | memset(&req->f.iocb, 0, sizeof(struct kiocb)); |
| 150 | for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) { | 150 | for_each_sg(req->sg, sg, req->sg_cnt, i) { |
| 151 | nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter); | 151 | nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg); |
| 152 | len += req->f.bvec[bv_cnt].bv_len; | 152 | len += req->f.bvec[bv_cnt].bv_len; |
| 153 | total_len += req->f.bvec[bv_cnt].bv_len; | 153 | total_len += req->f.bvec[bv_cnt].bv_len; |
| 154 | bv_cnt++; | 154 | bv_cnt++; |
| @@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req) | |||
| 225 | 225 | ||
| 226 | static void nvmet_file_execute_rw(struct nvmet_req *req) | 226 | static void nvmet_file_execute_rw(struct nvmet_req *req) |
| 227 | { | 227 | { |
| 228 | ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); | 228 | ssize_t nr_bvec = req->sg_cnt; |
| 229 | 229 | ||
| 230 | if (!req->sg_cnt || !nr_bvec) { | 230 | if (!req->sg_cnt || !nr_bvec) { |
| 231 | nvmet_req_complete(req, 0); | 231 | nvmet_req_complete(req, 0); |
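The io-cmd-file.c change fills one bio_vec per scatterlist element instead of one per page, which is why nr_bvec can now simply be req->sg_cnt. A rough userspace sketch of the per-segment packing; seg, vec and pack_vecs are invented stand-ins for scatterlist, bio_vec and the init loop.

#include <stddef.h>
#include <stdio.h>

struct seg {                            /* stand-in for one scatterlist element */
        void   *page;
        size_t  offset;
        size_t  length;
};

struct vec {                            /* stand-in for bio_vec */
        void   *page;
        size_t  offset;
        size_t  len;
};

/* one output vector per input segment, regardless of how many pages it spans */
static size_t pack_vecs(const struct seg *sg, size_t nsegs, struct vec *out)
{
        size_t i, total = 0;

        for (i = 0; i < nsegs; i++) {
                out[i].page = sg[i].page;
                out[i].offset = sg[i].offset;
                out[i].len = sg[i].length;
                total += out[i].len;
        }
        return total;
}

int main(void)
{
        char a[8192], b[4096];
        struct seg sg[2] = {
                { a, 128, 8000 },       /* one segment larger than a 4 KiB page */
                { b, 0, 4096 },
        };
        struct vec v[2];

        printf("total %zu bytes in %zu vecs\n", pack_vecs(sg, 2, v), (size_t)2);
        return 0;
}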
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 1be571c20062..6bad04cbb1d3 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c | |||
| @@ -157,8 +157,12 @@ | |||
| 157 | #define DBG_IRT(x...) | 157 | #define DBG_IRT(x...) |
| 158 | #endif | 158 | #endif |
| 159 | 159 | ||
| 160 | #ifdef CONFIG_64BIT | ||
| 161 | #define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa)) | ||
| 162 | #else | ||
| 160 | #define COMPARE_IRTE_ADDR(irte, hpa) \ | 163 | #define COMPARE_IRTE_ADDR(irte, hpa) \ |
| 161 | ((irte)->dest_iosapic_addr == F_EXTEND(hpa)) | 164 | ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL)) |
| 165 | #endif | ||
| 162 | 166 | ||
| 163 | #define IOSAPIC_REG_SELECT 0x00 | 167 | #define IOSAPIC_REG_SELECT 0x00 |
| 164 | #define IOSAPIC_REG_WINDOW 0x10 | 168 | #define IOSAPIC_REG_WINDOW 0x10 |
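On 32-bit builds the iosapic.c hunk compares the IRT entry against the I/O address with its upper 32 bits forced to ones rather than calling F_EXTEND(). A tiny sketch of that comparison, assuming a 32-bit hpa stored F-extended in the 64-bit dest_iosapic_addr field; the sample values are invented.

#include <inttypes.h>
#include <stdio.h>

/* a 32-bit I/O address as firmware records it in the 64-bit IRT field:
 * upper 32 bits all ones ("F-extended") */
static uint64_t f_extend(uint32_t hpa)
{
        return (uint64_t)hpa | 0xffffffff00000000ULL;
}

static int irte_matches(uint64_t dest_iosapic_addr, uint32_t hpa)
{
        return dest_iosapic_addr == f_extend(hpa);
}

int main(void)
{
        uint64_t irte_addr = 0xfffffffff4080000ULL;     /* invented sample value */
        uint32_t hpa = 0xf4080000u;

        printf("match: %d\n", irte_matches(irte_addr, hpa));
        return 0;
}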
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 56dd83a45e55..5484a46dafda 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c | |||
| @@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port) | |||
| 213 | struct pardevice *parport_open(int devnum, const char *name) | 213 | struct pardevice *parport_open(int devnum, const char *name) |
| 214 | { | 214 | { |
| 215 | struct daisydev *p = topology; | 215 | struct daisydev *p = topology; |
| 216 | struct pardev_cb par_cb; | ||
| 217 | struct parport *port; | 216 | struct parport *port; |
| 218 | struct pardevice *dev; | 217 | struct pardevice *dev; |
| 219 | int daisy; | 218 | int daisy; |
| 220 | 219 | ||
| 221 | memset(&par_cb, 0, sizeof(par_cb)); | ||
| 222 | spin_lock(&topology_lock); | 220 | spin_lock(&topology_lock); |
| 223 | while (p && p->devnum != devnum) | 221 | while (p && p->devnum != devnum) |
| 224 | p = p->next; | 222 | p = p->next; |
| @@ -232,7 +230,7 @@ struct pardevice *parport_open(int devnum, const char *name) | |||
| 232 | port = parport_get_port(p->port); | 230 | port = parport_get_port(p->port); |
| 233 | spin_unlock(&topology_lock); | 231 | spin_unlock(&topology_lock); |
| 234 | 232 | ||
| 235 | dev = parport_register_dev_model(port, name, &par_cb, devnum); | 233 | dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL); |
| 236 | parport_put_port(port); | 234 | parport_put_port(port); |
| 237 | if (!dev) | 235 | if (!dev) |
| 238 | return NULL; | 236 | return NULL; |
| @@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port) | |||
| 482 | kfree(deviceid); | 480 | kfree(deviceid); |
| 483 | return detected; | 481 | return detected; |
| 484 | } | 482 | } |
| 485 | |||
| 486 | static int daisy_drv_probe(struct pardevice *par_dev) | ||
| 487 | { | ||
| 488 | struct device_driver *drv = par_dev->dev.driver; | ||
| 489 | |||
| 490 | if (strcmp(drv->name, "daisy_drv")) | ||
| 491 | return -ENODEV; | ||
| 492 | if (strcmp(par_dev->name, daisy_dev_name)) | ||
| 493 | return -ENODEV; | ||
| 494 | |||
| 495 | return 0; | ||
| 496 | } | ||
| 497 | |||
| 498 | static struct parport_driver daisy_driver = { | ||
| 499 | .name = "daisy_drv", | ||
| 500 | .probe = daisy_drv_probe, | ||
| 501 | .devmodel = true, | ||
| 502 | }; | ||
| 503 | |||
| 504 | int daisy_drv_init(void) | ||
| 505 | { | ||
| 506 | return parport_register_driver(&daisy_driver); | ||
| 507 | } | ||
| 508 | |||
| 509 | void daisy_drv_exit(void) | ||
| 510 | { | ||
| 511 | parport_unregister_driver(&daisy_driver); | ||
| 512 | } | ||
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index e5e6a463a941..e035174ba205 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c | |||
| @@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer, | |||
| 257 | ssize_t parport_device_id (int devnum, char *buffer, size_t count) | 257 | ssize_t parport_device_id (int devnum, char *buffer, size_t count) |
| 258 | { | 258 | { |
| 259 | ssize_t retval = -ENXIO; | 259 | ssize_t retval = -ENXIO; |
| 260 | struct pardevice *dev = parport_open(devnum, daisy_dev_name); | 260 | struct pardevice *dev = parport_open (devnum, "Device ID probe"); |
| 261 | if (!dev) | 261 | if (!dev) |
| 262 | return -ENXIO; | 262 | return -ENXIO; |
| 263 | 263 | ||
diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 0171b8dbcdcd..5dc53d420ca8 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c | |||
| @@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = { | |||
| 137 | 137 | ||
| 138 | int parport_bus_init(void) | 138 | int parport_bus_init(void) |
| 139 | { | 139 | { |
| 140 | int retval; | 140 | return bus_register(&parport_bus_type); |
| 141 | |||
| 142 | retval = bus_register(&parport_bus_type); | ||
| 143 | if (retval) | ||
| 144 | return retval; | ||
| 145 | daisy_drv_init(); | ||
| 146 | |||
| 147 | return 0; | ||
| 148 | } | 141 | } |
| 149 | 142 | ||
| 150 | void parport_bus_exit(void) | 143 | void parport_bus_exit(void) |
| 151 | { | 144 | { |
| 152 | daisy_drv_exit(); | ||
| 153 | bus_unregister(&parport_bus_type); | 145 | bus_unregister(&parport_bus_type); |
| 154 | } | 146 | } |
| 155 | 147 | ||
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 224d88634115..d994839a3e24 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
| @@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); | |||
| 273 | u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, | 273 | u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, |
| 274 | enum pcie_link_width *width); | 274 | enum pcie_link_width *width); |
| 275 | void __pcie_print_link_status(struct pci_dev *dev, bool verbose); | 275 | void __pcie_print_link_status(struct pci_dev *dev, bool verbose); |
| 276 | void pcie_report_downtraining(struct pci_dev *dev); | ||
| 276 | 277 | ||
| 277 | /* Single Root I/O Virtualization */ | 278 | /* Single Root I/O Virtualization */ |
| 278 | struct pci_sriov { | 279 | struct pci_sriov { |
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c index d2eae3b7cc0f..4fa9e3523ee1 100644 --- a/drivers/pci/pcie/bw_notification.c +++ b/drivers/pci/pcie/bw_notification.c | |||
| @@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev) | |||
| 30 | { | 30 | { |
| 31 | u16 lnk_ctl; | 31 | u16 lnk_ctl; |
| 32 | 32 | ||
| 33 | pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS); | ||
| 34 | |||
| 33 | pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); | 35 | pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); |
| 34 | lnk_ctl |= PCI_EXP_LNKCTL_LBMIE; | 36 | lnk_ctl |= PCI_EXP_LNKCTL_LBMIE; |
| 35 | pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); | 37 | pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); |
| @@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev) | |||
| 44 | pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); | 46 | pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); |
| 45 | } | 47 | } |
| 46 | 48 | ||
| 47 | static irqreturn_t pcie_bw_notification_handler(int irq, void *context) | 49 | static irqreturn_t pcie_bw_notification_irq(int irq, void *context) |
| 48 | { | 50 | { |
| 49 | struct pcie_device *srv = context; | 51 | struct pcie_device *srv = context; |
| 50 | struct pci_dev *port = srv->port; | 52 | struct pci_dev *port = srv->port; |
| 51 | struct pci_dev *dev; | ||
| 52 | u16 link_status, events; | 53 | u16 link_status, events; |
| 53 | int ret; | 54 | int ret; |
| 54 | 55 | ||
| @@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context) | |||
| 58 | if (ret != PCIBIOS_SUCCESSFUL || !events) | 59 | if (ret != PCIBIOS_SUCCESSFUL || !events) |
| 59 | return IRQ_NONE; | 60 | return IRQ_NONE; |
| 60 | 61 | ||
| 62 | pcie_capability_write_word(port, PCI_EXP_LNKSTA, events); | ||
| 63 | pcie_update_link_speed(port->subordinate, link_status); | ||
| 64 | return IRQ_WAKE_THREAD; | ||
| 65 | } | ||
| 66 | |||
| 67 | static irqreturn_t pcie_bw_notification_handler(int irq, void *context) | ||
| 68 | { | ||
| 69 | struct pcie_device *srv = context; | ||
| 70 | struct pci_dev *port = srv->port; | ||
| 71 | struct pci_dev *dev; | ||
| 72 | |||
| 61 | /* | 73 | /* |
| 62 | * Print status from downstream devices, not this root port or | 74 | * Print status from downstream devices, not this root port or |
| 63 | * downstream switch port. | 75 | * downstream switch port. |
| 64 | */ | 76 | */ |
| 65 | down_read(&pci_bus_sem); | 77 | down_read(&pci_bus_sem); |
| 66 | list_for_each_entry(dev, &port->subordinate->devices, bus_list) | 78 | list_for_each_entry(dev, &port->subordinate->devices, bus_list) |
| 67 | __pcie_print_link_status(dev, false); | 79 | pcie_report_downtraining(dev); |
| 68 | up_read(&pci_bus_sem); | 80 | up_read(&pci_bus_sem); |
| 69 | 81 | ||
| 70 | pcie_update_link_speed(port->subordinate, link_status); | ||
| 71 | pcie_capability_write_word(port, PCI_EXP_LNKSTA, events); | ||
| 72 | return IRQ_HANDLED; | 82 | return IRQ_HANDLED; |
| 73 | } | 83 | } |
| 74 | 84 | ||
| @@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv) | |||
| 80 | if (!pcie_link_bandwidth_notification_supported(srv->port)) | 90 | if (!pcie_link_bandwidth_notification_supported(srv->port)) |
| 81 | return -ENODEV; | 91 | return -ENODEV; |
| 82 | 92 | ||
| 83 | ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler, | 93 | ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq, |
| 94 | pcie_bw_notification_handler, | ||
| 84 | IRQF_SHARED, "PCIe BW notif", srv); | 95 | IRQF_SHARED, "PCIe BW notif", srv); |
| 85 | if (ret) | 96 | if (ret) |
| 86 | return ret; | 97 | return ret; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 2ec0df04e0dc..7e12d0163863 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) | |||
| 2388 | return dev; | 2388 | return dev; |
| 2389 | } | 2389 | } |
| 2390 | 2390 | ||
| 2391 | static void pcie_report_downtraining(struct pci_dev *dev) | 2391 | void pcie_report_downtraining(struct pci_dev *dev) |
| 2392 | { | 2392 | { |
| 2393 | if (!pci_is_pcie(dev)) | 2393 | if (!pci_is_pcie(dev)) |
| 2394 | return; | 2394 | return; |
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 5163097b43df..4bbd9ede38c8 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c | |||
| @@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, | |||
| 485 | struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); | 485 | struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); |
| 486 | int new_mode; | 486 | int new_mode; |
| 487 | 487 | ||
| 488 | if (phy->index != 0) | 488 | if (phy->index != 0) { |
| 489 | if (mode == PHY_MODE_USB_HOST) | ||
| 490 | return 0; | ||
| 489 | return -EINVAL; | 491 | return -EINVAL; |
| 492 | } | ||
| 490 | 493 | ||
| 491 | switch (mode) { | 494 | switch (mode) { |
| 492 | case PHY_MODE_USB_HOST: | 495 | case PHY_MODE_USB_HOST: |
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c index 91751617b37a..c53a2185a039 100644 --- a/drivers/reset/reset-meson-audio-arb.c +++ b/drivers/reset/reset-meson-audio-arb.c | |||
| @@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev) | |||
| 130 | arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits); | 130 | arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits); |
| 131 | arb->rstc.ops = &meson_audio_arb_rstc_ops; | 131 | arb->rstc.ops = &meson_audio_arb_rstc_ops; |
| 132 | arb->rstc.of_node = dev->of_node; | 132 | arb->rstc.of_node = dev->of_node; |
| 133 | arb->rstc.owner = THIS_MODULE; | ||
| 133 | 134 | ||
| 134 | /* | 135 | /* |
| 135 | * Enable general : | 136 | * Enable general : |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index a71734c41693..f933c06bff4f 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
| @@ -667,9 +667,9 @@ config RTC_DRV_S5M | |||
| 667 | will be called rtc-s5m. | 667 | will be called rtc-s5m. |
| 668 | 668 | ||
| 669 | config RTC_DRV_SD3078 | 669 | config RTC_DRV_SD3078 |
| 670 | tristate "ZXW Crystal SD3078" | 670 | tristate "ZXW Shenzhen whwave SD3078" |
| 671 | help | 671 | help |
| 672 | If you say yes here you get support for the ZXW Crystal | 672 | If you say yes here you get support for the ZXW Shenzhen whwave |
| 673 | SD3078 RTC chips. | 673 | SD3078 RTC chips. |
| 674 | 674 | ||
| 675 | This driver can also be built as a module. If so, the module | 675 | This driver can also be built as a module. If so, the module |
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c index e5444296075e..4d6bf9304ceb 100644 --- a/drivers/rtc/rtc-cros-ec.c +++ b/drivers/rtc/rtc-cros-ec.c | |||
| @@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev) | |||
| 298 | struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); | 298 | struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); |
| 299 | 299 | ||
| 300 | if (device_may_wakeup(dev)) | 300 | if (device_may_wakeup(dev)) |
| 301 | enable_irq_wake(cros_ec_rtc->cros_ec->irq); | 301 | return enable_irq_wake(cros_ec_rtc->cros_ec->irq); |
| 302 | 302 | ||
| 303 | return 0; | 303 | return 0; |
| 304 | } | 304 | } |
| @@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev) | |||
| 309 | struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); | 309 | struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); |
| 310 | 310 | ||
| 311 | if (device_may_wakeup(dev)) | 311 | if (device_may_wakeup(dev)) |
| 312 | disable_irq_wake(cros_ec_rtc->cros_ec->irq); | 312 | return disable_irq_wake(cros_ec_rtc->cros_ec->irq); |
| 313 | 313 | ||
| 314 | return 0; | 314 | return 0; |
| 315 | } | 315 | } |
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index b4e054c64bad..69b54e5556c0 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c | |||
| @@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev) | |||
| 480 | da9063_data_to_tm(data, &rtc->alarm_time, rtc); | 480 | da9063_data_to_tm(data, &rtc->alarm_time, rtc); |
| 481 | rtc->rtc_sync = false; | 481 | rtc->rtc_sync = false; |
| 482 | 482 | ||
| 483 | /* | ||
| 484 | * TODO: some models have alarms on a minute boundary but still support | ||
| 485 | * real hardware interrupts. Add this once the core supports it. | ||
| 486 | */ | ||
| 487 | if (config->rtc_data_start != RTC_SEC) | ||
| 488 | rtc->rtc_dev->uie_unsupported = 1; | ||
| 489 | |||
| 483 | irq_alarm = platform_get_irq_byname(pdev, "ALARM"); | 490 | irq_alarm = platform_get_irq_byname(pdev, "ALARM"); |
| 484 | ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, | 491 | ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, |
| 485 | da9063_alarm_event, | 492 | da9063_alarm_event, |
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index d417b203cbc5..1d3de2a3d1a4 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
| @@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
| 374 | static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) | 374 | static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) |
| 375 | { | 375 | { |
| 376 | unsigned int byte; | 376 | unsigned int byte; |
| 377 | int value = 0xff; /* return 0xff for ignored values */ | 377 | int value = -1; /* return -1 for ignored values */ |
| 378 | 378 | ||
| 379 | byte = readb(rtc->regbase + reg_off); | 379 | byte = readb(rtc->regbase + reg_off); |
| 380 | if (byte & AR_ENB) { | 380 | if (byte & AR_ENB) { |
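The rtc-sh change swaps the sentinel for an ignored alarm field from 0xff to -1; decoded BCD values fall in 0..99, so a negative sentinel can never collide with a real value, whereas 0xff could. A short sketch of the idea; the helper name is made up.

#include <stdio.h>

/* decode one two-digit BCD alarm field; -1 means "field not enabled" and can
 * never collide with a decoded value, which always falls in 0..99 */
static int decode_alarm_field(unsigned char byte, int enabled)
{
        if (!enabled)
                return -1;
        return (byte >> 4) * 10 + (byte & 0x0f);
}

int main(void)
{
        printf("%d %d\n", decode_alarm_field(0x59, 1), decode_alarm_field(0x59, 0));
        return 0;
}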
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 4159c63a5fd2..a835b31aad99 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <asm/crw.h> | 24 | #include <asm/crw.h> |
| 25 | #include <asm/isc.h> | 25 | #include <asm/isc.h> |
| 26 | #include <asm/ebcdic.h> | 26 | #include <asm/ebcdic.h> |
| 27 | #include <asm/ap.h> | ||
| 27 | 28 | ||
| 28 | #include "css.h" | 29 | #include "css.h" |
| 29 | #include "cio.h" | 30 | #include "cio.h" |
| @@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) | |||
| 586 | " failed (rc=%d).\n", ret); | 587 | " failed (rc=%d).\n", ret); |
| 587 | } | 588 | } |
| 588 | 589 | ||
| 590 | static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area) | ||
| 591 | { | ||
| 592 | CIO_CRW_EVENT(3, "chsc: ap config changed\n"); | ||
| 593 | if (sei_area->rs != 5) | ||
| 594 | return; | ||
| 595 | |||
| 596 | ap_bus_cfg_chg(); | ||
| 597 | } | ||
| 598 | |||
| 589 | static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) | 599 | static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) |
| 590 | { | 600 | { |
| 591 | switch (sei_area->cc) { | 601 | switch (sei_area->cc) { |
| @@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) | |||
| 612 | case 2: /* i/o resource accessibility */ | 622 | case 2: /* i/o resource accessibility */ |
| 613 | chsc_process_sei_res_acc(sei_area); | 623 | chsc_process_sei_res_acc(sei_area); |
| 614 | break; | 624 | break; |
| 625 | case 3: /* ap config changed */ | ||
| 626 | chsc_process_sei_ap_cfg_chg(sei_area); | ||
| 627 | break; | ||
| 615 | case 7: /* channel-path-availability information */ | 628 | case 7: /* channel-path-availability information */ |
| 616 | chsc_process_sei_chp_avail(sei_area); | 629 | chsc_process_sei_chp_avail(sei_area); |
| 617 | break; | 630 | break; |
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index a10cec0e86eb..0b3b9de45c60 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c | |||
| @@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) | |||
| 72 | { | 72 | { |
| 73 | struct vfio_ccw_private *private; | 73 | struct vfio_ccw_private *private; |
| 74 | struct irb *irb; | 74 | struct irb *irb; |
| 75 | bool is_final; | ||
| 75 | 76 | ||
| 76 | private = container_of(work, struct vfio_ccw_private, io_work); | 77 | private = container_of(work, struct vfio_ccw_private, io_work); |
| 77 | irb = &private->irb; | 78 | irb = &private->irb; |
| 78 | 79 | ||
| 80 | is_final = !(scsw_actl(&irb->scsw) & | ||
| 81 | (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); | ||
| 79 | if (scsw_is_solicited(&irb->scsw)) { | 82 | if (scsw_is_solicited(&irb->scsw)) { |
| 80 | cp_update_scsw(&private->cp, &irb->scsw); | 83 | cp_update_scsw(&private->cp, &irb->scsw); |
| 81 | cp_free(&private->cp); | 84 | if (is_final) |
| 85 | cp_free(&private->cp); | ||
| 82 | } | 86 | } |
| 83 | memcpy(private->io_region->irb_area, irb, sizeof(*irb)); | 87 | memcpy(private->io_region->irb_area, irb, sizeof(*irb)); |
| 84 | 88 | ||
| 85 | if (private->io_trigger) | 89 | if (private->io_trigger) |
| 86 | eventfd_signal(private->io_trigger, 1); | 90 | eventfd_signal(private->io_trigger, 1); |
| 87 | 91 | ||
| 88 | if (private->mdev) | 92 | if (private->mdev && is_final) |
| 89 | private->state = VFIO_CCW_STATE_IDLE; | 93 | private->state = VFIO_CCW_STATE_IDLE; |
| 90 | } | 94 | } |
| 91 | 95 | ||
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index e15816ff1265..1546389d71db 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
| @@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev) | |||
| 810 | struct ap_device *ap_dev = to_ap_dev(dev); | 810 | struct ap_device *ap_dev = to_ap_dev(dev); |
| 811 | struct ap_driver *ap_drv = ap_dev->drv; | 811 | struct ap_driver *ap_drv = ap_dev->drv; |
| 812 | 812 | ||
| 813 | /* prepare ap queue device removal */ | ||
| 813 | if (is_queue_dev(dev)) | 814 | if (is_queue_dev(dev)) |
| 814 | ap_queue_remove(to_ap_queue(dev)); | 815 | ap_queue_prepare_remove(to_ap_queue(dev)); |
| 816 | |||
| 817 | /* driver's chance to clean up gracefully */ | ||
| 815 | if (ap_drv->remove) | 818 | if (ap_drv->remove) |
| 816 | ap_drv->remove(ap_dev); | 819 | ap_drv->remove(ap_dev); |
| 817 | 820 | ||
| 821 | /* now do the ap queue device remove */ | ||
| 822 | if (is_queue_dev(dev)) | ||
| 823 | ap_queue_remove(to_ap_queue(dev)); | ||
| 824 | |||
| 818 | /* Remove queue/card from list of active queues/cards */ | 825 | /* Remove queue/card from list of active queues/cards */ |
| 819 | spin_lock_bh(&ap_list_lock); | 826 | spin_lock_bh(&ap_list_lock); |
| 820 | if (is_card_dev(dev)) | 827 | if (is_card_dev(dev)) |
| @@ -861,6 +868,16 @@ void ap_bus_force_rescan(void) | |||
| 861 | EXPORT_SYMBOL(ap_bus_force_rescan); | 868 | EXPORT_SYMBOL(ap_bus_force_rescan); |
| 862 | 869 | ||
| 863 | /* | 870 | /* |
| 871 | * A config change has happened, force an ap bus rescan. | ||
| 872 | */ | ||
| 873 | void ap_bus_cfg_chg(void) | ||
| 874 | { | ||
| 875 | AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__); | ||
| 876 | |||
| 877 | ap_bus_force_rescan(); | ||
| 878 | } | ||
| 879 | |||
| 880 | /* | ||
| 864 | * hex2bitmap() - parse hex mask string and set bitmap. | 881 | * hex2bitmap() - parse hex mask string and set bitmap. |
| 865 | * Valid strings are "0x012345678" with at least one valid hex number. | 882 | * Valid strings are "0x012345678" with at least one valid hex number. |
| 866 | * Rest of the bitmap to the right is padded with 0. No spaces allowed | 883 | * Rest of the bitmap to the right is padded with 0. No spaces allowed |
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index d0059eae5d94..15a98a673c5c 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h | |||
| @@ -91,6 +91,7 @@ enum ap_state { | |||
| 91 | AP_STATE_WORKING, | 91 | AP_STATE_WORKING, |
| 92 | AP_STATE_QUEUE_FULL, | 92 | AP_STATE_QUEUE_FULL, |
| 93 | AP_STATE_SUSPEND_WAIT, | 93 | AP_STATE_SUSPEND_WAIT, |
| 94 | AP_STATE_REMOVE, /* about to be removed from driver */ | ||
| 94 | AP_STATE_UNBOUND, /* momentarily not bound to a driver */ | 95 | AP_STATE_UNBOUND, /* momentarily not bound to a driver */ |
| 95 | AP_STATE_BORKED, /* broken */ | 96 | AP_STATE_BORKED, /* broken */ |
| 96 | NR_AP_STATES | 97 | NR_AP_STATES |
| @@ -252,6 +253,7 @@ void ap_bus_force_rescan(void); | |||
| 252 | 253 | ||
| 253 | void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); | 254 | void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); |
| 254 | struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); | 255 | struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); |
| 256 | void ap_queue_prepare_remove(struct ap_queue *aq); | ||
| 255 | void ap_queue_remove(struct ap_queue *aq); | 257 | void ap_queue_remove(struct ap_queue *aq); |
| 256 | void ap_queue_suspend(struct ap_device *ap_dev); | 258 | void ap_queue_suspend(struct ap_device *ap_dev); |
| 257 | void ap_queue_resume(struct ap_device *ap_dev); | 259 | void ap_queue_resume(struct ap_device *ap_dev); |
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index ba261210c6da..6a340f2c3556 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c | |||
| @@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { | |||
| 420 | [AP_EVENT_POLL] = ap_sm_suspend_read, | 420 | [AP_EVENT_POLL] = ap_sm_suspend_read, |
| 421 | [AP_EVENT_TIMEOUT] = ap_sm_nop, | 421 | [AP_EVENT_TIMEOUT] = ap_sm_nop, |
| 422 | }, | 422 | }, |
| 423 | [AP_STATE_REMOVE] = { | ||
| 424 | [AP_EVENT_POLL] = ap_sm_nop, | ||
| 425 | [AP_EVENT_TIMEOUT] = ap_sm_nop, | ||
| 426 | }, | ||
| 423 | [AP_STATE_UNBOUND] = { | 427 | [AP_STATE_UNBOUND] = { |
| 424 | [AP_EVENT_POLL] = ap_sm_nop, | 428 | [AP_EVENT_POLL] = ap_sm_nop, |
| 425 | [AP_EVENT_TIMEOUT] = ap_sm_nop, | 429 | [AP_EVENT_TIMEOUT] = ap_sm_nop, |
| @@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq) | |||
| 740 | } | 744 | } |
| 741 | EXPORT_SYMBOL(ap_flush_queue); | 745 | EXPORT_SYMBOL(ap_flush_queue); |
| 742 | 746 | ||
| 743 | void ap_queue_remove(struct ap_queue *aq) | 747 | void ap_queue_prepare_remove(struct ap_queue *aq) |
| 744 | { | 748 | { |
| 745 | ap_flush_queue(aq); | 749 | spin_lock_bh(&aq->lock); |
| 750 | /* flush queue */ | ||
| 751 | __ap_flush_queue(aq); | ||
| 752 | /* set REMOVE state to prevent new messages from being queued */ ||
| 753 | aq->state = AP_STATE_REMOVE; | ||
| 746 | del_timer_sync(&aq->timeout); | 754 | del_timer_sync(&aq->timeout); |
| 755 | spin_unlock_bh(&aq->lock); | ||
| 756 | } | ||
| 747 | 757 | ||
| 748 | /* reset with zero, also clears irq registration */ | 758 | void ap_queue_remove(struct ap_queue *aq) |
| 759 | { | ||
| 760 | /* | ||
| 761 | * all messages have been flushed and the state is | ||
| 762 | * AP_STATE_REMOVE. Now reset with zero which also | ||
| 763 | * clears the irq registration and move the state | ||
| 764 | * to AP_STATE_UNBOUND to signal that this queue | ||
| 765 | * is not used by any driver currently. | ||
| 766 | */ | ||
| 749 | spin_lock_bh(&aq->lock); | 767 | spin_lock_bh(&aq->lock); |
| 750 | ap_zapq(aq->qid); | 768 | ap_zapq(aq->qid); |
| 751 | aq->state = AP_STATE_UNBOUND; | 769 | aq->state = AP_STATE_UNBOUND; |
| 752 | spin_unlock_bh(&aq->lock); | 770 | spin_unlock_bh(&aq->lock); |
| 753 | } | 771 | } |
| 754 | EXPORT_SYMBOL(ap_queue_remove); | ||
| 755 | 772 | ||
| 756 | void ap_queue_reinit_state(struct ap_queue *aq) | 773 | void ap_queue_reinit_state(struct ap_queue *aq) |
| 757 | { | 774 | { |
| @@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq) | |||
| 760 | ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); | 777 | ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); |
| 761 | spin_unlock_bh(&aq->lock); | 778 | spin_unlock_bh(&aq->lock); |
| 762 | } | 779 | } |
| 763 | EXPORT_SYMBOL(ap_queue_reinit_state); | ||
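The ap_queue.c rework splits removal into a prepare step (flush the queue, enter a REMOVE state that accepts no new messages, stop the timeout timer) and the final remove (reset and mark the queue UNBOUND), with the driver's own remove callback running in between as the ap_bus.c hunk above arranges. A simplified userspace sketch of such a two-phase teardown; all names are invented and a pthread mutex stands in for the queue lock.

#include <pthread.h>
#include <stdio.h>

enum q_state { Q_WORKING, Q_REMOVE, Q_UNBOUND };

struct queue {
        pthread_mutex_t lock;
        enum q_state    state;
        int             pending;        /* queued messages */
};

/* phase 1: flush and refuse new work, but leave the resources alone */
static void queue_prepare_remove(struct queue *q)
{
        pthread_mutex_lock(&q->lock);
        q->pending = 0;                 /* flush */
        q->state = Q_REMOVE;            /* reject enqueue attempts from now on */
        pthread_mutex_unlock(&q->lock);
}

/* phase 2: after the driver had its chance to clean up, release the queue */
static void queue_remove(struct queue *q)
{
        pthread_mutex_lock(&q->lock);
        q->state = Q_UNBOUND;           /* reset done, no driver owns the queue */
        pthread_mutex_unlock(&q->lock);
}

static int queue_enqueue(struct queue *q)
{
        int ok;

        pthread_mutex_lock(&q->lock);
        ok = (q->state == Q_WORKING);
        if (ok)
                q->pending++;
        pthread_mutex_unlock(&q->lock);
        return ok;
}

int main(void)
{
        struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .state = Q_WORKING };

        printf("enqueue while working:  %d\n", queue_enqueue(&q));   /* 1 */
        queue_prepare_remove(&q);
        printf("enqueue while removing: %d\n", queue_enqueue(&q));   /* 0 */
        /* the driver's own remove callback would run here */
        queue_remove(&q);
        return 0;
}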
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index eb93c2d27d0a..689c2af7026a 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c | |||
| @@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue) | |||
| 586 | 586 | ||
| 587 | static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, | 587 | static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, |
| 588 | struct zcrypt_queue *zq, | 588 | struct zcrypt_queue *zq, |
| 589 | struct module **pmod, | ||
| 589 | unsigned int weight) | 590 | unsigned int weight) |
| 590 | { | 591 | { |
| 591 | if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) | 592 | if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) |
| @@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, | |||
| 595 | atomic_add(weight, &zc->load); | 596 | atomic_add(weight, &zc->load); |
| 596 | atomic_add(weight, &zq->load); | 597 | atomic_add(weight, &zq->load); |
| 597 | zq->request_count++; | 598 | zq->request_count++; |
| 599 | *pmod = zq->queue->ap_dev.drv->driver.owner; | ||
| 598 | return zq; | 600 | return zq; |
| 599 | } | 601 | } |
| 600 | 602 | ||
| 601 | static inline void zcrypt_drop_queue(struct zcrypt_card *zc, | 603 | static inline void zcrypt_drop_queue(struct zcrypt_card *zc, |
| 602 | struct zcrypt_queue *zq, | 604 | struct zcrypt_queue *zq, |
| 605 | struct module *mod, | ||
| 603 | unsigned int weight) | 606 | unsigned int weight) |
| 604 | { | 607 | { |
| 605 | struct module *mod = zq->queue->ap_dev.drv->driver.owner; | ||
| 606 | |||
| 607 | zq->request_count--; | 608 | zq->request_count--; |
| 608 | atomic_sub(weight, &zc->load); | 609 | atomic_sub(weight, &zc->load); |
| 609 | atomic_sub(weight, &zq->load); | 610 | atomic_sub(weight, &zq->load); |
| @@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, | |||
| 653 | unsigned int weight, pref_weight; | 654 | unsigned int weight, pref_weight; |
| 654 | unsigned int func_code; | 655 | unsigned int func_code; |
| 655 | int qid = 0, rc = -ENODEV; | 656 | int qid = 0, rc = -ENODEV; |
| 657 | struct module *mod; | ||
| 656 | 658 | ||
| 657 | trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); | 659 | trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); |
| 658 | 660 | ||
| @@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, | |||
| 706 | pref_weight = weight; | 708 | pref_weight = weight; |
| 707 | } | 709 | } |
| 708 | } | 710 | } |
| 709 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); | 711 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); |
| 710 | spin_unlock(&zcrypt_list_lock); | 712 | spin_unlock(&zcrypt_list_lock); |
| 711 | 713 | ||
| 712 | if (!pref_zq) { | 714 | if (!pref_zq) { |
| @@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, | |||
| 718 | rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); | 720 | rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); |
| 719 | 721 | ||
| 720 | spin_lock(&zcrypt_list_lock); | 722 | spin_lock(&zcrypt_list_lock); |
| 721 | zcrypt_drop_queue(pref_zc, pref_zq, weight); | 723 | zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); |
| 722 | spin_unlock(&zcrypt_list_lock); | 724 | spin_unlock(&zcrypt_list_lock); |
| 723 | 725 | ||
| 724 | out: | 726 | out: |
| @@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, | |||
| 735 | unsigned int weight, pref_weight; | 737 | unsigned int weight, pref_weight; |
| 736 | unsigned int func_code; | 738 | unsigned int func_code; |
| 737 | int qid = 0, rc = -ENODEV; | 739 | int qid = 0, rc = -ENODEV; |
| 740 | struct module *mod; | ||
| 738 | 741 | ||
| 739 | trace_s390_zcrypt_req(crt, TP_ICARSACRT); | 742 | trace_s390_zcrypt_req(crt, TP_ICARSACRT); |
| 740 | 743 | ||
| @@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, | |||
| 788 | pref_weight = weight; | 791 | pref_weight = weight; |
| 789 | } | 792 | } |
| 790 | } | 793 | } |
| 791 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); | 794 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); |
| 792 | spin_unlock(&zcrypt_list_lock); | 795 | spin_unlock(&zcrypt_list_lock); |
| 793 | 796 | ||
| 794 | if (!pref_zq) { | 797 | if (!pref_zq) { |
| @@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, | |||
| 800 | rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); | 803 | rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); |
| 801 | 804 | ||
| 802 | spin_lock(&zcrypt_list_lock); | 805 | spin_lock(&zcrypt_list_lock); |
| 803 | zcrypt_drop_queue(pref_zc, pref_zq, weight); | 806 | zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); |
| 804 | spin_unlock(&zcrypt_list_lock); | 807 | spin_unlock(&zcrypt_list_lock); |
| 805 | 808 | ||
| 806 | out: | 809 | out: |
| @@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, | |||
| 819 | unsigned int func_code; | 822 | unsigned int func_code; |
| 820 | unsigned short *domain; | 823 | unsigned short *domain; |
| 821 | int qid = 0, rc = -ENODEV; | 824 | int qid = 0, rc = -ENODEV; |
| 825 | struct module *mod; | ||
| 822 | 826 | ||
| 823 | trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); | 827 | trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); |
| 824 | 828 | ||
| @@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, | |||
| 865 | pref_weight = weight; | 869 | pref_weight = weight; |
| 866 | } | 870 | } |
| 867 | } | 871 | } |
| 868 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); | 872 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); |
| 869 | spin_unlock(&zcrypt_list_lock); | 873 | spin_unlock(&zcrypt_list_lock); |
| 870 | 874 | ||
| 871 | if (!pref_zq) { | 875 | if (!pref_zq) { |
| @@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, | |||
| 881 | rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); | 885 | rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); |
| 882 | 886 | ||
| 883 | spin_lock(&zcrypt_list_lock); | 887 | spin_lock(&zcrypt_list_lock); |
| 884 | zcrypt_drop_queue(pref_zc, pref_zq, weight); | 888 | zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); |
| 885 | spin_unlock(&zcrypt_list_lock); | 889 | spin_unlock(&zcrypt_list_lock); |
| 886 | 890 | ||
| 887 | out: | 891 | out: |
| @@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms, | |||
| 932 | unsigned int func_code; | 936 | unsigned int func_code; |
| 933 | struct ap_message ap_msg; | 937 | struct ap_message ap_msg; |
| 934 | int qid = 0, rc = -ENODEV; | 938 | int qid = 0, rc = -ENODEV; |
| 939 | struct module *mod; | ||
| 935 | 940 | ||
| 936 | trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); | 941 | trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); |
| 937 | 942 | ||
| @@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms, | |||
| 1000 | pref_weight = weight; | 1005 | pref_weight = weight; |
| 1001 | } | 1006 | } |
| 1002 | } | 1007 | } |
| 1003 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); | 1008 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); |
| 1004 | spin_unlock(&zcrypt_list_lock); | 1009 | spin_unlock(&zcrypt_list_lock); |
| 1005 | 1010 | ||
| 1006 | if (!pref_zq) { | 1011 | if (!pref_zq) { |
| @@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms, | |||
| 1012 | rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); | 1017 | rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); |
| 1013 | 1018 | ||
| 1014 | spin_lock(&zcrypt_list_lock); | 1019 | spin_lock(&zcrypt_list_lock); |
| 1015 | zcrypt_drop_queue(pref_zc, pref_zq, weight); | 1020 | zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); |
| 1016 | spin_unlock(&zcrypt_list_lock); | 1021 | spin_unlock(&zcrypt_list_lock); |
| 1017 | 1022 | ||
| 1018 | out_free: | 1023 | out_free: |
| @@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer) | |||
| 1033 | struct ap_message ap_msg; | 1038 | struct ap_message ap_msg; |
| 1034 | unsigned int domain; | 1039 | unsigned int domain; |
| 1035 | int qid = 0, rc = -ENODEV; | 1040 | int qid = 0, rc = -ENODEV; |
| 1041 | struct module *mod; | ||
| 1036 | 1042 | ||
| 1037 | trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); | 1043 | trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); |
| 1038 | 1044 | ||
| @@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer) | |||
| 1064 | pref_weight = weight; | 1070 | pref_weight = weight; |
| 1065 | } | 1071 | } |
| 1066 | } | 1072 | } |
| 1067 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); | 1073 | pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); |
| 1068 | spin_unlock(&zcrypt_list_lock); | 1074 | spin_unlock(&zcrypt_list_lock); |
| 1069 | 1075 | ||
| 1070 | if (!pref_zq) { | 1076 | if (!pref_zq) { |
| @@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer) | |||
| 1076 | rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); | 1082 | rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); |
| 1077 | 1083 | ||
| 1078 | spin_lock(&zcrypt_list_lock); | 1084 | spin_lock(&zcrypt_list_lock); |
| 1079 | zcrypt_drop_queue(pref_zc, pref_zq, weight); | 1085 | zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); |
| 1080 | spin_unlock(&zcrypt_list_lock); | 1086 | spin_unlock(&zcrypt_list_lock); |
| 1081 | 1087 | ||
| 1082 | out: | 1088 | out: |
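
Note: the zcrypt_api.c changes make zcrypt_pick_queue() return, via the new *pmod argument, the module owner it took a reference on, and zcrypt_drop_queue() releases exactly that saved pointer instead of re-reading zq->queue->ap_dev.drv->driver.owner, which may no longer be safe to dereference by the time the request completes. A hedged sketch of the same pattern with invented types (not the zcrypt API):

    #include <linux/module.h>

    struct my_queue {
            struct module *owner;   /* module backing this queue's driver */
    };

    /* Pick: take the reference and remember what it was taken on. */
    static struct my_queue *my_pick(struct my_queue *q, struct module **pmod)
    {
            if (!q || !try_module_get(q->owner))
                    return NULL;
            *pmod = q->owner;       /* safe to use later, even if q is torn down */
            return q;
    }

    /* Drop: put the saved reference; never re-read it from q. */
    static void my_drop(struct my_queue *q, struct module *mod)
    {
            module_put(mod);
    }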
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 197b0f5b63e7..44bd6f04c145 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
| @@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, | |||
| 1150 | 1150 | ||
| 1151 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) | 1151 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) |
| 1152 | { | 1152 | { |
| 1153 | struct sk_buff *skb; | ||
| 1154 | |||
| 1153 | /* release may never happen from within CQ tasklet scope */ | 1155 | /* release may never happen from within CQ tasklet scope */ |
| 1154 | WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); | 1156 | WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); |
| 1155 | 1157 | ||
| 1156 | if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) | 1158 | if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) |
| 1157 | qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR); | 1159 | qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR); |
| 1158 | 1160 | ||
| 1159 | __skb_queue_purge(&buf->skb_list); | 1161 | while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) |
| 1162 | consume_skb(skb); | ||
| 1160 | } | 1163 | } |
| 1161 | 1164 | ||
| 1162 | static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | 1165 | static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, |
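
Note: the qeth_core_main.c hunk replaces __skb_queue_purge() with an explicit dequeue loop that calls consume_skb(). Both variants free every skb on the list, but __skb_queue_purge() frees them with kfree_skb(), which drop monitoring and tracing count as packet drops, whereas consume_skb() records a normal release. A short sketch of the idiom for buffers whose packets were handled successfully:

    #include <linux/skbuff.h>

    static void release_tx_skbs(struct sk_buff_head *list)
    {
            struct sk_buff *skb;

            /*
             * __skb_queue_purge(list) would free these via kfree_skb(),
             * which drop monitoring counts as packet drops.
             */
            while ((skb = __skb_dequeue(list)) != NULL)
                    consume_skb(skb);       /* counted as normal consumption */
    }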
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 8efb2e8ff8f4..c3067fd3bd9e 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
| @@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, | |||
| 629 | } /* else fall through */ | 629 | } /* else fall through */ |
| 630 | 630 | ||
| 631 | QETH_TXQ_STAT_INC(queue, tx_dropped); | 631 | QETH_TXQ_STAT_INC(queue, tx_dropped); |
| 632 | QETH_TXQ_STAT_INC(queue, tx_errors); | 632 | kfree_skb(skb); |
| 633 | dev_kfree_skb_any(skb); | ||
| 634 | netif_wake_queue(dev); | 633 | netif_wake_queue(dev); |
| 635 | return NETDEV_TX_OK; | 634 | return NETDEV_TX_OK; |
| 636 | } | 635 | } |
| @@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) | |||
| 645 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | 644 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); |
| 646 | int rc; | 645 | int rc; |
| 647 | 646 | ||
| 647 | qeth_l2_vnicc_set_defaults(card); | ||
| 648 | |||
| 648 | if (gdev->dev.type == &qeth_generic_devtype) { | 649 | if (gdev->dev.type == &qeth_generic_devtype) { |
| 649 | rc = qeth_l2_create_device_attributes(&gdev->dev); | 650 | rc = qeth_l2_create_device_attributes(&gdev->dev); |
| 650 | if (rc) | 651 | if (rc) |
| @@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) | |||
| 652 | } | 653 | } |
| 653 | 654 | ||
| 654 | hash_init(card->mac_htable); | 655 | hash_init(card->mac_htable); |
| 655 | card->info.hwtrap = 0; | ||
| 656 | qeth_l2_vnicc_set_defaults(card); | ||
| 657 | return 0; | 656 | return 0; |
| 658 | } | 657 | } |
| 659 | 658 | ||
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 7e68d9d16859..53712cf26406 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
| @@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, | |||
| 2096 | 2096 | ||
| 2097 | tx_drop: | 2097 | tx_drop: |
| 2098 | QETH_TXQ_STAT_INC(queue, tx_dropped); | 2098 | QETH_TXQ_STAT_INC(queue, tx_dropped); |
| 2099 | QETH_TXQ_STAT_INC(queue, tx_errors); | 2099 | kfree_skb(skb); |
| 2100 | dev_kfree_skb_any(skb); | ||
| 2101 | netif_wake_queue(dev); | 2100 | netif_wake_queue(dev); |
| 2102 | return NETDEV_TX_OK; | 2101 | return NETDEV_TX_OK; |
| 2103 | } | 2102 | } |
| @@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) | |||
| 2253 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | 2252 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); |
| 2254 | int rc; | 2253 | int rc; |
| 2255 | 2254 | ||
| 2255 | hash_init(card->ip_htable); | ||
| 2256 | |||
| 2256 | if (gdev->dev.type == &qeth_generic_devtype) { | 2257 | if (gdev->dev.type == &qeth_generic_devtype) { |
| 2257 | rc = qeth_l3_create_device_attributes(&gdev->dev); | 2258 | rc = qeth_l3_create_device_attributes(&gdev->dev); |
| 2258 | if (rc) | 2259 | if (rc) |
| 2259 | return rc; | 2260 | return rc; |
| 2260 | } | 2261 | } |
| 2261 | hash_init(card->ip_htable); | 2262 | |
| 2262 | hash_init(card->ip_mc_htable); | 2263 | hash_init(card->ip_mc_htable); |
| 2263 | card->info.hwtrap = 0; | ||
| 2264 | return 0; | 2264 | return 0; |
| 2265 | } | 2265 | } |
| 2266 | 2266 | ||
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 744a64680d5b..e8fc28dba8df 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
| @@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) | |||
| 624 | add_timer(&erp_action->timer); | 624 | add_timer(&erp_action->timer); |
| 625 | } | 625 | } |
| 626 | 626 | ||
| 627 | void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter, | ||
| 628 | int clear, char *dbftag) | ||
| 629 | { | ||
| 630 | unsigned long flags; | ||
| 631 | struct zfcp_port *port; | ||
| 632 | |||
| 633 | write_lock_irqsave(&adapter->erp_lock, flags); | ||
| 634 | read_lock(&adapter->port_list_lock); | ||
| 635 | list_for_each_entry(port, &adapter->port_list, list) | ||
| 636 | _zfcp_erp_port_forced_reopen(port, clear, dbftag); | ||
| 637 | read_unlock(&adapter->port_list_lock); | ||
| 638 | write_unlock_irqrestore(&adapter->erp_lock, flags); | ||
| 639 | } | ||
| 640 | |||
| 627 | static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, | 641 | static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, |
| 628 | int clear, char *dbftag) | 642 | int clear, char *dbftag) |
| 629 | { | 643 | { |
| @@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port) | |||
| 1341 | struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); | 1355 | struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); |
| 1342 | int lun_status; | 1356 | int lun_status; |
| 1343 | 1357 | ||
| 1358 | if (sdev->sdev_state == SDEV_DEL || | ||
| 1359 | sdev->sdev_state == SDEV_CANCEL) | ||
| 1360 | continue; | ||
| 1344 | if (zsdev->port != port) | 1361 | if (zsdev->port != port) |
| 1345 | continue; | 1362 | continue; |
| 1346 | /* LUN under port of interest */ | 1363 | /* LUN under port of interest */ |
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3fce47b0b21b..c6acca521ffe 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
| @@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, | |||
| 70 | char *dbftag); | 70 | char *dbftag); |
| 71 | extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); | 71 | extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); |
| 72 | extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); | 72 | extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); |
| 73 | extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter, | ||
| 74 | int clear, char *dbftag); | ||
| 73 | extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); | 75 | extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); |
| 74 | extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); | 76 | extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); |
| 75 | extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); | 77 | extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index db00b5e3abbe..33eddb02ee30 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
| @@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, | |||
| 239 | list_for_each_entry(port, &adapter->port_list, list) { | 239 | list_for_each_entry(port, &adapter->port_list, list) { |
| 240 | if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range)) | 240 | if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range)) |
| 241 | zfcp_fc_test_link(port); | 241 | zfcp_fc_test_link(port); |
| 242 | if (!port->d_id) | ||
| 243 | zfcp_erp_port_reopen(port, | ||
| 244 | ZFCP_STATUS_COMMON_ERP_FAILED, | ||
| 245 | "fcrscn1"); | ||
| 246 | } | 242 | } |
| 247 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 243 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
| 248 | } | 244 | } |
| @@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, | |||
| 250 | static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) | 246 | static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) |
| 251 | { | 247 | { |
| 252 | struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; | 248 | struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; |
| 249 | struct zfcp_adapter *adapter = fsf_req->adapter; | ||
| 253 | struct fc_els_rscn *head; | 250 | struct fc_els_rscn *head; |
| 254 | struct fc_els_rscn_page *page; | 251 | struct fc_els_rscn_page *page; |
| 255 | u16 i; | 252 | u16 i; |
| @@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) | |||
| 263 | no_entries = be16_to_cpu(head->rscn_plen) / | 260 | no_entries = be16_to_cpu(head->rscn_plen) / |
| 264 | sizeof(struct fc_els_rscn_page); | 261 | sizeof(struct fc_els_rscn_page); |
| 265 | 262 | ||
| 263 | if (no_entries > 1) { | ||
| 264 | /* handle failed ports */ | ||
| 265 | unsigned long flags; | ||
| 266 | struct zfcp_port *port; | ||
| 267 | |||
| 268 | read_lock_irqsave(&adapter->port_list_lock, flags); | ||
| 269 | list_for_each_entry(port, &adapter->port_list, list) { | ||
| 270 | if (port->d_id) | ||
| 271 | continue; | ||
| 272 | zfcp_erp_port_reopen(port, | ||
| 273 | ZFCP_STATUS_COMMON_ERP_FAILED, | ||
| 274 | "fcrscn1"); | ||
| 275 | } | ||
| 276 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | ||
| 277 | } | ||
| 278 | |||
| 266 | for (i = 1; i < no_entries; i++) { | 279 | for (i = 1; i < no_entries; i++) { |
| 267 | /* skip head and start with 1st element */ | 280 | /* skip head and start with 1st element */ |
| 268 | page++; | 281 | page++; |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index f4f6a07c5222..221d0dfb8493 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
| @@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) | |||
| 368 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; | 368 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; |
| 369 | int ret = SUCCESS, fc_ret; | 369 | int ret = SUCCESS, fc_ret; |
| 370 | 370 | ||
| 371 | if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) { | ||
| 372 | zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p"); | ||
| 373 | zfcp_erp_wait(adapter); | ||
| 374 | } | ||
| 371 | zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); | 375 | zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); |
| 372 | zfcp_erp_wait(adapter); | 376 | zfcp_erp_wait(adapter); |
| 373 | fc_ret = fc_block_scsi_eh(scpnt); | 377 | fc_ret = fc_block_scsi_eh(scpnt); |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 1df5171594b8..11fb68d7e60d 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
| @@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) | |||
| 2640 | return capacity; | 2640 | return capacity; |
| 2641 | } | 2641 | } |
| 2642 | 2642 | ||
| 2643 | static inline int aac_pci_offline(struct aac_dev *dev) | ||
| 2644 | { | ||
| 2645 | return pci_channel_offline(dev->pdev) || dev->handle_pci_error; | ||
| 2646 | } | ||
| 2647 | |||
| 2643 | static inline int aac_adapter_check_health(struct aac_dev *dev) | 2648 | static inline int aac_adapter_check_health(struct aac_dev *dev) |
| 2644 | { | 2649 | { |
| 2645 | if (unlikely(pci_channel_offline(dev->pdev))) | 2650 | if (unlikely(aac_pci_offline(dev))) |
| 2646 | return -1; | 2651 | return -1; |
| 2647 | 2652 | ||
| 2648 | return (dev)->a_ops.adapter_check_health(dev); | 2653 | return (dev)->a_ops.adapter_check_health(dev); |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e67e032936ef..78430a7b294c 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
| @@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
| 672 | return -ETIMEDOUT; | 672 | return -ETIMEDOUT; |
| 673 | } | 673 | } |
| 674 | 674 | ||
| 675 | if (unlikely(pci_channel_offline(dev->pdev))) | 675 | if (unlikely(aac_pci_offline(dev))) |
| 676 | return -EFAULT; | 676 | return -EFAULT; |
| 677 | 677 | ||
| 678 | if ((blink = aac_adapter_check_health(dev)) > 0) { | 678 | if ((blink = aac_adapter_check_health(dev)) > 0) { |
| @@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, | |||
| 772 | 772 | ||
| 773 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | 773 | spin_unlock_irqrestore(&fibptr->event_lock, flags); |
| 774 | 774 | ||
| 775 | if (unlikely(pci_channel_offline(dev->pdev))) | 775 | if (unlikely(aac_pci_offline(dev))) |
| 776 | return -EFAULT; | 776 | return -EFAULT; |
| 777 | 777 | ||
| 778 | fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; | 778 | fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index dbaa4f131433..3ad997ac3510 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
| @@ -139,6 +139,7 @@ static const struct { | |||
| 139 | { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, | 139 | { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, |
| 140 | 140 | ||
| 141 | { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, | 141 | { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, |
| 142 | { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." }, | ||
| 142 | }; | 143 | }; |
| 143 | 144 | ||
| 144 | static void ibmvfc_npiv_login(struct ibmvfc_host *); | 145 | static void ibmvfc_npiv_login(struct ibmvfc_host *); |
| @@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt) | |||
| 1494 | if (rsp->flags & FCP_RSP_LEN_VALID) | 1495 | if (rsp->flags & FCP_RSP_LEN_VALID) |
| 1495 | rsp_code = rsp->data.info.rsp_code; | 1496 | rsp_code = rsp->data.info.rsp_code; |
| 1496 | 1497 | ||
| 1497 | scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) " | 1498 | scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) " |
| 1498 | "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", | 1499 | "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", |
| 1499 | cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error, | 1500 | cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error), |
| 1500 | rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); | 1501 | rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); |
| 1501 | } | 1502 | } |
| 1502 | 1503 | ||
| @@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) | |||
| 2022 | sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " | 2023 | sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " |
| 2023 | "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, | 2024 | "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, |
| 2024 | ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), | 2025 | ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), |
| 2025 | rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, | 2026 | be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, |
| 2026 | fc_rsp->scsi_status); | 2027 | fc_rsp->scsi_status); |
| 2027 | rsp_rc = -EIO; | 2028 | rsp_rc = -EIO; |
| 2028 | } else | 2029 | } else |
| @@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) | |||
| 2381 | sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " | 2382 | sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " |
| 2382 | "flags: %x fcp_rsp: %x, scsi_status: %x\n", | 2383 | "flags: %x fcp_rsp: %x, scsi_status: %x\n", |
| 2383 | ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), | 2384 | ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), |
| 2384 | rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, | 2385 | be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, |
| 2385 | fc_rsp->scsi_status); | 2386 | fc_rsp->scsi_status); |
| 2386 | rsp_rc = -EIO; | 2387 | rsp_rc = -EIO; |
| 2387 | } else | 2388 | } else |
| @@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) | |||
| 2755 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); | 2756 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); |
| 2756 | if (crq->format == IBMVFC_PARTITION_MIGRATED) { | 2757 | if (crq->format == IBMVFC_PARTITION_MIGRATED) { |
| 2757 | /* We need to re-setup the interpartition connection */ | 2758 | /* We need to re-setup the interpartition connection */ |
| 2758 | dev_info(vhost->dev, "Re-enabling adapter\n"); | 2759 | dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n"); |
| 2759 | vhost->client_migrated = 1; | 2760 | vhost->client_migrated = 1; |
| 2760 | ibmvfc_purge_requests(vhost, DID_REQUEUE); | 2761 | ibmvfc_purge_requests(vhost, DID_REQUEUE); |
| 2761 | ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); | 2762 | ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); |
| 2762 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); | 2763 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); |
| 2763 | } else { | 2764 | } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { |
| 2764 | dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format); | 2765 | dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); |
| 2765 | ibmvfc_purge_requests(vhost, DID_ERROR); | 2766 | ibmvfc_purge_requests(vhost, DID_ERROR); |
| 2766 | ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); | 2767 | ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); |
| 2767 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); | 2768 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); |
| 2769 | } else { | ||
| 2770 | dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format); | ||
| 2768 | } | 2771 | } |
| 2769 | return; | 2772 | return; |
| 2770 | case IBMVFC_CRQ_CMD_RSP: | 2773 | case IBMVFC_CRQ_CMD_RSP: |
| @@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) | |||
| 3348 | 3351 | ||
| 3349 | tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", | 3352 | tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", |
| 3350 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), | 3353 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), |
| 3351 | rsp->status, rsp->error, status); | 3354 | be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status); |
| 3352 | break; | 3355 | break; |
| 3353 | } | 3356 | } |
| 3354 | 3357 | ||
| @@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) | |||
| 3446 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 3449 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
| 3447 | 3450 | ||
| 3448 | tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | 3451 | tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", |
| 3449 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error, | 3452 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), |
| 3450 | ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type, | 3453 | be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), |
| 3451 | ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status); | 3454 | ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), |
| 3455 | ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status); | ||
| 3452 | break; | 3456 | break; |
| 3453 | } | 3457 | } |
| 3454 | 3458 | ||
| @@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) | |||
| 3619 | fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; | 3623 | fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; |
| 3620 | tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | 3624 | tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", |
| 3621 | ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), | 3625 | ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), |
| 3622 | mad->iu.status, mad->iu.error, | 3626 | be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error), |
| 3623 | ibmvfc_get_fc_type(fc_reason), fc_reason, | 3627 | ibmvfc_get_fc_type(fc_reason), fc_reason, |
| 3624 | ibmvfc_get_ls_explain(fc_explain), fc_explain, status); | 3628 | ibmvfc_get_ls_explain(fc_explain), fc_explain, status); |
| 3625 | break; | 3629 | break; |
| @@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) | |||
| 3831 | 3835 | ||
| 3832 | tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | 3836 | tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", |
| 3833 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), | 3837 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), |
| 3834 | rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), | 3838 | be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), |
| 3835 | rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), | 3839 | ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), |
| 3836 | rsp->fc_explain, status); | 3840 | ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), |
| 3841 | status); | ||
| 3837 | break; | 3842 | break; |
| 3838 | } | 3843 | } |
| 3839 | 3844 | ||
| @@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) | |||
| 3959 | level += ibmvfc_retry_host_init(vhost); | 3964 | level += ibmvfc_retry_host_init(vhost); |
| 3960 | ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", | 3965 | ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", |
| 3961 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), | 3966 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), |
| 3962 | rsp->status, rsp->error); | 3967 | be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); |
| 3963 | break; | 3968 | break; |
| 3964 | case IBMVFC_MAD_DRIVER_FAILED: | 3969 | case IBMVFC_MAD_DRIVER_FAILED: |
| 3965 | break; | 3970 | break; |
| @@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) | |||
| 4024 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); | 4029 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); |
| 4025 | ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", | 4030 | ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", |
| 4026 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), | 4031 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), |
| 4027 | rsp->status, rsp->error); | 4032 | be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); |
| 4028 | ibmvfc_free_event(evt); | 4033 | ibmvfc_free_event(evt); |
| 4029 | return; | 4034 | return; |
| 4030 | case IBMVFC_MAD_CRQ_ERROR: | 4035 | case IBMVFC_MAD_CRQ_ERROR: |
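
Note: several of the ibmvfc.c hunks fix log messages that printed raw __be16 wire fields; the values handed to the format string need the same be16_to_cpu() conversion already applied to the lookup helpers on the same line. A minimal sketch of the distinction, using a hypothetical response structure:

    #include <linux/types.h>
    #include <linux/printk.h>
    #include <asm/byteorder.h>

    struct rsp_hdr {
            __be16 status;          /* big-endian as received from the firmware */
            __be16 error;
    };

    static void log_rsp(const struct rsp_hdr *rsp)
    {
            /*
             * Wrong on little-endian hosts (prints byte-swapped values):
             * pr_err("status %x error %x\n", rsp->status, rsp->error);
             */

            /* correct: convert from wire order to CPU order first */
            pr_err("status %x error %x\n",
                   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
    }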
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index b81a53c4a9a8..459cc288ba1d 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
| @@ -78,9 +78,14 @@ enum ibmvfc_crq_valid { | |||
| 78 | IBMVFC_CRQ_XPORT_EVENT = 0xFF, | 78 | IBMVFC_CRQ_XPORT_EVENT = 0xFF, |
| 79 | }; | 79 | }; |
| 80 | 80 | ||
| 81 | enum ibmvfc_crq_format { | 81 | enum ibmvfc_crq_init_msg { |
| 82 | IBMVFC_CRQ_INIT = 0x01, | 82 | IBMVFC_CRQ_INIT = 0x01, |
| 83 | IBMVFC_CRQ_INIT_COMPLETE = 0x02, | 83 | IBMVFC_CRQ_INIT_COMPLETE = 0x02, |
| 84 | }; | ||
| 85 | |||
| 86 | enum ibmvfc_crq_xport_evts { | ||
| 87 | IBMVFC_PARTNER_FAILED = 0x01, | ||
| 88 | IBMVFC_PARTNER_DEREGISTER = 0x02, | ||
| 84 | IBMVFC_PARTITION_MIGRATED = 0x06, | 89 | IBMVFC_PARTITION_MIGRATED = 0x06, |
| 85 | }; | 90 | }; |
| 86 | 91 | ||
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c98f264f1d83..a497b2c0cb79 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
| @@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
| 3878 | * wake up the thread. | 3878 | * wake up the thread. |
| 3879 | */ | 3879 | */ |
| 3880 | spin_lock(&lpfc_cmd->buf_lock); | 3880 | spin_lock(&lpfc_cmd->buf_lock); |
| 3881 | if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) { | 3881 | lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; |
| 3882 | lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; | 3882 | if (lpfc_cmd->waitq) { |
| 3883 | if (lpfc_cmd->waitq) | 3883 | wake_up(lpfc_cmd->waitq); |
| 3884 | wake_up(lpfc_cmd->waitq); | ||
| 3885 | lpfc_cmd->waitq = NULL; | 3884 | lpfc_cmd->waitq = NULL; |
| 3886 | } | 3885 | } |
| 3887 | spin_unlock(&lpfc_cmd->buf_lock); | 3886 | spin_unlock(&lpfc_cmd->buf_lock); |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index e57774472e75..1d8c584ec1e9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
| @@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
| 3281 | 3281 | ||
| 3282 | if (smid < ioc->hi_priority_smid) { | 3282 | if (smid < ioc->hi_priority_smid) { |
| 3283 | struct scsiio_tracker *st; | 3283 | struct scsiio_tracker *st; |
| 3284 | void *request; | ||
| 3284 | 3285 | ||
| 3285 | st = _get_st_from_smid(ioc, smid); | 3286 | st = _get_st_from_smid(ioc, smid); |
| 3286 | if (!st) { | 3287 | if (!st) { |
| 3287 | _base_recovery_check(ioc); | 3288 | _base_recovery_check(ioc); |
| 3288 | return; | 3289 | return; |
| 3289 | } | 3290 | } |
| 3291 | |||
| 3292 | /* Clear MPI request frame */ | ||
| 3293 | request = mpt3sas_base_get_msg_frame(ioc, smid); | ||
| 3294 | memset(request, 0, ioc->request_sz); | ||
| 3295 | |||
| 3290 | mpt3sas_base_clear_st(ioc, st); | 3296 | mpt3sas_base_clear_st(ioc, st); |
| 3291 | _base_recovery_check(ioc); | 3297 | _base_recovery_check(ioc); |
| 3292 | return; | 3298 | return; |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8bb5b8f9f4d2..1ccfbc7eebe0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
| @@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
| 1462 | { | 1462 | { |
| 1463 | struct scsi_cmnd *scmd = NULL; | 1463 | struct scsi_cmnd *scmd = NULL; |
| 1464 | struct scsiio_tracker *st; | 1464 | struct scsiio_tracker *st; |
| 1465 | Mpi25SCSIIORequest_t *mpi_request; | ||
| 1465 | 1466 | ||
| 1466 | if (smid > 0 && | 1467 | if (smid > 0 && |
| 1467 | smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { | 1468 | smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { |
| 1468 | u32 unique_tag = smid - 1; | 1469 | u32 unique_tag = smid - 1; |
| 1469 | 1470 | ||
| 1471 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); | ||
| 1472 | |||
| 1473 | /* | ||
| 1474 | * If SCSI IO request is outstanding at driver level then | ||
| 1475 | * DevHandle field must be non-zero. If DevHandle is zero | ||
| 1476 | * then it means that this smid is free at driver level, | ||
| 1477 | * so return NULL. | ||
| 1478 | */ | ||
| 1479 | if (!mpi_request->DevHandle) | ||
| 1480 | return scmd; | ||
| 1481 | |||
| 1470 | scmd = scsi_host_find_tag(ioc->shost, unique_tag); | 1482 | scmd = scsi_host_find_tag(ioc->shost, unique_tag); |
| 1471 | if (scmd) { | 1483 | if (scmd) { |
| 1472 | st = scsi_cmd_priv(scmd); | 1484 | st = scsi_cmd_priv(scmd); |
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e74a62448ba4..e5db9a9954dc 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
| @@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) | |||
| 1392 | 1392 | ||
| 1393 | static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) | 1393 | static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) |
| 1394 | { | 1394 | { |
| 1395 | struct qedi_nvm_iscsi_image nvm_image; | ||
| 1396 | |||
| 1397 | qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, | 1395 | qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, |
| 1398 | sizeof(nvm_image), | 1396 | sizeof(struct qedi_nvm_iscsi_image), |
| 1399 | &qedi->nvm_buf_dma, GFP_KERNEL); | 1397 | &qedi->nvm_buf_dma, GFP_KERNEL); |
| 1400 | if (!qedi->iscsi_image) { | 1398 | if (!qedi->iscsi_image) { |
| 1401 | QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); | 1399 | QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); |
| @@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data) | |||
| 2236 | static int qedi_get_boot_info(struct qedi_ctx *qedi) | 2234 | static int qedi_get_boot_info(struct qedi_ctx *qedi) |
| 2237 | { | 2235 | { |
| 2238 | int ret = 1; | 2236 | int ret = 1; |
| 2239 | struct qedi_nvm_iscsi_image nvm_image; | ||
| 2240 | 2237 | ||
| 2241 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, | 2238 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, |
| 2242 | "Get NVM iSCSI CFG image\n"); | 2239 | "Get NVM iSCSI CFG image\n"); |
| 2243 | ret = qedi_ops->common->nvm_get_image(qedi->cdev, | 2240 | ret = qedi_ops->common->nvm_get_image(qedi->cdev, |
| 2244 | QED_NVM_IMAGE_ISCSI_CFG, | 2241 | QED_NVM_IMAGE_ISCSI_CFG, |
| 2245 | (char *)qedi->iscsi_image, | 2242 | (char *)qedi->iscsi_image, |
| 2246 | sizeof(nvm_image)); | 2243 | sizeof(struct qedi_nvm_iscsi_image)); |
| 2247 | if (ret) | 2244 | if (ret) |
| 2248 | QEDI_ERR(&qedi->dbg_ctx, | 2245 | QEDI_ERR(&qedi->dbg_ctx, |
| 2249 | "Could not get NVM image. ret = %d\n", ret); | 2246 | "Could not get NVM image. ret = %d\n", ret); |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 16a18d5d856f..6e4f4931ae17 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
| @@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, | |||
| 3203 | if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) | 3203 | if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) |
| 3204 | return -EINVAL; | 3204 | return -EINVAL; |
| 3205 | ep = iscsi_lookup_endpoint(transport_fd); | 3205 | ep = iscsi_lookup_endpoint(transport_fd); |
| 3206 | if (!ep) | ||
| 3207 | return -EINVAL; | ||
| 3206 | conn = cls_conn->dd_data; | 3208 | conn = cls_conn->dd_data; |
| 3207 | qla_conn = conn->dd_data; | 3209 | qla_conn = conn->dd_data; |
| 3208 | qla_conn->qla_ep = ep->dd_data; | 3210 | qla_conn->qla_ep = ep->dd_data; |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index c4cbfd07b916..a08ff3bd6310 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -238,6 +238,7 @@ static struct { | |||
| 238 | {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 238 | {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
| 239 | {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 239 | {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
| 240 | {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 240 | {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
| 241 | {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | ||
| 241 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, | 242 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, |
| 242 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, | 243 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, |
| 243 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ | 244 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ |
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 5a58cbf3a75d..c14006ac98f9 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c | |||
| @@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { | |||
| 75 | {"NETAPP", "INF-01-00", "rdac", }, | 75 | {"NETAPP", "INF-01-00", "rdac", }, |
| 76 | {"LSI", "INF-01-00", "rdac", }, | 76 | {"LSI", "INF-01-00", "rdac", }, |
| 77 | {"ENGENIO", "INF-01-00", "rdac", }, | 77 | {"ENGENIO", "INF-01-00", "rdac", }, |
| 78 | {"LENOVO", "DE_Series", "rdac", }, | ||
| 78 | {NULL, NULL, NULL }, | 79 | {NULL, NULL, NULL }, |
| 79 | }; | 80 | }; |
| 80 | 81 | ||
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 6a9040faed00..3b119ca0cc0c 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
| @@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr, | |||
| 771 | 771 | ||
| 772 | mutex_lock(&sdev->state_mutex); | 772 | mutex_lock(&sdev->state_mutex); |
| 773 | ret = scsi_device_set_state(sdev, state); | 773 | ret = scsi_device_set_state(sdev, state); |
| 774 | /* | ||
| 775 | * If the device state changes to SDEV_RUNNING, we need to run | ||
| 776 | * the queue to avoid I/O hang. | ||
| 777 | */ | ||
| 778 | if (ret == 0 && state == SDEV_RUNNING) | ||
| 779 | blk_mq_run_hw_queues(sdev->request_queue, true); | ||
| 774 | mutex_unlock(&sdev->state_mutex); | 780 | mutex_unlock(&sdev->state_mutex); |
| 775 | 781 | ||
| 776 | return ret == 0 ? count : -EINVAL; | 782 | return ret == 0 ? count : -EINVAL; |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 251db30d0882..2b2bc4b49d78 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode) | |||
| 1415 | scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); | 1415 | scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); |
| 1416 | } | 1416 | } |
| 1417 | 1417 | ||
| 1418 | /* | ||
| 1419 | * XXX and what if there are packets in flight and this close() | ||
| 1420 | * XXX is followed by a "rmmod sd_mod"? | ||
| 1421 | */ | ||
| 1422 | |||
| 1423 | scsi_disk_put(sdkp); | 1418 | scsi_disk_put(sdkp); |
| 1424 | } | 1419 | } |
| 1425 | 1420 | ||
| @@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, | |||
| 3076 | unsigned int opt_xfer_bytes = | 3071 | unsigned int opt_xfer_bytes = |
| 3077 | logical_to_bytes(sdp, sdkp->opt_xfer_blocks); | 3072 | logical_to_bytes(sdp, sdkp->opt_xfer_blocks); |
| 3078 | 3073 | ||
| 3074 | if (sdkp->opt_xfer_blocks == 0) | ||
| 3075 | return false; | ||
| 3076 | |||
| 3079 | if (sdkp->opt_xfer_blocks > dev_max) { | 3077 | if (sdkp->opt_xfer_blocks > dev_max) { |
| 3080 | sd_first_printk(KERN_WARNING, sdkp, | 3078 | sd_first_printk(KERN_WARNING, sdkp, |
| 3081 | "Optimal transfer size %u logical blocks " \ | 3079 | "Optimal transfer size %u logical blocks " \ |
| @@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev) | |||
| 3505 | { | 3503 | { |
| 3506 | struct scsi_disk *sdkp = to_scsi_disk(dev); | 3504 | struct scsi_disk *sdkp = to_scsi_disk(dev); |
| 3507 | struct gendisk *disk = sdkp->disk; | 3505 | struct gendisk *disk = sdkp->disk; |
| 3508 | 3506 | struct request_queue *q = disk->queue; | |
| 3507 | |||
| 3509 | ida_free(&sd_index_ida, sdkp->index); | 3508 | ida_free(&sd_index_ida, sdkp->index); |
| 3510 | 3509 | ||
| 3510 | /* | ||
| 3511 | * Wait until all requests that are in progress have completed. | ||
| 3512 | * This is necessary so that e.g. scsi_end_request() does not crash | ||
| 3513 | * after the disk->private_data pointer has been cleared. Wait from | ||
| 3514 | * inside scsi_disk_release() instead of from sd_release() so that | ||
| 3515 | * freezing and unfreezing the request queue does not affect user | ||
| 3516 | * space I/O when multiple processes open a /dev/sd... node concurrently. | ||
| 3517 | */ | ||
| 3518 | blk_mq_freeze_queue(q); | ||
| 3519 | blk_mq_unfreeze_queue(q); | ||
| 3520 | |||
| 3511 | disk->private_data = NULL; | 3521 | disk->private_data = NULL; |
| 3512 | put_disk(disk); | 3522 | put_disk(disk); |
| 3513 | put_device(&sdkp->device->sdev_gendev); | 3523 | put_device(&sdkp->device->sdev_gendev); |
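
Note: the scsi_disk_release() hunk relies on a common blk-mq idiom: calling blk_mq_freeze_queue() immediately followed by blk_mq_unfreeze_queue() acts as a barrier that waits for every request already in flight on the queue to complete, after which it is safe to clear disk->private_data. Roughly, as a sketch (q assumed to be a live request queue):

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    static void wait_for_inflight_requests(struct request_queue *q)
    {
            /*
             * Freezing waits until all requests that entered the queue have
             * completed; unfreezing right away lets new I/O proceed again.
             */
            blk_mq_freeze_queue(q);
            blk_mq_unfreeze_queue(q);
    }

Doing this in scsi_disk_release() rather than in sd_release() keeps the freeze/unfreeze cycle out of the regular open/close path.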
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 84380bae20f1..8472de1007ff 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
| @@ -385,7 +385,7 @@ enum storvsc_request_type { | |||
| 385 | * This is the end of Protocol specific defines. | 385 | * This is the end of Protocol specific defines. |
| 386 | */ | 386 | */ |
| 387 | 387 | ||
| 388 | static int storvsc_ringbuffer_size = (256 * PAGE_SIZE); | 388 | static int storvsc_ringbuffer_size = (128 * 1024); |
| 389 | static u32 max_outstanding_req_per_channel; | 389 | static u32 max_outstanding_req_per_channel; |
| 390 | 390 | ||
| 391 | static int storvsc_vcpus_per_sub_channel = 4; | 391 | static int storvsc_vcpus_per_sub_channel = 4; |
| @@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) | |||
| 668 | { | 668 | { |
| 669 | struct device *dev = &device->device; | 669 | struct device *dev = &device->device; |
| 670 | struct storvsc_device *stor_device; | 670 | struct storvsc_device *stor_device; |
| 671 | int num_cpus = num_online_cpus(); | ||
| 672 | int num_sc; | 671 | int num_sc; |
| 673 | struct storvsc_cmd_request *request; | 672 | struct storvsc_cmd_request *request; |
| 674 | struct vstor_packet *vstor_packet; | 673 | struct vstor_packet *vstor_packet; |
| 675 | int ret, t; | 674 | int ret, t; |
| 676 | 675 | ||
| 677 | num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns); | 676 | /* |
| 677 | * If the number of CPUs is artificially restricted, such as | ||
| 678 | * with maxcpus=1 on the kernel boot line, Hyper-V could offer | ||
| 679 | * sub-channels >= the number of CPUs. These sub-channels | ||
| 680 | * should not be created. The primary channel is already created | ||
| 681 | * and assigned to one CPU, so check against # CPUs - 1. | ||
| 682 | */ | ||
| 683 | num_sc = min((int)(num_online_cpus() - 1), max_chns); | ||
| 684 | if (!num_sc) | ||
| 685 | return; | ||
| 686 | |||
| 678 | stor_device = get_out_stor_device(device); | 687 | stor_device = get_out_stor_device(device); |
| 679 | if (!stor_device) | 688 | if (!stor_device) |
| 680 | return; | 689 | return; |
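
Note: the storvsc hunk caps the number of sub-channels at one less than the online CPU count, because the primary channel already occupies a CPU; with maxcpus=1 the result is zero and no sub-channels are created at all. A standalone model of the arithmetic (illustrative only, not the driver code):

    #include <stdio.h>

    static int sub_channels(int online_cpus, int offered)
    {
            int num_sc = online_cpus - 1;   /* primary channel uses one CPU */

            if (offered < num_sc)
                    num_sc = offered;
            return num_sc > 0 ? num_sc : 0; /* 0 means: create none */
    }

    int main(void)
    {
            printf("%d\n", sub_channels(1, 16));    /* 0: maxcpus=1 case */
            printf("%d\n", sub_channels(8, 16));    /* 7 */
            printf("%d\n", sub_channels(8, 4));     /* 4 */
            return 0;
    }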
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c index 9351349cf0a9..1e0041ec8132 100644 --- a/drivers/soc/bcm/bcm2835-power.c +++ b/drivers/soc/bcm/bcm2835-power.c | |||
| @@ -150,7 +150,12 @@ struct bcm2835_power { | |||
| 150 | 150 | ||
| 151 | static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) | 151 | static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) |
| 152 | { | 152 | { |
| 153 | u64 start = ktime_get_ns(); | 153 | u64 start; |
| 154 | |||
| 155 | if (!reg) | ||
| 156 | return 0; | ||
| 157 | |||
| 158 | start = ktime_get_ns(); | ||
| 154 | 159 | ||
| 155 | /* Enable the module's async AXI bridges. */ | 160 | /* Enable the module's async AXI bridges. */ |
| 156 | ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); | 161 | ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); |
| @@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) | |||
| 165 | 170 | ||
| 166 | static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) | 171 | static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) |
| 167 | { | 172 | { |
| 168 | u64 start = ktime_get_ns(); | 173 | u64 start; |
| 174 | |||
| 175 | if (!reg) | ||
| 176 | return 0; | ||
| 177 | |||
| 178 | start = ktime_get_ns(); | ||
| 169 | 179 | ||
| 170 | /* Disable the module's async AXI bridges. */ | 180 | /* Disable the module's async AXI bridges. */ |
| 171 | ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); | 181 | ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); |
| @@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain) | |||
| 475 | } | 485 | } |
| 476 | } | 486 | } |
| 477 | 487 | ||
| 478 | static void | 488 | static int |
| 479 | bcm2835_init_power_domain(struct bcm2835_power *power, | 489 | bcm2835_init_power_domain(struct bcm2835_power *power, |
| 480 | int pd_xlate_index, const char *name) | 490 | int pd_xlate_index, const char *name) |
| 481 | { | 491 | { |
| @@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power, | |||
| 483 | struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index]; | 493 | struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index]; |
| 484 | 494 | ||
| 485 | dom->clk = devm_clk_get(dev->parent, name); | 495 | dom->clk = devm_clk_get(dev->parent, name); |
| 496 | if (IS_ERR(dom->clk)) { | ||
| 497 | int ret = PTR_ERR(dom->clk); | ||
| 498 | |||
| 499 | if (ret == -EPROBE_DEFER) | ||
| 500 | return ret; | ||
| 501 | |||
| 502 | /* Some domains don't have a clk, so make sure that we | ||
| 503 | * don't deref an error pointer later. | ||
| 504 | */ | ||
| 505 | dom->clk = NULL; | ||
| 506 | } | ||
| 486 | 507 | ||
| 487 | dom->base.name = name; | 508 | dom->base.name = name; |
| 488 | dom->base.power_on = bcm2835_power_pd_power_on; | 509 | dom->base.power_on = bcm2835_power_pd_power_on; |
| @@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power, | |||
| 495 | pm_genpd_init(&dom->base, NULL, true); | 516 | pm_genpd_init(&dom->base, NULL, true); |
| 496 | 517 | ||
| 497 | power->pd_xlate.domains[pd_xlate_index] = &dom->base; | 518 | power->pd_xlate.domains[pd_xlate_index] = &dom->base; |
| 519 | |||
| 520 | return 0; | ||
| 498 | } | 521 | } |
| 499 | 522 | ||
| 500 | /** bcm2835_reset_reset - Resets a block that has a reset line in the | 523 | /** bcm2835_reset_reset - Resets a block that has a reset line in the |
| @@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev) | |||
| 592 | { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 }, | 615 | { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 }, |
| 593 | { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 }, | 616 | { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 }, |
| 594 | }; | 617 | }; |
| 595 | int ret, i; | 618 | int ret = 0, i; |
| 596 | u32 id; | 619 | u32 id; |
| 597 | 620 | ||
| 598 | power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL); | 621 | power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL); |
| @@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev) | |||
| 619 | 642 | ||
| 620 | power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names); | 643 | power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names); |
| 621 | 644 | ||
| 622 | for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) | 645 | for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) { |
| 623 | bcm2835_init_power_domain(power, i, power_domain_names[i]); | 646 | ret = bcm2835_init_power_domain(power, i, power_domain_names[i]); |
| 647 | if (ret) | ||
| 648 | goto fail; | ||
| 649 | } | ||
| 624 | 650 | ||
| 625 | for (i = 0; i < ARRAY_SIZE(domain_deps); i++) { | 651 | for (i = 0; i < ARRAY_SIZE(domain_deps); i++) { |
| 626 | pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base, | 652 | pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base, |
| @@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev) | |||
| 634 | 660 | ||
| 635 | ret = devm_reset_controller_register(dev, &power->reset); | 661 | ret = devm_reset_controller_register(dev, &power->reset); |
| 636 | if (ret) | 662 | if (ret) |
| 637 | return ret; | 663 | goto fail; |
| 638 | 664 | ||
| 639 | of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate); | 665 | of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate); |
| 640 | 666 | ||
| 641 | dev_info(dev, "Broadcom BCM2835 power domains driver"); | 667 | dev_info(dev, "Broadcom BCM2835 power domains driver"); |
| 642 | return 0; | 668 | return 0; |
| 669 | |||
| 670 | fail: | ||
| 671 | for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) { | ||
| 672 | struct generic_pm_domain *dom = &power->domains[i].base; | ||
| 673 | |||
| 674 | if (dom->name) | ||
| 675 | pm_genpd_remove(dom); | ||
| 676 | } | ||
| 677 | return ret; | ||
| 643 | } | 678 | } |
| 644 | 679 | ||
| 645 | static int bcm2835_power_remove(struct platform_device *pdev) | 680 | static int bcm2835_power_remove(struct platform_device *pdev) |
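
Note: the bcm2835-power changes treat a power domain's clock as optional: only -EPROBE_DEFER from devm_clk_get() is propagated as an error, while any other failure leaves dom->clk as NULL so that the later clk_prepare_enable()/clk_disable_unprepare() calls become harmless no-ops. A hedged sketch of that handling (hypothetical helper, not the driver's function):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    static int get_optional_domain_clk(struct device *dev, const char *name,
                                       struct clk **out)
    {
            struct clk *clk = devm_clk_get(dev, name);

            if (IS_ERR(clk)) {
                    if (PTR_ERR(clk) == -EPROBE_DEFER)
                            return -EPROBE_DEFER;   /* provider not ready yet */
                    clk = NULL;                     /* domain simply has no clock */
            }

            *out = clk;     /* clk_prepare_enable(NULL) is a no-op */
            return 0;
    }

Kernels that provide devm_clk_get_optional() can express the same intent with a single call.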
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index c0901b96cfe4..62951e836cbc 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
| @@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig" | |||
| 114 | 114 | ||
| 115 | source "drivers/staging/mt7621-mmc/Kconfig" | 115 | source "drivers/staging/mt7621-mmc/Kconfig" |
| 116 | 116 | ||
| 117 | source "drivers/staging/mt7621-eth/Kconfig" | ||
| 118 | |||
| 119 | source "drivers/staging/mt7621-dts/Kconfig" | 117 | source "drivers/staging/mt7621-dts/Kconfig" |
| 120 | 118 | ||
| 121 | source "drivers/staging/gasket/Kconfig" | 119 | source "drivers/staging/gasket/Kconfig" |
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 57c6bce13ff4..d1b17ddcd354 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile | |||
| @@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621) += mt7621-spi/ | |||
| 47 | obj-$(CONFIG_SOC_MT7621) += mt7621-dma/ | 47 | obj-$(CONFIG_SOC_MT7621) += mt7621-dma/ |
| 48 | obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ | 48 | obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ |
| 49 | obj-$(CONFIG_MTK_MMC) += mt7621-mmc/ | 49 | obj-$(CONFIG_MTK_MMC) += mt7621-mmc/ |
| 50 | obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/ | ||
| 51 | obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ | 50 | obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ |
| 52 | obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ | 51 | obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ |
| 53 | obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ | 52 | obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ |
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig index 687537203d9c..d9725888af6f 100644 --- a/drivers/staging/axis-fifo/Kconfig +++ b/drivers/staging/axis-fifo/Kconfig | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | # | 3 | # |
| 4 | config XIL_AXIS_FIFO | 4 | config XIL_AXIS_FIFO |
| 5 | tristate "Xilinx AXI-Stream FIFO IP core driver" | 5 | tristate "Xilinx AXI-Stream FIFO IP core driver" |
| 6 | depends on OF | ||
| 6 | default n | 7 | default n |
| 7 | help | 8 | help |
| 8 | This adds support for the Xilinx AXI-Stream | 9 | This adds support for the Xilinx AXI-Stream |
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h index a7d569cfca5d..0dff1ac057cd 100644 --- a/drivers/staging/comedi/comedidev.h +++ b/drivers/staging/comedi/comedidev.h | |||
| @@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev, | |||
| 1001 | unsigned int mask); | 1001 | unsigned int mask); |
| 1002 | unsigned int comedi_dio_update_state(struct comedi_subdevice *s, | 1002 | unsigned int comedi_dio_update_state(struct comedi_subdevice *s, |
| 1003 | unsigned int *data); | 1003 | unsigned int *data); |
| 1004 | unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, | ||
| 1005 | struct comedi_cmd *cmd); | ||
| 1004 | unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s); | 1006 | unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s); |
| 1005 | unsigned int comedi_nscans_left(struct comedi_subdevice *s, | 1007 | unsigned int comedi_nscans_left(struct comedi_subdevice *s, |
| 1006 | unsigned int nscans); | 1008 | unsigned int nscans); |
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index eefa62f42c0f..5a32b8fc000e 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
| @@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s, | |||
| 394 | EXPORT_SYMBOL_GPL(comedi_dio_update_state); | 394 | EXPORT_SYMBOL_GPL(comedi_dio_update_state); |
| 395 | 395 | ||
| 396 | /** | 396 | /** |
| 397 | * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes | 397 | * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in |
| 398 | * bytes | ||
| 398 | * @s: COMEDI subdevice. | 399 | * @s: COMEDI subdevice. |
| 400 | * @cmd: COMEDI command. | ||
| 399 | * | 401 | * |
| 400 | * Determines the overall scan length according to the subdevice type and the | 402 | * Determines the overall scan length according to the subdevice type and the |
| 401 | * number of channels in the scan. | 403 | * number of channels in the scan for the specified command. |
| 402 | * | 404 | * |
| 403 | * For digital input, output or input/output subdevices, samples for | 405 | * For digital input, output or input/output subdevices, samples for |
| 404 | * multiple channels are assumed to be packed into one or more unsigned | 406 | * multiple channels are assumed to be packed into one or more unsigned |
| @@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state); | |||
| 408 | * | 410 | * |
| 409 | * Returns the overall scan length in bytes. | 411 | * Returns the overall scan length in bytes. |
| 410 | */ | 412 | */ |
| 411 | unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) | 413 | unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, |
| 414 | struct comedi_cmd *cmd) | ||
| 412 | { | 415 | { |
| 413 | struct comedi_cmd *cmd = &s->async->cmd; | ||
| 414 | unsigned int num_samples; | 416 | unsigned int num_samples; |
| 415 | unsigned int bits_per_sample; | 417 | unsigned int bits_per_sample; |
| 416 | 418 | ||
| @@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) | |||
| 427 | } | 429 | } |
| 428 | return comedi_samples_to_bytes(s, num_samples); | 430 | return comedi_samples_to_bytes(s, num_samples); |
| 429 | } | 431 | } |
| 432 | EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd); | ||
| 433 | |||
| 434 | /** | ||
| 435 | * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes | ||
| 436 | * @s: COMEDI subdevice. | ||
| 437 | * | ||
| 438 | * Determines the overall scan length according to the subdevice type and the | ||
| 439 | * number of channels in the scan for the current command. | ||
| 440 | * | ||
| 441 | * For digital input, output or input/output subdevices, samples for | ||
| 442 | * multiple channels are assumed to be packed into one or more unsigned | ||
| 443 | * short or unsigned int values according to the subdevice's %SDF_LSAMPL | ||
| 444 | * flag. For other types of subdevice, samples are assumed to occupy a | ||
| 445 | * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag. | ||
| 446 | * | ||
| 447 | * Returns the overall scan length in bytes. | ||
| 448 | */ | ||
| 449 | unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) | ||
| 450 | { | ||
| 451 | struct comedi_cmd *cmd = &s->async->cmd; | ||
| 452 | |||
| 453 | return comedi_bytes_per_scan_cmd(s, cmd); | ||
| 454 | } | ||
| 430 | EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); | 455 | EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); |
| 431 | 456 | ||
| 432 | static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, | 457 | static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, |
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 5edf59ac6706..b04dad8c7092 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c | |||
| @@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev, | |||
| 3545 | struct comedi_subdevice *s, struct comedi_cmd *cmd) | 3545 | struct comedi_subdevice *s, struct comedi_cmd *cmd) |
| 3546 | { | 3546 | { |
| 3547 | struct ni_private *devpriv = dev->private; | 3547 | struct ni_private *devpriv = dev->private; |
| 3548 | unsigned int bytes_per_scan; | ||
| 3548 | int err = 0; | 3549 | int err = 0; |
| 3549 | 3550 | ||
| 3550 | /* Step 1 : check if triggers are trivially valid */ | 3551 | /* Step 1 : check if triggers are trivially valid */ |
| @@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev, | |||
| 3579 | err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); | 3580 | err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); |
| 3580 | err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, | 3581 | err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, |
| 3581 | cmd->chanlist_len); | 3582 | cmd->chanlist_len); |
| 3582 | err |= comedi_check_trigger_arg_max(&cmd->stop_arg, | 3583 | bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd); |
| 3583 | s->async->prealloc_bufsz / | 3584 | if (bytes_per_scan) { |
| 3584 | comedi_bytes_per_scan(s)); | 3585 | err |= comedi_check_trigger_arg_max(&cmd->stop_arg, |
| 3586 | s->async->prealloc_bufsz / | ||
| 3587 | bytes_per_scan); | ||
| 3588 | } | ||
| 3585 | 3589 | ||
| 3586 | if (err) | 3590 | if (err) |
| 3587 | return 3; | 3591 | return 3; |
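The ni_cdio_cmdtest() change above is the point of the new helper: at cmdtest time the command being validated is not necessarily s->async->cmd, and its scan length can legitimately come out as zero, so the buffer-size check has to use comedi_bytes_per_scan_cmd() on the command under test and skip the division when it returns 0. A hedged sketch of the same step in a hypothetical driver's (*do_cmdtest)() hook, assuming only the helpers shown in this patch:

    static int example_cmdtest_step3(struct comedi_device *dev,
                                     struct comedi_subdevice *s,
                                     struct comedi_cmd *cmd)
    {
        unsigned int bytes_per_scan;
        int err = 0;

        /* Step 3: validate stop_arg against the preallocated buffer,
         * sizing scans from the command under test, not s->async->cmd.
         */
        bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
        if (bytes_per_scan)     /* a zero-length scan must not divide */
            err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
                                                s->async->prealloc_bufsz /
                                                bytes_per_scan);

        return err ? 3 : 0;
    }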
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c index 829f7b12e0dc..9bbc68729c11 100644 --- a/drivers/staging/erofs/dir.c +++ b/drivers/staging/erofs/dir.c | |||
| @@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = { | |||
| 23 | [EROFS_FT_SYMLINK] = DT_LNK, | 23 | [EROFS_FT_SYMLINK] = DT_LNK, |
| 24 | }; | 24 | }; |
| 25 | 25 | ||
| 26 | static void debug_one_dentry(unsigned char d_type, const char *de_name, | ||
| 27 | unsigned int de_namelen) | ||
| 28 | { | ||
| 29 | #ifdef CONFIG_EROFS_FS_DEBUG | ||
| 30 | /* since the on-disk name could not have the trailing '\0' */ | ||
| 31 | unsigned char dbg_namebuf[EROFS_NAME_LEN + 1]; | ||
| 32 | |||
| 33 | memcpy(dbg_namebuf, de_name, de_namelen); | ||
| 34 | dbg_namebuf[de_namelen] = '\0'; | ||
| 35 | |||
| 36 | debugln("found dirent %s de_len %u d_type %d", dbg_namebuf, | ||
| 37 | de_namelen, d_type); | ||
| 38 | #endif | ||
| 39 | } | ||
| 40 | |||
| 26 | static int erofs_fill_dentries(struct dir_context *ctx, | 41 | static int erofs_fill_dentries(struct dir_context *ctx, |
| 27 | void *dentry_blk, unsigned int *ofs, | 42 | void *dentry_blk, unsigned int *ofs, |
| 28 | unsigned int nameoff, unsigned int maxsize) | 43 | unsigned int nameoff, unsigned int maxsize) |
| @@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx, | |||
| 33 | de = dentry_blk + *ofs; | 48 | de = dentry_blk + *ofs; |
| 34 | while (de < end) { | 49 | while (de < end) { |
| 35 | const char *de_name; | 50 | const char *de_name; |
| 36 | int de_namelen; | 51 | unsigned int de_namelen; |
| 37 | unsigned char d_type; | 52 | unsigned char d_type; |
| 38 | #ifdef CONFIG_EROFS_FS_DEBUG | ||
| 39 | unsigned int dbg_namelen; | ||
| 40 | unsigned char dbg_namebuf[EROFS_NAME_LEN]; | ||
| 41 | #endif | ||
| 42 | 53 | ||
| 43 | if (unlikely(de->file_type < EROFS_FT_MAX)) | 54 | if (de->file_type < EROFS_FT_MAX) |
| 44 | d_type = erofs_filetype_table[de->file_type]; | 55 | d_type = erofs_filetype_table[de->file_type]; |
| 45 | else | 56 | else |
| 46 | d_type = DT_UNKNOWN; | 57 | d_type = DT_UNKNOWN; |
| @@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx, | |||
| 48 | nameoff = le16_to_cpu(de->nameoff); | 59 | nameoff = le16_to_cpu(de->nameoff); |
| 49 | de_name = (char *)dentry_blk + nameoff; | 60 | de_name = (char *)dentry_blk + nameoff; |
| 50 | 61 | ||
| 51 | de_namelen = unlikely(de + 1 >= end) ? | 62 | /* the last dirent in the block? */ |
| 52 | /* last directory entry */ | 63 | if (de + 1 >= end) |
| 53 | strnlen(de_name, maxsize - nameoff) : | 64 | de_namelen = strnlen(de_name, maxsize - nameoff); |
| 54 | le16_to_cpu(de[1].nameoff) - nameoff; | 65 | else |
| 66 | de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; | ||
| 55 | 67 | ||
| 56 | /* a corrupted entry is found */ | 68 | /* a corrupted entry is found */ |
| 57 | if (unlikely(de_namelen < 0)) { | 69 | if (unlikely(nameoff + de_namelen > maxsize || |
| 70 | de_namelen > EROFS_NAME_LEN)) { | ||
| 58 | DBG_BUGON(1); | 71 | DBG_BUGON(1); |
| 59 | return -EIO; | 72 | return -EIO; |
| 60 | } | 73 | } |
| 61 | 74 | ||
| 62 | #ifdef CONFIG_EROFS_FS_DEBUG | 75 | debug_one_dentry(d_type, de_name, de_namelen); |
| 63 | dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen); | ||
| 64 | memcpy(dbg_namebuf, de_name, dbg_namelen); | ||
| 65 | dbg_namebuf[dbg_namelen] = '\0'; | ||
| 66 | |||
| 67 | debugln("%s, found de_name %s de_len %d d_type %d", __func__, | ||
| 68 | dbg_namebuf, de_namelen, d_type); | ||
| 69 | #endif | ||
| 70 | |||
| 71 | if (!dir_emit(ctx, de_name, de_namelen, | 76 | if (!dir_emit(ctx, de_name, de_namelen, |
| 72 | le64_to_cpu(de->nid), d_type)) | 77 | le64_to_cpu(de->nid), d_type)) |
| 73 | /* stopped by some reason */ | 78 | /* stopped by some reason */ |
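With de_namelen now unsigned, a corrupted pair of name offsets no longer produces a negative length that the old "de_namelen < 0" test could catch; it wraps to a huge value instead. The replacement check therefore bounds the name against both the advertised block size and EROFS_NAME_LEN. A minimal sketch of that predicate (the helper name is hypothetical; nameoff, maxsize and EROFS_NAME_LEN are as used in the hunk above):

    /* true when an on-disk dirent name fits the block and the format limit */
    static bool example_dirent_name_ok(unsigned int nameoff,
                                       unsigned int de_namelen,
                                       unsigned int maxsize)
    {
        return nameoff + de_namelen <= maxsize &&
               de_namelen <= EROFS_NAME_LEN;
    }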
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c index 8715bc50e09c..31eef8395774 100644 --- a/drivers/staging/erofs/unzip_vle.c +++ b/drivers/staging/erofs/unzip_vle.c | |||
| @@ -972,6 +972,7 @@ repeat: | |||
| 972 | overlapped = false; | 972 | overlapped = false; |
| 973 | compressed_pages = grp->compressed_pages; | 973 | compressed_pages = grp->compressed_pages; |
| 974 | 974 | ||
| 975 | err = 0; | ||
| 975 | for (i = 0; i < clusterpages; ++i) { | 976 | for (i = 0; i < clusterpages; ++i) { |
| 976 | unsigned int pagenr; | 977 | unsigned int pagenr; |
| 977 | 978 | ||
| @@ -981,26 +982,39 @@ repeat: | |||
| 981 | DBG_BUGON(!page); | 982 | DBG_BUGON(!page); |
| 982 | DBG_BUGON(!page->mapping); | 983 | DBG_BUGON(!page->mapping); |
| 983 | 984 | ||
| 984 | if (z_erofs_is_stagingpage(page)) | 985 | if (!z_erofs_is_stagingpage(page)) { |
| 985 | continue; | ||
| 986 | #ifdef EROFS_FS_HAS_MANAGED_CACHE | 986 | #ifdef EROFS_FS_HAS_MANAGED_CACHE |
| 987 | if (page->mapping == MNGD_MAPPING(sbi)) { | 987 | if (page->mapping == MNGD_MAPPING(sbi)) { |
| 988 | DBG_BUGON(!PageUptodate(page)); | 988 | if (unlikely(!PageUptodate(page))) |
| 989 | continue; | 989 | err = -EIO; |
| 990 | } | 990 | continue; |
| 991 | } | ||
| 991 | #endif | 992 | #endif |
| 992 | 993 | ||
| 993 | /* only non-head page could be reused as a compressed page */ | 994 | /* |
| 994 | pagenr = z_erofs_onlinepage_index(page); | 995 | * only if non-head page can be selected |
| 996 | * for inplace decompression | ||
| 997 | */ | ||
| 998 | pagenr = z_erofs_onlinepage_index(page); | ||
| 995 | 999 | ||
| 996 | DBG_BUGON(pagenr >= nr_pages); | 1000 | DBG_BUGON(pagenr >= nr_pages); |
| 997 | DBG_BUGON(pages[pagenr]); | 1001 | DBG_BUGON(pages[pagenr]); |
| 998 | ++sparsemem_pages; | 1002 | ++sparsemem_pages; |
| 999 | pages[pagenr] = page; | 1003 | pages[pagenr] = page; |
| 1000 | 1004 | ||
| 1001 | overlapped = true; | 1005 | overlapped = true; |
| 1006 | } | ||
| 1007 | |||
| 1008 | /* PG_error needs checking for inplaced and staging pages */ | ||
| 1009 | if (unlikely(PageError(page))) { | ||
| 1010 | DBG_BUGON(PageUptodate(page)); | ||
| 1011 | err = -EIO; | ||
| 1012 | } | ||
| 1002 | } | 1013 | } |
| 1003 | 1014 | ||
| 1015 | if (unlikely(err)) | ||
| 1016 | goto out; | ||
| 1017 | |||
| 1004 | llen = (nr_pages << PAGE_SHIFT) - work->pageofs; | 1018 | llen = (nr_pages << PAGE_SHIFT) - work->pageofs; |
| 1005 | 1019 | ||
| 1006 | if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) { | 1020 | if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) { |
| @@ -1029,6 +1043,10 @@ repeat: | |||
| 1029 | 1043 | ||
| 1030 | skip_allocpage: | 1044 | skip_allocpage: |
| 1031 | vout = erofs_vmap(pages, nr_pages); | 1045 | vout = erofs_vmap(pages, nr_pages); |
| 1046 | if (!vout) { | ||
| 1047 | err = -ENOMEM; | ||
| 1048 | goto out; | ||
| 1049 | } | ||
| 1032 | 1050 | ||
| 1033 | err = z_erofs_vle_unzip_vmap(compressed_pages, | 1051 | err = z_erofs_vle_unzip_vmap(compressed_pages, |
| 1034 | clusterpages, vout, llen, work->pageofs, overlapped); | 1052 | clusterpages, vout, llen, work->pageofs, overlapped); |
| @@ -1194,6 +1212,7 @@ repeat: | |||
| 1194 | if (page->mapping == mc) { | 1212 | if (page->mapping == mc) { |
| 1195 | WRITE_ONCE(grp->compressed_pages[nr], page); | 1213 | WRITE_ONCE(grp->compressed_pages[nr], page); |
| 1196 | 1214 | ||
| 1215 | ClearPageError(page); | ||
| 1197 | if (!PagePrivate(page)) { | 1216 | if (!PagePrivate(page)) { |
| 1198 | /* | 1217 | /* |
| 1199 | * impossible to be !PagePrivate(page) for | 1218 | * impossible to be !PagePrivate(page) for |
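The reworked loop above turns page selection into a collect-errors-then-decide pass: err starts at 0, a managed-cache page that is not uptodate or any page carrying PG_error records -EIO, and only after the whole cluster has been walked does the code jump to out:. A schematic of that accumulate-then-bail pattern (page_is_usable() is a hypothetical stand-in for the checks in the hunk):

    int err = 0;
    unsigned int i;

    for (i = 0; i < clusterpages; ++i) {
        struct page *page = compressed_pages[i];

        if (!page_is_usable(page))  /* hypothetical: uptodate, no PG_error */
            err = -EIO;             /* remember the failure, keep scanning */
    }

    if (err)
        goto out;                   /* unwind once, after the full walk */

    /* only reached when every compressed page checked out */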
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c index 48b263a2731a..0daac9b984a8 100644 --- a/drivers/staging/erofs/unzip_vle_lz4.c +++ b/drivers/staging/erofs/unzip_vle_lz4.c | |||
| @@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, | |||
| 136 | 136 | ||
| 137 | nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE); | 137 | nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE); |
| 138 | 138 | ||
| 139 | if (clusterpages == 1) | 139 | if (clusterpages == 1) { |
| 140 | vin = kmap_atomic(compressed_pages[0]); | 140 | vin = kmap_atomic(compressed_pages[0]); |
| 141 | else | 141 | } else { |
| 142 | vin = erofs_vmap(compressed_pages, clusterpages); | 142 | vin = erofs_vmap(compressed_pages, clusterpages); |
| 143 | if (!vin) | ||
| 144 | return -ENOMEM; | ||
| 145 | } | ||
| 143 | 146 | ||
| 144 | preempt_disable(); | 147 | preempt_disable(); |
| 145 | vout = erofs_pcpubuf[smp_processor_id()].data; | 148 | vout = erofs_pcpubuf[smp_processor_id()].data; |
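erofs_vmap() can fail, so the multi-page branch now returns -ENOMEM instead of handing a NULL mapping to the decompressor; kmap_atomic() of a single page cannot fail, which is why that branch stays unchecked. A minimal sketch of the guarded map/unmap pairing, assuming the erofs_vmap()/erofs_vunmap() helpers from this driver:

    void *vin;

    if (clusterpages == 1) {
        vin = kmap_atomic(compressed_pages[0]);   /* single page: cannot fail */
    } else {
        vin = erofs_vmap(compressed_pages, clusterpages);
        if (!vin)
            return -ENOMEM;
    }

    /* ... decompress from vin ... */

    if (clusterpages == 1)
        kunmap_atomic(vin);
    else
        erofs_vunmap(vin, clusterpages);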
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts index b73385540216..250c15ace2a7 100644 --- a/drivers/staging/mt7621-dts/gbpc1.dts +++ b/drivers/staging/mt7621-dts/gbpc1.dts | |||
| @@ -117,22 +117,6 @@ | |||
| 117 | status = "okay"; | 117 | status = "okay"; |
| 118 | }; | 118 | }; |
| 119 | 119 | ||
| 120 | ðernet { | ||
| 121 | //mtd-mac-address = <&factory 0xe000>; | ||
| 122 | gmac1: mac@0 { | ||
| 123 | compatible = "mediatek,eth-mac"; | ||
| 124 | reg = <0>; | ||
| 125 | phy-handle = <&phy1>; | ||
| 126 | }; | ||
| 127 | |||
| 128 | mdio-bus { | ||
| 129 | phy1: ethernet-phy@1 { | ||
| 130 | reg = <1>; | ||
| 131 | phy-mode = "rgmii"; | ||
| 132 | }; | ||
| 133 | }; | ||
| 134 | }; | ||
| 135 | |||
| 136 | &pinctrl { | 120 | &pinctrl { |
| 137 | state_default: pinctrl0 { | 121 | state_default: pinctrl0 { |
| 138 | gpio { | 122 | gpio { |
| @@ -141,3 +125,16 @@ | |||
| 141 | }; | 125 | }; |
| 142 | }; | 126 | }; |
| 143 | }; | 127 | }; |
| 128 | |||
| 129 | &switch0 { | ||
| 130 | ports { | ||
| 131 | port@0 { | ||
| 132 | label = "ethblack"; | ||
| 133 | status = "ok"; | ||
| 134 | }; | ||
| 135 | port@4 { | ||
| 136 | label = "ethblue"; | ||
| 137 | status = "ok"; | ||
| 138 | }; | ||
| 139 | }; | ||
| 140 | }; | ||
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi index 6aff3680ce4b..17020e24abd2 100644 --- a/drivers/staging/mt7621-dts/mt7621.dtsi +++ b/drivers/staging/mt7621-dts/mt7621.dtsi | |||
| @@ -372,16 +372,83 @@ | |||
| 372 | 372 | ||
| 373 | mediatek,ethsys = <ðsys>; | 373 | mediatek,ethsys = <ðsys>; |
| 374 | 374 | ||
| 375 | mediatek,switch = <&gsw>; | ||
| 376 | 375 | ||
| 376 | gmac0: mac@0 { | ||
| 377 | compatible = "mediatek,eth-mac"; | ||
| 378 | reg = <0>; | ||
| 379 | phy-mode = "rgmii"; | ||
| 380 | fixed-link { | ||
| 381 | speed = <1000>; | ||
| 382 | full-duplex; | ||
| 383 | pause; | ||
| 384 | }; | ||
| 385 | }; | ||
| 386 | gmac1: mac@1 { | ||
| 387 | compatible = "mediatek,eth-mac"; | ||
| 388 | reg = <1>; | ||
| 389 | status = "off"; | ||
| 390 | phy-mode = "rgmii"; | ||
| 391 | phy-handle = <&phy5>; | ||
| 392 | }; | ||
| 377 | mdio-bus { | 393 | mdio-bus { |
| 378 | #address-cells = <1>; | 394 | #address-cells = <1>; |
| 379 | #size-cells = <0>; | 395 | #size-cells = <0>; |
| 380 | 396 | ||
| 381 | phy1f: ethernet-phy@1f { | 397 | phy5: ethernet-phy@5 { |
| 382 | reg = <0x1f>; | 398 | reg = <5>; |
| 383 | phy-mode = "rgmii"; | 399 | phy-mode = "rgmii"; |
| 384 | }; | 400 | }; |
| 401 | |||
| 402 | switch0: switch0@0 { | ||
| 403 | compatible = "mediatek,mt7621"; | ||
| 404 | #address-cells = <1>; | ||
| 405 | #size-cells = <0>; | ||
| 406 | reg = <0>; | ||
| 407 | mediatek,mcm; | ||
| 408 | resets = <&rstctrl 2>; | ||
| 409 | reset-names = "mcm"; | ||
| 410 | |||
| 411 | ports { | ||
| 412 | #address-cells = <1>; | ||
| 413 | #size-cells = <0>; | ||
| 414 | reg = <0>; | ||
| 415 | port@0 { | ||
| 416 | status = "off"; | ||
| 417 | reg = <0>; | ||
| 418 | label = "lan0"; | ||
| 419 | }; | ||
| 420 | port@1 { | ||
| 421 | status = "off"; | ||
| 422 | reg = <1>; | ||
| 423 | label = "lan1"; | ||
| 424 | }; | ||
| 425 | port@2 { | ||
| 426 | status = "off"; | ||
| 427 | reg = <2>; | ||
| 428 | label = "lan2"; | ||
| 429 | }; | ||
| 430 | port@3 { | ||
| 431 | status = "off"; | ||
| 432 | reg = <3>; | ||
| 433 | label = "lan3"; | ||
| 434 | }; | ||
| 435 | port@4 { | ||
| 436 | status = "off"; | ||
| 437 | reg = <4>; | ||
| 438 | label = "lan4"; | ||
| 439 | }; | ||
| 440 | port@6 { | ||
| 441 | reg = <6>; | ||
| 442 | label = "cpu"; | ||
| 443 | ethernet = <&gmac0>; | ||
| 444 | phy-mode = "trgmii"; | ||
| 445 | fixed-link { | ||
| 446 | speed = <1000>; | ||
| 447 | full-duplex; | ||
| 448 | }; | ||
| 449 | }; | ||
| 450 | }; | ||
| 451 | }; | ||
| 385 | }; | 452 | }; |
| 386 | }; | 453 | }; |
| 387 | 454 | ||
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt deleted file mode 100644 index 596b38552697..000000000000 --- a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt +++ /dev/null | |||
| @@ -1,48 +0,0 @@ | |||
| 1 | Mediatek Gigabit Switch | ||
| 2 | ======================= | ||
| 3 | |||
| 4 | The mediatek gigabit switch can be found on Mediatek SoCs. | ||
| 5 | |||
| 6 | Required properties: | ||
| 7 | - compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw", | ||
| 8 | "mediatek,mt7623-gsw" | ||
| 9 | - reg: Address and length of the register set for the device | ||
| 10 | - interrupts: Should contain the gigabit switches interrupt | ||
| 11 | |||
| 12 | |||
| 13 | Additional required properties for ARM based SoCs: | ||
| 14 | - mediatek,reset-pin: phandle describing the reset GPIO | ||
| 15 | - clocks: the clocks used by the switch | ||
| 16 | - clock-names: the names of the clocks listed in the clocks property | ||
| 17 | these should be "trgpll", "esw", "gp2", "gp1" | ||
| 18 | - mt7530-supply: the phandle of the regulator used to power the switch | ||
| 19 | - mediatek,pctl-regmap: phandle to the port control regmap. this is used to | ||
| 20 | setup the drive current | ||
| 21 | |||
| 22 | |||
| 23 | Optional properties: | ||
| 24 | - interrupt-parent: Should be the phandle for the interrupt controller | ||
| 25 | that services interrupts for this device | ||
| 26 | |||
| 27 | Example: | ||
| 28 | |||
| 29 | gsw: switch@1b100000 { | ||
| 30 | compatible = "mediatek,mt7623-gsw"; | ||
| 31 | reg = <0 0x1b110000 0 0x300000>; | ||
| 32 | |||
| 33 | interrupt-parent = <&pio>; | ||
| 34 | interrupts = <168 IRQ_TYPE_EDGE_RISING>; | ||
| 35 | |||
| 36 | clocks = <&apmixedsys CLK_APMIXED_TRGPLL>, | ||
| 37 | <ðsys CLK_ETHSYS_ESW>, | ||
| 38 | <ðsys CLK_ETHSYS_GP2>, | ||
| 39 | <ðsys CLK_ETHSYS_GP1>; | ||
| 40 | clock-names = "trgpll", "esw", "gp2", "gp1"; | ||
| 41 | |||
| 42 | mt7530-supply = <&mt6323_vpa_reg>; | ||
| 43 | |||
| 44 | mediatek,pctl-regmap = <&syscfg_pctl_a>; | ||
| 45 | mediatek,reset-pin = <&pio 15 0>; | ||
| 46 | |||
| 47 | status = "okay"; | ||
| 48 | }; | ||
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig deleted file mode 100644 index 44ea86c7a96c..000000000000 --- a/drivers/staging/mt7621-eth/Kconfig +++ /dev/null | |||
| @@ -1,39 +0,0 @@ | |||
| 1 | config NET_VENDOR_MEDIATEK_STAGING | ||
| 2 | bool "MediaTek ethernet driver - staging version" | ||
| 3 | depends on RALINK | ||
| 4 | ---help--- | ||
| 5 | If you have an MT7621 Mediatek SoC with ethernet, say Y. | ||
| 6 | |||
| 7 | if NET_VENDOR_MEDIATEK_STAGING | ||
| 8 | choice | ||
| 9 | prompt "MAC type" | ||
| 10 | |||
| 11 | config NET_MEDIATEK_MT7621 | ||
| 12 | bool "MT7621" | ||
| 13 | depends on MIPS && SOC_MT7621 | ||
| 14 | |||
| 15 | endchoice | ||
| 16 | |||
| 17 | config NET_MEDIATEK_SOC_STAGING | ||
| 18 | tristate "MediaTek SoC Gigabit Ethernet support" | ||
| 19 | depends on NET_VENDOR_MEDIATEK_STAGING | ||
| 20 | select PHYLIB | ||
| 21 | ---help--- | ||
| 22 | This driver supports the gigabit ethernet MACs in the | ||
| 23 | MediaTek SoC family. | ||
| 24 | |||
| 25 | config NET_MEDIATEK_MDIO | ||
| 26 | def_bool NET_MEDIATEK_SOC_STAGING | ||
| 27 | depends on NET_MEDIATEK_MT7621 | ||
| 28 | select PHYLIB | ||
| 29 | |||
| 30 | config NET_MEDIATEK_MDIO_MT7620 | ||
| 31 | def_bool NET_MEDIATEK_SOC_STAGING | ||
| 32 | depends on NET_MEDIATEK_MT7621 | ||
| 33 | select NET_MEDIATEK_MDIO | ||
| 34 | |||
| 35 | config NET_MEDIATEK_GSW_MT7621 | ||
| 36 | def_tristate NET_MEDIATEK_SOC_STAGING | ||
| 37 | depends on NET_MEDIATEK_MT7621 | ||
| 38 | |||
| 39 | endif #NET_VENDOR_MEDIATEK_STAGING | ||
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile deleted file mode 100644 index 018bcc3596b3..000000000000 --- a/drivers/staging/mt7621-eth/Makefile +++ /dev/null | |||
| @@ -1,14 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Makefile for the Ralink SoCs built-in ethernet macs | ||
| 3 | # | ||
| 4 | |||
| 5 | mtk-eth-soc-y += mtk_eth_soc.o ethtool.o | ||
| 6 | |||
| 7 | mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO) += mdio.o | ||
| 8 | mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o | ||
| 9 | |||
| 10 | mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621) += soc_mt7621.o | ||
| 11 | |||
| 12 | obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621) += gsw_mt7621.o | ||
| 13 | |||
| 14 | obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mtk-eth-soc.o | ||
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO deleted file mode 100644 index f9e47d4b4cd4..000000000000 --- a/drivers/staging/mt7621-eth/TODO +++ /dev/null | |||
| @@ -1,13 +0,0 @@ | |||
| 1 | |||
| 2 | - verify devicetree documentation is consistent with code | ||
| 3 | - fix ethtool - currently doesn't return valid data. | ||
| 4 | - general code review and clean up | ||
| 5 | - add support for second MAC on mt7621 | ||
| 6 | - convert gsw code to use switchdev interfaces | ||
| 7 | - md7620_mmi_write etc should probably be wrapped | ||
| 8 | in a regmap abstraction. | ||
| 9 | - Get soc_mt7621 to work with QDMA TX if possible. | ||
| 10 | - Ensure phys are correctly configured when a cable | ||
| 11 | is plugged in. | ||
| 12 | |||
| 13 | Cc: NeilBrown <neil@brown.name> | ||
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c deleted file mode 100644 index 8c4228e2c987..000000000000 --- a/drivers/staging/mt7621-eth/ethtool.c +++ /dev/null | |||
| @@ -1,250 +0,0 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; version 2 of the License | ||
| 5 | * | ||
| 6 | * This program is distributed in the hope that it will be useful, | ||
| 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 9 | * GNU General Public License for more details. | ||
| 10 | * | ||
| 11 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 12 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 13 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include "mtk_eth_soc.h" | ||
| 17 | #include "ethtool.h" | ||
| 18 | |||
| 19 | struct mtk_stat { | ||
| 20 | char name[ETH_GSTRING_LEN]; | ||
| 21 | unsigned int idx; | ||
| 22 | }; | ||
| 23 | |||
| 24 | #define MTK_HW_STAT(stat) { \ | ||
| 25 | .name = #stat, \ | ||
| 26 | .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \ | ||
| 27 | } | ||
| 28 | |||
| 29 | static const struct mtk_stat mtk_ethtool_hw_stats[] = { | ||
| 30 | MTK_HW_STAT(tx_bytes), | ||
| 31 | MTK_HW_STAT(tx_packets), | ||
| 32 | MTK_HW_STAT(tx_skip), | ||
| 33 | MTK_HW_STAT(tx_collisions), | ||
| 34 | MTK_HW_STAT(rx_bytes), | ||
| 35 | MTK_HW_STAT(rx_packets), | ||
| 36 | MTK_HW_STAT(rx_overflow), | ||
| 37 | MTK_HW_STAT(rx_fcs_errors), | ||
| 38 | MTK_HW_STAT(rx_short_errors), | ||
| 39 | MTK_HW_STAT(rx_long_errors), | ||
| 40 | MTK_HW_STAT(rx_checksum_errors), | ||
| 41 | MTK_HW_STAT(rx_flow_control_packets), | ||
| 42 | }; | ||
| 43 | |||
| 44 | #define MTK_HW_STATS_LEN ARRAY_SIZE(mtk_ethtool_hw_stats) | ||
| 45 | |||
| 46 | static int mtk_get_link_ksettings(struct net_device *dev, | ||
| 47 | struct ethtool_link_ksettings *cmd) | ||
| 48 | { | ||
| 49 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 50 | int err; | ||
| 51 | |||
| 52 | if (!mac->phy_dev) | ||
| 53 | return -ENODEV; | ||
| 54 | |||
| 55 | if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) { | ||
| 56 | err = phy_read_status(mac->phy_dev); | ||
| 57 | if (err) | ||
| 58 | return -ENODEV; | ||
| 59 | } | ||
| 60 | |||
| 61 | phy_ethtool_ksettings_get(mac->phy_dev, cmd); | ||
| 62 | return 0; | ||
| 63 | } | ||
| 64 | |||
| 65 | static int mtk_set_link_ksettings(struct net_device *dev, | ||
| 66 | const struct ethtool_link_ksettings *cmd) | ||
| 67 | { | ||
| 68 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 69 | |||
| 70 | if (!mac->phy_dev) | ||
| 71 | return -ENODEV; | ||
| 72 | |||
| 73 | if (cmd->base.phy_address != mac->phy_dev->mdio.addr) { | ||
| 74 | if (mac->hw->phy->phy_node[cmd->base.phy_address]) { | ||
| 75 | mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address]; | ||
| 76 | mac->phy_flags = MTK_PHY_FLAG_PORT; | ||
| 77 | } else if (mac->hw->mii_bus) { | ||
| 78 | mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus, | ||
| 79 | cmd->base.phy_address); | ||
| 80 | if (!mac->phy_dev) | ||
| 81 | return -ENODEV; | ||
| 82 | mac->phy_flags = MTK_PHY_FLAG_ATTACH; | ||
| 83 | } else { | ||
| 84 | return -ENODEV; | ||
| 85 | } | ||
| 86 | } | ||
| 87 | |||
| 88 | return phy_ethtool_ksettings_set(mac->phy_dev, cmd); | ||
| 89 | } | ||
| 90 | |||
| 91 | static void mtk_get_drvinfo(struct net_device *dev, | ||
| 92 | struct ethtool_drvinfo *info) | ||
| 93 | { | ||
| 94 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 95 | struct mtk_soc_data *soc = mac->hw->soc; | ||
| 96 | |||
| 97 | strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); | ||
| 98 | strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); | ||
| 99 | |||
| 100 | if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) | ||
| 101 | info->n_stats = MTK_HW_STATS_LEN; | ||
| 102 | } | ||
| 103 | |||
| 104 | static u32 mtk_get_msglevel(struct net_device *dev) | ||
| 105 | { | ||
| 106 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 107 | |||
| 108 | return mac->hw->msg_enable; | ||
| 109 | } | ||
| 110 | |||
| 111 | static void mtk_set_msglevel(struct net_device *dev, u32 value) | ||
| 112 | { | ||
| 113 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 114 | |||
| 115 | mac->hw->msg_enable = value; | ||
| 116 | } | ||
| 117 | |||
| 118 | static int mtk_nway_reset(struct net_device *dev) | ||
| 119 | { | ||
| 120 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 121 | |||
| 122 | if (!mac->phy_dev) | ||
| 123 | return -EOPNOTSUPP; | ||
| 124 | |||
| 125 | return genphy_restart_aneg(mac->phy_dev); | ||
| 126 | } | ||
| 127 | |||
| 128 | static u32 mtk_get_link(struct net_device *dev) | ||
| 129 | { | ||
| 130 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 131 | int err; | ||
| 132 | |||
| 133 | if (!mac->phy_dev) | ||
| 134 | goto out_get_link; | ||
| 135 | |||
| 136 | if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) { | ||
| 137 | err = genphy_update_link(mac->phy_dev); | ||
| 138 | if (err) | ||
| 139 | goto out_get_link; | ||
| 140 | } | ||
| 141 | |||
| 142 | return mac->phy_dev->link; | ||
| 143 | |||
| 144 | out_get_link: | ||
| 145 | return ethtool_op_get_link(dev); | ||
| 146 | } | ||
| 147 | |||
| 148 | static int mtk_set_ringparam(struct net_device *dev, | ||
| 149 | struct ethtool_ringparam *ring) | ||
| 150 | { | ||
| 151 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 152 | |||
| 153 | if ((ring->tx_pending < 2) || | ||
| 154 | (ring->rx_pending < 2) || | ||
| 155 | (ring->rx_pending > mac->hw->soc->dma_ring_size) || | ||
| 156 | (ring->tx_pending > mac->hw->soc->dma_ring_size)) | ||
| 157 | return -EINVAL; | ||
| 158 | |||
| 159 | dev->netdev_ops->ndo_stop(dev); | ||
| 160 | |||
| 161 | mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1); | ||
| 162 | mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1); | ||
| 163 | |||
| 164 | return dev->netdev_ops->ndo_open(dev); | ||
| 165 | } | ||
| 166 | |||
| 167 | static void mtk_get_ringparam(struct net_device *dev, | ||
| 168 | struct ethtool_ringparam *ring) | ||
| 169 | { | ||
| 170 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 171 | |||
| 172 | ring->rx_max_pending = mac->hw->soc->dma_ring_size; | ||
| 173 | ring->tx_max_pending = mac->hw->soc->dma_ring_size; | ||
| 174 | ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size; | ||
| 175 | ring->tx_pending = mac->hw->tx_ring.tx_ring_size; | ||
| 176 | } | ||
| 177 | |||
| 178 | static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
| 179 | { | ||
| 180 | int i; | ||
| 181 | |||
| 182 | switch (stringset) { | ||
| 183 | case ETH_SS_STATS: | ||
| 184 | for (i = 0; i < MTK_HW_STATS_LEN; i++) { | ||
| 185 | memcpy(data, mtk_ethtool_hw_stats[i].name, | ||
| 186 | ETH_GSTRING_LEN); | ||
| 187 | data += ETH_GSTRING_LEN; | ||
| 188 | } | ||
| 189 | break; | ||
| 190 | } | ||
| 191 | } | ||
| 192 | |||
| 193 | static int mtk_get_sset_count(struct net_device *dev, int sset) | ||
| 194 | { | ||
| 195 | switch (sset) { | ||
| 196 | case ETH_SS_STATS: | ||
| 197 | return MTK_HW_STATS_LEN; | ||
| 198 | default: | ||
| 199 | return -EOPNOTSUPP; | ||
| 200 | } | ||
| 201 | } | ||
| 202 | |||
| 203 | static void mtk_get_ethtool_stats(struct net_device *dev, | ||
| 204 | struct ethtool_stats *stats, u64 *data) | ||
| 205 | { | ||
| 206 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 207 | struct mtk_hw_stats *hwstats = mac->hw_stats; | ||
| 208 | unsigned int start; | ||
| 209 | int i; | ||
| 210 | |||
| 211 | if (netif_running(dev) && netif_device_present(dev)) { | ||
| 212 | if (spin_trylock(&hwstats->stats_lock)) { | ||
| 213 | mtk_stats_update_mac(mac); | ||
| 214 | spin_unlock(&hwstats->stats_lock); | ||
| 215 | } | ||
| 216 | } | ||
| 217 | |||
| 218 | do { | ||
| 219 | start = u64_stats_fetch_begin_irq(&hwstats->syncp); | ||
| 220 | for (i = 0; i < MTK_HW_STATS_LEN; i++) | ||
| 221 | data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx]; | ||
| 222 | |||
| 223 | } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); | ||
| 224 | } | ||
| 225 | |||
| 226 | static struct ethtool_ops mtk_ethtool_ops = { | ||
| 227 | .get_link_ksettings = mtk_get_link_ksettings, | ||
| 228 | .set_link_ksettings = mtk_set_link_ksettings, | ||
| 229 | .get_drvinfo = mtk_get_drvinfo, | ||
| 230 | .get_msglevel = mtk_get_msglevel, | ||
| 231 | .set_msglevel = mtk_set_msglevel, | ||
| 232 | .nway_reset = mtk_nway_reset, | ||
| 233 | .get_link = mtk_get_link, | ||
| 234 | .set_ringparam = mtk_set_ringparam, | ||
| 235 | .get_ringparam = mtk_get_ringparam, | ||
| 236 | }; | ||
| 237 | |||
| 238 | void mtk_set_ethtool_ops(struct net_device *netdev) | ||
| 239 | { | ||
| 240 | struct mtk_mac *mac = netdev_priv(netdev); | ||
| 241 | struct mtk_soc_data *soc = mac->hw->soc; | ||
| 242 | |||
| 243 | if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) { | ||
| 244 | mtk_ethtool_ops.get_strings = mtk_get_strings; | ||
| 245 | mtk_ethtool_ops.get_sset_count = mtk_get_sset_count; | ||
| 246 | mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats; | ||
| 247 | } | ||
| 248 | |||
| 249 | netdev->ethtool_ops = &mtk_ethtool_ops; | ||
| 250 | } | ||
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h deleted file mode 100644 index 0071469aea6c..000000000000 --- a/drivers/staging/mt7621-eth/ethtool.h +++ /dev/null | |||
| @@ -1,15 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 4 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 5 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef MTK_ETHTOOL_H | ||
| 9 | #define MTK_ETHTOOL_H | ||
| 10 | |||
| 11 | #include <linux/ethtool.h> | ||
| 12 | |||
| 13 | void mtk_set_ethtool_ops(struct net_device *netdev); | ||
| 14 | |||
| 15 | #endif /* MTK_ETHTOOL_H */ | ||
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h deleted file mode 100644 index 70f7e5481952..000000000000 --- a/drivers/staging/mt7621-eth/gsw_mt7620.h +++ /dev/null | |||
| @@ -1,277 +0,0 @@ | |||
| 1 | /* This program is free software; you can redistribute it and/or modify | ||
| 2 | * it under the terms of the GNU General Public License as published by | ||
| 3 | * the Free Software Foundation; version 2 of the License | ||
| 4 | * | ||
| 5 | * This program is distributed in the hope that it will be useful, | ||
| 6 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 7 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 8 | * GNU General Public License for more details. | ||
| 9 | * | ||
| 10 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 11 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 12 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifndef _RALINK_GSW_MT7620_H__ | ||
| 16 | #define _RALINK_GSW_MT7620_H__ | ||
| 17 | |||
| 18 | #define GSW_REG_PHY_TIMEOUT (5 * HZ) | ||
| 19 | |||
| 20 | #define MT7620_GSW_REG_PIAC 0x0004 | ||
| 21 | |||
| 22 | #define GSW_NUM_VLANS 16 | ||
| 23 | #define GSW_NUM_VIDS 4096 | ||
| 24 | #define GSW_NUM_PORTS 7 | ||
| 25 | #define GSW_PORT6 6 | ||
| 26 | |||
| 27 | #define GSW_MDIO_ACCESS BIT(31) | ||
| 28 | #define GSW_MDIO_READ BIT(19) | ||
| 29 | #define GSW_MDIO_WRITE BIT(18) | ||
| 30 | #define GSW_MDIO_START BIT(16) | ||
| 31 | #define GSW_MDIO_ADDR_SHIFT 20 | ||
| 32 | #define GSW_MDIO_REG_SHIFT 25 | ||
| 33 | |||
| 34 | #define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100)) | ||
| 35 | #define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100)) | ||
| 36 | #define GSW_REG_SMACCR0 0x3fE4 | ||
| 37 | #define GSW_REG_SMACCR1 0x3fE8 | ||
| 38 | #define GSW_REG_CKGCR 0x3ff0 | ||
| 39 | |||
| 40 | #define GSW_REG_IMR 0x7008 | ||
| 41 | #define GSW_REG_ISR 0x700c | ||
| 42 | #define GSW_REG_GPC1 0x7014 | ||
| 43 | |||
| 44 | #define SYSC_REG_CHIP_REV_ID 0x0c | ||
| 45 | #define SYSC_REG_CFG 0x10 | ||
| 46 | #define SYSC_REG_CFG1 0x14 | ||
| 47 | #define RST_CTRL_MCM BIT(2) | ||
| 48 | #define SYSC_PAD_RGMII2_MDIO 0x58 | ||
| 49 | #define SYSC_GPIO_MODE 0x60 | ||
| 50 | |||
| 51 | #define PORT_IRQ_ST_CHG 0x7f | ||
| 52 | |||
| 53 | #define MT7621_ESW_PHY_POLLING 0x0000 | ||
| 54 | #define MT7620_ESW_PHY_POLLING 0x7000 | ||
| 55 | |||
| 56 | #define PMCR_IPG BIT(18) | ||
| 57 | #define PMCR_MAC_MODE BIT(16) | ||
| 58 | #define PMCR_FORCE BIT(15) | ||
| 59 | #define PMCR_TX_EN BIT(14) | ||
| 60 | #define PMCR_RX_EN BIT(13) | ||
| 61 | #define PMCR_BACKOFF BIT(9) | ||
| 62 | #define PMCR_BACKPRES BIT(8) | ||
| 63 | #define PMCR_RX_FC BIT(5) | ||
| 64 | #define PMCR_TX_FC BIT(4) | ||
| 65 | #define PMCR_SPEED(_x) (_x << 2) | ||
| 66 | #define PMCR_DUPLEX BIT(1) | ||
| 67 | #define PMCR_LINK BIT(0) | ||
| 68 | |||
| 69 | #define PHY_AN_EN BIT(31) | ||
| 70 | #define PHY_PRE_EN BIT(30) | ||
| 71 | #define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24) | ||
| 72 | |||
| 73 | /* ethernet subsystem config register */ | ||
| 74 | #define ETHSYS_SYSCFG0 0x14 | ||
| 75 | /* ethernet subsystem clock register */ | ||
| 76 | #define ETHSYS_CLKCFG0 0x2c | ||
| 77 | #define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11) | ||
| 78 | |||
| 79 | /* p5 RGMII wrapper TX clock control register */ | ||
| 80 | #define MT7530_P5RGMIITXCR 0x7b04 | ||
| 81 | /* p5 RGMII wrapper RX clock control register */ | ||
| 82 | #define MT7530_P5RGMIIRXCR 0x7b00 | ||
| 83 | /* TRGMII TDX ODT registers */ | ||
| 84 | #define MT7530_TRGMII_TD0_ODT 0x7a54 | ||
| 85 | #define MT7530_TRGMII_TD1_ODT 0x7a5c | ||
| 86 | #define MT7530_TRGMII_TD2_ODT 0x7a64 | ||
| 87 | #define MT7530_TRGMII_TD3_ODT 0x7a6c | ||
| 88 | #define MT7530_TRGMII_TD4_ODT 0x7a74 | ||
| 89 | #define MT7530_TRGMII_TD5_ODT 0x7a7c | ||
| 90 | /* TRGMII TCK ctrl register */ | ||
| 91 | #define MT7530_TRGMII_TCK_CTRL 0x7a78 | ||
| 92 | /* TRGMII Tx ctrl register */ | ||
| 93 | #define MT7530_TRGMII_TXCTRL 0x7a40 | ||
| 94 | /* port 6 extended control register */ | ||
| 95 | #define MT7530_P6ECR 0x7830 | ||
| 96 | /* IO driver control register */ | ||
| 97 | #define MT7530_IO_DRV_CR 0x7810 | ||
| 98 | /* top signal control register */ | ||
| 99 | #define MT7530_TOP_SIG_CTRL 0x7808 | ||
| 100 | /* modified hwtrap register */ | ||
| 101 | #define MT7530_MHWTRAP 0x7804 | ||
| 102 | /* hwtrap status register */ | ||
| 103 | #define MT7530_HWTRAP 0x7800 | ||
| 104 | /* status interrupt register */ | ||
| 105 | #define MT7530_SYS_INT_STS 0x700c | ||
| 106 | /* system nterrupt register */ | ||
| 107 | #define MT7530_SYS_INT_EN 0x7008 | ||
| 108 | /* system control register */ | ||
| 109 | #define MT7530_SYS_CTRL 0x7000 | ||
| 110 | /* port MAC status register */ | ||
| 111 | #define MT7530_PMSR_P(x) (0x3008 + (x * 0x100)) | ||
| 112 | /* port MAC control register */ | ||
| 113 | #define MT7530_PMCR_P(x) (0x3000 + (x * 0x100)) | ||
| 114 | |||
| 115 | #define MT7621_XTAL_SHIFT 6 | ||
| 116 | #define MT7621_XTAL_MASK 0x7 | ||
| 117 | #define MT7621_XTAL_25 6 | ||
| 118 | #define MT7621_XTAL_40 3 | ||
| 119 | #define MT7621_MDIO_DRV_MASK (3 << 4) | ||
| 120 | #define MT7621_GE1_MODE_MASK (3 << 12) | ||
| 121 | |||
| 122 | #define TRGMII_TXCTRL_TXC_INV BIT(30) | ||
| 123 | #define P6ECR_INTF_MODE_RGMII BIT(1) | ||
| 124 | #define P5RGMIIRXCR_C_ALIGN BIT(8) | ||
| 125 | #define P5RGMIIRXCR_DELAY_2 BIT(1) | ||
| 126 | #define P5RGMIITXCR_DELAY_2 (BIT(8) | BIT(2)) | ||
| 127 | |||
| 128 | /* TOP_SIG_CTRL bits */ | ||
| 129 | #define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16)) | ||
| 130 | |||
| 131 | /* MHWTRAP bits */ | ||
| 132 | #define MHWTRAP_MANUAL BIT(16) | ||
| 133 | #define MHWTRAP_P5_MAC_SEL BIT(13) | ||
| 134 | #define MHWTRAP_P6_DIS BIT(8) | ||
| 135 | #define MHWTRAP_P5_RGMII_MODE BIT(7) | ||
| 136 | #define MHWTRAP_P5_DIS BIT(6) | ||
| 137 | #define MHWTRAP_PHY_ACCESS BIT(5) | ||
| 138 | |||
| 139 | /* HWTRAP bits */ | ||
| 140 | #define HWTRAP_XTAL_SHIFT 9 | ||
| 141 | #define HWTRAP_XTAL_MASK 0x3 | ||
| 142 | |||
| 143 | /* SYS_CTRL bits */ | ||
| 144 | #define SYS_CTRL_SW_RST BIT(1) | ||
| 145 | #define SYS_CTRL_REG_RST BIT(0) | ||
| 146 | |||
| 147 | /* PMCR bits */ | ||
| 148 | #define PMCR_IFG_XMIT_96 BIT(18) | ||
| 149 | #define PMCR_MAC_MODE BIT(16) | ||
| 150 | #define PMCR_FORCE_MODE BIT(15) | ||
| 151 | #define PMCR_TX_EN BIT(14) | ||
| 152 | #define PMCR_RX_EN BIT(13) | ||
| 153 | #define PMCR_BACK_PRES_EN BIT(9) | ||
| 154 | #define PMCR_BACKOFF_EN BIT(8) | ||
| 155 | #define PMCR_TX_FC_EN BIT(5) | ||
| 156 | #define PMCR_RX_FC_EN BIT(4) | ||
| 157 | #define PMCR_FORCE_SPEED_1000 BIT(3) | ||
| 158 | #define PMCR_FORCE_FDX BIT(1) | ||
| 159 | #define PMCR_FORCE_LNK BIT(0) | ||
| 160 | #define PMCR_FIXED_LINK (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \ | ||
| 161 | PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \ | ||
| 162 | PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \ | ||
| 163 | PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \ | ||
| 164 | PMCR_FORCE_LNK) | ||
| 165 | |||
| 166 | #define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \ | ||
| 167 | PMCR_TX_FC_EN | PMCR_RX_FC_EN) | ||
| 168 | |||
| 169 | /* TRGMII control registers */ | ||
| 170 | #define GSW_INTF_MODE 0x390 | ||
| 171 | #define GSW_TRGMII_TD0_ODT 0x354 | ||
| 172 | #define GSW_TRGMII_TD1_ODT 0x35c | ||
| 173 | #define GSW_TRGMII_TD2_ODT 0x364 | ||
| 174 | #define GSW_TRGMII_TD3_ODT 0x36c | ||
| 175 | #define GSW_TRGMII_TXCTL_ODT 0x374 | ||
| 176 | #define GSW_TRGMII_TCK_ODT 0x37c | ||
| 177 | #define GSW_TRGMII_RCK_CTRL 0x300 | ||
| 178 | |||
| 179 | #define INTF_MODE_TRGMII BIT(1) | ||
| 180 | #define TRGMII_RCK_CTRL_RX_RST BIT(31) | ||
| 181 | |||
| 182 | /* Mac control registers */ | ||
| 183 | #define MTK_MAC_P2_MCR 0x200 | ||
| 184 | #define MTK_MAC_P1_MCR 0x100 | ||
| 185 | |||
| 186 | #define MAC_MCR_MAX_RX_2K BIT(29) | ||
| 187 | #define MAC_MCR_IPG_CFG (BIT(18) | BIT(16)) | ||
| 188 | #define MAC_MCR_FORCE_MODE BIT(15) | ||
| 189 | #define MAC_MCR_TX_EN BIT(14) | ||
| 190 | #define MAC_MCR_RX_EN BIT(13) | ||
| 191 | #define MAC_MCR_BACKOFF_EN BIT(9) | ||
| 192 | #define MAC_MCR_BACKPR_EN BIT(8) | ||
| 193 | #define MAC_MCR_FORCE_RX_FC BIT(5) | ||
| 194 | #define MAC_MCR_FORCE_TX_FC BIT(4) | ||
| 195 | #define MAC_MCR_SPEED_1000 BIT(3) | ||
| 196 | #define MAC_MCR_FORCE_DPX BIT(1) | ||
| 197 | #define MAC_MCR_FORCE_LINK BIT(0) | ||
| 198 | #define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \ | ||
| 199 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \ | ||
| 200 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \ | ||
| 201 | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \ | ||
| 202 | MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \ | ||
| 203 | MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK) | ||
| 204 | #define MAC_MCR_FIXED_LINK_FC (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \ | ||
| 205 | MAC_MCR_FIXED_LINK) | ||
| 206 | |||
| 207 | /* possible XTAL speed */ | ||
| 208 | #define MT7623_XTAL_40 0 | ||
| 209 | #define MT7623_XTAL_20 1 | ||
| 210 | #define MT7623_XTAL_25 3 | ||
| 211 | |||
| 212 | /* GPIO port control registers */ | ||
| 213 | #define GPIO_OD33_CTRL8 0x4c0 | ||
| 214 | #define GPIO_BIAS_CTRL 0xed0 | ||
| 215 | #define GPIO_DRV_SEL10 0xf00 | ||
| 216 | |||
| 217 | /* on MT7620 the functio of port 4 can be software configured */ | ||
| 218 | enum { | ||
| 219 | PORT4_EPHY = 0, | ||
| 220 | PORT4_EXT, | ||
| 221 | }; | ||
| 222 | |||
| 223 | /* struct mt7620_gsw - the structure that holds the SoC specific data | ||
| 224 | * @dev: The Device struct | ||
| 225 | * @base: The base address | ||
| 226 | * @piac_offset: The PIAC base may change depending on SoC | ||
| 227 | * @irq: The IRQ we are using | ||
| 228 | * @port4: The port4 mode on MT7620 | ||
| 229 | * @autopoll: Is MDIO autopolling enabled | ||
| 230 | * @ethsys: The ethsys register map | ||
| 231 | * @pctl: The pin control register map | ||
| 232 | * @clk_gsw: The switch clock | ||
| 233 | * @clk_gp1: The gmac1 clock | ||
| 234 | * @clk_gp2: The gmac2 clock | ||
| 235 | * @clk_trgpll: The trgmii pll clock | ||
| 236 | */ | ||
| 237 | struct mt7620_gsw { | ||
| 238 | struct device *dev; | ||
| 239 | void __iomem *base; | ||
| 240 | u32 piac_offset; | ||
| 241 | int irq; | ||
| 242 | int port4; | ||
| 243 | unsigned long int autopoll; | ||
| 244 | |||
| 245 | struct regmap *ethsys; | ||
| 246 | struct regmap *pctl; | ||
| 247 | |||
| 248 | struct clk *clk_gsw; | ||
| 249 | struct clk *clk_gp1; | ||
| 250 | struct clk *clk_gp2; | ||
| 251 | struct clk *clk_trgpll; | ||
| 252 | }; | ||
| 253 | |||
| 254 | /* switch register I/O wrappers */ | ||
| 255 | void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg); | ||
| 256 | u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg); | ||
| 257 | |||
| 258 | /* the callback used by the driver core to bringup the switch */ | ||
| 259 | int mtk_gsw_init(struct mtk_eth *eth); | ||
| 260 | |||
| 261 | /* MDIO access wrappers */ | ||
| 262 | int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); | ||
| 263 | int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg); | ||
| 264 | void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port); | ||
| 265 | int mt7620_has_carrier(struct mtk_eth *eth); | ||
| 266 | void mt7620_print_link_state(struct mtk_eth *eth, int port, int link, | ||
| 267 | int speed, int duplex); | ||
| 268 | void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val); | ||
| 269 | u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg); | ||
| 270 | void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg); | ||
| 271 | |||
| 272 | u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr, | ||
| 273 | u32 phy_register, u32 write_data); | ||
| 274 | u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg); | ||
| 275 | void mt7620_handle_carrier(struct mtk_eth *eth); | ||
| 276 | |||
| 277 | #endif | ||
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c deleted file mode 100644 index 53767b17bad9..000000000000 --- a/drivers/staging/mt7621-eth/gsw_mt7621.c +++ /dev/null | |||
| @@ -1,297 +0,0 @@ | |||
| 1 | /* This program is free software; you can redistribute it and/or modify | ||
| 2 | * it under the terms of the GNU General Public License as published by | ||
| 3 | * the Free Software Foundation; version 2 of the License | ||
| 4 | * | ||
| 5 | * This program is distributed in the hope that it will be useful, | ||
| 6 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 7 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 8 | * GNU General Public License for more details. | ||
| 9 | * | ||
| 10 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 11 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 12 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/types.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/of_device.h> | ||
| 20 | #include <linux/of_irq.h> | ||
| 21 | |||
| 22 | #include <ralink_regs.h> | ||
| 23 | |||
| 24 | #include "mtk_eth_soc.h" | ||
| 25 | #include "gsw_mt7620.h" | ||
| 26 | |||
| 27 | void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg) | ||
| 28 | { | ||
| 29 | iowrite32(val, gsw->base + reg); | ||
| 30 | } | ||
| 31 | EXPORT_SYMBOL_GPL(mtk_switch_w32); | ||
| 32 | |||
| 33 | u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg) | ||
| 34 | { | ||
| 35 | return ioread32(gsw->base + reg); | ||
| 36 | } | ||
| 37 | EXPORT_SYMBOL_GPL(mtk_switch_r32); | ||
| 38 | |||
| 39 | static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth) | ||
| 40 | { | ||
| 41 | struct mtk_eth *eth = (struct mtk_eth *)_eth; | ||
| 42 | struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv; | ||
| 43 | u32 reg, i; | ||
| 44 | |||
| 45 | reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS); | ||
| 46 | |||
| 47 | for (i = 0; i < 5; i++) { | ||
| 48 | unsigned int link; | ||
| 49 | |||
| 50 | if ((reg & BIT(i)) == 0) | ||
| 51 | continue; | ||
| 52 | |||
| 53 | link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1; | ||
| 54 | |||
| 55 | if (link == eth->link[i]) | ||
| 56 | continue; | ||
| 57 | |||
| 58 | eth->link[i] = link; | ||
| 59 | if (link) | ||
| 60 | netdev_info(*eth->netdev, | ||
| 61 | "port %d link up\n", i); | ||
| 62 | else | ||
| 63 | netdev_info(*eth->netdev, | ||
| 64 | "port %d link down\n", i); | ||
| 65 | } | ||
| 66 | |||
| 67 | mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f); | ||
| 68 | |||
| 69 | return IRQ_HANDLED; | ||
| 70 | } | ||
| 71 | |||
| 72 | static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw, | ||
| 73 | struct device_node *np) | ||
| 74 | { | ||
| 75 | u32 i; | ||
| 76 | u32 val; | ||
| 77 | |||
| 78 | /* hardware reset the switch */ | ||
| 79 | mtk_reset(eth, RST_CTRL_MCM); | ||
| 80 | mdelay(10); | ||
| 81 | |||
| 82 | /* reduce RGMII2 PAD driving strength */ | ||
| 83 | rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO); | ||
| 84 | |||
| 85 | /* gpio mux - RGMII1=Normal mode */ | ||
| 86 | rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE); | ||
| 87 | |||
| 88 | /* set GMAC1 RGMII mode */ | ||
| 89 | rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1); | ||
| 90 | |||
| 91 | /* enable MDIO to control MT7530 */ | ||
| 92 | rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE); | ||
| 93 | |||
| 94 | /* turn off all PHYs */ | ||
| 95 | for (i = 0; i <= 4; i++) { | ||
| 96 | val = _mt7620_mii_read(gsw, i, 0x0); | ||
| 97 | val |= BIT(11); | ||
| 98 | _mt7620_mii_write(gsw, i, 0x0, val); | ||
| 99 | } | ||
| 100 | |||
| 101 | /* reset the switch */ | ||
| 102 | mt7530_mdio_w32(gsw, MT7530_SYS_CTRL, | ||
| 103 | SYS_CTRL_SW_RST | SYS_CTRL_REG_RST); | ||
| 104 | usleep_range(10, 20); | ||
| 105 | |||
| 106 | if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) { | ||
| 107 | /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */ | ||
| 108 | mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR); | ||
| 109 | mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK); | ||
| 110 | } else { | ||
| 111 | /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */ | ||
| 112 | mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR); | ||
| 113 | mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC); | ||
| 114 | } | ||
| 115 | |||
| 116 | /* GE2, Link down */ | ||
| 117 | mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR); | ||
| 118 | |||
| 119 | /* Enable Port 6, P5 as GMAC5, P5 disable */ | ||
| 120 | val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP); | ||
| 121 | /* Enable Port 6 */ | ||
| 122 | val &= ~MHWTRAP_P6_DIS; | ||
| 123 | /* Disable Port 5 */ | ||
| 124 | val |= MHWTRAP_P5_DIS; | ||
| 125 | /* manual override of HW-Trap */ | ||
| 126 | val |= MHWTRAP_MANUAL; | ||
| 127 | mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val); | ||
| 128 | |||
| 129 | val = rt_sysc_r32(SYSC_REG_CFG); | ||
| 130 | val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK; | ||
| 131 | if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) { | ||
| 132 | /* 40Mhz */ | ||
| 133 | |||
| 134 | /* disable MT7530 core clock */ | ||
| 135 | _mt7620_mii_write(gsw, 0, 13, 0x1f); | ||
| 136 | _mt7620_mii_write(gsw, 0, 14, 0x410); | ||
| 137 | _mt7620_mii_write(gsw, 0, 13, 0x401f); | ||
| 138 | _mt7620_mii_write(gsw, 0, 14, 0x0); | ||
| 139 | |||
| 140 | /* disable MT7530 PLL */ | ||
| 141 | _mt7620_mii_write(gsw, 0, 13, 0x1f); | ||
| 142 | _mt7620_mii_write(gsw, 0, 14, 0x40d); | ||
| 143 | _mt7620_mii_write(gsw, 0, 13, 0x401f); | ||
| 144 | _mt7620_mii_write(gsw, 0, 14, 0x2020); | ||
| 145 | |||
| 146 | /* for MT7530 core clock = 500Mhz */ | ||
| 147 | _mt7620_mii_write(gsw, 0, 13, 0x1f); | ||
| 148 | _mt7620_mii_write(gsw, 0, 14, 0x40e); | ||
| 149 | _mt7620_mii_write(gsw, 0, 13, 0x401f); | ||
| 150 | _mt7620_mii_write(gsw, 0, 14, 0x119); | ||
| 151 | |||
| 152 | /* enable MT7530 PLL */ | ||
| 153 | _mt7620_mii_write(gsw, 0, 13, 0x1f); | ||
| 154 | _mt7620_mii_write(gsw, 0, 14, 0x40d); | ||
| 155 | _mt7620_mii_write(gsw, 0, 13, 0x401f); | ||
| 156 | _mt7620_mii_write(gsw, 0, 14, 0x2820); | ||
| 157 | |||
| 158 | usleep_range(20, 40); | ||
| 159 | |||
| 160 | /* enable MT7530 core clock */ | ||
| 161 | _mt7620_mii_write(gsw, 0, 13, 0x1f); | ||
| 162 | _mt7620_mii_write(gsw, 0, 14, 0x410); | ||
| 163 | _mt7620_mii_write(gsw, 0, 13, 0x401f); | ||
| 164 | } | ||
| 165 | |||
| 166 | /* RGMII */ | ||
| 167 | _mt7620_mii_write(gsw, 0, 14, 0x1); | ||
| 168 | |||
| 169 | /* set MT7530 central align */ | ||
| 170 | mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR); | ||
| 171 | mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0, | ||
| 172 | MT7530_TRGMII_TXCTRL); | ||
| 173 | mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855); | ||
| 174 | |||
| 175 | /* delay setting for 10/1000M */ | ||
| 176 | mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR, | ||
| 177 | P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2); | ||
| 178 | mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14); | ||
| 179 | |||
| 180 | /* lower Tx Driving*/ | ||
| 181 | mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44); | ||
| 182 | mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44); | ||
| 183 | mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44); | ||
| 184 | mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44); | ||
| 185 | mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44); | ||
| 186 | mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44); | ||
| 187 | |||
| 188 | /* turn on all PHYs */ | ||
| 189 | for (i = 0; i <= 4; i++) { | ||
| 190 | val = _mt7620_mii_read(gsw, i, 0); | ||
| 191 | val &= ~BIT(11); | ||
| 192 | _mt7620_mii_write(gsw, i, 0, val); | ||
| 193 | } | ||
| 194 | |||
| 195 | #define MT7530_NUM_PORTS 8 | ||
| 196 | #define REG_ESW_PORT_PCR(x) (0x2004 | ((x) << 8)) | ||
| 197 | #define REG_ESW_PORT_PVC(x) (0x2010 | ((x) << 8)) | ||
| 198 | #define REG_ESW_PORT_PPBV1(x) (0x2014 | ((x) << 8)) | ||
| 199 | #define MT7530_CPU_PORT 6 | ||
| 200 | |||
| 201 | /* This is copied from mt7530_apply_config in libreCMC driver */ | ||
| 202 | { | ||
| 203 | int i; | ||
| 204 | |||
| 205 | for (i = 0; i < MT7530_NUM_PORTS; i++) | ||
| 206 | mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000); | ||
| 207 | |||
| 208 | mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT), | ||
| 209 | 0x00ff0000); | ||
| 210 | |||
| 211 | for (i = 0; i < MT7530_NUM_PORTS; i++) | ||
| 212 | mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0); | ||
| 213 | } | ||
| 214 | |||
| 215 | /* enable irq */ | ||
| 216 | mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL); | ||
| 217 | mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f); | ||
| 218 | } | ||
| 219 | |||
| 220 | static const struct of_device_id mediatek_gsw_match[] = { | ||
| 221 | { .compatible = "mediatek,mt7621-gsw" }, | ||
| 222 | {}, | ||
| 223 | }; | ||
| 224 | MODULE_DEVICE_TABLE(of, mediatek_gsw_match); | ||
| 225 | |||
| 226 | int mtk_gsw_init(struct mtk_eth *eth) | ||
| 227 | { | ||
| 228 | struct device_node *np = eth->switch_np; | ||
| 229 | struct platform_device *pdev = of_find_device_by_node(np); | ||
| 230 | struct mt7620_gsw *gsw; | ||
| 231 | |||
| 232 | if (!pdev) | ||
| 233 | return -ENODEV; | ||
| 234 | |||
| 235 | if (!of_device_is_compatible(np, mediatek_gsw_match->compatible)) | ||
| 236 | return -EINVAL; | ||
| 237 | |||
| 238 | gsw = platform_get_drvdata(pdev); | ||
| 239 | eth->sw_priv = gsw; | ||
| 240 | |||
| 241 | if (!gsw->irq) | ||
| 242 | return -EINVAL; | ||
| 243 | |||
| 244 | request_irq(gsw->irq, gsw_interrupt_mt7621, 0, | ||
| 245 | "gsw", eth); | ||
| 246 | disable_irq(gsw->irq); | ||
| 247 | |||
| 248 | mt7621_hw_init(eth, gsw, np); | ||
| 249 | |||
| 250 | enable_irq(gsw->irq); | ||
| 251 | |||
| 252 | return 0; | ||
| 253 | } | ||
| 254 | EXPORT_SYMBOL_GPL(mtk_gsw_init); | ||
| 255 | |||
| 256 | static int mt7621_gsw_probe(struct platform_device *pdev) | ||
| 257 | { | ||
| 258 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 259 | struct mt7620_gsw *gsw; | ||
| 260 | |||
| 261 | gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL); | ||
| 262 | if (!gsw) | ||
| 263 | return -ENOMEM; | ||
| 264 | |||
| 265 | gsw->base = devm_ioremap_resource(&pdev->dev, res); | ||
| 266 | if (IS_ERR(gsw->base)) | ||
| 267 | return PTR_ERR(gsw->base); | ||
| 268 | |||
| 269 | gsw->dev = &pdev->dev; | ||
| 270 | gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | ||
| 271 | |||
| 272 | platform_set_drvdata(pdev, gsw); | ||
| 273 | |||
| 274 | return 0; | ||
| 275 | } | ||
| 276 | |||
| 277 | static int mt7621_gsw_remove(struct platform_device *pdev) | ||
| 278 | { | ||
| 279 | platform_set_drvdata(pdev, NULL); | ||
| 280 | |||
| 281 | return 0; | ||
| 282 | } | ||
| 283 | |||
| 284 | static struct platform_driver gsw_driver = { | ||
| 285 | .probe = mt7621_gsw_probe, | ||
| 286 | .remove = mt7621_gsw_remove, | ||
| 287 | .driver = { | ||
| 288 | .name = "mt7621-gsw", | ||
| 289 | .of_match_table = mediatek_gsw_match, | ||
| 290 | }, | ||
| 291 | }; | ||
| 292 | |||
| 293 | module_platform_driver(gsw_driver); | ||
| 294 | |||
| 295 | MODULE_LICENSE("GPL"); | ||
| 296 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | ||
| 297 | MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC"); | ||
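A note on the "turn on all PHYs" loop near the top of the hunk above: clearing bit 11 of MII register 0 on PHYs 0-4 is, in standard MII terms, clearing BMCR_PDOWN in the BMCR, i.e. taking each PHY out of power-down. A minimal sketch of the same operation against the kernel's generic mdiobus helpers (the bus and address arguments are placeholders, not symbols from this driver):

#include <linux/mii.h>
#include <linux/phy.h>

/* Power up one PHY by clearing the power-down bit in its BMCR.
 * Equivalent to the "val &= ~BIT(11)" loop in the deleted code above;
 * 'bus' and 'addr' are placeholder arguments for illustration only.
 */
static int example_phy_power_up(struct mii_bus *bus, int addr)
{
	int bmcr = mdiobus_read(bus, addr, MII_BMCR);

	if (bmcr < 0)
		return bmcr;

	return mdiobus_write(bus, addr, MII_BMCR, bmcr & ~BMCR_PDOWN);
}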
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c deleted file mode 100644 index 5fea6a447eed..000000000000 --- a/drivers/staging/mt7621-eth/mdio.c +++ /dev/null | |||
| @@ -1,275 +0,0 @@ | |||
| 1 | /* This program is free software; you can redistribute it and/or modify | ||
| 2 | * it under the terms of the GNU General Public License as published by | ||
| 3 | * the Free Software Foundation; version 2 of the License | ||
| 4 | * | ||
| 5 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 6 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 7 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/module.h> | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/phy.h> | ||
| 13 | #include <linux/of_net.h> | ||
| 14 | #include <linux/of_mdio.h> | ||
| 15 | |||
| 16 | #include "mtk_eth_soc.h" | ||
| 17 | #include "mdio.h" | ||
| 18 | |||
| 19 | static int mtk_mdio_reset(struct mii_bus *bus) | ||
| 20 | { | ||
| 21 | /* TODO */ | ||
| 22 | return 0; | ||
| 23 | } | ||
| 24 | |||
| 25 | static void mtk_phy_link_adjust(struct net_device *dev) | ||
| 26 | { | ||
| 27 | struct mtk_eth *eth = netdev_priv(dev); | ||
| 28 | unsigned long flags; | ||
| 29 | int i; | ||
| 30 | |||
| 31 | spin_lock_irqsave(&eth->phy->lock, flags); | ||
| 32 | for (i = 0; i < 8; i++) { | ||
| 33 | if (eth->phy->phy_node[i]) { | ||
| 34 | struct phy_device *phydev = eth->phy->phy[i]; | ||
| 35 | int status_change = 0; | ||
| 36 | |||
| 37 | if (phydev->link) | ||
| 38 | if (eth->phy->duplex[i] != phydev->duplex || | ||
| 39 | eth->phy->speed[i] != phydev->speed) | ||
| 40 | status_change = 1; | ||
| 41 | |||
| 42 | if (phydev->link != eth->link[i]) | ||
| 43 | status_change = 1; | ||
| 44 | |||
| 45 | switch (phydev->speed) { | ||
| 46 | case SPEED_1000: | ||
| 47 | case SPEED_100: | ||
| 48 | case SPEED_10: | ||
| 49 | eth->link[i] = phydev->link; | ||
| 50 | eth->phy->duplex[i] = phydev->duplex; | ||
| 51 | eth->phy->speed[i] = phydev->speed; | ||
| 52 | |||
| 53 | if (status_change && | ||
| 54 | eth->soc->mdio_adjust_link) | ||
| 55 | eth->soc->mdio_adjust_link(eth, i); | ||
| 56 | break; | ||
| 57 | } | ||
| 58 | } | ||
| 59 | } | ||
| 60 | spin_unlock_irqrestore(&eth->phy->lock, flags); | ||
| 61 | } | ||
| 62 | |||
| 63 | int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac, | ||
| 64 | struct device_node *phy_node) | ||
| 65 | { | ||
| 66 | const __be32 *_port = NULL; | ||
| 67 | struct phy_device *phydev; | ||
| 68 | int phy_mode, port; | ||
| 69 | |||
| 70 | _port = of_get_property(phy_node, "reg", NULL); | ||
| 71 | |||
| 72 | if (!_port || (be32_to_cpu(*_port) >= 0x20)) { | ||
| 73 | pr_err("%pOFn: invalid port id\n", phy_node); | ||
| 74 | return -EINVAL; | ||
| 75 | } | ||
| 76 | port = be32_to_cpu(*_port); | ||
| 77 | phy_mode = of_get_phy_mode(phy_node); | ||
| 78 | if (phy_mode < 0) { | ||
| 79 | dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode); | ||
| 80 | eth->phy->phy_node[port] = NULL; | ||
| 81 | return -EINVAL; | ||
| 82 | } | ||
| 83 | |||
| 84 | phydev = of_phy_connect(eth->netdev[mac->id], phy_node, | ||
| 85 | mtk_phy_link_adjust, 0, phy_mode); | ||
| 86 | if (!phydev) { | ||
| 87 | dev_err(eth->dev, "could not connect to PHY\n"); | ||
| 88 | eth->phy->phy_node[port] = NULL; | ||
| 89 | return -ENODEV; | ||
| 90 | } | ||
| 91 | |||
| 92 | phydev->supported &= PHY_1000BT_FEATURES; | ||
| 93 | phydev->advertising = phydev->supported; | ||
| 94 | |||
| 95 | dev_info(eth->dev, | ||
| 96 | "connected port %d to PHY at %s [uid=%08x, driver=%s]\n", | ||
| 97 | port, phydev_name(phydev), phydev->phy_id, | ||
| 98 | phydev->drv->name); | ||
| 99 | |||
| 100 | eth->phy->phy[port] = phydev; | ||
| 101 | eth->link[port] = 0; | ||
| 102 | |||
| 103 | return 0; | ||
| 104 | } | ||
| 105 | |||
| 106 | static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac, | ||
| 107 | struct phy_device *phy) | ||
| 108 | { | ||
| 109 | phy_attach(eth->netdev[mac->id], phydev_name(phy), | ||
| 110 | PHY_INTERFACE_MODE_MII); | ||
| 111 | |||
| 112 | phy->autoneg = AUTONEG_ENABLE; | ||
| 113 | phy->speed = 0; | ||
| 114 | phy->duplex = 0; | ||
| 115 | phy_set_max_speed(phy, SPEED_100); | ||
| 116 | phy->advertising = phy->supported | ADVERTISED_Autoneg; | ||
| 117 | |||
| 118 | phy_start_aneg(phy); | ||
| 119 | } | ||
| 120 | |||
| 121 | static int mtk_phy_connect(struct mtk_mac *mac) | ||
| 122 | { | ||
| 123 | struct mtk_eth *eth = mac->hw; | ||
| 124 | int i; | ||
| 125 | |||
| 126 | for (i = 0; i < 8; i++) { | ||
| 127 | if (eth->phy->phy_node[i]) { | ||
| 128 | if (!mac->phy_dev) { | ||
| 129 | mac->phy_dev = eth->phy->phy[i]; | ||
| 130 | mac->phy_flags = MTK_PHY_FLAG_PORT; | ||
| 131 | } | ||
| 132 | } else if (eth->mii_bus) { | ||
| 133 | struct phy_device *phy; | ||
| 134 | |||
| 135 | phy = mdiobus_get_phy(eth->mii_bus, i); | ||
| 136 | if (phy) { | ||
| 137 | phy_init(eth, mac, phy); | ||
| 138 | if (!mac->phy_dev) { | ||
| 139 | mac->phy_dev = phy; | ||
| 140 | mac->phy_flags = MTK_PHY_FLAG_ATTACH; | ||
| 141 | } | ||
| 142 | } | ||
| 143 | } | ||
| 144 | } | ||
| 145 | |||
| 146 | return 0; | ||
| 147 | } | ||
| 148 | |||
| 149 | static void mtk_phy_disconnect(struct mtk_mac *mac) | ||
| 150 | { | ||
| 151 | struct mtk_eth *eth = mac->hw; | ||
| 152 | unsigned long flags; | ||
| 153 | int i; | ||
| 154 | |||
| 155 | for (i = 0; i < 8; i++) | ||
| 156 | if (eth->phy->phy_fixed[i]) { | ||
| 157 | spin_lock_irqsave(&eth->phy->lock, flags); | ||
| 158 | eth->link[i] = 0; | ||
| 159 | if (eth->soc->mdio_adjust_link) | ||
| 160 | eth->soc->mdio_adjust_link(eth, i); | ||
| 161 | spin_unlock_irqrestore(&eth->phy->lock, flags); | ||
| 162 | } else if (eth->phy->phy[i]) { | ||
| 163 | phy_disconnect(eth->phy->phy[i]); | ||
| 164 | } else if (eth->mii_bus) { | ||
| 165 | struct phy_device *phy = | ||
| 166 | mdiobus_get_phy(eth->mii_bus, i); | ||
| 167 | |||
| 168 | if (phy) | ||
| 169 | phy_detach(phy); | ||
| 170 | } | ||
| 171 | } | ||
| 172 | |||
| 173 | static void mtk_phy_start(struct mtk_mac *mac) | ||
| 174 | { | ||
| 175 | struct mtk_eth *eth = mac->hw; | ||
| 176 | unsigned long flags; | ||
| 177 | int i; | ||
| 178 | |||
| 179 | for (i = 0; i < 8; i++) { | ||
| 180 | if (eth->phy->phy_fixed[i]) { | ||
| 181 | spin_lock_irqsave(&eth->phy->lock, flags); | ||
| 182 | eth->link[i] = 1; | ||
| 183 | if (eth->soc->mdio_adjust_link) | ||
| 184 | eth->soc->mdio_adjust_link(eth, i); | ||
| 185 | spin_unlock_irqrestore(&eth->phy->lock, flags); | ||
| 186 | } else if (eth->phy->phy[i]) { | ||
| 187 | phy_start(eth->phy->phy[i]); | ||
| 188 | } | ||
| 189 | } | ||
| 190 | } | ||
| 191 | |||
| 192 | static void mtk_phy_stop(struct mtk_mac *mac) | ||
| 193 | { | ||
| 194 | struct mtk_eth *eth = mac->hw; | ||
| 195 | unsigned long flags; | ||
| 196 | int i; | ||
| 197 | |||
| 198 | for (i = 0; i < 8; i++) | ||
| 199 | if (eth->phy->phy_fixed[i]) { | ||
| 200 | spin_lock_irqsave(&eth->phy->lock, flags); | ||
| 201 | eth->link[i] = 0; | ||
| 202 | if (eth->soc->mdio_adjust_link) | ||
| 203 | eth->soc->mdio_adjust_link(eth, i); | ||
| 204 | spin_unlock_irqrestore(&eth->phy->lock, flags); | ||
| 205 | } else if (eth->phy->phy[i]) { | ||
| 206 | phy_stop(eth->phy->phy[i]); | ||
| 207 | } | ||
| 208 | } | ||
| 209 | |||
| 210 | static struct mtk_phy phy_ralink = { | ||
| 211 | .connect = mtk_phy_connect, | ||
| 212 | .disconnect = mtk_phy_disconnect, | ||
| 213 | .start = mtk_phy_start, | ||
| 214 | .stop = mtk_phy_stop, | ||
| 215 | }; | ||
| 216 | |||
| 217 | int mtk_mdio_init(struct mtk_eth *eth) | ||
| 218 | { | ||
| 219 | struct device_node *mii_np; | ||
| 220 | int err; | ||
| 221 | |||
| 222 | if (!eth->soc->mdio_read || !eth->soc->mdio_write) | ||
| 223 | return 0; | ||
| 224 | |||
| 225 | spin_lock_init(&phy_ralink.lock); | ||
| 226 | eth->phy = &phy_ralink; | ||
| 227 | |||
| 228 | mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); | ||
| 229 | if (!mii_np) { | ||
| 230 | dev_err(eth->dev, "no %s child node found", "mdio-bus"); | ||
| 231 | return -ENODEV; | ||
| 232 | } | ||
| 233 | |||
| 234 | if (!of_device_is_available(mii_np)) { | ||
| 235 | err = 0; | ||
| 236 | goto err_put_node; | ||
| 237 | } | ||
| 238 | |||
| 239 | eth->mii_bus = mdiobus_alloc(); | ||
| 240 | if (!eth->mii_bus) { | ||
| 241 | err = -ENOMEM; | ||
| 242 | goto err_put_node; | ||
| 243 | } | ||
| 244 | |||
| 245 | eth->mii_bus->name = "mdio"; | ||
| 246 | eth->mii_bus->read = eth->soc->mdio_read; | ||
| 247 | eth->mii_bus->write = eth->soc->mdio_write; | ||
| 248 | eth->mii_bus->reset = mtk_mdio_reset; | ||
| 249 | eth->mii_bus->priv = eth; | ||
| 250 | eth->mii_bus->parent = eth->dev; | ||
| 251 | |||
| 252 | snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); | ||
| 253 | err = of_mdiobus_register(eth->mii_bus, mii_np); | ||
| 254 | if (err) | ||
| 255 | goto err_free_bus; | ||
| 256 | |||
| 257 | return 0; | ||
| 258 | |||
| 259 | err_free_bus: | ||
| 260 | kfree(eth->mii_bus); | ||
| 261 | err_put_node: | ||
| 262 | of_node_put(mii_np); | ||
| 263 | eth->mii_bus = NULL; | ||
| 264 | return err; | ||
| 265 | } | ||
| 266 | |||
| 267 | void mtk_mdio_cleanup(struct mtk_eth *eth) | ||
| 268 | { | ||
| 269 | if (!eth->mii_bus) | ||
| 270 | return; | ||
| 271 | |||
| 272 | mdiobus_unregister(eth->mii_bus); | ||
| 273 | of_node_put(eth->mii_bus->dev.of_node); | ||
| 274 | kfree(eth->mii_bus); | ||
| 275 | } | ||
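The mtk_mdio_init()/mtk_mdio_cleanup() pair above follows the usual MDIO bus lifecycle: allocate a bus, fill in the accessors, register it against the "mdio-bus" device-tree child, and unregister and free it on teardown. A hedged sketch of that registration sequence using the core mdiobus API (the read/write callbacks are placeholders; note that a bus obtained from mdiobus_alloc() is normally released with mdiobus_free() rather than kfree()):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Sketch of the MDIO bus registration sequence used above.
 * my_read/my_write are placeholder accessors, not real driver symbols.
 */
static struct mii_bus *example_mdio_register(struct device *dev,
					     struct device_node *mii_np,
					     int (*my_read)(struct mii_bus *, int, int),
					     int (*my_write)(struct mii_bus *, int, int, u16))
{
	struct mii_bus *bus;
	int err;

	bus = mdiobus_alloc();
	if (!bus)
		return ERR_PTR(-ENOMEM);

	bus->name = "example-mdio";
	bus->read = my_read;
	bus->write = my_write;
	bus->parent = dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);

	err = of_mdiobus_register(bus, mii_np);
	if (err) {
		mdiobus_free(bus);	/* counterpart of mdiobus_alloc() */
		return ERR_PTR(err);
	}

	return bus;
}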
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h deleted file mode 100644 index b14e23842a01..000000000000 --- a/drivers/staging/mt7621-eth/mdio.h +++ /dev/null | |||
| @@ -1,27 +0,0 @@ | |||
| 1 | /* This program is free software; you can redistribute it and/or modify | ||
| 2 | * it under the terms of the GNU General Public License as published by | ||
| 3 | * the Free Software Foundation; version 2 of the License | ||
| 4 | * | ||
| 5 | * This program is distributed in the hope that it will be useful, | ||
| 6 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 7 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 8 | * GNU General Public License for more details. | ||
| 9 | * | ||
| 10 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 11 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 12 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifndef _RALINK_MDIO_H__ | ||
| 16 | #define _RALINK_MDIO_H__ | ||
| 17 | |||
| 18 | #ifdef CONFIG_NET_MEDIATEK_MDIO | ||
| 19 | int mtk_mdio_init(struct mtk_eth *eth); | ||
| 20 | void mtk_mdio_cleanup(struct mtk_eth *eth); | ||
| 21 | int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac, | ||
| 22 | struct device_node *phy_node); | ||
| 23 | #else | ||
| 24 | static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; } | ||
| 25 | static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {} | ||
| 26 | #endif | ||
| 27 | #endif | ||
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c deleted file mode 100644 index ced605c2914e..000000000000 --- a/drivers/staging/mt7621-eth/mdio_mt7620.c +++ /dev/null | |||
| @@ -1,173 +0,0 @@ | |||
| 1 | /* This program is free software; you can redistribute it and/or modify | ||
| 2 | * it under the terms of the GNU General Public License as published by | ||
| 3 | * the Free Software Foundation; version 2 of the License | ||
| 4 | * | ||
| 5 | * This program is distributed in the hope that it will be useful, | ||
| 6 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 7 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 8 | * GNU General Public License for more details. | ||
| 9 | * | ||
| 10 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 11 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 12 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/types.h> | ||
| 18 | |||
| 19 | #include "mtk_eth_soc.h" | ||
| 20 | #include "gsw_mt7620.h" | ||
| 21 | #include "mdio.h" | ||
| 22 | |||
| 23 | static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw) | ||
| 24 | { | ||
| 25 | unsigned long t_start = jiffies; | ||
| 26 | |||
| 27 | while (1) { | ||
| 28 | if (!(mtk_switch_r32(gsw, | ||
| 29 | gsw->piac_offset + MT7620_GSW_REG_PIAC) & | ||
| 30 | GSW_MDIO_ACCESS)) | ||
| 31 | return 0; | ||
| 32 | if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT)) | ||
| 33 | break; | ||
| 34 | } | ||
| 35 | |||
| 36 | dev_err(gsw->dev, "mdio: MDIO timeout\n"); | ||
| 37 | return -1; | ||
| 38 | } | ||
| 39 | |||
| 40 | u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr, | ||
| 41 | u32 phy_register, u32 write_data) | ||
| 42 | { | ||
| 43 | if (mt7620_mii_busy_wait(gsw)) | ||
| 44 | return -1; | ||
| 45 | |||
| 46 | write_data &= 0xffff; | ||
| 47 | |||
| 48 | mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE | | ||
| 49 | (phy_register << GSW_MDIO_REG_SHIFT) | | ||
| 50 | (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data, | ||
| 51 | MT7620_GSW_REG_PIAC); | ||
| 52 | |||
| 53 | if (mt7620_mii_busy_wait(gsw)) | ||
| 54 | return -1; | ||
| 55 | |||
| 56 | return 0; | ||
| 57 | } | ||
| 58 | EXPORT_SYMBOL_GPL(_mt7620_mii_write); | ||
| 59 | |||
| 60 | u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg) | ||
| 61 | { | ||
| 62 | u32 d; | ||
| 63 | |||
| 64 | if (mt7620_mii_busy_wait(gsw)) | ||
| 65 | return 0xffff; | ||
| 66 | |||
| 67 | mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ | | ||
| 68 | (phy_reg << GSW_MDIO_REG_SHIFT) | | ||
| 69 | (phy_addr << GSW_MDIO_ADDR_SHIFT), | ||
| 70 | MT7620_GSW_REG_PIAC); | ||
| 71 | |||
| 72 | if (mt7620_mii_busy_wait(gsw)) | ||
| 73 | return 0xffff; | ||
| 74 | |||
| 75 | d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff; | ||
| 76 | |||
| 77 | return d; | ||
| 78 | } | ||
| 79 | EXPORT_SYMBOL_GPL(_mt7620_mii_read); | ||
| 80 | |||
| 81 | int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val) | ||
| 82 | { | ||
| 83 | struct mtk_eth *eth = bus->priv; | ||
| 84 | struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv; | ||
| 85 | |||
| 86 | return _mt7620_mii_write(gsw, phy_addr, phy_reg, val); | ||
| 87 | } | ||
| 88 | |||
| 89 | int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) | ||
| 90 | { | ||
| 91 | struct mtk_eth *eth = bus->priv; | ||
| 92 | struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv; | ||
| 93 | |||
| 94 | return _mt7620_mii_read(gsw, phy_addr, phy_reg); | ||
| 95 | } | ||
| 96 | |||
| 97 | void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val) | ||
| 98 | { | ||
| 99 | _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff); | ||
| 100 | _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf, val & 0xffff); | ||
| 101 | _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16); | ||
| 102 | } | ||
| 103 | EXPORT_SYMBOL_GPL(mt7530_mdio_w32); | ||
| 104 | |||
| 105 | u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg) | ||
| 106 | { | ||
| 107 | u16 high, low; | ||
| 108 | |||
| 109 | _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff); | ||
| 110 | low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf); | ||
| 111 | high = _mt7620_mii_read(gsw, 0x1f, 0x10); | ||
| 112 | |||
| 113 | return (high << 16) | (low & 0xffff); | ||
| 114 | } | ||
| 115 | EXPORT_SYMBOL_GPL(mt7530_mdio_r32); | ||
| 116 | |||
| 117 | void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg) | ||
| 118 | { | ||
| 119 | u32 val = mt7530_mdio_r32(gsw, reg); | ||
| 120 | |||
| 121 | val &= ~mask; | ||
| 122 | val |= set; | ||
| 123 | mt7530_mdio_w32(gsw, reg, val); | ||
| 124 | } | ||
| 125 | EXPORT_SYMBOL_GPL(mt7530_mdio_m32); | ||
| 126 | |||
| 127 | static unsigned char *mtk_speed_str(int speed) | ||
| 128 | { | ||
| 129 | switch (speed) { | ||
| 130 | case 2: | ||
| 131 | case SPEED_1000: | ||
| 132 | return "1000"; | ||
| 133 | case 1: | ||
| 134 | case SPEED_100: | ||
| 135 | return "100"; | ||
| 136 | case 0: | ||
| 137 | case SPEED_10: | ||
| 138 | return "10"; | ||
| 139 | } | ||
| 140 | |||
| 141 | return "? "; | ||
| 142 | } | ||
| 143 | |||
| 144 | int mt7620_has_carrier(struct mtk_eth *eth) | ||
| 145 | { | ||
| 146 | struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv; | ||
| 147 | int i; | ||
| 148 | |||
| 149 | for (i = 0; i < GSW_PORT6; i++) | ||
| 150 | if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1) | ||
| 151 | return 1; | ||
| 152 | return 0; | ||
| 153 | } | ||
| 154 | |||
| 155 | void mt7620_print_link_state(struct mtk_eth *eth, int port, int link, | ||
| 156 | int speed, int duplex) | ||
| 157 | { | ||
| 158 | struct mt7620_gsw *gsw = eth->sw_priv; | ||
| 159 | |||
| 160 | if (link) | ||
| 161 | dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n", | ||
| 162 | port, mtk_speed_str(speed), | ||
| 163 | (duplex) ? "Full" : "Half"); | ||
| 164 | else | ||
| 165 | dev_info(gsw->dev, "port %d link down\n", port); | ||
| 166 | } | ||
| 167 | |||
| 168 | void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port) | ||
| 169 | { | ||
| 170 | mt7620_print_link_state(eth, port, eth->link[port], | ||
| 171 | eth->phy->speed[port], | ||
| 172 | (eth->phy->duplex[port] == DUPLEX_FULL)); | ||
| 173 | } | ||
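For context on the indirect access helpers above (mt7530_mdio_w32()/mt7530_mdio_r32()): the MT7530's 32-bit switch registers are reached over MDIO through pseudo-PHY address 0x1f. An access first writes the upper bits of the register offset to register 0x1f as a page select, then moves the low 16-bit half through register (reg >> 2) & 0xf and the high half through register 0x10. A minimal standalone sketch of the read sequence, assuming hypothetical raw_mii_read()/raw_mii_write() primitives in place of the driver's _mt7620_mii_* helpers:

#include <linux/types.h>

/* Assumed raw MDIO primitives; placeholders for the driver's
 * _mt7620_mii_read()/_mt7620_mii_write() helpers, not real kernel APIs.
 */
u16 raw_mii_read(u32 phy_addr, u32 phy_reg);
void raw_mii_write(u32 phy_addr, u32 phy_reg, u16 val);

/* Read a 32-bit MT7530 switch register via the MDIO window. */
static u32 example_mt7530_read(u32 reg)
{
	u16 lo, hi;

	/* page select: upper bits of the register offset */
	raw_mii_write(0x1f, 0x1f, (reg >> 6) & 0x3ff);
	/* low word at register (reg >> 2) & 0xf, high word at register 0x10 */
	lo = raw_mii_read(0x1f, (reg >> 2) & 0xf);
	hi = raw_mii_read(0x1f, 0x10);

	return ((u32)hi << 16) | lo;
}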
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c deleted file mode 100644 index 6027b19f7bc2..000000000000 --- a/drivers/staging/mt7621-eth/mtk_eth_soc.c +++ /dev/null | |||
| @@ -1,2176 +0,0 @@ | |||
| 1 | /* This program is free software; you can redistribute it and/or modify | ||
| 2 | * it under the terms of the GNU General Public License as published by | ||
| 3 | * the Free Software Foundation; version 2 of the License | ||
| 4 | * | ||
| 5 | * This program is distributed in the hope that it will be useful, | ||
| 6 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 7 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 8 | * GNU General Public License for more details. | ||
| 9 | * | ||
| 10 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 11 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 12 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/types.h> | ||
| 18 | #include <linux/dma-mapping.h> | ||
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/skbuff.h> | ||
| 21 | #include <linux/etherdevice.h> | ||
| 22 | #include <linux/ethtool.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/of_device.h> | ||
| 25 | #include <linux/mfd/syscon.h> | ||
| 26 | #include <linux/clk.h> | ||
| 27 | #include <linux/of_net.h> | ||
| 28 | #include <linux/of_mdio.h> | ||
| 29 | #include <linux/if_vlan.h> | ||
| 30 | #include <linux/reset.h> | ||
| 31 | #include <linux/tcp.h> | ||
| 32 | #include <linux/io.h> | ||
| 33 | #include <linux/bug.h> | ||
| 34 | #include <linux/regmap.h> | ||
| 35 | |||
| 36 | #include "mtk_eth_soc.h" | ||
| 37 | #include "mdio.h" | ||
| 38 | #include "ethtool.h" | ||
| 39 | |||
| 40 | #define MAX_RX_LENGTH 1536 | ||
| 41 | #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) | ||
| 42 | #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) | ||
| 43 | #define DMA_DUMMY_DESC 0xffffffff | ||
| 44 | #define MTK_DEFAULT_MSG_ENABLE \ | ||
| 45 | (NETIF_MSG_DRV | \ | ||
| 46 | NETIF_MSG_PROBE | \ | ||
| 47 | NETIF_MSG_LINK | \ | ||
| 48 | NETIF_MSG_TIMER | \ | ||
| 49 | NETIF_MSG_IFDOWN | \ | ||
| 50 | NETIF_MSG_IFUP | \ | ||
| 51 | NETIF_MSG_RX_ERR | \ | ||
| 52 | NETIF_MSG_TX_ERR) | ||
| 53 | |||
| 54 | #define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE) | ||
| 55 | #define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1)) | ||
| 56 | #define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1)) | ||
| 57 | |||
| 58 | #define SYSC_REG_RSTCTRL 0x34 | ||
| 59 | |||
| 60 | static int mtk_msg_level = -1; | ||
| 61 | module_param_named(msg_level, mtk_msg_level, int, 0); | ||
| 62 | MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); | ||
| 63 | |||
| 64 | static const u16 mtk_reg_table_default[MTK_REG_COUNT] = { | ||
| 65 | [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG, | ||
| 66 | [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG, | ||
| 67 | [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG, | ||
| 68 | [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0, | ||
| 69 | [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0, | ||
| 70 | [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0, | ||
| 71 | [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0, | ||
| 72 | [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0, | ||
| 73 | [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0, | ||
| 74 | [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0, | ||
| 75 | [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0, | ||
| 76 | [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE, | ||
| 77 | [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS, | ||
| 78 | [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0, | ||
| 79 | [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT, | ||
| 80 | [MTK_REG_MTK_RST_GL] = MTK_RST_GL, | ||
| 81 | }; | ||
| 82 | |||
| 83 | static const u16 *mtk_reg_table = mtk_reg_table_default; | ||
| 84 | |||
| 85 | void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg) | ||
| 86 | { | ||
| 87 | __raw_writel(val, eth->base + reg); | ||
| 88 | } | ||
| 89 | |||
| 90 | u32 mtk_r32(struct mtk_eth *eth, unsigned int reg) | ||
| 91 | { | ||
| 92 | return __raw_readl(eth->base + reg); | ||
| 93 | } | ||
| 94 | |||
| 95 | static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg) | ||
| 96 | { | ||
| 97 | mtk_w32(eth, val, mtk_reg_table[reg]); | ||
| 98 | } | ||
| 99 | |||
| 100 | static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg) | ||
| 101 | { | ||
| 102 | return mtk_r32(eth, mtk_reg_table[reg]); | ||
| 103 | } | ||
| 104 | |||
| 105 | /* these bits are also exposed via the reset-controller API. However, the switch | ||
| 106 | * and FE need to be brought out of reset at the exact same moment, and the | ||
| 107 | * reset-controller API does not provide this feature yet. Do the reset manually | ||
| 108 | * until the reset-controller API is able to do this. | ||
| 109 | */ | ||
| 110 | void mtk_reset(struct mtk_eth *eth, u32 reset_bits) | ||
| 111 | { | ||
| 112 | u32 val; | ||
| 113 | |||
| 114 | regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val); | ||
| 115 | val |= reset_bits; | ||
| 116 | regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val); | ||
| 117 | usleep_range(10, 20); | ||
| 118 | val &= ~reset_bits; | ||
| 119 | regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val); | ||
| 120 | usleep_range(10, 20); | ||
| 121 | } | ||
| 122 | EXPORT_SYMBOL(mtk_reset); | ||
| 123 | |||
| 124 | static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask) | ||
| 125 | { | ||
| 126 | if (eth->soc->dma_type & MTK_PDMA) | ||
| 127 | mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS); | ||
| 128 | if (eth->soc->dma_type & MTK_QDMA) | ||
| 129 | mtk_w32(eth, mask, MTK_QMTK_INT_STATUS); | ||
| 130 | } | ||
| 131 | |||
| 132 | static inline u32 mtk_irq_pending(struct mtk_eth *eth) | ||
| 133 | { | ||
| 134 | u32 status = 0; | ||
| 135 | |||
| 136 | if (eth->soc->dma_type & MTK_PDMA) | ||
| 137 | status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS); | ||
| 138 | if (eth->soc->dma_type & MTK_QDMA) | ||
| 139 | status |= mtk_r32(eth, MTK_QMTK_INT_STATUS); | ||
| 140 | |||
| 141 | return status; | ||
| 142 | } | ||
| 143 | |||
| 144 | static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask) | ||
| 145 | { | ||
| 146 | u32 status_reg = MTK_REG_MTK_INT_STATUS; | ||
| 147 | |||
| 148 | if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2]) | ||
| 149 | status_reg = MTK_REG_MTK_INT_STATUS2; | ||
| 150 | |||
| 151 | mtk_reg_w32(eth, mask, status_reg); | ||
| 152 | } | ||
| 153 | |||
| 154 | static u32 mtk_irq_pending_status(struct mtk_eth *eth) | ||
| 155 | { | ||
| 156 | u32 status_reg = MTK_REG_MTK_INT_STATUS; | ||
| 157 | |||
| 158 | if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2]) | ||
| 159 | status_reg = MTK_REG_MTK_INT_STATUS2; | ||
| 160 | |||
| 161 | return mtk_reg_r32(eth, status_reg); | ||
| 162 | } | ||
| 163 | |||
| 164 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) | ||
| 165 | { | ||
| 166 | u32 val; | ||
| 167 | |||
| 168 | if (eth->soc->dma_type & MTK_PDMA) { | ||
| 169 | val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); | ||
| 170 | mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE); | ||
| 171 | /* flush write */ | ||
| 172 | mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); | ||
| 173 | } | ||
| 174 | if (eth->soc->dma_type & MTK_QDMA) { | ||
| 175 | val = mtk_r32(eth, MTK_QMTK_INT_ENABLE); | ||
| 176 | mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE); | ||
| 177 | /* flush write */ | ||
| 178 | mtk_r32(eth, MTK_QMTK_INT_ENABLE); | ||
| 179 | } | ||
| 180 | } | ||
| 181 | |||
| 182 | static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask) | ||
| 183 | { | ||
| 184 | u32 val; | ||
| 185 | |||
| 186 | if (eth->soc->dma_type & MTK_PDMA) { | ||
| 187 | val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); | ||
| 188 | mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE); | ||
| 189 | /* flush write */ | ||
| 190 | mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); | ||
| 191 | } | ||
| 192 | if (eth->soc->dma_type & MTK_QDMA) { | ||
| 193 | val = mtk_r32(eth, MTK_QMTK_INT_ENABLE); | ||
| 194 | mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE); | ||
| 195 | /* flush write */ | ||
| 196 | mtk_r32(eth, MTK_QMTK_INT_ENABLE); | ||
| 197 | } | ||
| 198 | } | ||
| 199 | |||
| 200 | static inline u32 mtk_irq_enabled(struct mtk_eth *eth) | ||
| 201 | { | ||
| 202 | u32 enabled = 0; | ||
| 203 | |||
| 204 | if (eth->soc->dma_type & MTK_PDMA) | ||
| 205 | enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); | ||
| 206 | if (eth->soc->dma_type & MTK_QDMA) | ||
| 207 | enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE); | ||
| 208 | |||
| 209 | return enabled; | ||
| 210 | } | ||
| 211 | |||
| 212 | static inline void mtk_hw_set_macaddr(struct mtk_mac *mac, | ||
| 213 | unsigned char *macaddr) | ||
| 214 | { | ||
| 215 | unsigned long flags; | ||
| 216 | |||
| 217 | spin_lock_irqsave(&mac->hw->page_lock, flags); | ||
| 218 | mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH); | ||
| 219 | mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | | ||
| 220 | (macaddr[4] << 8) | macaddr[5], | ||
| 221 | MTK_GDMA1_MAC_ADRL); | ||
| 222 | spin_unlock_irqrestore(&mac->hw->page_lock, flags); | ||
| 223 | } | ||
| 224 | |||
| 225 | static int mtk_set_mac_address(struct net_device *dev, void *p) | ||
| 226 | { | ||
| 227 | int ret = eth_mac_addr(dev, p); | ||
| 228 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 229 | struct mtk_eth *eth = mac->hw; | ||
| 230 | |||
| 231 | if (ret) | ||
| 232 | return ret; | ||
| 233 | |||
| 234 | if (eth->soc->set_mac) | ||
| 235 | eth->soc->set_mac(mac, dev->dev_addr); | ||
| 236 | else | ||
| 237 | mtk_hw_set_macaddr(mac, p); | ||
| 238 | |||
| 239 | return 0; | ||
| 240 | } | ||
| 241 | |||
| 242 | static inline int mtk_max_frag_size(int mtu) | ||
| 243 | { | ||
| 244 | /* make sure buf_size will be at least MAX_RX_LENGTH */ | ||
| 245 | if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH) | ||
| 246 | mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN; | ||
| 247 | |||
| 248 | return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) + | ||
| 249 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
| 250 | } | ||
| 251 | |||
| 252 | static inline int mtk_max_buf_size(int frag_size) | ||
| 253 | { | ||
| 254 | int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - | ||
| 255 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
| 256 | |||
| 257 | WARN_ON(buf_size < MAX_RX_LENGTH); | ||
| 258 | |||
| 259 | return buf_size; | ||
| 260 | } | ||
| 261 | |||
| 262 | static inline void mtk_get_rxd(struct mtk_rx_dma *rxd, | ||
| 263 | struct mtk_rx_dma *dma_rxd) | ||
| 264 | { | ||
| 265 | rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); | ||
| 266 | rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); | ||
| 267 | rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); | ||
| 268 | rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); | ||
| 269 | } | ||
| 270 | |||
| 271 | static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd, | ||
| 272 | struct mtk_tx_dma *dma_txd) | ||
| 273 | { | ||
| 274 | WRITE_ONCE(dma_txd->txd1, txd->txd1); | ||
| 275 | WRITE_ONCE(dma_txd->txd3, txd->txd3); | ||
| 276 | WRITE_ONCE(dma_txd->txd4, txd->txd4); | ||
| 277 | /* clean dma done flag last */ | ||
| 278 | WRITE_ONCE(dma_txd->txd2, txd->txd2); | ||
| 279 | } | ||
| 280 | |||
| 281 | static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring) | ||
| 282 | { | ||
| 283 | int i; | ||
| 284 | |||
| 285 | if (ring->rx_data && ring->rx_dma) { | ||
| 286 | for (i = 0; i < ring->rx_ring_size; i++) { | ||
| 287 | if (!ring->rx_data[i]) | ||
| 288 | continue; | ||
| 289 | if (!ring->rx_dma[i].rxd1) | ||
| 290 | continue; | ||
| 291 | dma_unmap_single(eth->dev, | ||
| 292 | ring->rx_dma[i].rxd1, | ||
| 293 | ring->rx_buf_size, | ||
| 294 | DMA_FROM_DEVICE); | ||
| 295 | skb_free_frag(ring->rx_data[i]); | ||
| 296 | } | ||
| 297 | kfree(ring->rx_data); | ||
| 298 | ring->rx_data = NULL; | ||
| 299 | } | ||
| 300 | |||
| 301 | if (ring->rx_dma) { | ||
| 302 | dma_free_coherent(eth->dev, | ||
| 303 | ring->rx_ring_size * sizeof(*ring->rx_dma), | ||
| 304 | ring->rx_dma, | ||
| 305 | ring->rx_phys); | ||
| 306 | ring->rx_dma = NULL; | ||
| 307 | } | ||
| 308 | } | ||
| 309 | |||
| 310 | static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring) | ||
| 311 | { | ||
| 312 | int i, pad = 0; | ||
| 313 | |||
| 314 | ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN); | ||
| 315 | ring->rx_buf_size = mtk_max_buf_size(ring->frag_size); | ||
| 316 | ring->rx_ring_size = eth->soc->dma_ring_size; | ||
| 317 | ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data), | ||
| 318 | GFP_KERNEL); | ||
| 319 | if (!ring->rx_data) | ||
| 320 | goto no_rx_mem; | ||
| 321 | |||
| 322 | for (i = 0; i < ring->rx_ring_size; i++) { | ||
| 323 | ring->rx_data[i] = netdev_alloc_frag(ring->frag_size); | ||
| 324 | if (!ring->rx_data[i]) | ||
| 325 | goto no_rx_mem; | ||
| 326 | } | ||
| 327 | |||
| 328 | ring->rx_dma = | ||
| 329 | dma_alloc_coherent(eth->dev, | ||
| 330 | ring->rx_ring_size * sizeof(*ring->rx_dma), | ||
| 331 | &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO); | ||
| 332 | if (!ring->rx_dma) | ||
| 333 | goto no_rx_mem; | ||
| 334 | |||
| 335 | if (!eth->soc->rx_2b_offset) | ||
| 336 | pad = NET_IP_ALIGN; | ||
| 337 | |||
| 338 | for (i = 0; i < ring->rx_ring_size; i++) { | ||
| 339 | dma_addr_t dma_addr = dma_map_single(eth->dev, | ||
| 340 | ring->rx_data[i] + NET_SKB_PAD + pad, | ||
| 341 | ring->rx_buf_size, | ||
| 342 | DMA_FROM_DEVICE); | ||
| 343 | if (unlikely(dma_mapping_error(eth->dev, dma_addr))) | ||
| 344 | goto no_rx_mem; | ||
| 345 | ring->rx_dma[i].rxd1 = (unsigned int)dma_addr; | ||
| 346 | |||
| 347 | if (eth->soc->rx_sg_dma) | ||
| 348 | ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); | ||
| 349 | else | ||
| 350 | ring->rx_dma[i].rxd2 = RX_DMA_LSO; | ||
| 351 | } | ||
| 352 | ring->rx_calc_idx = ring->rx_ring_size - 1; | ||
| 353 | /* make sure that all changes to the dma ring are flushed before we | ||
| 354 | * continue | ||
| 355 | */ | ||
| 356 | wmb(); | ||
| 357 | |||
| 358 | return 0; | ||
| 359 | |||
| 360 | no_rx_mem: | ||
| 361 | return -ENOMEM; | ||
| 362 | } | ||
| 363 | |||
| 364 | static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) | ||
| 365 | { | ||
| 366 | if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { | ||
| 367 | dma_unmap_single(dev, | ||
| 368 | dma_unmap_addr(tx_buf, dma_addr0), | ||
| 369 | dma_unmap_len(tx_buf, dma_len0), | ||
| 370 | DMA_TO_DEVICE); | ||
| 371 | } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { | ||
| 372 | dma_unmap_page(dev, | ||
| 373 | dma_unmap_addr(tx_buf, dma_addr0), | ||
| 374 | dma_unmap_len(tx_buf, dma_len0), | ||
| 375 | DMA_TO_DEVICE); | ||
| 376 | } | ||
| 377 | if (tx_buf->flags & MTK_TX_FLAGS_PAGE1) | ||
| 378 | dma_unmap_page(dev, | ||
| 379 | dma_unmap_addr(tx_buf, dma_addr1), | ||
| 380 | dma_unmap_len(tx_buf, dma_len1), | ||
| 381 | DMA_TO_DEVICE); | ||
| 382 | |||
| 383 | tx_buf->flags = 0; | ||
| 384 | if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC)) | ||
| 385 | dev_kfree_skb_any(tx_buf->skb); | ||
| 386 | tx_buf->skb = NULL; | ||
| 387 | } | ||
| 388 | |||
| 389 | static void mtk_pdma_tx_clean(struct mtk_eth *eth) | ||
| 390 | { | ||
| 391 | struct mtk_tx_ring *ring = &eth->tx_ring; | ||
| 392 | int i; | ||
| 393 | |||
| 394 | if (ring->tx_buf) { | ||
| 395 | for (i = 0; i < ring->tx_ring_size; i++) | ||
| 396 | mtk_txd_unmap(eth->dev, &ring->tx_buf[i]); | ||
| 397 | kfree(ring->tx_buf); | ||
| 398 | ring->tx_buf = NULL; | ||
| 399 | } | ||
| 400 | |||
| 401 | if (ring->tx_dma) { | ||
| 402 | dma_free_coherent(eth->dev, | ||
| 403 | ring->tx_ring_size * sizeof(*ring->tx_dma), | ||
| 404 | ring->tx_dma, | ||
| 405 | ring->tx_phys); | ||
| 406 | ring->tx_dma = NULL; | ||
| 407 | } | ||
| 408 | } | ||
| 409 | |||
| 410 | static void mtk_qdma_tx_clean(struct mtk_eth *eth) | ||
| 411 | { | ||
| 412 | struct mtk_tx_ring *ring = &eth->tx_ring; | ||
| 413 | int i; | ||
| 414 | |||
| 415 | if (ring->tx_buf) { | ||
| 416 | for (i = 0; i < ring->tx_ring_size; i++) | ||
| 417 | mtk_txd_unmap(eth->dev, &ring->tx_buf[i]); | ||
| 418 | kfree(ring->tx_buf); | ||
| 419 | ring->tx_buf = NULL; | ||
| 420 | } | ||
| 421 | |||
| 422 | if (ring->tx_dma) { | ||
| 423 | dma_free_coherent(eth->dev, | ||
| 424 | ring->tx_ring_size * sizeof(*ring->tx_dma), | ||
| 425 | ring->tx_dma, | ||
| 426 | ring->tx_phys); | ||
| 427 | ring->tx_dma = NULL; | ||
| 428 | } | ||
| 429 | } | ||
| 430 | |||
| 431 | void mtk_stats_update_mac(struct mtk_mac *mac) | ||
| 432 | { | ||
| 433 | struct mtk_hw_stats *hw_stats = mac->hw_stats; | ||
| 434 | unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]; | ||
| 435 | u64 stats; | ||
| 436 | |||
| 437 | base += hw_stats->reg_offset; | ||
| 438 | |||
| 439 | u64_stats_update_begin(&hw_stats->syncp); | ||
| 440 | |||
| 441 | if (mac->hw->soc->new_stats) { | ||
| 442 | hw_stats->rx_bytes += mtk_r32(mac->hw, base); | ||
| 443 | stats = mtk_r32(mac->hw, base + 0x04); | ||
| 444 | if (stats) | ||
| 445 | hw_stats->rx_bytes += (stats << 32); | ||
| 446 | hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08); | ||
| 447 | hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10); | ||
| 448 | hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14); | ||
| 449 | hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18); | ||
| 450 | hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c); | ||
| 451 | hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20); | ||
| 452 | hw_stats->rx_flow_control_packets += | ||
| 453 | mtk_r32(mac->hw, base + 0x24); | ||
| 454 | hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28); | ||
| 455 | hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c); | ||
| 456 | hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30); | ||
| 457 | stats = mtk_r32(mac->hw, base + 0x34); | ||
| 458 | if (stats) | ||
| 459 | hw_stats->tx_bytes += (stats << 32); | ||
| 460 | hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38); | ||
| 461 | } else { | ||
| 462 | hw_stats->tx_bytes += mtk_r32(mac->hw, base); | ||
| 463 | hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04); | ||
| 464 | hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08); | ||
| 465 | hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c); | ||
| 466 | hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20); | ||
| 467 | hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24); | ||
| 468 | hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28); | ||
| 469 | hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c); | ||
| 470 | hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30); | ||
| 471 | hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34); | ||
| 472 | hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38); | ||
| 473 | hw_stats->rx_flow_control_packets += | ||
| 474 | mtk_r32(mac->hw, base + 0x3c); | ||
| 475 | } | ||
| 476 | |||
| 477 | u64_stats_update_end(&hw_stats->syncp); | ||
| 478 | } | ||
| 479 | |||
| 480 | static void mtk_get_stats64(struct net_device *dev, | ||
| 481 | struct rtnl_link_stats64 *storage) | ||
| 482 | { | ||
| 483 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 484 | struct mtk_hw_stats *hw_stats = mac->hw_stats; | ||
| 485 | unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]; | ||
| 486 | unsigned int start; | ||
| 487 | |||
| 488 | if (!base) { | ||
| 489 | netdev_stats_to_stats64(storage, &dev->stats); | ||
| 490 | return; | ||
| 491 | } | ||
| 492 | |||
| 493 | if (netif_running(dev) && netif_device_present(dev)) { | ||
| 494 | if (spin_trylock(&hw_stats->stats_lock)) { | ||
| 495 | mtk_stats_update_mac(mac); | ||
| 496 | spin_unlock(&hw_stats->stats_lock); | ||
| 497 | } | ||
| 498 | } | ||
| 499 | |||
| 500 | do { | ||
| 501 | start = u64_stats_fetch_begin_irq(&hw_stats->syncp); | ||
| 502 | storage->rx_packets = hw_stats->rx_packets; | ||
| 503 | storage->tx_packets = hw_stats->tx_packets; | ||
| 504 | storage->rx_bytes = hw_stats->rx_bytes; | ||
| 505 | storage->tx_bytes = hw_stats->tx_bytes; | ||
| 506 | storage->collisions = hw_stats->tx_collisions; | ||
| 507 | storage->rx_length_errors = hw_stats->rx_short_errors + | ||
| 508 | hw_stats->rx_long_errors; | ||
| 509 | storage->rx_over_errors = hw_stats->rx_overflow; | ||
| 510 | storage->rx_crc_errors = hw_stats->rx_fcs_errors; | ||
| 511 | storage->rx_errors = hw_stats->rx_checksum_errors; | ||
| 512 | storage->tx_aborted_errors = hw_stats->tx_skip; | ||
| 513 | } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); | ||
| 514 | |||
| 515 | storage->tx_errors = dev->stats.tx_errors; | ||
| 516 | storage->rx_dropped = dev->stats.rx_dropped; | ||
| 517 | storage->tx_dropped = dev->stats.tx_dropped; | ||
| 518 | } | ||
| 519 | |||
| 520 | static int mtk_vlan_rx_add_vid(struct net_device *dev, | ||
| 521 | __be16 proto, u16 vid) | ||
| 522 | { | ||
| 523 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 524 | struct mtk_eth *eth = mac->hw; | ||
| 525 | u32 idx = (vid & 0xf); | ||
| 526 | u32 vlan_cfg; | ||
| 527 | |||
| 528 | if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) && | ||
| 529 | (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) | ||
| 530 | return 0; | ||
| 531 | |||
| 532 | if (test_bit(idx, &eth->vlan_map)) { | ||
| 533 | netdev_warn(dev, "disable tx vlan offload\n"); | ||
| 534 | dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX; | ||
| 535 | netdev_update_features(dev); | ||
| 536 | } else { | ||
| 537 | vlan_cfg = mtk_r32(eth, | ||
| 538 | mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] + | ||
| 539 | ((idx >> 1) << 2)); | ||
| 540 | if (idx & 0x1) { | ||
| 541 | vlan_cfg &= 0xffff; | ||
| 542 | vlan_cfg |= (vid << 16); | ||
| 543 | } else { | ||
| 544 | vlan_cfg &= 0xffff0000; | ||
| 545 | vlan_cfg |= vid; | ||
| 546 | } | ||
| 547 | mtk_w32(eth, | ||
| 548 | vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] + | ||
| 549 | ((idx >> 1) << 2)); | ||
| 550 | set_bit(idx, &eth->vlan_map); | ||
| 551 | } | ||
| 552 | |||
| 553 | return 0; | ||
| 554 | } | ||
| 555 | |||
| 556 | static int mtk_vlan_rx_kill_vid(struct net_device *dev, | ||
| 557 | __be16 proto, u16 vid) | ||
| 558 | { | ||
| 559 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 560 | struct mtk_eth *eth = mac->hw; | ||
| 561 | u32 idx = (vid & 0xf); | ||
| 562 | |||
| 563 | if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) && | ||
| 564 | (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) | ||
| 565 | return 0; | ||
| 566 | |||
| 567 | clear_bit(idx, &eth->vlan_map); | ||
| 568 | |||
| 569 | return 0; | ||
| 570 | } | ||
| 571 | |||
| 572 | static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring) | ||
| 573 | { | ||
| 574 | barrier(); | ||
| 575 | return (u32)(ring->tx_ring_size - | ||
| 576 | ((ring->tx_next_idx - ring->tx_free_idx) & | ||
| 577 | (ring->tx_ring_size - 1))); | ||
| 578 | } | ||
| 579 | |||
| 580 | static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth) | ||
| 581 | { | ||
| 582 | unsigned int len; | ||
| 583 | int ret; | ||
| 584 | |||
| 585 | if (unlikely(skb->len >= VLAN_ETH_ZLEN)) | ||
| 586 | return 0; | ||
| 587 | |||
| 588 | if (eth->soc->padding_64b && !eth->soc->padding_bug) | ||
| 589 | return 0; | ||
| 590 | |||
| 591 | if (skb_vlan_tag_present(skb)) | ||
| 592 | len = ETH_ZLEN; | ||
| 593 | else if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) | ||
| 594 | len = VLAN_ETH_ZLEN; | ||
| 595 | else if (!eth->soc->padding_64b) | ||
| 596 | len = ETH_ZLEN; | ||
| 597 | else | ||
| 598 | return 0; | ||
| 599 | |||
| 600 | if (skb->len >= len) | ||
| 601 | return 0; | ||
| 602 | |||
| 603 | ret = skb_pad(skb, len - skb->len); | ||
| 604 | if (ret < 0) | ||
| 605 | return ret; | ||
| 606 | skb->len = len; | ||
| 607 | skb_set_tail_pointer(skb, len); | ||
| 608 | |||
| 609 | return ret; | ||
| 610 | } | ||
| 611 | |||
| 612 | static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev, | ||
| 613 | int tx_num, struct mtk_tx_ring *ring, bool gso) | ||
| 614 | { | ||
| 615 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 616 | struct mtk_eth *eth = mac->hw; | ||
| 617 | struct skb_frag_struct *frag; | ||
| 618 | struct mtk_tx_dma txd, *ptxd; | ||
| 619 | struct mtk_tx_buf *tx_buf; | ||
| 620 | int i, j, k, frag_size, frag_map_size, offset; | ||
| 621 | dma_addr_t mapped_addr; | ||
| 622 | unsigned int nr_frags; | ||
| 623 | u32 def_txd4; | ||
| 624 | |||
| 625 | if (mtk_skb_padto(skb, eth)) { | ||
| 626 | netif_warn(eth, tx_err, dev, "tx padding failed!\n"); | ||
| 627 | return -1; | ||
| 628 | } | ||
| 629 | |||
| 630 | tx_buf = &ring->tx_buf[ring->tx_next_idx]; | ||
| 631 | memset(tx_buf, 0, sizeof(*tx_buf)); | ||
| 632 | memset(&txd, 0, sizeof(txd)); | ||
| 633 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
| 634 | |||
| 635 | /* init tx descriptor */ | ||
| 636 | def_txd4 = eth->soc->txd4; | ||
| 637 | txd.txd4 = def_txd4; | ||
| 638 | |||
| 639 | if (eth->soc->mac_count > 1) | ||
| 640 | txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; | ||
| 641 | |||
| 642 | if (gso) | ||
| 643 | txd.txd4 |= TX_DMA_TSO; | ||
| 644 | |||
| 645 | /* TX Checksum offload */ | ||
| 646 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
| 647 | txd.txd4 |= TX_DMA_CHKSUM; | ||
| 648 | |||
| 649 | /* VLAN header offload */ | ||
| 650 | if (skb_vlan_tag_present(skb)) { | ||
| 651 | u16 tag = skb_vlan_tag_get(skb); | ||
| 652 | |||
| 653 | txd.txd4 |= TX_DMA_INS_VLAN | | ||
| 654 | ((tag >> VLAN_PRIO_SHIFT) << 4) | | ||
| 655 | (tag & 0xF); | ||
| 656 | } | ||
| 657 | |||
| 658 | mapped_addr = dma_map_single(&dev->dev, skb->data, | ||
| 659 | skb_headlen(skb), DMA_TO_DEVICE); | ||
| 660 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | ||
| 661 | return -1; | ||
| 662 | |||
| 663 | txd.txd1 = mapped_addr; | ||
| 664 | txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb)); | ||
| 665 | |||
| 666 | tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; | ||
| 667 | dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); | ||
| 668 | dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); | ||
| 669 | |||
| 670 | /* TX SG offload */ | ||
| 671 | j = ring->tx_next_idx; | ||
| 672 | k = 0; | ||
| 673 | for (i = 0; i < nr_frags; i++) { | ||
| 674 | offset = 0; | ||
| 675 | frag = &skb_shinfo(skb)->frags[i]; | ||
| 676 | frag_size = skb_frag_size(frag); | ||
| 677 | |||
| 678 | while (frag_size > 0) { | ||
| 679 | frag_map_size = min(frag_size, TX_DMA_BUF_LEN); | ||
| 680 | mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, | ||
| 681 | frag_map_size, | ||
| 682 | DMA_TO_DEVICE); | ||
| 683 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | ||
| 684 | goto err_dma; | ||
| 685 | |||
| 686 | if (k & 0x1) { | ||
| 687 | j = NEXT_TX_DESP_IDX(j); | ||
| 688 | txd.txd1 = mapped_addr; | ||
| 689 | txd.txd2 = TX_DMA_PLEN0(frag_map_size); | ||
| 690 | txd.txd4 = def_txd4; | ||
| 691 | |||
| 692 | tx_buf = &ring->tx_buf[j]; | ||
| 693 | memset(tx_buf, 0, sizeof(*tx_buf)); | ||
| 694 | |||
| 695 | tx_buf->flags |= MTK_TX_FLAGS_PAGE0; | ||
| 696 | dma_unmap_addr_set(tx_buf, dma_addr0, | ||
| 697 | mapped_addr); | ||
| 698 | dma_unmap_len_set(tx_buf, dma_len0, | ||
| 699 | frag_map_size); | ||
| 700 | } else { | ||
| 701 | txd.txd3 = mapped_addr; | ||
| 702 | txd.txd2 |= TX_DMA_PLEN1(frag_map_size); | ||
| 703 | |||
| 704 | tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC; | ||
| 705 | tx_buf->flags |= MTK_TX_FLAGS_PAGE1; | ||
| 706 | dma_unmap_addr_set(tx_buf, dma_addr1, | ||
| 707 | mapped_addr); | ||
| 708 | dma_unmap_len_set(tx_buf, dma_len1, | ||
| 709 | frag_map_size); | ||
| 710 | |||
| 711 | if (!((i == (nr_frags - 1)) && | ||
| 712 | (frag_map_size == frag_size))) { | ||
| 713 | mtk_set_txd_pdma(&txd, | ||
| 714 | &ring->tx_dma[j]); | ||
| 715 | memset(&txd, 0, sizeof(txd)); | ||
| 716 | } | ||
| 717 | } | ||
| 718 | frag_size -= frag_map_size; | ||
| 719 | offset += frag_map_size; | ||
| 720 | k++; | ||
| 721 | } | ||
| 722 | } | ||
| 723 | |||
| 724 | /* set last segment */ | ||
| 725 | if (k & 0x1) | ||
| 726 | txd.txd2 |= TX_DMA_LS1; | ||
| 727 | else | ||
| 728 | txd.txd2 |= TX_DMA_LS0; | ||
| 729 | mtk_set_txd_pdma(&txd, &ring->tx_dma[j]); | ||
| 730 | |||
| 731 | /* store skb for cleanup */ | ||
| 732 | tx_buf->skb = skb; | ||
| 733 | |||
| 734 | netdev_sent_queue(dev, skb->len); | ||
| 735 | skb_tx_timestamp(skb); | ||
| 736 | |||
| 737 | ring->tx_next_idx = NEXT_TX_DESP_IDX(j); | ||
| 738 | /* make sure that all changes to the dma ring are flushed before we | ||
| 739 | * continue | ||
| 740 | */ | ||
| 741 | wmb(); | ||
| 742 | atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring)); | ||
| 743 | |||
| 744 | if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) | ||
| 745 | mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0); | ||
| 746 | |||
| 747 | return 0; | ||
| 748 | |||
| 749 | err_dma: | ||
| 750 | j = ring->tx_next_idx; | ||
| 751 | for (i = 0; i < tx_num; i++) { | ||
| 752 | ptxd = &ring->tx_dma[j]; | ||
| 753 | tx_buf = &ring->tx_buf[j]; | ||
| 754 | |||
| 755 | /* unmap dma */ | ||
| 756 | mtk_txd_unmap(&dev->dev, tx_buf); | ||
| 757 | |||
| 758 | ptxd->txd2 = TX_DMA_DESP2_DEF; | ||
| 759 | j = NEXT_TX_DESP_IDX(j); | ||
| 760 | } | ||
| 761 | /* make sure that all changes to the dma ring are flushed before we | ||
| 762 | * continue | ||
| 763 | */ | ||
| 764 | wmb(); | ||
| 765 | return -1; | ||
| 766 | } | ||
| 767 | |||
| 768 | /* the qdma core needs scratch memory to be set up */ | ||
| 769 | static int mtk_init_fq_dma(struct mtk_eth *eth) | ||
| 770 | { | ||
| 771 | dma_addr_t dma_addr, phy_ring_head, phy_ring_tail; | ||
| 772 | int cnt = eth->soc->dma_ring_size; | ||
| 773 | int i; | ||
| 774 | |||
| 775 | eth->scratch_ring = dma_alloc_coherent(eth->dev, | ||
| 776 | cnt * sizeof(struct mtk_tx_dma), | ||
| 777 | &phy_ring_head, | ||
| 778 | GFP_ATOMIC | __GFP_ZERO); | ||
| 779 | if (unlikely(!eth->scratch_ring)) | ||
| 780 | return -ENOMEM; | ||
| 781 | |||
| 782 | eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE, | ||
| 783 | GFP_KERNEL); | ||
| 784 | dma_addr = dma_map_single(eth->dev, | ||
| 785 | eth->scratch_head, cnt * QDMA_PAGE_SIZE, | ||
| 786 | DMA_FROM_DEVICE); | ||
| 787 | if (unlikely(dma_mapping_error(eth->dev, dma_addr))) | ||
| 788 | return -ENOMEM; | ||
| 789 | |||
| 790 | memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt); | ||
| 791 | phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1)); | ||
| 792 | |||
| 793 | for (i = 0; i < cnt; i++) { | ||
| 794 | eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE)); | ||
| 795 | if (i < cnt - 1) | ||
| 796 | eth->scratch_ring[i].txd2 = (phy_ring_head + | ||
| 797 | ((i + 1) * sizeof(struct mtk_tx_dma))); | ||
| 798 | eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE); | ||
| 799 | } | ||
| 800 | |||
| 801 | mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD); | ||
| 802 | mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); | ||
| 803 | mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); | ||
| 804 | mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); | ||
| 805 | |||
| 806 | return 0; | ||
| 807 | } | ||
| 808 | |||
| 809 | static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) | ||
| 810 | { | ||
| 811 | void *ret = ring->tx_dma; | ||
| 812 | |||
| 813 | return ret + (desc - ring->tx_phys); | ||
| 814 | } | ||
| 815 | |||
| 816 | static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring, | ||
| 817 | struct mtk_tx_dma *txd) | ||
| 818 | { | ||
| 819 | return mtk_qdma_phys_to_virt(ring, txd->txd2); | ||
| 820 | } | ||
| 821 | |||
| 822 | static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, | ||
| 823 | struct mtk_tx_dma *txd) | ||
| 824 | { | ||
| 825 | int idx = txd - ring->tx_dma; | ||
| 826 | |||
| 827 | return &ring->tx_buf[idx]; | ||
| 828 | } | ||
| 829 | |||
| 830 | static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev, | ||
| 831 | int tx_num, struct mtk_tx_ring *ring, bool gso) | ||
| 832 | { | ||
| 833 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 834 | struct mtk_eth *eth = mac->hw; | ||
| 835 | struct mtk_tx_dma *itxd, *txd; | ||
| 836 | struct mtk_tx_buf *tx_buf; | ||
| 837 | dma_addr_t mapped_addr; | ||
| 838 | unsigned int nr_frags; | ||
| 839 | int i, n_desc = 1; | ||
| 840 | u32 txd4 = eth->soc->txd4; | ||
| 841 | |||
| 842 | itxd = ring->tx_next_free; | ||
| 843 | if (itxd == ring->tx_last_free) | ||
| 844 | return -ENOMEM; | ||
| 845 | |||
| 846 | if (eth->soc->mac_count > 1) | ||
| 847 | txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; | ||
| 848 | |||
| 849 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); | ||
| 850 | memset(tx_buf, 0, sizeof(*tx_buf)); | ||
| 851 | |||
| 852 | if (gso) | ||
| 853 | txd4 |= TX_DMA_TSO; | ||
| 854 | |||
| 855 | /* TX Checksum offload */ | ||
| 856 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
| 857 | txd4 |= TX_DMA_CHKSUM; | ||
| 858 | |||
| 859 | /* VLAN header offload */ | ||
| 860 | if (skb_vlan_tag_present(skb)) | ||
| 861 | txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb); | ||
| 862 | |||
| 863 | mapped_addr = dma_map_single(&dev->dev, skb->data, | ||
| 864 | skb_headlen(skb), DMA_TO_DEVICE); | ||
| 865 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | ||
| 866 | return -ENOMEM; | ||
| 867 | |||
| 868 | WRITE_ONCE(itxd->txd1, mapped_addr); | ||
| 869 | tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; | ||
| 870 | dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); | ||
| 871 | dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); | ||
| 872 | |||
| 873 | /* TX SG offload */ | ||
| 874 | txd = itxd; | ||
| 875 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
| 876 | for (i = 0; i < nr_frags; i++) { | ||
| 877 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; | ||
| 878 | unsigned int offset = 0; | ||
| 879 | int frag_size = skb_frag_size(frag); | ||
| 880 | |||
| 881 | while (frag_size) { | ||
| 882 | bool last_frag = false; | ||
| 883 | unsigned int frag_map_size; | ||
| 884 | |||
| 885 | txd = mtk_tx_next_qdma(ring, txd); | ||
| 886 | if (txd == ring->tx_last_free) | ||
| 887 | goto err_dma; | ||
| 888 | |||
| 889 | n_desc++; | ||
| 890 | frag_map_size = min(frag_size, TX_DMA_BUF_LEN); | ||
| 891 | mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, | ||
| 892 | frag_map_size, | ||
| 893 | DMA_TO_DEVICE); | ||
| 894 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | ||
| 895 | goto err_dma; | ||
| 896 | |||
| 897 | if (i == nr_frags - 1 && | ||
| 898 | (frag_size - frag_map_size) == 0) | ||
| 899 | last_frag = true; | ||
| 900 | |||
| 901 | WRITE_ONCE(txd->txd1, mapped_addr); | ||
| 902 | WRITE_ONCE(txd->txd3, (QDMA_TX_SWC | | ||
| 903 | TX_DMA_PLEN0(frag_map_size) | | ||
| 904 | last_frag * TX_DMA_LS0) | | ||
| 905 | mac->id); | ||
| 906 | WRITE_ONCE(txd->txd4, 0); | ||
| 907 | |||
| 908 | tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC; | ||
| 909 | tx_buf = mtk_desc_to_tx_buf(ring, txd); | ||
| 910 | memset(tx_buf, 0, sizeof(*tx_buf)); | ||
| 911 | |||
| 912 | tx_buf->flags |= MTK_TX_FLAGS_PAGE0; | ||
| 913 | dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); | ||
| 914 | dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); | ||
| 915 | frag_size -= frag_map_size; | ||
| 916 | offset += frag_map_size; | ||
| 917 | } | ||
| 918 | } | ||
| 919 | |||
| 920 | /* store skb for cleanup */ | ||
| 921 | tx_buf->skb = skb; | ||
| 922 | |||
| 923 | WRITE_ONCE(itxd->txd4, txd4); | ||
| 924 | WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | | ||
| 925 | (!nr_frags * TX_DMA_LS0))); | ||
| 926 | |||
| 927 | netdev_sent_queue(dev, skb->len); | ||
| 928 | skb_tx_timestamp(skb); | ||
| 929 | |||
| 930 | ring->tx_next_free = mtk_tx_next_qdma(ring, txd); | ||
| 931 | atomic_sub(n_desc, &ring->tx_free_count); | ||
| 932 | |||
| 933 | /* make sure that all changes to the dma ring are flushed before we | ||
| 934 | * continue | ||
| 935 | */ | ||
| 936 | wmb(); | ||
| 937 | |||
| 938 | if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) | ||
| 939 | mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); | ||
| 940 | |||
| 941 | return 0; | ||
| 942 | |||
| 943 | err_dma: | ||
| 944 | do { | ||
| 945 | tx_buf = mtk_desc_to_tx_buf(ring, txd); | ||
| 946 | |||
| 947 | /* unmap dma */ | ||
| 948 | mtk_txd_unmap(&dev->dev, tx_buf); | ||
| 949 | |||
| 950 | itxd->txd3 = TX_DMA_DESP2_DEF; | ||
| 951 | itxd = mtk_tx_next_qdma(ring, itxd); | ||
| 952 | } while (itxd != txd); | ||
| 953 | |||
| 954 | return -ENOMEM; | ||
| 955 | } | ||
| 956 | |||
| 957 | static inline int mtk_cal_txd_req(struct sk_buff *skb) | ||
| 958 | { | ||
| 959 | int i, nfrags; | ||
| 960 | struct skb_frag_struct *frag; | ||
| 961 | |||
| 962 | nfrags = 1; | ||
| 963 | if (skb_is_gso(skb)) { | ||
| 964 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
| 965 | frag = &skb_shinfo(skb)->frags[i]; | ||
| 966 | nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN); | ||
| 967 | } | ||
| 968 | } else { | ||
| 969 | nfrags += skb_shinfo(skb)->nr_frags; | ||
| 970 | } | ||
| 971 | |||
| 972 | return DIV_ROUND_UP(nfrags, 2); | ||
| 973 | } | ||
| 974 | |||
| 975 | static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
| 976 | { | ||
| 977 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 978 | struct mtk_eth *eth = mac->hw; | ||
| 979 | struct mtk_tx_ring *ring = &eth->tx_ring; | ||
| 980 | struct net_device_stats *stats = &dev->stats; | ||
| 981 | int tx_num; | ||
| 982 | int len = skb->len; | ||
| 983 | bool gso = false; | ||
| 984 | |||
| 985 | tx_num = mtk_cal_txd_req(skb); | ||
| 986 | if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) { | ||
| 987 | netif_stop_queue(dev); | ||
| 988 | netif_err(eth, tx_queued, dev, | ||
| 989 | "Tx Ring full when queue awake!\n"); | ||
| 990 | return NETDEV_TX_BUSY; | ||
| 991 | } | ||
| 992 | |||
| 993 | /* TSO: fill MSS info in tcp checksum field */ | ||
| 994 | if (skb_is_gso(skb)) { | ||
| 995 | if (skb_cow_head(skb, 0)) { | ||
| 996 | netif_warn(eth, tx_err, dev, | ||
| 997 | "GSO expand head fail.\n"); | ||
| 998 | goto drop; | ||
| 999 | } | ||
| 1000 | |||
| 1001 | if (skb_shinfo(skb)->gso_type & | ||
| 1002 | (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { | ||
| 1003 | gso = true; | ||
| 1004 | tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); | ||
| 1005 | } | ||
| 1006 | } | ||
| 1007 | |||
| 1008 | if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0) | ||
| 1009 | goto drop; | ||
| 1010 | |||
| 1011 | stats->tx_packets++; | ||
| 1012 | stats->tx_bytes += len; | ||
| 1013 | |||
| 1014 | if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) { | ||
| 1015 | netif_stop_queue(dev); | ||
| 1016 | smp_mb(); | ||
| 1017 | if (unlikely(atomic_read(&ring->tx_free_count) > | ||
| 1018 | ring->tx_thresh)) | ||
| 1019 | netif_wake_queue(dev); | ||
| 1020 | } | ||
| 1021 | |||
| 1022 | return NETDEV_TX_OK; | ||
| 1023 | |||
| 1024 | drop: | ||
| 1025 | stats->tx_dropped++; | ||
| 1026 | dev_kfree_skb(skb); | ||
| 1027 | return NETDEV_TX_OK; | ||
| 1028 | } | ||
| 1029 | |||
| 1030 | static int mtk_poll_rx(struct napi_struct *napi, int budget, | ||
| 1031 | struct mtk_eth *eth, u32 rx_intr) | ||
| 1032 | { | ||
| 1033 | struct mtk_soc_data *soc = eth->soc; | ||
| 1034 | struct mtk_rx_ring *ring = &eth->rx_ring[0]; | ||
| 1035 | int idx = ring->rx_calc_idx; | ||
| 1036 | u32 checksum_bit; | ||
| 1037 | struct sk_buff *skb; | ||
| 1038 | u8 *data, *new_data; | ||
| 1039 | struct mtk_rx_dma *rxd, trxd; | ||
| 1040 | int done = 0, pad; | ||
| 1041 | |||
| 1042 | if (eth->soc->hw_features & NETIF_F_RXCSUM) | ||
| 1043 | checksum_bit = soc->checksum_bit; | ||
| 1044 | else | ||
| 1045 | checksum_bit = 0; | ||
| 1046 | |||
| 1047 | if (eth->soc->rx_2b_offset) | ||
| 1048 | pad = 0; | ||
| 1049 | else | ||
| 1050 | pad = NET_IP_ALIGN; | ||
| 1051 | |||
| 1052 | while (done < budget) { | ||
| 1053 | struct net_device *netdev; | ||
| 1054 | unsigned int pktlen; | ||
| 1055 | dma_addr_t dma_addr; | ||
| 1056 | int mac = 0; | ||
| 1057 | |||
| 1058 | idx = NEXT_RX_DESP_IDX(idx); | ||
| 1059 | rxd = &ring->rx_dma[idx]; | ||
| 1060 | data = ring->rx_data[idx]; | ||
| 1061 | |||
| 1062 | mtk_get_rxd(&trxd, rxd); | ||
| 1063 | if (!(trxd.rxd2 & RX_DMA_DONE)) | ||
| 1064 | break; | ||
| 1065 | |||
| 1066 | /* find out which mac the packet comes from. values start at 1 */ | ||
| 1067 | if (eth->soc->mac_count > 1) { | ||
| 1068 | mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & | ||
| 1069 | RX_DMA_FPORT_MASK; | ||
| 1070 | mac--; | ||
| 1071 | if (mac < 0 || mac >= eth->soc->mac_count) | ||
| 1072 | goto release_desc; | ||
| 1073 | } | ||
| 1074 | |||
| 1075 | netdev = eth->netdev[mac]; | ||
| 1076 | |||
| 1077 | /* alloc new buffer */ | ||
| 1078 | new_data = napi_alloc_frag(ring->frag_size); | ||
| 1079 | if (unlikely(!new_data || !netdev)) { | ||
| 1080 | netdev->stats.rx_dropped++; | ||
| 1081 | goto release_desc; | ||
| 1082 | } | ||
| 1083 | dma_addr = dma_map_single(&netdev->dev, | ||
| 1084 | new_data + NET_SKB_PAD + pad, | ||
| 1085 | ring->rx_buf_size, | ||
| 1086 | DMA_FROM_DEVICE); | ||
| 1087 | if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { | ||
| 1088 | skb_free_frag(new_data); | ||
| 1089 | goto release_desc; | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | /* receive data */ | ||
| 1093 | skb = build_skb(data, ring->frag_size); | ||
| 1094 | if (unlikely(!skb)) { | ||
| 1095 | put_page(virt_to_head_page(new_data)); | ||
| 1096 | goto release_desc; | ||
| 1097 | } | ||
| 1098 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); | ||
| 1099 | |||
| 1100 | dma_unmap_single(&netdev->dev, trxd.rxd1, | ||
| 1101 | ring->rx_buf_size, DMA_FROM_DEVICE); | ||
| 1102 | pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); | ||
| 1103 | skb->dev = netdev; | ||
| 1104 | skb_put(skb, pktlen); | ||
| 1105 | if (trxd.rxd4 & checksum_bit) | ||
| 1106 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 1107 | else | ||
| 1108 | skb_checksum_none_assert(skb); | ||
| 1109 | skb->protocol = eth_type_trans(skb, netdev); | ||
| 1110 | |||
| 1111 | netdev->stats.rx_packets++; | ||
| 1112 | netdev->stats.rx_bytes += pktlen; | ||
| 1113 | |||
| 1114 | if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && | ||
| 1115 | RX_DMA_VID(trxd.rxd3)) | ||
| 1116 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), | ||
| 1117 | RX_DMA_VID(trxd.rxd3)); | ||
| 1118 | napi_gro_receive(napi, skb); | ||
| 1119 | |||
| 1120 | ring->rx_data[idx] = new_data; | ||
| 1121 | rxd->rxd1 = (unsigned int)dma_addr; | ||
| 1122 | |||
| 1123 | release_desc: | ||
| 1124 | if (eth->soc->rx_sg_dma) | ||
| 1125 | rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); | ||
| 1126 | else | ||
| 1127 | rxd->rxd2 = RX_DMA_LSO; | ||
| 1128 | |||
| 1129 | ring->rx_calc_idx = idx; | ||
| 1130 | /* make sure that all changes to the dma ring are flushed before | ||
| 1131 | * we continue | ||
| 1132 | */ | ||
| 1133 | wmb(); | ||
| 1134 | if (eth->soc->dma_type == MTK_QDMA) | ||
| 1135 | mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0); | ||
| 1136 | else | ||
| 1137 | mtk_reg_w32(eth, ring->rx_calc_idx, | ||
| 1138 | MTK_REG_RX_CALC_IDX0); | ||
| 1139 | done++; | ||
| 1140 | } | ||
| 1141 | |||
| 1142 | if (done < budget) | ||
| 1143 | mtk_irq_ack(eth, rx_intr); | ||
| 1144 | |||
| 1145 | return done; | ||
| 1146 | } | ||
| 1147 | |||
| 1148 | static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again) | ||
| 1149 | { | ||
| 1150 | struct sk_buff *skb; | ||
| 1151 | struct mtk_tx_buf *tx_buf; | ||
| 1152 | int done = 0; | ||
| 1153 | u32 idx, hwidx; | ||
| 1154 | struct mtk_tx_ring *ring = &eth->tx_ring; | ||
| 1155 | unsigned int bytes = 0; | ||
| 1156 | |||
| 1157 | idx = ring->tx_free_idx; | ||
| 1158 | hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0); | ||
| 1159 | |||
| 1160 | while ((idx != hwidx) && budget) { | ||
| 1161 | tx_buf = &ring->tx_buf[idx]; | ||
| 1162 | skb = tx_buf->skb; | ||
| 1163 | |||
| 1164 | if (!skb) | ||
| 1165 | break; | ||
| 1166 | |||
| 1167 | if (skb != (struct sk_buff *)DMA_DUMMY_DESC) { | ||
| 1168 | bytes += skb->len; | ||
| 1169 | done++; | ||
| 1170 | budget--; | ||
| 1171 | } | ||
| 1172 | mtk_txd_unmap(eth->dev, tx_buf); | ||
| 1173 | idx = NEXT_TX_DESP_IDX(idx); | ||
| 1174 | } | ||
| 1175 | ring->tx_free_idx = idx; | ||
| 1176 | atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring)); | ||
| 1177 | |||
| 1178 | /* read the hw index again to make sure there is no new tx packet */ | ||
| 1179 | if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0)) | ||
| 1180 | *tx_again = 1; | ||
| 1181 | |||
| 1182 | if (done) | ||
| 1183 | netdev_completed_queue(*eth->netdev, done, bytes); | ||
| 1184 | |||
| 1185 | return done; | ||
| 1186 | } | ||
| 1187 | |||
| 1188 | static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again) | ||
| 1189 | { | ||
| 1190 | struct mtk_tx_ring *ring = &eth->tx_ring; | ||
| 1191 | struct mtk_tx_dma *desc; | ||
| 1192 | struct sk_buff *skb; | ||
| 1193 | struct mtk_tx_buf *tx_buf; | ||
| 1194 | int total = 0, done[MTK_MAX_DEVS]; | ||
| 1195 | unsigned int bytes[MTK_MAX_DEVS]; | ||
| 1196 | u32 cpu, dma; | ||
| 1197 | int i; | ||
| 1198 | |||
| 1199 | memset(done, 0, sizeof(done)); | ||
| 1200 | memset(bytes, 0, sizeof(bytes)); | ||
| 1201 | |||
| 1202 | cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); | ||
| 1203 | dma = mtk_r32(eth, MTK_QTX_DRX_PTR); | ||
| 1204 | |||
| 1205 | desc = mtk_qdma_phys_to_virt(ring, cpu); | ||
| 1206 | |||
| 1207 | while ((cpu != dma) && budget) { | ||
| 1208 | u32 next_cpu = desc->txd2; | ||
| 1209 | int mac; | ||
| 1210 | |||
| 1211 | desc = mtk_tx_next_qdma(ring, desc); | ||
| 1212 | if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0) | ||
| 1213 | break; | ||
| 1214 | |||
| 1215 | mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) & | ||
| 1216 | TX_DMA_FPORT_MASK; | ||
| 1217 | mac--; | ||
| 1218 | |||
| 1219 | tx_buf = mtk_desc_to_tx_buf(ring, desc); | ||
| 1220 | skb = tx_buf->skb; | ||
| 1221 | if (!skb) | ||
| 1222 | break; | ||
| 1223 | |||
| 1224 | if (skb != (struct sk_buff *)DMA_DUMMY_DESC) { | ||
| 1225 | bytes[mac] += skb->len; | ||
| 1226 | done[mac]++; | ||
| 1227 | budget--; | ||
| 1228 | } | ||
| 1229 | mtk_txd_unmap(eth->dev, tx_buf); | ||
| 1230 | |||
| 1231 | ring->tx_last_free->txd2 = next_cpu; | ||
| 1232 | ring->tx_last_free = desc; | ||
| 1233 | atomic_inc(&ring->tx_free_count); | ||
| 1234 | |||
| 1235 | cpu = next_cpu; | ||
| 1236 | } | ||
| 1237 | |||
| 1238 | mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); | ||
| 1239 | |||
| 1240 | /* read the hw index again to make sure there is no new tx packet */ | ||
| 1241 | if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR)) | ||
| 1242 | *tx_again = true; | ||
| 1243 | |||
| 1244 | for (i = 0; i < eth->soc->mac_count; i++) { | ||
| 1245 | if (!done[i]) | ||
| 1246 | continue; | ||
| 1247 | netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); | ||
| 1248 | total += done[i]; | ||
| 1249 | } | ||
| 1250 | |||
| 1251 | return total; | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr, | ||
| 1255 | bool *tx_again) | ||
| 1256 | { | ||
| 1257 | struct mtk_tx_ring *ring = &eth->tx_ring; | ||
| 1258 | struct net_device *netdev = eth->netdev[0]; | ||
| 1259 | int done; | ||
| 1260 | |||
| 1261 | done = eth->tx_ring.tx_poll(eth, budget, tx_again); | ||
| 1262 | if (!*tx_again) | ||
| 1263 | mtk_irq_ack(eth, tx_intr); | ||
| 1264 | |||
| 1265 | if (!done) | ||
| 1266 | return 0; | ||
| 1267 | |||
| 1268 | smp_mb(); | ||
| 1269 | if (unlikely(!netif_queue_stopped(netdev))) | ||
| 1270 | return done; | ||
| 1271 | |||
| 1272 | if (atomic_read(&ring->tx_free_count) > ring->tx_thresh) | ||
| 1273 | netif_wake_queue(netdev); | ||
| 1274 | |||
| 1275 | return done; | ||
| 1276 | } | ||
| 1277 | |||
| 1278 | static void mtk_stats_update(struct mtk_eth *eth) | ||
| 1279 | { | ||
| 1280 | int i; | ||
| 1281 | |||
| 1282 | for (i = 0; i < eth->soc->mac_count; i++) { | ||
| 1283 | if (!eth->mac[i] || !eth->mac[i]->hw_stats) | ||
| 1284 | continue; | ||
| 1285 | if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) { | ||
| 1286 | mtk_stats_update_mac(eth->mac[i]); | ||
| 1287 | spin_unlock(&eth->mac[i]->hw_stats->stats_lock); | ||
| 1288 | } | ||
| 1289 | } | ||
| 1290 | } | ||
| 1291 | |||
| 1292 | static int mtk_poll(struct napi_struct *napi, int budget) | ||
| 1293 | { | ||
| 1294 | struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); | ||
| 1295 | u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr; | ||
| 1296 | int tx_done, rx_done; | ||
| 1297 | bool tx_again = false; | ||
| 1298 | |||
| 1299 | status = mtk_irq_pending(eth); | ||
| 1300 | mtk_status = mtk_irq_pending_status(eth); | ||
| 1301 | tx_intr = eth->soc->tx_int; | ||
| 1302 | rx_intr = eth->soc->rx_int; | ||
| 1303 | status_intr = eth->soc->status_int; | ||
| 1304 | tx_done = 0; | ||
| 1305 | rx_done = 0; | ||
| 1306 | tx_again = 0; | ||
| 1307 | |||
| 1308 | if (status & tx_intr) | ||
| 1309 | tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again); | ||
| 1310 | |||
| 1311 | if (status & rx_intr) | ||
| 1312 | rx_done = mtk_poll_rx(napi, budget, eth, rx_intr); | ||
| 1313 | |||
| 1314 | if (unlikely(mtk_status & status_intr)) { | ||
| 1315 | mtk_stats_update(eth); | ||
| 1316 | mtk_irq_ack_status(eth, status_intr); | ||
| 1317 | } | ||
| 1318 | |||
| 1319 | if (unlikely(netif_msg_intr(eth))) { | ||
| 1320 | mask = mtk_irq_enabled(eth); | ||
| 1321 | netdev_info(eth->netdev[0], | ||
| 1322 | "done tx %d, rx %d, intr 0x%08x/0x%x\n", | ||
| 1323 | tx_done, rx_done, status, mask); | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | if (tx_again || rx_done == budget) | ||
| 1327 | return budget; | ||
| 1328 | |||
| 1329 | status = mtk_irq_pending(eth); | ||
| 1330 | if (status & (tx_intr | rx_intr)) | ||
| 1331 | return budget; | ||
| 1332 | |||
| 1333 | napi_complete(napi); | ||
| 1334 | mtk_irq_enable(eth, tx_intr | rx_intr); | ||
| 1335 | |||
| 1336 | return rx_done; | ||
| 1337 | } | ||
| 1338 | |||
| 1339 | static int mtk_pdma_tx_alloc(struct mtk_eth *eth) | ||
| 1340 | { | ||
| 1341 | int i; | ||
| 1342 | struct mtk_tx_ring *ring = &eth->tx_ring; | ||
| 1343 | |||
| 1344 | ring->tx_ring_size = eth->soc->dma_ring_size; | ||
| 1345 | ring->tx_free_idx = 0; | ||
| 1346 | ring->tx_next_idx = 0; | ||
| 1347 | ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2, | ||
| 1348 | MAX_SKB_FRAGS); | ||
| 1349 | |||
| 1350 | ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf), | ||
| 1351 | GFP_KERNEL); | ||
| 1352 | if (!ring->tx_buf) | ||
| 1353 | goto no_tx_mem; | ||
| 1354 | |||
| 1355 | ring->tx_dma = | ||
| 1356 | dma_alloc_coherent(eth->dev, | ||
| 1357 | ring->tx_ring_size * sizeof(*ring->tx_dma), | ||
| 1358 | &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO); | ||
| 1359 | if (!ring->tx_dma) | ||
| 1360 | goto no_tx_mem; | ||
| 1361 | |||
| 1362 | for (i = 0; i < ring->tx_ring_size; i++) { | ||
| 1363 | ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF; | ||
| 1364 | ring->tx_dma[i].txd4 = eth->soc->txd4; | ||
| 1365 | } | ||
| 1366 | |||
| 1367 | atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring)); | ||
| 1368 | ring->tx_map = mtk_pdma_tx_map; | ||
| 1369 | ring->tx_poll = mtk_pdma_tx_poll; | ||
| 1370 | ring->tx_clean = mtk_pdma_tx_clean; | ||
| 1371 | |||
| 1372 | /* make sure that all changes to the dma ring are flushed before we | ||
| 1373 | * continue | ||
| 1374 | */ | ||
| 1375 | wmb(); | ||
| 1376 | |||
| 1377 | mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0); | ||
| 1378 | mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0); | ||
| 1379 | mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0); | ||
| 1380 | mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG); | ||
| 1381 | |||
| 1382 | return 0; | ||
| 1383 | |||
| 1384 | no_tx_mem: | ||
| 1385 | return -ENOMEM; | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth) | ||
| 1389 | { | ||
| 1390 | struct mtk_tx_ring *ring = &eth->tx_ring; | ||
| 1391 | int i, sz = sizeof(*ring->tx_dma); | ||
| 1392 | |||
| 1393 | ring->tx_ring_size = eth->soc->dma_ring_size; | ||
| 1394 | ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf), | ||
| 1395 | GFP_KERNEL); | ||
| 1396 | if (!ring->tx_buf) | ||
| 1397 | goto no_tx_mem; | ||
| 1398 | |||
| 1399 | ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz, | ||
| 1400 | &ring->tx_phys, | ||
| 1401 | GFP_ATOMIC | __GFP_ZERO); | ||
| 1402 | if (!ring->tx_dma) | ||
| 1403 | goto no_tx_mem; | ||
| 1404 | |||
| 1405 | for (i = 0; i < ring->tx_ring_size; i++) { | ||
| 1406 | int next = (i + 1) % ring->tx_ring_size; | ||
| 1407 | u32 next_ptr = ring->tx_phys + next * sz; | ||
| 1408 | |||
| 1409 | ring->tx_dma[i].txd2 = next_ptr; | ||
| 1410 | ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF; | ||
| 1411 | } | ||
| 1412 | |||
| 1413 | atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2); | ||
| 1414 | ring->tx_next_free = &ring->tx_dma[0]; | ||
| 1415 | ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2]; | ||
| 1416 | ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2, | ||
| 1417 | MAX_SKB_FRAGS); | ||
| 1418 | |||
| 1419 | ring->tx_map = mtk_qdma_tx_map; | ||
| 1420 | ring->tx_poll = mtk_qdma_tx_poll; | ||
| 1421 | ring->tx_clean = mtk_qdma_tx_clean; | ||
| 1422 | |||
| 1423 | /* make sure that all changes to the dma ring are flushed before we | ||
| 1424 | * continue | ||
| 1425 | */ | ||
| 1426 | wmb(); | ||
| 1427 | |||
| 1428 | mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR); | ||
| 1429 | mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR); | ||
| 1430 | mtk_w32(eth, | ||
| 1431 | ring->tx_phys + ((ring->tx_ring_size - 1) * sz), | ||
| 1432 | MTK_QTX_CRX_PTR); | ||
| 1433 | mtk_w32(eth, | ||
| 1434 | ring->tx_phys + ((ring->tx_ring_size - 1) * sz), | ||
| 1435 | MTK_QTX_DRX_PTR); | ||
| 1436 | |||
| 1437 | return 0; | ||
| 1438 | |||
| 1439 | no_tx_mem: | ||
| 1440 | return -ENOMEM; | ||
| 1441 | } | ||
| 1442 | |||
| 1443 | static int mtk_qdma_init(struct mtk_eth *eth, int ring) | ||
| 1444 | { | ||
| 1445 | int err; | ||
| 1446 | |||
| 1447 | err = mtk_init_fq_dma(eth); | ||
| 1448 | if (err) | ||
| 1449 | return err; | ||
| 1450 | |||
| 1451 | err = mtk_qdma_tx_alloc_tx(eth); | ||
| 1452 | if (err) | ||
| 1453 | return err; | ||
| 1454 | |||
| 1455 | err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]); | ||
| 1456 | if (err) | ||
| 1457 | return err; | ||
| 1458 | |||
| 1459 | mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0); | ||
| 1460 | mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0); | ||
| 1461 | mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0); | ||
| 1462 | mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX); | ||
| 1463 | mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); | ||
| 1464 | |||
| 1465 | /* Enable random early drop and set drop threshold automatically */ | ||
| 1466 | mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES); | ||
| 1467 | mtk_w32(eth, 0x0, MTK_QDMA_HRED2); | ||
| 1468 | |||
| 1469 | return 0; | ||
| 1470 | } | ||
| 1471 | |||
| 1472 | static int mtk_pdma_qdma_init(struct mtk_eth *eth) | ||
| 1473 | { | ||
| 1474 | int err = mtk_qdma_init(eth, 1); | ||
| 1475 | |||
| 1476 | if (err) | ||
| 1477 | return err; | ||
| 1478 | |||
| 1479 | err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]); | ||
| 1480 | if (err) | ||
| 1481 | return err; | ||
| 1482 | |||
| 1483 | mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0); | ||
| 1484 | mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0); | ||
| 1485 | mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0); | ||
| 1486 | mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG); | ||
| 1487 | |||
| 1488 | return 0; | ||
| 1489 | } | ||
| 1490 | |||
| 1491 | static int mtk_pdma_init(struct mtk_eth *eth) | ||
| 1492 | { | ||
| 1493 | struct mtk_rx_ring *ring = &eth->rx_ring[0]; | ||
| 1494 | int err; | ||
| 1495 | |||
| 1496 | err = mtk_pdma_tx_alloc(eth); | ||
| 1497 | if (err) | ||
| 1498 | return err; | ||
| 1499 | |||
| 1500 | err = mtk_dma_rx_alloc(eth, ring); | ||
| 1501 | if (err) | ||
| 1502 | return err; | ||
| 1503 | |||
| 1504 | mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0); | ||
| 1505 | mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0); | ||
| 1506 | mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0); | ||
| 1507 | mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG); | ||
| 1508 | |||
| 1509 | return 0; | ||
| 1510 | } | ||
| 1511 | |||
| 1512 | static void mtk_dma_free(struct mtk_eth *eth) | ||
| 1513 | { | ||
| 1514 | int i; | ||
| 1515 | |||
| 1516 | for (i = 0; i < eth->soc->mac_count; i++) | ||
| 1517 | if (eth->netdev[i]) | ||
| 1518 | netdev_reset_queue(eth->netdev[i]); | ||
| 1519 | eth->tx_ring.tx_clean(eth); | ||
| 1520 | mtk_clean_rx(eth, &eth->rx_ring[0]); | ||
| 1521 | mtk_clean_rx(eth, &eth->rx_ring[1]); | ||
| 1522 | kfree(eth->scratch_head); | ||
| 1523 | } | ||
| 1524 | |||
| 1525 | static void mtk_tx_timeout(struct net_device *dev) | ||
| 1526 | { | ||
| 1527 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 1528 | struct mtk_eth *eth = mac->hw; | ||
| 1529 | struct mtk_tx_ring *ring = &eth->tx_ring; | ||
| 1530 | |||
| 1531 | eth->netdev[mac->id]->stats.tx_errors++; | ||
| 1532 | netif_err(eth, tx_err, dev, | ||
| 1533 | "transmit timed out\n"); | ||
| 1534 | if (eth->soc->dma_type & MTK_PDMA) { | ||
| 1535 | netif_info(eth, drv, dev, "pdma_cfg:%08x\n", | ||
| 1536 | mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG)); | ||
| 1537 | netif_info(eth, drv, dev, | ||
| 1538 | "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n", | ||
| 1539 | 0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0), | ||
| 1540 | mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0), | ||
| 1541 | mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0), | ||
| 1542 | mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0), | ||
| 1543 | ring->tx_free_idx, | ||
| 1544 | ring->tx_next_idx); | ||
| 1545 | } | ||
| 1546 | if (eth->soc->dma_type & MTK_QDMA) { | ||
| 1547 | netif_info(eth, drv, dev, "qdma_cfg:%08x\n", | ||
| 1548 | mtk_r32(eth, MTK_QDMA_GLO_CFG)); | ||
| 1549 | netif_info(eth, drv, dev, | ||
| 1550 | "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n", | ||
| 1551 | 0, mtk_r32(eth, MTK_QTX_CTX_PTR), | ||
| 1552 | mtk_r32(eth, MTK_QTX_DTX_PTR), | ||
| 1553 | mtk_r32(eth, MTK_QTX_CRX_PTR), | ||
| 1554 | mtk_r32(eth, MTK_QTX_DRX_PTR), | ||
| 1555 | atomic_read(&ring->tx_free_count)); | ||
| 1556 | } | ||
| 1557 | netif_info(eth, drv, dev, | ||
| 1558 | "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n", | ||
| 1559 | 0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0), | ||
| 1560 | mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0), | ||
| 1561 | mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0), | ||
| 1562 | mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0)); | ||
| 1563 | |||
| 1564 | schedule_work(&mac->pending_work); | ||
| 1565 | } | ||
| 1566 | |||
| 1567 | static irqreturn_t mtk_handle_irq(int irq, void *_eth) | ||
| 1568 | { | ||
| 1569 | struct mtk_eth *eth = _eth; | ||
| 1570 | u32 status, int_mask; | ||
| 1571 | |||
| 1572 | status = mtk_irq_pending(eth); | ||
| 1573 | if (unlikely(!status)) | ||
| 1574 | return IRQ_NONE; | ||
| 1575 | |||
| 1576 | int_mask = (eth->soc->rx_int | eth->soc->tx_int); | ||
| 1577 | if (likely(status & int_mask)) { | ||
| 1578 | if (likely(napi_schedule_prep(&eth->rx_napi))) | ||
| 1579 | __napi_schedule(&eth->rx_napi); | ||
| 1580 | } else { | ||
| 1581 | mtk_irq_ack(eth, status); | ||
| 1582 | } | ||
| 1583 | mtk_irq_disable(eth, int_mask); | ||
| 1584 | |||
| 1585 | return IRQ_HANDLED; | ||
| 1586 | } | ||
| 1587 | |||
| 1588 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1589 | static void mtk_poll_controller(struct net_device *dev) | ||
| 1590 | { | ||
| 1591 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 1592 | struct mtk_eth *eth = mac->hw; | ||
| 1593 | u32 int_mask = eth->soc->tx_int | eth->soc->rx_int; | ||
| 1594 | |||
| 1595 | mtk_irq_disable(eth, int_mask); | ||
| 1596 | mtk_handle_irq(dev->irq, dev); | ||
| 1597 | mtk_irq_enable(eth, int_mask); | ||
| 1598 | } | ||
| 1599 | #endif | ||
| 1600 | |||
| 1601 | int mtk_set_clock_cycle(struct mtk_eth *eth) | ||
| 1602 | { | ||
| 1603 | unsigned long sysclk = eth->sysclk; | ||
| 1604 | |||
| 1605 | sysclk /= MTK_US_CYC_CNT_DIVISOR; | ||
| 1606 | sysclk <<= MTK_US_CYC_CNT_SHIFT; | ||
| 1607 | |||
| 1608 | mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) & | ||
| 1609 | ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) | | ||
| 1610 | sysclk, | ||
| 1611 | MTK_GLO_CFG); | ||
| 1612 | return 0; | ||
| 1613 | } | ||
| 1614 | |||
| 1615 | void mtk_fwd_config(struct mtk_eth *eth) | ||
| 1616 | { | ||
| 1617 | u32 fwd_cfg; | ||
| 1618 | |||
| 1619 | fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG); | ||
| 1620 | |||
| 1621 | /* disable jumbo frame */ | ||
| 1622 | if (eth->soc->jumbo_frame) | ||
| 1623 | fwd_cfg &= ~MTK_GDM1_JMB_EN; | ||
| 1624 | |||
| 1625 | /* forward unicast/multicast/broadcast frames to the cpu */ | ||
| 1626 | fwd_cfg &= ~0xffff; | ||
| 1627 | |||
| 1628 | mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG); | ||
| 1629 | } | ||
| 1630 | |||
| 1631 | void mtk_csum_config(struct mtk_eth *eth) | ||
| 1632 | { | ||
| 1633 | if (eth->soc->hw_features & NETIF_F_RXCSUM) | ||
| 1634 | mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) | | ||
| 1635 | (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN), | ||
| 1636 | MTK_GDMA1_FWD_CFG); | ||
| 1637 | else | ||
| 1638 | mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) & | ||
| 1639 | ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN), | ||
| 1640 | MTK_GDMA1_FWD_CFG); | ||
| 1641 | if (eth->soc->hw_features & NETIF_F_IP_CSUM) | ||
| 1642 | mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) | | ||
| 1643 | (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN), | ||
| 1644 | MTK_CDMA_CSG_CFG); | ||
| 1645 | else | ||
| 1646 | mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) & | ||
| 1647 | ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN), | ||
| 1648 | MTK_CDMA_CSG_CFG); | ||
| 1649 | } | ||
| 1650 | |||
| 1651 | static int mtk_start_dma(struct mtk_eth *eth) | ||
| 1652 | { | ||
| 1653 | unsigned long flags; | ||
| 1654 | u32 val; | ||
| 1655 | int err; | ||
| 1656 | |||
| 1657 | if (eth->soc->dma_type == MTK_PDMA) | ||
| 1658 | err = mtk_pdma_init(eth); | ||
| 1659 | else if (eth->soc->dma_type == MTK_QDMA) | ||
| 1660 | err = mtk_qdma_init(eth, 0); | ||
| 1661 | else | ||
| 1662 | err = mtk_pdma_qdma_init(eth); | ||
| 1663 | if (err) { | ||
| 1664 | mtk_dma_free(eth); | ||
| 1665 | return err; | ||
| 1666 | } | ||
| 1667 | |||
| 1668 | spin_lock_irqsave(&eth->page_lock, flags); | ||
| 1669 | |||
| 1670 | val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN; | ||
| 1671 | if (eth->soc->rx_2b_offset) | ||
| 1672 | val |= MTK_RX_2B_OFFSET; | ||
| 1673 | val |= eth->soc->pdma_glo_cfg; | ||
| 1674 | |||
| 1675 | if (eth->soc->dma_type & MTK_PDMA) | ||
| 1676 | mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG); | ||
| 1677 | |||
| 1678 | if (eth->soc->dma_type & MTK_QDMA) | ||
| 1679 | mtk_w32(eth, val, MTK_QDMA_GLO_CFG); | ||
| 1680 | |||
| 1681 | spin_unlock_irqrestore(&eth->page_lock, flags); | ||
| 1682 | |||
| 1683 | return 0; | ||
| 1684 | } | ||
| 1685 | |||
| 1686 | static int mtk_open(struct net_device *dev) | ||
| 1687 | { | ||
| 1688 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 1689 | struct mtk_eth *eth = mac->hw; | ||
| 1690 | |||
| 1691 | dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); | ||
| 1692 | |||
| 1693 | if (!atomic_read(&eth->dma_refcnt)) { | ||
| 1694 | int err = mtk_start_dma(eth); | ||
| 1695 | |||
| 1696 | if (err) | ||
| 1697 | return err; | ||
| 1698 | |||
| 1699 | napi_enable(&eth->rx_napi); | ||
| 1700 | mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int); | ||
| 1701 | } | ||
| 1702 | atomic_inc(&eth->dma_refcnt); | ||
| 1703 | |||
| 1704 | if (eth->phy) | ||
| 1705 | eth->phy->start(mac); | ||
| 1706 | |||
| 1707 | if (eth->soc->has_carrier && eth->soc->has_carrier(eth)) | ||
| 1708 | netif_carrier_on(dev); | ||
| 1709 | |||
| 1710 | netif_start_queue(dev); | ||
| 1711 | eth->soc->fwd_config(eth); | ||
| 1712 | |||
| 1713 | return 0; | ||
| 1714 | } | ||
| 1715 | |||
| 1716 | static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) | ||
| 1717 | { | ||
| 1718 | unsigned long flags; | ||
| 1719 | u32 val; | ||
| 1720 | int i; | ||
| 1721 | |||
| 1722 | /* stop the dma engine */ | ||
| 1723 | spin_lock_irqsave(&eth->page_lock, flags); | ||
| 1724 | val = mtk_r32(eth, glo_cfg); | ||
| 1725 | mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), | ||
| 1726 | glo_cfg); | ||
| 1727 | spin_unlock_irqrestore(&eth->page_lock, flags); | ||
| 1728 | |||
| 1729 | /* wait for the dma to stop */ | ||
| 1730 | for (i = 0; i < 10; i++) { | ||
| 1731 | val = mtk_r32(eth, glo_cfg); | ||
| 1732 | if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { | ||
| 1733 | msleep(20); | ||
| 1734 | continue; | ||
| 1735 | } | ||
| 1736 | break; | ||
| 1737 | } | ||
| 1738 | } | ||
| 1739 | |||
| 1740 | static int mtk_stop(struct net_device *dev) | ||
| 1741 | { | ||
| 1742 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 1743 | struct mtk_eth *eth = mac->hw; | ||
| 1744 | |||
| 1745 | netif_tx_disable(dev); | ||
| 1746 | if (eth->phy) | ||
| 1747 | eth->phy->stop(mac); | ||
| 1748 | |||
| 1749 | if (!atomic_dec_and_test(&eth->dma_refcnt)) | ||
| 1750 | return 0; | ||
| 1751 | |||
| 1752 | mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int); | ||
| 1753 | napi_disable(&eth->rx_napi); | ||
| 1754 | |||
| 1755 | if (eth->soc->dma_type & MTK_PDMA) | ||
| 1756 | mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]); | ||
| 1757 | |||
| 1758 | if (eth->soc->dma_type & MTK_QDMA) | ||
| 1759 | mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); | ||
| 1760 | |||
| 1761 | mtk_dma_free(eth); | ||
| 1762 | |||
| 1763 | return 0; | ||
| 1764 | } | ||
| 1765 | |||
| 1766 | static int __init mtk_init_hw(struct mtk_eth *eth) | ||
| 1767 | { | ||
| 1768 | int i, err; | ||
| 1769 | |||
| 1770 | eth->soc->reset_fe(eth); | ||
| 1771 | |||
| 1772 | if (eth->soc->switch_init) | ||
| 1773 | if (eth->soc->switch_init(eth)) { | ||
| 1774 | dev_err(eth->dev, "failed to initialize switch core\n"); | ||
| 1775 | return -ENODEV; | ||
| 1776 | } | ||
| 1777 | |||
| 1778 | err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0, | ||
| 1779 | dev_name(eth->dev), eth); | ||
| 1780 | if (err) | ||
| 1781 | return err; | ||
| 1782 | |||
| 1783 | err = mtk_mdio_init(eth); | ||
| 1784 | if (err) | ||
| 1785 | return err; | ||
| 1786 | |||
| 1787 | /* disable delay and normal interrupt */ | ||
| 1788 | mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG); | ||
| 1789 | if (eth->soc->dma_type & MTK_QDMA) | ||
| 1790 | mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); | ||
| 1791 | mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int); | ||
| 1792 | |||
| 1793 | /* frame engine will push the VLAN tag according to the VIDX field in the Tx desc */ | ||
| 1794 | if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) | ||
| 1795 | for (i = 0; i < 16; i += 2) | ||
| 1796 | mtk_w32(eth, ((i + 1) << 16) + i, | ||
| 1797 | mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] + | ||
| 1798 | (i * 2)); | ||
| 1799 | |||
| 1800 | if (eth->soc->fwd_config(eth)) | ||
| 1801 | dev_err(eth->dev, "unable to get clock\n"); | ||
| 1802 | |||
| 1803 | if (mtk_reg_table[MTK_REG_MTK_RST_GL]) { | ||
| 1804 | mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL); | ||
| 1805 | mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL); | ||
| 1806 | } | ||
| 1807 | |||
| 1808 | return 0; | ||
| 1809 | } | ||
| 1810 | |||
| 1811 | static int __init mtk_init(struct net_device *dev) | ||
| 1812 | { | ||
| 1813 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 1814 | struct mtk_eth *eth = mac->hw; | ||
| 1815 | struct device_node *port; | ||
| 1816 | const char *mac_addr; | ||
| 1817 | int err; | ||
| 1818 | |||
| 1819 | mac_addr = of_get_mac_address(mac->of_node); | ||
| 1820 | if (mac_addr) | ||
| 1821 | ether_addr_copy(dev->dev_addr, mac_addr); | ||
| 1822 | |||
| 1823 | /* If the mac address is invalid, use a random mac address */ | ||
| 1824 | if (!is_valid_ether_addr(dev->dev_addr)) { | ||
| 1825 | eth_hw_addr_random(dev); | ||
| 1826 | dev_err(eth->dev, "generated random MAC address %pM\n", | ||
| 1827 | dev->dev_addr); | ||
| 1828 | } | ||
| 1829 | mac->hw->soc->set_mac(mac, dev->dev_addr); | ||
| 1830 | |||
| 1831 | if (eth->soc->port_init) | ||
| 1832 | for_each_child_of_node(mac->of_node, port) | ||
| 1833 | if (of_device_is_compatible(port, | ||
| 1834 | "mediatek,eth-port") && | ||
| 1835 | of_device_is_available(port)) | ||
| 1836 | eth->soc->port_init(eth, mac, port); | ||
| 1837 | |||
| 1838 | if (eth->phy) { | ||
| 1839 | err = eth->phy->connect(mac); | ||
| 1840 | if (err) | ||
| 1841 | return err; | ||
| 1842 | } | ||
| 1843 | |||
| 1844 | return 0; | ||
| 1845 | } | ||
| 1846 | |||
| 1847 | static void mtk_uninit(struct net_device *dev) | ||
| 1848 | { | ||
| 1849 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 1850 | struct mtk_eth *eth = mac->hw; | ||
| 1851 | |||
| 1852 | if (eth->phy) | ||
| 1853 | eth->phy->disconnect(mac); | ||
| 1854 | mtk_mdio_cleanup(eth); | ||
| 1855 | |||
| 1856 | mtk_irq_disable(eth, ~0); | ||
| 1857 | free_irq(dev->irq, dev); | ||
| 1858 | } | ||
| 1859 | |||
| 1860 | static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
| 1861 | { | ||
| 1862 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 1863 | |||
| 1864 | if (!mac->phy_dev) | ||
| 1865 | return -ENODEV; | ||
| 1866 | |||
| 1867 | switch (cmd) { | ||
| 1868 | case SIOCGMIIPHY: | ||
| 1869 | case SIOCGMIIREG: | ||
| 1870 | case SIOCSMIIREG: | ||
| 1871 | return phy_mii_ioctl(mac->phy_dev, ifr, cmd); | ||
| 1872 | default: | ||
| 1873 | break; | ||
| 1874 | } | ||
| 1875 | |||
| 1876 | return -EOPNOTSUPP; | ||
| 1877 | } | ||
| 1878 | |||
| 1879 | static int mtk_change_mtu(struct net_device *dev, int new_mtu) | ||
| 1880 | { | ||
| 1881 | struct mtk_mac *mac = netdev_priv(dev); | ||
| 1882 | struct mtk_eth *eth = mac->hw; | ||
| 1883 | int frag_size, old_mtu; | ||
| 1884 | u32 fwd_cfg; | ||
| 1885 | |||
| 1886 | if (!eth->soc->jumbo_frame) | ||
| 1887 | return eth_change_mtu(dev, new_mtu); | ||
| 1888 | |||
| 1889 | frag_size = mtk_max_frag_size(new_mtu); | ||
| 1890 | if (new_mtu < 68 || frag_size > PAGE_SIZE) | ||
| 1891 | return -EINVAL; | ||
| 1892 | |||
| 1893 | old_mtu = dev->mtu; | ||
| 1894 | dev->mtu = new_mtu; | ||
| 1895 | |||
| 1896 | /* return early if the buffer sizes will not change */ | ||
| 1897 | if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) | ||
| 1898 | return 0; | ||
| 1899 | if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN) | ||
| 1900 | return 0; | ||
| 1901 | |||
| 1902 | if (new_mtu <= ETH_DATA_LEN) | ||
| 1903 | eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN); | ||
| 1904 | else | ||
| 1905 | eth->rx_ring[0].frag_size = PAGE_SIZE; | ||
| 1906 | eth->rx_ring[0].rx_buf_size = | ||
| 1907 | mtk_max_buf_size(eth->rx_ring[0].frag_size); | ||
| 1908 | |||
| 1909 | if (!netif_running(dev)) | ||
| 1910 | return 0; | ||
| 1911 | |||
| 1912 | mtk_stop(dev); | ||
| 1913 | fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG); | ||
| 1914 | if (new_mtu <= ETH_DATA_LEN) { | ||
| 1915 | fwd_cfg &= ~MTK_GDM1_JMB_EN; | ||
| 1916 | } else { | ||
| 1917 | fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT); | ||
| 1918 | fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) << | ||
| 1919 | MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN; | ||
| 1920 | } | ||
| 1921 | mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG); | ||
| 1922 | |||
| 1923 | return mtk_open(dev); | ||
| 1924 | } | ||
| 1925 | |||
| 1926 | static void mtk_pending_work(struct work_struct *work) | ||
| 1927 | { | ||
| 1928 | struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work); | ||
| 1929 | struct mtk_eth *eth = mac->hw; | ||
| 1930 | struct net_device *dev = eth->netdev[mac->id]; | ||
| 1931 | int err; | ||
| 1932 | |||
| 1933 | rtnl_lock(); | ||
| 1934 | mtk_stop(dev); | ||
| 1935 | |||
| 1936 | err = mtk_open(dev); | ||
| 1937 | if (err) { | ||
| 1938 | netif_alert(eth, ifup, dev, | ||
| 1939 | "Driver up/down cycle failed, closing device.\n"); | ||
| 1940 | dev_close(dev); | ||
| 1941 | } | ||
| 1942 | rtnl_unlock(); | ||
| 1943 | } | ||
| 1944 | |||
| 1945 | static int mtk_cleanup(struct mtk_eth *eth) | ||
| 1946 | { | ||
| 1947 | int i; | ||
| 1948 | |||
| 1949 | for (i = 0; i < eth->soc->mac_count; i++) { | ||
| 1950 | struct mtk_mac *mac = netdev_priv(eth->netdev[i]); | ||
| 1951 | |||
| 1952 | if (!eth->netdev[i]) | ||
| 1953 | continue; | ||
| 1954 | |||
| 1955 | unregister_netdev(eth->netdev[i]); | ||
| 1956 | free_netdev(eth->netdev[i]); | ||
| 1957 | cancel_work_sync(&mac->pending_work); | ||
| 1958 | } | ||
| 1959 | |||
| 1960 | return 0; | ||
| 1961 | } | ||
| 1962 | |||
| 1963 | static const struct net_device_ops mtk_netdev_ops = { | ||
| 1964 | .ndo_init = mtk_init, | ||
| 1965 | .ndo_uninit = mtk_uninit, | ||
| 1966 | .ndo_open = mtk_open, | ||
| 1967 | .ndo_stop = mtk_stop, | ||
| 1968 | .ndo_start_xmit = mtk_start_xmit, | ||
| 1969 | .ndo_set_mac_address = mtk_set_mac_address, | ||
| 1970 | .ndo_validate_addr = eth_validate_addr, | ||
| 1971 | .ndo_do_ioctl = mtk_do_ioctl, | ||
| 1972 | .ndo_change_mtu = mtk_change_mtu, | ||
| 1973 | .ndo_tx_timeout = mtk_tx_timeout, | ||
| 1974 | .ndo_get_stats64 = mtk_get_stats64, | ||
| 1975 | .ndo_vlan_rx_add_vid = mtk_vlan_rx_add_vid, | ||
| 1976 | .ndo_vlan_rx_kill_vid = mtk_vlan_rx_kill_vid, | ||
| 1977 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1978 | .ndo_poll_controller = mtk_poll_controller, | ||
| 1979 | #endif | ||
| 1980 | }; | ||
| 1981 | |||
| 1982 | static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) | ||
| 1983 | { | ||
| 1984 | struct mtk_mac *mac; | ||
| 1985 | const __be32 *_id = of_get_property(np, "reg", NULL); | ||
| 1986 | int id, err; | ||
| 1987 | |||
| 1988 | if (!_id) { | ||
| 1989 | dev_err(eth->dev, "missing mac id\n"); | ||
| 1990 | return -EINVAL; | ||
| 1991 | } | ||
| 1992 | id = be32_to_cpup(_id); | ||
| 1993 | if (id >= eth->soc->mac_count || eth->netdev[id]) { | ||
| 1994 | dev_err(eth->dev, "%d is not a valid mac id\n", id); | ||
| 1995 | return -EINVAL; | ||
| 1996 | } | ||
| 1997 | |||
| 1998 | eth->netdev[id] = alloc_etherdev(sizeof(*mac)); | ||
| 1999 | if (!eth->netdev[id]) { | ||
| 2000 | dev_err(eth->dev, "alloc_etherdev failed\n"); | ||
| 2001 | return -ENOMEM; | ||
| 2002 | } | ||
| 2003 | mac = netdev_priv(eth->netdev[id]); | ||
| 2004 | eth->mac[id] = mac; | ||
| 2005 | mac->id = id; | ||
| 2006 | mac->hw = eth; | ||
| 2007 | mac->of_node = np; | ||
| 2008 | INIT_WORK(&mac->pending_work, mtk_pending_work); | ||
| 2009 | |||
| 2010 | if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) { | ||
| 2011 | mac->hw_stats = devm_kzalloc(eth->dev, | ||
| 2012 | sizeof(*mac->hw_stats), | ||
| 2013 | GFP_KERNEL); | ||
| 2014 | if (!mac->hw_stats) { | ||
| 2015 | err = -ENOMEM; | ||
| 2016 | goto free_netdev; | ||
| 2017 | } | ||
| 2018 | spin_lock_init(&mac->hw_stats->stats_lock); | ||
| 2019 | mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; | ||
| 2020 | } | ||
| 2021 | |||
| 2022 | SET_NETDEV_DEV(eth->netdev[id], eth->dev); | ||
| 2023 | eth->netdev[id]->netdev_ops = &mtk_netdev_ops; | ||
| 2024 | eth->netdev[id]->base_addr = (unsigned long)eth->base; | ||
| 2025 | |||
| 2026 | if (eth->soc->init_data) | ||
| 2027 | eth->soc->init_data(eth->soc, eth->netdev[id]); | ||
| 2028 | |||
| 2029 | eth->netdev[id]->vlan_features = eth->soc->hw_features & | ||
| 2030 | ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); | ||
| 2031 | eth->netdev[id]->features |= eth->soc->hw_features; | ||
| 2032 | |||
| 2033 | if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) | ||
| 2034 | eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER; | ||
| 2035 | |||
| 2036 | mtk_set_ethtool_ops(eth->netdev[id]); | ||
| 2037 | |||
| 2038 | err = register_netdev(eth->netdev[id]); | ||
| 2039 | if (err) { | ||
| 2040 | dev_err(eth->dev, "error bringing up device\n"); | ||
| 2041 | err = -ENOMEM; | ||
| 2042 | goto free_netdev; | ||
| 2043 | } | ||
| 2044 | eth->netdev[id]->irq = eth->irq; | ||
| 2045 | netif_info(eth, probe, eth->netdev[id], | ||
| 2046 | "mediatek frame engine at 0x%08lx, irq %d\n", | ||
| 2047 | eth->netdev[id]->base_addr, eth->netdev[id]->irq); | ||
| 2048 | |||
| 2049 | return 0; | ||
| 2050 | |||
| 2051 | free_netdev: | ||
| 2052 | free_netdev(eth->netdev[id]); | ||
| 2053 | return err; | ||
| 2054 | } | ||
| 2055 | |||
| 2056 | static int mtk_probe(struct platform_device *pdev) | ||
| 2057 | { | ||
| 2058 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 2059 | const struct of_device_id *match; | ||
| 2060 | struct device_node *mac_np; | ||
| 2061 | struct mtk_soc_data *soc; | ||
| 2062 | struct mtk_eth *eth; | ||
| 2063 | struct clk *sysclk; | ||
| 2064 | int err; | ||
| 2065 | |||
| 2066 | device_reset(&pdev->dev); | ||
| 2067 | |||
| 2068 | match = of_match_device(of_mtk_match, &pdev->dev); | ||
| 2069 | soc = (struct mtk_soc_data *)match->data; | ||
| 2070 | |||
| 2071 | if (soc->reg_table) | ||
| 2072 | mtk_reg_table = soc->reg_table; | ||
| 2073 | |||
| 2074 | eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); | ||
| 2075 | if (!eth) | ||
| 2076 | return -ENOMEM; | ||
| 2077 | |||
| 2078 | eth->base = devm_ioremap_resource(&pdev->dev, res); | ||
| 2079 | if (IS_ERR(eth->base)) | ||
| 2080 | return PTR_ERR(eth->base); | ||
| 2081 | |||
| 2082 | spin_lock_init(&eth->page_lock); | ||
| 2083 | |||
| 2084 | eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, | ||
| 2085 | "mediatek,ethsys"); | ||
| 2086 | if (IS_ERR(eth->ethsys)) | ||
| 2087 | return PTR_ERR(eth->ethsys); | ||
| 2088 | |||
| 2089 | eth->irq = platform_get_irq(pdev, 0); | ||
| 2090 | if (eth->irq < 0) { | ||
| 2091 | dev_err(&pdev->dev, "no IRQ resource found\n"); | ||
| 2092 | return -ENXIO; | ||
| 2093 | } | ||
| 2094 | |||
| 2095 | sysclk = devm_clk_get(&pdev->dev, NULL); | ||
| 2096 | if (IS_ERR(sysclk)) { | ||
| 2097 | dev_err(&pdev->dev, | ||
| 2098 | "the clock is not defined in the devicetree\n"); | ||
| 2099 | return -ENXIO; | ||
| 2100 | } | ||
| 2101 | eth->sysclk = clk_get_rate(sysclk); | ||
| 2102 | |||
| 2103 | eth->switch_np = of_parse_phandle(pdev->dev.of_node, | ||
| 2104 | "mediatek,switch", 0); | ||
| 2105 | if (soc->has_switch && !eth->switch_np) { | ||
| 2106 | dev_err(&pdev->dev, "failed to read switch phandle\n"); | ||
| 2107 | return -ENODEV; | ||
| 2108 | } | ||
| 2109 | |||
| 2110 | eth->dev = &pdev->dev; | ||
| 2111 | eth->soc = soc; | ||
| 2112 | eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); | ||
| 2113 | |||
| 2114 | err = mtk_init_hw(eth); | ||
| 2115 | if (err) | ||
| 2116 | return err; | ||
| 2117 | |||
| 2118 | if (eth->soc->mac_count > 1) { | ||
| 2119 | for_each_child_of_node(pdev->dev.of_node, mac_np) { | ||
| 2120 | if (!of_device_is_compatible(mac_np, | ||
| 2121 | "mediatek,eth-mac")) | ||
| 2122 | continue; | ||
| 2123 | |||
| 2124 | if (!of_device_is_available(mac_np)) | ||
| 2125 | continue; | ||
| 2126 | |||
| 2127 | err = mtk_add_mac(eth, mac_np); | ||
| 2128 | if (err) | ||
| 2129 | goto err_free_dev; | ||
| 2130 | } | ||
| 2131 | |||
| 2132 | init_dummy_netdev(&eth->dummy_dev); | ||
| 2133 | netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll, | ||
| 2134 | soc->napi_weight); | ||
| 2135 | } else { | ||
| 2136 | err = mtk_add_mac(eth, pdev->dev.of_node); | ||
| 2137 | if (err) | ||
| 2138 | goto err_free_dev; | ||
| 2139 | netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll, | ||
| 2140 | soc->napi_weight); | ||
| 2141 | } | ||
| 2142 | |||
| 2143 | platform_set_drvdata(pdev, eth); | ||
| 2144 | |||
| 2145 | return 0; | ||
| 2146 | |||
| 2147 | err_free_dev: | ||
| 2148 | mtk_cleanup(eth); | ||
| 2149 | return err; | ||
| 2150 | } | ||
| 2151 | |||
| 2152 | static int mtk_remove(struct platform_device *pdev) | ||
| 2153 | { | ||
| 2154 | struct mtk_eth *eth = platform_get_drvdata(pdev); | ||
| 2155 | |||
| 2156 | netif_napi_del(&eth->rx_napi); | ||
| 2157 | mtk_cleanup(eth); | ||
| 2158 | platform_set_drvdata(pdev, NULL); | ||
| 2159 | |||
| 2160 | return 0; | ||
| 2161 | } | ||
| 2162 | |||
| 2163 | static struct platform_driver mtk_driver = { | ||
| 2164 | .probe = mtk_probe, | ||
| 2165 | .remove = mtk_remove, | ||
| 2166 | .driver = { | ||
| 2167 | .name = "mtk_soc_eth", | ||
| 2168 | .of_match_table = of_mtk_match, | ||
| 2169 | }, | ||
| 2170 | }; | ||
| 2171 | |||
| 2172 | module_platform_driver(mtk_driver); | ||
| 2173 | |||
| 2174 | MODULE_LICENSE("GPL"); | ||
| 2175 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | ||
| 2176 | MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC"); | ||
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h deleted file mode 100644 index e6ed80433f49..000000000000 --- a/drivers/staging/mt7621-eth/mtk_eth_soc.h +++ /dev/null | |||
| @@ -1,716 +0,0 @@ | |||
| 1 | /* This program is free software; you can redistribute it and/or modify | ||
| 2 | * it under the terms of the GNU General Public License as published by | ||
| 3 | * the Free Software Foundation; version 2 of the License | ||
| 4 | * | ||
| 5 | * This program is distributed in the hope that it will be useful, | ||
| 6 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 7 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 8 | * GNU General Public License for more details. | ||
| 9 | * | ||
| 10 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 11 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 12 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifndef MTK_ETH_H | ||
| 16 | #define MTK_ETH_H | ||
| 17 | |||
| 18 | #include <linux/mii.h> | ||
| 19 | #include <linux/interrupt.h> | ||
| 20 | #include <linux/netdevice.h> | ||
| 21 | #include <linux/dma-mapping.h> | ||
| 22 | #include <linux/phy.h> | ||
| 23 | #include <linux/ethtool.h> | ||
| 24 | #include <linux/version.h> | ||
| 25 | #include <linux/atomic.h> | ||
| 26 | |||
| 27 | /* these registers have different offsets depending on the SoC. we use a lookup | ||
| 28 | * table for these | ||
| 29 | */ | ||
| 30 | enum mtk_reg { | ||
| 31 | MTK_REG_PDMA_GLO_CFG = 0, | ||
| 32 | MTK_REG_PDMA_RST_CFG, | ||
| 33 | MTK_REG_DLY_INT_CFG, | ||
| 34 | MTK_REG_TX_BASE_PTR0, | ||
| 35 | MTK_REG_TX_MAX_CNT0, | ||
| 36 | MTK_REG_TX_CTX_IDX0, | ||
| 37 | MTK_REG_TX_DTX_IDX0, | ||
| 38 | MTK_REG_RX_BASE_PTR0, | ||
| 39 | MTK_REG_RX_MAX_CNT0, | ||
| 40 | MTK_REG_RX_CALC_IDX0, | ||
| 41 | MTK_REG_RX_DRX_IDX0, | ||
| 42 | MTK_REG_MTK_INT_ENABLE, | ||
| 43 | MTK_REG_MTK_INT_STATUS, | ||
| 44 | MTK_REG_MTK_DMA_VID_BASE, | ||
| 45 | MTK_REG_MTK_COUNTER_BASE, | ||
| 46 | MTK_REG_MTK_RST_GL, | ||
| 47 | MTK_REG_MTK_INT_STATUS2, | ||
| 48 | MTK_REG_COUNT | ||
| 49 | }; | ||
| 50 | |||
| 51 | /* delayed interrupt bits */ | ||
| 52 | #define MTK_DELAY_EN_INT 0x80 | ||
| 53 | #define MTK_DELAY_MAX_INT 0x04 | ||
| 54 | #define MTK_DELAY_MAX_TOUT 0x04 | ||
| 55 | #define MTK_DELAY_TIME 20 | ||
| 56 | #define MTK_DELAY_CHAN (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \ | ||
| 57 | | MTK_DELAY_MAX_TOUT) | ||
| 58 | #define MTK_DELAY_INIT ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN) | ||
| 59 | #define MTK_PSE_FQFC_CFG_INIT 0x80504000 | ||
| 60 | #define MTK_PSE_FQFC_CFG_256Q 0xff908000 | ||
| 61 | |||
| 62 | /* interrupt bits */ | ||
| 63 | #define MTK_CNT_PPE_AF BIT(31) | ||
| 64 | #define MTK_CNT_GDM_AF BIT(29) | ||
| 65 | #define MTK_PSE_P2_FC BIT(26) | ||
| 66 | #define MTK_PSE_BUF_DROP BIT(24) | ||
| 67 | #define MTK_GDM_OTHER_DROP BIT(23) | ||
| 68 | #define MTK_PSE_P1_FC BIT(22) | ||
| 69 | #define MTK_PSE_P0_FC BIT(21) | ||
| 70 | #define MTK_PSE_FQ_EMPTY BIT(20) | ||
| 71 | #define MTK_GE1_STA_CHG BIT(18) | ||
| 72 | #define MTK_TX_COHERENT BIT(17) | ||
| 73 | #define MTK_RX_COHERENT BIT(16) | ||
| 74 | #define MTK_TX_DONE_INT3 BIT(11) | ||
| 75 | #define MTK_TX_DONE_INT2 BIT(10) | ||
| 76 | #define MTK_TX_DONE_INT1 BIT(9) | ||
| 77 | #define MTK_TX_DONE_INT0 BIT(8) | ||
| 78 | #define MTK_RX_DONE_INT0 BIT(2) | ||
| 79 | #define MTK_TX_DLY_INT BIT(1) | ||
| 80 | #define MTK_RX_DLY_INT BIT(0) | ||
| 81 | |||
| 82 | #define MTK_RX_DONE_INT MTK_RX_DONE_INT0 | ||
| 83 | #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ | ||
| 84 | MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) | ||
| 85 | |||
| 86 | #define RT5350_RX_DLY_INT BIT(30) | ||
| 87 | #define RT5350_TX_DLY_INT BIT(28) | ||
| 88 | #define RT5350_RX_DONE_INT1 BIT(17) | ||
| 89 | #define RT5350_RX_DONE_INT0 BIT(16) | ||
| 90 | #define RT5350_TX_DONE_INT3 BIT(3) | ||
| 91 | #define RT5350_TX_DONE_INT2 BIT(2) | ||
| 92 | #define RT5350_TX_DONE_INT1 BIT(1) | ||
| 93 | #define RT5350_TX_DONE_INT0 BIT(0) | ||
| 94 | |||
| 95 | #define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1) | ||
| 96 | #define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \ | ||
| 97 | RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3) | ||
| 98 | |||
| 99 | /* registers */ | ||
| 100 | #define MTK_GDMA_OFFSET 0x0020 | ||
| 101 | #define MTK_PSE_OFFSET 0x0040 | ||
| 102 | #define MTK_GDMA2_OFFSET 0x0060 | ||
| 103 | #define MTK_CDMA_OFFSET 0x0080 | ||
| 104 | #define MTK_DMA_VID0 0x00a8 | ||
| 105 | #define MTK_PDMA_OFFSET 0x0100 | ||
| 106 | #define MTK_PPE_OFFSET 0x0200 | ||
| 107 | #define MTK_CMTABLE_OFFSET 0x0400 | ||
| 108 | #define MTK_POLICYTABLE_OFFSET 0x1000 | ||
| 109 | |||
| 110 | #define MT7621_GDMA_OFFSET 0x0500 | ||
| 111 | #define MT7620_GDMA_OFFSET 0x0600 | ||
| 112 | |||
| 113 | #define RT5350_PDMA_OFFSET 0x0800 | ||
| 114 | #define RT5350_SDM_OFFSET 0x0c00 | ||
| 115 | |||
| 116 | #define MTK_MDIO_ACCESS 0x00 | ||
| 117 | #define MTK_MDIO_CFG 0x04 | ||
| 118 | #define MTK_GLO_CFG 0x08 | ||
| 119 | #define MTK_RST_GL 0x0C | ||
| 120 | #define MTK_INT_STATUS 0x10 | ||
| 121 | #define MTK_INT_ENABLE 0x14 | ||
| 122 | #define MTK_MDIO_CFG2 0x18 | ||
| 123 | #define MTK_FOC_TS_T 0x1C | ||
| 124 | |||
| 125 | #define MTK_GDMA1_FWD_CFG (MTK_GDMA_OFFSET + 0x00) | ||
| 126 | #define MTK_GDMA1_SCH_CFG (MTK_GDMA_OFFSET + 0x04) | ||
| 127 | #define MTK_GDMA1_SHPR_CFG (MTK_GDMA_OFFSET + 0x08) | ||
| 128 | #define MTK_GDMA1_MAC_ADRL (MTK_GDMA_OFFSET + 0x0C) | ||
| 129 | #define MTK_GDMA1_MAC_ADRH (MTK_GDMA_OFFSET + 0x10) | ||
| 130 | |||
| 131 | #define MTK_GDMA2_FWD_CFG (MTK_GDMA2_OFFSET + 0x00) | ||
| 132 | #define MTK_GDMA2_SCH_CFG (MTK_GDMA2_OFFSET + 0x04) | ||
| 133 | #define MTK_GDMA2_SHPR_CFG (MTK_GDMA2_OFFSET + 0x08) | ||
| 134 | #define MTK_GDMA2_MAC_ADRL (MTK_GDMA2_OFFSET + 0x0C) | ||
| 135 | #define MTK_GDMA2_MAC_ADRH (MTK_GDMA2_OFFSET + 0x10) | ||
| 136 | |||
| 137 | #define MTK_PSE_FQ_CFG (MTK_PSE_OFFSET + 0x00) | ||
| 138 | #define MTK_CDMA_FC_CFG (MTK_PSE_OFFSET + 0x04) | ||
| 139 | #define MTK_GDMA1_FC_CFG (MTK_PSE_OFFSET + 0x08) | ||
| 140 | #define MTK_GDMA2_FC_CFG (MTK_PSE_OFFSET + 0x0C) | ||
| 141 | |||
| 142 | #define MTK_CDMA_CSG_CFG (MTK_CDMA_OFFSET + 0x00) | ||
| 143 | #define MTK_CDMA_SCH_CFG (MTK_CDMA_OFFSET + 0x04) | ||
| 144 | |||
| 145 | #define MT7621_GDMA_FWD_CFG(x) (MT7621_GDMA_OFFSET + (x * 0x1000)) | ||
| 146 | |||
| 147 | /* FIXME this might be different for different SOCs */ | ||
| 148 | #define MT7620_GDMA1_FWD_CFG (MT7621_GDMA_OFFSET + 0x00) | ||
| 149 | |||
| 150 | #define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00) | ||
| 151 | #define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04) | ||
| 152 | #define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08) | ||
| 153 | #define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C) | ||
| 154 | #define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10) | ||
| 155 | #define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14) | ||
| 156 | #define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18) | ||
| 157 | #define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C) | ||
| 158 | #define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20) | ||
| 159 | #define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24) | ||
| 160 | #define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28) | ||
| 161 | #define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C) | ||
| 162 | #define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30) | ||
| 163 | #define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34) | ||
| 164 | #define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38) | ||
| 165 | #define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C) | ||
| 166 | #define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100) | ||
| 167 | #define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104) | ||
| 168 | #define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108) | ||
| 169 | #define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C) | ||
| 170 | #define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110) | ||
| 171 | #define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114) | ||
| 172 | #define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118) | ||
| 173 | #define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C) | ||
| 174 | #define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204) | ||
| 175 | #define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208) | ||
| 176 | #define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c) | ||
| 177 | #define RT5350_MTK_INT_STATUS (RT5350_PDMA_OFFSET + 0x220) | ||
| 178 | #define RT5350_MTK_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228) | ||
| 179 | #define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280) | ||
| 180 | |||
| 181 | #define MTK_PDMA_GLO_CFG (MTK_PDMA_OFFSET + 0x00) | ||
| 182 | #define MTK_PDMA_RST_CFG (MTK_PDMA_OFFSET + 0x04) | ||
| 183 | #define MTK_PDMA_SCH_CFG (MTK_PDMA_OFFSET + 0x08) | ||
| 184 | #define MTK_DLY_INT_CFG (MTK_PDMA_OFFSET + 0x0C) | ||
| 185 | #define MTK_TX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x10) | ||
| 186 | #define MTK_TX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x14) | ||
| 187 | #define MTK_TX_CTX_IDX0 (MTK_PDMA_OFFSET + 0x18) | ||
| 188 | #define MTK_TX_DTX_IDX0 (MTK_PDMA_OFFSET + 0x1C) | ||
| 189 | #define MTK_TX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x20) | ||
| 190 | #define MTK_TX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x24) | ||
| 191 | #define MTK_TX_CTX_IDX1 (MTK_PDMA_OFFSET + 0x28) | ||
| 192 | #define MTK_TX_DTX_IDX1 (MTK_PDMA_OFFSET + 0x2C) | ||
| 193 | #define MTK_RX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x30) | ||
| 194 | #define MTK_RX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x34) | ||
| 195 | #define MTK_RX_CALC_IDX0 (MTK_PDMA_OFFSET + 0x38) | ||
| 196 | #define MTK_RX_DRX_IDX0 (MTK_PDMA_OFFSET + 0x3C) | ||
| 197 | #define MTK_TX_BASE_PTR2 (MTK_PDMA_OFFSET + 0x40) | ||
| 198 | #define MTK_TX_MAX_CNT2 (MTK_PDMA_OFFSET + 0x44) | ||
| 199 | #define MTK_TX_CTX_IDX2 (MTK_PDMA_OFFSET + 0x48) | ||
| 200 | #define MTK_TX_DTX_IDX2 (MTK_PDMA_OFFSET + 0x4C) | ||
| 201 | #define MTK_TX_BASE_PTR3 (MTK_PDMA_OFFSET + 0x50) | ||
| 202 | #define MTK_TX_MAX_CNT3 (MTK_PDMA_OFFSET + 0x54) | ||
| 203 | #define MTK_TX_CTX_IDX3 (MTK_PDMA_OFFSET + 0x58) | ||
| 204 | #define MTK_TX_DTX_IDX3 (MTK_PDMA_OFFSET + 0x5C) | ||
| 205 | #define MTK_RX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x60) | ||
| 206 | #define MTK_RX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x64) | ||
| 207 | #define MTK_RX_CALC_IDX1 (MTK_PDMA_OFFSET + 0x68) | ||
| 208 | #define MTK_RX_DRX_IDX1 (MTK_PDMA_OFFSET + 0x6C) | ||
| 209 | |||
| 210 | /* Switch DMA configuration */ | ||
| 211 | #define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00) | ||
| 212 | #define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04) | ||
| 213 | #define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08) | ||
| 214 | #define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C) | ||
| 215 | #define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10) | ||
| 216 | #define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100) | ||
| 217 | #define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104) | ||
| 218 | #define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108) | ||
| 219 | #define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C) | ||
| 220 | #define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110) | ||
| 221 | |||
| 222 | #define RT5350_SDM_ICS_EN BIT(16) | ||
| 223 | #define RT5350_SDM_TCS_EN BIT(17) | ||
| 224 | #define RT5350_SDM_UCS_EN BIT(18) | ||
| 225 | |||
| 226 | /* QDMA registers */ | ||
| 227 | #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10)) | ||
| 228 | #define MTK_QTX_SCH(x) (0x1804 + (x * 0x10)) | ||
| 229 | #define MTK_QRX_BASE_PTR0 0x1900 | ||
| 230 | #define MTK_QRX_MAX_CNT0 0x1904 | ||
| 231 | #define MTK_QRX_CRX_IDX0 0x1908 | ||
| 232 | #define MTK_QRX_DRX_IDX0 0x190C | ||
| 233 | #define MTK_QDMA_GLO_CFG 0x1A04 | ||
| 234 | #define MTK_QDMA_RST_IDX 0x1A08 | ||
| 235 | #define MTK_QDMA_DELAY_INT 0x1A0C | ||
| 236 | #define MTK_QDMA_FC_THRES 0x1A10 | ||
| 237 | #define MTK_QMTK_INT_STATUS 0x1A18 | ||
| 238 | #define MTK_QMTK_INT_ENABLE 0x1A1C | ||
| 239 | #define MTK_QDMA_HRED2 0x1A44 | ||
| 240 | |||
| 241 | #define MTK_QTX_CTX_PTR 0x1B00 | ||
| 242 | #define MTK_QTX_DTX_PTR 0x1B04 | ||
| 243 | |||
| 244 | #define MTK_QTX_CRX_PTR 0x1B10 | ||
| 245 | #define MTK_QTX_DRX_PTR 0x1B14 | ||
| 246 | |||
| 247 | #define MTK_QDMA_FQ_HEAD 0x1B20 | ||
| 248 | #define MTK_QDMA_FQ_TAIL 0x1B24 | ||
| 249 | #define MTK_QDMA_FQ_CNT 0x1B28 | ||
| 250 | #define MTK_QDMA_FQ_BLEN 0x1B2C | ||
| 251 | |||
| 252 | #define QDMA_PAGE_SIZE 2048 | ||
| 253 | #define QDMA_TX_OWNER_CPU BIT(31) | ||
| 254 | #define QDMA_TX_SWC BIT(14) | ||
| 255 | #define TX_QDMA_SDL(_x) (((_x) & 0x3fff) << 16) | ||
| 256 | #define QDMA_RES_THRES 4 | ||
| 257 | |||
| 258 | /* MDIO_CFG register bits */ | ||
| 259 | #define MTK_MDIO_CFG_AUTO_POLL_EN BIT(29) | ||
| 260 | #define MTK_MDIO_CFG_GP1_BP_EN BIT(16) | ||
| 261 | #define MTK_MDIO_CFG_GP1_FRC_EN BIT(15) | ||
| 262 | #define MTK_MDIO_CFG_GP1_SPEED_10 (0 << 13) | ||
| 263 | #define MTK_MDIO_CFG_GP1_SPEED_100 (1 << 13) | ||
| 264 | #define MTK_MDIO_CFG_GP1_SPEED_1000 (2 << 13) | ||
| 265 | #define MTK_MDIO_CFG_GP1_DUPLEX BIT(12) | ||
| 266 | #define MTK_MDIO_CFG_GP1_FC_TX BIT(11) | ||
| 267 | #define MTK_MDIO_CFG_GP1_FC_RX BIT(10) | ||
| 268 | #define MTK_MDIO_CFG_GP1_LNK_DWN BIT(9) | ||
| 269 | #define MTK_MDIO_CFG_GP1_AN_FAIL BIT(8) | ||
| 270 | #define MTK_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6) | ||
| 271 | #define MTK_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6) | ||
| 272 | #define MTK_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6) | ||
| 273 | #define MTK_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6) | ||
| 274 | #define MTK_MDIO_CFG_TURBO_MII_FREQ BIT(5) | ||
| 275 | #define MTK_MDIO_CFG_TURBO_MII_MODE BIT(4) | ||
| 276 | #define MTK_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2) | ||
| 277 | #define MTK_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2) | ||
| 278 | #define MTK_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2) | ||
| 279 | #define MTK_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2) | ||
| 280 | #define MTK_MDIO_CFG_TX_CLK_SKEW_0 0 | ||
| 281 | #define MTK_MDIO_CFG_TX_CLK_SKEW_200 1 | ||
| 282 | #define MTK_MDIO_CFG_TX_CLK_SKEW_400 2 | ||
| 283 | #define MTK_MDIO_CFG_TX_CLK_SKEW_INV 3 | ||
| 284 | |||
| 285 | /* uni-cast port */ | ||
| 286 | #define MTK_GDM1_JMB_LEN_MASK 0xf | ||
| 287 | #define MTK_GDM1_JMB_LEN_SHIFT 28 | ||
| 288 | #define MTK_GDM1_ICS_EN BIT(22) | ||
| 289 | #define MTK_GDM1_TCS_EN BIT(21) | ||
| 290 | #define MTK_GDM1_UCS_EN BIT(20) | ||
| 291 | #define MTK_GDM1_JMB_EN BIT(19) | ||
| 292 | #define MTK_GDM1_STRPCRC BIT(16) | ||
| 293 | #define MTK_GDM1_UFRC_P_CPU (0 << 12) | ||
| 294 | #define MTK_GDM1_UFRC_P_GDMA1 (1 << 12) | ||
| 295 | #define MTK_GDM1_UFRC_P_PPE (6 << 12) | ||
| 296 | |||
| 297 | /* checksums */ | ||
| 298 | #define MTK_ICS_GEN_EN BIT(2) | ||
| 299 | #define MTK_UCS_GEN_EN BIT(1) | ||
| 300 | #define MTK_TCS_GEN_EN BIT(0) | ||
| 301 | |||
| 302 | /* dma mode */ | ||
| 303 | #define MTK_PDMA BIT(0) | ||
| 304 | #define MTK_QDMA BIT(1) | ||
| 305 | #define MTK_PDMA_RX_QDMA_TX (MTK_PDMA | MTK_QDMA) | ||
| 306 | |||
| 307 | /* dma ring */ | ||
| 308 | #define MTK_PST_DRX_IDX0 BIT(16) | ||
| 309 | #define MTK_PST_DTX_IDX3 BIT(3) | ||
| 310 | #define MTK_PST_DTX_IDX2 BIT(2) | ||
| 311 | #define MTK_PST_DTX_IDX1 BIT(1) | ||
| 312 | #define MTK_PST_DTX_IDX0 BIT(0) | ||
| 313 | |||
| 314 | #define MTK_RX_2B_OFFSET BIT(31) | ||
| 315 | #define MTK_TX_WB_DDONE BIT(6) | ||
| 316 | #define MTK_RX_DMA_BUSY BIT(3) | ||
| 317 | #define MTK_TX_DMA_BUSY BIT(1) | ||
| 318 | #define MTK_RX_DMA_EN BIT(2) | ||
| 319 | #define MTK_TX_DMA_EN BIT(0) | ||
| 320 | |||
| 321 | #define MTK_PDMA_SIZE_4DWORDS (0 << 4) | ||
| 322 | #define MTK_PDMA_SIZE_8DWORDS (1 << 4) | ||
| 323 | #define MTK_PDMA_SIZE_16DWORDS (2 << 4) | ||
| 324 | |||
| 325 | #define MTK_US_CYC_CNT_MASK 0xff | ||
| 326 | #define MTK_US_CYC_CNT_SHIFT 0x8 | ||
| 327 | #define MTK_US_CYC_CNT_DIVISOR 1000000 | ||
| 328 | |||
| 329 | /* PDMA descriptor rxd2 */ | ||
| 330 | #define RX_DMA_DONE BIT(31) | ||
| 331 | #define RX_DMA_LSO BIT(30) | ||
| 332 | #define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) | ||
| 333 | #define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) | ||
| 334 | #define RX_DMA_TAG BIT(15) | ||
| 335 | |||
| 336 | /* PDMA descriptor rxd3 */ | ||
| 337 | #define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff) | ||
| 338 | #define RX_DMA_VID(_x) ((_x) & 0xfff) | ||
| 339 | |||
| 340 | /* PDMA descriptor rxd4 */ | ||
| 341 | #define RX_DMA_L4VALID BIT(30) | ||
| 342 | #define RX_DMA_FPORT_SHIFT 19 | ||
| 343 | #define RX_DMA_FPORT_MASK 0x7 | ||
| 344 | |||
| 345 | struct mtk_rx_dma { | ||
| 346 | unsigned int rxd1; | ||
| 347 | unsigned int rxd2; | ||
| 348 | unsigned int rxd3; | ||
| 349 | unsigned int rxd4; | ||
| 350 | } __packed __aligned(4); | ||
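The rxd2/rxd4 helpers above pack the receive status into the descriptor words. A small self-contained sketch (plain user-space C, with the relevant macros copied locally for illustration only) of how a completed descriptor would be decoded:

#include <stdint.h>
#include <stdio.h>

/* Illustrative copies of the rxd2/rxd4 helpers defined above. */
#define BIT(n)               (1u << (n))
#define RX_DMA_DONE          BIT(31)
#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
#define RX_DMA_FPORT_SHIFT   19
#define RX_DMA_FPORT_MASK    0x7

int main(void)
{
	uint32_t rxd2 = 0x80400000;               /* DONE set, plen0 = 0x40 -> 64 byte frame */
	uint32_t rxd4 = 1u << RX_DMA_FPORT_SHIFT; /* received on forward port 1 */

	if (rxd2 & RX_DMA_DONE)
		printf("len=%u fport=%u\n", RX_DMA_GET_PLEN0(rxd2),
		       (rxd4 >> RX_DMA_FPORT_SHIFT) & RX_DMA_FPORT_MASK);
	return 0;
}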
| 351 | |||
| 352 | /* PDMA tx descriptor bits */ | ||
| 353 | #define TX_DMA_BUF_LEN 0x3fff | ||
| 354 | #define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16) | ||
| 355 | #define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16) | ||
| 356 | #define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN) | ||
| 357 | #define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN) | ||
| 358 | #define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN) | ||
| 359 | #define TX_DMA_LS1 BIT(14) | ||
| 360 | #define TX_DMA_LS0 BIT(30) | ||
| 361 | #define TX_DMA_DONE BIT(31) | ||
| 362 | #define TX_DMA_FPORT_SHIFT 25 | ||
| 363 | #define TX_DMA_FPORT_MASK 0x7 | ||
| 364 | #define TX_DMA_INS_VLAN_MT7621 BIT(16) | ||
| 365 | #define TX_DMA_INS_VLAN BIT(7) | ||
| 366 | #define TX_DMA_INS_PPPOE BIT(12) | ||
| 367 | #define TX_DMA_TAG BIT(15) | ||
| 368 | #define TX_DMA_TAG_MASK BIT(15) | ||
| 369 | #define TX_DMA_QN(_x) ((_x) << 16) | ||
| 370 | #define TX_DMA_PN(_x) ((_x) << 24) | ||
| 371 | #define TX_DMA_QN_MASK TX_DMA_QN(0x7) | ||
| 372 | #define TX_DMA_PN_MASK TX_DMA_PN(0x7) | ||
| 373 | #define TX_DMA_UDF BIT(20) | ||
| 374 | #define TX_DMA_CHKSUM (0x7 << 29) | ||
| 375 | #define TX_DMA_TSO BIT(28) | ||
| 376 | #define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1)) | ||
| 377 | |||
| 378 | /* frame engine counters */ | ||
| 379 | #define MTK_PPE_AC_BCNT0 (MTK_CMTABLE_OFFSET + 0x00) | ||
| 380 | #define MTK_GDMA1_TX_GBCNT (MTK_CMTABLE_OFFSET + 0x300) | ||
| 381 | #define MTK_GDMA2_TX_GBCNT (MTK_GDMA1_TX_GBCNT + 0x40) | ||
| 382 | |||
| 383 | /* phy device flags */ | ||
| 384 | #define MTK_PHY_FLAG_PORT BIT(0) | ||
| 385 | #define MTK_PHY_FLAG_ATTACH BIT(1) | ||
| 386 | |||
| 387 | struct mtk_tx_dma { | ||
| 388 | unsigned int txd1; | ||
| 389 | unsigned int txd2; | ||
| 390 | unsigned int txd3; | ||
| 391 | unsigned int txd4; | ||
| 392 | } __packed __aligned(4); | ||
| 393 | |||
| 394 | struct mtk_eth; | ||
| 395 | struct mtk_mac; | ||
| 396 | |||
| 397 | /* manage the attached phys */ | ||
| 398 | struct mtk_phy { | ||
| 399 | spinlock_t lock; | ||
| 400 | |||
| 401 | struct phy_device *phy[8]; | ||
| 402 | struct device_node *phy_node[8]; | ||
| 403 | const __be32 *phy_fixed[8]; | ||
| 404 | int duplex[8]; | ||
| 405 | int speed[8]; | ||
| 406 | int tx_fc[8]; | ||
| 407 | int rx_fc[8]; | ||
| 408 | int (*connect)(struct mtk_mac *mac); | ||
| 409 | void (*disconnect)(struct mtk_mac *mac); | ||
| 410 | void (*start)(struct mtk_mac *mac); | ||
| 411 | void (*stop)(struct mtk_mac *mac); | ||
| 412 | }; | ||
| 413 | |||
| 414 | /* struct mtk_soc_data - the structure that holds the SoC specific data | ||
| 415 | * @reg_table: Some of the legacy registers changed their location | ||
| 416 | * over time. Their offsets are stored in this table | ||
| 417 | * | ||
| 418 | * @init_data: Some features depend on the silicon revision. This | ||
| 419 | * callback allows runtime modification of the content of | ||
| 420 | * this struct | ||
| 421 | * @reset_fe: This callback is used to trigger the reset of the frame | ||
| 422 | * engine | ||
| 423 | * @set_mac: This callback is used to set the unicast mac address | ||
| 424 | * filter | ||
| 425 | * @fwd_config: This callback is used to setup the forward config | ||
| 426 | * register of the MAC | ||
| 427 | * @switch_init: This callback is used to bring up the switch core | ||
| 428 | * @port_init: Some SoCs have ports that can be routed to a switch port | ||
| 429 | * or an external PHY. This callback is used to set up these | ||
| 430 | * ports. | ||
| 431 | * @has_carrier: This callback allows the driver to check if there is a cable | ||
| 432 | * attached. | ||
| 433 | * @mdio_init: This callback is used to set up the MDIO bus if one is | ||
| 434 | * present | ||
| 435 | * @mdio_cleanup: This callback is used to cleanup the MDIO state. | ||
| 436 | * @mdio_write: This callback is used to write data to the MDIO bus. | ||
| 437 | * @mdio_read: This callback is used to read data from the MDIO bus. | ||
| 438 | * @mdio_adjust_link: This callback is used to apply the PHY settings. | ||
| 439 | * @piac_offset: the PIAC register has a different base offset on some SoCs | ||
| 440 | * @hw_features: feature set depends on the SoC type | ||
| 441 | * @dma_ring_size: allow GBit SoCs to set bigger rings than FE SoCs | ||
| 442 | * @napi_weight: allow GBit SoCs to set bigger napi weight than FE SoCs | ||
| 443 | * @dma_type: whether the SoC uses PDMA, QDMA or a mix of the two | ||
| 444 | * @pdma_glo_cfg: the default DMA configuration | ||
| 445 | * @rx_int: the RX interrupt bits used by the SoC | ||
| 446 | * @tx_int: the TX interrupt bits used by the SoC | ||
| 447 | * @status_int: the Status interrupt bits used by the SoC | ||
| 448 | * @checksum_bit: the bits used to turn on HW checksumming | ||
| 449 | * @txd4: default value of the TXD4 descriptor | ||
| 450 | * @mac_count: the number of MACs that the SoC has | ||
| 451 | * @new_stats: there is an old and a new way to read the hardware stats | ||
| 452 | * registers | ||
| 453 | * @jumbo_frame: does the SoC support jumbo frames? | ||
| 454 | * @rx_2b_offset: tell the rx dma to offset the data by 2 bytes | ||
| 455 | * @rx_sg_dma: scatter gather support | ||
| 456 | * @padding_64b: enable 64 bit padding | ||
| 457 | * @padding_bug: rt2880 has a padding bug | ||
| 458 | * @has_switch: does the SoC have a built-in switch | ||
| 459 | * | ||
| 460 | * Although all of the supported SoCs share the same basic functionality, there | ||
| 461 | * are several SoC specific functions and features that we need to support. This | ||
| 462 | * struct holds the SoC specific data so that the common core can figure out | ||
| 463 | * how to setup and use these differences. | ||
| 464 | */ | ||
| 465 | struct mtk_soc_data { | ||
| 466 | const u16 *reg_table; | ||
| 467 | |||
| 468 | void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev); | ||
| 469 | void (*reset_fe)(struct mtk_eth *eth); | ||
| 470 | void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr); | ||
| 471 | int (*fwd_config)(struct mtk_eth *eth); | ||
| 472 | int (*switch_init)(struct mtk_eth *eth); | ||
| 473 | void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac, | ||
| 474 | struct device_node *port); | ||
| 475 | int (*has_carrier)(struct mtk_eth *eth); | ||
| 476 | int (*mdio_init)(struct mtk_eth *eth); | ||
| 477 | void (*mdio_cleanup)(struct mtk_eth *eth); | ||
| 478 | int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg, | ||
| 479 | u16 val); | ||
| 480 | int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg); | ||
| 481 | void (*mdio_adjust_link)(struct mtk_eth *eth, int port); | ||
| 482 | u32 piac_offset; | ||
| 483 | netdev_features_t hw_features; | ||
| 484 | u32 dma_ring_size; | ||
| 485 | u32 napi_weight; | ||
| 486 | u32 dma_type; | ||
| 487 | u32 pdma_glo_cfg; | ||
| 488 | u32 rx_int; | ||
| 489 | u32 tx_int; | ||
| 490 | u32 status_int; | ||
| 491 | u32 checksum_bit; | ||
| 492 | u32 txd4; | ||
| 493 | u32 mac_count; | ||
| 494 | |||
| 495 | u32 new_stats:1; | ||
| 496 | u32 jumbo_frame:1; | ||
| 497 | u32 rx_2b_offset:1; | ||
| 498 | u32 rx_sg_dma:1; | ||
| 499 | u32 padding_64b:1; | ||
| 500 | u32 padding_bug:1; | ||
| 501 | u32 has_switch:1; | ||
| 502 | }; | ||
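As a rough illustration of how the common core consumes these per-SoC hooks, here is a sketch only: the function name and call order are hypothetical and not taken from the real mtk_eth_soc.c.

static int mtk_example_hw_init(struct mtk_eth *eth, struct mtk_mac *mac,
			       unsigned char *addr)
{
	struct mtk_soc_data *soc = eth->soc;
	int err;

	soc->reset_fe(eth);		/* put the frame engine in a known state */
	soc->set_mac(mac, addr);	/* program the unicast MAC filter */

	err = soc->fwd_config(eth);	/* per-SoC forward/checksum setup */
	if (err)
		return err;

	if (soc->has_switch && soc->switch_init)
		return soc->switch_init(eth);	/* bring up the built-in switch */

	return 0;
}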
| 503 | |||
| 504 | #define MTK_STAT_OFFSET 0x40 | ||
| 505 | |||
| 506 | /* struct mtk_hw_stats - the structure that holds the traffic statistics. | ||
| 507 | * @stats_lock: make sure that stats operations are atomic | ||
| 508 | * @reg_offset: the status register offset of the SoC | ||
| 509 | * @syncp: the u64 stats sync point used to read the counters consistently | ||
| 510 | * | ||
| 511 | * All of the supported SoCs have hardware counters for traffic statistics. | ||
| 512 | * Whenever the status IRQ triggers we can read the latest stats from these | ||
| 513 | * counters and store them in this struct. | ||
| 514 | */ | ||
| 515 | struct mtk_hw_stats { | ||
| 516 | spinlock_t stats_lock; | ||
| 517 | u32 reg_offset; | ||
| 518 | struct u64_stats_sync syncp; | ||
| 519 | |||
| 520 | u64 tx_bytes; | ||
| 521 | u64 tx_packets; | ||
| 522 | u64 tx_skip; | ||
| 523 | u64 tx_collisions; | ||
| 524 | u64 rx_bytes; | ||
| 525 | u64 rx_packets; | ||
| 526 | u64 rx_overflow; | ||
| 527 | u64 rx_fcs_errors; | ||
| 528 | u64 rx_short_errors; | ||
| 529 | u64 rx_long_errors; | ||
| 530 | u64 rx_checksum_errors; | ||
| 531 | u64 rx_flow_control_packets; | ||
| 532 | }; | ||
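A minimal sketch of the usual u64_stats_sync pattern these fields imply (the writer updates under the status IRQ path, readers retry on a torn 64 bit read); the mtk_stats_example_* names are hypothetical, not the driver's real helpers.

static void mtk_stats_example_update(struct mtk_hw_stats *hw_stats,
				     u64 bytes, u64 pkts)
{
	u64_stats_update_begin(&hw_stats->syncp);
	hw_stats->rx_bytes   += bytes;
	hw_stats->rx_packets += pkts;
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_example_read(struct mtk_hw_stats *hw_stats,
				   u64 *bytes, u64 *pkts)
{
	unsigned int start;

	do {
		start  = u64_stats_fetch_begin(&hw_stats->syncp);
		*bytes = hw_stats->rx_bytes;
		*pkts  = hw_stats->rx_packets;
	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
}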
| 533 | |||
| 534 | /* PDMA descriptor can point at 1-2 segments. This enum allows us to track how | ||
| 535 | * memory was allocated so that it can be freed properly | ||
| 536 | */ | ||
| 537 | enum mtk_tx_flags { | ||
| 538 | MTK_TX_FLAGS_SINGLE0 = 0x01, | ||
| 539 | MTK_TX_FLAGS_PAGE0 = 0x02, | ||
| 540 | MTK_TX_FLAGS_PAGE1 = 0x04, | ||
| 541 | }; | ||
| 542 | |||
| 543 | /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at | ||
| 544 | * by the TX descriptors | ||
| 545 | * @skb: The SKB pointer of the packet being sent | ||
| 546 | * @dma_addr0: The base addr of the first segment | ||
| 547 | * @dma_len0: The length of the first segment | ||
| 548 | * @dma_addr1: The base addr of the second segment | ||
| 549 | * @dma_len1: The length of the second segment | ||
| 550 | */ | ||
| 551 | struct mtk_tx_buf { | ||
| 552 | struct sk_buff *skb; | ||
| 553 | u32 flags; | ||
| 554 | DEFINE_DMA_UNMAP_ADDR(dma_addr0); | ||
| 555 | DEFINE_DMA_UNMAP_LEN(dma_len0); | ||
| 556 | DEFINE_DMA_UNMAP_ADDR(dma_addr1); | ||
| 557 | DEFINE_DMA_UNMAP_LEN(dma_len1); | ||
| 558 | }; | ||
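The DEFINE_DMA_UNMAP_* slots exist so the completion path can undo the mapping later. A hedged sketch of the typical map/unmap bookkeeping for the first segment follows; the mtk_example_* names are made up for illustration and the real driver also handles page fragments and the skb pointer.

static int mtk_example_map_segment(struct device *dev, struct mtk_tx_buf *tx_buf,
				   void *data, size_t len)
{
	dma_addr_t mapped = dma_map_single(dev, data, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, mapped))
		return -ENOMEM;

	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped);
	dma_unmap_len_set(tx_buf, dma_len0, len);
	return 0;
}

static void mtk_example_unmap_segment(struct device *dev, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0)
		dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE);
}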
| 559 | |||
| 560 | /* struct mtk_tx_ring - This struct holds info describing a TX ring | ||
| 561 | * @tx_dma: The descriptor ring | ||
| 562 | * @tx_buf: The memory pointed at by the ring | ||
| 563 | * @tx_phys: The physical addr of tx_buf | ||
| 564 | * @tx_next_free: Pointer to the next free descriptor | ||
| 565 | * @tx_last_free: Pointer to the last free descriptor | ||
| 566 | * @tx_thresh: The minimum number of free descriptors required | ||
| 567 | * @tx_map: Callback to map a new packet into the ring | ||
| 568 | * @tx_poll: Callback for the housekeeping function | ||
| 569 | * @tx_clean: Callback for the cleanup function | ||
| 570 | * @tx_ring_size: How many descriptors are in the ring | ||
| 571 | * @tx_free_idx: The index of the next free descriptor | ||
| 572 | * @tx_next_idx: QDMA uses a linked list. This element points to the next | ||
| 573 | * free descriptor in the list | ||
| 574 | * @tx_free_count: QDMA uses a linked list. Track how many free descriptors | ||
| 575 | * are present | ||
| 576 | */ | ||
| 577 | struct mtk_tx_ring { | ||
| 578 | struct mtk_tx_dma *tx_dma; | ||
| 579 | struct mtk_tx_buf *tx_buf; | ||
| 580 | dma_addr_t tx_phys; | ||
| 581 | struct mtk_tx_dma *tx_next_free; | ||
| 582 | struct mtk_tx_dma *tx_last_free; | ||
| 583 | u16 tx_thresh; | ||
| 584 | int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num, | ||
| 585 | struct mtk_tx_ring *ring, bool gso); | ||
| 586 | int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again); | ||
| 587 | void (*tx_clean)(struct mtk_eth *eth); | ||
| 588 | |||
| 589 | /* PDMA only */ | ||
| 590 | u16 tx_ring_size; | ||
| 591 | u16 tx_free_idx; | ||
| 592 | |||
| 593 | /* QDMA only */ | ||
| 594 | u16 tx_next_idx; | ||
| 595 | atomic_t tx_free_count; | ||
| 596 | }; | ||
| 597 | |||
| 598 | /* struct mtk_rx_ring - This struct holds info describing a RX ring | ||
| 599 | * @rx_dma: The descriptor ring | ||
| 600 | * @rx_data: The memory pointed at by the ring | ||
| 601 | * @rx_phys: The physical addr of rx_dma | ||
| 602 | * @rx_ring_size: How many descriptors are in the ring | ||
| 603 | * @rx_buf_size: The size of each packet buffer | ||
| 604 | * @rx_calc_idx: The current head of the ring | ||
| 605 | */ | ||
| 606 | struct mtk_rx_ring { | ||
| 607 | struct mtk_rx_dma *rx_dma; | ||
| 608 | u8 **rx_data; | ||
| 609 | dma_addr_t rx_phys; | ||
| 610 | u16 rx_ring_size; | ||
| 611 | u16 frag_size; | ||
| 612 | u16 rx_buf_size; | ||
| 613 | u16 rx_calc_idx; | ||
| 614 | }; | ||
| 615 | |||
| 616 | /* currently no SoC has more than 2 macs */ | ||
| 617 | #define MTK_MAX_DEVS 2 | ||
| 618 | |||
| 619 | /* struct mtk_eth - This is the main data structure for holding the state | ||
| 620 | * of the driver | ||
| 621 | * @dev: The device pointer | ||
| 622 | * @base: The mapped register i/o base | ||
| 623 | * @page_lock: Make sure that register operations are atomic | ||
| 624 | * @soc: pointer to our SoC specific data | ||
| 625 | * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a | ||
| 626 | * dummy for NAPI to work | ||
| 627 | * @netdev: The netdev instances | ||
| 628 | * @mac: Each netdev is linked to a physical MAC | ||
| 629 | * @switch_np: The phandle for the switch | ||
| 630 | * @irq: The IRQ that we are using | ||
| 631 | * @msg_enable: Ethtool msg level | ||
| 632 | * @sysclk: The sysclk rate - needed for calibration | ||
| 633 | * @ethsys: The register map pointing at the range used to setup | ||
| 634 | * MII modes | ||
| 635 | * @dma_refcnt: track how many netdevs are using the DMA engine | ||
| 636 | * @tx_ring: Pointer to the memory holding info about the TX ring | ||
| 637 | * @rx_ring: Pointer to the memory holding info about the RX ring | ||
| 638 | * @rx_napi: The NAPI struct | ||
| 639 | * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring | ||
| 640 | * @scratch_head: The scratch memory that scratch_ring points to. | ||
| 641 | * @phy: Info about the attached PHYs | ||
| 642 | * @mii_bus: If there is a bus we need to create an instance for it | ||
| 643 | * @link: Track if the ports have a physical link | ||
| 644 | * @sw_priv: Pointer to the switch's private data | ||
| 645 | * @vlan_map: RX VID tracking | ||
| 646 | */ | ||
| 647 | |||
| 648 | struct mtk_eth { | ||
| 649 | struct device *dev; | ||
| 650 | void __iomem *base; | ||
| 651 | spinlock_t page_lock; | ||
| 652 | struct mtk_soc_data *soc; | ||
| 653 | struct net_device dummy_dev; | ||
| 654 | struct net_device *netdev[MTK_MAX_DEVS]; | ||
| 655 | struct mtk_mac *mac[MTK_MAX_DEVS]; | ||
| 656 | struct device_node *switch_np; | ||
| 657 | int irq; | ||
| 658 | u32 msg_enable; | ||
| 659 | unsigned long sysclk; | ||
| 660 | struct regmap *ethsys; | ||
| 661 | atomic_t dma_refcnt; | ||
| 662 | struct mtk_tx_ring tx_ring; | ||
| 663 | struct mtk_rx_ring rx_ring[2]; | ||
| 664 | struct napi_struct rx_napi; | ||
| 665 | struct mtk_tx_dma *scratch_ring; | ||
| 666 | void *scratch_head; | ||
| 667 | struct mtk_phy *phy; | ||
| 668 | struct mii_bus *mii_bus; | ||
| 669 | int link[8]; | ||
| 670 | void *sw_priv; | ||
| 671 | unsigned long vlan_map; | ||
| 672 | }; | ||
| 673 | |||
| 674 | /* struct mtk_mac - the structure that holds the info about the MACs of the | ||
| 675 | * SoC | ||
| 676 | * @id: The number of the MAC | ||
| 677 | * @of_node: Our devicetree node | ||
| 678 | * @hw: Backpointer to our main data structure | ||
| 679 | * @hw_stats: Packet statistics counter | ||
| 680 | * @phy_dev: The attached PHY if available | ||
| 681 | * @phy_flags: The PHYs flags | ||
| 682 | * @pending_work: The work item used to reset the DMA ring | ||
| 683 | */ | ||
| 684 | struct mtk_mac { | ||
| 685 | int id; | ||
| 686 | struct device_node *of_node; | ||
| 687 | struct mtk_eth *hw; | ||
| 688 | struct mtk_hw_stats *hw_stats; | ||
| 689 | struct phy_device *phy_dev; | ||
| 690 | u32 phy_flags; | ||
| 691 | struct work_struct pending_work; | ||
| 692 | }; | ||
| 693 | |||
| 694 | /* the structs describing the SoCs are declared in the soc_xyz.c files */ | ||
| 695 | extern const struct of_device_id of_mtk_match[]; | ||
| 696 | |||
| 697 | /* read the hardware stats counters */ | ||
| 698 | void mtk_stats_update_mac(struct mtk_mac *mac); | ||
| 699 | |||
| 700 | /* reset the frame engine via the given reset bits */ | ||
| 701 | void mtk_reset(struct mtk_eth *eth, u32 reset_bits); | ||
| 702 | |||
| 703 | /* register i/o wrappers */ | ||
| 704 | void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg); | ||
| 705 | u32 mtk_r32(struct mtk_eth *eth, unsigned int reg); | ||
| 706 | |||
| 707 | /* default clock calibration handler */ | ||
| 708 | int mtk_set_clock_cycle(struct mtk_eth *eth); | ||
| 709 | |||
| 710 | /* default checksum setup handler */ | ||
| 711 | void mtk_csum_config(struct mtk_eth *eth); | ||
| 712 | |||
| 713 | /* default forward config handler */ | ||
| 714 | void mtk_fwd_config(struct mtk_eth *eth); | ||
| 715 | |||
| 716 | #endif /* MTK_ETH_H */ | ||
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c deleted file mode 100644 index 5d63b5d96f6b..000000000000 --- a/drivers/staging/mt7621-eth/soc_mt7621.c +++ /dev/null | |||
| @@ -1,161 +0,0 @@ | |||
| 1 | /* This program is free software; you can redistribute it and/or modify | ||
| 2 | * it under the terms of the GNU General Public License as published by | ||
| 3 | * the Free Software Foundation; version 2 of the License | ||
| 4 | * | ||
| 5 | * This program is distributed in the hope that it will be useful, | ||
| 6 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 7 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 8 | * GNU General Public License for more details. | ||
| 9 | * | ||
| 10 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | ||
| 11 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | ||
| 12 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | #include <linux/if_vlan.h> | ||
| 18 | #include <linux/of_net.h> | ||
| 19 | |||
| 20 | #include <asm/mach-ralink/ralink_regs.h> | ||
| 21 | |||
| 22 | #include "mtk_eth_soc.h" | ||
| 23 | #include "gsw_mt7620.h" | ||
| 24 | #include "mdio.h" | ||
| 25 | |||
| 26 | #define MT7620_CDMA_CSG_CFG 0x400 | ||
| 27 | #define MT7621_CDMP_IG_CTRL (MT7620_CDMA_CSG_CFG + 0x00) | ||
| 28 | #define MT7621_CDMP_EG_CTRL (MT7620_CDMA_CSG_CFG + 0x04) | ||
| 29 | #define MT7621_RESET_FE BIT(6) | ||
| 30 | #define MT7621_L4_VALID BIT(24) | ||
| 31 | |||
| 32 | #define MT7621_TX_DMA_UDF BIT(19) | ||
| 33 | |||
| 34 | #define CDMA_ICS_EN BIT(2) | ||
| 35 | #define CDMA_UCS_EN BIT(1) | ||
| 36 | #define CDMA_TCS_EN BIT(0) | ||
| 37 | |||
| 38 | #define GDMA_ICS_EN BIT(22) | ||
| 39 | #define GDMA_TCS_EN BIT(21) | ||
| 40 | #define GDMA_UCS_EN BIT(20) | ||
| 41 | |||
| 42 | /* frame engine counters */ | ||
| 43 | #define MT7621_REG_MIB_OFFSET 0x2000 | ||
| 44 | #define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00) | ||
| 45 | #define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400) | ||
| 46 | #define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40) | ||
| 47 | |||
| 48 | #define GSW_REG_GDMA1_MAC_ADRL 0x508 | ||
| 49 | #define GSW_REG_GDMA1_MAC_ADRH 0x50C | ||
| 50 | #define GSW_REG_GDMA2_MAC_ADRL 0x1508 | ||
| 51 | #define GSW_REG_GDMA2_MAC_ADRH 0x150C | ||
| 52 | |||
| 53 | #define MT7621_MTK_RST_GL 0x04 | ||
| 54 | #define MT7620_MTK_INT_STATUS2 0x08 | ||
| 55 | |||
| 56 | /* The MTK_INT_STATUS reg on mt7620 defines CNT_GDM1_AF at BIT(29), | ||
| 57 | * but testing shows it should be BIT(13). | ||
| 58 | */ | ||
| 59 | #define MT7621_MTK_GDM1_AF BIT(28) | ||
| 60 | #define MT7621_MTK_GDM2_AF BIT(29) | ||
| 61 | |||
| 62 | static const u16 mt7621_reg_table[MTK_REG_COUNT] = { | ||
| 63 | [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG, | ||
| 64 | [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG, | ||
| 65 | [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG, | ||
| 66 | [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0, | ||
| 67 | [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0, | ||
| 68 | [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0, | ||
| 69 | [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0, | ||
| 70 | [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0, | ||
| 71 | [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0, | ||
| 72 | [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0, | ||
| 73 | [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0, | ||
| 74 | [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE, | ||
| 75 | [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS, | ||
| 76 | [MTK_REG_MTK_DMA_VID_BASE] = 0, | ||
| 77 | [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT, | ||
| 78 | [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL, | ||
| 79 | [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2, | ||
| 80 | }; | ||
| 81 | |||
| 82 | static void mt7621_mtk_reset(struct mtk_eth *eth) | ||
| 83 | { | ||
| 84 | mtk_reset(eth, MT7621_RESET_FE); | ||
| 85 | } | ||
| 86 | |||
| 87 | static int mt7621_fwd_config(struct mtk_eth *eth) | ||
| 88 | { | ||
| 89 | /* Set up GMAC1 only; there is no support for GMAC2 yet */ | ||
| 90 | mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff, | ||
| 91 | MT7620_GDMA1_FWD_CFG); | ||
| 92 | |||
| 93 | /* Enable RX checksum */ | ||
| 94 | mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN | | ||
| 95 | GDMA_TCS_EN | GDMA_UCS_EN), | ||
| 96 | MT7620_GDMA1_FWD_CFG); | ||
| 97 | |||
| 98 | /* Enable RX VLAN offloading */ | ||
| 99 | mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL); | ||
| 100 | |||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | |||
| 104 | static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr) | ||
| 105 | { | ||
| 106 | unsigned long flags; | ||
| 107 | |||
| 108 | spin_lock_irqsave(&mac->hw->page_lock, flags); | ||
| 109 | if (mac->id == 0) { | ||
| 110 | mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1], | ||
| 111 | GSW_REG_GDMA1_MAC_ADRH); | ||
| 112 | mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) | | ||
| 113 | (hwaddr[4] << 8) | hwaddr[5], | ||
| 114 | GSW_REG_GDMA1_MAC_ADRL); | ||
| 115 | } | ||
| 116 | if (mac->id == 1) { | ||
| 117 | mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1], | ||
| 118 | GSW_REG_GDMA2_MAC_ADRH); | ||
| 119 | mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) | | ||
| 120 | (hwaddr[4] << 8) | hwaddr[5], | ||
| 121 | GSW_REG_GDMA2_MAC_ADRL); | ||
| 122 | } | ||
| 123 | spin_unlock_irqrestore(&mac->hw->page_lock, flags); | ||
| 124 | } | ||
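Worked example of the ADRH/ADRL packing done above for the address 00:11:22:33:44:55, as plain user-space C just to show the byte layout (the register writes themselves are not reproduced here).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t hwaddr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	/* Same shifts as mt7621_set_mac(): top two bytes go to ADRH,
	 * the remaining four bytes to ADRL.
	 */
	uint32_t adrh = (hwaddr[0] << 8) | hwaddr[1];
	uint32_t adrl = (hwaddr[2] << 24) | (hwaddr[3] << 16) |
			(hwaddr[4] << 8) | hwaddr[5];

	printf("ADRH=0x%04x ADRL=0x%08x\n", adrh, adrl); /* 0x0011 0x22334455 */
	return 0;
}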
| 125 | |||
| 126 | static struct mtk_soc_data mt7621_data = { | ||
| 127 | .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | | ||
| 128 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | | ||
| 129 | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | | ||
| 130 | NETIF_F_IPV6_CSUM, | ||
| 131 | .dma_type = MTK_PDMA, | ||
| 132 | .dma_ring_size = 256, | ||
| 133 | .napi_weight = 64, | ||
| 134 | .new_stats = 1, | ||
| 135 | .padding_64b = 1, | ||
| 136 | .rx_2b_offset = 1, | ||
| 137 | .rx_sg_dma = 1, | ||
| 138 | .has_switch = 1, | ||
| 139 | .mac_count = 2, | ||
| 140 | .reset_fe = mt7621_mtk_reset, | ||
| 141 | .set_mac = mt7621_set_mac, | ||
| 142 | .fwd_config = mt7621_fwd_config, | ||
| 143 | .switch_init = mtk_gsw_init, | ||
| 144 | .reg_table = mt7621_reg_table, | ||
| 145 | .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS, | ||
| 146 | .rx_int = RT5350_RX_DONE_INT, | ||
| 147 | .tx_int = RT5350_TX_DONE_INT, | ||
| 148 | .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF, | ||
| 149 | .checksum_bit = MT7621_L4_VALID, | ||
| 150 | .has_carrier = mt7620_has_carrier, | ||
| 151 | .mdio_read = mt7620_mdio_read, | ||
| 152 | .mdio_write = mt7620_mdio_write, | ||
| 153 | .mdio_adjust_link = mt7620_mdio_link_adjust, | ||
| 154 | }; | ||
| 155 | |||
| 156 | const struct of_device_id of_mtk_match[] = { | ||
| 157 | { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data }, | ||
| 158 | {}, | ||
| 159 | }; | ||
| 160 | |||
| 161 | MODULE_DEVICE_TABLE(of, of_mtk_match); | ||
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig index d33533872a16..c8fa17cfa807 100644 --- a/drivers/staging/mt7621-pci/Kconfig +++ b/drivers/staging/mt7621-pci/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config PCI_MT7621 | 1 | config PCI_MT7621 |
| 2 | tristate "MediaTek MT7621 PCI Controller" | 2 | tristate "MediaTek MT7621 PCI Controller" |
| 3 | depends on RALINK | 3 | depends on RALINK |
| 4 | depends on PCI | ||
| 4 | select PCI_DRIVERS_GENERIC | 5 | select PCI_DRIVERS_GENERIC |
| 5 | help | 6 | help |
| 6 | This selects a driver for the MediaTek MT7621 PCI Controller. | 7 | This selects a driver for the MediaTek MT7621 PCI Controller. |
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index d6248eecf123..2aee64fdaec5 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c | |||
| @@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev) | |||
| 163 | goto no_phy; | 163 | goto no_phy; |
| 164 | 164 | ||
| 165 | phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, | 165 | phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, |
| 166 | PHY_INTERFACE_MODE_GMII); | 166 | priv->phy_mode); |
| 167 | of_node_put(phy_node); | 167 | of_node_put(phy_node); |
| 168 | 168 | ||
| 169 | if (!phydev) | 169 | if (!phydev) |
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index ce61c5670ef6..986db76705cc 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c | |||
| @@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip, | |||
| 653 | return np; | 653 | return np; |
| 654 | } | 654 | } |
| 655 | 655 | ||
| 656 | static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port) | 656 | static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface, |
| 657 | int port) | ||
| 657 | { | 658 | { |
| 659 | struct device_node *np = priv->of_node; | ||
| 658 | u32 delay_value; | 660 | u32 delay_value; |
| 661 | bool rx_delay; | ||
| 662 | bool tx_delay; | ||
| 659 | 663 | ||
| 660 | if (!of_property_read_u32(np, "rx-delay", &delay_value)) | 664 | /* By default, both RX/TX delay is enabled in |
| 665 | * __cvmx_helper_rgmii_enable(). | ||
| 666 | */ | ||
| 667 | rx_delay = true; | ||
| 668 | tx_delay = true; | ||
| 669 | |||
| 670 | if (!of_property_read_u32(np, "rx-delay", &delay_value)) { | ||
| 661 | cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value); | 671 | cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value); |
| 662 | if (!of_property_read_u32(np, "tx-delay", &delay_value)) | 672 | rx_delay = delay_value > 0; |
| 673 | } | ||
| 674 | if (!of_property_read_u32(np, "tx-delay", &delay_value)) { | ||
| 663 | cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value); | 675 | cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value); |
| 676 | tx_delay = delay_value > 0; | ||
| 677 | } | ||
| 678 | |||
| 679 | if (!rx_delay && !tx_delay) | ||
| 680 | priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; | ||
| 681 | else if (!rx_delay) | ||
| 682 | priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID; | ||
| 683 | else if (!tx_delay) | ||
| 684 | priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID; | ||
| 685 | else | ||
| 686 | priv->phy_mode = PHY_INTERFACE_MODE_RGMII; | ||
| 664 | } | 687 | } |
| 665 | 688 | ||
| 666 | static int cvm_oct_probe(struct platform_device *pdev) | 689 | static int cvm_oct_probe(struct platform_device *pdev) |
| @@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev) | |||
| 825 | priv->port = port; | 848 | priv->port = port; |
| 826 | priv->queue = cvmx_pko_get_base_queue(priv->port); | 849 | priv->queue = cvmx_pko_get_base_queue(priv->port); |
| 827 | priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; | 850 | priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; |
| 851 | priv->phy_mode = PHY_INTERFACE_MODE_NA; | ||
| 828 | for (qos = 0; qos < 16; qos++) | 852 | for (qos = 0; qos < 16; qos++) |
| 829 | skb_queue_head_init(&priv->tx_free_list[qos]); | 853 | skb_queue_head_init(&priv->tx_free_list[qos]); |
| 830 | for (qos = 0; qos < cvmx_pko_get_num_queues(port); | 854 | for (qos = 0; qos < cvmx_pko_get_num_queues(port); |
| @@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev) | |||
| 856 | break; | 880 | break; |
| 857 | 881 | ||
| 858 | case CVMX_HELPER_INTERFACE_MODE_SGMII: | 882 | case CVMX_HELPER_INTERFACE_MODE_SGMII: |
| 883 | priv->phy_mode = PHY_INTERFACE_MODE_SGMII; | ||
| 859 | dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; | 884 | dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; |
| 860 | strcpy(dev->name, "eth%d"); | 885 | strcpy(dev->name, "eth%d"); |
| 861 | break; | 886 | break; |
| @@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev) | |||
| 865 | strcpy(dev->name, "spi%d"); | 890 | strcpy(dev->name, "spi%d"); |
| 866 | break; | 891 | break; |
| 867 | 892 | ||
| 868 | case CVMX_HELPER_INTERFACE_MODE_RGMII: | ||
| 869 | case CVMX_HELPER_INTERFACE_MODE_GMII: | 893 | case CVMX_HELPER_INTERFACE_MODE_GMII: |
| 894 | priv->phy_mode = PHY_INTERFACE_MODE_GMII; | ||
| 895 | dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; | ||
| 896 | strcpy(dev->name, "eth%d"); | ||
| 897 | break; | ||
| 898 | |||
| 899 | case CVMX_HELPER_INTERFACE_MODE_RGMII: | ||
| 870 | dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; | 900 | dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; |
| 871 | strcpy(dev->name, "eth%d"); | 901 | strcpy(dev->name, "eth%d"); |
| 872 | cvm_set_rgmii_delay(priv->of_node, interface, | 902 | cvm_set_rgmii_delay(priv, interface, |
| 873 | port_index); | 903 | port_index); |
| 874 | break; | 904 | break; |
| 875 | } | 905 | } |
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index 4a07e7f43d12..be570d33685a 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | #define OCTEON_ETHERNET_H | 12 | #define OCTEON_ETHERNET_H |
| 13 | 13 | ||
| 14 | #include <linux/of.h> | 14 | #include <linux/of.h> |
| 15 | 15 | #include <linux/phy.h> | |
| 16 | #include <asm/octeon/cvmx-helper-board.h> | 16 | #include <asm/octeon/cvmx-helper-board.h> |
| 17 | 17 | ||
| 18 | /** | 18 | /** |
| @@ -33,6 +33,8 @@ struct octeon_ethernet { | |||
| 33 | * cvmx_helper_interface_mode_t | 33 | * cvmx_helper_interface_mode_t |
| 34 | */ | 34 | */ |
| 35 | int imode; | 35 | int imode; |
| 36 | /* PHY mode */ | ||
| 37 | phy_interface_t phy_mode; | ||
| 36 | /* List of outstanding tx buffers per queue */ | 38 | /* List of outstanding tx buffers per queue */ |
| 37 | struct sk_buff_head tx_free_list[16]; | 39 | struct sk_buff_head tx_free_list[16]; |
| 38 | unsigned int last_speed; | 40 | unsigned int last_speed; |
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c index 80b8d4153414..a54286498a47 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c +++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c | |||
| @@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon) | |||
| 45 | { | 45 | { |
| 46 | unsigned char lob; | 46 | unsigned char lob; |
| 47 | int ret, i; | 47 | int ret, i; |
| 48 | struct dcon_gpio *pin = &gpios_asis[0]; | 48 | const struct dcon_gpio *pin = &gpios_asis[0]; |
| 49 | 49 | ||
| 50 | for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) { | 50 | for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) { |
| 51 | gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name, | 51 | gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name, |
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c index 1723a47a96b4..952f2ab51347 100644 --- a/drivers/staging/rtl8188eu/core/rtw_xmit.c +++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c | |||
| @@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) | |||
| 174 | 174 | ||
| 175 | pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; | 175 | pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; |
| 176 | 176 | ||
| 177 | rtw_alloc_hwxmits(padapter); | 177 | res = rtw_alloc_hwxmits(padapter); |
| 178 | if (res == _FAIL) | ||
| 179 | goto exit; | ||
| 178 | rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); | 180 | rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); |
| 179 | 181 | ||
| 180 | for (i = 0; i < 4; i++) | 182 | for (i = 0; i < 4; i++) |
| @@ -1503,7 +1505,7 @@ exit: | |||
| 1503 | return res; | 1505 | return res; |
| 1504 | } | 1506 | } |
| 1505 | 1507 | ||
| 1506 | void rtw_alloc_hwxmits(struct adapter *padapter) | 1508 | s32 rtw_alloc_hwxmits(struct adapter *padapter) |
| 1507 | { | 1509 | { |
| 1508 | struct hw_xmit *hwxmits; | 1510 | struct hw_xmit *hwxmits; |
| 1509 | struct xmit_priv *pxmitpriv = &padapter->xmitpriv; | 1511 | struct xmit_priv *pxmitpriv = &padapter->xmitpriv; |
| @@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter) | |||
| 1512 | 1514 | ||
| 1513 | pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry, | 1515 | pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry, |
| 1514 | sizeof(struct hw_xmit), GFP_KERNEL); | 1516 | sizeof(struct hw_xmit), GFP_KERNEL); |
| 1517 | if (!pxmitpriv->hwxmits) | ||
| 1518 | return _FAIL; | ||
| 1515 | 1519 | ||
| 1516 | hwxmits = pxmitpriv->hwxmits; | 1520 | hwxmits = pxmitpriv->hwxmits; |
| 1517 | 1521 | ||
| @@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter) | |||
| 1519 | hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; | 1523 | hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; |
| 1520 | hwxmits[2] .sta_queue = &pxmitpriv->be_pending; | 1524 | hwxmits[2] .sta_queue = &pxmitpriv->be_pending; |
| 1521 | hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; | 1525 | hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; |
| 1526 | return _SUCCESS; | ||
| 1522 | } | 1527 | } |
| 1523 | 1528 | ||
| 1524 | void rtw_free_hwxmits(struct adapter *padapter) | 1529 | void rtw_free_hwxmits(struct adapter *padapter) |
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h index 788f59c74ea1..ba7e15fbde72 100644 --- a/drivers/staging/rtl8188eu/include/rtw_xmit.h +++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h | |||
| @@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, | |||
| 336 | void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry); | 336 | void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry); |
| 337 | s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); | 337 | s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); |
| 338 | void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); | 338 | void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); |
| 339 | void rtw_alloc_hwxmits(struct adapter *padapter); | 339 | s32 rtw_alloc_hwxmits(struct adapter *padapter); |
| 340 | void rtw_free_hwxmits(struct adapter *padapter); | 340 | void rtw_free_hwxmits(struct adapter *padapter); |
| 341 | s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt); | 341 | s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt); |
| 342 | 342 | ||
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c index 1920d02f7c9f..8c36acedf507 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.c +++ b/drivers/staging/rtl8712/rtl8712_cmd.c | |||
| @@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf) | |||
| 147 | 147 | ||
| 148 | static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) | 148 | static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) |
| 149 | { | 149 | { |
| 150 | u32 val; | ||
| 151 | void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); | ||
| 152 | struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; | 150 | struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; |
| 153 | 151 | ||
| 154 | if (pcmd->rsp && pcmd->rspsz > 0) | 152 | r8712_free_cmd_obj(pcmd); |
| 155 | memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz); | ||
| 156 | pcmd_callback = cmd_callback[pcmd->cmdcode].callback; | ||
| 157 | if (!pcmd_callback) | ||
| 158 | r8712_free_cmd_obj(pcmd); | ||
| 159 | else | ||
| 160 | pcmd_callback(padapter, pcmd); | ||
| 161 | return H2C_SUCCESS; | 153 | return H2C_SUCCESS; |
| 162 | } | 154 | } |
| 163 | 155 | ||
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h index 92fb77666d44..1ef86b8c592f 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.h +++ b/drivers/staging/rtl8712/rtl8712_cmd.h | |||
| @@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd { | |||
| 140 | static struct _cmd_callback cmd_callback[] = { | 140 | static struct _cmd_callback cmd_callback[] = { |
| 141 | {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ | 141 | {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ |
| 142 | {GEN_CMD_CODE(_Write_MACREG), NULL}, | 142 | {GEN_CMD_CODE(_Write_MACREG), NULL}, |
| 143 | {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback}, | 143 | {GEN_CMD_CODE(_Read_BBREG), NULL}, |
| 144 | {GEN_CMD_CODE(_Write_BBREG), NULL}, | 144 | {GEN_CMD_CODE(_Write_BBREG), NULL}, |
| 145 | {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback}, | 145 | {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback}, |
| 146 | {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/ | 146 | {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/ |
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index 094d61bcb469..b87f13a0b563 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c | |||
| @@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) | |||
| 260 | } | 260 | } |
| 261 | } | 261 | } |
| 262 | 262 | ||
| 263 | rtw_alloc_hwxmits(padapter); | 263 | res = rtw_alloc_hwxmits(padapter); |
| 264 | if (res == _FAIL) | ||
| 265 | goto exit; | ||
| 264 | rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); | 266 | rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); |
| 265 | 267 | ||
| 266 | for (i = 0; i < 4; i++) { | 268 | for (i = 0; i < 4; i++) { |
| @@ -2144,7 +2146,7 @@ exit: | |||
| 2144 | return res; | 2146 | return res; |
| 2145 | } | 2147 | } |
| 2146 | 2148 | ||
| 2147 | void rtw_alloc_hwxmits(struct adapter *padapter) | 2149 | s32 rtw_alloc_hwxmits(struct adapter *padapter) |
| 2148 | { | 2150 | { |
| 2149 | struct hw_xmit *hwxmits; | 2151 | struct hw_xmit *hwxmits; |
| 2150 | struct xmit_priv *pxmitpriv = &padapter->xmitpriv; | 2152 | struct xmit_priv *pxmitpriv = &padapter->xmitpriv; |
| @@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter) | |||
| 2155 | 2157 | ||
| 2156 | pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry); | 2158 | pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry); |
| 2157 | 2159 | ||
| 2158 | if (pxmitpriv->hwxmits == NULL) { | 2160 | if (!pxmitpriv->hwxmits) |
| 2159 | DBG_871X("alloc hwxmits fail!...\n"); | 2161 | return _FAIL; |
| 2160 | return; | ||
| 2161 | } | ||
| 2162 | 2162 | ||
| 2163 | hwxmits = pxmitpriv->hwxmits; | 2163 | hwxmits = pxmitpriv->hwxmits; |
| 2164 | 2164 | ||
| @@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter) | |||
| 2204 | 2204 | ||
| 2205 | } | 2205 | } |
| 2206 | 2206 | ||
| 2207 | 2207 | return _SUCCESS; | |
| 2208 | } | 2208 | } |
| 2209 | 2209 | ||
| 2210 | void rtw_free_hwxmits(struct adapter *padapter) | 2210 | void rtw_free_hwxmits(struct adapter *padapter) |
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h index 1b38b9182b31..37f42b2f22f1 100644 --- a/drivers/staging/rtl8723bs/include/rtw_xmit.h +++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h | |||
| @@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); | |||
| 487 | void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv); | 487 | void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv); |
| 488 | 488 | ||
| 489 | 489 | ||
| 490 | void rtw_alloc_hwxmits(struct adapter *padapter); | 490 | s32 rtw_alloc_hwxmits(struct adapter *padapter); |
| 491 | void rtw_free_hwxmits(struct adapter *padapter); | 491 | void rtw_free_hwxmits(struct adapter *padapter); |
| 492 | 492 | ||
| 493 | 493 | ||
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c index 9930ed954abb..4cc77b2016e1 100644 --- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c +++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c | |||
| @@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv, | |||
| 180 | 180 | ||
| 181 | rtlpriv->phydm.internal = | 181 | rtlpriv->phydm.internal = |
| 182 | kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL); | 182 | kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL); |
| 183 | if (!rtlpriv->phydm.internal) | ||
| 184 | return 0; | ||
| 183 | 185 | ||
| 184 | _rtl_phydm_init_com_info(rtlpriv, ic, params); | 186 | _rtl_phydm_init_com_info(rtlpriv, ic, params); |
| 185 | 187 | ||
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c index f061dd1382aa..cf6b7a80b753 100644 --- a/drivers/staging/rtlwifi/rtl8822be/fw.c +++ b/drivers/staging/rtlwifi/rtl8822be/fw.c | |||
| @@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) | |||
| 743 | u1_rsvd_page_loc, 3); | 743 | u1_rsvd_page_loc, 3); |
| 744 | 744 | ||
| 745 | skb = dev_alloc_skb(totalpacketlen); | 745 | skb = dev_alloc_skb(totalpacketlen); |
| 746 | if (!skb) | ||
| 747 | return; | ||
| 746 | memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, | 748 | memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, |
| 747 | totalpacketlen); | 749 | totalpacketlen); |
| 748 | 750 | ||
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index edff6ce85655..9d85a3a1af4c 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c | |||
| @@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, | |||
| 210 | return -EINVAL; | 210 | return -EINVAL; |
| 211 | 211 | ||
| 212 | spin_lock_irqsave(&speakup_info.spinlock, flags); | 212 | spin_lock_irqsave(&speakup_info.spinlock, flags); |
| 213 | synth_soft.alive = 1; | ||
| 213 | while (1) { | 214 | while (1) { |
| 214 | prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); | 215 | prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); |
| 215 | if (!unicode) | 216 | if (synth_current() == &synth_soft) { |
| 216 | synth_buffer_skip_nonlatin1(); | 217 | if (!unicode) |
| 217 | if (!synth_buffer_empty() || speakup_info.flushing) | 218 | synth_buffer_skip_nonlatin1(); |
| 218 | break; | 219 | if (!synth_buffer_empty() || speakup_info.flushing) |
| 220 | break; | ||
| 221 | } | ||
| 219 | spin_unlock_irqrestore(&speakup_info.spinlock, flags); | 222 | spin_unlock_irqrestore(&speakup_info.spinlock, flags); |
| 220 | if (fp->f_flags & O_NONBLOCK) { | 223 | if (fp->f_flags & O_NONBLOCK) { |
| 221 | finish_wait(&speakup_event, &wait); | 224 | finish_wait(&speakup_event, &wait); |
| @@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, | |||
| 235 | 238 | ||
| 236 | /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ | 239 | /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ |
| 237 | while (chars_sent <= count - bytes_per_ch) { | 240 | while (chars_sent <= count - bytes_per_ch) { |
| 241 | if (synth_current() != &synth_soft) | ||
| 242 | break; | ||
| 238 | if (speakup_info.flushing) { | 243 | if (speakup_info.flushing) { |
| 239 | speakup_info.flushing = 0; | 244 | speakup_info.flushing = 0; |
| 240 | ch = '\x18'; | 245 | ch = '\x18'; |
| @@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait) | |||
| 331 | poll_wait(fp, &speakup_event, wait); | 336 | poll_wait(fp, &speakup_event, wait); |
| 332 | 337 | ||
| 333 | spin_lock_irqsave(&speakup_info.spinlock, flags); | 338 | spin_lock_irqsave(&speakup_info.spinlock, flags); |
| 334 | if (!synth_buffer_empty() || speakup_info.flushing) | 339 | if (synth_current() == &synth_soft && |
| 340 | (!synth_buffer_empty() || speakup_info.flushing)) | ||
| 335 | ret = EPOLLIN | EPOLLRDNORM; | 341 | ret = EPOLLIN | EPOLLRDNORM; |
| 336 | spin_unlock_irqrestore(&speakup_info.spinlock, flags); | 342 | spin_unlock_irqrestore(&speakup_info.spinlock, flags); |
| 337 | return ret; | 343 | return ret; |
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h index c8e688878fc7..ac6a74883af4 100644 --- a/drivers/staging/speakup/spk_priv.h +++ b/drivers/staging/speakup/spk_priv.h | |||
| @@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n); | |||
| 74 | int synth_release_region(unsigned long start, unsigned long n); | 74 | int synth_release_region(unsigned long start, unsigned long n); |
| 75 | int synth_add(struct spk_synth *in_synth); | 75 | int synth_add(struct spk_synth *in_synth); |
| 76 | void synth_remove(struct spk_synth *in_synth); | 76 | void synth_remove(struct spk_synth *in_synth); |
| 77 | struct spk_synth *synth_current(void); | ||
| 77 | 78 | ||
| 78 | extern struct speakup_info_t speakup_info; | 79 | extern struct speakup_info_t speakup_info; |
| 79 | 80 | ||
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c index 25f259ee4ffc..3568bfb89912 100644 --- a/drivers/staging/speakup/synth.c +++ b/drivers/staging/speakup/synth.c | |||
| @@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth) | |||
| 481 | } | 481 | } |
| 482 | EXPORT_SYMBOL_GPL(synth_remove); | 482 | EXPORT_SYMBOL_GPL(synth_remove); |
| 483 | 483 | ||
| 484 | struct spk_synth *synth_current(void) | ||
| 485 | { | ||
| 486 | return synth; | ||
| 487 | } | ||
| 488 | EXPORT_SYMBOL_GPL(synth_current); | ||
| 489 | |||
| 484 | short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM }; | 490 | short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM }; |
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 804daf83be35..064d0db4c51e 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | |||
| @@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev) | |||
| 3513 | struct device_node *fw_node; | 3513 | struct device_node *fw_node; |
| 3514 | const struct of_device_id *of_id; | 3514 | const struct of_device_id *of_id; |
| 3515 | struct vchiq_drvdata *drvdata; | 3515 | struct vchiq_drvdata *drvdata; |
| 3516 | struct device *vchiq_dev; | ||
| 3516 | int err; | 3517 | int err; |
| 3517 | 3518 | ||
| 3518 | of_id = of_match_node(vchiq_of_match, pdev->dev.of_node); | 3519 | of_id = of_match_node(vchiq_of_match, pdev->dev.of_node); |
| @@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev) | |||
| 3547 | goto failed_platform_init; | 3548 | goto failed_platform_init; |
| 3548 | } | 3549 | } |
| 3549 | 3550 | ||
| 3550 | if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid, | 3551 | vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL, |
| 3551 | NULL, "vchiq"))) | 3552 | "vchiq"); |
| 3553 | if (IS_ERR(vchiq_dev)) { | ||
| 3554 | err = PTR_ERR(vchiq_dev); | ||
| 3552 | goto failed_device_create; | 3555 | goto failed_device_create; |
| 3556 | } | ||
| 3553 | 3557 | ||
| 3554 | vchiq_debugfs_init(); | 3558 | vchiq_debugfs_init(); |
| 3555 | 3559 | ||
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index b370985b58a1..c6bb4aaf9bd0 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c | |||
| @@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv) | |||
| 1033 | return; | 1033 | return; |
| 1034 | } | 1034 | } |
| 1035 | 1035 | ||
| 1036 | MACvIntDisable(priv->PortOffset); | ||
| 1037 | |||
| 1038 | spin_lock_irqsave(&priv->lock, flags); | 1036 | spin_lock_irqsave(&priv->lock, flags); |
| 1039 | 1037 | ||
| 1040 | /* Read low level stats */ | 1038 | /* Read low level stats */ |
| @@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv) | |||
| 1122 | } | 1120 | } |
| 1123 | 1121 | ||
| 1124 | spin_unlock_irqrestore(&priv->lock, flags); | 1122 | spin_unlock_irqrestore(&priv->lock, flags); |
| 1125 | |||
| 1126 | MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE); | ||
| 1127 | } | 1123 | } |
| 1128 | 1124 | ||
| 1129 | static void vnt_interrupt_work(struct work_struct *work) | 1125 | static void vnt_interrupt_work(struct work_struct *work) |
| @@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work) | |||
| 1133 | 1129 | ||
| 1134 | if (priv->vif) | 1130 | if (priv->vif) |
| 1135 | vnt_interrupt_process(priv); | 1131 | vnt_interrupt_process(priv); |
| 1132 | |||
| 1133 | MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE); | ||
| 1136 | } | 1134 | } |
| 1137 | 1135 | ||
| 1138 | static irqreturn_t vnt_interrupt(int irq, void *arg) | 1136 | static irqreturn_t vnt_interrupt(int irq, void *arg) |
| 1139 | { | 1137 | { |
| 1140 | struct vnt_private *priv = arg; | 1138 | struct vnt_private *priv = arg; |
| 1141 | 1139 | ||
| 1142 | if (priv->vif) | 1140 | schedule_work(&priv->interrupt_work); |
| 1143 | schedule_work(&priv->interrupt_work); | 1141 | |
| 1142 | MACvIntDisable(priv->PortOffset); | ||
| 1144 | 1143 | ||
| 1145 | return IRQ_HANDLED; | 1144 | return IRQ_HANDLED; |
| 1146 | } | 1145 | } |
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index db5df3d54818..3bdd56a1021b 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c | |||
| @@ -49,11 +49,6 @@ struct ar933x_uart_port { | |||
| 49 | struct clk *clk; | 49 | struct clk *clk; |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| 52 | static inline bool ar933x_uart_console_enabled(void) | ||
| 53 | { | ||
| 54 | return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE); | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, | 52 | static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, |
| 58 | int offset) | 53 | int offset) |
| 59 | { | 54 | { |
| @@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = { | |||
| 508 | .verify_port = ar933x_uart_verify_port, | 503 | .verify_port = ar933x_uart_verify_port, |
| 509 | }; | 504 | }; |
| 510 | 505 | ||
| 506 | #ifdef CONFIG_SERIAL_AR933X_CONSOLE | ||
| 511 | static struct ar933x_uart_port * | 507 | static struct ar933x_uart_port * |
| 512 | ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; | 508 | ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; |
| 513 | 509 | ||
| @@ -604,14 +600,7 @@ static struct console ar933x_uart_console = { | |||
| 604 | .index = -1, | 600 | .index = -1, |
| 605 | .data = &ar933x_uart_driver, | 601 | .data = &ar933x_uart_driver, |
| 606 | }; | 602 | }; |
| 607 | 603 | #endif /* CONFIG_SERIAL_AR933X_CONSOLE */ | |
| 608 | static void ar933x_uart_add_console_port(struct ar933x_uart_port *up) | ||
| 609 | { | ||
| 610 | if (!ar933x_uart_console_enabled()) | ||
| 611 | return; | ||
| 612 | |||
| 613 | ar933x_console_ports[up->port.line] = up; | ||
| 614 | } | ||
| 615 | 604 | ||
| 616 | static struct uart_driver ar933x_uart_driver = { | 605 | static struct uart_driver ar933x_uart_driver = { |
| 617 | .owner = THIS_MODULE, | 606 | .owner = THIS_MODULE, |
| @@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev) | |||
| 700 | baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); | 689 | baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); |
| 701 | up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); | 690 | up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); |
| 702 | 691 | ||
| 703 | ar933x_uart_add_console_port(up); | 692 | #ifdef CONFIG_SERIAL_AR933X_CONSOLE |
| 693 | ar933x_console_ports[up->port.line] = up; | ||
| 694 | #endif | ||
| 704 | 695 | ||
| 705 | ret = uart_add_one_port(&ar933x_uart_driver, &up->port); | 696 | ret = uart_add_one_port(&ar933x_uart_driver, &up->port); |
| 706 | if (ret) | 697 | if (ret) |
| @@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void) | |||
| 749 | { | 740 | { |
| 750 | int ret; | 741 | int ret; |
| 751 | 742 | ||
| 752 | if (ar933x_uart_console_enabled()) | 743 | #ifdef CONFIG_SERIAL_AR933X_CONSOLE |
| 753 | ar933x_uart_driver.cons = &ar933x_uart_console; | 744 | ar933x_uart_driver.cons = &ar933x_uart_console; |
| 745 | #endif | ||
| 754 | 746 | ||
| 755 | ret = uart_register_driver(&ar933x_uart_driver); | 747 | ret = uart_register_driver(&ar933x_uart_driver); |
| 756 | if (ret) | 748 | if (ret) |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 05147fe24343..0b4f36905321 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
| @@ -166,6 +166,8 @@ struct atmel_uart_port { | |||
| 166 | unsigned int pending_status; | 166 | unsigned int pending_status; |
| 167 | spinlock_t lock_suspended; | 167 | spinlock_t lock_suspended; |
| 168 | 168 | ||
| 169 | bool hd_start_rx; /* can start RX during half-duplex operation */ | ||
| 170 | |||
| 169 | /* ISO7816 */ | 171 | /* ISO7816 */ |
| 170 | unsigned int fidi_min; | 172 | unsigned int fidi_min; |
| 171 | unsigned int fidi_max; | 173 | unsigned int fidi_max; |
| @@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value) | |||
| 231 | __raw_writeb(value, port->membase + ATMEL_US_THR); | 233 | __raw_writeb(value, port->membase + ATMEL_US_THR); |
| 232 | } | 234 | } |
| 233 | 235 | ||
| 236 | static inline int atmel_uart_is_half_duplex(struct uart_port *port) | ||
| 237 | { | ||
| 238 | return ((port->rs485.flags & SER_RS485_ENABLED) && | ||
| 239 | !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || | ||
| 240 | (port->iso7816.flags & SER_ISO7816_ENABLED); | ||
| 241 | } | ||
| 242 | |||
| 234 | #ifdef CONFIG_SERIAL_ATMEL_PDC | 243 | #ifdef CONFIG_SERIAL_ATMEL_PDC |
| 235 | static bool atmel_use_pdc_rx(struct uart_port *port) | 244 | static bool atmel_use_pdc_rx(struct uart_port *port) |
| 236 | { | 245 | { |
| @@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port) | |||
| 608 | /* Disable interrupts */ | 617 | /* Disable interrupts */ |
| 609 | atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); | 618 | atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); |
| 610 | 619 | ||
| 611 | if (((port->rs485.flags & SER_RS485_ENABLED) && | 620 | if (atmel_uart_is_half_duplex(port)) |
| 612 | !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || | ||
| 613 | port->iso7816.flags & SER_ISO7816_ENABLED) | ||
| 614 | atmel_start_rx(port); | 621 | atmel_start_rx(port); |
| 622 | |||
| 615 | } | 623 | } |
| 616 | 624 | ||
| 617 | /* | 625 | /* |
| @@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port) | |||
| 628 | return; | 636 | return; |
| 629 | 637 | ||
| 630 | if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) | 638 | if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) |
| 631 | if (((port->rs485.flags & SER_RS485_ENABLED) && | 639 | if (atmel_uart_is_half_duplex(port)) |
| 632 | !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || | ||
| 633 | port->iso7816.flags & SER_ISO7816_ENABLED) | ||
| 634 | atmel_stop_rx(port); | 640 | atmel_stop_rx(port); |
| 635 | 641 | ||
| 636 | if (atmel_use_pdc_tx(port)) | 642 | if (atmel_use_pdc_tx(port)) |
| @@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg) | |||
| 928 | */ | 934 | */ |
| 929 | if (!uart_circ_empty(xmit)) | 935 | if (!uart_circ_empty(xmit)) |
| 930 | atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); | 936 | atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); |
| 931 | else if (((port->rs485.flags & SER_RS485_ENABLED) && | 937 | else if (atmel_uart_is_half_duplex(port)) { |
| 932 | !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || | 938 | /* |
| 933 | port->iso7816.flags & SER_ISO7816_ENABLED) { | 939 | * DMA done, re-enable TXEMPTY and signal that we can stop |
| 934 | /* DMA done, stop TX, start RX for RS485 */ | 940 | * TX and start RX for RS485 |
| 935 | atmel_start_rx(port); | 941 | */ |
| 942 | atmel_port->hd_start_rx = true; | ||
| 943 | atmel_uart_writel(port, ATMEL_US_IER, | ||
| 944 | atmel_port->tx_done_mask); | ||
| 936 | } | 945 | } |
| 937 | 946 | ||
| 938 | spin_unlock_irqrestore(&port->lock, flags); | 947 | spin_unlock_irqrestore(&port->lock, flags); |
| @@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port) | |||
| 1288 | sg_dma_len(&atmel_port->sg_rx)/2, | 1297 | sg_dma_len(&atmel_port->sg_rx)/2, |
| 1289 | DMA_DEV_TO_MEM, | 1298 | DMA_DEV_TO_MEM, |
| 1290 | DMA_PREP_INTERRUPT); | 1299 | DMA_PREP_INTERRUPT); |
| 1300 | if (!desc) { | ||
| 1301 | dev_err(port->dev, "Preparing DMA cyclic failed\n"); | ||
| 1302 | goto chan_err; | ||
| 1303 | } | ||
| 1291 | desc->callback = atmel_complete_rx_dma; | 1304 | desc->callback = atmel_complete_rx_dma; |
| 1292 | desc->callback_param = port; | 1305 | desc->callback_param = port; |
| 1293 | atmel_port->desc_rx = desc; | 1306 | atmel_port->desc_rx = desc; |
| @@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending) | |||
| 1376 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | 1389 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
| 1377 | 1390 | ||
| 1378 | if (pending & atmel_port->tx_done_mask) { | 1391 | if (pending & atmel_port->tx_done_mask) { |
| 1379 | /* Either PDC or interrupt transmission */ | ||
| 1380 | atmel_uart_writel(port, ATMEL_US_IDR, | 1392 | atmel_uart_writel(port, ATMEL_US_IDR, |
| 1381 | atmel_port->tx_done_mask); | 1393 | atmel_port->tx_done_mask); |
| 1394 | |||
| 1395 | /* Start RX if flag was set and FIFO is empty */ | ||
| 1396 | if (atmel_port->hd_start_rx) { | ||
| 1397 | if (!(atmel_uart_readl(port, ATMEL_US_CSR) | ||
| 1398 | & ATMEL_US_TXEMPTY)) | ||
| 1399 | dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n"); | ||
| 1400 | |||
| 1401 | atmel_port->hd_start_rx = false; | ||
| 1402 | atmel_start_rx(port); | ||
| 1403 | return; | ||
| 1404 | } | ||
| 1405 | |||
| 1382 | atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); | 1406 | atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); |
| 1383 | } | 1407 | } |
| 1384 | } | 1408 | } |
| @@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port) | |||
| 1508 | atmel_uart_writel(port, ATMEL_US_IER, | 1532 | atmel_uart_writel(port, ATMEL_US_IER, |
| 1509 | atmel_port->tx_done_mask); | 1533 | atmel_port->tx_done_mask); |
| 1510 | } else { | 1534 | } else { |
| 1511 | if (((port->rs485.flags & SER_RS485_ENABLED) && | 1535 | if (atmel_uart_is_half_duplex(port)) { |
| 1512 | !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || | ||
| 1513 | port->iso7816.flags & SER_ISO7816_ENABLED) { | ||
| 1514 | /* DMA done, stop TX, start RX for RS485 */ | 1536 | /* DMA done, stop TX, start RX for RS485 */ |
| 1515 | atmel_start_rx(port); | 1537 | atmel_start_rx(port); |
| 1516 | } | 1538 | } |
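The atmel_serial hunks above factor the repeated RS485/ISO7816 condition into a single atmel_uart_is_half_duplex() predicate and defer the TX-to-RX turnaround until the TXEMPTY interrupt fires. As a minimal, self-contained sketch of the predicate-extraction part (hypothetical flag names standing in for SER_RS485_* / SER_ISO7816_*, not the driver's real registers):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical flag bits, only for illustration */
    #define FLG_RS485_ENABLED        (1u << 0)
    #define FLG_RS485_RX_DURING_TX   (1u << 1)
    #define FLG_ISO7816_ENABLED      (1u << 0)

    struct port_cfg {
            unsigned int rs485_flags;
            unsigned int iso7816_flags;
    };

    /* One predicate instead of the same three-line condition in four call sites */
    static bool port_is_half_duplex(const struct port_cfg *p)
    {
            return ((p->rs485_flags & FLG_RS485_ENABLED) &&
                    !(p->rs485_flags & FLG_RS485_RX_DURING_TX)) ||
                   (p->iso7816_flags & FLG_ISO7816_ENABLED);
    }

    int main(void)
    {
            struct port_cfg p = { .rs485_flags = FLG_RS485_ENABLED };

            if (port_is_half_duplex(&p))
                    puts("half duplex: keep RX off while TX is active");
            return 0;
    }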
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index 6fb312e7af71..bfe5e9e034ec 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c | |||
| @@ -148,8 +148,10 @@ static int configure_kgdboc(void) | |||
| 148 | char *cptr = config; | 148 | char *cptr = config; |
| 149 | struct console *cons; | 149 | struct console *cons; |
| 150 | 150 | ||
| 151 | if (!strlen(config) || isspace(config[0])) | 151 | if (!strlen(config) || isspace(config[0])) { |
| 152 | err = 0; | ||
| 152 | goto noconfig; | 153 | goto noconfig; |
| 154 | } | ||
| 153 | 155 | ||
| 154 | kgdboc_io_ops.is_console = 0; | 156 | kgdboc_io_ops.is_console = 0; |
| 155 | kgdb_tty_driver = NULL; | 157 | kgdb_tty_driver = NULL; |
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index f5bdde405627..450ba6d7996c 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c | |||
| @@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi) | |||
| 1415 | if (spi->dev.of_node) { | 1415 | if (spi->dev.of_node) { |
| 1416 | const struct of_device_id *of_id = | 1416 | const struct of_device_id *of_id = |
| 1417 | of_match_device(max310x_dt_ids, &spi->dev); | 1417 | of_match_device(max310x_dt_ids, &spi->dev); |
| 1418 | if (!of_id) | ||
| 1419 | return -ENODEV; | ||
| 1418 | 1420 | ||
| 1419 | devtype = (struct max310x_devtype *)of_id->data; | 1421 | devtype = (struct max310x_devtype *)of_id->data; |
| 1420 | } else { | 1422 | } else { |
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 231f751d1ef4..7e7b1559fa36 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c | |||
| @@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev) | |||
| 810 | return -EINVAL; | 810 | return -EINVAL; |
| 811 | } | 811 | } |
| 812 | 812 | ||
| 813 | if (!match) | ||
| 814 | return -ENODEV; | ||
| 815 | |||
| 813 | /* Assume that all UART ports have a DT alias or none has */ | 816 | /* Assume that all UART ports have a DT alias or none has */ |
| 814 | id = of_alias_get_id(pdev->dev.of_node, "serial"); | 817 | id = of_alias_get_id(pdev->dev.of_node, "serial"); |
| 815 | if (!pdev->dev.of_node || id < 0) | 818 | if (!pdev->dev.of_node || id < 0) |
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 27235a526cce..4c188f4079b3 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c | |||
| @@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev) | |||
| 1686 | 1686 | ||
| 1687 | s->port.mapbase = r->start; | 1687 | s->port.mapbase = r->start; |
| 1688 | s->port.membase = ioremap(r->start, resource_size(r)); | 1688 | s->port.membase = ioremap(r->start, resource_size(r)); |
| 1689 | if (!s->port.membase) { | ||
| 1690 | ret = -ENOMEM; | ||
| 1691 | goto out_disable_clks; | ||
| 1692 | } | ||
| 1689 | s->port.ops = &mxs_auart_ops; | 1693 | s->port.ops = &mxs_auart_ops; |
| 1690 | s->port.iotype = UPIO_MEM; | 1694 | s->port.iotype = UPIO_MEM; |
| 1691 | s->port.fifosize = MXS_AUART_FIFO_SIZE; | 1695 | s->port.fifosize = MXS_AUART_FIFO_SIZE; |
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 3bcec1c20219..35e5f9c5d5be 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c | |||
| @@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options) | |||
| 1050 | { | 1050 | { |
| 1051 | struct uart_port *uport; | 1051 | struct uart_port *uport; |
| 1052 | struct qcom_geni_serial_port *port; | 1052 | struct qcom_geni_serial_port *port; |
| 1053 | int baud; | 1053 | int baud = 9600; |
| 1054 | int bits = 8; | 1054 | int bits = 8; |
| 1055 | int parity = 'n'; | 1055 | int parity = 'n'; |
| 1056 | int flow = 'n'; | 1056 | int flow = 'n'; |
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 635178cf3eed..09a183dfc526 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c | |||
| @@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void) | |||
| 1507 | ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver); | 1507 | ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver); |
| 1508 | if (ret < 0) { | 1508 | if (ret < 0) { |
| 1509 | pr_err("failed to init sc16is7xx i2c --> %d\n", ret); | 1509 | pr_err("failed to init sc16is7xx i2c --> %d\n", ret); |
| 1510 | return ret; | 1510 | goto err_i2c; |
| 1511 | } | 1511 | } |
| 1512 | #endif | 1512 | #endif |
| 1513 | 1513 | ||
| @@ -1515,10 +1515,18 @@ static int __init sc16is7xx_init(void) | |||
| 1515 | ret = spi_register_driver(&sc16is7xx_spi_uart_driver); | 1515 | ret = spi_register_driver(&sc16is7xx_spi_uart_driver); |
| 1516 | if (ret < 0) { | 1516 | if (ret < 0) { |
| 1517 | pr_err("failed to init sc16is7xx spi --> %d\n", ret); | 1517 | pr_err("failed to init sc16is7xx spi --> %d\n", ret); |
| 1518 | return ret; | 1518 | goto err_spi; |
| 1519 | } | 1519 | } |
| 1520 | #endif | 1520 | #endif |
| 1521 | return ret; | 1521 | return ret; |
| 1522 | |||
| 1523 | err_spi: | ||
| 1524 | #ifdef CONFIG_SERIAL_SC16IS7XX_I2C | ||
| 1525 | i2c_del_driver(&sc16is7xx_i2c_uart_driver); | ||
| 1526 | #endif | ||
| 1527 | err_i2c: | ||
| 1528 | uart_unregister_driver(&sc16is7xx_uart); | ||
| 1529 | return ret; | ||
| 1522 | } | 1530 | } |
| 1523 | module_init(sc16is7xx_init); | 1531 | module_init(sc16is7xx_init); |
| 1524 | 1532 | ||
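The sc16is7xx fix above converts early returns in the module init path into a goto-based unwind, so a failing SPI registration also unregisters the already-added I2C driver and the UART core driver. A generic sketch of that unwind shape, with made-up register/unregister helpers rather than the kernel APIs:

    #include <stdio.h>

    /* Stand-ins for uart_register_driver()/i2c_add_driver()/spi_register_driver() */
    static int register_core(void)  { puts("core registered");  return 0; }
    static int register_i2c(void)   { puts("i2c registered");   return 0; }
    static int register_spi(void)   { puts("spi registered");   return -1; /* simulate failure */ }
    static void unregister_i2c(void)  { puts("i2c unregistered"); }
    static void unregister_core(void) { puts("core unregistered"); }

    static int demo_init(void)
    {
            int ret;

            ret = register_core();
            if (ret)
                    return ret;

            ret = register_i2c();
            if (ret)
                    goto err_i2c;          /* only the core needs unwinding */

            ret = register_spi();
            if (ret)
                    goto err_spi;          /* unwind i2c, then core */

            return 0;

    err_spi:
            unregister_i2c();
    err_i2c:
            unregister_core();
            return ret;
    }

    int main(void)
    {
            return demo_init() ? 1 : 0;
    }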
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 060fcd42b6d5..2d1c626312cd 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
| @@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port) | |||
| 838 | 838 | ||
| 839 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 839 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
| 840 | uart_write_wakeup(port); | 840 | uart_write_wakeup(port); |
| 841 | if (uart_circ_empty(xmit)) { | 841 | if (uart_circ_empty(xmit)) |
| 842 | sci_stop_tx(port); | 842 | sci_stop_tx(port); |
| 843 | } else { | ||
| 844 | ctrl = serial_port_in(port, SCSCR); | ||
| 845 | |||
| 846 | if (port->type != PORT_SCI) { | ||
| 847 | serial_port_in(port, SCxSR); /* Dummy read */ | ||
| 848 | sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port)); | ||
| 849 | } | ||
| 850 | 843 | ||
| 851 | ctrl |= SCSCR_TIE; | ||
| 852 | serial_port_out(port, SCSCR, ctrl); | ||
| 853 | } | ||
| 854 | } | 844 | } |
| 855 | 845 | ||
| 856 | /* On SH3, SCIF may read end-of-break as a space->mark char */ | 846 | /* On SH3, SCIF may read end-of-break as a space->mark char */ |
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 044c3cbdcfa4..a9e12b3bc31d 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c | |||
| @@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty) | |||
| 325 | if (tty && C_HUPCL(tty)) | 325 | if (tty && C_HUPCL(tty)) |
| 326 | tty_port_lower_dtr_rts(port); | 326 | tty_port_lower_dtr_rts(port); |
| 327 | 327 | ||
| 328 | if (port->ops->shutdown) | 328 | if (port->ops && port->ops->shutdown) |
| 329 | port->ops->shutdown(port); | 329 | port->ops->shutdown(port); |
| 330 | } | 330 | } |
| 331 | out: | 331 | out: |
| @@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup); | |||
| 398 | */ | 398 | */ |
| 399 | int tty_port_carrier_raised(struct tty_port *port) | 399 | int tty_port_carrier_raised(struct tty_port *port) |
| 400 | { | 400 | { |
| 401 | if (port->ops->carrier_raised == NULL) | 401 | if (!port->ops || !port->ops->carrier_raised) |
| 402 | return 1; | 402 | return 1; |
| 403 | return port->ops->carrier_raised(port); | 403 | return port->ops->carrier_raised(port); |
| 404 | } | 404 | } |
| @@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised); | |||
| 414 | */ | 414 | */ |
| 415 | void tty_port_raise_dtr_rts(struct tty_port *port) | 415 | void tty_port_raise_dtr_rts(struct tty_port *port) |
| 416 | { | 416 | { |
| 417 | if (port->ops->dtr_rts) | 417 | if (port->ops && port->ops->dtr_rts) |
| 418 | port->ops->dtr_rts(port, 1); | 418 | port->ops->dtr_rts(port, 1); |
| 419 | } | 419 | } |
| 420 | EXPORT_SYMBOL(tty_port_raise_dtr_rts); | 420 | EXPORT_SYMBOL(tty_port_raise_dtr_rts); |
| @@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts); | |||
| 429 | */ | 429 | */ |
| 430 | void tty_port_lower_dtr_rts(struct tty_port *port) | 430 | void tty_port_lower_dtr_rts(struct tty_port *port) |
| 431 | { | 431 | { |
| 432 | if (port->ops->dtr_rts) | 432 | if (port->ops && port->ops->dtr_rts) |
| 433 | port->ops->dtr_rts(port, 0); | 433 | port->ops->dtr_rts(port, 0); |
| 434 | } | 434 | } |
| 435 | EXPORT_SYMBOL(tty_port_lower_dtr_rts); | 435 | EXPORT_SYMBOL(tty_port_lower_dtr_rts); |
| @@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty, | |||
| 684 | 684 | ||
| 685 | if (!tty_port_initialized(port)) { | 685 | if (!tty_port_initialized(port)) { |
| 686 | clear_bit(TTY_IO_ERROR, &tty->flags); | 686 | clear_bit(TTY_IO_ERROR, &tty->flags); |
| 687 | if (port->ops->activate) { | 687 | if (port->ops && port->ops->activate) { |
| 688 | int retval = port->ops->activate(port, tty); | 688 | int retval = port->ops->activate(port, tty); |
| 689 | if (retval) { | 689 | if (retval) { |
| 690 | mutex_unlock(&port->mutex); | 690 | mutex_unlock(&port->mutex); |
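The tty_port changes above guard every optional callback with a check on port->ops itself, since a port can legitimately be registered without an ops table. A small sketch of the same "optional ops table" pattern, using invented callback names rather than the real tty_port_operations:

    #include <stdio.h>

    struct demo_port;

    /* Optional operations table; the table pointer or any hook may be NULL */
    struct demo_port_ops {
            void (*shutdown)(struct demo_port *port);
            int  (*carrier_raised)(struct demo_port *port);
    };

    struct demo_port {
            const struct demo_port_ops *ops;
    };

    static int demo_carrier_raised(struct demo_port *port)
    {
            /* Treat a missing table or missing hook as "carrier always present" */
            if (!port->ops || !port->ops->carrier_raised)
                    return 1;
            return port->ops->carrier_raised(port);
    }

    static void demo_shutdown(struct demo_port *port)
    {
            if (port->ops && port->ops->shutdown)
                    port->ops->shutdown(port);
    }

    int main(void)
    {
            struct demo_port port = { .ops = NULL };

            printf("carrier: %d\n", demo_carrier_raised(&port));
            demo_shutdown(&port);   /* safe no-op */
            return 0;
    }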
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 739f8960811a..ec666eb4b7b4 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
| @@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work) | |||
| 558 | clear_bit(EVENT_RX_STALL, &acm->flags); | 558 | clear_bit(EVENT_RX_STALL, &acm->flags); |
| 559 | } | 559 | } |
| 560 | 560 | ||
| 561 | if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) { | 561 | if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) |
| 562 | tty_port_tty_wakeup(&acm->port); | 562 | tty_port_tty_wakeup(&acm->port); |
| 563 | clear_bit(EVENT_TTY_WAKEUP, &acm->flags); | ||
| 564 | } | ||
| 565 | } | 563 | } |
| 566 | 564 | ||
| 567 | /* | 565 | /* |
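The cdc-acm change above replaces a test_bit()/clear_bit() pair with test_and_clear_bit(), so checking and consuming the wakeup flag is one atomic step and a wakeup raised between the two calls cannot be lost. A rough userspace analogue under C11 atomics, with atomic_fetch_and playing the read-and-clear role:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define EVENT_WAKEUP (1u << 0)

    static atomic_uint flags;

    /* Raised from another context (completion handler in the driver) */
    static void raise_wakeup(void)
    {
            atomic_fetch_or(&flags, EVENT_WAKEUP);
    }

    /*
     * Atomically fetch the old value and clear the bit: if the event was set
     * at any point before this call, exactly one consumer observes it as set.
     */
    static bool consume_wakeup(void)
    {
            unsigned int old = atomic_fetch_and(&flags, ~EVENT_WAKEUP);

            return old & EVENT_WAKEUP;
    }

    int main(void)
    {
            raise_wakeup();
            printf("first consume: %d\n", consume_wakeup());   /* 1 */
            printf("second consume: %d\n", consume_wakeup());  /* 0 */
            return 0;
    }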
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 48277bbc15e4..73c8e6591746 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c | |||
| @@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) | |||
| 145 | 145 | ||
| 146 | do { | 146 | do { |
| 147 | controller = of_find_node_with_property(controller, "phys"); | 147 | controller = of_find_node_with_property(controller, "phys"); |
| 148 | if (!of_device_is_available(controller)) | ||
| 149 | continue; | ||
| 148 | index = 0; | 150 | index = 0; |
| 149 | do { | 151 | do { |
| 150 | if (arg0 == -1) { | 152 | if (arg0 == -1) { |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 3189181bb628..975d7c1288e3 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
| @@ -2742,6 +2742,9 @@ int usb_add_hcd(struct usb_hcd *hcd, | |||
| 2742 | retval = usb_phy_roothub_set_mode(hcd->phy_roothub, | 2742 | retval = usb_phy_roothub_set_mode(hcd->phy_roothub, |
| 2743 | PHY_MODE_USB_HOST_SS); | 2743 | PHY_MODE_USB_HOST_SS); |
| 2744 | if (retval) | 2744 | if (retval) |
| 2745 | retval = usb_phy_roothub_set_mode(hcd->phy_roothub, | ||
| 2746 | PHY_MODE_USB_HOST); | ||
| 2747 | if (retval) | ||
| 2745 | goto err_usb_phy_roothub_power_on; | 2748 | goto err_usb_phy_roothub_power_on; |
| 2746 | 2749 | ||
| 2747 | retval = usb_phy_roothub_power_on(hcd->phy_roothub); | 2750 | retval = usb_phy_roothub_power_on(hcd->phy_roothub); |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index fdc6e4e403e8..8cced3609e24 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa | 29 | #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa |
| 30 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa | 30 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa |
| 31 | #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 | 31 | #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 |
| 32 | #define PCI_DEVICE_ID_INTEL_CMLH 0x02ee | ||
| 32 | #define PCI_DEVICE_ID_INTEL_GLK 0x31aa | 33 | #define PCI_DEVICE_ID_INTEL_GLK 0x31aa |
| 33 | #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee | 34 | #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee |
| 34 | #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e | 35 | #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e |
| @@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = { | |||
| 305 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), | 306 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), |
| 306 | (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, | 307 | (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, |
| 307 | 308 | ||
| 309 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH), | ||
| 310 | (kernel_ulong_t) &dwc3_pci_intel_properties, }, | ||
| 311 | |||
| 308 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP), | 312 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP), |
| 309 | (kernel_ulong_t) &dwc3_pci_intel_properties, }, | 313 | (kernel_ulong_t) &dwc3_pci_intel_properties, }, |
| 310 | 314 | ||
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 75b113a5b25c..f3816a5c861e 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c | |||
| @@ -391,20 +391,20 @@ try_again: | |||
| 391 | req->complete = f_hidg_req_complete; | 391 | req->complete = f_hidg_req_complete; |
| 392 | req->context = hidg; | 392 | req->context = hidg; |
| 393 | 393 | ||
| 394 | spin_unlock_irqrestore(&hidg->write_spinlock, flags); | ||
| 395 | |||
| 394 | status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); | 396 | status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); |
| 395 | if (status < 0) { | 397 | if (status < 0) { |
| 396 | ERROR(hidg->func.config->cdev, | 398 | ERROR(hidg->func.config->cdev, |
| 397 | "usb_ep_queue error on int endpoint %zd\n", status); | 399 | "usb_ep_queue error on int endpoint %zd\n", status); |
| 398 | goto release_write_pending_unlocked; | 400 | goto release_write_pending; |
| 399 | } else { | 401 | } else { |
| 400 | status = count; | 402 | status = count; |
| 401 | } | 403 | } |
| 402 | spin_unlock_irqrestore(&hidg->write_spinlock, flags); | ||
| 403 | 404 | ||
| 404 | return status; | 405 | return status; |
| 405 | release_write_pending: | 406 | release_write_pending: |
| 406 | spin_lock_irqsave(&hidg->write_spinlock, flags); | 407 | spin_lock_irqsave(&hidg->write_spinlock, flags); |
| 407 | release_write_pending_unlocked: | ||
| 408 | hidg->write_pending = 0; | 408 | hidg->write_pending = 0; |
| 409 | spin_unlock_irqrestore(&hidg->write_spinlock, flags); | 409 | spin_unlock_irqrestore(&hidg->write_spinlock, flags); |
| 410 | 410 | ||
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index b77f3126580e..c2011cd7df8c 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c | |||
| @@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req) | |||
| 945 | break; | 945 | break; |
| 946 | } | 946 | } |
| 947 | if (&req->req != _req) { | 947 | if (&req->req != _req) { |
| 948 | ep->stopped = stopped; | ||
| 948 | spin_unlock_irqrestore(&ep->dev->lock, flags); | 949 | spin_unlock_irqrestore(&ep->dev->lock, flags); |
| 949 | return -EINVAL; | 950 | return -EINVAL; |
| 950 | } | 951 | } |
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index f63f82450bf4..898339e5df10 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c | |||
| @@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma) | |||
| 866 | (void) readl(&ep->dev->pci->pcimstctl); | 866 | (void) readl(&ep->dev->pci->pcimstctl); |
| 867 | 867 | ||
| 868 | writel(BIT(DMA_START), &dma->dmastat); | 868 | writel(BIT(DMA_START), &dma->dmastat); |
| 869 | |||
| 870 | if (!ep->is_in) | ||
| 871 | stop_out_naking(ep); | ||
| 872 | } | 869 | } |
| 873 | 870 | ||
| 874 | static void start_dma(struct net2280_ep *ep, struct net2280_request *req) | 871 | static void start_dma(struct net2280_ep *ep, struct net2280_request *req) |
| @@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req) | |||
| 907 | writel(BIT(DMA_START), &dma->dmastat); | 904 | writel(BIT(DMA_START), &dma->dmastat); |
| 908 | return; | 905 | return; |
| 909 | } | 906 | } |
| 907 | stop_out_naking(ep); | ||
| 910 | } | 908 | } |
| 911 | 909 | ||
| 912 | tmp = dmactl_default; | 910 | tmp = dmactl_default; |
| @@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req) | |||
| 1275 | break; | 1273 | break; |
| 1276 | } | 1274 | } |
| 1277 | if (&req->req != _req) { | 1275 | if (&req->req != _req) { |
| 1276 | ep->stopped = stopped; | ||
| 1278 | spin_unlock_irqrestore(&ep->dev->lock, flags); | 1277 | spin_unlock_irqrestore(&ep->dev->lock, flags); |
| 1279 | dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n", | 1278 | ep_dbg(ep->dev, "%s: Request mismatch\n", __func__); |
| 1280 | __func__); | ||
| 1281 | return -EINVAL; | 1279 | return -EINVAL; |
| 1282 | } | 1280 | } |
| 1283 | 1281 | ||
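Both net2272 and net2280 now restore the saved ep->stopped value before the early "request mismatch" return, so a failed dequeue no longer leaves the endpoint permanently marked stopped. A tiny sketch of that save/restore-on-error shape with simplified, hypothetical types:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_ep {
            bool stopped;
            int pending_req;        /* stand-in for the request list */
    };

    static int demo_dequeue(struct demo_ep *ep, int req)
    {
            bool stopped = ep->stopped;     /* save current state */

            ep->stopped = true;             /* quiesce the endpoint while searching */

            if (ep->pending_req != req) {
                    ep->stopped = stopped;  /* error path must undo the change */
                    return -1;
            }

            ep->pending_req = 0;
            ep->stopped = stopped;          /* normal path restores it as well */
            return 0;
    }

    int main(void)
    {
            struct demo_ep ep = { .stopped = false, .pending_req = 42 };

            printf("mismatch: %d, stopped=%d\n", demo_dequeue(&ep, 7), ep.stopped);
            printf("match:    %d, stopped=%d\n", demo_dequeue(&ep, 42), ep.stopped);
            return 0;
    }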
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index 934584f0a20a..6343fbacd244 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c | |||
| @@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void) | |||
| 3204 | printk(KERN_INFO "driver %s\n", hcd_name); | 3204 | printk(KERN_INFO "driver %s\n", hcd_name); |
| 3205 | workqueue = create_singlethread_workqueue("u132"); | 3205 | workqueue = create_singlethread_workqueue("u132"); |
| 3206 | retval = platform_driver_register(&u132_platform_driver); | 3206 | retval = platform_driver_register(&u132_platform_driver); |
| 3207 | if (retval) | ||
| 3208 | destroy_workqueue(workqueue); | ||
| 3209 | |||
| 3207 | return retval; | 3210 | return retval; |
| 3208 | } | 3211 | } |
| 3209 | 3212 | ||
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index c78be578abb0..d932cc31711e 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c | |||
| @@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci) | |||
| 516 | return -1; | 516 | return -1; |
| 517 | 517 | ||
| 518 | writel(0, &dbc->regs->control); | 518 | writel(0, &dbc->regs->control); |
| 519 | xhci_dbc_mem_cleanup(xhci); | ||
| 520 | dbc->state = DS_DISABLED; | 519 | dbc->state = DS_DISABLED; |
| 521 | 520 | ||
| 522 | return 0; | 521 | return 0; |
| @@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci) | |||
| 562 | ret = xhci_do_dbc_stop(xhci); | 561 | ret = xhci_do_dbc_stop(xhci); |
| 563 | spin_unlock_irqrestore(&dbc->lock, flags); | 562 | spin_unlock_irqrestore(&dbc->lock, flags); |
| 564 | 563 | ||
| 565 | if (!ret) | 564 | if (!ret) { |
| 565 | xhci_dbc_mem_cleanup(xhci); | ||
| 566 | pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); | 566 | pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); |
| 567 | } | ||
| 567 | } | 568 | } |
| 568 | 569 | ||
| 569 | static void | 570 | static void |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e2eece693655..96a740543183 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
| @@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
| 1545 | port_index = max_ports; | 1545 | port_index = max_ports; |
| 1546 | while (port_index--) { | 1546 | while (port_index--) { |
| 1547 | u32 t1, t2; | 1547 | u32 t1, t2; |
| 1548 | 1548 | int retries = 10; | |
| 1549 | retry: | ||
| 1549 | t1 = readl(ports[port_index]->addr); | 1550 | t1 = readl(ports[port_index]->addr); |
| 1550 | t2 = xhci_port_state_to_neutral(t1); | 1551 | t2 = xhci_port_state_to_neutral(t1); |
| 1551 | portsc_buf[port_index] = 0; | 1552 | portsc_buf[port_index] = 0; |
| 1552 | 1553 | ||
| 1553 | /* Bail out if a USB3 port has a new device in link training */ | 1554 | /* |
| 1554 | if ((hcd->speed >= HCD_USB3) && | 1555 | * Give a USB3 port in link training time to finish, but don't |
| 1556 | * prevent suspend as port might be stuck | ||
| 1557 | */ | ||
| 1558 | if ((hcd->speed >= HCD_USB3) && retries-- && | ||
| 1555 | (t1 & PORT_PLS_MASK) == XDEV_POLLING) { | 1559 | (t1 & PORT_PLS_MASK) == XDEV_POLLING) { |
| 1556 | bus_state->bus_suspended = 0; | ||
| 1557 | spin_unlock_irqrestore(&xhci->lock, flags); | 1560 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1558 | xhci_dbg(xhci, "Bus suspend bailout, port in polling\n"); | 1561 | msleep(XHCI_PORT_POLLING_LFPS_TIME); |
| 1559 | return -EBUSY; | 1562 | spin_lock_irqsave(&xhci->lock, flags); |
| 1563 | xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n", | ||
| 1564 | port_index); | ||
| 1565 | goto retry; | ||
| 1560 | } | 1566 | } |
| 1561 | |||
| 1562 | /* suspend ports in U0, or bail out for new connect changes */ | 1567 | /* suspend ports in U0, or bail out for new connect changes */ |
| 1563 | if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { | 1568 | if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { |
| 1564 | if ((t1 & PORT_CSC) && wake_enabled) { | 1569 | if ((t1 & PORT_CSC) && wake_enabled) { |
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index a6e463715779..671bce18782c 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c | |||
| @@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd) | |||
| 246 | if (!xhci_rcar_wait_for_pll_active(hcd)) | 246 | if (!xhci_rcar_wait_for_pll_active(hcd)) |
| 247 | return -ETIMEDOUT; | 247 | return -ETIMEDOUT; |
| 248 | 248 | ||
| 249 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | ||
| 249 | return xhci_rcar_download_firmware(hcd); | 250 | return xhci_rcar_download_firmware(hcd); |
| 250 | } | 251 | } |
| 251 | 252 | ||
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 40fa25c4d041..9215a28dad40 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
| 1647 | } | 1647 | } |
| 1648 | } | 1648 | } |
| 1649 | 1649 | ||
| 1650 | if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 && | 1650 | if ((portsc & PORT_PLC) && |
| 1651 | DEV_SUPERSPEED_ANY(portsc)) { | 1651 | DEV_SUPERSPEED_ANY(portsc) && |
| 1652 | ((portsc & PORT_PLS_MASK) == XDEV_U0 || | ||
| 1653 | (portsc & PORT_PLS_MASK) == XDEV_U1 || | ||
| 1654 | (portsc & PORT_PLS_MASK) == XDEV_U2)) { | ||
| 1652 | xhci_dbg(xhci, "resume SS port %d finished\n", port_id); | 1655 | xhci_dbg(xhci, "resume SS port %d finished\n", port_id); |
| 1653 | /* We've just brought the device into U0 through either the | 1656 | /* We've just brought the device into U0/1/2 through either the |
| 1654 | * Resume state after a device remote wakeup, or through the | 1657 | * Resume state after a device remote wakeup, or through the |
| 1655 | * U3Exit state after a host-initiated resume. If it's a device | 1658 | * U3Exit state after a host-initiated resume. If it's a device |
| 1656 | * initiated remote wake, don't pass up the link state change, | 1659 | * initiated remote wake, don't pass up the link state change, |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 652dc36e3012..9334cdee382a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
| @@ -452,6 +452,14 @@ struct xhci_op_regs { | |||
| 452 | */ | 452 | */ |
| 453 | #define XHCI_DEFAULT_BESL 4 | 453 | #define XHCI_DEFAULT_BESL 4 |
| 454 | 454 | ||
| 455 | /* | ||
| 456 | * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports | ||
| 457 | * to complete link training. Usually link training completes much faster, | ||
| 458 | * so check the status 10 times with a 36ms sleep in places where we need | ||
| 459 | * to wait for polling to complete. | ||
| 460 | */ | ||
| 461 | #define XHCI_PORT_POLLING_LFPS_TIME 36 | ||
| 462 | |||
| 455 | /** | 463 | /** |
| 456 | * struct xhci_intr_reg - Interrupt Register Set | 464 | * struct xhci_intr_reg - Interrupt Register Set |
| 457 | * @irq_pending: IMAN - Interrupt Management Register. Used to enable | 465 | * @irq_pending: IMAN - Interrupt Management Register. Used to enable |
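Rather than aborting bus suspend whenever a USB3 port is still in link training, the xhci-hub change above polls the port up to ten times with a 36 ms sleep, which together roughly spans the 360 ms tPollingLFPSTimeout. A standalone sketch of the bounded retry-with-sleep loop (the poll function is only a stand-in for reading PORTSC):

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define POLL_INTERVAL_MS 36     /* mirrors XHCI_PORT_POLLING_LFPS_TIME */
    #define POLL_RETRIES     10     /* 10 * 36 ms ~= the 360 ms spec timeout */

    /* Stand-in for "port link state still reads XDEV_POLLING" */
    static bool port_still_polling(int attempt)
    {
            return attempt < 3;     /* pretend training finishes on the 4th read */
    }

    static int wait_for_training(void)
    {
            for (int i = 0; i < POLL_RETRIES; i++) {
                    if (!port_still_polling(i))
                            return 0;       /* link training done, safe to suspend */
                    usleep(POLL_INTERVAL_MS * 1000);
            }
            return -1;      /* give up, but do not block suspend forever */
    }

    int main(void)
    {
            printf("training wait: %d\n", wait_for_training());
            return 0;
    }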
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 4d72b7d1d383..04684849d683 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c | |||
| @@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, | |||
| 547 | */ | 547 | */ |
| 548 | hub->port_swap = USB251XB_DEF_PORT_SWAP; | 548 | hub->port_swap = USB251XB_DEF_PORT_SWAP; |
| 549 | of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) { | 549 | of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) { |
| 550 | if ((port >= 0) && (port <= data->port_cnt)) | 550 | if (port <= data->port_cnt) |
| 551 | hub->port_swap |= BIT(port); | 551 | hub->port_swap |= BIT(port); |
| 552 | } | 552 | } |
| 553 | 553 | ||
| @@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub) | |||
| 612 | dev); | 612 | dev); |
| 613 | int err; | 613 | int err; |
| 614 | 614 | ||
| 615 | if (np) { | 615 | if (np && of_id) { |
| 616 | err = usb251xb_get_ofdata(hub, | 616 | err = usb251xb_get_ofdata(hub, |
| 617 | (struct usb251xb_data *)of_id->data); | 617 | (struct usb251xb_data *)of_id->data); |
| 618 | if (err) { | 618 | if (err) { |
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig index bcc23486c4ed..928c2cd6fc00 100644 --- a/drivers/usb/mtu3/Kconfig +++ b/drivers/usb/mtu3/Kconfig | |||
| @@ -6,6 +6,7 @@ config USB_MTU3 | |||
| 6 | tristate "MediaTek USB3 Dual Role controller" | 6 | tristate "MediaTek USB3 Dual Role controller" |
| 7 | depends on USB || USB_GADGET | 7 | depends on USB || USB_GADGET |
| 8 | depends on ARCH_MEDIATEK || COMPILE_TEST | 8 | depends on ARCH_MEDIATEK || COMPILE_TEST |
| 9 | depends on EXTCON || !EXTCON | ||
| 9 | select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD | 10 | select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD |
| 10 | help | 11 | help |
| 11 | Say Y or M here if your system runs on MediaTek SoCs with | 12 | Say Y or M here if your system runs on MediaTek SoCs with |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index fffe23ab0189..979bef9bfb6b 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = { | |||
| 80 | { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ | 80 | { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ |
| 81 | { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ | 81 | { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ |
| 82 | { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ | 82 | { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ |
| 83 | { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */ | ||
| 83 | { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ | 84 | { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ |
| 84 | { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ | 85 | { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ |
| 85 | { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ | 86 | { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8f5b17471759..1d8461ae2c34 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
| 609 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 609 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 610 | { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), | 610 | { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), |
| 611 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 611 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 612 | { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) }, | ||
| 613 | { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) }, | ||
| 612 | { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, | 614 | { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, |
| 613 | { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, | 615 | { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, |
| 614 | { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, | 616 | { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index b863bedb55a1..5755f0df0025 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -567,7 +567,9 @@ | |||
| 567 | /* | 567 | /* |
| 568 | * NovaTech product ids (FTDI_VID) | 568 | * NovaTech product ids (FTDI_VID) |
| 569 | */ | 569 | */ |
| 570 | #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ | 570 | #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ |
| 571 | #define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */ | ||
| 572 | #define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */ | ||
| 571 | 573 | ||
| 572 | /* | 574 | /* |
| 573 | * Synapse Wireless product ids (FTDI_VID) | 575 | * Synapse Wireless product ids (FTDI_VID) |
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index fc52ac75fbf6..18110225d506 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
| @@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
| 366 | if (!urbtrack) | 366 | if (!urbtrack) |
| 367 | return -ENOMEM; | 367 | return -ENOMEM; |
| 368 | 368 | ||
| 369 | kref_get(&mos_parport->ref_count); | ||
| 370 | urbtrack->mos_parport = mos_parport; | ||
| 371 | urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC); | 369 | urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC); |
| 372 | if (!urbtrack->urb) { | 370 | if (!urbtrack->urb) { |
| 373 | kfree(urbtrack); | 371 | kfree(urbtrack); |
| @@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
| 388 | usb_sndctrlpipe(usbdev, 0), | 386 | usb_sndctrlpipe(usbdev, 0), |
| 389 | (unsigned char *)urbtrack->setup, | 387 | (unsigned char *)urbtrack->setup, |
| 390 | NULL, 0, async_complete, urbtrack); | 388 | NULL, 0, async_complete, urbtrack); |
| 389 | kref_get(&mos_parport->ref_count); | ||
| 390 | urbtrack->mos_parport = mos_parport; | ||
| 391 | kref_init(&urbtrack->ref_count); | 391 | kref_init(&urbtrack->ref_count); |
| 392 | INIT_LIST_HEAD(&urbtrack->urblist_entry); | 392 | INIT_LIST_HEAD(&urbtrack->urblist_entry); |
| 393 | 393 | ||
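The mos7720 fix above takes the parport reference only after the allocations that can fail, so an early kfree()/return no longer leaks a reference count. A generic sketch of "acquire the reference last, once no failure path can skip the matching put", with an invented parent/tracker pair in place of the driver's kref:

    #include <stdio.h>
    #include <stdlib.h>

    struct parent {
            int refcount;   /* stand-in for a struct kref */
    };

    static void parent_get(struct parent *p) { p->refcount++; }
    static void parent_put(struct parent *p) { p->refcount--; }

    struct tracker {
            struct parent *parent;
            void *buf;
    };

    static struct tracker *tracker_create(struct parent *p, size_t len)
    {
            struct tracker *t = malloc(sizeof(*t));

            if (!t)
                    return NULL;

            t->buf = malloc(len);
            if (!t->buf) {
                    free(t);        /* nothing to put: reference not taken yet */
                    return NULL;
            }

            /* All fallible steps done - now it is safe to pin the parent */
            parent_get(p);
            t->parent = p;
            return t;
    }

    static void tracker_destroy(struct tracker *t)
    {
            parent_put(t->parent);
            free(t->buf);
            free(t);
    }

    int main(void)
    {
            struct parent p = { .refcount = 1 };
            struct tracker *t = tracker_create(&p, 64);

            if (!t)
                    return 1;
            printf("refcount after create: %d\n", p.refcount);
            tracker_destroy(t);
            printf("refcount after destroy: %d\n", p.refcount);
            return 0;
    }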
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 11b21d9410f3..83869065b802 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 246 | #define QUECTEL_PRODUCT_EC25 0x0125 | 246 | #define QUECTEL_PRODUCT_EC25 0x0125 |
| 247 | #define QUECTEL_PRODUCT_BG96 0x0296 | 247 | #define QUECTEL_PRODUCT_BG96 0x0296 |
| 248 | #define QUECTEL_PRODUCT_EP06 0x0306 | 248 | #define QUECTEL_PRODUCT_EP06 0x0306 |
| 249 | #define QUECTEL_PRODUCT_EM12 0x0512 | ||
| 249 | 250 | ||
| 250 | #define CMOTECH_VENDOR_ID 0x16d8 | 251 | #define CMOTECH_VENDOR_ID 0x16d8 |
| 251 | #define CMOTECH_PRODUCT_6001 0x6001 | 252 | #define CMOTECH_PRODUCT_6001 0x6001 |
| @@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = { | |||
| 1066 | .driver_info = RSVD(3) }, | 1067 | .driver_info = RSVD(3) }, |
| 1067 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 1068 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
| 1068 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ | 1069 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ |
| 1069 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ | 1070 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */ |
| 1071 | .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) }, | ||
| 1070 | /* Quectel products using Qualcomm vendor ID */ | 1072 | /* Quectel products using Qualcomm vendor ID */ |
| 1071 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, | 1073 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, |
| 1072 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), | 1074 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), |
| @@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = { | |||
| 1087 | { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), | 1089 | { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), |
| 1088 | .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, | 1090 | .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, |
| 1089 | { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, | 1091 | { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, |
| 1092 | { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), | ||
| 1093 | .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, | ||
| 1094 | { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, | ||
| 1090 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, | 1095 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, |
| 1091 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, | 1096 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, |
| 1092 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), | 1097 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), |
| @@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = { | |||
| 1940 | .driver_info = RSVD(4) }, | 1945 | .driver_info = RSVD(4) }, |
| 1941 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ | 1946 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ |
| 1942 | .driver_info = RSVD(4) }, | 1947 | .driver_info = RSVD(4) }, |
| 1943 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ | 1948 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ |
| 1944 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ | 1949 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ |
| 1945 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ | 1950 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ |
| 1946 | { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ | 1951 | { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */ |
| 1952 | .driver_info = RSVD(4) }, | ||
| 1953 | { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ | ||
| 1947 | { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, | 1954 | { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, |
| 1948 | { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, | 1955 | { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, |
| 1949 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, | 1956 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, |
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 0f62db091d8d..a2233d72ae7c 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | S(SRC_ATTACHED), \ | 37 | S(SRC_ATTACHED), \ |
| 38 | S(SRC_STARTUP), \ | 38 | S(SRC_STARTUP), \ |
| 39 | S(SRC_SEND_CAPABILITIES), \ | 39 | S(SRC_SEND_CAPABILITIES), \ |
| 40 | S(SRC_SEND_CAPABILITIES_TIMEOUT), \ | ||
| 40 | S(SRC_NEGOTIATE_CAPABILITIES), \ | 41 | S(SRC_NEGOTIATE_CAPABILITIES), \ |
| 41 | S(SRC_TRANSITION_SUPPLY), \ | 42 | S(SRC_TRANSITION_SUPPLY), \ |
| 42 | S(SRC_READY), \ | 43 | S(SRC_READY), \ |
| @@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port) | |||
| 2966 | /* port->hard_reset_count = 0; */ | 2967 | /* port->hard_reset_count = 0; */ |
| 2967 | port->caps_count = 0; | 2968 | port->caps_count = 0; |
| 2968 | port->pd_capable = true; | 2969 | port->pd_capable = true; |
| 2969 | tcpm_set_state_cond(port, hard_reset_state(port), | 2970 | tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT, |
| 2970 | PD_T_SEND_SOURCE_CAP); | 2971 | PD_T_SEND_SOURCE_CAP); |
| 2971 | } | 2972 | } |
| 2972 | break; | 2973 | break; |
| 2974 | case SRC_SEND_CAPABILITIES_TIMEOUT: | ||
| 2975 | /* | ||
| 2976 | * Error recovery for a PD_DATA_SOURCE_CAP reply timeout. | ||
| 2977 | * | ||
| 2978 | * PD 2.0 sinks are supposed to accept src-capabilities with a | ||
| 2979 | * 3.0 header and simply ignore any src PDOs which the sink does | ||
| 2980 | * not understand such as PPS but some 2.0 sinks instead ignore | ||
| 2981 | * the entire PD_DATA_SOURCE_CAP message, causing contract | ||
| 2982 | * negotiation to fail. | ||
| 2983 | * | ||
| 2984 | * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try | ||
| 2985 | * sending src-capabilities with a lower PD revision to | ||
| 2986 | * make these broken sinks work. | ||
| 2987 | */ | ||
| 2988 | if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) { | ||
| 2989 | tcpm_set_state(port, HARD_RESET_SEND, 0); | ||
| 2990 | } else if (port->negotiated_rev > PD_REV20) { | ||
| 2991 | port->negotiated_rev--; | ||
| 2992 | port->hard_reset_count = 0; | ||
| 2993 | tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); | ||
| 2994 | } else { | ||
| 2995 | tcpm_set_state(port, hard_reset_state(port), 0); | ||
| 2996 | } | ||
| 2997 | break; | ||
| 2973 | case SRC_NEGOTIATE_CAPABILITIES: | 2998 | case SRC_NEGOTIATE_CAPABILITIES: |
| 2974 | ret = tcpm_pd_check_request(port); | 2999 | ret = tcpm_pd_check_request(port); |
| 2975 | if (ret < 0) { | 3000 | if (ret < 0) { |
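The tcpm hunks above add a dedicated SRC_SEND_CAPABILITIES_TIMEOUT state: once the usual hard resets are exhausted, the port drops the negotiated PD revision and retries, which rescues PD 2.0 sinks that discard a 3.0-framed source-capabilities message. A compact sketch of that "retry first, then degrade the revision" decision logic, with invented names and the reset counting folded into one function for brevity:

    #include <stdio.h>

    #define MAX_HARD_RESETS 2
    #define REV20 2
    #define REV30 3

    struct pd_port {
            int hard_reset_count;
            int negotiated_rev;
    };

    enum next_step { STEP_HARD_RESET, STEP_RESEND_CAPS, STEP_GIVE_UP };

    /* Decide what to do when the sink never answers our source capabilities */
    static enum next_step on_caps_timeout(struct pd_port *port)
    {
            if (port->hard_reset_count < MAX_HARD_RESETS) {
                    port->hard_reset_count++;
                    return STEP_HARD_RESET;         /* normal recovery first */
            }
            if (port->negotiated_rev > REV20) {
                    port->negotiated_rev--;         /* fall back, e.g. 3.0 -> 2.0 */
                    port->hard_reset_count = 0;
                    return STEP_RESEND_CAPS;
            }
            return STEP_GIVE_UP;                    /* already at the lowest revision */
    }

    int main(void)
    {
            struct pd_port port = { .hard_reset_count = 0, .negotiated_rev = REV30 };

            for (int i = 0; i < 5; i++)
                    printf("timeout %d -> step %d (rev %d)\n",
                           i, on_caps_timeout(&port), port.negotiated_rev);
            return 0;
    }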
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c index 423208e19383..6770afd40765 100644 --- a/drivers/usb/typec/tcpm/wcove.c +++ b/drivers/usb/typec/tcpm/wcove.c | |||
| @@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev) | |||
| 615 | wcove->dev = &pdev->dev; | 615 | wcove->dev = &pdev->dev; |
| 616 | wcove->regmap = pmic->regmap; | 616 | wcove->regmap = pmic->regmap; |
| 617 | 617 | ||
| 618 | irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, | 618 | irq = platform_get_irq(pdev, 0); |
| 619 | platform_get_irq(pdev, 0)); | 619 | if (irq < 0) { |
| 620 | dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq); | ||
| 621 | return irq; | ||
| 622 | } | ||
| 623 | |||
| 624 | irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq); | ||
| 620 | if (irq < 0) | 625 | if (irq < 0) |
| 621 | return irq; | 626 | return irq; |
| 622 | 627 | ||
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index a25659b5a5d1..3fa20e95a6bb 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
| @@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void) | |||
| 1661 | rc = pci_add_dynid(&vfio_pci_driver, vendor, device, | 1661 | rc = pci_add_dynid(&vfio_pci_driver, vendor, device, |
| 1662 | subvendor, subdevice, class, class_mask, 0); | 1662 | subvendor, subdevice, class, class_mask, 0); |
| 1663 | if (rc) | 1663 | if (rc) |
| 1664 | pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n", | 1664 | pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n", |
| 1665 | vendor, device, subvendor, subdevice, | 1665 | vendor, device, subvendor, subdevice, |
| 1666 | class, class_mask, rc); | 1666 | class, class_mask, rc); |
| 1667 | else | 1667 | else |
| 1668 | pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n", | 1668 | pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n", |
| 1669 | vendor, device, subvendor, subdevice, | 1669 | vendor, device, subvendor, subdevice, |
| 1670 | class, class_mask); | 1670 | class, class_mask); |
| 1671 | } | 1671 | } |
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 8dbb270998f4..6b64e45a5269 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | |||
| @@ -1398,7 +1398,7 @@ unlock_exit: | |||
| 1398 | mutex_unlock(&container->lock); | 1398 | mutex_unlock(&container->lock); |
| 1399 | } | 1399 | } |
| 1400 | 1400 | ||
| 1401 | const struct vfio_iommu_driver_ops tce_iommu_driver_ops = { | 1401 | static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = { |
| 1402 | .name = "iommu-vfio-powerpc", | 1402 | .name = "iommu-vfio-powerpc", |
| 1403 | .owner = THIS_MODULE, | 1403 | .owner = THIS_MODULE, |
| 1404 | .open = tce_iommu_open, | 1404 | .open = tce_iommu_open, |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 73652e21efec..d0f731c9920a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
| @@ -58,12 +58,18 @@ module_param_named(disable_hugepages, | |||
| 58 | MODULE_PARM_DESC(disable_hugepages, | 58 | MODULE_PARM_DESC(disable_hugepages, |
| 59 | "Disable VFIO IOMMU support for IOMMU hugepages."); | 59 | "Disable VFIO IOMMU support for IOMMU hugepages."); |
| 60 | 60 | ||
| 61 | static unsigned int dma_entry_limit __read_mostly = U16_MAX; | ||
| 62 | module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644); | ||
| 63 | MODULE_PARM_DESC(dma_entry_limit, | ||
| 64 | "Maximum number of user DMA mappings per container (65535)."); | ||
| 65 | |||
| 61 | struct vfio_iommu { | 66 | struct vfio_iommu { |
| 62 | struct list_head domain_list; | 67 | struct list_head domain_list; |
| 63 | struct vfio_domain *external_domain; /* domain for external user */ | 68 | struct vfio_domain *external_domain; /* domain for external user */ |
| 64 | struct mutex lock; | 69 | struct mutex lock; |
| 65 | struct rb_root dma_list; | 70 | struct rb_root dma_list; |
| 66 | struct blocking_notifier_head notifier; | 71 | struct blocking_notifier_head notifier; |
| 72 | unsigned int dma_avail; | ||
| 67 | bool v2; | 73 | bool v2; |
| 68 | bool nesting; | 74 | bool nesting; |
| 69 | }; | 75 | }; |
| @@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) | |||
| 836 | vfio_unlink_dma(iommu, dma); | 842 | vfio_unlink_dma(iommu, dma); |
| 837 | put_task_struct(dma->task); | 843 | put_task_struct(dma->task); |
| 838 | kfree(dma); | 844 | kfree(dma); |
| 845 | iommu->dma_avail++; | ||
| 839 | } | 846 | } |
| 840 | 847 | ||
| 841 | static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) | 848 | static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) |
| @@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | |||
| 1081 | goto out_unlock; | 1088 | goto out_unlock; |
| 1082 | } | 1089 | } |
| 1083 | 1090 | ||
| 1091 | if (!iommu->dma_avail) { | ||
| 1092 | ret = -ENOSPC; | ||
| 1093 | goto out_unlock; | ||
| 1094 | } | ||
| 1095 | |||
| 1084 | dma = kzalloc(sizeof(*dma), GFP_KERNEL); | 1096 | dma = kzalloc(sizeof(*dma), GFP_KERNEL); |
| 1085 | if (!dma) { | 1097 | if (!dma) { |
| 1086 | ret = -ENOMEM; | 1098 | ret = -ENOMEM; |
| 1087 | goto out_unlock; | 1099 | goto out_unlock; |
| 1088 | } | 1100 | } |
| 1089 | 1101 | ||
| 1102 | iommu->dma_avail--; | ||
| 1090 | dma->iova = iova; | 1103 | dma->iova = iova; |
| 1091 | dma->vaddr = vaddr; | 1104 | dma->vaddr = vaddr; |
| 1092 | dma->prot = prot; | 1105 | dma->prot = prot; |
| @@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg) | |||
| 1583 | 1596 | ||
| 1584 | INIT_LIST_HEAD(&iommu->domain_list); | 1597 | INIT_LIST_HEAD(&iommu->domain_list); |
| 1585 | iommu->dma_list = RB_ROOT; | 1598 | iommu->dma_list = RB_ROOT; |
| 1599 | iommu->dma_avail = dma_entry_limit; | ||
| 1586 | mutex_init(&iommu->lock); | 1600 | mutex_init(&iommu->lock); |
| 1587 | BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); | 1601 | BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); |
| 1588 | 1602 | ||
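The vfio type1 change above bounds how many DMA mappings a container may hold: a module parameter seeds a per-container dma_avail counter that is decremented on map and incremented on unmap, and mapping fails with -ENOSPC once it hits zero. A minimal counter-budget sketch of that accounting, outside the kernel:

    #include <errno.h>
    #include <stdio.h>

    #define DEFAULT_DMA_ENTRY_LIMIT 65535   /* mirrors the new module parameter */

    struct container {
            unsigned int dma_avail; /* remaining mapping budget */
    };

    static void container_init(struct container *c, unsigned int limit)
    {
            c->dma_avail = limit;
    }

    static int container_map(struct container *c)
    {
            if (!c->dma_avail)
                    return -ENOSPC; /* budget exhausted: refuse new mappings */
            c->dma_avail--;
            return 0;
    }

    static void container_unmap(struct container *c)
    {
            c->dma_avail++;         /* removing a mapping returns budget */
    }

    int main(void)
    {
            struct container c;

            container_init(&c, 2);
            printf("%d %d %d\n", container_map(&c), container_map(&c),
                   container_map(&c));      /* 0 0 -ENOSPC */
            container_unmap(&c);
            printf("%d\n", container_map(&c));      /* 0 again */
            return 0;
    }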
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index df7d09409efe..8ca333f21292 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c | |||
| @@ -27,6 +27,10 @@ | |||
| 27 | 27 | ||
| 28 | #define GUEST_MAPPINGS_TRIES 5 | 28 | #define GUEST_MAPPINGS_TRIES 5 |
| 29 | 29 | ||
| 30 | #define VBG_KERNEL_REQUEST \ | ||
| 31 | (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \ | ||
| 32 | VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN) | ||
| 33 | |||
| 30 | /** | 34 | /** |
| 31 | * Reserves memory in which the VMM can relocate any guest mappings | 35 | * Reserves memory in which the VMM can relocate any guest mappings |
| 32 | * that are floating around. | 36 | * that are floating around. |
| @@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev) | |||
| 48 | int i, rc; | 52 | int i, rc; |
| 49 | 53 | ||
| 50 | /* Query the required space. */ | 54 | /* Query the required space. */ |
| 51 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO); | 55 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO, |
| 56 | VBG_KERNEL_REQUEST); | ||
| 52 | if (!req) | 57 | if (!req) |
| 53 | return; | 58 | return; |
| 54 | 59 | ||
| @@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev) | |||
| 135 | * Tell the host that we're going to free the memory we reserved for | 140 | * Tell the host that we're going to free the memory we reserved for |
| 136 | * it, the free it up. (Leak the memory if anything goes wrong here.) | 141 | * it, the free it up. (Leak the memory if anything goes wrong here.) |
| 137 | */ | 142 | */ |
| 138 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO); | 143 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO, |
| 144 | VBG_KERNEL_REQUEST); | ||
| 139 | if (!req) | 145 | if (!req) |
| 140 | return; | 146 | return; |
| 141 | 147 | ||
| @@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev) | |||
| 172 | struct vmmdev_guest_info2 *req2 = NULL; | 178 | struct vmmdev_guest_info2 *req2 = NULL; |
| 173 | int rc, ret = -ENOMEM; | 179 | int rc, ret = -ENOMEM; |
| 174 | 180 | ||
| 175 | req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO); | 181 | req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO, |
| 176 | req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2); | 182 | VBG_KERNEL_REQUEST); |
| 183 | req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2, | ||
| 184 | VBG_KERNEL_REQUEST); | ||
| 177 | if (!req1 || !req2) | 185 | if (!req1 || !req2) |
| 178 | goto out_free; | 186 | goto out_free; |
| 179 | 187 | ||
| @@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev) | |||
| 187 | req2->additions_minor = VBG_VERSION_MINOR; | 195 | req2->additions_minor = VBG_VERSION_MINOR; |
| 188 | req2->additions_build = VBG_VERSION_BUILD; | 196 | req2->additions_build = VBG_VERSION_BUILD; |
| 189 | req2->additions_revision = VBG_SVN_REV; | 197 | req2->additions_revision = VBG_SVN_REV; |
| 190 | /* (no features defined yet) */ | 198 | req2->additions_features = |
| 191 | req2->additions_features = 0; | 199 | VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO; |
| 192 | strlcpy(req2->name, VBG_VERSION_STRING, | 200 | strlcpy(req2->name, VBG_VERSION_STRING, |
| 193 | sizeof(req2->name)); | 201 | sizeof(req2->name)); |
| 194 | 202 | ||
| @@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) | |||
| 230 | struct vmmdev_guest_status *req; | 238 | struct vmmdev_guest_status *req; |
| 231 | int rc; | 239 | int rc; |
| 232 | 240 | ||
| 233 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS); | 241 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS, |
| 242 | VBG_KERNEL_REQUEST); | ||
| 234 | if (!req) | 243 | if (!req) |
| 235 | return -ENOMEM; | 244 | return -ENOMEM; |
| 236 | 245 | ||
| @@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) | |||
| 423 | struct vmmdev_heartbeat *req; | 432 | struct vmmdev_heartbeat *req; |
| 424 | int rc; | 433 | int rc; |
| 425 | 434 | ||
| 426 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE); | 435 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE, |
| 436 | VBG_KERNEL_REQUEST); | ||
| 427 | if (!req) | 437 | if (!req) |
| 428 | return -ENOMEM; | 438 | return -ENOMEM; |
| 429 | 439 | ||
| @@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev) | |||
| 457 | 467 | ||
| 458 | gdev->guest_heartbeat_req = vbg_req_alloc( | 468 | gdev->guest_heartbeat_req = vbg_req_alloc( |
| 459 | sizeof(*gdev->guest_heartbeat_req), | 469 | sizeof(*gdev->guest_heartbeat_req), |
| 460 | VMMDEVREQ_GUEST_HEARTBEAT); | 470 | VMMDEVREQ_GUEST_HEARTBEAT, |
| 471 | VBG_KERNEL_REQUEST); | ||
| 461 | if (!gdev->guest_heartbeat_req) | 472 | if (!gdev->guest_heartbeat_req) |
| 462 | return -ENOMEM; | 473 | return -ENOMEM; |
| 463 | 474 | ||
| @@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev, | |||
| 528 | struct vmmdev_mask *req; | 539 | struct vmmdev_mask *req; |
| 529 | int rc; | 540 | int rc; |
| 530 | 541 | ||
| 531 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); | 542 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK, |
| 543 | VBG_KERNEL_REQUEST); | ||
| 532 | if (!req) | 544 | if (!req) |
| 533 | return -ENOMEM; | 545 | return -ENOMEM; |
| 534 | 546 | ||
| @@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev, | |||
| 567 | u32 changed, previous; | 579 | u32 changed, previous; |
| 568 | int rc, ret = 0; | 580 | int rc, ret = 0; |
| 569 | 581 | ||
| 570 | /* Allocate a request buffer before taking the spinlock */ | 582 | /* |
| 571 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); | 583 | * Allocate a request buffer before taking the spinlock, when |
| 584 | * the session is being terminated the requestor is the kernel, | ||
| 585 | * as we're cleaning up. | ||
| 586 | */ | ||
| 587 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK, | ||
| 588 | session_termination ? VBG_KERNEL_REQUEST : | ||
| 589 | session->requestor); | ||
| 572 | if (!req) { | 590 | if (!req) { |
| 573 | if (!session_termination) | 591 | if (!session_termination) |
| 574 | return -ENOMEM; | 592 | return -ENOMEM; |
| @@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev) | |||
| 627 | struct vmmdev_mask *req; | 645 | struct vmmdev_mask *req; |
| 628 | int rc; | 646 | int rc; |
| 629 | 647 | ||
| 630 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); | 648 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, |
| 649 | VBG_KERNEL_REQUEST); | ||
| 631 | if (!req) | 650 | if (!req) |
| 632 | return -ENOMEM; | 651 | return -ENOMEM; |
| 633 | 652 | ||
| @@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev, | |||
| 662 | u32 changed, previous; | 681 | u32 changed, previous; |
| 663 | int rc, ret = 0; | 682 | int rc, ret = 0; |
| 664 | 683 | ||
| 665 | /* Allocate a request buffer before taking the spinlock */ | 684 | /* |
| 666 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); | 685 | * Allocate a request buffer before taking the spinlock, when |
| 686 | * the session is being terminated the requestor is the kernel, | ||
| 687 | * as we're cleaning up. | ||
| 688 | */ | ||
| 689 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, | ||
| 690 | session_termination ? VBG_KERNEL_REQUEST : | ||
| 691 | session->requestor); | ||
| 667 | if (!req) { | 692 | if (!req) { |
| 668 | if (!session_termination) | 693 | if (!session_termination) |
| 669 | return -ENOMEM; | 694 | return -ENOMEM; |
| @@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev) | |||
| 722 | struct vmmdev_host_version *req; | 747 | struct vmmdev_host_version *req; |
| 723 | int rc, ret; | 748 | int rc, ret; |
| 724 | 749 | ||
| 725 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION); | 750 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION, |
| 751 | VBG_KERNEL_REQUEST); | ||
| 726 | if (!req) | 752 | if (!req) |
| 727 | return -ENOMEM; | 753 | return -ENOMEM; |
| 728 | 754 | ||
| @@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events) | |||
| 783 | 809 | ||
| 784 | gdev->mem_balloon.get_req = | 810 | gdev->mem_balloon.get_req = |
| 785 | vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req), | 811 | vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req), |
| 786 | VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ); | 812 | VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ, |
| 813 | VBG_KERNEL_REQUEST); | ||
| 787 | gdev->mem_balloon.change_req = | 814 | gdev->mem_balloon.change_req = |
| 788 | vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req), | 815 | vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req), |
| 789 | VMMDEVREQ_CHANGE_MEMBALLOON); | 816 | VMMDEVREQ_CHANGE_MEMBALLOON, |
| 817 | VBG_KERNEL_REQUEST); | ||
| 790 | gdev->cancel_req = | 818 | gdev->cancel_req = |
| 791 | vbg_req_alloc(sizeof(*(gdev->cancel_req)), | 819 | vbg_req_alloc(sizeof(*(gdev->cancel_req)), |
| 792 | VMMDEVREQ_HGCM_CANCEL2); | 820 | VMMDEVREQ_HGCM_CANCEL2, |
| 821 | VBG_KERNEL_REQUEST); | ||
| 793 | gdev->ack_events_req = | 822 | gdev->ack_events_req = |
| 794 | vbg_req_alloc(sizeof(*gdev->ack_events_req), | 823 | vbg_req_alloc(sizeof(*gdev->ack_events_req), |
| 795 | VMMDEVREQ_ACKNOWLEDGE_EVENTS); | 824 | VMMDEVREQ_ACKNOWLEDGE_EVENTS, |
| 825 | VBG_KERNEL_REQUEST); | ||
| 796 | gdev->mouse_status_req = | 826 | gdev->mouse_status_req = |
| 797 | vbg_req_alloc(sizeof(*gdev->mouse_status_req), | 827 | vbg_req_alloc(sizeof(*gdev->mouse_status_req), |
| 798 | VMMDEVREQ_GET_MOUSE_STATUS); | 828 | VMMDEVREQ_GET_MOUSE_STATUS, |
| 829 | VBG_KERNEL_REQUEST); | ||
| 799 | 830 | ||
| 800 | if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req || | 831 | if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req || |
| 801 | !gdev->cancel_req || !gdev->ack_events_req || | 832 | !gdev->cancel_req || !gdev->ack_events_req || |
| @@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev) | |||
| 892 | * vboxguest_linux.c calls this when userspace opens the char-device. | 923 | * vboxguest_linux.c calls this when userspace opens the char-device. |
| 893 | * Return: A pointer to the new session or an ERR_PTR on error. | 924 | * Return: A pointer to the new session or an ERR_PTR on error. |
| 894 | * @gdev: The Guest extension device. | 925 | * @gdev: The Guest extension device. |
| 895 | * @user: Set if this is a session for the vboxuser device. | 926 | * @requestor: VMMDEV_REQUESTOR_* flags |
| 896 | */ | 927 | */ |
| 897 | struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user) | 928 | struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor) |
| 898 | { | 929 | { |
| 899 | struct vbg_session *session; | 930 | struct vbg_session *session; |
| 900 | 931 | ||
| @@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user) | |||
| 903 | return ERR_PTR(-ENOMEM); | 934 | return ERR_PTR(-ENOMEM); |
| 904 | 935 | ||
| 905 | session->gdev = gdev; | 936 | session->gdev = gdev; |
| 906 | session->user_session = user; | 937 | session->requestor = requestor; |
| 907 | 938 | ||
| 908 | return session; | 939 | return session; |
| 909 | } | 940 | } |
| @@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session) | |||
| 924 | if (!session->hgcm_client_ids[i]) | 955 | if (!session->hgcm_client_ids[i]) |
| 925 | continue; | 956 | continue; |
| 926 | 957 | ||
| 927 | vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc); | 958 | /* requestor is kernel here, as we're cleaning up. */ |
| 959 | vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST, | ||
| 960 | session->hgcm_client_ids[i], &rc); | ||
| 928 | } | 961 | } |
| 929 | 962 | ||
| 930 | kfree(session); | 963 | kfree(session); |
| @@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, | |||
| 1152 | return -EPERM; | 1185 | return -EPERM; |
| 1153 | } | 1186 | } |
| 1154 | 1187 | ||
| 1155 | if (trusted_apps_only && session->user_session) { | 1188 | if (trusted_apps_only && |
| 1189 | (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) { | ||
| 1156 | vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n", | 1190 | vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n", |
| 1157 | req->request_type); | 1191 | req->request_type); |
| 1158 | return -EPERM; | 1192 | return -EPERM; |
| @@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev, | |||
| 1209 | if (i >= ARRAY_SIZE(session->hgcm_client_ids)) | 1243 | if (i >= ARRAY_SIZE(session->hgcm_client_ids)) |
| 1210 | return -EMFILE; | 1244 | return -EMFILE; |
| 1211 | 1245 | ||
| 1212 | ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id, | 1246 | ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc, |
| 1213 | &conn->hdr.rc); | 1247 | &client_id, &conn->hdr.rc); |
| 1214 | 1248 | ||
| 1215 | mutex_lock(&gdev->session_mutex); | 1249 | mutex_lock(&gdev->session_mutex); |
| 1216 | if (ret == 0 && conn->hdr.rc >= 0) { | 1250 | if (ret == 0 && conn->hdr.rc >= 0) { |
| @@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev, | |||
| 1251 | if (i >= ARRAY_SIZE(session->hgcm_client_ids)) | 1285 | if (i >= ARRAY_SIZE(session->hgcm_client_ids)) |
| 1252 | return -EINVAL; | 1286 | return -EINVAL; |
| 1253 | 1287 | ||
| 1254 | ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc); | 1288 | ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id, |
| 1289 | &disconn->hdr.rc); | ||
| 1255 | 1290 | ||
| 1256 | mutex_lock(&gdev->session_mutex); | 1291 | mutex_lock(&gdev->session_mutex); |
| 1257 | if (ret == 0 && disconn->hdr.rc >= 0) | 1292 | if (ret == 0 && disconn->hdr.rc >= 0) |
| @@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev, | |||
| 1313 | } | 1348 | } |
| 1314 | 1349 | ||
| 1315 | if (IS_ENABLED(CONFIG_COMPAT) && f32bit) | 1350 | if (IS_ENABLED(CONFIG_COMPAT) && f32bit) |
| 1316 | ret = vbg_hgcm_call32(gdev, client_id, | 1351 | ret = vbg_hgcm_call32(gdev, session->requestor, client_id, |
| 1317 | call->function, call->timeout_ms, | 1352 | call->function, call->timeout_ms, |
| 1318 | VBG_IOCTL_HGCM_CALL_PARMS32(call), | 1353 | VBG_IOCTL_HGCM_CALL_PARMS32(call), |
| 1319 | call->parm_count, &call->hdr.rc); | 1354 | call->parm_count, &call->hdr.rc); |
| 1320 | else | 1355 | else |
| 1321 | ret = vbg_hgcm_call(gdev, client_id, | 1356 | ret = vbg_hgcm_call(gdev, session->requestor, client_id, |
| 1322 | call->function, call->timeout_ms, | 1357 | call->function, call->timeout_ms, |
| 1323 | VBG_IOCTL_HGCM_CALL_PARMS(call), | 1358 | VBG_IOCTL_HGCM_CALL_PARMS(call), |
| 1324 | call->parm_count, &call->hdr.rc); | 1359 | call->parm_count, &call->hdr.rc); |
| @@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev, | |||
| 1408 | } | 1443 | } |
| 1409 | 1444 | ||
| 1410 | static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, | 1445 | static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, |
| 1446 | struct vbg_session *session, | ||
| 1411 | struct vbg_ioctl_write_coredump *dump) | 1447 | struct vbg_ioctl_write_coredump *dump) |
| 1412 | { | 1448 | { |
| 1413 | struct vmmdev_write_core_dump *req; | 1449 | struct vmmdev_write_core_dump *req; |
| @@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, | |||
| 1415 | if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0)) | 1451 | if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0)) |
| 1416 | return -EINVAL; | 1452 | return -EINVAL; |
| 1417 | 1453 | ||
| 1418 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP); | 1454 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP, |
| 1455 | session->requestor); | ||
| 1419 | if (!req) | 1456 | if (!req) |
| 1420 | return -ENOMEM; | 1457 | return -ENOMEM; |
| 1421 | 1458 | ||
| @@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) | |||
| 1476 | case VBG_IOCTL_CHECK_BALLOON: | 1513 | case VBG_IOCTL_CHECK_BALLOON: |
| 1477 | return vbg_ioctl_check_balloon(gdev, data); | 1514 | return vbg_ioctl_check_balloon(gdev, data); |
| 1478 | case VBG_IOCTL_WRITE_CORE_DUMP: | 1515 | case VBG_IOCTL_WRITE_CORE_DUMP: |
| 1479 | return vbg_ioctl_write_core_dump(gdev, data); | 1516 | return vbg_ioctl_write_core_dump(gdev, session, data); |
| 1480 | } | 1517 | } |
| 1481 | 1518 | ||
| 1482 | /* Variable sized requests. */ | 1519 | /* Variable sized requests. */ |
| @@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features) | |||
| 1508 | struct vmmdev_mouse_status *req; | 1545 | struct vmmdev_mouse_status *req; |
| 1509 | int rc; | 1546 | int rc; |
| 1510 | 1547 | ||
| 1511 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS); | 1548 | req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS, |
| 1549 | VBG_KERNEL_REQUEST); | ||
| 1512 | if (!req) | 1550 | if (!req) |
| 1513 | return -ENOMEM; | 1551 | return -ENOMEM; |
| 1514 | 1552 | ||
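The conversions above all follow one rule: vbg_req_alloc() now needs a requestor, driver-internal work passes VBG_KERNEL_REQUEST, and anything done on behalf of an ioctl passes the opening session's requestor, falling back to the kernel value while a session is being torn down. A minimal sketch of that rule, assuming only the signatures visible in this patch (the helper name is made up):

static void *vbg_example_req_alloc(struct vbg_session *session,
				   bool session_termination, size_t len,
				   enum vmmdev_request_type req_type)
{
	u32 requestor;

	if (!session || session_termination)
		requestor = VBG_KERNEL_REQUEST;		/* driver-internal work */
	else
		requestor = session->requestor;		/* attribute to the caller */

	return vbg_req_alloc(len, req_type, requestor);
}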
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index 7ad9ec45bfa9..4188c12b839f 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h | |||
| @@ -154,15 +154,15 @@ struct vbg_session { | |||
| 154 | * host. Protected by vbg_gdev.session_mutex. | 154 | * host. Protected by vbg_gdev.session_mutex. |
| 155 | */ | 155 | */ |
| 156 | u32 guest_caps; | 156 | u32 guest_caps; |
| 157 | /** Does this session belong to a root process or a user one? */ | 157 | /** VMMDEV_REQUESTOR_* flags */ |
| 158 | bool user_session; | 158 | u32 requestor; |
| 159 | /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */ | 159 | /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */ |
| 160 | bool cancel_waiters; | 160 | bool cancel_waiters; |
| 161 | }; | 161 | }; |
| 162 | 162 | ||
| 163 | int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events); | 163 | int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events); |
| 164 | void vbg_core_exit(struct vbg_dev *gdev); | 164 | void vbg_core_exit(struct vbg_dev *gdev); |
| 165 | struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user); | 165 | struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor); |
| 166 | void vbg_core_close_session(struct vbg_session *session); | 166 | void vbg_core_close_session(struct vbg_session *session); |
| 167 | int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data); | 167 | int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data); |
| 168 | int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features); | 168 | int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features); |
| @@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id); | |||
| 172 | void vbg_linux_mouse_event(struct vbg_dev *gdev); | 172 | void vbg_linux_mouse_event(struct vbg_dev *gdev); |
| 173 | 173 | ||
| 174 | /* Private (non exported) functions from vboxguest_utils.c */ | 174 | /* Private (non exported) functions from vboxguest_utils.c */ |
| 175 | void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); | 175 | void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type, |
| 176 | u32 requestor); | ||
| 176 | void vbg_req_free(void *req, size_t len); | 177 | void vbg_req_free(void *req, size_t len); |
| 177 | int vbg_req_perform(struct vbg_dev *gdev, void *req); | 178 | int vbg_req_perform(struct vbg_dev *gdev, void *req); |
| 178 | int vbg_hgcm_call32( | 179 | int vbg_hgcm_call32( |
| 179 | struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, | 180 | struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function, |
| 180 | struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, | 181 | u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32, |
| 181 | int *vbox_status); | 182 | u32 parm_count, int *vbox_status); |
| 182 | 183 | ||
| 183 | #endif | 184 | #endif |
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 6e2a9619192d..6e8c0f1c1056 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | * Copyright (C) 2006-2016 Oracle Corporation | 5 | * Copyright (C) 2006-2016 Oracle Corporation |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include <linux/cred.h> | ||
| 8 | #include <linux/input.h> | 9 | #include <linux/input.h> |
| 9 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
| 10 | #include <linux/miscdevice.h> | 11 | #include <linux/miscdevice.h> |
| @@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex); | |||
| 28 | /** Global vbg_gdev pointer used by vbg_get/put_gdev. */ | 29 | /** Global vbg_gdev pointer used by vbg_get/put_gdev. */ |
| 29 | static struct vbg_dev *vbg_gdev; | 30 | static struct vbg_dev *vbg_gdev; |
| 30 | 31 | ||
| 32 | static u32 vbg_misc_device_requestor(struct inode *inode) | ||
| 33 | { | ||
| 34 | u32 requestor = VMMDEV_REQUESTOR_USERMODE | | ||
| 35 | VMMDEV_REQUESTOR_CON_DONT_KNOW | | ||
| 36 | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN; | ||
| 37 | |||
| 38 | if (from_kuid(current_user_ns(), current->cred->uid) == 0) | ||
| 39 | requestor |= VMMDEV_REQUESTOR_USR_ROOT; | ||
| 40 | else | ||
| 41 | requestor |= VMMDEV_REQUESTOR_USR_USER; | ||
| 42 | |||
| 43 | if (in_egroup_p(inode->i_gid)) | ||
| 44 | requestor |= VMMDEV_REQUESTOR_GRP_VBOX; | ||
| 45 | |||
| 46 | return requestor; | ||
| 47 | } | ||
| 48 | |||
| 31 | static int vbg_misc_device_open(struct inode *inode, struct file *filp) | 49 | static int vbg_misc_device_open(struct inode *inode, struct file *filp) |
| 32 | { | 50 | { |
| 33 | struct vbg_session *session; | 51 | struct vbg_session *session; |
| @@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp) | |||
| 36 | /* misc_open sets filp->private_data to our misc device */ | 54 | /* misc_open sets filp->private_data to our misc device */ |
| 37 | gdev = container_of(filp->private_data, struct vbg_dev, misc_device); | 55 | gdev = container_of(filp->private_data, struct vbg_dev, misc_device); |
| 38 | 56 | ||
| 39 | session = vbg_core_open_session(gdev, false); | 57 | session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode)); |
| 40 | if (IS_ERR(session)) | 58 | if (IS_ERR(session)) |
| 41 | return PTR_ERR(session); | 59 | return PTR_ERR(session); |
| 42 | 60 | ||
| @@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp) | |||
| 53 | gdev = container_of(filp->private_data, struct vbg_dev, | 71 | gdev = container_of(filp->private_data, struct vbg_dev, |
| 54 | misc_device_user); | 72 | misc_device_user); |
| 55 | 73 | ||
| 56 | session = vbg_core_open_session(gdev, false); | 74 | session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) | |
| 75 | VMMDEV_REQUESTOR_USER_DEVICE); | ||
| 57 | if (IS_ERR(session)) | 76 | if (IS_ERR(session)) |
| 58 | return PTR_ERR(session); | 77 | return PTR_ERR(session); |
| 59 | 78 | ||
| @@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, | |||
| 115 | req == VBG_IOCTL_VMMDEV_REQUEST_BIG; | 134 | req == VBG_IOCTL_VMMDEV_REQUEST_BIG; |
| 116 | 135 | ||
| 117 | if (is_vmmdev_req) | 136 | if (is_vmmdev_req) |
| 118 | buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); | 137 | buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT, |
| 138 | session->requestor); | ||
| 119 | else | 139 | else |
| 120 | buf = kmalloc(size, GFP_KERNEL); | 140 | buf = kmalloc(size, GFP_KERNEL); |
| 121 | if (!buf) | 141 | if (!buf) |
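Taken together, the open paths above fix the requestor word at open time and it is then stamped into every request the session issues. As a sketch built only from the flags used in this file, a plain non-root process that is not in the device group and opens /dev/vboxuser ends up with:

	u32 requestor = VMMDEV_REQUESTOR_USERMODE |		/* vbg_misc_device_requestor() */
			VMMDEV_REQUESTOR_CON_DONT_KNOW |
			VMMDEV_REQUESTOR_TRUST_NOT_GIVEN |
			VMMDEV_REQUESTOR_USR_USER |		/* uid != 0 */
			VMMDEV_REQUESTOR_USER_DEVICE;		/* OR'd in by the vboxuser open */

That is the value vbg_req_allowed() later tests against VMMDEV_REQUESTOR_USER_DEVICE for the trusted-apps-only case.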
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index bf4474214b4d..75fd140b02ff 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c | |||
| @@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err); | |||
| 62 | VBG_LOG(vbg_debug, pr_debug); | 62 | VBG_LOG(vbg_debug, pr_debug); |
| 63 | #endif | 63 | #endif |
| 64 | 64 | ||
| 65 | void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) | 65 | void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type, |
| 66 | u32 requestor) | ||
| 66 | { | 67 | { |
| 67 | struct vmmdev_request_header *req; | 68 | struct vmmdev_request_header *req; |
| 68 | int order = get_order(PAGE_ALIGN(len)); | 69 | int order = get_order(PAGE_ALIGN(len)); |
| @@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) | |||
| 78 | req->request_type = req_type; | 79 | req->request_type = req_type; |
| 79 | req->rc = VERR_GENERAL_FAILURE; | 80 | req->rc = VERR_GENERAL_FAILURE; |
| 80 | req->reserved1 = 0; | 81 | req->reserved1 = 0; |
| 81 | req->reserved2 = 0; | 82 | req->requestor = requestor; |
| 82 | 83 | ||
| 83 | return req; | 84 | return req; |
| 84 | } | 85 | } |
| @@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev, | |||
| 119 | return done; | 120 | return done; |
| 120 | } | 121 | } |
| 121 | 122 | ||
| 122 | int vbg_hgcm_connect(struct vbg_dev *gdev, | 123 | int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor, |
| 123 | struct vmmdev_hgcm_service_location *loc, | 124 | struct vmmdev_hgcm_service_location *loc, |
| 124 | u32 *client_id, int *vbox_status) | 125 | u32 *client_id, int *vbox_status) |
| 125 | { | 126 | { |
| @@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev, | |||
| 127 | int rc; | 128 | int rc; |
| 128 | 129 | ||
| 129 | hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect), | 130 | hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect), |
| 130 | VMMDEVREQ_HGCM_CONNECT); | 131 | VMMDEVREQ_HGCM_CONNECT, requestor); |
| 131 | if (!hgcm_connect) | 132 | if (!hgcm_connect) |
| 132 | return -ENOMEM; | 133 | return -ENOMEM; |
| 133 | 134 | ||
| @@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev, | |||
| 153 | } | 154 | } |
| 154 | EXPORT_SYMBOL(vbg_hgcm_connect); | 155 | EXPORT_SYMBOL(vbg_hgcm_connect); |
| 155 | 156 | ||
| 156 | int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status) | 157 | int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor, |
| 158 | u32 client_id, int *vbox_status) | ||
| 157 | { | 159 | { |
| 158 | struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL; | 160 | struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL; |
| 159 | int rc; | 161 | int rc; |
| 160 | 162 | ||
| 161 | hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect), | 163 | hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect), |
| 162 | VMMDEVREQ_HGCM_DISCONNECT); | 164 | VMMDEVREQ_HGCM_DISCONNECT, |
| 165 | requestor); | ||
| 163 | if (!hgcm_disconnect) | 166 | if (!hgcm_disconnect) |
| 164 | return -ENOMEM; | 167 | return -ENOMEM; |
| 165 | 168 | ||
| @@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result( | |||
| 593 | return 0; | 596 | return 0; |
| 594 | } | 597 | } |
| 595 | 598 | ||
| 596 | int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, | 599 | int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id, |
| 597 | u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, | 600 | u32 function, u32 timeout_ms, |
| 598 | u32 parm_count, int *vbox_status) | 601 | struct vmmdev_hgcm_function_parameter *parms, u32 parm_count, |
| 602 | int *vbox_status) | ||
| 599 | { | 603 | { |
| 600 | struct vmmdev_hgcm_call *call; | 604 | struct vmmdev_hgcm_call *call; |
| 601 | void **bounce_bufs = NULL; | 605 | void **bounce_bufs = NULL; |
| @@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, | |||
| 615 | goto free_bounce_bufs; | 619 | goto free_bounce_bufs; |
| 616 | } | 620 | } |
| 617 | 621 | ||
| 618 | call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL); | 622 | call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor); |
| 619 | if (!call) { | 623 | if (!call) { |
| 620 | ret = -ENOMEM; | 624 | ret = -ENOMEM; |
| 621 | goto free_bounce_bufs; | 625 | goto free_bounce_bufs; |
| @@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call); | |||
| 647 | 651 | ||
| 648 | #ifdef CONFIG_COMPAT | 652 | #ifdef CONFIG_COMPAT |
| 649 | int vbg_hgcm_call32( | 653 | int vbg_hgcm_call32( |
| 650 | struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, | 654 | struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function, |
| 651 | struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, | 655 | u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32, |
| 652 | int *vbox_status) | 656 | u32 parm_count, int *vbox_status) |
| 653 | { | 657 | { |
| 654 | struct vmmdev_hgcm_function_parameter *parm64 = NULL; | 658 | struct vmmdev_hgcm_function_parameter *parm64 = NULL; |
| 655 | u32 i, size; | 659 | u32 i, size; |
| @@ -689,7 +693,7 @@ int vbg_hgcm_call32( | |||
| 689 | goto out_free; | 693 | goto out_free; |
| 690 | } | 694 | } |
| 691 | 695 | ||
| 692 | ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms, | 696 | ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms, |
| 693 | parm64, parm_count, vbox_status); | 697 | parm64, parm_count, vbox_status); |
| 694 | if (ret < 0) | 698 | if (ret < 0) |
| 695 | goto out_free; | 699 | goto out_free; |
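Because vbg_hgcm_connect/disconnect/call are exported, any in-kernel HGCM client has to thread a requestor through as well. A hedged sketch of a wrapper such a client could add, using only the prototypes above (the wrapper itself is not part of the patch):

static int example_hgcm_call(struct vbg_dev *gdev, struct vbg_session *s,
			     u32 client_id, u32 function, u32 timeout_ms,
			     struct vmmdev_hgcm_function_parameter *parms,
			     u32 parm_count, int *vbox_status)
{
	u32 requestor = s ? s->requestor : VBG_KERNEL_REQUEST;

	return vbg_hgcm_call(gdev, requestor, client_id, function,
			     timeout_ms, parms, parm_count, vbox_status);
}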
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h index 77f0c8f8a231..84834dad38d5 100644 --- a/drivers/virt/vboxguest/vboxguest_version.h +++ b/drivers/virt/vboxguest/vboxguest_version.h | |||
| @@ -9,11 +9,10 @@ | |||
| 9 | #ifndef __VBOX_VERSION_H__ | 9 | #ifndef __VBOX_VERSION_H__ |
| 10 | #define __VBOX_VERSION_H__ | 10 | #define __VBOX_VERSION_H__ |
| 11 | 11 | ||
| 12 | /* Last synced October 4th 2017 */ | 12 | #define VBG_VERSION_MAJOR 6 |
| 13 | #define VBG_VERSION_MAJOR 5 | 13 | #define VBG_VERSION_MINOR 0 |
| 14 | #define VBG_VERSION_MINOR 2 | ||
| 15 | #define VBG_VERSION_BUILD 0 | 14 | #define VBG_VERSION_BUILD 0 |
| 16 | #define VBG_SVN_REV 68940 | 15 | #define VBG_SVN_REV 127566 |
| 17 | #define VBG_VERSION_STRING "5.2.0" | 16 | #define VBG_VERSION_STRING "6.0.0" |
| 18 | 17 | ||
| 19 | #endif | 18 | #endif |
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h index 5e2ae978935d..6337b8d75d96 100644 --- a/drivers/virt/vboxguest/vmmdev.h +++ b/drivers/virt/vboxguest/vmmdev.h | |||
| @@ -98,8 +98,8 @@ struct vmmdev_request_header { | |||
| 98 | s32 rc; | 98 | s32 rc; |
| 99 | /** Reserved field no.1. MBZ. */ | 99 | /** Reserved field no.1. MBZ. */ |
| 100 | u32 reserved1; | 100 | u32 reserved1; |
| 101 | /** Reserved field no.2. MBZ. */ | 101 | /** IN: Requestor information (VMMDEV_REQUESTOR_*) */ |
| 102 | u32 reserved2; | 102 | u32 requestor; |
| 103 | }; | 103 | }; |
| 104 | VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24); | 104 | VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24); |
| 105 | 105 | ||
| @@ -247,6 +247,8 @@ struct vmmdev_guest_info { | |||
| 247 | }; | 247 | }; |
| 248 | VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8); | 248 | VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8); |
| 249 | 249 | ||
| 250 | #define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO BIT(0) | ||
| 251 | |||
| 250 | /** struct vmmdev_guestinfo2 - Guest information report, version 2. */ | 252 | /** struct vmmdev_guestinfo2 - Guest information report, version 2. */ |
| 251 | struct vmmdev_guest_info2 { | 253 | struct vmmdev_guest_info2 { |
| 252 | /** Header. */ | 254 | /** Header. */ |
| @@ -259,7 +261,7 @@ struct vmmdev_guest_info2 { | |||
| 259 | u32 additions_build; | 261 | u32 additions_build; |
| 260 | /** SVN revision. */ | 262 | /** SVN revision. */ |
| 261 | u32 additions_revision; | 263 | u32 additions_revision; |
| 262 | /** Feature mask, currently unused. */ | 264 | /** Feature mask. */ |
| 263 | u32 additions_features; | 265 | u32 additions_features; |
| 264 | /** | 266 | /** |
| 265 | * The intentional meaning of this field was: | 267 | * The intentional meaning of this field was: |
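The header stays 24 bytes (the size assertion above is unchanged), so reusing reserved2 is ABI-compatible and the host simply gains requestor information. Sketch of the resulting header contents, using only field and constant names visible in these hunks; size and version are filled in exactly as before:

	struct vmmdev_request_header hdr = {
		.request_type	= VMMDEVREQ_GET_HOST_VERSION,
		.rc		= VERR_GENERAL_FAILURE,
		.reserved1	= 0,
		.requestor	= VMMDEV_REQUESTOR_USERMODE | VMMDEV_REQUESTOR_USR_USER,
	};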
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c index de01a6d0059d..a1c61e351d3f 100644 --- a/drivers/xen/privcmd-buf.c +++ b/drivers/xen/privcmd-buf.c | |||
| @@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 140 | if (!(vma->vm_flags & VM_SHARED)) | 140 | if (!(vma->vm_flags & VM_SHARED)) |
| 141 | return -EINVAL; | 141 | return -EINVAL; |
| 142 | 142 | ||
| 143 | vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *), | 143 | vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL); |
| 144 | GFP_KERNEL); | ||
| 145 | if (!vma_priv) | 144 | if (!vma_priv) |
| 146 | return -ENOMEM; | 145 | return -ENOMEM; |
| 147 | 146 | ||
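struct_size() computes sizeof(*p) plus count trailing array elements in a single overflow-checked expression, which is what the removed open-coded multiply was doing by hand. A generic, self-contained sketch (the struct here is illustrative, not the xen one):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_priv {
	unsigned int count;
	struct page *pages[];		/* flexible array member */
};

static struct example_priv *example_alloc(unsigned int count)
{
	struct example_priv *p;

	p = kzalloc(struct_size(p, pages, count), GFP_KERNEL);
	if (p)
		p->count = count;
	return p;
}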
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index c3e201025ef0..0782ff3c2273 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
| @@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp) | |||
| 622 | if (xen_store_evtchn == 0) | 622 | if (xen_store_evtchn == 0) |
| 623 | return -ENOENT; | 623 | return -ENOENT; |
| 624 | 624 | ||
| 625 | nonseekable_open(inode, filp); | 625 | stream_open(inode, filp); |
| 626 | |||
| 627 | filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */ | ||
| 628 | 626 | ||
| 629 | u = kzalloc(sizeof(*u), GFP_KERNEL); | 627 | u = kzalloc(sizeof(*u), GFP_KERNEL); |
| 630 | if (u == NULL) | 628 | if (u == NULL) |
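stream_open() does in one call what the removed pair was approximating: it marks the file as non-seekable and exempts it from f_pos locking, so reads and writes behave as a stream. Sketch of the idiom for any similar char-device open handler:

static int example_open(struct inode *inode, struct file *filp)
{
	/* No llseek, no f_pos serialization: I/O on this file is a stream. */
	stream_open(inode, filp);
	return 0;
}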
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index ca08c83168f5..0b37867b5c20 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c | |||
| @@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr) | |||
| 1515 | 1515 | ||
| 1516 | xdr_encode_AFS_StoreStatus(&bp, attr); | 1516 | xdr_encode_AFS_StoreStatus(&bp, attr); |
| 1517 | 1517 | ||
| 1518 | *bp++ = 0; /* position of start of write */ | 1518 | *bp++ = htonl(attr->ia_size >> 32); /* position of start of write */ |
| 1519 | *bp++ = 0; | 1519 | *bp++ = htonl((u32) attr->ia_size); |
| 1520 | *bp++ = 0; /* size of write */ | 1520 | *bp++ = 0; /* size of write */ |
| 1521 | *bp++ = 0; | 1521 | *bp++ = 0; |
| 1522 | *bp++ = htonl(attr->ia_size >> 32); /* new file length */ | 1522 | *bp++ = htonl(attr->ia_size >> 32); /* new file length */ |
| @@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr) | |||
| 1564 | 1564 | ||
| 1565 | xdr_encode_AFS_StoreStatus(&bp, attr); | 1565 | xdr_encode_AFS_StoreStatus(&bp, attr); |
| 1566 | 1566 | ||
| 1567 | *bp++ = 0; /* position of start of write */ | 1567 | *bp++ = htonl(attr->ia_size); /* position of start of write */ |
| 1568 | *bp++ = 0; /* size of write */ | 1568 | *bp++ = 0; /* size of write */ |
| 1569 | *bp++ = htonl(attr->ia_size); /* new file length */ | 1569 | *bp++ = htonl(attr->ia_size); /* new file length */ |
| 1570 | 1570 | ||
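Both AFS hunks above (and the YFS one below) are about actually encoding the write position instead of leaving it zero; in the 32-bit-word AFS XDR a 64-bit value goes out as two big-endian words, high half first. A sketch of that encoding as a helper (the helper is illustrative, not part of the patch):

static __be32 *xdr_encode_example_u64(__be32 *bp, u64 val)
{
	*bp++ = htonl(upper_32_bits(val));	/* high 32 bits first */
	*bp++ = htonl(lower_32_bits(val));	/* then the low 32 bits */
	return bp;
}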
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 5aa57929e8c2..6e97a42d24d1 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c | |||
| @@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr) | |||
| 1514 | bp = xdr_encode_u32(bp, 0); /* RPC flags */ | 1514 | bp = xdr_encode_u32(bp, 0); /* RPC flags */ |
| 1515 | bp = xdr_encode_YFSFid(bp, &vnode->fid); | 1515 | bp = xdr_encode_YFSFid(bp, &vnode->fid); |
| 1516 | bp = xdr_encode_YFS_StoreStatus(bp, attr); | 1516 | bp = xdr_encode_YFS_StoreStatus(bp, attr); |
| 1517 | bp = xdr_encode_u64(bp, 0); /* position of start of write */ | 1517 | bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */ |
| 1518 | bp = xdr_encode_u64(bp, 0); /* size of write */ | 1518 | bp = xdr_encode_u64(bp, 0); /* size of write */ |
| 1519 | bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */ | 1519 | bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */ |
| 1520 | yfs_check_req(call, bp); | 1520 | yfs_check_req(call, bp); |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
| @@ -181,7 +181,7 @@ struct poll_iocb { |||
| 181 | struct file *file; | 181 | struct file *file; |
| 182 | struct wait_queue_head *head; | 182 | struct wait_queue_head *head; |
| 183 | __poll_t events; | 183 | __poll_t events; |
| 184 | bool woken; | 184 | bool done; |
| 185 | bool cancelled; | 185 | bool cancelled; |
| 186 | struct wait_queue_entry wait; | 186 | struct wait_queue_entry wait; |
| 187 | struct work_struct work; | 187 | struct work_struct work; |
| @@ -204,8 +204,7 @@ struct aio_kiocb { | |||
| 204 | struct kioctx *ki_ctx; | 204 | struct kioctx *ki_ctx; |
| 205 | kiocb_cancel_fn *ki_cancel; | 205 | kiocb_cancel_fn *ki_cancel; |
| 206 | 206 | ||
| 207 | struct iocb __user *ki_user_iocb; /* user's aiocb */ | 207 | struct io_event ki_res; |
| 208 | __u64 ki_user_data; /* user's data for completion */ | ||
| 209 | 208 | ||
| 210 | struct list_head ki_list; /* the aio core uses this | 209 | struct list_head ki_list; /* the aio core uses this |
| 211 | * for cancellation */ | 210 | * for cancellation */ |
| @@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx) | |||
| 1022 | /* aio_get_req | 1021 | /* aio_get_req |
| 1023 | * Allocate a slot for an aio request. | 1022 | * Allocate a slot for an aio request. |
| 1024 | * Returns NULL if no requests are free. | 1023 | * Returns NULL if no requests are free. |
| 1024 | * | ||
| 1025 | * The refcount is initialized to 2 - one for the async op completion, | ||
| 1026 | * one for the synchronous code that does this. | ||
| 1025 | */ | 1027 | */ |
| 1026 | static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) | 1028 | static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) |
| 1027 | { | 1029 | { |
| @@ -1031,10 +1033,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) | |||
| 1031 | if (unlikely(!req)) | 1033 | if (unlikely(!req)) |
| 1032 | return NULL; | 1034 | return NULL; |
| 1033 | 1035 | ||
| 1036 | if (unlikely(!get_reqs_available(ctx))) { | ||
| 1037 | kmem_cache_free(kiocb_cachep, req); | ||
| 1038 | return NULL; | ||
| 1039 | } | ||
| 1040 | |||
| 1034 | percpu_ref_get(&ctx->reqs); | 1041 | percpu_ref_get(&ctx->reqs); |
| 1035 | req->ki_ctx = ctx; | 1042 | req->ki_ctx = ctx; |
| 1036 | INIT_LIST_HEAD(&req->ki_list); | 1043 | INIT_LIST_HEAD(&req->ki_list); |
| 1037 | refcount_set(&req->ki_refcnt, 0); | 1044 | refcount_set(&req->ki_refcnt, 2); |
| 1038 | req->ki_eventfd = NULL; | 1045 | req->ki_eventfd = NULL; |
| 1039 | return req; | 1046 | return req; |
| 1040 | } | 1047 | } |
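The refcount_set(..., 2) above is the core of the aio rework: every request now has exactly two owners, the submission path and the asynchronous completion path, and whichever drops the last reference both delivers the event and frees the request. The idiom in miniature (illustrative only, not the kernel's code):

struct two_owner {
	refcount_t ref;
};

static void two_owner_init(struct two_owner *o)
{
	refcount_set(&o->ref, 2);	/* one ref per owner */
}

static bool two_owner_put(struct two_owner *o)
{
	/* Only the second put returns true; that caller tears down. */
	return refcount_dec_and_test(&o->ref);
}

In the patch the tear-down half is iocb_put(): aio_complete() followed by iocb_destroy().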
| @@ -1067,30 +1074,20 @@ out: | |||
| 1067 | return ret; | 1074 | return ret; |
| 1068 | } | 1075 | } |
| 1069 | 1076 | ||
| 1070 | static inline void iocb_put(struct aio_kiocb *iocb) | 1077 | static inline void iocb_destroy(struct aio_kiocb *iocb) |
| 1071 | { | ||
| 1072 | if (refcount_read(&iocb->ki_refcnt) == 0 || | ||
| 1073 | refcount_dec_and_test(&iocb->ki_refcnt)) { | ||
| 1074 | if (iocb->ki_filp) | ||
| 1075 | fput(iocb->ki_filp); | ||
| 1076 | percpu_ref_put(&iocb->ki_ctx->reqs); | ||
| 1077 | kmem_cache_free(kiocb_cachep, iocb); | ||
| 1078 | } | ||
| 1079 | } | ||
| 1080 | |||
| 1081 | static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb, | ||
| 1082 | long res, long res2) | ||
| 1083 | { | 1078 | { |
| 1084 | ev->obj = (u64)(unsigned long)iocb->ki_user_iocb; | 1079 | if (iocb->ki_eventfd) |
| 1085 | ev->data = iocb->ki_user_data; | 1080 | eventfd_ctx_put(iocb->ki_eventfd); |
| 1086 | ev->res = res; | 1081 | if (iocb->ki_filp) |
| 1087 | ev->res2 = res2; | 1082 | fput(iocb->ki_filp); |
| 1083 | percpu_ref_put(&iocb->ki_ctx->reqs); | ||
| 1084 | kmem_cache_free(kiocb_cachep, iocb); | ||
| 1088 | } | 1085 | } |
| 1089 | 1086 | ||
| 1090 | /* aio_complete | 1087 | /* aio_complete |
| 1091 | * Called when the io request on the given iocb is complete. | 1088 | * Called when the io request on the given iocb is complete. |
| 1092 | */ | 1089 | */ |
| 1093 | static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | 1090 | static void aio_complete(struct aio_kiocb *iocb) |
| 1094 | { | 1091 | { |
| 1095 | struct kioctx *ctx = iocb->ki_ctx; | 1092 | struct kioctx *ctx = iocb->ki_ctx; |
| 1096 | struct aio_ring *ring; | 1093 | struct aio_ring *ring; |
| @@ -1114,14 +1111,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | |||
| 1114 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); | 1111 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
| 1115 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; | 1112 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; |
| 1116 | 1113 | ||
| 1117 | aio_fill_event(event, iocb, res, res2); | 1114 | *event = iocb->ki_res; |
| 1118 | 1115 | ||
| 1119 | kunmap_atomic(ev_page); | 1116 | kunmap_atomic(ev_page); |
| 1120 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); | 1117 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
| 1121 | 1118 | ||
| 1122 | pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", | 1119 | pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, |
| 1123 | ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, | 1120 | (void __user *)(unsigned long)iocb->ki_res.obj, |
| 1124 | res, res2); | 1121 | iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2); |
| 1125 | 1122 | ||
| 1126 | /* after flagging the request as done, we | 1123 | /* after flagging the request as done, we |
| 1127 | * must never even look at it again | 1124 | * must never even look at it again |
| @@ -1148,10 +1145,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | |||
| 1148 | * eventfd. The eventfd_signal() function is safe to be called | 1145 | * eventfd. The eventfd_signal() function is safe to be called |
| 1149 | * from IRQ context. | 1146 | * from IRQ context. |
| 1150 | */ | 1147 | */ |
| 1151 | if (iocb->ki_eventfd) { | 1148 | if (iocb->ki_eventfd) |
| 1152 | eventfd_signal(iocb->ki_eventfd, 1); | 1149 | eventfd_signal(iocb->ki_eventfd, 1); |
| 1153 | eventfd_ctx_put(iocb->ki_eventfd); | ||
| 1154 | } | ||
| 1155 | 1150 | ||
| 1156 | /* | 1151 | /* |
| 1157 | * We have to order our ring_info tail store above and test | 1152 | * We have to order our ring_info tail store above and test |
| @@ -1163,7 +1158,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | |||
| 1163 | 1158 | ||
| 1164 | if (waitqueue_active(&ctx->wait)) | 1159 | if (waitqueue_active(&ctx->wait)) |
| 1165 | wake_up(&ctx->wait); | 1160 | wake_up(&ctx->wait); |
| 1166 | iocb_put(iocb); | 1161 | } |
| 1162 | |||
| 1163 | static inline void iocb_put(struct aio_kiocb *iocb) | ||
| 1164 | { | ||
| 1165 | if (refcount_dec_and_test(&iocb->ki_refcnt)) { | ||
| 1166 | aio_complete(iocb); | ||
| 1167 | iocb_destroy(iocb); | ||
| 1168 | } | ||
| 1167 | } | 1169 | } |
| 1168 | 1170 | ||
| 1169 | /* aio_read_events_ring | 1171 | /* aio_read_events_ring |
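With the event prebuilt in ki_res, a completion path only records its result and drops its reference; the ring insertion happens from the final iocb_put(). A sketch of the shape the rw, fsync and poll completions now share (not literal patch code):

static void example_complete(struct aio_kiocb *iocb, long res, long res2)
{
	iocb->ki_res.res = res;		/* obj/data were filled at submit time */
	iocb->ki_res.res2 = res2;
	iocb_put(iocb);			/* the last reference delivers and frees */
}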
| @@ -1437,7 +1439,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2) | |||
| 1437 | file_end_write(kiocb->ki_filp); | 1439 | file_end_write(kiocb->ki_filp); |
| 1438 | } | 1440 | } |
| 1439 | 1441 | ||
| 1440 | aio_complete(iocb, res, res2); | 1442 | iocb->ki_res.res = res; |
| 1443 | iocb->ki_res.res2 = res2; | ||
| 1444 | iocb_put(iocb); | ||
| 1441 | } | 1445 | } |
| 1442 | 1446 | ||
| 1443 | static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) | 1447 | static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) |
| @@ -1514,13 +1518,13 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret) | |||
| 1514 | } | 1518 | } |
| 1515 | } | 1519 | } |
| 1516 | 1520 | ||
| 1517 | static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, | 1521 | static int aio_read(struct kiocb *req, const struct iocb *iocb, |
| 1518 | bool vectored, bool compat) | 1522 | bool vectored, bool compat) |
| 1519 | { | 1523 | { |
| 1520 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; | 1524 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
| 1521 | struct iov_iter iter; | 1525 | struct iov_iter iter; |
| 1522 | struct file *file; | 1526 | struct file *file; |
| 1523 | ssize_t ret; | 1527 | int ret; |
| 1524 | 1528 | ||
| 1525 | ret = aio_prep_rw(req, iocb); | 1529 | ret = aio_prep_rw(req, iocb); |
| 1526 | if (ret) | 1530 | if (ret) |
| @@ -1542,13 +1546,13 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, | |||
| 1542 | return ret; | 1546 | return ret; |
| 1543 | } | 1547 | } |
| 1544 | 1548 | ||
| 1545 | static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, | 1549 | static int aio_write(struct kiocb *req, const struct iocb *iocb, |
| 1546 | bool vectored, bool compat) | 1550 | bool vectored, bool compat) |
| 1547 | { | 1551 | { |
| 1548 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; | 1552 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
| 1549 | struct iov_iter iter; | 1553 | struct iov_iter iter; |
| 1550 | struct file *file; | 1554 | struct file *file; |
| 1551 | ssize_t ret; | 1555 | int ret; |
| 1552 | 1556 | ||
| 1553 | ret = aio_prep_rw(req, iocb); | 1557 | ret = aio_prep_rw(req, iocb); |
| 1554 | if (ret) | 1558 | if (ret) |
| @@ -1585,11 +1589,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, | |||
| 1585 | 1589 | ||
| 1586 | static void aio_fsync_work(struct work_struct *work) | 1590 | static void aio_fsync_work(struct work_struct *work) |
| 1587 | { | 1591 | { |
| 1588 | struct fsync_iocb *req = container_of(work, struct fsync_iocb, work); | 1592 | struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); |
| 1589 | int ret; | ||
| 1590 | 1593 | ||
| 1591 | ret = vfs_fsync(req->file, req->datasync); | 1594 | iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); |
| 1592 | aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0); | 1595 | iocb_put(iocb); |
| 1593 | } | 1596 | } |
| 1594 | 1597 | ||
| 1595 | static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, | 1598 | static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, |
| @@ -1608,11 +1611,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, | |||
| 1608 | return 0; | 1611 | return 0; |
| 1609 | } | 1612 | } |
| 1610 | 1613 | ||
| 1611 | static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask) | ||
| 1612 | { | ||
| 1613 | aio_complete(iocb, mangle_poll(mask), 0); | ||
| 1614 | } | ||
| 1615 | |||
| 1616 | static void aio_poll_complete_work(struct work_struct *work) | 1614 | static void aio_poll_complete_work(struct work_struct *work) |
| 1617 | { | 1615 | { |
| 1618 | struct poll_iocb *req = container_of(work, struct poll_iocb, work); | 1616 | struct poll_iocb *req = container_of(work, struct poll_iocb, work); |
| @@ -1638,9 +1636,11 @@ static void aio_poll_complete_work(struct work_struct *work) | |||
| 1638 | return; | 1636 | return; |
| 1639 | } | 1637 | } |
| 1640 | list_del_init(&iocb->ki_list); | 1638 | list_del_init(&iocb->ki_list); |
| 1639 | iocb->ki_res.res = mangle_poll(mask); | ||
| 1640 | req->done = true; | ||
| 1641 | spin_unlock_irq(&ctx->ctx_lock); | 1641 | spin_unlock_irq(&ctx->ctx_lock); |
| 1642 | 1642 | ||
| 1643 | aio_poll_complete(iocb, mask); | 1643 | iocb_put(iocb); |
| 1644 | } | 1644 | } |
| 1645 | 1645 | ||
| 1646 | /* assumes we are called with irqs disabled */ | 1646 | /* assumes we are called with irqs disabled */ |
| @@ -1668,31 +1668,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, | |||
| 1668 | __poll_t mask = key_to_poll(key); | 1668 | __poll_t mask = key_to_poll(key); |
| 1669 | unsigned long flags; | 1669 | unsigned long flags; |
| 1670 | 1670 | ||
| 1671 | req->woken = true; | ||
| 1672 | |||
| 1673 | /* for instances that support it check for an event match first: */ | 1671 | /* for instances that support it check for an event match first: */ |
| 1674 | if (mask) { | 1672 | if (mask && !(mask & req->events)) |
| 1675 | if (!(mask & req->events)) | 1673 | return 0; |
| 1676 | return 0; | 1674 | |
| 1675 | list_del_init(&req->wait.entry); | ||
| 1677 | 1676 | ||
| 1677 | if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { | ||
| 1678 | /* | 1678 | /* |
| 1679 | * Try to complete the iocb inline if we can. Use | 1679 | * Try to complete the iocb inline if we can. Use |
| 1680 | * irqsave/irqrestore because not all filesystems (e.g. fuse) | 1680 | * irqsave/irqrestore because not all filesystems (e.g. fuse) |
| 1681 | * call this function with IRQs disabled and because IRQs | 1681 | * call this function with IRQs disabled and because IRQs |
| 1682 | * have to be disabled before ctx_lock is obtained. | 1682 | * have to be disabled before ctx_lock is obtained. |
| 1683 | */ | 1683 | */ |
| 1684 | if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { | 1684 | list_del(&iocb->ki_list); |
| 1685 | list_del(&iocb->ki_list); | 1685 | iocb->ki_res.res = mangle_poll(mask); |
| 1686 | spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); | 1686 | req->done = true; |
| 1687 | 1687 | spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); | |
| 1688 | list_del_init(&req->wait.entry); | 1688 | iocb_put(iocb); |
| 1689 | aio_poll_complete(iocb, mask); | 1689 | } else { |
| 1690 | return 1; | 1690 | schedule_work(&req->work); |
| 1691 | } | ||
| 1692 | } | 1691 | } |
| 1693 | |||
| 1694 | list_del_init(&req->wait.entry); | ||
| 1695 | schedule_work(&req->work); | ||
| 1696 | return 1; | 1692 | return 1; |
| 1697 | } | 1693 | } |
| 1698 | 1694 | ||
| @@ -1719,11 +1715,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, | |||
| 1719 | add_wait_queue(head, &pt->iocb->poll.wait); | 1715 | add_wait_queue(head, &pt->iocb->poll.wait); |
| 1720 | } | 1716 | } |
| 1721 | 1717 | ||
| 1722 | static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) | 1718 | static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) |
| 1723 | { | 1719 | { |
| 1724 | struct kioctx *ctx = aiocb->ki_ctx; | 1720 | struct kioctx *ctx = aiocb->ki_ctx; |
| 1725 | struct poll_iocb *req = &aiocb->poll; | 1721 | struct poll_iocb *req = &aiocb->poll; |
| 1726 | struct aio_poll_table apt; | 1722 | struct aio_poll_table apt; |
| 1723 | bool cancel = false; | ||
| 1727 | __poll_t mask; | 1724 | __poll_t mask; |
| 1728 | 1725 | ||
| 1729 | /* reject any unknown events outside the normal event mask. */ | 1726 | /* reject any unknown events outside the normal event mask. */ |
| @@ -1737,7 +1734,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) | |||
| 1737 | req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; | 1734 | req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; |
| 1738 | 1735 | ||
| 1739 | req->head = NULL; | 1736 | req->head = NULL; |
| 1740 | req->woken = false; | 1737 | req->done = false; |
| 1741 | req->cancelled = false; | 1738 | req->cancelled = false; |
| 1742 | 1739 | ||
| 1743 | apt.pt._qproc = aio_poll_queue_proc; | 1740 | apt.pt._qproc = aio_poll_queue_proc; |
| @@ -1749,156 +1746,135 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) | |||
| 1749 | INIT_LIST_HEAD(&req->wait.entry); | 1746 | INIT_LIST_HEAD(&req->wait.entry); |
| 1750 | init_waitqueue_func_entry(&req->wait, aio_poll_wake); | 1747 | init_waitqueue_func_entry(&req->wait, aio_poll_wake); |
| 1751 | 1748 | ||
| 1752 | /* one for removal from waitqueue, one for this function */ | ||
| 1753 | refcount_set(&aiocb->ki_refcnt, 2); | ||
| 1754 | |||
| 1755 | mask = vfs_poll(req->file, &apt.pt) & req->events; | 1749 | mask = vfs_poll(req->file, &apt.pt) & req->events; |
| 1756 | if (unlikely(!req->head)) { | ||
| 1757 | /* we did not manage to set up a waitqueue, done */ | ||
| 1758 | goto out; | ||
| 1759 | } | ||
| 1760 | |||
| 1761 | spin_lock_irq(&ctx->ctx_lock); | 1750 | spin_lock_irq(&ctx->ctx_lock); |
| 1762 | spin_lock(&req->head->lock); | 1751 | if (likely(req->head)) { |
| 1763 | if (req->woken) { | 1752 | spin_lock(&req->head->lock); |
| 1764 | /* wake_up context handles the rest */ | 1753 | if (unlikely(list_empty(&req->wait.entry))) { |
| 1765 | mask = 0; | 1754 | if (apt.error) |
| 1755 | cancel = true; | ||
| 1756 | apt.error = 0; | ||
| 1757 | mask = 0; | ||
| 1758 | } | ||
| 1759 | if (mask || apt.error) { | ||
| 1760 | list_del_init(&req->wait.entry); | ||
| 1761 | } else if (cancel) { | ||
| 1762 | WRITE_ONCE(req->cancelled, true); | ||
| 1763 | } else if (!req->done) { /* actually waiting for an event */ | ||
| 1764 | list_add_tail(&aiocb->ki_list, &ctx->active_reqs); | ||
| 1765 | aiocb->ki_cancel = aio_poll_cancel; | ||
| 1766 | } | ||
| 1767 | spin_unlock(&req->head->lock); | ||
| 1768 | } | ||
| 1769 | if (mask) { /* no async, we'd stolen it */ | ||
| 1770 | aiocb->ki_res.res = mangle_poll(mask); | ||
| 1766 | apt.error = 0; | 1771 | apt.error = 0; |
| 1767 | } else if (mask || apt.error) { | ||
| 1768 | /* if we get an error or a mask we are done */ | ||
| 1769 | WARN_ON_ONCE(list_empty(&req->wait.entry)); | ||
| 1770 | list_del_init(&req->wait.entry); | ||
| 1771 | } else { | ||
| 1772 | /* actually waiting for an event */ | ||
| 1773 | list_add_tail(&aiocb->ki_list, &ctx->active_reqs); | ||
| 1774 | aiocb->ki_cancel = aio_poll_cancel; | ||
| 1775 | } | 1772 | } |
| 1776 | spin_unlock(&req->head->lock); | ||
| 1777 | spin_unlock_irq(&ctx->ctx_lock); | 1773 | spin_unlock_irq(&ctx->ctx_lock); |
| 1778 | |||
| 1779 | out: | ||
| 1780 | if (unlikely(apt.error)) | ||
| 1781 | return apt.error; | ||
| 1782 | |||
| 1783 | if (mask) | 1774 | if (mask) |
| 1784 | aio_poll_complete(aiocb, mask); | 1775 | iocb_put(aiocb); |
| 1785 | iocb_put(aiocb); | 1776 | return apt.error; |
| 1786 | return 0; | ||
| 1787 | } | 1777 | } |
| 1788 | 1778 | ||
| 1789 | static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, | 1779 | static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, |
| 1790 | struct iocb __user *user_iocb, bool compat) | 1780 | struct iocb __user *user_iocb, struct aio_kiocb *req, |
| 1781 | bool compat) | ||
| 1791 | { | 1782 | { |
| 1792 | struct aio_kiocb *req; | ||
| 1793 | ssize_t ret; | ||
| 1794 | |||
| 1795 | /* enforce forwards compatibility on users */ | ||
| 1796 | if (unlikely(iocb->aio_reserved2)) { | ||
| 1797 | pr_debug("EINVAL: reserve field set\n"); | ||
| 1798 | return -EINVAL; | ||
| 1799 | } | ||
| 1800 | |||
| 1801 | /* prevent overflows */ | ||
| 1802 | if (unlikely( | ||
| 1803 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | ||
| 1804 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | ||
| 1805 | ((ssize_t)iocb->aio_nbytes < 0) | ||
| 1806 | )) { | ||
| 1807 | pr_debug("EINVAL: overflow check\n"); | ||
| 1808 | return -EINVAL; | ||
| 1809 | } | ||
| 1810 | |||
| 1811 | if (!get_reqs_available(ctx)) | ||
| 1812 | return -EAGAIN; | ||
| 1813 | |||
| 1814 | ret = -EAGAIN; | ||
| 1815 | req = aio_get_req(ctx); | ||
| 1816 | if (unlikely(!req)) | ||
| 1817 | goto out_put_reqs_available; | ||
| 1818 | |||
| 1819 | req->ki_filp = fget(iocb->aio_fildes); | 1783 | req->ki_filp = fget(iocb->aio_fildes); |
| 1820 | ret = -EBADF; | ||
| 1821 | if (unlikely(!req->ki_filp)) | 1784 | if (unlikely(!req->ki_filp)) |
| 1822 | goto out_put_req; | 1785 | return -EBADF; |
| 1823 | 1786 | ||
| 1824 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { | 1787 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
| 1788 | struct eventfd_ctx *eventfd; | ||
| 1825 | /* | 1789 | /* |
| 1826 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | 1790 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an |
| 1827 | * instance of the file* now. The file descriptor must be | 1791 | * instance of the file* now. The file descriptor must be |
| 1828 | * an eventfd() fd, and will be signaled for each completed | 1792 | * an eventfd() fd, and will be signaled for each completed |
| 1829 | * event using the eventfd_signal() function. | 1793 | * event using the eventfd_signal() function. |
| 1830 | */ | 1794 | */ |
| 1831 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); | 1795 | eventfd = eventfd_ctx_fdget(iocb->aio_resfd); |
| 1832 | if (IS_ERR(req->ki_eventfd)) { | 1796 | if (IS_ERR(eventfd)) |
| 1833 | ret = PTR_ERR(req->ki_eventfd); | 1797 | return PTR_ERR(eventfd); |
| 1834 | req->ki_eventfd = NULL; | 1798 | |
| 1835 | goto out_put_req; | 1799 | req->ki_eventfd = eventfd; |
| 1836 | } | ||
| 1837 | } | 1800 | } |
| 1838 | 1801 | ||
| 1839 | ret = put_user(KIOCB_KEY, &user_iocb->aio_key); | 1802 | if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) { |
| 1840 | if (unlikely(ret)) { | ||
| 1841 | pr_debug("EFAULT: aio_key\n"); | 1803 | pr_debug("EFAULT: aio_key\n"); |
| 1842 | goto out_put_req; | 1804 | return -EFAULT; |
| 1843 | } | 1805 | } |
| 1844 | 1806 | ||
| 1845 | req->ki_user_iocb = user_iocb; | 1807 | req->ki_res.obj = (u64)(unsigned long)user_iocb; |
| 1846 | req->ki_user_data = iocb->aio_data; | 1808 | req->ki_res.data = iocb->aio_data; |
| 1809 | req->ki_res.res = 0; | ||
| 1810 | req->ki_res.res2 = 0; | ||
| 1847 | 1811 | ||
| 1848 | switch (iocb->aio_lio_opcode) { | 1812 | switch (iocb->aio_lio_opcode) { |
| 1849 | case IOCB_CMD_PREAD: | 1813 | case IOCB_CMD_PREAD: |
| 1850 | ret = aio_read(&req->rw, iocb, false, compat); | 1814 | return aio_read(&req->rw, iocb, false, compat); |
| 1851 | break; | ||
| 1852 | case IOCB_CMD_PWRITE: | 1815 | case IOCB_CMD_PWRITE: |
| 1853 | ret = aio_write(&req->rw, iocb, false, compat); | 1816 | return aio_write(&req->rw, iocb, false, compat); |
| 1854 | break; | ||
| 1855 | case IOCB_CMD_PREADV: | 1817 | case IOCB_CMD_PREADV: |
| 1856 | ret = aio_read(&req->rw, iocb, true, compat); | 1818 | return aio_read(&req->rw, iocb, true, compat); |
| 1857 | break; | ||
| 1858 | case IOCB_CMD_PWRITEV: | 1819 | case IOCB_CMD_PWRITEV: |
| 1859 | ret = aio_write(&req->rw, iocb, true, compat); | 1820 | return aio_write(&req->rw, iocb, true, compat); |
| 1860 | break; | ||
| 1861 | case IOCB_CMD_FSYNC: | 1821 | case IOCB_CMD_FSYNC: |
| 1862 | ret = aio_fsync(&req->fsync, iocb, false); | 1822 | return aio_fsync(&req->fsync, iocb, false); |
| 1863 | break; | ||
| 1864 | case IOCB_CMD_FDSYNC: | 1823 | case IOCB_CMD_FDSYNC: |
| 1865 | ret = aio_fsync(&req->fsync, iocb, true); | 1824 | return aio_fsync(&req->fsync, iocb, true); |
| 1866 | break; | ||
| 1867 | case IOCB_CMD_POLL: | 1825 | case IOCB_CMD_POLL: |
| 1868 | ret = aio_poll(req, iocb); | 1826 | return aio_poll(req, iocb); |
| 1869 | break; | ||
| 1870 | default: | 1827 | default: |
| 1871 | pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); | 1828 | pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); |
| 1872 | ret = -EINVAL; | 1829 | return -EINVAL; |
| 1873 | break; | ||
| 1874 | } | 1830 | } |
| 1875 | |||
| 1876 | /* | ||
| 1877 | * If ret is 0, we'd either done aio_complete() ourselves or have | ||
| 1878 | * arranged for that to be done asynchronously. Anything non-zero | ||
| 1879 | * means that we need to destroy req ourselves. | ||
| 1880 | */ | ||
| 1881 | if (ret) | ||
| 1882 | goto out_put_req; | ||
| 1883 | return 0; | ||
| 1884 | out_put_req: | ||
| 1885 | if (req->ki_eventfd) | ||
| 1886 | eventfd_ctx_put(req->ki_eventfd); | ||
| 1887 | iocb_put(req); | ||
| 1888 | out_put_reqs_available: | ||
| 1889 | put_reqs_available(ctx, 1); | ||
| 1890 | return ret; | ||
| 1891 | } | 1831 | } |
| 1892 | 1832 | ||
| 1893 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | 1833 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
| 1894 | bool compat) | 1834 | bool compat) |
| 1895 | { | 1835 | { |
| 1836 | struct aio_kiocb *req; | ||
| 1896 | struct iocb iocb; | 1837 | struct iocb iocb; |
| 1838 | int err; | ||
| 1897 | 1839 | ||
| 1898 | if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) | 1840 | if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) |
| 1899 | return -EFAULT; | 1841 | return -EFAULT; |
| 1900 | 1842 | ||
| 1901 | return __io_submit_one(ctx, &iocb, user_iocb, compat); | 1843 | /* enforce forwards compatibility on users */ |
| 1844 | if (unlikely(iocb.aio_reserved2)) { | ||
| 1845 | pr_debug("EINVAL: reserve field set\n"); | ||
| 1846 | return -EINVAL; | ||
| 1847 | } | ||
| 1848 | |||
| 1849 | /* prevent overflows */ | ||
| 1850 | if (unlikely( | ||
| 1851 | (iocb.aio_buf != (unsigned long)iocb.aio_buf) || | ||
| 1852 | (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) || | ||
| 1853 | ((ssize_t)iocb.aio_nbytes < 0) | ||
| 1854 | )) { | ||
| 1855 | pr_debug("EINVAL: overflow check\n"); | ||
| 1856 | return -EINVAL; | ||
| 1857 | } | ||
| 1858 | |||
| 1859 | req = aio_get_req(ctx); | ||
| 1860 | if (unlikely(!req)) | ||
| 1861 | return -EAGAIN; | ||
| 1862 | |||
| 1863 | err = __io_submit_one(ctx, &iocb, user_iocb, req, compat); | ||
| 1864 | |||
| 1865 | /* Done with the synchronous reference */ | ||
| 1866 | iocb_put(req); | ||
| 1867 | |||
| 1868 | /* | ||
| 1869 | * If err is 0, we'd either done aio_complete() ourselves or have | ||
| 1870 | * arranged for that to be done asynchronously. Anything non-zero | ||
| 1871 | * means that we need to destroy req ourselves. | ||
| 1872 | */ | ||
| 1873 | if (unlikely(err)) { | ||
| 1874 | iocb_destroy(req); | ||
| 1875 | put_reqs_available(ctx, 1); | ||
| 1876 | } | ||
| 1877 | return err; | ||
| 1902 | } | 1878 | } |
| 1903 | 1879 | ||
| 1904 | /* sys_io_submit: | 1880 | /* sys_io_submit: |
| @@ -1997,24 +1973,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id, | |||
| 1997 | } | 1973 | } |
| 1998 | #endif | 1974 | #endif |
| 1999 | 1975 | ||
| 2000 | /* lookup_kiocb | ||
| 2001 | * Finds a given iocb for cancellation. | ||
| 2002 | */ | ||
| 2003 | static struct aio_kiocb * | ||
| 2004 | lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb) | ||
| 2005 | { | ||
| 2006 | struct aio_kiocb *kiocb; | ||
| 2007 | |||
| 2008 | assert_spin_locked(&ctx->ctx_lock); | ||
| 2009 | |||
| 2010 | /* TODO: use a hash or array, this sucks. */ | ||
| 2011 | list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { | ||
| 2012 | if (kiocb->ki_user_iocb == iocb) | ||
| 2013 | return kiocb; | ||
| 2014 | } | ||
| 2015 | return NULL; | ||
| 2016 | } | ||
| 2017 | |||
| 2018 | /* sys_io_cancel: | 1976 | /* sys_io_cancel: |
| 2019 | * Attempts to cancel an iocb previously passed to io_submit. If | 1977 | * Attempts to cancel an iocb previously passed to io_submit. If |
| 2020 | * the operation is successfully cancelled, the resulting event is | 1978 | * the operation is successfully cancelled, the resulting event is |
| @@ -2032,6 +1990,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | |||
| 2032 | struct aio_kiocb *kiocb; | 1990 | struct aio_kiocb *kiocb; |
| 2033 | int ret = -EINVAL; | 1991 | int ret = -EINVAL; |
| 2034 | u32 key; | 1992 | u32 key; |
| 1993 | u64 obj = (u64)(unsigned long)iocb; | ||
| 2035 | 1994 | ||
| 2036 | if (unlikely(get_user(key, &iocb->aio_key))) | 1995 | if (unlikely(get_user(key, &iocb->aio_key))) |
| 2037 | return -EFAULT; | 1996 | return -EFAULT; |
| @@ -2043,10 +2002,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | |||
| 2043 | return -EINVAL; | 2002 | return -EINVAL; |
| 2044 | 2003 | ||
| 2045 | spin_lock_irq(&ctx->ctx_lock); | 2004 | spin_lock_irq(&ctx->ctx_lock); |
| 2046 | kiocb = lookup_kiocb(ctx, iocb); | 2005 | /* TODO: use a hash or array, this sucks. */ |
| 2047 | if (kiocb) { | 2006 | list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { |
| 2048 | ret = kiocb->ki_cancel(&kiocb->rw); | 2007 | if (kiocb->ki_res.obj == obj) { |
| 2049 | list_del_init(&kiocb->ki_list); | 2008 | ret = kiocb->ki_cancel(&kiocb->rw); |
| 2009 | list_del_init(&kiocb->ki_list); | ||
| 2010 | break; | ||
| 2011 | } | ||
| 2050 | } | 2012 | } |
| 2051 | spin_unlock_irq(&ctx->ctx_lock); | 2013 | spin_unlock_irq(&ctx->ctx_lock); |
| 2052 | 2014 | ||
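From userspace nothing changes: the completion's obj field is still the pointer of the submitted iocb (which io_cancel now matches against ki_res.obj) and data echoes aio_data. A minimal raw-syscall sketch of that round trip; error handling is omitted and the file path is arbitrary:

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd = open("/etc/hostname", O_RDONLY);

	syscall(__NR_io_setup, 128, &ctx);

	memset(&cb, 0, sizeof(cb));
	cb.aio_data = 0x1234;			/* comes back in ev.data */
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes = fd;
	cb.aio_buf = (unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);

	syscall(__NR_io_submit, ctx, 1, cbs);
	syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);

	/* ev.obj == (__u64)&cb, ev.data == 0x1234, ev.res == bytes read */
	printf("obj=%llx data=%llx res=%lld\n",
	       (unsigned long long)ev.obj, (unsigned long long)ev.data,
	       (long long)ev.res);

	syscall(__NR_io_destroy, ctx);
	return 0;
}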
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1d49694e6ae3..c5880329ae37 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -6174,7 +6174,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, | |||
| 6174 | * | 6174 | * |
| 6175 | * This is overestimating in most cases. | 6175 | * This is overestimating in most cases. |
| 6176 | */ | 6176 | */ |
| 6177 | qgroup_rsv_size = outstanding_extents * fs_info->nodesize; | 6177 | qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize; |
| 6178 | 6178 | ||
| 6179 | spin_lock(&block_rsv->lock); | 6179 | spin_lock(&block_rsv->lock); |
| 6180 | block_rsv->size = reserve_size; | 6180 | block_rsv->size = reserve_size; |
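The cast matters because both operands are 32-bit, so the product wraps before it is widened for the u64 assignment. A worked illustration with plausible values (64K nodesize, roughly 70k outstanding extents):

	u32 outstanding_extents = 70000, nodesize = 65536;

	u64 wrong = outstanding_extents * nodesize;	 /* 32-bit multiply wraps:
							    4587520000 mod 2^32 = 292552704 */
	u64 right = (u64)outstanding_extents * nodesize; /* 4587520000, as intended */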
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index eb680b715dd6..e659d9d61107 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
| @@ -1922,8 +1922,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans, | |||
| 1922 | int i; | 1922 | int i; |
| 1923 | 1923 | ||
| 1924 | /* Level sanity check */ | 1924 | /* Level sanity check */ |
| 1925 | if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL || | 1925 | if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 || |
| 1926 | root_level < 0 || root_level >= BTRFS_MAX_LEVEL || | 1926 | root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 || |
| 1927 | root_level < cur_level) { | 1927 | root_level < cur_level) { |
| 1928 | btrfs_err_rl(fs_info, | 1928 | btrfs_err_rl(fs_info, |
| 1929 | "%s: bad levels, cur_level=%d root_level=%d", | 1929 | "%s: bad levels, cur_level=%d root_level=%d", |
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 1869ba8e5981..67a6f7d47402 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c | |||
| @@ -2430,8 +2430,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, | |||
| 2430 | bitmap_clear(rbio->dbitmap, pagenr, 1); | 2430 | bitmap_clear(rbio->dbitmap, pagenr, 1); |
| 2431 | kunmap(p); | 2431 | kunmap(p); |
| 2432 | 2432 | ||
| 2433 | for (stripe = 0; stripe < rbio->real_stripes; stripe++) | 2433 | for (stripe = 0; stripe < nr_data; stripe++) |
| 2434 | kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); | 2434 | kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); |
| 2435 | kunmap(p_page); | ||
| 2435 | } | 2436 | } |
| 2436 | 2437 | ||
| 2437 | __free_page(p_page); | 2438 | __free_page(p_page); |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index acdad6d658f5..e4e665f422fc 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
| @@ -1886,8 +1886,10 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) | |||
| 1886 | } | 1886 | } |
| 1887 | } | 1887 | } |
| 1888 | 1888 | ||
| 1889 | static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) | 1889 | static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans) |
| 1890 | { | 1890 | { |
| 1891 | struct btrfs_fs_info *fs_info = trans->fs_info; | ||
| 1892 | |||
| 1891 | /* | 1893 | /* |
| 1892 | * We use writeback_inodes_sb here because if we used | 1894 | * We use writeback_inodes_sb here because if we used |
| 1893 | * btrfs_start_delalloc_roots we would deadlock with fs freeze. | 1895 | * btrfs_start_delalloc_roots we would deadlock with fs freeze. |
| @@ -1897,15 +1899,50 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) | |||
| 1897 | * from already being in a transaction and our join_transaction doesn't | 1899 | * from already being in a transaction and our join_transaction doesn't |
| 1898 | * have to re-take the fs freeze lock. | 1900 | * have to re-take the fs freeze lock. |
| 1899 | */ | 1901 | */ |
| 1900 | if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) | 1902 | if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) { |
| 1901 | writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC); | 1903 | writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC); |
| 1904 | } else { | ||
| 1905 | struct btrfs_pending_snapshot *pending; | ||
| 1906 | struct list_head *head = &trans->transaction->pending_snapshots; | ||
| 1907 | |||
| 1908 | /* | ||
| 1909 | * Flush delalloc for any root that is going to be snapshotted. | ||
| 1910 | * This is done to avoid a corrupted version of files, in the | ||
| 1911 | * snapshots, that had both buffered and direct IO writes (even | ||
| 1912 | * if they were done sequentially) due to an unordered update of | ||
| 1913 | * the inode's size on disk. | ||
| 1914 | */ | ||
| 1915 | list_for_each_entry(pending, head, list) { | ||
| 1916 | int ret; | ||
| 1917 | |||
| 1918 | ret = btrfs_start_delalloc_snapshot(pending->root); | ||
| 1919 | if (ret) | ||
| 1920 | return ret; | ||
| 1921 | } | ||
| 1922 | } | ||
| 1902 | return 0; | 1923 | return 0; |
| 1903 | } | 1924 | } |
| 1904 | 1925 | ||
| 1905 | static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) | 1926 | static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans) |
| 1906 | { | 1927 | { |
| 1907 | if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) | 1928 | struct btrfs_fs_info *fs_info = trans->fs_info; |
| 1929 | |||
| 1930 | if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) { | ||
| 1908 | btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); | 1931 | btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); |
| 1932 | } else { | ||
| 1933 | struct btrfs_pending_snapshot *pending; | ||
| 1934 | struct list_head *head = &trans->transaction->pending_snapshots; | ||
| 1935 | |||
| 1936 | /* | ||
| 1937 | * Wait for any delalloc that we started previously for the roots | ||
| 1938 | * that are going to be snapshotted. This is to avoid a corrupted | ||
| 1939 | * version of files in the snapshots that had both buffered and | ||
| 1940 | * direct IO writes (even if they were done sequentially). | ||
| 1941 | */ | ||
| 1942 | list_for_each_entry(pending, head, list) | ||
| 1943 | btrfs_wait_ordered_extents(pending->root, | ||
| 1944 | U64_MAX, 0, U64_MAX); | ||
| 1945 | } | ||
| 1909 | } | 1946 | } |
| 1910 | 1947 | ||
| 1911 | int btrfs_commit_transaction(struct btrfs_trans_handle *trans) | 1948 | int btrfs_commit_transaction(struct btrfs_trans_handle *trans) |
| @@ -2023,7 +2060,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) | |||
| 2023 | 2060 | ||
| 2024 | extwriter_counter_dec(cur_trans, trans->type); | 2061 | extwriter_counter_dec(cur_trans, trans->type); |
| 2025 | 2062 | ||
| 2026 | ret = btrfs_start_delalloc_flush(fs_info); | 2063 | ret = btrfs_start_delalloc_flush(trans); |
| 2027 | if (ret) | 2064 | if (ret) |
| 2028 | goto cleanup_transaction; | 2065 | goto cleanup_transaction; |
| 2029 | 2066 | ||
| @@ -2039,7 +2076,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) | |||
| 2039 | if (ret) | 2076 | if (ret) |
| 2040 | goto cleanup_transaction; | 2077 | goto cleanup_transaction; |
| 2041 | 2078 | ||
| 2042 | btrfs_wait_delalloc_flush(fs_info); | 2079 | btrfs_wait_delalloc_flush(trans); |
| 2043 | 2080 | ||
| 2044 | btrfs_scrub_pause(fs_info); | 2081 | btrfs_scrub_pause(fs_info); |
| 2045 | /* | 2082 | /* |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f06454a55e00..561884f60d35 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
| @@ -3578,9 +3578,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, | |||
| 3578 | } | 3578 | } |
| 3579 | btrfs_release_path(path); | 3579 | btrfs_release_path(path); |
| 3580 | 3580 | ||
| 3581 | /* find the first key from this transaction again */ | 3581 | /* |
| 3582 | * Find the first key from this transaction again. See the note for | ||
| 3583 | * log_new_dir_dentries: if we're logging a directory recursively we | ||
| 3584 | * won't be holding its i_mutex, which means we can modify the directory | ||
| 3585 | * while we're logging it. If we remove an entry between our first | ||
| 3586 | * search and this search we'll not find the key again and can just | ||
| 3587 | * bail. | ||
| 3588 | */ | ||
| 3582 | ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); | 3589 | ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); |
| 3583 | if (WARN_ON(ret != 0)) | 3590 | if (ret != 0) |
| 3584 | goto done; | 3591 | goto done; |
| 3585 | 3592 | ||
| 3586 | /* | 3593 | /* |
| @@ -4544,6 +4551,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode, | |||
| 4544 | item = btrfs_item_ptr(path->nodes[0], path->slots[0], | 4551 | item = btrfs_item_ptr(path->nodes[0], path->slots[0], |
| 4545 | struct btrfs_inode_item); | 4552 | struct btrfs_inode_item); |
| 4546 | *size_ret = btrfs_inode_size(path->nodes[0], item); | 4553 | *size_ret = btrfs_inode_size(path->nodes[0], item); |
| 4554 | /* | ||
| 4555 | * If the in-memory inode's i_size is smaller than the inode | ||
| 4556 | * size stored in the btree, return the inode's i_size, so | ||
| 4557 | * that we get a correct inode size after replaying the log | ||
| 4558 | * when before a power failure we had a shrinking truncate | ||
| 4559 | * followed by addition of a new name (rename / new hard link). | ||
| 4560 | * Otherwise return the inode size from the btree, to avoid | ||
| 4561 | * data loss when replaying a log due to previously doing a | ||
| 4562 | * write that expands the inode's size and logging a new name | ||
| 4563 | * immediately after. | ||
| 4564 | */ | ||
| 4565 | if (*size_ret > inode->vfs_inode.i_size) | ||
| 4566 | *size_ret = inode->vfs_inode.i_size; | ||
| 4547 | } | 4567 | } |
| 4548 | 4568 | ||
| 4549 | btrfs_release_path(path); | 4569 | btrfs_release_path(path); |
| @@ -4705,15 +4725,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, | |||
| 4705 | struct btrfs_file_extent_item); | 4725 | struct btrfs_file_extent_item); |
| 4706 | 4726 | ||
| 4707 | if (btrfs_file_extent_type(leaf, extent) == | 4727 | if (btrfs_file_extent_type(leaf, extent) == |
| 4708 | BTRFS_FILE_EXTENT_INLINE) { | 4728 | BTRFS_FILE_EXTENT_INLINE) |
| 4709 | len = btrfs_file_extent_ram_bytes(leaf, extent); | ||
| 4710 | ASSERT(len == i_size || | ||
| 4711 | (len == fs_info->sectorsize && | ||
| 4712 | btrfs_file_extent_compression(leaf, extent) != | ||
| 4713 | BTRFS_COMPRESS_NONE) || | ||
| 4714 | (len < i_size && i_size < fs_info->sectorsize)); | ||
| 4715 | return 0; | 4729 | return 0; |
| 4716 | } | ||
| 4717 | 4730 | ||
| 4718 | len = btrfs_file_extent_num_bytes(leaf, extent); | 4731 | len = btrfs_file_extent_num_bytes(leaf, extent); |
| 4719 | /* Last extent goes beyond i_size, no need to log a hole. */ | 4732 | /* Last extent goes beyond i_size, no need to log a hole. */ |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 9024eee889b9..db934ceae9c1 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -6407,7 +6407,7 @@ static void btrfs_end_bio(struct bio *bio) | |||
| 6407 | if (bio_op(bio) == REQ_OP_WRITE) | 6407 | if (bio_op(bio) == REQ_OP_WRITE) |
| 6408 | btrfs_dev_stat_inc_and_print(dev, | 6408 | btrfs_dev_stat_inc_and_print(dev, |
| 6409 | BTRFS_DEV_STAT_WRITE_ERRS); | 6409 | BTRFS_DEV_STAT_WRITE_ERRS); |
| 6410 | else | 6410 | else if (!(bio->bi_opf & REQ_RAHEAD)) |
| 6411 | btrfs_dev_stat_inc_and_print(dev, | 6411 | btrfs_dev_stat_inc_and_print(dev, |
| 6412 | BTRFS_DEV_STAT_READ_ERRS); | 6412 | BTRFS_DEV_STAT_READ_ERRS); |
| 6413 | if (bio->bi_opf & REQ_PREFLUSH) | 6413 | if (bio->bi_opf & REQ_PREFLUSH) |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index e3346628efe2..2d61ddda9bf5 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
| @@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head) | |||
| 524 | struct inode *inode = container_of(head, struct inode, i_rcu); | 524 | struct inode *inode = container_of(head, struct inode, i_rcu); |
| 525 | struct ceph_inode_info *ci = ceph_inode(inode); | 525 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 526 | 526 | ||
| 527 | kfree(ci->i_symlink); | ||
| 527 | kmem_cache_free(ceph_inode_cachep, ci); | 528 | kmem_cache_free(ceph_inode_cachep, ci); |
| 528 | } | 529 | } |
| 529 | 530 | ||
| @@ -566,7 +567,6 @@ void ceph_destroy_inode(struct inode *inode) | |||
| 566 | } | 567 | } |
| 567 | } | 568 | } |
| 568 | 569 | ||
| 569 | kfree(ci->i_symlink); | ||
| 570 | while ((n = rb_first(&ci->i_fragtree)) != NULL) { | 570 | while ((n = rb_first(&ci->i_fragtree)) != NULL) { |
| 571 | frag = rb_entry(n, struct ceph_inode_frag, node); | 571 | frag = rb_entry(n, struct ceph_inode_frag, node); |
| 572 | rb_erase(n, &ci->i_fragtree); | 572 | rb_erase(n, &ci->i_fragtree); |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f9b71c12cc9f..a05bf1d6e1d0 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
| @@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root) | |||
| 559 | tcon->ses->server->echo_interval / HZ); | 559 | tcon->ses->server->echo_interval / HZ); |
| 560 | if (tcon->snapshot_time) | 560 | if (tcon->snapshot_time) |
| 561 | seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); | 561 | seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); |
| 562 | if (tcon->handle_timeout) | ||
| 563 | seq_printf(s, ",handletimeout=%u", tcon->handle_timeout); | ||
| 562 | /* convert actimeo and display it in seconds */ | 564 | /* convert actimeo and display it in seconds */ |
| 563 | seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); | 565 | seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); |
| 564 | 566 | ||
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 38feae812b47..5b18d4585740 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
| @@ -60,6 +60,12 @@ | |||
| 60 | #define CIFS_MAX_ACTIMEO (1 << 30) | 60 | #define CIFS_MAX_ACTIMEO (1 << 30) |
| 61 | 61 | ||
| 62 | /* | 62 | /* |
| 63 | * Max persistent and resilient handle timeout (milliseconds). | ||
| 64 | * Windows durable max was 960000 (16 minutes) | ||
| 65 | */ | ||
| 66 | #define SMB3_MAX_HANDLE_TIMEOUT 960000 | ||
| 67 | |||
| 68 | /* | ||
| 63 | * MAX_REQ is the maximum number of requests that WE will send | 69 | * MAX_REQ is the maximum number of requests that WE will send |
| 64 | * on one socket concurrently. | 70 | * on one socket concurrently. |
| 65 | */ | 71 | */ |
| @@ -586,6 +592,7 @@ struct smb_vol { | |||
| 586 | struct nls_table *local_nls; | 592 | struct nls_table *local_nls; |
| 587 | unsigned int echo_interval; /* echo interval in secs */ | 593 | unsigned int echo_interval; /* echo interval in secs */ |
| 588 | __u64 snapshot_time; /* needed for timewarp tokens */ | 594 | __u64 snapshot_time; /* needed for timewarp tokens */ |
| 595 | __u32 handle_timeout; /* persistent and durable handle timeout in ms */ | ||
| 589 | unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */ | 596 | unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */ |
| 590 | }; | 597 | }; |
| 591 | 598 | ||
| @@ -1058,6 +1065,7 @@ struct cifs_tcon { | |||
| 1058 | __u32 vol_serial_number; | 1065 | __u32 vol_serial_number; |
| 1059 | __le64 vol_create_time; | 1066 | __le64 vol_create_time; |
| 1060 | __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */ | 1067 | __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */ |
| 1068 | __u32 handle_timeout; /* persistent and durable handle timeout in ms */ | ||
| 1061 | __u32 ss_flags; /* sector size flags */ | 1069 | __u32 ss_flags; /* sector size flags */ |
| 1062 | __u32 perf_sector_size; /* best sector size for perf */ | 1070 | __u32 perf_sector_size; /* best sector size for perf */ |
| 1063 | __u32 max_chunks; | 1071 | __u32 max_chunks; |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a8e9738db691..4c0e44489f21 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -103,7 +103,7 @@ enum { | |||
| 103 | Opt_cruid, Opt_gid, Opt_file_mode, | 103 | Opt_cruid, Opt_gid, Opt_file_mode, |
| 104 | Opt_dirmode, Opt_port, | 104 | Opt_dirmode, Opt_port, |
| 105 | Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo, | 105 | Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo, |
| 106 | Opt_echo_interval, Opt_max_credits, | 106 | Opt_echo_interval, Opt_max_credits, Opt_handletimeout, |
| 107 | Opt_snapshot, | 107 | Opt_snapshot, |
| 108 | 108 | ||
| 109 | /* Mount options which take string value */ | 109 | /* Mount options which take string value */ |
| @@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = { | |||
| 208 | { Opt_rsize, "rsize=%s" }, | 208 | { Opt_rsize, "rsize=%s" }, |
| 209 | { Opt_wsize, "wsize=%s" }, | 209 | { Opt_wsize, "wsize=%s" }, |
| 210 | { Opt_actimeo, "actimeo=%s" }, | 210 | { Opt_actimeo, "actimeo=%s" }, |
| 211 | { Opt_handletimeout, "handletimeout=%s" }, | ||
| 211 | { Opt_echo_interval, "echo_interval=%s" }, | 212 | { Opt_echo_interval, "echo_interval=%s" }, |
| 212 | { Opt_max_credits, "max_credits=%s" }, | 213 | { Opt_max_credits, "max_credits=%s" }, |
| 213 | { Opt_snapshot, "snapshot=%s" }, | 214 | { Opt_snapshot, "snapshot=%s" }, |
| @@ -1619,6 +1620,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
| 1619 | 1620 | ||
| 1620 | vol->actimeo = CIFS_DEF_ACTIMEO; | 1621 | vol->actimeo = CIFS_DEF_ACTIMEO; |
| 1621 | 1622 | ||
| 1623 | /* Most clients set timeout to 0, which allows the server to use its default */ | ||
| 1624 | vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */ | ||
| 1625 | |||
| 1622 | /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */ | 1626 | /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */ |
| 1623 | vol->ops = &smb30_operations; | 1627 | vol->ops = &smb30_operations; |
| 1624 | vol->vals = &smbdefault_values; | 1628 | vol->vals = &smbdefault_values; |
| @@ -2017,6 +2021,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
| 2017 | goto cifs_parse_mount_err; | 2021 | goto cifs_parse_mount_err; |
| 2018 | } | 2022 | } |
| 2019 | break; | 2023 | break; |
| 2024 | case Opt_handletimeout: | ||
| 2025 | if (get_option_ul(args, &option)) { | ||
| 2026 | cifs_dbg(VFS, "%s: Invalid handletimeout value\n", | ||
| 2027 | __func__); | ||
| 2028 | goto cifs_parse_mount_err; | ||
| 2029 | } | ||
| 2030 | vol->handle_timeout = option; | ||
| 2031 | if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) { | ||
| 2032 | cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n"); | ||
| 2033 | goto cifs_parse_mount_err; | ||
| 2034 | } | ||
| 2035 | break; | ||
| 2020 | case Opt_echo_interval: | 2036 | case Opt_echo_interval: |
| 2021 | if (get_option_ul(args, &option)) { | 2037 | if (get_option_ul(args, &option)) { |
| 2022 | cifs_dbg(VFS, "%s: Invalid echo interval value\n", | 2038 | cifs_dbg(VFS, "%s: Invalid echo interval value\n", |
| @@ -3183,6 +3199,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info) | |||
| 3183 | return 0; | 3199 | return 0; |
| 3184 | if (tcon->snapshot_time != volume_info->snapshot_time) | 3200 | if (tcon->snapshot_time != volume_info->snapshot_time) |
| 3185 | return 0; | 3201 | return 0; |
| 3202 | if (tcon->handle_timeout != volume_info->handle_timeout) | ||
| 3203 | return 0; | ||
| 3186 | return 1; | 3204 | return 1; |
| 3187 | } | 3205 | } |
| 3188 | 3206 | ||
| @@ -3297,6 +3315,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) | |||
| 3297 | tcon->snapshot_time = volume_info->snapshot_time; | 3315 | tcon->snapshot_time = volume_info->snapshot_time; |
| 3298 | } | 3316 | } |
| 3299 | 3317 | ||
| 3318 | if (volume_info->handle_timeout) { | ||
| 3319 | if (ses->server->vals->protocol_id == 0) { | ||
| 3320 | cifs_dbg(VFS, | ||
| 3321 | "Use SMB2.1 or later for handle timeout option\n"); | ||
| 3322 | rc = -EOPNOTSUPP; | ||
| 3323 | goto out_fail; | ||
| 3324 | } else | ||
| 3325 | tcon->handle_timeout = volume_info->handle_timeout; | ||
| 3326 | } | ||
| 3327 | |||
| 3300 | tcon->ses = ses; | 3328 | tcon->ses = ses; |
| 3301 | if (volume_info->password) { | 3329 | if (volume_info->password) { |
| 3302 | tcon->password = kstrdup(volume_info->password, GFP_KERNEL); | 3330 | tcon->password = kstrdup(volume_info->password, GFP_KERNEL); |
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index b204e84b87fb..54bffb2a1786 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c | |||
| @@ -68,13 +68,15 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, | |||
| 68 | 68 | ||
| 69 | 69 | ||
| 70 | if (oparms->tcon->use_resilient) { | 70 | if (oparms->tcon->use_resilient) { |
| 71 | nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */ | 71 | /* default timeout is 0, servers pick default (120 seconds) */ |
| 72 | nr_ioctl_req.Timeout = | ||
| 73 | cpu_to_le32(oparms->tcon->handle_timeout); | ||
| 72 | nr_ioctl_req.Reserved = 0; | 74 | nr_ioctl_req.Reserved = 0; |
| 73 | rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, | 75 | rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, |
| 74 | fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, | 76 | fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, |
| 75 | true /* is_fsctl */, | 77 | true /* is_fsctl */, |
| 76 | (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), | 78 | (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), |
| 77 | NULL, NULL /* no return info */); | 79 | CIFSMaxBufSize, NULL, NULL /* no return info */); |
| 78 | if (rc == -EOPNOTSUPP) { | 80 | if (rc == -EOPNOTSUPP) { |
| 79 | cifs_dbg(VFS, | 81 | cifs_dbg(VFS, |
| 80 | "resiliency not supported by server, disabling\n"); | 82 | "resiliency not supported by server, disabling\n"); |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 1022a3771e14..00225e699d03 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
| @@ -581,7 +581,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) | |||
| 581 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 581 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
| 582 | FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, | 582 | FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, |
| 583 | NULL /* no data input */, 0 /* no data input */, | 583 | NULL /* no data input */, 0 /* no data input */, |
| 584 | (char **)&out_buf, &ret_data_len); | 584 | CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); |
| 585 | if (rc == -EOPNOTSUPP) { | 585 | if (rc == -EOPNOTSUPP) { |
| 586 | cifs_dbg(FYI, | 586 | cifs_dbg(FYI, |
| 587 | "server does not support query network interfaces\n"); | 587 | "server does not support query network interfaces\n"); |
| @@ -717,32 +717,28 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) | |||
| 717 | oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId); | 717 | oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId); |
| 718 | #endif /* CIFS_DEBUG2 */ | 718 | #endif /* CIFS_DEBUG2 */ |
| 719 | 719 | ||
| 720 | if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) | ||
| 721 | oplock = smb2_parse_lease_state(server, o_rsp, | ||
| 722 | &oparms.fid->epoch, | ||
| 723 | oparms.fid->lease_key); | ||
| 724 | else | ||
| 725 | goto oshr_exit; | ||
| 726 | |||
| 727 | |||
| 728 | memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid)); | 720 | memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid)); |
| 729 | tcon->crfid.tcon = tcon; | 721 | tcon->crfid.tcon = tcon; |
| 730 | tcon->crfid.is_valid = true; | 722 | tcon->crfid.is_valid = true; |
| 731 | kref_init(&tcon->crfid.refcount); | 723 | kref_init(&tcon->crfid.refcount); |
| 732 | kref_get(&tcon->crfid.refcount); | ||
| 733 | 724 | ||
| 725 | if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) { | ||
| 726 | kref_get(&tcon->crfid.refcount); | ||
| 727 | oplock = smb2_parse_lease_state(server, o_rsp, | ||
| 728 | &oparms.fid->epoch, | ||
| 729 | oparms.fid->lease_key); | ||
| 730 | } else | ||
| 731 | goto oshr_exit; | ||
| 734 | 732 | ||
| 735 | qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; | 733 | qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; |
| 736 | if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) | 734 | if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) |
| 737 | goto oshr_exit; | 735 | goto oshr_exit; |
| 738 | rc = smb2_validate_and_copy_iov( | 736 | if (!smb2_validate_and_copy_iov( |
| 739 | le16_to_cpu(qi_rsp->OutputBufferOffset), | 737 | le16_to_cpu(qi_rsp->OutputBufferOffset), |
| 740 | sizeof(struct smb2_file_all_info), | 738 | sizeof(struct smb2_file_all_info), |
| 741 | &rsp_iov[1], sizeof(struct smb2_file_all_info), | 739 | &rsp_iov[1], sizeof(struct smb2_file_all_info), |
| 742 | (char *)&tcon->crfid.file_all_info); | 740 | (char *)&tcon->crfid.file_all_info)) |
| 743 | if (rc) | 741 | tcon->crfid.file_all_info_is_valid = 1; |
| 744 | goto oshr_exit; | ||
| 745 | tcon->crfid.file_all_info_is_valid = 1; | ||
| 746 | 742 | ||
| 747 | oshr_exit: | 743 | oshr_exit: |
| 748 | mutex_unlock(&tcon->crfid.fid_mutex); | 744 | mutex_unlock(&tcon->crfid.fid_mutex); |
| @@ -1299,7 +1295,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1299 | 1295 | ||
| 1300 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, | 1296 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, |
| 1301 | FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, | 1297 | FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, |
| 1302 | NULL, 0 /* no input */, | 1298 | NULL, 0 /* no input */, CIFSMaxBufSize, |
| 1303 | (char **)&res_key, &ret_data_len); | 1299 | (char **)&res_key, &ret_data_len); |
| 1304 | 1300 | ||
| 1305 | if (rc) { | 1301 | if (rc) { |
| @@ -1404,7 +1400,7 @@ smb2_ioctl_query_info(const unsigned int xid, | |||
| 1404 | rc = SMB2_ioctl_init(tcon, &rqst[1], | 1400 | rc = SMB2_ioctl_init(tcon, &rqst[1], |
| 1405 | COMPOUND_FID, COMPOUND_FID, | 1401 | COMPOUND_FID, COMPOUND_FID, |
| 1406 | qi.info_type, true, NULL, | 1402 | qi.info_type, true, NULL, |
| 1407 | 0); | 1403 | 0, CIFSMaxBufSize); |
| 1408 | } | 1404 | } |
| 1409 | } else if (qi.flags == PASSTHRU_QUERY_INFO) { | 1405 | } else if (qi.flags == PASSTHRU_QUERY_INFO) { |
| 1410 | memset(&qi_iov, 0, sizeof(qi_iov)); | 1406 | memset(&qi_iov, 0, sizeof(qi_iov)); |
| @@ -1532,8 +1528,8 @@ smb2_copychunk_range(const unsigned int xid, | |||
| 1532 | rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, | 1528 | rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, |
| 1533 | trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, | 1529 | trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, |
| 1534 | true /* is_fsctl */, (char *)pcchunk, | 1530 | true /* is_fsctl */, (char *)pcchunk, |
| 1535 | sizeof(struct copychunk_ioctl), (char **)&retbuf, | 1531 | sizeof(struct copychunk_ioctl), CIFSMaxBufSize, |
| 1536 | &ret_data_len); | 1532 | (char **)&retbuf, &ret_data_len); |
| 1537 | if (rc == 0) { | 1533 | if (rc == 0) { |
| 1538 | if (ret_data_len != | 1534 | if (ret_data_len != |
| 1539 | sizeof(struct copychunk_ioctl_rsp)) { | 1535 | sizeof(struct copychunk_ioctl_rsp)) { |
| @@ -1693,7 +1689,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1693 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 1689 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
| 1694 | cfile->fid.volatile_fid, FSCTL_SET_SPARSE, | 1690 | cfile->fid.volatile_fid, FSCTL_SET_SPARSE, |
| 1695 | true /* is_fctl */, | 1691 | true /* is_fctl */, |
| 1696 | &setsparse, 1, NULL, NULL); | 1692 | &setsparse, 1, CIFSMaxBufSize, NULL, NULL); |
| 1697 | if (rc) { | 1693 | if (rc) { |
| 1698 | tcon->broken_sparse_sup = true; | 1694 | tcon->broken_sparse_sup = true; |
| 1699 | cifs_dbg(FYI, "set sparse rc = %d\n", rc); | 1695 | cifs_dbg(FYI, "set sparse rc = %d\n", rc); |
| @@ -1766,7 +1762,7 @@ smb2_duplicate_extents(const unsigned int xid, | |||
| 1766 | true /* is_fsctl */, | 1762 | true /* is_fsctl */, |
| 1767 | (char *)&dup_ext_buf, | 1763 | (char *)&dup_ext_buf, |
| 1768 | sizeof(struct duplicate_extents_to_file), | 1764 | sizeof(struct duplicate_extents_to_file), |
| 1769 | NULL, | 1765 | CIFSMaxBufSize, NULL, |
| 1770 | &ret_data_len); | 1766 | &ret_data_len); |
| 1771 | 1767 | ||
| 1772 | if (ret_data_len > 0) | 1768 | if (ret_data_len > 0) |
| @@ -1801,7 +1797,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1801 | true /* is_fsctl */, | 1797 | true /* is_fsctl */, |
| 1802 | (char *)&integr_info, | 1798 | (char *)&integr_info, |
| 1803 | sizeof(struct fsctl_set_integrity_information_req), | 1799 | sizeof(struct fsctl_set_integrity_information_req), |
| 1804 | NULL, | 1800 | CIFSMaxBufSize, NULL, |
| 1805 | &ret_data_len); | 1801 | &ret_data_len); |
| 1806 | 1802 | ||
| 1807 | } | 1803 | } |
| @@ -1809,6 +1805,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1809 | /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ | 1805 | /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ |
| 1810 | #define GMT_TOKEN_SIZE 50 | 1806 | #define GMT_TOKEN_SIZE 50 |
| 1811 | 1807 | ||
| 1808 | #define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */ | ||
| 1809 | |||
| 1812 | /* | 1810 | /* |
| 1813 | * Input buffer contains (empty) struct smb_snapshot array with size filled in | 1811 | * Input buffer contains (empty) struct smb_snapshot array with size filled in |
| 1814 | * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 | 1812 | * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 |
| @@ -1820,13 +1818,29 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1820 | char *retbuf = NULL; | 1818 | char *retbuf = NULL; |
| 1821 | unsigned int ret_data_len = 0; | 1819 | unsigned int ret_data_len = 0; |
| 1822 | int rc; | 1820 | int rc; |
| 1821 | u32 max_response_size; | ||
| 1823 | struct smb_snapshot_array snapshot_in; | 1822 | struct smb_snapshot_array snapshot_in; |
| 1824 | 1823 | ||
| 1824 | if (get_user(ret_data_len, (unsigned int __user *)ioc_buf)) | ||
| 1825 | return -EFAULT; | ||
| 1826 | |||
| 1827 | /* | ||
| 1828 | * Note that for snapshot queries, servers like Azure expect that | ||
| 1829 | * the first query be minimal size (and just used to get the number/size | ||
| 1830 | * of previous versions) so response size must be specified as EXACTLY | ||
| 1831 | * sizeof(struct snapshot_array) which is 16 when rounded up to multiple | ||
| 1832 | * of eight bytes. | ||
| 1833 | */ | ||
| 1834 | if (ret_data_len == 0) | ||
| 1835 | max_response_size = MIN_SNAPSHOT_ARRAY_SIZE; | ||
| 1836 | else | ||
| 1837 | max_response_size = CIFSMaxBufSize; | ||
| 1838 | |||
| 1825 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 1839 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
| 1826 | cfile->fid.volatile_fid, | 1840 | cfile->fid.volatile_fid, |
| 1827 | FSCTL_SRV_ENUMERATE_SNAPSHOTS, | 1841 | FSCTL_SRV_ENUMERATE_SNAPSHOTS, |
| 1828 | true /* is_fsctl */, | 1842 | true /* is_fsctl */, |
| 1829 | NULL, 0 /* no input data */, | 1843 | NULL, 0 /* no input data */, max_response_size, |
| 1830 | (char **)&retbuf, | 1844 | (char **)&retbuf, |
| 1831 | &ret_data_len); | 1845 | &ret_data_len); |
| 1832 | cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n", | 1846 | cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n", |
| @@ -2304,7 +2318,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, | |||
| 2304 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 2318 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
| 2305 | FSCTL_DFS_GET_REFERRALS, | 2319 | FSCTL_DFS_GET_REFERRALS, |
| 2306 | true /* is_fsctl */, | 2320 | true /* is_fsctl */, |
| 2307 | (char *)dfs_req, dfs_req_size, | 2321 | (char *)dfs_req, dfs_req_size, CIFSMaxBufSize, |
| 2308 | (char **)&dfs_rsp, &dfs_rsp_size); | 2322 | (char **)&dfs_rsp, &dfs_rsp_size); |
| 2309 | } while (rc == -EAGAIN); | 2323 | } while (rc == -EAGAIN); |
| 2310 | 2324 | ||
| @@ -2658,7 +2672,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, | |||
| 2658 | rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid, | 2672 | rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid, |
| 2659 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, | 2673 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, |
| 2660 | true /* is_fctl */, (char *)&fsctl_buf, | 2674 | true /* is_fctl */, (char *)&fsctl_buf, |
| 2661 | sizeof(struct file_zero_data_information)); | 2675 | sizeof(struct file_zero_data_information), |
| 2676 | CIFSMaxBufSize); | ||
| 2662 | if (rc) | 2677 | if (rc) |
| 2663 | goto zero_range_exit; | 2678 | goto zero_range_exit; |
| 2664 | 2679 | ||
| @@ -2735,7 +2750,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, | |||
| 2735 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 2750 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
| 2736 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, | 2751 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, |
| 2737 | true /* is_fctl */, (char *)&fsctl_buf, | 2752 | true /* is_fctl */, (char *)&fsctl_buf, |
| 2738 | sizeof(struct file_zero_data_information), NULL, NULL); | 2753 | sizeof(struct file_zero_data_information), |
| 2754 | CIFSMaxBufSize, NULL, NULL); | ||
| 2739 | free_xid(xid); | 2755 | free_xid(xid); |
| 2740 | return rc; | 2756 | return rc; |
| 2741 | } | 2757 | } |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 21ac19ff19cb..21ad01d55ab2 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
| @@ -1002,7 +1002,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
| 1002 | 1002 | ||
| 1003 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 1003 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
| 1004 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, | 1004 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, |
| 1005 | (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen); | 1005 | (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, |
| 1006 | (char **)&pneg_rsp, &rsplen); | ||
| 1006 | if (rc == -EOPNOTSUPP) { | 1007 | if (rc == -EOPNOTSUPP) { |
| 1007 | /* | 1008 | /* |
| 1008 | * Old Windows versions or Netapp SMB server can return | 1009 | * Old Windows versions or Netapp SMB server can return |
| @@ -1858,8 +1859,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, | |||
| 1858 | } | 1859 | } |
| 1859 | 1860 | ||
| 1860 | static struct create_durable_v2 * | 1861 | static struct create_durable_v2 * |
| 1861 | create_durable_v2_buf(struct cifs_fid *pfid) | 1862 | create_durable_v2_buf(struct cifs_open_parms *oparms) |
| 1862 | { | 1863 | { |
| 1864 | struct cifs_fid *pfid = oparms->fid; | ||
| 1863 | struct create_durable_v2 *buf; | 1865 | struct create_durable_v2 *buf; |
| 1864 | 1866 | ||
| 1865 | buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); | 1867 | buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); |
| @@ -1873,7 +1875,14 @@ create_durable_v2_buf(struct cifs_fid *pfid) | |||
| 1873 | (struct create_durable_v2, Name)); | 1875 | (struct create_durable_v2, Name)); |
| 1874 | buf->ccontext.NameLength = cpu_to_le16(4); | 1876 | buf->ccontext.NameLength = cpu_to_le16(4); |
| 1875 | 1877 | ||
| 1876 | buf->dcontext.Timeout = 0; /* Should this be configurable by workload */ | 1878 | /* |
| 1879 | * NB: Handle timeout defaults to 0, which allows server to choose | ||
| 1880 | * (most servers default to 120 seconds) and most clients default to 0. | ||
| 1881 | * This can be overridden at mount ("handletimeout=") if the user wants | ||
| 1882 | * a different persistent (or resilient) handle timeout for all opens | ||
| 1883 | * opens on a particular SMB3 mount. | ||
| 1884 | */ | ||
| 1885 | buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); | ||
| 1877 | buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); | 1886 | buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); |
| 1878 | generate_random_uuid(buf->dcontext.CreateGuid); | 1887 | generate_random_uuid(buf->dcontext.CreateGuid); |
| 1879 | memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); | 1888 | memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); |
| @@ -1926,7 +1935,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, | |||
| 1926 | struct smb2_create_req *req = iov[0].iov_base; | 1935 | struct smb2_create_req *req = iov[0].iov_base; |
| 1927 | unsigned int num = *num_iovec; | 1936 | unsigned int num = *num_iovec; |
| 1928 | 1937 | ||
| 1929 | iov[num].iov_base = create_durable_v2_buf(oparms->fid); | 1938 | iov[num].iov_base = create_durable_v2_buf(oparms); |
| 1930 | if (iov[num].iov_base == NULL) | 1939 | if (iov[num].iov_base == NULL) |
| 1931 | return -ENOMEM; | 1940 | return -ENOMEM; |
| 1932 | iov[num].iov_len = sizeof(struct create_durable_v2); | 1941 | iov[num].iov_len = sizeof(struct create_durable_v2); |
| @@ -2478,7 +2487,8 @@ creat_exit: | |||
| 2478 | int | 2487 | int |
| 2479 | SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, | 2488 | SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, |
| 2480 | u64 persistent_fid, u64 volatile_fid, u32 opcode, | 2489 | u64 persistent_fid, u64 volatile_fid, u32 opcode, |
| 2481 | bool is_fsctl, char *in_data, u32 indatalen) | 2490 | bool is_fsctl, char *in_data, u32 indatalen, |
| 2491 | __u32 max_response_size) | ||
| 2482 | { | 2492 | { |
| 2483 | struct smb2_ioctl_req *req; | 2493 | struct smb2_ioctl_req *req; |
| 2484 | struct kvec *iov = rqst->rq_iov; | 2494 | struct kvec *iov = rqst->rq_iov; |
| @@ -2520,16 +2530,21 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, | |||
| 2520 | req->OutputCount = 0; /* MBZ */ | 2530 | req->OutputCount = 0; /* MBZ */ |
| 2521 | 2531 | ||
| 2522 | /* | 2532 | /* |
| 2523 | * Could increase MaxOutputResponse, but that would require more | 2533 | * In most cases max_response_size is set to 16K (CIFSMaxBufSize) |
| 2524 | * than one credit. Windows typically sets this smaller, but for some | 2534 | * We could increase default MaxOutputResponse, but that could require |
| 2535 | * more credits. Windows typically sets this smaller, but for some | ||
| 2525 | * ioctls it may be useful to allow server to send more. No point | 2536 | * ioctls it may be useful to allow server to send more. No point |
| 2526 | * limiting what the server can send as long as fits in one credit | 2537 | * limiting what the server can send as long as fits in one credit |
| 2527 | * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE | 2538 | * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want |
| 2528 | * (by default, note that it can be overridden to make max larger) | 2539 | * to increase this limit up in the future. |
| 2529 | * in responses (except for read responses which can be bigger. | 2540 | * Note that for snapshot queries, servers like Azure expect that |
| 2530 | * We may want to bump this limit up | 2541 | * the first query be minimal size (and just used to get the number/size |
| 2542 | * of previous versions) so response size must be specified as EXACTLY | ||
| 2543 | * sizeof(struct snapshot_array) which is 16 when rounded up to multiple | ||
| 2544 | * of eight bytes. Currently that is the only case where we set max | ||
| 2545 | * response size smaller. | ||
| 2531 | */ | 2546 | */ |
| 2532 | req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize); | 2547 | req->MaxOutputResponse = cpu_to_le32(max_response_size); |
| 2533 | 2548 | ||
| 2534 | if (is_fsctl) | 2549 | if (is_fsctl) |
| 2535 | req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); | 2550 | req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); |
| @@ -2550,13 +2565,14 @@ SMB2_ioctl_free(struct smb_rqst *rqst) | |||
| 2550 | cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ | 2565 | cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ |
| 2551 | } | 2566 | } |
| 2552 | 2567 | ||
| 2568 | |||
| 2553 | /* | 2569 | /* |
| 2554 | * SMB2 IOCTL is used for both IOCTLs and FSCTLs | 2570 | * SMB2 IOCTL is used for both IOCTLs and FSCTLs |
| 2555 | */ | 2571 | */ |
| 2556 | int | 2572 | int |
| 2557 | SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | 2573 | SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, |
| 2558 | u64 volatile_fid, u32 opcode, bool is_fsctl, | 2574 | u64 volatile_fid, u32 opcode, bool is_fsctl, |
| 2559 | char *in_data, u32 indatalen, | 2575 | char *in_data, u32 indatalen, u32 max_out_data_len, |
| 2560 | char **out_data, u32 *plen /* returned data len */) | 2576 | char **out_data, u32 *plen /* returned data len */) |
| 2561 | { | 2577 | { |
| 2562 | struct smb_rqst rqst; | 2578 | struct smb_rqst rqst; |
| @@ -2593,8 +2609,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 2593 | rqst.rq_iov = iov; | 2609 | rqst.rq_iov = iov; |
| 2594 | rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; | 2610 | rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; |
| 2595 | 2611 | ||
| 2596 | rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, | 2612 | rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode, |
| 2597 | opcode, is_fsctl, in_data, indatalen); | 2613 | is_fsctl, in_data, indatalen, max_out_data_len); |
| 2598 | if (rc) | 2614 | if (rc) |
| 2599 | goto ioctl_exit; | 2615 | goto ioctl_exit; |
| 2600 | 2616 | ||
| @@ -2672,7 +2688,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2672 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, | 2688 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, |
| 2673 | FSCTL_SET_COMPRESSION, true /* is_fsctl */, | 2689 | FSCTL_SET_COMPRESSION, true /* is_fsctl */, |
| 2674 | (char *)&fsctl_input /* data input */, | 2690 | (char *)&fsctl_input /* data input */, |
| 2675 | 2 /* in data len */, &ret_data /* out data */, NULL); | 2691 | 2 /* in data len */, CIFSMaxBufSize /* max out data */, |
| 2692 | &ret_data /* out data */, NULL); | ||
| 2676 | 2693 | ||
| 2677 | cifs_dbg(FYI, "set compression rc %d\n", rc); | 2694 | cifs_dbg(FYI, "set compression rc %d\n", rc); |
| 2678 | 2695 | ||
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 3c32d0cfea69..52df125e9189 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h | |||
| @@ -142,11 +142,12 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, | |||
| 142 | extern void SMB2_open_free(struct smb_rqst *rqst); | 142 | extern void SMB2_open_free(struct smb_rqst *rqst); |
| 143 | extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, | 143 | extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, |
| 144 | u64 persistent_fid, u64 volatile_fid, u32 opcode, | 144 | u64 persistent_fid, u64 volatile_fid, u32 opcode, |
| 145 | bool is_fsctl, char *in_data, u32 indatalen, | 145 | bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen, |
| 146 | char **out_data, u32 *plen /* returned data len */); | 146 | char **out_data, u32 *plen /* returned data len */); |
| 147 | extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, | 147 | extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, |
| 148 | u64 persistent_fid, u64 volatile_fid, u32 opcode, | 148 | u64 persistent_fid, u64 volatile_fid, u32 opcode, |
| 149 | bool is_fsctl, char *in_data, u32 indatalen); | 149 | bool is_fsctl, char *in_data, u32 indatalen, |
| 150 | __u32 max_response_size); | ||
| 150 | extern void SMB2_ioctl_free(struct smb_rqst *rqst); | 151 | extern void SMB2_ioctl_free(struct smb_rqst *rqst); |
| 151 | extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, | 152 | extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, |
| 152 | u64 persistent_file_id, u64 volatile_file_id); | 153 | u64 persistent_file_id, u64 volatile_file_id); |
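Taken together, the cifs changes above stop hard-coding CIFSMaxBufSize inside the ioctl request builder and instead make every caller of SMB2_ioctl()/SMB2_ioctl_init() pass an explicit maximum response size. A condensed sketch of the resulting call pattern follows; it is illustrative only, follows the updated prototype in smb2proto.h, and assumes the cifs headers plus the file-local MIN_SNAPSHOT_ARRAY_SIZE constant from smb2ops.c. The helper name and its parameters are placeholders, not part of the patch.

/*
 * Sketch: pick the max response size the way smb3_enum_snapshots() now does,
 * then pass it through the widened SMB2_ioctl() signature.
 */
static int enum_snapshots_sketch(unsigned int xid, struct cifs_tcon *tcon,
				 struct cifsFileInfo *cfile, u32 user_buf_len,
				 char **retbuf, u32 *ret_data_len)
{
	/*
	 * Servers such as Azure want the initial probe query limited to the
	 * 16-byte snapshot array header; only a follow-up query asks for a
	 * full 16K (CIFSMaxBufSize) response.
	 */
	u32 max_rsp = user_buf_len ? CIFSMaxBufSize : MIN_SNAPSHOT_ARRAY_SIZE;

	return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			  cfile->fid.volatile_fid,
			  FSCTL_SRV_ENUMERATE_SNAPSHOTS,
			  true /* is_fsctl */,
			  NULL, 0 /* no input data */, max_rsp,
			  retbuf, ret_data_len);
}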
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 95b5e78c22b1..f25daa207421 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c | |||
| @@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root) | |||
| 163 | return 0; | 163 | return 0; |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | static void debugfs_evict_inode(struct inode *inode) | 166 | static void debugfs_i_callback(struct rcu_head *head) |
| 167 | { | 167 | { |
| 168 | truncate_inode_pages_final(&inode->i_data); | 168 | struct inode *inode = container_of(head, struct inode, i_rcu); |
| 169 | clear_inode(inode); | ||
| 170 | if (S_ISLNK(inode->i_mode)) | 169 | if (S_ISLNK(inode->i_mode)) |
| 171 | kfree(inode->i_link); | 170 | kfree(inode->i_link); |
| 171 | free_inode_nonrcu(inode); | ||
| 172 | } | ||
| 173 | |||
| 174 | static void debugfs_destroy_inode(struct inode *inode) | ||
| 175 | { | ||
| 176 | call_rcu(&inode->i_rcu, debugfs_i_callback); | ||
| 172 | } | 177 | } |
| 173 | 178 | ||
| 174 | static const struct super_operations debugfs_super_operations = { | 179 | static const struct super_operations debugfs_super_operations = { |
| 175 | .statfs = simple_statfs, | 180 | .statfs = simple_statfs, |
| 176 | .remount_fs = debugfs_remount, | 181 | .remount_fs = debugfs_remount, |
| 177 | .show_options = debugfs_show_options, | 182 | .show_options = debugfs_show_options, |
| 178 | .evict_inode = debugfs_evict_inode, | 183 | .destroy_inode = debugfs_destroy_inode, |
| 179 | }; | 184 | }; |
| 180 | 185 | ||
| 181 | static void debugfs_release_dentry(struct dentry *dentry) | 186 | static void debugfs_release_dentry(struct dentry *dentry) |
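The debugfs hunk above, like the ceph and jffs2 hunks elsewhere in this section, moves the freeing of data that lockless (RCU-walk) path lookup may still dereference, such as a symlink body, out of the synchronous teardown path and into an RCU callback, presumably to close a use-after-free window against concurrent lookups. A minimal sketch of the shared pattern for a hypothetical "foofs" filesystem (the foofs names are invented for illustration; the VFS helpers are the ones used in the hunk above):

/* Free RCU-visible per-inode data only after a grace period has elapsed. */
static void foofs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);	/* no RCU walker can reach it now */
	free_inode_nonrcu(inode);
}

static void foofs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, foofs_i_callback);
}

static const struct super_operations foofs_super_operations = {
	.statfs		= simple_statfs,
	.destroy_inode	= foofs_destroy_inode,
};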
diff --git a/fs/fs_parser.c b/fs/fs_parser.c index 842e8f749db6..570d71043acf 100644 --- a/fs/fs_parser.c +++ b/fs/fs_parser.c | |||
| @@ -410,7 +410,7 @@ bool fs_validate_description(const struct fs_parameter_description *desc) | |||
| 410 | for (param = desc->specs; param->name; param++) { | 410 | for (param = desc->specs; param->name; param++) { |
| 411 | if (param->opt == e->opt && | 411 | if (param->opt == e->opt && |
| 412 | param->type != fs_param_is_enum) { | 412 | param->type != fs_param_is_enum) { |
| 413 | pr_err("VALIDATE %s: e[%lu] enum val for %s\n", | 413 | pr_err("VALIDATE %s: e[%tu] enum val for %s\n", |
| 414 | name, e - desc->enums, param->name); | 414 | name, e - desc->enums, param->name); |
| 415 | good = false; | 415 | good = false; |
| 416 | } | 416 | } |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index ec32fece5e1e..9285dd4f4b1c 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
| @@ -755,11 +755,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, | |||
| 755 | umode_t mode, dev_t dev) | 755 | umode_t mode, dev_t dev) |
| 756 | { | 756 | { |
| 757 | struct inode *inode; | 757 | struct inode *inode; |
| 758 | struct resv_map *resv_map; | 758 | struct resv_map *resv_map = NULL; |
| 759 | 759 | ||
| 760 | resv_map = resv_map_alloc(); | 760 | /* |
| 761 | if (!resv_map) | 761 | * Reserve maps are only needed for inodes that can have associated |
| 762 | return NULL; | 762 | * page allocations. |
| 763 | */ | ||
| 764 | if (S_ISREG(mode) || S_ISLNK(mode)) { | ||
| 765 | resv_map = resv_map_alloc(); | ||
| 766 | if (!resv_map) | ||
| 767 | return NULL; | ||
| 768 | } | ||
| 763 | 769 | ||
| 764 | inode = new_inode(sb); | 770 | inode = new_inode(sb); |
| 765 | if (inode) { | 771 | if (inode) { |
| @@ -794,8 +800,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, | |||
| 794 | break; | 800 | break; |
| 795 | } | 801 | } |
| 796 | lockdep_annotate_inode_mutex_key(inode); | 802 | lockdep_annotate_inode_mutex_key(inode); |
| 797 | } else | 803 | } else { |
| 798 | kref_put(&resv_map->refs, resv_map_release); | 804 | if (resv_map) |
| 805 | kref_put(&resv_map->refs, resv_map_release); | ||
| 806 | } | ||
| 799 | 807 | ||
| 800 | return inode; | 808 | return inode; |
| 801 | } | 809 | } |
diff --git a/fs/io_uring.c b/fs/io_uring.c index 6aaa30580a2b..07d6ef195d05 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c | |||
| @@ -1022,6 +1022,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, | |||
| 1022 | 1022 | ||
| 1023 | ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count); | 1023 | ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count); |
| 1024 | if (!ret) { | 1024 | if (!ret) { |
| 1025 | ssize_t ret2; | ||
| 1026 | |||
| 1025 | /* | 1027 | /* |
| 1026 | * Open-code file_start_write here to grab freeze protection, | 1028 | * Open-code file_start_write here to grab freeze protection, |
| 1027 | * which will be released by another thread in | 1029 | * which will be released by another thread in |
| @@ -1036,7 +1038,19 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, | |||
| 1036 | SB_FREEZE_WRITE); | 1038 | SB_FREEZE_WRITE); |
| 1037 | } | 1039 | } |
| 1038 | kiocb->ki_flags |= IOCB_WRITE; | 1040 | kiocb->ki_flags |= IOCB_WRITE; |
| 1039 | io_rw_done(kiocb, call_write_iter(file, kiocb, &iter)); | 1041 | |
| 1042 | ret2 = call_write_iter(file, kiocb, &iter); | ||
| 1043 | if (!force_nonblock || ret2 != -EAGAIN) { | ||
| 1044 | io_rw_done(kiocb, ret2); | ||
| 1045 | } else { | ||
| 1046 | /* | ||
| 1047 | * If ->needs_lock is true, we're already in async | ||
| 1048 | * context. | ||
| 1049 | */ | ||
| 1050 | if (!s->needs_lock) | ||
| 1051 | io_async_list_note(WRITE, req, iov_count); | ||
| 1052 | ret = -EAGAIN; | ||
| 1053 | } | ||
| 1040 | } | 1054 | } |
| 1041 | out_free: | 1055 | out_free: |
| 1042 | kfree(iovec); | 1056 | kfree(iovec); |
| @@ -1968,7 +1982,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, | |||
| 1968 | return 0; | 1982 | return 0; |
| 1969 | 1983 | ||
| 1970 | if (sig) { | 1984 | if (sig) { |
| 1971 | ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz); | 1985 | #ifdef CONFIG_COMPAT |
| 1986 | if (in_compat_syscall()) | ||
| 1987 | ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig, | ||
| 1988 | &ksigmask, &sigsaved, sigsz); | ||
| 1989 | else | ||
| 1990 | #endif | ||
| 1991 | ret = set_user_sigmask(sig, &ksigmask, | ||
| 1992 | &sigsaved, sigsz); | ||
| 1993 | |||
| 1972 | if (ret) | 1994 | if (ret) |
| 1973 | return ret; | 1995 | return ret; |
| 1974 | } | 1996 | } |
| @@ -2193,6 +2215,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, | |||
| 2193 | fput(ctx->user_files[i]); | 2215 | fput(ctx->user_files[i]); |
| 2194 | 2216 | ||
| 2195 | kfree(ctx->user_files); | 2217 | kfree(ctx->user_files); |
| 2218 | ctx->user_files = NULL; | ||
| 2196 | ctx->nr_user_files = 0; | 2219 | ctx->nr_user_files = 0; |
| 2197 | return ret; | 2220 | return ret; |
| 2198 | } | 2221 | } |
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 389ea53ea487..bccfc40b3a74 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
| @@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | |||
| 1414 | 1414 | ||
| 1415 | jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); | 1415 | jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); |
| 1416 | 1416 | ||
| 1417 | if (f->target) { | ||
| 1418 | kfree(f->target); | ||
| 1419 | f->target = NULL; | ||
| 1420 | } | ||
| 1421 | |||
| 1422 | fds = f->dents; | 1417 | fds = f->dents; |
| 1423 | while(fds) { | 1418 | while(fds) { |
| 1424 | fd = fds; | 1419 | fd = fds; |
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index bb6ae387469f..05d892c79339 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
| @@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb) | |||
| 47 | static void jffs2_i_callback(struct rcu_head *head) | 47 | static void jffs2_i_callback(struct rcu_head *head) |
| 48 | { | 48 | { |
| 49 | struct inode *inode = container_of(head, struct inode, i_rcu); | 49 | struct inode *inode = container_of(head, struct inode, i_rcu); |
| 50 | kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); | 50 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
| 51 | |||
| 52 | kfree(f->target); | ||
| 53 | kmem_cache_free(jffs2_inode_cachep, f); | ||
| 51 | } | 54 | } |
| 52 | 55 | ||
| 53 | static void jffs2_destroy_inode(struct inode *inode) | 56 | static void jffs2_destroy_inode(struct inode *inode) |
diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 93fb7cf0b92b..f0b5c987d6ae 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c | |||
| @@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host) | |||
| 290 | 290 | ||
| 291 | WARN_ON_ONCE(host->h_server); | 291 | WARN_ON_ONCE(host->h_server); |
| 292 | 292 | ||
| 293 | if (refcount_dec_and_test(&host->h_count)) { | 293 | if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) { |
| 294 | WARN_ON_ONCE(!list_empty(&host->h_lockowners)); | 294 | WARN_ON_ONCE(!list_empty(&host->h_lockowners)); |
| 295 | WARN_ON_ONCE(!list_empty(&host->h_granted)); | 295 | WARN_ON_ONCE(!list_empty(&host->h_granted)); |
| 296 | WARN_ON_ONCE(!list_empty(&host->h_reclaim)); | 296 | WARN_ON_ONCE(!list_empty(&host->h_reclaim)); |
| 297 | 297 | ||
| 298 | mutex_lock(&nlm_host_mutex); | ||
| 299 | nlm_destroy_host_locked(host); | 298 | nlm_destroy_host_locked(host); |
| 300 | mutex_unlock(&nlm_host_mutex); | 299 | mutex_unlock(&nlm_host_mutex); |
| 301 | } | 300 | } |
diff --git a/fs/locks.c b/fs/locks.c index eaa1cfaf73b0..71d0c6c2aac5 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
| @@ -1160,6 +1160,11 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, | |||
| 1160 | */ | 1160 | */ |
| 1161 | error = -EDEADLK; | 1161 | error = -EDEADLK; |
| 1162 | spin_lock(&blocked_lock_lock); | 1162 | spin_lock(&blocked_lock_lock); |
| 1163 | /* | ||
| 1164 | * Ensure that we don't find any locks blocked on this | ||
| 1165 | * request during deadlock detection. | ||
| 1166 | */ | ||
| 1167 | __locks_wake_up_blocks(request); | ||
| 1163 | if (likely(!posix_locks_deadlock(request, fl))) { | 1168 | if (likely(!posix_locks_deadlock(request, fl))) { |
| 1164 | error = FILE_LOCK_DEFERRED; | 1169 | error = FILE_LOCK_DEFERRED; |
| 1165 | __locks_insert_block(fl, request, | 1170 | __locks_insert_block(fl, request, |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index fb1cf1a4bda2..90d71fda65ce 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
| @@ -453,7 +453,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto, | |||
| 453 | case XPRT_TRANSPORT_RDMA: | 453 | case XPRT_TRANSPORT_RDMA: |
| 454 | if (retrans == NFS_UNSPEC_RETRANS) | 454 | if (retrans == NFS_UNSPEC_RETRANS) |
| 455 | to->to_retries = NFS_DEF_TCP_RETRANS; | 455 | to->to_retries = NFS_DEF_TCP_RETRANS; |
| 456 | if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0) | 456 | if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0) |
| 457 | to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10; | 457 | to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10; |
| 458 | if (to->to_initval > NFS_MAX_TCP_TIMEOUT) | 458 | if (to->to_initval > NFS_MAX_TCP_TIMEOUT) |
| 459 | to->to_initval = NFS_MAX_TCP_TIMEOUT; | 459 | to->to_initval = NFS_MAX_TCP_TIMEOUT; |
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index f9264e1922a2..6673d4ff5a2a 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c | |||
| @@ -1289,6 +1289,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, | |||
| 1289 | static int ff_layout_read_done_cb(struct rpc_task *task, | 1289 | static int ff_layout_read_done_cb(struct rpc_task *task, |
| 1290 | struct nfs_pgio_header *hdr) | 1290 | struct nfs_pgio_header *hdr) |
| 1291 | { | 1291 | { |
| 1292 | int new_idx = hdr->pgio_mirror_idx; | ||
| 1292 | int err; | 1293 | int err; |
| 1293 | 1294 | ||
| 1294 | trace_nfs4_pnfs_read(hdr, task->tk_status); | 1295 | trace_nfs4_pnfs_read(hdr, task->tk_status); |
| @@ -1307,7 +1308,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task, | |||
| 1307 | case -NFS4ERR_RESET_TO_PNFS: | 1308 | case -NFS4ERR_RESET_TO_PNFS: |
| 1308 | if (ff_layout_choose_best_ds_for_read(hdr->lseg, | 1309 | if (ff_layout_choose_best_ds_for_read(hdr->lseg, |
| 1309 | hdr->pgio_mirror_idx + 1, | 1310 | hdr->pgio_mirror_idx + 1, |
| 1310 | &hdr->pgio_mirror_idx)) | 1311 | &new_idx)) |
| 1311 | goto out_layouterror; | 1312 | goto out_layouterror; |
| 1312 | set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); | 1313 | set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); |
| 1313 | return task->tk_status; | 1314 | return task->tk_status; |
| @@ -1320,7 +1321,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task, | |||
| 1320 | 1321 | ||
| 1321 | return 0; | 1322 | return 0; |
| 1322 | out_layouterror: | 1323 | out_layouterror: |
| 1324 | ff_layout_read_record_layoutstats_done(task, hdr); | ||
| 1323 | ff_layout_send_layouterror(hdr->lseg); | 1325 | ff_layout_send_layouterror(hdr->lseg); |
| 1326 | hdr->pgio_mirror_idx = new_idx; | ||
| 1324 | out_eagain: | 1327 | out_eagain: |
| 1325 | rpc_restart_call_prepare(task); | 1328 | rpc_restart_call_prepare(task); |
| 1326 | return -EAGAIN; | 1329 | return -EAGAIN; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4dbb0ee23432..741ff8c9c6ed 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -2933,7 +2933,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, | |||
| 2933 | } | 2933 | } |
| 2934 | 2934 | ||
| 2935 | out: | 2935 | out: |
| 2936 | nfs4_sequence_free_slot(&opendata->o_res.seq_res); | 2936 | if (!opendata->cancelled) |
| 2937 | nfs4_sequence_free_slot(&opendata->o_res.seq_res); | ||
| 2937 | return ret; | 2938 | return ret; |
| 2938 | } | 2939 | } |
| 2939 | 2940 | ||
| @@ -6301,7 +6302,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, | |||
| 6301 | p->arg.seqid = seqid; | 6302 | p->arg.seqid = seqid; |
| 6302 | p->res.seqid = seqid; | 6303 | p->res.seqid = seqid; |
| 6303 | p->lsp = lsp; | 6304 | p->lsp = lsp; |
| 6304 | refcount_inc(&lsp->ls_count); | ||
| 6305 | /* Ensure we don't close file until we're done freeing locks! */ | 6305 | /* Ensure we don't close file until we're done freeing locks! */ |
| 6306 | p->ctx = get_nfs_open_context(ctx); | 6306 | p->ctx = get_nfs_open_context(ctx); |
| 6307 | p->l_ctx = nfs_get_lock_context(ctx); | 6307 | p->l_ctx = nfs_get_lock_context(ctx); |
| @@ -6526,7 +6526,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, | |||
| 6526 | p->res.lock_seqid = p->arg.lock_seqid; | 6526 | p->res.lock_seqid = p->arg.lock_seqid; |
| 6527 | p->lsp = lsp; | 6527 | p->lsp = lsp; |
| 6528 | p->server = server; | 6528 | p->server = server; |
| 6529 | refcount_inc(&lsp->ls_count); | ||
| 6530 | p->ctx = get_nfs_open_context(ctx); | 6529 | p->ctx = get_nfs_open_context(ctx); |
| 6531 | locks_init_lock(&p->fl); | 6530 | locks_init_lock(&p->fl); |
| 6532 | locks_copy_lock(&p->fl, fl); | 6531 | locks_copy_lock(&p->fl, fl); |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index a35259eebc56..1dc9a08e8bdc 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
| @@ -4719,22 +4719,23 @@ out: | |||
| 4719 | 4719 | ||
| 4720 | /* Lock an inode and grab a bh pointing to the inode. */ | 4720 | /* Lock an inode and grab a bh pointing to the inode. */ |
| 4721 | int ocfs2_reflink_inodes_lock(struct inode *s_inode, | 4721 | int ocfs2_reflink_inodes_lock(struct inode *s_inode, |
| 4722 | struct buffer_head **bh1, | 4722 | struct buffer_head **bh_s, |
| 4723 | struct inode *t_inode, | 4723 | struct inode *t_inode, |
| 4724 | struct buffer_head **bh2) | 4724 | struct buffer_head **bh_t) |
| 4725 | { | 4725 | { |
| 4726 | struct inode *inode1; | 4726 | struct inode *inode1 = s_inode; |
| 4727 | struct inode *inode2; | 4727 | struct inode *inode2 = t_inode; |
| 4728 | struct ocfs2_inode_info *oi1; | 4728 | struct ocfs2_inode_info *oi1; |
| 4729 | struct ocfs2_inode_info *oi2; | 4729 | struct ocfs2_inode_info *oi2; |
| 4730 | struct buffer_head *bh1 = NULL; | ||
| 4731 | struct buffer_head *bh2 = NULL; | ||
| 4730 | bool same_inode = (s_inode == t_inode); | 4732 | bool same_inode = (s_inode == t_inode); |
| 4733 | bool need_swap = (inode1->i_ino > inode2->i_ino); | ||
| 4731 | int status; | 4734 | int status; |
| 4732 | 4735 | ||
| 4733 | /* First grab the VFS and rw locks. */ | 4736 | /* First grab the VFS and rw locks. */ |
| 4734 | lock_two_nondirectories(s_inode, t_inode); | 4737 | lock_two_nondirectories(s_inode, t_inode); |
| 4735 | inode1 = s_inode; | 4738 | if (need_swap) |
| 4736 | inode2 = t_inode; | ||
| 4737 | if (inode1->i_ino > inode2->i_ino) | ||
| 4738 | swap(inode1, inode2); | 4739 | swap(inode1, inode2); |
| 4739 | 4740 | ||
| 4740 | status = ocfs2_rw_lock(inode1, 1); | 4741 | status = ocfs2_rw_lock(inode1, 1); |
| @@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode, | |||
| 4757 | trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno, | 4758 | trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno, |
| 4758 | (unsigned long long)oi2->ip_blkno); | 4759 | (unsigned long long)oi2->ip_blkno); |
| 4759 | 4760 | ||
| 4760 | if (*bh1) | ||
| 4761 | *bh1 = NULL; | ||
| 4762 | if (*bh2) | ||
| 4763 | *bh2 = NULL; | ||
| 4764 | |||
| 4765 | /* We always want to lock the one with the lower lockid first. */ | 4761 | /* We always want to lock the one with the lower lockid first. */ |
| 4766 | if (oi1->ip_blkno > oi2->ip_blkno) | 4762 | if (oi1->ip_blkno > oi2->ip_blkno) |
| 4767 | mlog_errno(-ENOLCK); | 4763 | mlog_errno(-ENOLCK); |
| 4768 | 4764 | ||
| 4769 | /* lock id1 */ | 4765 | /* lock id1 */ |
| 4770 | status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET); | 4766 | status = ocfs2_inode_lock_nested(inode1, &bh1, 1, |
| 4767 | OI_LS_REFLINK_TARGET); | ||
| 4771 | if (status < 0) { | 4768 | if (status < 0) { |
| 4772 | if (status != -ENOENT) | 4769 | if (status != -ENOENT) |
| 4773 | mlog_errno(status); | 4770 | mlog_errno(status); |
| @@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode, | |||
| 4776 | 4773 | ||
| 4777 | /* lock id2 */ | 4774 | /* lock id2 */ |
| 4778 | if (!same_inode) { | 4775 | if (!same_inode) { |
| 4779 | status = ocfs2_inode_lock_nested(inode2, bh2, 1, | 4776 | status = ocfs2_inode_lock_nested(inode2, &bh2, 1, |
| 4780 | OI_LS_REFLINK_TARGET); | 4777 | OI_LS_REFLINK_TARGET); |
| 4781 | if (status < 0) { | 4778 | if (status < 0) { |
| 4782 | if (status != -ENOENT) | 4779 | if (status != -ENOENT) |
| 4783 | mlog_errno(status); | 4780 | mlog_errno(status); |
| 4784 | goto out_cl1; | 4781 | goto out_cl1; |
| 4785 | } | 4782 | } |
| 4786 | } else | 4783 | } else { |
| 4787 | *bh2 = *bh1; | 4784 | bh2 = bh1; |
| 4785 | } | ||
| 4786 | |||
| 4787 | /* | ||
| 4788 | * If we swapped inode order above, we have to swap the buffer heads | ||
| 4789 | * before passing them back to the caller. | ||
| 4790 | */ | ||
| 4791 | if (need_swap) | ||
| 4792 | swap(bh1, bh2); | ||
| 4793 | *bh_s = bh1; | ||
| 4794 | *bh_t = bh2; | ||
| 4788 | 4795 | ||
| 4789 | trace_ocfs2_double_lock_end( | 4796 | trace_ocfs2_double_lock_end( |
| 4790 | (unsigned long long)oi1->ip_blkno, | 4797 | (unsigned long long)oi1->ip_blkno, |
| @@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode, | |||
| 4794 | 4801 | ||
| 4795 | out_cl1: | 4802 | out_cl1: |
| 4796 | ocfs2_inode_unlock(inode1, 1); | 4803 | ocfs2_inode_unlock(inode1, 1); |
| 4797 | brelse(*bh1); | 4804 | brelse(bh1); |
| 4798 | *bh1 = NULL; | ||
| 4799 | out_rw2: | 4805 | out_rw2: |
| 4800 | ocfs2_rw_unlock(inode2, 1); | 4806 | ocfs2_rw_unlock(inode2, 1); |
| 4801 | out_i2: | 4807 | out_i2: |
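The reworked ocfs2_reflink_inodes_lock() above keeps its buffer heads in local variables and swaps them back after taking the cluster locks, so the caller always gets *bh_s for s_inode and *bh_t for t_inode regardless of the internal lock-ordering swap. A minimal caller-side sketch (hypothetical variable names, and assuming the existing ocfs2_reflink_inodes_unlock() counterpart):

struct buffer_head *s_bh = NULL, *t_bh = NULL;
int ret;

ret = ocfs2_reflink_inodes_lock(s_inode, &s_bh, t_inode, &t_bh);
if (ret)
	return ret;

/* ... reflink work using s_bh for s_inode and t_bh for t_inode ... */

ocfs2_reflink_inodes_unlock(s_inode, s_bh, t_inode, t_bh);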
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
| @@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f, |||
| 733 | return 0; | 733 | return 0; |
| 734 | } | 734 | } |
| 735 | 735 | ||
| 736 | /* Any file opened for execve()/uselib() has to be a regular file. */ | ||
| 737 | if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) { | ||
| 738 | error = -EACCES; | ||
| 739 | goto cleanup_file; | ||
| 740 | } | ||
| 741 | |||
| 736 | if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { | 742 | if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { |
| 737 | error = get_write_access(inode); | 743 | error = get_write_access(inode); |
| 738 | if (unlikely(error)) | 744 | if (unlikely(error)) |
| @@ -1209,3 +1215,21 @@ int nonseekable_open(struct inode *inode, struct file *filp) | |||
| 1209 | } | 1215 | } |
| 1210 | 1216 | ||
| 1211 | EXPORT_SYMBOL(nonseekable_open); | 1217 | EXPORT_SYMBOL(nonseekable_open); |
| 1218 | |||
| 1219 | /* | ||
| 1220 | * stream_open is used by subsystems that want stream-like file descriptors. | ||
| 1221 | * Such file descriptors are not seekable and have no notion of position | ||
| 1222 | * (file.f_pos is always 0). Unlike on other regular files, .read() and | ||
| 1223 | * .write() can run simultaneously. | ||
| 1224 | * | ||
| 1225 | * stream_open never fails and is marked to return int so that it can be | ||
| 1226 | * used directly as file_operations.open. | ||
| 1227 | */ | ||
| 1228 | int stream_open(struct inode *inode, struct file *filp) | ||
| 1229 | { | ||
| 1230 | filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS); | ||
| 1231 | filp->f_mode |= FMODE_STREAM; | ||
| 1232 | return 0; | ||
| 1233 | } | ||
| 1234 | |||
| 1235 | EXPORT_SYMBOL(stream_open); | ||
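Since stream_open() always returns 0 and matches the file_operations.open prototype, a driver can plug it in directly. A minimal, hypothetical misc-device sketch (all names here are illustrative, not from the patch):

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	/* *ppos is meaningless on a stream; it stays 0. */
	return 0;			/* nothing to read in this sketch */
}

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.open	= stream_open,		/* marks the fd as FMODE_STREAM */
	.read	= demo_read,
	.llseek	= no_llseek,
};

static struct miscdevice demo_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "stream_demo",
	.fops	= &demo_fops,
};
module_misc_device(demo_dev);
MODULE_LICENSE("GPL");

Because FMODE_PREAD and FMODE_PWRITE are cleared as well, pread(2)/pwrite(2) on such a descriptor fail with -ESPIPE, and read()/write() skip the f_pos bookkeeping shown in the fs/read_write.c hunk below.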
diff --git a/fs/proc/base.c b/fs/proc/base.c index ddef482f1334..6a803a0b75df 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -616,24 +616,25 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns, | |||
| 616 | static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns, | 616 | static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns, |
| 617 | struct pid *pid, struct task_struct *task) | 617 | struct pid *pid, struct task_struct *task) |
| 618 | { | 618 | { |
| 619 | long nr; | 619 | struct syscall_info info; |
| 620 | unsigned long args[6], sp, pc; | 620 | u64 *args = &info.data.args[0]; |
| 621 | int res; | 621 | int res; |
| 622 | 622 | ||
| 623 | res = lock_trace(task); | 623 | res = lock_trace(task); |
| 624 | if (res) | 624 | if (res) |
| 625 | return res; | 625 | return res; |
| 626 | 626 | ||
| 627 | if (task_current_syscall(task, &nr, args, 6, &sp, &pc)) | 627 | if (task_current_syscall(task, &info)) |
| 628 | seq_puts(m, "running\n"); | 628 | seq_puts(m, "running\n"); |
| 629 | else if (nr < 0) | 629 | else if (info.data.nr < 0) |
| 630 | seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc); | 630 | seq_printf(m, "%d 0x%llx 0x%llx\n", |
| 631 | info.data.nr, info.sp, info.data.instruction_pointer); | ||
| 631 | else | 632 | else |
| 632 | seq_printf(m, | 633 | seq_printf(m, |
| 633 | "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", | 634 | "%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n", |
| 634 | nr, | 635 | info.data.nr, |
| 635 | args[0], args[1], args[2], args[3], args[4], args[5], | 636 | args[0], args[1], args[2], args[3], args[4], args[5], |
| 636 | sp, pc); | 637 | info.sp, info.data.instruction_pointer); |
| 637 | unlock_trace(task); | 638 | unlock_trace(task); |
| 638 | 639 | ||
| 639 | return 0; | 640 | return 0; |
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index d29d869abec1..f5834488b67d 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
| @@ -615,7 +615,7 @@ static void __init proc_kcore_text_init(void) | |||
| 615 | /* | 615 | /* |
| 616 | * MODULES_VADDR has no intersection with VMALLOC_ADDR. | 616 | * MODULES_VADDR has no intersection with VMALLOC_ADDR. |
| 617 | */ | 617 | */ |
| 618 | struct kcore_list kcore_modules; | 618 | static struct kcore_list kcore_modules; |
| 619 | static void __init add_modules_range(void) | 619 | static void __init add_modules_range(void) |
| 620 | { | 620 | { |
| 621 | if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) { | 621 | if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) { |
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 4d598a399bbf..d65390727541 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c | |||
| @@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header) | |||
| 1626 | if (--header->nreg) | 1626 | if (--header->nreg) |
| 1627 | return; | 1627 | return; |
| 1628 | 1628 | ||
| 1629 | put_links(header); | 1629 | if (parent) |
| 1630 | put_links(header); | ||
| 1630 | start_unregistering(header); | 1631 | start_unregistering(header); |
| 1631 | if (!--header->count) | 1632 | if (!--header->count) |
| 1632 | kfree_rcu(header, rcu); | 1633 | kfree_rcu(header, rcu); |
diff --git a/fs/read_write.c b/fs/read_write.c index 177ccc3d405a..61b43ad7608e 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
| @@ -560,12 +560,13 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_ | |||
| 560 | 560 | ||
| 561 | static inline loff_t file_pos_read(struct file *file) | 561 | static inline loff_t file_pos_read(struct file *file) |
| 562 | { | 562 | { |
| 563 | return file->f_pos; | 563 | return file->f_mode & FMODE_STREAM ? 0 : file->f_pos; |
| 564 | } | 564 | } |
| 565 | 565 | ||
| 566 | static inline void file_pos_write(struct file *file, loff_t pos) | 566 | static inline void file_pos_write(struct file *file, loff_t pos) |
| 567 | { | 567 | { |
| 568 | file->f_pos = pos; | 568 | if ((file->f_mode & FMODE_STREAM) == 0) |
| 569 | file->f_pos = pos; | ||
| 569 | } | 570 | } |
| 570 | 571 | ||
| 571 | ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count) | 572 | ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count) |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 8dc2818fdd84..12628184772c 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
| @@ -276,14 +276,12 @@ static void ubifs_i_callback(struct rcu_head *head) | |||
| 276 | { | 276 | { |
| 277 | struct inode *inode = container_of(head, struct inode, i_rcu); | 277 | struct inode *inode = container_of(head, struct inode, i_rcu); |
| 278 | struct ubifs_inode *ui = ubifs_inode(inode); | 278 | struct ubifs_inode *ui = ubifs_inode(inode); |
| 279 | kfree(ui->data); | ||
| 279 | kmem_cache_free(ubifs_inode_slab, ui); | 280 | kmem_cache_free(ubifs_inode_slab, ui); |
| 280 | } | 281 | } |
| 281 | 282 | ||
| 282 | static void ubifs_destroy_inode(struct inode *inode) | 283 | static void ubifs_destroy_inode(struct inode *inode) |
| 283 | { | 284 | { |
| 284 | struct ubifs_inode *ui = ubifs_inode(inode); | ||
| 285 | |||
| 286 | kfree(ui->data); | ||
| 287 | call_rcu(&inode->i_rcu, ubifs_i_callback); | 285 | call_rcu(&inode->i_rcu, ubifs_i_callback); |
| 288 | } | 286 | } |
| 289 | 287 | ||
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 48502cb9990f..4637ae1ae91c 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
| @@ -1191,7 +1191,10 @@ xfs_iread_extents( | |||
| 1191 | * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. | 1191 | * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. |
| 1192 | */ | 1192 | */ |
| 1193 | level = be16_to_cpu(block->bb_level); | 1193 | level = be16_to_cpu(block->bb_level); |
| 1194 | ASSERT(level > 0); | 1194 | if (unlikely(level == 0)) { |
| 1195 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); | ||
| 1196 | return -EFSCORRUPTED; | ||
| 1197 | } | ||
| 1195 | pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); | 1198 | pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); |
| 1196 | bno = be64_to_cpu(*pp); | 1199 | bno = be64_to_cpu(*pp); |
| 1197 | 1200 | ||
| @@ -4249,9 +4252,13 @@ xfs_bmapi_write( | |||
| 4249 | struct xfs_bmbt_irec *mval, /* output: map values */ | 4252 | struct xfs_bmbt_irec *mval, /* output: map values */ |
| 4250 | int *nmap) /* i/o: mval size/count */ | 4253 | int *nmap) /* i/o: mval size/count */ |
| 4251 | { | 4254 | { |
| 4255 | struct xfs_bmalloca bma = { | ||
| 4256 | .tp = tp, | ||
| 4257 | .ip = ip, | ||
| 4258 | .total = total, | ||
| 4259 | }; | ||
| 4252 | struct xfs_mount *mp = ip->i_mount; | 4260 | struct xfs_mount *mp = ip->i_mount; |
| 4253 | struct xfs_ifork *ifp; | 4261 | struct xfs_ifork *ifp; |
| 4254 | struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */ | ||
| 4255 | xfs_fileoff_t end; /* end of mapped file region */ | 4262 | xfs_fileoff_t end; /* end of mapped file region */ |
| 4256 | bool eof = false; /* after the end of extents */ | 4263 | bool eof = false; /* after the end of extents */ |
| 4257 | int error; /* error return */ | 4264 | int error; /* error return */ |
| @@ -4319,10 +4326,6 @@ xfs_bmapi_write( | |||
| 4319 | eof = true; | 4326 | eof = true; |
| 4320 | if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) | 4327 | if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) |
| 4321 | bma.prev.br_startoff = NULLFILEOFF; | 4328 | bma.prev.br_startoff = NULLFILEOFF; |
| 4322 | bma.tp = tp; | ||
| 4323 | bma.ip = ip; | ||
| 4324 | bma.total = total; | ||
| 4325 | bma.datatype = 0; | ||
| 4326 | bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); | 4329 | bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); |
| 4327 | 4330 | ||
| 4328 | n = 0; | 4331 | n = 0; |
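The switch to a designated initializer for bma relies on standard C semantics: members that are not named are zero-initialized, which is why the explicit bma.datatype = 0 assignment can be dropped along with the separate bma.tp/bma.ip/bma.total stores. In sketch form:

struct xfs_bmalloca bma = {
	.tp    = tp,
	.ip    = ip,
	.total = total,
};
/* Every other member (bma.datatype, bma.minleft, bma.prev, ...) starts
 * out zero/NULL, exactly as the removed assignments used to guarantee. */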
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c index 6f94d1f7322d..117910db51b8 100644 --- a/fs/xfs/scrub/btree.c +++ b/fs/xfs/scrub/btree.c | |||
| @@ -415,8 +415,17 @@ xchk_btree_check_owner( | |||
| 415 | struct xfs_btree_cur *cur = bs->cur; | 415 | struct xfs_btree_cur *cur = bs->cur; |
| 416 | struct check_owner *co; | 416 | struct check_owner *co; |
| 417 | 417 | ||
| 418 | if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL) | 418 | /* |
| 419 | * In theory, xfs_btree_get_block should only give us a null buffer | ||
| 420 | * pointer for the root of a root-in-inode btree type, but we need | ||
| 421 | * to check defensively here in case the cursor state is also screwed | ||
| 422 | * up. | ||
| 423 | */ | ||
| 424 | if (bp == NULL) { | ||
| 425 | if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)) | ||
| 426 | xchk_btree_set_corrupt(bs->sc, bs->cur, level); | ||
| 419 | return 0; | 427 | return 0; |
| 428 | } | ||
| 420 | 429 | ||
| 421 | /* | 430 | /* |
| 422 | * We want to cross-reference each btree block with the bnobt | 431 | * We want to cross-reference each btree block with the bnobt |
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c index f1260b4bfdee..90527b094878 100644 --- a/fs/xfs/scrub/dabtree.c +++ b/fs/xfs/scrub/dabtree.c | |||
| @@ -574,6 +574,11 @@ xchk_da_btree( | |||
| 574 | /* Drill another level deeper. */ | 574 | /* Drill another level deeper. */ |
| 575 | blkno = be32_to_cpu(key->before); | 575 | blkno = be32_to_cpu(key->before); |
| 576 | level++; | 576 | level++; |
| 577 | if (level >= XFS_DA_NODE_MAXDEPTH) { | ||
| 578 | /* Too deep! */ | ||
| 579 | xchk_da_set_corrupt(&ds, level - 1); | ||
| 580 | break; | ||
| 581 | } | ||
| 577 | ds.tree_level--; | 582 | ds.tree_level--; |
| 578 | error = xchk_da_btree_block(&ds, level, blkno); | 583 | error = xchk_da_btree_block(&ds, level, blkno); |
| 579 | if (error) | 584 | if (error) |
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index 93f07edafd81..9ee2a7d02e70 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c | |||
| @@ -161,6 +161,14 @@ xfs_ioc_trim( | |||
| 161 | return -EPERM; | 161 | return -EPERM; |
| 162 | if (!blk_queue_discard(q)) | 162 | if (!blk_queue_discard(q)) |
| 163 | return -EOPNOTSUPP; | 163 | return -EOPNOTSUPP; |
| 164 | |||
| 165 | /* | ||
| 166 | * We haven't recovered the log, so we cannot use our bnobt-guided | ||
| 167 | * storage zapping commands. | ||
| 168 | */ | ||
| 169 | if (mp->m_flags & XFS_MOUNT_NORECOVERY) | ||
| 170 | return -EROFS; | ||
| 171 | |||
| 164 | if (copy_from_user(&range, urange, sizeof(range))) | 172 | if (copy_from_user(&range, urange, sizeof(range))) |
| 165 | return -EFAULT; | 173 | return -EFAULT; |
| 166 | 174 | ||
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 1f2e2845eb76..a7ceae90110e 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
| @@ -529,18 +529,17 @@ xfs_file_dio_aio_write( | |||
| 529 | count = iov_iter_count(from); | 529 | count = iov_iter_count(from); |
| 530 | 530 | ||
| 531 | /* | 531 | /* |
| 532 | * If we are doing unaligned IO, wait for all other IO to drain, | 532 | * If we are doing unaligned IO, we can't allow any other overlapping IO |
| 533 | * otherwise demote the lock if we had to take the exclusive lock | 533 | * in-flight at the same time or we risk data corruption. Wait for all |
| 534 | * for other reasons in xfs_file_aio_write_checks. | 534 | * other IO to drain before we submit. If the IO is aligned, demote the |
| 535 | * iolock if we had to take the exclusive lock in | ||
| 536 | * xfs_file_aio_write_checks() for other reasons. | ||
| 535 | */ | 537 | */ |
| 536 | if (unaligned_io) { | 538 | if (unaligned_io) { |
| 537 | /* If we are going to wait for other DIO to finish, bail */ | 539 | /* unaligned dio always waits, bail */ |
| 538 | if (iocb->ki_flags & IOCB_NOWAIT) { | 540 | if (iocb->ki_flags & IOCB_NOWAIT) |
| 539 | if (atomic_read(&inode->i_dio_count)) | 541 | return -EAGAIN; |
| 540 | return -EAGAIN; | 542 | inode_dio_wait(inode); |
| 541 | } else { | ||
| 542 | inode_dio_wait(inode); | ||
| 543 | } | ||
| 544 | } else if (iolock == XFS_IOLOCK_EXCL) { | 543 | } else if (iolock == XFS_IOLOCK_EXCL) { |
| 545 | xfs_ilock_demote(ip, XFS_IOLOCK_EXCL); | 544 | xfs_ilock_demote(ip, XFS_IOLOCK_EXCL); |
| 546 | iolock = XFS_IOLOCK_SHARED; | 545 | iolock = XFS_IOLOCK_SHARED; |
| @@ -548,6 +547,14 @@ xfs_file_dio_aio_write( | |||
| 548 | 547 | ||
| 549 | trace_xfs_file_direct_write(ip, count, iocb->ki_pos); | 548 | trace_xfs_file_direct_write(ip, count, iocb->ki_pos); |
| 550 | ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io); | 549 | ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io); |
| 550 | |||
| 551 | /* | ||
| 552 | * If unaligned, this is the only IO in-flight. If it has not yet | ||
| 553 | * completed, wait on it before we release the iolock to prevent | ||
| 554 | * subsequent overlapping IO. | ||
| 555 | */ | ||
| 556 | if (ret == -EIOCBQUEUED && unaligned_io) | ||
| 557 | inode_dio_wait(inode); | ||
| 551 | out: | 558 | out: |
| 552 | xfs_iunlock(ip, iolock); | 559 | xfs_iunlock(ip, iolock); |
| 553 | 560 | ||
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index 30b1ae53689f..c50542dc71e0 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h | |||
| @@ -150,7 +150,10 @@ | |||
| 150 | 150 | ||
| 151 | /* Defaults for debug_level, debug and normal */ | 151 | /* Defaults for debug_level, debug and normal */ |
| 152 | 152 | ||
| 153 | #ifndef ACPI_DEBUG_DEFAULT | ||
| 153 | #define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR) | 154 | #define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR) |
| 155 | #endif | ||
| 156 | |||
| 154 | #define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR) | 157 | #define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR) |
| 155 | #define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) | 158 | #define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) |
| 156 | 159 | ||
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 9ff328fd946a..624b90b34085 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h | |||
| @@ -82,6 +82,11 @@ | |||
| 82 | #define ACPI_NO_ERROR_MESSAGES | 82 | #define ACPI_NO_ERROR_MESSAGES |
| 83 | #undef ACPI_DEBUG_OUTPUT | 83 | #undef ACPI_DEBUG_OUTPUT |
| 84 | 84 | ||
| 85 | /* Use a specific debugging default separate from ACPICA */ | ||
| 86 | |||
| 87 | #undef ACPI_DEBUG_DEFAULT | ||
| 88 | #define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR) | ||
| 89 | |||
| 85 | /* External interface for __KERNEL__, stub is needed */ | 90 | /* External interface for __KERNEL__, stub is needed */ |
| 86 | 91 | ||
| 87 | #define ACPI_EXTERNAL_RETURN_STATUS(prototype) \ | 92 | #define ACPI_EXTERNAL_RETURN_STATUS(prototype) \ |
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 0c938a4354f6..b88239e9efe4 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h | |||
| @@ -105,41 +105,30 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
| 105 | * syscall_get_arguments - extract system call parameter values | 105 | * syscall_get_arguments - extract system call parameter values |
| 106 | * @task: task of interest, must be blocked | 106 | * @task: task of interest, must be blocked |
| 107 | * @regs: task_pt_regs() of @task | 107 | * @regs: task_pt_regs() of @task |
| 108 | * @i: argument index [0,5] | ||
| 109 | * @n: number of arguments; n+i must be [1,6]. | ||
| 110 | * @args: array filled with argument values | 108 | * @args: array filled with argument values |
| 111 | * | 109 | * |
| 112 | * Fetches @n arguments to the system call starting with the @i'th argument | 110 | * Fetches 6 arguments to the system call. First argument is stored in |
| 113 | * (from 0 through 5). Argument @i is stored in @args[0], and so on. | 111 | * @args[0], and so on. |
| 114 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 115 | * | 112 | * |
| 116 | * It's only valid to call this when @task is stopped for tracing on | 113 | * It's only valid to call this when @task is stopped for tracing on |
| 117 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 114 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
| 118 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 119 | * taking up to 6 arguments. | ||
| 120 | */ | 115 | */ |
| 121 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 116 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 122 | unsigned int i, unsigned int n, unsigned long *args); | 117 | unsigned long *args); |
| 123 | 118 | ||
| 124 | /** | 119 | /** |
| 125 | * syscall_set_arguments - change system call parameter value | 120 | * syscall_set_arguments - change system call parameter value |
| 126 | * @task: task of interest, must be in system call entry tracing | 121 | * @task: task of interest, must be in system call entry tracing |
| 127 | * @regs: task_pt_regs() of @task | 122 | * @regs: task_pt_regs() of @task |
| 128 | * @i: argument index [0,5] | ||
| 129 | * @n: number of arguments; n+i must be [1,6]. | ||
| 130 | * @args: array of argument values to store | 123 | * @args: array of argument values to store |
| 131 | * | 124 | * |
| 132 | * Changes @n arguments to the system call starting with the @i'th argument. | 125 | * Changes 6 arguments to the system call. |
| 133 | * Argument @i gets value @args[0], and so on. | 126 | * The first argument gets value @args[0], and so on. |
| 134 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 135 | * | 127 | * |
| 136 | * It's only valid to call this when @task is stopped for tracing on | 128 | * It's only valid to call this when @task is stopped for tracing on |
| 137 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 129 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
| 138 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 139 | * taking up to 6 arguments. | ||
| 140 | */ | 130 | */ |
| 141 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | 131 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, |
| 142 | unsigned int i, unsigned int n, | ||
| 143 | const unsigned long *args); | 132 | const unsigned long *args); |
| 144 | 133 | ||
| 145 | /** | 134 | /** |
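With the index/count parameters gone, callers always fetch (or set) the full set of six arguments. A hedged sketch of the new calling convention (tracer-side helper, names invented here):

static void dump_syscall_args(struct task_struct *task)
{
	unsigned long args[6];

	/* @task must be stopped for syscall-entry tracing, as documented. */
	syscall_get_arguments(task, task_pt_regs(task), args);
	pr_debug("arg0=%lx arg1=%lx arg5=%lx\n", args[0], args[1], args[5]);
}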
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h index 8063e8314eef..6d487c5eba2c 100644 --- a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h | |||
| @@ -51,7 +51,10 @@ | |||
| 51 | #define RESET_SD_EMMC_A 44 | 51 | #define RESET_SD_EMMC_A 44 |
| 52 | #define RESET_SD_EMMC_B 45 | 52 | #define RESET_SD_EMMC_B 45 |
| 53 | #define RESET_SD_EMMC_C 46 | 53 | #define RESET_SD_EMMC_C 46 |
| 54 | /* 47-60 */ | 54 | /* 47 */ |
| 55 | #define RESET_USB_PHY20 48 | ||
| 56 | #define RESET_USB_PHY21 49 | ||
| 57 | /* 50-60 */ | ||
| 55 | #define RESET_AUDIO_CODEC 61 | 58 | #define RESET_AUDIO_CODEC 61 |
| 56 | /* 62-63 */ | 59 | /* 62-63 */ |
| 57 | /* RESET2 */ | 60 | /* RESET2 */ |
diff --git a/include/keys/trusted.h b/include/keys/trusted.h index adbcb6817826..0071298b9b28 100644 --- a/include/keys/trusted.h +++ b/include/keys/trusted.h | |||
| @@ -38,7 +38,7 @@ enum { | |||
| 38 | 38 | ||
| 39 | int TSS_authhmac(unsigned char *digest, const unsigned char *key, | 39 | int TSS_authhmac(unsigned char *digest, const unsigned char *key, |
| 40 | unsigned int keylen, unsigned char *h1, | 40 | unsigned int keylen, unsigned char *h1, |
| 41 | unsigned char *h2, unsigned char h3, ...); | 41 | unsigned char *h2, unsigned int h3, ...); |
| 42 | int TSS_checkhmac1(unsigned char *buffer, | 42 | int TSS_checkhmac1(unsigned char *buffer, |
| 43 | const uint32_t command, | 43 | const uint32_t command, |
| 44 | const unsigned char *ononce, | 44 | const unsigned char *ononce, |
diff --git a/include/linux/atalk.h b/include/linux/atalk.h index d5cfc0b15b76..f6034ba774be 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h | |||
| @@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb) | |||
| 108 | #define AARP_RESOLVE_TIME (10 * HZ) | 108 | #define AARP_RESOLVE_TIME (10 * HZ) |
| 109 | 109 | ||
| 110 | extern struct datalink_proto *ddp_dl, *aarp_dl; | 110 | extern struct datalink_proto *ddp_dl, *aarp_dl; |
| 111 | extern void aarp_proto_init(void); | 111 | extern int aarp_proto_init(void); |
| 112 | 112 | ||
| 113 | /* Inter module exports */ | 113 | /* Inter module exports */ |
| 114 | 114 | ||
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h index 50fb0dee23e8..d35b8ec1c485 100644 --- a/include/linux/bitrev.h +++ b/include/linux/bitrev.h | |||
| @@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x) | |||
| 34 | 34 | ||
| 35 | #define __constant_bitrev32(x) \ | 35 | #define __constant_bitrev32(x) \ |
| 36 | ({ \ | 36 | ({ \ |
| 37 | u32 __x = x; \ | 37 | u32 ___x = x; \ |
| 38 | __x = (__x >> 16) | (__x << 16); \ | 38 | ___x = (___x >> 16) | (___x << 16); \ |
| 39 | __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \ | 39 | ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \ |
| 40 | __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ | 40 | ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ |
| 41 | __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ | 41 | ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ |
| 42 | __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ | 42 | ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ |
| 43 | __x; \ | 43 | ___x; \ |
| 44 | }) | 44 | }) |
| 45 | 45 | ||
| 46 | #define __constant_bitrev16(x) \ | 46 | #define __constant_bitrev16(x) \ |
| 47 | ({ \ | 47 | ({ \ |
| 48 | u16 __x = x; \ | 48 | u16 ___x = x; \ |
| 49 | __x = (__x >> 8) | (__x << 8); \ | 49 | ___x = (___x >> 8) | (___x << 8); \ |
| 50 | __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \ | 50 | ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \ |
| 51 | __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \ | 51 | ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \ |
| 52 | __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \ | 52 | ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \ |
| 53 | __x; \ | 53 | ___x; \ |
| 54 | }) | 54 | }) |
| 55 | 55 | ||
| 56 | #define __constant_bitrev8x4(x) \ | 56 | #define __constant_bitrev8x4(x) \ |
| 57 | ({ \ | 57 | ({ \ |
| 58 | u32 __x = x; \ | 58 | u32 ___x = x; \ |
| 59 | __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ | 59 | ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ |
| 60 | __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ | 60 | ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ |
| 61 | __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ | 61 | ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ |
| 62 | __x; \ | 62 | ___x; \ |
| 63 | }) | 63 | }) |
| 64 | 64 | ||
| 65 | #define __constant_bitrev8(x) \ | 65 | #define __constant_bitrev8(x) \ |
| 66 | ({ \ | 66 | ({ \ |
| 67 | u8 __x = x; \ | 67 | u8 ___x = x; \ |
| 68 | __x = (__x >> 4) | (__x << 4); \ | 68 | ___x = (___x >> 4) | (___x << 4); \ |
| 69 | __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \ | 69 | ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \ |
| 70 | __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \ | 70 | ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \ |
| 71 | __x; \ | 71 | ___x; \ |
| 72 | }) | 72 | }) |
| 73 | 73 | ||
| 74 | #define bitrev32(x) \ | 74 | #define bitrev32(x) \ |
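The extra underscore most likely guards against name capture in the statement expressions (the original changelog is not quoted here, so treat this as an inferred rationale): if the caller's own variable happens to be called __x, the old macro body degenerates into a self-initialization.

u32 __x = 0x80000001;
u32 r = __constant_bitrev32(__x);
/* Old macro: expands to "u32 __x = __x;" inside the ({ ... }) block, so the
 * temporary is initialized from itself (garbage).  With ___x the expansion
 * becomes "u32 ___x = __x;" and reads the caller's value as intended. */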
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a2132e09dc1c..f02367faa58d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
| @@ -193,7 +193,6 @@ enum bpf_arg_type { | |||
| 193 | 193 | ||
| 194 | ARG_PTR_TO_CTX, /* pointer to context */ | 194 | ARG_PTR_TO_CTX, /* pointer to context */ |
| 195 | ARG_ANYTHING, /* any (initialized) argument is ok */ | 195 | ARG_ANYTHING, /* any (initialized) argument is ok */ |
| 196 | ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */ | ||
| 197 | ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ | 196 | ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ |
| 198 | ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ | 197 | ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ |
| 199 | }; | 198 | }; |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 69f7a3449eda..7d8228d1c898 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
| @@ -66,6 +66,46 @@ struct bpf_reg_state { | |||
| 66 | * same reference to the socket, to determine proper reference freeing. | 66 | * same reference to the socket, to determine proper reference freeing. |
| 67 | */ | 67 | */ |
| 68 | u32 id; | 68 | u32 id; |
| 69 | /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned | ||
| 70 | * from a pointer-cast helper, bpf_sk_fullsock() and | ||
| 71 | * bpf_tcp_sock(). | ||
| 72 | * | ||
| 73 | * Consider the following where "sk" is a reference counted | ||
| 74 | * pointer returned from "sk = bpf_sk_lookup_tcp();": | ||
| 75 | * | ||
| 76 | * 1: sk = bpf_sk_lookup_tcp(); | ||
| 77 | * 2: if (!sk) { return 0; } | ||
| 78 | * 3: fullsock = bpf_sk_fullsock(sk); | ||
| 79 | * 4: if (!fullsock) { bpf_sk_release(sk); return 0; } | ||
| 80 | * 5: tp = bpf_tcp_sock(fullsock); | ||
| 81 | * 6: if (!tp) { bpf_sk_release(sk); return 0; } | ||
| 82 | * 7: bpf_sk_release(sk); | ||
| 83 | * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain | ||
| 84 | * | ||
| 85 | * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and | ||
| 86 | * "tp" ptr should be invalidated also. In order to do that, | ||
| 87 | * the reg holding "fullsock" and "sk" need to remember | ||
| 88 | * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id | ||
| 89 | * such that the verifier can reset all regs which have | ||
| 90 | * ref_obj_id matching the sk_reg->id. | ||
| 91 | * | ||
| 92 | * sk_reg->ref_obj_id is set to sk_reg->id at line 1. | ||
| 93 | * sk_reg->id will stay as NULL-marking purpose only. | ||
| 94 | * After NULL-marking is done, sk_reg->id can be reset to 0. | ||
| 95 | * | ||
| 96 | * After "fullsock = bpf_sk_fullsock(sk);" at line 3, | ||
| 97 | * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id. | ||
| 98 | * | ||
| 99 | * After "tp = bpf_tcp_sock(fullsock);" at line 5, | ||
| 100 | * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id | ||
| 101 | * which is the same as sk_reg->ref_obj_id. | ||
| 102 | * | ||
| 103 | * From the verifier perspective, if sk, fullsock and tp | ||
| 104 | * are not NULL, they are the same ptr with different | ||
| 105 | * reg->type. In particular, bpf_sk_release(tp) is also | ||
| 106 | * allowed and has the same effect as bpf_sk_release(sk). | ||
| 107 | */ | ||
| 108 | u32 ref_obj_id; | ||
| 69 | /* For scalar types (SCALAR_VALUE), this represents our knowledge of | 109 | /* For scalar types (SCALAR_VALUE), this represents our knowledge of |
| 70 | * the actual value. | 110 | * the actual value. |
| 71 | * For pointer types, this represents the variable part of the offset | 111 | * For pointer types, this represents the variable part of the offset |
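The same pattern in BPF C, with the snd_cwnd read moved before the release so the verifier accepts it (a sketch only: tuple setup and the exact program type are omitted, and the helper signatures are assumed to be the ones from bpf_helpers.h):

struct bpf_sock_tuple tuple = {};	/* fill .ipv4 from the packet in real code */
struct bpf_sock *sk, *fullsock;
struct bpf_tcp_sock *tp;
__u32 snd_cwnd;

sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
		       BPF_F_CURRENT_NETNS, 0);
if (!sk)
	return 0;
fullsock = bpf_sk_fullsock(sk);
if (!fullsock) {
	bpf_sk_release(sk);
	return 0;
}
tp = bpf_tcp_sock(fullsock);
if (!tp) {
	bpf_sk_release(sk);
	return 0;
}
snd_cwnd = tp->snd_cwnd;	/* reference still held: load is allowed */
bpf_sk_release(sk);		/* invalidates sk, fullsock and tp together */
return 0;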
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 9cd00a37b8d3..6db2d9a6e503 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
| @@ -148,6 +148,22 @@ | |||
| 148 | #define BCM_LED_SRC_OFF 0xe /* Tied high */ | 148 | #define BCM_LED_SRC_OFF 0xe /* Tied high */ |
| 149 | #define BCM_LED_SRC_ON 0xf /* Tied low */ | 149 | #define BCM_LED_SRC_ON 0xf /* Tied low */ |
| 150 | 150 | ||
| 151 | /* | ||
| 152 | * Broadcom Multicolor LED configurations (expansion register 4) | ||
| 153 | */ | ||
| 154 | #define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04) | ||
| 155 | #define BCM_LED_MULTICOLOR_IN_PHASE BIT(8) | ||
| 156 | #define BCM_LED_MULTICOLOR_LINK_ACT 0x0 | ||
| 157 | #define BCM_LED_MULTICOLOR_SPEED 0x1 | ||
| 158 | #define BCM_LED_MULTICOLOR_ACT_FLASH 0x2 | ||
| 159 | #define BCM_LED_MULTICOLOR_FDX 0x3 | ||
| 160 | #define BCM_LED_MULTICOLOR_OFF 0x4 | ||
| 161 | #define BCM_LED_MULTICOLOR_ON 0x5 | ||
| 162 | #define BCM_LED_MULTICOLOR_ALT 0x6 | ||
| 163 | #define BCM_LED_MULTICOLOR_FLASH 0x7 | ||
| 164 | #define BCM_LED_MULTICOLOR_LINK 0x8 | ||
| 165 | #define BCM_LED_MULTICOLOR_ACT 0x9 | ||
| 166 | #define BCM_LED_MULTICOLOR_PROGRAM 0xa | ||
| 151 | 167 | ||
| 152 | /* | 168 | /* |
| 153 | * BCM5482: Shadow registers | 169 | * BCM5482: Shadow registers |
diff --git a/include/linux/device.h b/include/linux/device.h index b425a7ee04ce..4e6987e11f68 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -49,8 +49,6 @@ struct bus_attribute { | |||
| 49 | ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); | 49 | ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| 52 | #define BUS_ATTR(_name, _mode, _show, _store) \ | ||
| 53 | struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) | ||
| 54 | #define BUS_ATTR_RW(_name) \ | 52 | #define BUS_ATTR_RW(_name) \ |
| 55 | struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) | 53 | struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) |
| 56 | #define BUS_ATTR_RO(_name) \ | 54 | #define BUS_ATTR_RO(_name) \ |
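With the open-coded BUS_ATTR() macro gone, bus attributes follow the sysfs convention of naming their callbacks <attr>_show/<attr>_store and using the _RW/_RO helpers. A hypothetical conversion (attribute and variable names invented for illustration):

static int demo_mode;

static ssize_t demo_mode_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", demo_mode);
}

static ssize_t demo_mode_store(struct bus_type *bus, const char *buf,
			       size_t count)
{
	if (kstrtoint(buf, 0, &demo_mode))
		return -EINVAL;
	return count;
}

/* was: static BUS_ATTR(demo_mode, 0644, demo_mode_show, demo_mode_store); */
static BUS_ATTR_RW(demo_mode);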
diff --git a/include/linux/fs.h b/include/linux/fs.h index 8b42df09b04c..dd28e7679089 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -158,6 +158,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
| 158 | #define FMODE_OPENED ((__force fmode_t)0x80000) | 158 | #define FMODE_OPENED ((__force fmode_t)0x80000) |
| 159 | #define FMODE_CREATED ((__force fmode_t)0x100000) | 159 | #define FMODE_CREATED ((__force fmode_t)0x100000) |
| 160 | 160 | ||
| 161 | /* File is stream-like */ | ||
| 162 | #define FMODE_STREAM ((__force fmode_t)0x200000) | ||
| 163 | |||
| 161 | /* File was opened by fanotify and shouldn't generate fanotify events */ | 164 | /* File was opened by fanotify and shouldn't generate fanotify events */ |
| 162 | #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) | 165 | #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) |
| 163 | 166 | ||
| @@ -3074,6 +3077,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); | |||
| 3074 | extern loff_t no_seek_end_llseek(struct file *, loff_t, int); | 3077 | extern loff_t no_seek_end_llseek(struct file *, loff_t, int); |
| 3075 | extern int generic_file_open(struct inode * inode, struct file * filp); | 3078 | extern int generic_file_open(struct inode * inode, struct file * filp); |
| 3076 | extern int nonseekable_open(struct inode * inode, struct file * filp); | 3079 | extern int nonseekable_open(struct inode * inode, struct file * filp); |
| 3080 | extern int stream_open(struct inode * inode, struct file * filp); | ||
| 3077 | 3081 | ||
| 3078 | #ifdef CONFIG_BLOCK | 3082 | #ifdef CONFIG_BLOCK |
| 3079 | typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, | 3083 | typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ea35263eb76b..11943b60f208 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
| @@ -203,7 +203,6 @@ static inline void hugetlb_show_meminfo(void) | |||
| 203 | #define pud_huge(x) 0 | 203 | #define pud_huge(x) 0 |
| 204 | #define is_hugepage_only_range(mm, addr, len) 0 | 204 | #define is_hugepage_only_range(mm, addr, len) 0 |
| 205 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) | 205 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) |
| 206 | #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) | ||
| 207 | #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ | 206 | #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ |
| 208 | src_addr, pagep) ({ BUG(); 0; }) | 207 | src_addr, pagep) ({ BUG(); 0; }) |
| 209 | #define huge_pte_offset(mm, address, sz) 0 | 208 | #define huge_pte_offset(mm, address, sz) 0 |
| @@ -234,6 +233,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, | |||
| 234 | { | 233 | { |
| 235 | BUG(); | 234 | BUG(); |
| 236 | } | 235 | } |
| 236 | static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, | ||
| 237 | struct vm_area_struct *vma, unsigned long address, | ||
| 238 | unsigned int flags) | ||
| 239 | { | ||
| 240 | BUG(); | ||
| 241 | return 0; | ||
| 242 | } | ||
| 237 | 243 | ||
| 238 | #endif /* !CONFIG_HUGETLB_PAGE */ | 244 | #endif /* !CONFIG_HUGETLB_PAGE */ |
| 239 | /* | 245 | /* |
diff --git a/include/linux/kcore.h b/include/linux/kcore.h index c843f4a9c512..da676cdbd727 100644 --- a/include/linux/kcore.h +++ b/include/linux/kcore.h | |||
| @@ -38,12 +38,6 @@ struct vmcoredd_node { | |||
| 38 | 38 | ||
| 39 | #ifdef CONFIG_PROC_KCORE | 39 | #ifdef CONFIG_PROC_KCORE |
| 40 | void __init kclist_add(struct kcore_list *, void *, size_t, int type); | 40 | void __init kclist_add(struct kcore_list *, void *, size_t, int type); |
| 41 | static inline | ||
| 42 | void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz) | ||
| 43 | { | ||
| 44 | m->vaddr = (unsigned long)vaddr; | ||
| 45 | kclist_add(m, addr, sz, KCORE_REMAP); | ||
| 46 | } | ||
| 47 | 41 | ||
| 48 | extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)); | 42 | extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)); |
| 49 | #else | 43 | #else |
| @@ -51,11 +45,6 @@ static inline | |||
| 51 | void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) | 45 | void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) |
| 52 | { | 46 | { |
| 53 | } | 47 | } |
| 54 | |||
| 55 | static inline | ||
| 56 | void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz) | ||
| 57 | { | ||
| 58 | } | ||
| 59 | #endif | 48 | #endif |
| 60 | 49 | ||
| 61 | #endif /* _LINUX_KCORE_H */ | 50 | #endif /* _LINUX_KCORE_H */ |
diff --git a/include/linux/list.h b/include/linux/list.h index 79626b5ab36c..58aa3adf94e6 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
| @@ -207,7 +207,7 @@ static inline void list_bulk_move_tail(struct list_head *head, | |||
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | /** | 209 | /** |
| 210 | * list_is_first -- tests whether @ list is the first entry in list @head | 210 | * list_is_first -- tests whether @list is the first entry in list @head |
| 211 | * @list: the entry to test | 211 | * @list: the entry to test |
| 212 | * @head: the head of the list | 212 | * @head: the head of the list |
| 213 | */ | 213 | */ |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1f3d880b7ca1..dbb6118370c1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -566,7 +566,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page); | |||
| 566 | void __unlock_page_memcg(struct mem_cgroup *memcg); | 566 | void __unlock_page_memcg(struct mem_cgroup *memcg); |
| 567 | void unlock_page_memcg(struct page *page); | 567 | void unlock_page_memcg(struct page *page); |
| 568 | 568 | ||
| 569 | /* idx can be of type enum memcg_stat_item or node_stat_item */ | 569 | /* |
| 570 | * idx can be of type enum memcg_stat_item or node_stat_item. | ||
| 571 | * Keep in sync with memcg_exact_page_state(). | ||
| 572 | */ | ||
| 570 | static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, | 573 | static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, |
| 571 | int idx) | 574 | int idx) |
| 572 | { | 575 | { |
diff --git a/include/linux/mii.h b/include/linux/mii.h index 6fee8b1a4400..5cd824c1c0ca 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h | |||
| @@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising) | |||
| 469 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, | 469 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, |
| 470 | advertising)) | 470 | advertising)) |
| 471 | lcl_adv |= ADVERTISE_PAUSE_CAP; | 471 | lcl_adv |= ADVERTISE_PAUSE_CAP; |
| 472 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, | 472 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, |
| 473 | advertising)) | 473 | advertising)) |
| 474 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | 474 | lcl_adv |= ADVERTISE_PAUSE_ASYM; |
| 475 | 475 | ||
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 022541dc5dbf..0d0729648844 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -594,6 +594,8 @@ enum mlx5_pagefault_type_flags { | |||
| 594 | }; | 594 | }; |
| 595 | 595 | ||
| 596 | struct mlx5_td { | 596 | struct mlx5_td { |
| 597 | /* protects tirs list changes while tirs refresh */ | ||
| 598 | struct mutex list_lock; | ||
| 597 | struct list_head tirs_list; | 599 | struct list_head tirs_list; |
| 598 | u32 tdn; | 600 | u32 tdn; |
| 599 | }; | 601 | }; |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 7eade9132f02..4ef4bbe78a1d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -671,7 +671,7 @@ enum vm_fault_reason { | |||
| 671 | 671 | ||
| 672 | /* Encode hstate index for a hwpoisoned large page */ | 672 | /* Encode hstate index for a hwpoisoned large page */ |
| 673 | #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16)) | 673 | #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16)) |
| 674 | #define VM_FAULT_GET_HINDEX(x) (((x) >> 16) & 0xf) | 674 | #define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf) |
| 675 | 675 | ||
| 676 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \ | 676 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \ |
| 677 | VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \ | 677 | VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \ |
diff --git a/include/linux/net.h b/include/linux/net.h index 651fca72286c..c606c72311d0 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
| @@ -83,6 +83,12 @@ enum sock_type { | |||
| 83 | 83 | ||
| 84 | #endif /* ARCH_HAS_SOCKET_TYPES */ | 84 | #endif /* ARCH_HAS_SOCKET_TYPES */ |
| 85 | 85 | ||
| 86 | /** | ||
| 87 | * enum sock_shutdown_cmd - Shutdown types | ||
| 88 | * @SHUT_RD: shutdown receptions | ||
| 89 | * @SHUT_WR: shutdown transmissions | ||
| 90 | * @SHUT_RDWR: shutdown receptions/transmissions | ||
| 91 | */ | ||
| 86 | enum sock_shutdown_cmd { | 92 | enum sock_shutdown_cmd { |
| 87 | SHUT_RD, | 93 | SHUT_RD, |
| 88 | SHUT_WR, | 94 | SHUT_WR, |
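The newly documented enum is what in-kernel socket users pass to kernel_sock_shutdown(); for example (hypothetical helper):

static void demo_close_transport(struct socket *sock)
{
	kernel_sock_shutdown(sock, SHUT_RDWR);	/* stop both directions */
	sock_release(sock);
}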
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 4eb26d278046..280ae96dc4c3 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h | |||
| @@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page, | |||
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. | 43 | * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. |
| 44 | * If specified range includes migrate types other than MOVABLE or CMA, | ||
| 45 | * this will fail with -EBUSY. | ||
| 46 | * | ||
| 47 | * For isolating all pages in the range finally, the caller have to | ||
| 48 | * free all pages in the range. test_page_isolated() can be used for | ||
| 49 | * test it. | ||
| 50 | * | ||
| 51 | * The following flags are allowed (they can be combined in a bit mask) | ||
| 52 | * SKIP_HWPOISON - ignore hwpoison pages | ||
| 53 | * REPORT_FAILURE - report details about the failure to isolate the range | ||
| 54 | */ | 44 | */ |
| 55 | int | 45 | int |
| 56 | start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | 46 | start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, |
diff --git a/include/linux/parport.h b/include/linux/parport.h index f41f1d041e2c..397607a0c0eb 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h | |||
| @@ -460,7 +460,6 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *, | |||
| 460 | void *, size_t, int); | 460 | void *, size_t, int); |
| 461 | 461 | ||
| 462 | /* IEEE1284.3 functions */ | 462 | /* IEEE1284.3 functions */ |
| 463 | #define daisy_dev_name "Device ID probe" | ||
| 464 | extern int parport_daisy_init (struct parport *port); | 463 | extern int parport_daisy_init (struct parport *port); |
| 465 | extern void parport_daisy_fini (struct parport *port); | 464 | extern void parport_daisy_fini (struct parport *port); |
| 466 | extern struct pardevice *parport_open (int devnum, const char *name); | 465 | extern struct pardevice *parport_open (int devnum, const char *name); |
| @@ -469,18 +468,6 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len); | |||
| 469 | extern void parport_daisy_deselect_all (struct parport *port); | 468 | extern void parport_daisy_deselect_all (struct parport *port); |
| 470 | extern int parport_daisy_select (struct parport *port, int daisy, int mode); | 469 | extern int parport_daisy_select (struct parport *port, int daisy, int mode); |
| 471 | 470 | ||
| 472 | #ifdef CONFIG_PARPORT_1284 | ||
| 473 | extern int daisy_drv_init(void); | ||
| 474 | extern void daisy_drv_exit(void); | ||
| 475 | #else | ||
| 476 | static inline int daisy_drv_init(void) | ||
| 477 | { | ||
| 478 | return 0; | ||
| 479 | } | ||
| 480 | |||
| 481 | static inline void daisy_drv_exit(void) {} | ||
| 482 | #endif | ||
| 483 | |||
| 484 | /* Lowlevel drivers _can_ call this support function to handle irqs. */ | 471 | /* Lowlevel drivers _can_ call this support function to handle irqs. */ |
| 485 | static inline void parport_generic_irq(struct parport *port) | 472 | static inline void parport_generic_irq(struct parport *port) |
| 486 | { | 473 | { |
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h index a867637e172d..9e46678edb2a 100644 --- a/include/linux/platform_data/gpio/gpio-amd-fch.h +++ b/include/linux/platform_data/gpio/gpio-amd-fch.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL+ */ | 1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
| 2 | 2 | ||
| 3 | /* | 3 | /* |
| 4 | * AMD FCH gpio driver platform-data | 4 | * AMD FCH gpio driver platform-data |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index edb9b040c94c..d5084ebd9f03 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
| @@ -9,6 +9,13 @@ | |||
| 9 | #include <linux/bug.h> /* For BUG_ON. */ | 9 | #include <linux/bug.h> /* For BUG_ON. */ |
| 10 | #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ | 10 | #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ |
| 11 | #include <uapi/linux/ptrace.h> | 11 | #include <uapi/linux/ptrace.h> |
| 12 | #include <linux/seccomp.h> | ||
| 13 | |||
| 14 | /* Add sp to seccomp_data; since seccomp is a user API, we don't want to modify it */ | ||
| 15 | struct syscall_info { | ||
| 16 | __u64 sp; | ||
| 17 | struct seccomp_data data; | ||
| 18 | }; | ||
| 12 | 19 | ||
| 13 | extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, | 20 | extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, |
| 14 | void *buf, int len, unsigned int gup_flags); | 21 | void *buf, int len, unsigned int gup_flags); |
| @@ -407,9 +414,7 @@ static inline void user_single_step_report(struct pt_regs *regs) | |||
| 407 | #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) | 414 | #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) |
| 408 | #endif | 415 | #endif |
| 409 | 416 | ||
| 410 | extern int task_current_syscall(struct task_struct *target, long *callno, | 417 | extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); |
| 411 | unsigned long args[6], unsigned int maxargs, | ||
| 412 | unsigned long *sp, unsigned long *pc); | ||
| 413 | 418 | ||
| 414 | extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); | 419 | extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); |
| 415 | #endif | 420 | #endif |
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index ae5655197698..e412c092c1e8 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h | |||
| @@ -418,10 +418,20 @@ static inline void set_restore_sigmask(void) | |||
| 418 | set_thread_flag(TIF_RESTORE_SIGMASK); | 418 | set_thread_flag(TIF_RESTORE_SIGMASK); |
| 419 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | 419 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); |
| 420 | } | 420 | } |
| 421 | |||
| 422 | static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) | ||
| 423 | { | ||
| 424 | clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); | ||
| 425 | } | ||
| 426 | |||
| 421 | static inline void clear_restore_sigmask(void) | 427 | static inline void clear_restore_sigmask(void) |
| 422 | { | 428 | { |
| 423 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 429 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
| 424 | } | 430 | } |
| 431 | static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) | ||
| 432 | { | ||
| 433 | return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); | ||
| 434 | } | ||
| 425 | static inline bool test_restore_sigmask(void) | 435 | static inline bool test_restore_sigmask(void) |
| 426 | { | 436 | { |
| 427 | return test_thread_flag(TIF_RESTORE_SIGMASK); | 437 | return test_thread_flag(TIF_RESTORE_SIGMASK); |
| @@ -439,6 +449,10 @@ static inline void set_restore_sigmask(void) | |||
| 439 | current->restore_sigmask = true; | 449 | current->restore_sigmask = true; |
| 440 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | 450 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); |
| 441 | } | 451 | } |
| 452 | static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) | ||
| 453 | { | ||
| 454 | tsk->restore_sigmask = false; | ||
| 455 | } | ||
| 442 | static inline void clear_restore_sigmask(void) | 456 | static inline void clear_restore_sigmask(void) |
| 443 | { | 457 | { |
| 444 | current->restore_sigmask = false; | 458 | current->restore_sigmask = false; |
| @@ -447,6 +461,10 @@ static inline bool test_restore_sigmask(void) | |||
| 447 | { | 461 | { |
| 448 | return current->restore_sigmask; | 462 | return current->restore_sigmask; |
| 449 | } | 463 | } |
| 464 | static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) | ||
| 465 | { | ||
| 466 | return tsk->restore_sigmask; | ||
| 467 | } | ||
| 450 | static inline bool test_and_clear_restore_sigmask(void) | 468 | static inline bool test_and_clear_restore_sigmask(void) |
| 451 | { | 469 | { |
| 452 | if (!current->restore_sigmask) | 470 | if (!current->restore_sigmask) |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 11b45f7ae405..9449b19c5f10 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -32,6 +32,8 @@ | |||
| 32 | #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U) | 32 | #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U) |
| 33 | /* Use GFP_DMA memory */ | 33 | /* Use GFP_DMA memory */ |
| 34 | #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U) | 34 | #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U) |
| 35 | /* Use GFP_DMA32 memory */ | ||
| 36 | #define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U) | ||
| 35 | /* DEBUG: Store the last owner for bug hunting */ | 37 | /* DEBUG: Store the last owner for bug hunting */ |
| 36 | #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U) | 38 | #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U) |
| 37 | /* Panic if kmem_cache_create() fails */ | 39 | /* Panic if kmem_cache_create() fails */ |
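SLAB_CACHE_DMA32 makes a whole cache allocate from ZONE_DMA32, which is aimed at objects that hardware must be able to address with 32 bits (the IOMMU short-descriptor page tables were the motivating user). A hypothetical sketch:

struct demo_pt { u32 entries[256]; };		/* must live below 4 GiB */
static struct kmem_cache *demo_pt_cache;

static int __init demo_pt_cache_init(void)
{
	demo_pt_cache = kmem_cache_create("demo_pt", sizeof(struct demo_pt),
					  0, SLAB_CACHE_DMA32, NULL);
	return demo_pt_cache ? 0 : -ENOMEM;
}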
diff --git a/include/linux/socket.h b/include/linux/socket.h index 6016daeecee4..b57cd8bf96e2 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
| @@ -26,7 +26,7 @@ typedef __kernel_sa_family_t sa_family_t; | |||
| 26 | /* | 26 | /* |
| 27 | * 1003.1g requires sa_family_t and that sa_data is char. | 27 | * 1003.1g requires sa_family_t and that sa_data is char. |
| 28 | */ | 28 | */ |
| 29 | 29 | ||
| 30 | struct sockaddr { | 30 | struct sockaddr { |
| 31 | sa_family_t sa_family; /* address family, AF_xxx */ | 31 | sa_family_t sa_family; /* address family, AF_xxx */ |
| 32 | char sa_data[14]; /* 14 bytes of protocol address */ | 32 | char sa_data[14]; /* 14 bytes of protocol address */ |
| @@ -44,7 +44,7 @@ struct linger { | |||
| 44 | * system, not 4.3. Thus msg_accrights(len) are now missing. They | 44 | * system, not 4.3. Thus msg_accrights(len) are now missing. They |
| 45 | * belong in an obscure libc emulation or the bin. | 45 | * belong in an obscure libc emulation or the bin. |
| 46 | */ | 46 | */ |
| 47 | 47 | ||
| 48 | struct msghdr { | 48 | struct msghdr { |
| 49 | void *msg_name; /* ptr to socket address structure */ | 49 | void *msg_name; /* ptr to socket address structure */ |
| 50 | int msg_namelen; /* size of socket address structure */ | 50 | int msg_namelen; /* size of socket address structure */ |
| @@ -54,7 +54,7 @@ struct msghdr { | |||
| 54 | unsigned int msg_flags; /* flags on received message */ | 54 | unsigned int msg_flags; /* flags on received message */ |
| 55 | struct kiocb *msg_iocb; /* ptr to iocb for async requests */ | 55 | struct kiocb *msg_iocb; /* ptr to iocb for async requests */ |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | struct user_msghdr { | 58 | struct user_msghdr { |
| 59 | void __user *msg_name; /* ptr to socket address structure */ | 59 | void __user *msg_name; /* ptr to socket address structure */ |
| 60 | int msg_namelen; /* size of socket address structure */ | 60 | int msg_namelen; /* size of socket address structure */ |
| @@ -122,7 +122,7 @@ struct cmsghdr { | |||
| 122 | * inside range, given by msg->msg_controllen before using | 122 | * inside range, given by msg->msg_controllen before using |
| 123 | * ancillary object DATA. --ANK (980731) | 123 | * ancillary object DATA. --ANK (980731) |
| 124 | */ | 124 | */ |
| 125 | 125 | ||
| 126 | static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, | 126 | static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, |
| 127 | struct cmsghdr *__cmsg) | 127 | struct cmsghdr *__cmsg) |
| 128 | { | 128 | { |
| @@ -264,10 +264,10 @@ struct ucred { | |||
| 264 | /* Maximum queue length specifiable by listen. */ | 264 | /* Maximum queue length specifiable by listen. */ |
| 265 | #define SOMAXCONN 128 | 265 | #define SOMAXCONN 128 |
| 266 | 266 | ||
| 267 | /* Flags we can use with send/ and recv. | 267 | /* Flags we can use with send/ and recv. |
| 268 | Added those for 1003.1g not all are supported yet | 268 | Added those for 1003.1g not all are supported yet |
| 269 | */ | 269 | */ |
| 270 | 270 | ||
| 271 | #define MSG_OOB 1 | 271 | #define MSG_OOB 1 |
| 272 | #define MSG_PEEK 2 | 272 | #define MSG_PEEK 2 |
| 273 | #define MSG_DONTROUTE 4 | 273 | #define MSG_DONTROUTE 4 |
diff --git a/include/linux/string.h b/include/linux/string.h index 7927b875f80c..6ab0a6fa512e 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
| @@ -150,6 +150,9 @@ extern void * memscan(void *,int,__kernel_size_t); | |||
| 150 | #ifndef __HAVE_ARCH_MEMCMP | 150 | #ifndef __HAVE_ARCH_MEMCMP |
| 151 | extern int memcmp(const void *,const void *,__kernel_size_t); | 151 | extern int memcmp(const void *,const void *,__kernel_size_t); |
| 152 | #endif | 152 | #endif |
| 153 | #ifndef __HAVE_ARCH_BCMP | ||
| 154 | extern int bcmp(const void *,const void *,__kernel_size_t); | ||
| 155 | #endif | ||
| 153 | #ifndef __HAVE_ARCH_MEMCHR | 156 | #ifndef __HAVE_ARCH_MEMCHR |
| 154 | extern void * memchr(const void *,int,__kernel_size_t); | 157 | extern void * memchr(const void *,int,__kernel_size_t); |
| 155 | #endif | 158 | #endif |
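The generic fallback that pairs with this declaration only has to report equal versus not-equal; unlike memcmp(), the sign and magnitude of a non-zero result carry no meaning, so a minimal implementation can simply forward to memcmp(). A sketch, assuming no architecture override:

#ifndef __HAVE_ARCH_BCMP
/* bcmp - returns 0 if the two buffers match, nonzero otherwise; callers
 * must not rely on the sign or magnitude of a nonzero return value. */
int bcmp(const void *a, const void *b, size_t len)
{
	return memcmp(a, b, len);
}
#endif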
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h index a240ed2a0372..ff56c443180c 100644 --- a/include/linux/vbox_utils.h +++ b/include/linux/vbox_utils.h | |||
| @@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...); | |||
| 24 | #define vbg_debug pr_debug | 24 | #define vbg_debug pr_debug |
| 25 | #endif | 25 | #endif |
| 26 | 26 | ||
| 27 | int vbg_hgcm_connect(struct vbg_dev *gdev, | 27 | int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor, |
| 28 | struct vmmdev_hgcm_service_location *loc, | 28 | struct vmmdev_hgcm_service_location *loc, |
| 29 | u32 *client_id, int *vbox_status); | 29 | u32 *client_id, int *vbox_status); |
| 30 | 30 | ||
| 31 | int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status); | 31 | int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor, |
| 32 | u32 client_id, int *vbox_status); | ||
| 32 | 33 | ||
| 33 | int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, | 34 | int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id, |
| 34 | u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, | 35 | u32 function, u32 timeout_ms, |
| 35 | u32 parm_count, int *vbox_status); | 36 | struct vmmdev_hgcm_function_parameter *parms, u32 parm_count, |
| 37 | int *vbox_status); | ||
| 36 | 38 | ||
| 37 | /** | 39 | /** |
| 38 | * Convert a VirtualBox status code to a standard Linux kernel return value. | 40 | * Convert a VirtualBox status code to a standard Linux kernel return value. |
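A hedged caller-side sketch of the reworked prototypes: the new requestor argument is simply threaded through to the host, built from the VMMDEV_REQUESTOR_* flags added further down in vbox_vmmdev_types.h. The function, variable names and service location are illustrative only:

static int example_connect(struct vbg_dev *gdev,
			   struct vmmdev_hgcm_service_location *loc)
{
	u32 requestor = VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV;
	u32 client_id;
	int vbox_status, ret;

	ret = vbg_hgcm_connect(gdev, requestor, loc, &client_id, &vbox_status);
	if (ret < 0)
		return ret;

	return vbg_status_code_to_errno(vbox_status);
}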
diff --git a/include/net/act_api.h b/include/net/act_api.h index c745e9ccfab2..c61a1bf4e3de 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h | |||
| @@ -39,7 +39,7 @@ struct tc_action { | |||
| 39 | struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw; | 39 | struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw; |
| 40 | struct gnet_stats_queue __percpu *cpu_qstats; | 40 | struct gnet_stats_queue __percpu *cpu_qstats; |
| 41 | struct tc_cookie __rcu *act_cookie; | 41 | struct tc_cookie __rcu *act_cookie; |
| 42 | struct tcf_chain *goto_chain; | 42 | struct tcf_chain __rcu *goto_chain; |
| 43 | }; | 43 | }; |
| 44 | #define tcf_index common.tcfa_index | 44 | #define tcf_index common.tcfa_index |
| 45 | #define tcf_refcnt common.tcfa_refcnt | 45 | #define tcf_refcnt common.tcfa_refcnt |
| @@ -90,7 +90,7 @@ struct tc_action_ops { | |||
| 90 | int (*lookup)(struct net *net, struct tc_action **a, u32 index); | 90 | int (*lookup)(struct net *net, struct tc_action **a, u32 index); |
| 91 | int (*init)(struct net *net, struct nlattr *nla, | 91 | int (*init)(struct net *net, struct nlattr *nla, |
| 92 | struct nlattr *est, struct tc_action **act, int ovr, | 92 | struct nlattr *est, struct tc_action **act, int ovr, |
| 93 | int bind, bool rtnl_held, | 93 | int bind, bool rtnl_held, struct tcf_proto *tp, |
| 94 | struct netlink_ext_ack *extack); | 94 | struct netlink_ext_ack *extack); |
| 95 | int (*walk)(struct net *, struct sk_buff *, | 95 | int (*walk)(struct net *, struct sk_buff *, |
| 96 | struct netlink_callback *, int, | 96 | struct netlink_callback *, int, |
| @@ -181,6 +181,11 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); | |||
| 181 | int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); | 181 | int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); |
| 182 | int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); | 182 | int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); |
| 183 | 183 | ||
| 184 | int tcf_action_check_ctrlact(int action, struct tcf_proto *tp, | ||
| 185 | struct tcf_chain **handle, | ||
| 186 | struct netlink_ext_ack *newchain); | ||
| 187 | struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action, | ||
| 188 | struct tcf_chain *newchain); | ||
| 184 | #endif /* CONFIG_NET_CLS_ACT */ | 189 | #endif /* CONFIG_NET_CLS_ACT */ |
| 185 | 190 | ||
| 186 | static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, | 191 | static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, |
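A hedged sketch of the intended call pattern in an action's ->init() path: validate the control action (resolving any goto_chain target via *tp) before the action is committed, then install it together with the chain once setup succeeds. The surrounding variables (parm, tp, extack, a) and tcf_chain_put_by_act() are assumed from the usual action-init context:

	struct tcf_chain *goto_ch = NULL;
	int err;

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		return err;

	/* ... allocate / configure the action as before ... */

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);	/* drop ref to any old chain */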
diff --git a/include/net/ip.h b/include/net/ip.h index be3cad9c2e4c..583526aad1d0 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
| @@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, | |||
| 677 | unsigned char __user *data, int optlen); | 677 | unsigned char __user *data, int optlen); |
| 678 | void ip_options_undo(struct ip_options *opt); | 678 | void ip_options_undo(struct ip_options *opt); |
| 679 | void ip_forward_options(struct sk_buff *skb); | 679 | void ip_forward_options(struct sk_buff *skb); |
| 680 | int ip_options_rcv_srr(struct sk_buff *skb); | 680 | int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev); |
| 681 | 681 | ||
| 682 | /* | 682 | /* |
| 683 | * Functions provided by ip_sockglue.c | 683 | * Functions provided by ip_sockglue.c |
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index a68ced28d8f4..12689ddfc24c 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
| @@ -59,6 +59,7 @@ struct net { | |||
| 59 | */ | 59 | */ |
| 60 | spinlock_t rules_mod_lock; | 60 | spinlock_t rules_mod_lock; |
| 61 | 61 | ||
| 62 | u32 hash_mix; | ||
| 62 | atomic64_t cookie_gen; | 63 | atomic64_t cookie_gen; |
| 63 | 64 | ||
| 64 | struct list_head list; /* list of network namespaces */ | 65 | struct list_head list; /* list of network namespaces */ |
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h index 16a842456189..d9b665151f3d 100644 --- a/include/net/netns/hash.h +++ b/include/net/netns/hash.h | |||
| @@ -2,16 +2,10 @@ | |||
| 2 | #ifndef __NET_NS_HASH_H__ | 2 | #ifndef __NET_NS_HASH_H__ |
| 3 | #define __NET_NS_HASH_H__ | 3 | #define __NET_NS_HASH_H__ |
| 4 | 4 | ||
| 5 | #include <asm/cache.h> | 5 | #include <net/net_namespace.h> |
| 6 | |||
| 7 | struct net; | ||
| 8 | 6 | ||
| 9 | static inline u32 net_hash_mix(const struct net *net) | 7 | static inline u32 net_hash_mix(const struct net *net) |
| 10 | { | 8 | { |
| 11 | #ifdef CONFIG_NET_NS | 9 | return net->hash_mix; |
| 12 | return (u32)(((unsigned long)net) >> ilog2(sizeof(*net))); | ||
| 13 | #else | ||
| 14 | return 0; | ||
| 15 | #endif | ||
| 16 | } | 10 | } |
| 17 | #endif | 11 | #endif |
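net_hash_mix() now returns per-namespace entropy instead of a value derived from the struct net pointer, which was both guessable and a potential kernel-address leak. A minimal sketch, assuming the new hash_mix field is seeded once when the namespace is created; the exact seeding site and random call are assumptions:

/* assumed seeding site, e.g. called from setup_net() */
static void example_seed_hash_mix(struct net *net)
{
	net->hash_mix = get_random_int();
}

/* consumers keep folding the mix into their hashes exactly as before */
static u32 example_port_hash(const struct net *net, u16 port)
{
	return jhash_1word(port, net_hash_mix(net));
}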
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 87499b6b35d6..df5c69db68af 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h | |||
| @@ -166,7 +166,7 @@ struct nci_conn_info { | |||
| 166 | * According to specification 102 622 chapter 4.4 Pipes, | 166 | * According to specification 102 622 chapter 4.4 Pipes, |
| 167 | * the pipe identifier is 7 bits long. | 167 | * the pipe identifier is 7 bits long. |
| 168 | */ | 168 | */ |
| 169 | #define NCI_HCI_MAX_PIPES 127 | 169 | #define NCI_HCI_MAX_PIPES 128 |
| 170 | 170 | ||
| 171 | struct nci_hci_gate { | 171 | struct nci_hci_gate { |
| 172 | u8 gate; | 172 | u8 gate; |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 31284c078d06..a2b38b3deeca 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
| @@ -378,6 +378,7 @@ struct tcf_chain { | |||
| 378 | bool flushing; | 378 | bool flushing; |
| 379 | const struct tcf_proto_ops *tmplt_ops; | 379 | const struct tcf_proto_ops *tmplt_ops; |
| 380 | void *tmplt_priv; | 380 | void *tmplt_priv; |
| 381 | struct rcu_head rcu; | ||
| 381 | }; | 382 | }; |
| 382 | 383 | ||
| 383 | struct tcf_block { | 384 | struct tcf_block { |
| @@ -922,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch) | |||
| 922 | sch->qstats.overlimits++; | 923 | sch->qstats.overlimits++; |
| 923 | } | 924 | } |
| 924 | 925 | ||
| 926 | static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch) | ||
| 927 | { | ||
| 928 | __u32 qlen = qdisc_qlen_sum(sch); | ||
| 929 | |||
| 930 | return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen); | ||
| 931 | } | ||
| 932 | |||
| 933 | static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen, | ||
| 934 | __u32 *backlog) | ||
| 935 | { | ||
| 936 | struct gnet_stats_queue qstats = { 0 }; | ||
| 937 | __u32 len = qdisc_qlen_sum(sch); | ||
| 938 | |||
| 939 | __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len); | ||
| 940 | *qlen = qstats.qlen; | ||
| 941 | *backlog = qstats.backlog; | ||
| 942 | } | ||
| 943 | |||
| 944 | static inline void qdisc_tree_flush_backlog(struct Qdisc *sch) | ||
| 945 | { | ||
| 946 | __u32 qlen, backlog; | ||
| 947 | |||
| 948 | qdisc_qstats_qlen_backlog(sch, &qlen, &backlog); | ||
| 949 | qdisc_tree_reduce_backlog(sch, qlen, backlog); | ||
| 950 | } | ||
| 951 | |||
| 952 | static inline void qdisc_purge_queue(struct Qdisc *sch) | ||
| 953 | { | ||
| 954 | __u32 qlen, backlog; | ||
| 955 | |||
| 956 | qdisc_qstats_qlen_backlog(sch, &qlen, &backlog); | ||
| 957 | qdisc_reset(sch); | ||
| 958 | qdisc_tree_reduce_backlog(sch, qlen, backlog); | ||
| 959 | } | ||
| 960 | |||
| 925 | static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) | 961 | static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) |
| 926 | { | 962 | { |
| 927 | qh->head = NULL; | 963 | qh->head = NULL; |
| @@ -1105,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, | |||
| 1105 | sch_tree_lock(sch); | 1141 | sch_tree_lock(sch); |
| 1106 | old = *pold; | 1142 | old = *pold; |
| 1107 | *pold = new; | 1143 | *pold = new; |
| 1108 | if (old != NULL) { | 1144 | if (old != NULL) |
| 1109 | unsigned int qlen = old->q.qlen; | 1145 | qdisc_tree_flush_backlog(old); |
| 1110 | unsigned int backlog = old->qstats.backlog; | ||
| 1111 | |||
| 1112 | qdisc_reset(old); | ||
| 1113 | qdisc_tree_reduce_backlog(old, qlen, backlog); | ||
| 1114 | } | ||
| 1115 | sch_tree_unlock(sch); | 1146 | sch_tree_unlock(sch); |
| 1116 | 1147 | ||
| 1117 | return old; | 1148 | return old; |
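The new helpers centralize a pattern that classful qdiscs otherwise open-code when a child qdisc is replaced or a class is removed: snapshot qlen and backlog (summing per-CPU stats where present), reset the queue, and propagate the decrease up the tree. A hedged sketch of a class-teardown path using qdisc_purge_queue(); the class structure is hypothetical:

struct example_class {
	struct Qdisc *qdisc;
	/* ... */
};

static void example_destroy_class(struct Qdisc *sch, struct example_class *cl)
{
	sch_tree_lock(sch);
	/* replaces open-coded qdisc_reset() + qdisc_tree_reduce_backlog() */
	qdisc_purge_queue(cl->qdisc);
	sch_tree_unlock(sch);
}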
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h index 32ee65a30aff..1c6e6c0766ca 100644 --- a/include/net/sctp/checksum.h +++ b/include/net/sctp/checksum.h | |||
| @@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2, | |||
| 61 | static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, | 61 | static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, |
| 62 | unsigned int offset) | 62 | unsigned int offset) |
| 63 | { | 63 | { |
| 64 | struct sctphdr *sh = sctp_hdr(skb); | 64 | struct sctphdr *sh = (struct sctphdr *)(skb->data + offset); |
| 65 | const struct skb_checksum_ops ops = { | 65 | const struct skb_checksum_ops ops = { |
| 66 | .update = sctp_csum_update, | 66 | .update = sctp_csum_update, |
| 67 | .combine = sctp_csum_combine, | 67 | .combine = sctp_csum_combine, |
diff --git a/include/net/sock.h b/include/net/sock.h index 328cb7cb7b0b..8de5ee258b93 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) | |||
| 710 | hlist_add_head_rcu(&sk->sk_node, list); | 710 | hlist_add_head_rcu(&sk->sk_node, list); |
| 711 | } | 711 | } |
| 712 | 712 | ||
| 713 | static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list) | ||
| 714 | { | ||
| 715 | sock_hold(sk); | ||
| 716 | hlist_add_tail_rcu(&sk->sk_node, list); | ||
| 717 | } | ||
| 718 | |||
| 713 | static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) | 719 | static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) |
| 714 | { | 720 | { |
| 715 | hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); | 721 | hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); |
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h index ee8d005f56fc..eb8f01c819e6 100644 --- a/include/net/tc_act/tc_gact.h +++ b/include/net/tc_act/tc_gact.h | |||
| @@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a) | |||
| 56 | 56 | ||
| 57 | static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a) | 57 | static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a) |
| 58 | { | 58 | { |
| 59 | return a->goto_chain->index; | 59 | return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | #endif /* __NET_TC_GACT_H */ | 62 | #endif /* __NET_TC_GACT_H */ |
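Because goto_chain is now RCU-protected, the chain index is read back from the low bits of tcfa_action rather than by dereferencing the pointer. A short hedged sketch of a driver querying it during offload; 'act' and 'chain_index' are assumed from the surrounding offload code:

	if (is_tcf_gact_goto_chain(act))
		chain_index = tcf_gact_goto_chain_index(act);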
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 61cf7dbb6782..d074b6d60f8a 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h | |||
| @@ -36,7 +36,6 @@ struct xdp_umem { | |||
| 36 | u32 headroom; | 36 | u32 headroom; |
| 37 | u32 chunk_size_nohr; | 37 | u32 chunk_size_nohr; |
| 38 | struct user_struct *user; | 38 | struct user_struct *user; |
| 39 | struct pid *pid; | ||
| 40 | unsigned long address; | 39 | unsigned long address; |
| 41 | refcount_t users; | 40 | refcount_t users; |
| 42 | struct work_struct work; | 41 | struct work_struct work; |
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index 44a3259ed4a5..b6e0cbc2c71f 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h | |||
| @@ -28,7 +28,7 @@ TRACE_EVENT_FN(sys_enter, | |||
| 28 | 28 | ||
| 29 | TP_fast_assign( | 29 | TP_fast_assign( |
| 30 | __entry->id = id; | 30 | __entry->id = id; |
| 31 | syscall_get_arguments(current, regs, 0, 6, __entry->args); | 31 | syscall_get_arguments(current, regs, __entry->args); |
| 32 | ), | 32 | ), |
| 33 | 33 | ||
| 34 | TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", | 34 | TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", |
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 5f24b50c9e88..059dc2bedaf6 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild | |||
| @@ -7,5 +7,7 @@ no-export-headers += kvm.h | |||
| 7 | endif | 7 | endif |
| 8 | 8 | ||
| 9 | ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),) | 9 | ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),) |
| 10 | ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),) | ||
| 10 | no-export-headers += kvm_para.h | 11 | no-export-headers += kvm_para.h |
| 11 | endif | 12 | endif |
| 13 | endif | ||
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 3c38ac9a92a7..929c8e537a14 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
| @@ -502,16 +502,6 @@ union bpf_attr { | |||
| 502 | * Return | 502 | * Return |
| 503 | * 0 on success, or a negative error in case of failure. | 503 | * 0 on success, or a negative error in case of failure. |
| 504 | * | 504 | * |
| 505 | * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) | ||
| 506 | * Description | ||
| 507 | * Push an element *value* in *map*. *flags* is one of: | ||
| 508 | * | ||
| 509 | * **BPF_EXIST** | ||
| 510 | * If the queue/stack is full, the oldest element is removed to | ||
| 511 | * make room for this. | ||
| 512 | * Return | ||
| 513 | * 0 on success, or a negative error in case of failure. | ||
| 514 | * | ||
| 515 | * int bpf_probe_read(void *dst, u32 size, const void *src) | 505 | * int bpf_probe_read(void *dst, u32 size, const void *src) |
| 516 | * Description | 506 | * Description |
| 517 | * For tracing programs, safely attempt to read *size* bytes from | 507 | * For tracing programs, safely attempt to read *size* bytes from |
| @@ -1435,14 +1425,14 @@ union bpf_attr { | |||
| 1435 | * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) | 1425 | * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) |
| 1436 | * Description | 1426 | * Description |
| 1437 | * Equivalent to bpf_get_socket_cookie() helper that accepts | 1427 | * Equivalent to bpf_get_socket_cookie() helper that accepts |
| 1438 | * *skb*, but gets socket from **struct bpf_sock_addr** contex. | 1428 | * *skb*, but gets socket from **struct bpf_sock_addr** context. |
| 1439 | * Return | 1429 | * Return |
| 1440 | * A 8-byte long non-decreasing number. | 1430 | * A 8-byte long non-decreasing number. |
| 1441 | * | 1431 | * |
| 1442 | * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) | 1432 | * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) |
| 1443 | * Description | 1433 | * Description |
| 1444 | * Equivalent to bpf_get_socket_cookie() helper that accepts | 1434 | * Equivalent to bpf_get_socket_cookie() helper that accepts |
| 1445 | * *skb*, but gets socket from **struct bpf_sock_ops** contex. | 1435 | * *skb*, but gets socket from **struct bpf_sock_ops** context. |
| 1446 | * Return | 1436 | * Return |
| 1447 | * A 8-byte long non-decreasing number. | 1437 | * A 8-byte long non-decreasing number. |
| 1448 | * | 1438 | * |
| @@ -2098,52 +2088,52 @@ union bpf_attr { | |||
| 2098 | * Return | 2088 | * Return |
| 2099 | * 0 on success, or a negative error in case of failure. | 2089 | * 0 on success, or a negative error in case of failure. |
| 2100 | * | 2090 | * |
| 2101 | * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) | 2091 | * int bpf_rc_repeat(void *ctx) |
| 2102 | * Description | 2092 | * Description |
| 2103 | * This helper is used in programs implementing IR decoding, to | 2093 | * This helper is used in programs implementing IR decoding, to |
| 2104 | * report a successfully decoded key press with *scancode*, | 2094 | * report a successfully decoded repeat key message. This delays |
| 2105 | * *toggle* value in the given *protocol*. The scancode will be | 2095 | * the generation of a key up event for previously generated |
| 2106 | * translated to a keycode using the rc keymap, and reported as | 2096 | * key down event. |
| 2107 | * an input key down event. After a period a key up event is | ||
| 2108 | * generated. This period can be extended by calling either | ||
| 2109 | * **bpf_rc_keydown**\ () again with the same values, or calling | ||
| 2110 | * **bpf_rc_repeat**\ (). | ||
| 2111 | * | 2097 | * |
| 2112 | * Some protocols include a toggle bit, in case the button was | 2098 | * Some IR protocols like NEC have a special IR message for |
| 2113 | * released and pressed again between consecutive scancodes. | 2099 | * repeating last button, for when a button is held down. |
| 2114 | * | 2100 | * |
| 2115 | * The *ctx* should point to the lirc sample as passed into | 2101 | * The *ctx* should point to the lirc sample as passed into |
| 2116 | * the program. | 2102 | * the program. |
| 2117 | * | 2103 | * |
| 2118 | * The *protocol* is the decoded protocol number (see | ||
| 2119 | * **enum rc_proto** for some predefined values). | ||
| 2120 | * | ||
| 2121 | * This helper is only available is the kernel was compiled with | 2104 | * This helper is only available is the kernel was compiled with |
| 2122 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to | 2105 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
| 2123 | * "**y**". | 2106 | * "**y**". |
| 2124 | * Return | 2107 | * Return |
| 2125 | * 0 | 2108 | * 0 |
| 2126 | * | 2109 | * |
| 2127 | * int bpf_rc_repeat(void *ctx) | 2110 | * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) |
| 2128 | * Description | 2111 | * Description |
| 2129 | * This helper is used in programs implementing IR decoding, to | 2112 | * This helper is used in programs implementing IR decoding, to |
| 2130 | * report a successfully decoded repeat key message. This delays | 2113 | * report a successfully decoded key press with *scancode*, |
| 2131 | * the generation of a key up event for previously generated | 2114 | * *toggle* value in the given *protocol*. The scancode will be |
| 2132 | * key down event. | 2115 | * translated to a keycode using the rc keymap, and reported as |
| 2116 | * an input key down event. After a period a key up event is | ||
| 2117 | * generated. This period can be extended by calling either | ||
| 2118 | * **bpf_rc_keydown**\ () again with the same values, or calling | ||
| 2119 | * **bpf_rc_repeat**\ (). | ||
| 2133 | * | 2120 | * |
| 2134 | * Some IR protocols like NEC have a special IR message for | 2121 | * Some protocols include a toggle bit, in case the button was |
| 2135 | * repeating last button, for when a button is held down. | 2122 | * released and pressed again between consecutive scancodes. |
| 2136 | * | 2123 | * |
| 2137 | * The *ctx* should point to the lirc sample as passed into | 2124 | * The *ctx* should point to the lirc sample as passed into |
| 2138 | * the program. | 2125 | * the program. |
| 2139 | * | 2126 | * |
| 2127 | * The *protocol* is the decoded protocol number (see | ||
| 2128 | * **enum rc_proto** for some predefined values). | ||
| 2129 | * | ||
| 2140 | * This helper is only available is the kernel was compiled with | 2130 | * This helper is only available is the kernel was compiled with |
| 2141 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to | 2131 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
| 2142 | * "**y**". | 2132 | * "**y**". |
| 2143 | * Return | 2133 | * Return |
| 2144 | * 0 | 2134 | * 0 |
| 2145 | * | 2135 | * |
| 2146 | * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb) | 2136 | * u64 bpf_skb_cgroup_id(struct sk_buff *skb) |
| 2147 | * Description | 2137 | * Description |
| 2148 | * Return the cgroup v2 id of the socket associated with the *skb*. | 2138 | * Return the cgroup v2 id of the socket associated with the *skb*. |
| 2149 | * This is roughly similar to the **bpf_get_cgroup_classid**\ () | 2139 | * This is roughly similar to the **bpf_get_cgroup_classid**\ () |
| @@ -2159,30 +2149,12 @@ union bpf_attr { | |||
| 2159 | * Return | 2149 | * Return |
| 2160 | * The id is returned or 0 in case the id could not be retrieved. | 2150 | * The id is returned or 0 in case the id could not be retrieved. |
| 2161 | * | 2151 | * |
| 2162 | * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) | ||
| 2163 | * Description | ||
| 2164 | * Return id of cgroup v2 that is ancestor of cgroup associated | ||
| 2165 | * with the *skb* at the *ancestor_level*. The root cgroup is at | ||
| 2166 | * *ancestor_level* zero and each step down the hierarchy | ||
| 2167 | * increments the level. If *ancestor_level* == level of cgroup | ||
| 2168 | * associated with *skb*, then return value will be same as that | ||
| 2169 | * of **bpf_skb_cgroup_id**\ (). | ||
| 2170 | * | ||
| 2171 | * The helper is useful to implement policies based on cgroups | ||
| 2172 | * that are upper in hierarchy than immediate cgroup associated | ||
| 2173 | * with *skb*. | ||
| 2174 | * | ||
| 2175 | * The format of returned id and helper limitations are same as in | ||
| 2176 | * **bpf_skb_cgroup_id**\ (). | ||
| 2177 | * Return | ||
| 2178 | * The id is returned or 0 in case the id could not be retrieved. | ||
| 2179 | * | ||
| 2180 | * u64 bpf_get_current_cgroup_id(void) | 2152 | * u64 bpf_get_current_cgroup_id(void) |
| 2181 | * Return | 2153 | * Return |
| 2182 | * A 64-bit integer containing the current cgroup id based | 2154 | * A 64-bit integer containing the current cgroup id based |
| 2183 | * on the cgroup within which the current task is running. | 2155 | * on the cgroup within which the current task is running. |
| 2184 | * | 2156 | * |
| 2185 | * void* get_local_storage(void *map, u64 flags) | 2157 | * void *bpf_get_local_storage(void *map, u64 flags) |
| 2186 | * Description | 2158 | * Description |
| 2187 | * Get the pointer to the local storage area. | 2159 | * Get the pointer to the local storage area. |
| 2188 | * The type and the size of the local storage is defined | 2160 | * The type and the size of the local storage is defined |
| @@ -2209,6 +2181,24 @@ union bpf_attr { | |||
| 2209 | * Return | 2181 | * Return |
| 2210 | * 0 on success, or a negative error in case of failure. | 2182 | * 0 on success, or a negative error in case of failure. |
| 2211 | * | 2183 | * |
| 2184 | * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) | ||
| 2185 | * Description | ||
| 2186 | * Return id of cgroup v2 that is ancestor of cgroup associated | ||
| 2187 | * with the *skb* at the *ancestor_level*. The root cgroup is at | ||
| 2188 | * *ancestor_level* zero and each step down the hierarchy | ||
| 2189 | * increments the level. If *ancestor_level* == level of cgroup | ||
| 2190 | * associated with *skb*, then return value will be same as that | ||
| 2191 | * of **bpf_skb_cgroup_id**\ (). | ||
| 2192 | * | ||
| 2193 | * The helper is useful to implement policies based on cgroups | ||
| 2194 | * that are upper in hierarchy than immediate cgroup associated | ||
| 2195 | * with *skb*. | ||
| 2196 | * | ||
| 2197 | * The format of returned id and helper limitations are same as in | ||
| 2198 | * **bpf_skb_cgroup_id**\ (). | ||
| 2199 | * Return | ||
| 2200 | * The id is returned or 0 in case the id could not be retrieved. | ||
| 2201 | * | ||
| 2212 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) | 2202 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2213 | * Description | 2203 | * Description |
| 2214 | * Look for TCP socket matching *tuple*, optionally in a child | 2204 | * Look for TCP socket matching *tuple*, optionally in a child |
| @@ -2289,6 +2279,16 @@ union bpf_attr { | |||
| 2289 | * Return | 2279 | * Return |
| 2290 | * 0 on success, or a negative error in case of failure. | 2280 | * 0 on success, or a negative error in case of failure. |
| 2291 | * | 2281 | * |
| 2282 | * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) | ||
| 2283 | * Description | ||
| 2284 | * Push an element *value* in *map*. *flags* is one of: | ||
| 2285 | * | ||
| 2286 | * **BPF_EXIST** | ||
| 2287 | * If the queue/stack is full, the oldest element is | ||
| 2288 | * removed to make room for this. | ||
| 2289 | * Return | ||
| 2290 | * 0 on success, or a negative error in case of failure. | ||
| 2291 | * | ||
| 2292 | * int bpf_map_pop_elem(struct bpf_map *map, void *value) | 2292 | * int bpf_map_pop_elem(struct bpf_map *map, void *value) |
| 2293 | * Description | 2293 | * Description |
| 2294 | * Pop an element from *map*. | 2294 | * Pop an element from *map*. |
| @@ -2343,29 +2343,94 @@ union bpf_attr { | |||
| 2343 | * Return | 2343 | * Return |
| 2344 | * 0 | 2344 | * 0 |
| 2345 | * | 2345 | * |
| 2346 | * int bpf_spin_lock(struct bpf_spin_lock *lock) | ||
| 2347 | * Description | ||
| 2348 | * Acquire a spinlock represented by the pointer *lock*, which is | ||
| 2349 | * stored as part of a value of a map. Taking the lock allows to | ||
| 2350 | * safely update the rest of the fields in that value. The | ||
| 2351 | * spinlock can (and must) later be released with a call to | ||
| 2352 | * **bpf_spin_unlock**\ (\ *lock*\ ). | ||
| 2353 | * | ||
| 2354 | * Spinlocks in BPF programs come with a number of restrictions | ||
| 2355 | * and constraints: | ||
| 2356 | * | ||
| 2357 | * * **bpf_spin_lock** objects are only allowed inside maps of | ||
| 2358 | * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this | ||
| 2359 | * list could be extended in the future). | ||
| 2360 | * * BTF description of the map is mandatory. | ||
| 2361 | * * The BPF program can take ONE lock at a time, since taking two | ||
| 2362 | * or more could cause deadlocks. | ||
| 2362 | * or more could cause deadlocks. | ||
| 2363 | * * Only one **struct bpf_spin_lock** is allowed per map element. | ||
| 2364 | * * When the lock is taken, calls (either BPF to BPF or helpers) | ||
| 2365 | * are not allowed. | ||
| 2366 | * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not | ||
| 2367 | * allowed inside a spinlock-ed region. | ||
| 2368 | * * The BPF program MUST call **bpf_spin_unlock**\ () to release | ||
| 2369 | * the lock, on all execution paths, before it returns. | ||
| 2370 | * * The BPF program can access **struct bpf_spin_lock** only via | ||
| 2371 | * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () | ||
| 2372 | * helpers. Loading or storing data into the **struct | ||
| 2373 | * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. | ||
| 2374 | * * To use the **bpf_spin_lock**\ () helper, the BTF description | ||
| 2375 | * of the map value must be a struct and have **struct | ||
| 2376 | * bpf_spin_lock** *anyname*\ **;** field at the top level. | ||
| 2377 | * Nested lock inside another struct is not allowed. | ||
| 2378 | * * The **struct bpf_spin_lock** *lock* field in a map value must | ||
| 2379 | * be aligned on a multiple of 4 bytes in that value. | ||
| 2380 | * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy | ||
| 2381 | * the **bpf_spin_lock** field to user space. | ||
| 2382 | * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from | ||
| 2383 | * a BPF program, do not update the **bpf_spin_lock** field. | ||
| 2384 | * * **bpf_spin_lock** cannot be on the stack or inside a | ||
| 2385 | * networking packet (it can only be inside of a map value). | ||
| 2386 | * * **bpf_spin_lock** is available to root only. | ||
| 2387 | * * Tracing programs and socket filter programs cannot use | ||
| 2388 | * **bpf_spin_lock**\ () due to insufficient preemption checks | ||
| 2389 | * (but this may change in the future). | ||
| 2390 | * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. | ||
| 2391 | * Return | ||
| 2392 | * 0 | ||
| 2393 | * | ||
| 2394 | * int bpf_spin_unlock(struct bpf_spin_lock *lock) | ||
| 2395 | * Description | ||
| 2396 | * Release the *lock* previously locked by a call to | ||
| 2397 | * **bpf_spin_lock**\ (\ *lock*\ ). | ||
| 2398 | * Return | ||
| 2399 | * 0 | ||
| 2400 | * | ||
| 2346 | * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) | 2401 | * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) |
| 2347 | * Description | 2402 | * Description |
| 2348 | * This helper gets a **struct bpf_sock** pointer such | 2403 | * This helper gets a **struct bpf_sock** pointer such |
| 2349 | * that all the fields in bpf_sock can be accessed. | 2404 | * that all the fields in this **bpf_sock** can be accessed. |
| 2350 | * Return | 2405 | * Return |
| 2351 | * A **struct bpf_sock** pointer on success, or NULL in | 2406 | * A **struct bpf_sock** pointer on success, or **NULL** in |
| 2352 | * case of failure. | 2407 | * case of failure. |
| 2353 | * | 2408 | * |
| 2354 | * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) | 2409 | * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) |
| 2355 | * Description | 2410 | * Description |
| 2356 | * This helper gets a **struct bpf_tcp_sock** pointer from a | 2411 | * This helper gets a **struct bpf_tcp_sock** pointer from a |
| 2357 | * **struct bpf_sock** pointer. | 2412 | * **struct bpf_sock** pointer. |
| 2358 | * | ||
| 2359 | * Return | 2413 | * Return |
| 2360 | * A **struct bpf_tcp_sock** pointer on success, or NULL in | 2414 | * A **struct bpf_tcp_sock** pointer on success, or **NULL** in |
| 2361 | * case of failure. | 2415 | * case of failure. |
| 2362 | * | 2416 | * |
| 2363 | * int bpf_skb_ecn_set_ce(struct sk_buf *skb) | 2417 | * int bpf_skb_ecn_set_ce(struct sk_buf *skb) |
| 2364 | * Description | 2418 | * Description |
| 2365 | * Sets ECN of IP header to ce (congestion encountered) if | 2419 | * Set ECN (Explicit Congestion Notification) field of IP header |
| 2366 | * current value is ect (ECN capable). Works with IPv6 and IPv4. | 2420 | * to **CE** (Congestion Encountered) if current value is **ECT** |
| 2367 | * Return | 2421 | * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 |
| 2368 | * 1 if set, 0 if not set. | 2422 | * and IPv4. |
| 2423 | * Return | ||
| 2424 | * 1 if the **CE** flag is set (either by the current helper call | ||
| 2425 | * or because it was already present), 0 if it is not set. | ||
| 2426 | * | ||
| 2427 | * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) | ||
| 2428 | * Description | ||
| 2429 | * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. | ||
| 2430 | * **bpf_sk_release**\ () is unnecessary and not allowed. | ||
| 2431 | * Return | ||
| 2432 | * A **struct bpf_sock** pointer on success, or **NULL** in | ||
| 2433 | * case of failure. | ||
| 2369 | */ | 2434 | */ |
| 2370 | #define __BPF_FUNC_MAPPER(FN) \ | 2435 | #define __BPF_FUNC_MAPPER(FN) \ |
| 2371 | FN(unspec), \ | 2436 | FN(unspec), \ |
| @@ -2465,7 +2530,8 @@ union bpf_attr { | |||
| 2465 | FN(spin_unlock), \ | 2530 | FN(spin_unlock), \ |
| 2466 | FN(sk_fullsock), \ | 2531 | FN(sk_fullsock), \ |
| 2467 | FN(tcp_sock), \ | 2532 | FN(tcp_sock), \ |
| 2468 | FN(skb_ecn_set_ce), | 2533 | FN(skb_ecn_set_ce), \ |
| 2534 | FN(get_listener_sock), | ||
| 2469 | 2535 | ||
| 2470 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper | 2536 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper |
| 2471 | * function eBPF program intends to call | 2537 | * function eBPF program intends to call |
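To make the newly documented locking rules concrete, here is a hedged BPF-C sketch that respects them: the struct bpf_spin_lock sits at the top level of the map value, exactly one lock is held at a time, no helpers are called while it is held, and every path unlocks before returning. Section and map names are illustrative, the helper-macro header is an assumption, and in practice the loader must also supply BTF for the map value as the description requires.

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* assumption: selftests/libbpf helper macros */

struct val {
	struct bpf_spin_lock lock;	/* top-level field, 4-byte aligned */
	__u64 packets;
};

struct bpf_map_def SEC("maps") counters = {
	.type		= BPF_MAP_TYPE_ARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(struct val),
	.max_entries	= 1,
};

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct val *v = bpf_map_lookup_elem(&counters, &key);

	if (!v)
		return 1;
	bpf_spin_lock(&v->lock);	/* no helper calls until unlock */
	v->packets++;
	bpf_spin_unlock(&v->lock);
	return 1;
}

char _license[] SEC("license") = "GPL";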
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 3652b239dad1..d473e5ed044c 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
| @@ -1591,7 +1591,7 @@ enum ethtool_link_mode_bit_indices { | |||
| 1591 | 1591 | ||
| 1592 | static inline int ethtool_validate_speed(__u32 speed) | 1592 | static inline int ethtool_validate_speed(__u32 speed) |
| 1593 | { | 1593 | { |
| 1594 | return speed <= INT_MAX || speed == SPEED_UNKNOWN; | 1594 | return speed <= INT_MAX || speed == (__u32)SPEED_UNKNOWN; |
| 1595 | } | 1595 | } |
| 1596 | 1596 | ||
| 1597 | /* Duplex, half or full. */ | 1597 | /* Duplex, half or full. */ |
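The cast matters because SPEED_UNKNOWN is defined as -1: comparing it directly against the unsigned speed argument is well-defined (the -1 converts to 0xffffffff) but trips sign-compare warnings in stricter builds, which the explicit cast silences without changing behaviour. A small hedged illustration; the wrapper function is hypothetical:

static void example(void)
{
	int a = ethtool_validate_speed(SPEED_10000);		/* 1: <= INT_MAX */
	int b = ethtool_validate_speed((__u32)SPEED_UNKNOWN);	/* 1: matches SPEED_UNKNOWN */
	int c = ethtool_validate_speed(0x80000000u);		/* 0: above INT_MAX, not UNKNOWN */

	(void)a; (void)b; (void)c;
}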
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h index 0e68024f36c7..26f39816af14 100644 --- a/include/uapi/linux/vbox_vmmdev_types.h +++ b/include/uapi/linux/vbox_vmmdev_types.h | |||
| @@ -102,6 +102,66 @@ enum vmmdev_request_type { | |||
| 102 | #define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32 | 102 | #define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32 |
| 103 | #endif | 103 | #endif |
| 104 | 104 | ||
| 105 | /* vmmdev_request_header.requestor defines */ | ||
| 106 | |||
| 107 | /* Requestor user not given. */ | ||
| 108 | #define VMMDEV_REQUESTOR_USR_NOT_GIVEN 0x00000000 | ||
| 109 | /* The kernel driver (vboxguest) is the requestor. */ | ||
| 110 | #define VMMDEV_REQUESTOR_USR_DRV 0x00000001 | ||
| 111 | /* Some other kernel driver is the requestor. */ | ||
| 112 | #define VMMDEV_REQUESTOR_USR_DRV_OTHER 0x00000002 | ||
| 113 | /* The root or a admin user is the requestor. */ | ||
| 114 | #define VMMDEV_REQUESTOR_USR_ROOT 0x00000003 | ||
| 115 | /* Regular joe user is making the request. */ | ||
| 116 | #define VMMDEV_REQUESTOR_USR_USER 0x00000006 | ||
| 117 | /* User classification mask. */ | ||
| 118 | #define VMMDEV_REQUESTOR_USR_MASK 0x00000007 | ||
| 119 | |||
| 120 | /* Kernel mode request. Note this is 0, check for !USERMODE instead. */ | ||
| 121 | #define VMMDEV_REQUESTOR_KERNEL 0x00000000 | ||
| 122 | /* User mode request. */ | ||
| 123 | #define VMMDEV_REQUESTOR_USERMODE 0x00000008 | ||
| 124 | /* User or kernel mode classification mask. */ | ||
| 125 | #define VMMDEV_REQUESTOR_MODE_MASK 0x00000008 | ||
| 126 | |||
| 127 | /* Don't know the physical console association of the requestor. */ | ||
| 128 | #define VMMDEV_REQUESTOR_CON_DONT_KNOW 0x00000000 | ||
| 129 | /* | ||
| 130 | * The request originates with a process that is NOT associated with the | ||
| 131 | * physical console. | ||
| 132 | */ | ||
| 133 | #define VMMDEV_REQUESTOR_CON_NO 0x00000010 | ||
| 134 | /* Requestor process is associated with the physical console. */ | ||
| 135 | #define VMMDEV_REQUESTOR_CON_YES 0x00000020 | ||
| 136 | /* Console classification mask. */ | ||
| 137 | #define VMMDEV_REQUESTOR_CON_MASK 0x00000030 | ||
| 138 | |||
| 139 | /* Requestor is member of special VirtualBox user group. */ | ||
| 140 | #define VMMDEV_REQUESTOR_GRP_VBOX 0x00000080 | ||
| 141 | |||
| 142 | /* Note: trust level is for windows guests only, linux always uses not-given */ | ||
| 143 | /* Requestor trust level: Unspecified */ | ||
| 144 | #define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN 0x00000000 | ||
| 145 | /* Requestor trust level: Untrusted (SID S-1-16-0) */ | ||
| 146 | #define VMMDEV_REQUESTOR_TRUST_UNTRUSTED 0x00001000 | ||
| 147 | /* Requestor trust level: Untrusted (SID S-1-16-4096) */ | ||
| 148 | #define VMMDEV_REQUESTOR_TRUST_LOW 0x00002000 | ||
| 149 | /* Requestor trust level: Medium (SID S-1-16-8192) */ | ||
| 150 | #define VMMDEV_REQUESTOR_TRUST_MEDIUM 0x00003000 | ||
| 151 | /* Requestor trust level: Medium plus (SID S-1-16-8448) */ | ||
| 152 | #define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS 0x00004000 | ||
| 153 | /* Requestor trust level: High (SID S-1-16-12288) */ | ||
| 154 | #define VMMDEV_REQUESTOR_TRUST_HIGH 0x00005000 | ||
| 155 | /* Requestor trust level: System (SID S-1-16-16384) */ | ||
| 156 | #define VMMDEV_REQUESTOR_TRUST_SYSTEM 0x00006000 | ||
| 157 | /* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */ | ||
| 158 | #define VMMDEV_REQUESTOR_TRUST_PROTECTED 0x00007000 | ||
| 159 | /* Requestor trust level mask */ | ||
| 160 | #define VMMDEV_REQUESTOR_TRUST_MASK 0x00007000 | ||
| 161 | |||
| 162 | /* Requestor is using the less trusted user device node (/dev/vboxuser) */ | ||
| 163 | #define VMMDEV_REQUESTOR_USER_DEVICE 0x00008000 | ||
| 164 | |||
| 105 | /** HGCM service location types. */ | 165 | /** HGCM service location types. */ |
| 106 | enum vmmdev_hgcm_service_location_type { | 166 | enum vmmdev_hgcm_service_location_type { |
| 107 | VMMDEV_HGCM_LOC_INVALID = 0, | 167 | VMMDEV_HGCM_LOC_INVALID = 0, |
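A hedged sketch of how the guest driver might classify the caller of an ioctl with these bits before passing the value as the requestor argument shown earlier; the exact policy and the from_user_device flag are assumptions, the real logic lives in the vboxguest driver:

static u32 example_requestor(bool from_user_device)
{
	u32 requestor = VMMDEV_REQUESTOR_USERMODE |
			VMMDEV_REQUESTOR_USR_NOT_GIVEN |
			VMMDEV_REQUESTOR_TRUST_NOT_GIVEN |
			VMMDEV_REQUESTOR_CON_DONT_KNOW;

	if (capable(CAP_SYS_ADMIN))
		requestor |= VMMDEV_REQUESTOR_USR_ROOT;
	else
		requestor |= VMMDEV_REQUESTOR_USR_USER;

	if (from_user_device)		/* opened /dev/vboxuser */
		requestor |= VMMDEV_REQUESTOR_USER_DEVICE;

	return requestor;
}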
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 8974b3755670..3c18260403dd 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c | |||
| @@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work) | |||
| 162 | static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, | 162 | static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, |
| 163 | struct xdp_frame *xdpf) | 163 | struct xdp_frame *xdpf) |
| 164 | { | 164 | { |
| 165 | unsigned int hard_start_headroom; | ||
| 165 | unsigned int frame_size; | 166 | unsigned int frame_size; |
| 166 | void *pkt_data_start; | 167 | void *pkt_data_start; |
| 167 | struct sk_buff *skb; | 168 | struct sk_buff *skb; |
| 168 | 169 | ||
| 170 | /* Part of headroom was reserved to xdpf */ | ||
| 171 | hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom; | ||
| 172 | |||
| 169 | /* build_skb need to place skb_shared_info after SKB end, and | 173 | /* build_skb need to place skb_shared_info after SKB end, and |
| 170 | * also want to know the memory "truesize". Thus, need to | 174 | * also want to know the memory "truesize". Thus, need to |
| 171 | * know the memory frame size backing xdp_buff. | 175 | * know the memory frame size backing xdp_buff. |
| @@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, | |||
| 183 | * is not at a fixed memory location, with mixed length | 187 | * is not at a fixed memory location, with mixed length |
| 184 | * packets, which is bad for cache-line hotness. | 188 | * packets, which is bad for cache-line hotness. |
| 185 | */ | 189 | */ |
| 186 | frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) + | 190 | frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) + |
| 187 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 191 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
| 188 | 192 | ||
| 189 | pkt_data_start = xdpf->data - xdpf->headroom; | 193 | pkt_data_start = xdpf->data - hard_start_headroom; |
| 190 | skb = build_skb(pkt_data_start, frame_size); | 194 | skb = build_skb(pkt_data_start, frame_size); |
| 191 | if (!skb) | 195 | if (!skb) |
| 192 | return NULL; | 196 | return NULL; |
| 193 | 197 | ||
| 194 | skb_reserve(skb, xdpf->headroom); | 198 | skb_reserve(skb, hard_start_headroom); |
| 195 | __skb_put(skb, xdpf->len); | 199 | __skb_put(skb, xdpf->len); |
| 196 | if (xdpf->metasize) | 200 | if (xdpf->metasize) |
| 197 | skb_metadata_set(skb, xdpf->metasize); | 201 | skb_metadata_set(skb, xdpf->metasize); |
| @@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, | |||
| 205 | * - RX ring dev queue index (skb_record_rx_queue) | 209 | * - RX ring dev queue index (skb_record_rx_queue) |
| 206 | */ | 210 | */ |
| 207 | 211 | ||
| 212 | /* Allow SKB to reuse area used by xdp_frame */ | ||
| 213 | xdp_scrub_frame(xdpf); | ||
| 214 | |||
| 208 | return skb; | 215 | return skb; |
| 209 | } | 216 | } |
| 210 | 217 | ||
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 2ada5e21dfa6..4a8f390a2b82 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c | |||
| @@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ | |||
| 554 | } | 554 | } |
| 555 | EXPORT_SYMBOL(bpf_prog_get_type_path); | 555 | EXPORT_SYMBOL(bpf_prog_get_type_path); |
| 556 | 556 | ||
| 557 | static void bpf_evict_inode(struct inode *inode) | ||
| 558 | { | ||
| 559 | enum bpf_type type; | ||
| 560 | |||
| 561 | truncate_inode_pages_final(&inode->i_data); | ||
| 562 | clear_inode(inode); | ||
| 563 | |||
| 564 | if (S_ISLNK(inode->i_mode)) | ||
| 565 | kfree(inode->i_link); | ||
| 566 | if (!bpf_inode_type(inode, &type)) | ||
| 567 | bpf_any_put(inode->i_private, type); | ||
| 568 | } | ||
| 569 | |||
| 570 | /* | 557 | /* |
| 571 | * Display the mount options in /proc/mounts. | 558 | * Display the mount options in /proc/mounts. |
| 572 | */ | 559 | */ |
| @@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root) | |||
| 579 | return 0; | 566 | return 0; |
| 580 | } | 567 | } |
| 581 | 568 | ||
| 569 | static void bpf_destroy_inode_deferred(struct rcu_head *head) | ||
| 570 | { | ||
| 571 | struct inode *inode = container_of(head, struct inode, i_rcu); | ||
| 572 | enum bpf_type type; | ||
| 573 | |||
| 574 | if (S_ISLNK(inode->i_mode)) | ||
| 575 | kfree(inode->i_link); | ||
| 576 | if (!bpf_inode_type(inode, &type)) | ||
| 577 | bpf_any_put(inode->i_private, type); | ||
| 578 | free_inode_nonrcu(inode); | ||
| 579 | } | ||
| 580 | |||
| 581 | static void bpf_destroy_inode(struct inode *inode) | ||
| 582 | { | ||
| 583 | call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred); | ||
| 584 | } | ||
| 585 | |||
| 582 | static const struct super_operations bpf_super_ops = { | 586 | static const struct super_operations bpf_super_ops = { |
| 583 | .statfs = simple_statfs, | 587 | .statfs = simple_statfs, |
| 584 | .drop_inode = generic_delete_inode, | 588 | .drop_inode = generic_delete_inode, |
| 585 | .show_options = bpf_show_options, | 589 | .show_options = bpf_show_options, |
| 586 | .evict_inode = bpf_evict_inode, | 590 | .destroy_inode = bpf_destroy_inode, |
| 587 | }; | 591 | }; |
| 588 | 592 | ||
| 589 | enum { | 593 | enum { |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 62f6bced3a3c..afca36f53c49 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
| @@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) | |||
| 136 | 136 | ||
| 137 | void *bpf_map_area_alloc(size_t size, int numa_node) | 137 | void *bpf_map_area_alloc(size_t size, int numa_node) |
| 138 | { | 138 | { |
| 139 | /* We definitely need __GFP_NORETRY, so OOM killer doesn't | 139 | /* We really just want to fail instead of triggering OOM killer |
| 140 | * trigger under memory pressure as we really just want to | 140 | * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, |
| 141 | * fail instead. | 141 | * which is used for lower order allocation requests. |
| 142 | * | ||
| 143 | * It has been observed that higher order allocation requests done by | ||
| 144 | * vmalloc with __GFP_NORETRY being set might fail due to not trying | ||
| 145 | * to reclaim memory from the page cache, thus we set | ||
| 146 | * __GFP_RETRY_MAYFAIL to avoid such situations. | ||
| 142 | */ | 147 | */ |
| 143 | const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO; | 148 | |
| 149 | const gfp_t flags = __GFP_NOWARN | __GFP_ZERO; | ||
| 144 | void *area; | 150 | void *area; |
| 145 | 151 | ||
| 146 | if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { | 152 | if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { |
| 147 | area = kmalloc_node(size, GFP_USER | flags, numa_node); | 153 | area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, |
| 154 | numa_node); | ||
| 148 | if (area != NULL) | 155 | if (area != NULL) |
| 149 | return area; | 156 | return area; |
| 150 | } | 157 | } |
| 151 | 158 | ||
| 152 | return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags, | 159 | return __vmalloc_node_flags_caller(size, numa_node, |
| 153 | __builtin_return_address(0)); | 160 | GFP_KERNEL | __GFP_RETRY_MAYFAIL | |
| 161 | flags, __builtin_return_address(0)); | ||
| 154 | } | 162 | } |
| 155 | 163 | ||
| 156 | void bpf_map_area_free(void *area) | 164 | void bpf_map_area_free(void *area) |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ce166a002d16..6c5a41f7f338 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
| @@ -212,7 +212,7 @@ struct bpf_call_arg_meta { | |||
| 212 | int access_size; | 212 | int access_size; |
| 213 | s64 msize_smax_value; | 213 | s64 msize_smax_value; |
| 214 | u64 msize_umax_value; | 214 | u64 msize_umax_value; |
| 215 | int ptr_id; | 215 | int ref_obj_id; |
| 216 | int func_id; | 216 | int func_id; |
| 217 | }; | 217 | }; |
| 218 | 218 | ||
| @@ -346,35 +346,23 @@ static bool reg_type_may_be_null(enum bpf_reg_type type) | |||
| 346 | type == PTR_TO_TCP_SOCK_OR_NULL; | 346 | type == PTR_TO_TCP_SOCK_OR_NULL; |
| 347 | } | 347 | } |
| 348 | 348 | ||
| 349 | static bool type_is_refcounted(enum bpf_reg_type type) | ||
| 350 | { | ||
| 351 | return type == PTR_TO_SOCKET; | ||
| 352 | } | ||
| 353 | |||
| 354 | static bool type_is_refcounted_or_null(enum bpf_reg_type type) | ||
| 355 | { | ||
| 356 | return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL; | ||
| 357 | } | ||
| 358 | |||
| 359 | static bool reg_is_refcounted(const struct bpf_reg_state *reg) | ||
| 360 | { | ||
| 361 | return type_is_refcounted(reg->type); | ||
| 362 | } | ||
| 363 | |||
| 364 | static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) | 349 | static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) |
| 365 | { | 350 | { |
| 366 | return reg->type == PTR_TO_MAP_VALUE && | 351 | return reg->type == PTR_TO_MAP_VALUE && |
| 367 | map_value_has_spin_lock(reg->map_ptr); | 352 | map_value_has_spin_lock(reg->map_ptr); |
| 368 | } | 353 | } |
| 369 | 354 | ||
| 370 | static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg) | 355 | static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) |
| 371 | { | 356 | { |
| 372 | return type_is_refcounted_or_null(reg->type); | 357 | return type == PTR_TO_SOCKET || |
| 358 | type == PTR_TO_SOCKET_OR_NULL || | ||
| 359 | type == PTR_TO_TCP_SOCK || | ||
| 360 | type == PTR_TO_TCP_SOCK_OR_NULL; | ||
| 373 | } | 361 | } |
| 374 | 362 | ||
| 375 | static bool arg_type_is_refcounted(enum bpf_arg_type type) | 363 | static bool arg_type_may_be_refcounted(enum bpf_arg_type type) |
| 376 | { | 364 | { |
| 377 | return type == ARG_PTR_TO_SOCKET; | 365 | return type == ARG_PTR_TO_SOCK_COMMON; |
| 378 | } | 366 | } |
| 379 | 367 | ||
| 380 | /* Determine whether the function releases some resources allocated by another | 368 | /* Determine whether the function releases some resources allocated by another |
| @@ -392,6 +380,12 @@ static bool is_acquire_function(enum bpf_func_id func_id) | |||
| 392 | func_id == BPF_FUNC_sk_lookup_udp; | 380 | func_id == BPF_FUNC_sk_lookup_udp; |
| 393 | } | 381 | } |
| 394 | 382 | ||
| 383 | static bool is_ptr_cast_function(enum bpf_func_id func_id) | ||
| 384 | { | ||
| 385 | return func_id == BPF_FUNC_tcp_sock || | ||
| 386 | func_id == BPF_FUNC_sk_fullsock; | ||
| 387 | } | ||
| 388 | |||
| 395 | /* string representation of 'enum bpf_reg_type' */ | 389 | /* string representation of 'enum bpf_reg_type' */ |
| 396 | static const char * const reg_type_str[] = { | 390 | static const char * const reg_type_str[] = { |
| 397 | [NOT_INIT] = "?", | 391 | [NOT_INIT] = "?", |
| @@ -466,6 +460,8 @@ static void print_verifier_state(struct bpf_verifier_env *env, | |||
| 466 | verbose(env, ",call_%d", func(env, reg)->callsite); | 460 | verbose(env, ",call_%d", func(env, reg)->callsite); |
| 467 | } else { | 461 | } else { |
| 468 | verbose(env, "(id=%d", reg->id); | 462 | verbose(env, "(id=%d", reg->id); |
| 463 | if (reg_type_may_be_refcounted_or_null(t)) | ||
| 464 | verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); | ||
| 469 | if (t != SCALAR_VALUE) | 465 | if (t != SCALAR_VALUE) |
| 470 | verbose(env, ",off=%d", reg->off); | 466 | verbose(env, ",off=%d", reg->off); |
| 471 | if (type_is_pkt_pointer(t)) | 467 | if (type_is_pkt_pointer(t)) |
| @@ -1901,8 +1897,9 @@ continue_func: | |||
| 1901 | } | 1897 | } |
| 1902 | frame++; | 1898 | frame++; |
| 1903 | if (frame >= MAX_CALL_FRAMES) { | 1899 | if (frame >= MAX_CALL_FRAMES) { |
| 1904 | WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); | 1900 | verbose(env, "the call stack of %d frames is too deep !\n", |
| 1905 | return -EFAULT; | 1901 | frame); |
| 1902 | return -E2BIG; | ||
| 1906 | } | 1903 | } |
| 1907 | goto process_func; | 1904 | goto process_func; |
| 1908 | } | 1905 | } |
| @@ -2414,16 +2411,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, | |||
| 2414 | /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ | 2411 | /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ |
| 2415 | if (!type_is_sk_pointer(type)) | 2412 | if (!type_is_sk_pointer(type)) |
| 2416 | goto err_type; | 2413 | goto err_type; |
| 2417 | } else if (arg_type == ARG_PTR_TO_SOCKET) { | 2414 | if (reg->ref_obj_id) { |
| 2418 | expected_type = PTR_TO_SOCKET; | 2415 | if (meta->ref_obj_id) { |
| 2419 | if (type != expected_type) | 2416 | verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", |
| 2420 | goto err_type; | 2417 | regno, reg->ref_obj_id, |
| 2421 | if (meta->ptr_id || !reg->id) { | 2418 | meta->ref_obj_id); |
| 2422 | verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n", | 2419 | return -EFAULT; |
| 2423 | meta->ptr_id, reg->id); | 2420 | } |
| 2424 | return -EFAULT; | 2421 | meta->ref_obj_id = reg->ref_obj_id; |
| 2425 | } | 2422 | } |
| 2426 | meta->ptr_id = reg->id; | ||
| 2427 | } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { | 2423 | } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { |
| 2428 | if (meta->func_id == BPF_FUNC_spin_lock) { | 2424 | if (meta->func_id == BPF_FUNC_spin_lock) { |
| 2429 | if (process_spin_lock(env, regno, true)) | 2425 | if (process_spin_lock(env, regno, true)) |
| @@ -2740,32 +2736,38 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn) | |||
| 2740 | return true; | 2736 | return true; |
| 2741 | } | 2737 | } |
| 2742 | 2738 | ||
| 2743 | static bool check_refcount_ok(const struct bpf_func_proto *fn) | 2739 | static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) |
| 2744 | { | 2740 | { |
| 2745 | int count = 0; | 2741 | int count = 0; |
| 2746 | 2742 | ||
| 2747 | if (arg_type_is_refcounted(fn->arg1_type)) | 2743 | if (arg_type_may_be_refcounted(fn->arg1_type)) |
| 2748 | count++; | 2744 | count++; |
| 2749 | if (arg_type_is_refcounted(fn->arg2_type)) | 2745 | if (arg_type_may_be_refcounted(fn->arg2_type)) |
| 2750 | count++; | 2746 | count++; |
| 2751 | if (arg_type_is_refcounted(fn->arg3_type)) | 2747 | if (arg_type_may_be_refcounted(fn->arg3_type)) |
| 2752 | count++; | 2748 | count++; |
| 2753 | if (arg_type_is_refcounted(fn->arg4_type)) | 2749 | if (arg_type_may_be_refcounted(fn->arg4_type)) |
| 2754 | count++; | 2750 | count++; |
| 2755 | if (arg_type_is_refcounted(fn->arg5_type)) | 2751 | if (arg_type_may_be_refcounted(fn->arg5_type)) |
| 2756 | count++; | 2752 | count++; |
| 2757 | 2753 | ||
| 2754 | /* A reference acquiring function cannot acquire | ||
| 2755 | * another refcounted ptr. | ||
| 2756 | */ | ||
| 2757 | if (is_acquire_function(func_id) && count) | ||
| 2758 | return false; | ||
| 2759 | |||
| 2758 | /* We only support one arg being unreferenced at the moment, | 2760 | /* We only support one arg being unreferenced at the moment, |
| 2759 | * which is sufficient for the helper functions we have right now. | 2761 | * which is sufficient for the helper functions we have right now. |
| 2760 | */ | 2762 | */ |
| 2761 | return count <= 1; | 2763 | return count <= 1; |
| 2762 | } | 2764 | } |
| 2763 | 2765 | ||
| 2764 | static int check_func_proto(const struct bpf_func_proto *fn) | 2766 | static int check_func_proto(const struct bpf_func_proto *fn, int func_id) |
| 2765 | { | 2767 | { |
| 2766 | return check_raw_mode_ok(fn) && | 2768 | return check_raw_mode_ok(fn) && |
| 2767 | check_arg_pair_ok(fn) && | 2769 | check_arg_pair_ok(fn) && |
| 2768 | check_refcount_ok(fn) ? 0 : -EINVAL; | 2770 | check_refcount_ok(fn, func_id) ? 0 : -EINVAL; |
| 2769 | } | 2771 | } |
| 2770 | 2772 | ||
| 2771 | /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] | 2773 | /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] |
| @@ -2799,19 +2801,20 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) | |||
| 2799 | } | 2801 | } |
| 2800 | 2802 | ||
| 2801 | static void release_reg_references(struct bpf_verifier_env *env, | 2803 | static void release_reg_references(struct bpf_verifier_env *env, |
| 2802 | struct bpf_func_state *state, int id) | 2804 | struct bpf_func_state *state, |
| 2805 | int ref_obj_id) | ||
| 2803 | { | 2806 | { |
| 2804 | struct bpf_reg_state *regs = state->regs, *reg; | 2807 | struct bpf_reg_state *regs = state->regs, *reg; |
| 2805 | int i; | 2808 | int i; |
| 2806 | 2809 | ||
| 2807 | for (i = 0; i < MAX_BPF_REG; i++) | 2810 | for (i = 0; i < MAX_BPF_REG; i++) |
| 2808 | if (regs[i].id == id) | 2811 | if (regs[i].ref_obj_id == ref_obj_id) |
| 2809 | mark_reg_unknown(env, regs, i); | 2812 | mark_reg_unknown(env, regs, i); |
| 2810 | 2813 | ||
| 2811 | bpf_for_each_spilled_reg(i, state, reg) { | 2814 | bpf_for_each_spilled_reg(i, state, reg) { |
| 2812 | if (!reg) | 2815 | if (!reg) |
| 2813 | continue; | 2816 | continue; |
| 2814 | if (reg_is_refcounted(reg) && reg->id == id) | 2817 | if (reg->ref_obj_id == ref_obj_id) |
| 2815 | __mark_reg_unknown(reg); | 2818 | __mark_reg_unknown(reg); |
| 2816 | } | 2819 | } |
| 2817 | } | 2820 | } |
| @@ -2820,15 +2823,20 @@ static void release_reg_references(struct bpf_verifier_env *env, | |||
| 2820 | * resources. Identify all copies of the same pointer and clear the reference. | 2823 | * resources. Identify all copies of the same pointer and clear the reference. |
| 2821 | */ | 2824 | */ |
| 2822 | static int release_reference(struct bpf_verifier_env *env, | 2825 | static int release_reference(struct bpf_verifier_env *env, |
| 2823 | struct bpf_call_arg_meta *meta) | 2826 | int ref_obj_id) |
| 2824 | { | 2827 | { |
| 2825 | struct bpf_verifier_state *vstate = env->cur_state; | 2828 | struct bpf_verifier_state *vstate = env->cur_state; |
| 2829 | int err; | ||
| 2826 | int i; | 2830 | int i; |
| 2827 | 2831 | ||
| 2832 | err = release_reference_state(cur_func(env), ref_obj_id); | ||
| 2833 | if (err) | ||
| 2834 | return err; | ||
| 2835 | |||
| 2828 | for (i = 0; i <= vstate->curframe; i++) | 2836 | for (i = 0; i <= vstate->curframe; i++) |
| 2829 | release_reg_references(env, vstate->frame[i], meta->ptr_id); | 2837 | release_reg_references(env, vstate->frame[i], ref_obj_id); |
| 2830 | 2838 | ||
| 2831 | return release_reference_state(cur_func(env), meta->ptr_id); | 2839 | return 0; |
| 2832 | } | 2840 | } |
| 2833 | 2841 | ||
| 2834 | static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, | 2842 | static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| @@ -3047,7 +3055,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 3047 | memset(&meta, 0, sizeof(meta)); | 3055 | memset(&meta, 0, sizeof(meta)); |
| 3048 | meta.pkt_access = fn->pkt_access; | 3056 | meta.pkt_access = fn->pkt_access; |
| 3049 | 3057 | ||
| 3050 | err = check_func_proto(fn); | 3058 | err = check_func_proto(fn, func_id); |
| 3051 | if (err) { | 3059 | if (err) { |
| 3052 | verbose(env, "kernel subsystem misconfigured func %s#%d\n", | 3060 | verbose(env, "kernel subsystem misconfigured func %s#%d\n", |
| 3053 | func_id_name(func_id), func_id); | 3061 | func_id_name(func_id), func_id); |
| @@ -3093,7 +3101,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 3093 | return err; | 3101 | return err; |
| 3094 | } | 3102 | } |
| 3095 | } else if (is_release_function(func_id)) { | 3103 | } else if (is_release_function(func_id)) { |
| 3096 | err = release_reference(env, &meta); | 3104 | err = release_reference(env, meta.ref_obj_id); |
| 3097 | if (err) { | 3105 | if (err) { |
| 3098 | verbose(env, "func %s#%d reference has not been acquired before\n", | 3106 | verbose(env, "func %s#%d reference has not been acquired before\n", |
| 3099 | func_id_name(func_id), func_id); | 3107 | func_id_name(func_id), func_id); |
| @@ -3154,8 +3162,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 3154 | 3162 | ||
| 3155 | if (id < 0) | 3163 | if (id < 0) |
| 3156 | return id; | 3164 | return id; |
| 3157 | /* For release_reference() */ | 3165 | /* For mark_ptr_or_null_reg() */ |
| 3158 | regs[BPF_REG_0].id = id; | 3166 | regs[BPF_REG_0].id = id; |
| 3167 | /* For release_reference() */ | ||
| 3168 | regs[BPF_REG_0].ref_obj_id = id; | ||
| 3159 | } else { | 3169 | } else { |
| 3160 | /* For mark_ptr_or_null_reg() */ | 3170 | /* For mark_ptr_or_null_reg() */ |
| 3161 | regs[BPF_REG_0].id = ++env->id_gen; | 3171 | regs[BPF_REG_0].id = ++env->id_gen; |
| @@ -3170,6 +3180,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 3170 | return -EINVAL; | 3180 | return -EINVAL; |
| 3171 | } | 3181 | } |
| 3172 | 3182 | ||
| 3183 | if (is_ptr_cast_function(func_id)) | ||
| 3184 | /* For release_reference() */ | ||
| 3185 | regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; | ||
| 3186 | |||
| 3173 | do_refine_retval_range(regs, fn->ret_type, func_id, &meta); | 3187 | do_refine_retval_range(regs, fn->ret_type, func_id, &meta); |
| 3174 | 3188 | ||
| 3175 | err = check_map_func_compatibility(env, meta.map_ptr, func_id); | 3189 | err = check_map_func_compatibility(env, meta.map_ptr, func_id); |
| @@ -3368,7 +3382,7 @@ do_sim: | |||
| 3368 | *dst_reg = *ptr_reg; | 3382 | *dst_reg = *ptr_reg; |
| 3369 | } | 3383 | } |
| 3370 | ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); | 3384 | ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); |
| 3371 | if (!ptr_is_dst_reg) | 3385 | if (!ptr_is_dst_reg && ret) |
| 3372 | *dst_reg = tmp; | 3386 | *dst_reg = tmp; |
| 3373 | return !ret ? -EFAULT : 0; | 3387 | return !ret ? -EFAULT : 0; |
| 3374 | } | 3388 | } |
| @@ -4665,11 +4679,19 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, | |||
| 4665 | } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { | 4679 | } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { |
| 4666 | reg->type = PTR_TO_TCP_SOCK; | 4680 | reg->type = PTR_TO_TCP_SOCK; |
| 4667 | } | 4681 | } |
| 4668 | if (is_null || !(reg_is_refcounted(reg) || | 4682 | if (is_null) { |
| 4669 | reg_may_point_to_spin_lock(reg))) { | 4683 | /* We don't need id and ref_obj_id from this point |
| 4670 | /* We don't need id from this point onwards anymore, | 4684 | * onwards anymore, thus we should better reset it, |
| 4671 | * thus we should better reset it, so that state | 4685 | * so that state pruning has chances to take effect. |
| 4672 | * pruning has chances to take effect. | 4686 | */ |
| 4687 | reg->id = 0; | ||
| 4688 | reg->ref_obj_id = 0; | ||
| 4689 | } else if (!reg_may_point_to_spin_lock(reg)) { | ||
| 4690 | /* For not-NULL ptr, reg->ref_obj_id will be reset | ||
| 4691 | * in release_reg_references(). | ||
| 4692 | * | ||
| 4693 | * reg->id is still used by spin_lock ptr. Other | ||
| 4694 | * than spin_lock ptr type, reg->id can be reset. | ||
| 4673 | */ | 4695 | */ |
| 4674 | reg->id = 0; | 4696 | reg->id = 0; |
| 4675 | } | 4697 | } |
| @@ -4684,11 +4706,16 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, | |||
| 4684 | { | 4706 | { |
| 4685 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; | 4707 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 4686 | struct bpf_reg_state *reg, *regs = state->regs; | 4708 | struct bpf_reg_state *reg, *regs = state->regs; |
| 4709 | u32 ref_obj_id = regs[regno].ref_obj_id; | ||
| 4687 | u32 id = regs[regno].id; | 4710 | u32 id = regs[regno].id; |
| 4688 | int i, j; | 4711 | int i, j; |
| 4689 | 4712 | ||
| 4690 | if (reg_is_refcounted_or_null(®s[regno]) && is_null) | 4713 | if (ref_obj_id && ref_obj_id == id && is_null) |
| 4691 | release_reference_state(state, id); | 4714 | /* regs[regno] is in the " == NULL" branch. |
| 4715 | * No one could have freed the reference state before | ||
| 4716 | * doing the NULL check. | ||
| 4717 | */ | ||
| 4718 | WARN_ON_ONCE(release_reference_state(state, id)); | ||
| 4692 | 4719 | ||
| 4693 | for (i = 0; i < MAX_BPF_REG; i++) | 4720 | for (i = 0; i < MAX_BPF_REG; i++) |
| 4694 | mark_ptr_or_null_reg(state, ®s[i], id, is_null); | 4721 | mark_ptr_or_null_reg(state, ®s[i], id, is_null); |
| @@ -6052,15 +6079,17 @@ static int propagate_liveness(struct bpf_verifier_env *env, | |||
| 6052 | } | 6079 | } |
| 6053 | /* Propagate read liveness of registers... */ | 6080 | /* Propagate read liveness of registers... */ |
| 6054 | BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); | 6081 | BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); |
| 6055 | /* We don't need to worry about FP liveness because it's read-only */ | 6082 | for (frame = 0; frame <= vstate->curframe; frame++) { |
| 6056 | for (i = 0; i < BPF_REG_FP; i++) { | 6083 | /* We don't need to worry about FP liveness, it's read-only */ |
| 6057 | if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) | 6084 | for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { |
| 6058 | continue; | 6085 | if (vparent->frame[frame]->regs[i].live & REG_LIVE_READ) |
| 6059 | if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { | 6086 | continue; |
| 6060 | err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i], | 6087 | if (vstate->frame[frame]->regs[i].live & REG_LIVE_READ) { |
| 6061 | &vparent->frame[vstate->curframe]->regs[i]); | 6088 | err = mark_reg_read(env, &vstate->frame[frame]->regs[i], |
| 6062 | if (err) | 6089 | &vparent->frame[frame]->regs[i]); |
| 6063 | return err; | 6090 | if (err) |
| 6091 | return err; | ||
| 6092 | } | ||
| 6064 | } | 6093 | } |
| 6065 | } | 6094 | } |
| 6066 | 6095 | ||
diff --git a/kernel/cpu.c b/kernel/cpu.c index 025f419d16f6..6754f3ecfd94 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
| @@ -564,6 +564,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) | |||
| 564 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); | 564 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); |
| 565 | } | 565 | } |
| 566 | 566 | ||
| 567 | static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st) | ||
| 568 | { | ||
| 569 | if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) | ||
| 570 | return true; | ||
| 571 | /* | ||
| 572 | * When CPU hotplug is disabled, then taking the CPU down is not | ||
| 573 | * possible because takedown_cpu() and the architecture and | ||
| 574 | * subsystem specific mechanisms are not available. So the CPU | ||
| 575 | * which would be completely unplugged again needs to stay around | ||
| 576 | * in the current state. | ||
| 577 | */ | ||
| 578 | return st->state <= CPUHP_BRINGUP_CPU; | ||
| 579 | } | ||
| 580 | |||
| 567 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 581 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
| 568 | enum cpuhp_state target) | 582 | enum cpuhp_state target) |
| 569 | { | 583 | { |
| @@ -574,8 +588,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | |||
| 574 | st->state++; | 588 | st->state++; |
| 575 | ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); | 589 | ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
| 576 | if (ret) { | 590 | if (ret) { |
| 577 | st->target = prev_state; | 591 | if (can_rollback_cpu(st)) { |
| 578 | undo_cpu_up(cpu, st); | 592 | st->target = prev_state; |
| 593 | undo_cpu_up(cpu, st); | ||
| 594 | } | ||
| 579 | break; | 595 | break; |
| 580 | } | 596 | } |
| 581 | } | 597 | } |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 771e93f9c43f..6f357f4fc859 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/hw_breakpoint.h> | 29 | #include <linux/hw_breakpoint.h> |
| 30 | #include <linux/cn_proc.h> | 30 | #include <linux/cn_proc.h> |
| 31 | #include <linux/compat.h> | 31 | #include <linux/compat.h> |
| 32 | #include <linux/sched/signal.h> | ||
| 32 | 33 | ||
| 33 | /* | 34 | /* |
| 34 | * Access another process' address space via ptrace. | 35 | * Access another process' address space via ptrace. |
| @@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request, | |||
| 924 | ret = ptrace_setsiginfo(child, &siginfo); | 925 | ret = ptrace_setsiginfo(child, &siginfo); |
| 925 | break; | 926 | break; |
| 926 | 927 | ||
| 927 | case PTRACE_GETSIGMASK: | 928 | case PTRACE_GETSIGMASK: { |
| 929 | sigset_t *mask; | ||
| 930 | |||
| 928 | if (addr != sizeof(sigset_t)) { | 931 | if (addr != sizeof(sigset_t)) { |
| 929 | ret = -EINVAL; | 932 | ret = -EINVAL; |
| 930 | break; | 933 | break; |
| 931 | } | 934 | } |
| 932 | 935 | ||
| 933 | if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t))) | 936 | if (test_tsk_restore_sigmask(child)) |
| 937 | mask = &child->saved_sigmask; | ||
| 938 | else | ||
| 939 | mask = &child->blocked; | ||
| 940 | |||
| 941 | if (copy_to_user(datavp, mask, sizeof(sigset_t))) | ||
| 934 | ret = -EFAULT; | 942 | ret = -EFAULT; |
| 935 | else | 943 | else |
| 936 | ret = 0; | 944 | ret = 0; |
| 937 | 945 | ||
| 938 | break; | 946 | break; |
| 947 | } | ||
| 939 | 948 | ||
| 940 | case PTRACE_SETSIGMASK: { | 949 | case PTRACE_SETSIGMASK: { |
| 941 | sigset_t new_set; | 950 | sigset_t new_set; |
| @@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request, | |||
| 961 | child->blocked = new_set; | 970 | child->blocked = new_set; |
| 962 | spin_unlock_irq(&child->sighand->siglock); | 971 | spin_unlock_irq(&child->sighand->siglock); |
| 963 | 972 | ||
| 973 | clear_tsk_restore_sigmask(child); | ||
| 974 | |||
| 964 | ret = 0; | 975 | ret = 0; |
| 965 | break; | 976 | break; |
| 966 | } | 977 | } |
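For a sense of what the PTRACE_GETSIGMASK change means to a tracer: when the tracee is parked in sigsuspend() or ppoll() with a temporary mask installed, the request now reports that temporary mask (saved_sigmask) instead of the mask that will be restored on return. A hedged user-space sketch, assuming the tracee is already attached and stopped; the constant fallback and the 8-byte kernel sigset size are assumptions about typical 64-bit Linux, not something this patch defines:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PTRACE_GETSIGMASK		/* older libc headers may lack it */
    #define PTRACE_GETSIGMASK 0x420a
    #endif

    /* Print the blocked-signal mask the kernel reports for a stopped tracee. */
    static int dump_tracee_sigmask(pid_t pid)
    {
    	uint64_t mask = 0;	/* kernel sigset_t: 64 signals, 8 bytes */

    	/* addr must be the kernel's sigset size, not glibc's sizeof(sigset_t) */
    	if (ptrace(PTRACE_GETSIGMASK, pid, (void *)sizeof(mask), &mask) == -1) {
    		perror("PTRACE_GETSIGMASK");
    		return -1;
    	}
    	printf("pid %d blocked mask: %#llx\n", (int)pid,
    	       (unsigned long long)mask);
    	return 0;
    }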
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 54a0347ca812..df27e499956a 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -149,7 +149,7 @@ static void populate_seccomp_data(struct seccomp_data *sd) | |||
| 149 | 149 | ||
| 150 | sd->nr = syscall_get_nr(task, regs); | 150 | sd->nr = syscall_get_nr(task, regs); |
| 151 | sd->arch = syscall_get_arch(); | 151 | sd->arch = syscall_get_arch(); |
| 152 | syscall_get_arguments(task, regs, 0, 6, args); | 152 | syscall_get_arguments(task, regs, args); |
| 153 | sd->args[0] = args[0]; | 153 | sd->args[0] = args[0]; |
| 154 | sd->args[1] = args[1]; | 154 | sd->args[1] = args[1]; |
| 155 | sd->args[2] = args[2]; | 155 | sd->args[2] = args[2]; |
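This hunk, like the trace_syscalls.c and lib/syscall.c hunks further down, reflects the syscall_get_arguments() interface change: the start-index and count parameters are gone and the helper always fills a six-entry array. A hedged kernel-style sketch of the new calling convention (the fill_enter_record() wrapper is illustrative, not part of the patch):

    #include <linux/sched.h>
    #include <linux/string.h>
    #include <asm/syscall.h>

    /* Callers now always provide room for all six arguments and copy out only
     * the ones their event actually uses. */
    static void fill_enter_record(struct task_struct *task, struct pt_regs *regs,
    			      unsigned long *dst, int nb_args)
    {
    	unsigned long args[6];

    	syscall_get_arguments(task, regs, args);	/* no start/count anymore */
    	memcpy(dst, args, sizeof(unsigned long) * nb_args);
    }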
diff --git a/kernel/signal.c b/kernel/signal.c index b7953934aa99..f98448cf2def 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -3605,16 +3605,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, | |||
| 3605 | if (unlikely(sig != kinfo.si_signo)) | 3605 | if (unlikely(sig != kinfo.si_signo)) |
| 3606 | goto err; | 3606 | goto err; |
| 3607 | 3607 | ||
| 3608 | /* Only allow sending arbitrary signals to yourself. */ | ||
| 3609 | ret = -EPERM; | ||
| 3608 | if ((task_pid(current) != pid) && | 3610 | if ((task_pid(current) != pid) && |
| 3609 | (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) { | 3611 | (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) |
| 3610 | /* Only allow sending arbitrary signals to yourself. */ | 3612 | goto err; |
| 3611 | ret = -EPERM; | ||
| 3612 | if (kinfo.si_code != SI_USER) | ||
| 3613 | goto err; | ||
| 3614 | |||
| 3615 | /* Turn this into a regular kill signal. */ | ||
| 3616 | prepare_kill_siginfo(sig, &kinfo); | ||
| 3617 | } | ||
| 3618 | } else { | 3613 | } else { |
| 3619 | prepare_kill_siginfo(sig, &kinfo); | 3614 | prepare_kill_siginfo(sig, &kinfo); |
| 3620 | } | 3615 | } |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index e5da394d1ca3..c9ec050bcf46 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -128,6 +128,7 @@ static int zero; | |||
| 128 | static int __maybe_unused one = 1; | 128 | static int __maybe_unused one = 1; |
| 129 | static int __maybe_unused two = 2; | 129 | static int __maybe_unused two = 2; |
| 130 | static int __maybe_unused four = 4; | 130 | static int __maybe_unused four = 4; |
| 131 | static unsigned long zero_ul; | ||
| 131 | static unsigned long one_ul = 1; | 132 | static unsigned long one_ul = 1; |
| 132 | static unsigned long long_max = LONG_MAX; | 133 | static unsigned long long_max = LONG_MAX; |
| 133 | static int one_hundred = 100; | 134 | static int one_hundred = 100; |
| @@ -1750,7 +1751,7 @@ static struct ctl_table fs_table[] = { | |||
| 1750 | .maxlen = sizeof(files_stat.max_files), | 1751 | .maxlen = sizeof(files_stat.max_files), |
| 1751 | .mode = 0644, | 1752 | .mode = 0644, |
| 1752 | .proc_handler = proc_doulongvec_minmax, | 1753 | .proc_handler = proc_doulongvec_minmax, |
| 1753 | .extra1 = &zero, | 1754 | .extra1 = &zero_ul, |
| 1754 | .extra2 = &long_max, | 1755 | .extra2 = &long_max, |
| 1755 | }, | 1756 | }, |
| 1756 | { | 1757 | { |
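The fs_table fix above matters because proc_doulongvec_minmax() dereferences extra1/extra2 as unsigned long, so pointing extra1 at an int-sized zero reads past that variable on 64-bit builds. A hedged sketch of the corrected pattern, with purely illustrative names (demo_*):

    #include <linux/kernel.h>
    #include <linux/sysctl.h>

    static unsigned long demo_min;			/* implicitly 0, like zero_ul */
    static unsigned long demo_max = LONG_MAX;
    static unsigned long demo_value;

    /* Bounds for proc_doulongvec_minmax() must themselves be unsigned long,
     * matching the width the handler reads through extra1/extra2. */
    static struct ctl_table demo_table[] = {
    	{
    		.procname	= "demo_value",
    		.data		= &demo_value,
    		.maxlen		= sizeof(demo_value),
    		.mode		= 0644,
    		.proc_handler	= proc_doulongvec_minmax,
    		.extra1		= &demo_min,
    		.extra2		= &demo_max,
    	},
    	{ }
    };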
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fa79323331b2..26c8ca9bd06b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -1992,7 +1992,7 @@ static void print_bug_type(void) | |||
| 1992 | * modifying the code. @failed should be one of either: | 1992 | * modifying the code. @failed should be one of either: |
| 1993 | * EFAULT - if the problem happens on reading the @ip address | 1993 | * EFAULT - if the problem happens on reading the @ip address |
| 1994 | * EINVAL - if what is read at @ip is not what was expected | 1994 | * EINVAL - if what is read at @ip is not what was expected |
| 1995 | * EPERM - if the problem happens on writting to the @ip address | 1995 | * EPERM - if the problem happens on writing to the @ip address |
| 1996 | */ | 1996 | */ |
| 1997 | void ftrace_bug(int failed, struct dyn_ftrace *rec) | 1997 | void ftrace_bug(int failed, struct dyn_ftrace *rec) |
| 1998 | { | 1998 | { |
| @@ -2391,7 +2391,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
| 2391 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); | 2391 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); |
| 2392 | } | 2392 | } |
| 2393 | 2393 | ||
| 2394 | return -1; /* unknow ftrace bug */ | 2394 | return -1; /* unknown ftrace bug */ |
| 2395 | } | 2395 | } |
| 2396 | 2396 | ||
| 2397 | void __weak ftrace_replace_code(int mod_flags) | 2397 | void __weak ftrace_replace_code(int mod_flags) |
| @@ -3004,7 +3004,7 @@ ftrace_allocate_pages(unsigned long num_to_init) | |||
| 3004 | int cnt; | 3004 | int cnt; |
| 3005 | 3005 | ||
| 3006 | if (!num_to_init) | 3006 | if (!num_to_init) |
| 3007 | return 0; | 3007 | return NULL; |
| 3008 | 3008 | ||
| 3009 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); | 3009 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); |
| 3010 | if (!pg) | 3010 | if (!pg) |
| @@ -4755,7 +4755,7 @@ static int | |||
| 4755 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, | 4755 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, |
| 4756 | int reset, int enable) | 4756 | int reset, int enable) |
| 4757 | { | 4757 | { |
| 4758 | return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); | 4758 | return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); |
| 4759 | } | 4759 | } |
| 4760 | 4760 | ||
| 4761 | /** | 4761 | /** |
| @@ -5463,7 +5463,7 @@ void ftrace_create_filter_files(struct ftrace_ops *ops, | |||
| 5463 | 5463 | ||
| 5464 | /* | 5464 | /* |
| 5465 | * The name "destroy_filter_files" is really a misnomer. Although | 5465 | * The name "destroy_filter_files" is really a misnomer. Although |
| 5466 | * in the future, it may actualy delete the files, but this is | 5466 | * in the future, it may actually delete the files, but this is |
| 5467 | * really intended to make sure the ops passed in are disabled | 5467 | * really intended to make sure the ops passed in are disabled |
| 5468 | * and that when this function returns, the caller is free to | 5468 | * and that when this function returns, the caller is free to |
| 5469 | * free the ops. | 5469 | * free the ops. |
| @@ -5786,7 +5786,7 @@ void ftrace_module_enable(struct module *mod) | |||
| 5786 | /* | 5786 | /* |
| 5787 | * If the tracing is enabled, go ahead and enable the record. | 5787 | * If the tracing is enabled, go ahead and enable the record. |
| 5788 | * | 5788 | * |
| 5789 | * The reason not to enable the record immediatelly is the | 5789 | * The reason not to enable the record immediately is the |
| 5790 | * inherent check of ftrace_make_nop/ftrace_make_call for | 5790 | * inherent check of ftrace_make_nop/ftrace_make_call for |
| 5791 | * correct previous instructions. Making first the NOP | 5791 | * correct previous instructions. Making first the NOP |
| 5792 | * conversion puts the module to the correct state, thus | 5792 | * conversion puts the module to the correct state, thus |
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index dd1f43588d70..fa100ed3b4de 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c | |||
| @@ -74,7 +74,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type) | |||
| 74 | static int create_dyn_event(int argc, char **argv) | 74 | static int create_dyn_event(int argc, char **argv) |
| 75 | { | 75 | { |
| 76 | struct dyn_event_operations *ops; | 76 | struct dyn_event_operations *ops; |
| 77 | int ret; | 77 | int ret = -ENODEV; |
| 78 | 78 | ||
| 79 | if (argv[0][0] == '-' || argv[0][0] == '!') | 79 | if (argv[0][0] == '-' || argv[0][0] == '!') |
| 80 | return dyn_event_release(argc, argv, NULL); | 80 | return dyn_event_release(argc, argv, NULL); |
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index ca46339f3009..795aa2038377 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c | |||
| @@ -3713,7 +3713,6 @@ static void track_data_destroy(struct hist_trigger_data *hist_data, | |||
| 3713 | struct trace_event_file *file = hist_data->event_file; | 3713 | struct trace_event_file *file = hist_data->event_file; |
| 3714 | 3714 | ||
| 3715 | destroy_hist_field(data->track_data.track_var, 0); | 3715 | destroy_hist_field(data->track_data.track_var, 0); |
| 3716 | destroy_hist_field(data->track_data.var_ref, 0); | ||
| 3717 | 3716 | ||
| 3718 | if (data->action == ACTION_SNAPSHOT) { | 3717 | if (data->action == ACTION_SNAPSHOT) { |
| 3719 | struct track_data *track_data; | 3718 | struct track_data *track_data; |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index f93a56d2db27..fa8fbff736d6 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
| @@ -314,6 +314,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 314 | struct ring_buffer_event *event; | 314 | struct ring_buffer_event *event; |
| 315 | struct ring_buffer *buffer; | 315 | struct ring_buffer *buffer; |
| 316 | unsigned long irq_flags; | 316 | unsigned long irq_flags; |
| 317 | unsigned long args[6]; | ||
| 317 | int pc; | 318 | int pc; |
| 318 | int syscall_nr; | 319 | int syscall_nr; |
| 319 | int size; | 320 | int size; |
| @@ -347,7 +348,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 347 | 348 | ||
| 348 | entry = ring_buffer_event_data(event); | 349 | entry = ring_buffer_event_data(event); |
| 349 | entry->nr = syscall_nr; | 350 | entry->nr = syscall_nr; |
| 350 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); | 351 | syscall_get_arguments(current, regs, args); |
| 352 | memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args); | ||
| 351 | 353 | ||
| 352 | event_trigger_unlock_commit(trace_file, buffer, event, entry, | 354 | event_trigger_unlock_commit(trace_file, buffer, event, entry, |
| 353 | irq_flags, pc); | 355 | irq_flags, pc); |
| @@ -583,6 +585,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
| 583 | struct syscall_metadata *sys_data; | 585 | struct syscall_metadata *sys_data; |
| 584 | struct syscall_trace_enter *rec; | 586 | struct syscall_trace_enter *rec; |
| 585 | struct hlist_head *head; | 587 | struct hlist_head *head; |
| 588 | unsigned long args[6]; | ||
| 586 | bool valid_prog_array; | 589 | bool valid_prog_array; |
| 587 | int syscall_nr; | 590 | int syscall_nr; |
| 588 | int rctx; | 591 | int rctx; |
| @@ -613,8 +616,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
| 613 | return; | 616 | return; |
| 614 | 617 | ||
| 615 | rec->nr = syscall_nr; | 618 | rec->nr = syscall_nr; |
| 616 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 619 | syscall_get_arguments(current, regs, args); |
| 617 | (unsigned long *)&rec->args); | 620 | memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args); |
| 618 | 621 | ||
| 619 | if ((valid_prog_array && | 622 | if ((valid_prog_array && |
| 620 | !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) || | 623 | !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) || |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 403c9bd90413..6a5787233113 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -554,13 +554,15 @@ static void softlockup_start_all(void) | |||
| 554 | 554 | ||
| 555 | int lockup_detector_online_cpu(unsigned int cpu) | 555 | int lockup_detector_online_cpu(unsigned int cpu) |
| 556 | { | 556 | { |
| 557 | watchdog_enable(cpu); | 557 | if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) |
| 558 | watchdog_enable(cpu); | ||
| 558 | return 0; | 559 | return 0; |
| 559 | } | 560 | } |
| 560 | 561 | ||
| 561 | int lockup_detector_offline_cpu(unsigned int cpu) | 562 | int lockup_detector_offline_cpu(unsigned int cpu) |
| 562 | { | 563 | { |
| 563 | watchdog_disable(cpu); | 564 | if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) |
| 565 | watchdog_disable(cpu); | ||
| 564 | return 0; | 566 | return 0; |
| 565 | } | 567 | } |
| 566 | 568 | ||
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index ea36dc355da1..b396d328a764 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
| @@ -1528,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter); | |||
| 1528 | size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, | 1528 | size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, |
| 1529 | struct iov_iter *i) | 1529 | struct iov_iter *i) |
| 1530 | { | 1530 | { |
| 1531 | #ifdef CONFIG_CRYPTO | ||
| 1531 | struct ahash_request *hash = hashp; | 1532 | struct ahash_request *hash = hashp; |
| 1532 | struct scatterlist sg; | 1533 | struct scatterlist sg; |
| 1533 | size_t copied; | 1534 | size_t copied; |
| @@ -1537,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, | |||
| 1537 | ahash_request_set_crypt(hash, &sg, NULL, copied); | 1538 | ahash_request_set_crypt(hash, &sg, NULL, copied); |
| 1538 | crypto_ahash_update(hash); | 1539 | crypto_ahash_update(hash); |
| 1539 | return copied; | 1540 | return copied; |
| 1541 | #else | ||
| 1542 | return 0; | ||
| 1543 | #endif | ||
| 1540 | } | 1544 | } |
| 1541 | EXPORT_SYMBOL(hash_and_copy_to_iter); | 1545 | EXPORT_SYMBOL(hash_and_copy_to_iter); |
| 1542 | 1546 | ||
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 4525fb094844..a8ede77afe0d 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c | |||
| @@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, | |||
| 291 | { | 291 | { |
| 292 | const unsigned char *ip = in; | 292 | const unsigned char *ip = in; |
| 293 | unsigned char *op = out; | 293 | unsigned char *op = out; |
| 294 | unsigned char *data_start; | ||
| 294 | size_t l = in_len; | 295 | size_t l = in_len; |
| 295 | size_t t = 0; | 296 | size_t t = 0; |
| 296 | signed char state_offset = -2; | 297 | signed char state_offset = -2; |
| 297 | unsigned int m4_max_offset; | 298 | unsigned int m4_max_offset; |
| 298 | 299 | ||
| 299 | // LZO v0 will never write 17 as first byte, | 300 | // LZO v0 will never write 17 as first byte (except for zero-length |
| 300 | // so this is used to version the bitstream | 301 | // input), so this is used to version the bitstream |
| 301 | if (bitstream_version > 0) { | 302 | if (bitstream_version > 0) { |
| 302 | *op++ = 17; | 303 | *op++ = 17; |
| 303 | *op++ = bitstream_version; | 304 | *op++ = bitstream_version; |
| @@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, | |||
| 306 | m4_max_offset = M4_MAX_OFFSET_V0; | 307 | m4_max_offset = M4_MAX_OFFSET_V0; |
| 307 | } | 308 | } |
| 308 | 309 | ||
| 310 | data_start = op; | ||
| 311 | |||
| 309 | while (l > 20) { | 312 | while (l > 20) { |
| 310 | size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1); | 313 | size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1); |
| 311 | uintptr_t ll_end = (uintptr_t) ip + ll; | 314 | uintptr_t ll_end = (uintptr_t) ip + ll; |
| @@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, | |||
| 324 | if (t > 0) { | 327 | if (t > 0) { |
| 325 | const unsigned char *ii = in + in_len - t; | 328 | const unsigned char *ii = in + in_len - t; |
| 326 | 329 | ||
| 327 | if (op == out && t <= 238) { | 330 | if (op == data_start && t <= 238) { |
| 328 | *op++ = (17 + t); | 331 | *op++ = (17 + t); |
| 329 | } else if (t <= 3) { | 332 | } else if (t <= 3) { |
| 330 | op[state_offset] |= t; | 333 | op[state_offset] |= t; |
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c index 6d2600ea3b55..9e07e9ef1aad 100644 --- a/lib/lzo/lzo1x_decompress_safe.c +++ b/lib/lzo/lzo1x_decompress_safe.c | |||
| @@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, | |||
| 54 | if (unlikely(in_len < 3)) | 54 | if (unlikely(in_len < 3)) |
| 55 | goto input_overrun; | 55 | goto input_overrun; |
| 56 | 56 | ||
| 57 | if (likely(*ip == 17)) { | 57 | if (likely(in_len >= 5) && likely(*ip == 17)) { |
| 58 | bitstream_version = ip[1]; | 58 | bitstream_version = ip[1]; |
| 59 | ip += 2; | 59 | ip += 2; |
| 60 | if (unlikely(in_len < 5)) | ||
| 61 | goto input_overrun; | ||
| 62 | } else { | 60 | } else { |
| 63 | bitstream_version = 0; | 61 | bitstream_version = 0; |
| 64 | } | 62 | } |
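Taken together, the compress and decompress hunks define how the versioned LZO bitstream is recognised: a stream written with bitstream_version > 0 starts with the byte 17 followed by the version byte, and the header probe now also requires at least 5 bytes of input so a tiny literal-only version-0 stream cannot be mistaken for a header. A hedged stand-alone sketch of that probe (not the kernel decompressor; payload bytes in main are placeholders):

    #include <stddef.h>
    #include <stdio.h>

    /* Classify an LZO1X stream's bitstream version from the framing above.
     * Version-0 streams never begin with 17 except for zero-length input. */
    static int lzo_stream_version(const unsigned char *in, size_t in_len,
    			      size_t *header_len)
    {
    	if (in_len >= 5 && in[0] == 17) {
    		*header_len = 2;	/* marker byte + version byte */
    		return in[1];
    	}
    	*header_len = 0;
    	return 0;			/* legacy / version-0 bitstream */
    }

    int main(void)
    {
    	const unsigned char versioned[] = { 17, 1, 0x00, 0x00, 0x00 };	/* placeholder body */
    	const unsigned char legacy[]    = { 21, 'h', 'e', 'l', 'l', 'o' };	/* placeholder body */
    	size_t hdr;

    	printf("versioned: v%d, header %zu bytes\n",
    	       lzo_stream_version(versioned, sizeof(versioned), &hdr), hdr);
    	printf("legacy: v%d\n", lzo_stream_version(legacy, sizeof(legacy), &hdr));
    	return 0;
    }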
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 0a105d4af166..97f59abc3e92 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
| @@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work) | |||
| 416 | else if (tbl->nest) | 416 | else if (tbl->nest) |
| 417 | err = rhashtable_rehash_alloc(ht, tbl, tbl->size); | 417 | err = rhashtable_rehash_alloc(ht, tbl, tbl->size); |
| 418 | 418 | ||
| 419 | if (!err) | 419 | if (!err || err == -EEXIST) { |
| 420 | err = rhashtable_rehash_table(ht); | 420 | int nerr; |
| 421 | |||
| 422 | nerr = rhashtable_rehash_table(ht); | ||
| 423 | err = err ?: nerr; | ||
| 424 | } | ||
| 421 | 425 | ||
| 422 | mutex_unlock(&ht->mutex); | 426 | mutex_unlock(&ht->mutex); |
| 423 | 427 | ||
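The `err = err ?: nerr;` line uses GCC's conditional with an omitted middle operand: it keeps err when err is non-zero (evaluating it only once) and otherwise takes nerr, so the earlier allocation error is preserved while a rehash failure is still reported when allocation succeeded. A tiny stand-alone demo of the idiom (GNU C extension, so GCC or Clang):

    #include <stdio.h>

    static int first_error(int err, int nerr)
    {
    	return err ?: nerr;	/* equivalent to: err ? err : nerr */
    }

    int main(void)
    {
    	printf("%d\n", first_error(0, -17));	/* -17: only the second step failed */
    	printf("%d\n", first_error(-12, -17));	/* -12: the earlier error is kept */
    	printf("%d\n", first_error(0, 0));	/* 0: both steps succeeded */
    	return 0;
    }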
diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 5b382c1244ed..155fe38756ec 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c | |||
| @@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); | |||
| 591 | void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, | 591 | void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, |
| 592 | unsigned int cpu) | 592 | unsigned int cpu) |
| 593 | { | 593 | { |
| 594 | /* | ||
| 595 | * Once the clear bit is set, the bit may be allocated out. | ||
| 596 | * | ||
| 597 | * Orders READ/WRITE on the associated instance (such as a request | ||
| 598 | * in blk_mq) by this bit to avoid racing with re-allocation, | ||
| 599 | * and its pair is the memory barrier implied in __sbitmap_get_word. | ||
| 600 | * | ||
| 601 | * One invariant is that the clear bit has to be zero when the bit | ||
| 602 | * is in use. | ||
| 603 | */ | ||
| 604 | smp_mb__before_atomic(); | ||
| 594 | sbitmap_deferred_clear_bit(&sbq->sb, nr); | 605 | sbitmap_deferred_clear_bit(&sbq->sb, nr); |
| 595 | 606 | ||
| 596 | /* | 607 | /* |
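The new barrier documents a publish-then-release ordering: every store to the object associated with the bit must become visible before the deferred-clear bit does, pairing with the barrier implied on the allocation side in __sbitmap_get_word(). A hedged sketch of the caller-side pattern this protects (illustrative names, not blk-mq code; after this patch the barrier sits inside sbitmap_queue_clear() itself):

    #include <linux/compiler.h>
    #include <linux/sbitmap.h>

    struct demo_req {
    	int state;
    	unsigned int tag;
    };

    /* The owner publishes the instance's final state and only then returns the
     * tag, so whoever re-acquires the tag observes those stores. */
    static void demo_complete(struct sbitmap_queue *sbq, struct demo_req *req,
    			  unsigned int cpu)
    {
    	WRITE_ONCE(req->state, 0);		/* last write to the instance */
    	sbitmap_queue_clear(sbq, req->tag, cpu);	/* smp_mb__before_atomic() inside */
    }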
diff --git a/lib/string.c b/lib/string.c index 38e4ca08e757..3ab861c1a857 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count) | |||
| 866 | EXPORT_SYMBOL(memcmp); | 866 | EXPORT_SYMBOL(memcmp); |
| 867 | #endif | 867 | #endif |
| 868 | 868 | ||
| 869 | #ifndef __HAVE_ARCH_BCMP | ||
| 870 | /** | ||
| 871 | * bcmp - returns 0 if and only if the buffers have identical contents. | ||
| 872 | * @a: pointer to first buffer. | ||
| 873 | * @b: pointer to second buffer. | ||
| 874 | * @len: size of buffers. | ||
| 875 | * | ||
| 876 | * The sign or magnitude of a non-zero return value has no particular | ||
| 877 | * meaning, and architectures may implement their own more efficient bcmp(). So | ||
| 878 | * while this particular implementation is a simple (tail) call to memcmp, do | ||
| 879 | * not rely on anything but whether the return value is zero or non-zero. | ||
| 880 | */ | ||
| 881 | #undef bcmp | ||
| 882 | int bcmp(const void *a, const void *b, size_t len) | ||
| 883 | { | ||
| 884 | return memcmp(a, b, len); | ||
| 885 | } | ||
| 886 | EXPORT_SYMBOL(bcmp); | ||
| 887 | #endif | ||
| 888 | |||
| 869 | #ifndef __HAVE_ARCH_MEMSCAN | 889 | #ifndef __HAVE_ARCH_MEMSCAN |
| 870 | /** | 890 | /** |
| 871 | * memscan - Find a character in an area of memory. | 891 | * memscan - Find a character in an area of memory. |
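As the new kernel-doc stresses, only the zero/non-zero result of bcmp() is meaningful; the sign and magnitude are unspecified because architectures may provide their own implementation. A hedged user-space illustration of treating it strictly as an equality test (bcmp here comes from the legacy strings.h interface, not from this kernel file):

    #include <stdio.h>
    #include <strings.h>	/* legacy home of bcmp() in userspace */

    int main(void)
    {
    	const unsigned char a[] = { 1, 2, 3, 4 };
    	const unsigned char b[] = { 1, 2, 3, 5 };

    	/* Only "equal or not" is defined; never order by the return value. */
    	printf("a == b over 3 bytes: %s\n", bcmp(a, b, 3) == 0 ? "yes" : "no");
    	printf("a == b over 4 bytes: %s\n", bcmp(a, b, 4) == 0 ? "yes" : "no");
    	return 0;
    }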
diff --git a/lib/syscall.c b/lib/syscall.c index 1a7077f20eae..fb328e7ccb08 100644 --- a/lib/syscall.c +++ b/lib/syscall.c | |||
| @@ -5,16 +5,14 @@ | |||
| 5 | #include <linux/export.h> | 5 | #include <linux/export.h> |
| 6 | #include <asm/syscall.h> | 6 | #include <asm/syscall.h> |
| 7 | 7 | ||
| 8 | static int collect_syscall(struct task_struct *target, long *callno, | 8 | static int collect_syscall(struct task_struct *target, struct syscall_info *info) |
| 9 | unsigned long args[6], unsigned int maxargs, | ||
| 10 | unsigned long *sp, unsigned long *pc) | ||
| 11 | { | 9 | { |
| 12 | struct pt_regs *regs; | 10 | struct pt_regs *regs; |
| 13 | 11 | ||
| 14 | if (!try_get_task_stack(target)) { | 12 | if (!try_get_task_stack(target)) { |
| 15 | /* Task has no stack, so the task isn't in a syscall. */ | 13 | /* Task has no stack, so the task isn't in a syscall. */ |
| 16 | *sp = *pc = 0; | 14 | memset(info, 0, sizeof(*info)); |
| 17 | *callno = -1; | 15 | info->data.nr = -1; |
| 18 | return 0; | 16 | return 0; |
| 19 | } | 17 | } |
| 20 | 18 | ||
| @@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno, | |||
| 24 | return -EAGAIN; | 22 | return -EAGAIN; |
| 25 | } | 23 | } |
| 26 | 24 | ||
| 27 | *sp = user_stack_pointer(regs); | 25 | info->sp = user_stack_pointer(regs); |
| 28 | *pc = instruction_pointer(regs); | 26 | info->data.instruction_pointer = instruction_pointer(regs); |
| 29 | 27 | ||
| 30 | *callno = syscall_get_nr(target, regs); | 28 | info->data.nr = syscall_get_nr(target, regs); |
| 31 | if (*callno != -1L && maxargs > 0) | 29 | if (info->data.nr != -1L) |
| 32 | syscall_get_arguments(target, regs, 0, maxargs, args); | 30 | syscall_get_arguments(target, regs, |
| 31 | (unsigned long *)&info->data.args[0]); | ||
| 33 | 32 | ||
| 34 | put_task_stack(target); | 33 | put_task_stack(target); |
| 35 | return 0; | 34 | return 0; |
| @@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno, | |||
| 38 | /** | 37 | /** |
| 39 | * task_current_syscall - Discover what a blocked task is doing. | 38 | * task_current_syscall - Discover what a blocked task is doing. |
| 40 | * @target: thread to examine | 39 | * @target: thread to examine |
| 41 | * @callno: filled with system call number or -1 | 40 | * @info: structure with the following fields: |
| 42 | * @args: filled with @maxargs system call arguments | 41 | * .sp - filled with user stack pointer |
| 43 | * @maxargs: number of elements in @args to fill | 42 | * .data.nr - filled with system call number or -1 |
| 44 | * @sp: filled with user stack pointer | 43 | .data.args - filled with the system call arguments |
| 45 | * @pc: filled with user PC | 44 | * .data.instruction_pointer - filled with user PC |
| 46 | * | 45 | * |
| 47 | * If @target is blocked in a system call, returns zero with *@callno | 46 | * If @target is blocked in a system call, returns zero with @info.data.nr |
| 48 | * set to the the call's number and @args filled in with its arguments. | 47 | set to the call's number and @info.data.args filled in with its |
| 49 | * Registers not used for system call arguments may not be available and | 48 | * arguments. Registers not used for system call arguments may not be available |
| 50 | * it is not kosher to use &struct user_regset calls while the system | 49 | * and it is not kosher to use &struct user_regset calls while the system |
| 51 | * call is still in progress. Note we may get this result if @target | 50 | * call is still in progress. Note we may get this result if @target |
| 52 | * has finished its system call but not yet returned to user mode, such | 51 | * has finished its system call but not yet returned to user mode, such |
| 53 | * as when it's stopped for signal handling or syscall exit tracing. | 52 | * as when it's stopped for signal handling or syscall exit tracing. |
| 54 | * | 53 | * |
| 55 | * If @target is blocked in the kernel during a fault or exception, | 54 | * If @target is blocked in the kernel during a fault or exception, |
| 56 | * returns zero with *@callno set to -1 and does not fill in @args. | 55 | * returns zero with *@info.data.nr set to -1 and does not fill in |
| 57 | * If so, it's now safe to examine @target using &struct user_regset | 56 | * @info.data.args. If so, it's now safe to examine @target using |
| 58 | * get() calls as long as we're sure @target won't return to user mode. | 57 | * &struct user_regset get() calls as long as we're sure @target won't return |
| 58 | * to user mode. | ||
| 59 | * | 59 | * |
| 60 | * Returns -%EAGAIN if @target does not remain blocked. | 60 | * Returns -%EAGAIN if @target does not remain blocked. |
| 61 | * | ||
| 62 | * Returns -%EINVAL if @maxargs is too large (maximum is six). | ||
| 63 | */ | 61 | */ |
| 64 | int task_current_syscall(struct task_struct *target, long *callno, | 62 | int task_current_syscall(struct task_struct *target, struct syscall_info *info) |
| 65 | unsigned long args[6], unsigned int maxargs, | ||
| 66 | unsigned long *sp, unsigned long *pc) | ||
| 67 | { | 63 | { |
| 68 | long state; | 64 | long state; |
| 69 | unsigned long ncsw; | 65 | unsigned long ncsw; |
| 70 | 66 | ||
| 71 | if (unlikely(maxargs > 6)) | ||
| 72 | return -EINVAL; | ||
| 73 | |||
| 74 | if (target == current) | 67 | if (target == current) |
| 75 | return collect_syscall(target, callno, args, maxargs, sp, pc); | 68 | return collect_syscall(target, info); |
| 76 | 69 | ||
| 77 | state = target->state; | 70 | state = target->state; |
| 78 | if (unlikely(!state)) | 71 | if (unlikely(!state)) |
| @@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno, | |||
| 80 | 73 | ||
| 81 | ncsw = wait_task_inactive(target, state); | 74 | ncsw = wait_task_inactive(target, state); |
| 82 | if (unlikely(!ncsw) || | 75 | if (unlikely(!ncsw) || |
| 83 | unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || | 76 | unlikely(collect_syscall(target, info)) || |
| 84 | unlikely(wait_task_inactive(target, state) != ncsw)) | 77 | unlikely(wait_task_inactive(target, state) != ncsw)) |
| 85 | return -EAGAIN; | 78 | return -EAGAIN; |
| 86 | 79 | ||
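With the reworked interface a caller hands in a single struct syscall_info instead of five output parameters. A hedged kernel-style sketch of a consumer, using only the field names visible in this hunk (sp, data.nr, data.args, data.instruction_pointer); the demo_report_syscall() wrapper is illustrative, not the procfs user of this API:

    #include <linux/printk.h>
    #include <linux/ptrace.h>
    #include <linux/sched.h>

    static void demo_report_syscall(struct task_struct *task)
    {
    	struct syscall_info info;

    	if (task_current_syscall(task, &info)) {
    		pr_info("%d: did not stay blocked\n", task->pid);
    		return;
    	}
    	if (info.data.nr == -1) {
    		/* running, or blocked in a fault/exception */
    		pr_info("%d: no syscall, sp=%#llx\n", task->pid,
    			(unsigned long long)info.sp);
    		return;
    	}
    	pr_info("%d: syscall %d ip=%#llx arg0=%#llx\n", task->pid,
    		info.data.nr,
    		(unsigned long long)info.data.instruction_pointer,
    		(unsigned long long)info.data.args[0]);
    }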
diff --git a/mm/compaction.c b/mm/compaction.c index f171a83707ce..3319e0872d01 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, | |||
| 242 | bool check_target) | 242 | bool check_target) |
| 243 | { | 243 | { |
| 244 | struct page *page = pfn_to_online_page(pfn); | 244 | struct page *page = pfn_to_online_page(pfn); |
| 245 | struct page *block_page; | ||
| 245 | struct page *end_page; | 246 | struct page *end_page; |
| 246 | unsigned long block_pfn; | 247 | unsigned long block_pfn; |
| 247 | 248 | ||
| @@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, | |||
| 267 | get_pageblock_migratetype(page) != MIGRATE_MOVABLE) | 268 | get_pageblock_migratetype(page) != MIGRATE_MOVABLE) |
| 268 | return false; | 269 | return false; |
| 269 | 270 | ||
| 271 | /* Ensure the start of the pageblock or zone is online and valid */ | ||
| 272 | block_pfn = pageblock_start_pfn(pfn); | ||
| 273 | block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn)); | ||
| 274 | if (block_page) { | ||
| 275 | page = block_page; | ||
| 276 | pfn = block_pfn; | ||
| 277 | } | ||
| 278 | |||
| 279 | /* Ensure the end of the pageblock or zone is online and valid */ | ||
| 280 | block_pfn += pageblock_nr_pages; | ||
| 281 | block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); | ||
| 282 | end_page = pfn_to_online_page(block_pfn); | ||
| 283 | if (!end_page) | ||
| 284 | return false; | ||
| 285 | |||
| 270 | /* | 286 | /* |
| 271 | * Only clear the hint if a sample indicates there is either a | 287 | * Only clear the hint if a sample indicates there is either a |
| 272 | * free page or an LRU page in the block. One or other condition | 288 | * free page or an LRU page in the block. One or other condition |
| 273 | * is necessary for the block to be a migration source/target. | 289 | * is necessary for the block to be a migration source/target. |
| 274 | */ | 290 | */ |
| 275 | block_pfn = pageblock_start_pfn(pfn); | ||
| 276 | pfn = max(block_pfn, zone->zone_start_pfn); | ||
| 277 | page = pfn_to_page(pfn); | ||
| 278 | if (zone != page_zone(page)) | ||
| 279 | return false; | ||
| 280 | pfn = block_pfn + pageblock_nr_pages; | ||
| 281 | pfn = min(pfn, zone_end_pfn(zone)); | ||
| 282 | end_page = pfn_to_page(pfn); | ||
| 283 | |||
| 284 | do { | 291 | do { |
| 285 | if (pfn_valid_within(pfn)) { | 292 | if (pfn_valid_within(pfn)) { |
| 286 | if (check_source && PageLRU(page)) { | 293 | if (check_source && PageLRU(page)) { |
| @@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, | |||
| 309 | static void __reset_isolation_suitable(struct zone *zone) | 316 | static void __reset_isolation_suitable(struct zone *zone) |
| 310 | { | 317 | { |
| 311 | unsigned long migrate_pfn = zone->zone_start_pfn; | 318 | unsigned long migrate_pfn = zone->zone_start_pfn; |
| 312 | unsigned long free_pfn = zone_end_pfn(zone); | 319 | unsigned long free_pfn = zone_end_pfn(zone) - 1; |
| 313 | unsigned long reset_migrate = free_pfn; | 320 | unsigned long reset_migrate = free_pfn; |
| 314 | unsigned long reset_free = migrate_pfn; | 321 | unsigned long reset_free = migrate_pfn; |
| 315 | bool source_set = false; | 322 | bool source_set = false; |
| @@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc) | |||
| 1363 | count_compact_events(COMPACTISOLATED, nr_isolated); | 1370 | count_compact_events(COMPACTISOLATED, nr_isolated); |
| 1364 | } else { | 1371 | } else { |
| 1365 | /* If isolation fails, abort the search */ | 1372 | /* If isolation fails, abort the search */ |
| 1366 | order = -1; | 1373 | order = cc->search_order + 1; |
| 1367 | page = NULL; | 1374 | page = NULL; |
| 1368 | } | 1375 | } |
| 1369 | } | 1376 | } |
diff --git a/mm/debug.c b/mm/debug.c index c0b31b6c3877..eee9c221280c 100644 --- a/mm/debug.c +++ b/mm/debug.c | |||
| @@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason) | |||
| 79 | pr_warn("ksm "); | 79 | pr_warn("ksm "); |
| 80 | else if (mapping) { | 80 | else if (mapping) { |
| 81 | pr_warn("%ps ", mapping->a_ops); | 81 | pr_warn("%ps ", mapping->a_ops); |
| 82 | if (mapping->host->i_dentry.first) { | 82 | if (mapping->host && mapping->host->i_dentry.first) { |
| 83 | struct dentry *dentry; | 83 | struct dentry *dentry; |
| 84 | dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias); | 84 | dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias); |
| 85 | pr_warn("name:\"%pd\" ", dentry); | 85 | pr_warn("name:\"%pd\" ", dentry); |
| @@ -168,7 +168,7 @@ void dump_mm(const struct mm_struct *mm) | |||
| 168 | mm_pgtables_bytes(mm), | 168 | mm_pgtables_bytes(mm), |
| 169 | mm->map_count, | 169 | mm->map_count, |
| 170 | mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, | 170 | mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, |
| 171 | atomic64_read(&mm->pinned_vm), | 171 | (u64)atomic64_read(&mm->pinned_vm), |
| 172 | mm->data_vm, mm->exec_vm, mm->stack_vm, | 172 | mm->data_vm, mm->exec_vm, mm->stack_vm, |
| 173 | mm->start_code, mm->end_code, mm->start_data, mm->end_data, | 173 | mm->start_code, mm->end_code, mm->start_data, mm->end_data, |
| 174 | mm->start_brk, mm->brk, mm->start_stack, | 174 | mm->start_brk, mm->brk, mm->start_stack, |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 404acdcd0455..165ea46bf149 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -755,6 +755,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, | |||
| 755 | spinlock_t *ptl; | 755 | spinlock_t *ptl; |
| 756 | 756 | ||
| 757 | ptl = pmd_lock(mm, pmd); | 757 | ptl = pmd_lock(mm, pmd); |
| 758 | if (!pmd_none(*pmd)) { | ||
| 759 | if (write) { | ||
| 760 | if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { | ||
| 761 | WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); | ||
| 762 | goto out_unlock; | ||
| 763 | } | ||
| 764 | entry = pmd_mkyoung(*pmd); | ||
| 765 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); | ||
| 766 | if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) | ||
| 767 | update_mmu_cache_pmd(vma, addr, pmd); | ||
| 768 | } | ||
| 769 | |||
| 770 | goto out_unlock; | ||
| 771 | } | ||
| 772 | |||
| 758 | entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); | 773 | entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); |
| 759 | if (pfn_t_devmap(pfn)) | 774 | if (pfn_t_devmap(pfn)) |
| 760 | entry = pmd_mkdevmap(entry); | 775 | entry = pmd_mkdevmap(entry); |
| @@ -766,11 +781,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, | |||
| 766 | if (pgtable) { | 781 | if (pgtable) { |
| 767 | pgtable_trans_huge_deposit(mm, pmd, pgtable); | 782 | pgtable_trans_huge_deposit(mm, pmd, pgtable); |
| 768 | mm_inc_nr_ptes(mm); | 783 | mm_inc_nr_ptes(mm); |
| 784 | pgtable = NULL; | ||
| 769 | } | 785 | } |
| 770 | 786 | ||
| 771 | set_pmd_at(mm, addr, pmd, entry); | 787 | set_pmd_at(mm, addr, pmd, entry); |
| 772 | update_mmu_cache_pmd(vma, addr, pmd); | 788 | update_mmu_cache_pmd(vma, addr, pmd); |
| 789 | |||
| 790 | out_unlock: | ||
| 773 | spin_unlock(ptl); | 791 | spin_unlock(ptl); |
| 792 | if (pgtable) | ||
| 793 | pte_free(mm, pgtable); | ||
| 774 | } | 794 | } |
| 775 | 795 | ||
| 776 | vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, | 796 | vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, |
| @@ -821,6 +841,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, | |||
| 821 | spinlock_t *ptl; | 841 | spinlock_t *ptl; |
| 822 | 842 | ||
| 823 | ptl = pud_lock(mm, pud); | 843 | ptl = pud_lock(mm, pud); |
| 844 | if (!pud_none(*pud)) { | ||
| 845 | if (write) { | ||
| 846 | if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) { | ||
| 847 | WARN_ON_ONCE(!is_huge_zero_pud(*pud)); | ||
| 848 | goto out_unlock; | ||
| 849 | } | ||
| 850 | entry = pud_mkyoung(*pud); | ||
| 851 | entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); | ||
| 852 | if (pudp_set_access_flags(vma, addr, pud, entry, 1)) | ||
| 853 | update_mmu_cache_pud(vma, addr, pud); | ||
| 854 | } | ||
| 855 | goto out_unlock; | ||
| 856 | } | ||
| 857 | |||
| 824 | entry = pud_mkhuge(pfn_t_pud(pfn, prot)); | 858 | entry = pud_mkhuge(pfn_t_pud(pfn, prot)); |
| 825 | if (pfn_t_devmap(pfn)) | 859 | if (pfn_t_devmap(pfn)) |
| 826 | entry = pud_mkdevmap(entry); | 860 | entry = pud_mkdevmap(entry); |
| @@ -830,6 +864,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, | |||
| 830 | } | 864 | } |
| 831 | set_pud_at(mm, addr, pud, entry); | 865 | set_pud_at(mm, addr, pud, entry); |
| 832 | update_mmu_cache_pud(vma, addr, pud); | 866 | update_mmu_cache_pud(vma, addr, pud); |
| 867 | |||
| 868 | out_unlock: | ||
| 833 | spin_unlock(ptl); | 869 | spin_unlock(ptl); |
| 834 | } | 870 | } |
| 835 | 871 | ||
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 3e0c11f7d7a1..3ce956efa0cb 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h | |||
| @@ -163,7 +163,10 @@ static inline u8 random_tag(void) | |||
| 163 | #endif | 163 | #endif |
| 164 | 164 | ||
| 165 | #ifndef arch_kasan_set_tag | 165 | #ifndef arch_kasan_set_tag |
| 166 | #define arch_kasan_set_tag(addr, tag) ((void *)(addr)) | 166 | static inline const void *arch_kasan_set_tag(const void *addr, u8 tag) |
| 167 | { | ||
| 168 | return addr; | ||
| 169 | } | ||
| 167 | #endif | 170 | #endif |
| 168 | #ifndef arch_kasan_reset_tag | 171 | #ifndef arch_kasan_reset_tag |
| 169 | #define arch_kasan_reset_tag(addr) ((void *)(addr)) | 172 | #define arch_kasan_reset_tag(addr) ((void *)(addr)) |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 707fa5579f66..6c318f5ac234 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
| @@ -1529,11 +1529,6 @@ static void kmemleak_scan(void) | |||
| 1529 | } | 1529 | } |
| 1530 | rcu_read_unlock(); | 1530 | rcu_read_unlock(); |
| 1531 | 1531 | ||
| 1532 | /* data/bss scanning */ | ||
| 1533 | scan_large_block(_sdata, _edata); | ||
| 1534 | scan_large_block(__bss_start, __bss_stop); | ||
| 1535 | scan_large_block(__start_ro_after_init, __end_ro_after_init); | ||
| 1536 | |||
| 1537 | #ifdef CONFIG_SMP | 1532 | #ifdef CONFIG_SMP |
| 1538 | /* per-cpu sections scanning */ | 1533 | /* per-cpu sections scanning */ |
| 1539 | for_each_possible_cpu(i) | 1534 | for_each_possible_cpu(i) |
| @@ -2071,6 +2066,17 @@ void __init kmemleak_init(void) | |||
| 2071 | } | 2066 | } |
| 2072 | local_irq_restore(flags); | 2067 | local_irq_restore(flags); |
| 2073 | 2068 | ||
| 2069 | /* register the data/bss sections */ | ||
| 2070 | create_object((unsigned long)_sdata, _edata - _sdata, | ||
| 2071 | KMEMLEAK_GREY, GFP_ATOMIC); | ||
| 2072 | create_object((unsigned long)__bss_start, __bss_stop - __bss_start, | ||
| 2073 | KMEMLEAK_GREY, GFP_ATOMIC); | ||
| 2074 | /* only register .data..ro_after_init if not within .data */ | ||
| 2075 | if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) | ||
| 2076 | create_object((unsigned long)__start_ro_after_init, | ||
| 2077 | __end_ro_after_init - __start_ro_after_init, | ||
| 2078 | KMEMLEAK_GREY, GFP_ATOMIC); | ||
| 2079 | |||
| 2074 | /* | 2080 | /* |
| 2075 | * This is the point where tracking allocations is safe. Automatic | 2081 | * This is the point where tracking allocations is safe. Automatic |
| 2076 | * scanning is started during the late initcall. Add the early logged | 2082 | * scanning is started during the late initcall. Add the early logged |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 532e0e2a4817..81a0d3914ec9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -3882,6 +3882,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) | |||
| 3882 | return &memcg->cgwb_domain; | 3882 | return &memcg->cgwb_domain; |
| 3883 | } | 3883 | } |
| 3884 | 3884 | ||
| 3885 | /* | ||
| 3886 | * idx can be of type enum memcg_stat_item or node_stat_item. | ||
| 3887 | * Keep in sync with memcg_exact_page(). | ||
| 3888 | */ | ||
| 3889 | static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) | ||
| 3890 | { | ||
| 3891 | long x = atomic_long_read(&memcg->stat[idx]); | ||
| 3892 | int cpu; | ||
| 3893 | |||
| 3894 | for_each_online_cpu(cpu) | ||
| 3895 | x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx]; | ||
| 3896 | if (x < 0) | ||
| 3897 | x = 0; | ||
| 3898 | return x; | ||
| 3899 | } | ||
| 3900 | |||
| 3885 | /** | 3901 | /** |
| 3886 | * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg | 3902 | * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg |
| 3887 | * @wb: bdi_writeback in question | 3903 | * @wb: bdi_writeback in question |
| @@ -3907,10 +3923,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, | |||
| 3907 | struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); | 3923 | struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); |
| 3908 | struct mem_cgroup *parent; | 3924 | struct mem_cgroup *parent; |
| 3909 | 3925 | ||
| 3910 | *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); | 3926 | *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); |
| 3911 | 3927 | ||
| 3912 | /* this should eventually include NR_UNSTABLE_NFS */ | 3928 | /* this should eventually include NR_UNSTABLE_NFS */ |
| 3913 | *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); | 3929 | *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); |
| 3914 | *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | | 3930 | *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | |
| 3915 | (1 << LRU_ACTIVE_FILE)); | 3931 | (1 << LRU_ACTIVE_FILE)); |
| 3916 | *pheadroom = PAGE_COUNTER_MAX; | 3932 | *pheadroom = PAGE_COUNTER_MAX; |
diff --git a/mm/memory.c b/mm/memory.c index 47fe250307c7..ab650c21bccd 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -1549,10 +1549,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, | |||
| 1549 | WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte))); | 1549 | WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte))); |
| 1550 | goto out_unlock; | 1550 | goto out_unlock; |
| 1551 | } | 1551 | } |
| 1552 | entry = *pte; | 1552 | entry = pte_mkyoung(*pte); |
| 1553 | goto out_mkwrite; | 1553 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
| 1554 | } else | 1554 | if (ptep_set_access_flags(vma, addr, pte, entry, 1)) |
| 1555 | goto out_unlock; | 1555 | update_mmu_cache(vma, addr, pte); |
| 1556 | } | ||
| 1557 | goto out_unlock; | ||
| 1556 | } | 1558 | } |
| 1557 | 1559 | ||
| 1558 | /* Ok, finally just insert the thing.. */ | 1560 | /* Ok, finally just insert the thing.. */ |
| @@ -1561,7 +1563,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, | |||
| 1561 | else | 1563 | else |
| 1562 | entry = pte_mkspecial(pfn_t_pte(pfn, prot)); | 1564 | entry = pte_mkspecial(pfn_t_pte(pfn, prot)); |
| 1563 | 1565 | ||
| 1564 | out_mkwrite: | ||
| 1565 | if (mkwrite) { | 1566 | if (mkwrite) { |
| 1566 | entry = pte_mkyoung(entry); | 1567 | entry = pte_mkyoung(entry); |
| 1567 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | 1568 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index f767582af4f8..0082d699be94 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -1576,7 +1576,7 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
| 1576 | { | 1576 | { |
| 1577 | unsigned long pfn, nr_pages; | 1577 | unsigned long pfn, nr_pages; |
| 1578 | long offlined_pages; | 1578 | long offlined_pages; |
| 1579 | int ret, node; | 1579 | int ret, node, nr_isolate_pageblock; |
| 1580 | unsigned long flags; | 1580 | unsigned long flags; |
| 1581 | unsigned long valid_start, valid_end; | 1581 | unsigned long valid_start, valid_end; |
| 1582 | struct zone *zone; | 1582 | struct zone *zone; |
| @@ -1602,10 +1602,11 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
| 1602 | ret = start_isolate_page_range(start_pfn, end_pfn, | 1602 | ret = start_isolate_page_range(start_pfn, end_pfn, |
| 1603 | MIGRATE_MOVABLE, | 1603 | MIGRATE_MOVABLE, |
| 1604 | SKIP_HWPOISON | REPORT_FAILURE); | 1604 | SKIP_HWPOISON | REPORT_FAILURE); |
| 1605 | if (ret) { | 1605 | if (ret < 0) { |
| 1606 | reason = "failure to isolate range"; | 1606 | reason = "failure to isolate range"; |
| 1607 | goto failed_removal; | 1607 | goto failed_removal; |
| 1608 | } | 1608 | } |
| 1609 | nr_isolate_pageblock = ret; | ||
| 1609 | 1610 | ||
| 1610 | arg.start_pfn = start_pfn; | 1611 | arg.start_pfn = start_pfn; |
| 1611 | arg.nr_pages = nr_pages; | 1612 | arg.nr_pages = nr_pages; |
| @@ -1657,8 +1658,16 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
| 1657 | /* Ok, all of our target is isolated. | 1658 | /* Ok, all of our target is isolated. |
| 1658 | We cannot do rollback at this point. */ | 1659 | We cannot do rollback at this point. */ |
| 1659 | offline_isolated_pages(start_pfn, end_pfn); | 1660 | offline_isolated_pages(start_pfn, end_pfn); |
| 1660 | /* reset pagetype flags and makes migrate type to be MOVABLE */ | 1661 | |
| 1661 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); | 1662 | /* |
| 1663 | * Onlining will reset the pagetype flags and make the migrate | ||
| 1664 | * type MOVABLE, so we only need to decrease the zone's counter | ||
| 1665 | * of isolated pageblocks here. | ||
| 1666 | */ | ||
| 1667 | spin_lock_irqsave(&zone->lock, flags); | ||
| 1668 | zone->nr_isolate_pageblock -= nr_isolate_pageblock; | ||
| 1669 | spin_unlock_irqrestore(&zone->lock, flags); | ||
| 1670 | |||
| 1662 | /* removal success */ | 1671 | /* removal success */ |
| 1663 | adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); | 1672 | adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); |
| 1664 | zone->present_pages -= offlined_pages; | 1673 | zone->present_pages -= offlined_pages; |
| @@ -1690,12 +1699,12 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
| 1690 | 1699 | ||
| 1691 | failed_removal_isolated: | 1700 | failed_removal_isolated: |
| 1692 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); | 1701 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); |
| 1702 | memory_notify(MEM_CANCEL_OFFLINE, &arg); | ||
| 1693 | failed_removal: | 1703 | failed_removal: |
| 1694 | pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", | 1704 | pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", |
| 1695 | (unsigned long long) start_pfn << PAGE_SHIFT, | 1705 | (unsigned long long) start_pfn << PAGE_SHIFT, |
| 1696 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, | 1706 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, |
| 1697 | reason); | 1707 | reason); |
| 1698 | memory_notify(MEM_CANCEL_OFFLINE, &arg); | ||
| 1699 | /* pushback to free area */ | 1708 | /* pushback to free area */ |
| 1700 | mem_hotplug_done(); | 1709 | mem_hotplug_done(); |
| 1701 | return ret; | 1710 | return ret; |
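A compressed view of how __offline_pages() consumes the new return value of start_isolate_page_range() after this change; the snippet mirrors the hunk above with extra commentary and is not verbatim kernel code.

        ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
                                       SKIP_HWPOISON | REPORT_FAILURE);
        if (ret < 0)                            /* -EBUSY: nothing stayed isolated */
                goto failed_removal;
        nr_isolate_pageblock = ret;             /* pageblocks now MIGRATE_ISOLATE */

        /* ... notify, migrate and offline the pages ... */

        /* The range stays offline, so the isolation is not undone; only the
         * per-zone count that start_isolate_page_range() bumped is corrected. */
        spin_lock_irqsave(&zone->lock, flags);
        zone->nr_isolate_pageblock -= nr_isolate_pageblock;
        spin_unlock_irqrestore(&zone->lock, flags);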
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index af171ccb56a2..2219e747df49 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page, | |||
| 428 | return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); | 428 | return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); |
| 429 | } | 429 | } |
| 430 | 430 | ||
| 431 | /* | ||
| 432 | * queue_pages_pmd() has three possible return values: | ||
| 433 | * 1 - pages are placed on the right node or queued successfully. | ||
| 434 | * 0 - THP was split. | ||
| 435 | * -EIO - the entry is a migration entry, or MPOL_MF_STRICT was specified | ||
| 436 | * and an existing page was already on a node that does not follow the policy. | ||
| 437 | */ | ||
| 431 | static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, | 438 | static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, |
| 432 | unsigned long end, struct mm_walk *walk) | 439 | unsigned long end, struct mm_walk *walk) |
| 433 | { | 440 | { |
| @@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, | |||
| 437 | unsigned long flags; | 444 | unsigned long flags; |
| 438 | 445 | ||
| 439 | if (unlikely(is_pmd_migration_entry(*pmd))) { | 446 | if (unlikely(is_pmd_migration_entry(*pmd))) { |
| 440 | ret = 1; | 447 | ret = -EIO; |
| 441 | goto unlock; | 448 | goto unlock; |
| 442 | } | 449 | } |
| 443 | page = pmd_page(*pmd); | 450 | page = pmd_page(*pmd); |
| @@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, | |||
| 454 | ret = 1; | 461 | ret = 1; |
| 455 | flags = qp->flags; | 462 | flags = qp->flags; |
| 456 | /* go to thp migration */ | 463 | /* go to thp migration */ |
| 457 | if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) | 464 | if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { |
| 465 | if (!vma_migratable(walk->vma)) { | ||
| 466 | ret = -EIO; | ||
| 467 | goto unlock; | ||
| 468 | } | ||
| 469 | |||
| 458 | migrate_page_add(page, qp->pagelist, flags); | 470 | migrate_page_add(page, qp->pagelist, flags); |
| 471 | } else | ||
| 472 | ret = -EIO; | ||
| 459 | unlock: | 473 | unlock: |
| 460 | spin_unlock(ptl); | 474 | spin_unlock(ptl); |
| 461 | out: | 475 | out: |
| @@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, | |||
| 480 | ptl = pmd_trans_huge_lock(pmd, vma); | 494 | ptl = pmd_trans_huge_lock(pmd, vma); |
| 481 | if (ptl) { | 495 | if (ptl) { |
| 482 | ret = queue_pages_pmd(pmd, ptl, addr, end, walk); | 496 | ret = queue_pages_pmd(pmd, ptl, addr, end, walk); |
| 483 | if (ret) | 497 | if (ret > 0) |
| 484 | return 0; | 498 | return 0; |
| 499 | else if (ret < 0) | ||
| 500 | return ret; | ||
| 485 | } | 501 | } |
| 486 | 502 | ||
| 487 | if (pmd_trans_unstable(pmd)) | 503 | if (pmd_trans_unstable(pmd)) |
| @@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, | |||
| 502 | continue; | 518 | continue; |
| 503 | if (!queue_pages_required(page, qp)) | 519 | if (!queue_pages_required(page, qp)) |
| 504 | continue; | 520 | continue; |
| 505 | migrate_page_add(page, qp->pagelist, flags); | 521 | if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { |
| 522 | if (!vma_migratable(vma)) | ||
| 523 | break; | ||
| 524 | migrate_page_add(page, qp->pagelist, flags); | ||
| 525 | } else | ||
| 526 | break; | ||
| 506 | } | 527 | } |
| 507 | pte_unmap_unlock(pte - 1, ptl); | 528 | pte_unmap_unlock(pte - 1, ptl); |
| 508 | cond_resched(); | 529 | cond_resched(); |
| 509 | return 0; | 530 | return addr != end ? -EIO : 0; |
| 510 | } | 531 | } |
| 511 | 532 | ||
| 512 | static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, | 533 | static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, |
| @@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, | |||
| 576 | unsigned long endvma = vma->vm_end; | 597 | unsigned long endvma = vma->vm_end; |
| 577 | unsigned long flags = qp->flags; | 598 | unsigned long flags = qp->flags; |
| 578 | 599 | ||
| 579 | if (!vma_migratable(vma)) | 600 | /* |
| 601 | * Need to check MPOL_MF_STRICT so -EIO can be returned when required, | ||
| 602 | * regardless of vma_migratable | ||
| 603 | */ | ||
| 604 | if (!vma_migratable(vma) && | ||
| 605 | !(flags & MPOL_MF_STRICT)) | ||
| 580 | return 1; | 606 | return 1; |
| 581 | 607 | ||
| 582 | if (endvma > end) | 608 | if (endvma > end) |
| @@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, | |||
| 603 | } | 629 | } |
| 604 | 630 | ||
| 605 | /* queue pages from current vma */ | 631 | /* queue pages from current vma */ |
| 606 | if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) | 632 | if (flags & MPOL_MF_VALID) |
| 607 | return 0; | 633 | return 0; |
| 608 | return 1; | 634 | return 1; |
| 609 | } | 635 | } |
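The mempolicy changes above hinge on a small return-value contract between queue_pages_pmd() and its caller. A short sketch of that contract as encoded in the hunk (simplified from queue_pages_pte_range()):

        /* queue_pages_pmd() return values, per the new comment:
         *   ret > 0  : huge page handled (queued, or already on an allowed node)
         *   ret == 0 : the THP was split, fall through to the per-PTE walk
         *   ret < 0  : -EIO, hard failure that MPOL_MF_STRICT callers must see */
        ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
        if (ret > 0)
                return 0;               /* nothing left to do for this range */
        else if (ret < 0)
                return ret;             /* propagate -EIO up the page walk */
        /* ret == 0: continue scanning individual PTEs below */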
diff --git a/mm/migrate.c b/mm/migrate.c index ac6f4939bb59..663a5449367a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, | |||
| 248 | pte = swp_entry_to_pte(entry); | 248 | pte = swp_entry_to_pte(entry); |
| 249 | } else if (is_device_public_page(new)) { | 249 | } else if (is_device_public_page(new)) { |
| 250 | pte = pte_mkdevmap(pte); | 250 | pte = pte_mkdevmap(pte); |
| 251 | flush_dcache_page(new); | ||
| 252 | } | 251 | } |
| 253 | } else | 252 | } |
| 254 | flush_dcache_page(new); | ||
| 255 | 253 | ||
| 256 | #ifdef CONFIG_HUGETLB_PAGE | 254 | #ifdef CONFIG_HUGETLB_PAGE |
| 257 | if (PageHuge(new)) { | 255 | if (PageHuge(new)) { |
| @@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page, | |||
| 995 | */ | 993 | */ |
| 996 | if (!PageMappingFlags(page)) | 994 | if (!PageMappingFlags(page)) |
| 997 | page->mapping = NULL; | 995 | page->mapping = NULL; |
| 996 | |||
| 997 | if (unlikely(is_zone_device_page(newpage))) { | ||
| 998 | if (is_device_public_page(newpage)) | ||
| 999 | flush_dcache_page(newpage); | ||
| 1000 | } else | ||
| 1001 | flush_dcache_page(newpage); | ||
| 1002 | |||
| 998 | } | 1003 | } |
| 999 | out: | 1004 | out: |
| 1000 | return rc; | 1005 | return rc; |
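The migrate.c hunk moves the data-cache flush from remove_migration_pte() (once per mapping) to move_to_new_page() (once per migrated page). A condensed sketch of the new placement, with the rationale as it reads from the hunk: device-private ZONE_DEVICE memory is not CPU-addressable, so only device-public and ordinary pages are flushed.

        /* Flush once, after the page contents have been moved, rather than
         * every time a migration PTE is replaced. */
        if (unlikely(is_zone_device_page(newpage))) {
                if (is_device_public_page(newpage))
                        flush_dcache_page(newpage);
                /* device-private pages: nothing for the CPU cache to flush */
        } else {
                flush_dcache_page(newpage);
        }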
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 03fcf73d47da..d96ca5bc555b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -8233,7 +8233,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, | |||
| 8233 | 8233 | ||
| 8234 | ret = start_isolate_page_range(pfn_max_align_down(start), | 8234 | ret = start_isolate_page_range(pfn_max_align_down(start), |
| 8235 | pfn_max_align_up(end), migratetype, 0); | 8235 | pfn_max_align_up(end), migratetype, 0); |
| 8236 | if (ret) | 8236 | if (ret < 0) |
| 8237 | return ret; | 8237 | return ret; |
| 8238 | 8238 | ||
| 8239 | /* | 8239 | /* |
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index ce323e56b34d..019280712e1b 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
| @@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ | |||
| 59 | * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. | 59 | * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. |
| 60 | * We just check MOVABLE pages. | 60 | * We just check MOVABLE pages. |
| 61 | */ | 61 | */ |
| 62 | if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags)) | 62 | if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, |
| 63 | isol_flags)) | ||
| 63 | ret = 0; | 64 | ret = 0; |
| 64 | 65 | ||
| 65 | /* | 66 | /* |
| @@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) | |||
| 160 | return NULL; | 161 | return NULL; |
| 161 | } | 162 | } |
| 162 | 163 | ||
| 163 | /* | 164 | /** |
| 164 | * start_isolate_page_range() -- make page-allocation-type of range of pages | 165 | * start_isolate_page_range() - make page-allocation-type of range of pages to |
| 165 | * to be MIGRATE_ISOLATE. | 166 | * be MIGRATE_ISOLATE. |
| 166 | * @start_pfn: The lower PFN of the range to be isolated. | 167 | * @start_pfn: The lower PFN of the range to be isolated. |
| 167 | * @end_pfn: The upper PFN of the range to be isolated. | 168 | * @end_pfn: The upper PFN of the range to be isolated. |
| 168 | * @migratetype: migrate type to set in error recovery. | 169 | * start_pfn/end_pfn must be aligned to pageblock_order. |
| 170 | * @migratetype: Migrate type to set in error recovery. | ||
| 171 | * @flags: The following flags are allowed (they can be combined in | ||
| 172 | * a bit mask) | ||
| 173 | * SKIP_HWPOISON - ignore hwpoison pages | ||
| 174 | * REPORT_FAILURE - report details about the failure to | ||
| 175 | * isolate the range | ||
| 169 | * | 176 | * |
| 170 | * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in | 177 | * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in |
| 171 | * the range will never be allocated. Any free pages and pages freed in the | 178 | * the range will never be allocated. Any free pages and pages freed in the |
| 172 | * future will not be allocated again. | 179 | * future will not be allocated again. If the specified range includes migrate |
| 173 | * | 180 | * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally |
| 174 | * start_pfn/end_pfn must be aligned to pageblock_order. | 181 | * isolate all pages in the range, the caller has to free all pages in the |
| 175 | * Return 0 on success and -EBUSY if any part of range cannot be isolated. | 182 | * range; test_pages_isolated() can be used to check whether it succeeded. |
| 176 | * | 183 | * |
| 177 | * There is no high level synchronization mechanism that prevents two threads | 184 | * There is no high level synchronization mechanism that prevents two threads |
| 178 | * from trying to isolate overlapping ranges. If this happens, one thread | 185 | * from trying to isolate overlapping ranges. If this happens, one thread |
| 179 | * will notice pageblocks in the overlapping range already set to isolate. | 186 | * will notice pageblocks in the overlapping range already set to isolate. |
| 180 | * This happens in set_migratetype_isolate, and set_migratetype_isolate | 187 | * This happens in set_migratetype_isolate, and set_migratetype_isolate |
| 181 | * returns an error. We then clean up by restoring the migration type on | 188 | * returns an error. We then clean up by restoring the migration type on |
| 182 | * pageblocks we may have modified and return -EBUSY to caller. This | 189 | * pageblocks we may have modified and return -EBUSY to caller. This |
| 183 | * prevents two threads from simultaneously working on overlapping ranges. | 190 | * prevents two threads from simultaneously working on overlapping ranges. |
| 191 | * | ||
| 192 | * Return: the number of isolated pageblocks on success and -EBUSY if any part | ||
| 193 | * of range cannot be isolated. | ||
| 184 | */ | 194 | */ |
| 185 | int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | 195 | int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, |
| 186 | unsigned migratetype, int flags) | 196 | unsigned migratetype, int flags) |
| @@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | |||
| 188 | unsigned long pfn; | 198 | unsigned long pfn; |
| 189 | unsigned long undo_pfn; | 199 | unsigned long undo_pfn; |
| 190 | struct page *page; | 200 | struct page *page; |
| 201 | int nr_isolate_pageblock = 0; | ||
| 191 | 202 | ||
| 192 | BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); | 203 | BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); |
| 193 | BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages)); | 204 | BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages)); |
| @@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | |||
| 196 | pfn < end_pfn; | 207 | pfn < end_pfn; |
| 197 | pfn += pageblock_nr_pages) { | 208 | pfn += pageblock_nr_pages) { |
| 198 | page = __first_valid_page(pfn, pageblock_nr_pages); | 209 | page = __first_valid_page(pfn, pageblock_nr_pages); |
| 199 | if (page && | 210 | if (page) { |
| 200 | set_migratetype_isolate(page, migratetype, flags)) { | 211 | if (set_migratetype_isolate(page, migratetype, flags)) { |
| 201 | undo_pfn = pfn; | 212 | undo_pfn = pfn; |
| 202 | goto undo; | 213 | goto undo; |
| 214 | } | ||
| 215 | nr_isolate_pageblock++; | ||
| 203 | } | 216 | } |
| 204 | } | 217 | } |
| 205 | return 0; | 218 | return nr_isolate_pageblock; |
| 206 | undo: | 219 | undo: |
| 207 | for (pfn = start_pfn; | 220 | for (pfn = start_pfn; |
| 208 | pfn < undo_pfn; | 221 | pfn < undo_pfn; |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
| @@ -2115,6 +2115,8 @@ done: | |||
| 2115 | cachep->allocflags = __GFP_COMP; | 2115 | cachep->allocflags = __GFP_COMP; |
| 2116 | if (flags & SLAB_CACHE_DMA) | 2116 | if (flags & SLAB_CACHE_DMA) |
| 2117 | cachep->allocflags |= GFP_DMA; | 2117 | cachep->allocflags |= GFP_DMA; |
| 2118 | if (flags & SLAB_CACHE_DMA32) | ||
| 2119 | cachep->allocflags |= GFP_DMA32; | ||
| 2118 | if (flags & SLAB_RECLAIM_ACCOUNT) | 2120 | if (flags & SLAB_RECLAIM_ACCOUNT) |
| 2119 | cachep->allocflags |= __GFP_RECLAIMABLE; | 2121 | cachep->allocflags |= __GFP_RECLAIMABLE; |
| 2120 | cachep->size = size; | 2122 | cachep->size = size; |
| @@ -4306,7 +4308,8 @@ static void show_symbol(struct seq_file *m, unsigned long address) | |||
| 4306 | 4308 | ||
| 4307 | static int leaks_show(struct seq_file *m, void *p) | 4309 | static int leaks_show(struct seq_file *m, void *p) |
| 4308 | { | 4310 | { |
| 4309 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); | 4311 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, |
| 4312 | root_caches_node); | ||
| 4310 | struct page *page; | 4313 | struct page *page; |
| 4311 | struct kmem_cache_node *n; | 4314 | struct kmem_cache_node *n; |
| 4312 | const char *name; | 4315 | const char *name; |
diff --git a/mm/slab.h b/mm/slab.h --- a/mm/slab.h +++ b/mm/slab.h | |||
| @@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size, | |||
| 127 | 127 | ||
| 128 | 128 | ||
| 129 | /* Legal flag mask for kmem_cache_create(), for various configurations */ | 129 | /* Legal flag mask for kmem_cache_create(), for various configurations */ |
| 130 | #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \ | 130 | #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \ |
| 131 | SLAB_CACHE_DMA32 | SLAB_PANIC | \ | ||
| 131 | SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS ) | 132 | SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS ) |
| 132 | 133 | ||
| 133 | #if defined(CONFIG_DEBUG_SLAB) | 134 | #if defined(CONFIG_DEBUG_SLAB) |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 03eeb8b7b4b1..58251ba63e4a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
| @@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work, | |||
| 53 | SLAB_FAILSLAB | SLAB_KASAN) | 53 | SLAB_FAILSLAB | SLAB_KASAN) |
| 54 | 54 | ||
| 55 | #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ | 55 | #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ |
| 56 | SLAB_ACCOUNT) | 56 | SLAB_CACHE_DMA32 | SLAB_ACCOUNT) |
| 57 | 57 | ||
| 58 | /* | 58 | /* |
| 59 | * Merge control. If this is set then no merging of slab caches will occur. | 59 | * Merge control. If this is set then no merging of slab caches will occur. |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
| @@ -3589,6 +3589,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) | |||
| 3589 | if (s->flags & SLAB_CACHE_DMA) | 3589 | if (s->flags & SLAB_CACHE_DMA) |
| 3590 | s->allocflags |= GFP_DMA; | 3590 | s->allocflags |= GFP_DMA; |
| 3591 | 3591 | ||
| 3592 | if (s->flags & SLAB_CACHE_DMA32) | ||
| 3593 | s->allocflags |= GFP_DMA32; | ||
| 3594 | |||
| 3592 | if (s->flags & SLAB_RECLAIM_ACCOUNT) | 3595 | if (s->flags & SLAB_RECLAIM_ACCOUNT) |
| 3593 | s->allocflags |= __GFP_RECLAIMABLE; | 3596 | s->allocflags |= __GFP_RECLAIMABLE; |
| 3594 | 3597 | ||
| @@ -5679,6 +5682,8 @@ static char *create_unique_id(struct kmem_cache *s) | |||
| 5679 | */ | 5682 | */ |
| 5680 | if (s->flags & SLAB_CACHE_DMA) | 5683 | if (s->flags & SLAB_CACHE_DMA) |
| 5681 | *p++ = 'd'; | 5684 | *p++ = 'd'; |
| 5685 | if (s->flags & SLAB_CACHE_DMA32) | ||
| 5686 | *p++ = 'D'; | ||
| 5682 | if (s->flags & SLAB_RECLAIM_ACCOUNT) | 5687 | if (s->flags & SLAB_RECLAIM_ACCOUNT) |
| 5683 | *p++ = 'a'; | 5688 | *p++ = 'a'; |
| 5684 | if (s->flags & SLAB_CONSISTENCY_CHECKS) | 5689 | if (s->flags & SLAB_CONSISTENCY_CHECKS) |
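Taken together, the slab, slab.h, slab_common and slub hunks wire a SLAB_CACHE_DMA32 flag through the allocators. A hypothetical usage sketch follows; the cache name and object size are invented for illustration and do not come from this diff.

        /* Hypothetical driver cache whose objects must stay below 4 GiB. */
        struct kmem_cache *cache;
        void *obj;

        cache = kmem_cache_create("example-dma32", 1024, 1024,
                                  SLAB_CACHE_DMA32, NULL);
        if (!cache)
                return -ENOMEM;

        /* Both slab and slub now OR GFP_DMA32 into the cache's allocflags,
         * slab_common only merges it with other SLAB_CACHE_DMA32 caches
         * (the flag is part of SLAB_MERGE_SAME), and slub's unique id gains
         * a 'D' suffix character. */
        obj = kmem_cache_alloc(cache, GFP_KERNEL);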
diff --git a/mm/sparse.c b/mm/sparse.c index 69904aa6165b..56e057c432f9 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
| @@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn) | |||
| 567 | } | 567 | } |
| 568 | 568 | ||
| 569 | #ifdef CONFIG_MEMORY_HOTREMOVE | 569 | #ifdef CONFIG_MEMORY_HOTREMOVE |
| 570 | /* Mark all memory sections within the pfn range as online */ | 570 | /* Mark all memory sections within the pfn range as offline */ |
| 571 | void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) | 571 | void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) |
| 572 | { | 572 | { |
| 573 | unsigned long pfn; | 573 | unsigned long pfn; |
diff --git a/mm/util.c b/mm/util.c --- a/mm/util.c +++ b/mm/util.c | |||
| @@ -204,7 +204,7 @@ EXPORT_SYMBOL(vmemdup_user); | |||
| 204 | * @s: The string to duplicate | 204 | * @s: The string to duplicate |
| 205 | * @n: Maximum number of bytes to copy, including the trailing NUL. | 205 | * @n: Maximum number of bytes to copy, including the trailing NUL. |
| 206 | * | 206 | * |
| 207 | * Return: newly allocated copy of @s or %NULL in case of error | 207 | * Return: newly allocated copy of @s or an ERR_PTR() in case of error |
| 208 | */ | 208 | */ |
| 209 | char *strndup_user(const char __user *s, long n) | 209 | char *strndup_user(const char __user *s, long n) |
| 210 | { | 210 | { |
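The corrected kerneldoc matters for callers: strndup_user() signals failure with an ERR_PTR(), never NULL. A minimal caller sketch, assuming an illustrative __user buffer named ubuf:

        char *name;

        name = strndup_user(ubuf, PATH_MAX);    /* ubuf: const char __user *, illustrative */
        if (IS_ERR(name))
                return PTR_ERR(name);           /* e.g. -EFAULT or -EINVAL, not NULL */
        /* ... use name ... */
        kfree(name);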
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 15293c2a5dd8..8d77b6ee4477 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
| @@ -443,27 +443,29 @@ static int vlan_dev_fcoe_disable(struct net_device *dev) | |||
| 443 | return rc; | 443 | return rc; |
| 444 | } | 444 | } |
| 445 | 445 | ||
| 446 | static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) | 446 | static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, |
| 447 | struct scatterlist *sgl, unsigned int sgc) | ||
| 447 | { | 448 | { |
| 448 | struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; | 449 | struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; |
| 449 | const struct net_device_ops *ops = real_dev->netdev_ops; | 450 | const struct net_device_ops *ops = real_dev->netdev_ops; |
| 450 | int rc = -EINVAL; | 451 | int rc = 0; |
| 452 | |||
| 453 | if (ops->ndo_fcoe_ddp_target) | ||
| 454 | rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc); | ||
| 451 | 455 | ||
| 452 | if (ops->ndo_fcoe_get_wwn) | ||
| 453 | rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); | ||
| 454 | return rc; | 456 | return rc; |
| 455 | } | 457 | } |
| 458 | #endif | ||
| 456 | 459 | ||
| 457 | static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, | 460 | #ifdef NETDEV_FCOE_WWNN |
| 458 | struct scatterlist *sgl, unsigned int sgc) | 461 | static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) |
| 459 | { | 462 | { |
| 460 | struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; | 463 | struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; |
| 461 | const struct net_device_ops *ops = real_dev->netdev_ops; | 464 | const struct net_device_ops *ops = real_dev->netdev_ops; |
| 462 | int rc = 0; | 465 | int rc = -EINVAL; |
| 463 | |||
| 464 | if (ops->ndo_fcoe_ddp_target) | ||
| 465 | rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc); | ||
| 466 | 466 | ||
| 467 | if (ops->ndo_fcoe_get_wwn) | ||
| 468 | rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); | ||
| 467 | return rc; | 469 | return rc; |
| 468 | } | 470 | } |
| 469 | #endif | 471 | #endif |
| @@ -794,9 +796,11 @@ static const struct net_device_ops vlan_netdev_ops = { | |||
| 794 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, | 796 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, |
| 795 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, | 797 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, |
| 796 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, | 798 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, |
| 797 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
| 798 | .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, | 799 | .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, |
| 799 | #endif | 800 | #endif |
| 801 | #ifdef NETDEV_FCOE_WWNN | ||
| 802 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
| 803 | #endif | ||
| 800 | #ifdef CONFIG_NET_POLL_CONTROLLER | 804 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 801 | .ndo_poll_controller = vlan_dev_poll_controller, | 805 | .ndo_poll_controller = vlan_dev_poll_controller, |
| 802 | .ndo_netpoll_setup = vlan_dev_netpoll_setup, | 806 | .ndo_netpoll_setup = vlan_dev_netpoll_setup, |
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index 49a16cee2aae..420a98bf79b5 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c | |||
| @@ -879,15 +879,24 @@ static struct notifier_block aarp_notifier = { | |||
| 879 | 879 | ||
| 880 | static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 }; | 880 | static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 }; |
| 881 | 881 | ||
| 882 | void __init aarp_proto_init(void) | 882 | int __init aarp_proto_init(void) |
| 883 | { | 883 | { |
| 884 | int rc; | ||
| 885 | |||
| 884 | aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv); | 886 | aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv); |
| 885 | if (!aarp_dl) | 887 | if (!aarp_dl) { |
| 886 | printk(KERN_CRIT "Unable to register AARP with SNAP.\n"); | 888 | printk(KERN_CRIT "Unable to register AARP with SNAP.\n"); |
| 889 | return -ENOMEM; | ||
| 890 | } | ||
| 887 | timer_setup(&aarp_timer, aarp_expire_timeout, 0); | 891 | timer_setup(&aarp_timer, aarp_expire_timeout, 0); |
| 888 | aarp_timer.expires = jiffies + sysctl_aarp_expiry_time; | 892 | aarp_timer.expires = jiffies + sysctl_aarp_expiry_time; |
| 889 | add_timer(&aarp_timer); | 893 | add_timer(&aarp_timer); |
| 890 | register_netdevice_notifier(&aarp_notifier); | 894 | rc = register_netdevice_notifier(&aarp_notifier); |
| 895 | if (rc) { | ||
| 896 | del_timer_sync(&aarp_timer); | ||
| 897 | unregister_snap_client(aarp_dl); | ||
| 898 | } | ||
| 899 | return rc; | ||
| 891 | } | 900 | } |
| 892 | 901 | ||
| 893 | /* Remove the AARP entries associated with a device. */ | 902 | /* Remove the AARP entries associated with a device. */ |
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 795fbc6c06aa..709d2542f729 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c | |||
| @@ -1904,9 +1904,6 @@ static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B }; | |||
| 1904 | EXPORT_SYMBOL(atrtr_get_dev); | 1904 | EXPORT_SYMBOL(atrtr_get_dev); |
| 1905 | EXPORT_SYMBOL(atalk_find_dev_addr); | 1905 | EXPORT_SYMBOL(atalk_find_dev_addr); |
| 1906 | 1906 | ||
| 1907 | static const char atalk_err_snap[] __initconst = | ||
| 1908 | KERN_CRIT "Unable to register DDP with SNAP.\n"; | ||
| 1909 | |||
| 1910 | /* Called by proto.c on kernel start up */ | 1907 | /* Called by proto.c on kernel start up */ |
| 1911 | static int __init atalk_init(void) | 1908 | static int __init atalk_init(void) |
| 1912 | { | 1909 | { |
| @@ -1921,17 +1918,22 @@ static int __init atalk_init(void) | |||
| 1921 | goto out_proto; | 1918 | goto out_proto; |
| 1922 | 1919 | ||
| 1923 | ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv); | 1920 | ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv); |
| 1924 | if (!ddp_dl) | 1921 | if (!ddp_dl) { |
| 1925 | printk(atalk_err_snap); | 1922 | pr_crit("Unable to register DDP with SNAP.\n"); |
| 1923 | goto out_sock; | ||
| 1924 | } | ||
| 1926 | 1925 | ||
| 1927 | dev_add_pack(&ltalk_packet_type); | 1926 | dev_add_pack(&ltalk_packet_type); |
| 1928 | dev_add_pack(&ppptalk_packet_type); | 1927 | dev_add_pack(&ppptalk_packet_type); |
| 1929 | 1928 | ||
| 1930 | rc = register_netdevice_notifier(&ddp_notifier); | 1929 | rc = register_netdevice_notifier(&ddp_notifier); |
| 1931 | if (rc) | 1930 | if (rc) |
| 1932 | goto out_sock; | 1931 | goto out_snap; |
| 1932 | |||
| 1933 | rc = aarp_proto_init(); | ||
| 1934 | if (rc) | ||
| 1935 | goto out_dev; | ||
| 1933 | 1936 | ||
| 1934 | aarp_proto_init(); | ||
| 1935 | rc = atalk_proc_init(); | 1937 | rc = atalk_proc_init(); |
| 1936 | if (rc) | 1938 | if (rc) |
| 1937 | goto out_aarp; | 1939 | goto out_aarp; |
| @@ -1945,11 +1947,13 @@ out_proc: | |||
| 1945 | atalk_proc_exit(); | 1947 | atalk_proc_exit(); |
| 1946 | out_aarp: | 1948 | out_aarp: |
| 1947 | aarp_cleanup_module(); | 1949 | aarp_cleanup_module(); |
| 1950 | out_dev: | ||
| 1948 | unregister_netdevice_notifier(&ddp_notifier); | 1951 | unregister_netdevice_notifier(&ddp_notifier); |
| 1949 | out_sock: | 1952 | out_snap: |
| 1950 | dev_remove_pack(&ppptalk_packet_type); | 1953 | dev_remove_pack(&ppptalk_packet_type); |
| 1951 | dev_remove_pack(&ltalk_packet_type); | 1954 | dev_remove_pack(&ltalk_packet_type); |
| 1952 | unregister_snap_client(ddp_dl); | 1955 | unregister_snap_client(ddp_dl); |
| 1956 | out_sock: | ||
| 1953 | sock_unregister(PF_APPLETALK); | 1957 | sock_unregister(PF_APPLETALK); |
| 1954 | out_proto: | 1958 | out_proto: |
| 1955 | proto_unregister(&ddp_proto); | 1959 | proto_unregister(&ddp_proto); |
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index a9b7919c9de5..d5df0114f08a 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c | |||
| @@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) | |||
| 104 | 104 | ||
| 105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); | 105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); |
| 106 | 106 | ||
| 107 | /* free the TID stats immediately */ | 107 | if (!ret) { |
| 108 | cfg80211_sinfo_release_content(&sinfo); | 108 | /* free the TID stats immediately */ |
| 109 | cfg80211_sinfo_release_content(&sinfo); | ||
| 110 | } | ||
| 109 | 111 | ||
| 110 | dev_put(real_netdev); | 112 | dev_put(real_netdev); |
| 111 | if (ret == -ENOENT) { | 113 | if (ret == -ENOENT) { |
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ef39aabdb694..4fb01108e5f5 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
| @@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, | |||
| 803 | const u8 *mac, const unsigned short vid) | 803 | const u8 *mac, const unsigned short vid) |
| 804 | { | 804 | { |
| 805 | struct batadv_bla_claim search_claim, *claim; | 805 | struct batadv_bla_claim search_claim, *claim; |
| 806 | struct batadv_bla_claim *claim_removed_entry; | ||
| 807 | struct hlist_node *claim_removed_node; | ||
| 806 | 808 | ||
| 807 | ether_addr_copy(search_claim.addr, mac); | 809 | ether_addr_copy(search_claim.addr, mac); |
| 808 | search_claim.vid = vid; | 810 | search_claim.vid = vid; |
| @@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, | |||
| 813 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__, | 815 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__, |
| 814 | mac, batadv_print_vid(vid)); | 816 | mac, batadv_print_vid(vid)); |
| 815 | 817 | ||
| 816 | batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, | 818 | claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash, |
| 817 | batadv_choose_claim, claim); | 819 | batadv_compare_claim, |
| 818 | batadv_claim_put(claim); /* reference from the hash is gone */ | 820 | batadv_choose_claim, claim); |
| 821 | if (!claim_removed_node) | ||
| 822 | goto free_claim; | ||
| 819 | 823 | ||
| 824 | /* reference from the hash is gone */ | ||
| 825 | claim_removed_entry = hlist_entry(claim_removed_node, | ||
| 826 | struct batadv_bla_claim, hash_entry); | ||
| 827 | batadv_claim_put(claim_removed_entry); | ||
| 828 | |||
| 829 | free_claim: | ||
| 820 | /* don't need the reference from hash_find() anymore */ | 830 | /* don't need the reference from hash_find() anymore */ |
| 821 | batadv_claim_put(claim); | 831 | batadv_claim_put(claim); |
| 822 | } | 832 | } |
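The bridge-loop-avoidance fix above (and the translation-table fixes further down) all follow the same pattern: batadv_hash_remove() hands back the hlist_node it actually unlinked, and the hash's reference may only be dropped when that node is non-NULL. A compressed sketch of the pattern with extra commentary; variable names follow the hunk and this is not verbatim kernel code.

        struct hlist_node *node;
        struct batadv_bla_claim *removed;

        node = batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
                                  batadv_choose_claim, claim);
        if (!node)
                goto out;       /* someone else removed it: no hash reference to drop */

        /* hlist_entry() is a container_of() wrapper: recover the claim that
         * owns this hash_entry before putting the hash's reference. */
        removed = hlist_entry(node, struct batadv_bla_claim, hash_entry);
        batadv_claim_put(removed);
out:
        batadv_claim_put(claim);        /* drop the lookup reference from hash_find() */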
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index 0b4b3fb778a6..208655cf6717 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c | |||
| @@ -1116,9 +1116,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, | |||
| 1116 | struct attribute *attr, | 1116 | struct attribute *attr, |
| 1117 | char *buff, size_t count) | 1117 | char *buff, size_t count) |
| 1118 | { | 1118 | { |
| 1119 | struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); | ||
| 1120 | struct net_device *net_dev = batadv_kobj_to_netdev(kobj); | 1119 | struct net_device *net_dev = batadv_kobj_to_netdev(kobj); |
| 1121 | struct batadv_hard_iface *hard_iface; | 1120 | struct batadv_hard_iface *hard_iface; |
| 1121 | struct batadv_priv *bat_priv; | ||
| 1122 | u32 tp_override; | 1122 | u32 tp_override; |
| 1123 | u32 old_tp_override; | 1123 | u32 old_tp_override; |
| 1124 | bool ret; | 1124 | bool ret; |
| @@ -1147,7 +1147,10 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, | |||
| 1147 | 1147 | ||
| 1148 | atomic_set(&hard_iface->bat_v.throughput_override, tp_override); | 1148 | atomic_set(&hard_iface->bat_v.throughput_override, tp_override); |
| 1149 | 1149 | ||
| 1150 | batadv_netlink_notify_hardif(bat_priv, hard_iface); | 1150 | if (hard_iface->soft_iface) { |
| 1151 | bat_priv = netdev_priv(hard_iface->soft_iface); | ||
| 1152 | batadv_netlink_notify_hardif(bat_priv, hard_iface); | ||
| 1153 | } | ||
| 1151 | 1154 | ||
| 1152 | out: | 1155 | out: |
| 1153 | batadv_hardif_put(hard_iface); | 1156 | batadv_hardif_put(hard_iface); |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index f73d79139ae7..26c4e2493ddf 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv, | |||
| 616 | struct batadv_tt_global_entry *tt_global, | 616 | struct batadv_tt_global_entry *tt_global, |
| 617 | const char *message) | 617 | const char *message) |
| 618 | { | 618 | { |
| 619 | struct batadv_tt_global_entry *tt_removed_entry; | ||
| 620 | struct hlist_node *tt_removed_node; | ||
| 621 | |||
| 619 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 622 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
| 620 | "Deleting global tt entry %pM (vid: %d): %s\n", | 623 | "Deleting global tt entry %pM (vid: %d): %s\n", |
| 621 | tt_global->common.addr, | 624 | tt_global->common.addr, |
| 622 | batadv_print_vid(tt_global->common.vid), message); | 625 | batadv_print_vid(tt_global->common.vid), message); |
| 623 | 626 | ||
| 624 | batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, | 627 | tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash, |
| 625 | batadv_choose_tt, &tt_global->common); | 628 | batadv_compare_tt, |
| 626 | batadv_tt_global_entry_put(tt_global); | 629 | batadv_choose_tt, |
| 630 | &tt_global->common); | ||
| 631 | if (!tt_removed_node) | ||
| 632 | return; | ||
| 633 | |||
| 634 | /* drop the reference of the removed hash entry */ | ||
| 635 | tt_removed_entry = hlist_entry(tt_removed_node, | ||
| 636 | struct batadv_tt_global_entry, | ||
| 637 | common.hash_entry); | ||
| 638 | batadv_tt_global_entry_put(tt_removed_entry); | ||
| 627 | } | 639 | } |
| 628 | 640 | ||
| 629 | /** | 641 | /** |
| @@ -1337,9 +1349,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, | |||
| 1337 | unsigned short vid, const char *message, | 1349 | unsigned short vid, const char *message, |
| 1338 | bool roaming) | 1350 | bool roaming) |
| 1339 | { | 1351 | { |
| 1352 | struct batadv_tt_local_entry *tt_removed_entry; | ||
| 1340 | struct batadv_tt_local_entry *tt_local_entry; | 1353 | struct batadv_tt_local_entry *tt_local_entry; |
| 1341 | u16 flags, curr_flags = BATADV_NO_FLAGS; | 1354 | u16 flags, curr_flags = BATADV_NO_FLAGS; |
| 1342 | void *tt_entry_exists; | 1355 | struct hlist_node *tt_removed_node; |
| 1343 | 1356 | ||
| 1344 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); | 1357 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); |
| 1345 | if (!tt_local_entry) | 1358 | if (!tt_local_entry) |
| @@ -1368,15 +1381,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, | |||
| 1368 | */ | 1381 | */ |
| 1369 | batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); | 1382 | batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); |
| 1370 | 1383 | ||
| 1371 | tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash, | 1384 | tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash, |
| 1372 | batadv_compare_tt, | 1385 | batadv_compare_tt, |
| 1373 | batadv_choose_tt, | 1386 | batadv_choose_tt, |
| 1374 | &tt_local_entry->common); | 1387 | &tt_local_entry->common); |
| 1375 | if (!tt_entry_exists) | 1388 | if (!tt_removed_node) |
| 1376 | goto out; | 1389 | goto out; |
| 1377 | 1390 | ||
| 1378 | /* extra call to free the local tt entry */ | 1391 | /* drop the reference of the removed hash entry */ |
| 1379 | batadv_tt_local_entry_put(tt_local_entry); | 1392 | tt_removed_entry = hlist_entry(tt_removed_node, |
| 1393 | struct batadv_tt_local_entry, | ||
| 1394 | common.hash_entry); | ||
| 1395 | batadv_tt_local_entry_put(tt_removed_entry); | ||
| 1380 | 1396 | ||
| 1381 | out: | 1397 | out: |
| 1382 | if (tt_local_entry) | 1398 | if (tt_local_entry) |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index a0e369179f6d..02da21d771c9 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
| @@ -601,6 +601,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br, | |||
| 601 | if (ipv4_is_local_multicast(group)) | 601 | if (ipv4_is_local_multicast(group)) |
| 602 | return 0; | 602 | return 0; |
| 603 | 603 | ||
| 604 | memset(&br_group, 0, sizeof(br_group)); | ||
| 604 | br_group.u.ip4 = group; | 605 | br_group.u.ip4 = group; |
| 605 | br_group.proto = htons(ETH_P_IP); | 606 | br_group.proto = htons(ETH_P_IP); |
| 606 | br_group.vid = vid; | 607 | br_group.vid = vid; |
| @@ -1497,6 +1498,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br, | |||
| 1497 | 1498 | ||
| 1498 | own_query = port ? &port->ip4_own_query : &br->ip4_own_query; | 1499 | own_query = port ? &port->ip4_own_query : &br->ip4_own_query; |
| 1499 | 1500 | ||
| 1501 | memset(&br_group, 0, sizeof(br_group)); | ||
| 1500 | br_group.u.ip4 = group; | 1502 | br_group.u.ip4 = group; |
| 1501 | br_group.proto = htons(ETH_P_IP); | 1503 | br_group.proto = htons(ETH_P_IP); |
| 1502 | br_group.vid = vid; | 1504 | br_group.vid = vid; |
| @@ -1520,6 +1522,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, | |||
| 1520 | 1522 | ||
| 1521 | own_query = port ? &port->ip6_own_query : &br->ip6_own_query; | 1523 | own_query = port ? &port->ip6_own_query : &br->ip6_own_query; |
| 1522 | 1524 | ||
| 1525 | memset(&br_group, 0, sizeof(br_group)); | ||
| 1523 | br_group.u.ip6 = *group; | 1526 | br_group.u.ip6 = *group; |
| 1524 | br_group.proto = htons(ETH_P_IPV6); | 1527 | br_group.proto = htons(ETH_P_IPV6); |
| 1525 | br_group.vid = vid; | 1528 | br_group.vid = vid; |
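A note on why the three memset() additions above matter, sketched with the fields from the hunk: br_group is a stack-allocated struct br_ip that is later used to match multicast group entries, so any member or padding left uninitialized would corrupt the comparison.

        struct br_ip br_group;

        memset(&br_group, 0, sizeof(br_group)); /* clear unset members and padding */
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;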
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 9d34de68571b..22afa566cbce 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c | |||
| @@ -502,6 +502,7 @@ static unsigned int br_nf_pre_routing(void *priv, | |||
| 502 | nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr; | 502 | nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr; |
| 503 | 503 | ||
| 504 | skb->protocol = htons(ETH_P_IP); | 504 | skb->protocol = htons(ETH_P_IP); |
| 505 | skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4; | ||
| 505 | 506 | ||
| 506 | NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb, | 507 | NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb, |
| 507 | skb->dev, NULL, | 508 | skb->dev, NULL, |
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index 564710f88f93..e88d6641647b 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c | |||
| @@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv, | |||
| 235 | nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr; | 235 | nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr; |
| 236 | 236 | ||
| 237 | skb->protocol = htons(ETH_P_IPV6); | 237 | skb->protocol = htons(ETH_P_IPV6); |
| 238 | skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); | ||
| 239 | |||
| 238 | NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb, | 240 | NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb, |
| 239 | skb->dev, NULL, | 241 | skb->dev, NULL, |
| 240 | br_nf_pre_routing_finish_ipv6); | 242 | br_nf_pre_routing_finish_ipv6); |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 7e71b0df1fbc..3083988ce729 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
| @@ -840,6 +840,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, | |||
| 840 | size_t bytes) | 840 | size_t bytes) |
| 841 | { | 841 | { |
| 842 | struct ceph_bio_iter *it = &cursor->bio_iter; | 842 | struct ceph_bio_iter *it = &cursor->bio_iter; |
| 843 | struct page *page = bio_iter_page(it->bio, it->iter); | ||
| 843 | 844 | ||
| 844 | BUG_ON(bytes > cursor->resid); | 845 | BUG_ON(bytes > cursor->resid); |
| 845 | BUG_ON(bytes > bio_iter_len(it->bio, it->iter)); | 846 | BUG_ON(bytes > bio_iter_len(it->bio, it->iter)); |
| @@ -851,7 +852,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, | |||
| 851 | return false; /* no more data */ | 852 | return false; /* no more data */ |
| 852 | } | 853 | } |
| 853 | 854 | ||
| 854 | if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done)) | 855 | if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done && |
| 856 | page == bio_iter_page(it->bio, it->iter))) | ||
| 855 | return false; /* more bytes to process in this segment */ | 857 | return false; /* more bytes to process in this segment */ |
| 856 | 858 | ||
| 857 | if (!it->iter.bi_size) { | 859 | if (!it->iter.bi_size) { |
| @@ -899,6 +901,7 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor, | |||
| 899 | size_t bytes) | 901 | size_t bytes) |
| 900 | { | 902 | { |
| 901 | struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs; | 903 | struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs; |
| 904 | struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter); | ||
| 902 | 905 | ||
| 903 | BUG_ON(bytes > cursor->resid); | 906 | BUG_ON(bytes > cursor->resid); |
| 904 | BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter)); | 907 | BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter)); |
| @@ -910,7 +913,8 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor, | |||
| 910 | return false; /* no more data */ | 913 | return false; /* no more data */ |
| 911 | } | 914 | } |
| 912 | 915 | ||
| 913 | if (!bytes || cursor->bvec_iter.bi_bvec_done) | 916 | if (!bytes || (cursor->bvec_iter.bi_bvec_done && |
| 917 | page == bvec_iter_page(bvecs, cursor->bvec_iter))) | ||
| 914 | return false; /* more bytes to process in this segment */ | 918 | return false; /* more bytes to process in this segment */ |
| 915 | 919 | ||
| 916 | BUG_ON(cursor->last_piece); | 920 | BUG_ON(cursor->last_piece); |
diff --git a/net/core/datagram.c b/net/core/datagram.c index b2651bb6d2a3..e657289db4ac 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
| @@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags, | |||
| 279 | break; | 279 | break; |
| 280 | 280 | ||
| 281 | sk_busy_loop(sk, flags & MSG_DONTWAIT); | 281 | sk_busy_loop(sk, flags & MSG_DONTWAIT); |
| 282 | } while (!skb_queue_empty(&sk->sk_receive_queue)); | 282 | } while (sk->sk_receive_queue.prev != *last); |
| 283 | 283 | ||
| 284 | error = -EAGAIN; | 284 | error = -EAGAIN; |
| 285 | 285 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 2b67f2aa59dd..fdcff29df915 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -5014,8 +5014,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head, | |||
| 5014 | if (pt_prev->list_func != NULL) | 5014 | if (pt_prev->list_func != NULL) |
| 5015 | pt_prev->list_func(head, pt_prev, orig_dev); | 5015 | pt_prev->list_func(head, pt_prev, orig_dev); |
| 5016 | else | 5016 | else |
| 5017 | list_for_each_entry_safe(skb, next, head, list) | 5017 | list_for_each_entry_safe(skb, next, head, list) { |
| 5018 | skb_list_del_init(skb); | ||
| 5018 | pt_prev->func(skb, skb->dev, pt_prev, orig_dev); | 5019 | pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
| 5020 | } | ||
| 5019 | } | 5021 | } |
| 5020 | 5022 | ||
| 5021 | static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) | 5023 | static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) |
diff --git a/net/core/devlink.c b/net/core/devlink.c index 78e22cea4cc7..da0a29f30885 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c | |||
| @@ -3897,6 +3897,11 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg, | |||
| 3897 | continue; | 3897 | continue; |
| 3898 | } | 3898 | } |
| 3899 | 3899 | ||
| 3900 | if (!devlink->ops->info_get) { | ||
| 3901 | idx++; | ||
| 3902 | continue; | ||
| 3903 | } | ||
| 3904 | |||
| 3900 | mutex_lock(&devlink->lock); | 3905 | mutex_lock(&devlink->lock); |
| 3901 | err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET, | 3906 | err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET, |
| 3902 | NETLINK_CB(cb->skb).portid, | 3907 | NETLINK_CB(cb->skb).portid, |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index b1eb32419732..36ed619faf36 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
| @@ -1797,11 +1797,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) | |||
| 1797 | WARN_ON_ONCE(!ret); | 1797 | WARN_ON_ONCE(!ret); |
| 1798 | 1798 | ||
| 1799 | gstrings.len = ret; | 1799 | gstrings.len = ret; |
| 1800 | data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); | ||
| 1801 | if (gstrings.len && !data) | ||
| 1802 | return -ENOMEM; | ||
| 1803 | 1800 | ||
| 1804 | __ethtool_get_strings(dev, gstrings.string_set, data); | 1801 | if (gstrings.len) { |
| 1802 | data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); | ||
| 1803 | if (!data) | ||
| 1804 | return -ENOMEM; | ||
| 1805 | |||
| 1806 | __ethtool_get_strings(dev, gstrings.string_set, data); | ||
| 1807 | } else { | ||
| 1808 | data = NULL; | ||
| 1809 | } | ||
| 1805 | 1810 | ||
| 1806 | ret = -EFAULT; | 1811 | ret = -EFAULT; |
| 1807 | if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) | 1812 | if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) |
| @@ -1897,11 +1902,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) | |||
| 1897 | return -EFAULT; | 1902 | return -EFAULT; |
| 1898 | 1903 | ||
| 1899 | stats.n_stats = n_stats; | 1904 | stats.n_stats = n_stats; |
| 1900 | data = vzalloc(array_size(n_stats, sizeof(u64))); | ||
| 1901 | if (n_stats && !data) | ||
| 1902 | return -ENOMEM; | ||
| 1903 | 1905 | ||
| 1904 | ops->get_ethtool_stats(dev, &stats, data); | 1906 | if (n_stats) { |
| 1907 | data = vzalloc(array_size(n_stats, sizeof(u64))); | ||
| 1908 | if (!data) | ||
| 1909 | return -ENOMEM; | ||
| 1910 | ops->get_ethtool_stats(dev, &stats, data); | ||
| 1911 | } else { | ||
| 1912 | data = NULL; | ||
| 1913 | } | ||
| 1905 | 1914 | ||
| 1906 | ret = -EFAULT; | 1915 | ret = -EFAULT; |
| 1907 | if (copy_to_user(useraddr, &stats, sizeof(stats))) | 1916 | if (copy_to_user(useraddr, &stats, sizeof(stats))) |
| @@ -1941,16 +1950,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) | |||
| 1941 | return -EFAULT; | 1950 | return -EFAULT; |
| 1942 | 1951 | ||
| 1943 | stats.n_stats = n_stats; | 1952 | stats.n_stats = n_stats; |
| 1944 | data = vzalloc(array_size(n_stats, sizeof(u64))); | ||
| 1945 | if (n_stats && !data) | ||
| 1946 | return -ENOMEM; | ||
| 1947 | 1953 | ||
| 1948 | if (dev->phydev && !ops->get_ethtool_phy_stats) { | 1954 | if (n_stats) { |
| 1949 | ret = phy_ethtool_get_stats(dev->phydev, &stats, data); | 1955 | data = vzalloc(array_size(n_stats, sizeof(u64))); |
| 1950 | if (ret < 0) | 1956 | if (!data) |
| 1951 | return ret; | 1957 | return -ENOMEM; |
| 1958 | |||
| 1959 | if (dev->phydev && !ops->get_ethtool_phy_stats) { | ||
| 1960 | ret = phy_ethtool_get_stats(dev->phydev, &stats, data); | ||
| 1961 | if (ret < 0) | ||
| 1962 | goto out; | ||
| 1963 | } else { | ||
| 1964 | ops->get_ethtool_phy_stats(dev, &stats, data); | ||
| 1965 | } | ||
| 1952 | } else { | 1966 | } else { |
| 1953 | ops->get_ethtool_phy_stats(dev, &stats, data); | 1967 | data = NULL; |
| 1954 | } | 1968 | } |
| 1955 | 1969 | ||
| 1956 | ret = -EFAULT; | 1970 | ret = -EFAULT; |
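All three ethtool hunks apply the same guard: allocate and call into the driver only when there is at least one string or statistic to fetch, and otherwise hand copy_to_user() a NULL buffer with a zero length. A reduced sketch of the pattern for the stats case; surrounding variables (stats, ops, dev) come from the function context in the hunk.

        u64 *data = NULL;

        if (n_stats) {
                data = vzalloc(array_size(n_stats, sizeof(u64)));
                if (!data)
                        return -ENOMEM;
                ops->get_ethtool_stats(dev, &stats, data);
        }
        /* With n_stats == 0, data stays NULL and the later copy of
         * array_size(n_stats, sizeof(u64)) == 0 bytes is a no-op. */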
diff --git a/net/core/filter.c b/net/core/filter.c index f274620945ff..fc92ebc4e200 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -1796,8 +1796,6 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = { | |||
| 1796 | 1796 | ||
| 1797 | BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk) | 1797 | BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk) |
| 1798 | { | 1798 | { |
| 1799 | sk = sk_to_full_sk(sk); | ||
| 1800 | |||
| 1801 | return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL; | 1799 | return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL; |
| 1802 | } | 1800 | } |
| 1803 | 1801 | ||
| @@ -5266,7 +5264,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = { | |||
| 5266 | .func = bpf_sk_release, | 5264 | .func = bpf_sk_release, |
| 5267 | .gpl_only = false, | 5265 | .gpl_only = false, |
| 5268 | .ret_type = RET_INTEGER, | 5266 | .ret_type = RET_INTEGER, |
| 5269 | .arg1_type = ARG_PTR_TO_SOCKET, | 5267 | .arg1_type = ARG_PTR_TO_SOCK_COMMON, |
| 5270 | }; | 5268 | }; |
| 5271 | 5269 | ||
| 5272 | BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx, | 5270 | BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx, |
| @@ -5407,8 +5405,6 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, | |||
| 5407 | 5405 | ||
| 5408 | BPF_CALL_1(bpf_tcp_sock, struct sock *, sk) | 5406 | BPF_CALL_1(bpf_tcp_sock, struct sock *, sk) |
| 5409 | { | 5407 | { |
| 5410 | sk = sk_to_full_sk(sk); | ||
| 5411 | |||
| 5412 | if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) | 5408 | if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) |
| 5413 | return (unsigned long)sk; | 5409 | return (unsigned long)sk; |
| 5414 | 5410 | ||
| @@ -5422,6 +5418,23 @@ static const struct bpf_func_proto bpf_tcp_sock_proto = { | |||
| 5422 | .arg1_type = ARG_PTR_TO_SOCK_COMMON, | 5418 | .arg1_type = ARG_PTR_TO_SOCK_COMMON, |
| 5423 | }; | 5419 | }; |
| 5424 | 5420 | ||
| 5421 | BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk) | ||
| 5422 | { | ||
| 5423 | sk = sk_to_full_sk(sk); | ||
| 5424 | |||
| 5425 | if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE)) | ||
| 5426 | return (unsigned long)sk; | ||
| 5427 | |||
| 5428 | return (unsigned long)NULL; | ||
| 5429 | } | ||
| 5430 | |||
| 5431 | static const struct bpf_func_proto bpf_get_listener_sock_proto = { | ||
| 5432 | .func = bpf_get_listener_sock, | ||
| 5433 | .gpl_only = false, | ||
| 5434 | .ret_type = RET_PTR_TO_SOCKET_OR_NULL, | ||
| 5435 | .arg1_type = ARG_PTR_TO_SOCK_COMMON, | ||
| 5436 | }; | ||
| 5437 | |||
| 5425 | BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) | 5438 | BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) |
| 5426 | { | 5439 | { |
| 5427 | unsigned int iphdr_len; | 5440 | unsigned int iphdr_len; |
| @@ -5607,6 +5620,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |||
| 5607 | #ifdef CONFIG_INET | 5620 | #ifdef CONFIG_INET |
| 5608 | case BPF_FUNC_tcp_sock: | 5621 | case BPF_FUNC_tcp_sock: |
| 5609 | return &bpf_tcp_sock_proto; | 5622 | return &bpf_tcp_sock_proto; |
| 5623 | case BPF_FUNC_get_listener_sock: | ||
| 5624 | return &bpf_get_listener_sock_proto; | ||
| 5610 | case BPF_FUNC_skb_ecn_set_ce: | 5625 | case BPF_FUNC_skb_ecn_set_ce: |
| 5611 | return &bpf_skb_ecn_set_ce_proto; | 5626 | return &bpf_skb_ecn_set_ce_proto; |
| 5612 | #endif | 5627 | #endif |
| @@ -5702,6 +5717,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |||
| 5702 | return &bpf_sk_release_proto; | 5717 | return &bpf_sk_release_proto; |
| 5703 | case BPF_FUNC_tcp_sock: | 5718 | case BPF_FUNC_tcp_sock: |
| 5704 | return &bpf_tcp_sock_proto; | 5719 | return &bpf_tcp_sock_proto; |
| 5720 | case BPF_FUNC_get_listener_sock: | ||
| 5721 | return &bpf_get_listener_sock_proto; | ||
| 5705 | #endif | 5722 | #endif |
| 5706 | default: | 5723 | default: |
| 5707 | return bpf_base_func_proto(func_id); | 5724 | return bpf_base_func_proto(func_id); |
| @@ -6596,14 +6613,8 @@ static bool flow_dissector_is_valid_access(int off, int size, | |||
| 6596 | const struct bpf_prog *prog, | 6613 | const struct bpf_prog *prog, |
| 6597 | struct bpf_insn_access_aux *info) | 6614 | struct bpf_insn_access_aux *info) |
| 6598 | { | 6615 | { |
| 6599 | if (type == BPF_WRITE) { | 6616 | if (type == BPF_WRITE) |
| 6600 | switch (off) { | 6617 | return false; |
| 6601 | case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): | ||
| 6602 | break; | ||
| 6603 | default: | ||
| 6604 | return false; | ||
| 6605 | } | ||
| 6606 | } | ||
| 6607 | 6618 | ||
| 6608 | switch (off) { | 6619 | switch (off) { |
| 6609 | case bpf_ctx_range(struct __sk_buff, data): | 6620 | case bpf_ctx_range(struct __sk_buff, data): |
| @@ -6615,11 +6626,7 @@ static bool flow_dissector_is_valid_access(int off, int size, | |||
| 6615 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): | 6626 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 6616 | info->reg_type = PTR_TO_FLOW_KEYS; | 6627 | info->reg_type = PTR_TO_FLOW_KEYS; |
| 6617 | break; | 6628 | break; |
| 6618 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 6629 | default: |
| 6619 | case bpf_ctx_range(struct __sk_buff, data_meta): | ||
| 6620 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): | ||
| 6621 | case bpf_ctx_range(struct __sk_buff, tstamp): | ||
| 6622 | case bpf_ctx_range(struct __sk_buff, wire_len): | ||
| 6623 | return false; | 6630 | return false; |
| 6624 | } | 6631 | } |
| 6625 | 6632 | ||
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index bb1a54747d64..94a450b2191a 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
| @@ -707,6 +707,7 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog, | |||
| 707 | /* Pass parameters to the BPF program */ | 707 | /* Pass parameters to the BPF program */ |
| 708 | memset(flow_keys, 0, sizeof(*flow_keys)); | 708 | memset(flow_keys, 0, sizeof(*flow_keys)); |
| 709 | cb->qdisc_cb.flow_keys = flow_keys; | 709 | cb->qdisc_cb.flow_keys = flow_keys; |
| 710 | flow_keys->n_proto = skb->protocol; | ||
| 710 | flow_keys->nhoff = skb_network_offset(skb); | 711 | flow_keys->nhoff = skb_network_offset(skb); |
| 711 | flow_keys->thoff = flow_keys->nhoff; | 712 | flow_keys->thoff = flow_keys->nhoff; |
| 712 | 713 | ||
| @@ -716,7 +717,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog, | |||
| 716 | /* Restore state */ | 717 | /* Restore state */ |
| 717 | memcpy(cb, &cb_saved, sizeof(cb_saved)); | 718 | memcpy(cb, &cb_saved, sizeof(cb_saved)); |
| 718 | 719 | ||
| 719 | flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len); | 720 | flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, |
| 721 | skb_network_offset(skb), skb->len); | ||
| 720 | flow_keys->thoff = clamp_t(u16, flow_keys->thoff, | 722 | flow_keys->thoff = clamp_t(u16, flow_keys->thoff, |
| 721 | flow_keys->nhoff, skb->len); | 723 | flow_keys->nhoff, skb->len); |
| 722 | 724 | ||
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 4ff661f6f989..f8f94303a1f5 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
| @@ -928,6 +928,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) | |||
| 928 | if (error) | 928 | if (error) |
| 929 | return error; | 929 | return error; |
| 930 | 930 | ||
| 931 | dev_hold(queue->dev); | ||
| 932 | |||
| 931 | if (dev->sysfs_rx_queue_group) { | 933 | if (dev->sysfs_rx_queue_group) { |
| 932 | error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); | 934 | error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); |
| 933 | if (error) { | 935 | if (error) { |
| @@ -937,7 +939,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) | |||
| 937 | } | 939 | } |
| 938 | 940 | ||
| 939 | kobject_uevent(kobj, KOBJ_ADD); | 941 | kobject_uevent(kobj, KOBJ_ADD); |
| 940 | dev_hold(queue->dev); | ||
| 941 | 942 | ||
| 942 | return error; | 943 | return error; |
| 943 | } | 944 | } |
| @@ -1464,6 +1465,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) | |||
| 1464 | if (error) | 1465 | if (error) |
| 1465 | return error; | 1466 | return error; |
| 1466 | 1467 | ||
| 1468 | dev_hold(queue->dev); | ||
| 1469 | |||
| 1467 | #ifdef CONFIG_BQL | 1470 | #ifdef CONFIG_BQL |
| 1468 | error = sysfs_create_group(kobj, &dql_group); | 1471 | error = sysfs_create_group(kobj, &dql_group); |
| 1469 | if (error) { | 1472 | if (error) { |
| @@ -1473,7 +1476,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) | |||
| 1473 | #endif | 1476 | #endif |
| 1474 | 1477 | ||
| 1475 | kobject_uevent(kobj, KOBJ_ADD); | 1478 | kobject_uevent(kobj, KOBJ_ADD); |
| 1476 | dev_hold(queue->dev); | ||
| 1477 | 1479 | ||
| 1478 | return 0; | 1480 | return 0; |
| 1479 | } | 1481 | } |
| @@ -1745,16 +1747,20 @@ int netdev_register_kobject(struct net_device *ndev) | |||
| 1745 | 1747 | ||
| 1746 | error = device_add(dev); | 1748 | error = device_add(dev); |
| 1747 | if (error) | 1749 | if (error) |
| 1748 | return error; | 1750 | goto error_put_device; |
| 1749 | 1751 | ||
| 1750 | error = register_queue_kobjects(ndev); | 1752 | error = register_queue_kobjects(ndev); |
| 1751 | if (error) { | 1753 | if (error) |
| 1752 | device_del(dev); | 1754 | goto error_device_del; |
| 1753 | return error; | ||
| 1754 | } | ||
| 1755 | 1755 | ||
| 1756 | pm_runtime_set_memalloc_noio(dev, true); | 1756 | pm_runtime_set_memalloc_noio(dev, true); |
| 1757 | 1757 | ||
| 1758 | return 0; | ||
| 1759 | |||
| 1760 | error_device_del: | ||
| 1761 | device_del(dev); | ||
| 1762 | error_put_device: | ||
| 1763 | put_device(dev); | ||
| 1758 | return error; | 1764 | return error; |
| 1759 | } | 1765 | } |
| 1760 | 1766 | ||
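The net-sysfs hunks share one shape: take the queue's device reference before any step that can fail, and unwind through goto labels in reverse order so every failure path releases exactly what was acquired (device_del() then put_device() in the register path above). A compact standalone sketch of that unwind shape; acquire_a(), acquire_b() and release_a() are illustrative stand-ins, not kernel APIs:

    #include <errno.h>
    #include <stdio.h>

    /* hypothetical stand-ins for device_add() / register_queue_kobjects() */
    static int acquire_a(void) { return 0; }
    static int acquire_b(void) { return -ENOMEM; }  /* force the error path */
    static void release_a(void) { puts("release_a"); }

    static int register_object(void)
    {
            int err;

            err = acquire_a();
            if (err)
                    goto err_out;

            err = acquire_b();
            if (err)
                    goto err_release_a;

            return 0;

    err_release_a:
            release_a();            /* undo in reverse order of acquisition */
    err_out:
            return err;
    }

    int main(void)
    {
            printf("register_object() = %d\n", register_object());
            return 0;
    }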
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 17f36317363d..7e6dcc625701 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
| @@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) | |||
| 304 | 304 | ||
| 305 | refcount_set(&net->count, 1); | 305 | refcount_set(&net->count, 1); |
| 306 | refcount_set(&net->passive, 1); | 306 | refcount_set(&net->passive, 1); |
| 307 | get_random_bytes(&net->hash_mix, sizeof(u32)); | ||
| 307 | net->dev_base_seq = 1; | 308 | net->dev_base_seq = 1; |
| 308 | net->user_ns = user_ns; | 309 | net->user_ns = user_ns; |
| 309 | idr_init(&net->netns_ids); | 310 | idr_init(&net->netns_ids); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2415d9cb9b89..ef2cd5712098 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -3801,7 +3801,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) | |||
| 3801 | unsigned int delta_truesize; | 3801 | unsigned int delta_truesize; |
| 3802 | struct sk_buff *lp; | 3802 | struct sk_buff *lp; |
| 3803 | 3803 | ||
| 3804 | if (unlikely(p->len + len >= 65536)) | 3804 | if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) |
| 3805 | return -E2BIG; | 3805 | return -E2BIG; |
| 3806 | 3806 | ||
| 3807 | lp = NAPI_GRO_CB(p)->last; | 3807 | lp = NAPI_GRO_CB(p)->last; |
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index f227f002c73d..db87d9f58019 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
| @@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local, | |||
| 738 | if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len)) | 738 | if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len)) |
| 739 | return -ENOMEM; | 739 | return -ENOMEM; |
| 740 | 740 | ||
| 741 | return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval); | 741 | if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) { |
| 742 | kfree(fval.sp.vec); | ||
| 743 | return -ENOMEM; | ||
| 744 | } | ||
| 745 | |||
| 746 | return 0; | ||
| 742 | } | 747 | } |
| 743 | 748 | ||
| 744 | /** | 749 | /** |
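The dccp change closes a leak: once dccp_feat_clone_sp_val() has allocated fval.sp.vec, a failing dccp_feat_push_change() must not return without freeing it, because the callee only takes ownership of the buffer on success. A minimal userspace sketch of that ownership rule (push_change() here is a hypothetical stand-in):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* hypothetical consumer that may refuse the buffer without taking it */
    static int push_change(const char *vec) { (void)vec; return -ENOMEM; }

    static int register_value(const char *src, size_t len)
    {
            char *vec = malloc(len);

            if (!vec)
                    return -ENOMEM;
            memcpy(vec, src, len);

            if (push_change(vec)) {
                    free(vec);      /* the callee did not take the buffer */
                    return -ENOMEM;
            }

            return 0;               /* ownership passed on success */
    }

    int main(void)
    {
            printf("register_value() = %d\n", register_value("abc", 3));
            return 0;
    }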
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index d5740bad5b18..57d84e9b7b6f 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
| @@ -436,8 +436,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
| 436 | newnp->ipv6_mc_list = NULL; | 436 | newnp->ipv6_mc_list = NULL; |
| 437 | newnp->ipv6_ac_list = NULL; | 437 | newnp->ipv6_ac_list = NULL; |
| 438 | newnp->ipv6_fl_list = NULL; | 438 | newnp->ipv6_fl_list = NULL; |
| 439 | newnp->mcast_oif = inet6_iif(skb); | 439 | newnp->mcast_oif = inet_iif(skb); |
| 440 | newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; | 440 | newnp->mcast_hops = ip_hdr(skb)->ttl; |
| 441 | 441 | ||
| 442 | /* | 442 | /* |
| 443 | * No need to charge this sock to the relevant IPv6 refcnt debug socks count | 443 | * No need to charge this sock to the relevant IPv6 refcnt debug socks count |
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index ed4f6dc26365..85c22ada4744 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c | |||
| @@ -98,8 +98,18 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 98 | return skb; | 98 | return skb; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto, | ||
| 102 | int *offset) | ||
| 103 | { | ||
| 104 | *offset = QCA_HDR_LEN; | ||
| 105 | *proto = ((__be16 *)skb->data)[0]; | ||
| 106 | |||
| 107 | return 0; | ||
| 108 | } | ||
| 109 | |||
| 101 | const struct dsa_device_ops qca_netdev_ops = { | 110 | const struct dsa_device_ops qca_netdev_ops = { |
| 102 | .xmit = qca_tag_xmit, | 111 | .xmit = qca_tag_xmit, |
| 103 | .rcv = qca_tag_rcv, | 112 | .rcv = qca_tag_rcv, |
| 113 | .flow_dissect = qca_tag_flow_dissect, | ||
| 104 | .overhead = QCA_HDR_LEN, | 114 | .overhead = QCA_HDR_LEN, |
| 105 | }; | 115 | }; |
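The new .flow_dissect callback tells the generic flow dissector how many bytes of switch tag to skip and which protocol header follows, so flow hashing (and therefore RPS) keeps working on tagged frames. A rough standalone sketch of the idea, with an assumed 2-byte tag and simplified framing rather than the exact QCA layout:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TAG_LEN 2       /* assumed tag length, like QCA_HDR_LEN */

    /* report the encapsulated protocol and how many bytes to skip past */
    static int tag_flow_dissect(const uint8_t *data, uint16_t *proto,
                                int *offset)
    {
            uint16_t be;

            *offset = TAG_LEN;
            memcpy(&be, data, sizeof(be));  /* first 16 bits, as on the wire */
            *proto = be;                    /* kept in network byte order */
            return 0;
    }

    int main(void)
    {
            const uint8_t frame[] = { 0x08, 0x00, 0x45, 0x00 };  /* IPv4 */
            uint16_t proto;
            int off;

            tag_flow_dissect(frame, &proto, &off);
            printf("skip %d bytes, proto 0x%04x\n", off, ntohs(proto));
            return 0;
    }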
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index fd219f7bd3ea..4b0526441476 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 259 | struct net *net = dev_net(skb->dev); | 259 | struct net *net = dev_net(skb->dev); |
| 260 | struct metadata_dst *tun_dst = NULL; | 260 | struct metadata_dst *tun_dst = NULL; |
| 261 | struct erspan_base_hdr *ershdr; | 261 | struct erspan_base_hdr *ershdr; |
| 262 | struct erspan_metadata *pkt_md; | ||
| 263 | struct ip_tunnel_net *itn; | 262 | struct ip_tunnel_net *itn; |
| 264 | struct ip_tunnel *tunnel; | 263 | struct ip_tunnel *tunnel; |
| 265 | const struct iphdr *iph; | 264 | const struct iphdr *iph; |
| @@ -282,9 +281,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 282 | if (unlikely(!pskb_may_pull(skb, len))) | 281 | if (unlikely(!pskb_may_pull(skb, len))) |
| 283 | return PACKET_REJECT; | 282 | return PACKET_REJECT; |
| 284 | 283 | ||
| 285 | ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); | ||
| 286 | pkt_md = (struct erspan_metadata *)(ershdr + 1); | ||
| 287 | |||
| 288 | if (__iptunnel_pull_header(skb, | 284 | if (__iptunnel_pull_header(skb, |
| 289 | len, | 285 | len, |
| 290 | htons(ETH_P_TEB), | 286 | htons(ETH_P_TEB), |
| @@ -292,8 +288,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 292 | goto drop; | 288 | goto drop; |
| 293 | 289 | ||
| 294 | if (tunnel->collect_md) { | 290 | if (tunnel->collect_md) { |
| 291 | struct erspan_metadata *pkt_md, *md; | ||
| 295 | struct ip_tunnel_info *info; | 292 | struct ip_tunnel_info *info; |
| 296 | struct erspan_metadata *md; | 293 | unsigned char *gh; |
| 297 | __be64 tun_id; | 294 | __be64 tun_id; |
| 298 | __be16 flags; | 295 | __be16 flags; |
| 299 | 296 | ||
| @@ -306,6 +303,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 306 | if (!tun_dst) | 303 | if (!tun_dst) |
| 307 | return PACKET_REJECT; | 304 | return PACKET_REJECT; |
| 308 | 305 | ||
| 306 | /* skb can be uncloned in __iptunnel_pull_header, so | ||
| 307 | * old pkt_md is no longer valid and we need to reset | ||
| 308 | * it | ||
| 309 | */ | ||
| 310 | gh = skb_network_header(skb) + | ||
| 311 | skb_network_header_len(skb); | ||
| 312 | pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len + | ||
| 313 | sizeof(*ershdr)); | ||
| 309 | md = ip_tunnel_info_opts(&tun_dst->u.tun_info); | 314 | md = ip_tunnel_info_opts(&tun_dst->u.tun_info); |
| 310 | md->version = ver; | 315 | md->version = ver; |
| 311 | md2 = &md->u.md2; | 316 | md2 = &md->u.md2; |
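Both erspan_rcv() hunks recompute pkt_md after __iptunnel_pull_header() because that call can unclone the skb and move its data, leaving any pointer computed beforehand dangling. The same hazard exists with any buffer that may be reallocated; a small userspace illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *buf = malloc(16);
            char *hdr;

            if (!buf)
                    return 1;
            strcpy(buf, "erspan");
            hdr = buf + 2;                  /* pointer into the buffer */

            buf = realloc(buf, 4096);       /* may move the data, much like
                                             * uncloning moves skb->data */
            if (!buf)
                    return 1;
            hdr = buf + 2;                  /* recompute from the new base;
                                             * the old hdr must not be used */
            printf("%s\n", hdr);

            free(buf);
            return 0;
    }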
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index ecce2dc78f17..1132d6d1796a 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
| @@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb) | |||
| 257 | ip_local_deliver_finish); | 257 | ip_local_deliver_finish); |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | static inline bool ip_rcv_options(struct sk_buff *skb) | 260 | static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev) |
| 261 | { | 261 | { |
| 262 | struct ip_options *opt; | 262 | struct ip_options *opt; |
| 263 | const struct iphdr *iph; | 263 | const struct iphdr *iph; |
| 264 | struct net_device *dev = skb->dev; | ||
| 265 | 264 | ||
| 266 | /* It looks as overkill, because not all | 265 | /* It looks as overkill, because not all |
| 267 | IP options require packet mangling. | 266 | IP options require packet mangling. |
| @@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb) | |||
| 297 | } | 296 | } |
| 298 | } | 297 | } |
| 299 | 298 | ||
| 300 | if (ip_options_rcv_srr(skb)) | 299 | if (ip_options_rcv_srr(skb, dev)) |
| 301 | goto drop; | 300 | goto drop; |
| 302 | } | 301 | } |
| 303 | 302 | ||
| @@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, | |||
| 353 | } | 352 | } |
| 354 | #endif | 353 | #endif |
| 355 | 354 | ||
| 356 | if (iph->ihl > 5 && ip_rcv_options(skb)) | 355 | if (iph->ihl > 5 && ip_rcv_options(skb, dev)) |
| 357 | goto drop; | 356 | goto drop; |
| 358 | 357 | ||
| 359 | rt = skb_rtable(skb); | 358 | rt = skb_rtable(skb); |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 32a35043c9f5..3db31bb9df50 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
| @@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb) | |||
| 612 | } | 612 | } |
| 613 | } | 613 | } |
| 614 | 614 | ||
| 615 | int ip_options_rcv_srr(struct sk_buff *skb) | 615 | int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev) |
| 616 | { | 616 | { |
| 617 | struct ip_options *opt = &(IPCB(skb)->opt); | 617 | struct ip_options *opt = &(IPCB(skb)->opt); |
| 618 | int srrspace, srrptr; | 618 | int srrspace, srrptr; |
| @@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
| 647 | 647 | ||
| 648 | orefdst = skb->_skb_refdst; | 648 | orefdst = skb->_skb_refdst; |
| 649 | skb_dst_set(skb, NULL); | 649 | skb_dst_set(skb, NULL); |
| 650 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); | 650 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev); |
| 651 | rt2 = skb_rtable(skb); | 651 | rt2 = skb_rtable(skb); |
| 652 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { | 652 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { |
| 653 | skb_dst_drop(skb); | 653 | skb_dst_drop(skb); |
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index cd4814f7e962..359da68d7c06 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c | |||
| @@ -67,11 +67,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA; | |||
| 67 | module_param(dctcp_alpha_on_init, uint, 0644); | 67 | module_param(dctcp_alpha_on_init, uint, 0644); |
| 68 | MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value"); | 68 | MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value"); |
| 69 | 69 | ||
| 70 | static unsigned int dctcp_clamp_alpha_on_loss __read_mostly; | ||
| 71 | module_param(dctcp_clamp_alpha_on_loss, uint, 0644); | ||
| 72 | MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss, | ||
| 73 | "parameter for clamping alpha on loss"); | ||
| 74 | |||
| 75 | static struct tcp_congestion_ops dctcp_reno; | 70 | static struct tcp_congestion_ops dctcp_reno; |
| 76 | 71 | ||
| 77 | static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) | 72 | static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) |
| @@ -164,21 +159,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags) | |||
| 164 | } | 159 | } |
| 165 | } | 160 | } |
| 166 | 161 | ||
| 167 | static void dctcp_state(struct sock *sk, u8 new_state) | 162 | static void dctcp_react_to_loss(struct sock *sk) |
| 168 | { | 163 | { |
| 169 | if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) { | 164 | struct dctcp *ca = inet_csk_ca(sk); |
| 170 | struct dctcp *ca = inet_csk_ca(sk); | 165 | struct tcp_sock *tp = tcp_sk(sk); |
| 171 | 166 | ||
| 172 | /* If this extension is enabled, we clamp dctcp_alpha to | 167 | ca->loss_cwnd = tp->snd_cwnd; |
| 173 | * max on packet loss; the motivation is that dctcp_alpha | 168 | tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U); |
| 174 | * is an indicator to the extend of congestion and packet | 169 | } |
| 175 | * loss is an indicator of extreme congestion; setting | 170 | |
| 176 | * this in practice turned out to be beneficial, and | 171 | static void dctcp_state(struct sock *sk, u8 new_state) |
| 177 | * effectively assumes total congestion which reduces the | 172 | { |
| 178 | * window by half. | 173 | if (new_state == TCP_CA_Recovery && |
| 179 | */ | 174 | new_state != inet_csk(sk)->icsk_ca_state) |
| 180 | ca->dctcp_alpha = DCTCP_MAX_ALPHA; | 175 | dctcp_react_to_loss(sk); |
| 181 | } | 176 | /* We handle RTO in dctcp_cwnd_event to ensure that we perform only |
| 177 | * one loss-adjustment per RTT. | ||
| 178 | */ | ||
| 182 | } | 179 | } |
| 183 | 180 | ||
| 184 | static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) | 181 | static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) |
| @@ -190,6 +187,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) | |||
| 190 | case CA_EVENT_ECN_NO_CE: | 187 | case CA_EVENT_ECN_NO_CE: |
| 191 | dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); | 188 | dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); |
| 192 | break; | 189 | break; |
| 190 | case CA_EVENT_LOSS: | ||
| 191 | dctcp_react_to_loss(sk); | ||
| 192 | break; | ||
| 193 | default: | 193 | default: |
| 194 | /* Don't care for the rest. */ | 194 | /* Don't care for the rest. */ |
| 195 | break; | 195 | break; |
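The dctcp rework drops the optional clamp module parameter and instead always reacts to loss by saving cwnd and halving ssthresh, doing so once per loss episode: on the transition into TCP_CA_Recovery, and via CA_EVENT_LOSS when an RTO fires. A minimal sketch of the "react only on state entry" guard (the state names are abbreviations, not the real tcp_ca_state values):

    #include <stdio.h>

    enum ca_state { CA_OPEN, CA_RECOVERY };

    static enum ca_state cur = CA_OPEN;

    /* hypothetical reaction; the real code halves ssthresh */
    static void react_to_loss(void) { puts("react_to_loss"); }

    static void set_state(enum ca_state new_state)
    {
            /* act on entering recovery, not on every repeated notification */
            if (new_state == CA_RECOVERY && new_state != cur)
                    react_to_loss();
            cur = new_state;
    }

    int main(void)
    {
            set_state(CA_RECOVERY);  /* reacts once */
            set_state(CA_RECOVERY);  /* already in recovery: no reaction */
            set_state(CA_OPEN);
            return 0;
    }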
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 277d71239d75..2f8039a26b08 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -2578,7 +2578,8 @@ static void __net_exit tcp_sk_exit(struct net *net) | |||
| 2578 | { | 2578 | { |
| 2579 | int cpu; | 2579 | int cpu; |
| 2580 | 2580 | ||
| 2581 | module_put(net->ipv4.tcp_congestion_control->owner); | 2581 | if (net->ipv4.tcp_congestion_control) |
| 2582 | module_put(net->ipv4.tcp_congestion_control->owner); | ||
| 2582 | 2583 | ||
| 2583 | for_each_possible_cpu(cpu) | 2584 | for_each_possible_cpu(cpu) |
| 2584 | inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); | 2585 | inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); |
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 79d2e43c05c5..5fc1f4e0c0cf 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c | |||
| @@ -417,6 +417,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info) | |||
| 417 | 417 | ||
| 418 | done: | 418 | done: |
| 419 | rhashtable_walk_stop(&iter); | 419 | rhashtable_walk_stop(&iter); |
| 420 | rhashtable_walk_exit(&iter); | ||
| 420 | return ret; | 421 | return ret; |
| 421 | } | 422 | } |
| 422 | 423 | ||
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index b32c95f02128..655e46b227f9 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -525,10 +525,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) | |||
| 525 | } | 525 | } |
| 526 | 526 | ||
| 527 | static int ip6erspan_rcv(struct sk_buff *skb, | 527 | static int ip6erspan_rcv(struct sk_buff *skb, |
| 528 | struct tnl_ptk_info *tpi) | 528 | struct tnl_ptk_info *tpi, |
| 529 | int gre_hdr_len) | ||
| 529 | { | 530 | { |
| 530 | struct erspan_base_hdr *ershdr; | 531 | struct erspan_base_hdr *ershdr; |
| 531 | struct erspan_metadata *pkt_md; | ||
| 532 | const struct ipv6hdr *ipv6h; | 532 | const struct ipv6hdr *ipv6h; |
| 533 | struct erspan_md2 *md2; | 533 | struct erspan_md2 *md2; |
| 534 | struct ip6_tnl *tunnel; | 534 | struct ip6_tnl *tunnel; |
| @@ -547,18 +547,16 @@ static int ip6erspan_rcv(struct sk_buff *skb, | |||
| 547 | if (unlikely(!pskb_may_pull(skb, len))) | 547 | if (unlikely(!pskb_may_pull(skb, len))) |
| 548 | return PACKET_REJECT; | 548 | return PACKET_REJECT; |
| 549 | 549 | ||
| 550 | ershdr = (struct erspan_base_hdr *)skb->data; | ||
| 551 | pkt_md = (struct erspan_metadata *)(ershdr + 1); | ||
| 552 | |||
| 553 | if (__iptunnel_pull_header(skb, len, | 550 | if (__iptunnel_pull_header(skb, len, |
| 554 | htons(ETH_P_TEB), | 551 | htons(ETH_P_TEB), |
| 555 | false, false) < 0) | 552 | false, false) < 0) |
| 556 | return PACKET_REJECT; | 553 | return PACKET_REJECT; |
| 557 | 554 | ||
| 558 | if (tunnel->parms.collect_md) { | 555 | if (tunnel->parms.collect_md) { |
| 556 | struct erspan_metadata *pkt_md, *md; | ||
| 559 | struct metadata_dst *tun_dst; | 557 | struct metadata_dst *tun_dst; |
| 560 | struct ip_tunnel_info *info; | 558 | struct ip_tunnel_info *info; |
| 561 | struct erspan_metadata *md; | 559 | unsigned char *gh; |
| 562 | __be64 tun_id; | 560 | __be64 tun_id; |
| 563 | __be16 flags; | 561 | __be16 flags; |
| 564 | 562 | ||
| @@ -571,6 +569,14 @@ static int ip6erspan_rcv(struct sk_buff *skb, | |||
| 571 | if (!tun_dst) | 569 | if (!tun_dst) |
| 572 | return PACKET_REJECT; | 570 | return PACKET_REJECT; |
| 573 | 571 | ||
| 572 | /* skb can be uncloned in __iptunnel_pull_header, so | ||
| 573 | * old pkt_md is no longer valid and we need to reset | ||
| 574 | * it | ||
| 575 | */ | ||
| 576 | gh = skb_network_header(skb) + | ||
| 577 | skb_network_header_len(skb); | ||
| 578 | pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len + | ||
| 579 | sizeof(*ershdr)); | ||
| 574 | info = &tun_dst->u.tun_info; | 580 | info = &tun_dst->u.tun_info; |
| 575 | md = ip_tunnel_info_opts(info); | 581 | md = ip_tunnel_info_opts(info); |
| 576 | md->version = ver; | 582 | md->version = ver; |
| @@ -607,7 +613,7 @@ static int gre_rcv(struct sk_buff *skb) | |||
| 607 | 613 | ||
| 608 | if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) || | 614 | if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) || |
| 609 | tpi.proto == htons(ETH_P_ERSPAN2))) { | 615 | tpi.proto == htons(ETH_P_ERSPAN2))) { |
| 610 | if (ip6erspan_rcv(skb, &tpi) == PACKET_RCVD) | 616 | if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD) |
| 611 | return 0; | 617 | return 0; |
| 612 | goto out; | 618 | goto out; |
| 613 | } | 619 | } |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index edbd12067170..e51f3c648b09 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -601,7 +601,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
| 601 | inet6_sk(skb->sk) : NULL; | 601 | inet6_sk(skb->sk) : NULL; |
| 602 | struct ipv6hdr *tmp_hdr; | 602 | struct ipv6hdr *tmp_hdr; |
| 603 | struct frag_hdr *fh; | 603 | struct frag_hdr *fh; |
| 604 | unsigned int mtu, hlen, left, len; | 604 | unsigned int mtu, hlen, left, len, nexthdr_offset; |
| 605 | int hroom, troom; | 605 | int hroom, troom; |
| 606 | __be32 frag_id; | 606 | __be32 frag_id; |
| 607 | int ptr, offset = 0, err = 0; | 607 | int ptr, offset = 0, err = 0; |
| @@ -612,6 +612,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
| 612 | goto fail; | 612 | goto fail; |
| 613 | hlen = err; | 613 | hlen = err; |
| 614 | nexthdr = *prevhdr; | 614 | nexthdr = *prevhdr; |
| 615 | nexthdr_offset = prevhdr - skb_network_header(skb); | ||
| 615 | 616 | ||
| 616 | mtu = ip6_skb_dst_mtu(skb); | 617 | mtu = ip6_skb_dst_mtu(skb); |
| 617 | 618 | ||
| @@ -646,6 +647,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
| 646 | (err = skb_checksum_help(skb))) | 647 | (err = skb_checksum_help(skb))) |
| 647 | goto fail; | 648 | goto fail; |
| 648 | 649 | ||
| 650 | prevhdr = skb_network_header(skb) + nexthdr_offset; | ||
| 649 | hroom = LL_RESERVED_SPACE(rt->dst.dev); | 651 | hroom = LL_RESERVED_SPACE(rt->dst.dev); |
| 650 | if (skb_has_frag_list(skb)) { | 652 | if (skb_has_frag_list(skb)) { |
| 651 | unsigned int first_len = skb_pagelen(skb); | 653 | unsigned int first_len = skb_pagelen(skb); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 0c6403cf8b52..ade1390c6348 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 627 | rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, | 627 | rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, |
| 628 | eiph->daddr, eiph->saddr, 0, 0, | 628 | eiph->daddr, eiph->saddr, 0, 0, |
| 629 | IPPROTO_IPIP, RT_TOS(eiph->tos), 0); | 629 | IPPROTO_IPIP, RT_TOS(eiph->tos), 0); |
| 630 | if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) { | 630 | if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) { |
| 631 | if (!IS_ERR(rt)) | 631 | if (!IS_ERR(rt)) |
| 632 | ip_rt_put(rt); | 632 | ip_rt_put(rt); |
| 633 | goto out; | 633 | goto out; |
| @@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 636 | } else { | 636 | } else { |
| 637 | if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, | 637 | if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, |
| 638 | skb2->dev) || | 638 | skb2->dev) || |
| 639 | skb_dst(skb2)->dev->type != ARPHRD_TUNNEL) | 639 | skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6) |
| 640 | goto out; | 640 | goto out; |
| 641 | } | 641 | } |
| 642 | 642 | ||
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c index 1059894a6f4c..4cb83fb69844 100644 --- a/net/ipv6/netfilter/ip6t_srh.c +++ b/net/ipv6/netfilter/ip6t_srh.c | |||
| @@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 210 | psidoff = srhoff + sizeof(struct ipv6_sr_hdr) + | 210 | psidoff = srhoff + sizeof(struct ipv6_sr_hdr) + |
| 211 | ((srh->segments_left + 1) * sizeof(struct in6_addr)); | 211 | ((srh->segments_left + 1) * sizeof(struct in6_addr)); |
| 212 | psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid); | 212 | psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid); |
| 213 | if (!psid) | ||
| 214 | return false; | ||
| 213 | if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID, | 215 | if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID, |
| 214 | ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk, | 216 | ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk, |
| 215 | &srhinfo->psid_addr))) | 217 | &srhinfo->psid_addr))) |
| @@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 223 | nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) + | 225 | nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) + |
| 224 | ((srh->segments_left - 1) * sizeof(struct in6_addr)); | 226 | ((srh->segments_left - 1) * sizeof(struct in6_addr)); |
| 225 | nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid); | 227 | nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid); |
| 228 | if (!nsid) | ||
| 229 | return false; | ||
| 226 | if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID, | 230 | if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID, |
| 227 | ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk, | 231 | ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk, |
| 228 | &srhinfo->nsid_addr))) | 232 | &srhinfo->nsid_addr))) |
| @@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 233 | if (srhinfo->mt_flags & IP6T_SRH_LSID) { | 237 | if (srhinfo->mt_flags & IP6T_SRH_LSID) { |
| 234 | lsidoff = srhoff + sizeof(struct ipv6_sr_hdr); | 238 | lsidoff = srhoff + sizeof(struct ipv6_sr_hdr); |
| 235 | lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid); | 239 | lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid); |
| 240 | if (!lsid) | ||
| 241 | return false; | ||
| 236 | if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID, | 242 | if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID, |
| 237 | ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk, | 243 | ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk, |
| 238 | &srhinfo->lsid_addr))) | 244 | &srhinfo->lsid_addr))) |
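Each added check handles skb_header_pointer() returning NULL when the requested SID lies beyond the data actually present, turning a truncated segment routing header into a non-match instead of a NULL dereference. A standalone sketch of that defensive pattern; header_pointer() below is a simplified stand-in:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* copy len bytes at offset into buf, or return NULL if the packet is
     * too short -- a toy version of skb_header_pointer()
     */
    static const void *header_pointer(const char *pkt, size_t pkt_len,
                                      size_t offset, size_t len, void *buf)
    {
            if (offset + len > pkt_len)
                    return NULL;
            memcpy(buf, pkt + offset, len);
            return buf;
    }

    int main(void)
    {
            char pkt[40] = { 0 };
            char sid[16];

            if (!header_pointer(pkt, sizeof(pkt), 32, sizeof(sid), sid)) {
                    puts("truncated packet: treat as no match");
                    return 0;
            }
            puts("field present, continue matching");
            return 0;
    }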
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4ef4bbdb49d4..0302e0eb07af 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -1040,14 +1040,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt) | |||
| 1040 | struct rt6_info *nrt; | 1040 | struct rt6_info *nrt; |
| 1041 | 1041 | ||
| 1042 | if (!fib6_info_hold_safe(rt)) | 1042 | if (!fib6_info_hold_safe(rt)) |
| 1043 | return NULL; | 1043 | goto fallback; |
| 1044 | 1044 | ||
| 1045 | nrt = ip6_dst_alloc(dev_net(dev), dev, flags); | 1045 | nrt = ip6_dst_alloc(dev_net(dev), dev, flags); |
| 1046 | if (nrt) | 1046 | if (!nrt) { |
| 1047 | ip6_rt_copy_init(nrt, rt); | ||
| 1048 | else | ||
| 1049 | fib6_info_release(rt); | 1047 | fib6_info_release(rt); |
| 1048 | goto fallback; | ||
| 1049 | } | ||
| 1050 | 1050 | ||
| 1051 | ip6_rt_copy_init(nrt, rt); | ||
| 1052 | return nrt; | ||
| 1053 | |||
| 1054 | fallback: | ||
| 1055 | nrt = dev_net(dev)->ipv6.ip6_null_entry; | ||
| 1056 | dst_hold(&nrt->dst); | ||
| 1051 | return nrt; | 1057 | return nrt; |
| 1052 | } | 1058 | } |
| 1053 | 1059 | ||
| @@ -1096,10 +1102,6 @@ restart: | |||
| 1096 | dst_hold(&rt->dst); | 1102 | dst_hold(&rt->dst); |
| 1097 | } else { | 1103 | } else { |
| 1098 | rt = ip6_create_rt_rcu(f6i); | 1104 | rt = ip6_create_rt_rcu(f6i); |
| 1099 | if (!rt) { | ||
| 1100 | rt = net->ipv6.ip6_null_entry; | ||
| 1101 | dst_hold(&rt->dst); | ||
| 1102 | } | ||
| 1103 | } | 1105 | } |
| 1104 | 1106 | ||
| 1105 | rcu_read_unlock(); | 1107 | rcu_read_unlock(); |
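Moving the fallback into ip6_create_rt_rcu() means every caller now receives a usable dst with a reference already held — either the freshly built rt or the shared ip6_null_entry — rather than having to special-case a NULL return. A rough userspace sketch of "never return NULL, hand back a held default object instead":

    #include <stdio.h>
    #include <stdlib.h>

    struct route { int refcnt; const char *name; };

    static struct route null_route = { 1, "null_route" };

    static struct route *route_alloc(int fail)
    {
            struct route *rt;

            if (!fail && (rt = calloc(1, sizeof(*rt))) != NULL) {
                    rt->refcnt = 1;
                    rt->name = "real_route";
                    return rt;
            }

            /* fall back to the shared blackhole entry, taking a reference
             * so the caller's release path stays uniform
             */
            null_route.refcnt++;
            return &null_route;
    }

    int main(void)
    {
            struct route *rt = route_alloc(1);

            printf("%s (refcnt %d)\n", rt->name, rt->refcnt);
            return 0;
    }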
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 07e21a82ce4c..b2109b74857d 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
| @@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
| 669 | !net_eq(tunnel->net, dev_net(tunnel->dev)))) | 669 | !net_eq(tunnel->net, dev_net(tunnel->dev)))) |
| 670 | goto out; | 670 | goto out; |
| 671 | 671 | ||
| 672 | /* skb can be uncloned in iptunnel_pull_header, so | ||
| 673 | * old iph is no longer valid | ||
| 674 | */ | ||
| 675 | iph = (const struct iphdr *)skb_mac_header(skb); | ||
| 672 | err = IP_ECN_decapsulate(iph, skb); | 676 | err = IP_ECN_decapsulate(iph, skb); |
| 673 | if (unlikely(err)) { | 677 | if (unlikely(err)) { |
| 674 | if (log_ecn_error) | 678 | if (log_ecn_error) |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 57ef69a10889..44d431849d39 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -1110,11 +1110,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
| 1110 | newnp->ipv6_fl_list = NULL; | 1110 | newnp->ipv6_fl_list = NULL; |
| 1111 | newnp->pktoptions = NULL; | 1111 | newnp->pktoptions = NULL; |
| 1112 | newnp->opt = NULL; | 1112 | newnp->opt = NULL; |
| 1113 | newnp->mcast_oif = tcp_v6_iif(skb); | 1113 | newnp->mcast_oif = inet_iif(skb); |
| 1114 | newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; | 1114 | newnp->mcast_hops = ip_hdr(skb)->ttl; |
| 1115 | newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); | 1115 | newnp->rcv_flowinfo = 0; |
| 1116 | if (np->repflow) | 1116 | if (np->repflow) |
| 1117 | newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); | 1117 | newnp->flow_label = 0; |
| 1118 | 1118 | ||
| 1119 | /* | 1119 | /* |
| 1120 | * No need to charge this sock to the relevant IPv6 refcnt debug socks count | 1120 | * No need to charge this sock to the relevant IPv6 refcnt debug socks count |
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index c5c5ab6c5a1c..44fdc641710d 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c | |||
| @@ -2054,14 +2054,14 @@ static int __init kcm_init(void) | |||
| 2054 | if (err) | 2054 | if (err) |
| 2055 | goto fail; | 2055 | goto fail; |
| 2056 | 2056 | ||
| 2057 | err = sock_register(&kcm_family_ops); | ||
| 2058 | if (err) | ||
| 2059 | goto sock_register_fail; | ||
| 2060 | |||
| 2061 | err = register_pernet_device(&kcm_net_ops); | 2057 | err = register_pernet_device(&kcm_net_ops); |
| 2062 | if (err) | 2058 | if (err) |
| 2063 | goto net_ops_fail; | 2059 | goto net_ops_fail; |
| 2064 | 2060 | ||
| 2061 | err = sock_register(&kcm_family_ops); | ||
| 2062 | if (err) | ||
| 2063 | goto sock_register_fail; | ||
| 2064 | |||
| 2065 | err = kcm_proc_init(); | 2065 | err = kcm_proc_init(); |
| 2066 | if (err) | 2066 | if (err) |
| 2067 | goto proc_init_fail; | 2067 | goto proc_init_fail; |
| @@ -2069,12 +2069,12 @@ static int __init kcm_init(void) | |||
| 2069 | return 0; | 2069 | return 0; |
| 2070 | 2070 | ||
| 2071 | proc_init_fail: | 2071 | proc_init_fail: |
| 2072 | unregister_pernet_device(&kcm_net_ops); | ||
| 2073 | |||
| 2074 | net_ops_fail: | ||
| 2075 | sock_unregister(PF_KCM); | 2072 | sock_unregister(PF_KCM); |
| 2076 | 2073 | ||
| 2077 | sock_register_fail: | 2074 | sock_register_fail: |
| 2075 | unregister_pernet_device(&kcm_net_ops); | ||
| 2076 | |||
| 2077 | net_ops_fail: | ||
| 2078 | proto_unregister(&kcm_proto); | 2078 | proto_unregister(&kcm_proto); |
| 2079 | 2079 | ||
| 2080 | fail: | 2080 | fail: |
| @@ -2090,8 +2090,8 @@ fail: | |||
| 2090 | static void __exit kcm_exit(void) | 2090 | static void __exit kcm_exit(void) |
| 2091 | { | 2091 | { |
| 2092 | kcm_proc_exit(); | 2092 | kcm_proc_exit(); |
| 2093 | unregister_pernet_device(&kcm_net_ops); | ||
| 2094 | sock_unregister(PF_KCM); | 2093 | sock_unregister(PF_KCM); |
| 2094 | unregister_pernet_device(&kcm_net_ops); | ||
| 2095 | proto_unregister(&kcm_proto); | 2095 | proto_unregister(&kcm_proto); |
| 2096 | destroy_workqueue(kcm_wq); | 2096 | destroy_workqueue(kcm_wq); |
| 2097 | 2097 | ||
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index dda8930f20e7..f3a8557494d6 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c | |||
| @@ -140,9 +140,15 @@ static int mpls_xmit(struct sk_buff *skb) | |||
| 140 | if (rt) | 140 | if (rt) |
| 141 | err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway, | 141 | err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway, |
| 142 | skb); | 142 | skb); |
| 143 | else if (rt6) | 143 | else if (rt6) { |
| 144 | err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway, | 144 | if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) { |
| 145 | skb); | 145 | /* 6PE (RFC 4798) */ |
| 146 | err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt6->rt6i_gateway.s6_addr32[3], | ||
| 147 | skb); | ||
| 148 | } else | ||
| 149 | err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway, | ||
| 150 | skb); | ||
| 151 | } | ||
| 146 | if (err) | 152 | if (err) |
| 147 | net_dbg_ratelimited("%s: packet transmission failed: %d\n", | 153 | net_dbg_ratelimited("%s: packet transmission failed: %d\n", |
| 148 | __func__, err); | 154 | __func__, err); |
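The mpls change handles 6PE (RFC 4798), where the IPv6 route's gateway is an IPv4-mapped address (::ffff:a.b.c.d); neighbour resolution then has to go through the ARP table using the IPv4 address carried in the last four bytes. A small standalone illustration using the regular socket API:

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            struct in6_addr gw;
            struct in_addr v4;
            char buf[INET_ADDRSTRLEN];

            inet_pton(AF_INET6, "::ffff:192.0.2.1", &gw);

            /* 6PE-style nexthop: IPv4-mapped per RFC 4291, section 2.5.5.2 */
            if (IN6_IS_ADDR_V4MAPPED(&gw)) {
                    memcpy(&v4, &gw.s6_addr[12], 4);  /* embedded IPv4 */
                    printf("resolve via ARP table: %s\n",
                           inet_ntop(AF_INET, &v4, buf, sizeof(buf)));
            } else {
                    puts("resolve via the IPv6 neighbour table");
            }
            return 0;
    }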
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index 5d782445d2fc..bad17bba8ba7 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c | |||
| @@ -251,6 +251,10 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb, | |||
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | attr = nla_nest_start(skb, NCSI_ATTR_PACKAGE_LIST); | 253 | attr = nla_nest_start(skb, NCSI_ATTR_PACKAGE_LIST); |
| 254 | if (!attr) { | ||
| 255 | rc = -EMSGSIZE; | ||
| 256 | goto err; | ||
| 257 | } | ||
| 254 | rc = ncsi_write_package_info(skb, ndp, package->id); | 258 | rc = ncsi_write_package_info(skb, ndp, package->id); |
| 255 | if (rc) { | 259 | if (rc) { |
| 256 | nla_nest_cancel(skb, attr); | 260 | nla_nest_cancel(skb, attr); |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index d43ffb09939b..6548271209a0 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
| @@ -1007,6 +1007,7 @@ config NETFILTER_XT_TARGET_TEE | |||
| 1007 | depends on NETFILTER_ADVANCED | 1007 | depends on NETFILTER_ADVANCED |
| 1008 | depends on IPV6 || IPV6=n | 1008 | depends on IPV6 || IPV6=n |
| 1009 | depends on !NF_CONNTRACK || NF_CONNTRACK | 1009 | depends on !NF_CONNTRACK || NF_CONNTRACK |
| 1010 | depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES | ||
| 1010 | select NF_DUP_IPV4 | 1011 | select NF_DUP_IPV4 |
| 1011 | select NF_DUP_IPV6 if IP6_NF_IPTABLES | 1012 | select NF_DUP_IPV6 if IP6_NF_IPTABLES |
| 1012 | ---help--- | 1013 | ---help--- |
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index f067c6b50857..39fcc1ed18f3 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
| @@ -20,9 +20,9 @@ | |||
| 20 | #include <linux/udp.h> | 20 | #include <linux/udp.h> |
| 21 | #include <linux/tcp.h> | 21 | #include <linux/tcp.h> |
| 22 | #include <linux/netfilter.h> | 22 | #include <linux/netfilter.h> |
| 23 | #include <linux/netfilter_ipv4.h> | ||
| 24 | #include <linux/netfilter_ipv6.h> | ||
| 23 | 25 | ||
| 24 | #include <net/route.h> | ||
| 25 | #include <net/ip6_route.h> | ||
| 26 | #include <net/netfilter/nf_conntrack.h> | 26 | #include <net/netfilter/nf_conntrack.h> |
| 27 | #include <net/netfilter/nf_conntrack_core.h> | 27 | #include <net/netfilter/nf_conntrack_core.h> |
| 28 | #include <net/netfilter/nf_conntrack_expect.h> | 28 | #include <net/netfilter/nf_conntrack_expect.h> |
| @@ -871,38 +871,33 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff, | |||
| 871 | } else if (sip_external_media) { | 871 | } else if (sip_external_media) { |
| 872 | struct net_device *dev = skb_dst(skb)->dev; | 872 | struct net_device *dev = skb_dst(skb)->dev; |
| 873 | struct net *net = dev_net(dev); | 873 | struct net *net = dev_net(dev); |
| 874 | struct rtable *rt; | 874 | struct flowi fl; |
| 875 | struct flowi4 fl4 = {}; | ||
| 876 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 877 | struct flowi6 fl6 = {}; | ||
| 878 | #endif | ||
| 879 | struct dst_entry *dst = NULL; | 875 | struct dst_entry *dst = NULL; |
| 880 | 876 | ||
| 877 | memset(&fl, 0, sizeof(fl)); | ||
| 878 | |||
| 881 | switch (nf_ct_l3num(ct)) { | 879 | switch (nf_ct_l3num(ct)) { |
| 882 | case NFPROTO_IPV4: | 880 | case NFPROTO_IPV4: |
| 883 | fl4.daddr = daddr->ip; | 881 | fl.u.ip4.daddr = daddr->ip; |
| 884 | rt = ip_route_output_key(net, &fl4); | 882 | nf_ip_route(net, &dst, &fl, false); |
| 885 | if (!IS_ERR(rt)) | ||
| 886 | dst = &rt->dst; | ||
| 887 | break; | 883 | break; |
| 888 | 884 | ||
| 889 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 890 | case NFPROTO_IPV6: | 885 | case NFPROTO_IPV6: |
| 891 | fl6.daddr = daddr->in6; | 886 | fl.u.ip6.daddr = daddr->in6; |
| 892 | dst = ip6_route_output(net, NULL, &fl6); | 887 | nf_ip6_route(net, &dst, &fl, false); |
| 893 | if (dst->error) { | ||
| 894 | dst_release(dst); | ||
| 895 | dst = NULL; | ||
| 896 | } | ||
| 897 | break; | 888 | break; |
| 898 | #endif | ||
| 899 | } | 889 | } |
| 900 | 890 | ||
| 901 | /* Don't predict any conntracks when media endpoint is reachable | 891 | /* Don't predict any conntracks when media endpoint is reachable |
| 902 | * through the same interface as the signalling peer. | 892 | * through the same interface as the signalling peer. |
| 903 | */ | 893 | */ |
| 904 | if (dst && dst->dev == dev) | 894 | if (dst) { |
| 905 | return NF_ACCEPT; | 895 | bool external_media = (dst->dev == dev); |
| 896 | |||
| 897 | dst_release(dst); | ||
| 898 | if (external_media) | ||
| 899 | return NF_ACCEPT; | ||
| 900 | } | ||
| 906 | } | 901 | } |
| 907 | 902 | ||
| 908 | /* We need to check whether the registration exists before attempting | 903 | /* We need to check whether the registration exists before attempting |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 513f93118604..ef7772e976cc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -2806,8 +2806,11 @@ err2: | |||
| 2806 | nf_tables_rule_release(&ctx, rule); | 2806 | nf_tables_rule_release(&ctx, rule); |
| 2807 | err1: | 2807 | err1: |
| 2808 | for (i = 0; i < n; i++) { | 2808 | for (i = 0; i < n; i++) { |
| 2809 | if (info[i].ops != NULL) | 2809 | if (info[i].ops) { |
| 2810 | module_put(info[i].ops->type->owner); | 2810 | module_put(info[i].ops->type->owner); |
| 2811 | if (info[i].ops->type->release_ops) | ||
| 2812 | info[i].ops->type->release_ops(info[i].ops); | ||
| 2813 | } | ||
| 2811 | } | 2814 | } |
| 2812 | kvfree(info); | 2815 | kvfree(info); |
| 2813 | return err; | 2816 | return err; |
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index 457a9ceb46af..8dfa798ea683 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c | |||
| @@ -65,21 +65,34 @@ nla_put_failure: | |||
| 65 | return -1; | 65 | return -1; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | static void nft_objref_destroy(const struct nft_ctx *ctx, | 68 | static void nft_objref_deactivate(const struct nft_ctx *ctx, |
| 69 | const struct nft_expr *expr) | 69 | const struct nft_expr *expr, |
| 70 | enum nft_trans_phase phase) | ||
| 70 | { | 71 | { |
| 71 | struct nft_object *obj = nft_objref_priv(expr); | 72 | struct nft_object *obj = nft_objref_priv(expr); |
| 72 | 73 | ||
| 74 | if (phase == NFT_TRANS_COMMIT) | ||
| 75 | return; | ||
| 76 | |||
| 73 | obj->use--; | 77 | obj->use--; |
| 74 | } | 78 | } |
| 75 | 79 | ||
| 80 | static void nft_objref_activate(const struct nft_ctx *ctx, | ||
| 81 | const struct nft_expr *expr) | ||
| 82 | { | ||
| 83 | struct nft_object *obj = nft_objref_priv(expr); | ||
| 84 | |||
| 85 | obj->use++; | ||
| 86 | } | ||
| 87 | |||
| 76 | static struct nft_expr_type nft_objref_type; | 88 | static struct nft_expr_type nft_objref_type; |
| 77 | static const struct nft_expr_ops nft_objref_ops = { | 89 | static const struct nft_expr_ops nft_objref_ops = { |
| 78 | .type = &nft_objref_type, | 90 | .type = &nft_objref_type, |
| 79 | .size = NFT_EXPR_SIZE(sizeof(struct nft_object *)), | 91 | .size = NFT_EXPR_SIZE(sizeof(struct nft_object *)), |
| 80 | .eval = nft_objref_eval, | 92 | .eval = nft_objref_eval, |
| 81 | .init = nft_objref_init, | 93 | .init = nft_objref_init, |
| 82 | .destroy = nft_objref_destroy, | 94 | .activate = nft_objref_activate, |
| 95 | .deactivate = nft_objref_deactivate, | ||
| 83 | .dump = nft_objref_dump, | 96 | .dump = nft_objref_dump, |
| 84 | }; | 97 | }; |
| 85 | 98 | ||
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c index f8092926f704..a340cd8a751b 100644 --- a/net/netfilter/nft_redir.c +++ b/net/netfilter/nft_redir.c | |||
| @@ -233,5 +233,5 @@ module_exit(nft_redir_module_exit); | |||
| 233 | 233 | ||
| 234 | MODULE_LICENSE("GPL"); | 234 | MODULE_LICENSE("GPL"); |
| 235 | MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>"); | 235 | MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>"); |
| 236 | MODULE_ALIAS_NFT_AF_EXPR(AF_INET4, "redir"); | 236 | MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir"); |
| 237 | MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir"); | 237 | MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir"); |
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index fa61208371f8..321a0036fdf5 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
| @@ -308,10 +308,6 @@ static void *nft_rbtree_deactivate(const struct net *net, | |||
| 308 | else if (d > 0) | 308 | else if (d > 0) |
| 309 | parent = parent->rb_right; | 309 | parent = parent->rb_right; |
| 310 | else { | 310 | else { |
| 311 | if (!nft_set_elem_active(&rbe->ext, genmask)) { | ||
| 312 | parent = parent->rb_left; | ||
| 313 | continue; | ||
| 314 | } | ||
| 315 | if (nft_rbtree_interval_end(rbe) && | 311 | if (nft_rbtree_interval_end(rbe) && |
| 316 | !nft_rbtree_interval_end(this)) { | 312 | !nft_rbtree_interval_end(this)) { |
| 317 | parent = parent->rb_left; | 313 | parent = parent->rb_left; |
| @@ -320,6 +316,9 @@ static void *nft_rbtree_deactivate(const struct net *net, | |||
| 320 | nft_rbtree_interval_end(this)) { | 316 | nft_rbtree_interval_end(this)) { |
| 321 | parent = parent->rb_right; | 317 | parent = parent->rb_right; |
| 322 | continue; | 318 | continue; |
| 319 | } else if (!nft_set_elem_active(&rbe->ext, genmask)) { | ||
| 320 | parent = parent->rb_left; | ||
| 321 | continue; | ||
| 323 | } | 322 | } |
| 324 | nft_rbtree_flush(net, set, rbe); | 323 | nft_rbtree_flush(net, set, rbe); |
| 325 | return rbe; | 324 | return rbe; |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 25eeb6d2a75a..f0ec068e1d02 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
| @@ -366,7 +366,7 @@ int genl_register_family(struct genl_family *family) | |||
| 366 | start, end + 1, GFP_KERNEL); | 366 | start, end + 1, GFP_KERNEL); |
| 367 | if (family->id < 0) { | 367 | if (family->id < 0) { |
| 368 | err = family->id; | 368 | err = family->id; |
| 369 | goto errout_locked; | 369 | goto errout_free; |
| 370 | } | 370 | } |
| 371 | 371 | ||
| 372 | err = genl_validate_assign_mc_groups(family); | 372 | err = genl_validate_assign_mc_groups(family); |
| @@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family) | |||
| 385 | 385 | ||
| 386 | errout_remove: | 386 | errout_remove: |
| 387 | idr_remove(&genl_fam_idr, family->id); | 387 | idr_remove(&genl_fam_idr, family->id); |
| 388 | errout_free: | ||
| 388 | kfree(family->attrbuf); | 389 | kfree(family->attrbuf); |
| 389 | errout_locked: | 390 | errout_locked: |
| 390 | genl_unlock_all(); | 391 | genl_unlock_all(); |
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index ae296273ce3d..17dcd0b5eb32 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c | |||
| @@ -726,6 +726,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, | |||
| 726 | llcp_sock->service_name = kmemdup(addr->service_name, | 726 | llcp_sock->service_name = kmemdup(addr->service_name, |
| 727 | llcp_sock->service_name_len, | 727 | llcp_sock->service_name_len, |
| 728 | GFP_KERNEL); | 728 | GFP_KERNEL); |
| 729 | if (!llcp_sock->service_name) { | ||
| 730 | ret = -ENOMEM; | ||
| 731 | goto sock_llcp_release; | ||
| 732 | } | ||
| 729 | 733 | ||
| 730 | nfc_llcp_sock_link(&local->connecting_sockets, sk); | 734 | nfc_llcp_sock_link(&local->connecting_sockets, sk); |
| 731 | 735 | ||
| @@ -745,10 +749,11 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, | |||
| 745 | return ret; | 749 | return ret; |
| 746 | 750 | ||
| 747 | sock_unlink: | 751 | sock_unlink: |
| 748 | nfc_llcp_put_ssap(local, llcp_sock->ssap); | ||
| 749 | |||
| 750 | nfc_llcp_sock_unlink(&local->connecting_sockets, sk); | 752 | nfc_llcp_sock_unlink(&local->connecting_sockets, sk); |
| 751 | 753 | ||
| 754 | sock_llcp_release: | ||
| 755 | nfc_llcp_put_ssap(local, llcp_sock->ssap); | ||
| 756 | |||
| 752 | put_dev: | 757 | put_dev: |
| 753 | nfc_put_device(dev); | 758 | nfc_put_device(dev); |
| 754 | 759 | ||
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c index ddfc52ac1f9b..c0d323b58e73 100644 --- a/net/nfc/nci/hci.c +++ b/net/nfc/nci/hci.c | |||
| @@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, | |||
| 312 | create_info = (struct nci_hci_create_pipe_resp *)skb->data; | 312 | create_info = (struct nci_hci_create_pipe_resp *)skb->data; |
| 313 | dest_gate = create_info->dest_gate; | 313 | dest_gate = create_info->dest_gate; |
| 314 | new_pipe = create_info->pipe; | 314 | new_pipe = create_info->pipe; |
| 315 | if (new_pipe >= NCI_HCI_MAX_PIPES) { | ||
| 316 | status = NCI_HCI_ANY_E_NOK; | ||
| 317 | goto exit; | ||
| 318 | } | ||
| 315 | 319 | ||
| 316 | /* Save the new created pipe and bind with local gate, | 320 | /* Save the new created pipe and bind with local gate, |
| 317 | * the description for skb->data[3] is destination gate id | 321 | * the description for skb->data[3] is destination gate id |
| @@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, | |||
| 336 | goto exit; | 340 | goto exit; |
| 337 | } | 341 | } |
| 338 | delete_info = (struct nci_hci_delete_pipe_noti *)skb->data; | 342 | delete_info = (struct nci_hci_delete_pipe_noti *)skb->data; |
| 343 | if (delete_info->pipe >= NCI_HCI_MAX_PIPES) { | ||
| 344 | status = NCI_HCI_ANY_E_NOK; | ||
| 345 | goto exit; | ||
| 346 | } | ||
| 339 | 347 | ||
| 340 | ndev->hci_dev->pipes[delete_info->pipe].gate = | 348 | ndev->hci_dev->pipes[delete_info->pipe].gate = |
| 341 | NCI_HCI_INVALID_GATE; | 349 | NCI_HCI_INVALID_GATE; |
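Both added checks validate a pipe number that arrives straight off the wire before it is used to index the fixed-size pipes[] table. A minimal bounds-check sketch (MAX_PIPES is an assumed value, not the real NCI_HCI_MAX_PIPES):

    #include <stdio.h>

    #define MAX_PIPES 128

    static int pipe_gate[MAX_PIPES];

    /* 'pipe' is untrusted input taken from the received frame */
    static int set_pipe_gate(unsigned int pipe, int gate)
    {
            if (pipe >= MAX_PIPES)
                    return -1;      /* reject rather than write out of bounds */
            pipe_gate[pipe] = gate;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", set_pipe_gate(5, 1));    /* 0  */
            printf("%d\n", set_pipe_gate(4096, 1)); /* -1 */
            return 0;
    }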
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 6679e96ab1dc..9dd158ab51b3 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
| @@ -448,6 +448,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
| 448 | 448 | ||
| 449 | upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, | 449 | upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, |
| 450 | 0, upcall_info->cmd); | 450 | 0, upcall_info->cmd); |
| 451 | if (!upcall) { | ||
| 452 | err = -EINVAL; | ||
| 453 | goto out; | ||
| 454 | } | ||
| 451 | upcall->dp_ifindex = dp_ifindex; | 455 | upcall->dp_ifindex = dp_ifindex; |
| 452 | 456 | ||
| 453 | err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb); | 457 | err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb); |
| @@ -460,6 +464,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
| 460 | 464 | ||
| 461 | if (upcall_info->egress_tun_info) { | 465 | if (upcall_info->egress_tun_info) { |
| 462 | nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY); | 466 | nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY); |
| 467 | if (!nla) { | ||
| 468 | err = -EMSGSIZE; | ||
| 469 | goto out; | ||
| 470 | } | ||
| 463 | err = ovs_nla_put_tunnel_info(user_skb, | 471 | err = ovs_nla_put_tunnel_info(user_skb, |
| 464 | upcall_info->egress_tun_info); | 472 | upcall_info->egress_tun_info); |
| 465 | BUG_ON(err); | 473 | BUG_ON(err); |
| @@ -468,6 +476,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
| 468 | 476 | ||
| 469 | if (upcall_info->actions_len) { | 477 | if (upcall_info->actions_len) { |
| 470 | nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS); | 478 | nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS); |
| 479 | if (!nla) { | ||
| 480 | err = -EMSGSIZE; | ||
| 481 | goto out; | ||
| 482 | } | ||
| 471 | err = ovs_nla_put_actions(upcall_info->actions, | 483 | err = ovs_nla_put_actions(upcall_info->actions, |
| 472 | upcall_info->actions_len, | 484 | upcall_info->actions_len, |
| 473 | user_skb); | 485 | user_skb); |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 691da853bef5..4bdf5e3ac208 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
| @@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, | |||
| 2306 | 2306 | ||
| 2307 | struct sw_flow_actions *acts; | 2307 | struct sw_flow_actions *acts; |
| 2308 | int new_acts_size; | 2308 | int new_acts_size; |
| 2309 | int req_size = NLA_ALIGN(attr_len); | 2309 | size_t req_size = NLA_ALIGN(attr_len); |
| 2310 | int next_offset = offsetof(struct sw_flow_actions, actions) + | 2310 | int next_offset = offsetof(struct sw_flow_actions, actions) + |
| 2311 | (*sfa)->actions_len; | 2311 | (*sfa)->actions_len; |
| 2312 | 2312 | ||
| 2313 | if (req_size <= (ksize(*sfa) - next_offset)) | 2313 | if (req_size <= (ksize(*sfa) - next_offset)) |
| 2314 | goto out; | 2314 | goto out; |
| 2315 | 2315 | ||
| 2316 | new_acts_size = ksize(*sfa) * 2; | 2316 | new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2); |
| 2317 | 2317 | ||
| 2318 | if (new_acts_size > MAX_ACTIONS_BUFSIZE) { | 2318 | if (new_acts_size > MAX_ACTIONS_BUFSIZE) { |
| 2319 | if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { | 2319 | if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { |
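Doubling ksize(*sfa) is not always sufficient: a single attribute larger than the whole current buffer would still not fit, so the new size takes the larger of "double the buffer" and "next_offset + req_size", and req_size becomes a size_t so a huge attr_len cannot wrap a signed int. A tiny sketch of that growth policy:

    #include <stddef.h>
    #include <stdio.h>

    static size_t grow(size_t cur_size, size_t next_offset, size_t req_size)
    {
            size_t doubled = cur_size * 2;
            size_t needed = next_offset + req_size;

            /* whichever is larger, so one oversized request still fits */
            return needed > doubled ? needed : doubled;
    }

    int main(void)
    {
            printf("%zu\n", grow(256, 200, 40));    /* 512: doubling suffices */
            printf("%zu\n", grow(256, 200, 2000));  /* 2200: doubling would not */
            return 0;
    }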
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8376bc1c1508..9419c5cf4de5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -1852,7 +1852,8 @@ oom: | |||
| 1852 | 1852 | ||
| 1853 | static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) | 1853 | static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) |
| 1854 | { | 1854 | { |
| 1855 | if (!skb->protocol && sock->type == SOCK_RAW) { | 1855 | if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) && |
| 1856 | sock->type == SOCK_RAW) { | ||
| 1856 | skb_reset_mac_header(skb); | 1857 | skb_reset_mac_header(skb); |
| 1857 | skb->protocol = dev_parse_header_protocol(skb); | 1858 | skb->protocol = dev_parse_header_protocol(skb); |
| 1858 | } | 1859 | } |
| @@ -3243,7 +3244,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, | |||
| 3243 | } | 3244 | } |
| 3244 | 3245 | ||
| 3245 | mutex_lock(&net->packet.sklist_lock); | 3246 | mutex_lock(&net->packet.sklist_lock); |
| 3246 | sk_add_node_rcu(sk, &net->packet.sklist); | 3247 | sk_add_node_tail_rcu(sk, &net->packet.sklist); |
| 3247 | mutex_unlock(&net->packet.sklist_lock); | 3248 | mutex_unlock(&net->packet.sklist_lock); |
| 3248 | 3249 | ||
| 3249 | preempt_disable(); | 3250 | preempt_disable(); |
| @@ -4209,7 +4210,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) | |||
| 4209 | struct pgv *pg_vec; | 4210 | struct pgv *pg_vec; |
| 4210 | int i; | 4211 | int i; |
| 4211 | 4212 | ||
| 4212 | pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); | 4213 | pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN); |
| 4213 | if (unlikely(!pg_vec)) | 4214 | if (unlikely(!pg_vec)) |
| 4214 | goto out; | 4215 | goto out; |
| 4215 | 4216 | ||
diff --git a/net/rds/tcp.c b/net/rds/tcp.c index fd2694174607..faf726e00e27 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c | |||
| @@ -608,7 +608,7 @@ static void rds_tcp_kill_sock(struct net *net) | |||
| 608 | list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { | 608 | list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { |
| 609 | struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); | 609 | struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); |
| 610 | 610 | ||
| 611 | if (net != c_net || !tc->t_sock) | 611 | if (net != c_net) |
| 612 | continue; | 612 | continue; |
| 613 | if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { | 613 | if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { |
| 614 | list_move_tail(&tc->t_tcp_node, &tmp_list); | 614 | list_move_tail(&tc->t_tcp_node, &tmp_list); |
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c index 7ca57741b2fb..7849f286bb93 100644 --- a/net/rose/rose_subr.c +++ b/net/rose/rose_subr.c | |||
| @@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype) | |||
| 105 | struct sk_buff *skb; | 105 | struct sk_buff *skb; |
| 106 | unsigned char *dptr; | 106 | unsigned char *dptr; |
| 107 | unsigned char lci1, lci2; | 107 | unsigned char lci1, lci2; |
| 108 | char buffer[100]; | 108 | int maxfaclen = 0; |
| 109 | int len, faclen = 0; | 109 | int len, faclen; |
| 110 | int reserve; | ||
| 110 | 111 | ||
| 111 | len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1; | 112 | reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1; |
| 113 | len = ROSE_MIN_LEN; | ||
| 112 | 114 | ||
| 113 | switch (frametype) { | 115 | switch (frametype) { |
| 114 | case ROSE_CALL_REQUEST: | 116 | case ROSE_CALL_REQUEST: |
| 115 | len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN; | 117 | len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN; |
| 116 | faclen = rose_create_facilities(buffer, rose); | 118 | maxfaclen = 256; |
| 117 | len += faclen; | ||
| 118 | break; | 119 | break; |
| 119 | case ROSE_CALL_ACCEPTED: | 120 | case ROSE_CALL_ACCEPTED: |
| 120 | case ROSE_CLEAR_REQUEST: | 121 | case ROSE_CLEAR_REQUEST: |
| @@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype) | |||
| 123 | break; | 124 | break; |
| 124 | } | 125 | } |
| 125 | 126 | ||
| 126 | if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) | 127 | skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC); |
| 128 | if (!skb) | ||
| 127 | return; | 129 | return; |
| 128 | 130 | ||
| 129 | /* | 131 | /* |
| 130 | * Space for AX.25 header and PID. | 132 | * Space for AX.25 header and PID. |
| 131 | */ | 133 | */ |
| 132 | skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1); | 134 | skb_reserve(skb, reserve); |
| 133 | 135 | ||
| 134 | dptr = skb_put(skb, skb_tailroom(skb)); | 136 | dptr = skb_put(skb, len); |
| 135 | 137 | ||
| 136 | lci1 = (rose->lci >> 8) & 0x0F; | 138 | lci1 = (rose->lci >> 8) & 0x0F; |
| 137 | lci2 = (rose->lci >> 0) & 0xFF; | 139 | lci2 = (rose->lci >> 0) & 0xFF; |
| @@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype) | |||
| 146 | dptr += ROSE_ADDR_LEN; | 148 | dptr += ROSE_ADDR_LEN; |
| 147 | memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); | 149 | memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); |
| 148 | dptr += ROSE_ADDR_LEN; | 150 | dptr += ROSE_ADDR_LEN; |
| 149 | memcpy(dptr, buffer, faclen); | 151 | faclen = rose_create_facilities(dptr, rose); |
| 152 | skb_put(skb, faclen); | ||
| 150 | dptr += faclen; | 153 | dptr += faclen; |
| 151 | break; | 154 | break; |
| 152 | 155 | ||
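The rose rewrite sizes the skb for the worst case — headroom plus the fixed frame plus up to 256 bytes of facilities — reserves the headroom, and then commits bytes to the data area only as they are written: skb_put(skb, len) for the fixed part, then skb_put(skb, faclen) once the real facilities length is known. That replaces the old 100-byte stack buffer the facilities could overrun. A standalone sketch of the "reserve the maximum, commit what you write" shape (buf_put() is a toy stand-in for skb_put()):

    #include <stdio.h>
    #include <string.h>

    #define HEADROOM   40
    #define FIXED_LEN   3
    #define MAX_FACLEN 256

    struct buf {
            unsigned char data[HEADROOM + FIXED_LEN + MAX_FACLEN];
            size_t head;    /* offset where written data starts */
            size_t len;     /* bytes committed so far */
    };

    /* commit n more bytes and return a pointer for writing them */
    static unsigned char *buf_put(struct buf *b, size_t n)
    {
            unsigned char *p = b->data + b->head + b->len;

            b->len += n;
            return p;
    }

    int main(void)
    {
            struct buf b = { .head = HEADROOM };    /* reserve headroom */
            size_t faclen = 12;                     /* produced at runtime */
            unsigned char *p;

            p = buf_put(&b, FIXED_LEN);             /* fixed part */
            memset(p, 0, FIXED_LEN);

            p = buf_put(&b, faclen);                /* only the real length */
            memset(p, 0xaa, faclen);

            printf("committed %zu of %d possible payload bytes\n",
                   b.len, FIXED_LEN + MAX_FACLEN);
            return 0;
    }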
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 736aa9281100..004c762c2e8d 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c | |||
| @@ -335,7 +335,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 335 | struct kvec iov[2]; | 335 | struct kvec iov[2]; |
| 336 | rxrpc_serial_t serial; | 336 | rxrpc_serial_t serial; |
| 337 | size_t len; | 337 | size_t len; |
| 338 | bool lost = false; | ||
| 339 | int ret, opt; | 338 | int ret, opt; |
| 340 | 339 | ||
| 341 | _enter(",{%d}", skb->len); | 340 | _enter(",{%d}", skb->len); |
| @@ -393,14 +392,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 393 | static int lose; | 392 | static int lose; |
| 394 | if ((lose++ & 7) == 7) { | 393 | if ((lose++ & 7) == 7) { |
| 395 | ret = 0; | 394 | ret = 0; |
| 396 | lost = true; | 395 | trace_rxrpc_tx_data(call, sp->hdr.seq, serial, |
| 396 | whdr.flags, retrans, true); | ||
| 397 | goto done; | ||
| 397 | } | 398 | } |
| 398 | } | 399 | } |
| 399 | 400 | ||
| 400 | trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, | 401 | trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans, |
| 401 | retrans, lost); | 402 | false); |
| 402 | if (lost) | ||
| 403 | goto done; | ||
| 404 | 403 | ||
| 405 | /* send the packet with the don't fragment bit set if we currently | 404 | /* send the packet with the don't fragment bit set if we currently |
| 406 | * think it's small enough */ | 405 | * think it's small enough */ |
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 1b9afdee5ba9..5c02ad97ef23 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig | |||
| @@ -358,8 +358,7 @@ config NET_SCH_PIE | |||
| 358 | help | 358 | help |
| 359 | Say Y here if you want to use the Proportional Integral controller | 359 | Say Y here if you want to use the Proportional Integral controller |
| 360 | Enhanced scheduler packet scheduling algorithm. | 360 | Enhanced scheduler packet scheduling algorithm. |
| 361 | For more information, please see | 361 | For more information, please see https://tools.ietf.org/html/rfc8033 |
| 362 | http://tools.ietf.org/html/draft-pan-tsvwg-pie-00 | ||
| 363 | 362 | ||
| 364 | To compile this driver as a module, choose M here: the module | 363 | To compile this driver as a module, choose M here: the module |
| 365 | will be called sch_pie. | 364 | will be called sch_pie. |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index aecf1bf233c8..5a87e271d35a 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
| @@ -28,27 +28,10 @@ | |||
| 28 | #include <net/act_api.h> | 28 | #include <net/act_api.h> |
| 29 | #include <net/netlink.h> | 29 | #include <net/netlink.h> |
| 30 | 30 | ||
| 31 | static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp) | ||
| 32 | { | ||
| 33 | u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK; | ||
| 34 | |||
| 35 | if (!tp) | ||
| 36 | return -EINVAL; | ||
| 37 | a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index); | ||
| 38 | if (!a->goto_chain) | ||
| 39 | return -ENOMEM; | ||
| 40 | return 0; | ||
| 41 | } | ||
| 42 | |||
| 43 | static void tcf_action_goto_chain_fini(struct tc_action *a) | ||
| 44 | { | ||
| 45 | tcf_chain_put_by_act(a->goto_chain); | ||
| 46 | } | ||
| 47 | |||
| 48 | static void tcf_action_goto_chain_exec(const struct tc_action *a, | 31 | static void tcf_action_goto_chain_exec(const struct tc_action *a, |
| 49 | struct tcf_result *res) | 32 | struct tcf_result *res) |
| 50 | { | 33 | { |
| 51 | const struct tcf_chain *chain = a->goto_chain; | 34 | const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain); |
| 52 | 35 | ||
| 53 | res->goto_tp = rcu_dereference_bh(chain->filter_chain); | 36 | res->goto_tp = rcu_dereference_bh(chain->filter_chain); |
| 54 | } | 37 | } |
| @@ -71,6 +54,51 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie, | |||
| 71 | call_rcu(&old->rcu, tcf_free_cookie_rcu); | 54 | call_rcu(&old->rcu, tcf_free_cookie_rcu); |
| 72 | } | 55 | } |
| 73 | 56 | ||
| 57 | int tcf_action_check_ctrlact(int action, struct tcf_proto *tp, | ||
| 58 | struct tcf_chain **newchain, | ||
| 59 | struct netlink_ext_ack *extack) | ||
| 60 | { | ||
| 61 | int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL; | ||
| 62 | u32 chain_index; | ||
| 63 | |||
| 64 | if (!opcode) | ||
| 65 | ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0; | ||
| 66 | else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC) | ||
| 67 | ret = 0; | ||
| 68 | if (ret) { | ||
| 69 | NL_SET_ERR_MSG(extack, "invalid control action"); | ||
| 70 | goto end; | ||
| 71 | } | ||
| 72 | |||
| 73 | if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) { | ||
| 74 | chain_index = action & TC_ACT_EXT_VAL_MASK; | ||
| 75 | if (!tp || !newchain) { | ||
| 76 | ret = -EINVAL; | ||
| 77 | NL_SET_ERR_MSG(extack, | ||
| 78 | "can't goto NULL proto/chain"); | ||
| 79 | goto end; | ||
| 80 | } | ||
| 81 | *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index); | ||
| 82 | if (!*newchain) { | ||
| 83 | ret = -ENOMEM; | ||
| 84 | NL_SET_ERR_MSG(extack, | ||
| 85 | "can't allocate goto_chain"); | ||
| 86 | } | ||
| 87 | } | ||
| 88 | end: | ||
| 89 | return ret; | ||
| 90 | } | ||
| 91 | EXPORT_SYMBOL(tcf_action_check_ctrlact); | ||
| 92 | |||
| 93 | struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action, | ||
| 94 | struct tcf_chain *goto_chain) | ||
| 95 | { | ||
| 96 | a->tcfa_action = action; | ||
| 97 | rcu_swap_protected(a->goto_chain, goto_chain, 1); | ||
| 98 | return goto_chain; | ||
| 99 | } | ||
| 100 | EXPORT_SYMBOL(tcf_action_set_ctrlact); | ||
| 101 | |||
| 74 | /* XXX: For standalone actions, we don't need a RCU grace period either, because | 102 | /* XXX: For standalone actions, we don't need a RCU grace period either, because |
| 75 | * actions are always connected to filters and filters are already destroyed in | 103 | * actions are always connected to filters and filters are already destroyed in |
| 76 | * RCU callbacks, so after a RCU grace period actions are already disconnected | 104 | * RCU callbacks, so after a RCU grace period actions are already disconnected |
| @@ -78,13 +106,15 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie, | |||
| 78 | */ | 106 | */ |
| 79 | static void free_tcf(struct tc_action *p) | 107 | static void free_tcf(struct tc_action *p) |
| 80 | { | 108 | { |
| 109 | struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1); | ||
| 110 | |||
| 81 | free_percpu(p->cpu_bstats); | 111 | free_percpu(p->cpu_bstats); |
| 82 | free_percpu(p->cpu_bstats_hw); | 112 | free_percpu(p->cpu_bstats_hw); |
| 83 | free_percpu(p->cpu_qstats); | 113 | free_percpu(p->cpu_qstats); |
| 84 | 114 | ||
| 85 | tcf_set_action_cookie(&p->act_cookie, NULL); | 115 | tcf_set_action_cookie(&p->act_cookie, NULL); |
| 86 | if (p->goto_chain) | 116 | if (chain) |
| 87 | tcf_action_goto_chain_fini(p); | 117 | tcf_chain_put_by_act(chain); |
| 88 | 118 | ||
| 89 | kfree(p); | 119 | kfree(p); |
| 90 | } | 120 | } |
| @@ -654,6 +684,10 @@ repeat: | |||
| 654 | return TC_ACT_OK; | 684 | return TC_ACT_OK; |
| 655 | } | 685 | } |
| 656 | } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) { | 686 | } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) { |
| 687 | if (unlikely(!rcu_access_pointer(a->goto_chain))) { | ||
| 688 | net_warn_ratelimited("can't go to NULL chain!\n"); | ||
| 689 | return TC_ACT_SHOT; | ||
| 690 | } | ||
| 657 | tcf_action_goto_chain_exec(a, res); | 691 | tcf_action_goto_chain_exec(a, res); |
| 658 | } | 692 | } |
| 659 | 693 | ||
| @@ -800,15 +834,6 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb) | |||
| 800 | return c; | 834 | return c; |
| 801 | } | 835 | } |
| 802 | 836 | ||
| 803 | static bool tcf_action_valid(int action) | ||
| 804 | { | ||
| 805 | int opcode = TC_ACT_EXT_OPCODE(action); | ||
| 806 | |||
| 807 | if (!opcode) | ||
| 808 | return action <= TC_ACT_VALUE_MAX; | ||
| 809 | return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC; | ||
| 810 | } | ||
| 811 | |||
| 812 | struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, | 837 | struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, |
| 813 | struct nlattr *nla, struct nlattr *est, | 838 | struct nlattr *nla, struct nlattr *est, |
| 814 | char *name, int ovr, int bind, | 839 | char *name, int ovr, int bind, |
| @@ -890,10 +915,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, | |||
| 890 | /* backward compatibility for policer */ | 915 | /* backward compatibility for policer */ |
| 891 | if (name == NULL) | 916 | if (name == NULL) |
| 892 | err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind, | 917 | err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind, |
| 893 | rtnl_held, extack); | 918 | rtnl_held, tp, extack); |
| 894 | else | 919 | else |
| 895 | err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held, | 920 | err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held, |
| 896 | extack); | 921 | tp, extack); |
| 897 | if (err < 0) | 922 | if (err < 0) |
| 898 | goto err_mod; | 923 | goto err_mod; |
| 899 | 924 | ||
| @@ -907,18 +932,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, | |||
| 907 | if (err != ACT_P_CREATED) | 932 | if (err != ACT_P_CREATED) |
| 908 | module_put(a_o->owner); | 933 | module_put(a_o->owner); |
| 909 | 934 | ||
| 910 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { | 935 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) && |
| 911 | err = tcf_action_goto_chain_init(a, tp); | 936 | !rcu_access_pointer(a->goto_chain)) { |
| 912 | if (err) { | ||
| 913 | tcf_action_destroy_1(a, bind); | ||
| 914 | NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); | ||
| 915 | return ERR_PTR(err); | ||
| 916 | } | ||
| 917 | } | ||
| 918 | |||
| 919 | if (!tcf_action_valid(a->tcfa_action)) { | ||
| 920 | tcf_action_destroy_1(a, bind); | 937 | tcf_action_destroy_1(a, bind); |
| 921 | NL_SET_ERR_MSG(extack, "Invalid control action value"); | 938 | NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain"); |
| 922 | return ERR_PTR(-EINVAL); | 939 | return ERR_PTR(-EINVAL); |
| 923 | } | 940 | } |
| 924 | 941 | ||
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index aa5c38d11a30..3841156aa09f 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include <net/netlink.h> | 18 | #include <net/netlink.h> |
| 19 | #include <net/pkt_sched.h> | 19 | #include <net/pkt_sched.h> |
| 20 | #include <net/pkt_cls.h> | ||
| 20 | 21 | ||
| 21 | #include <linux/tc_act/tc_bpf.h> | 22 | #include <linux/tc_act/tc_bpf.h> |
| 22 | #include <net/tc_act/tc_bpf.h> | 23 | #include <net/tc_act/tc_bpf.h> |
| @@ -278,10 +279,11 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog, | |||
| 278 | static int tcf_bpf_init(struct net *net, struct nlattr *nla, | 279 | static int tcf_bpf_init(struct net *net, struct nlattr *nla, |
| 279 | struct nlattr *est, struct tc_action **act, | 280 | struct nlattr *est, struct tc_action **act, |
| 280 | int replace, int bind, bool rtnl_held, | 281 | int replace, int bind, bool rtnl_held, |
| 281 | struct netlink_ext_ack *extack) | 282 | struct tcf_proto *tp, struct netlink_ext_ack *extack) |
| 282 | { | 283 | { |
| 283 | struct tc_action_net *tn = net_generic(net, bpf_net_id); | 284 | struct tc_action_net *tn = net_generic(net, bpf_net_id); |
| 284 | struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; | 285 | struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; |
| 286 | struct tcf_chain *goto_ch = NULL; | ||
| 285 | struct tcf_bpf_cfg cfg, old; | 287 | struct tcf_bpf_cfg cfg, old; |
| 286 | struct tc_act_bpf *parm; | 288 | struct tc_act_bpf *parm; |
| 287 | struct tcf_bpf *prog; | 289 | struct tcf_bpf *prog; |
| @@ -323,12 +325,16 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, | |||
| 323 | return ret; | 325 | return ret; |
| 324 | } | 326 | } |
| 325 | 327 | ||
| 328 | ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 329 | if (ret < 0) | ||
| 330 | goto release_idr; | ||
| 331 | |||
| 326 | is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS]; | 332 | is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS]; |
| 327 | is_ebpf = tb[TCA_ACT_BPF_FD]; | 333 | is_ebpf = tb[TCA_ACT_BPF_FD]; |
| 328 | 334 | ||
| 329 | if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) { | 335 | if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) { |
| 330 | ret = -EINVAL; | 336 | ret = -EINVAL; |
| 331 | goto out; | 337 | goto put_chain; |
| 332 | } | 338 | } |
| 333 | 339 | ||
| 334 | memset(&cfg, 0, sizeof(cfg)); | 340 | memset(&cfg, 0, sizeof(cfg)); |
| @@ -336,7 +342,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, | |||
| 336 | ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) : | 342 | ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) : |
| 337 | tcf_bpf_init_from_efd(tb, &cfg); | 343 | tcf_bpf_init_from_efd(tb, &cfg); |
| 338 | if (ret < 0) | 344 | if (ret < 0) |
| 339 | goto out; | 345 | goto put_chain; |
| 340 | 346 | ||
| 341 | prog = to_bpf(*act); | 347 | prog = to_bpf(*act); |
| 342 | 348 | ||
| @@ -350,10 +356,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, | |||
| 350 | if (cfg.bpf_num_ops) | 356 | if (cfg.bpf_num_ops) |
| 351 | prog->bpf_num_ops = cfg.bpf_num_ops; | 357 | prog->bpf_num_ops = cfg.bpf_num_ops; |
| 352 | 358 | ||
| 353 | prog->tcf_action = parm->action; | 359 | goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch); |
| 354 | rcu_assign_pointer(prog->filter, cfg.filter); | 360 | rcu_assign_pointer(prog->filter, cfg.filter); |
| 355 | spin_unlock_bh(&prog->tcf_lock); | 361 | spin_unlock_bh(&prog->tcf_lock); |
| 356 | 362 | ||
| 363 | if (goto_ch) | ||
| 364 | tcf_chain_put_by_act(goto_ch); | ||
| 365 | |||
| 357 | if (res == ACT_P_CREATED) { | 366 | if (res == ACT_P_CREATED) { |
| 358 | tcf_idr_insert(tn, *act); | 367 | tcf_idr_insert(tn, *act); |
| 359 | } else { | 368 | } else { |
| @@ -363,9 +372,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, | |||
| 363 | } | 372 | } |
| 364 | 373 | ||
| 365 | return res; | 374 | return res; |
| 366 | out: | ||
| 367 | tcf_idr_release(*act, bind); | ||
| 368 | 375 | ||
| 376 | put_chain: | ||
| 377 | if (goto_ch) | ||
| 378 | tcf_chain_put_by_act(goto_ch); | ||
| 379 | |||
| 380 | release_idr: | ||
| 381 | tcf_idr_release(*act, bind); | ||
| 369 | return ret; | 382 | return ret; |
| 370 | } | 383 | } |
| 371 | 384 | ||
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 5d24993cccfe..32ae0cd6e31c 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <net/netlink.h> | 21 | #include <net/netlink.h> |
| 22 | #include <net/pkt_sched.h> | 22 | #include <net/pkt_sched.h> |
| 23 | #include <net/act_api.h> | 23 | #include <net/act_api.h> |
| 24 | #include <net/pkt_cls.h> | ||
| 24 | #include <uapi/linux/tc_act/tc_connmark.h> | 25 | #include <uapi/linux/tc_act/tc_connmark.h> |
| 25 | #include <net/tc_act/tc_connmark.h> | 26 | #include <net/tc_act/tc_connmark.h> |
| 26 | 27 | ||
| @@ -97,13 +98,15 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = { | |||
| 97 | static int tcf_connmark_init(struct net *net, struct nlattr *nla, | 98 | static int tcf_connmark_init(struct net *net, struct nlattr *nla, |
| 98 | struct nlattr *est, struct tc_action **a, | 99 | struct nlattr *est, struct tc_action **a, |
| 99 | int ovr, int bind, bool rtnl_held, | 100 | int ovr, int bind, bool rtnl_held, |
| 101 | struct tcf_proto *tp, | ||
| 100 | struct netlink_ext_ack *extack) | 102 | struct netlink_ext_ack *extack) |
| 101 | { | 103 | { |
| 102 | struct tc_action_net *tn = net_generic(net, connmark_net_id); | 104 | struct tc_action_net *tn = net_generic(net, connmark_net_id); |
| 103 | struct nlattr *tb[TCA_CONNMARK_MAX + 1]; | 105 | struct nlattr *tb[TCA_CONNMARK_MAX + 1]; |
| 106 | struct tcf_chain *goto_ch = NULL; | ||
| 104 | struct tcf_connmark_info *ci; | 107 | struct tcf_connmark_info *ci; |
| 105 | struct tc_connmark *parm; | 108 | struct tc_connmark *parm; |
| 106 | int ret = 0; | 109 | int ret = 0, err; |
| 107 | 110 | ||
| 108 | if (!nla) | 111 | if (!nla) |
| 109 | return -EINVAL; | 112 | return -EINVAL; |
| @@ -128,7 +131,11 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, | |||
| 128 | } | 131 | } |
| 129 | 132 | ||
| 130 | ci = to_connmark(*a); | 133 | ci = to_connmark(*a); |
| 131 | ci->tcf_action = parm->action; | 134 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, |
| 135 | extack); | ||
| 136 | if (err < 0) | ||
| 137 | goto release_idr; | ||
| 138 | tcf_action_set_ctrlact(*a, parm->action, goto_ch); | ||
| 132 | ci->net = net; | 139 | ci->net = net; |
| 133 | ci->zone = parm->zone; | 140 | ci->zone = parm->zone; |
| 134 | 141 | ||
| @@ -142,15 +149,24 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, | |||
| 142 | tcf_idr_release(*a, bind); | 149 | tcf_idr_release(*a, bind); |
| 143 | return -EEXIST; | 150 | return -EEXIST; |
| 144 | } | 151 | } |
| 152 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, | ||
| 153 | extack); | ||
| 154 | if (err < 0) | ||
| 155 | goto release_idr; | ||
| 145 | /* replacing action and zone */ | 156 | /* replacing action and zone */ |
| 146 | spin_lock_bh(&ci->tcf_lock); | 157 | spin_lock_bh(&ci->tcf_lock); |
| 147 | ci->tcf_action = parm->action; | 158 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 148 | ci->zone = parm->zone; | 159 | ci->zone = parm->zone; |
| 149 | spin_unlock_bh(&ci->tcf_lock); | 160 | spin_unlock_bh(&ci->tcf_lock); |
| 161 | if (goto_ch) | ||
| 162 | tcf_chain_put_by_act(goto_ch); | ||
| 150 | ret = 0; | 163 | ret = 0; |
| 151 | } | 164 | } |
| 152 | 165 | ||
| 153 | return ret; | 166 | return ret; |
| 167 | release_idr: | ||
| 168 | tcf_idr_release(*a, bind); | ||
| 169 | return err; | ||
| 154 | } | 170 | } |
| 155 | 171 | ||
| 156 | static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, | 172 | static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, |
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index c79aca29505e..0c77e7bdf6d5 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <net/sctp/checksum.h> | 33 | #include <net/sctp/checksum.h> |
| 34 | 34 | ||
| 35 | #include <net/act_api.h> | 35 | #include <net/act_api.h> |
| 36 | #include <net/pkt_cls.h> | ||
| 36 | 37 | ||
| 37 | #include <linux/tc_act/tc_csum.h> | 38 | #include <linux/tc_act/tc_csum.h> |
| 38 | #include <net/tc_act/tc_csum.h> | 39 | #include <net/tc_act/tc_csum.h> |
| @@ -46,12 +47,13 @@ static struct tc_action_ops act_csum_ops; | |||
| 46 | 47 | ||
| 47 | static int tcf_csum_init(struct net *net, struct nlattr *nla, | 48 | static int tcf_csum_init(struct net *net, struct nlattr *nla, |
| 48 | struct nlattr *est, struct tc_action **a, int ovr, | 49 | struct nlattr *est, struct tc_action **a, int ovr, |
| 49 | int bind, bool rtnl_held, | 50 | int bind, bool rtnl_held, struct tcf_proto *tp, |
| 50 | struct netlink_ext_ack *extack) | 51 | struct netlink_ext_ack *extack) |
| 51 | { | 52 | { |
| 52 | struct tc_action_net *tn = net_generic(net, csum_net_id); | 53 | struct tc_action_net *tn = net_generic(net, csum_net_id); |
| 53 | struct tcf_csum_params *params_new; | 54 | struct tcf_csum_params *params_new; |
| 54 | struct nlattr *tb[TCA_CSUM_MAX + 1]; | 55 | struct nlattr *tb[TCA_CSUM_MAX + 1]; |
| 56 | struct tcf_chain *goto_ch = NULL; | ||
| 55 | struct tc_csum *parm; | 57 | struct tc_csum *parm; |
| 56 | struct tcf_csum *p; | 58 | struct tcf_csum *p; |
| 57 | int ret = 0, err; | 59 | int ret = 0, err; |
| @@ -87,21 +89,27 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, | |||
| 87 | return err; | 89 | return err; |
| 88 | } | 90 | } |
| 89 | 91 | ||
| 92 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 93 | if (err < 0) | ||
| 94 | goto release_idr; | ||
| 95 | |||
| 90 | p = to_tcf_csum(*a); | 96 | p = to_tcf_csum(*a); |
| 91 | 97 | ||
| 92 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); | 98 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); |
| 93 | if (unlikely(!params_new)) { | 99 | if (unlikely(!params_new)) { |
| 94 | tcf_idr_release(*a, bind); | 100 | err = -ENOMEM; |
| 95 | return -ENOMEM; | 101 | goto put_chain; |
| 96 | } | 102 | } |
| 97 | params_new->update_flags = parm->update_flags; | 103 | params_new->update_flags = parm->update_flags; |
| 98 | 104 | ||
| 99 | spin_lock_bh(&p->tcf_lock); | 105 | spin_lock_bh(&p->tcf_lock); |
| 100 | p->tcf_action = parm->action; | 106 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 101 | rcu_swap_protected(p->params, params_new, | 107 | rcu_swap_protected(p->params, params_new, |
| 102 | lockdep_is_held(&p->tcf_lock)); | 108 | lockdep_is_held(&p->tcf_lock)); |
| 103 | spin_unlock_bh(&p->tcf_lock); | 109 | spin_unlock_bh(&p->tcf_lock); |
| 104 | 110 | ||
| 111 | if (goto_ch) | ||
| 112 | tcf_chain_put_by_act(goto_ch); | ||
| 105 | if (params_new) | 113 | if (params_new) |
| 106 | kfree_rcu(params_new, rcu); | 114 | kfree_rcu(params_new, rcu); |
| 107 | 115 | ||
| @@ -109,6 +117,12 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, | |||
| 109 | tcf_idr_insert(tn, *a); | 117 | tcf_idr_insert(tn, *a); |
| 110 | 118 | ||
| 111 | return ret; | 119 | return ret; |
| 120 | put_chain: | ||
| 121 | if (goto_ch) | ||
| 122 | tcf_chain_put_by_act(goto_ch); | ||
| 123 | release_idr: | ||
| 124 | tcf_idr_release(*a, bind); | ||
| 125 | return err; | ||
| 112 | } | 126 | } |
| 113 | 127 | ||
| 114 | /** | 128 | /** |
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 93da0004e9f4..e540e31069d7 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
| 21 | #include <net/netlink.h> | 21 | #include <net/netlink.h> |
| 22 | #include <net/pkt_sched.h> | 22 | #include <net/pkt_sched.h> |
| 23 | #include <net/pkt_cls.h> | ||
| 23 | #include <linux/tc_act/tc_gact.h> | 24 | #include <linux/tc_act/tc_gact.h> |
| 24 | #include <net/tc_act/tc_gact.h> | 25 | #include <net/tc_act/tc_gact.h> |
| 25 | 26 | ||
| @@ -57,10 +58,11 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = { | |||
| 57 | static int tcf_gact_init(struct net *net, struct nlattr *nla, | 58 | static int tcf_gact_init(struct net *net, struct nlattr *nla, |
| 58 | struct nlattr *est, struct tc_action **a, | 59 | struct nlattr *est, struct tc_action **a, |
| 59 | int ovr, int bind, bool rtnl_held, | 60 | int ovr, int bind, bool rtnl_held, |
| 60 | struct netlink_ext_ack *extack) | 61 | struct tcf_proto *tp, struct netlink_ext_ack *extack) |
| 61 | { | 62 | { |
| 62 | struct tc_action_net *tn = net_generic(net, gact_net_id); | 63 | struct tc_action_net *tn = net_generic(net, gact_net_id); |
| 63 | struct nlattr *tb[TCA_GACT_MAX + 1]; | 64 | struct nlattr *tb[TCA_GACT_MAX + 1]; |
| 65 | struct tcf_chain *goto_ch = NULL; | ||
| 64 | struct tc_gact *parm; | 66 | struct tc_gact *parm; |
| 65 | struct tcf_gact *gact; | 67 | struct tcf_gact *gact; |
| 66 | int ret = 0; | 68 | int ret = 0; |
| @@ -116,10 +118,13 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, | |||
| 116 | return err; | 118 | return err; |
| 117 | } | 119 | } |
| 118 | 120 | ||
| 121 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 122 | if (err < 0) | ||
| 123 | goto release_idr; | ||
| 119 | gact = to_gact(*a); | 124 | gact = to_gact(*a); |
| 120 | 125 | ||
| 121 | spin_lock_bh(&gact->tcf_lock); | 126 | spin_lock_bh(&gact->tcf_lock); |
| 122 | gact->tcf_action = parm->action; | 127 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 123 | #ifdef CONFIG_GACT_PROB | 128 | #ifdef CONFIG_GACT_PROB |
| 124 | if (p_parm) { | 129 | if (p_parm) { |
| 125 | gact->tcfg_paction = p_parm->paction; | 130 | gact->tcfg_paction = p_parm->paction; |
| @@ -133,9 +138,15 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, | |||
| 133 | #endif | 138 | #endif |
| 134 | spin_unlock_bh(&gact->tcf_lock); | 139 | spin_unlock_bh(&gact->tcf_lock); |
| 135 | 140 | ||
| 141 | if (goto_ch) | ||
| 142 | tcf_chain_put_by_act(goto_ch); | ||
| 143 | |||
| 136 | if (ret == ACT_P_CREATED) | 144 | if (ret == ACT_P_CREATED) |
| 137 | tcf_idr_insert(tn, *a); | 145 | tcf_idr_insert(tn, *a); |
| 138 | return ret; | 146 | return ret; |
| 147 | release_idr: | ||
| 148 | tcf_idr_release(*a, bind); | ||
| 149 | return err; | ||
| 139 | } | 150 | } |
| 140 | 151 | ||
| 141 | static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a, | 152 | static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a, |
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 9b1f2b3990ee..31c6ffb6abe7 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <net/net_namespace.h> | 29 | #include <net/net_namespace.h> |
| 30 | #include <net/netlink.h> | 30 | #include <net/netlink.h> |
| 31 | #include <net/pkt_sched.h> | 31 | #include <net/pkt_sched.h> |
| 32 | #include <net/pkt_cls.h> | ||
| 32 | #include <uapi/linux/tc_act/tc_ife.h> | 33 | #include <uapi/linux/tc_act/tc_ife.h> |
| 33 | #include <net/tc_act/tc_ife.h> | 34 | #include <net/tc_act/tc_ife.h> |
| 34 | #include <linux/etherdevice.h> | 35 | #include <linux/etherdevice.h> |
| @@ -469,11 +470,12 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, | |||
| 469 | static int tcf_ife_init(struct net *net, struct nlattr *nla, | 470 | static int tcf_ife_init(struct net *net, struct nlattr *nla, |
| 470 | struct nlattr *est, struct tc_action **a, | 471 | struct nlattr *est, struct tc_action **a, |
| 471 | int ovr, int bind, bool rtnl_held, | 472 | int ovr, int bind, bool rtnl_held, |
| 472 | struct netlink_ext_ack *extack) | 473 | struct tcf_proto *tp, struct netlink_ext_ack *extack) |
| 473 | { | 474 | { |
| 474 | struct tc_action_net *tn = net_generic(net, ife_net_id); | 475 | struct tc_action_net *tn = net_generic(net, ife_net_id); |
| 475 | struct nlattr *tb[TCA_IFE_MAX + 1]; | 476 | struct nlattr *tb[TCA_IFE_MAX + 1]; |
| 476 | struct nlattr *tb2[IFE_META_MAX + 1]; | 477 | struct nlattr *tb2[IFE_META_MAX + 1]; |
| 478 | struct tcf_chain *goto_ch = NULL; | ||
| 477 | struct tcf_ife_params *p; | 479 | struct tcf_ife_params *p; |
| 478 | struct tcf_ife_info *ife; | 480 | struct tcf_ife_info *ife; |
| 479 | u16 ife_type = ETH_P_IFE; | 481 | u16 ife_type = ETH_P_IFE; |
| @@ -531,6 +533,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, | |||
| 531 | } | 533 | } |
| 532 | 534 | ||
| 533 | ife = to_ife(*a); | 535 | ife = to_ife(*a); |
| 536 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 537 | if (err < 0) | ||
| 538 | goto release_idr; | ||
| 539 | |||
| 534 | p->flags = parm->flags; | 540 | p->flags = parm->flags; |
| 535 | 541 | ||
| 536 | if (parm->flags & IFE_ENCODE) { | 542 | if (parm->flags & IFE_ENCODE) { |
| @@ -563,13 +569,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, | |||
| 563 | if (tb[TCA_IFE_METALST]) { | 569 | if (tb[TCA_IFE_METALST]) { |
| 564 | err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST], | 570 | err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST], |
| 565 | NULL, NULL); | 571 | NULL, NULL); |
| 566 | if (err) { | 572 | if (err) |
| 567 | metadata_parse_err: | 573 | goto metadata_parse_err; |
| 568 | tcf_idr_release(*a, bind); | ||
| 569 | kfree(p); | ||
| 570 | return err; | ||
| 571 | } | ||
| 572 | |||
| 573 | err = populate_metalist(ife, tb2, exists, rtnl_held); | 574 | err = populate_metalist(ife, tb2, exists, rtnl_held); |
| 574 | if (err) | 575 | if (err) |
| 575 | goto metadata_parse_err; | 576 | goto metadata_parse_err; |
| @@ -581,21 +582,20 @@ metadata_parse_err: | |||
| 581 | * going to bail out | 582 | * going to bail out |
| 582 | */ | 583 | */ |
| 583 | err = use_all_metadata(ife, exists); | 584 | err = use_all_metadata(ife, exists); |
| 584 | if (err) { | 585 | if (err) |
| 585 | tcf_idr_release(*a, bind); | 586 | goto metadata_parse_err; |
| 586 | kfree(p); | ||
| 587 | return err; | ||
| 588 | } | ||
| 589 | } | 587 | } |
| 590 | 588 | ||
| 591 | if (exists) | 589 | if (exists) |
| 592 | spin_lock_bh(&ife->tcf_lock); | 590 | spin_lock_bh(&ife->tcf_lock); |
| 593 | ife->tcf_action = parm->action; | ||
| 594 | /* protected by tcf_lock when modifying existing action */ | 591 | /* protected by tcf_lock when modifying existing action */ |
| 592 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); | ||
| 595 | rcu_swap_protected(ife->params, p, 1); | 593 | rcu_swap_protected(ife->params, p, 1); |
| 596 | 594 | ||
| 597 | if (exists) | 595 | if (exists) |
| 598 | spin_unlock_bh(&ife->tcf_lock); | 596 | spin_unlock_bh(&ife->tcf_lock); |
| 597 | if (goto_ch) | ||
| 598 | tcf_chain_put_by_act(goto_ch); | ||
| 599 | if (p) | 599 | if (p) |
| 600 | kfree_rcu(p, rcu); | 600 | kfree_rcu(p, rcu); |
| 601 | 601 | ||
| @@ -603,6 +603,13 @@ metadata_parse_err: | |||
| 603 | tcf_idr_insert(tn, *a); | 603 | tcf_idr_insert(tn, *a); |
| 604 | 604 | ||
| 605 | return ret; | 605 | return ret; |
| 606 | metadata_parse_err: | ||
| 607 | if (goto_ch) | ||
| 608 | tcf_chain_put_by_act(goto_ch); | ||
| 609 | release_idr: | ||
| 610 | kfree(p); | ||
| 611 | tcf_idr_release(*a, bind); | ||
| 612 | return err; | ||
| 606 | } | 613 | } |
| 607 | 614 | ||
| 608 | static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, | 615 | static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 98f5b6ea77b4..04a0b5c61194 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
| @@ -97,7 +97,8 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { | |||
| 97 | 97 | ||
| 98 | static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, | 98 | static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, |
| 99 | struct nlattr *est, struct tc_action **a, | 99 | struct nlattr *est, struct tc_action **a, |
| 100 | const struct tc_action_ops *ops, int ovr, int bind) | 100 | const struct tc_action_ops *ops, int ovr, int bind, |
| 101 | struct tcf_proto *tp) | ||
| 101 | { | 102 | { |
| 102 | struct tc_action_net *tn = net_generic(net, id); | 103 | struct tc_action_net *tn = net_generic(net, id); |
| 103 | struct nlattr *tb[TCA_IPT_MAX + 1]; | 104 | struct nlattr *tb[TCA_IPT_MAX + 1]; |
| @@ -205,20 +206,20 @@ err1: | |||
| 205 | 206 | ||
| 206 | static int tcf_ipt_init(struct net *net, struct nlattr *nla, | 207 | static int tcf_ipt_init(struct net *net, struct nlattr *nla, |
| 207 | struct nlattr *est, struct tc_action **a, int ovr, | 208 | struct nlattr *est, struct tc_action **a, int ovr, |
| 208 | int bind, bool rtnl_held, | 209 | int bind, bool rtnl_held, struct tcf_proto *tp, |
| 209 | struct netlink_ext_ack *extack) | 210 | struct netlink_ext_ack *extack) |
| 210 | { | 211 | { |
| 211 | return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, | 212 | return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, |
| 212 | bind); | 213 | bind, tp); |
| 213 | } | 214 | } |
| 214 | 215 | ||
| 215 | static int tcf_xt_init(struct net *net, struct nlattr *nla, | 216 | static int tcf_xt_init(struct net *net, struct nlattr *nla, |
| 216 | struct nlattr *est, struct tc_action **a, int ovr, | 217 | struct nlattr *est, struct tc_action **a, int ovr, |
| 217 | int bind, bool unlocked, | 218 | int bind, bool unlocked, struct tcf_proto *tp, |
| 218 | struct netlink_ext_ack *extack) | 219 | struct netlink_ext_ack *extack) |
| 219 | { | 220 | { |
| 220 | return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr, | 221 | return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr, |
| 221 | bind); | 222 | bind, tp); |
| 222 | } | 223 | } |
| 223 | 224 | ||
| 224 | static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a, | 225 | static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a, |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 6692fd054617..17cc6bd4c57c 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
| @@ -94,10 +94,12 @@ static struct tc_action_ops act_mirred_ops; | |||
| 94 | static int tcf_mirred_init(struct net *net, struct nlattr *nla, | 94 | static int tcf_mirred_init(struct net *net, struct nlattr *nla, |
| 95 | struct nlattr *est, struct tc_action **a, | 95 | struct nlattr *est, struct tc_action **a, |
| 96 | int ovr, int bind, bool rtnl_held, | 96 | int ovr, int bind, bool rtnl_held, |
| 97 | struct tcf_proto *tp, | ||
| 97 | struct netlink_ext_ack *extack) | 98 | struct netlink_ext_ack *extack) |
| 98 | { | 99 | { |
| 99 | struct tc_action_net *tn = net_generic(net, mirred_net_id); | 100 | struct tc_action_net *tn = net_generic(net, mirred_net_id); |
| 100 | struct nlattr *tb[TCA_MIRRED_MAX + 1]; | 101 | struct nlattr *tb[TCA_MIRRED_MAX + 1]; |
| 102 | struct tcf_chain *goto_ch = NULL; | ||
| 101 | bool mac_header_xmit = false; | 103 | bool mac_header_xmit = false; |
| 102 | struct tc_mirred *parm; | 104 | struct tc_mirred *parm; |
| 103 | struct tcf_mirred *m; | 105 | struct tcf_mirred *m; |
| @@ -157,18 +159,23 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, | |||
| 157 | tcf_idr_release(*a, bind); | 159 | tcf_idr_release(*a, bind); |
| 158 | return -EEXIST; | 160 | return -EEXIST; |
| 159 | } | 161 | } |
| 162 | |||
| 160 | m = to_mirred(*a); | 163 | m = to_mirred(*a); |
| 164 | if (ret == ACT_P_CREATED) | ||
| 165 | INIT_LIST_HEAD(&m->tcfm_list); | ||
| 166 | |||
| 167 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 168 | if (err < 0) | ||
| 169 | goto release_idr; | ||
| 161 | 170 | ||
| 162 | spin_lock_bh(&m->tcf_lock); | 171 | spin_lock_bh(&m->tcf_lock); |
| 163 | m->tcf_action = parm->action; | ||
| 164 | m->tcfm_eaction = parm->eaction; | ||
| 165 | 172 | ||
| 166 | if (parm->ifindex) { | 173 | if (parm->ifindex) { |
| 167 | dev = dev_get_by_index(net, parm->ifindex); | 174 | dev = dev_get_by_index(net, parm->ifindex); |
| 168 | if (!dev) { | 175 | if (!dev) { |
| 169 | spin_unlock_bh(&m->tcf_lock); | 176 | spin_unlock_bh(&m->tcf_lock); |
| 170 | tcf_idr_release(*a, bind); | 177 | err = -ENODEV; |
| 171 | return -ENODEV; | 178 | goto put_chain; |
| 172 | } | 179 | } |
| 173 | mac_header_xmit = dev_is_mac_header_xmit(dev); | 180 | mac_header_xmit = dev_is_mac_header_xmit(dev); |
| 174 | rcu_swap_protected(m->tcfm_dev, dev, | 181 | rcu_swap_protected(m->tcfm_dev, dev, |
| @@ -177,7 +184,11 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, | |||
| 177 | dev_put(dev); | 184 | dev_put(dev); |
| 178 | m->tcfm_mac_header_xmit = mac_header_xmit; | 185 | m->tcfm_mac_header_xmit = mac_header_xmit; |
| 179 | } | 186 | } |
| 187 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); | ||
| 188 | m->tcfm_eaction = parm->eaction; | ||
| 180 | spin_unlock_bh(&m->tcf_lock); | 189 | spin_unlock_bh(&m->tcf_lock); |
| 190 | if (goto_ch) | ||
| 191 | tcf_chain_put_by_act(goto_ch); | ||
| 181 | 192 | ||
| 182 | if (ret == ACT_P_CREATED) { | 193 | if (ret == ACT_P_CREATED) { |
| 183 | spin_lock(&mirred_list_lock); | 194 | spin_lock(&mirred_list_lock); |
| @@ -188,6 +199,12 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, | |||
| 188 | } | 199 | } |
| 189 | 200 | ||
| 190 | return ret; | 201 | return ret; |
| 202 | put_chain: | ||
| 203 | if (goto_ch) | ||
| 204 | tcf_chain_put_by_act(goto_ch); | ||
| 205 | release_idr: | ||
| 206 | tcf_idr_release(*a, bind); | ||
| 207 | return err; | ||
| 191 | } | 208 | } |
| 192 | 209 | ||
| 193 | static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, | 210 | static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 543eab9193f1..e91bb8eb81ec 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
| 22 | #include <linux/tc_act/tc_nat.h> | 22 | #include <linux/tc_act/tc_nat.h> |
| 23 | #include <net/act_api.h> | 23 | #include <net/act_api.h> |
| 24 | #include <net/pkt_cls.h> | ||
| 24 | #include <net/icmp.h> | 25 | #include <net/icmp.h> |
| 25 | #include <net/ip.h> | 26 | #include <net/ip.h> |
| 26 | #include <net/netlink.h> | 27 | #include <net/netlink.h> |
| @@ -38,10 +39,12 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = { | |||
| 38 | 39 | ||
| 39 | static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, | 40 | static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, |
| 40 | struct tc_action **a, int ovr, int bind, | 41 | struct tc_action **a, int ovr, int bind, |
| 41 | bool rtnl_held, struct netlink_ext_ack *extack) | 42 | bool rtnl_held, struct tcf_proto *tp, |
| 43 | struct netlink_ext_ack *extack) | ||
| 42 | { | 44 | { |
| 43 | struct tc_action_net *tn = net_generic(net, nat_net_id); | 45 | struct tc_action_net *tn = net_generic(net, nat_net_id); |
| 44 | struct nlattr *tb[TCA_NAT_MAX + 1]; | 46 | struct nlattr *tb[TCA_NAT_MAX + 1]; |
| 47 | struct tcf_chain *goto_ch = NULL; | ||
| 45 | struct tc_nat *parm; | 48 | struct tc_nat *parm; |
| 46 | int ret = 0, err; | 49 | int ret = 0, err; |
| 47 | struct tcf_nat *p; | 50 | struct tcf_nat *p; |
| @@ -76,6 +79,9 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, | |||
| 76 | } else { | 79 | } else { |
| 77 | return err; | 80 | return err; |
| 78 | } | 81 | } |
| 82 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 83 | if (err < 0) | ||
| 84 | goto release_idr; | ||
| 79 | p = to_tcf_nat(*a); | 85 | p = to_tcf_nat(*a); |
| 80 | 86 | ||
| 81 | spin_lock_bh(&p->tcf_lock); | 87 | spin_lock_bh(&p->tcf_lock); |
| @@ -84,13 +90,18 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, | |||
| 84 | p->mask = parm->mask; | 90 | p->mask = parm->mask; |
| 85 | p->flags = parm->flags; | 91 | p->flags = parm->flags; |
| 86 | 92 | ||
| 87 | p->tcf_action = parm->action; | 93 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 88 | spin_unlock_bh(&p->tcf_lock); | 94 | spin_unlock_bh(&p->tcf_lock); |
| 95 | if (goto_ch) | ||
| 96 | tcf_chain_put_by_act(goto_ch); | ||
| 89 | 97 | ||
| 90 | if (ret == ACT_P_CREATED) | 98 | if (ret == ACT_P_CREATED) |
| 91 | tcf_idr_insert(tn, *a); | 99 | tcf_idr_insert(tn, *a); |
| 92 | 100 | ||
| 93 | return ret; | 101 | return ret; |
| 102 | release_idr: | ||
| 103 | tcf_idr_release(*a, bind); | ||
| 104 | return err; | ||
| 94 | } | 105 | } |
| 95 | 106 | ||
| 96 | static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a, | 107 | static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a, |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index a80373878df7..287793abfaf9 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/tc_act/tc_pedit.h> | 23 | #include <linux/tc_act/tc_pedit.h> |
| 24 | #include <net/tc_act/tc_pedit.h> | 24 | #include <net/tc_act/tc_pedit.h> |
| 25 | #include <uapi/linux/tc_act/tc_pedit.h> | 25 | #include <uapi/linux/tc_act/tc_pedit.h> |
| 26 | #include <net/pkt_cls.h> | ||
| 26 | 27 | ||
| 27 | static unsigned int pedit_net_id; | 28 | static unsigned int pedit_net_id; |
| 28 | static struct tc_action_ops act_pedit_ops; | 29 | static struct tc_action_ops act_pedit_ops; |
| @@ -138,10 +139,11 @@ nla_failure: | |||
| 138 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, | 139 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, |
| 139 | struct nlattr *est, struct tc_action **a, | 140 | struct nlattr *est, struct tc_action **a, |
| 140 | int ovr, int bind, bool rtnl_held, | 141 | int ovr, int bind, bool rtnl_held, |
| 141 | struct netlink_ext_ack *extack) | 142 | struct tcf_proto *tp, struct netlink_ext_ack *extack) |
| 142 | { | 143 | { |
| 143 | struct tc_action_net *tn = net_generic(net, pedit_net_id); | 144 | struct tc_action_net *tn = net_generic(net, pedit_net_id); |
| 144 | struct nlattr *tb[TCA_PEDIT_MAX + 1]; | 145 | struct nlattr *tb[TCA_PEDIT_MAX + 1]; |
| 146 | struct tcf_chain *goto_ch = NULL; | ||
| 145 | struct tc_pedit_key *keys = NULL; | 147 | struct tc_pedit_key *keys = NULL; |
| 146 | struct tcf_pedit_key_ex *keys_ex; | 148 | struct tcf_pedit_key_ex *keys_ex; |
| 147 | struct tc_pedit *parm; | 149 | struct tc_pedit *parm; |
| @@ -205,6 +207,11 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, | |||
| 205 | goto out_free; | 207 | goto out_free; |
| 206 | } | 208 | } |
| 207 | 209 | ||
| 210 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 211 | if (err < 0) { | ||
| 212 | ret = err; | ||
| 213 | goto out_release; | ||
| 214 | } | ||
| 208 | p = to_pedit(*a); | 215 | p = to_pedit(*a); |
| 209 | spin_lock_bh(&p->tcf_lock); | 216 | spin_lock_bh(&p->tcf_lock); |
| 210 | 217 | ||
| @@ -214,7 +221,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, | |||
| 214 | if (!keys) { | 221 | if (!keys) { |
| 215 | spin_unlock_bh(&p->tcf_lock); | 222 | spin_unlock_bh(&p->tcf_lock); |
| 216 | ret = -ENOMEM; | 223 | ret = -ENOMEM; |
| 217 | goto out_release; | 224 | goto put_chain; |
| 218 | } | 225 | } |
| 219 | kfree(p->tcfp_keys); | 226 | kfree(p->tcfp_keys); |
| 220 | p->tcfp_keys = keys; | 227 | p->tcfp_keys = keys; |
| @@ -223,16 +230,21 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, | |||
| 223 | memcpy(p->tcfp_keys, parm->keys, ksize); | 230 | memcpy(p->tcfp_keys, parm->keys, ksize); |
| 224 | 231 | ||
| 225 | p->tcfp_flags = parm->flags; | 232 | p->tcfp_flags = parm->flags; |
| 226 | p->tcf_action = parm->action; | 233 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 227 | 234 | ||
| 228 | kfree(p->tcfp_keys_ex); | 235 | kfree(p->tcfp_keys_ex); |
| 229 | p->tcfp_keys_ex = keys_ex; | 236 | p->tcfp_keys_ex = keys_ex; |
| 230 | 237 | ||
| 231 | spin_unlock_bh(&p->tcf_lock); | 238 | spin_unlock_bh(&p->tcf_lock); |
| 239 | if (goto_ch) | ||
| 240 | tcf_chain_put_by_act(goto_ch); | ||
| 232 | if (ret == ACT_P_CREATED) | 241 | if (ret == ACT_P_CREATED) |
| 233 | tcf_idr_insert(tn, *a); | 242 | tcf_idr_insert(tn, *a); |
| 234 | return ret; | 243 | return ret; |
| 235 | 244 | ||
| 245 | put_chain: | ||
| 246 | if (goto_ch) | ||
| 247 | tcf_chain_put_by_act(goto_ch); | ||
| 236 | out_release: | 248 | out_release: |
| 237 | tcf_idr_release(*a, bind); | 249 | tcf_idr_release(*a, bind); |
| 238 | out_free: | 250 | out_free: |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 8271a6263824..2b8581f6ab51 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
| 22 | #include <net/act_api.h> | 22 | #include <net/act_api.h> |
| 23 | #include <net/netlink.h> | 23 | #include <net/netlink.h> |
| 24 | #include <net/pkt_cls.h> | ||
| 24 | 25 | ||
| 25 | struct tcf_police_params { | 26 | struct tcf_police_params { |
| 26 | int tcfp_result; | 27 | int tcfp_result; |
| @@ -83,10 +84,12 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { | |||
| 83 | static int tcf_police_init(struct net *net, struct nlattr *nla, | 84 | static int tcf_police_init(struct net *net, struct nlattr *nla, |
| 84 | struct nlattr *est, struct tc_action **a, | 85 | struct nlattr *est, struct tc_action **a, |
| 85 | int ovr, int bind, bool rtnl_held, | 86 | int ovr, int bind, bool rtnl_held, |
| 87 | struct tcf_proto *tp, | ||
| 86 | struct netlink_ext_ack *extack) | 88 | struct netlink_ext_ack *extack) |
| 87 | { | 89 | { |
| 88 | int ret = 0, tcfp_result = TC_ACT_OK, err, size; | 90 | int ret = 0, tcfp_result = TC_ACT_OK, err, size; |
| 89 | struct nlattr *tb[TCA_POLICE_MAX + 1]; | 91 | struct nlattr *tb[TCA_POLICE_MAX + 1]; |
| 92 | struct tcf_chain *goto_ch = NULL; | ||
| 90 | struct tc_police *parm; | 93 | struct tc_police *parm; |
| 91 | struct tcf_police *police; | 94 | struct tcf_police *police; |
| 92 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; | 95 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; |
| @@ -128,6 +131,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 128 | tcf_idr_release(*a, bind); | 131 | tcf_idr_release(*a, bind); |
| 129 | return -EEXIST; | 132 | return -EEXIST; |
| 130 | } | 133 | } |
| 134 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 135 | if (err < 0) | ||
| 136 | goto release_idr; | ||
| 131 | 137 | ||
| 132 | police = to_police(*a); | 138 | police = to_police(*a); |
| 133 | if (parm->rate.rate) { | 139 | if (parm->rate.rate) { |
| @@ -213,12 +219,14 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 213 | if (new->peak_present) | 219 | if (new->peak_present) |
| 214 | police->tcfp_ptoks = new->tcfp_mtu_ptoks; | 220 | police->tcfp_ptoks = new->tcfp_mtu_ptoks; |
| 215 | spin_unlock_bh(&police->tcfp_lock); | 221 | spin_unlock_bh(&police->tcfp_lock); |
| 216 | police->tcf_action = parm->action; | 222 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 217 | rcu_swap_protected(police->params, | 223 | rcu_swap_protected(police->params, |
| 218 | new, | 224 | new, |
| 219 | lockdep_is_held(&police->tcf_lock)); | 225 | lockdep_is_held(&police->tcf_lock)); |
| 220 | spin_unlock_bh(&police->tcf_lock); | 226 | spin_unlock_bh(&police->tcf_lock); |
| 221 | 227 | ||
| 228 | if (goto_ch) | ||
| 229 | tcf_chain_put_by_act(goto_ch); | ||
| 222 | if (new) | 230 | if (new) |
| 223 | kfree_rcu(new, rcu); | 231 | kfree_rcu(new, rcu); |
| 224 | 232 | ||
| @@ -229,6 +237,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 229 | failure: | 237 | failure: |
| 230 | qdisc_put_rtab(P_tab); | 238 | qdisc_put_rtab(P_tab); |
| 231 | qdisc_put_rtab(R_tab); | 239 | qdisc_put_rtab(R_tab); |
| 240 | if (goto_ch) | ||
| 241 | tcf_chain_put_by_act(goto_ch); | ||
| 242 | release_idr: | ||
| 232 | tcf_idr_release(*a, bind); | 243 | tcf_idr_release(*a, bind); |
| 233 | return err; | 244 | return err; |
| 234 | } | 245 | } |
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 203e399e5c85..0f82d50ea232 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/tc_act/tc_sample.h> | 22 | #include <linux/tc_act/tc_sample.h> |
| 23 | #include <net/tc_act/tc_sample.h> | 23 | #include <net/tc_act/tc_sample.h> |
| 24 | #include <net/psample.h> | 24 | #include <net/psample.h> |
| 25 | #include <net/pkt_cls.h> | ||
| 25 | 26 | ||
| 26 | #include <linux/if_arp.h> | 27 | #include <linux/if_arp.h> |
| 27 | 28 | ||
| @@ -37,14 +38,15 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = { | |||
| 37 | 38 | ||
| 38 | static int tcf_sample_init(struct net *net, struct nlattr *nla, | 39 | static int tcf_sample_init(struct net *net, struct nlattr *nla, |
| 39 | struct nlattr *est, struct tc_action **a, int ovr, | 40 | struct nlattr *est, struct tc_action **a, int ovr, |
| 40 | int bind, bool rtnl_held, | 41 | int bind, bool rtnl_held, struct tcf_proto *tp, |
| 41 | struct netlink_ext_ack *extack) | 42 | struct netlink_ext_ack *extack) |
| 42 | { | 43 | { |
| 43 | struct tc_action_net *tn = net_generic(net, sample_net_id); | 44 | struct tc_action_net *tn = net_generic(net, sample_net_id); |
| 44 | struct nlattr *tb[TCA_SAMPLE_MAX + 1]; | 45 | struct nlattr *tb[TCA_SAMPLE_MAX + 1]; |
| 45 | struct psample_group *psample_group; | 46 | struct psample_group *psample_group; |
| 47 | struct tcf_chain *goto_ch = NULL; | ||
| 48 | u32 psample_group_num, rate; | ||
| 46 | struct tc_sample *parm; | 49 | struct tc_sample *parm; |
| 47 | u32 psample_group_num; | ||
| 48 | struct tcf_sample *s; | 50 | struct tcf_sample *s; |
| 49 | bool exists = false; | 51 | bool exists = false; |
| 50 | int ret, err; | 52 | int ret, err; |
| @@ -79,19 +81,28 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, | |||
| 79 | tcf_idr_release(*a, bind); | 81 | tcf_idr_release(*a, bind); |
| 80 | return -EEXIST; | 82 | return -EEXIST; |
| 81 | } | 83 | } |
| 84 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 85 | if (err < 0) | ||
| 86 | goto release_idr; | ||
| 82 | 87 | ||
| 88 | rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); | ||
| 89 | if (!rate) { | ||
| 90 | NL_SET_ERR_MSG(extack, "invalid sample rate"); | ||
| 91 | err = -EINVAL; | ||
| 92 | goto put_chain; | ||
| 93 | } | ||
| 83 | psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); | 94 | psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); |
| 84 | psample_group = psample_group_get(net, psample_group_num); | 95 | psample_group = psample_group_get(net, psample_group_num); |
| 85 | if (!psample_group) { | 96 | if (!psample_group) { |
| 86 | tcf_idr_release(*a, bind); | 97 | err = -ENOMEM; |
| 87 | return -ENOMEM; | 98 | goto put_chain; |
| 88 | } | 99 | } |
| 89 | 100 | ||
| 90 | s = to_sample(*a); | 101 | s = to_sample(*a); |
| 91 | 102 | ||
| 92 | spin_lock_bh(&s->tcf_lock); | 103 | spin_lock_bh(&s->tcf_lock); |
| 93 | s->tcf_action = parm->action; | 104 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 94 | s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); | 105 | s->rate = rate; |
| 95 | s->psample_group_num = psample_group_num; | 106 | s->psample_group_num = psample_group_num; |
| 96 | RCU_INIT_POINTER(s->psample_group, psample_group); | 107 | RCU_INIT_POINTER(s->psample_group, psample_group); |
| 97 | 108 | ||
| @@ -100,10 +111,18 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, | |||
| 100 | s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); | 111 | s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); |
| 101 | } | 112 | } |
| 102 | spin_unlock_bh(&s->tcf_lock); | 113 | spin_unlock_bh(&s->tcf_lock); |
| 114 | if (goto_ch) | ||
| 115 | tcf_chain_put_by_act(goto_ch); | ||
| 103 | 116 | ||
| 104 | if (ret == ACT_P_CREATED) | 117 | if (ret == ACT_P_CREATED) |
| 105 | tcf_idr_insert(tn, *a); | 118 | tcf_idr_insert(tn, *a); |
| 106 | return ret; | 119 | return ret; |
| 120 | put_chain: | ||
| 121 | if (goto_ch) | ||
| 122 | tcf_chain_put_by_act(goto_ch); | ||
| 123 | release_idr: | ||
| 124 | tcf_idr_release(*a, bind); | ||
| 125 | return err; | ||
| 107 | } | 126 | } |
| 108 | 127 | ||
| 109 | static void tcf_sample_cleanup(struct tc_action *a) | 128 | static void tcf_sample_cleanup(struct tc_action *a) |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index d54cb608dbaf..23c8ca5615e5 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/rtnetlink.h> | 18 | #include <linux/rtnetlink.h> |
| 19 | #include <net/netlink.h> | 19 | #include <net/netlink.h> |
| 20 | #include <net/pkt_sched.h> | 20 | #include <net/pkt_sched.h> |
| 21 | #include <net/pkt_cls.h> | ||
| 21 | 22 | ||
| 22 | #include <linux/tc_act/tc_defact.h> | 23 | #include <linux/tc_act/tc_defact.h> |
| 23 | #include <net/tc_act/tc_defact.h> | 24 | #include <net/tc_act/tc_defact.h> |
| @@ -60,14 +61,26 @@ static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata) | |||
| 60 | return 0; | 61 | return 0; |
| 61 | } | 62 | } |
| 62 | 63 | ||
| 63 | static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata, | 64 | static int reset_policy(struct tc_action *a, const struct nlattr *defdata, |
| 64 | struct tc_defact *p) | 65 | struct tc_defact *p, struct tcf_proto *tp, |
| 66 | struct netlink_ext_ack *extack) | ||
| 65 | { | 67 | { |
| 68 | struct tcf_chain *goto_ch = NULL; | ||
| 69 | struct tcf_defact *d; | ||
| 70 | int err; | ||
| 71 | |||
| 72 | err = tcf_action_check_ctrlact(p->action, tp, &goto_ch, extack); | ||
| 73 | if (err < 0) | ||
| 74 | return err; | ||
| 75 | d = to_defact(a); | ||
| 66 | spin_lock_bh(&d->tcf_lock); | 76 | spin_lock_bh(&d->tcf_lock); |
| 67 | d->tcf_action = p->action; | 77 | goto_ch = tcf_action_set_ctrlact(a, p->action, goto_ch); |
| 68 | memset(d->tcfd_defdata, 0, SIMP_MAX_DATA); | 78 | memset(d->tcfd_defdata, 0, SIMP_MAX_DATA); |
| 69 | nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); | 79 | nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); |
| 70 | spin_unlock_bh(&d->tcf_lock); | 80 | spin_unlock_bh(&d->tcf_lock); |
| 81 | if (goto_ch) | ||
| 82 | tcf_chain_put_by_act(goto_ch); | ||
| 83 | return 0; | ||
| 71 | } | 84 | } |
| 72 | 85 | ||
| 73 | static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = { | 86 | static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = { |
| @@ -78,10 +91,11 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = { | |||
| 78 | static int tcf_simp_init(struct net *net, struct nlattr *nla, | 91 | static int tcf_simp_init(struct net *net, struct nlattr *nla, |
| 79 | struct nlattr *est, struct tc_action **a, | 92 | struct nlattr *est, struct tc_action **a, |
| 80 | int ovr, int bind, bool rtnl_held, | 93 | int ovr, int bind, bool rtnl_held, |
| 81 | struct netlink_ext_ack *extack) | 94 | struct tcf_proto *tp, struct netlink_ext_ack *extack) |
| 82 | { | 95 | { |
| 83 | struct tc_action_net *tn = net_generic(net, simp_net_id); | 96 | struct tc_action_net *tn = net_generic(net, simp_net_id); |
| 84 | struct nlattr *tb[TCA_DEF_MAX + 1]; | 97 | struct nlattr *tb[TCA_DEF_MAX + 1]; |
| 98 | struct tcf_chain *goto_ch = NULL; | ||
| 85 | struct tc_defact *parm; | 99 | struct tc_defact *parm; |
| 86 | struct tcf_defact *d; | 100 | struct tcf_defact *d; |
| 87 | bool exists = false; | 101 | bool exists = false; |
| @@ -122,27 +136,37 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, | |||
| 122 | } | 136 | } |
| 123 | 137 | ||
| 124 | d = to_defact(*a); | 138 | d = to_defact(*a); |
| 125 | ret = alloc_defdata(d, tb[TCA_DEF_DATA]); | 139 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, |
| 126 | if (ret < 0) { | 140 | extack); |
| 127 | tcf_idr_release(*a, bind); | 141 | if (err < 0) |
| 128 | return ret; | 142 | goto release_idr; |
| 129 | } | 143 | |
| 130 | d->tcf_action = parm->action; | 144 | err = alloc_defdata(d, tb[TCA_DEF_DATA]); |
| 145 | if (err < 0) | ||
| 146 | goto put_chain; | ||
| 147 | |||
| 148 | tcf_action_set_ctrlact(*a, parm->action, goto_ch); | ||
| 131 | ret = ACT_P_CREATED; | 149 | ret = ACT_P_CREATED; |
| 132 | } else { | 150 | } else { |
| 133 | d = to_defact(*a); | ||
| 134 | |||
| 135 | if (!ovr) { | 151 | if (!ovr) { |
| 136 | tcf_idr_release(*a, bind); | 152 | err = -EEXIST; |
| 137 | return -EEXIST; | 153 | goto release_idr; |
| 138 | } | 154 | } |
| 139 | 155 | ||
| 140 | reset_policy(d, tb[TCA_DEF_DATA], parm); | 156 | err = reset_policy(*a, tb[TCA_DEF_DATA], parm, tp, extack); |
| 157 | if (err) | ||
| 158 | goto release_idr; | ||
| 141 | } | 159 | } |
| 142 | 160 | ||
| 143 | if (ret == ACT_P_CREATED) | 161 | if (ret == ACT_P_CREATED) |
| 144 | tcf_idr_insert(tn, *a); | 162 | tcf_idr_insert(tn, *a); |
| 145 | return ret; | 163 | return ret; |
| 164 | put_chain: | ||
| 165 | if (goto_ch) | ||
| 166 | tcf_chain_put_by_act(goto_ch); | ||
| 167 | release_idr: | ||
| 168 | tcf_idr_release(*a, bind); | ||
| 169 | return err; | ||
| 146 | } | 170 | } |
| 147 | 171 | ||
| 148 | static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, | 172 | static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, |
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 65879500b688..7e1d261a31d2 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <net/ip.h> | 26 | #include <net/ip.h> |
| 27 | #include <net/ipv6.h> | 27 | #include <net/ipv6.h> |
| 28 | #include <net/dsfield.h> | 28 | #include <net/dsfield.h> |
| 29 | #include <net/pkt_cls.h> | ||
| 29 | 30 | ||
| 30 | #include <linux/tc_act/tc_skbedit.h> | 31 | #include <linux/tc_act/tc_skbedit.h> |
| 31 | #include <net/tc_act/tc_skbedit.h> | 32 | #include <net/tc_act/tc_skbedit.h> |
| @@ -96,11 +97,13 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { | |||
| 96 | static int tcf_skbedit_init(struct net *net, struct nlattr *nla, | 97 | static int tcf_skbedit_init(struct net *net, struct nlattr *nla, |
| 97 | struct nlattr *est, struct tc_action **a, | 98 | struct nlattr *est, struct tc_action **a, |
| 98 | int ovr, int bind, bool rtnl_held, | 99 | int ovr, int bind, bool rtnl_held, |
| 100 | struct tcf_proto *tp, | ||
| 99 | struct netlink_ext_ack *extack) | 101 | struct netlink_ext_ack *extack) |
| 100 | { | 102 | { |
| 101 | struct tc_action_net *tn = net_generic(net, skbedit_net_id); | 103 | struct tc_action_net *tn = net_generic(net, skbedit_net_id); |
| 102 | struct tcf_skbedit_params *params_new; | 104 | struct tcf_skbedit_params *params_new; |
| 103 | struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; | 105 | struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; |
| 106 | struct tcf_chain *goto_ch = NULL; | ||
| 104 | struct tc_skbedit *parm; | 107 | struct tc_skbedit *parm; |
| 105 | struct tcf_skbedit *d; | 108 | struct tcf_skbedit *d; |
| 106 | u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL; | 109 | u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL; |
| @@ -186,11 +189,14 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, | |||
| 186 | return -EEXIST; | 189 | return -EEXIST; |
| 187 | } | 190 | } |
| 188 | } | 191 | } |
| 192 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 193 | if (err < 0) | ||
| 194 | goto release_idr; | ||
| 189 | 195 | ||
| 190 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); | 196 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); |
| 191 | if (unlikely(!params_new)) { | 197 | if (unlikely(!params_new)) { |
| 192 | tcf_idr_release(*a, bind); | 198 | err = -ENOMEM; |
| 193 | return -ENOMEM; | 199 | goto put_chain; |
| 194 | } | 200 | } |
| 195 | 201 | ||
| 196 | params_new->flags = flags; | 202 | params_new->flags = flags; |
| @@ -208,16 +214,24 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, | |||
| 208 | params_new->mask = *mask; | 214 | params_new->mask = *mask; |
| 209 | 215 | ||
| 210 | spin_lock_bh(&d->tcf_lock); | 216 | spin_lock_bh(&d->tcf_lock); |
| 211 | d->tcf_action = parm->action; | 217 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 212 | rcu_swap_protected(d->params, params_new, | 218 | rcu_swap_protected(d->params, params_new, |
| 213 | lockdep_is_held(&d->tcf_lock)); | 219 | lockdep_is_held(&d->tcf_lock)); |
| 214 | spin_unlock_bh(&d->tcf_lock); | 220 | spin_unlock_bh(&d->tcf_lock); |
| 215 | if (params_new) | 221 | if (params_new) |
| 216 | kfree_rcu(params_new, rcu); | 222 | kfree_rcu(params_new, rcu); |
| 223 | if (goto_ch) | ||
| 224 | tcf_chain_put_by_act(goto_ch); | ||
| 217 | 225 | ||
| 218 | if (ret == ACT_P_CREATED) | 226 | if (ret == ACT_P_CREATED) |
| 219 | tcf_idr_insert(tn, *a); | 227 | tcf_idr_insert(tn, *a); |
| 220 | return ret; | 228 | return ret; |
| 229 | put_chain: | ||
| 230 | if (goto_ch) | ||
| 231 | tcf_chain_put_by_act(goto_ch); | ||
| 232 | release_idr: | ||
| 233 | tcf_idr_release(*a, bind); | ||
| 234 | return err; | ||
| 221 | } | 235 | } |
| 222 | 236 | ||
| 223 | static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, | 237 | static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, |
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 7bac1d78e7a3..1d4c324d0a42 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/rtnetlink.h> | 16 | #include <linux/rtnetlink.h> |
| 17 | #include <net/netlink.h> | 17 | #include <net/netlink.h> |
| 18 | #include <net/pkt_sched.h> | 18 | #include <net/pkt_sched.h> |
| 19 | #include <net/pkt_cls.h> | ||
| 19 | 20 | ||
| 20 | #include <linux/tc_act/tc_skbmod.h> | 21 | #include <linux/tc_act/tc_skbmod.h> |
| 21 | #include <net/tc_act/tc_skbmod.h> | 22 | #include <net/tc_act/tc_skbmod.h> |
| @@ -82,11 +83,13 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = { | |||
| 82 | static int tcf_skbmod_init(struct net *net, struct nlattr *nla, | 83 | static int tcf_skbmod_init(struct net *net, struct nlattr *nla, |
| 83 | struct nlattr *est, struct tc_action **a, | 84 | struct nlattr *est, struct tc_action **a, |
| 84 | int ovr, int bind, bool rtnl_held, | 85 | int ovr, int bind, bool rtnl_held, |
| 86 | struct tcf_proto *tp, | ||
| 85 | struct netlink_ext_ack *extack) | 87 | struct netlink_ext_ack *extack) |
| 86 | { | 88 | { |
| 87 | struct tc_action_net *tn = net_generic(net, skbmod_net_id); | 89 | struct tc_action_net *tn = net_generic(net, skbmod_net_id); |
| 88 | struct nlattr *tb[TCA_SKBMOD_MAX + 1]; | 90 | struct nlattr *tb[TCA_SKBMOD_MAX + 1]; |
| 89 | struct tcf_skbmod_params *p, *p_old; | 91 | struct tcf_skbmod_params *p, *p_old; |
| 92 | struct tcf_chain *goto_ch = NULL; | ||
| 90 | struct tc_skbmod *parm; | 93 | struct tc_skbmod *parm; |
| 91 | struct tcf_skbmod *d; | 94 | struct tcf_skbmod *d; |
| 92 | bool exists = false; | 95 | bool exists = false; |
| @@ -153,21 +156,24 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, | |||
| 153 | tcf_idr_release(*a, bind); | 156 | tcf_idr_release(*a, bind); |
| 154 | return -EEXIST; | 157 | return -EEXIST; |
| 155 | } | 158 | } |
| 159 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 160 | if (err < 0) | ||
| 161 | goto release_idr; | ||
| 156 | 162 | ||
| 157 | d = to_skbmod(*a); | 163 | d = to_skbmod(*a); |
| 158 | 164 | ||
| 159 | p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); | 165 | p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); |
| 160 | if (unlikely(!p)) { | 166 | if (unlikely(!p)) { |
| 161 | tcf_idr_release(*a, bind); | 167 | err = -ENOMEM; |
| 162 | return -ENOMEM; | 168 | goto put_chain; |
| 163 | } | 169 | } |
| 164 | 170 | ||
| 165 | p->flags = lflags; | 171 | p->flags = lflags; |
| 166 | d->tcf_action = parm->action; | ||
| 167 | 172 | ||
| 168 | if (ovr) | 173 | if (ovr) |
| 169 | spin_lock_bh(&d->tcf_lock); | 174 | spin_lock_bh(&d->tcf_lock); |
| 170 | /* Protected by tcf_lock if overwriting existing action. */ | 175 | /* Protected by tcf_lock if overwriting existing action. */ |
| 176 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); | ||
| 171 | p_old = rcu_dereference_protected(d->skbmod_p, 1); | 177 | p_old = rcu_dereference_protected(d->skbmod_p, 1); |
| 172 | 178 | ||
| 173 | if (lflags & SKBMOD_F_DMAC) | 179 | if (lflags & SKBMOD_F_DMAC) |
| @@ -183,10 +189,18 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, | |||
| 183 | 189 | ||
| 184 | if (p_old) | 190 | if (p_old) |
| 185 | kfree_rcu(p_old, rcu); | 191 | kfree_rcu(p_old, rcu); |
| 192 | if (goto_ch) | ||
| 193 | tcf_chain_put_by_act(goto_ch); | ||
| 186 | 194 | ||
| 187 | if (ret == ACT_P_CREATED) | 195 | if (ret == ACT_P_CREATED) |
| 188 | tcf_idr_insert(tn, *a); | 196 | tcf_idr_insert(tn, *a); |
| 189 | return ret; | 197 | return ret; |
| 198 | put_chain: | ||
| 199 | if (goto_ch) | ||
| 200 | tcf_chain_put_by_act(goto_ch); | ||
| 201 | release_idr: | ||
| 202 | tcf_idr_release(*a, bind); | ||
| 203 | return err; | ||
| 190 | } | 204 | } |
| 191 | 205 | ||
| 192 | static void tcf_skbmod_cleanup(struct tc_action *a) | 206 | static void tcf_skbmod_cleanup(struct tc_action *a) |
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 7c6591b991d5..d5aaf90a3971 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <net/netlink.h> | 17 | #include <net/netlink.h> |
| 18 | #include <net/pkt_sched.h> | 18 | #include <net/pkt_sched.h> |
| 19 | #include <net/dst.h> | 19 | #include <net/dst.h> |
| 20 | #include <net/pkt_cls.h> | ||
| 20 | 21 | ||
| 21 | #include <linux/tc_act/tc_tunnel_key.h> | 22 | #include <linux/tc_act/tc_tunnel_key.h> |
| 22 | #include <net/tc_act/tc_tunnel_key.h> | 23 | #include <net/tc_act/tc_tunnel_key.h> |
| @@ -210,12 +211,14 @@ static void tunnel_key_release_params(struct tcf_tunnel_key_params *p) | |||
| 210 | static int tunnel_key_init(struct net *net, struct nlattr *nla, | 211 | static int tunnel_key_init(struct net *net, struct nlattr *nla, |
| 211 | struct nlattr *est, struct tc_action **a, | 212 | struct nlattr *est, struct tc_action **a, |
| 212 | int ovr, int bind, bool rtnl_held, | 213 | int ovr, int bind, bool rtnl_held, |
| 214 | struct tcf_proto *tp, | ||
| 213 | struct netlink_ext_ack *extack) | 215 | struct netlink_ext_ack *extack) |
| 214 | { | 216 | { |
| 215 | struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); | 217 | struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); |
| 216 | struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1]; | 218 | struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1]; |
| 217 | struct tcf_tunnel_key_params *params_new; | 219 | struct tcf_tunnel_key_params *params_new; |
| 218 | struct metadata_dst *metadata = NULL; | 220 | struct metadata_dst *metadata = NULL; |
| 221 | struct tcf_chain *goto_ch = NULL; | ||
| 219 | struct tc_tunnel_key *parm; | 222 | struct tc_tunnel_key *parm; |
| 220 | struct tcf_tunnel_key *t; | 223 | struct tcf_tunnel_key *t; |
| 221 | bool exists = false; | 224 | bool exists = false; |
| @@ -359,6 +362,12 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
| 359 | goto release_tun_meta; | 362 | goto release_tun_meta; |
| 360 | } | 363 | } |
| 361 | 364 | ||
| 365 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 366 | if (err < 0) { | ||
| 367 | ret = err; | ||
| 368 | exists = true; | ||
| 369 | goto release_tun_meta; | ||
| 370 | } | ||
| 362 | t = to_tunnel_key(*a); | 371 | t = to_tunnel_key(*a); |
| 363 | 372 | ||
| 364 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); | 373 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); |
| @@ -366,23 +375,29 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
| 366 | NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); | 375 | NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); |
| 367 | ret = -ENOMEM; | 376 | ret = -ENOMEM; |
| 368 | exists = true; | 377 | exists = true; |
| 369 | goto release_tun_meta; | 378 | goto put_chain; |
| 370 | } | 379 | } |
| 371 | params_new->tcft_action = parm->t_action; | 380 | params_new->tcft_action = parm->t_action; |
| 372 | params_new->tcft_enc_metadata = metadata; | 381 | params_new->tcft_enc_metadata = metadata; |
| 373 | 382 | ||
| 374 | spin_lock_bh(&t->tcf_lock); | 383 | spin_lock_bh(&t->tcf_lock); |
| 375 | t->tcf_action = parm->action; | 384 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 376 | rcu_swap_protected(t->params, params_new, | 385 | rcu_swap_protected(t->params, params_new, |
| 377 | lockdep_is_held(&t->tcf_lock)); | 386 | lockdep_is_held(&t->tcf_lock)); |
| 378 | spin_unlock_bh(&t->tcf_lock); | 387 | spin_unlock_bh(&t->tcf_lock); |
| 379 | tunnel_key_release_params(params_new); | 388 | tunnel_key_release_params(params_new); |
| 389 | if (goto_ch) | ||
| 390 | tcf_chain_put_by_act(goto_ch); | ||
| 380 | 391 | ||
| 381 | if (ret == ACT_P_CREATED) | 392 | if (ret == ACT_P_CREATED) |
| 382 | tcf_idr_insert(tn, *a); | 393 | tcf_idr_insert(tn, *a); |
| 383 | 394 | ||
| 384 | return ret; | 395 | return ret; |
| 385 | 396 | ||
| 397 | put_chain: | ||
| 398 | if (goto_ch) | ||
| 399 | tcf_chain_put_by_act(goto_ch); | ||
| 400 | |||
| 386 | release_tun_meta: | 401 | release_tun_meta: |
| 387 | if (metadata) | 402 | if (metadata) |
| 388 | dst_release(&metadata->dst); | 403 | dst_release(&metadata->dst); |
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index ac0061599225..0f40d0a74423 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/if_vlan.h> | 15 | #include <linux/if_vlan.h> |
| 16 | #include <net/netlink.h> | 16 | #include <net/netlink.h> |
| 17 | #include <net/pkt_sched.h> | 17 | #include <net/pkt_sched.h> |
| 18 | #include <net/pkt_cls.h> | ||
| 18 | 19 | ||
| 19 | #include <linux/tc_act/tc_vlan.h> | 20 | #include <linux/tc_act/tc_vlan.h> |
| 20 | #include <net/tc_act/tc_vlan.h> | 21 | #include <net/tc_act/tc_vlan.h> |
| @@ -105,10 +106,11 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = { | |||
| 105 | static int tcf_vlan_init(struct net *net, struct nlattr *nla, | 106 | static int tcf_vlan_init(struct net *net, struct nlattr *nla, |
| 106 | struct nlattr *est, struct tc_action **a, | 107 | struct nlattr *est, struct tc_action **a, |
| 107 | int ovr, int bind, bool rtnl_held, | 108 | int ovr, int bind, bool rtnl_held, |
| 108 | struct netlink_ext_ack *extack) | 109 | struct tcf_proto *tp, struct netlink_ext_ack *extack) |
| 109 | { | 110 | { |
| 110 | struct tc_action_net *tn = net_generic(net, vlan_net_id); | 111 | struct tc_action_net *tn = net_generic(net, vlan_net_id); |
| 111 | struct nlattr *tb[TCA_VLAN_MAX + 1]; | 112 | struct nlattr *tb[TCA_VLAN_MAX + 1]; |
| 113 | struct tcf_chain *goto_ch = NULL; | ||
| 112 | struct tcf_vlan_params *p; | 114 | struct tcf_vlan_params *p; |
| 113 | struct tc_vlan *parm; | 115 | struct tc_vlan *parm; |
| 114 | struct tcf_vlan *v; | 116 | struct tcf_vlan *v; |
| @@ -200,12 +202,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, | |||
| 200 | return -EEXIST; | 202 | return -EEXIST; |
| 201 | } | 203 | } |
| 202 | 204 | ||
| 205 | err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); | ||
| 206 | if (err < 0) | ||
| 207 | goto release_idr; | ||
| 208 | |||
| 203 | v = to_vlan(*a); | 209 | v = to_vlan(*a); |
| 204 | 210 | ||
| 205 | p = kzalloc(sizeof(*p), GFP_KERNEL); | 211 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
| 206 | if (!p) { | 212 | if (!p) { |
| 207 | tcf_idr_release(*a, bind); | 213 | err = -ENOMEM; |
| 208 | return -ENOMEM; | 214 | goto put_chain; |
| 209 | } | 215 | } |
| 210 | 216 | ||
| 211 | p->tcfv_action = action; | 217 | p->tcfv_action = action; |
| @@ -214,16 +220,24 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, | |||
| 214 | p->tcfv_push_proto = push_proto; | 220 | p->tcfv_push_proto = push_proto; |
| 215 | 221 | ||
| 216 | spin_lock_bh(&v->tcf_lock); | 222 | spin_lock_bh(&v->tcf_lock); |
| 217 | v->tcf_action = parm->action; | 223 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 218 | rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock)); | 224 | rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock)); |
| 219 | spin_unlock_bh(&v->tcf_lock); | 225 | spin_unlock_bh(&v->tcf_lock); |
| 220 | 226 | ||
| 227 | if (goto_ch) | ||
| 228 | tcf_chain_put_by_act(goto_ch); | ||
| 221 | if (p) | 229 | if (p) |
| 222 | kfree_rcu(p, rcu); | 230 | kfree_rcu(p, rcu); |
| 223 | 231 | ||
| 224 | if (ret == ACT_P_CREATED) | 232 | if (ret == ACT_P_CREATED) |
| 225 | tcf_idr_insert(tn, *a); | 233 | tcf_idr_insert(tn, *a); |
| 226 | return ret; | 234 | return ret; |
| 235 | put_chain: | ||
| 236 | if (goto_ch) | ||
| 237 | tcf_chain_put_by_act(goto_ch); | ||
| 238 | release_idr: | ||
| 239 | tcf_idr_release(*a, bind); | ||
| 240 | return err; | ||
| 227 | } | 241 | } |
| 228 | 242 | ||
| 229 | static void tcf_vlan_cleanup(struct tc_action *a) | 243 | static void tcf_vlan_cleanup(struct tc_action *a) |
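The four action hunks above (skbedit, skbmod, tunnel_key and vlan) apply one and the same pattern: validate the control action before committing to the change, publish it under the action lock, and release any replaced goto chain outside the lock. Condensed to its skeleton, with the per-action parameter handling elided (the helper bodies are not part of this diff), the pattern looks roughly like:

	struct tcf_chain *goto_ch = NULL;
	int err;

	/* Validate TC_ACT_GOTO_CHAIN up front and, if the action uses it,
	 * take a reference on the destination chain. */
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	/* ... allocate and fill the new parameter block ... */

	spin_lock_bh(&act->tcf_lock);
	/* Publish the new control action; any previously used goto chain is
	 * handed back so it can be dropped outside the lock. */
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	/* ... rcu_swap_protected() the parameter block ... */
	spin_unlock_bh(&act->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;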
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index dc10525e90e7..99ae30c177c7 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
| @@ -367,7 +367,7 @@ static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block) | |||
| 367 | struct tcf_block *block = chain->block; | 367 | struct tcf_block *block = chain->block; |
| 368 | 368 | ||
| 369 | mutex_destroy(&chain->filter_chain_lock); | 369 | mutex_destroy(&chain->filter_chain_lock); |
| 370 | kfree(chain); | 370 | kfree_rcu(chain, rcu); |
| 371 | if (free_block) | 371 | if (free_block) |
| 372 | tcf_block_destroy(block); | 372 | tcf_block_destroy(block); |
| 373 | } | 373 | } |
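The cls_api hunk switches the chain free to kfree_rcu() because, with goto-chain pointers now stored in actions, the datapath may still be dereferencing a chain under rcu_read_lock() when it is destroyed. A reader-side sketch (the field name and the walk are illustrative only, not taken from this diff):

	rcu_read_lock();
	chain = rcu_dereference(act->goto_chain);	/* illustrative field name */
	if (chain) {
		/* ... walk the chain's filter list and classify the skb ... */
	}
	rcu_read_unlock();
	/* kfree_rcu() in tcf_chain_destroy() keeps the chain memory valid
	 * until all such readers that started earlier have finished. */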
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 459921bd3d87..a13bc351a414 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c | |||
| @@ -130,6 +130,11 @@ static void mall_destroy(struct tcf_proto *tp, bool rtnl_held, | |||
| 130 | 130 | ||
| 131 | static void *mall_get(struct tcf_proto *tp, u32 handle) | 131 | static void *mall_get(struct tcf_proto *tp, u32 handle) |
| 132 | { | 132 | { |
| 133 | struct cls_mall_head *head = rtnl_dereference(tp->root); | ||
| 134 | |||
| 135 | if (head && head->handle == handle) | ||
| 136 | return head; | ||
| 137 | |||
| 133 | return NULL; | 138 | return NULL; |
| 134 | } | 139 | } |
| 135 | 140 | ||
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 1d2a12132abc..259d97bc2abd 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c | |||
| @@ -211,6 +211,9 @@ struct cake_sched_data { | |||
| 211 | u8 ack_filter; | 211 | u8 ack_filter; |
| 212 | u8 atm_mode; | 212 | u8 atm_mode; |
| 213 | 213 | ||
| 214 | u32 fwmark_mask; | ||
| 215 | u16 fwmark_shft; | ||
| 216 | |||
| 214 | /* time_next = time_this + ((len * rate_ns) >> rate_shft) */ | 217 | /* time_next = time_this + ((len * rate_ns) >> rate_shft) */ |
| 215 | u16 rate_shft; | 218 | u16 rate_shft; |
| 216 | ktime_t time_next_packet; | 219 | ktime_t time_next_packet; |
| @@ -258,8 +261,7 @@ enum { | |||
| 258 | CAKE_FLAG_AUTORATE_INGRESS = BIT(1), | 261 | CAKE_FLAG_AUTORATE_INGRESS = BIT(1), |
| 259 | CAKE_FLAG_INGRESS = BIT(2), | 262 | CAKE_FLAG_INGRESS = BIT(2), |
| 260 | CAKE_FLAG_WASH = BIT(3), | 263 | CAKE_FLAG_WASH = BIT(3), |
| 261 | CAKE_FLAG_SPLIT_GSO = BIT(4), | 264 | CAKE_FLAG_SPLIT_GSO = BIT(4) |
| 262 | CAKE_FLAG_FWMARK = BIT(5) | ||
| 263 | }; | 265 | }; |
| 264 | 266 | ||
| 265 | /* COBALT operates the Codel and BLUE algorithms in parallel, in order to | 267 | /* COBALT operates the Codel and BLUE algorithms in parallel, in order to |
| @@ -1515,16 +1517,27 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) | |||
| 1515 | 1517 | ||
| 1516 | static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) | 1518 | static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) |
| 1517 | { | 1519 | { |
| 1520 | int wlen = skb_network_offset(skb); | ||
| 1518 | u8 dscp; | 1521 | u8 dscp; |
| 1519 | 1522 | ||
| 1520 | switch (skb->protocol) { | 1523 | switch (tc_skb_protocol(skb)) { |
| 1521 | case htons(ETH_P_IP): | 1524 | case htons(ETH_P_IP): |
| 1525 | wlen += sizeof(struct iphdr); | ||
| 1526 | if (!pskb_may_pull(skb, wlen) || | ||
| 1527 | skb_try_make_writable(skb, wlen)) | ||
| 1528 | return 0; | ||
| 1529 | |||
| 1522 | dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; | 1530 | dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; |
| 1523 | if (wash && dscp) | 1531 | if (wash && dscp) |
| 1524 | ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); | 1532 | ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); |
| 1525 | return dscp; | 1533 | return dscp; |
| 1526 | 1534 | ||
| 1527 | case htons(ETH_P_IPV6): | 1535 | case htons(ETH_P_IPV6): |
| 1536 | wlen += sizeof(struct ipv6hdr); | ||
| 1537 | if (!pskb_may_pull(skb, wlen) || | ||
| 1538 | skb_try_make_writable(skb, wlen)) | ||
| 1539 | return 0; | ||
| 1540 | |||
| 1528 | dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; | 1541 | dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; |
| 1529 | if (wash && dscp) | 1542 | if (wash && dscp) |
| 1530 | ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); | 1543 | ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); |
| @@ -1543,7 +1556,7 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, | |||
| 1543 | struct sk_buff *skb) | 1556 | struct sk_buff *skb) |
| 1544 | { | 1557 | { |
| 1545 | struct cake_sched_data *q = qdisc_priv(sch); | 1558 | struct cake_sched_data *q = qdisc_priv(sch); |
| 1546 | u32 tin; | 1559 | u32 tin, mark; |
| 1547 | u8 dscp; | 1560 | u8 dscp; |
| 1548 | 1561 | ||
| 1549 | /* Tin selection: Default to diffserv-based selection, allow overriding | 1562 | /* Tin selection: Default to diffserv-based selection, allow overriding |
| @@ -1551,14 +1564,13 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, | |||
| 1551 | */ | 1564 | */ |
| 1552 | dscp = cake_handle_diffserv(skb, | 1565 | dscp = cake_handle_diffserv(skb, |
| 1553 | q->rate_flags & CAKE_FLAG_WASH); | 1566 | q->rate_flags & CAKE_FLAG_WASH); |
| 1567 | mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; | ||
| 1554 | 1568 | ||
| 1555 | if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) | 1569 | if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) |
| 1556 | tin = 0; | 1570 | tin = 0; |
| 1557 | 1571 | ||
| 1558 | else if (q->rate_flags & CAKE_FLAG_FWMARK && /* use fw mark */ | 1572 | else if (mark && mark <= q->tin_cnt) |
| 1559 | skb->mark && | 1573 | tin = q->tin_order[mark - 1]; |
| 1560 | skb->mark <= q->tin_cnt) | ||
| 1561 | tin = q->tin_order[skb->mark - 1]; | ||
| 1562 | 1574 | ||
| 1563 | else if (TC_H_MAJ(skb->priority) == sch->handle && | 1575 | else if (TC_H_MAJ(skb->priority) == sch->handle && |
| 1564 | TC_H_MIN(skb->priority) > 0 && | 1576 | TC_H_MIN(skb->priority) > 0 && |
| @@ -2172,6 +2184,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = { | |||
| 2172 | [TCA_CAKE_MPU] = { .type = NLA_U32 }, | 2184 | [TCA_CAKE_MPU] = { .type = NLA_U32 }, |
| 2173 | [TCA_CAKE_INGRESS] = { .type = NLA_U32 }, | 2185 | [TCA_CAKE_INGRESS] = { .type = NLA_U32 }, |
| 2174 | [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 }, | 2186 | [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 }, |
| 2187 | [TCA_CAKE_FWMARK] = { .type = NLA_U32 }, | ||
| 2175 | }; | 2188 | }; |
| 2176 | 2189 | ||
| 2177 | static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, | 2190 | static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, |
| @@ -2619,10 +2632,8 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 2619 | } | 2632 | } |
| 2620 | 2633 | ||
| 2621 | if (tb[TCA_CAKE_FWMARK]) { | 2634 | if (tb[TCA_CAKE_FWMARK]) { |
| 2622 | if (!!nla_get_u32(tb[TCA_CAKE_FWMARK])) | 2635 | q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]); |
| 2623 | q->rate_flags |= CAKE_FLAG_FWMARK; | 2636 | q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0; |
| 2624 | else | ||
| 2625 | q->rate_flags &= ~CAKE_FLAG_FWMARK; | ||
| 2626 | } | 2637 | } |
| 2627 | 2638 | ||
| 2628 | if (q->tins) { | 2639 | if (q->tins) { |
| @@ -2784,8 +2795,7 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
| 2784 | !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO))) | 2795 | !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO))) |
| 2785 | goto nla_put_failure; | 2796 | goto nla_put_failure; |
| 2786 | 2797 | ||
| 2787 | if (nla_put_u32(skb, TCA_CAKE_FWMARK, | 2798 | if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask)) |
| 2788 | !!(q->rate_flags & CAKE_FLAG_FWMARK))) | ||
| 2789 | goto nla_put_failure; | 2799 | goto nla_put_failure; |
| 2790 | 2800 | ||
| 2791 | return nla_nest_end(skb, opts); | 2801 | return nla_nest_end(skb, opts); |
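The sch_cake hunks replace the all-or-nothing CAKE_FLAG_FWMARK with a configurable firewall-mark mask: TCA_CAKE_FWMARK now carries a bit mask, the shift is derived from its lowest set bit, and the masked, shifted value of skb->mark picks a tin directly. A worked example with made-up values:

	/* Operator configures a two-bit mark field, e.g. fwmark mask 0x0300. */
	q->fwmark_mask = 0x0300;
	q->fwmark_shft = __ffs(q->fwmark_mask);		/* lowest set bit -> 8 */

	/* A packet carrying skb->mark = 0x0247 then maps as follows: */
	mark = (0x0247 & 0x0300) >> 8;			/* = 2 */

	/* mark is 1-based: 0 means "no override", otherwise index tin_order. */
	if (mark && mark <= q->tin_cnt)
		tin = q->tin_order[mark - 1];		/* second tin in order */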
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 4dc05409e3fb..114b9048ea7e 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
| @@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 1358 | { | 1358 | { |
| 1359 | struct cbq_sched_data *q = qdisc_priv(sch); | 1359 | struct cbq_sched_data *q = qdisc_priv(sch); |
| 1360 | struct cbq_class *cl = (struct cbq_class *)arg; | 1360 | struct cbq_class *cl = (struct cbq_class *)arg; |
| 1361 | __u32 qlen; | ||
| 1361 | 1362 | ||
| 1362 | cl->xstats.avgidle = cl->avgidle; | 1363 | cl->xstats.avgidle = cl->avgidle; |
| 1363 | cl->xstats.undertime = 0; | 1364 | cl->xstats.undertime = 0; |
| 1365 | qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog); | ||
| 1364 | 1366 | ||
| 1365 | if (cl->undertime != PSCHED_PASTPERFECT) | 1367 | if (cl->undertime != PSCHED_PASTPERFECT) |
| 1366 | cl->xstats.undertime = cl->undertime - q->now; | 1368 | cl->xstats.undertime = cl->undertime - q->now; |
| @@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 1368 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 1370 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 1369 | d, NULL, &cl->bstats) < 0 || | 1371 | d, NULL, &cl->bstats) < 0 || |
| 1370 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1372 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || |
| 1371 | gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0) | 1373 | gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) |
| 1372 | return -1; | 1374 | return -1; |
| 1373 | 1375 | ||
| 1374 | return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); | 1376 | return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); |
| @@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg) | |||
| 1665 | { | 1667 | { |
| 1666 | struct cbq_sched_data *q = qdisc_priv(sch); | 1668 | struct cbq_sched_data *q = qdisc_priv(sch); |
| 1667 | struct cbq_class *cl = (struct cbq_class *)arg; | 1669 | struct cbq_class *cl = (struct cbq_class *)arg; |
| 1668 | unsigned int qlen, backlog; | ||
| 1669 | 1670 | ||
| 1670 | if (cl->filters || cl->children || cl == &q->link) | 1671 | if (cl->filters || cl->children || cl == &q->link) |
| 1671 | return -EBUSY; | 1672 | return -EBUSY; |
| 1672 | 1673 | ||
| 1673 | sch_tree_lock(sch); | 1674 | sch_tree_lock(sch); |
| 1674 | 1675 | ||
| 1675 | qlen = cl->q->q.qlen; | 1676 | qdisc_purge_queue(cl->q); |
| 1676 | backlog = cl->q->qstats.backlog; | ||
| 1677 | qdisc_reset(cl->q); | ||
| 1678 | qdisc_tree_reduce_backlog(cl->q, qlen, backlog); | ||
| 1679 | 1677 | ||
| 1680 | if (cl->next_alive) | 1678 | if (cl->next_alive) |
| 1681 | cbq_deactivate_class(cl); | 1679 | cbq_deactivate_class(cl); |
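From here on, several schedulers (cbq, drr, hfsc, htb, multiq, prio, qfq, red, sfb, tbf, plus the mq/mqprio/taprio dumps) drop their open-coded qlen/backlog snapshots in favour of qdisc_qstats_qlen_backlog(), qdisc_tree_flush_backlog(), qdisc_purge_queue() and qdisc_qstats_copy(). Those helpers are defined elsewhere in this series, not in the hunks shown; judging by the code they replace, their shape is presumably:

	/* Presumed sketch of the new helpers (real definitions not in this diff). */
	static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,
						     __u32 *qlen, __u32 *backlog)
	{
		/* Fold per-CPU and per-qdisc queue counters into one snapshot (elided). */
	}

	static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
	{
		__u32 qlen, backlog;

		qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
		qdisc_tree_reduce_backlog(sch, qlen, backlog);
	}

	static inline void qdisc_purge_queue(struct Qdisc *sch)
	{
		__u32 qlen, backlog;

		qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
		qdisc_reset(sch);
		qdisc_tree_reduce_backlog(sch, qlen, backlog);
	}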
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 09b800991065..430df9a55ec4 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
| @@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid) | |||
| 50 | return container_of(clc, struct drr_class, common); | 50 | return container_of(clc, struct drr_class, common); |
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | static void drr_purge_queue(struct drr_class *cl) | ||
| 54 | { | ||
| 55 | unsigned int len = cl->qdisc->q.qlen; | ||
| 56 | unsigned int backlog = cl->qdisc->qstats.backlog; | ||
| 57 | |||
| 58 | qdisc_reset(cl->qdisc); | ||
| 59 | qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); | ||
| 60 | } | ||
| 61 | |||
| 62 | static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = { | 53 | static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = { |
| 63 | [TCA_DRR_QUANTUM] = { .type = NLA_U32 }, | 54 | [TCA_DRR_QUANTUM] = { .type = NLA_U32 }, |
| 64 | }; | 55 | }; |
| @@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg) | |||
| 167 | 158 | ||
| 168 | sch_tree_lock(sch); | 159 | sch_tree_lock(sch); |
| 169 | 160 | ||
| 170 | drr_purge_queue(cl); | 161 | qdisc_purge_queue(cl->qdisc); |
| 171 | qdisc_class_hash_remove(&q->clhash, &cl->common); | 162 | qdisc_class_hash_remove(&q->clhash, &cl->common); |
| 172 | 163 | ||
| 173 | sch_tree_unlock(sch); | 164 | sch_tree_unlock(sch); |
| @@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 269 | struct gnet_dump *d) | 260 | struct gnet_dump *d) |
| 270 | { | 261 | { |
| 271 | struct drr_class *cl = (struct drr_class *)arg; | 262 | struct drr_class *cl = (struct drr_class *)arg; |
| 272 | __u32 qlen = cl->qdisc->q.qlen; | 263 | __u32 qlen = qdisc_qlen_sum(cl->qdisc); |
| 264 | struct Qdisc *cl_q = cl->qdisc; | ||
| 273 | struct tc_drr_stats xstats; | 265 | struct tc_drr_stats xstats; |
| 274 | 266 | ||
| 275 | memset(&xstats, 0, sizeof(xstats)); | 267 | memset(&xstats, 0, sizeof(xstats)); |
| @@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 279 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 271 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 280 | d, NULL, &cl->bstats) < 0 || | 272 | d, NULL, &cl->bstats) < 0 || |
| 281 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 273 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || |
| 282 | gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0) | 274 | gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0) |
| 283 | return -1; | 275 | return -1; |
| 284 | 276 | ||
| 285 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); | 277 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 24cc220a3218..d2ab463f22ae 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
| @@ -845,16 +845,6 @@ qdisc_peek_len(struct Qdisc *sch) | |||
| 845 | } | 845 | } |
| 846 | 846 | ||
| 847 | static void | 847 | static void |
| 848 | hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl) | ||
| 849 | { | ||
| 850 | unsigned int len = cl->qdisc->q.qlen; | ||
| 851 | unsigned int backlog = cl->qdisc->qstats.backlog; | ||
| 852 | |||
| 853 | qdisc_reset(cl->qdisc); | ||
| 854 | qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); | ||
| 855 | } | ||
| 856 | |||
| 857 | static void | ||
| 858 | hfsc_adjust_levels(struct hfsc_class *cl) | 848 | hfsc_adjust_levels(struct hfsc_class *cl) |
| 859 | { | 849 | { |
| 860 | struct hfsc_class *p; | 850 | struct hfsc_class *p; |
| @@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
| 1076 | qdisc_class_hash_insert(&q->clhash, &cl->cl_common); | 1066 | qdisc_class_hash_insert(&q->clhash, &cl->cl_common); |
| 1077 | list_add_tail(&cl->siblings, &parent->children); | 1067 | list_add_tail(&cl->siblings, &parent->children); |
| 1078 | if (parent->level == 0) | 1068 | if (parent->level == 0) |
| 1079 | hfsc_purge_queue(sch, parent); | 1069 | qdisc_purge_queue(parent->qdisc); |
| 1080 | hfsc_adjust_levels(parent); | 1070 | hfsc_adjust_levels(parent); |
| 1081 | sch_tree_unlock(sch); | 1071 | sch_tree_unlock(sch); |
| 1082 | 1072 | ||
| @@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg) | |||
| 1112 | list_del(&cl->siblings); | 1102 | list_del(&cl->siblings); |
| 1113 | hfsc_adjust_levels(cl->cl_parent); | 1103 | hfsc_adjust_levels(cl->cl_parent); |
| 1114 | 1104 | ||
| 1115 | hfsc_purge_queue(sch, cl); | 1105 | qdisc_purge_queue(cl->qdisc); |
| 1116 | qdisc_class_hash_remove(&q->clhash, &cl->cl_common); | 1106 | qdisc_class_hash_remove(&q->clhash, &cl->cl_common); |
| 1117 | 1107 | ||
| 1118 | sch_tree_unlock(sch); | 1108 | sch_tree_unlock(sch); |
| @@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 1328 | { | 1318 | { |
| 1329 | struct hfsc_class *cl = (struct hfsc_class *)arg; | 1319 | struct hfsc_class *cl = (struct hfsc_class *)arg; |
| 1330 | struct tc_hfsc_stats xstats; | 1320 | struct tc_hfsc_stats xstats; |
| 1321 | __u32 qlen; | ||
| 1331 | 1322 | ||
| 1332 | cl->qstats.backlog = cl->qdisc->qstats.backlog; | 1323 | qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog); |
| 1333 | xstats.level = cl->level; | 1324 | xstats.level = cl->level; |
| 1334 | xstats.period = cl->cl_vtperiod; | 1325 | xstats.period = cl->cl_vtperiod; |
| 1335 | xstats.work = cl->cl_total; | 1326 | xstats.work = cl->cl_total; |
| @@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 1337 | 1328 | ||
| 1338 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 || | 1329 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 || |
| 1339 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1330 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || |
| 1340 | gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0) | 1331 | gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) |
| 1341 | return -1; | 1332 | return -1; |
| 1342 | 1333 | ||
| 1343 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); | 1334 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 30f9da7e1076..2f9883b196e8 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
| @@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) | |||
| 1127 | }; | 1127 | }; |
| 1128 | __u32 qlen = 0; | 1128 | __u32 qlen = 0; |
| 1129 | 1129 | ||
| 1130 | if (!cl->level && cl->leaf.q) { | 1130 | if (!cl->level && cl->leaf.q) |
| 1131 | qlen = cl->leaf.q->q.qlen; | 1131 | qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); |
| 1132 | qs.backlog = cl->leaf.q->qstats.backlog; | 1132 | |
| 1133 | } | ||
| 1134 | cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), | 1133 | cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), |
| 1135 | INT_MIN, INT_MAX); | 1134 | INT_MIN, INT_MAX); |
| 1136 | cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), | 1135 | cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), |
| @@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) | |||
| 1270 | 1269 | ||
| 1271 | sch_tree_lock(sch); | 1270 | sch_tree_lock(sch); |
| 1272 | 1271 | ||
| 1273 | if (!cl->level) { | 1272 | if (!cl->level) |
| 1274 | unsigned int qlen = cl->leaf.q->q.qlen; | 1273 | qdisc_purge_queue(cl->leaf.q); |
| 1275 | unsigned int backlog = cl->leaf.q->qstats.backlog; | ||
| 1276 | |||
| 1277 | qdisc_reset(cl->leaf.q); | ||
| 1278 | qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog); | ||
| 1279 | } | ||
| 1280 | 1274 | ||
| 1281 | /* delete from hash and active; remainder in destroy_class */ | 1275 | /* delete from hash and active; remainder in destroy_class */ |
| 1282 | qdisc_class_hash_remove(&q->clhash, &cl->common); | 1276 | qdisc_class_hash_remove(&q->clhash, &cl->common); |
| @@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
| 1404 | classid, NULL); | 1398 | classid, NULL); |
| 1405 | sch_tree_lock(sch); | 1399 | sch_tree_lock(sch); |
| 1406 | if (parent && !parent->level) { | 1400 | if (parent && !parent->level) { |
| 1407 | unsigned int qlen = parent->leaf.q->q.qlen; | ||
| 1408 | unsigned int backlog = parent->leaf.q->qstats.backlog; | ||
| 1409 | |||
| 1410 | /* turn parent into inner node */ | 1401 | /* turn parent into inner node */ |
| 1411 | qdisc_reset(parent->leaf.q); | 1402 | qdisc_purge_queue(parent->leaf.q); |
| 1412 | qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog); | ||
| 1413 | qdisc_put(parent->leaf.q); | 1403 | qdisc_put(parent->leaf.q); |
| 1414 | if (parent->prio_activity) | 1404 | if (parent->prio_activity) |
| 1415 | htb_deactivate(q, parent); | 1405 | htb_deactivate(q, parent); |
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index 203659bc3906..3a3312467692 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c | |||
| @@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 249 | 249 | ||
| 250 | sch = dev_queue->qdisc_sleeping; | 250 | sch = dev_queue->qdisc_sleeping; |
| 251 | if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || | 251 | if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || |
| 252 | gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0) | 252 | qdisc_qstats_copy(d, sch) < 0) |
| 253 | return -1; | 253 | return -1; |
| 254 | return 0; | 254 | return 0; |
| 255 | } | 255 | } |
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index d364e63c396d..ea0dc112b38d 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c | |||
| @@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 561 | sch = dev_queue->qdisc_sleeping; | 561 | sch = dev_queue->qdisc_sleeping; |
| 562 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 562 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 563 | d, NULL, &sch->bstats) < 0 || | 563 | d, NULL, &sch->bstats) < 0 || |
| 564 | gnet_stats_copy_queue(d, NULL, | 564 | qdisc_qstats_copy(d, sch) < 0) |
| 565 | &sch->qstats, sch->q.qlen) < 0) | ||
| 566 | return -1; | 565 | return -1; |
| 567 | } | 566 | } |
| 568 | return 0; | 567 | return 0; |
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 7410ce4d0321..35b03ae08e0f 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c | |||
| @@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt, | |||
| 201 | for (i = q->bands; i < q->max_bands; i++) { | 201 | for (i = q->bands; i < q->max_bands; i++) { |
| 202 | if (q->queues[i] != &noop_qdisc) { | 202 | if (q->queues[i] != &noop_qdisc) { |
| 203 | struct Qdisc *child = q->queues[i]; | 203 | struct Qdisc *child = q->queues[i]; |
| 204 | |||
| 204 | q->queues[i] = &noop_qdisc; | 205 | q->queues[i] = &noop_qdisc; |
| 205 | qdisc_tree_reduce_backlog(child, child->q.qlen, | 206 | qdisc_tree_flush_backlog(child); |
| 206 | child->qstats.backlog); | ||
| 207 | qdisc_put(child); | 207 | qdisc_put(child); |
| 208 | } | 208 | } |
| 209 | } | 209 | } |
| @@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt, | |||
| 225 | qdisc_hash_add(child, true); | 225 | qdisc_hash_add(child, true); |
| 226 | 226 | ||
| 227 | if (old != &noop_qdisc) { | 227 | if (old != &noop_qdisc) { |
| 228 | qdisc_tree_reduce_backlog(old, | 228 | qdisc_tree_flush_backlog(old); |
| 229 | old->q.qlen, | ||
| 230 | old->qstats.backlog); | ||
| 231 | qdisc_put(old); | 229 | qdisc_put(old); |
| 232 | } | 230 | } |
| 233 | sch_tree_unlock(sch); | 231 | sch_tree_unlock(sch); |
| @@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 344 | cl_q = q->queues[cl - 1]; | 342 | cl_q = q->queues[cl - 1]; |
| 345 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 343 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 346 | d, NULL, &cl_q->bstats) < 0 || | 344 | d, NULL, &cl_q->bstats) < 0 || |
| 347 | gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) | 345 | qdisc_qstats_copy(d, cl_q) < 0) |
| 348 | return -1; | 346 | return -1; |
| 349 | 347 | ||
| 350 | return 0; | 348 | return 0; |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 847141cd900f..d519b21535b3 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
| @@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt, | |||
| 216 | q->bands = qopt->bands; | 216 | q->bands = qopt->bands; |
| 217 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); | 217 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); |
| 218 | 218 | ||
| 219 | for (i = q->bands; i < oldbands; i++) { | 219 | for (i = q->bands; i < oldbands; i++) |
| 220 | struct Qdisc *child = q->queues[i]; | 220 | qdisc_tree_flush_backlog(q->queues[i]); |
| 221 | |||
| 222 | qdisc_tree_reduce_backlog(child, child->q.qlen, | ||
| 223 | child->qstats.backlog); | ||
| 224 | } | ||
| 225 | 221 | ||
| 226 | for (i = oldbands; i < q->bands; i++) { | 222 | for (i = oldbands; i < q->bands; i++) { |
| 227 | q->queues[i] = queues[i]; | 223 | q->queues[i] = queues[i]; |
| @@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 365 | cl_q = q->queues[cl - 1]; | 361 | cl_q = q->queues[cl - 1]; |
| 366 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 362 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 367 | d, NULL, &cl_q->bstats) < 0 || | 363 | d, NULL, &cl_q->bstats) < 0 || |
| 368 | gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) | 364 | qdisc_qstats_copy(d, cl_q) < 0) |
| 369 | return -1; | 365 | return -1; |
| 370 | 366 | ||
| 371 | return 0; | 367 | return 0; |
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 29f5c4a24688..1589364b54da 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
| @@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) | |||
| 217 | return container_of(clc, struct qfq_class, common); | 217 | return container_of(clc, struct qfq_class, common); |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | static void qfq_purge_queue(struct qfq_class *cl) | ||
| 221 | { | ||
| 222 | unsigned int len = cl->qdisc->q.qlen; | ||
| 223 | unsigned int backlog = cl->qdisc->qstats.backlog; | ||
| 224 | |||
| 225 | qdisc_reset(cl->qdisc); | ||
| 226 | qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); | ||
| 227 | } | ||
| 228 | |||
| 229 | static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = { | 220 | static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = { |
| 230 | [TCA_QFQ_WEIGHT] = { .type = NLA_U32 }, | 221 | [TCA_QFQ_WEIGHT] = { .type = NLA_U32 }, |
| 231 | [TCA_QFQ_LMAX] = { .type = NLA_U32 }, | 222 | [TCA_QFQ_LMAX] = { .type = NLA_U32 }, |
| @@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg) | |||
| 551 | 542 | ||
| 552 | sch_tree_lock(sch); | 543 | sch_tree_lock(sch); |
| 553 | 544 | ||
| 554 | qfq_purge_queue(cl); | 545 | qdisc_purge_queue(cl->qdisc); |
| 555 | qdisc_class_hash_remove(&q->clhash, &cl->common); | 546 | qdisc_class_hash_remove(&q->clhash, &cl->common); |
| 556 | 547 | ||
| 557 | sch_tree_unlock(sch); | 548 | sch_tree_unlock(sch); |
| @@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 655 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 646 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 656 | d, NULL, &cl->bstats) < 0 || | 647 | d, NULL, &cl->bstats) < 0 || |
| 657 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 648 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || |
| 658 | gnet_stats_copy_queue(d, NULL, | 649 | qdisc_qstats_copy(d, cl->qdisc) < 0) |
| 659 | &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0) | ||
| 660 | return -1; | 650 | return -1; |
| 661 | 651 | ||
| 662 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); | 652 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 9df9942340ea..4e8c0abf6194 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
| @@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 233 | q->flags = ctl->flags; | 233 | q->flags = ctl->flags; |
| 234 | q->limit = ctl->limit; | 234 | q->limit = ctl->limit; |
| 235 | if (child) { | 235 | if (child) { |
| 236 | qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, | 236 | qdisc_tree_flush_backlog(q->qdisc); |
| 237 | q->qdisc->qstats.backlog); | ||
| 238 | old_child = q->qdisc; | 237 | old_child = q->qdisc; |
| 239 | q->qdisc = child; | 238 | q->qdisc = child; |
| 240 | } | 239 | } |
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index bab506b01a32..2419fdb75966 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c | |||
| @@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 521 | qdisc_hash_add(child, true); | 521 | qdisc_hash_add(child, true); |
| 522 | sch_tree_lock(sch); | 522 | sch_tree_lock(sch); |
| 523 | 523 | ||
| 524 | qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, | 524 | qdisc_tree_flush_backlog(q->qdisc); |
| 525 | q->qdisc->qstats.backlog); | ||
| 526 | qdisc_put(q->qdisc); | 525 | qdisc_put(q->qdisc); |
| 527 | q->qdisc = child; | 526 | q->qdisc = child; |
| 528 | 527 | ||
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 206e4dbed12f..c7041999eb5d 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c | |||
| @@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 895 | 895 | ||
| 896 | sch = dev_queue->qdisc_sleeping; | 896 | sch = dev_queue->qdisc_sleeping; |
| 897 | if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || | 897 | if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || |
| 898 | gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0) | 898 | qdisc_qstats_copy(d, sch) < 0) |
| 899 | return -1; | 899 | return -1; |
| 900 | return 0; | 900 | return 0; |
| 901 | } | 901 | } |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 7f272a9070c5..f71578dbb9e3 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
| @@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 391 | 391 | ||
| 392 | sch_tree_lock(sch); | 392 | sch_tree_lock(sch); |
| 393 | if (child) { | 393 | if (child) { |
| 394 | qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, | 394 | qdisc_tree_flush_backlog(q->qdisc); |
| 395 | q->qdisc->qstats.backlog); | ||
| 396 | qdisc_put(q->qdisc); | 395 | qdisc_put(q->qdisc); |
| 397 | q->qdisc = child; | 396 | q->qdisc = child; |
| 398 | } | 397 | } |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 6abc8b274270..951afdeea5e9 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
| @@ -600,6 +600,7 @@ out: | |||
| 600 | static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) | 600 | static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) |
| 601 | { | 601 | { |
| 602 | /* No address mapping for V4 sockets */ | 602 | /* No address mapping for V4 sockets */ |
| 603 | memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); | ||
| 603 | return sizeof(struct sockaddr_in); | 604 | return sizeof(struct sockaddr_in); |
| 604 | } | 605 | } |
| 605 | 606 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 6140471efd4b..9874e60c9b0d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -999,7 +999,7 @@ static int sctp_setsockopt_bindx(struct sock *sk, | |||
| 999 | if (unlikely(addrs_size <= 0)) | 999 | if (unlikely(addrs_size <= 0)) |
| 1000 | return -EINVAL; | 1000 | return -EINVAL; |
| 1001 | 1001 | ||
| 1002 | kaddrs = vmemdup_user(addrs, addrs_size); | 1002 | kaddrs = memdup_user(addrs, addrs_size); |
| 1003 | if (unlikely(IS_ERR(kaddrs))) | 1003 | if (unlikely(IS_ERR(kaddrs))) |
| 1004 | return PTR_ERR(kaddrs); | 1004 | return PTR_ERR(kaddrs); |
| 1005 | 1005 | ||
| @@ -1007,7 +1007,7 @@ static int sctp_setsockopt_bindx(struct sock *sk, | |||
| 1007 | addr_buf = kaddrs; | 1007 | addr_buf = kaddrs; |
| 1008 | while (walk_size < addrs_size) { | 1008 | while (walk_size < addrs_size) { |
| 1009 | if (walk_size + sizeof(sa_family_t) > addrs_size) { | 1009 | if (walk_size + sizeof(sa_family_t) > addrs_size) { |
| 1010 | kvfree(kaddrs); | 1010 | kfree(kaddrs); |
| 1011 | return -EINVAL; | 1011 | return -EINVAL; |
| 1012 | } | 1012 | } |
| 1013 | 1013 | ||
| @@ -1018,7 +1018,7 @@ static int sctp_setsockopt_bindx(struct sock *sk, | |||
| 1018 | * causes the address buffer to overflow return EINVAL. | 1018 | * causes the address buffer to overflow return EINVAL. |
| 1019 | */ | 1019 | */ |
| 1020 | if (!af || (walk_size + af->sockaddr_len) > addrs_size) { | 1020 | if (!af || (walk_size + af->sockaddr_len) > addrs_size) { |
| 1021 | kvfree(kaddrs); | 1021 | kfree(kaddrs); |
| 1022 | return -EINVAL; | 1022 | return -EINVAL; |
| 1023 | } | 1023 | } |
| 1024 | addrcnt++; | 1024 | addrcnt++; |
| @@ -1054,7 +1054,7 @@ static int sctp_setsockopt_bindx(struct sock *sk, | |||
| 1054 | } | 1054 | } |
| 1055 | 1055 | ||
| 1056 | out: | 1056 | out: |
| 1057 | kvfree(kaddrs); | 1057 | kfree(kaddrs); |
| 1058 | 1058 | ||
| 1059 | return err; | 1059 | return err; |
| 1060 | } | 1060 | } |
| @@ -1329,7 +1329,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk, | |||
| 1329 | if (unlikely(addrs_size <= 0)) | 1329 | if (unlikely(addrs_size <= 0)) |
| 1330 | return -EINVAL; | 1330 | return -EINVAL; |
| 1331 | 1331 | ||
| 1332 | kaddrs = vmemdup_user(addrs, addrs_size); | 1332 | kaddrs = memdup_user(addrs, addrs_size); |
| 1333 | if (unlikely(IS_ERR(kaddrs))) | 1333 | if (unlikely(IS_ERR(kaddrs))) |
| 1334 | return PTR_ERR(kaddrs); | 1334 | return PTR_ERR(kaddrs); |
| 1335 | 1335 | ||
| @@ -1349,7 +1349,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk, | |||
| 1349 | err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id); | 1349 | err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id); |
| 1350 | 1350 | ||
| 1351 | out_free: | 1351 | out_free: |
| 1352 | kvfree(kaddrs); | 1352 | kfree(kaddrs); |
| 1353 | 1353 | ||
| 1354 | return err; | 1354 | return err; |
| 1355 | } | 1355 | } |
| @@ -2920,6 +2920,9 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk, | |||
| 2920 | return 0; | 2920 | return 0; |
| 2921 | } | 2921 | } |
| 2922 | 2922 | ||
| 2923 | if (sctp_style(sk, TCP)) | ||
| 2924 | params.sack_assoc_id = SCTP_FUTURE_ASSOC; | ||
| 2925 | |||
| 2923 | if (params.sack_assoc_id == SCTP_FUTURE_ASSOC || | 2926 | if (params.sack_assoc_id == SCTP_FUTURE_ASSOC || |
| 2924 | params.sack_assoc_id == SCTP_ALL_ASSOC) { | 2927 | params.sack_assoc_id == SCTP_ALL_ASSOC) { |
| 2925 | if (params.sack_delay) { | 2928 | if (params.sack_delay) { |
| @@ -3024,6 +3027,9 @@ static int sctp_setsockopt_default_send_param(struct sock *sk, | |||
| 3024 | return 0; | 3027 | return 0; |
| 3025 | } | 3028 | } |
| 3026 | 3029 | ||
| 3030 | if (sctp_style(sk, TCP)) | ||
| 3031 | info.sinfo_assoc_id = SCTP_FUTURE_ASSOC; | ||
| 3032 | |||
| 3027 | if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC || | 3033 | if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC || |
| 3028 | info.sinfo_assoc_id == SCTP_ALL_ASSOC) { | 3034 | info.sinfo_assoc_id == SCTP_ALL_ASSOC) { |
| 3029 | sp->default_stream = info.sinfo_stream; | 3035 | sp->default_stream = info.sinfo_stream; |
| @@ -3081,6 +3087,9 @@ static int sctp_setsockopt_default_sndinfo(struct sock *sk, | |||
| 3081 | return 0; | 3087 | return 0; |
| 3082 | } | 3088 | } |
| 3083 | 3089 | ||
| 3090 | if (sctp_style(sk, TCP)) | ||
| 3091 | info.snd_assoc_id = SCTP_FUTURE_ASSOC; | ||
| 3092 | |||
| 3084 | if (info.snd_assoc_id == SCTP_FUTURE_ASSOC || | 3093 | if (info.snd_assoc_id == SCTP_FUTURE_ASSOC || |
| 3085 | info.snd_assoc_id == SCTP_ALL_ASSOC) { | 3094 | info.snd_assoc_id == SCTP_ALL_ASSOC) { |
| 3086 | sp->default_stream = info.snd_sid; | 3095 | sp->default_stream = info.snd_sid; |
| @@ -3531,6 +3540,9 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval, | |||
| 3531 | return 0; | 3540 | return 0; |
| 3532 | } | 3541 | } |
| 3533 | 3542 | ||
| 3543 | if (sctp_style(sk, TCP)) | ||
| 3544 | params.assoc_id = SCTP_FUTURE_ASSOC; | ||
| 3545 | |||
| 3534 | if (params.assoc_id == SCTP_FUTURE_ASSOC || | 3546 | if (params.assoc_id == SCTP_FUTURE_ASSOC || |
| 3535 | params.assoc_id == SCTP_ALL_ASSOC) | 3547 | params.assoc_id == SCTP_ALL_ASSOC) |
| 3536 | sp->default_rcv_context = params.assoc_value; | 3548 | sp->default_rcv_context = params.assoc_value; |
| @@ -3670,6 +3682,9 @@ static int sctp_setsockopt_maxburst(struct sock *sk, | |||
| 3670 | return 0; | 3682 | return 0; |
| 3671 | } | 3683 | } |
| 3672 | 3684 | ||
| 3685 | if (sctp_style(sk, TCP)) | ||
| 3686 | params.assoc_id = SCTP_FUTURE_ASSOC; | ||
| 3687 | |||
| 3673 | if (params.assoc_id == SCTP_FUTURE_ASSOC || | 3688 | if (params.assoc_id == SCTP_FUTURE_ASSOC || |
| 3674 | params.assoc_id == SCTP_ALL_ASSOC) | 3689 | params.assoc_id == SCTP_ALL_ASSOC) |
| 3675 | sp->max_burst = params.assoc_value; | 3690 | sp->max_burst = params.assoc_value; |
| @@ -3798,6 +3813,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk, | |||
| 3798 | goto out; | 3813 | goto out; |
| 3799 | } | 3814 | } |
| 3800 | 3815 | ||
| 3816 | if (sctp_style(sk, TCP)) | ||
| 3817 | authkey->sca_assoc_id = SCTP_FUTURE_ASSOC; | ||
| 3818 | |||
| 3801 | if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC || | 3819 | if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC || |
| 3802 | authkey->sca_assoc_id == SCTP_ALL_ASSOC) { | 3820 | authkey->sca_assoc_id == SCTP_ALL_ASSOC) { |
| 3803 | ret = sctp_auth_set_key(ep, asoc, authkey); | 3821 | ret = sctp_auth_set_key(ep, asoc, authkey); |
| @@ -3853,6 +3871,9 @@ static int sctp_setsockopt_active_key(struct sock *sk, | |||
| 3853 | if (asoc) | 3871 | if (asoc) |
| 3854 | return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); | 3872 | return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); |
| 3855 | 3873 | ||
| 3874 | if (sctp_style(sk, TCP)) | ||
| 3875 | val.scact_assoc_id = SCTP_FUTURE_ASSOC; | ||
| 3876 | |||
| 3856 | if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || | 3877 | if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || |
| 3857 | val.scact_assoc_id == SCTP_ALL_ASSOC) { | 3878 | val.scact_assoc_id == SCTP_ALL_ASSOC) { |
| 3858 | ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); | 3879 | ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); |
| @@ -3904,6 +3925,9 @@ static int sctp_setsockopt_del_key(struct sock *sk, | |||
| 3904 | if (asoc) | 3925 | if (asoc) |
| 3905 | return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); | 3926 | return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); |
| 3906 | 3927 | ||
| 3928 | if (sctp_style(sk, TCP)) | ||
| 3929 | val.scact_assoc_id = SCTP_FUTURE_ASSOC; | ||
| 3930 | |||
| 3907 | if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || | 3931 | if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || |
| 3908 | val.scact_assoc_id == SCTP_ALL_ASSOC) { | 3932 | val.scact_assoc_id == SCTP_ALL_ASSOC) { |
| 3909 | ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); | 3933 | ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); |
| @@ -3954,6 +3978,9 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval, | |||
| 3954 | if (asoc) | 3978 | if (asoc) |
| 3955 | return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber); | 3979 | return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber); |
| 3956 | 3980 | ||
| 3981 | if (sctp_style(sk, TCP)) | ||
| 3982 | val.scact_assoc_id = SCTP_FUTURE_ASSOC; | ||
| 3983 | |||
| 3957 | if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || | 3984 | if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || |
| 3958 | val.scact_assoc_id == SCTP_ALL_ASSOC) { | 3985 | val.scact_assoc_id == SCTP_ALL_ASSOC) { |
| 3959 | ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber); | 3986 | ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber); |
| @@ -4169,6 +4196,9 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk, | |||
| 4169 | goto out; | 4196 | goto out; |
| 4170 | } | 4197 | } |
| 4171 | 4198 | ||
| 4199 | if (sctp_style(sk, TCP)) | ||
| 4200 | info.pr_assoc_id = SCTP_FUTURE_ASSOC; | ||
| 4201 | |||
| 4172 | if (info.pr_assoc_id == SCTP_FUTURE_ASSOC || | 4202 | if (info.pr_assoc_id == SCTP_FUTURE_ASSOC || |
| 4173 | info.pr_assoc_id == SCTP_ALL_ASSOC) { | 4203 | info.pr_assoc_id == SCTP_ALL_ASSOC) { |
| 4174 | SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); | 4204 | SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); |
| @@ -4251,6 +4281,9 @@ static int sctp_setsockopt_enable_strreset(struct sock *sk, | |||
| 4251 | goto out; | 4281 | goto out; |
| 4252 | } | 4282 | } |
| 4253 | 4283 | ||
| 4284 | if (sctp_style(sk, TCP)) | ||
| 4285 | params.assoc_id = SCTP_FUTURE_ASSOC; | ||
| 4286 | |||
| 4254 | if (params.assoc_id == SCTP_FUTURE_ASSOC || | 4287 | if (params.assoc_id == SCTP_FUTURE_ASSOC || |
| 4255 | params.assoc_id == SCTP_ALL_ASSOC) | 4288 | params.assoc_id == SCTP_ALL_ASSOC) |
| 4256 | ep->strreset_enable = params.assoc_value; | 4289 | ep->strreset_enable = params.assoc_value; |
| @@ -4376,6 +4409,9 @@ static int sctp_setsockopt_scheduler(struct sock *sk, | |||
| 4376 | if (asoc) | 4409 | if (asoc) |
| 4377 | return sctp_sched_set_sched(asoc, params.assoc_value); | 4410 | return sctp_sched_set_sched(asoc, params.assoc_value); |
| 4378 | 4411 | ||
| 4412 | if (sctp_style(sk, TCP)) | ||
| 4413 | params.assoc_id = SCTP_FUTURE_ASSOC; | ||
| 4414 | |||
| 4379 | if (params.assoc_id == SCTP_FUTURE_ASSOC || | 4415 | if (params.assoc_id == SCTP_FUTURE_ASSOC || |
| 4380 | params.assoc_id == SCTP_ALL_ASSOC) | 4416 | params.assoc_id == SCTP_ALL_ASSOC) |
| 4381 | sp->default_ss = params.assoc_value; | 4417 | sp->default_ss = params.assoc_value; |
| @@ -4541,6 +4577,9 @@ static int sctp_setsockopt_event(struct sock *sk, char __user *optval, | |||
| 4541 | if (asoc) | 4577 | if (asoc) |
| 4542 | return sctp_assoc_ulpevent_type_set(¶m, asoc); | 4578 | return sctp_assoc_ulpevent_type_set(¶m, asoc); |
| 4543 | 4579 | ||
| 4580 | if (sctp_style(sk, TCP)) | ||
| 4581 | param.se_assoc_id = SCTP_FUTURE_ASSOC; | ||
| 4582 | |||
| 4544 | if (param.se_assoc_id == SCTP_FUTURE_ASSOC || | 4583 | if (param.se_assoc_id == SCTP_FUTURE_ASSOC || |
| 4545 | param.se_assoc_id == SCTP_ALL_ASSOC) | 4584 | param.se_assoc_id == SCTP_ALL_ASSOC) |
| 4546 | sctp_ulpevent_type_set(&sp->subscribe, | 4585 | sctp_ulpevent_type_set(&sp->subscribe, |
| @@ -9169,7 +9208,7 @@ static inline void sctp_copy_descendant(struct sock *sk_to, | |||
| 9169 | { | 9208 | { |
| 9170 | int ancestor_size = sizeof(struct inet_sock) + | 9209 | int ancestor_size = sizeof(struct inet_sock) + |
| 9171 | sizeof(struct sctp_sock) - | 9210 | sizeof(struct sctp_sock) - |
| 9172 | offsetof(struct sctp_sock, auto_asconf_list); | 9211 | offsetof(struct sctp_sock, pd_lobby); |
| 9173 | 9212 | ||
| 9174 | if (sk_from->sk_family == PF_INET6) | 9213 | if (sk_from->sk_family == PF_INET6) |
| 9175 | ancestor_size += sizeof(struct ipv6_pinfo); | 9214 | ancestor_size += sizeof(struct ipv6_pinfo); |
| @@ -9253,7 +9292,6 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 9253 | * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. | 9292 | * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. |
| 9254 | * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. | 9293 | * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. |
| 9255 | */ | 9294 | */ |
| 9256 | skb_queue_head_init(&newsp->pd_lobby); | ||
| 9257 | atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); | 9295 | atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); |
| 9258 | 9296 | ||
| 9259 | if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { | 9297 | if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { |
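The string of net/sctp/socket.c setsockopt hunks all add the same normalization: on a one-to-one (TCP-style) socket the caller-supplied association id carries no information, so it is coerced to SCTP_FUTURE_ASSOC before the existing SCTP_FUTURE_ASSOC/SCTP_ALL_ASSOC branches run. Written as a stand-alone helper purely for illustration (no such helper exists in the patch, which open-codes the check in each handler):

	static sctp_assoc_t sctp_normalize_assoc_id(struct sock *sk, sctp_assoc_t id)
	{
		/* TCP-style sockets have a single implicit association, so any
		 * id passed by the caller is treated as "the future association". */
		return sctp_style(sk, TCP) ? SCTP_FUTURE_ASSOC : id;
	}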
diff --git a/net/socket.c b/net/socket.c index 3c176a12fe48..8255f5bda0aa 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -384,6 +384,18 @@ static struct file_system_type sock_fs_type = { | |||
| 384 | * but we take care of internal coherence yet. | 384 | * but we take care of internal coherence yet. |
| 385 | */ | 385 | */ |
| 386 | 386 | ||
| 387 | /** | ||
| 388 | * sock_alloc_file - Bind a &socket to a &file | ||
| 389 | * @sock: socket | ||
| 390 | * @flags: file status flags | ||
| 391 | * @dname: protocol name | ||
| 392 | * | ||
| 393 | * Returns the &file bound with @sock, implicitly storing it | ||
| 394 | * in sock->file. If dname is %NULL, sets to "". | ||
| 395 | * On failure the return is a ERR pointer (see linux/err.h). | ||
| 396 | * This function uses GFP_KERNEL internally. | ||
| 397 | */ | ||
| 398 | |||
| 387 | struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname) | 399 | struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname) |
| 388 | { | 400 | { |
| 389 | struct file *file; | 401 | struct file *file; |
| @@ -424,6 +436,14 @@ static int sock_map_fd(struct socket *sock, int flags) | |||
| 424 | return PTR_ERR(newfile); | 436 | return PTR_ERR(newfile); |
| 425 | } | 437 | } |
| 426 | 438 | ||
| 439 | /** | ||
| 440 | * sock_from_file - Return the &socket bounded to @file. | ||
| 441 | * @file: file | ||
| 442 | * @err: pointer to an error code return | ||
| 443 | * | ||
| 444 | * On failure returns %NULL and assigns -ENOTSOCK to @err. | ||
| 445 | */ | ||
| 446 | |||
| 427 | struct socket *sock_from_file(struct file *file, int *err) | 447 | struct socket *sock_from_file(struct file *file, int *err) |
| 428 | { | 448 | { |
| 429 | if (file->f_op == &socket_file_ops) | 449 | if (file->f_op == &socket_file_ops) |
| @@ -532,11 +552,11 @@ static const struct inode_operations sockfs_inode_ops = { | |||
| 532 | }; | 552 | }; |
| 533 | 553 | ||
| 534 | /** | 554 | /** |
| 535 | * sock_alloc - allocate a socket | 555 | * sock_alloc - allocate a socket |
| 536 | * | 556 | * |
| 537 | * Allocate a new inode and socket object. The two are bound together | 557 | * Allocate a new inode and socket object. The two are bound together |
| 538 | * and initialised. The socket is then returned. If we are out of inodes | 558 | * and initialised. The socket is then returned. If we are out of inodes |
| 539 | NULL is returned. | 559 | NULL is returned. This function uses GFP_KERNEL internally. |
| 540 | */ | 560 | */ |
| 541 | 561 | ||
| 542 | struct socket *sock_alloc(void) | 562 | struct socket *sock_alloc(void) |
| @@ -561,7 +581,7 @@ struct socket *sock_alloc(void) | |||
| 561 | EXPORT_SYMBOL(sock_alloc); | 581 | EXPORT_SYMBOL(sock_alloc); |
| 562 | 582 | ||
| 563 | /** | 583 | /** |
| 564 | * sock_release - close a socket | 584 | * sock_release - close a socket |
| 565 | * @sock: socket to close | 585 | * @sock: socket to close |
| 566 | * | 586 | * |
| 567 | * The socket is released from the protocol stack if it has a release | 587 | * The socket is released from the protocol stack if it has a release |
| @@ -617,6 +637,15 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags) | |||
| 617 | } | 637 | } |
| 618 | EXPORT_SYMBOL(__sock_tx_timestamp); | 638 | EXPORT_SYMBOL(__sock_tx_timestamp); |
| 619 | 639 | ||
| 640 | /** | ||
| 641 | * sock_sendmsg - send a message through @sock | ||
| 642 | * @sock: socket | ||
| 643 | * @msg: message to send | ||
| 644 | * | ||
| 645 | * Sends @msg through @sock, passing through LSM. | ||
| 646 | * Returns the number of bytes sent, or an error code. | ||
| 647 | */ | ||
| 648 | |||
| 620 | static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg) | 649 | static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg) |
| 621 | { | 650 | { |
| 622 | int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg)); | 651 | int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg)); |
| @@ -633,6 +662,18 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg) | |||
| 633 | } | 662 | } |
| 634 | EXPORT_SYMBOL(sock_sendmsg); | 663 | EXPORT_SYMBOL(sock_sendmsg); |
| 635 | 664 | ||
| 665 | /** | ||
| 666 | * kernel_sendmsg - send a message through @sock (kernel-space) | ||
| 667 | * @sock: socket | ||
| 668 | * @msg: message header | ||
| 669 | * @vec: kernel vec | ||
| 670 | * @num: vec array length | ||
| 671 | * @size: total message data size | ||
| 672 | * | ||
| 673 | * Builds the message data with @vec and sends it through @sock. | ||
| 674 | * Returns the number of bytes sent, or an error code. | ||
| 675 | */ | ||
| 676 | |||
| 636 | int kernel_sendmsg(struct socket *sock, struct msghdr *msg, | 677 | int kernel_sendmsg(struct socket *sock, struct msghdr *msg, |
| 637 | struct kvec *vec, size_t num, size_t size) | 678 | struct kvec *vec, size_t num, size_t size) |
| 638 | { | 679 | { |
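The kernel_sendmsg() helper documented above is normally fed a struct kvec describing kernel memory; a short sketch, assuming an already-connected @sock (the helper name example_send() is hypothetical):

#include <linux/net.h>
#include <linux/uio.h>

/* Hypothetical helper: send a kernel buffer over a connected socket. */
static int example_send(struct socket *sock, void *buf, size_t len)
{
        struct msghdr msg = { };        /* msg.msg_flags may carry MSG_DONTWAIT etc. */
        struct kvec iov = { .iov_base = buf, .iov_len = len };

        /* Returns the number of bytes sent or a negative errno. */
        return kernel_sendmsg(sock, &msg, &iov, 1, len);
}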
| @@ -641,6 +682,19 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, | |||
| 641 | } | 682 | } |
| 642 | EXPORT_SYMBOL(kernel_sendmsg); | 683 | EXPORT_SYMBOL(kernel_sendmsg); |
| 643 | 684 | ||
| 685 | /** | ||
| 686 | * kernel_sendmsg_locked - send a message through @sock (kernel-space) | ||
| 687 | * @sk: sock | ||
| 688 | * @msg: message header | ||
| 689 | * @vec: output s/g array | ||
| 690 | * @num: output s/g array length | ||
| 691 | * @size: total message data size | ||
| 692 | * | ||
| 693 | * Builds the message data with @vec and sends it through @sock. | ||
| 694 | * Returns the number of bytes sent, or an error code. | ||
| 695 | * Caller must hold @sk. | ||
| 696 | */ | ||
| 697 | |||
| 644 | int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, | 698 | int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, |
| 645 | struct kvec *vec, size_t num, size_t size) | 699 | struct kvec *vec, size_t num, size_t size) |
| 646 | { | 700 | { |
| @@ -811,6 +865,16 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | |||
| 811 | } | 865 | } |
| 812 | EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); | 866 | EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); |
| 813 | 867 | ||
| 868 | /** | ||
| 869 | * sock_recvmsg - receive a message from @sock | ||
| 870 | * @sock: socket | ||
| 871 | * @msg: message to receive | ||
| 872 | * @flags: message flags | ||
| 873 | * | ||
| 874 | * Receives @msg from @sock, passing through LSM. Returns the total number | ||
| 875 | * of bytes received, or an error. | ||
| 876 | */ | ||
| 877 | |||
| 814 | static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, | 878 | static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, |
| 815 | int flags) | 879 | int flags) |
| 816 | { | 880 | { |
| @@ -826,20 +890,21 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags) | |||
| 826 | EXPORT_SYMBOL(sock_recvmsg); | 890 | EXPORT_SYMBOL(sock_recvmsg); |
| 827 | 891 | ||
| 828 | /** | 892 | /** |
| 829 | * kernel_recvmsg - Receive a message from a socket (kernel space) | 893 | * kernel_recvmsg - Receive a message from a socket (kernel space) |
| 830 | * @sock: The socket to receive the message from | 894 | * @sock: The socket to receive the message from |
| 831 | * @msg: Received message | 895 | * @msg: Received message |
| 832 | * @vec: Input s/g array for message data | 896 | * @vec: Input s/g array for message data |
| 833 | * @num: Size of input s/g array | 897 | * @num: Size of input s/g array |
| 834 | * @size: Number of bytes to read | 898 | * @size: Number of bytes to read |
| 835 | * @flags: Message flags (MSG_DONTWAIT, etc...) | 899 | * @flags: Message flags (MSG_DONTWAIT, etc...) |
| 836 | * | 900 | * |
| 837 | * On return the msg structure contains the scatter/gather array passed in the | 901 | * On return the msg structure contains the scatter/gather array passed in the |
| 838 | * vec argument. The array is modified so that it consists of the unfilled | 902 | * vec argument. The array is modified so that it consists of the unfilled |
| 839 | * portion of the original array. | 903 | * portion of the original array. |
| 840 | * | 904 | * |
| 841 | * The returned value is the total number of bytes received, or an error. | 905 | * The returned value is the total number of bytes received, or an error. |
| 842 | */ | 906 | */ |
| 907 | |||
| 843 | int kernel_recvmsg(struct socket *sock, struct msghdr *msg, | 908 | int kernel_recvmsg(struct socket *sock, struct msghdr *msg, |
| 844 | struct kvec *vec, size_t num, size_t size, int flags) | 909 | struct kvec *vec, size_t num, size_t size, int flags) |
| 845 | { | 910 | { |
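A matching receive sketch for kernel_recvmsg(), again with a hypothetical helper name; note that @vec is rewritten to describe the unfilled portion, as the comment above states:

#include <linux/net.h>
#include <linux/uio.h>

/* Hypothetical helper: receive up to len bytes into a kernel buffer. */
static int example_recv(struct socket *sock, void *buf, size_t len)
{
        struct msghdr msg = { };
        struct kvec iov = { .iov_base = buf, .iov_len = len };

        /* Blocks unless MSG_DONTWAIT is passed; returns bytes read or -errno. */
        return kernel_recvmsg(sock, &msg, &iov, 1, len, 0);
}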
| @@ -1005,6 +1070,13 @@ static long sock_do_ioctl(struct net *net, struct socket *sock, | |||
| 1005 | * what to do with it - that's up to the protocol still. | 1070 | * what to do with it - that's up to the protocol still. |
| 1006 | */ | 1071 | */ |
| 1007 | 1072 | ||
| 1073 | /** | ||
| 1074 | * get_net_ns - increment the refcount of the network namespace | ||
| 1075 | * @ns: common namespace (net) | ||
| 1076 | * | ||
| 1077 | * Returns the net's common namespace. | ||
| 1078 | */ | ||
| 1079 | |||
| 1008 | struct ns_common *get_net_ns(struct ns_common *ns) | 1080 | struct ns_common *get_net_ns(struct ns_common *ns) |
| 1009 | { | 1081 | { |
| 1010 | return &get_net(container_of(ns, struct net, ns))->ns; | 1082 | return &get_net(container_of(ns, struct net, ns))->ns; |
| @@ -1099,6 +1171,19 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
| 1099 | return err; | 1171 | return err; |
| 1100 | } | 1172 | } |
| 1101 | 1173 | ||
| 1174 | /** | ||
| 1175 | * sock_create_lite - creates a socket | ||
| 1176 | * @family: protocol family (AF_INET, ...) | ||
| 1177 | * @type: communication type (SOCK_STREAM, ...) | ||
| 1178 | * @protocol: protocol (0, ...) | ||
| 1179 | * @res: new socket | ||
| 1180 | * | ||
| 1181 | * Creates a new socket and assigns it to @res, passing through LSM. | ||
| 1182 | * The new socket initialization is not complete, see kernel_accept(). | ||
| 1183 | * Returns 0 or an error. On failure @res is set to %NULL. | ||
| 1184 | * This function internally uses GFP_KERNEL. | ||
| 1185 | */ | ||
| 1186 | |||
| 1102 | int sock_create_lite(int family, int type, int protocol, struct socket **res) | 1187 | int sock_create_lite(int family, int type, int protocol, struct socket **res) |
| 1103 | { | 1188 | { |
| 1104 | int err; | 1189 | int err; |
| @@ -1224,6 +1309,21 @@ call_kill: | |||
| 1224 | } | 1309 | } |
| 1225 | EXPORT_SYMBOL(sock_wake_async); | 1310 | EXPORT_SYMBOL(sock_wake_async); |
| 1226 | 1311 | ||
| 1312 | /** | ||
| 1313 | * __sock_create - creates a socket | ||
| 1314 | * @net: net namespace | ||
| 1315 | * @family: protocol family (AF_INET, ...) | ||
| 1316 | * @type: communication type (SOCK_STREAM, ...) | ||
| 1317 | * @protocol: protocol (0, ...) | ||
| 1318 | * @res: new socket | ||
| 1319 | * @kern: boolean for kernel space sockets | ||
| 1320 | * | ||
| 1321 | * Creates a new socket and assigns it to @res, passing through LSM. | ||
| 1322 | * Returns 0 or an error. On failure @res is set to %NULL. @kern must | ||
| 1323 | * be set to true if the socket resides in kernel space. | ||
| 1324 | * This function internally uses GFP_KERNEL. | ||
| 1325 | */ | ||
| 1326 | |||
| 1227 | int __sock_create(struct net *net, int family, int type, int protocol, | 1327 | int __sock_create(struct net *net, int family, int type, int protocol, |
| 1228 | struct socket **res, int kern) | 1328 | struct socket **res, int kern) |
| 1229 | { | 1329 | { |
| @@ -1333,12 +1433,35 @@ out_release: | |||
| 1333 | } | 1433 | } |
| 1334 | EXPORT_SYMBOL(__sock_create); | 1434 | EXPORT_SYMBOL(__sock_create); |
| 1335 | 1435 | ||
| 1436 | /** | ||
| 1437 | * sock_create - creates a socket | ||
| 1438 | * @family: protocol family (AF_INET, ...) | ||
| 1439 | * @type: communication type (SOCK_STREAM, ...) | ||
| 1440 | * @protocol: protocol (0, ...) | ||
| 1441 | * @res: new socket | ||
| 1442 | * | ||
| 1443 | * A wrapper around __sock_create(). | ||
| 1444 | * Returns 0 or an error. This function internally uses GFP_KERNEL. | ||
| 1445 | */ | ||
| 1446 | |||
| 1336 | int sock_create(int family, int type, int protocol, struct socket **res) | 1447 | int sock_create(int family, int type, int protocol, struct socket **res) |
| 1337 | { | 1448 | { |
| 1338 | return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); | 1449 | return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); |
| 1339 | } | 1450 | } |
| 1340 | EXPORT_SYMBOL(sock_create); | 1451 | EXPORT_SYMBOL(sock_create); |
| 1341 | 1452 | ||
| 1453 | /** | ||
| 1454 | * sock_create_kern - creates a socket (kernel space) | ||
| 1455 | * @net: net namespace | ||
| 1456 | * @family: protocol family (AF_INET, ...) | ||
| 1457 | * @type: communication type (SOCK_STREAM, ...) | ||
| 1458 | * @protocol: protocol (0, ...) | ||
| 1459 | * @res: new socket | ||
| 1460 | * | ||
| 1461 | * A wrapper around __sock_create(). | ||
| 1462 | * Returns 0 or an error. This function internally uses GFP_KERNEL. | ||
| 1463 | */ | ||
| 1464 | |||
| 1342 | int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res) | 1465 | int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res) |
| 1343 | { | 1466 | { |
| 1344 | return __sock_create(net, family, type, protocol, res, 1); | 1467 | return __sock_create(net, family, type, protocol, res, 1); |
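For illustration, a kernel-space caller would typically combine sock_create_kern() with the kernel_* helpers below; the function name and the IPv4/TCP choice here are only an example:

#include <linux/in.h>
#include <linux/net.h>
#include <net/net_namespace.h>

/* Hypothetical helper: create a kernel-owned TCP socket in init_net. */
static int example_make_sock(struct socket **res)
{
        /* kern=1 internally: the socket belongs to the kernel, not to a process. */
        return sock_create_kern(&init_net, AF_INET, SOCK_STREAM, IPPROTO_TCP, res);
}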
| @@ -3322,18 +3445,46 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd, | |||
| 3322 | } | 3445 | } |
| 3323 | #endif | 3446 | #endif |
| 3324 | 3447 | ||
| 3448 | /** | ||
| 3449 | * kernel_bind - bind an address to a socket (kernel space) | ||
| 3450 | * @sock: socket | ||
| 3451 | * @addr: address | ||
| 3452 | * @addrlen: length of address | ||
| 3453 | * | ||
| 3454 | * Returns 0 or an error. | ||
| 3455 | */ | ||
| 3456 | |||
| 3325 | int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) | 3457 | int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) |
| 3326 | { | 3458 | { |
| 3327 | return sock->ops->bind(sock, addr, addrlen); | 3459 | return sock->ops->bind(sock, addr, addrlen); |
| 3328 | } | 3460 | } |
| 3329 | EXPORT_SYMBOL(kernel_bind); | 3461 | EXPORT_SYMBOL(kernel_bind); |
| 3330 | 3462 | ||
| 3463 | /** | ||
| 3464 | * kernel_listen - move socket to listening state (kernel space) | ||
| 3465 | * @sock: socket | ||
| 3466 | * @backlog: pending connections queue size | ||
| 3467 | * | ||
| 3468 | * Returns 0 or an error. | ||
| 3469 | */ | ||
| 3470 | |||
| 3331 | int kernel_listen(struct socket *sock, int backlog) | 3471 | int kernel_listen(struct socket *sock, int backlog) |
| 3332 | { | 3472 | { |
| 3333 | return sock->ops->listen(sock, backlog); | 3473 | return sock->ops->listen(sock, backlog); |
| 3334 | } | 3474 | } |
| 3335 | EXPORT_SYMBOL(kernel_listen); | 3475 | EXPORT_SYMBOL(kernel_listen); |
| 3336 | 3476 | ||
| 3477 | /** | ||
| 3478 | * kernel_accept - accept a connection (kernel space) | ||
| 3479 | * @sock: listening socket | ||
| 3480 | * @newsock: new connected socket | ||
| 3481 | * @flags: flags | ||
| 3482 | * | ||
| 3483 | * @flags must be SOCK_CLOEXEC, SOCK_NONBLOCK or 0. | ||
| 3484 | * If it fails, @newsock is guaranteed to be %NULL. | ||
| 3485 | * Returns 0 or an error. | ||
| 3486 | */ | ||
| 3487 | |||
| 3337 | int kernel_accept(struct socket *sock, struct socket **newsock, int flags) | 3488 | int kernel_accept(struct socket *sock, struct socket **newsock, int flags) |
| 3338 | { | 3489 | { |
| 3339 | struct sock *sk = sock->sk; | 3490 | struct sock *sk = sock->sk; |
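Taken together, the three helpers above give a kernel-space listener; a sketch with a hypothetical helper name, a fixed backlog and abbreviated error handling:

#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>

/* Hypothetical helper: bind, listen and accept one connection on @port. */
static int example_listen_once(struct socket *sock, __be16 port,
                               struct socket **newsock)
{
        struct sockaddr_in addr = {
                .sin_family      = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_ANY),
                .sin_port        = port,
        };
        int err;

        err = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
        if (err)
                return err;

        err = kernel_listen(sock, 5);
        if (err)
                return err;

        /* On failure *newsock is guaranteed to be NULL, per the comment above. */
        return kernel_accept(sock, newsock, 0);
}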
| @@ -3359,6 +3510,19 @@ done: | |||
| 3359 | } | 3510 | } |
| 3360 | EXPORT_SYMBOL(kernel_accept); | 3511 | EXPORT_SYMBOL(kernel_accept); |
| 3361 | 3512 | ||
| 3513 | /** | ||
| 3514 | * kernel_connect - connect a socket (kernel space) | ||
| 3515 | * @sock: socket | ||
| 3516 | * @addr: address | ||
| 3517 | * @addrlen: address length | ||
| 3518 | * @flags: flags (O_NONBLOCK, ...) | ||
| 3519 | * | ||
| 3520 | * For datagram sockets, @addr is the address to which datagrams are sent | ||
| 3521 | * by default, and the only address from which datagrams are received. | ||
| 3522 | * For stream sockets, attempts to connect to @addr. | ||
| 3523 | * Returns 0 or an error code. | ||
| 3524 | */ | ||
| 3525 | |||
| 3362 | int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, | 3526 | int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, |
| 3363 | int flags) | 3527 | int flags) |
| 3364 | { | 3528 | { |
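A complementary sketch for the connecting side, again with a hypothetical helper name; passing 0 for @flags makes a stream connect block until the handshake completes or fails:

#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>

/* Hypothetical helper: connect a kernel socket to 127.0.0.1:@port. */
static int example_connect(struct socket *sock, __be16 port)
{
        struct sockaddr_in daddr = {
                .sin_family      = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
                .sin_port        = port,
        };

        return kernel_connect(sock, (struct sockaddr *)&daddr, sizeof(daddr), 0);
}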
| @@ -3366,18 +3530,48 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, | |||
| 3366 | } | 3530 | } |
| 3367 | EXPORT_SYMBOL(kernel_connect); | 3531 | EXPORT_SYMBOL(kernel_connect); |
| 3368 | 3532 | ||
| 3533 | /** | ||
| 3534 | * kernel_getsockname - get the address to which the socket is bound (kernel space) | ||
| 3535 | * @sock: socket | ||
| 3536 | * @addr: address holder | ||
| 3537 | * | ||
| 3538 | * Fills the @addr pointer with the address to which the socket is bound. | ||
| 3539 | * Returns 0 or an error code. | ||
| 3540 | */ | ||
| 3541 | |||
| 3369 | int kernel_getsockname(struct socket *sock, struct sockaddr *addr) | 3542 | int kernel_getsockname(struct socket *sock, struct sockaddr *addr) |
| 3370 | { | 3543 | { |
| 3371 | return sock->ops->getname(sock, addr, 0); | 3544 | return sock->ops->getname(sock, addr, 0); |
| 3372 | } | 3545 | } |
| 3373 | EXPORT_SYMBOL(kernel_getsockname); | 3546 | EXPORT_SYMBOL(kernel_getsockname); |
| 3374 | 3547 | ||
| 3548 | /** | ||
| 3549 | * kernel_getpeername - get the address to which the socket is connected (kernel space) | ||
| 3550 | * @sock: socket | ||
| 3551 | * @addr: address holder | ||
| 3552 | * | ||
| 3553 | * Fills the @addr pointer with the address to which the socket is connected. | ||
| 3554 | * Returns 0 or an error code. | ||
| 3555 | */ | ||
| 3556 | |||
| 3375 | int kernel_getpeername(struct socket *sock, struct sockaddr *addr) | 3557 | int kernel_getpeername(struct socket *sock, struct sockaddr *addr) |
| 3376 | { | 3558 | { |
| 3377 | return sock->ops->getname(sock, addr, 1); | 3559 | return sock->ops->getname(sock, addr, 1); |
| 3378 | } | 3560 | } |
| 3379 | EXPORT_SYMBOL(kernel_getpeername); | 3561 | EXPORT_SYMBOL(kernel_getpeername); |
| 3380 | 3562 | ||
| 3563 | /** | ||
| 3564 | * kernel_getsockopt - get a socket option (kernel space) | ||
| 3565 | * @sock: socket | ||
| 3566 | * @level: API level (SOL_SOCKET, ...) | ||
| 3567 | * @optname: option tag | ||
| 3568 | * @optval: option value | ||
| 3569 | * @optlen: option length | ||
| 3570 | * | ||
| 3571 | * Assigns the option length to @optlen. | ||
| 3572 | * Returns 0 or an error. | ||
| 3573 | */ | ||
| 3574 | |||
| 3381 | int kernel_getsockopt(struct socket *sock, int level, int optname, | 3575 | int kernel_getsockopt(struct socket *sock, int level, int optname, |
| 3382 | char *optval, int *optlen) | 3576 | char *optval, int *optlen) |
| 3383 | { | 3577 | { |
| @@ -3400,6 +3594,17 @@ int kernel_getsockopt(struct socket *sock, int level, int optname, | |||
| 3400 | } | 3594 | } |
| 3401 | EXPORT_SYMBOL(kernel_getsockopt); | 3595 | EXPORT_SYMBOL(kernel_getsockopt); |
| 3402 | 3596 | ||
| 3597 | /** | ||
| 3598 | * kernel_setsockopt - set a socket option (kernel space) | ||
| 3599 | * @sock: socket | ||
| 3600 | * @level: API level (SOL_SOCKET, ...) | ||
| 3601 | * @optname: option tag | ||
| 3602 | * @optval: option value | ||
| 3603 | * @optlen: option length | ||
| 3604 | * | ||
| 3605 | * Returns 0 or an error. | ||
| 3606 | */ | ||
| 3607 | |||
| 3403 | int kernel_setsockopt(struct socket *sock, int level, int optname, | 3608 | int kernel_setsockopt(struct socket *sock, int level, int optname, |
| 3404 | char *optval, unsigned int optlen) | 3609 | char *optval, unsigned int optlen) |
| 3405 | { | 3610 | { |
| @@ -3420,6 +3625,17 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, | |||
| 3420 | } | 3625 | } |
| 3421 | EXPORT_SYMBOL(kernel_setsockopt); | 3626 | EXPORT_SYMBOL(kernel_setsockopt); |
| 3422 | 3627 | ||
| 3628 | /** | ||
| 3629 | * kernel_sendpage - send a &page through a socket (kernel space) | ||
| 3630 | * @sock: socket | ||
| 3631 | * @page: page | ||
| 3632 | * @offset: page offset | ||
| 3633 | * @size: total size in bytes | ||
| 3634 | * @flags: flags (MSG_DONTWAIT, ...) | ||
| 3635 | * | ||
| 3636 | * Returns the total amount sent in bytes or an error. | ||
| 3637 | */ | ||
| 3638 | |||
| 3423 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, | 3639 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, |
| 3424 | size_t size, int flags) | 3640 | size_t size, int flags) |
| 3425 | { | 3641 | { |
| @@ -3430,6 +3646,18 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, | |||
| 3430 | } | 3646 | } |
| 3431 | EXPORT_SYMBOL(kernel_sendpage); | 3647 | EXPORT_SYMBOL(kernel_sendpage); |
| 3432 | 3648 | ||
| 3649 | /** | ||
| 3650 | * kernel_sendpage_locked - send a &page through the locked sock (kernel space) | ||
| 3651 | * @sk: sock | ||
| 3652 | * @page: page | ||
| 3653 | * @offset: page offset | ||
| 3654 | * @size: total size in bytes | ||
| 3655 | * @flags: flags (MSG_DONTWAIT, ...) | ||
| 3656 | * | ||
| 3657 | * Returns the total amount sent in bytes or an error. | ||
| 3658 | * Caller must hold @sk. | ||
| 3659 | */ | ||
| 3660 | |||
| 3433 | int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, | 3661 | int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, |
| 3434 | size_t size, int flags) | 3662 | size_t size, int flags) |
| 3435 | { | 3663 | { |
| @@ -3443,17 +3671,30 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, | |||
| 3443 | } | 3671 | } |
| 3444 | EXPORT_SYMBOL(kernel_sendpage_locked); | 3672 | EXPORT_SYMBOL(kernel_sendpage_locked); |
| 3445 | 3673 | ||
| 3674 | /** | ||
| 3675 | * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space) | ||
| 3676 | * @sock: socket | ||
| 3677 | * @how: which part of the connection to shut down (SHUT_RD, SHUT_WR or SHUT_RDWR) | ||
| 3678 | * | ||
| 3679 | * Returns 0 or an error. | ||
| 3680 | */ | ||
| 3681 | |||
| 3446 | int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) | 3682 | int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) |
| 3447 | { | 3683 | { |
| 3448 | return sock->ops->shutdown(sock, how); | 3684 | return sock->ops->shutdown(sock, how); |
| 3449 | } | 3685 | } |
| 3450 | EXPORT_SYMBOL(kernel_sock_shutdown); | 3686 | EXPORT_SYMBOL(kernel_sock_shutdown); |
| 3451 | 3687 | ||
| 3452 | /* This routine returns the IP overhead imposed by a socket i.e. | 3688 | /** |
| 3453 | * the length of the underlying IP header, depending on whether | 3689 | * kernel_sock_ip_overhead - returns the IP overhead imposed by a socket |
| 3454 | * this is an IPv4 or IPv6 socket and the length from IP options turned | 3690 | * @sk: socket |
| 3455 | * on at the socket. Assumes that the caller has a lock on the socket. | 3691 | * |
| 3692 | * This routine returns the IP overhead imposed by a socket i.e. | ||
| 3693 | * the length of the underlying IP header, depending on whether | ||
| 3694 | * this is an IPv4 or IPv6 socket and the length from IP options turned | ||
| 3695 | * on at the socket. Assumes that the caller has a lock on the socket. | ||
| 3456 | */ | 3696 | */ |
| 3697 | |||
| 3457 | u32 kernel_sock_ip_overhead(struct sock *sk) | 3698 | u32 kernel_sock_ip_overhead(struct sock *sk) |
| 3458 | { | 3699 | { |
| 3459 | struct inet_sock *inet; | 3700 | struct inet_sock *inet; |
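For completeness, a one-line sketch of kernel_sock_shutdown() with a hypothetical helper name; @how is one of the enum sock_shutdown_cmd values:

#include <linux/net.h>

/* Hypothetical helper: stop transmitting but keep receiving. */
static int example_half_close(struct socket *sock)
{
        return kernel_sock_shutdown(sock, SHUT_WR);
}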
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index da1a676860ca..860dcfb95ee4 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c | |||
| @@ -550,6 +550,8 @@ EXPORT_SYMBOL_GPL(strp_check_rcv); | |||
| 550 | static int __init strp_mod_init(void) | 550 | static int __init strp_mod_init(void) |
| 551 | { | 551 | { |
| 552 | strp_wq = create_singlethread_workqueue("kstrp"); | 552 | strp_wq = create_singlethread_workqueue("kstrp"); |
| 553 | if (unlikely(!strp_wq)) | ||
| 554 | return -ENOMEM; | ||
| 553 | 555 | ||
| 554 | return 0; | 556 | return 0; |
| 555 | } | 557 | } |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 228970e6e52b..187d10443a15 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -2311,6 +2311,15 @@ out_exit: | |||
| 2311 | rpc_exit(task, status); | 2311 | rpc_exit(task, status); |
| 2312 | } | 2312 | } |
| 2313 | 2313 | ||
| 2314 | static bool | ||
| 2315 | rpc_check_connected(const struct rpc_rqst *req) | ||
| 2316 | { | ||
| 2317 | /* No allocated request or transport? return true */ | ||
| 2318 | if (!req || !req->rq_xprt) | ||
| 2319 | return true; | ||
| 2320 | return xprt_connected(req->rq_xprt); | ||
| 2321 | } | ||
| 2322 | |||
| 2314 | static void | 2323 | static void |
| 2315 | rpc_check_timeout(struct rpc_task *task) | 2324 | rpc_check_timeout(struct rpc_task *task) |
| 2316 | { | 2325 | { |
| @@ -2322,10 +2331,11 @@ rpc_check_timeout(struct rpc_task *task) | |||
| 2322 | dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); | 2331 | dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); |
| 2323 | task->tk_timeouts++; | 2332 | task->tk_timeouts++; |
| 2324 | 2333 | ||
| 2325 | if (RPC_IS_SOFTCONN(task)) { | 2334 | if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) { |
| 2326 | rpc_exit(task, -ETIMEDOUT); | 2335 | rpc_exit(task, -ETIMEDOUT); |
| 2327 | return; | 2336 | return; |
| 2328 | } | 2337 | } |
| 2338 | |||
| 2329 | if (RPC_IS_SOFT(task)) { | 2339 | if (RPC_IS_SOFT(task)) { |
| 2330 | if (clnt->cl_chatty) { | 2340 | if (clnt->cl_chatty) { |
| 2331 | printk(KERN_NOTICE "%s: server %s not responding, timed out\n", | 2341 | printk(KERN_NOTICE "%s: server %s not responding, timed out\n", |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9359539907ba..732d4b57411a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -495,8 +495,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg, | |||
| 495 | int flags, struct rpc_rqst *req) | 495 | int flags, struct rpc_rqst *req) |
| 496 | { | 496 | { |
| 497 | struct xdr_buf *buf = &req->rq_private_buf; | 497 | struct xdr_buf *buf = &req->rq_private_buf; |
| 498 | size_t want, read; | 498 | size_t want, uninitialized_var(read); |
| 499 | ssize_t ret; | 499 | ssize_t uninitialized_var(ret); |
| 500 | 500 | ||
| 501 | xs_read_header(transport, buf); | 501 | xs_read_header(transport, buf); |
| 502 | 502 | ||
diff --git a/net/tipc/group.c b/net/tipc/group.c index 06fee142f09f..63f39201e41e 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c | |||
| @@ -919,6 +919,9 @@ int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb) | |||
| 919 | { | 919 | { |
| 920 | struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP); | 920 | struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP); |
| 921 | 921 | ||
| 922 | if (!group) | ||
| 923 | return -EMSGSIZE; | ||
| 924 | |||
| 922 | if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID, | 925 | if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID, |
| 923 | grp->type) || | 926 | grp->type) || |
| 924 | nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE, | 927 | nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE, |
diff --git a/net/tipc/net.c b/net/tipc/net.c index f076edb74338..7ce1e86b024f 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
| @@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr) | |||
| 163 | 163 | ||
| 164 | void tipc_net_stop(struct net *net) | 164 | void tipc_net_stop(struct net *net) |
| 165 | { | 165 | { |
| 166 | u32 self = tipc_own_addr(net); | 166 | if (!tipc_own_id(net)) |
| 167 | |||
| 168 | if (!self) | ||
| 169 | return; | 167 | return; |
| 170 | 168 | ||
| 171 | tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self); | ||
| 172 | rtnl_lock(); | 169 | rtnl_lock(); |
| 173 | tipc_bearer_stop(net); | 170 | tipc_bearer_stop(net); |
| 174 | tipc_node_stop(net); | 171 | tipc_node_stop(net); |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 4ad3586da8f0..340a6e7c43a7 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
| @@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | |||
| 267 | if (msg->rep_type) | 267 | if (msg->rep_type) |
| 268 | tipc_tlv_init(msg->rep, msg->rep_type); | 268 | tipc_tlv_init(msg->rep, msg->rep_type); |
| 269 | 269 | ||
| 270 | if (cmd->header) | 270 | if (cmd->header) { |
| 271 | (*cmd->header)(msg); | 271 | err = (*cmd->header)(msg); |
| 272 | if (err) { | ||
| 273 | kfree_skb(msg->rep); | ||
| 274 | msg->rep = NULL; | ||
| 275 | return err; | ||
| 276 | } | ||
| 277 | } | ||
| 272 | 278 | ||
| 273 | arg = nlmsg_new(0, GFP_KERNEL); | 279 | arg = nlmsg_new(0, GFP_KERNEL); |
| 274 | if (!arg) { | 280 | if (!arg) { |
| @@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 397 | if (!bearer) | 403 | if (!bearer) |
| 398 | return -EMSGSIZE; | 404 | return -EMSGSIZE; |
| 399 | 405 | ||
| 400 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); | 406 | len = TLV_GET_DATA_LEN(msg->req); |
| 407 | len -= offsetof(struct tipc_bearer_config, name); | ||
| 408 | if (len <= 0) | ||
| 409 | return -EINVAL; | ||
| 410 | |||
| 411 | len = min_t(int, len, TIPC_MAX_BEARER_NAME); | ||
| 401 | if (!string_is_valid(b->name, len)) | 412 | if (!string_is_valid(b->name, len)) |
| 402 | return -EINVAL; | 413 | return -EINVAL; |
| 403 | 414 | ||
| @@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 766 | 777 | ||
| 767 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | 778 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); |
| 768 | 779 | ||
| 769 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); | 780 | len = TLV_GET_DATA_LEN(msg->req); |
| 781 | len -= offsetof(struct tipc_link_config, name); | ||
| 782 | if (len <= 0) | ||
| 783 | return -EINVAL; | ||
| 784 | |||
| 785 | len = min_t(int, len, TIPC_MAX_LINK_NAME); | ||
| 770 | if (!string_is_valid(lc->name, len)) | 786 | if (!string_is_valid(lc->name, len)) |
| 771 | return -EINVAL; | 787 | return -EINVAL; |
| 772 | 788 | ||
diff --git a/net/tipc/node.c b/net/tipc/node.c index 2dc4919ab23c..dd3b6dc17662 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
| @@ -817,10 +817,10 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, | |||
| 817 | static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) | 817 | static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) |
| 818 | { | 818 | { |
| 819 | struct tipc_link_entry *le = &n->links[bearer_id]; | 819 | struct tipc_link_entry *le = &n->links[bearer_id]; |
| 820 | struct tipc_media_addr *maddr = NULL; | ||
| 820 | struct tipc_link *l = le->link; | 821 | struct tipc_link *l = le->link; |
| 821 | struct tipc_media_addr *maddr; | ||
| 822 | struct sk_buff_head xmitq; | ||
| 823 | int old_bearer_id = bearer_id; | 822 | int old_bearer_id = bearer_id; |
| 823 | struct sk_buff_head xmitq; | ||
| 824 | 824 | ||
| 825 | if (!l) | 825 | if (!l) |
| 826 | return; | 826 | return; |
| @@ -844,7 +844,8 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) | |||
| 844 | tipc_node_write_unlock(n); | 844 | tipc_node_write_unlock(n); |
| 845 | if (delete) | 845 | if (delete) |
| 846 | tipc_mon_remove_peer(n->net, n->addr, old_bearer_id); | 846 | tipc_mon_remove_peer(n->net, n->addr, old_bearer_id); |
| 847 | tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); | 847 | if (!skb_queue_empty(&xmitq)) |
| 848 | tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); | ||
| 848 | tipc_sk_rcv(n->net, &le->inputq); | 849 | tipc_sk_rcv(n->net, &le->inputq); |
| 849 | } | 850 | } |
| 850 | 851 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3274ef625dba..b542f14ed444 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -2349,6 +2349,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) | |||
| 2349 | return 0; | 2349 | return 0; |
| 2350 | } | 2350 | } |
| 2351 | 2351 | ||
| 2352 | static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr) | ||
| 2353 | { | ||
| 2354 | if (addr->family != AF_TIPC) | ||
| 2355 | return false; | ||
| 2356 | if (addr->addrtype == TIPC_SERVICE_RANGE) | ||
| 2357 | return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper); | ||
| 2358 | return (addr->addrtype == TIPC_SERVICE_ADDR || | ||
| 2359 | addr->addrtype == TIPC_SOCKET_ADDR); | ||
| 2360 | } | ||
| 2361 | |||
| 2352 | /** | 2362 | /** |
| 2353 | * tipc_connect - establish a connection to another TIPC port | 2363 | * tipc_connect - establish a connection to another TIPC port |
| 2354 | * @sock: socket structure | 2364 | * @sock: socket structure |
| @@ -2384,18 +2394,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest, | |||
| 2384 | if (!tipc_sk_type_connectionless(sk)) | 2394 | if (!tipc_sk_type_connectionless(sk)) |
| 2385 | res = -EINVAL; | 2395 | res = -EINVAL; |
| 2386 | goto exit; | 2396 | goto exit; |
| 2387 | } else if (dst->family != AF_TIPC) { | ||
| 2388 | res = -EINVAL; | ||
| 2389 | } | 2397 | } |
| 2390 | if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME) | 2398 | if (!tipc_sockaddr_is_sane(dst)) { |
| 2391 | res = -EINVAL; | 2399 | res = -EINVAL; |
| 2392 | if (res) | ||
| 2393 | goto exit; | 2400 | goto exit; |
| 2394 | 2401 | } | |
| 2395 | /* DGRAM/RDM connect(), just save the destaddr */ | 2402 | /* DGRAM/RDM connect(), just save the destaddr */ |
| 2396 | if (tipc_sk_type_connectionless(sk)) { | 2403 | if (tipc_sk_type_connectionless(sk)) { |
| 2397 | memcpy(&tsk->peer, dest, destlen); | 2404 | memcpy(&tsk->peer, dest, destlen); |
| 2398 | goto exit; | 2405 | goto exit; |
| 2406 | } else if (dst->addrtype == TIPC_SERVICE_RANGE) { | ||
| 2407 | res = -EINVAL; | ||
| 2408 | goto exit; | ||
| 2399 | } | 2409 | } |
| 2400 | 2410 | ||
| 2401 | previous = sk->sk_state; | 2411 | previous = sk->sk_state; |
| @@ -3255,6 +3265,8 @@ static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) | |||
| 3255 | peer_port = tsk_peer_port(tsk); | 3265 | peer_port = tsk_peer_port(tsk); |
| 3256 | 3266 | ||
| 3257 | nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); | 3267 | nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); |
| 3268 | if (!nest) | ||
| 3269 | return -EMSGSIZE; | ||
| 3258 | 3270 | ||
| 3259 | if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) | 3271 | if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) |
| 3260 | goto msg_full; | 3272 | goto msg_full; |
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 4a708a4e8583..b45932d78004 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c | |||
| @@ -363,6 +363,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv, | |||
| 363 | struct tipc_subscription *sub; | 363 | struct tipc_subscription *sub; |
| 364 | 364 | ||
| 365 | if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) { | 365 | if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) { |
| 366 | s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL); | ||
| 366 | tipc_conn_delete_sub(con, s); | 367 | tipc_conn_delete_sub(con, s); |
| 367 | return 0; | 368 | return 0; |
| 368 | } | 369 | } |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 425351ac2a9b..20b191227969 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
| @@ -1484,6 +1484,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, | |||
| 1484 | 1484 | ||
| 1485 | return err; | 1485 | return err; |
| 1486 | } | 1486 | } |
| 1487 | } else { | ||
| 1488 | *zc = false; | ||
| 1487 | } | 1489 | } |
| 1488 | 1490 | ||
| 1489 | rxm->full_len -= padding_length(ctx, tls_ctx, skb); | 1491 | rxm->full_len -= padding_length(ctx, tls_ctx, skb); |
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 77520eacee8f..989e52386c35 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
| @@ -193,9 +193,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem) | |||
| 193 | 193 | ||
| 194 | static void xdp_umem_release(struct xdp_umem *umem) | 194 | static void xdp_umem_release(struct xdp_umem *umem) |
| 195 | { | 195 | { |
| 196 | struct task_struct *task; | ||
| 197 | struct mm_struct *mm; | ||
| 198 | |||
| 199 | xdp_umem_clear_dev(umem); | 196 | xdp_umem_clear_dev(umem); |
| 200 | 197 | ||
| 201 | ida_simple_remove(&umem_ida, umem->id); | 198 | ida_simple_remove(&umem_ida, umem->id); |
| @@ -214,21 +211,10 @@ static void xdp_umem_release(struct xdp_umem *umem) | |||
| 214 | 211 | ||
| 215 | xdp_umem_unpin_pages(umem); | 212 | xdp_umem_unpin_pages(umem); |
| 216 | 213 | ||
| 217 | task = get_pid_task(umem->pid, PIDTYPE_PID); | ||
| 218 | put_pid(umem->pid); | ||
| 219 | if (!task) | ||
| 220 | goto out; | ||
| 221 | mm = get_task_mm(task); | ||
| 222 | put_task_struct(task); | ||
| 223 | if (!mm) | ||
| 224 | goto out; | ||
| 225 | |||
| 226 | mmput(mm); | ||
| 227 | kfree(umem->pages); | 214 | kfree(umem->pages); |
| 228 | umem->pages = NULL; | 215 | umem->pages = NULL; |
| 229 | 216 | ||
| 230 | xdp_umem_unaccount_pages(umem); | 217 | xdp_umem_unaccount_pages(umem); |
| 231 | out: | ||
| 232 | kfree(umem); | 218 | kfree(umem); |
| 233 | } | 219 | } |
| 234 | 220 | ||
| @@ -357,7 +343,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) | |||
| 357 | if (size_chk < 0) | 343 | if (size_chk < 0) |
| 358 | return -EINVAL; | 344 | return -EINVAL; |
| 359 | 345 | ||
| 360 | umem->pid = get_task_pid(current, PIDTYPE_PID); | ||
| 361 | umem->address = (unsigned long)addr; | 346 | umem->address = (unsigned long)addr; |
| 362 | umem->chunk_mask = ~((u64)chunk_size - 1); | 347 | umem->chunk_mask = ~((u64)chunk_size - 1); |
| 363 | umem->size = size; | 348 | umem->size = size; |
| @@ -373,7 +358,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) | |||
| 373 | 358 | ||
| 374 | err = xdp_umem_account_pages(umem); | 359 | err = xdp_umem_account_pages(umem); |
| 375 | if (err) | 360 | if (err) |
| 376 | goto out; | 361 | return err; |
| 377 | 362 | ||
| 378 | err = xdp_umem_pin_pages(umem); | 363 | err = xdp_umem_pin_pages(umem); |
| 379 | if (err) | 364 | if (err) |
| @@ -392,8 +377,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) | |||
| 392 | 377 | ||
| 393 | out_account: | 378 | out_account: |
| 394 | xdp_umem_unaccount_pages(umem); | 379 | xdp_umem_unaccount_pages(umem); |
| 395 | out: | ||
| 396 | put_pid(umem->pid); | ||
| 397 | return err; | 380 | return err; |
| 398 | } | 381 | } |
| 399 | 382 | ||
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 2554a15ecf2b..76ca30cc4791 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
| @@ -199,11 +199,8 @@ sub_cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ | |||
| 199 | "$(if $(part-of-module),1,0)" "$(@)"; | 199 | "$(if $(part-of-module),1,0)" "$(@)"; |
| 200 | recordmcount_source := $(srctree)/scripts/recordmcount.pl | 200 | recordmcount_source := $(srctree)/scripts/recordmcount.pl |
| 201 | endif # BUILD_C_RECORDMCOUNT | 201 | endif # BUILD_C_RECORDMCOUNT |
| 202 | cmd_record_mcount = \ | 202 | cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)), \ |
| 203 | if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" = \ | 203 | $(sub_cmd_record_mcount)) |
| 204 | "$(CC_FLAGS_FTRACE)" ]; then \ | ||
| 205 | $(sub_cmd_record_mcount) \ | ||
| 206 | fi | ||
| 207 | endif # CC_USING_RECORD_MCOUNT | 204 | endif # CC_USING_RECORD_MCOUNT |
| 208 | endif # CONFIG_FTRACE_MCOUNT_RECORD | 205 | endif # CONFIG_FTRACE_MCOUNT_RECORD |
| 209 | 206 | ||
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 5b756278df13..a09333fd7cef 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
| @@ -5977,7 +5977,7 @@ sub process { | |||
| 5977 | while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) { | 5977 | while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) { |
| 5978 | $specifier = $1; | 5978 | $specifier = $1; |
| 5979 | $extension = $2; | 5979 | $extension = $2; |
| 5980 | if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOx]/) { | 5980 | if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOxt]/) { |
| 5981 | $bad_specifier = $specifier; | 5981 | $bad_specifier = $specifier; |
| 5982 | last; | 5982 | last; |
| 5983 | } | 5983 | } |
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci new file mode 100644 index 000000000000..350145da7669 --- /dev/null +++ b/scripts/coccinelle/api/stream_open.cocci | |||
| @@ -0,0 +1,363 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Author: Kirill Smelkov (kirr@nexedi.com) | ||
| 3 | // | ||
| 4 | // Search for stream-like files that are using nonseekable_open and convert | ||
| 5 | // them to stream_open. A stream-like file is a file that does not use ppos in | ||
| 6 | // its read and write. Rationale for the conversion is to avoid deadlock in | ||
| 7 | // between read and write. | ||
| 8 | |||
| 9 | virtual report | ||
| 10 | virtual patch | ||
| 11 | virtual explain // explain decisions in the patch (SPFLAGS="-D explain") | ||
| 12 | |||
| 13 | // stream-like reader & writer - ones that do not depend on f_pos. | ||
| 14 | @ stream_reader @ | ||
| 15 | identifier readstream, ppos; | ||
| 16 | identifier f, buf, len; | ||
| 17 | type loff_t; | ||
| 18 | @@ | ||
| 19 | ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos) | ||
| 20 | { | ||
| 21 | ... when != ppos | ||
| 22 | } | ||
| 23 | |||
| 24 | @ stream_writer @ | ||
| 25 | identifier writestream, ppos; | ||
| 26 | identifier f, buf, len; | ||
| 27 | type loff_t; | ||
| 28 | @@ | ||
| 29 | ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos) | ||
| 30 | { | ||
| 31 | ... when != ppos | ||
| 32 | } | ||
| 33 | |||
| 34 | |||
| 35 | // a function that blocks | ||
| 36 | @ blocks @ | ||
| 37 | identifier block_f; | ||
| 38 | identifier wait_event =~ "^wait_event_.*"; | ||
| 39 | @@ | ||
| 40 | block_f(...) { | ||
| 41 | ... when exists | ||
| 42 | wait_event(...) | ||
| 43 | ... when exists | ||
| 44 | } | ||
| 45 | |||
| 46 | // stream_reader that can block inside. | ||
| 47 | // | ||
| 48 | // XXX wait_* can be called not directly from current function (e.g. func -> f -> g -> wait()) | ||
| 49 | // XXX currently reader_blocks supports only direct and 1-level indirect cases. | ||
| 50 | @ reader_blocks_direct @ | ||
| 51 | identifier stream_reader.readstream; | ||
| 52 | identifier wait_event =~ "^wait_event_.*"; | ||
| 53 | @@ | ||
| 54 | readstream(...) | ||
| 55 | { | ||
| 56 | ... when exists | ||
| 57 | wait_event(...) | ||
| 58 | ... when exists | ||
| 59 | } | ||
| 60 | |||
| 61 | @ reader_blocks_1 @ | ||
| 62 | identifier stream_reader.readstream; | ||
| 63 | identifier blocks.block_f; | ||
| 64 | @@ | ||
| 65 | readstream(...) | ||
| 66 | { | ||
| 67 | ... when exists | ||
| 68 | block_f(...) | ||
| 69 | ... when exists | ||
| 70 | } | ||
| 71 | |||
| 72 | @ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @ | ||
| 73 | identifier stream_reader.readstream; | ||
| 74 | @@ | ||
| 75 | readstream(...) { | ||
| 76 | ... | ||
| 77 | } | ||
| 78 | |||
| 79 | |||
| 80 | // file_operations + whether they have _any_ .read, .write, .llseek ... at all. | ||
| 81 | // | ||
| 82 | // XXX add support for file_operations xxx[N] = ... (sound/core/pcm_native.c) | ||
| 83 | @ fops0 @ | ||
| 84 | identifier fops; | ||
| 85 | @@ | ||
| 86 | struct file_operations fops = { | ||
| 87 | ... | ||
| 88 | }; | ||
| 89 | |||
| 90 | @ has_read @ | ||
| 91 | identifier fops0.fops; | ||
| 92 | identifier read_f; | ||
| 93 | @@ | ||
| 94 | struct file_operations fops = { | ||
| 95 | .read = read_f, | ||
| 96 | }; | ||
| 97 | |||
| 98 | @ has_read_iter @ | ||
| 99 | identifier fops0.fops; | ||
| 100 | identifier read_iter_f; | ||
| 101 | @@ | ||
| 102 | struct file_operations fops = { | ||
| 103 | .read_iter = read_iter_f, | ||
| 104 | }; | ||
| 105 | |||
| 106 | @ has_write @ | ||
| 107 | identifier fops0.fops; | ||
| 108 | identifier write_f; | ||
| 109 | @@ | ||
| 110 | struct file_operations fops = { | ||
| 111 | .write = write_f, | ||
| 112 | }; | ||
| 113 | |||
| 114 | @ has_write_iter @ | ||
| 115 | identifier fops0.fops; | ||
| 116 | identifier write_iter_f; | ||
| 117 | @@ | ||
| 118 | struct file_operations fops = { | ||
| 119 | .write_iter = write_iter_f, | ||
| 120 | }; | ||
| 121 | |||
| 122 | @ has_llseek @ | ||
| 123 | identifier fops0.fops; | ||
| 124 | identifier llseek_f; | ||
| 125 | @@ | ||
| 126 | struct file_operations fops = { | ||
| 127 | .llseek = llseek_f, | ||
| 128 | }; | ||
| 129 | |||
| 130 | @ has_no_llseek @ | ||
| 131 | identifier fops0.fops; | ||
| 132 | @@ | ||
| 133 | struct file_operations fops = { | ||
| 134 | .llseek = no_llseek, | ||
| 135 | }; | ||
| 136 | |||
| 137 | @ has_mmap @ | ||
| 138 | identifier fops0.fops; | ||
| 139 | identifier mmap_f; | ||
| 140 | @@ | ||
| 141 | struct file_operations fops = { | ||
| 142 | .mmap = mmap_f, | ||
| 143 | }; | ||
| 144 | |||
| 145 | @ has_copy_file_range @ | ||
| 146 | identifier fops0.fops; | ||
| 147 | identifier copy_file_range_f; | ||
| 148 | @@ | ||
| 149 | struct file_operations fops = { | ||
| 150 | .copy_file_range = copy_file_range_f, | ||
| 151 | }; | ||
| 152 | |||
| 153 | @ has_remap_file_range @ | ||
| 154 | identifier fops0.fops; | ||
| 155 | identifier remap_file_range_f; | ||
| 156 | @@ | ||
| 157 | struct file_operations fops = { | ||
| 158 | .remap_file_range = remap_file_range_f, | ||
| 159 | }; | ||
| 160 | |||
| 161 | @ has_splice_read @ | ||
| 162 | identifier fops0.fops; | ||
| 163 | identifier splice_read_f; | ||
| 164 | @@ | ||
| 165 | struct file_operations fops = { | ||
| 166 | .splice_read = splice_read_f, | ||
| 167 | }; | ||
| 168 | |||
| 169 | @ has_splice_write @ | ||
| 170 | identifier fops0.fops; | ||
| 171 | identifier splice_write_f; | ||
| 172 | @@ | ||
| 173 | struct file_operations fops = { | ||
| 174 | .splice_write = splice_write_f, | ||
| 175 | }; | ||
| 176 | |||
| 177 | |||
| 178 | // file_operations that is candidate for stream_open conversion - it does not | ||
| 179 | // use mmap and other methods that assume @offset access to file. | ||
| 180 | // | ||
| 181 | // XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now. | ||
| 182 | // XXX maybe_stream.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops". | ||
| 183 | @ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @ | ||
| 184 | identifier fops0.fops; | ||
| 185 | @@ | ||
| 186 | struct file_operations fops = { | ||
| 187 | }; | ||
| 188 | |||
| 189 | |||
| 190 | // ---- conversions ---- | ||
| 191 | |||
| 192 | // XXX .open = nonseekable_open -> .open = stream_open | ||
| 193 | // XXX .open = func -> openfunc -> nonseekable_open | ||
| 194 | |||
| 195 | // read & write | ||
| 196 | // | ||
| 197 | // if both are used in the same file_operations together with an opener - | ||
| 198 | // under that conditions we can use stream_open instead of nonseekable_open. | ||
| 199 | @ fops_rw depends on maybe_stream @ | ||
| 200 | identifier fops0.fops, openfunc; | ||
| 201 | identifier stream_reader.readstream; | ||
| 202 | identifier stream_writer.writestream; | ||
| 203 | @@ | ||
| 204 | struct file_operations fops = { | ||
| 205 | .open = openfunc, | ||
| 206 | .read = readstream, | ||
| 207 | .write = writestream, | ||
| 208 | }; | ||
| 209 | |||
| 210 | @ report_rw depends on report @ | ||
| 211 | identifier fops_rw.openfunc; | ||
| 212 | position p1; | ||
| 213 | @@ | ||
| 214 | openfunc(...) { | ||
| 215 | <... | ||
| 216 | nonseekable_open@p1 | ||
| 217 | ...> | ||
| 218 | } | ||
| 219 | |||
| 220 | @ script:python depends on report && reader_blocks @ | ||
| 221 | fops << fops0.fops; | ||
| 222 | p << report_rw.p1; | ||
| 223 | @@ | ||
| 224 | coccilib.report.print_report(p[0], | ||
| 225 | "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,)) | ||
| 226 | |||
| 227 | @ script:python depends on report && !reader_blocks @ | ||
| 228 | fops << fops0.fops; | ||
| 229 | p << report_rw.p1; | ||
| 230 | @@ | ||
| 231 | coccilib.report.print_report(p[0], | ||
| 232 | "WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,)) | ||
| 233 | |||
| 234 | |||
| 235 | @ explain_rw_deadlocked depends on explain && reader_blocks @ | ||
| 236 | identifier fops_rw.openfunc; | ||
| 237 | @@ | ||
| 238 | openfunc(...) { | ||
| 239 | <... | ||
| 240 | - nonseekable_open | ||
| 241 | + nonseekable_open /* read & write (was deadlock) */ | ||
| 242 | ...> | ||
| 243 | } | ||
| 244 | |||
| 245 | |||
| 246 | @ explain_rw_nodeadlock depends on explain && !reader_blocks @ | ||
| 247 | identifier fops_rw.openfunc; | ||
| 248 | @@ | ||
| 249 | openfunc(...) { | ||
| 250 | <... | ||
| 251 | - nonseekable_open | ||
| 252 | + nonseekable_open /* read & write (no direct deadlock) */ | ||
| 253 | ...> | ||
| 254 | } | ||
| 255 | |||
| 256 | @ patch_rw depends on patch @ | ||
| 257 | identifier fops_rw.openfunc; | ||
| 258 | @@ | ||
| 259 | openfunc(...) { | ||
| 260 | <... | ||
| 261 | - nonseekable_open | ||
| 262 | + stream_open | ||
| 263 | ...> | ||
| 264 | } | ||
| 265 | |||
| 266 | |||
| 267 | // read, but not write | ||
| 268 | @ fops_r depends on maybe_stream && !has_write @ | ||
| 269 | identifier fops0.fops, openfunc; | ||
| 270 | identifier stream_reader.readstream; | ||
| 271 | @@ | ||
| 272 | struct file_operations fops = { | ||
| 273 | .open = openfunc, | ||
| 274 | .read = readstream, | ||
| 275 | }; | ||
| 276 | |||
| 277 | @ report_r depends on report @ | ||
| 278 | identifier fops_r.openfunc; | ||
| 279 | position p1; | ||
| 280 | @@ | ||
| 281 | openfunc(...) { | ||
| 282 | <... | ||
| 283 | nonseekable_open@p1 | ||
| 284 | ...> | ||
| 285 | } | ||
| 286 | |||
| 287 | @ script:python depends on report @ | ||
| 288 | fops << fops0.fops; | ||
| 289 | p << report_r.p1; | ||
| 290 | @@ | ||
| 291 | coccilib.report.print_report(p[0], | ||
| 292 | "WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,)) | ||
| 293 | |||
| 294 | @ explain_r depends on explain @ | ||
| 295 | identifier fops_r.openfunc; | ||
| 296 | @@ | ||
| 297 | openfunc(...) { | ||
| 298 | <... | ||
| 299 | - nonseekable_open | ||
| 300 | + nonseekable_open /* read only */ | ||
| 301 | ...> | ||
| 302 | } | ||
| 303 | |||
| 304 | @ patch_r depends on patch @ | ||
| 305 | identifier fops_r.openfunc; | ||
| 306 | @@ | ||
| 307 | openfunc(...) { | ||
| 308 | <... | ||
| 309 | - nonseekable_open | ||
| 310 | + stream_open | ||
| 311 | ...> | ||
| 312 | } | ||
| 313 | |||
| 314 | |||
| 315 | // write, but not read | ||
| 316 | @ fops_w depends on maybe_stream && !has_read @ | ||
| 317 | identifier fops0.fops, openfunc; | ||
| 318 | identifier stream_writer.writestream; | ||
| 319 | @@ | ||
| 320 | struct file_operations fops = { | ||
| 321 | .open = openfunc, | ||
| 322 | .write = writestream, | ||
| 323 | }; | ||
| 324 | |||
| 325 | @ report_w depends on report @ | ||
| 326 | identifier fops_w.openfunc; | ||
| 327 | position p1; | ||
| 328 | @@ | ||
| 329 | openfunc(...) { | ||
| 330 | <... | ||
| 331 | nonseekable_open@p1 | ||
| 332 | ...> | ||
| 333 | } | ||
| 334 | |||
| 335 | @ script:python depends on report @ | ||
| 336 | fops << fops0.fops; | ||
| 337 | p << report_w.p1; | ||
| 338 | @@ | ||
| 339 | coccilib.report.print_report(p[0], | ||
| 340 | "WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,)) | ||
| 341 | |||
| 342 | @ explain_w depends on explain @ | ||
| 343 | identifier fops_w.openfunc; | ||
| 344 | @@ | ||
| 345 | openfunc(...) { | ||
| 346 | <... | ||
| 347 | - nonseekable_open | ||
| 348 | + nonseekable_open /* write only */ | ||
| 349 | ...> | ||
| 350 | } | ||
| 351 | |||
| 352 | @ patch_w depends on patch @ | ||
| 353 | identifier fops_w.openfunc; | ||
| 354 | @@ | ||
| 355 | openfunc(...) { | ||
| 356 | <... | ||
| 357 | - nonseekable_open | ||
| 358 | + stream_open | ||
| 359 | ...> | ||
| 360 | } | ||
| 361 | |||
| 362 | |||
| 363 | // no read, no write - don't change anything | ||
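To make the target of the script concrete, a hypothetical driver fragment of the kind it matches: stream-like .read/.write handlers that never touch *ppos, with an open path calling nonseekable_open(). In report mode such a file_operations is flagged; in patch mode the nonseekable_open() call would be rewritten to stream_open().

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/types.h>

/* Hypothetical stream-like device: neither handler uses *ppos. */
static ssize_t ex_read(struct file *f, char __user *buf, size_t len,
                       loff_t *ppos)
{
        return 0;       /* would copy device data to buf, ignoring *ppos */
}

static ssize_t ex_write(struct file *f, const char __user *buf, size_t len,
                        loff_t *ppos)
{
        return len;     /* would consume buf, ignoring *ppos */
}

static int ex_open(struct inode *inode, struct file *filp)
{
        return nonseekable_open(inode, filp);   /* script rewrites this to stream_open() */
}

static const struct file_operations ex_fops = {
        .owner  = THIS_MODULE,
        .open   = ex_open,
        .read   = ex_read,
        .write  = ex_write,
        .llseek = no_llseek,
};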
diff --git a/scripts/coccinelle/free/put_device.cocci b/scripts/coccinelle/free/put_device.cocci index 7395697e7f19..c9f071b0a0ab 100644 --- a/scripts/coccinelle/free/put_device.cocci +++ b/scripts/coccinelle/free/put_device.cocci | |||
| @@ -32,6 +32,7 @@ if (id == NULL || ...) { ... return ...; } | |||
| 32 | ( id | 32 | ( id |
| 33 | | (T2)dev_get_drvdata(&id->dev) | 33 | | (T2)dev_get_drvdata(&id->dev) |
| 34 | | (T3)platform_get_drvdata(id) | 34 | | (T3)platform_get_drvdata(id) |
| 35 | | &id->dev | ||
| 35 | ); | 36 | ); |
| 36 | | return@p2 ...; | 37 | | return@p2 ...; |
| 37 | ) | 38 | ) |
diff --git a/scripts/coccinelle/misc/badty.cocci b/scripts/coccinelle/misc/badty.cocci index 481cf301ccfc..08470362199c 100644 --- a/scripts/coccinelle/misc/badty.cocci +++ b/scripts/coccinelle/misc/badty.cocci | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /// Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element | 1 | /// Correct the size argument to alloc functions |
| 2 | /// | 2 | /// |
| 3 | //# This makes an effort to find cases where the argument to sizeof is wrong | 3 | //# This makes an effort to find cases where the argument to sizeof is wrong |
| 4 | //# in memory allocation functions by checking the type of the allocated memory | 4 | //# in memory allocation functions by checking the type of the allocated memory |
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c index 611945611bf8..1dcfb288ee63 100644 --- a/scripts/kconfig/lxdialog/inputbox.c +++ b/scripts/kconfig/lxdialog/inputbox.c | |||
| @@ -113,7 +113,8 @@ do_resize: | |||
| 113 | case KEY_DOWN: | 113 | case KEY_DOWN: |
| 114 | break; | 114 | break; |
| 115 | case KEY_BACKSPACE: | 115 | case KEY_BACKSPACE: |
| 116 | case 127: | 116 | case 8: /* ^H */ |
| 117 | case 127: /* ^? */ | ||
| 117 | if (pos) { | 118 | if (pos) { |
| 118 | wattrset(dialog, dlg.inputbox.atr); | 119 | wattrset(dialog, dlg.inputbox.atr); |
| 119 | if (input_x == 0) { | 120 | if (input_x == 0) { |
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index a4670f4e825a..ac92c0ded6c5 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c | |||
| @@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans) | |||
| 1048 | state->match_direction = FIND_NEXT_MATCH_UP; | 1048 | state->match_direction = FIND_NEXT_MATCH_UP; |
| 1049 | *ans = get_mext_match(state->pattern, | 1049 | *ans = get_mext_match(state->pattern, |
| 1050 | state->match_direction); | 1050 | state->match_direction); |
| 1051 | } else if (key == KEY_BACKSPACE || key == 127) { | 1051 | } else if (key == KEY_BACKSPACE || key == 8 || key == 127) { |
| 1052 | state->pattern[strlen(state->pattern)-1] = '\0'; | 1052 | state->pattern[strlen(state->pattern)-1] = '\0'; |
| 1053 | adj_match_dir(&state->match_direction); | 1053 | adj_match_dir(&state->match_direction); |
| 1054 | } else | 1054 | } else |
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c index 7be620a1fcdb..77f525a8617c 100644 --- a/scripts/kconfig/nconf.gui.c +++ b/scripts/kconfig/nconf.gui.c | |||
| @@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window, | |||
| 439 | case KEY_F(F_EXIT): | 439 | case KEY_F(F_EXIT): |
| 440 | case KEY_F(F_BACK): | 440 | case KEY_F(F_BACK): |
| 441 | break; | 441 | break; |
| 442 | case 127: | 442 | case 8: /* ^H */ |
| 443 | case 127: /* ^? */ | ||
| 443 | case KEY_BACKSPACE: | 444 | case KEY_BACKSPACE: |
| 444 | if (cursor_position > 0) { | 445 | if (cursor_position > 0) { |
| 445 | memmove(&result[cursor_position-1], | 446 | memmove(&result[cursor_position-1], |
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 0b0d1080b1c5..f277e116e0eb 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
| @@ -639,7 +639,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info, | |||
| 639 | info->sechdrs[sym->st_shndx].sh_offset - | 639 | info->sechdrs[sym->st_shndx].sh_offset - |
| 640 | (info->hdr->e_type != ET_REL ? | 640 | (info->hdr->e_type != ET_REL ? |
| 641 | info->sechdrs[sym->st_shndx].sh_addr : 0); | 641 | info->sechdrs[sym->st_shndx].sh_addr : 0); |
| 642 | crc = *crcp; | 642 | crc = TO_NATIVE(*crcp); |
| 643 | } | 643 | } |
| 644 | sym_update_crc(symname + strlen("__crc_"), mod, crc, | 644 | sym_update_crc(symname + strlen("__crc_"), mod, crc, |
| 645 | export); | 645 | export); |
diff --git a/security/Kconfig b/security/Kconfig index 1d6463fb1450..353cfef71d4e 100644 --- a/security/Kconfig +++ b/security/Kconfig | |||
| @@ -239,8 +239,46 @@ source "security/safesetid/Kconfig" | |||
| 239 | 239 | ||
| 240 | source "security/integrity/Kconfig" | 240 | source "security/integrity/Kconfig" |
| 241 | 241 | ||
| 242 | choice | ||
| 243 | prompt "First legacy 'major LSM' to be initialized" | ||
| 244 | default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX | ||
| 245 | default DEFAULT_SECURITY_SMACK if SECURITY_SMACK | ||
| 246 | default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO | ||
| 247 | default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR | ||
| 248 | default DEFAULT_SECURITY_DAC | ||
| 249 | |||
| 250 | help | ||
| 251 | This choice is there only for converting CONFIG_DEFAULT_SECURITY | ||
| 252 | in old kernel configs to CONFIG_LSM in new kernel configs. Don't | ||
| 253 | change this choice unless you are creating a fresh kernel config, | ||
| 254 | for this choice will be ignored after CONFIG_LSM has been set. | ||
| 255 | |||
| 256 | Selects the legacy "major security module" that will be | ||
| 257 | initialized first. Overridden by non-default CONFIG_LSM. | ||
| 258 | |||
| 259 | config DEFAULT_SECURITY_SELINUX | ||
| 260 | bool "SELinux" if SECURITY_SELINUX=y | ||
| 261 | |||
| 262 | config DEFAULT_SECURITY_SMACK | ||
| 263 | bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y | ||
| 264 | |||
| 265 | config DEFAULT_SECURITY_TOMOYO | ||
| 266 | bool "TOMOYO" if SECURITY_TOMOYO=y | ||
| 267 | |||
| 268 | config DEFAULT_SECURITY_APPARMOR | ||
| 269 | bool "AppArmor" if SECURITY_APPARMOR=y | ||
| 270 | |||
| 271 | config DEFAULT_SECURITY_DAC | ||
| 272 | bool "Unix Discretionary Access Controls" | ||
| 273 | |||
| 274 | endchoice | ||
| 275 | |||
| 242 | config LSM | 276 | config LSM |
| 243 | string "Ordered list of enabled LSMs" | 277 | string "Ordered list of enabled LSMs" |
| 278 | default "yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK | ||
| 279 | default "yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR | ||
| 280 | default "yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO | ||
| 281 | default "yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC | ||
| 244 | default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor" | 282 | default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor" |
| 245 | help | 283 | help |
| 246 | A comma-separated list of LSMs, in initialization order. | 284 | A comma-separated list of LSMs, in initialization order. |
diff --git a/security/keys/trusted.c b/security/keys/trusted.c index bcc9c6ead7fd..efdbf17f3915 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c | |||
| @@ -125,7 +125,7 @@ out: | |||
| 125 | */ | 125 | */ |
| 126 | int TSS_authhmac(unsigned char *digest, const unsigned char *key, | 126 | int TSS_authhmac(unsigned char *digest, const unsigned char *key, |
| 127 | unsigned int keylen, unsigned char *h1, | 127 | unsigned int keylen, unsigned char *h1, |
| 128 | unsigned char *h2, unsigned char h3, ...) | 128 | unsigned char *h2, unsigned int h3, ...) |
| 129 | { | 129 | { |
| 130 | unsigned char paramdigest[SHA1_DIGEST_SIZE]; | 130 | unsigned char paramdigest[SHA1_DIGEST_SIZE]; |
| 131 | struct sdesc *sdesc; | 131 | struct sdesc *sdesc; |
| @@ -135,13 +135,16 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key, | |||
| 135 | int ret; | 135 | int ret; |
| 136 | va_list argp; | 136 | va_list argp; |
| 137 | 137 | ||
| 138 | if (!chip) | ||
| 139 | return -ENODEV; | ||
| 140 | |||
| 138 | sdesc = init_sdesc(hashalg); | 141 | sdesc = init_sdesc(hashalg); |
| 139 | if (IS_ERR(sdesc)) { | 142 | if (IS_ERR(sdesc)) { |
| 140 | pr_info("trusted_key: can't alloc %s\n", hash_alg); | 143 | pr_info("trusted_key: can't alloc %s\n", hash_alg); |
| 141 | return PTR_ERR(sdesc); | 144 | return PTR_ERR(sdesc); |
| 142 | } | 145 | } |
| 143 | 146 | ||
| 144 | c = h3; | 147 | c = !!h3; |
| 145 | ret = crypto_shash_init(&sdesc->shash); | 148 | ret = crypto_shash_init(&sdesc->shash); |
| 146 | if (ret < 0) | 149 | if (ret < 0) |
| 147 | goto out; | 150 | goto out; |
| @@ -196,6 +199,9 @@ int TSS_checkhmac1(unsigned char *buffer, | |||
| 196 | va_list argp; | 199 | va_list argp; |
| 197 | int ret; | 200 | int ret; |
| 198 | 201 | ||
| 202 | if (!chip) | ||
| 203 | return -ENODEV; | ||
| 204 | |||
| 199 | bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); | 205 | bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); |
| 200 | tag = LOAD16(buffer, 0); | 206 | tag = LOAD16(buffer, 0); |
| 201 | ordinal = command; | 207 | ordinal = command; |
| @@ -363,6 +369,9 @@ int trusted_tpm_send(unsigned char *cmd, size_t buflen) | |||
| 363 | { | 369 | { |
| 364 | int rc; | 370 | int rc; |
| 365 | 371 | ||
| 372 | if (!chip) | ||
| 373 | return -ENODEV; | ||
| 374 | |||
| 366 | dump_tpm_buf(cmd); | 375 | dump_tpm_buf(cmd); |
| 367 | rc = tpm_send(chip, cmd, buflen); | 376 | rc = tpm_send(chip, cmd, buflen); |
| 368 | dump_tpm_buf(cmd); | 377 | dump_tpm_buf(cmd); |
| @@ -429,6 +438,9 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) | |||
| 429 | { | 438 | { |
| 430 | int ret; | 439 | int ret; |
| 431 | 440 | ||
| 441 | if (!chip) | ||
| 442 | return -ENODEV; | ||
| 443 | |||
| 432 | INIT_BUF(tb); | 444 | INIT_BUF(tb); |
| 433 | store16(tb, TPM_TAG_RQU_COMMAND); | 445 | store16(tb, TPM_TAG_RQU_COMMAND); |
| 434 | store32(tb, TPM_OIAP_SIZE); | 446 | store32(tb, TPM_OIAP_SIZE); |
| @@ -1245,9 +1257,13 @@ static int __init init_trusted(void) | |||
| 1245 | { | 1257 | { |
| 1246 | int ret; | 1258 | int ret; |
| 1247 | 1259 | ||
| 1260 | /* encrypted_keys.ko depends on successful load of this module even if | ||
| 1261 | * TPM is not used. | ||
| 1262 | */ | ||
| 1248 | chip = tpm_default_chip(); | 1263 | chip = tpm_default_chip(); |
| 1249 | if (!chip) | 1264 | if (!chip) |
| 1250 | return -ENOENT; | 1265 | return 0; |
| 1266 | |||
| 1251 | ret = init_digests(); | 1267 | ret = init_digests(); |
| 1252 | if (ret < 0) | 1268 | if (ret < 0) |
| 1253 | goto err_put; | 1269 | goto err_put; |
| @@ -1269,10 +1285,12 @@ err_put: | |||
| 1269 | 1285 | ||
| 1270 | static void __exit cleanup_trusted(void) | 1286 | static void __exit cleanup_trusted(void) |
| 1271 | { | 1287 | { |
| 1272 | put_device(&chip->dev); | 1288 | if (chip) { |
| 1273 | kfree(digests); | 1289 | put_device(&chip->dev); |
| 1274 | trusted_shash_release(); | 1290 | kfree(digests); |
| 1275 | unregister_key_type(&key_type_trusted); | 1291 | trusted_shash_release(); |
| 1292 | unregister_key_type(&key_type_trusted); | ||
| 1293 | } | ||
| 1276 | } | 1294 | } |
| 1277 | 1295 | ||
| 1278 | late_initcall(init_trusted); | 1296 | late_initcall(init_trusted); |
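The init_trusted()/cleanup_trusted() changes follow a common pattern for optional hardware: module load succeeds without a TPM so that dependent modules (encrypted_keys.ko, per the new comment) can still load, while every entry point checks for the chip and fails with -ENODEV. A distilled sketch of that pattern, with illustrative names only:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>

/* Sketch: init succeeds even when the optional device is absent, so modules
 * that depend on this one can still load, and each entry point checks for
 * the device and returns -ENODEV when it was never found. */
static struct device *opt_dev;

static int opt_do_work(void)
{
	if (!opt_dev)
		return -ENODEV;
	/* ... talk to the hardware ... */
	return 0;
}

static int __init opt_init(void)
{
	opt_dev = NULL;	/* a real driver would look its device up here */
	return 0;	/* never fail just because the device is missing */
}

static void __exit opt_exit(void)
{
	if (opt_dev)
		put_device(opt_dev);	/* release only what was actually taken */
}

module_init(opt_init);
module_exit(opt_exit);
MODULE_LICENSE("GPL");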
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 57cc60722dd3..efac68556b45 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c | |||
| @@ -206,7 +206,7 @@ static void yama_ptracer_del(struct task_struct *tracer, | |||
| 206 | * yama_task_free - check for task_pid to remove from exception list | 206 | * yama_task_free - check for task_pid to remove from exception list |
| 207 | * @task: task being removed | 207 | * @task: task being removed |
| 208 | */ | 208 | */ |
| 209 | void yama_task_free(struct task_struct *task) | 209 | static void yama_task_free(struct task_struct *task) |
| 210 | { | 210 | { |
| 211 | yama_ptracer_del(task, task); | 211 | yama_ptracer_del(task, task); |
| 212 | } | 212 | } |
| @@ -222,7 +222,7 @@ void yama_task_free(struct task_struct *task) | |||
| 222 | * Return 0 on success, -ve on error. -ENOSYS is returned when Yama | 222 | * Return 0 on success, -ve on error. -ENOSYS is returned when Yama |
| 223 | * does not handle the given option. | 223 | * does not handle the given option. |
| 224 | */ | 224 | */ |
| 225 | int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, | 225 | static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, |
| 226 | unsigned long arg4, unsigned long arg5) | 226 | unsigned long arg4, unsigned long arg5) |
| 227 | { | 227 | { |
| 228 | int rc = -ENOSYS; | 228 | int rc = -ENOSYS; |
| @@ -401,7 +401,7 @@ static int yama_ptrace_access_check(struct task_struct *child, | |||
| 401 | * | 401 | * |
| 402 | * Returns 0 if following the ptrace is allowed, -ve on error. | 402 | * Returns 0 if following the ptrace is allowed, -ve on error. |
| 403 | */ | 403 | */ |
| 404 | int yama_ptrace_traceme(struct task_struct *parent) | 404 | static int yama_ptrace_traceme(struct task_struct *parent) |
| 405 | { | 405 | { |
| 406 | int rc = 0; | 406 | int rc = 0; |
| 407 | 407 | ||
| @@ -452,7 +452,7 @@ static int yama_dointvec_minmax(struct ctl_table *table, int write, | |||
| 452 | static int zero; | 452 | static int zero; |
| 453 | static int max_scope = YAMA_SCOPE_NO_ATTACH; | 453 | static int max_scope = YAMA_SCOPE_NO_ATTACH; |
| 454 | 454 | ||
| 455 | struct ctl_path yama_sysctl_path[] = { | 455 | static struct ctl_path yama_sysctl_path[] = { |
| 456 | { .procname = "kernel", }, | 456 | { .procname = "kernel", }, |
| 457 | { .procname = "yama", }, | 457 | { .procname = "yama", }, |
| 458 | { } | 458 | { } |
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index d5b0d7ba83c4..f6ae68017608 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c | |||
| @@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) | |||
| 940 | oss_frame_size = snd_pcm_format_physical_width(params_format(params)) * | 940 | oss_frame_size = snd_pcm_format_physical_width(params_format(params)) * |
| 941 | params_channels(params) / 8; | 941 | params_channels(params) / 8; |
| 942 | 942 | ||
| 943 | err = snd_pcm_oss_period_size(substream, params, sparams); | ||
| 944 | if (err < 0) | ||
| 945 | goto failure; | ||
| 946 | |||
| 947 | n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size); | ||
| 948 | err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL); | ||
| 949 | if (err < 0) | ||
| 950 | goto failure; | ||
| 951 | |||
| 952 | err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS, | ||
| 953 | runtime->oss.periods, NULL); | ||
| 954 | if (err < 0) | ||
| 955 | goto failure; | ||
| 956 | |||
| 957 | snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); | ||
| 958 | |||
| 959 | err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams); | ||
| 960 | if (err < 0) { | ||
| 961 | pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err); | ||
| 962 | goto failure; | ||
| 963 | } | ||
| 964 | |||
| 943 | #ifdef CONFIG_SND_PCM_OSS_PLUGINS | 965 | #ifdef CONFIG_SND_PCM_OSS_PLUGINS |
| 944 | snd_pcm_oss_plugin_clear(substream); | 966 | snd_pcm_oss_plugin_clear(substream); |
| 945 | if (!direct) { | 967 | if (!direct) { |
| @@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) | |||
| 974 | } | 996 | } |
| 975 | #endif | 997 | #endif |
| 976 | 998 | ||
| 977 | err = snd_pcm_oss_period_size(substream, params, sparams); | ||
| 978 | if (err < 0) | ||
| 979 | goto failure; | ||
| 980 | |||
| 981 | n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size); | ||
| 982 | err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL); | ||
| 983 | if (err < 0) | ||
| 984 | goto failure; | ||
| 985 | |||
| 986 | err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS, | ||
| 987 | runtime->oss.periods, NULL); | ||
| 988 | if (err < 0) | ||
| 989 | goto failure; | ||
| 990 | |||
| 991 | snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); | ||
| 992 | |||
| 993 | if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) { | ||
| 994 | pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err); | ||
| 995 | goto failure; | ||
| 996 | } | ||
| 997 | |||
| 998 | if (runtime->oss.trigger) { | 999 | if (runtime->oss.trigger) { |
| 999 | sw_params->start_threshold = 1; | 1000 | sw_params->start_threshold = 1; |
| 1000 | } else { | 1001 | } else { |
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index f731f904e8cc..1d8452912b14 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
| @@ -1445,8 +1445,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push) | |||
| 1445 | static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state) | 1445 | static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state) |
| 1446 | { | 1446 | { |
| 1447 | struct snd_pcm_runtime *runtime = substream->runtime; | 1447 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 1448 | if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) | 1448 | switch (runtime->status->state) { |
| 1449 | case SNDRV_PCM_STATE_SUSPENDED: | ||
| 1449 | return -EBUSY; | 1450 | return -EBUSY; |
| 1451 | /* unresumable PCM state; return -EBUSY for skipping suspend */ | ||
| 1452 | case SNDRV_PCM_STATE_OPEN: | ||
| 1453 | case SNDRV_PCM_STATE_SETUP: | ||
| 1454 | case SNDRV_PCM_STATE_DISCONNECTED: | ||
| 1455 | return -EBUSY; | ||
| 1456 | } | ||
| 1450 | runtime->trigger_master = substream; | 1457 | runtime->trigger_master = substream; |
| 1451 | return 0; | 1458 | return 0; |
| 1452 | } | 1459 | } |
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index ee601d7f0926..c0690d1ecd55 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
| 31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
| 32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
| 33 | #include <linux/nospec.h> | ||
| 33 | #include <sound/rawmidi.h> | 34 | #include <sound/rawmidi.h> |
| 34 | #include <sound/info.h> | 35 | #include <sound/info.h> |
| 35 | #include <sound/control.h> | 36 | #include <sound/control.h> |
| @@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card, | |||
| 601 | return -ENXIO; | 602 | return -ENXIO; |
| 602 | if (info->stream < 0 || info->stream > 1) | 603 | if (info->stream < 0 || info->stream > 1) |
| 603 | return -EINVAL; | 604 | return -EINVAL; |
| 605 | info->stream = array_index_nospec(info->stream, 2); | ||
| 604 | pstr = &rmidi->streams[info->stream]; | 606 | pstr = &rmidi->streams[info->stream]; |
| 605 | if (pstr->substream_count == 0) | 607 | if (pstr->substream_count == 0) |
| 606 | return -ENOENT; | 608 | return -ENOENT; |
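The new array_index_nospec() call sanitizes info->stream after the bounds check, so a mispredicted branch cannot speculatively index past the two-element streams array (the classic Spectre v1 gadget). A minimal sketch of the check-then-clamp pattern, with hypothetical names:

#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct entry {
	int val;
};

/* Illustrative only: bounds-check the index architecturally, then clamp it
 * with array_index_nospec() so a mispredicted branch cannot speculatively
 * read past the end of the table. */
static struct entry *lookup_entry(struct entry *table, size_t nr, size_t idx)
{
	if (idx >= nr)
		return NULL;
	idx = array_index_nospec(idx, nr);
	return &table[idx];
}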
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c index 278ebb993122..c93945917235 100644 --- a/sound/core/seq/oss/seq_oss_synth.c +++ b/sound/core/seq/oss/seq_oss_synth.c | |||
| @@ -617,13 +617,14 @@ int | |||
| 617 | snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf) | 617 | snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf) |
| 618 | { | 618 | { |
| 619 | struct seq_oss_synth *rec; | 619 | struct seq_oss_synth *rec; |
| 620 | struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev); | ||
| 620 | 621 | ||
| 621 | if (dev < 0 || dev >= dp->max_synthdev) | 622 | if (!info) |
| 622 | return -ENXIO; | 623 | return -ENXIO; |
| 623 | 624 | ||
| 624 | if (dp->synths[dev].is_midi) { | 625 | if (info->is_midi) { |
| 625 | struct midi_info minf; | 626 | struct midi_info minf; |
| 626 | snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf); | 627 | snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf); |
| 627 | inf->synth_type = SYNTH_TYPE_MIDI; | 628 | inf->synth_type = SYNTH_TYPE_MIDI; |
| 628 | inf->synth_subtype = 0; | 629 | inf->synth_subtype = 0; |
| 629 | inf->nr_voices = 16; | 630 | inf->nr_voices = 16; |
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 29882bda7632..e1ebc6d5f382 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c | |||
| @@ -1005,7 +1005,6 @@ struct ca0132_spec { | |||
| 1005 | unsigned int scp_resp_header; | 1005 | unsigned int scp_resp_header; |
| 1006 | unsigned int scp_resp_data[4]; | 1006 | unsigned int scp_resp_data[4]; |
| 1007 | unsigned int scp_resp_count; | 1007 | unsigned int scp_resp_count; |
| 1008 | bool alt_firmware_present; | ||
| 1009 | bool startup_check_entered; | 1008 | bool startup_check_entered; |
| 1010 | bool dsp_reload; | 1009 | bool dsp_reload; |
| 1011 | 1010 | ||
| @@ -7518,7 +7517,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec) | |||
| 7518 | bool dsp_loaded = false; | 7517 | bool dsp_loaded = false; |
| 7519 | struct ca0132_spec *spec = codec->spec; | 7518 | struct ca0132_spec *spec = codec->spec; |
| 7520 | const struct dsp_image_seg *dsp_os_image; | 7519 | const struct dsp_image_seg *dsp_os_image; |
| 7521 | const struct firmware *fw_entry; | 7520 | const struct firmware *fw_entry = NULL; |
| 7522 | /* | 7521 | /* |
| 7523 | * Alternate firmwares for different variants. The Recon3Di apparently | 7522 | * Alternate firmwares for different variants. The Recon3Di apparently |
| 7524 | * can use the default firmware, but I'll leave the option in case | 7523 | * can use the default firmware, but I'll leave the option in case |
| @@ -7529,33 +7528,26 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec) | |||
| 7529 | case QUIRK_R3D: | 7528 | case QUIRK_R3D: |
| 7530 | case QUIRK_AE5: | 7529 | case QUIRK_AE5: |
| 7531 | if (request_firmware(&fw_entry, DESKTOP_EFX_FILE, | 7530 | if (request_firmware(&fw_entry, DESKTOP_EFX_FILE, |
| 7532 | codec->card->dev) != 0) { | 7531 | codec->card->dev) != 0) |
| 7533 | codec_dbg(codec, "Desktop firmware not found."); | 7532 | codec_dbg(codec, "Desktop firmware not found."); |
| 7534 | spec->alt_firmware_present = false; | 7533 | else |
| 7535 | } else { | ||
| 7536 | codec_dbg(codec, "Desktop firmware selected."); | 7534 | codec_dbg(codec, "Desktop firmware selected."); |
| 7537 | spec->alt_firmware_present = true; | ||
| 7538 | } | ||
| 7539 | break; | 7535 | break; |
| 7540 | case QUIRK_R3DI: | 7536 | case QUIRK_R3DI: |
| 7541 | if (request_firmware(&fw_entry, R3DI_EFX_FILE, | 7537 | if (request_firmware(&fw_entry, R3DI_EFX_FILE, |
| 7542 | codec->card->dev) != 0) { | 7538 | codec->card->dev) != 0) |
| 7543 | codec_dbg(codec, "Recon3Di alt firmware not detected."); | 7539 | codec_dbg(codec, "Recon3Di alt firmware not detected."); |
| 7544 | spec->alt_firmware_present = false; | 7540 | else |
| 7545 | } else { | ||
| 7546 | codec_dbg(codec, "Recon3Di firmware selected."); | 7541 | codec_dbg(codec, "Recon3Di firmware selected."); |
| 7547 | spec->alt_firmware_present = true; | ||
| 7548 | } | ||
| 7549 | break; | 7542 | break; |
| 7550 | default: | 7543 | default: |
| 7551 | spec->alt_firmware_present = false; | ||
| 7552 | break; | 7544 | break; |
| 7553 | } | 7545 | } |
| 7554 | /* | 7546 | /* |
| 7555 | * Use default ctefx.bin if no alt firmware is detected, or if none | 7547 | * Use default ctefx.bin if no alt firmware is detected, or if none |
| 7556 | * exists for your particular codec. | 7548 | * exists for your particular codec. |
| 7557 | */ | 7549 | */ |
| 7558 | if (!spec->alt_firmware_present) { | 7550 | if (!fw_entry) { |
| 7559 | codec_dbg(codec, "Default firmware selected."); | 7551 | codec_dbg(codec, "Default firmware selected."); |
| 7560 | if (request_firmware(&fw_entry, EFX_FILE, | 7552 | if (request_firmware(&fw_entry, EFX_FILE, |
| 7561 | codec->card->dev) != 0) | 7553 | codec->card->dev) != 0) |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 191830d4fa40..a3fb3d4c5730 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -5688,6 +5688,8 @@ enum { | |||
| 5688 | ALC225_FIXUP_WYSE_AUTO_MUTE, | 5688 | ALC225_FIXUP_WYSE_AUTO_MUTE, |
| 5689 | ALC225_FIXUP_WYSE_DISABLE_MIC_VREF, | 5689 | ALC225_FIXUP_WYSE_DISABLE_MIC_VREF, |
| 5690 | ALC286_FIXUP_ACER_AIO_HEADSET_MIC, | 5690 | ALC286_FIXUP_ACER_AIO_HEADSET_MIC, |
| 5691 | ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, | ||
| 5692 | ALC299_FIXUP_PREDATOR_SPK, | ||
| 5691 | }; | 5693 | }; |
| 5692 | 5694 | ||
| 5693 | static const struct hda_fixup alc269_fixups[] = { | 5695 | static const struct hda_fixup alc269_fixups[] = { |
| @@ -6696,6 +6698,22 @@ static const struct hda_fixup alc269_fixups[] = { | |||
| 6696 | .chained = true, | 6698 | .chained = true, |
| 6697 | .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE | 6699 | .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE |
| 6698 | }, | 6700 | }, |
| 6701 | [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = { | ||
| 6702 | .type = HDA_FIXUP_PINS, | ||
| 6703 | .v.pins = (const struct hda_pintbl[]) { | ||
| 6704 | { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */ | ||
| 6705 | { } | ||
| 6706 | }, | ||
| 6707 | .chained = true, | ||
| 6708 | .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE | ||
| 6709 | }, | ||
| 6710 | [ALC299_FIXUP_PREDATOR_SPK] = { | ||
| 6711 | .type = HDA_FIXUP_PINS, | ||
| 6712 | .v.pins = (const struct hda_pintbl[]) { | ||
| 6713 | { 0x21, 0x90170150 }, /* use as headset mic, without its own jack detect */ | ||
| 6714 | { } | ||
| 6715 | } | ||
| 6716 | }, | ||
| 6699 | }; | 6717 | }; |
| 6700 | 6718 | ||
| 6701 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { | 6719 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
| @@ -6712,9 +6730,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 6712 | SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), | 6730 | SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), |
| 6713 | SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), | 6731 | SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), |
| 6714 | SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), | 6732 | SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), |
| 6733 | SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), | ||
| 6734 | SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), | ||
| 6735 | SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK), | ||
| 6715 | SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), | 6736 | SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), |
| 6716 | SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), | 6737 | SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), |
| 6717 | SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), | 6738 | SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), |
| 6739 | SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), | ||
| 6718 | SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), | 6740 | SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), |
| 6719 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), | 6741 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), |
| 6720 | SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), | 6742 | SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), |
| @@ -7111,6 +7133,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { | |||
| 7111 | {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"}, | 7133 | {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"}, |
| 7112 | {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"}, | 7134 | {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"}, |
| 7113 | {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"}, | 7135 | {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"}, |
| 7136 | {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"}, | ||
| 7114 | {} | 7137 | {} |
| 7115 | }; | 7138 | }; |
| 7116 | #define ALC225_STANDARD_PINS \ | 7139 | #define ALC225_STANDARD_PINS \ |
| @@ -7331,6 +7354,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
| 7331 | {0x14, 0x90170110}, | 7354 | {0x14, 0x90170110}, |
| 7332 | {0x1b, 0x90a70130}, | 7355 | {0x1b, 0x90a70130}, |
| 7333 | {0x21, 0x03211020}), | 7356 | {0x21, 0x03211020}), |
| 7357 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, | ||
| 7358 | {0x12, 0x90a60130}, | ||
| 7359 | {0x14, 0x90170110}, | ||
| 7360 | {0x21, 0x03211020}), | ||
| 7361 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, | ||
| 7362 | {0x12, 0x90a60130}, | ||
| 7363 | {0x14, 0x90170110}, | ||
| 7364 | {0x21, 0x04211020}), | ||
| 7365 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, | ||
| 7366 | {0x1a, 0x90a70130}, | ||
| 7367 | {0x1b, 0x90170110}, | ||
| 7368 | {0x21, 0x03211020}), | ||
| 7334 | SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, | 7369 | SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, |
| 7335 | {0x12, 0xb7a60130}, | 7370 | {0x12, 0xb7a60130}, |
| 7336 | {0x13, 0xb8a61140}, | 7371 | {0x13, 0xb8a61140}, |
diff --git a/tools/arch/alpha/include/uapi/asm/mman.h b/tools/arch/alpha/include/uapi/asm/mman.h index c317d3e6867a..ea6a255ae61f 100644 --- a/tools/arch/alpha/include/uapi/asm/mman.h +++ b/tools/arch/alpha/include/uapi/asm/mman.h | |||
| @@ -27,8 +27,6 @@ | |||
| 27 | #define MAP_NONBLOCK 0x40000 | 27 | #define MAP_NONBLOCK 0x40000 |
| 28 | #define MAP_NORESERVE 0x10000 | 28 | #define MAP_NORESERVE 0x10000 |
| 29 | #define MAP_POPULATE 0x20000 | 29 | #define MAP_POPULATE 0x20000 |
| 30 | #define MAP_PRIVATE 0x02 | ||
| 31 | #define MAP_SHARED 0x01 | ||
| 32 | #define MAP_STACK 0x80000 | 30 | #define MAP_STACK 0x80000 |
| 33 | #define PROT_EXEC 0x4 | 31 | #define PROT_EXEC 0x4 |
| 34 | #define PROT_GROWSDOWN 0x01000000 | 32 | #define PROT_GROWSDOWN 0x01000000 |
diff --git a/tools/arch/mips/include/uapi/asm/mman.h b/tools/arch/mips/include/uapi/asm/mman.h index de2206883abc..c8acaa138d46 100644 --- a/tools/arch/mips/include/uapi/asm/mman.h +++ b/tools/arch/mips/include/uapi/asm/mman.h | |||
| @@ -28,8 +28,6 @@ | |||
| 28 | #define MAP_NONBLOCK 0x20000 | 28 | #define MAP_NONBLOCK 0x20000 |
| 29 | #define MAP_NORESERVE 0x0400 | 29 | #define MAP_NORESERVE 0x0400 |
| 30 | #define MAP_POPULATE 0x10000 | 30 | #define MAP_POPULATE 0x10000 |
| 31 | #define MAP_PRIVATE 0x002 | ||
| 32 | #define MAP_SHARED 0x001 | ||
| 33 | #define MAP_STACK 0x40000 | 31 | #define MAP_STACK 0x40000 |
| 34 | #define PROT_EXEC 0x04 | 32 | #define PROT_EXEC 0x04 |
| 35 | #define PROT_GROWSDOWN 0x01000000 | 33 | #define PROT_GROWSDOWN 0x01000000 |
diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h index 1bd78758bde9..f9fd1325f5bd 100644 --- a/tools/arch/parisc/include/uapi/asm/mman.h +++ b/tools/arch/parisc/include/uapi/asm/mman.h | |||
| @@ -27,8 +27,6 @@ | |||
| 27 | #define MAP_NONBLOCK 0x20000 | 27 | #define MAP_NONBLOCK 0x20000 |
| 28 | #define MAP_NORESERVE 0x4000 | 28 | #define MAP_NORESERVE 0x4000 |
| 29 | #define MAP_POPULATE 0x10000 | 29 | #define MAP_POPULATE 0x10000 |
| 30 | #define MAP_PRIVATE 0x02 | ||
| 31 | #define MAP_SHARED 0x01 | ||
| 32 | #define MAP_STACK 0x40000 | 30 | #define MAP_STACK 0x40000 |
| 33 | #define PROT_EXEC 0x4 | 31 | #define PROT_EXEC 0x4 |
| 34 | #define PROT_GROWSDOWN 0x01000000 | 32 | #define PROT_GROWSDOWN 0x01000000 |
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index 8c876c166ef2..26ca425f4c2c 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h | |||
| @@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char { | |||
| 463 | #define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58) | 463 | #define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58) |
| 464 | #define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57) | 464 | #define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57) |
| 465 | #define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56) | 465 | #define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56) |
| 466 | #define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) | ||
| 466 | 467 | ||
| 467 | #define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63) | 468 | #define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63) |
| 468 | #define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62) | 469 | #define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62) |
| 469 | #define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61) | 470 | #define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61) |
| 471 | #define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) | ||
| 470 | 472 | ||
| 471 | /* Per-vcpu XICS interrupt controller state */ | 473 | /* Per-vcpu XICS interrupt controller state */ |
| 472 | #define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c) | 474 | #define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c) |
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 6d6122524711..981ff9479648 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h | |||
| @@ -344,6 +344,7 @@ | |||
| 344 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ | 344 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ |
| 345 | #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ | 345 | #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ |
| 346 | #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ | 346 | #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ |
| 347 | #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ | ||
| 347 | #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ | 348 | #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ |
| 348 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ | 349 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ |
| 349 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ | 350 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ |
diff --git a/tools/arch/xtensa/include/uapi/asm/mman.h b/tools/arch/xtensa/include/uapi/asm/mman.h index 34dde6f44dae..f2b08c990afc 100644 --- a/tools/arch/xtensa/include/uapi/asm/mman.h +++ b/tools/arch/xtensa/include/uapi/asm/mman.h | |||
| @@ -27,8 +27,6 @@ | |||
| 27 | #define MAP_NONBLOCK 0x20000 | 27 | #define MAP_NONBLOCK 0x20000 |
| 28 | #define MAP_NORESERVE 0x0400 | 28 | #define MAP_NORESERVE 0x0400 |
| 29 | #define MAP_POPULATE 0x10000 | 29 | #define MAP_POPULATE 0x10000 |
| 30 | #define MAP_PRIVATE 0x002 | ||
| 31 | #define MAP_SHARED 0x001 | ||
| 32 | #define MAP_STACK 0x40000 | 30 | #define MAP_STACK 0x40000 |
| 33 | #define PROT_EXEC 0x4 | 31 | #define PROT_EXEC 0x4 |
| 34 | #define PROT_GROWSDOWN 0x01000000 | 32 | #define PROT_GROWSDOWN 0x01000000 |
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c index d68eb4fb40cc..2b0e02c38870 100644 --- a/tools/build/feature/test-libopencsd.c +++ b/tools/build/feature/test-libopencsd.c | |||
| @@ -4,9 +4,9 @@ | |||
| 4 | /* | 4 | /* |
| 5 | * Check OpenCSD library version is sufficient to provide required features | 5 | * Check OpenCSD library version is sufficient to provide required features |
| 6 | */ | 6 | */ |
| 7 | #define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0)) | 7 | #define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0)) |
| 8 | #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER) | 8 | #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER) |
| 9 | #error "OpenCSD >= 0.10.0 is required" | 9 | #error "OpenCSD >= 0.11.0 is required" |
| 10 | #endif | 10 | #endif |
| 11 | 11 | ||
| 12 | int main(void) | 12 | int main(void) |
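The feature test packs the minimum OpenCSD version as major in the upper bits, minor in bits 8-15 and patch in bits 0-7, so the bump works out as follows:

/* Version packing used by the check above: major in bits 16 and up, minor
 * in bits 8-15, patch in bits 0-7.  The new floor therefore evaluates to
 *   (0 << 16) | (11 << 8) | 0  ==  0x0b00  ==  2816
 * so any OCSD_VER_NUM of at least 2816 (for example 0.11.1 == 0x0b01)
 * satisfies the #if test. */
#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))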
diff --git a/tools/include/uapi/asm-generic/mman-common-tools.h b/tools/include/uapi/asm-generic/mman-common-tools.h new file mode 100644 index 000000000000..af7d0d3a3182 --- /dev/null +++ b/tools/include/uapi/asm-generic/mman-common-tools.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
| 2 | #ifndef __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H | ||
| 3 | #define __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H | ||
| 4 | |||
| 5 | #include <asm-generic/mman-common.h> | ||
| 6 | |||
| 7 | /* We need this because we need to have tools/include/uapi/ included in the tools | ||
| 8 | * header search path to get access to stuff that is not yet in the system's | ||
| 9 | * copy of the files in that directory, but since this cset: | ||
| 10 | * | ||
| 11 | * 746c9398f5ac ("arch: move common mmap flags to linux/mman.h") | ||
| 12 | * | ||
| 13 | * We end up making sys/mman.h, that is in the system headers, to not find the | ||
| 14 | * MAP_SHARED and MAP_PRIVATE defines because they are not anymore in our copy | ||
| 15 | * of asm-generic/mman-common.h. So we define them here and include this header | ||
| 16 | * from each of the per arch mman.h headers. | ||
| 17 | */ | ||
| 18 | #ifndef MAP_SHARED | ||
| 19 | #define MAP_SHARED 0x01 /* Share changes */ | ||
| 20 | #define MAP_PRIVATE 0x02 /* Changes are private */ | ||
| 21 | #define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */ | ||
| 22 | #endif | ||
| 23 | #endif // __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H | ||
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index e7ee32861d51..abd238d0f7a4 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h | |||
| @@ -15,9 +15,7 @@ | |||
| 15 | #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ | 15 | #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ |
| 16 | #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ | 16 | #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ |
| 17 | 17 | ||
| 18 | #define MAP_SHARED 0x01 /* Share changes */ | 18 | /* 0x01 - 0x03 are defined in linux/mman.h */ |
| 19 | #define MAP_PRIVATE 0x02 /* Changes are private */ | ||
| 20 | #define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */ | ||
| 21 | #define MAP_TYPE 0x0f /* Mask for type of mapping */ | 19 | #define MAP_TYPE 0x0f /* Mask for type of mapping */ |
| 22 | #define MAP_FIXED 0x10 /* Interpret addr exactly */ | 20 | #define MAP_FIXED 0x10 /* Interpret addr exactly */ |
| 23 | #define MAP_ANONYMOUS 0x20 /* don't use a file */ | 21 | #define MAP_ANONYMOUS 0x20 /* don't use a file */ |
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h index 653687d9771b..36c197fc44a0 100644 --- a/tools/include/uapi/asm-generic/mman.h +++ b/tools/include/uapi/asm-generic/mman.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | #ifndef __ASM_GENERIC_MMAN_H | 2 | #ifndef __ASM_GENERIC_MMAN_H |
| 3 | #define __ASM_GENERIC_MMAN_H | 3 | #define __ASM_GENERIC_MMAN_H |
| 4 | 4 | ||
| 5 | #include <asm-generic/mman-common.h> | 5 | #include <asm-generic/mman-common-tools.h> |
| 6 | 6 | ||
| 7 | #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ | 7 | #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ |
| 8 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | 8 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ |
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index 12cdf611d217..dee7292e1df6 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h | |||
| @@ -824,8 +824,17 @@ __SYSCALL(__NR_futex_time64, sys_futex) | |||
| 824 | __SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval) | 824 | __SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval) |
| 825 | #endif | 825 | #endif |
| 826 | 826 | ||
| 827 | #define __NR_pidfd_send_signal 424 | ||
| 828 | __SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal) | ||
| 829 | #define __NR_io_uring_setup 425 | ||
| 830 | __SYSCALL(__NR_io_uring_setup, sys_io_uring_setup) | ||
| 831 | #define __NR_io_uring_enter 426 | ||
| 832 | __SYSCALL(__NR_io_uring_enter, sys_io_uring_enter) | ||
| 833 | #define __NR_io_uring_register 427 | ||
| 834 | __SYSCALL(__NR_io_uring_register, sys_io_uring_register) | ||
| 835 | |||
| 827 | #undef __NR_syscalls | 836 | #undef __NR_syscalls |
| 828 | #define __NR_syscalls 424 | 837 | #define __NR_syscalls 428 |
| 829 | 838 | ||
| 830 | /* | 839 | /* |
| 831 | * 32 bit systems traditionally used different | 840 | * 32 bit systems traditionally used different |
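Besides bumping __NR_syscalls to 428, this adds numbers 424-427 for pidfd_send_signal and the io_uring calls. As a hedged illustration, a tools-side caller could reach the first of them through syscall(); the argument order (pidfd, signal, siginfo, flags) is an assumption about the upstream definition, not something this header itself encodes:

#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_send_signal
#define __NR_pidfd_send_signal 424	/* matches the number added above */
#endif

/* Send SIGTERM to the process referred to by an already-open pidfd. */
static int send_sigterm(int pidfd)
{
	return syscall(__NR_pidfd_send_signal, pidfd, SIGTERM,
		       (siginfo_t *)NULL, (unsigned int)0);
}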
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 298b2e197744..397810fa2d33 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h | |||
| @@ -1486,9 +1486,73 @@ struct drm_i915_gem_context_param { | |||
| 1486 | #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ | 1486 | #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ |
| 1487 | #define I915_CONTEXT_DEFAULT_PRIORITY 0 | 1487 | #define I915_CONTEXT_DEFAULT_PRIORITY 0 |
| 1488 | #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ | 1488 | #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ |
| 1489 | /* | ||
| 1490 | * When using the following param, value should be a pointer to | ||
| 1491 | * drm_i915_gem_context_param_sseu. | ||
| 1492 | */ | ||
| 1493 | #define I915_CONTEXT_PARAM_SSEU 0x7 | ||
| 1489 | __u64 value; | 1494 | __u64 value; |
| 1490 | }; | 1495 | }; |
| 1491 | 1496 | ||
| 1497 | /** | ||
| 1498 | * Context SSEU programming | ||
| 1499 | * | ||
| 1500 | * It may be necessary for either functional or performance reason to configure | ||
| 1501 | * a context to run with a reduced number of SSEU (where SSEU stands for Slice/ | ||
| 1502 | * Sub-slice/EU). | ||
| 1503 | * | ||
| 1504 | * This is done by configuring SSEU configuration using the below | ||
| 1505 | * @struct drm_i915_gem_context_param_sseu for every supported engine which | ||
| 1506 | * userspace intends to use. | ||
| 1507 | * | ||
| 1508 | * Not all GPUs or engines support this functionality in which case an error | ||
| 1509 | * code -ENODEV will be returned. | ||
| 1510 | * | ||
| 1511 | * Also, flexibility of possible SSEU configuration permutations varies between | ||
| 1512 | * GPU generations and software imposed limitations. Requesting such a | ||
| 1513 | * combination will return an error code of -EINVAL. | ||
| 1514 | * | ||
| 1515 | * NOTE: When perf/OA is active the context's SSEU configuration is ignored in | ||
| 1516 | * favour of a single global setting. | ||
| 1517 | */ | ||
| 1518 | struct drm_i915_gem_context_param_sseu { | ||
| 1519 | /* | ||
| 1520 | * Engine class & instance to be configured or queried. | ||
| 1521 | */ | ||
| 1522 | __u16 engine_class; | ||
| 1523 | __u16 engine_instance; | ||
| 1524 | |||
| 1525 | /* | ||
| 1526 | * Unused for now. Must be cleared to zero. | ||
| 1527 | */ | ||
| 1528 | __u32 flags; | ||
| 1529 | |||
| 1530 | /* | ||
| 1531 | * Mask of slices to enable for the context. Valid values are a subset | ||
| 1532 | * of the bitmask value returned for I915_PARAM_SLICE_MASK. | ||
| 1533 | */ | ||
| 1534 | __u64 slice_mask; | ||
| 1535 | |||
| 1536 | /* | ||
| 1537 | * Mask of subslices to enable for the context. Valid values are a | ||
| 1538 | * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK. | ||
| 1539 | */ | ||
| 1540 | __u64 subslice_mask; | ||
| 1541 | |||
| 1542 | /* | ||
| 1543 | * Minimum/Maximum number of EUs to enable per subslice for the | ||
| 1544 | * context. min_eus_per_subslice must be inferior or equal to | ||
| 1545 | * max_eus_per_subslice. | ||
| 1546 | */ | ||
| 1547 | __u16 min_eus_per_subslice; | ||
| 1548 | __u16 max_eus_per_subslice; | ||
| 1549 | |||
| 1550 | /* | ||
| 1551 | * Unused for now. Must be cleared to zero. | ||
| 1552 | */ | ||
| 1553 | __u32 rsvd; | ||
| 1554 | }; | ||
| 1555 | |||
| 1492 | enum drm_i915_oa_format { | 1556 | enum drm_i915_oa_format { |
| 1493 | I915_OA_FORMAT_A13 = 1, /* HSW only */ | 1557 | I915_OA_FORMAT_A13 = 1, /* HSW only */ |
| 1494 | I915_OA_FORMAT_A29, /* HSW only */ | 1558 | I915_OA_FORMAT_A29, /* HSW only */ |
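The comment block explains that SSEU is configured per engine via the new I915_CONTEXT_PARAM_SSEU context parameter, with value pointing at struct drm_i915_gem_context_param_sseu. A rough sketch of a caller, assuming the usual DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM path and a render-class engine; the mask and EU values are purely illustrative:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Restrict context 'ctx_id' to the given slice/subslice masks on the first
 * render engine. Engine choice and the EU bounds are illustrative. */
static int set_context_sseu(int drm_fd, __u32 ctx_id,
			    __u64 slice_mask, __u64 subslice_mask)
{
	struct drm_i915_gem_context_param_sseu sseu;
	struct drm_i915_gem_context_param arg;

	memset(&sseu, 0, sizeof(sseu));
	sseu.engine_class = I915_ENGINE_CLASS_RENDER;
	sseu.engine_instance = 0;
	sseu.slice_mask = slice_mask;
	sseu.subslice_mask = subslice_mask;
	sseu.min_eus_per_subslice = 1;
	sseu.max_eus_per_subslice = 8;

	memset(&arg, 0, sizeof(arg));
	arg.ctx_id = ctx_id;
	arg.size = sizeof(sseu);
	arg.param = I915_CONTEXT_PARAM_SSEU;
	arg.value = (__u64)(uintptr_t)&sseu;

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
}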
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 3c38ac9a92a7..929c8e537a14 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
| @@ -502,16 +502,6 @@ union bpf_attr { | |||
| 502 | * Return | 502 | * Return |
| 503 | * 0 on success, or a negative error in case of failure. | 503 | * 0 on success, or a negative error in case of failure. |
| 504 | * | 504 | * |
| 505 | * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) | ||
| 506 | * Description | ||
| 507 | * Push an element *value* in *map*. *flags* is one of: | ||
| 508 | * | ||
| 509 | * **BPF_EXIST** | ||
| 510 | * If the queue/stack is full, the oldest element is removed to | ||
| 511 | * make room for this. | ||
| 512 | * Return | ||
| 513 | * 0 on success, or a negative error in case of failure. | ||
| 514 | * | ||
| 515 | * int bpf_probe_read(void *dst, u32 size, const void *src) | 505 | * int bpf_probe_read(void *dst, u32 size, const void *src) |
| 516 | * Description | 506 | * Description |
| 517 | * For tracing programs, safely attempt to read *size* bytes from | 507 | * For tracing programs, safely attempt to read *size* bytes from |
| @@ -1435,14 +1425,14 @@ union bpf_attr { | |||
| 1435 | * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) | 1425 | * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) |
| 1436 | * Description | 1426 | * Description |
| 1437 | * Equivalent to bpf_get_socket_cookie() helper that accepts | 1427 | * Equivalent to bpf_get_socket_cookie() helper that accepts |
| 1438 | * *skb*, but gets socket from **struct bpf_sock_addr** contex. | 1428 | * *skb*, but gets socket from **struct bpf_sock_addr** context. |
| 1439 | * Return | 1429 | * Return |
| 1440 | * A 8-byte long non-decreasing number. | 1430 | * A 8-byte long non-decreasing number. |
| 1441 | * | 1431 | * |
| 1442 | * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) | 1432 | * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) |
| 1443 | * Description | 1433 | * Description |
| 1444 | * Equivalent to bpf_get_socket_cookie() helper that accepts | 1434 | * Equivalent to bpf_get_socket_cookie() helper that accepts |
| 1445 | * *skb*, but gets socket from **struct bpf_sock_ops** contex. | 1435 | * *skb*, but gets socket from **struct bpf_sock_ops** context. |
| 1446 | * Return | 1436 | * Return |
| 1447 | * A 8-byte long non-decreasing number. | 1437 | * A 8-byte long non-decreasing number. |
| 1448 | * | 1438 | * |
| @@ -2098,52 +2088,52 @@ union bpf_attr { | |||
| 2098 | * Return | 2088 | * Return |
| 2099 | * 0 on success, or a negative error in case of failure. | 2089 | * 0 on success, or a negative error in case of failure. |
| 2100 | * | 2090 | * |
| 2101 | * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) | 2091 | * int bpf_rc_repeat(void *ctx) |
| 2102 | * Description | 2092 | * Description |
| 2103 | * This helper is used in programs implementing IR decoding, to | 2093 | * This helper is used in programs implementing IR decoding, to |
| 2104 | * report a successfully decoded key press with *scancode*, | 2094 | * report a successfully decoded repeat key message. This delays |
| 2105 | * *toggle* value in the given *protocol*. The scancode will be | 2095 | * the generation of a key up event for previously generated |
| 2106 | * translated to a keycode using the rc keymap, and reported as | 2096 | * key down event. |
| 2107 | * an input key down event. After a period a key up event is | ||
| 2108 | * generated. This period can be extended by calling either | ||
| 2109 | * **bpf_rc_keydown**\ () again with the same values, or calling | ||
| 2110 | * **bpf_rc_repeat**\ (). | ||
| 2111 | * | 2097 | * |
| 2112 | * Some protocols include a toggle bit, in case the button was | 2098 | * Some IR protocols like NEC have a special IR message for |
| 2113 | * released and pressed again between consecutive scancodes. | 2099 | * repeating last button, for when a button is held down. |
| 2114 | * | 2100 | * |
| 2115 | * The *ctx* should point to the lirc sample as passed into | 2101 | * The *ctx* should point to the lirc sample as passed into |
| 2116 | * the program. | 2102 | * the program. |
| 2117 | * | 2103 | * |
| 2118 | * The *protocol* is the decoded protocol number (see | ||
| 2119 | * **enum rc_proto** for some predefined values). | ||
| 2120 | * | ||
| 2121 | * This helper is only available is the kernel was compiled with | 2104 | * This helper is only available is the kernel was compiled with |
| 2122 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to | 2105 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
| 2123 | * "**y**". | 2106 | * "**y**". |
| 2124 | * Return | 2107 | * Return |
| 2125 | * 0 | 2108 | * 0 |
| 2126 | * | 2109 | * |
| 2127 | * int bpf_rc_repeat(void *ctx) | 2110 | * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) |
| 2128 | * Description | 2111 | * Description |
| 2129 | * This helper is used in programs implementing IR decoding, to | 2112 | * This helper is used in programs implementing IR decoding, to |
| 2130 | * report a successfully decoded repeat key message. This delays | 2113 | * report a successfully decoded key press with *scancode*, |
| 2131 | * the generation of a key up event for previously generated | 2114 | * *toggle* value in the given *protocol*. The scancode will be |
| 2132 | * key down event. | 2115 | * translated to a keycode using the rc keymap, and reported as |
| 2116 | * an input key down event. After a period a key up event is | ||
| 2117 | * generated. This period can be extended by calling either | ||
| 2118 | * **bpf_rc_keydown**\ () again with the same values, or calling | ||
| 2119 | * **bpf_rc_repeat**\ (). | ||
| 2133 | * | 2120 | * |
| 2134 | * Some IR protocols like NEC have a special IR message for | 2121 | * Some protocols include a toggle bit, in case the button was |
| 2135 | * repeating last button, for when a button is held down. | 2122 | * released and pressed again between consecutive scancodes. |
| 2136 | * | 2123 | * |
| 2137 | * The *ctx* should point to the lirc sample as passed into | 2124 | * The *ctx* should point to the lirc sample as passed into |
| 2138 | * the program. | 2125 | * the program. |
| 2139 | * | 2126 | * |
| 2127 | * The *protocol* is the decoded protocol number (see | ||
| 2128 | * **enum rc_proto** for some predefined values). | ||
| 2129 | * | ||
| 2140 | * This helper is only available is the kernel was compiled with | 2130 | * This helper is only available is the kernel was compiled with |
| 2141 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to | 2131 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
| 2142 | * "**y**". | 2132 | * "**y**". |
| 2143 | * Return | 2133 | * Return |
| 2144 | * 0 | 2134 | * 0 |
| 2145 | * | 2135 | * |
| 2146 | * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb) | 2136 | * u64 bpf_skb_cgroup_id(struct sk_buff *skb) |
| 2147 | * Description | 2137 | * Description |
| 2148 | * Return the cgroup v2 id of the socket associated with the *skb*. | 2138 | * Return the cgroup v2 id of the socket associated with the *skb*. |
| 2149 | * This is roughly similar to the **bpf_get_cgroup_classid**\ () | 2139 | * This is roughly similar to the **bpf_get_cgroup_classid**\ () |
| @@ -2159,30 +2149,12 @@ union bpf_attr { | |||
| 2159 | * Return | 2149 | * Return |
| 2160 | * The id is returned or 0 in case the id could not be retrieved. | 2150 | * The id is returned or 0 in case the id could not be retrieved. |
| 2161 | * | 2151 | * |
| 2162 | * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) | ||
| 2163 | * Description | ||
| 2164 | * Return id of cgroup v2 that is ancestor of cgroup associated | ||
| 2165 | * with the *skb* at the *ancestor_level*. The root cgroup is at | ||
| 2166 | * *ancestor_level* zero and each step down the hierarchy | ||
| 2167 | * increments the level. If *ancestor_level* == level of cgroup | ||
| 2168 | * associated with *skb*, then return value will be same as that | ||
| 2169 | * of **bpf_skb_cgroup_id**\ (). | ||
| 2170 | * | ||
| 2171 | * The helper is useful to implement policies based on cgroups | ||
| 2172 | * that are upper in hierarchy than immediate cgroup associated | ||
| 2173 | * with *skb*. | ||
| 2174 | * | ||
| 2175 | * The format of returned id and helper limitations are same as in | ||
| 2176 | * **bpf_skb_cgroup_id**\ (). | ||
| 2177 | * Return | ||
| 2178 | * The id is returned or 0 in case the id could not be retrieved. | ||
| 2179 | * | ||
| 2180 | * u64 bpf_get_current_cgroup_id(void) | 2152 | * u64 bpf_get_current_cgroup_id(void) |
| 2181 | * Return | 2153 | * Return |
| 2182 | * A 64-bit integer containing the current cgroup id based | 2154 | * A 64-bit integer containing the current cgroup id based |
| 2183 | * on the cgroup within which the current task is running. | 2155 | * on the cgroup within which the current task is running. |
| 2184 | * | 2156 | * |
| 2185 | * void* get_local_storage(void *map, u64 flags) | 2157 | * void *bpf_get_local_storage(void *map, u64 flags) |
| 2186 | * Description | 2158 | * Description |
| 2187 | * Get the pointer to the local storage area. | 2159 | * Get the pointer to the local storage area. |
| 2188 | * The type and the size of the local storage is defined | 2160 | * The type and the size of the local storage is defined |
| @@ -2209,6 +2181,24 @@ union bpf_attr { | |||
| 2209 | * Return | 2181 | * Return |
| 2210 | * 0 on success, or a negative error in case of failure. | 2182 | * 0 on success, or a negative error in case of failure. |
| 2211 | * | 2183 | * |
| 2184 | * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) | ||
| 2185 | * Description | ||
| 2186 | * Return id of cgroup v2 that is ancestor of cgroup associated | ||
| 2187 | * with the *skb* at the *ancestor_level*. The root cgroup is at | ||
| 2188 | * *ancestor_level* zero and each step down the hierarchy | ||
| 2189 | * increments the level. If *ancestor_level* == level of cgroup | ||
| 2190 | * associated with *skb*, then return value will be same as that | ||
| 2191 | * of **bpf_skb_cgroup_id**\ (). | ||
| 2192 | * | ||
| 2193 | * The helper is useful to implement policies based on cgroups | ||
| 2194 | * that are upper in hierarchy than immediate cgroup associated | ||
| 2195 | * with *skb*. | ||
| 2196 | * | ||
| 2197 | * The format of returned id and helper limitations are same as in | ||
| 2198 | * **bpf_skb_cgroup_id**\ (). | ||
| 2199 | * Return | ||
| 2200 | * The id is returned or 0 in case the id could not be retrieved. | ||
| 2201 | * | ||
| 2212 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) | 2202 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2213 | * Description | 2203 | * Description |
| 2214 | * Look for TCP socket matching *tuple*, optionally in a child | 2204 | * Look for TCP socket matching *tuple*, optionally in a child |
| @@ -2289,6 +2279,16 @@ union bpf_attr { | |||
| 2289 | * Return | 2279 | * Return |
| 2290 | * 0 on success, or a negative error in case of failure. | 2280 | * 0 on success, or a negative error in case of failure. |
| 2291 | * | 2281 | * |
| 2282 | * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) | ||
| 2283 | * Description | ||
| 2284 | * Push an element *value* in *map*. *flags* is one of: | ||
| 2285 | * | ||
| 2286 | * **BPF_EXIST** | ||
| 2287 | * If the queue/stack is full, the oldest element is | ||
| 2288 | * removed to make room for this. | ||
| 2289 | * Return | ||
| 2290 | * 0 on success, or a negative error in case of failure. | ||
| 2291 | * | ||
| 2292 | * int bpf_map_pop_elem(struct bpf_map *map, void *value) | 2292 | * int bpf_map_pop_elem(struct bpf_map *map, void *value) |
| 2293 | * Description | 2293 | * Description |
| 2294 | * Pop an element from *map*. | 2294 | * Pop an element from *map*. |
| @@ -2343,29 +2343,94 @@ union bpf_attr { | |||
| 2343 | * Return | 2343 | * Return |
| 2344 | * 0 | 2344 | * 0 |
| 2345 | * | 2345 | * |
| 2346 | * int bpf_spin_lock(struct bpf_spin_lock *lock) | ||
| 2347 | * Description | ||
| 2348 | * Acquire a spinlock represented by the pointer *lock*, which is | ||
| 2349 | * stored as part of a value of a map. Taking the lock allows to | ||
| 2350 | * safely update the rest of the fields in that value. The | ||
| 2351 | * spinlock can (and must) later be released with a call to | ||
| 2352 | * **bpf_spin_unlock**\ (\ *lock*\ ). | ||
| 2353 | * | ||
| 2354 | * Spinlocks in BPF programs come with a number of restrictions | ||
| 2355 | * and constraints: | ||
| 2356 | * | ||
| 2357 | * * **bpf_spin_lock** objects are only allowed inside maps of | ||
| 2358 | * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this | ||
| 2359 | * list could be extended in the future). | ||
| 2360 | * * BTF description of the map is mandatory. | ||
| 2361 | * * The BPF program can take ONE lock at a time, since taking two | ||
| 2362 | * or more could cause dead locks. | ||
| 2363 | * * Only one **struct bpf_spin_lock** is allowed per map element. | ||
| 2364 | * * When the lock is taken, calls (either BPF to BPF or helpers) | ||
| 2365 | * are not allowed. | ||
| 2366 | * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not | ||
| 2367 | * allowed inside a spinlock-ed region. | ||
| 2368 | * * The BPF program MUST call **bpf_spin_unlock**\ () to release | ||
| 2369 | * the lock, on all execution paths, before it returns. | ||
| 2370 | * * The BPF program can access **struct bpf_spin_lock** only via | ||
| 2371 | * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () | ||
| 2372 | * helpers. Loading or storing data into the **struct | ||
| 2373 | * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. | ||
| 2374 | * * To use the **bpf_spin_lock**\ () helper, the BTF description | ||
| 2375 | * of the map value must be a struct and have **struct | ||
| 2376 | * bpf_spin_lock** *anyname*\ **;** field at the top level. | ||
| 2377 | * Nested lock inside another struct is not allowed. | ||
| 2378 | * * The **struct bpf_spin_lock** *lock* field in a map value must | ||
| 2379 | * be aligned on a multiple of 4 bytes in that value. | ||
| 2380 | * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy | ||
| 2381 | * the **bpf_spin_lock** field to user space. | ||
| 2382 | * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from | ||
| 2383 | * a BPF program, do not update the **bpf_spin_lock** field. | ||
| 2384 | * * **bpf_spin_lock** cannot be on the stack or inside a | ||
| 2385 | * networking packet (it can only be inside of a map values). | ||
| 2386 | * * **bpf_spin_lock** is available to root only. | ||
| 2387 | * * Tracing programs and socket filter programs cannot use | ||
| 2388 | * **bpf_spin_lock**\ () due to insufficient preemption checks | ||
| 2389 | * (but this may change in the future). | ||
| 2390 | * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. | ||
| 2391 | * Return | ||
| 2392 | * 0 | ||
| 2393 | * | ||
| 2394 | * int bpf_spin_unlock(struct bpf_spin_lock *lock) | ||
| 2395 | * Description | ||
| 2396 | * Release the *lock* previously locked by a call to | ||
| 2397 | * **bpf_spin_lock**\ (\ *lock*\ ). | ||
| 2398 | * Return | ||
| 2399 | * 0 | ||
| 2400 | * | ||
| 2346 | * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) | 2401 | * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) |
| 2347 | * Description | 2402 | * Description |
| 2348 | * This helper gets a **struct bpf_sock** pointer such | 2403 | * This helper gets a **struct bpf_sock** pointer such |
| 2349 | * that all the fields in bpf_sock can be accessed. | 2404 | * that all the fields in this **bpf_sock** can be accessed. |
| 2350 | * Return | 2405 | * Return |
| 2351 | * A **struct bpf_sock** pointer on success, or NULL in | 2406 | * A **struct bpf_sock** pointer on success, or **NULL** in |
| 2352 | * case of failure. | 2407 | * case of failure. |
| 2353 | * | 2408 | * |
| 2354 | * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) | 2409 | * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) |
| 2355 | * Description | 2410 | * Description |
| 2356 | * This helper gets a **struct bpf_tcp_sock** pointer from a | 2411 | * This helper gets a **struct bpf_tcp_sock** pointer from a |
| 2357 | * **struct bpf_sock** pointer. | 2412 | * **struct bpf_sock** pointer. |
| 2358 | * | ||
| 2359 | * Return | 2413 | * Return |
| 2360 | * A **struct bpf_tcp_sock** pointer on success, or NULL in | 2414 | * A **struct bpf_tcp_sock** pointer on success, or **NULL** in |
| 2361 | * case of failure. | 2415 | * case of failure. |
| 2362 | * | 2416 | * |
| 2363 | * int bpf_skb_ecn_set_ce(struct sk_buf *skb) | 2417 | * int bpf_skb_ecn_set_ce(struct sk_buf *skb) |
| 2364 | * Description | 2418 | * Description |
| 2365 | * Sets ECN of IP header to ce (congestion encountered) if | 2419 | * Set ECN (Explicit Congestion Notification) field of IP header |
| 2366 | * current value is ect (ECN capable). Works with IPv6 and IPv4. | 2420 | * to **CE** (Congestion Encountered) if current value is **ECT** |
| 2367 | * Return | 2421 | * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 |
| 2368 | * 1 if set, 0 if not set. | 2422 | * and IPv4. |
| 2423 | * Return | ||
| 2424 | * 1 if the **CE** flag is set (either by the current helper call | ||
| 2425 | * or because it was already present), 0 if it is not set. | ||
| 2426 | * | ||
| 2427 | * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) | ||
| 2428 | * Description | ||
| 2429 | * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. | ||
| 2430 | * **bpf_sk_release**\ () is unnecessary and not allowed. | ||
| 2431 | * Return | ||
| 2432 | * A **struct bpf_sock** pointer on success, or **NULL** in | ||
| 2433 | * case of failure. | ||
| 2369 | */ | 2434 | */ |
| 2370 | #define __BPF_FUNC_MAPPER(FN) \ | 2435 | #define __BPF_FUNC_MAPPER(FN) \ |
| 2371 | FN(unspec), \ | 2436 | FN(unspec), \ |
| @@ -2465,7 +2530,8 @@ union bpf_attr { | |||
| 2465 | FN(spin_unlock), \ | 2530 | FN(spin_unlock), \ |
| 2466 | FN(sk_fullsock), \ | 2531 | FN(sk_fullsock), \ |
| 2467 | FN(tcp_sock), \ | 2532 | FN(tcp_sock), \ |
| 2468 | FN(skb_ecn_set_ce), | 2533 | FN(skb_ecn_set_ce), \ |
| 2534 | FN(get_listener_sock), | ||
| 2469 | 2535 | ||
| 2470 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper | 2536 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper |
| 2471 | * function eBPF program intends to call | 2537 | * function eBPF program intends to call |
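The four helper descriptions added above are plain API reference; to make the intended flow concrete, here is a minimal C sketch of a BPF program chaining them, loosely in the style of the kernel selftests. The SEC() macro, the bpf_helpers.h declarations, the section name and every identifier are assumptions made for illustration only, not part of this patch.

    /* Minimal sketch: chain the helpers documented above from a
     * cgroup_skb program.  Assumes bpf_sk_fullsock()/bpf_tcp_sock()
     * declarations come from the selftests' bpf_helpers.h and that the
     * object is built with clang -target bpf. */
    #include <linux/bpf.h>
    #include "bpf_helpers.h"

    SEC("cgroup_skb/ingress")
    int inspect_tcp(struct __sk_buff *skb)
    {
            struct bpf_sock *sk = skb->sk;  /* may be absent or not full */
            struct bpf_tcp_sock *tp;

            if (!sk)
                    return 1;               /* 1 == let the packet through */
            sk = bpf_sk_fullsock(sk);       /* NULL unless a full socket */
            if (!sk)
                    return 1;
            tp = bpf_tcp_sock(sk);          /* NULL on failure, as documented */
            if (!tp)
                    return 1;
            /* the verifier now permits reads such as tp->snd_cwnd */
            return 1;
    }

    char _license[] SEC("license") = "GPL";

bpf_get_listener_sock() slots into the same pattern when the program needs the listener behind a request socket; as its description above notes, the returned pointer does not take a reference, so no bpf_sk_release() is needed or allowed.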
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h index 6448cdd9a350..a2f8658f1c55 100644 --- a/tools/include/uapi/linux/fcntl.h +++ b/tools/include/uapi/linux/fcntl.h | |||
| @@ -41,6 +41,7 @@ | |||
| 41 | #define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */ | 41 | #define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */ |
| 42 | #define F_SEAL_GROW 0x0004 /* prevent file from growing */ | 42 | #define F_SEAL_GROW 0x0004 /* prevent file from growing */ |
| 43 | #define F_SEAL_WRITE 0x0008 /* prevent writes */ | 43 | #define F_SEAL_WRITE 0x0008 /* prevent writes */ |
| 44 | #define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */ | ||
| 44 | /* (1U << 31) is reserved for signed error codes */ | 45 | /* (1U << 31) is reserved for signed error codes */ |
| 45 | 46 | ||
| 46 | /* | 47 | /* |
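F_SEAL_FUTURE_WRITE, whose definition is synced into the tools headers above, forbids writes issued after the seal is applied while leaving mappings that already exist alone. A hedged userspace sketch; the fallback define simply reuses the 0x0010 value from the header, and the memfd name is arbitrary:

    /* Sketch: create a memfd, fill it, then seal it against any future
     * write() or new writable mmap().  Requires a kernel that knows
     * F_SEAL_FUTURE_WRITE. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef F_SEAL_FUTURE_WRITE
    #define F_SEAL_FUTURE_WRITE 0x0010      /* value from the header above */
    #endif

    int make_sealed_memfd(const void *buf, size_t len)
    {
            int fd = memfd_create("sealed-buf", MFD_CLOEXEC | MFD_ALLOW_SEALING);

            if (fd < 0)
                    return -1;
            if (write(fd, buf, len) != (ssize_t)len ||
                fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;      /* readable everywhere; future writes now fail */
    }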
diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h index d0f515d53299..fc1a64c3447b 100644 --- a/tools/include/uapi/linux/mman.h +++ b/tools/include/uapi/linux/mman.h | |||
| @@ -12,6 +12,10 @@ | |||
| 12 | #define OVERCOMMIT_ALWAYS 1 | 12 | #define OVERCOMMIT_ALWAYS 1 |
| 13 | #define OVERCOMMIT_NEVER 2 | 13 | #define OVERCOMMIT_NEVER 2 |
| 14 | 14 | ||
| 15 | #define MAP_SHARED 0x01 /* Share changes */ | ||
| 16 | #define MAP_PRIVATE 0x02 /* Changes are private */ | ||
| 17 | #define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */ | ||
| 18 | |||
| 15 | /* | 19 | /* |
| 16 | * Huge page size encoding when MAP_HUGETLB is specified, and a huge page | 20 | * Huge page size encoding when MAP_HUGETLB is specified, and a huge page |
| 17 | * size other than the default is desired. See hugetlb_encode.h. | 21 | * size other than the default is desired. See hugetlb_encode.h. |
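These three MAP_* definitions are copied into the tools version of linux/mman.h so that the perf trace beautifier (regenerated further down via mmap_flags.sh) can decode them. As a reminder of what MAP_SHARED_VALIDATE buys a caller, a small sketch; it assumes MAP_SYNC from asm-generic/mman-common.h and a DAX-capable file descriptor:

    #define _GNU_SOURCE
    #include <sys/mman.h>

    #ifndef MAP_SHARED_VALIDATE
    #define MAP_SHARED_VALIDATE 0x03        /* value from the header above */
    #endif
    #ifndef MAP_SYNC
    #define MAP_SYNC 0x080000               /* asm-generic/mman-common.h */
    #endif

    /* With plain MAP_SHARED an older kernel silently drops flags it does
     * not know; MAP_SHARED_VALIDATE turns that into an EOPNOTSUPP error,
     * which is what makes MAP_SYNC safe to rely on. */
    void *map_sync(int fd, size_t len)
    {
            return mmap(NULL, len, PROT_READ | PROT_WRITE,
                        MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
    }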
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 61aaacf0cfa1..8e7c56e9590f 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | BPF_VERSION = 0 | 4 | BPF_VERSION = 0 |
| 5 | BPF_PATCHLEVEL = 0 | 5 | BPF_PATCHLEVEL = 0 |
| 6 | BPF_EXTRAVERSION = 1 | 6 | BPF_EXTRAVERSION = 2 |
| 7 | 7 | ||
| 8 | MAKEFLAGS += --no-print-directory | 8 | MAKEFLAGS += --no-print-directory |
| 9 | 9 | ||
| @@ -79,8 +79,6 @@ export prefix libdir src obj | |||
| 79 | libdir_SQ = $(subst ','\'',$(libdir)) | 79 | libdir_SQ = $(subst ','\'',$(libdir)) |
| 80 | libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) | 80 | libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) |
| 81 | 81 | ||
| 82 | LIB_FILE = libbpf.a libbpf.so | ||
| 83 | |||
| 84 | VERSION = $(BPF_VERSION) | 82 | VERSION = $(BPF_VERSION) |
| 85 | PATCHLEVEL = $(BPF_PATCHLEVEL) | 83 | PATCHLEVEL = $(BPF_PATCHLEVEL) |
| 86 | EXTRAVERSION = $(BPF_EXTRAVERSION) | 84 | EXTRAVERSION = $(BPF_EXTRAVERSION) |
| @@ -88,7 +86,10 @@ EXTRAVERSION = $(BPF_EXTRAVERSION) | |||
| 88 | OBJ = $@ | 86 | OBJ = $@ |
| 89 | N = | 87 | N = |
| 90 | 88 | ||
| 91 | LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION) | 89 | LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION) |
| 90 | |||
| 91 | LIB_TARGET = libbpf.a libbpf.so.$(LIBBPF_VERSION) | ||
| 92 | LIB_FILE = libbpf.a libbpf.so* | ||
| 92 | 93 | ||
| 93 | # Set compile option CFLAGS | 94 | # Set compile option CFLAGS |
| 94 | ifdef EXTRA_CFLAGS | 95 | ifdef EXTRA_CFLAGS |
| @@ -128,16 +129,18 @@ all: | |||
| 128 | export srctree OUTPUT CC LD CFLAGS V | 129 | export srctree OUTPUT CC LD CFLAGS V |
| 129 | include $(srctree)/tools/build/Makefile.include | 130 | include $(srctree)/tools/build/Makefile.include |
| 130 | 131 | ||
| 131 | BPF_IN := $(OUTPUT)libbpf-in.o | 132 | BPF_IN := $(OUTPUT)libbpf-in.o |
| 132 | LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) | 133 | VERSION_SCRIPT := libbpf.map |
| 133 | VERSION_SCRIPT := libbpf.map | 134 | |
| 135 | LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET)) | ||
| 136 | LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) | ||
| 134 | 137 | ||
| 135 | GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \ | 138 | GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \ |
| 136 | awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}') | 139 | awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}') |
| 137 | VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ | 140 | VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ |
| 138 | grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) | 141 | grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) |
| 139 | 142 | ||
| 140 | CMD_TARGETS = $(LIB_FILE) | 143 | CMD_TARGETS = $(LIB_TARGET) |
| 141 | 144 | ||
| 142 | CXX_TEST_TARGET = $(OUTPUT)test_libbpf | 145 | CXX_TEST_TARGET = $(OUTPUT)test_libbpf |
| 143 | 146 | ||
| @@ -170,9 +173,13 @@ $(BPF_IN): force elfdep bpfdep | |||
| 170 | echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true | 173 | echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true |
| 171 | $(Q)$(MAKE) $(build)=libbpf | 174 | $(Q)$(MAKE) $(build)=libbpf |
| 172 | 175 | ||
| 173 | $(OUTPUT)libbpf.so: $(BPF_IN) | 176 | $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) |
| 174 | $(QUIET_LINK)$(CC) --shared -Wl,--version-script=$(VERSION_SCRIPT) \ | 177 | |
| 175 | $^ -o $@ | 178 | $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN) |
| 179 | $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \ | ||
| 180 | -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@ | ||
| 181 | @ln -sf $(@F) $(OUTPUT)libbpf.so | ||
| 182 | @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION) | ||
| 176 | 183 | ||
| 177 | $(OUTPUT)libbpf.a: $(BPF_IN) | 184 | $(OUTPUT)libbpf.a: $(BPF_IN) |
| 178 | $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ | 185 | $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ |
| @@ -192,6 +199,12 @@ check_abi: $(OUTPUT)libbpf.so | |||
| 192 | exit 1; \ | 199 | exit 1; \ |
| 193 | fi | 200 | fi |
| 194 | 201 | ||
| 202 | define do_install_mkdir | ||
| 203 | if [ ! -d '$(DESTDIR_SQ)$1' ]; then \ | ||
| 204 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \ | ||
| 205 | fi | ||
| 206 | endef | ||
| 207 | |||
| 195 | define do_install | 208 | define do_install |
| 196 | if [ ! -d '$(DESTDIR_SQ)$2' ]; then \ | 209 | if [ ! -d '$(DESTDIR_SQ)$2' ]; then \ |
| 197 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \ | 210 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \ |
| @@ -200,14 +213,16 @@ define do_install | |||
| 200 | endef | 213 | endef |
| 201 | 214 | ||
| 202 | install_lib: all_cmd | 215 | install_lib: all_cmd |
| 203 | $(call QUIET_INSTALL, $(LIB_FILE)) \ | 216 | $(call QUIET_INSTALL, $(LIB_TARGET)) \ |
| 204 | $(call do_install,$(LIB_FILE),$(libdir_SQ)) | 217 | $(call do_install_mkdir,$(libdir_SQ)); \ |
| 218 | cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ) | ||
| 205 | 219 | ||
| 206 | install_headers: | 220 | install_headers: |
| 207 | $(call QUIET_INSTALL, headers) \ | 221 | $(call QUIET_INSTALL, headers) \ |
| 208 | $(call do_install,bpf.h,$(prefix)/include/bpf,644); \ | 222 | $(call do_install,bpf.h,$(prefix)/include/bpf,644); \ |
| 209 | $(call do_install,libbpf.h,$(prefix)/include/bpf,644); | 223 | $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \ |
| 210 | $(call do_install,btf.h,$(prefix)/include/bpf,644); | 224 | $(call do_install,btf.h,$(prefix)/include/bpf,644); \ |
| 225 | $(call do_install,xsk.h,$(prefix)/include/bpf,644); | ||
| 211 | 226 | ||
| 212 | install: install_lib | 227 | install: install_lib |
| 213 | 228 | ||
| @@ -219,7 +234,7 @@ config-clean: | |||
| 219 | 234 | ||
| 220 | clean: | 235 | clean: |
| 221 | $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \ | 236 | $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \ |
| 222 | *.o *~ *.a *.so .*.d .*.cmd LIBBPF-CFLAGS | 237 | *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd LIBBPF-CFLAGS |
| 223 | $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf | 238 | $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf |
| 224 | 239 | ||
| 225 | 240 | ||
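Taken together, the Makefile changes produce a conventionally versioned shared object: the real file is libbpf.so.0.0.2, its embedded soname is libbpf.so.0 (major number only), and libbpf.so plus libbpf.so.0 are symlinks pointing at it. A small sketch of what that means for a runtime consumer; the dlopen() flags are arbitrary and the program must be linked with -ldl:

    #include <dlfcn.h>

    /* Consumers resolve the soname, not the full x.y.z file name, so the
     * library can move from 0.0.2 to 0.0.3 without relinking anything. */
    void *open_libbpf(void)
    {
            return dlopen("libbpf.so.0", RTLD_NOW | RTLD_GLOBAL);
    }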
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst index 5788479384ca..cef7b77eab69 100644 --- a/tools/lib/bpf/README.rst +++ b/tools/lib/bpf/README.rst | |||
| @@ -111,6 +111,7 @@ starting from ``0.0.1``. | |||
| 111 | 111 | ||
| 112 | Every time ABI is being changed, e.g. because a new symbol is added or | 112 | Every time ABI is being changed, e.g. because a new symbol is added or |
| 113 | semantic of existing symbol is changed, ABI version should be bumped. | 113 | semantic of existing symbol is changed, ABI version should be bumped. |
| 114 | This bump in ABI version is at most once per kernel development cycle. | ||
| 114 | 115 | ||
| 115 | For example, if current state of ``libbpf.map`` is: | 116 | For example, if current state of ``libbpf.map`` is: |
| 116 | 117 | ||
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 1b8d8cdd3575..cf119c9b6f27 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c | |||
| @@ -1602,16 +1602,12 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2) | |||
| 1602 | /* Calculate type signature hash of ENUM. */ | 1602 | /* Calculate type signature hash of ENUM. */ |
| 1603 | static __u32 btf_hash_enum(struct btf_type *t) | 1603 | static __u32 btf_hash_enum(struct btf_type *t) |
| 1604 | { | 1604 | { |
| 1605 | struct btf_enum *member = (struct btf_enum *)(t + 1); | 1605 | __u32 h; |
| 1606 | __u32 vlen = BTF_INFO_VLEN(t->info); | ||
| 1607 | __u32 h = btf_hash_common(t); | ||
| 1608 | int i; | ||
| 1609 | 1606 | ||
| 1610 | for (i = 0; i < vlen; i++) { | 1607 | /* don't hash vlen and enum members to support enum fwd resolving */ |
| 1611 | h = hash_combine(h, member->name_off); | 1608 | h = hash_combine(0, t->name_off); |
| 1612 | h = hash_combine(h, member->val); | 1609 | h = hash_combine(h, t->info & ~0xffff); |
| 1613 | member++; | 1610 | h = hash_combine(h, t->size); |
| 1614 | } | ||
| 1615 | return h; | 1611 | return h; |
| 1616 | } | 1612 | } |
| 1617 | 1613 | ||
| @@ -1637,6 +1633,22 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2) | |||
| 1637 | return true; | 1633 | return true; |
| 1638 | } | 1634 | } |
| 1639 | 1635 | ||
| 1636 | static inline bool btf_is_enum_fwd(struct btf_type *t) | ||
| 1637 | { | ||
| 1638 | return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM && | ||
| 1639 | BTF_INFO_VLEN(t->info) == 0; | ||
| 1640 | } | ||
| 1641 | |||
| 1642 | static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2) | ||
| 1643 | { | ||
| 1644 | if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2)) | ||
| 1645 | return btf_equal_enum(t1, t2); | ||
| 1646 | /* ignore vlen when comparing */ | ||
| 1647 | return t1->name_off == t2->name_off && | ||
| 1648 | (t1->info & ~0xffff) == (t2->info & ~0xffff) && | ||
| 1649 | t1->size == t2->size; | ||
| 1650 | } | ||
| 1651 | |||
| 1640 | /* | 1652 | /* |
| 1641 | * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, | 1653 | * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, |
| 1642 | * as referenced type IDs equivalence is established separately during type | 1654 | * as referenced type IDs equivalence is established separately during type |
| @@ -1860,6 +1872,17 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) | |||
| 1860 | new_id = cand_node->type_id; | 1872 | new_id = cand_node->type_id; |
| 1861 | break; | 1873 | break; |
| 1862 | } | 1874 | } |
| 1875 | if (d->opts.dont_resolve_fwds) | ||
| 1876 | continue; | ||
| 1877 | if (btf_compat_enum(t, cand)) { | ||
| 1878 | if (btf_is_enum_fwd(t)) { | ||
| 1879 | /* resolve fwd to full enum */ | ||
| 1880 | new_id = cand_node->type_id; | ||
| 1881 | break; | ||
| 1882 | } | ||
| 1883 | /* resolve canonical enum fwd to full enum */ | ||
| 1884 | d->map[cand_node->type_id] = type_id; | ||
| 1885 | } | ||
| 1863 | } | 1886 | } |
| 1864 | break; | 1887 | break; |
| 1865 | 1888 | ||
| @@ -2084,7 +2107,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, | |||
| 2084 | return fwd_kind == real_kind; | 2107 | return fwd_kind == real_kind; |
| 2085 | } | 2108 | } |
| 2086 | 2109 | ||
| 2087 | if (cand_type->info != canon_type->info) | 2110 | if (cand_kind != canon_kind) |
| 2088 | return 0; | 2111 | return 0; |
| 2089 | 2112 | ||
| 2090 | switch (cand_kind) { | 2113 | switch (cand_kind) { |
| @@ -2092,7 +2115,10 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, | |||
| 2092 | return btf_equal_int(cand_type, canon_type); | 2115 | return btf_equal_int(cand_type, canon_type); |
| 2093 | 2116 | ||
| 2094 | case BTF_KIND_ENUM: | 2117 | case BTF_KIND_ENUM: |
| 2095 | return btf_equal_enum(cand_type, canon_type); | 2118 | if (d->opts.dont_resolve_fwds) |
| 2119 | return btf_equal_enum(cand_type, canon_type); | ||
| 2120 | else | ||
| 2121 | return btf_compat_enum(cand_type, canon_type); | ||
| 2096 | 2122 | ||
| 2097 | case BTF_KIND_FWD: | 2123 | case BTF_KIND_FWD: |
| 2098 | return btf_equal_common(cand_type, canon_type); | 2124 | return btf_equal_common(cand_type, canon_type); |
| @@ -2103,6 +2129,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, | |||
| 2103 | case BTF_KIND_PTR: | 2129 | case BTF_KIND_PTR: |
| 2104 | case BTF_KIND_TYPEDEF: | 2130 | case BTF_KIND_TYPEDEF: |
| 2105 | case BTF_KIND_FUNC: | 2131 | case BTF_KIND_FUNC: |
| 2132 | if (cand_type->info != canon_type->info) | ||
| 2133 | return 0; | ||
| 2106 | return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); | 2134 | return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); |
| 2107 | 2135 | ||
| 2108 | case BTF_KIND_ARRAY: { | 2136 | case BTF_KIND_ARRAY: { |
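The new hashing and compatibility rules exist so dedup can collapse a forward-declared enum onto its full definition. In source terms, the two BTF shapes that btf_compat_enum() now treats as compatible come from code like the following; the names are illustrative, and the empty declaration is the GNU C extension that GCC's debug info records as an enum forward:

    /* compilation unit A */
    enum pkt_dir;                           /* BTF_KIND_ENUM, vlen == 0 */
    void log_dir(const enum pkt_dir *d);

    /* compilation unit B */
    enum pkt_dir { PKT_IN, PKT_OUT };       /* BTF_KIND_ENUM, vlen == 2 */

After btf__dedup() runs with fwd resolution enabled, both units end up referencing the single full definition, which is why the hash above deliberately ignores vlen and the member list.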
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index e6ad87512519..11c25d9ea431 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c | |||
| @@ -840,12 +840,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) | |||
| 840 | obj->efile.maps_shndx = idx; | 840 | obj->efile.maps_shndx = idx; |
| 841 | else if (strcmp(name, BTF_ELF_SEC) == 0) { | 841 | else if (strcmp(name, BTF_ELF_SEC) == 0) { |
| 842 | obj->btf = btf__new(data->d_buf, data->d_size); | 842 | obj->btf = btf__new(data->d_buf, data->d_size); |
| 843 | if (IS_ERR(obj->btf) || btf__load(obj->btf)) { | 843 | if (IS_ERR(obj->btf)) { |
| 844 | pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", | 844 | pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", |
| 845 | BTF_ELF_SEC, PTR_ERR(obj->btf)); | 845 | BTF_ELF_SEC, PTR_ERR(obj->btf)); |
| 846 | if (!IS_ERR(obj->btf)) | ||
| 847 | btf__free(obj->btf); | ||
| 848 | obj->btf = NULL; | 846 | obj->btf = NULL; |
| 847 | continue; | ||
| 848 | } | ||
| 849 | err = btf__load(obj->btf); | ||
| 850 | if (err) { | ||
| 851 | pr_warning("Error loading %s into kernel: %d. Ignored and continue.\n", | ||
| 852 | BTF_ELF_SEC, err); | ||
| 853 | btf__free(obj->btf); | ||
| 854 | obj->btf = NULL; | ||
| 855 | err = 0; | ||
| 849 | } | 856 | } |
| 850 | } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { | 857 | } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { |
| 851 | btf_ext_data = data; | 858 | btf_ext_data = data; |
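The hunk above separates "the .BTF section would not parse" from "the kernel refused to load it" and recovers from both. A caller driving the btf API directly hits the same two failure points; a hedged sketch, assuming the installed <bpf/btf.h> and <bpf/libbpf.h> headers and libbpf_get_error() for decoding btf__new()'s error-pointer return:

    #include <bpf/btf.h>
    #include <bpf/libbpf.h>

    static struct btf *load_btf_blob(__u8 *data, __u32 size)
    {
            struct btf *btf = btf__new(data, size);

            if (libbpf_get_error(btf))
                    return NULL;            /* malformed blob: nothing to keep */

            if (btf__load(btf)) {           /* hand it to the kernel */
                    /* kernel rejected it; drop it and carry on, much as
                     * bpf_object__elf_collect() now does */
                    btf__free(btf);
                    return NULL;
            }
            return btf;
    }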
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index f98ac82c9aea..8d0078b65486 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c | |||
| @@ -126,8 +126,8 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg, | |||
| 126 | cfg->frame_headroom = usr_cfg->frame_headroom; | 126 | cfg->frame_headroom = usr_cfg->frame_headroom; |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, | 129 | static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, |
| 130 | const struct xsk_socket_config *usr_cfg) | 130 | const struct xsk_socket_config *usr_cfg) |
| 131 | { | 131 | { |
| 132 | if (!usr_cfg) { | 132 | if (!usr_cfg) { |
| 133 | cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; | 133 | cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; |
| @@ -135,14 +135,19 @@ static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, | |||
| 135 | cfg->libbpf_flags = 0; | 135 | cfg->libbpf_flags = 0; |
| 136 | cfg->xdp_flags = 0; | 136 | cfg->xdp_flags = 0; |
| 137 | cfg->bind_flags = 0; | 137 | cfg->bind_flags = 0; |
| 138 | return; | 138 | return 0; |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD) | ||
| 142 | return -EINVAL; | ||
| 143 | |||
| 141 | cfg->rx_size = usr_cfg->rx_size; | 144 | cfg->rx_size = usr_cfg->rx_size; |
| 142 | cfg->tx_size = usr_cfg->tx_size; | 145 | cfg->tx_size = usr_cfg->tx_size; |
| 143 | cfg->libbpf_flags = usr_cfg->libbpf_flags; | 146 | cfg->libbpf_flags = usr_cfg->libbpf_flags; |
| 144 | cfg->xdp_flags = usr_cfg->xdp_flags; | 147 | cfg->xdp_flags = usr_cfg->xdp_flags; |
| 145 | cfg->bind_flags = usr_cfg->bind_flags; | 148 | cfg->bind_flags = usr_cfg->bind_flags; |
| 149 | |||
| 150 | return 0; | ||
| 146 | } | 151 | } |
| 147 | 152 | ||
| 148 | int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, | 153 | int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, |
| @@ -557,7 +562,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, | |||
| 557 | } | 562 | } |
| 558 | strncpy(xsk->ifname, ifname, IFNAMSIZ); | 563 | strncpy(xsk->ifname, ifname, IFNAMSIZ); |
| 559 | 564 | ||
| 560 | xsk_set_xdp_socket_config(&xsk->config, usr_config); | 565 | err = xsk_set_xdp_socket_config(&xsk->config, usr_config); |
| 566 | if (err) | ||
| 567 | goto out_socket; | ||
| 561 | 568 | ||
| 562 | if (rx) { | 569 | if (rx) { |
| 563 | err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, | 570 | err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, |
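With the new check, an unrecognized bit in libbpf_flags is reported as -EINVAL by xsk_socket__create() instead of being carried along silently. A hedged caller-side sketch; the interface name and queue id are placeholders, and the umem/ring setup is assumed to have happened elsewhere:

    #include <bpf/xsk.h>

    int open_xsk(struct xsk_socket **xsk, struct xsk_umem *umem,
                 struct xsk_ring_cons *rx, struct xsk_ring_prod *tx)
    {
            const struct xsk_socket_config cfg = {
                    .rx_size      = XSK_RING_CONS__DEFAULT_NUM_DESCS,
                    .tx_size      = XSK_RING_PROD__DEFAULT_NUM_DESCS,
                    /* only this libbpf flag is defined; any other bit now
                     * fails the call with -EINVAL */
                    .libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
            };

            return xsk_socket__create(xsk, "eth0", 0 /* queue */, umem, rx, tx, &cfg);
    }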
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index c9d038f91af6..53f8be0f4a1f 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile | |||
| @@ -25,14 +25,17 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a | |||
| 25 | OBJTOOL := $(OUTPUT)objtool | 25 | OBJTOOL := $(OUTPUT)objtool |
| 26 | OBJTOOL_IN := $(OBJTOOL)-in.o | 26 | OBJTOOL_IN := $(OBJTOOL)-in.o |
| 27 | 27 | ||
| 28 | LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null) | ||
| 29 | LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) | ||
| 30 | |||
| 28 | all: $(OBJTOOL) | 31 | all: $(OBJTOOL) |
| 29 | 32 | ||
| 30 | INCLUDES := -I$(srctree)/tools/include \ | 33 | INCLUDES := -I$(srctree)/tools/include \ |
| 31 | -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ | 34 | -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ |
| 32 | -I$(srctree)/tools/objtool/arch/$(ARCH)/include | 35 | -I$(srctree)/tools/objtool/arch/$(ARCH)/include |
| 33 | WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed | 36 | WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed |
| 34 | CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) | 37 | CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS) |
| 35 | LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) | 38 | LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) |
| 36 | 39 | ||
| 37 | # Allow old libelf to be used: | 40 | # Allow old libelf to be used: |
| 38 | elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) | 41 | elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) |
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 01f7555fd933..e8c9f77e9010 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf | |||
| @@ -481,8 +481,8 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t | |||
| 481 | mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c | 481 | mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c |
| 482 | mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh | 482 | mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh |
| 483 | 483 | ||
| 484 | $(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl) | 484 | $(mmap_flags_array): $(linux_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl) |
| 485 | $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@ | 485 | $(Q)$(SHELL) '$(mmap_flags_tbl)' $(linux_uapi_dir) $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@ |
| 486 | 486 | ||
| 487 | mount_flags_array := $(beauty_outdir)/mount_flags_array.c | 487 | mount_flags_array := $(beauty_outdir)/mount_flags_array.c |
| 488 | mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh | 488 | mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh |
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 2ae92fddb6d5..92ee0b4378d4 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | |||
| @@ -345,6 +345,10 @@ | |||
| 345 | 334 common rseq __x64_sys_rseq | 345 | 334 common rseq __x64_sys_rseq |
| 346 | # don't use numbers 387 through 423, add new calls after the last | 346 | # don't use numbers 387 through 423, add new calls after the last |
| 347 | # 'common' entry | 347 | # 'common' entry |
| 348 | 424 common pidfd_send_signal __x64_sys_pidfd_send_signal | ||
| 349 | 425 common io_uring_setup __x64_sys_io_uring_setup | ||
| 350 | 426 common io_uring_enter __x64_sys_io_uring_enter | ||
| 351 | 427 common io_uring_register __x64_sys_io_uring_register | ||
| 348 | 352 | ||
| 349 | # | 353 | # |
| 350 | # x32-specific system call numbers start at 512 to avoid cache impact | 354 | # x32-specific system call numbers start at 512 to avoid cache impact |
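The 424-427 entries keep perf trace able to put names on the new system calls. For reference, invoking the first of them by number (glibc had no wrapper at the time; the fallback define reuses the number from the table, and obtaining the pidfd is left to the caller) looks roughly like:

    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_pidfd_send_signal
    #define __NR_pidfd_send_signal 424      /* number from the table above */
    #endif

    /* Send sig to the process referred to by pidfd (e.g. an fd opened on
     * /proc/<pid>); info and flags are left at their defaults. */
    static int pidfd_send_signal(int pidfd, int sig)
    {
            return syscall(__NR_pidfd_send_signal, pidfd, sig, NULL, 0);
    }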
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 7b55613924de..c68ee06cae63 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh | |||
| @@ -103,7 +103,7 @@ done | |||
| 103 | # diff with extra ignore lines | 103 | # diff with extra ignore lines |
| 104 | check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' | 104 | check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' |
| 105 | check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' | 105 | check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' |
| 106 | check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"' | 106 | check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"' |
| 107 | check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"' | 107 | check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"' |
| 108 | 108 | ||
| 109 | # diff non-symmetric files | 109 | # diff non-symmetric files |
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py index e38518cdcbc3..74ef92f1d19a 100755 --- a/tools/perf/scripts/python/exported-sql-viewer.py +++ b/tools/perf/scripts/python/exported-sql-viewer.py | |||
| @@ -107,6 +107,7 @@ import os | |||
| 107 | from PySide.QtCore import * | 107 | from PySide.QtCore import * |
| 108 | from PySide.QtGui import * | 108 | from PySide.QtGui import * |
| 109 | from PySide.QtSql import * | 109 | from PySide.QtSql import * |
| 110 | pyside_version_1 = True | ||
| 110 | from decimal import * | 111 | from decimal import * |
| 111 | from ctypes import * | 112 | from ctypes import * |
| 112 | from multiprocessing import Process, Array, Value, Event | 113 | from multiprocessing import Process, Array, Value, Event |
| @@ -1526,6 +1527,19 @@ def BranchDataPrep(query): | |||
| 1526 | " (" + dsoname(query.value(15)) + ")") | 1527 | " (" + dsoname(query.value(15)) + ")") |
| 1527 | return data | 1528 | return data |
| 1528 | 1529 | ||
| 1530 | def BranchDataPrepWA(query): | ||
| 1531 | data = [] | ||
| 1532 | data.append(query.value(0)) | ||
| 1533 | # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string | ||
| 1534 | data.append("{:>19}".format(query.value(1))) | ||
| 1535 | for i in xrange(2, 8): | ||
| 1536 | data.append(query.value(i)) | ||
| 1537 | data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) + | ||
| 1538 | " (" + dsoname(query.value(11)) + ")" + " -> " + | ||
| 1539 | tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) + | ||
| 1540 | " (" + dsoname(query.value(15)) + ")") | ||
| 1541 | return data | ||
| 1542 | |||
| 1529 | # Branch data model | 1543 | # Branch data model |
| 1530 | 1544 | ||
| 1531 | class BranchModel(TreeModel): | 1545 | class BranchModel(TreeModel): |
| @@ -1553,7 +1567,11 @@ class BranchModel(TreeModel): | |||
| 1553 | " AND evsel_id = " + str(self.event_id) + | 1567 | " AND evsel_id = " + str(self.event_id) + |
| 1554 | " ORDER BY samples.id" | 1568 | " ORDER BY samples.id" |
| 1555 | " LIMIT " + str(glb_chunk_sz)) | 1569 | " LIMIT " + str(glb_chunk_sz)) |
| 1556 | self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample) | 1570 | if pyside_version_1 and sys.version_info[0] == 3: |
| 1571 | prep = BranchDataPrepWA | ||
| 1572 | else: | ||
| 1573 | prep = BranchDataPrep | ||
| 1574 | self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample) | ||
| 1557 | self.fetcher.done.connect(self.Update) | 1575 | self.fetcher.done.connect(self.Update) |
| 1558 | self.fetcher.Fetch(glb_chunk_sz) | 1576 | self.fetcher.Fetch(glb_chunk_sz) |
| 1559 | 1577 | ||
| @@ -2079,14 +2097,6 @@ def IsSelectable(db, table, sql = ""): | |||
| 2079 | return False | 2097 | return False |
| 2080 | return True | 2098 | return True |
| 2081 | 2099 | ||
| 2082 | # SQL data preparation | ||
| 2083 | |||
| 2084 | def SQLTableDataPrep(query, count): | ||
| 2085 | data = [] | ||
| 2086 | for i in xrange(count): | ||
| 2087 | data.append(query.value(i)) | ||
| 2088 | return data | ||
| 2089 | |||
| 2090 | # SQL table data model item | 2100 | # SQL table data model item |
| 2091 | 2101 | ||
| 2092 | class SQLTableItem(): | 2102 | class SQLTableItem(): |
| @@ -2110,7 +2120,7 @@ class SQLTableModel(TableModel): | |||
| 2110 | self.more = True | 2120 | self.more = True |
| 2111 | self.populated = 0 | 2121 | self.populated = 0 |
| 2112 | self.column_headers = column_headers | 2122 | self.column_headers = column_headers |
| 2113 | self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample) | 2123 | self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample) |
| 2114 | self.fetcher.done.connect(self.Update) | 2124 | self.fetcher.done.connect(self.Update) |
| 2115 | self.fetcher.Fetch(glb_chunk_sz) | 2125 | self.fetcher.Fetch(glb_chunk_sz) |
| 2116 | 2126 | ||
| @@ -2154,6 +2164,12 @@ class SQLTableModel(TableModel): | |||
| 2154 | def columnHeader(self, column): | 2164 | def columnHeader(self, column): |
| 2155 | return self.column_headers[column] | 2165 | return self.column_headers[column] |
| 2156 | 2166 | ||
| 2167 | def SQLTableDataPrep(self, query, count): | ||
| 2168 | data = [] | ||
| 2169 | for i in xrange(count): | ||
| 2170 | data.append(query.value(i)) | ||
| 2171 | return data | ||
| 2172 | |||
| 2157 | # SQL automatic table data model | 2173 | # SQL automatic table data model |
| 2158 | 2174 | ||
| 2159 | class SQLAutoTableModel(SQLTableModel): | 2175 | class SQLAutoTableModel(SQLTableModel): |
| @@ -2182,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel): | |||
| 2182 | QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'") | 2198 | QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'") |
| 2183 | while query.next(): | 2199 | while query.next(): |
| 2184 | column_headers.append(query.value(0)) | 2200 | column_headers.append(query.value(0)) |
| 2201 | if pyside_version_1 and sys.version_info[0] == 3: | ||
| 2202 | if table_name == "samples_view": | ||
| 2203 | self.SQLTableDataPrep = self.samples_view_DataPrep | ||
| 2204 | if table_name == "samples": | ||
| 2205 | self.SQLTableDataPrep = self.samples_DataPrep | ||
| 2185 | super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent) | 2206 | super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent) |
| 2186 | 2207 | ||
| 2208 | def samples_view_DataPrep(self, query, count): | ||
| 2209 | data = [] | ||
| 2210 | data.append(query.value(0)) | ||
| 2211 | # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string | ||
| 2212 | data.append("{:>19}".format(query.value(1))) | ||
| 2213 | for i in xrange(2, count): | ||
| 2214 | data.append(query.value(i)) | ||
| 2215 | return data | ||
| 2216 | |||
| 2217 | def samples_DataPrep(self, query, count): | ||
| 2218 | data = [] | ||
| 2219 | for i in xrange(9): | ||
| 2220 | data.append(query.value(i)) | ||
| 2221 | # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string | ||
| 2222 | data.append("{:>19}".format(query.value(9))) | ||
| 2223 | for i in xrange(10, count): | ||
| 2224 | data.append(query.value(i)) | ||
| 2225 | return data | ||
| 2226 | |||
| 2187 | # Base class for custom ResizeColumnsToContents | 2227 | # Base class for custom ResizeColumnsToContents |
| 2188 | 2228 | ||
| 2189 | class ResizeColumnsToContentsBase(QObject): | 2229 | class ResizeColumnsToContentsBase(QObject): |
| @@ -2868,9 +2908,13 @@ class LibXED(): | |||
| 2868 | ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0) | 2908 | ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0) |
| 2869 | if not ok: | 2909 | if not ok: |
| 2870 | return 0, "" | 2910 | return 0, "" |
| 2911 | if sys.version_info[0] == 2: | ||
| 2912 | result = inst.buffer.value | ||
| 2913 | else: | ||
| 2914 | result = inst.buffer.value.decode() | ||
| 2871 | # Return instruction length and the disassembled instruction text | 2915 | # Return instruction length and the disassembled instruction text |
| 2872 | # For now, assume the length is in byte 166 | 2916 | # For now, assume the length is in byte 166 |
| 2873 | return inst.xedd[166], inst.buffer.value | 2917 | return inst.xedd[166], result |
| 2874 | 2918 | ||
| 2875 | def TryOpen(file_name): | 2919 | def TryOpen(file_name): |
| 2876 | try: | 2920 | try: |
| @@ -2886,9 +2930,14 @@ def Is64Bit(f): | |||
| 2886 | header = f.read(7) | 2930 | header = f.read(7) |
| 2887 | f.seek(pos) | 2931 | f.seek(pos) |
| 2888 | magic = header[0:4] | 2932 | magic = header[0:4] |
| 2889 | eclass = ord(header[4]) | 2933 | if sys.version_info[0] == 2: |
| 2890 | encoding = ord(header[5]) | 2934 | eclass = ord(header[4]) |
| 2891 | version = ord(header[6]) | 2935 | encoding = ord(header[5]) |
| 2936 | version = ord(header[6]) | ||
| 2937 | else: | ||
| 2938 | eclass = header[4] | ||
| 2939 | encoding = header[5] | ||
| 2940 | version = header[6] | ||
| 2892 | if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1: | 2941 | if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1: |
| 2893 | result = True if eclass == 2 else False | 2942 | result = True if eclass == 2 else False |
| 2894 | return result | 2943 | return result |
diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh index 32bac9c0d694..5f5eefcb3c74 100755 --- a/tools/perf/trace/beauty/mmap_flags.sh +++ b/tools/perf/trace/beauty/mmap_flags.sh | |||
| @@ -1,15 +1,18 @@ | |||
| 1 | #!/bin/sh | 1 | #!/bin/sh |
| 2 | # SPDX-License-Identifier: LGPL-2.1 | 2 | # SPDX-License-Identifier: LGPL-2.1 |
| 3 | 3 | ||
| 4 | if [ $# -ne 2 ] ; then | 4 | if [ $# -ne 3 ] ; then |
| 5 | [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/` | 5 | [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/` |
| 6 | linux_header_dir=tools/include/uapi/linux | ||
| 6 | header_dir=tools/include/uapi/asm-generic | 7 | header_dir=tools/include/uapi/asm-generic |
| 7 | arch_header_dir=tools/arch/${hostarch}/include/uapi/asm | 8 | arch_header_dir=tools/arch/${hostarch}/include/uapi/asm |
| 8 | else | 9 | else |
| 9 | header_dir=$1 | 10 | linux_header_dir=$1 |
| 10 | arch_header_dir=$2 | 11 | header_dir=$2 |
| 12 | arch_header_dir=$3 | ||
| 11 | fi | 13 | fi |
| 12 | 14 | ||
| 15 | linux_mman=${linux_header_dir}/mman.h | ||
| 13 | arch_mman=${arch_header_dir}/mman.h | 16 | arch_mman=${arch_header_dir}/mman.h |
| 14 | 17 | ||
| 15 | # those in egrep -vw are flags, we want just the bits | 18 | # those in egrep -vw are flags, we want just the bits |
| @@ -20,6 +23,11 @@ egrep -q $regex ${arch_mman} && \ | |||
| 20 | (egrep $regex ${arch_mman} | \ | 23 | (egrep $regex ${arch_mman} | \ |
| 21 | sed -r "s/$regex/\2 \1/g" | \ | 24 | sed -r "s/$regex/\2 \1/g" | \ |
| 22 | xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") | 25 | xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") |
| 26 | egrep -q $regex ${linux_mman} && \ | ||
| 27 | (egrep $regex ${linux_mman} | \ | ||
| 28 | egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \ | ||
| 29 | sed -r "s/$regex/\2 \1/g" | \ | ||
| 30 | xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") | ||
| 23 | ([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) && | 31 | ([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) && |
| 24 | (egrep $regex ${header_dir}/mman-common.h | \ | 32 | (egrep $regex ${header_dir}/mman-common.h | \ |
| 25 | egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \ | 33 | egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \ |
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c index ba4c623cd8de..39fe21e1cf93 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | |||
| @@ -387,6 +387,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder, | |||
| 387 | break; | 387 | break; |
| 388 | case OCSD_INSTR_ISB: | 388 | case OCSD_INSTR_ISB: |
| 389 | case OCSD_INSTR_DSB_DMB: | 389 | case OCSD_INSTR_DSB_DMB: |
| 390 | case OCSD_INSTR_WFI_WFE: | ||
| 390 | case OCSD_INSTR_OTHER: | 391 | case OCSD_INSTR_OTHER: |
| 391 | default: | 392 | default: |
| 392 | packet->last_instr_taken_branch = false; | 393 | packet->last_instr_taken_branch = false; |
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index ec78e93085de..6689378ee577 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
| @@ -231,35 +231,6 @@ void perf_evlist__set_leader(struct perf_evlist *evlist) | |||
| 231 | } | 231 | } |
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | void perf_event_attr__set_max_precise_ip(struct perf_event_attr *pattr) | ||
| 235 | { | ||
| 236 | struct perf_event_attr attr = { | ||
| 237 | .type = PERF_TYPE_HARDWARE, | ||
| 238 | .config = PERF_COUNT_HW_CPU_CYCLES, | ||
| 239 | .exclude_kernel = 1, | ||
| 240 | .precise_ip = 3, | ||
| 241 | }; | ||
| 242 | |||
| 243 | event_attr_init(&attr); | ||
| 244 | |||
| 245 | /* | ||
| 246 | * Unnamed union member, not supported as struct member named | ||
| 247 | * initializer in older compilers such as gcc 4.4.7 | ||
| 248 | */ | ||
| 249 | attr.sample_period = 1; | ||
| 250 | |||
| 251 | while (attr.precise_ip != 0) { | ||
| 252 | int fd = sys_perf_event_open(&attr, 0, -1, -1, 0); | ||
| 253 | if (fd != -1) { | ||
| 254 | close(fd); | ||
| 255 | break; | ||
| 256 | } | ||
| 257 | --attr.precise_ip; | ||
| 258 | } | ||
| 259 | |||
| 260 | pattr->precise_ip = attr.precise_ip; | ||
| 261 | } | ||
| 262 | |||
| 263 | int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise) | 234 | int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise) |
| 264 | { | 235 | { |
| 265 | struct perf_evsel *evsel = perf_evsel__new_cycles(precise); | 236 | struct perf_evsel *evsel = perf_evsel__new_cycles(precise); |
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index dcb68f34d2cd..6a94785b9100 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
| @@ -315,8 +315,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist, | |||
| 315 | void perf_evlist__set_tracking_event(struct perf_evlist *evlist, | 315 | void perf_evlist__set_tracking_event(struct perf_evlist *evlist, |
| 316 | struct perf_evsel *tracking_evsel); | 316 | struct perf_evsel *tracking_evsel); |
| 317 | 317 | ||
| 318 | void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr); | ||
| 319 | |||
| 320 | struct perf_evsel * | 318 | struct perf_evsel * |
| 321 | perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str); | 319 | perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str); |
| 322 | 320 | ||
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 7835e05f0c0a..66d066f18b5b 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
| @@ -295,7 +295,6 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise) | |||
| 295 | if (!precise) | 295 | if (!precise) |
| 296 | goto new_event; | 296 | goto new_event; |
| 297 | 297 | ||
| 298 | perf_event_attr__set_max_precise_ip(&attr); | ||
| 299 | /* | 298 | /* |
| 300 | * Now let the usual logic to set up the perf_event_attr defaults | 299 | * Now let the usual logic to set up the perf_event_attr defaults |
| 301 | * to kick in when we return and before perf_evsel__open() is called. | 300 | * to kick in when we return and before perf_evsel__open() is called. |
| @@ -305,6 +304,8 @@ new_event: | |||
| 305 | if (evsel == NULL) | 304 | if (evsel == NULL) |
| 306 | goto out; | 305 | goto out; |
| 307 | 306 | ||
| 307 | evsel->precise_max = true; | ||
| 308 | |||
| 308 | /* use asprintf() because free(evsel) assumes name is allocated */ | 309 | /* use asprintf() because free(evsel) assumes name is allocated */ |
| 309 | if (asprintf(&evsel->name, "cycles%s%s%.*s", | 310 | if (asprintf(&evsel->name, "cycles%s%s%.*s", |
| 310 | (attr.precise_ip || attr.exclude_kernel) ? ":" : "", | 311 | (attr.precise_ip || attr.exclude_kernel) ? ":" : "", |
| @@ -1083,7 +1084,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts, | |||
| 1083 | } | 1084 | } |
| 1084 | 1085 | ||
| 1085 | if (evsel->precise_max) | 1086 | if (evsel->precise_max) |
| 1086 | perf_event_attr__set_max_precise_ip(attr); | 1087 | attr->precise_ip = 3; |
| 1087 | 1088 | ||
| 1088 | if (opts->all_user) { | 1089 | if (opts->all_user) { |
| 1089 | attr->exclude_kernel = 1; | 1090 | attr->exclude_kernel = 1; |
| @@ -1749,6 +1750,59 @@ static bool ignore_missing_thread(struct perf_evsel *evsel, | |||
| 1749 | return true; | 1750 | return true; |
| 1750 | } | 1751 | } |
| 1751 | 1752 | ||
| 1753 | static void display_attr(struct perf_event_attr *attr) | ||
| 1754 | { | ||
| 1755 | if (verbose >= 2) { | ||
| 1756 | fprintf(stderr, "%.60s\n", graph_dotted_line); | ||
| 1757 | fprintf(stderr, "perf_event_attr:\n"); | ||
| 1758 | perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL); | ||
| 1759 | fprintf(stderr, "%.60s\n", graph_dotted_line); | ||
| 1760 | } | ||
| 1761 | } | ||
| 1762 | |||
| 1763 | static int perf_event_open(struct perf_evsel *evsel, | ||
| 1764 | pid_t pid, int cpu, int group_fd, | ||
| 1765 | unsigned long flags) | ||
| 1766 | { | ||
| 1767 | int precise_ip = evsel->attr.precise_ip; | ||
| 1768 | int fd; | ||
| 1769 | |||
| 1770 | while (1) { | ||
| 1771 | pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", | ||
| 1772 | pid, cpu, group_fd, flags); | ||
| 1773 | |||
| 1774 | fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags); | ||
| 1775 | if (fd >= 0) | ||
| 1776 | break; | ||
| 1777 | |||
| 1778 | /* | ||
| 1779 | * Do quick precise_ip fallback if: | ||
| 1780 | * - there is precise_ip set in perf_event_attr | ||
| 1781 | * - maximum precise is requested | ||
| 1782 | * - sys_perf_event_open failed with ENOTSUP error, | ||
| 1783 | * which is associated with wrong precise_ip | ||
| 1784 | */ | ||
| 1785 | if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP)) | ||
| 1786 | break; | ||
| 1787 | |||
| 1788 | /* | ||
| 1789 | * We tried all the precise_ip values, and it's | ||
| 1790 | * still failing, so leave it to standard fallback. | ||
| 1791 | */ | ||
| 1792 | if (!evsel->attr.precise_ip) { | ||
| 1793 | evsel->attr.precise_ip = precise_ip; | ||
| 1794 | break; | ||
| 1795 | } | ||
| 1796 | |||
| 1797 | pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP); | ||
| 1798 | evsel->attr.precise_ip--; | ||
| 1799 | pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip); | ||
| 1800 | display_attr(&evsel->attr); | ||
| 1801 | } | ||
| 1802 | |||
| 1803 | return fd; | ||
| 1804 | } | ||
| 1805 | |||
| 1752 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | 1806 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
| 1753 | struct thread_map *threads) | 1807 | struct thread_map *threads) |
| 1754 | { | 1808 | { |
| @@ -1824,12 +1878,7 @@ retry_sample_id: | |||
| 1824 | if (perf_missing_features.sample_id_all) | 1878 | if (perf_missing_features.sample_id_all) |
| 1825 | evsel->attr.sample_id_all = 0; | 1879 | evsel->attr.sample_id_all = 0; |
| 1826 | 1880 | ||
| 1827 | if (verbose >= 2) { | 1881 | display_attr(&evsel->attr); |
| 1828 | fprintf(stderr, "%.60s\n", graph_dotted_line); | ||
| 1829 | fprintf(stderr, "perf_event_attr:\n"); | ||
| 1830 | perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL); | ||
| 1831 | fprintf(stderr, "%.60s\n", graph_dotted_line); | ||
| 1832 | } | ||
| 1833 | 1882 | ||
| 1834 | for (cpu = 0; cpu < cpus->nr; cpu++) { | 1883 | for (cpu = 0; cpu < cpus->nr; cpu++) { |
| 1835 | 1884 | ||
| @@ -1841,13 +1890,10 @@ retry_sample_id: | |||
| 1841 | 1890 | ||
| 1842 | group_fd = get_group_fd(evsel, cpu, thread); | 1891 | group_fd = get_group_fd(evsel, cpu, thread); |
| 1843 | retry_open: | 1892 | retry_open: |
| 1844 | pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", | ||
| 1845 | pid, cpus->map[cpu], group_fd, flags); | ||
| 1846 | |||
| 1847 | test_attr__ready(); | 1893 | test_attr__ready(); |
| 1848 | 1894 | ||
| 1849 | fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu], | 1895 | fd = perf_event_open(evsel, pid, cpus->map[cpu], |
| 1850 | group_fd, flags); | 1896 | group_fd, flags); |
| 1851 | 1897 | ||
| 1852 | FD(evsel, cpu, thread) = fd; | 1898 | FD(evsel, cpu, thread) = fd; |
| 1853 | 1899 | ||
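The perf_event_open() wrapper introduced above retries with progressively smaller precise_ip whenever the kernel answers ENOTSUP, which is what lets the removed probing helper go away. The same fallback pattern, sketched against the raw syscall and simplified (perf additionally restores the original precise_ip before handing the error to its generic fallback):

    #include <errno.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int open_cycles_most_precise(pid_t pid, int cpu)
    {
            struct perf_event_attr attr = {
                    .type       = PERF_TYPE_HARDWARE,
                    .config     = PERF_COUNT_HW_CPU_CYCLES,
                    .size       = sizeof(attr),
                    .precise_ip = 3,        /* ask for the maximum first */
            };
            int fd;

            for (;;) {
                    fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0UL);
                    if (fd >= 0 || errno != EOPNOTSUPP || !attr.precise_ip)
                            return fd;
                    attr.precise_ip--;      /* retry with a less precise mode */
            }
    }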
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 6e03db142091..872fab163585 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | |||
| @@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params) | |||
| 251 | if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d)) | 251 | if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d)) |
| 252 | decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n / | 252 | decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n / |
| 253 | decoder->tsc_ctc_ratio_d; | 253 | decoder->tsc_ctc_ratio_d; |
| 254 | |||
| 255 | /* | ||
| 256 | * Allow for timestamps appearing to backwards because a TSC | ||
| 257 | * packet has slipped past a MTC packet, so allow 2 MTC ticks | ||
| 258 | * or ... | ||
| 259 | */ | ||
| 260 | decoder->tsc_slip = multdiv(2 << decoder->mtc_shift, | ||
| 261 | decoder->tsc_ctc_ratio_n, | ||
| 262 | decoder->tsc_ctc_ratio_d); | ||
| 263 | } | 254 | } |
| 264 | /* ... or 0x100 paranoia */ | 255 | |
| 265 | if (decoder->tsc_slip < 0x100) | 256 | /* |
| 266 | decoder->tsc_slip = 0x100; | 257 | * A TSC packet can slip past MTC packets so that the timestamp appears |
| 258 | * to go backwards. One estimate is that can be up to about 40 CPU | ||
| 259 | * cycles, which is certainly less than 0x1000 TSC ticks, but accept | ||
| 260 | * slippage an order of magnitude more to be on the safe side. | ||
| 261 | */ | ||
| 262 | decoder->tsc_slip = 0x10000; | ||
| 267 | 263 | ||
| 268 | intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift); | 264 | intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift); |
| 269 | intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n); | 265 | intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n); |
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 61959aba7e27..3c520baa198c 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
| @@ -1421,6 +1421,20 @@ static void machine__set_kernel_mmap(struct machine *machine, | |||
| 1421 | machine->vmlinux_map->end = ~0ULL; | 1421 | machine->vmlinux_map->end = ~0ULL; |
| 1422 | } | 1422 | } |
| 1423 | 1423 | ||
| 1424 | static void machine__update_kernel_mmap(struct machine *machine, | ||
| 1425 | u64 start, u64 end) | ||
| 1426 | { | ||
| 1427 | struct map *map = machine__kernel_map(machine); | ||
| 1428 | |||
| 1429 | map__get(map); | ||
| 1430 | map_groups__remove(&machine->kmaps, map); | ||
| 1431 | |||
| 1432 | machine__set_kernel_mmap(machine, start, end); | ||
| 1433 | |||
| 1434 | map_groups__insert(&machine->kmaps, map); | ||
| 1435 | map__put(map); | ||
| 1436 | } | ||
| 1437 | |||
| 1424 | int machine__create_kernel_maps(struct machine *machine) | 1438 | int machine__create_kernel_maps(struct machine *machine) |
| 1425 | { | 1439 | { |
| 1426 | struct dso *kernel = machine__get_kernel(machine); | 1440 | struct dso *kernel = machine__get_kernel(machine); |
| @@ -1453,17 +1467,11 @@ int machine__create_kernel_maps(struct machine *machine) | |||
| 1453 | goto out_put; | 1467 | goto out_put; |
| 1454 | } | 1468 | } |
| 1455 | 1469 | ||
| 1456 | /* we have a real start address now, so re-order the kmaps */ | 1470 | /* |
| 1457 | map = machine__kernel_map(machine); | 1471 | * we have a real start address now, so re-order the kmaps |
| 1458 | 1472 | * assume it's the last in the kmaps | |
| 1459 | map__get(map); | 1473 | */ |
| 1460 | map_groups__remove(&machine->kmaps, map); | 1474 | machine__update_kernel_mmap(machine, addr, ~0ULL); |
| 1461 | |||
| 1462 | /* assume it's the last in the kmaps */ | ||
| 1463 | machine__set_kernel_mmap(machine, addr, ~0ULL); | ||
| 1464 | |||
| 1465 | map_groups__insert(&machine->kmaps, map); | ||
| 1466 | map__put(map); | ||
| 1467 | } | 1475 | } |
| 1468 | 1476 | ||
| 1469 | if (machine__create_extra_kernel_maps(machine, kernel)) | 1477 | if (machine__create_extra_kernel_maps(machine, kernel)) |
| @@ -1599,7 +1607,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine, | |||
| 1599 | if (strstr(kernel->long_name, "vmlinux")) | 1607 | if (strstr(kernel->long_name, "vmlinux")) |
| 1600 | dso__set_short_name(kernel, "[kernel.vmlinux]", false); | 1608 | dso__set_short_name(kernel, "[kernel.vmlinux]", false); |
| 1601 | 1609 | ||
| 1602 | machine__set_kernel_mmap(machine, event->mmap.start, | 1610 | machine__update_kernel_mmap(machine, event->mmap.start, |
| 1603 | event->mmap.start + event->mmap.len); | 1611 | event->mmap.start + event->mmap.len); |
| 1604 | 1612 | ||
| 1605 | /* | 1613 | /* |
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 6199a3174ab9..e0429f4ef335 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c | |||
| @@ -732,10 +732,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu) | |||
| 732 | 732 | ||
| 733 | if (!is_arm_pmu_core(name)) { | 733 | if (!is_arm_pmu_core(name)) { |
| 734 | pname = pe->pmu ? pe->pmu : "cpu"; | 734 | pname = pe->pmu ? pe->pmu : "cpu"; |
| 735 | |||
| 736 | /* | ||
| 737 | * uncore alias may be from different PMU | ||
| 738 | * with common prefix | ||
| 739 | */ | ||
| 740 | if (pmu_is_uncore(name) && | ||
| 741 | !strncmp(pname, name, strlen(pname))) | ||
| 742 | goto new_alias; | ||
| 743 | |||
| 735 | if (strcmp(pname, name)) | 744 | if (strcmp(pname, name)) |
| 736 | continue; | 745 | continue; |
| 737 | } | 746 | } |
| 738 | 747 | ||
| 748 | new_alias: | ||
| 739 | /* need type casts to override 'const' */ | 749 | /* need type casts to override 'const' */ |
| 740 | __perf_pmu__new_alias(head, NULL, (char *)pe->name, | 750 | __perf_pmu__new_alias(head, NULL, (char *)pe->name, |
| 741 | (char *)pe->desc, (char *)pe->event, | 751 | (char *)pe->desc, (char *)pe->event, |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index c3fad065c89c..c7727be9719f 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #include <cpuid.h> | 44 | #include <cpuid.h> |
| 45 | #include <linux/capability.h> | 45 | #include <linux/capability.h> |
| 46 | #include <errno.h> | 46 | #include <errno.h> |
| 47 | #include <math.h> | ||
| 47 | 48 | ||
| 48 | char *proc_stat = "/proc/stat"; | 49 | char *proc_stat = "/proc/stat"; |
| 49 | FILE *outf; | 50 | FILE *outf; |
| @@ -63,7 +64,6 @@ unsigned int dump_only; | |||
| 63 | unsigned int do_snb_cstates; | 64 | unsigned int do_snb_cstates; |
| 64 | unsigned int do_knl_cstates; | 65 | unsigned int do_knl_cstates; |
| 65 | unsigned int do_slm_cstates; | 66 | unsigned int do_slm_cstates; |
| 66 | unsigned int do_cnl_cstates; | ||
| 67 | unsigned int use_c1_residency_msr; | 67 | unsigned int use_c1_residency_msr; |
| 68 | unsigned int has_aperf; | 68 | unsigned int has_aperf; |
| 69 | unsigned int has_epb; | 69 | unsigned int has_epb; |
| @@ -141,9 +141,21 @@ unsigned int first_counter_read = 1; | |||
| 141 | 141 | ||
| 142 | #define RAPL_CORES_ENERGY_STATUS (1 << 9) | 142 | #define RAPL_CORES_ENERGY_STATUS (1 << 9) |
| 143 | /* 0x639 MSR_PP0_ENERGY_STATUS */ | 143 | /* 0x639 MSR_PP0_ENERGY_STATUS */ |
| 144 | #define RAPL_PER_CORE_ENERGY (1 << 10) | ||
| 145 | /* Indicates cores energy collection is per-core, | ||
| 146 | * not per-package. */ | ||
| 147 | #define RAPL_AMD_F17H (1 << 11) | ||
| 148 | /* 0xc0010299 MSR_RAPL_PWR_UNIT */ | ||
| 149 | /* 0xc001029a MSR_CORE_ENERGY_STAT */ | ||
| 150 | /* 0xc001029b MSR_PKG_ENERGY_STAT */ | ||
| 144 | #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT) | 151 | #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT) |
| 145 | #define TJMAX_DEFAULT 100 | 152 | #define TJMAX_DEFAULT 100 |
| 146 | 153 | ||
| 154 | /* MSRs that are not yet in the kernel-provided header. */ | ||
| 155 | #define MSR_RAPL_PWR_UNIT 0xc0010299 | ||
| 156 | #define MSR_CORE_ENERGY_STAT 0xc001029a | ||
| 157 | #define MSR_PKG_ENERGY_STAT 0xc001029b | ||
| 158 | |||
| 147 | #define MAX(a, b) ((a) > (b) ? (a) : (b)) | 159 | #define MAX(a, b) ((a) > (b) ? (a) : (b)) |
| 148 | 160 | ||
| 149 | /* | 161 | /* |
| @@ -187,6 +199,7 @@ struct core_data { | |||
| 187 | unsigned long long c7; | 199 | unsigned long long c7; |
| 188 | unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */ | 200 | unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */ |
| 189 | unsigned int core_temp_c; | 201 | unsigned int core_temp_c; |
| 202 | unsigned int core_energy; /* MSR_CORE_ENERGY_STAT */ | ||
| 190 | unsigned int core_id; | 203 | unsigned int core_id; |
| 191 | unsigned long long counter[MAX_ADDED_COUNTERS]; | 204 | unsigned long long counter[MAX_ADDED_COUNTERS]; |
| 192 | } *core_even, *core_odd; | 205 | } *core_even, *core_odd; |
| @@ -273,6 +286,7 @@ struct system_summary { | |||
| 273 | 286 | ||
| 274 | struct cpu_topology { | 287 | struct cpu_topology { |
| 275 | int physical_package_id; | 288 | int physical_package_id; |
| 289 | int die_id; | ||
| 276 | int logical_cpu_id; | 290 | int logical_cpu_id; |
| 277 | int physical_node_id; | 291 | int physical_node_id; |
| 278 | int logical_node_id; /* 0-based count within the package */ | 292 | int logical_node_id; /* 0-based count within the package */ |
| @@ -283,6 +297,7 @@ struct cpu_topology { | |||
| 283 | 297 | ||
| 284 | struct topo_params { | 298 | struct topo_params { |
| 285 | int num_packages; | 299 | int num_packages; |
| 300 | int num_die; | ||
| 286 | int num_cpus; | 301 | int num_cpus; |
| 287 | int num_cores; | 302 | int num_cores; |
| 288 | int max_cpu_num; | 303 | int max_cpu_num; |
| @@ -314,9 +329,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg | |||
| 314 | int retval, pkg_no, core_no, thread_no, node_no; | 329 | int retval, pkg_no, core_no, thread_no, node_no; |
| 315 | 330 | ||
| 316 | for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { | 331 | for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { |
| 317 | for (core_no = 0; core_no < topo.cores_per_node; ++core_no) { | 332 | for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) { |
| 318 | for (node_no = 0; node_no < topo.nodes_per_pkg; | 333 | for (core_no = 0; core_no < topo.cores_per_node; ++core_no) { |
| 319 | node_no++) { | ||
| 320 | for (thread_no = 0; thread_no < | 334 | for (thread_no = 0; thread_no < |
| 321 | topo.threads_per_core; ++thread_no) { | 335 | topo.threads_per_core; ++thread_no) { |
| 322 | struct thread_data *t; | 336 | struct thread_data *t; |
| @@ -442,6 +456,7 @@ struct msr_counter bic[] = { | |||
| 442 | { 0x0, "CPU" }, | 456 | { 0x0, "CPU" }, |
| 443 | { 0x0, "APIC" }, | 457 | { 0x0, "APIC" }, |
| 444 | { 0x0, "X2APIC" }, | 458 | { 0x0, "X2APIC" }, |
| 459 | { 0x0, "Die" }, | ||
| 445 | }; | 460 | }; |
| 446 | 461 | ||
| 447 | #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) | 462 | #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) |
| @@ -495,6 +510,7 @@ struct msr_counter bic[] = { | |||
| 495 | #define BIC_CPU (1ULL << 47) | 510 | #define BIC_CPU (1ULL << 47) |
| 496 | #define BIC_APIC (1ULL << 48) | 511 | #define BIC_APIC (1ULL << 48) |
| 497 | #define BIC_X2APIC (1ULL << 49) | 512 | #define BIC_X2APIC (1ULL << 49) |
| 513 | #define BIC_Die (1ULL << 50) | ||
| 498 | 514 | ||
| 499 | #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) | 515 | #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) |
| 500 | 516 | ||
| @@ -621,6 +637,8 @@ void print_header(char *delim) | |||
| 621 | outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : "")); | 637 | outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : "")); |
| 622 | if (DO_BIC(BIC_Package)) | 638 | if (DO_BIC(BIC_Package)) |
| 623 | outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); | 639 | outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); |
| 640 | if (DO_BIC(BIC_Die)) | ||
| 641 | outp += sprintf(outp, "%sDie", (printed++ ? delim : "")); | ||
| 624 | if (DO_BIC(BIC_Node)) | 642 | if (DO_BIC(BIC_Node)) |
| 625 | outp += sprintf(outp, "%sNode", (printed++ ? delim : "")); | 643 | outp += sprintf(outp, "%sNode", (printed++ ? delim : "")); |
| 626 | if (DO_BIC(BIC_Core)) | 644 | if (DO_BIC(BIC_Core)) |
| @@ -667,7 +685,7 @@ void print_header(char *delim) | |||
| 667 | 685 | ||
| 668 | if (DO_BIC(BIC_CPU_c1)) | 686 | if (DO_BIC(BIC_CPU_c1)) |
| 669 | outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); | 687 | outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); |
| 670 | if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) | 688 | if (DO_BIC(BIC_CPU_c3)) |
| 671 | outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); | 689 | outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); |
| 672 | if (DO_BIC(BIC_CPU_c6)) | 690 | if (DO_BIC(BIC_CPU_c6)) |
| 673 | outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); | 691 | outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); |
| @@ -680,6 +698,14 @@ void print_header(char *delim) | |||
| 680 | if (DO_BIC(BIC_CoreTmp)) | 698 | if (DO_BIC(BIC_CoreTmp)) |
| 681 | outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : "")); | 699 | outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : "")); |
| 682 | 700 | ||
| 701 | if (do_rapl && !rapl_joules) { | ||
| 702 | if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) | ||
| 703 | outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); | ||
| 704 | } else if (do_rapl && rapl_joules) { | ||
| 705 | if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) | ||
| 706 | outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); | ||
| 707 | } | ||
| 708 | |||
| 683 | for (mp = sys.cp; mp; mp = mp->next) { | 709 | for (mp = sys.cp; mp; mp = mp->next) { |
| 684 | if (mp->format == FORMAT_RAW) { | 710 | if (mp->format == FORMAT_RAW) { |
| 685 | if (mp->width == 64) | 711 | if (mp->width == 64) |
| @@ -734,7 +760,7 @@ void print_header(char *delim) | |||
| 734 | if (do_rapl && !rapl_joules) { | 760 | if (do_rapl && !rapl_joules) { |
| 735 | if (DO_BIC(BIC_PkgWatt)) | 761 | if (DO_BIC(BIC_PkgWatt)) |
| 736 | outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); | 762 | outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); |
| 737 | if (DO_BIC(BIC_CorWatt)) | 763 | if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) |
| 738 | outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); | 764 | outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); |
| 739 | if (DO_BIC(BIC_GFXWatt)) | 765 | if (DO_BIC(BIC_GFXWatt)) |
| 740 | outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); | 766 | outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); |
| @@ -747,7 +773,7 @@ void print_header(char *delim) | |||
| 747 | } else if (do_rapl && rapl_joules) { | 773 | } else if (do_rapl && rapl_joules) { |
| 748 | if (DO_BIC(BIC_Pkg_J)) | 774 | if (DO_BIC(BIC_Pkg_J)) |
| 749 | outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); | 775 | outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); |
| 750 | if (DO_BIC(BIC_Cor_J)) | 776 | if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) |
| 751 | outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); | 777 | outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); |
| 752 | if (DO_BIC(BIC_GFX_J)) | 778 | if (DO_BIC(BIC_GFX_J)) |
| 753 | outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); | 779 | outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); |
| @@ -808,6 +834,7 @@ int dump_counters(struct thread_data *t, struct core_data *c, | |||
| 808 | outp += sprintf(outp, "c6: %016llX\n", c->c6); | 834 | outp += sprintf(outp, "c6: %016llX\n", c->c6); |
| 809 | outp += sprintf(outp, "c7: %016llX\n", c->c7); | 835 | outp += sprintf(outp, "c7: %016llX\n", c->c7); |
| 810 | outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); | 836 | outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); |
| 837 | outp += sprintf(outp, "Joules: %0X\n", c->core_energy); | ||
| 811 | 838 | ||
| 812 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { | 839 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { |
| 813 | outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", | 840 | outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", |
| @@ -904,6 +931,8 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 904 | if (t == &average.threads) { | 931 | if (t == &average.threads) { |
| 905 | if (DO_BIC(BIC_Package)) | 932 | if (DO_BIC(BIC_Package)) |
| 906 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | 933 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); |
| 934 | if (DO_BIC(BIC_Die)) | ||
| 935 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | ||
| 907 | if (DO_BIC(BIC_Node)) | 936 | if (DO_BIC(BIC_Node)) |
| 908 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | 937 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); |
| 909 | if (DO_BIC(BIC_Core)) | 938 | if (DO_BIC(BIC_Core)) |
| @@ -921,6 +950,12 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 921 | else | 950 | else |
| 922 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | 951 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); |
| 923 | } | 952 | } |
| 953 | if (DO_BIC(BIC_Die)) { | ||
| 954 | if (c) | ||
| 955 | outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id); | ||
| 956 | else | ||
| 957 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | ||
| 958 | } | ||
| 924 | if (DO_BIC(BIC_Node)) { | 959 | if (DO_BIC(BIC_Node)) { |
| 925 | if (t) | 960 | if (t) |
| 926 | outp += sprintf(outp, "%s%d", | 961 | outp += sprintf(outp, "%s%d", |
| @@ -1003,7 +1038,7 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 1003 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) | 1038 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) |
| 1004 | goto done; | 1039 | goto done; |
| 1005 | 1040 | ||
| 1006 | if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) | 1041 | if (DO_BIC(BIC_CPU_c3)) |
| 1007 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); | 1042 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); |
| 1008 | if (DO_BIC(BIC_CPU_c6)) | 1043 | if (DO_BIC(BIC_CPU_c6)) |
| 1009 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); | 1044 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); |
| @@ -1033,6 +1068,20 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 1033 | } | 1068 | } |
| 1034 | } | 1069 | } |
| 1035 | 1070 | ||
| 1071 | /* | ||
| 1072 | * If measurement interval exceeds minimum RAPL Joule Counter range, | ||
| 1073 | * indicate that results are suspect by printing "**" in fraction place. | ||
| 1074 | */ | ||
| 1075 | if (interval_float < rapl_joule_counter_range) | ||
| 1076 | fmt8 = "%s%.2f"; | ||
| 1077 | else | ||
| 1078 | fmt8 = "%6.0f**"; | ||
| 1079 | |||
| 1080 | if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) | ||
| 1081 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float); | ||
| 1082 | if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) | ||
| 1083 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units); | ||
| 1084 | |||
| 1036 | /* print per-package data only for 1st core in package */ | 1085 | /* print per-package data only for 1st core in package */ |
| 1037 | if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) | 1086 | if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) |
| 1038 | goto done; | 1087 | goto done; |
| @@ -1085,18 +1134,9 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 1085 | if (DO_BIC(BIC_SYS_LPI)) | 1134 | if (DO_BIC(BIC_SYS_LPI)) |
| 1086 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); | 1135 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); |
| 1087 | 1136 | ||
| 1088 | /* | ||
| 1089 | * If measurement interval exceeds minimum RAPL Joule Counter range, | ||
| 1090 | * indicate that results are suspect by printing "**" in fraction place. | ||
| 1091 | */ | ||
| 1092 | if (interval_float < rapl_joule_counter_range) | ||
| 1093 | fmt8 = "%s%.2f"; | ||
| 1094 | else | ||
| 1095 | fmt8 = "%6.0f**"; | ||
| 1096 | |||
| 1097 | if (DO_BIC(BIC_PkgWatt)) | 1137 | if (DO_BIC(BIC_PkgWatt)) |
| 1098 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); | 1138 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); |
| 1099 | if (DO_BIC(BIC_CorWatt)) | 1139 | if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) |
| 1100 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); | 1140 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); |
| 1101 | if (DO_BIC(BIC_GFXWatt)) | 1141 | if (DO_BIC(BIC_GFXWatt)) |
| 1102 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); | 1142 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); |
| @@ -1104,7 +1144,7 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 1104 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); | 1144 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); |
| 1105 | if (DO_BIC(BIC_Pkg_J)) | 1145 | if (DO_BIC(BIC_Pkg_J)) |
| 1106 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); | 1146 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); |
| 1107 | if (DO_BIC(BIC_Cor_J)) | 1147 | if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) |
| 1108 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); | 1148 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); |
| 1109 | if (DO_BIC(BIC_GFX_J)) | 1149 | if (DO_BIC(BIC_GFX_J)) |
| 1110 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); | 1150 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); |
| @@ -1249,6 +1289,8 @@ delta_core(struct core_data *new, struct core_data *old) | |||
| 1249 | old->core_temp_c = new->core_temp_c; | 1289 | old->core_temp_c = new->core_temp_c; |
| 1250 | old->mc6_us = new->mc6_us - old->mc6_us; | 1290 | old->mc6_us = new->mc6_us - old->mc6_us; |
| 1251 | 1291 | ||
| 1292 | DELTA_WRAP32(new->core_energy, old->core_energy); | ||
| 1293 | |||
| 1252 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { | 1294 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { |
| 1253 | if (mp->format == FORMAT_RAW) | 1295 | if (mp->format == FORMAT_RAW) |
| 1254 | old->counter[i] = new->counter[i]; | 1296 | old->counter[i] = new->counter[i]; |
| @@ -1391,6 +1433,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data | |||
| 1391 | c->c7 = 0; | 1433 | c->c7 = 0; |
| 1392 | c->mc6_us = 0; | 1434 | c->mc6_us = 0; |
| 1393 | c->core_temp_c = 0; | 1435 | c->core_temp_c = 0; |
| 1436 | c->core_energy = 0; | ||
| 1394 | 1437 | ||
| 1395 | p->pkg_wtd_core_c0 = 0; | 1438 | p->pkg_wtd_core_c0 = 0; |
| 1396 | p->pkg_any_core_c0 = 0; | 1439 | p->pkg_any_core_c0 = 0; |
| @@ -1473,6 +1516,8 @@ int sum_counters(struct thread_data *t, struct core_data *c, | |||
| 1473 | 1516 | ||
| 1474 | average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); | 1517 | average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); |
| 1475 | 1518 | ||
| 1519 | average.cores.core_energy += c->core_energy; | ||
| 1520 | |||
| 1476 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { | 1521 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { |
| 1477 | if (mp->format == FORMAT_RAW) | 1522 | if (mp->format == FORMAT_RAW) |
| 1478 | continue; | 1523 | continue; |
| @@ -1818,7 +1863,7 @@ retry: | |||
| 1818 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) | 1863 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) |
| 1819 | goto done; | 1864 | goto done; |
| 1820 | 1865 | ||
| 1821 | if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) { | 1866 | if (DO_BIC(BIC_CPU_c3)) { |
| 1822 | if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) | 1867 | if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) |
| 1823 | return -6; | 1868 | return -6; |
| 1824 | } | 1869 | } |
| @@ -1845,6 +1890,12 @@ retry: | |||
| 1845 | c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); | 1890 | c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); |
| 1846 | } | 1891 | } |
| 1847 | 1892 | ||
| 1893 | if (do_rapl & RAPL_AMD_F17H) { | ||
| 1894 | if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr)) | ||
| 1895 | return -14; | ||
| 1896 | c->core_energy = msr & 0xFFFFFFFF; | ||
| 1897 | } | ||
| 1898 | |||
| 1848 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { | 1899 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { |
| 1849 | if (get_mp(cpu, mp, &c->counter[i])) | 1900 | if (get_mp(cpu, mp, &c->counter[i])) |
| 1850 | return -10; | 1901 | return -10; |
| @@ -1934,6 +1985,11 @@ retry: | |||
| 1934 | return -16; | 1985 | return -16; |
| 1935 | p->rapl_dram_perf_status = msr & 0xFFFFFFFF; | 1986 | p->rapl_dram_perf_status = msr & 0xFFFFFFFF; |
| 1936 | } | 1987 | } |
| 1988 | if (do_rapl & RAPL_AMD_F17H) { | ||
| 1989 | if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr)) | ||
| 1990 | return -13; | ||
| 1991 | p->energy_pkg = msr & 0xFFFFFFFF; | ||
| 1992 | } | ||
| 1937 | if (DO_BIC(BIC_PkgTmp)) { | 1993 | if (DO_BIC(BIC_PkgTmp)) { |
| 1938 | if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) | 1994 | if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) |
| 1939 | return -17; | 1995 | return -17; |
| @@ -2456,6 +2512,8 @@ void free_all_buffers(void) | |||
| 2456 | 2512 | ||
| 2457 | /* | 2513 | /* |
| 2458 | * Parse a file containing a single int. | 2514 | * Parse a file containing a single int. |
| 2515 | * Return 0 if file can not be opened | ||
| 2516 | * Exit if file can be opened, but can not be parsed | ||
| 2459 | */ | 2517 | */ |
| 2460 | int parse_int_file(const char *fmt, ...) | 2518 | int parse_int_file(const char *fmt, ...) |
| 2461 | { | 2519 | { |
| @@ -2467,7 +2525,9 @@ int parse_int_file(const char *fmt, ...) | |||
| 2467 | va_start(args, fmt); | 2525 | va_start(args, fmt); |
| 2468 | vsnprintf(path, sizeof(path), fmt, args); | 2526 | vsnprintf(path, sizeof(path), fmt, args); |
| 2469 | va_end(args); | 2527 | va_end(args); |
| 2470 | filep = fopen_or_die(path, "r"); | 2528 | filep = fopen(path, "r"); |
| 2529 | if (!filep) | ||
| 2530 | return 0; | ||
| 2471 | if (fscanf(filep, "%d", &value) != 1) | 2531 | if (fscanf(filep, "%d", &value) != 1) |
| 2472 | err(1, "%s: failed to parse number from file", path); | 2532 | err(1, "%s: failed to parse number from file", path); |
| 2473 | fclose(filep); | 2533 | fclose(filep); |
| @@ -2488,6 +2548,11 @@ int get_physical_package_id(int cpu) | |||
| 2488 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); | 2548 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); |
| 2489 | } | 2549 | } |
| 2490 | 2550 | ||
| 2551 | int get_die_id(int cpu) | ||
| 2552 | { | ||
| 2553 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu); | ||
| 2554 | } | ||
| 2555 | |||
| 2491 | int get_core_id(int cpu) | 2556 | int get_core_id(int cpu) |
| 2492 | { | 2557 | { |
| 2493 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); | 2558 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); |
| @@ -2578,7 +2643,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu) | |||
| 2578 | filep = fopen_or_die(path, "r"); | 2643 | filep = fopen_or_die(path, "r"); |
| 2579 | do { | 2644 | do { |
| 2580 | offset -= BITMASK_SIZE; | 2645 | offset -= BITMASK_SIZE; |
| 2581 | fscanf(filep, "%lx%c", &map, &character); | 2646 | if (fscanf(filep, "%lx%c", &map, &character) != 2) |
| 2647 | err(1, "%s: failed to parse file", path); | ||
| 2582 | for (shift = 0; shift < BITMASK_SIZE; shift++) { | 2648 | for (shift = 0; shift < BITMASK_SIZE; shift++) { |
| 2583 | if ((map >> shift) & 0x1) { | 2649 | if ((map >> shift) & 0x1) { |
| 2584 | so = shift + offset; | 2650 | so = shift + offset; |
| @@ -2855,8 +2921,11 @@ int snapshot_cpu_lpi_us(void) | |||
| 2855 | fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r"); | 2921 | fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r"); |
| 2856 | 2922 | ||
| 2857 | retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us); | 2923 | retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us); |
| 2858 | if (retval != 1) | 2924 | if (retval != 1) { |
| 2859 | err(1, "CPU LPI"); | 2925 | fprintf(stderr, "Disabling Low Power Idle CPU output\n"); |
| 2926 | BIC_NOT_PRESENT(BIC_CPU_LPI); | ||
| 2927 | return -1; | ||
| 2928 | } | ||
| 2860 | 2929 | ||
| 2861 | fclose(fp); | 2930 | fclose(fp); |
| 2862 | 2931 | ||
| @@ -2878,9 +2947,11 @@ int snapshot_sys_lpi_us(void) | |||
| 2878 | fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r"); | 2947 | fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r"); |
| 2879 | 2948 | ||
| 2880 | retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); | 2949 | retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); |
| 2881 | if (retval != 1) | 2950 | if (retval != 1) { |
| 2882 | err(1, "SYS LPI"); | 2951 | fprintf(stderr, "Disabling Low Power Idle System output\n"); |
| 2883 | 2952 | BIC_NOT_PRESENT(BIC_SYS_LPI); | |
| 2953 | return -1; | ||
| 2954 | } | ||
| 2884 | fclose(fp); | 2955 | fclose(fp); |
| 2885 | 2956 | ||
| 2886 | return 0; | 2957 | return 0; |
| @@ -3410,14 +3481,14 @@ dump_sysfs_cstate_config(void) | |||
| 3410 | input = fopen(path, "r"); | 3481 | input = fopen(path, "r"); |
| 3411 | if (input == NULL) | 3482 | if (input == NULL) |
| 3412 | continue; | 3483 | continue; |
| 3413 | fgets(name_buf, sizeof(name_buf), input); | 3484 | if (!fgets(name_buf, sizeof(name_buf), input)) |
| 3485 | err(1, "%s: failed to read file", path); | ||
| 3414 | 3486 | ||
| 3415 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ | 3487 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ |
| 3416 | sp = strchr(name_buf, '-'); | 3488 | sp = strchr(name_buf, '-'); |
| 3417 | if (!sp) | 3489 | if (!sp) |
| 3418 | sp = strchrnul(name_buf, '\n'); | 3490 | sp = strchrnul(name_buf, '\n'); |
| 3419 | *sp = '\0'; | 3491 | *sp = '\0'; |
| 3420 | |||
| 3421 | fclose(input); | 3492 | fclose(input); |
| 3422 | 3493 | ||
| 3423 | sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", | 3494 | sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", |
| @@ -3425,7 +3496,8 @@ dump_sysfs_cstate_config(void) | |||
| 3425 | input = fopen(path, "r"); | 3496 | input = fopen(path, "r"); |
| 3426 | if (input == NULL) | 3497 | if (input == NULL) |
| 3427 | continue; | 3498 | continue; |
| 3428 | fgets(desc, sizeof(desc), input); | 3499 | if (!fgets(desc, sizeof(desc), input)) |
| 3500 | err(1, "%s: failed to read file", path); | ||
| 3429 | 3501 | ||
| 3430 | fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc); | 3502 | fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc); |
| 3431 | fclose(input); | 3503 | fclose(input); |
| @@ -3444,20 +3516,22 @@ dump_sysfs_pstate_config(void) | |||
| 3444 | base_cpu); | 3516 | base_cpu); |
| 3445 | input = fopen(path, "r"); | 3517 | input = fopen(path, "r"); |
| 3446 | if (input == NULL) { | 3518 | if (input == NULL) { |
| 3447 | fprintf(stderr, "NSFOD %s\n", path); | 3519 | fprintf(outf, "NSFOD %s\n", path); |
| 3448 | return; | 3520 | return; |
| 3449 | } | 3521 | } |
| 3450 | fgets(driver_buf, sizeof(driver_buf), input); | 3522 | if (!fgets(driver_buf, sizeof(driver_buf), input)) |
| 3523 | err(1, "%s: failed to read file", path); | ||
| 3451 | fclose(input); | 3524 | fclose(input); |
| 3452 | 3525 | ||
| 3453 | sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", | 3526 | sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", |
| 3454 | base_cpu); | 3527 | base_cpu); |
| 3455 | input = fopen(path, "r"); | 3528 | input = fopen(path, "r"); |
| 3456 | if (input == NULL) { | 3529 | if (input == NULL) { |
| 3457 | fprintf(stderr, "NSFOD %s\n", path); | 3530 | fprintf(outf, "NSFOD %s\n", path); |
| 3458 | return; | 3531 | return; |
| 3459 | } | 3532 | } |
| 3460 | fgets(governor_buf, sizeof(governor_buf), input); | 3533 | if (!fgets(governor_buf, sizeof(governor_buf), input)) |
| 3534 | err(1, "%s: failed to read file", path); | ||
| 3461 | fclose(input); | 3535 | fclose(input); |
| 3462 | 3536 | ||
| 3463 | fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf); | 3537 | fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf); |
| @@ -3466,7 +3540,8 @@ dump_sysfs_pstate_config(void) | |||
| 3466 | sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); | 3540 | sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); |
| 3467 | input = fopen(path, "r"); | 3541 | input = fopen(path, "r"); |
| 3468 | if (input != NULL) { | 3542 | if (input != NULL) { |
| 3469 | fscanf(input, "%d", &turbo); | 3543 | if (fscanf(input, "%d", &turbo) != 1) |
| 3544 | err(1, "%s: failed to parse number from file", path); | ||
| 3470 | fprintf(outf, "cpufreq boost: %d\n", turbo); | 3545 | fprintf(outf, "cpufreq boost: %d\n", turbo); |
| 3471 | fclose(input); | 3546 | fclose(input); |
| 3472 | } | 3547 | } |
| @@ -3474,7 +3549,8 @@ dump_sysfs_pstate_config(void) | |||
| 3474 | sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); | 3549 | sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); |
| 3475 | input = fopen(path, "r"); | 3550 | input = fopen(path, "r"); |
| 3476 | if (input != NULL) { | 3551 | if (input != NULL) { |
| 3477 | fscanf(input, "%d", &turbo); | 3552 | if (fscanf(input, "%d", &turbo) != 1) |
| 3553 | err(1, "%s: failed to parse number from file", path); | ||
| 3478 | fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); | 3554 | fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); |
| 3479 | fclose(input); | 3555 | fclose(input); |
| 3480 | } | 3556 | } |
| @@ -3718,7 +3794,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data | |||
| 3718 | #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ | 3794 | #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ |
| 3719 | #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ | 3795 | #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ |
| 3720 | 3796 | ||
| 3721 | double get_tdp(unsigned int model) | 3797 | double get_tdp_intel(unsigned int model) |
| 3722 | { | 3798 | { |
| 3723 | unsigned long long msr; | 3799 | unsigned long long msr; |
| 3724 | 3800 | ||
| @@ -3735,6 +3811,16 @@ double get_tdp(unsigned int model) | |||
| 3735 | } | 3811 | } |
| 3736 | } | 3812 | } |
| 3737 | 3813 | ||
| 3814 | double get_tdp_amd(unsigned int family) | ||
| 3815 | { | ||
| 3816 | switch (family) { | ||
| 3817 | case 0x17: | ||
| 3818 | default: | ||
| 3819 | /* This is the max stock TDP of HEDT/Server Fam17h chips */ | ||
| 3820 | return 250.0; | ||
| 3821 | } | ||
| 3822 | } | ||
| 3823 | |||
| 3738 | /* | 3824 | /* |
| 3739 | * rapl_dram_energy_units_probe() | 3825 | * rapl_dram_energy_units_probe() |
| 3740 | * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. | 3826 | * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. |
| @@ -3754,21 +3840,12 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units) | |||
| 3754 | } | 3840 | } |
| 3755 | } | 3841 | } |
| 3756 | 3842 | ||
| 3757 | 3843 | void rapl_probe_intel(unsigned int family, unsigned int model) | |
| 3758 | /* | ||
| 3759 | * rapl_probe() | ||
| 3760 | * | ||
| 3761 | * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units | ||
| 3762 | */ | ||
| 3763 | void rapl_probe(unsigned int family, unsigned int model) | ||
| 3764 | { | 3844 | { |
| 3765 | unsigned long long msr; | 3845 | unsigned long long msr; |
| 3766 | unsigned int time_unit; | 3846 | unsigned int time_unit; |
| 3767 | double tdp; | 3847 | double tdp; |
| 3768 | 3848 | ||
| 3769 | if (!genuine_intel) | ||
| 3770 | return; | ||
| 3771 | |||
| 3772 | if (family != 6) | 3849 | if (family != 6) |
| 3773 | return; | 3850 | return; |
| 3774 | 3851 | ||
| @@ -3892,13 +3969,69 @@ void rapl_probe(unsigned int family, unsigned int model) | |||
| 3892 | 3969 | ||
| 3893 | rapl_time_units = 1.0 / (1 << (time_unit)); | 3970 | rapl_time_units = 1.0 / (1 << (time_unit)); |
| 3894 | 3971 | ||
| 3895 | tdp = get_tdp(model); | 3972 | tdp = get_tdp_intel(model); |
| 3896 | 3973 | ||
| 3897 | rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; | 3974 | rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; |
| 3898 | if (!quiet) | 3975 | if (!quiet) |
| 3899 | fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); | 3976 | fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); |
| 3977 | } | ||
| 3900 | 3978 | ||
| 3901 | return; | 3979 | void rapl_probe_amd(unsigned int family, unsigned int model) |
| 3980 | { | ||
| 3981 | unsigned long long msr; | ||
| 3982 | unsigned int eax, ebx, ecx, edx; | ||
| 3983 | unsigned int has_rapl = 0; | ||
| 3984 | double tdp; | ||
| 3985 | |||
| 3986 | if (max_extended_level >= 0x80000007) { | ||
| 3987 | __cpuid(0x80000007, eax, ebx, ecx, edx); | ||
| 3988 | /* RAPL (Fam 17h) */ | ||
| 3989 | has_rapl = edx & (1 << 14); | ||
| 3990 | } | ||
| 3991 | |||
| 3992 | if (!has_rapl) | ||
| 3993 | return; | ||
| 3994 | |||
| 3995 | switch (family) { | ||
| 3996 | case 0x17: /* Zen, Zen+ */ | ||
| 3997 | do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY; | ||
| 3998 | if (rapl_joules) { | ||
| 3999 | BIC_PRESENT(BIC_Pkg_J); | ||
| 4000 | BIC_PRESENT(BIC_Cor_J); | ||
| 4001 | } else { | ||
| 4002 | BIC_PRESENT(BIC_PkgWatt); | ||
| 4003 | BIC_PRESENT(BIC_CorWatt); | ||
| 4004 | } | ||
| 4005 | break; | ||
| 4006 | default: | ||
| 4007 | return; | ||
| 4008 | } | ||
| 4009 | |||
| 4010 | if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr)) | ||
| 4011 | return; | ||
| 4012 | |||
| 4013 | rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf)); | ||
| 4014 | rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f)); | ||
| 4015 | rapl_power_units = ldexp(1.0, -(msr & 0xf)); | ||
| 4016 | |||
| 4017 | tdp = get_tdp_amd(model); | ||
| 4018 | |||
| 4019 | rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; | ||
| 4020 | if (!quiet) | ||
| 4021 | fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); | ||
| 4022 | } | ||
| 4023 | |||
| 4024 | /* | ||
| 4025 | * rapl_probe() | ||
| 4026 | * | ||
| 4027 | * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units | ||
| 4028 | */ | ||
| 4029 | void rapl_probe(unsigned int family, unsigned int model) | ||
| 4030 | { | ||
| 4031 | if (genuine_intel) | ||
| 4032 | rapl_probe_intel(family, model); | ||
| 4033 | if (authentic_amd) | ||
| 4034 | rapl_probe_amd(family, model); | ||
| 3902 | } | 4035 | } |
| 3903 | 4036 | ||
| 3904 | void perf_limit_reasons_probe(unsigned int family, unsigned int model) | 4037 | void perf_limit_reasons_probe(unsigned int family, unsigned int model) |
| @@ -4003,6 +4136,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label) | |||
| 4003 | int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) | 4136 | int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) |
| 4004 | { | 4137 | { |
| 4005 | unsigned long long msr; | 4138 | unsigned long long msr; |
| 4139 | const char *msr_name; | ||
| 4006 | int cpu; | 4140 | int cpu; |
| 4007 | 4141 | ||
| 4008 | if (!do_rapl) | 4142 | if (!do_rapl) |
| @@ -4018,10 +4152,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) | |||
| 4018 | return -1; | 4152 | return -1; |
| 4019 | } | 4153 | } |
| 4020 | 4154 | ||
| 4021 | if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) | 4155 | if (do_rapl & RAPL_AMD_F17H) { |
| 4022 | return -1; | 4156 | msr_name = "MSR_RAPL_PWR_UNIT"; |
| 4157 | if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr)) | ||
| 4158 | return -1; | ||
| 4159 | } else { | ||
| 4160 | msr_name = "MSR_RAPL_POWER_UNIT"; | ||
| 4161 | if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) | ||
| 4162 | return -1; | ||
| 4163 | } | ||
| 4023 | 4164 | ||
| 4024 | fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr, | 4165 | fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr, |
| 4025 | rapl_power_units, rapl_energy_units, rapl_time_units); | 4166 | rapl_power_units, rapl_energy_units, rapl_time_units); |
| 4026 | 4167 | ||
| 4027 | if (do_rapl & RAPL_PKG_POWER_INFO) { | 4168 | if (do_rapl & RAPL_PKG_POWER_INFO) { |
| @@ -4451,6 +4592,9 @@ unsigned int intel_model_duplicates(unsigned int model) | |||
| 4451 | case INTEL_FAM6_KABYLAKE_MOBILE: | 4592 | case INTEL_FAM6_KABYLAKE_MOBILE: |
| 4452 | case INTEL_FAM6_KABYLAKE_DESKTOP: | 4593 | case INTEL_FAM6_KABYLAKE_DESKTOP: |
| 4453 | return INTEL_FAM6_SKYLAKE_MOBILE; | 4594 | return INTEL_FAM6_SKYLAKE_MOBILE; |
| 4595 | |||
| 4596 | case INTEL_FAM6_ICELAKE_MOBILE: | ||
| 4597 | return INTEL_FAM6_CANNONLAKE_MOBILE; | ||
| 4454 | } | 4598 | } |
| 4455 | return model; | 4599 | return model; |
| 4456 | } | 4600 | } |
| @@ -4702,7 +4846,9 @@ void process_cpuid() | |||
| 4702 | } | 4846 | } |
| 4703 | do_slm_cstates = is_slm(family, model); | 4847 | do_slm_cstates = is_slm(family, model); |
| 4704 | do_knl_cstates = is_knl(family, model); | 4848 | do_knl_cstates = is_knl(family, model); |
| 4705 | do_cnl_cstates = is_cnl(family, model); | 4849 | |
| 4850 | if (do_slm_cstates || do_knl_cstates || is_cnl(family, model)) | ||
| 4851 | BIC_NOT_PRESENT(BIC_CPU_c3); | ||
| 4706 | 4852 | ||
| 4707 | if (!quiet) | 4853 | if (!quiet) |
| 4708 | decode_misc_pwr_mgmt_msr(); | 4854 | decode_misc_pwr_mgmt_msr(); |
| @@ -4769,6 +4915,7 @@ void topology_probe() | |||
| 4769 | int i; | 4915 | int i; |
| 4770 | int max_core_id = 0; | 4916 | int max_core_id = 0; |
| 4771 | int max_package_id = 0; | 4917 | int max_package_id = 0; |
| 4918 | int max_die_id = 0; | ||
| 4772 | int max_siblings = 0; | 4919 | int max_siblings = 0; |
| 4773 | 4920 | ||
| 4774 | /* Initialize num_cpus, max_cpu_num */ | 4921 | /* Initialize num_cpus, max_cpu_num */ |
| @@ -4835,6 +4982,11 @@ void topology_probe() | |||
| 4835 | if (cpus[i].physical_package_id > max_package_id) | 4982 | if (cpus[i].physical_package_id > max_package_id) |
| 4836 | max_package_id = cpus[i].physical_package_id; | 4983 | max_package_id = cpus[i].physical_package_id; |
| 4837 | 4984 | ||
| 4985 | /* get die information */ | ||
| 4986 | cpus[i].die_id = get_die_id(i); | ||
| 4987 | if (cpus[i].die_id > max_die_id) | ||
| 4988 | max_die_id = cpus[i].die_id; | ||
| 4989 | |||
| 4838 | /* get numa node information */ | 4990 | /* get numa node information */ |
| 4839 | cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); | 4991 | cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); |
| 4840 | if (cpus[i].physical_node_id > topo.max_node_num) | 4992 | if (cpus[i].physical_node_id > topo.max_node_num) |
| @@ -4860,6 +5012,13 @@ void topology_probe() | |||
| 4860 | if (!summary_only && topo.cores_per_node > 1) | 5012 | if (!summary_only && topo.cores_per_node > 1) |
| 4861 | BIC_PRESENT(BIC_Core); | 5013 | BIC_PRESENT(BIC_Core); |
| 4862 | 5014 | ||
| 5015 | topo.num_die = max_die_id + 1; | ||
| 5016 | if (debug > 1) | ||
| 5017 | fprintf(outf, "max_die_id %d, sizing for %d die\n", | ||
| 5018 | max_die_id, topo.num_die); | ||
| 5019 | if (!summary_only && topo.num_die > 1) | ||
| 5020 | BIC_PRESENT(BIC_Die); | ||
| 5021 | |||
| 4863 | topo.num_packages = max_package_id + 1; | 5022 | topo.num_packages = max_package_id + 1; |
| 4864 | if (debug > 1) | 5023 | if (debug > 1) |
| 4865 | fprintf(outf, "max_package_id %d, sizing for %d packages\n", | 5024 | fprintf(outf, "max_package_id %d, sizing for %d packages\n", |
| @@ -4884,8 +5043,8 @@ void topology_probe() | |||
| 4884 | if (cpu_is_not_present(i)) | 5043 | if (cpu_is_not_present(i)) |
| 4885 | continue; | 5044 | continue; |
| 4886 | fprintf(outf, | 5045 | fprintf(outf, |
| 4887 | "cpu %d pkg %d node %d lnode %d core %d thread %d\n", | 5046 | "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n", |
| 4888 | i, cpus[i].physical_package_id, | 5047 | i, cpus[i].physical_package_id, cpus[i].die_id, |
| 4889 | cpus[i].physical_node_id, | 5048 | cpus[i].physical_node_id, |
| 4890 | cpus[i].logical_node_id, | 5049 | cpus[i].logical_node_id, |
| 4891 | cpus[i].physical_core_id, | 5050 | cpus[i].physical_core_id, |
| @@ -5122,7 +5281,7 @@ int get_and_dump_counters(void) | |||
| 5122 | } | 5281 | } |
| 5123 | 5282 | ||
| 5124 | void print_version() { | 5283 | void print_version() { |
| 5125 | fprintf(outf, "turbostat version 18.07.27" | 5284 | fprintf(outf, "turbostat version 19.03.20" |
| 5126 | " - Len Brown <lenb@kernel.org>\n"); | 5285 | " - Len Brown <lenb@kernel.org>\n"); |
| 5127 | } | 5286 | } |
| 5128 | 5287 | ||
| @@ -5319,7 +5478,8 @@ void probe_sysfs(void) | |||
| 5319 | input = fopen(path, "r"); | 5478 | input = fopen(path, "r"); |
| 5320 | if (input == NULL) | 5479 | if (input == NULL) |
| 5321 | continue; | 5480 | continue; |
| 5322 | fgets(name_buf, sizeof(name_buf), input); | 5481 | if (!fgets(name_buf, sizeof(name_buf), input)) |
| 5482 | err(1, "%s: failed to read file", path); | ||
| 5323 | 5483 | ||
| 5324 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ | 5484 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ |
| 5325 | sp = strchr(name_buf, '-'); | 5485 | sp = strchr(name_buf, '-'); |
| @@ -5346,7 +5506,8 @@ void probe_sysfs(void) | |||
| 5346 | input = fopen(path, "r"); | 5506 | input = fopen(path, "r"); |
| 5347 | if (input == NULL) | 5507 | if (input == NULL) |
| 5348 | continue; | 5508 | continue; |
| 5349 | fgets(name_buf, sizeof(name_buf), input); | 5509 | if (!fgets(name_buf, sizeof(name_buf), input)) |
| 5510 | err(1, "%s: failed to read file", path); | ||
| 5350 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ | 5511 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ |
| 5351 | sp = strchr(name_buf, '-'); | 5512 | sp = strchr(name_buf, '-'); |
| 5352 | if (!sp) | 5513 | if (!sp) |
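The turbostat hunks above add AMD Fam17h RAPL support: rapl_probe_amd() decodes MSR_RAPL_PWR_UNIT with ldexp(), the per-core MSR_CORE_ENERGY_STAT counter feeds the new CorWatt/Cor_J columns, and DELTA_WRAP32() handles the 32-bit counter. For reference only, a minimal user-space sketch of the same decode; it is not part of the patch, and the MSR addresses and the /dev/cpu/*/msr access method (root plus the msr module) are assumptions not shown in this diff:

	#include <fcntl.h>
	#include <math.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MSR_RAPL_PWR_UNIT     0xC0010299 /* assumed Fam17h address; verify against msr-index.h */
	#define MSR_CORE_ENERGY_STAT  0xC001029A /* assumed Fam17h address; verify against msr-index.h */

	/* Read one MSR via the msr driver (needs root and 'modprobe msr'). */
	static int read_msr(int cpu, uint32_t reg, uint64_t *val)
	{
		char path[32];
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		if (pread(fd, val, sizeof(*val), reg) != (ssize_t)sizeof(*val)) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}

	int main(void)
	{
		uint64_t unit_msr, e0, e1;
		uint32_t delta;
		double energy_units;

		if (read_msr(0, MSR_RAPL_PWR_UNIT, &unit_msr))
			return 1;
		/* Bits 12:8 give the Energy Status Unit: one count = 1/2^ESU Joules,
		 * the same decode the patch performs with ldexp(). */
		energy_units = ldexp(1.0, -(int)((unit_msr >> 8) & 0x1f));

		if (read_msr(0, MSR_CORE_ENERGY_STAT, &e0))
			return 1;
		sleep(1);
		if (read_msr(0, MSR_CORE_ENERGY_STAT, &e1))
			return 1;

		/* The counter is 32 bits wide; unsigned subtraction rides through one
		 * wrap, which is what DELTA_WRAP32() accomplishes in turbostat. */
		delta = (uint32_t)e1 - (uint32_t)e0;
		printf("cpu0 core energy: %.3f J over ~1 s (~%.3f W)\n",
		       delta * energy_units, delta * energy_units / 1.0);
		return 0;
	}

Compile with -lm; the printed Watts value is the same core_energy * rapl_energy_units / interval arithmetic that format_counters() now performs for the per-core columns.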
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index c9433a496d54..c81fc350f7ad 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h | |||
| @@ -180,6 +180,8 @@ static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = | |||
| 180 | (void *) BPF_FUNC_sk_fullsock; | 180 | (void *) BPF_FUNC_sk_fullsock; |
| 181 | static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = | 181 | static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = |
| 182 | (void *) BPF_FUNC_tcp_sock; | 182 | (void *) BPF_FUNC_tcp_sock; |
| 183 | static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) = | ||
| 184 | (void *) BPF_FUNC_get_listener_sock; | ||
| 183 | static int (*bpf_skb_ecn_set_ce)(void *ctx) = | 185 | static int (*bpf_skb_ecn_set_ce)(void *ctx) = |
| 184 | (void *) BPF_FUNC_skb_ecn_set_ce; | 186 | (void *) BPF_FUNC_skb_ecn_set_ce; |
| 185 | 187 | ||
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index bcbd928c96ab..fc818bc1d729 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c | |||
| @@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = { | |||
| 39 | .n_proto = __bpf_constant_htons(ETH_P_IPV6), | 39 | .n_proto = __bpf_constant_htons(ETH_P_IPV6), |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | #define VLAN_HLEN 4 | ||
| 43 | |||
| 44 | static struct { | ||
| 45 | struct ethhdr eth; | ||
| 46 | __u16 vlan_tci; | ||
| 47 | __u16 vlan_proto; | ||
| 48 | struct iphdr iph; | ||
| 49 | struct tcphdr tcp; | ||
| 50 | } __packed pkt_vlan_v4 = { | ||
| 51 | .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q), | ||
| 52 | .vlan_proto = __bpf_constant_htons(ETH_P_IP), | ||
| 53 | .iph.ihl = 5, | ||
| 54 | .iph.protocol = IPPROTO_TCP, | ||
| 55 | .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), | ||
| 56 | .tcp.urg_ptr = 123, | ||
| 57 | .tcp.doff = 5, | ||
| 58 | }; | ||
| 59 | |||
| 60 | static struct bpf_flow_keys pkt_vlan_v4_flow_keys = { | ||
| 61 | .nhoff = VLAN_HLEN, | ||
| 62 | .thoff = VLAN_HLEN + sizeof(struct iphdr), | ||
| 63 | .addr_proto = ETH_P_IP, | ||
| 64 | .ip_proto = IPPROTO_TCP, | ||
| 65 | .n_proto = __bpf_constant_htons(ETH_P_IP), | ||
| 66 | }; | ||
| 67 | |||
| 68 | static struct { | ||
| 69 | struct ethhdr eth; | ||
| 70 | __u16 vlan_tci; | ||
| 71 | __u16 vlan_proto; | ||
| 72 | __u16 vlan_tci2; | ||
| 73 | __u16 vlan_proto2; | ||
| 74 | struct ipv6hdr iph; | ||
| 75 | struct tcphdr tcp; | ||
| 76 | } __packed pkt_vlan_v6 = { | ||
| 77 | .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD), | ||
| 78 | .vlan_proto = __bpf_constant_htons(ETH_P_8021Q), | ||
| 79 | .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6), | ||
| 80 | .iph.nexthdr = IPPROTO_TCP, | ||
| 81 | .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), | ||
| 82 | .tcp.urg_ptr = 123, | ||
| 83 | .tcp.doff = 5, | ||
| 84 | }; | ||
| 85 | |||
| 86 | static struct bpf_flow_keys pkt_vlan_v6_flow_keys = { | ||
| 87 | .nhoff = VLAN_HLEN * 2, | ||
| 88 | .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr), | ||
| 89 | .addr_proto = ETH_P_IPV6, | ||
| 90 | .ip_proto = IPPROTO_TCP, | ||
| 91 | .n_proto = __bpf_constant_htons(ETH_P_IPV6), | ||
| 92 | }; | ||
| 93 | |||
| 42 | void test_flow_dissector(void) | 94 | void test_flow_dissector(void) |
| 43 | { | 95 | { |
| 44 | struct bpf_flow_keys flow_keys; | 96 | struct bpf_flow_keys flow_keys; |
| @@ -68,5 +120,21 @@ void test_flow_dissector(void) | |||
| 68 | err, errno, retval, duration, size, sizeof(flow_keys)); | 120 | err, errno, retval, duration, size, sizeof(flow_keys)); |
| 69 | CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys); | 121 | CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys); |
| 70 | 122 | ||
| 123 | err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4), | ||
| 124 | &flow_keys, &size, &retval, &duration); | ||
| 125 | CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4", | ||
| 126 | "err %d errno %d retval %d duration %d size %u/%lu\n", | ||
| 127 | err, errno, retval, duration, size, sizeof(flow_keys)); | ||
| 128 | CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys, | ||
| 129 | pkt_vlan_v4_flow_keys); | ||
| 130 | |||
| 131 | err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6), | ||
| 132 | &flow_keys, &size, &retval, &duration); | ||
| 133 | CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6", | ||
| 134 | "err %d errno %d retval %d duration %d size %u/%lu\n", | ||
| 135 | err, errno, retval, duration, size, sizeof(flow_keys)); | ||
| 136 | CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys, | ||
| 137 | pkt_vlan_v6_flow_keys); | ||
| 138 | |||
| 71 | bpf_object__close(obj); | 139 | bpf_object__close(obj); |
| 72 | } | 140 | } |
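The expected flow_keys added above are plain header arithmetic. As a quick cross-check (assuming glibc's struct iphdr and struct ip6_hdr, which match the kernel's 20- and 40-byte layouts), this throwaway program reproduces the nhoff/thoff values the new vlan_ipv4 and vlan_ipv6 cases assert:

	#include <stdio.h>
	#include <netinet/ip.h>   /* struct iphdr, 20 bytes */
	#include <netinet/ip6.h>  /* struct ip6_hdr, 40 bytes */

	#define VLAN_HLEN 4

	int main(void)
	{
		/* single-tagged IPv4: one VLAN header before the IP header */
		printf("vlan_v4: nhoff=%d thoff=%zu\n",
		       VLAN_HLEN, VLAN_HLEN + sizeof(struct iphdr));          /* 4, 24 */
		/* double-tagged IPv6: two VLAN headers before the IPv6 header */
		printf("vlan_v6: nhoff=%d thoff=%zu\n",
		       2 * VLAN_HLEN, 2 * VLAN_HLEN + sizeof(struct ip6_hdr)); /* 8, 48 */
		return 0;
	}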
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c index 90f8a206340a..ee99368c595c 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c | |||
| @@ -37,7 +37,7 @@ void test_map_lock(void) | |||
| 37 | const char *file = "./test_map_lock.o"; | 37 | const char *file = "./test_map_lock.o"; |
| 38 | int prog_fd, map_fd[2], vars[17] = {}; | 38 | int prog_fd, map_fd[2], vars[17] = {}; |
| 39 | pthread_t thread_id[6]; | 39 | pthread_t thread_id[6]; |
| 40 | struct bpf_object *obj; | 40 | struct bpf_object *obj = NULL; |
| 41 | int err = 0, key = 0, i; | 41 | int err = 0, key = 0, i; |
| 42 | void *ret; | 42 | void *ret; |
| 43 | 43 | ||
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c index 9a573a9675d7..114ebe6a438e 100644 --- a/tools/testing/selftests/bpf/prog_tests/spinlock.c +++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c | |||
| @@ -5,7 +5,7 @@ void test_spinlock(void) | |||
| 5 | { | 5 | { |
| 6 | const char *file = "./test_spin_lock.o"; | 6 | const char *file = "./test_spin_lock.o"; |
| 7 | pthread_t thread_id[4]; | 7 | pthread_t thread_id[4]; |
| 8 | struct bpf_object *obj; | 8 | struct bpf_object *obj = NULL; |
| 9 | int prog_fd; | 9 | int prog_fd; |
| 10 | int err = 0, i; | 10 | int err = 0, i; |
| 11 | void *ret; | 11 | void *ret; |
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c index 284660f5aa95..75b17cada539 100644 --- a/tools/testing/selftests/bpf/progs/bpf_flow.c +++ b/tools/testing/selftests/bpf/progs/bpf_flow.c | |||
| @@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto) | |||
| 92 | { | 92 | { |
| 93 | struct bpf_flow_keys *keys = skb->flow_keys; | 93 | struct bpf_flow_keys *keys = skb->flow_keys; |
| 94 | 94 | ||
| 95 | keys->n_proto = proto; | ||
| 96 | switch (proto) { | 95 | switch (proto) { |
| 97 | case bpf_htons(ETH_P_IP): | 96 | case bpf_htons(ETH_P_IP): |
| 98 | bpf_tail_call(skb, &jmp_table, IP); | 97 | bpf_tail_call(skb, &jmp_table, IP); |
| @@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto) | |||
| 119 | SEC("flow_dissector") | 118 | SEC("flow_dissector") |
| 120 | int _dissect(struct __sk_buff *skb) | 119 | int _dissect(struct __sk_buff *skb) |
| 121 | { | 120 | { |
| 122 | if (!skb->vlan_present) | 121 | struct bpf_flow_keys *keys = skb->flow_keys; |
| 123 | return parse_eth_proto(skb, skb->protocol); | 122 | |
| 124 | else | 123 | return parse_eth_proto(skb, keys->n_proto); |
| 125 | return parse_eth_proto(skb, skb->vlan_proto); | ||
| 126 | } | 124 | } |
| 127 | 125 | ||
| 128 | /* Parses on IPPROTO_* */ | 126 | /* Parses on IPPROTO_* */ |
| @@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb) | |||
| 336 | { | 334 | { |
| 337 | struct bpf_flow_keys *keys = skb->flow_keys; | 335 | struct bpf_flow_keys *keys = skb->flow_keys; |
| 338 | struct vlan_hdr *vlan, _vlan; | 336 | struct vlan_hdr *vlan, _vlan; |
| 339 | __be16 proto; | ||
| 340 | |||
| 341 | /* Peek back to see if single or double-tagging */ | ||
| 342 | if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto, | ||
| 343 | sizeof(proto))) | ||
| 344 | return BPF_DROP; | ||
| 345 | 337 | ||
| 346 | /* Account for double-tagging */ | 338 | /* Account for double-tagging */ |
| 347 | if (proto == bpf_htons(ETH_P_8021AD)) { | 339 | if (keys->n_proto == bpf_htons(ETH_P_8021AD)) { |
| 348 | vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); | 340 | vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); |
| 349 | if (!vlan) | 341 | if (!vlan) |
| 350 | return BPF_DROP; | 342 | return BPF_DROP; |
| @@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb) | |||
| 352 | if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q)) | 344 | if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q)) |
| 353 | return BPF_DROP; | 345 | return BPF_DROP; |
| 354 | 346 | ||
| 347 | keys->nhoff += sizeof(*vlan); | ||
| 355 | keys->thoff += sizeof(*vlan); | 348 | keys->thoff += sizeof(*vlan); |
| 356 | } | 349 | } |
| 357 | 350 | ||
| @@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb) | |||
| 359 | if (!vlan) | 352 | if (!vlan) |
| 360 | return BPF_DROP; | 353 | return BPF_DROP; |
| 361 | 354 | ||
| 355 | keys->nhoff += sizeof(*vlan); | ||
| 362 | keys->thoff += sizeof(*vlan); | 356 | keys->thoff += sizeof(*vlan); |
| 363 | /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ | 357 | /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ |
| 364 | if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || | 358 | if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || |
| 365 | vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) | 359 | vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) |
| 366 | return BPF_DROP; | 360 | return BPF_DROP; |
| 367 | 361 | ||
| 362 | keys->n_proto = vlan->h_vlan_encapsulated_proto; | ||
| 368 | return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto); | 363 | return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto); |
| 369 | } | 364 | } |
| 370 | 365 | ||
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c index de1a43e8f610..37328f148538 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c | |||
| @@ -8,38 +8,51 @@ | |||
| 8 | #include "bpf_helpers.h" | 8 | #include "bpf_helpers.h" |
| 9 | #include "bpf_endian.h" | 9 | #include "bpf_endian.h" |
| 10 | 10 | ||
| 11 | enum bpf_array_idx { | 11 | enum bpf_addr_array_idx { |
| 12 | SRV_IDX, | 12 | ADDR_SRV_IDX, |
| 13 | CLI_IDX, | 13 | ADDR_CLI_IDX, |
| 14 | __NR_BPF_ARRAY_IDX, | 14 | __NR_BPF_ADDR_ARRAY_IDX, |
| 15 | }; | ||
| 16 | |||
| 17 | enum bpf_result_array_idx { | ||
| 18 | EGRESS_SRV_IDX, | ||
| 19 | EGRESS_CLI_IDX, | ||
| 20 | INGRESS_LISTEN_IDX, | ||
| 21 | __NR_BPF_RESULT_ARRAY_IDX, | ||
| 22 | }; | ||
| 23 | |||
| 24 | enum bpf_linum_array_idx { | ||
| 25 | EGRESS_LINUM_IDX, | ||
| 26 | INGRESS_LINUM_IDX, | ||
| 27 | __NR_BPF_LINUM_ARRAY_IDX, | ||
| 15 | }; | 28 | }; |
| 16 | 29 | ||
| 17 | struct bpf_map_def SEC("maps") addr_map = { | 30 | struct bpf_map_def SEC("maps") addr_map = { |
| 18 | .type = BPF_MAP_TYPE_ARRAY, | 31 | .type = BPF_MAP_TYPE_ARRAY, |
| 19 | .key_size = sizeof(__u32), | 32 | .key_size = sizeof(__u32), |
| 20 | .value_size = sizeof(struct sockaddr_in6), | 33 | .value_size = sizeof(struct sockaddr_in6), |
| 21 | .max_entries = __NR_BPF_ARRAY_IDX, | 34 | .max_entries = __NR_BPF_ADDR_ARRAY_IDX, |
| 22 | }; | 35 | }; |
| 23 | 36 | ||
| 24 | struct bpf_map_def SEC("maps") sock_result_map = { | 37 | struct bpf_map_def SEC("maps") sock_result_map = { |
| 25 | .type = BPF_MAP_TYPE_ARRAY, | 38 | .type = BPF_MAP_TYPE_ARRAY, |
| 26 | .key_size = sizeof(__u32), | 39 | .key_size = sizeof(__u32), |
| 27 | .value_size = sizeof(struct bpf_sock), | 40 | .value_size = sizeof(struct bpf_sock), |
| 28 | .max_entries = __NR_BPF_ARRAY_IDX, | 41 | .max_entries = __NR_BPF_RESULT_ARRAY_IDX, |
| 29 | }; | 42 | }; |
| 30 | 43 | ||
| 31 | struct bpf_map_def SEC("maps") tcp_sock_result_map = { | 44 | struct bpf_map_def SEC("maps") tcp_sock_result_map = { |
| 32 | .type = BPF_MAP_TYPE_ARRAY, | 45 | .type = BPF_MAP_TYPE_ARRAY, |
| 33 | .key_size = sizeof(__u32), | 46 | .key_size = sizeof(__u32), |
| 34 | .value_size = sizeof(struct bpf_tcp_sock), | 47 | .value_size = sizeof(struct bpf_tcp_sock), |
| 35 | .max_entries = __NR_BPF_ARRAY_IDX, | 48 | .max_entries = __NR_BPF_RESULT_ARRAY_IDX, |
| 36 | }; | 49 | }; |
| 37 | 50 | ||
| 38 | struct bpf_map_def SEC("maps") linum_map = { | 51 | struct bpf_map_def SEC("maps") linum_map = { |
| 39 | .type = BPF_MAP_TYPE_ARRAY, | 52 | .type = BPF_MAP_TYPE_ARRAY, |
| 40 | .key_size = sizeof(__u32), | 53 | .key_size = sizeof(__u32), |
| 41 | .value_size = sizeof(__u32), | 54 | .value_size = sizeof(__u32), |
| 42 | .max_entries = 1, | 55 | .max_entries = __NR_BPF_LINUM_ARRAY_IDX, |
| 43 | }; | 56 | }; |
| 44 | 57 | ||
| 45 | static bool is_loopback6(__u32 *a6) | 58 | static bool is_loopback6(__u32 *a6) |
| @@ -100,18 +113,20 @@ static void tpcpy(struct bpf_tcp_sock *dst, | |||
| 100 | 113 | ||
| 101 | #define RETURN { \ | 114 | #define RETURN { \ |
| 102 | linum = __LINE__; \ | 115 | linum = __LINE__; \ |
| 103 | bpf_map_update_elem(&linum_map, &idx0, &linum, 0); \ | 116 | bpf_map_update_elem(&linum_map, &linum_idx, &linum, 0); \ |
| 104 | return 1; \ | 117 | return 1; \ |
| 105 | } | 118 | } |
| 106 | 119 | ||
| 107 | SEC("cgroup_skb/egress") | 120 | SEC("cgroup_skb/egress") |
| 108 | int read_sock_fields(struct __sk_buff *skb) | 121 | int egress_read_sock_fields(struct __sk_buff *skb) |
| 109 | { | 122 | { |
| 110 | __u32 srv_idx = SRV_IDX, cli_idx = CLI_IDX, idx; | 123 | __u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx; |
| 111 | struct sockaddr_in6 *srv_sa6, *cli_sa6; | 124 | struct sockaddr_in6 *srv_sa6, *cli_sa6; |
| 112 | struct bpf_tcp_sock *tp, *tp_ret; | 125 | struct bpf_tcp_sock *tp, *tp_ret; |
| 113 | struct bpf_sock *sk, *sk_ret; | 126 | struct bpf_sock *sk, *sk_ret; |
| 114 | __u32 linum, idx0 = 0; | 127 | __u32 linum, linum_idx; |
| 128 | |||
| 129 | linum_idx = EGRESS_LINUM_IDX; | ||
| 115 | 130 | ||
| 116 | sk = skb->sk; | 131 | sk = skb->sk; |
| 117 | if (!sk || sk->state == 10) | 132 | if (!sk || sk->state == 10) |
| @@ -132,14 +147,55 @@ int read_sock_fields(struct __sk_buff *skb) | |||
| 132 | RETURN; | 147 | RETURN; |
| 133 | 148 | ||
| 134 | if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port)) | 149 | if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port)) |
| 135 | idx = srv_idx; | 150 | result_idx = EGRESS_SRV_IDX; |
| 136 | else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port)) | 151 | else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port)) |
| 137 | idx = cli_idx; | 152 | result_idx = EGRESS_CLI_IDX; |
| 138 | else | 153 | else |
| 139 | RETURN; | 154 | RETURN; |
| 140 | 155 | ||
| 141 | sk_ret = bpf_map_lookup_elem(&sock_result_map, &idx); | 156 | sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx); |
| 142 | tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &idx); | 157 | tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx); |
| 158 | if (!sk_ret || !tp_ret) | ||
| 159 | RETURN; | ||
| 160 | |||
| 161 | skcpy(sk_ret, sk); | ||
| 162 | tpcpy(tp_ret, tp); | ||
| 163 | |||
| 164 | RETURN; | ||
| 165 | } | ||
| 166 | |||
| 167 | SEC("cgroup_skb/ingress") | ||
| 168 | int ingress_read_sock_fields(struct __sk_buff *skb) | ||
| 169 | { | ||
| 170 | __u32 srv_idx = ADDR_SRV_IDX, result_idx = INGRESS_LISTEN_IDX; | ||
| 171 | struct bpf_tcp_sock *tp, *tp_ret; | ||
| 172 | struct bpf_sock *sk, *sk_ret; | ||
| 173 | struct sockaddr_in6 *srv_sa6; | ||
| 174 | __u32 linum, linum_idx; | ||
| 175 | |||
| 176 | linum_idx = INGRESS_LINUM_IDX; | ||
| 177 | |||
| 178 | sk = skb->sk; | ||
| 179 | if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6)) | ||
| 180 | RETURN; | ||
| 181 | |||
| 182 | srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx); | ||
| 183 | if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port)) | ||
| 184 | RETURN; | ||
| 185 | |||
| 186 | if (sk->state != 10 && sk->state != 12) | ||
| 187 | RETURN; | ||
| 188 | |||
| 189 | sk = bpf_get_listener_sock(sk); | ||
| 190 | if (!sk) | ||
| 191 | RETURN; | ||
| 192 | |||
| 193 | tp = bpf_tcp_sock(sk); | ||
| 194 | if (!tp) | ||
| 195 | RETURN; | ||
| 196 | |||
| 197 | sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx); | ||
| 198 | tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx); | ||
| 143 | if (!sk_ret || !tp_ret) | 199 | if (!sk_ret || !tp_ret) |
| 144 | RETURN; | 200 | RETURN; |
| 145 | 201 | ||
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 38797aa627a7..ec5794e4205b 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
| @@ -5777,6 +5777,53 @@ const struct btf_dedup_test dedup_tests[] = { | |||
| 5777 | }, | 5777 | }, |
| 5778 | }, | 5778 | }, |
| 5779 | { | 5779 | { |
| 5780 | .descr = "dedup: void equiv check", | ||
| 5781 | /* | ||
| 5782 | * // CU 1: | ||
| 5783 | * struct s { | ||
| 5784 | * struct {} *x; | ||
| 5785 | * }; | ||
| 5786 | * // CU 2: | ||
| 5787 | * struct s { | ||
| 5788 | * int *x; | ||
| 5789 | * }; | ||
| 5790 | */ | ||
| 5791 | .input = { | ||
| 5792 | .raw_types = { | ||
| 5793 | /* CU 1 */ | ||
| 5794 | BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */ | ||
| 5795 | BTF_PTR_ENC(1), /* [2] ptr -> [1] */ | ||
| 5796 | BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */ | ||
| 5797 | BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), | ||
| 5798 | /* CU 2 */ | ||
| 5799 | BTF_PTR_ENC(0), /* [4] ptr -> void */ | ||
| 5800 | BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */ | ||
| 5801 | BTF_MEMBER_ENC(NAME_NTH(2), 4, 0), | ||
| 5802 | BTF_END_RAW, | ||
| 5803 | }, | ||
| 5804 | BTF_STR_SEC("\0s\0x"), | ||
| 5805 | }, | ||
| 5806 | .expect = { | ||
| 5807 | .raw_types = { | ||
| 5808 | /* CU 1 */ | ||
| 5809 | BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */ | ||
| 5810 | BTF_PTR_ENC(1), /* [2] ptr -> [1] */ | ||
| 5811 | BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */ | ||
| 5812 | BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), | ||
| 5813 | /* CU 2 */ | ||
| 5814 | BTF_PTR_ENC(0), /* [4] ptr -> void */ | ||
| 5815 | BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */ | ||
| 5816 | BTF_MEMBER_ENC(NAME_NTH(2), 4, 0), | ||
| 5817 | BTF_END_RAW, | ||
| 5818 | }, | ||
| 5819 | BTF_STR_SEC("\0s\0x"), | ||
| 5820 | }, | ||
| 5821 | .opts = { | ||
| 5822 | .dont_resolve_fwds = false, | ||
| 5823 | .dedup_table_size = 1, /* force hash collisions */ | ||
| 5824 | }, | ||
| 5825 | }, | ||
| 5826 | { | ||
| 5780 | .descr = "dedup: all possible kinds (no duplicates)", | 5827 | .descr = "dedup: all possible kinds (no duplicates)", |
| 5781 | .input = { | 5828 | .input = { |
| 5782 | .raw_types = { | 5829 | .raw_types = { |
| @@ -5874,6 +5921,50 @@ const struct btf_dedup_test dedup_tests[] = { | |||
| 5874 | .dont_resolve_fwds = false, | 5921 | .dont_resolve_fwds = false, |
| 5875 | }, | 5922 | }, |
| 5876 | }, | 5923 | }, |
| 5924 | { | ||
| 5925 | .descr = "dedup: enum fwd resolution", | ||
| 5926 | .input = { | ||
| 5927 | .raw_types = { | ||
| 5928 | /* [1] fwd enum 'e1' before full enum */ | ||
| 5929 | BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4), | ||
| 5930 | /* [2] full enum 'e1' after fwd */ | ||
| 5931 | BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), | ||
| 5932 | BTF_ENUM_ENC(NAME_NTH(2), 123), | ||
| 5933 | /* [3] full enum 'e2' before fwd */ | ||
| 5934 | BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), | ||
| 5935 | BTF_ENUM_ENC(NAME_NTH(4), 456), | ||
| 5936 | /* [4] fwd enum 'e2' after full enum */ | ||
| 5937 | BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4), | ||
| 5938 | /* [5] incompatible fwd enum with different size */ | ||
| 5939 | BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1), | ||
| 5940 | /* [6] incompatible full enum with different value */ | ||
| 5941 | BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), | ||
| 5942 | BTF_ENUM_ENC(NAME_NTH(2), 321), | ||
| 5943 | BTF_END_RAW, | ||
| 5944 | }, | ||
| 5945 | BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"), | ||
| 5946 | }, | ||
| 5947 | .expect = { | ||
| 5948 | .raw_types = { | ||
| 5949 | /* [1] full enum 'e1' */ | ||
| 5950 | BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), | ||
| 5951 | BTF_ENUM_ENC(NAME_NTH(2), 123), | ||
| 5952 | /* [2] full enum 'e2' */ | ||
| 5953 | BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), | ||
| 5954 | BTF_ENUM_ENC(NAME_NTH(4), 456), | ||
| 5955 | /* [3] incompatible fwd enum with different size */ | ||
| 5956 | BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1), | ||
| 5957 | /* [4] incompatible full enum with different value */ | ||
| 5958 | BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), | ||
| 5959 | BTF_ENUM_ENC(NAME_NTH(2), 321), | ||
| 5960 | BTF_END_RAW, | ||
| 5961 | }, | ||
| 5962 | BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"), | ||
| 5963 | }, | ||
| 5964 | .opts = { | ||
| 5965 | .dont_resolve_fwds = false, | ||
| 5966 | }, | ||
| 5967 | }, | ||
| 5877 | 5968 | ||
| 5878 | }; | 5969 | }; |
| 5879 | 5970 | ||
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c index bc8943938bf5..dcae7f664dce 100644 --- a/tools/testing/selftests/bpf/test_sock_fields.c +++ b/tools/testing/selftests/bpf/test_sock_fields.c | |||
| @@ -16,10 +16,23 @@ | |||
| 16 | #include "cgroup_helpers.h" | 16 | #include "cgroup_helpers.h" |
| 17 | #include "bpf_rlimit.h" | 17 | #include "bpf_rlimit.h" |
| 18 | 18 | ||
| 19 | enum bpf_array_idx { | 19 | enum bpf_addr_array_idx { |
| 20 | SRV_IDX, | 20 | ADDR_SRV_IDX, |
| 21 | CLI_IDX, | 21 | ADDR_CLI_IDX, |
| 22 | __NR_BPF_ARRAY_IDX, | 22 | __NR_BPF_ADDR_ARRAY_IDX, |
| 23 | }; | ||
| 24 | |||
| 25 | enum bpf_result_array_idx { | ||
| 26 | EGRESS_SRV_IDX, | ||
| 27 | EGRESS_CLI_IDX, | ||
| 28 | INGRESS_LISTEN_IDX, | ||
| 29 | __NR_BPF_RESULT_ARRAY_IDX, | ||
| 30 | }; | ||
| 31 | |||
| 32 | enum bpf_linum_array_idx { | ||
| 33 | EGRESS_LINUM_IDX, | ||
| 34 | INGRESS_LINUM_IDX, | ||
| 35 | __NR_BPF_LINUM_ARRAY_IDX, | ||
| 23 | }; | 36 | }; |
| 24 | 37 | ||
| 25 | #define CHECK(condition, tag, format...) ({ \ | 38 | #define CHECK(condition, tag, format...) ({ \ |
| @@ -41,8 +54,16 @@ static int linum_map_fd; | |||
| 41 | static int addr_map_fd; | 54 | static int addr_map_fd; |
| 42 | static int tp_map_fd; | 55 | static int tp_map_fd; |
| 43 | static int sk_map_fd; | 56 | static int sk_map_fd; |
| 44 | static __u32 srv_idx = SRV_IDX; | 57 | |
| 45 | static __u32 cli_idx = CLI_IDX; | 58 | static __u32 addr_srv_idx = ADDR_SRV_IDX; |
| 59 | static __u32 addr_cli_idx = ADDR_CLI_IDX; | ||
| 60 | |||
| 61 | static __u32 egress_srv_idx = EGRESS_SRV_IDX; | ||
| 62 | static __u32 egress_cli_idx = EGRESS_CLI_IDX; | ||
| 63 | static __u32 ingress_listen_idx = INGRESS_LISTEN_IDX; | ||
| 64 | |||
| 65 | static __u32 egress_linum_idx = EGRESS_LINUM_IDX; | ||
| 66 | static __u32 ingress_linum_idx = INGRESS_LINUM_IDX; | ||
| 46 | 67 | ||
| 47 | static void init_loopback6(struct sockaddr_in6 *sa6) | 68 | static void init_loopback6(struct sockaddr_in6 *sa6) |
| 48 | { | 69 | { |
| @@ -93,29 +114,46 @@ static void print_tp(const struct bpf_tcp_sock *tp) | |||
| 93 | 114 | ||
| 94 | static void check_result(void) | 115 | static void check_result(void) |
| 95 | { | 116 | { |
| 96 | struct bpf_tcp_sock srv_tp, cli_tp; | 117 | struct bpf_tcp_sock srv_tp, cli_tp, listen_tp; |
| 97 | struct bpf_sock srv_sk, cli_sk; | 118 | struct bpf_sock srv_sk, cli_sk, listen_sk; |
| 98 | __u32 linum, idx0 = 0; | 119 | __u32 ingress_linum, egress_linum; |
| 99 | int err; | 120 | int err; |
| 100 | 121 | ||
| 101 | err = bpf_map_lookup_elem(linum_map_fd, &idx0, &linum); | 122 | err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx, |
| 123 | &egress_linum); | ||
| 102 | CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)", | 124 | CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)", |
| 103 | "err:%d errno:%d", err, errno); | 125 | "err:%d errno:%d", err, errno); |
| 104 | 126 | ||
| 105 | err = bpf_map_lookup_elem(sk_map_fd, &srv_idx, &srv_sk); | 127 | err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx, |
| 106 | CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &srv_idx)", | 128 | &ingress_linum); |
| 129 | CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)", | ||
| 130 | "err:%d errno:%d", err, errno); | ||
| 131 | |||
| 132 | err = bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx, &srv_sk); | ||
| 133 | CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx)", | ||
| 134 | "err:%d errno:%d", err, errno); | ||
| 135 | err = bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx, &srv_tp); | ||
| 136 | CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx)", | ||
| 137 | "err:%d errno:%d", err, errno); | ||
| 138 | |||
| 139 | err = bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx, &cli_sk); | ||
| 140 | CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx)", | ||
| 107 | "err:%d errno:%d", err, errno); | 141 | "err:%d errno:%d", err, errno); |
| 108 | err = bpf_map_lookup_elem(tp_map_fd, &srv_idx, &srv_tp); | 142 | err = bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx, &cli_tp); |
| 109 | CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &srv_idx)", | 143 | CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx)", |
| 110 | "err:%d errno:%d", err, errno); | 144 | "err:%d errno:%d", err, errno); |
| 111 | 145 | ||
| 112 | err = bpf_map_lookup_elem(sk_map_fd, &cli_idx, &cli_sk); | 146 | err = bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx, &listen_sk); |
| 113 | CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &cli_idx)", | 147 | CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx)", |
| 114 | "err:%d errno:%d", err, errno); | 148 | "err:%d errno:%d", err, errno); |
| 115 | err = bpf_map_lookup_elem(tp_map_fd, &cli_idx, &cli_tp); | 149 | err = bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx, &listen_tp); |
| 116 | CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &cli_idx)", | 150 | CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx)", |
| 117 | "err:%d errno:%d", err, errno); | 151 | "err:%d errno:%d", err, errno); |
| 118 | 152 | ||
| 153 | printf("listen_sk: "); | ||
| 154 | print_sk(&listen_sk); | ||
| 155 | printf("\n"); | ||
| 156 | |||
| 119 | printf("srv_sk: "); | 157 | printf("srv_sk: "); |
| 120 | print_sk(&srv_sk); | 158 | print_sk(&srv_sk); |
| 121 | printf("\n"); | 159 | printf("\n"); |
| @@ -124,6 +162,10 @@ static void check_result(void) | |||
| 124 | print_sk(&cli_sk); | 162 | print_sk(&cli_sk); |
| 125 | printf("\n"); | 163 | printf("\n"); |
| 126 | 164 | ||
| 165 | printf("listen_tp: "); | ||
| 166 | print_tp(&listen_tp); | ||
| 167 | printf("\n"); | ||
| 168 | |||
| 127 | printf("srv_tp: "); | 169 | printf("srv_tp: "); |
| 128 | print_tp(&srv_tp); | 170 | print_tp(&srv_tp); |
| 129 | printf("\n"); | 171 | printf("\n"); |
| @@ -132,6 +174,19 @@ static void check_result(void) | |||
| 132 | print_tp(&cli_tp); | 174 | print_tp(&cli_tp); |
| 133 | printf("\n"); | 175 | printf("\n"); |
| 134 | 176 | ||
| 177 | CHECK(listen_sk.state != 10 || | ||
| 178 | listen_sk.family != AF_INET6 || | ||
| 179 | listen_sk.protocol != IPPROTO_TCP || | ||
| 180 | memcmp(listen_sk.src_ip6, &in6addr_loopback, | ||
| 181 | sizeof(listen_sk.src_ip6)) || | ||
| 182 | listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] || | ||
| 183 | listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] || | ||
| 184 | listen_sk.src_port != ntohs(srv_sa6.sin6_port) || | ||
| 185 | listen_sk.dst_port, | ||
| 186 | "Unexpected listen_sk", | ||
| 187 | "Check listen_sk output. ingress_linum:%u", | ||
| 188 | ingress_linum); | ||
| 189 | |||
| 135 | CHECK(srv_sk.state == 10 || | 190 | CHECK(srv_sk.state == 10 || |
| 136 | !srv_sk.state || | 191 | !srv_sk.state || |
| 137 | srv_sk.family != AF_INET6 || | 192 | srv_sk.family != AF_INET6 || |
| @@ -142,7 +197,8 @@ static void check_result(void) | |||
| 142 | sizeof(srv_sk.dst_ip6)) || | 197 | sizeof(srv_sk.dst_ip6)) || |
| 143 | srv_sk.src_port != ntohs(srv_sa6.sin6_port) || | 198 | srv_sk.src_port != ntohs(srv_sa6.sin6_port) || |
| 144 | srv_sk.dst_port != cli_sa6.sin6_port, | 199 | srv_sk.dst_port != cli_sa6.sin6_port, |
| 145 | "Unexpected srv_sk", "Check srv_sk output. linum:%u", linum); | 200 | "Unexpected srv_sk", "Check srv_sk output. egress_linum:%u", |
| 201 | egress_linum); | ||
| 146 | 202 | ||
| 147 | CHECK(cli_sk.state == 10 || | 203 | CHECK(cli_sk.state == 10 || |
| 148 | !cli_sk.state || | 204 | !cli_sk.state || |
| @@ -154,21 +210,31 @@ static void check_result(void) | |||
| 154 | sizeof(cli_sk.dst_ip6)) || | 210 | sizeof(cli_sk.dst_ip6)) || |
| 155 | cli_sk.src_port != ntohs(cli_sa6.sin6_port) || | 211 | cli_sk.src_port != ntohs(cli_sa6.sin6_port) || |
| 156 | cli_sk.dst_port != srv_sa6.sin6_port, | 212 | cli_sk.dst_port != srv_sa6.sin6_port, |
| 157 | "Unexpected cli_sk", "Check cli_sk output. linum:%u", linum); | 213 | "Unexpected cli_sk", "Check cli_sk output. egress_linum:%u", |
| 214 | egress_linum); | ||
| 215 | |||
| 216 | CHECK(listen_tp.data_segs_out || | ||
| 217 | listen_tp.data_segs_in || | ||
| 218 | listen_tp.total_retrans || | ||
| 219 | listen_tp.bytes_acked, | ||
| 220 | "Unexpected listen_tp", "Check listen_tp output. ingress_linum:%u", | ||
| 221 | ingress_linum); | ||
| 158 | 222 | ||
| 159 | CHECK(srv_tp.data_segs_out != 1 || | 223 | CHECK(srv_tp.data_segs_out != 1 || |
| 160 | srv_tp.data_segs_in || | 224 | srv_tp.data_segs_in || |
| 161 | srv_tp.snd_cwnd != 10 || | 225 | srv_tp.snd_cwnd != 10 || |
| 162 | srv_tp.total_retrans || | 226 | srv_tp.total_retrans || |
| 163 | srv_tp.bytes_acked != DATA_LEN, | 227 | srv_tp.bytes_acked != DATA_LEN, |
| 164 | "Unexpected srv_tp", "Check srv_tp output. linum:%u", linum); | 228 | "Unexpected srv_tp", "Check srv_tp output. egress_linum:%u", |
| 229 | egress_linum); | ||
| 165 | 230 | ||
| 166 | CHECK(cli_tp.data_segs_out || | 231 | CHECK(cli_tp.data_segs_out || |
| 167 | cli_tp.data_segs_in != 1 || | 232 | cli_tp.data_segs_in != 1 || |
| 168 | cli_tp.snd_cwnd != 10 || | 233 | cli_tp.snd_cwnd != 10 || |
| 169 | cli_tp.total_retrans || | 234 | cli_tp.total_retrans || |
| 170 | cli_tp.bytes_received != DATA_LEN, | 235 | cli_tp.bytes_received != DATA_LEN, |
| 171 | "Unexpected cli_tp", "Check cli_tp output. linum:%u", linum); | 236 | "Unexpected cli_tp", "Check cli_tp output. egress_linum:%u", |
| 237 | egress_linum); | ||
| 172 | } | 238 | } |
| 173 | 239 | ||
| 174 | static void test(void) | 240 | static void test(void) |
| @@ -211,10 +277,10 @@ static void test(void) | |||
| 211 | err, errno); | 277 | err, errno); |
| 212 | 278 | ||
| 213 | /* Update addr_map with srv_sa6 and cli_sa6 */ | 279 | /* Update addr_map with srv_sa6 and cli_sa6 */ |
| 214 | err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0); | 280 | err = bpf_map_update_elem(addr_map_fd, &addr_srv_idx, &srv_sa6, 0); |
| 215 | CHECK(err, "map_update", "err:%d errno:%d", err, errno); | 281 | CHECK(err, "map_update", "err:%d errno:%d", err, errno); |
| 216 | 282 | ||
| 217 | err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0); | 283 | err = bpf_map_update_elem(addr_map_fd, &addr_cli_idx, &cli_sa6, 0); |
| 218 | CHECK(err, "map_update", "err:%d errno:%d", err, errno); | 284 | CHECK(err, "map_update", "err:%d errno:%d", err, errno); |
| 219 | 285 | ||
| 220 | /* Connect from cli_sa6 to srv_sa6 */ | 286 | /* Connect from cli_sa6 to srv_sa6 */ |
| @@ -273,9 +339,9 @@ int main(int argc, char **argv) | |||
| 273 | struct bpf_prog_load_attr attr = { | 339 | struct bpf_prog_load_attr attr = { |
| 274 | .file = "test_sock_fields_kern.o", | 340 | .file = "test_sock_fields_kern.o", |
| 275 | .prog_type = BPF_PROG_TYPE_CGROUP_SKB, | 341 | .prog_type = BPF_PROG_TYPE_CGROUP_SKB, |
| 276 | .expected_attach_type = BPF_CGROUP_INET_EGRESS, | ||
| 277 | }; | 342 | }; |
| 278 | int cgroup_fd, prog_fd, err; | 343 | int cgroup_fd, egress_fd, ingress_fd, err; |
| 344 | struct bpf_program *ingress_prog; | ||
| 279 | struct bpf_object *obj; | 345 | struct bpf_object *obj; |
| 280 | struct bpf_map *map; | 346 | struct bpf_map *map; |
| 281 | 347 | ||
| @@ -293,12 +359,24 @@ int main(int argc, char **argv) | |||
| 293 | err = join_cgroup(TEST_CGROUP); | 359 | err = join_cgroup(TEST_CGROUP); |
| 294 | CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno); | 360 | CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno); |
| 295 | 361 | ||
| 296 | err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); | 362 | err = bpf_prog_load_xattr(&attr, &obj, &egress_fd); |
| 297 | CHECK(err, "bpf_prog_load_xattr()", "err:%d", err); | 363 | CHECK(err, "bpf_prog_load_xattr()", "err:%d", err); |
| 298 | 364 | ||
| 299 | err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0); | 365 | ingress_prog = bpf_object__find_program_by_title(obj, |
| 366 | "cgroup_skb/ingress"); | ||
| 367 | CHECK(!ingress_prog, | ||
| 368 | "bpf_object__find_program_by_title(cgroup_skb/ingress)", | ||
| 369 | "not found"); | ||
| 370 | ingress_fd = bpf_program__fd(ingress_prog); | ||
| 371 | |||
| 372 | err = bpf_prog_attach(egress_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0); | ||
| 300 | CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)", | 373 | CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)", |
| 301 | "err:%d errno%d", err, errno); | 374 | "err:%d errno%d", err, errno); |
| 375 | |||
| 376 | err = bpf_prog_attach(ingress_fd, cgroup_fd, | ||
| 377 | BPF_CGROUP_INET_INGRESS, 0); | ||
| 378 | CHECK(err == -1, "bpf_prog_attach(BPF_CGROUP_INET_INGRESS)", | ||
| 379 | "err:%d errno:%d", err, errno); | ||
| 302 | close(cgroup_fd); | 380 | close(cgroup_fd); |
| 303 | 381 | ||
| 304 | map = bpf_object__find_map_by_name(obj, "addr_map"); | 382 | map = bpf_object__find_map_by_name(obj, "addr_map"); |
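Editor's sketch: the test_sock_fields.c hunk above moves from attaching a single egress program to loading one object and attaching both an egress and an ingress cgroup/skb program. A minimal, hedged sketch of that pattern follows, using the same libbpf calls the test relies on (bpf_prog_load_xattr, bpf_object__find_program_by_title, bpf_prog_attach); the object file and section names are taken from the hunk, while the wrapper function and its cgroup_fd parameter are illustrative only.

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Load one object, then attach its egress and ingress programs to the
 * same cgroup, as the updated test does. Error handling is reduced to
 * simple -1 returns to keep the sketch short. */
static int attach_both(int cgroup_fd)
{
	struct bpf_prog_load_attr attr = {
		.file = "test_sock_fields_kern.o",
		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	};
	struct bpf_program *ingress_prog;
	struct bpf_object *obj;
	int egress_fd, ingress_fd;

	/* Loads every program in the object; the returned fd is that of the
	 * first program, which is the egress one here. */
	if (bpf_prog_load_xattr(&attr, &obj, &egress_fd))
		return -1;

	/* The ingress program is looked up by its SEC() title instead. */
	ingress_prog = bpf_object__find_program_by_title(obj,
							 "cgroup_skb/ingress");
	if (!ingress_prog)
		return -1;
	ingress_fd = bpf_program__fd(ingress_prog);

	if (bpf_prog_attach(egress_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0))
		return -1;
	return bpf_prog_attach(ingress_fd, cgroup_fd,
			       BPF_CGROUP_INET_INGRESS, 0);
}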
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 4004891afa9c..fb11240b758b 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c | |||
| @@ -908,6 +908,44 @@ | |||
| 908 | .result = REJECT, | 908 | .result = REJECT, |
| 909 | }, | 909 | }, |
| 910 | { | 910 | { |
| 911 | "calls: stack depth check in dead code", | ||
| 912 | .insns = { | ||
| 913 | /* main */ | ||
| 914 | BPF_MOV64_IMM(BPF_REG_1, 0), | ||
| 915 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */ | ||
| 916 | BPF_EXIT_INSN(), | ||
| 917 | /* A */ | ||
| 918 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), | ||
| 919 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */ | ||
| 920 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 921 | BPF_EXIT_INSN(), | ||
| 922 | /* B */ | ||
| 923 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */ | ||
| 924 | BPF_EXIT_INSN(), | ||
| 925 | /* C */ | ||
| 926 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */ | ||
| 927 | BPF_EXIT_INSN(), | ||
| 928 | /* D */ | ||
| 929 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */ | ||
| 930 | BPF_EXIT_INSN(), | ||
| 931 | /* E */ | ||
| 932 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */ | ||
| 933 | BPF_EXIT_INSN(), | ||
| 934 | /* F */ | ||
| 935 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */ | ||
| 936 | BPF_EXIT_INSN(), | ||
| 937 | /* G */ | ||
| 938 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */ | ||
| 939 | BPF_EXIT_INSN(), | ||
| 940 | /* H */ | ||
| 941 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 942 | BPF_EXIT_INSN(), | ||
| 943 | }, | ||
| 944 | .prog_type = BPF_PROG_TYPE_XDP, | ||
| 945 | .errstr = "call stack", | ||
| 946 | .result = REJECT, | ||
| 947 | }, | ||
| 948 | { | ||
| 911 | "calls: spill into caller stack frame", | 949 | "calls: spill into caller stack frame", |
| 912 | .insns = { | 950 | .insns = { |
| 913 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | 951 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), |
| @@ -1940,3 +1978,28 @@ | |||
| 1940 | .errstr = "!read_ok", | 1978 | .errstr = "!read_ok", |
| 1941 | .result = REJECT, | 1979 | .result = REJECT, |
| 1942 | }, | 1980 | }, |
| 1981 | { | ||
| 1982 | "calls: cross frame pruning - liveness propagation", | ||
| 1983 | .insns = { | ||
| 1984 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), | ||
| 1985 | BPF_MOV64_IMM(BPF_REG_8, 0), | ||
| 1986 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 1987 | BPF_MOV64_IMM(BPF_REG_8, 1), | ||
| 1988 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), | ||
| 1989 | BPF_MOV64_IMM(BPF_REG_9, 0), | ||
| 1990 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 1991 | BPF_MOV64_IMM(BPF_REG_9, 1), | ||
| 1992 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 1993 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
| 1994 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), | ||
| 1995 | BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), | ||
| 1996 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 1997 | BPF_EXIT_INSN(), | ||
| 1998 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), | ||
| 1999 | BPF_EXIT_INSN(), | ||
| 2000 | }, | ||
| 2001 | .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, | ||
| 2002 | .errstr_unpriv = "function calls to other bpf functions are allowed for root only", | ||
| 2003 | .errstr = "!read_ok", | ||
| 2004 | .result = REJECT, | ||
| 2005 | }, | ||
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c index 3ed3593bd8b6..923f2110072d 100644 --- a/tools/testing/selftests/bpf/verifier/ref_tracking.c +++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c | |||
| @@ -605,3 +605,171 @@ | |||
| 605 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | 605 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, |
| 606 | .result = ACCEPT, | 606 | .result = ACCEPT, |
| 607 | }, | 607 | }, |
| 608 | { | ||
| 609 | "reference tracking: use ptr from bpf_tcp_sock() after release", | ||
| 610 | .insns = { | ||
| 611 | BPF_SK_LOOKUP, | ||
| 612 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 613 | BPF_EXIT_INSN(), | ||
| 614 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), | ||
| 615 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 616 | BPF_EMIT_CALL(BPF_FUNC_tcp_sock), | ||
| 617 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), | ||
| 618 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 619 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 620 | BPF_EXIT_INSN(), | ||
| 621 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), | ||
| 622 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 623 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 624 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)), | ||
| 625 | BPF_EXIT_INSN(), | ||
| 626 | }, | ||
| 627 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
| 628 | .result = REJECT, | ||
| 629 | .errstr = "invalid mem access", | ||
| 630 | }, | ||
| 631 | { | ||
| 632 | "reference tracking: use ptr from bpf_sk_fullsock() after release", | ||
| 633 | .insns = { | ||
| 634 | BPF_SK_LOOKUP, | ||
| 635 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 636 | BPF_EXIT_INSN(), | ||
| 637 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), | ||
| 638 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 639 | BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), | ||
| 640 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), | ||
| 641 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 642 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 643 | BPF_EXIT_INSN(), | ||
| 644 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), | ||
| 645 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 646 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 647 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), | ||
| 648 | BPF_EXIT_INSN(), | ||
| 649 | }, | ||
| 650 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
| 651 | .result = REJECT, | ||
| 652 | .errstr = "invalid mem access", | ||
| 653 | }, | ||
| 654 | { | ||
| 655 | "reference tracking: use ptr from bpf_sk_fullsock(tp) after release", | ||
| 656 | .insns = { | ||
| 657 | BPF_SK_LOOKUP, | ||
| 658 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 659 | BPF_EXIT_INSN(), | ||
| 660 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), | ||
| 661 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 662 | BPF_EMIT_CALL(BPF_FUNC_tcp_sock), | ||
| 663 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), | ||
| 664 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 665 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 666 | BPF_EXIT_INSN(), | ||
| 667 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 668 | BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), | ||
| 669 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 670 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), | ||
| 671 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 672 | BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1), | ||
| 673 | BPF_EXIT_INSN(), | ||
| 674 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)), | ||
| 675 | BPF_EXIT_INSN(), | ||
| 676 | }, | ||
| 677 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
| 678 | .result = REJECT, | ||
| 679 | .errstr = "invalid mem access", | ||
| 680 | }, | ||
| 681 | { | ||
| 682 | "reference tracking: use sk after bpf_sk_release(tp)", | ||
| 683 | .insns = { | ||
| 684 | BPF_SK_LOOKUP, | ||
| 685 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 686 | BPF_EXIT_INSN(), | ||
| 687 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), | ||
| 688 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 689 | BPF_EMIT_CALL(BPF_FUNC_tcp_sock), | ||
| 690 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), | ||
| 691 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 692 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 693 | BPF_EXIT_INSN(), | ||
| 694 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 695 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 696 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)), | ||
| 697 | BPF_EXIT_INSN(), | ||
| 698 | }, | ||
| 699 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
| 700 | .result = REJECT, | ||
| 701 | .errstr = "invalid mem access", | ||
| 702 | }, | ||
| 703 | { | ||
| 704 | "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)", | ||
| 705 | .insns = { | ||
| 706 | BPF_SK_LOOKUP, | ||
| 707 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 708 | BPF_EXIT_INSN(), | ||
| 709 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), | ||
| 710 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 711 | BPF_EMIT_CALL(BPF_FUNC_get_listener_sock), | ||
| 712 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), | ||
| 713 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 714 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 715 | BPF_EXIT_INSN(), | ||
| 716 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 717 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), | ||
| 718 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 719 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)), | ||
| 720 | BPF_EXIT_INSN(), | ||
| 721 | }, | ||
| 722 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
| 723 | .result = ACCEPT, | ||
| 724 | }, | ||
| 725 | { | ||
| 726 | "reference tracking: bpf_sk_release(listen_sk)", | ||
| 727 | .insns = { | ||
| 728 | BPF_SK_LOOKUP, | ||
| 729 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 730 | BPF_EXIT_INSN(), | ||
| 731 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), | ||
| 732 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 733 | BPF_EMIT_CALL(BPF_FUNC_get_listener_sock), | ||
| 734 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), | ||
| 735 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 736 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 737 | BPF_EXIT_INSN(), | ||
| 738 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 739 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 740 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)), | ||
| 741 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 742 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 743 | BPF_EXIT_INSN(), | ||
| 744 | }, | ||
| 745 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
| 746 | .result = REJECT, | ||
| 747 | .errstr = "reference has not been acquired before", | ||
| 748 | }, | ||
| 749 | { | ||
| 750 | /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */ | ||
| 751 | "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)", | ||
| 752 | .insns = { | ||
| 753 | BPF_SK_LOOKUP, | ||
| 754 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 755 | BPF_EXIT_INSN(), | ||
| 756 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), | ||
| 757 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
| 758 | BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), | ||
| 759 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), | ||
| 760 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 761 | BPF_EMIT_CALL(BPF_FUNC_tcp_sock), | ||
| 762 | BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), | ||
| 763 | BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3), | ||
| 764 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 765 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 766 | BPF_EXIT_INSN(), | ||
| 767 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)), | ||
| 768 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 769 | BPF_EMIT_CALL(BPF_FUNC_sk_release), | ||
| 770 | BPF_EXIT_INSN(), | ||
| 771 | }, | ||
| 772 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
| 773 | .result = REJECT, | ||
| 774 | .errstr = "invalid mem access", | ||
| 775 | }, | ||
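Editor's sketch: the ref_tracking cases added above are written as raw BPF instructions; a rough restricted-C equivalent of the first one ("use ptr from bpf_tcp_sock() after release") may make the intent easier to see. This is only a sketch, assuming the selftests' bpf_helpers.h declarations and a SCHED_CLS section; the verifier is expected to reject the final load because releasing sk must also invalidate the tp pointer derived from it.

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("classifier")
int use_tp_after_release(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_tcp_sock *tp;
	struct bpf_sock *sk;

	/* -1 selects the current netns, as in the instruction-level tests. */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), -1, 0);
	if (!sk)
		return 0;

	tp = bpf_tcp_sock(sk);
	if (!tp) {
		bpf_sk_release(sk);
		return 0;
	}

	bpf_sk_release(sk);
	/* Rejected ("invalid mem access"): tp dies together with sk. */
	return tp->snd_cwnd;
}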
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c index 0ddfdf76aba5..416436231fab 100644 --- a/tools/testing/selftests/bpf/verifier/sock.c +++ b/tools/testing/selftests/bpf/verifier/sock.c | |||
| @@ -342,7 +342,7 @@ | |||
| 342 | }, | 342 | }, |
| 343 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | 343 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, |
| 344 | .result = REJECT, | 344 | .result = REJECT, |
| 345 | .errstr = "type=sock_common expected=sock", | 345 | .errstr = "reference has not been acquired before", |
| 346 | }, | 346 | }, |
| 347 | { | 347 | { |
| 348 | "bpf_sk_release(bpf_sk_fullsock(skb->sk))", | 348 | "bpf_sk_release(bpf_sk_fullsock(skb->sk))", |
| @@ -380,5 +380,5 @@ | |||
| 380 | }, | 380 | }, |
| 381 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | 381 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, |
| 382 | .result = REJECT, | 382 | .result = REJECT, |
| 383 | .errstr = "type=tcp_sock expected=sock", | 383 | .errstr = "reference has not been acquired before", |
| 384 | }, | 384 | }, |
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 3c1f4bdf9000..7514fcea91a7 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile | |||
| @@ -29,8 +29,8 @@ LIBKVM += $(LIBKVM_$(UNAME_M)) | |||
| 29 | INSTALL_HDR_PATH = $(top_srcdir)/usr | 29 | INSTALL_HDR_PATH = $(top_srcdir)/usr |
| 30 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ | 30 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ |
| 31 | LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include | 31 | LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include |
| 32 | CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I.. | 32 | CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I.. |
| 33 | LDFLAGS += -pthread | 33 | LDFLAGS += -pthread -no-pie |
| 34 | 34 | ||
| 35 | # After inclusion, $(OUTPUT) is defined and | 35 | # After inclusion, $(OUTPUT) is defined and |
| 36 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ | 36 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ |
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index a84785b02557..07b71ad9734a 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h | |||
| @@ -102,6 +102,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); | |||
| 102 | struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); | 102 | struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); |
| 103 | void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); | 103 | void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); |
| 104 | int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); | 104 | int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); |
| 105 | void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid); | ||
| 105 | void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, | 106 | void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, |
| 106 | struct kvm_mp_state *mp_state); | 107 | struct kvm_mp_state *mp_state); |
| 107 | void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); | 108 | void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); |
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index b52cfdefecbf..efa0aad8b3c6 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c | |||
| @@ -1121,6 +1121,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) | |||
| 1121 | return rc; | 1121 | return rc; |
| 1122 | } | 1122 | } |
| 1123 | 1123 | ||
| 1124 | void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid) | ||
| 1125 | { | ||
| 1126 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | ||
| 1127 | int ret; | ||
| 1128 | |||
| 1129 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | ||
| 1130 | |||
| 1131 | vcpu->state->immediate_exit = 1; | ||
| 1132 | ret = ioctl(vcpu->fd, KVM_RUN, NULL); | ||
| 1133 | vcpu->state->immediate_exit = 0; | ||
| 1134 | |||
| 1135 | TEST_ASSERT(ret == -1 && errno == EINTR, | ||
| 1136 | "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i", | ||
| 1137 | ret, errno); | ||
| 1138 | } | ||
| 1139 | |||
| 1124 | /* | 1140 | /* |
| 1125 | * VM VCPU Set MP State | 1141 | * VM VCPU Set MP State |
| 1126 | * | 1142 | * |
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c index d503a51fad30..7c2c4d4055a8 100644 --- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c +++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c | |||
| @@ -87,22 +87,25 @@ int main(int argc, char *argv[]) | |||
| 87 | while (1) { | 87 | while (1) { |
| 88 | rc = _vcpu_run(vm, VCPU_ID); | 88 | rc = _vcpu_run(vm, VCPU_ID); |
| 89 | 89 | ||
| 90 | if (run->exit_reason == KVM_EXIT_IO) { | 90 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, |
| 91 | switch (get_ucall(vm, VCPU_ID, &uc)) { | 91 | "Unexpected exit reason: %u (%s),\n", |
| 92 | case UCALL_SYNC: | 92 | run->exit_reason, |
| 93 | /* emulate hypervisor clearing CR4.OSXSAVE */ | 93 | exit_reason_str(run->exit_reason)); |
| 94 | vcpu_sregs_get(vm, VCPU_ID, &sregs); | 94 | |
| 95 | sregs.cr4 &= ~X86_CR4_OSXSAVE; | 95 | switch (get_ucall(vm, VCPU_ID, &uc)) { |
| 96 | vcpu_sregs_set(vm, VCPU_ID, &sregs); | 96 | case UCALL_SYNC: |
| 97 | break; | 97 | /* emulate hypervisor clearing CR4.OSXSAVE */ |
| 98 | case UCALL_ABORT: | 98 | vcpu_sregs_get(vm, VCPU_ID, &sregs); |
| 99 | TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit."); | 99 | sregs.cr4 &= ~X86_CR4_OSXSAVE; |
| 100 | break; | 100 | vcpu_sregs_set(vm, VCPU_ID, &sregs); |
| 101 | case UCALL_DONE: | 101 | break; |
| 102 | goto done; | 102 | case UCALL_ABORT: |
| 103 | default: | 103 | TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit."); |
| 104 | TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); | 104 | break; |
| 105 | } | 105 | case UCALL_DONE: |
| 106 | goto done; | ||
| 107 | default: | ||
| 108 | TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); | ||
| 106 | } | 109 | } |
| 107 | } | 110 | } |
| 108 | 111 | ||
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c index 4b3f556265f1..30f75856cf39 100644 --- a/tools/testing/selftests/kvm/x86_64/state_test.c +++ b/tools/testing/selftests/kvm/x86_64/state_test.c | |||
| @@ -134,6 +134,11 @@ int main(int argc, char *argv[]) | |||
| 134 | 134 | ||
| 135 | struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); | 135 | struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); |
| 136 | 136 | ||
| 137 | if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) { | ||
| 138 | fprintf(stderr, "immediate_exit not available, skipping test\n"); | ||
| 139 | exit(KSFT_SKIP); | ||
| 140 | } | ||
| 141 | |||
| 137 | /* Create VM */ | 142 | /* Create VM */ |
| 138 | vm = vm_create_default(VCPU_ID, 0, guest_code); | 143 | vm = vm_create_default(VCPU_ID, 0, guest_code); |
| 139 | vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); | 144 | vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); |
| @@ -156,8 +161,6 @@ int main(int argc, char *argv[]) | |||
| 156 | stage, run->exit_reason, | 161 | stage, run->exit_reason, |
| 157 | exit_reason_str(run->exit_reason)); | 162 | exit_reason_str(run->exit_reason)); |
| 158 | 163 | ||
| 159 | memset(®s1, 0, sizeof(regs1)); | ||
| 160 | vcpu_regs_get(vm, VCPU_ID, ®s1); | ||
| 161 | switch (get_ucall(vm, VCPU_ID, &uc)) { | 164 | switch (get_ucall(vm, VCPU_ID, &uc)) { |
| 162 | case UCALL_ABORT: | 165 | case UCALL_ABORT: |
| 163 | TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], | 166 | TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], |
| @@ -176,6 +179,17 @@ int main(int argc, char *argv[]) | |||
| 176 | uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx", | 179 | uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx", |
| 177 | stage, (ulong)uc.args[1]); | 180 | stage, (ulong)uc.args[1]); |
| 178 | 181 | ||
| 182 | /* | ||
| 183 | * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees | ||
| 184 | * guest state is consistent only after userspace re-enters the | ||
| 185 | * kernel with KVM_RUN. Complete IO prior to migrating state | ||
| 186 | * to a new VM. | ||
| 187 | */ | ||
| 188 | vcpu_run_complete_io(vm, VCPU_ID); | ||
| 189 | |||
| 190 | memset(®s1, 0, sizeof(regs1)); | ||
| 191 | vcpu_regs_get(vm, VCPU_ID, ®s1); | ||
| 192 | |||
| 179 | state = vcpu_save_state(vm, VCPU_ID); | 193 | state = vcpu_save_state(vm, VCPU_ID); |
| 180 | kvm_vm_release(vm); | 194 | kvm_vm_release(vm); |
| 181 | 195 | ||
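Editor's sketch: the comment added to state_test.c spells out the reason for the reordering: after a KVM_EXIT_IO exit, register and vCPU state are only guaranteed consistent once userspace has re-entered KVM_RUN, which vcpu_run_complete_io() does through immediate_exit. A condensed sketch of the resulting capture order, assuming the kvm selftest headers and the helpers used in this patch; the wrapper function itself is not part of the patch.

#include <stdint.h>
#include <string.h>
#include "kvm_util.h"

/* Capture registers after a KVM_EXIT_IO exit in the required order:
 * complete the pending IO first, then read the now-consistent state.
 * vcpu_save_state()/kvm_vm_release() would follow, as in the test above. */
static void capture_regs_after_io(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_regs *regs)
{
	/* Re-enters KVM_RUN with immediate_exit=1; see vcpu_run_complete_io()
	 * in the kvm_util.c hunk above. */
	vcpu_run_complete_io(vm, vcpu_id);

	memset(regs, 0, sizeof(*regs));
	vcpu_regs_get(vm, vcpu_id, regs);
}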
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json index 5970cee6d05f..b074ea9b6fe8 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json | |||
| @@ -286,5 +286,30 @@ | |||
| 286 | "teardown": [ | 286 | "teardown": [ |
| 287 | "$TC action flush action bpf" | 287 | "$TC action flush action bpf" |
| 288 | ] | 288 | ] |
| 289 | }, | ||
| 290 | { | ||
| 291 | "id": "b8a1", | ||
| 292 | "name": "Replace bpf action with invalid goto_chain control", | ||
| 293 | "category": [ | ||
| 294 | "actions", | ||
| 295 | "bpf" | ||
| 296 | ], | ||
| 297 | "setup": [ | ||
| 298 | [ | ||
| 299 | "$TC actions flush action bpf", | ||
| 300 | 0, | ||
| 301 | 1, | ||
| 302 | 255 | ||
| 303 | ], | ||
| 304 | "$TC action add action bpf bytecode '1,6 0 0 4294967295' pass index 90" | ||
| 305 | ], | ||
| 306 | "cmdUnderTest": "$TC action replace action bpf bytecode '1,6 0 0 4294967295' goto chain 42 index 90 cookie c1a0c1a0", | ||
| 307 | "expExitCode": "255", | ||
| 308 | "verifyCmd": "$TC action list action bpf", | ||
| 309 | "matchPattern": "action order [0-9]*: bpf.* default-action pass.*index 90", | ||
| 310 | "matchCount": "1", | ||
| 311 | "teardown": [ | ||
| 312 | "$TC action flush action bpf" | ||
| 313 | ] | ||
| 289 | } | 314 | } |
| 290 | ] | 315 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json index 13147a1f5731..cadde8f41fcd 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json | |||
| @@ -287,5 +287,30 @@ | |||
| 287 | "teardown": [ | 287 | "teardown": [ |
| 288 | "$TC actions flush action connmark" | 288 | "$TC actions flush action connmark" |
| 289 | ] | 289 | ] |
| 290 | }, | ||
| 291 | { | ||
| 292 | "id": "c506", | ||
| 293 | "name": "Replace connmark with invalid goto chain control", | ||
| 294 | "category": [ | ||
| 295 | "actions", | ||
| 296 | "connmark" | ||
| 297 | ], | ||
| 298 | "setup": [ | ||
| 299 | [ | ||
| 300 | "$TC actions flush action connmark", | ||
| 301 | 0, | ||
| 302 | 1, | ||
| 303 | 255 | ||
| 304 | ], | ||
| 305 | "$TC actions add action connmark pass index 90" | ||
| 306 | ], | ||
| 307 | "cmdUnderTest": "$TC actions replace action connmark goto chain 42 index 90 cookie c1a0c1a0", | ||
| 308 | "expExitCode": "255", | ||
| 309 | "verifyCmd": "$TC actions get action connmark index 90", | ||
| 310 | "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 90 ref", | ||
| 311 | "matchCount": "1", | ||
| 312 | "teardown": [ | ||
| 313 | "$TC actions flush action connmark" | ||
| 314 | ] | ||
| 290 | } | 315 | } |
| 291 | ] | 316 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json index a022792d392a..ddabb2fbb7c7 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json | |||
| @@ -500,5 +500,30 @@ | |||
| 500 | "matchPattern": "^[ \t]+index [0-9]+ ref", | 500 | "matchPattern": "^[ \t]+index [0-9]+ ref", |
| 501 | "matchCount": "0", | 501 | "matchCount": "0", |
| 502 | "teardown": [] | 502 | "teardown": [] |
| 503 | }, | ||
| 504 | { | ||
| 505 | "id": "d128", | ||
| 506 | "name": "Replace csum action with invalid goto chain control", | ||
| 507 | "category": [ | ||
| 508 | "actions", | ||
| 509 | "csum" | ||
| 510 | ], | ||
| 511 | "setup": [ | ||
| 512 | [ | ||
| 513 | "$TC actions flush action csum", | ||
| 514 | 0, | ||
| 515 | 1, | ||
| 516 | 255 | ||
| 517 | ], | ||
| 518 | "$TC actions add action csum iph index 90" | ||
| 519 | ], | ||
| 520 | "cmdUnderTest": "$TC actions replace action csum iph goto chain 42 index 90 cookie c1a0c1a0", | ||
| 521 | "expExitCode": "255", | ||
| 522 | "verifyCmd": "$TC actions get action csum index 90", | ||
| 523 | "matchPattern": "action order [0-9]*: csum \\(iph\\) action pass.*index 90 ref", | ||
| 524 | "matchCount": "1", | ||
| 525 | "teardown": [ | ||
| 526 | "$TC actions flush action csum" | ||
| 527 | ] | ||
| 503 | } | 528 | } |
| 504 | ] | 529 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json index 89189a03ce3d..814b7a8a478b 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json | |||
| @@ -560,5 +560,30 @@ | |||
| 560 | "teardown": [ | 560 | "teardown": [ |
| 561 | "$TC actions flush action gact" | 561 | "$TC actions flush action gact" |
| 562 | ] | 562 | ] |
| 563 | }, | ||
| 564 | { | ||
| 565 | "id": "ca89", | ||
| 566 | "name": "Replace gact action with invalid goto chain control", | ||
| 567 | "category": [ | ||
| 568 | "actions", | ||
| 569 | "gact" | ||
| 570 | ], | ||
| 571 | "setup": [ | ||
| 572 | [ | ||
| 573 | "$TC actions flush action gact", | ||
| 574 | 0, | ||
| 575 | 1, | ||
| 576 | 255 | ||
| 577 | ], | ||
| 578 | "$TC actions add action pass random determ drop 2 index 90" | ||
| 579 | ], | ||
| 580 | "cmdUnderTest": "$TC actions replace action goto chain 42 random determ drop 5 index 90 cookie c1a0c1a0", | ||
| 581 | "expExitCode": "255", | ||
| 582 | "verifyCmd": "$TC actions list action gact", | ||
| 583 | "matchPattern": "action order [0-9]*: gact action pass.*random type determ drop val 2.*index 90 ref", | ||
| 584 | "matchCount": "1", | ||
| 585 | "teardown": [ | ||
| 586 | "$TC actions flush action gact" | ||
| 587 | ] | ||
| 563 | } | 588 | } |
| 564 | ] | 589 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json index 0da3545cabdb..c13a68b98fc7 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json | |||
| @@ -1060,5 +1060,30 @@ | |||
| 1060 | "matchPattern": "action order [0-9]*: ife encode action pipe.*allow prio.*index 4", | 1060 | "matchPattern": "action order [0-9]*: ife encode action pipe.*allow prio.*index 4", |
| 1061 | "matchCount": "0", | 1061 | "matchCount": "0", |
| 1062 | "teardown": [] | 1062 | "teardown": [] |
| 1063 | }, | ||
| 1064 | { | ||
| 1065 | "id": "a0e2", | ||
| 1066 | "name": "Replace ife encode action with invalid goto chain control", | ||
| 1067 | "category": [ | ||
| 1068 | "actions", | ||
| 1069 | "ife" | ||
| 1070 | ], | ||
| 1071 | "setup": [ | ||
| 1072 | [ | ||
| 1073 | "$TC actions flush action ife", | ||
| 1074 | 0, | ||
| 1075 | 1, | ||
| 1076 | 255 | ||
| 1077 | ], | ||
| 1078 | "$TC actions add action ife encode allow mark pass index 90" | ||
| 1079 | ], | ||
| 1080 | "cmdUnderTest": "$TC actions replace action ife encode allow mark goto chain 42 index 90 cookie c1a0c1a0", | ||
| 1081 | "expExitCode": "255", | ||
| 1082 | "verifyCmd": "$TC actions get action ife index 90", | ||
| 1083 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E .*allow mark.*index 90 ref", | ||
| 1084 | "matchCount": "1", | ||
| 1085 | "teardown": [ | ||
| 1086 | "$TC actions flush action ife" | ||
| 1087 | ] | ||
| 1063 | } | 1088 | } |
| 1064 | ] | 1089 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json index db49fd0f8445..6e5fb3d25681 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json | |||
| @@ -434,5 +434,30 @@ | |||
| 434 | "teardown": [ | 434 | "teardown": [ |
| 435 | "$TC actions flush action mirred" | 435 | "$TC actions flush action mirred" |
| 436 | ] | 436 | ] |
| 437 | }, | ||
| 438 | { | ||
| 439 | "id": "2a9a", | ||
| 440 | "name": "Replace mirred action with invalid goto chain control", | ||
| 441 | "category": [ | ||
| 442 | "actions", | ||
| 443 | "mirred" | ||
| 444 | ], | ||
| 445 | "setup": [ | ||
| 446 | [ | ||
| 447 | "$TC actions flush action mirred", | ||
| 448 | 0, | ||
| 449 | 1, | ||
| 450 | 255 | ||
| 451 | ], | ||
| 452 | "$TC actions add action mirred ingress mirror dev lo drop index 90" | ||
| 453 | ], | ||
| 454 | "cmdUnderTest": "$TC actions replace action mirred ingress mirror dev lo goto chain 42 index 90 cookie c1a0c1a0", | ||
| 455 | "expExitCode": "255", | ||
| 456 | "verifyCmd": "$TC actions get action mirred index 90", | ||
| 457 | "matchPattern": "action order [0-9]*: mirred \\(Ingress Mirror to device lo\\) drop.*index 90 ref", | ||
| 458 | "matchCount": "1", | ||
| 459 | "teardown": [ | ||
| 460 | "$TC actions flush action mirred" | ||
| 461 | ] | ||
| 437 | } | 462 | } |
| 438 | ] | 463 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json index 0080dc2fd41c..bc12c1ccad30 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json | |||
| @@ -589,5 +589,30 @@ | |||
| 589 | "teardown": [ | 589 | "teardown": [ |
| 590 | "$TC actions flush action nat" | 590 | "$TC actions flush action nat" |
| 591 | ] | 591 | ] |
| 592 | }, | ||
| 593 | { | ||
| 594 | "id": "4b12", | ||
| 595 | "name": "Replace nat action with invalid goto chain control", | ||
| 596 | "category": [ | ||
| 597 | "actions", | ||
| 598 | "nat" | ||
| 599 | ], | ||
| 600 | "setup": [ | ||
| 601 | [ | ||
| 602 | "$TC actions flush action nat", | ||
| 603 | 0, | ||
| 604 | 1, | ||
| 605 | 255 | ||
| 606 | ], | ||
| 607 | "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 drop index 90" | ||
| 608 | ], | ||
| 609 | "cmdUnderTest": "$TC actions replace action nat ingress 1.18.1.1 1.18.2.2 goto chain 42 index 90 cookie c1a0c1a0", | ||
| 610 | "expExitCode": "255", | ||
| 611 | "verifyCmd": "$TC actions get action nat index 90", | ||
| 612 | "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 drop.*index 90 ref", | ||
| 613 | "matchCount": "1", | ||
| 614 | "teardown": [ | ||
| 615 | "$TC actions flush action nat" | ||
| 616 | ] | ||
| 592 | } | 617 | } |
| 593 | ] | 618 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json new file mode 100644 index 000000000000..b73ceb9e28b1 --- /dev/null +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | [ | ||
| 2 | { | ||
| 3 | "id": "319a", | ||
| 4 | "name": "Add pedit action that mangles IP TTL", | ||
| 5 | "category": [ | ||
| 6 | "actions", | ||
| 7 | "pedit" | ||
| 8 | ], | ||
| 9 | "setup": [ | ||
| 10 | [ | ||
| 11 | "$TC actions flush action pedit", | ||
| 12 | 0, | ||
| 13 | 1, | ||
| 14 | 255 | ||
| 15 | ] | ||
| 16 | ], | ||
| 17 | "cmdUnderTest": "$TC actions add action pedit ex munge ip ttl set 10", | ||
| 18 | "expExitCode": "0", | ||
| 19 | "verifyCmd": "$TC actions ls action pedit", | ||
| 20 | "matchPattern": "action order [0-9]+: pedit action pass keys 1.*index 1 ref.*key #0 at ipv4\\+8: val 0a000000 mask 00ffffff", | ||
| 21 | "matchCount": "1", | ||
| 22 | "teardown": [ | ||
| 23 | "$TC actions flush action pedit" | ||
| 24 | ] | ||
| 25 | }, | ||
| 26 | { | ||
| 27 | "id": "7e67", | ||
| 28 | "name": "Replace pedit action with invalid goto chain", | ||
| 29 | "category": [ | ||
| 30 | "actions", | ||
| 31 | "pedit" | ||
| 32 | ], | ||
| 33 | "setup": [ | ||
| 34 | [ | ||
| 35 | "$TC actions flush action pedit", | ||
| 36 | 0, | ||
| 37 | 1, | ||
| 38 | 255 | ||
| 39 | ], | ||
| 40 | "$TC actions add action pedit ex munge ip ttl set 10 pass index 90" | ||
| 41 | ], | ||
| 42 | "cmdUnderTest": "$TC actions replace action pedit ex munge ip ttl set 10 goto chain 42 index 90 cookie c1a0c1a0", | ||
| 43 | "expExitCode": "255", | ||
| 44 | "verifyCmd": "$TC actions ls action pedit", | ||
| 45 | "matchPattern": "action order [0-9]+: pedit action pass keys 1.*index 90 ref.*key #0 at ipv4\\+8: val 0a000000 mask 00ffffff", | ||
| 46 | "matchCount": "1", | ||
| 47 | "teardown": [ | ||
| 48 | "$TC actions flush action pedit" | ||
| 49 | ] | ||
| 50 | } | ||
| 51 | ] | ||
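Editor's note: the matchPattern in these new pedit tests encodes how "ex munge ip ttl set 10" is lowered. pedit rewrites 32-bit words and the IPv4 TTL is byte 8 of the header, so the key lands at ipv4+8 with the TTL in the word's leading byte and the other three bytes (protocol and checksum) kept by the mask. A tiny standalone check of that val/mask derivation, for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ttl = 10;
	/* The 32-bit word at ipv4+8 holds TTL, protocol and checksum; only
	 * the leading TTL byte is rewritten. */
	uint32_t val  = (uint32_t)ttl << 24;	/* 0x0a000000 */
	uint32_t mask = 0x00ffffff;		/* bits to keep: protocol + checksum */

	/* Prints "val 0a000000 mask 00ffffff", matching the matchPattern. */
	printf("val %08x mask %08x\n", val, mask);
	return 0;
}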
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json index 4086a50a670e..b8268da5adaa 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json | |||
| @@ -739,5 +739,30 @@ | |||
| 739 | "teardown": [ | 739 | "teardown": [ |
| 740 | "$TC actions flush action police" | 740 | "$TC actions flush action police" |
| 741 | ] | 741 | ] |
| 742 | }, | ||
| 743 | { | ||
| 744 | "id": "689e", | ||
| 745 | "name": "Replace police action with invalid goto chain control", | ||
| 746 | "category": [ | ||
| 747 | "actions", | ||
| 748 | "police" | ||
| 749 | ], | ||
| 750 | "setup": [ | ||
| 751 | [ | ||
| 752 | "$TC actions flush action police", | ||
| 753 | 0, | ||
| 754 | 1, | ||
| 755 | 255 | ||
| 756 | ], | ||
| 757 | "$TC actions add action police rate 3mbit burst 250k drop index 90" | ||
| 758 | ], | ||
| 759 | "cmdUnderTest": "$TC actions replace action police rate 3mbit burst 250k goto chain 42 index 90 cookie c1a0c1a0", | ||
| 760 | "expExitCode": "255", | ||
| 761 | "verifyCmd": "$TC actions get action police index 90", | ||
| 762 | "matchPattern": "action order [0-9]*: police 0x5a rate 3Mbit burst 250Kb mtu 2Kb action drop", | ||
| 763 | "matchCount": "1", | ||
| 764 | "teardown": [ | ||
| 765 | "$TC actions flush action police" | ||
| 766 | ] | ||
| 742 | } | 767 | } |
| 743 | ] | 768 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json index 3aca33c00039..ddabb160a11b 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json | |||
| @@ -144,6 +144,30 @@ | |||
| 144 | ] | 144 | ] |
| 145 | }, | 145 | }, |
| 146 | { | 146 | { |
| 147 | "id": "7571", | ||
| 148 | "name": "Add sample action with invalid rate", | ||
| 149 | "category": [ | ||
| 150 | "actions", | ||
| 151 | "sample" | ||
| 152 | ], | ||
| 153 | "setup": [ | ||
| 154 | [ | ||
| 155 | "$TC actions flush action sample", | ||
| 156 | 0, | ||
| 157 | 1, | ||
| 158 | 255 | ||
| 159 | ] | ||
| 160 | ], | ||
| 161 | "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2", | ||
| 162 | "expExitCode": "255", | ||
| 163 | "verifyCmd": "$TC actions get action sample index 2", | ||
| 164 | "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref", | ||
| 165 | "matchCount": "0", | ||
| 166 | "teardown": [ | ||
| 167 | "$TC actions flush action sample" | ||
| 168 | ] | ||
| 169 | }, | ||
| 170 | { | ||
| 147 | "id": "b6d4", | 171 | "id": "b6d4", |
| 148 | "name": "Add sample action with mandatory arguments and invalid control action", | 172 | "name": "Add sample action with mandatory arguments and invalid control action", |
| 149 | "category": [ | 173 | "category": [ |
| @@ -584,5 +608,30 @@ | |||
| 584 | "teardown": [ | 608 | "teardown": [ |
| 585 | "$TC actions flush action sample" | 609 | "$TC actions flush action sample" |
| 586 | ] | 610 | ] |
| 611 | }, | ||
| 612 | { | ||
| 613 | "id": "0a6e", | ||
| 614 | "name": "Replace sample action with invalid goto chain control", | ||
| 615 | "category": [ | ||
| 616 | "actions", | ||
| 617 | "sample" | ||
| 618 | ], | ||
| 619 | "setup": [ | ||
| 620 | [ | ||
| 621 | "$TC actions flush action sample", | ||
| 622 | 0, | ||
| 623 | 1, | ||
| 624 | 255 | ||
| 625 | ], | ||
| 626 | "$TC actions add action sample rate 1024 group 4 pass index 90" | ||
| 627 | ], | ||
| 628 | "cmdUnderTest": "$TC actions replace action sample rate 1024 group 7 goto chain 42 index 90 cookie c1a0c1a0", | ||
| 629 | "expExitCode": "255", | ||
| 630 | "verifyCmd": "$TC actions list action sample", | ||
| 631 | "matchPattern": "action order [0-9]+: sample rate 1/1024 group 4 pass.*index 90", | ||
| 632 | "matchCount": "1", | ||
| 633 | "teardown": [ | ||
| 634 | "$TC actions flush action sample" | ||
| 635 | ] | ||
| 587 | } | 636 | } |
| 588 | ] | 637 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json index e89a7aa4012d..8e8c1ae12260 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json | |||
| @@ -126,5 +126,30 @@ | |||
| 126 | "teardown": [ | 126 | "teardown": [ |
| 127 | "" | 127 | "" |
| 128 | ] | 128 | ] |
| 129 | }, | ||
| 130 | { | ||
| 131 | "id": "b776", | ||
| 132 | "name": "Replace simple action with invalid goto chain control", | ||
| 133 | "category": [ | ||
| 134 | "actions", | ||
| 135 | "simple" | ||
| 136 | ], | ||
| 137 | "setup": [ | ||
| 138 | [ | ||
| 139 | "$TC actions flush action simple", | ||
| 140 | 0, | ||
| 141 | 1, | ||
| 142 | 255 | ||
| 143 | ], | ||
| 144 | "$TC actions add action simple sdata \"hello\" pass index 90" | ||
| 145 | ], | ||
| 146 | "cmdUnderTest": "$TC actions replace action simple sdata \"world\" goto chain 42 index 90 cookie c1a0c1a0", | ||
| 147 | "expExitCode": "255", | ||
| 148 | "verifyCmd": "$TC actions list action simple", | ||
| 149 | "matchPattern": "action order [0-9]*: Simple <hello>.*index 90 ref", | ||
| 150 | "matchCount": "1", | ||
| 151 | "teardown": [ | ||
| 152 | "$TC actions flush action simple" | ||
| 153 | ] | ||
| 129 | } | 154 | } |
| 130 | ] | 155 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json index 5aaf593b914a..ecd96eda7f6a 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json | |||
| @@ -484,5 +484,30 @@ | |||
| 484 | "teardown": [ | 484 | "teardown": [ |
| 485 | "$TC actions flush action skbedit" | 485 | "$TC actions flush action skbedit" |
| 486 | ] | 486 | ] |
| 487 | }, | ||
| 488 | { | ||
| 489 | "id": "1b2b", | ||
| 490 | "name": "Replace skbedit action with invalid goto_chain control", | ||
| 491 | "category": [ | ||
| 492 | "actions", | ||
| 493 | "skbedit" | ||
| 494 | ], | ||
| 495 | "setup": [ | ||
| 496 | [ | ||
| 497 | "$TC actions flush action skbedit", | ||
| 498 | 0, | ||
| 499 | 1, | ||
| 500 | 255 | ||
| 501 | ], | ||
| 502 | "$TC actions add action skbedit ptype host pass index 90" | ||
| 503 | ], | ||
| 504 | "cmdUnderTest": "$TC actions replace action skbedit ptype host goto chain 42 index 90 cookie c1a0c1a0", | ||
| 505 | "expExitCode": "255", | ||
| 506 | "verifyCmd": "$TC actions list action skbedit", | ||
| 507 | "matchPattern": "action order [0-9]*: skbedit ptype host pass.*index 90 ref", | ||
| 508 | "matchCount": "1", | ||
| 509 | "teardown": [ | ||
| 510 | "$TC actions flush action skbedit" | ||
| 511 | ] | ||
| 487 | } | 512 | } |
| 488 | ] | 513 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json index fe3326e939c1..6eb4c4f97060 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json | |||
| @@ -392,5 +392,30 @@ | |||
| 392 | "teardown": [ | 392 | "teardown": [ |
| 393 | "$TC actions flush action skbmod" | 393 | "$TC actions flush action skbmod" |
| 394 | ] | 394 | ] |
| 395 | }, | ||
| 396 | { | ||
| 397 | "id": "b651", | ||
| 398 | "name": "Replace skbmod action with invalid goto_chain control", | ||
| 399 | "category": [ | ||
| 400 | "actions", | ||
| 401 | "skbmod" | ||
| 402 | ], | ||
| 403 | "setup": [ | ||
| 404 | [ | ||
| 405 | "$TC actions flush action skbmod", | ||
| 406 | 0, | ||
| 407 | 1, | ||
| 408 | 255 | ||
| 409 | ], | ||
| 410 | "$TC actions add action skbmod set etype 0x1111 pass index 90" | ||
| 411 | ], | ||
| 412 | "cmdUnderTest": "$TC actions replace action skbmod set etype 0x1111 goto chain 42 index 90 cookie c1a0c1a0", | ||
| 413 | "expExitCode": "255", | ||
| 414 | "verifyCmd": "$TC actions ls action skbmod", | ||
| 415 | "matchPattern": "action order [0-9]*: skbmod pass set etype 0x1111\\s+index 90 ref", | ||
| 416 | "matchCount": "1", | ||
| 417 | "teardown": [ | ||
| 418 | "$TC actions flush action skbmod" | ||
| 419 | ] | ||
| 395 | } | 420 | } |
| 396 | ] | 421 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json index e7e15a7336b6..28453a445fdb 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json | |||
| @@ -884,5 +884,30 @@ | |||
| 884 | "teardown": [ | 884 | "teardown": [ |
| 885 | "$TC actions flush action tunnel_key" | 885 | "$TC actions flush action tunnel_key" |
| 886 | ] | 886 | ] |
| 887 | }, | ||
| 888 | { | ||
| 889 | "id": "8242", | ||
| 890 | "name": "Replace tunnel_key set action with invalid goto chain", | ||
| 891 | "category": [ | ||
| 892 | "actions", | ||
| 893 | "tunnel_key" | ||
| 894 | ], | ||
| 895 | "setup": [ | ||
| 896 | [ | ||
| 897 | "$TC actions flush action tunnel_key", | ||
| 898 | 0, | ||
| 899 | 1, | ||
| 900 | 255 | ||
| 901 | ], | ||
| 902 | "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pass index 90" | ||
| 903 | ], | ||
| 904 | "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 10.10.10.2 dst_ip 20.20.20.1 dst_port 3129 id 2 csum goto chain 42 index 90 cookie c1a0c1a0", | ||
| 905 | "expExitCode": "255", | ||
| 906 | "verifyCmd": "$TC actions get action tunnel_key index 90", | ||
| 907 | "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*csum pass.*index 90 ref", | ||
| 908 | "matchCount": "1", | ||
| 909 | "teardown": [ | ||
| 910 | "$TC actions flush action tunnel_key" | ||
| 911 | ] | ||
| 887 | } | 912 | } |
| 888 | ] | 913 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json index 69ea09eefffc..cc7c7d758008 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json | |||
| @@ -688,5 +688,30 @@ | |||
| 688 | "teardown": [ | 688 | "teardown": [ |
| 689 | "$TC actions flush action vlan" | 689 | "$TC actions flush action vlan" |
| 690 | ] | 690 | ] |
| 691 | }, | ||
| 692 | { | ||
| 693 | "id": "e394", | ||
| 694 | "name": "Replace vlan push action with invalid goto chain control", | ||
| 695 | "category": [ | ||
| 696 | "actions", | ||
| 697 | "vlan" | ||
| 698 | ], | ||
| 699 | "setup": [ | ||
| 700 | [ | ||
| 701 | "$TC actions flush action vlan", | ||
| 702 | 0, | ||
| 703 | 1, | ||
| 704 | 255 | ||
| 705 | ], | ||
| 706 | "$TC actions add action vlan push id 500 pass index 90" | ||
| 707 | ], | ||
| 708 | "cmdUnderTest": "$TC actions replace action vlan push id 500 goto chain 42 index 90 cookie c1a0c1a0", | ||
| 709 | "expExitCode": "255", | ||
| 710 | "verifyCmd": "$TC actions get action vlan index 90", | ||
| 711 | "matchPattern": "action order [0-9]+: vlan.*push id 500 protocol 802.1Q priority 0 pass.*index 90 ref", | ||
| 712 | "matchCount": "1", | ||
| 713 | "teardown": [ | ||
| 714 | "$TC actions flush action vlan" | ||
| 715 | ] | ||
| 691 | } | 716 | } |
| 692 | ] | 717 | ] |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json index 99a5ffca1088..2d096b2abf2c 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json | |||
| @@ -19,6 +19,26 @@ | |||
| 19 | ] | 19 | ] |
| 20 | }, | 20 | }, |
| 21 | { | 21 | { |
| 22 | "id": "2638", | ||
| 23 | "name": "Add matchall and try to get it", | ||
| 24 | "category": [ | ||
| 25 | "filter", | ||
| 26 | "matchall" | ||
| 27 | ], | ||
| 28 | "setup": [ | ||
| 29 | "$TC qdisc add dev $DEV1 clsact", | ||
| 30 | "$TC filter add dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall action ok" | ||
| 31 | ], | ||
| 32 | "cmdUnderTest": "$TC filter get dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall", | ||
| 33 | "expExitCode": "0", | ||
| 34 | "verifyCmd": "$TC filter show dev $DEV1 ingress", | ||
| 35 | "matchPattern": "filter protocol all pref 1 matchall chain 0 handle 0x1234", | ||
| 36 | "matchCount": "1", | ||
| 37 | "teardown": [ | ||
| 38 | "$TC qdisc del dev $DEV1 clsact" | ||
| 39 | ] | ||
| 40 | }, | ||
| 41 | { | ||
| 22 | "id": "d052", | 42 | "id": "d052", |
| 23 | "name": "Add 1M filters with the same action", | 43 | "name": "Add 1M filters with the same action", |
| 24 | "category": [ | 44 | "category": [ |
diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py index 40ea95ce2ead..828c18584624 100644 --- a/tools/testing/selftests/tpm2/tpm2.py +++ b/tools/testing/selftests/tpm2/tpm2.py | |||
| @@ -22,6 +22,7 @@ TPM2_CC_UNSEAL = 0x015E | |||
| 22 | TPM2_CC_FLUSH_CONTEXT = 0x0165 | 22 | TPM2_CC_FLUSH_CONTEXT = 0x0165 |
| 23 | TPM2_CC_START_AUTH_SESSION = 0x0176 | 23 | TPM2_CC_START_AUTH_SESSION = 0x0176 |
| 24 | TPM2_CC_GET_CAPABILITY = 0x017A | 24 | TPM2_CC_GET_CAPABILITY = 0x017A |
| 25 | TPM2_CC_GET_RANDOM = 0x017B | ||
| 25 | TPM2_CC_PCR_READ = 0x017E | 26 | TPM2_CC_PCR_READ = 0x017E |
| 26 | TPM2_CC_POLICY_PCR = 0x017F | 27 | TPM2_CC_POLICY_PCR = 0x017F |
| 27 | TPM2_CC_PCR_EXTEND = 0x0182 | 28 | TPM2_CC_PCR_EXTEND = 0x0182 |
| @@ -357,9 +358,9 @@ class Client: | |||
| 357 | self.flags = flags | 358 | self.flags = flags |
| 358 | 359 | ||
| 359 | if (self.flags & Client.FLAG_SPACE) == 0: | 360 | if (self.flags & Client.FLAG_SPACE) == 0: |
| 360 | self.tpm = open('/dev/tpm0', 'r+b') | 361 | self.tpm = open('/dev/tpm0', 'r+b', buffering=0) |
| 361 | else: | 362 | else: |
| 362 | self.tpm = open('/dev/tpmrm0', 'r+b') | 363 | self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0) |
| 363 | 364 | ||
| 364 | def close(self): | 365 | def close(self): |
| 365 | self.tpm.close() | 366 | self.tpm.close() |
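Opening /dev/tpm0 and /dev/tpmrm0 with buffering=0 matters for the partial-read tests added below: with Python's default buffering, a small read() can pull far more than the requested bytes out of the character device into the user-space buffer. A minimal stand-alone sketch of that effect follows; FakeTpm is an invented stand-in and no real TPM device is touched.

    import io

    class FakeTpm(io.RawIOBase):
        """Invented stand-in for /dev/tpm0 holding a 44-byte canned response."""
        def __init__(self):
            self.pending = bytes(44)
        def readable(self):
            return True
        def readinto(self, b):
            n = min(len(b), len(self.pending))
            b[:n] = self.pending[:n]
            self.pending = self.pending[n:]
            return n

    unbuffered = FakeTpm()                      # like open(..., buffering=0)
    unbuffered.read(15)
    print(len(unbuffered.pending))              # 29 bytes still sit in the "device"

    buffered = io.BufferedReader(FakeTpm())     # like the previous buffered open
    buffered.read(15)
    print(len(buffered.raw.pending))            # 0 -- the buffer layer drained it all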
diff --git a/tools/testing/selftests/tpm2/tpm2_tests.py b/tools/testing/selftests/tpm2/tpm2_tests.py index 3bb066fea4a0..d4973be53493 100644 --- a/tools/testing/selftests/tpm2/tpm2_tests.py +++ b/tools/testing/selftests/tpm2/tpm2_tests.py | |||
| @@ -158,6 +158,69 @@ class SmokeTest(unittest.TestCase): | |||
| 158 | pass | 158 | pass |
| 159 | self.assertEqual(rejected, True) | 159 | self.assertEqual(rejected, True) |
| 160 | 160 | ||
| 161 | def test_read_partial_resp(self): | ||
| 162 | try: | ||
| 163 | fmt = '>HIIH' | ||
| 164 | cmd = struct.pack(fmt, | ||
| 165 | tpm2.TPM2_ST_NO_SESSIONS, | ||
| 166 | struct.calcsize(fmt), | ||
| 167 | tpm2.TPM2_CC_GET_RANDOM, | ||
| 168 | 0x20) | ||
| 169 | self.client.tpm.write(cmd) | ||
| 170 | hdr = self.client.tpm.read(10) | ||
| 171 | sz = struct.unpack('>I', hdr[2:6])[0] | ||
| 172 | rsp = self.client.tpm.read() | ||
| 173 | except: | ||
| 174 | pass | ||
| 175 | self.assertEqual(sz, 10 + 2 + 32) | ||
| 176 | self.assertEqual(len(rsp), 2 + 32) | ||
| 177 | |||
| 178 | def test_read_partial_overwrite(self): | ||
| 179 | try: | ||
| 180 | fmt = '>HIIH' | ||
| 181 | cmd = struct.pack(fmt, | ||
| 182 | tpm2.TPM2_ST_NO_SESSIONS, | ||
| 183 | struct.calcsize(fmt), | ||
| 184 | tpm2.TPM2_CC_GET_RANDOM, | ||
| 185 | 0x20) | ||
| 186 | self.client.tpm.write(cmd) | ||
| 187 | # Read part of the response | ||

| 188 | rsp1 = self.client.tpm.read(15) | ||
| 189 | |||
| 190 | # Send a new cmd | ||
| 191 | self.client.tpm.write(cmd) | ||
| 192 | |||
| 193 | # Read the whole response | ||
| 194 | rsp2 = self.client.tpm.read() | ||
| 195 | except: | ||
| 196 | pass | ||
| 197 | self.assertEqual(len(rsp1), 15) | ||
| 198 | self.assertEqual(len(rsp2), 10 + 2 + 32) | ||
| 199 | |||
| 200 | def test_send_two_cmds(self): | ||
| 201 | rejected = False | ||
| 202 | try: | ||
| 203 | fmt = '>HIIH' | ||
| 204 | cmd = struct.pack(fmt, | ||
| 205 | tpm2.TPM2_ST_NO_SESSIONS, | ||
| 206 | struct.calcsize(fmt), | ||
| 207 | tpm2.TPM2_CC_GET_RANDOM, | ||
| 208 | 0x20) | ||
| 209 | self.client.tpm.write(cmd) | ||
| 210 | |||
| 211 | # expect the second one to raise -EBUSY error | ||
| 212 | self.client.tpm.write(cmd) | ||
| 213 | rsp = self.client.tpm.read() | ||
| 214 | |||
| 215 | except IOError, e: | ||
| 216 | # read the response | ||
| 217 | rsp = self.client.tpm.read() | ||
| 218 | rejected = True | ||
| 219 | pass | ||
| 220 | except: | ||
| 221 | pass | ||
| 222 | self.assertEqual(rejected, True) | ||
| 223 | |||
| 161 | class SpaceTest(unittest.TestCase): | 224 | class SpaceTest(unittest.TestCase): |
| 162 | def setUp(self): | 225 | def setUp(self): |
| 163 | logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG) | 226 | logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG) |
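The three new cases build a raw TPM2_GetRandom command by hand and then exercise partial reads of the response. The framing they rely on can be reproduced stand-alone; the constants below mirror those in tpm2.py, and no TPM device is needed to check the sizes.

    import struct

    TPM2_ST_NO_SESSIONS = 0x8001
    TPM2_CC_GET_RANDOM = 0x017B      # the command code added to tpm2.py above

    # Command: tag (u16), total length (u32), command code (u32), bytesRequested (u16)
    fmt = '>HIIH'
    cmd = struct.pack(fmt, TPM2_ST_NO_SESSIONS, struct.calcsize(fmt),
                      TPM2_CC_GET_RANDOM, 0x20)
    assert len(cmd) == 12

    # Response: 10-byte header (tag u16, size u32, rc u32), then a u16 byte count
    # and the 0x20 random bytes, i.e. the 10 + 2 + 32 the tests assert on.
    def response_size(hdr):
        return struct.unpack('>I', hdr[2:6])[0]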
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c index 264d92da3240..370bd6c5e6cb 100644 --- a/virt/kvm/arm/hyp/vgic-v3-sr.c +++ b/virt/kvm/arm/hyp/vgic-v3-sr.c | |||
| @@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
| 222 | } | 222 | } |
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | if (used_lrs) { | 225 | if (used_lrs || cpu_if->its_vpe.its_vm) { |
| 226 | int i; | 226 | int i; |
| 227 | u32 elrsr; | 227 | u32 elrsr; |
| 228 | 228 | ||
| @@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | |||
| 247 | u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; | 247 | u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; |
| 248 | int i; | 248 | int i; |
| 249 | 249 | ||
| 250 | if (used_lrs) { | 250 | if (used_lrs || cpu_if->its_vpe.its_vm) { |
| 251 | write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); | 251 | write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); |
| 252 | 252 | ||
| 253 | for (i = 0; i < used_lrs; i++) | 253 | for (i = 0; i < used_lrs; i++) |
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index ffd7acdceac7..27c958306449 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c | |||
| @@ -102,8 +102,7 @@ static bool kvm_is_device_pfn(unsigned long pfn) | |||
| 102 | * @addr: IPA | 102 | * @addr: IPA |
| 103 | * @pmd: pmd pointer for IPA | 103 | * @pmd: pmd pointer for IPA |
| 104 | * | 104 | * |
| 105 | * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all | 105 | * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. |
| 106 | * pages in the range dirty. | ||
| 107 | */ | 106 | */ |
| 108 | static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) | 107 | static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) |
| 109 | { | 108 | { |
| @@ -121,8 +120,7 @@ static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) | |||
| 121 | * @addr: IPA | 120 | * @addr: IPA |
| 122 | * @pud: pud pointer for IPA | 121 | * @pud: pud pointer for IPA |
| 123 | * | 122 | * |
| 124 | * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. Marks all | 123 | * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. |
| 125 | * pages in the range dirty. | ||
| 126 | */ | 124 | */ |
| 127 | static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) | 125 | static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) |
| 128 | { | 126 | { |
| @@ -899,9 +897,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, | |||
| 899 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. | 897 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. |
| 900 | * @kvm: The KVM struct pointer for the VM. | 898 | * @kvm: The KVM struct pointer for the VM. |
| 901 | * | 899 | * |
| 902 | * Allocates only the stage-2 HW PGD level table(s) (can support either full | 900 | * Allocates only the stage-2 HW PGD level table(s) of size defined by |
| 903 | * 40-bit input addresses or limited to 32-bit input addresses). Clears the | 901 | * stage2_pgd_size(kvm). |
| 904 | * allocated pages. | ||
| 905 | * | 902 | * |
| 906 | * Note we don't need locking here as this is only called when the VM is | 903 | * Note we don't need locking here as this is only called when the VM is |
| 907 | * created, which can only be done once. | 904 | * created, which can only be done once. |
| @@ -1067,25 +1064,43 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache | |||
| 1067 | { | 1064 | { |
| 1068 | pmd_t *pmd, old_pmd; | 1065 | pmd_t *pmd, old_pmd; |
| 1069 | 1066 | ||
| 1067 | retry: | ||
| 1070 | pmd = stage2_get_pmd(kvm, cache, addr); | 1068 | pmd = stage2_get_pmd(kvm, cache, addr); |
| 1071 | VM_BUG_ON(!pmd); | 1069 | VM_BUG_ON(!pmd); |
| 1072 | 1070 | ||
| 1073 | old_pmd = *pmd; | 1071 | old_pmd = *pmd; |
| 1072 | /* | ||
| 1073 | * Multiple vcpus faulting on the same PMD entry, can | ||
| 1074 | * lead to them sequentially updating the PMD with the | ||
| 1075 | * same value. Following the break-before-make | ||
| 1076 | * (pmd_clear() followed by tlb_flush()) process can | ||
| 1077 | * hinder forward progress due to refaults generated | ||
| 1078 | * on missing translations. | ||
| 1079 | * | ||
| 1080 | * Skip updating the page table if the entry is | ||
| 1081 | * unchanged. | ||
| 1082 | */ | ||
| 1083 | if (pmd_val(old_pmd) == pmd_val(*new_pmd)) | ||
| 1084 | return 0; | ||
| 1085 | |||
| 1074 | if (pmd_present(old_pmd)) { | 1086 | if (pmd_present(old_pmd)) { |
| 1075 | /* | 1087 | /* |
| 1076 | * Multiple vcpus faulting on the same PMD entry, can | 1088 | * If we already have PTE level mapping for this block, |
| 1077 | * lead to them sequentially updating the PMD with the | 1089 | * we must unmap it to avoid inconsistent TLB state and |
| 1078 | * same value. Following the break-before-make | 1090 | * leaking the table page. We could end up in this situation |
| 1079 | * (pmd_clear() followed by tlb_flush()) process can | 1091 | * if the memory slot was marked for dirty logging and was |
| 1080 | * hinder forward progress due to refaults generated | 1092 | * reverted, leaving PTE level mappings for the pages accessed |
| 1081 | * on missing translations. | 1093 | * during the period. So, unmap the PTE level mapping for this |
| 1094 | * block and retry, as we could have released the upper level | ||
| 1095 | * table in the process. | ||
| 1082 | * | 1096 | * |
| 1083 | * Skip updating the page table if the entry is | 1097 | * Normal THP split/merge follows mmu_notifier callbacks and do |
| 1084 | * unchanged. | 1098 | * get handled accordingly. |
| 1085 | */ | 1099 | */ |
| 1086 | if (pmd_val(old_pmd) == pmd_val(*new_pmd)) | 1100 | if (!pmd_thp_or_huge(old_pmd)) { |
| 1087 | return 0; | 1101 | unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE); |
| 1088 | 1102 | goto retry; | |
| 1103 | } | ||
| 1089 | /* | 1104 | /* |
| 1090 | * Mapping in huge pages should only happen through a | 1105 | * Mapping in huge pages should only happen through a |
| 1091 | * fault. If a page is merged into a transparent huge | 1106 | * fault. If a page is merged into a transparent huge |
| @@ -1097,8 +1112,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache | |||
| 1097 | * should become splitting first, unmapped, merged, | 1112 | * should become splitting first, unmapped, merged, |
| 1098 | * and mapped back in on-demand. | 1113 | * and mapped back in on-demand. |
| 1099 | */ | 1114 | */ |
| 1100 | VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); | 1115 | WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); |
| 1101 | |||
| 1102 | pmd_clear(pmd); | 1116 | pmd_clear(pmd); |
| 1103 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 1117 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
| 1104 | } else { | 1118 | } else { |
| @@ -1114,6 +1128,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac | |||
| 1114 | { | 1128 | { |
| 1115 | pud_t *pudp, old_pud; | 1129 | pud_t *pudp, old_pud; |
| 1116 | 1130 | ||
| 1131 | retry: | ||
| 1117 | pudp = stage2_get_pud(kvm, cache, addr); | 1132 | pudp = stage2_get_pud(kvm, cache, addr); |
| 1118 | VM_BUG_ON(!pudp); | 1133 | VM_BUG_ON(!pudp); |
| 1119 | 1134 | ||
| @@ -1121,14 +1136,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac | |||
| 1121 | 1136 | ||
| 1122 | /* | 1137 | /* |
| 1123 | * A large number of vcpus faulting on the same stage 2 entry, | 1138 | * A large number of vcpus faulting on the same stage 2 entry, |
| 1124 | * can lead to a refault due to the | 1139 | * can lead to a refault due to the stage2_pud_clear()/tlb_flush(). |
| 1125 | * stage2_pud_clear()/tlb_flush(). Skip updating the page | 1140 | * Skip updating the page tables if there is no change. |
| 1126 | * tables if there is no change. | ||
| 1127 | */ | 1141 | */ |
| 1128 | if (pud_val(old_pud) == pud_val(*new_pudp)) | 1142 | if (pud_val(old_pud) == pud_val(*new_pudp)) |
| 1129 | return 0; | 1143 | return 0; |
| 1130 | 1144 | ||
| 1131 | if (stage2_pud_present(kvm, old_pud)) { | 1145 | if (stage2_pud_present(kvm, old_pud)) { |
| 1146 | /* | ||
| 1147 | * If we already have table level mapping for this block, unmap | ||
| 1148 | * the range for this block and retry. | ||
| 1149 | */ | ||
| 1150 | if (!stage2_pud_huge(kvm, old_pud)) { | ||
| 1151 | unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE); | ||
| 1152 | goto retry; | ||
| 1153 | } | ||
| 1154 | |||
| 1155 | WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp)); | ||
| 1132 | stage2_pud_clear(kvm, pudp); | 1156 | stage2_pud_clear(kvm, pudp); |
| 1133 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 1157 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
| 1134 | } else { | 1158 | } else { |
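The reworked stage2_set_pmd_huge()/stage2_set_pud_huge() paths above share the same shape: return early when the entry is unchanged, dissolve a leftover table-level mapping and retry, and only then perform break-before-make for a genuine change. A rough Python model of that control flow, purely illustrative: the dict stands in for the stage-2 table and the (value, is_block) tuple for a pmd_t/pud_t.

    def set_stage2_huge(table, addr, new_entry):
        """Invented model: entries are (value, is_block) pairs."""
        while True:
            old = table.get(addr)
            if old is not None and old[0] == new_entry[0]:
                return 0                  # unchanged: skip, avoid refault storms
            if old is not None:
                if not old[1]:            # stale PTE-level table (e.g. dirty logging
                    del table[addr]       # was reverted): unmap the range and retry
                    continue
                del table[addr]           # break: clear the entry and flush the TLB
            table[addr] = new_entry       # make: install the new block mapping
            return 0

    stage2 = {0x200000: (0xdead, False)}  # leftover PTE-level mapping at this IPA
    set_stage2_huge(stage2, 0x200000, (0xbeef, True))
    assert stage2[0x200000] == (0xbeef, True)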
| @@ -1451,13 +1475,11 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud, | |||
| 1451 | } | 1475 | } |
| 1452 | 1476 | ||
| 1453 | /** | 1477 | /** |
| 1454 | * stage2_wp_puds - write protect PGD range | 1478 | * stage2_wp_puds - write protect PGD range |
| 1455 | * @pgd: pointer to pgd entry | 1479 | * @pgd: pointer to pgd entry |
| 1456 | * @addr: range start address | 1480 | * @addr: range start address |
| 1457 | * @end: range end address | 1481 | * @end: range end address |
| 1458 | * | 1482 | */ |
| 1459 | * Process PUD entries, for a huge PUD we cause a panic. | ||
| 1460 | */ | ||
| 1461 | static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd, | 1483 | static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd, |
| 1462 | phys_addr_t addr, phys_addr_t end) | 1484 | phys_addr_t addr, phys_addr_t end) |
| 1463 | { | 1485 | { |
| @@ -1594,8 +1616,9 @@ static void kvm_send_hwpoison_signal(unsigned long address, | |||
| 1594 | send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current); | 1616 | send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current); |
| 1595 | } | 1617 | } |
| 1596 | 1618 | ||
| 1597 | static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot, | 1619 | static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, |
| 1598 | unsigned long hva) | 1620 | unsigned long hva, |
| 1621 | unsigned long map_size) | ||
| 1599 | { | 1622 | { |
| 1600 | gpa_t gpa_start; | 1623 | gpa_t gpa_start; |
| 1601 | hva_t uaddr_start, uaddr_end; | 1624 | hva_t uaddr_start, uaddr_end; |
| @@ -1610,34 +1633,34 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot, | |||
| 1610 | 1633 | ||
| 1611 | /* | 1634 | /* |
| 1612 | * Pages belonging to memslots that don't have the same alignment | 1635 | * Pages belonging to memslots that don't have the same alignment |
| 1613 | * within a PMD for userspace and IPA cannot be mapped with stage-2 | 1636 | * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2 |
| 1614 | * PMD entries, because we'll end up mapping the wrong pages. | 1637 | * PMD/PUD entries, because we'll end up mapping the wrong pages. |
| 1615 | * | 1638 | * |
| 1616 | * Consider a layout like the following: | 1639 | * Consider a layout like the following: |
| 1617 | * | 1640 | * |
| 1618 | * memslot->userspace_addr: | 1641 | * memslot->userspace_addr: |
| 1619 | * +-----+--------------------+--------------------+---+ | 1642 | * +-----+--------------------+--------------------+---+ |
| 1620 | * |abcde|fgh Stage-1 PMD | Stage-1 PMD tv|xyz| | 1643 | * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| |
| 1621 | * +-----+--------------------+--------------------+---+ | 1644 | * +-----+--------------------+--------------------+---+ |
| 1622 | * | 1645 | * |
| 1623 | * memslot->base_gfn << PAGE_SIZE: | 1646 | * memslot->base_gfn << PAGE_SIZE: |
| 1624 | * +---+--------------------+--------------------+-----+ | 1647 | * +---+--------------------+--------------------+-----+ |
| 1625 | * |abc|def Stage-2 PMD | Stage-2 PMD |tvxyz| | 1648 | * |abc|def Stage-2 block | Stage-2 block |tvxyz| |
| 1626 | * +---+--------------------+--------------------+-----+ | 1649 | * +---+--------------------+--------------------+-----+ |
| 1627 | * | 1650 | * |
| 1628 | * If we create those stage-2 PMDs, we'll end up with this incorrect | 1651 | * If we create those stage-2 blocks, we'll end up with this incorrect |
| 1629 | * mapping: | 1652 | * mapping: |
| 1630 | * d -> f | 1653 | * d -> f |
| 1631 | * e -> g | 1654 | * e -> g |
| 1632 | * f -> h | 1655 | * f -> h |
| 1633 | */ | 1656 | */ |
| 1634 | if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK)) | 1657 | if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) |
| 1635 | return false; | 1658 | return false; |
| 1636 | 1659 | ||
| 1637 | /* | 1660 | /* |
| 1638 | * Next, let's make sure we're not trying to map anything not covered | 1661 | * Next, let's make sure we're not trying to map anything not covered |
| 1639 | * by the memslot. This means we have to prohibit PMD size mappings | 1662 | * by the memslot. This means we have to prohibit block size mappings |
| 1640 | * for the beginning and end of a non-PMD aligned and non-PMD sized | 1663 | * for the beginning and end of a non-block aligned and non-block sized |
| 1641 | * memory slot (illustrated by the head and tail parts of the | 1664 | * memory slot (illustrated by the head and tail parts of the |
| 1642 | * userspace view above containing pages 'abcde' and 'xyz', | 1665 | * userspace view above containing pages 'abcde' and 'xyz', |
| 1643 | * respectively). | 1666 | * respectively). |
| @@ -1646,8 +1669,8 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot, | |||
| 1646 | * userspace_addr or the base_gfn, as both are equally aligned (per | 1669 | * userspace_addr or the base_gfn, as both are equally aligned (per |
| 1647 | * the check above) and equally sized. | 1670 | * the check above) and equally sized. |
| 1648 | */ | 1671 | */ |
| 1649 | return (hva & S2_PMD_MASK) >= uaddr_start && | 1672 | return (hva & ~(map_size - 1)) >= uaddr_start && |
| 1650 | (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end; | 1673 | (hva & ~(map_size - 1)) + map_size <= uaddr_end; |
| 1651 | } | 1674 | } |
| 1652 | 1675 | ||
| 1653 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | 1676 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
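fault_supports_stage2_huge_mapping() generalizes the old PMD-only check to any block size: the IPA and the userspace address must share the same offset within a map_size block, and the whole block around hva must sit inside the memslot. The arithmetic is easy to model outside the kernel; the addresses below are illustrative values only.

    def supports_huge_mapping(gpa_start, uaddr_start, uaddr_end, hva, map_size):
        # Offsets within a block must agree between IPA and userspace address.
        if (gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)):
            return False
        # The whole block containing hva must be covered by the memslot.
        start = hva & ~(map_size - 1)
        return start >= uaddr_start and start + map_size <= uaddr_end

    PMD_SIZE = 2 * 1024 * 1024
    # Aligned slot: a 2 MiB block in the middle is fine.
    print(supports_huge_mapping(0x0, 0x40000000, 0x40000000 + 16 * PMD_SIZE,
                                0x40000000 + 3 * PMD_SIZE, PMD_SIZE))   # True
    # Misaligned by one page (the 'abcde'/'abc' picture above): no block mapping.
    print(supports_huge_mapping(0x1000, 0x40000000, 0x40000000 + 16 * PMD_SIZE,
                                0x40000000 + 3 * PMD_SIZE, PMD_SIZE))   # False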
| @@ -1676,12 +1699,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
| 1676 | return -EFAULT; | 1699 | return -EFAULT; |
| 1677 | } | 1700 | } |
| 1678 | 1701 | ||
| 1679 | if (!fault_supports_stage2_pmd_mappings(memslot, hva)) | ||
| 1680 | force_pte = true; | ||
| 1681 | |||
| 1682 | if (logging_active) | ||
| 1683 | force_pte = true; | ||
| 1684 | |||
| 1685 | /* Let's check if we will get back a huge page backed by hugetlbfs */ | 1702 | /* Let's check if we will get back a huge page backed by hugetlbfs */ |
| 1686 | down_read(¤t->mm->mmap_sem); | 1703 | down_read(¤t->mm->mmap_sem); |
| 1687 | vma = find_vma_intersection(current->mm, hva, hva + 1); | 1704 | vma = find_vma_intersection(current->mm, hva, hva + 1); |
| @@ -1692,6 +1709,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
| 1692 | } | 1709 | } |
| 1693 | 1710 | ||
| 1694 | vma_pagesize = vma_kernel_pagesize(vma); | 1711 | vma_pagesize = vma_kernel_pagesize(vma); |
| 1712 | if (logging_active || | ||
| 1713 | !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) { | ||
| 1714 | force_pte = true; | ||
| 1715 | vma_pagesize = PAGE_SIZE; | ||
| 1716 | } | ||
| 1717 | |||
| 1695 | /* | 1718 | /* |
| 1696 | * The stage2 has a minimum of 2 level table (For arm64 see | 1719 | * The stage2 has a minimum of 2 level table (For arm64 see |
| 1697 | * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can | 1720 | * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can |
| @@ -1699,11 +1722,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
| 1699 | * As for PUD huge maps, we must make sure that we have at least | 1722 | * As for PUD huge maps, we must make sure that we have at least |
| 1700 | * 3 levels, i.e, PMD is not folded. | 1723 | * 3 levels, i.e, PMD is not folded. |
| 1701 | */ | 1724 | */ |
| 1702 | if ((vma_pagesize == PMD_SIZE || | 1725 | if (vma_pagesize == PMD_SIZE || |
| 1703 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) && | 1726 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) |
| 1704 | !force_pte) { | ||
| 1705 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; | 1727 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; |
| 1706 | } | ||
| 1707 | up_read(¤t->mm->mmap_sem); | 1728 | up_read(¤t->mm->mmap_sem); |
| 1708 | 1729 | ||
| 1709 | /* We need minimum second+third level pages */ | 1730 | /* We need minimum second+third level pages */ |
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index ab3f47745d9c..44ceaccb18cf 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c | |||
| @@ -754,8 +754,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, | |||
| 754 | u64 indirect_ptr, type = GITS_BASER_TYPE(baser); | 754 | u64 indirect_ptr, type = GITS_BASER_TYPE(baser); |
| 755 | phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser); | 755 | phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser); |
| 756 | int esz = GITS_BASER_ENTRY_SIZE(baser); | 756 | int esz = GITS_BASER_ENTRY_SIZE(baser); |
| 757 | int index; | 757 | int index, idx; |
| 758 | gfn_t gfn; | 758 | gfn_t gfn; |
| 759 | bool ret; | ||
| 759 | 760 | ||
| 760 | switch (type) { | 761 | switch (type) { |
| 761 | case GITS_BASER_TYPE_DEVICE: | 762 | case GITS_BASER_TYPE_DEVICE: |
| @@ -782,7 +783,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, | |||
| 782 | 783 | ||
| 783 | if (eaddr) | 784 | if (eaddr) |
| 784 | *eaddr = addr; | 785 | *eaddr = addr; |
| 785 | return kvm_is_visible_gfn(its->dev->kvm, gfn); | 786 | |
| 787 | goto out; | ||
| 786 | } | 788 | } |
| 787 | 789 | ||
| 788 | /* calculate and check the index into the 1st level */ | 790 | /* calculate and check the index into the 1st level */ |
| @@ -812,7 +814,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, | |||
| 812 | 814 | ||
| 813 | if (eaddr) | 815 | if (eaddr) |
| 814 | *eaddr = indirect_ptr; | 816 | *eaddr = indirect_ptr; |
| 815 | return kvm_is_visible_gfn(its->dev->kvm, gfn); | 817 | |
| 818 | out: | ||
| 819 | idx = srcu_read_lock(&its->dev->kvm->srcu); | ||
| 820 | ret = kvm_is_visible_gfn(its->dev->kvm, gfn); | ||
| 821 | srcu_read_unlock(&its->dev->kvm->srcu, idx); | ||
| 822 | return ret; | ||
| 816 | } | 823 | } |
| 817 | 824 | ||
| 818 | static int vgic_its_alloc_collection(struct vgic_its *its, | 825 | static int vgic_its_alloc_collection(struct vgic_its *its, |
| @@ -1729,8 +1736,8 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev) | |||
| 1729 | kfree(its); | 1736 | kfree(its); |
| 1730 | } | 1737 | } |
| 1731 | 1738 | ||
| 1732 | int vgic_its_has_attr_regs(struct kvm_device *dev, | 1739 | static int vgic_its_has_attr_regs(struct kvm_device *dev, |
| 1733 | struct kvm_device_attr *attr) | 1740 | struct kvm_device_attr *attr) |
| 1734 | { | 1741 | { |
| 1735 | const struct vgic_register_region *region; | 1742 | const struct vgic_register_region *region; |
| 1736 | gpa_t offset = attr->attr; | 1743 | gpa_t offset = attr->attr; |
| @@ -1750,9 +1757,9 @@ int vgic_its_has_attr_regs(struct kvm_device *dev, | |||
| 1750 | return 0; | 1757 | return 0; |
| 1751 | } | 1758 | } |
| 1752 | 1759 | ||
| 1753 | int vgic_its_attr_regs_access(struct kvm_device *dev, | 1760 | static int vgic_its_attr_regs_access(struct kvm_device *dev, |
| 1754 | struct kvm_device_attr *attr, | 1761 | struct kvm_device_attr *attr, |
| 1755 | u64 *reg, bool is_write) | 1762 | u64 *reg, bool is_write) |
| 1756 | { | 1763 | { |
| 1757 | const struct vgic_register_region *region; | 1764 | const struct vgic_register_region *region; |
| 1758 | struct vgic_its *its; | 1765 | struct vgic_its *its; |
| @@ -1919,7 +1926,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev, | |||
| 1919 | ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | | 1926 | ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | |
| 1920 | ite->collection->collection_id; | 1927 | ite->collection->collection_id; |
| 1921 | val = cpu_to_le64(val); | 1928 | val = cpu_to_le64(val); |
| 1922 | return kvm_write_guest(kvm, gpa, &val, ite_esz); | 1929 | return kvm_write_guest_lock(kvm, gpa, &val, ite_esz); |
| 1923 | } | 1930 | } |
| 1924 | 1931 | ||
| 1925 | /** | 1932 | /** |
| @@ -2066,7 +2073,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev, | |||
| 2066 | (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) | | 2073 | (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) | |
| 2067 | (dev->num_eventid_bits - 1)); | 2074 | (dev->num_eventid_bits - 1)); |
| 2068 | val = cpu_to_le64(val); | 2075 | val = cpu_to_le64(val); |
| 2069 | return kvm_write_guest(kvm, ptr, &val, dte_esz); | 2076 | return kvm_write_guest_lock(kvm, ptr, &val, dte_esz); |
| 2070 | } | 2077 | } |
| 2071 | 2078 | ||
| 2072 | /** | 2079 | /** |
| @@ -2246,7 +2253,7 @@ static int vgic_its_save_cte(struct vgic_its *its, | |||
| 2246 | ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | | 2253 | ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | |
| 2247 | collection->collection_id); | 2254 | collection->collection_id); |
| 2248 | val = cpu_to_le64(val); | 2255 | val = cpu_to_le64(val); |
| 2249 | return kvm_write_guest(its->dev->kvm, gpa, &val, esz); | 2256 | return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz); |
| 2250 | } | 2257 | } |
| 2251 | 2258 | ||
| 2252 | static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) | 2259 | static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) |
| @@ -2317,7 +2324,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its) | |||
| 2317 | */ | 2324 | */ |
| 2318 | val = 0; | 2325 | val = 0; |
| 2319 | BUG_ON(cte_esz > sizeof(val)); | 2326 | BUG_ON(cte_esz > sizeof(val)); |
| 2320 | ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz); | 2327 | ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); |
| 2321 | return ret; | 2328 | return ret; |
| 2322 | } | 2329 | } |
| 2323 | 2330 | ||
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 408a78eb6a97..9f87e58dbd4a 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
| @@ -358,7 +358,7 @@ retry: | |||
| 358 | if (status) { | 358 | if (status) { |
| 359 | /* clear consumed data */ | 359 | /* clear consumed data */ |
| 360 | val &= ~(1 << bit_nr); | 360 | val &= ~(1 << bit_nr); |
| 361 | ret = kvm_write_guest(kvm, ptr, &val, 1); | 361 | ret = kvm_write_guest_lock(kvm, ptr, &val, 1); |
| 362 | if (ret) | 362 | if (ret) |
| 363 | return ret; | 363 | return ret; |
| 364 | } | 364 | } |
| @@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) | |||
| 409 | else | 409 | else |
| 410 | val &= ~(1 << bit_nr); | 410 | val &= ~(1 << bit_nr); |
| 411 | 411 | ||
| 412 | ret = kvm_write_guest(kvm, ptr, &val, 1); | 412 | ret = kvm_write_guest_lock(kvm, ptr, &val, 1); |
| 413 | if (ret) | 413 | if (ret) |
| 414 | return ret; | 414 | return ret; |
| 415 | } | 415 | } |
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index abd9c7352677..3af69f2a3866 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
| @@ -867,15 +867,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
| 867 | * either observe the new interrupt before or after doing this check, | 867 | * either observe the new interrupt before or after doing this check, |
| 868 | * and introducing additional synchronization mechanism doesn't change | 868 | * and introducing additional synchronization mechanism doesn't change |
| 869 | * this. | 869 | * this. |
| 870 | * | ||
| 871 | * Note that we still need to go through the whole thing if anything | ||
| 872 | * can be directly injected (GICv4). | ||
| 870 | */ | 873 | */ |
| 871 | if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) | 874 | if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) && |
| 875 | !vgic_supports_direct_msis(vcpu->kvm)) | ||
| 872 | return; | 876 | return; |
| 873 | 877 | ||
| 874 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); | 878 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); |
| 875 | 879 | ||
| 876 | raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); | 880 | if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) { |
| 877 | vgic_flush_lr_state(vcpu); | 881 | raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); |
| 878 | raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); | 882 | vgic_flush_lr_state(vcpu); |
| 883 | raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); | ||
| 884 | } | ||
| 879 | 885 | ||
| 880 | if (can_access_vgic_from_kernel()) | 886 | if (can_access_vgic_from_kernel()) |
| 881 | vgic_restore_state(vcpu); | 887 | vgic_restore_state(vcpu); |
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 4325250afd72..001aeda4c154 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
| @@ -214,9 +214,9 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) | |||
| 214 | 214 | ||
| 215 | if (flags & EPOLLHUP) { | 215 | if (flags & EPOLLHUP) { |
| 216 | /* The eventfd is closing, detach from KVM */ | 216 | /* The eventfd is closing, detach from KVM */ |
| 217 | unsigned long flags; | 217 | unsigned long iflags; |
| 218 | 218 | ||
| 219 | spin_lock_irqsave(&kvm->irqfds.lock, flags); | 219 | spin_lock_irqsave(&kvm->irqfds.lock, iflags); |
| 220 | 220 | ||
| 221 | /* | 221 | /* |
| 222 | * We must check if someone deactivated the irqfd before | 222 | * We must check if someone deactivated the irqfd before |
| @@ -230,7 +230,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) | |||
| 230 | if (irqfd_is_active(irqfd)) | 230 | if (irqfd_is_active(irqfd)) |
| 231 | irqfd_deactivate(irqfd); | 231 | irqfd_deactivate(irqfd); |
| 232 | 232 | ||
| 233 | spin_unlock_irqrestore(&kvm->irqfds.lock, flags); | 233 | spin_unlock_irqrestore(&kvm->irqfds.lock, iflags); |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | return 0; | 236 | return 0; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f25aa98a94df..55fe8e20d8fd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -2905,6 +2905,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, | |||
| 2905 | { | 2905 | { |
| 2906 | struct kvm_device *dev = filp->private_data; | 2906 | struct kvm_device *dev = filp->private_data; |
| 2907 | 2907 | ||
| 2908 | if (dev->kvm->mm != current->mm) | ||
| 2909 | return -EIO; | ||
| 2910 | |||
| 2908 | switch (ioctl) { | 2911 | switch (ioctl) { |
| 2909 | case KVM_SET_DEVICE_ATTR: | 2912 | case KVM_SET_DEVICE_ATTR: |
| 2910 | return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); | 2913 | return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); |
