556 files changed, 6027 insertions, 3023 deletions
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt new file mode 100644 index 000000000000..b2830b435895 --- /dev/null +++ b/Documentation/block/null_blk.txt | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | Null block device driver | ||
| 2 | ================================================================================ | ||
| 3 | |||
| 4 | I. Overview | ||
| 5 | |||
| 6 | The null block device (/dev/nullb*) is used for benchmarking the various | ||
| 7 | block-layer implementations. It emulates a block device of X gigabytes in size. | ||
| 8 | The following instances are possible: | ||
| 9 | |||
| 10 | Single-queue block-layer | ||
| 11 | - Request-based. | ||
| 12 | - Single submission queue per device. | ||
| 13 | - Implements IO scheduling algorithms (CFQ, Deadline, noop). | ||
| 14 | Multi-queue block-layer | ||
| 15 | - Request-based. | ||
| 16 | - Configurable submission queues per device. | ||
| 17 | No block-layer (known as bio-based) | ||
| 18 | - Bio-based. IO requests are submitted directly to the device driver. | ||
| 19 | - Directly accepts bio data structures and returns them. | ||
| 20 | |||
| 21 | All of them have a completion queue for each core in the system. | ||
| 22 | |||
| 23 | II. Module parameters applicable for all instances: | ||
| 24 | |||
| 25 | queue_mode=[0-2]: Default: 2-Multi-queue | ||
| 26 | Selects which block-layer the module should instantiate with. | ||
| 27 | |||
| 28 | 0: Bio-based. | ||
| 29 | 1: Single-queue. | ||
| 30 | 2: Multi-queue. | ||
| 31 | |||
| 32 | home_node=[0..nr_nodes]: Default: NUMA_NO_NODE | ||
| 33 | Selects what CPU node the data structures are allocated from. | ||
| 34 | |||
| 35 | gb=[Size in GB]: Default: 250GB | ||
| 36 | The size of the device reported to the system. | ||
| 37 | |||
| 38 | bs=[Block size (in bytes)]: Default: 512 bytes | ||
| 39 | The block size reported to the system. | ||
| 40 | |||
| 41 | nr_devices=[Number of devices]: Default: 2 | ||
| 42 | Number of block devices instantiated. They are instantiated as /dev/nullb0, | ||
| 43 | etc. | ||
| 44 | |||
| 45 | irq_mode=[0-2]: Default: 1-Soft-irq | ||
| 46 | The completion mode used for completing IOs to the block-layer. | ||
| 47 | |||
| 48 | 0: None. | ||
| 49 | 1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead | ||
| 50 | when IOs are issued from a CPU node other than the home node the device is | ||
| 51 | connected to. | ||
| 52 | 2: Timer: Waits a specific period (completion_nsec) for each IO before | ||
| 53 | completion. | ||
| 54 | |||
| 55 | completion_nsec=[ns]: Default: 10,000 ns | ||
| 56 | Combined with irq_mode=2 (timer). The time each completion event must wait. | ||
| 57 | |||
| 58 | submit_queues=[0..nr_cpus]: | ||
| 59 | The number of submission queues attached to the device driver. If unset, it | ||
| 60 | defaults to 1 on single-queue and bio-based instances. For multi-queue, | ||
| 61 | it is ignored when the use_per_node_hctx module parameter is 1. | ||
| 62 | |||
| 63 | hw_queue_depth=[0..qdepth]: Default: 64 | ||
| 64 | The hardware queue depth of the device. | ||
| 65 | |||
| 66 | III: Multi-queue specific parameters | ||
| 67 | |||
| 68 | use_per_node_hctx=[0/1]: Default: 0 | ||
| 69 | 0: The number of submit queues is set to the value of the submit_queues | ||
| 70 | parameter. | ||
| 71 | 1: The multi-queue block layer is instantiated with a hardware dispatch | ||
| 72 | queue for each CPU node in the system. | ||
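For reference, a minimal example of loading the driver with the parameters documented above (the values here are illustrative and not taken from the patch):

    # 8 GB multi-queue instance with 4 submission queues and timer-based
    # completions after 10,000 ns
    modprobe null_blk queue_mode=2 gb=8 bs=4096 nr_devices=1 \
            submit_queues=4 irq_mode=2 completion_nsec=10000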
diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt index 46f5c791ea0d..0f2f920e8734 100644 --- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt +++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt | |||
| @@ -159,6 +159,8 @@ clock which they consume. | |||
| 159 | mixer 343 | 159 | mixer 343 |
| 160 | hdmi 344 | 160 | hdmi 344 |
| 161 | g2d 345 | 161 | g2d 345 |
| 162 | mdma0 346 | ||
| 163 | smmu_mdma0 347 | ||
| 162 | 164 | ||
| 163 | 165 | ||
| 164 | [Clock Muxes] | 166 | [Clock Muxes] |
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt index 2e3304888ffc..b4bd98af1cc7 100644 --- a/Documentation/devicetree/bindings/mfd/tps65910.txt +++ b/Documentation/devicetree/bindings/mfd/tps65910.txt | |||
| @@ -21,7 +21,7 @@ Required properties: | |||
| 21 | 21 | ||
| 22 | The valid regulator-compatible values are: | 22 | The valid regulator-compatible values are: |
| 23 | tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1, | 23 | tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1, |
| 24 | vaux2, vaux33, vmmc | 24 | vaux2, vaux33, vmmc, vbb |
| 25 | tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5, | 25 | tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5, |
| 26 | ldo6, ldo7, ldo8 | 26 | ldo6, ldo7, ldo8 |
| 27 | 27 | ||
| @@ -38,7 +38,7 @@ Required properties: | |||
| 38 | vcc4-supply: VAUX1 and VAUX2 input. | 38 | vcc4-supply: VAUX1 and VAUX2 input. |
| 39 | vcc5-supply: VPLL and VDAC input. | 39 | vcc5-supply: VPLL and VDAC input. |
| 40 | vcc6-supply: VDIG1 and VDIG2 input. | 40 | vcc6-supply: VDIG1 and VDIG2 input. |
| 41 | vcc7-supply: VRTC input. | 41 | vcc7-supply: VRTC and VBB input. |
| 42 | vccio-supply: VIO input. | 42 | vccio-supply: VIO input. |
| 43 | tps65911: | 43 | tps65911: |
| 44 | vcc1-supply: VDD1 input. | 44 | vcc1-supply: VDD1 input. |
diff --git a/Documentation/devicetree/bindings/regulator/act8865-regulator.txt b/Documentation/devicetree/bindings/regulator/act8865-regulator.txt new file mode 100644 index 000000000000..bef1fbb647ca --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/act8865-regulator.txt | |||
| @@ -0,0 +1,60 @@ | |||
| 1 | ACT8865 regulator | ||
| 2 | ------------------- | ||
| 3 | |||
| 4 | Required properties: | ||
| 5 | - compatible: "active-semi,act8865" | ||
| 6 | - reg: I2C slave address | ||
| 7 | |||
| 8 | Any standard regulator properties can be used to configure the single regulator. | ||
| 9 | |||
| 10 | The valid names for regulators are: | ||
| 11 | DCDC_REG1, DCDC_REG2, DCDC_REG3, LDO_REG1, LDO_REG2, LDO_REG3, LDO_REG4. | ||
| 12 | |||
| 13 | Example: | ||
| 14 | -------- | ||
| 15 | |||
| 16 | i2c1: i2c@f0018000 { | ||
| 17 | pmic: act8865@5b { | ||
| 18 | compatible = "active-semi,act8865"; | ||
| 19 | reg = <0x5b>; | ||
| 20 | status = "disabled"; | ||
| 21 | |||
| 22 | regulators { | ||
| 23 | vcc_1v8_reg: DCDC_REG1 { | ||
| 24 | regulator-name = "VCC_1V8"; | ||
| 25 | regulator-min-microvolt = <1800000>; | ||
| 26 | regulator-max-microvolt = <1800000>; | ||
| 27 | regulator-always-on; | ||
| 28 | }; | ||
| 29 | |||
| 30 | vcc_1v2_reg: DCDC_REG2 { | ||
| 31 | regulator-name = "VCC_1V2"; | ||
| 32 | regulator-min-microvolt = <1100000>; | ||
| 33 | regulator-max-microvolt = <1300000>; | ||
| 34 | regulator-suspend-mem-microvolt = <1150000>; | ||
| 35 | regulator-suspend-standby-microvolt = <1150000>; | ||
| 36 | regulator-always-on; | ||
| 37 | }; | ||
| 38 | |||
| 39 | vcc_3v3_reg: DCDC_REG3 { | ||
| 40 | regulator-name = "VCC_3V3"; | ||
| 41 | regulator-min-microvolt = <3300000>; | ||
| 42 | regulator-max-microvolt = <3300000>; | ||
| 43 | regulator-always-on; | ||
| 44 | }; | ||
| 45 | |||
| 46 | vddana_reg: LDO_REG1 { | ||
| 47 | regulator-name = "VDDANA"; | ||
| 48 | regulator-min-microvolt = <3300000>; | ||
| 49 | regulator-max-microvolt = <3300000>; | ||
| 50 | regulator-always-on; | ||
| 51 | }; | ||
| 52 | |||
| 53 | vddfuse_reg: LDO_REG2 { | ||
| 54 | regulator-name = "FUSE_2V5"; | ||
| 55 | regulator-min-microvolt = <2500000>; | ||
| 56 | regulator-max-microvolt = <2500000>; | ||
| 57 | }; | ||
| 58 | }; | ||
| 59 | }; | ||
| 60 | }; | ||
diff --git a/Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt b/Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt index d1660a90fc06..fc6b38f035bd 100644 --- a/Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt | |||
| @@ -83,7 +83,7 @@ as per the datasheet of s5m8767. | |||
| 83 | 83 | ||
| 84 | - LDOn | 84 | - LDOn |
| 85 | - valid values for n are 1 to 28 | 85 | - valid values for n are 1 to 28 |
| 86 | - Example: LDO0, LD01, LDO28 | 86 | - Example: LDO1, LDO2, LDO28 |
| 87 | - BUCKn | 87 | - BUCKn |
| 88 | - valid values for n are 1 to 9. | 88 | - valid values for n are 1 to 9. |
| 89 | - Example: BUCK1, BUCK2, BUCK9 | 89 | - Example: BUCK1, BUCK2, BUCK9 |
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index edbb8d88c85e..519421f28691 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt | |||
| @@ -3,6 +3,7 @@ Device tree binding vendor prefix registry. Keep list in alphabetical order. | |||
| 3 | This isn't an exhaustive list, but you should add new prefixes to it before | 3 | This isn't an exhaustive list, but you should add new prefixes to it before |
| 4 | using them to avoid name-space collisions. | 4 | using them to avoid name-space collisions. |
| 5 | 5 | ||
| 6 | active-semi Active-Semi International Inc | ||
| 6 | ad Avionic Design GmbH | 7 | ad Avionic Design GmbH |
| 7 | adi Analog Devices, Inc. | 8 | adi Analog Devices, Inc. |
| 8 | aeroflexgaisler Aeroflex Gaisler AB | 9 | aeroflexgaisler Aeroflex Gaisler AB |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 50680a59a2ff..b9e9bd854298 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
| @@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
| 1529 | 1529 | ||
| 1530 | * atapi_dmadir: Enable ATAPI DMADIR bridge support | 1530 | * atapi_dmadir: Enable ATAPI DMADIR bridge support |
| 1531 | 1531 | ||
| 1532 | * disable: Disable this device. | ||
| 1533 | |||
| 1532 | If there are multiple matching configurations changing | 1534 | If there are multiple matching configurations changing |
| 1533 | the same attribute, the last one is used. | 1535 | the same attribute, the last one is used. |
| 1534 | 1536 | ||
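The new "disable" keyword is used like the other libata.force values; a hedged example of the kernel command line syntax (the port and device numbers here are illustrative):

    libata.force=2:disable        # ignore everything attached to ATA port 2
    libata.force=1.00:disable     # ignore device 0 on port 1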
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt new file mode 100644 index 000000000000..2b40e04d3c49 --- /dev/null +++ b/Documentation/module-signing.txt | |||
| @@ -0,0 +1,240 @@ | |||
| 1 | ============================== | ||
| 2 | KERNEL MODULE SIGNING FACILITY | ||
| 3 | ============================== | ||
| 4 | |||
| 5 | CONTENTS | ||
| 6 | |||
| 7 | - Overview. | ||
| 8 | - Configuring module signing. | ||
| 9 | - Generating signing keys. | ||
| 10 | - Public keys in the kernel. | ||
| 11 | - Manually signing modules. | ||
| 12 | - Signed modules and stripping. | ||
| 13 | - Loading signed modules. | ||
| 14 | - Non-valid signatures and unsigned modules. | ||
| 15 | - Administering/protecting the private key. | ||
| 16 | |||
| 17 | |||
| 18 | ======== | ||
| 19 | OVERVIEW | ||
| 20 | ======== | ||
| 21 | |||
| 22 | The kernel module signing facility cryptographically signs modules during | ||
| 23 | installation and then checks the signature upon loading the module. This | ||
| 24 | allows increased kernel security by disallowing the loading of unsigned modules | ||
| 25 | or modules signed with an invalid key. Module signing increases security by | ||
| 26 | making it harder to load a malicious module into the kernel. The module | ||
| 27 | signature checking is done by the kernel so that it is not necessary to have | ||
| 28 | trusted userspace bits. | ||
| 29 | |||
| 30 | This facility uses X.509 ITU-T standard certificates to encode the public keys | ||
| 31 | involved. The signatures are not themselves encoded in any industry-standard | ||
| 32 | format. The facility currently only supports the RSA public key encryption | ||
| 33 | standard (though it is pluggable and permits others to be used). The possible | ||
| 34 | hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and | ||
| 35 | SHA-512 (the algorithm is selected by data in the signature). | ||
| 36 | |||
| 37 | |||
| 38 | ========================== | ||
| 39 | CONFIGURING MODULE SIGNING | ||
| 40 | ========================== | ||
| 41 | |||
| 42 | The module signing facility is enabled by going to the "Enable Loadable Module | ||
| 43 | Support" section of the kernel configuration and turning on | ||
| 44 | |||
| 45 | CONFIG_MODULE_SIG "Module signature verification" | ||
| 46 | |||
| 47 | This has a number of options available: | ||
| 48 | |||
| 49 | (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE) | ||
| 50 | |||
| 51 | This specifies how the kernel should deal with a module that has a | ||
| 52 | signature for which the key is not known or a module that is unsigned. | ||
| 53 | |||
| 54 | If this is off (i.e. "permissive"), then modules for which the key is not | ||
| 55 | available and modules that are unsigned are permitted, but the kernel will | ||
| 56 | be marked as being tainted. | ||
| 57 | |||
| 58 | If this is on (i.e. "restrictive"), only modules that have a valid | ||
| 59 | signature that can be verified by a public key in the kernel's possession | ||
| 60 | will be loaded. All other modules will generate an error. | ||
| 61 | |||
| 62 | Irrespective of the setting here, if the module has a signature block that | ||
| 63 | cannot be parsed, it will be rejected out of hand. | ||
| 64 | |||
| 65 | |||
| 66 | (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL) | ||
| 67 | |||
| 68 | If this is on then modules will be automatically signed during the | ||
| 69 | modules_install phase of a build. If this is off, then the modules must | ||
| 70 | be signed manually using: | ||
| 71 | |||
| 72 | scripts/sign-file | ||
| 73 | |||
| 74 | |||
| 75 | (3) "Which hash algorithm should modules be signed with?" | ||
| 76 | |||
| 77 | This presents a choice of which hash algorithm the installation phase will | ||
| 78 | sign the modules with: | ||
| 79 | |||
| 80 | CONFIG_MODULE_SIG_SHA1 "Sign modules with SHA-1" | ||
| 81 | CONFIG_MODULE_SIG_SHA224 "Sign modules with SHA-224" | ||
| 82 | CONFIG_MODULE_SIG_SHA256 "Sign modules with SHA-256" | ||
| 83 | CONFIG_MODULE_SIG_SHA384 "Sign modules with SHA-384" | ||
| 84 | CONFIG_MODULE_SIG_SHA512 "Sign modules with SHA-512" | ||
| 85 | |||
| 86 | The algorithm selected here will also be built into the kernel (rather | ||
| 87 | than being a module) so that modules signed with that algorithm can have | ||
| 88 | their signatures checked without causing a dependency loop. | ||
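As a rough sketch only, a configuration that signs all modules with SHA-256 and refuses anything unverifiable might contain the following (assuming the standard Kconfig symbol names):

    CONFIG_MODULE_SIG=y
    CONFIG_MODULE_SIG_FORCE=y
    CONFIG_MODULE_SIG_ALL=y
    CONFIG_MODULE_SIG_SHA256=y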
| 89 | |||
| 90 | |||
| 91 | ======================= | ||
| 92 | GENERATING SIGNING KEYS | ||
| 93 | ======================= | ||
| 94 | |||
| 95 | Cryptographic keypairs are required to generate and check signatures. A | ||
| 96 | private key is used to generate a signature and the corresponding public key is | ||
| 97 | used to check it. The private key is only needed during the build, after which | ||
| 98 | it can be deleted or stored securely. The public key gets built into the | ||
| 99 | kernel so that it can be used to check the signatures as the modules are | ||
| 100 | loaded. | ||
| 101 | |||
| 102 | Under normal conditions, the kernel build will automatically generate a new | ||
| 103 | keypair using openssl if one does not exist in the files: | ||
| 104 | |||
| 105 | signing_key.priv | ||
| 106 | signing_key.x509 | ||
| 107 | |||
| 108 | during the building of vmlinux (the public part of the key needs to be built | ||
| 109 | into vmlinux) using parameters in the: | ||
| 110 | |||
| 111 | x509.genkey | ||
| 112 | |||
| 113 | file (which is also generated if it does not already exist). | ||
| 114 | |||
| 115 | It is strongly recommended that you provide your own x509.genkey file. | ||
| 116 | |||
| 117 | Most notably, in the x509.genkey file, the req_distinguished_name section | ||
| 118 | should be altered from the default: | ||
| 119 | |||
| 120 | [ req_distinguished_name ] | ||
| 121 | O = Magrathea | ||
| 122 | CN = Glacier signing key | ||
| 123 | emailAddress = slartibartfast@magrathea.h2g2 | ||
| 124 | |||
| 125 | The generated RSA key size can also be set with: | ||
| 126 | |||
| 127 | [ req ] | ||
| 128 | default_bits = 4096 | ||
| 129 | |||
| 130 | |||
| 131 | It is also possible to manually generate the private/public key files using the | ||
| 132 | x509.genkey key generation configuration file in the root directory of the Linux | ||
| 133 | kernel source tree and the openssl command. The following is an example to | ||
| 134 | generate the public/private key files: | ||
| 135 | |||
| 136 | openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \ | ||
| 137 | -config x509.genkey -outform DER -out signing_key.x509 \ | ||
| 138 | -keyout signing_key.priv | ||
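The generated certificate can optionally be inspected to confirm the subject and key size (an extra verification step, not part of the original text):

    openssl x509 -inform DER -in signing_key.x509 -noout -text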
| 139 | |||
| 140 | |||
| 141 | ========================= | ||
| 142 | PUBLIC KEYS IN THE KERNEL | ||
| 143 | ========================= | ||
| 144 | |||
| 145 | The kernel contains a ring of public keys that can be viewed by root. They're | ||
| 146 | in a keyring called ".system_keyring" that can be seen by: | ||
| 147 | |||
| 148 | [root@deneb ~]# cat /proc/keys | ||
| 149 | ... | ||
| 150 | 223c7853 I------ 1 perm 1f030000 0 0 keyring .system_keyring: 1 | ||
| 151 | 302d2d52 I------ 1 perm 1f010000 0 0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 [] | ||
| 152 | ... | ||
| 153 | |||
| 154 | Beyond the public key generated specifically for module signing, any file | ||
| 155 | placed in the kernel source root directory or the kernel build root directory | ||
| 156 | whose name is suffixed with ".x509" will be assumed to be an X.509 public key | ||
| 157 | and will be added to the keyring. | ||
| 158 | |||
| 159 | Further, the architecture code may take public keys from a hardware store and | ||
| 160 | add them as well (e.g. from the UEFI key database). | ||
| 161 | |||
| 162 | Finally, it is possible to add additional public keys by doing: | ||
| 163 | |||
| 164 | keyctl padd asymmetric "" [.system_keyring-ID] <[key-file] | ||
| 165 | |||
| 166 | e.g.: | ||
| 167 | |||
| 168 | keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509 | ||
| 169 | |||
| 170 | Note, however, that the kernel will only permit keys to be added to | ||
| 171 | .system_keyring _if_ the new key's X.509 wrapper is validly signed by a key | ||
| 172 | that is already resident in the .system_keyring at the time the key is added. | ||
| 173 | |||
| 174 | |||
| 175 | ========================= | ||
| 176 | MANUALLY SIGNING MODULES | ||
| 177 | ========================= | ||
| 178 | |||
| 179 | To manually sign a module, use the scripts/sign-file tool available in | ||
| 180 | the Linux kernel source tree. The script requires 4 arguments: | ||
| 181 | |||
| 182 | 1. The hash algorithm (e.g., sha256) | ||
| 183 | 2. The private key filename | ||
| 184 | 3. The public key filename | ||
| 185 | 4. The kernel module to be signed | ||
| 186 | |||
| 187 | The following is an example to sign a kernel module: | ||
| 188 | |||
| 189 | scripts/sign-file sha512 kernel-signkey.priv \ | ||
| 190 | kernel-signkey.x509 module.ko | ||
| 191 | |||
| 192 | The hash algorithm used does not have to match the one configured, but if it | ||
| 193 | doesn't, you should make sure that the hash algorithm is either built into the | ||
| 194 | kernel or can be loaded without requiring itself (avoiding a dependency loop). | ||
| 195 | |||
| 196 | |||
| 197 | ============================ | ||
| 198 | SIGNED MODULES AND STRIPPING | ||
| 199 | ============================ | ||
| 200 | |||
| 201 | A signed module has a digital signature simply appended at the end. The string | ||
| 202 | "~Module signature appended~." at the end of the module's file confirms that a | ||
| 203 | signature is present, but it does not confirm that the signature is valid! | ||
| 204 | |||
| 205 | Signed modules are BRITTLE as the signature is outside of the defined ELF | ||
| 206 | container. Thus they MAY NOT be stripped once the signature is computed and | ||
| 207 | attached. Note the entire module is the signed payload, including any and all | ||
| 208 | debug information present at the time of signing. | ||
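A quick, illustrative way to check for the marker string from the shell (this only shows that a signature is present, not that it is valid):

    grep -q '~Module signature appended~' module.ko && echo "signature present"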
| 209 | |||
| 210 | |||
| 211 | ====================== | ||
| 212 | LOADING SIGNED MODULES | ||
| 213 | ====================== | ||
| 214 | |||
| 215 | Modules are loaded with insmod, modprobe, init_module() or finit_module(), | ||
| 216 | exactly as for unsigned modules, since no processing is done in userspace. The | ||
| 217 | signature checking is all done within the kernel. | ||
| 218 | |||
| 219 | |||
| 220 | ========================================= | ||
| 221 | NON-VALID SIGNATURES AND UNSIGNED MODULES | ||
| 222 | ========================================= | ||
| 223 | |||
| 224 | If CONFIG_MODULE_SIG_FORCE is enabled or module.sig_enforce=1 is supplied on | ||
| 225 | the kernel command line, the kernel will only load validly signed modules | ||
| 226 | for which it has a public key. Otherwise, it will also load modules that are | ||
| 227 | unsigned. Any module for which the kernel has a key, but which proves to have | ||
| 228 | a signature mismatch, will not be permitted to load. | ||
| 229 | |||
| 230 | Any module that has an unparseable signature will be rejected. | ||
| 231 | |||
| 232 | |||
| 233 | ========================================= | ||
| 234 | ADMINISTERING/PROTECTING THE PRIVATE KEY | ||
| 235 | ========================================= | ||
| 236 | |||
| 237 | Since the private key is used to sign modules, viruses and malware could use | ||
| 238 | the private key to sign modules and compromise the operating system. The | ||
| 239 | private key must be either destroyed or moved to a secure location and not kept | ||
| 240 | in the root directory of the kernel source tree. | ||
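A hedged sketch of that post-build hygiene (the destination path is purely illustrative):

    # copy the private key to offline storage, then remove it from the tree
    cp signing_key.priv /media/secure-storage/      # illustrative destination
    shred -u signing_key.priv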
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 3c12d9a7ed00..8a984e994e61 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
| @@ -16,8 +16,12 @@ ip_default_ttl - INTEGER | |||
| 16 | Default: 64 (as recommended by RFC1700) | 16 | Default: 64 (as recommended by RFC1700) |
| 17 | 17 | ||
| 18 | ip_no_pmtu_disc - BOOLEAN | 18 | ip_no_pmtu_disc - BOOLEAN |
| 19 | Disable Path MTU Discovery. | 19 | Disable Path MTU Discovery. If enabled and a |
| 20 | default FALSE | 20 | fragmentation-required ICMP is received, the PMTU to this |
| 21 | destination will be set to min_pmtu (see below). You will need | ||
| 22 | to raise min_pmtu to the smallest interface MTU on your system | ||
| 23 | manually if you want to avoid locally generated fragments. | ||
| 24 | Default: FALSE | ||
| 21 | 25 | ||
| 22 | min_pmtu - INTEGER | 26 | min_pmtu - INTEGER |
| 23 | default 552 - minimum discovered Path MTU | 27 | default 552 - minimum discovered Path MTU |
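An illustrative way to apply these tunables at runtime (the min_pmtu path is assumed to live under net.ipv4.route, as in mainline kernels):

    sysctl -w net.ipv4.ip_no_pmtu_disc=1       # disable Path MTU Discovery
    sysctl -w net.ipv4.route.min_pmtu=1500     # raise min_pmtu to the local interface MTU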
diff --git a/MAINTAINERS b/MAINTAINERS index 1344816c4c06..31a046213274 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts | |||
| 783 | F: arch/arm/boot/dts/sama*.dtsi | 783 | F: arch/arm/boot/dts/sama*.dtsi |
| 784 | 784 | ||
| 785 | ARM/CALXEDA HIGHBANK ARCHITECTURE | 785 | ARM/CALXEDA HIGHBANK ARCHITECTURE |
| 786 | M: Rob Herring <rob.herring@calxeda.com> | 786 | M: Rob Herring <robh@kernel.org> |
| 787 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 787 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 788 | S: Maintained | 788 | S: Maintained |
| 789 | F: arch/arm/mach-highbank/ | 789 | F: arch/arm/mach-highbank/ |
| @@ -1008,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com> | |||
| 1008 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1008 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 1009 | S: Maintained | 1009 | S: Maintained |
| 1010 | F: arch/arm/mach-keystone/ | 1010 | F: arch/arm/mach-keystone/ |
| 1011 | F: drivers/clk/keystone/ | ||
| 1012 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git | ||
| 1011 | 1013 | ||
| 1012 | ARM/LOGICPD PXA270 MACHINE SUPPORT | 1014 | ARM/LOGICPD PXA270 MACHINE SUPPORT |
| 1013 | M: Lennert Buytenhek <kernel@wantstofly.org> | 1015 | M: Lennert Buytenhek <kernel@wantstofly.org> |
| @@ -1366,6 +1368,9 @@ T: git git://git.xilinx.com/linux-xlnx.git | |||
| 1366 | S: Supported | 1368 | S: Supported |
| 1367 | F: arch/arm/mach-zynq/ | 1369 | F: arch/arm/mach-zynq/ |
| 1368 | F: drivers/cpuidle/cpuidle-zynq.c | 1370 | F: drivers/cpuidle/cpuidle-zynq.c |
| 1371 | N: zynq | ||
| 1372 | N: xilinx | ||
| 1373 | F: drivers/clocksource/cadence_ttc_timer.c | ||
| 1369 | 1374 | ||
| 1370 | ARM SMMU DRIVER | 1375 | ARM SMMU DRIVER |
| 1371 | M: Will Deacon <will.deacon@arm.com> | 1376 | M: Will Deacon <will.deacon@arm.com> |
| @@ -2823,8 +2828,10 @@ F: include/uapi/drm/ | |||
| 2823 | 2828 | ||
| 2824 | INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) | 2829 | INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) |
| 2825 | M: Daniel Vetter <daniel.vetter@ffwll.ch> | 2830 | M: Daniel Vetter <daniel.vetter@ffwll.ch> |
| 2831 | M: Jani Nikula <jani.nikula@linux.intel.com> | ||
| 2826 | L: intel-gfx@lists.freedesktop.org | 2832 | L: intel-gfx@lists.freedesktop.org |
| 2827 | L: dri-devel@lists.freedesktop.org | 2833 | L: dri-devel@lists.freedesktop.org |
| 2834 | Q: http://patchwork.freedesktop.org/project/intel-gfx/ | ||
| 2828 | T: git git://people.freedesktop.org/~danvet/drm-intel | 2835 | T: git git://people.freedesktop.org/~danvet/drm-intel |
| 2829 | S: Supported | 2836 | S: Supported |
| 2830 | F: drivers/gpu/drm/i915/ | 2837 | F: drivers/gpu/drm/i915/ |
| @@ -3761,9 +3768,11 @@ F: include/uapi/linux/gigaset_dev.h | |||
| 3761 | 3768 | ||
| 3762 | GPIO SUBSYSTEM | 3769 | GPIO SUBSYSTEM |
| 3763 | M: Linus Walleij <linus.walleij@linaro.org> | 3770 | M: Linus Walleij <linus.walleij@linaro.org> |
| 3764 | S: Maintained | 3771 | M: Alexandre Courbot <gnurou@gmail.com> |
| 3765 | L: linux-gpio@vger.kernel.org | 3772 | L: linux-gpio@vger.kernel.org |
| 3766 | F: Documentation/gpio.txt | 3773 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git |
| 3774 | S: Maintained | ||
| 3775 | F: Documentation/gpio/ | ||
| 3767 | F: drivers/gpio/ | 3776 | F: drivers/gpio/ |
| 3768 | F: include/linux/gpio* | 3777 | F: include/linux/gpio* |
| 3769 | F: include/asm-generic/gpio.h | 3778 | F: include/asm-generic/gpio.h |
| @@ -3831,6 +3840,12 @@ T: git git://linuxtv.org/media_tree.git | |||
| 3831 | S: Maintained | 3840 | S: Maintained |
| 3832 | F: drivers/media/usb/gspca/ | 3841 | F: drivers/media/usb/gspca/ |
| 3833 | 3842 | ||
| 3843 | GUID PARTITION TABLE (GPT) | ||
| 3844 | M: Davidlohr Bueso <davidlohr@hp.com> | ||
| 3845 | L: linux-efi@vger.kernel.org | ||
| 3846 | S: Maintained | ||
| 3847 | F: block/partitions/efi.* | ||
| 3848 | |||
| 3834 | STK1160 USB VIDEO CAPTURE DRIVER | 3849 | STK1160 USB VIDEO CAPTURE DRIVER |
| 3835 | M: Ezequiel Garcia <elezegarcia@gmail.com> | 3850 | M: Ezequiel Garcia <elezegarcia@gmail.com> |
| 3836 | L: linux-media@vger.kernel.org | 3851 | L: linux-media@vger.kernel.org |
| @@ -5911,12 +5926,21 @@ M: Steffen Klassert <steffen.klassert@secunet.com> | |||
| 5911 | M: Herbert Xu <herbert@gondor.apana.org.au> | 5926 | M: Herbert Xu <herbert@gondor.apana.org.au> |
| 5912 | M: "David S. Miller" <davem@davemloft.net> | 5927 | M: "David S. Miller" <davem@davemloft.net> |
| 5913 | L: netdev@vger.kernel.org | 5928 | L: netdev@vger.kernel.org |
| 5914 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git | 5929 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git |
| 5930 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git | ||
| 5915 | S: Maintained | 5931 | S: Maintained |
| 5916 | F: net/xfrm/ | 5932 | F: net/xfrm/ |
| 5917 | F: net/key/ | 5933 | F: net/key/ |
| 5918 | F: net/ipv4/xfrm* | 5934 | F: net/ipv4/xfrm* |
| 5935 | F: net/ipv4/esp4.c | ||
| 5936 | F: net/ipv4/ah4.c | ||
| 5937 | F: net/ipv4/ipcomp.c | ||
| 5938 | F: net/ipv4/ip_vti.c | ||
| 5919 | F: net/ipv6/xfrm* | 5939 | F: net/ipv6/xfrm* |
| 5940 | F: net/ipv6/esp6.c | ||
| 5941 | F: net/ipv6/ah6.c | ||
| 5942 | F: net/ipv6/ipcomp6.c | ||
| 5943 | F: net/ipv6/ip6_vti.c | ||
| 5920 | F: include/uapi/linux/xfrm.h | 5944 | F: include/uapi/linux/xfrm.h |
| 5921 | F: include/net/xfrm.h | 5945 | F: include/net/xfrm.h |
| 5922 | 5946 | ||
| @@ -6237,7 +6261,7 @@ F: drivers/i2c/busses/i2c-ocores.c | |||
| 6237 | 6261 | ||
| 6238 | OPEN FIRMWARE AND FLATTENED DEVICE TREE | 6262 | OPEN FIRMWARE AND FLATTENED DEVICE TREE |
| 6239 | M: Grant Likely <grant.likely@linaro.org> | 6263 | M: Grant Likely <grant.likely@linaro.org> |
| 6240 | M: Rob Herring <rob.herring@calxeda.com> | 6264 | M: Rob Herring <robh+dt@kernel.org> |
| 6241 | L: devicetree@vger.kernel.org | 6265 | L: devicetree@vger.kernel.org |
| 6242 | W: http://fdt.secretlab.ca | 6266 | W: http://fdt.secretlab.ca |
| 6243 | T: git git://git.secretlab.ca/git/linux-2.6.git | 6267 | T: git git://git.secretlab.ca/git/linux-2.6.git |
| @@ -6249,7 +6273,7 @@ K: of_get_property | |||
| 6249 | K: of_match_table | 6273 | K: of_match_table |
| 6250 | 6274 | ||
| 6251 | OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS | 6275 | OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS |
| 6252 | M: Rob Herring <rob.herring@calxeda.com> | 6276 | M: Rob Herring <robh+dt@kernel.org> |
| 6253 | M: Pawel Moll <pawel.moll@arm.com> | 6277 | M: Pawel Moll <pawel.moll@arm.com> |
| 6254 | M: Mark Rutland <mark.rutland@arm.com> | 6278 | M: Mark Rutland <mark.rutland@arm.com> |
| 6255 | M: Ian Campbell <ijc+devicetree@hellion.org.uk> | 6279 | M: Ian Campbell <ijc+devicetree@hellion.org.uk> |
| @@ -9571,7 +9595,7 @@ F: drivers/xen/*swiotlb* | |||
| 9571 | 9595 | ||
| 9572 | XFS FILESYSTEM | 9596 | XFS FILESYSTEM |
| 9573 | P: Silicon Graphics Inc | 9597 | P: Silicon Graphics Inc |
| 9574 | M: Dave Chinner <dchinner@fromorbit.com> | 9598 | M: Dave Chinner <david@fromorbit.com> |
| 9575 | M: Ben Myers <bpm@sgi.com> | 9599 | M: Ben Myers <bpm@sgi.com> |
| 9576 | M: xfs@oss.sgi.com | 9600 | M: xfs@oss.sgi.com |
| 9577 | L: xfs@oss.sgi.com | 9601 | L: xfs@oss.sgi.com |
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 3 | 1 | VERSION = 3 |
| 2 | PATCHLEVEL = 13 | 2 | PATCHLEVEL = 13 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc4 | 4 | EXTRAVERSION = -rc8 |
| 5 | NAME = One Giant Leap for Frogkind | 5 | NAME = One Giant Leap for Frogkind |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
| @@ -732,19 +732,15 @@ export mod_strip_cmd | |||
| 732 | # Select initial ramdisk compression format, default is gzip(1). | 732 | # Select initial ramdisk compression format, default is gzip(1). |
| 733 | # This shall be used by the dracut(8) tool while creating an initramfs image. | 733 | # This shall be used by the dracut(8) tool while creating an initramfs image. |
| 734 | # | 734 | # |
| 735 | INITRD_COMPRESS=gzip | 735 | INITRD_COMPRESS-y := gzip |
| 736 | ifeq ($(CONFIG_RD_BZIP2), y) | 736 | INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2 |
| 737 | INITRD_COMPRESS=bzip2 | 737 | INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma |
| 738 | else ifeq ($(CONFIG_RD_LZMA), y) | 738 | INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz |
| 739 | INITRD_COMPRESS=lzma | 739 | INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo |
| 740 | else ifeq ($(CONFIG_RD_XZ), y) | 740 | INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4 |
| 741 | INITRD_COMPRESS=xz | 741 | # do not export INITRD_COMPRESS, since we didn't actually |
| 742 | else ifeq ($(CONFIG_RD_LZO), y) | 742 | # choose a sane default compression above. |
| 743 | INITRD_COMPRESS=lzo | 743 | # export INITRD_COMPRESS := $(INITRD_COMPRESS-y) |
| 744 | else ifeq ($(CONFIG_RD_LZ4), y) | ||
| 745 | INITRD_COMPRESS=lz4 | ||
| 746 | endif | ||
| 747 | export INITRD_COMPRESS | ||
| 748 | 744 | ||
| 749 | ifdef CONFIG_MODULE_SIG_ALL | 745 | ifdef CONFIG_MODULE_SIG_ALL |
| 750 | MODSECKEY = ./signing_key.priv | 746 | MODSECKEY = ./signing_key.priv |
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 68125dd766c6..39e58d1cdf90 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h | |||
| @@ -8,7 +8,11 @@ | |||
| 8 | 8 | ||
| 9 | /******** no-legacy-syscalls-ABI *******/ | 9 | /******** no-legacy-syscalls-ABI *******/ |
| 10 | 10 | ||
| 11 | #ifndef _UAPI_ASM_ARC_UNISTD_H | 11 | /* |
| 12 | * Non-typical guard macro to enable inclusion twice in ARCH sys.c | ||
| 13 | * That is how the Generic syscall wrapper generator works | ||
| 14 | */ | ||
| 15 | #if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL) | ||
| 12 | #define _UAPI_ASM_ARC_UNISTD_H | 16 | #define _UAPI_ASM_ARC_UNISTD_H |
| 13 | 17 | ||
| 14 | #define __ARCH_WANT_SYS_EXECVE | 18 | #define __ARCH_WANT_SYS_EXECVE |
| @@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls) | |||
| 36 | #define __NR_sysfs (__NR_arch_specific_syscall + 3) | 40 | #define __NR_sysfs (__NR_arch_specific_syscall + 3) |
| 37 | __SYSCALL(__NR_sysfs, sys_sysfs) | 41 | __SYSCALL(__NR_sysfs, sys_sysfs) |
| 38 | 42 | ||
| 43 | #undef __SYSCALL | ||
| 44 | |||
| 39 | #endif | 45 | #endif |
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 9db5047812f3..177becde7a26 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi | |||
| @@ -559,7 +559,7 @@ | |||
| 559 | compatible = "arm,pl330", "arm,primecell"; | 559 | compatible = "arm,pl330", "arm,primecell"; |
| 560 | reg = <0x10800000 0x1000>; | 560 | reg = <0x10800000 0x1000>; |
| 561 | interrupts = <0 33 0>; | 561 | interrupts = <0 33 0>; |
| 562 | clocks = <&clock 271>; | 562 | clocks = <&clock 346>; |
| 563 | clock-names = "apb_pclk"; | 563 | clock-names = "apb_pclk"; |
| 564 | #dma-cells = <1>; | 564 | #dma-cells = <1>; |
| 565 | #dma-channels = <8>; | 565 | #dma-channels = <8>; |
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index ee845fad939b..9987dd0e9c59 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi | |||
| @@ -87,9 +87,9 @@ | |||
| 87 | interrupts = <1 9 0xf04>; | 87 | interrupts = <1 9 0xf04>; |
| 88 | }; | 88 | }; |
| 89 | 89 | ||
| 90 | gpio0: gpio@ffc40000 { | 90 | gpio0: gpio@e6050000 { |
| 91 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 91 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
| 92 | reg = <0 0xffc40000 0 0x2c>; | 92 | reg = <0 0xe6050000 0 0x50>; |
| 93 | interrupt-parent = <&gic>; | 93 | interrupt-parent = <&gic>; |
| 94 | interrupts = <0 4 0x4>; | 94 | interrupts = <0 4 0x4>; |
| 95 | #gpio-cells = <2>; | 95 | #gpio-cells = <2>; |
| @@ -99,9 +99,9 @@ | |||
| 99 | interrupt-controller; | 99 | interrupt-controller; |
| 100 | }; | 100 | }; |
| 101 | 101 | ||
| 102 | gpio1: gpio@ffc41000 { | 102 | gpio1: gpio@e6051000 { |
| 103 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 103 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
| 104 | reg = <0 0xffc41000 0 0x2c>; | 104 | reg = <0 0xe6051000 0 0x50>; |
| 105 | interrupt-parent = <&gic>; | 105 | interrupt-parent = <&gic>; |
| 106 | interrupts = <0 5 0x4>; | 106 | interrupts = <0 5 0x4>; |
| 107 | #gpio-cells = <2>; | 107 | #gpio-cells = <2>; |
| @@ -111,9 +111,9 @@ | |||
| 111 | interrupt-controller; | 111 | interrupt-controller; |
| 112 | }; | 112 | }; |
| 113 | 113 | ||
| 114 | gpio2: gpio@ffc42000 { | 114 | gpio2: gpio@e6052000 { |
| 115 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 115 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
| 116 | reg = <0 0xffc42000 0 0x2c>; | 116 | reg = <0 0xe6052000 0 0x50>; |
| 117 | interrupt-parent = <&gic>; | 117 | interrupt-parent = <&gic>; |
| 118 | interrupts = <0 6 0x4>; | 118 | interrupts = <0 6 0x4>; |
| 119 | #gpio-cells = <2>; | 119 | #gpio-cells = <2>; |
| @@ -123,9 +123,9 @@ | |||
| 123 | interrupt-controller; | 123 | interrupt-controller; |
| 124 | }; | 124 | }; |
| 125 | 125 | ||
| 126 | gpio3: gpio@ffc43000 { | 126 | gpio3: gpio@e6053000 { |
| 127 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 127 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
| 128 | reg = <0 0xffc43000 0 0x2c>; | 128 | reg = <0 0xe6053000 0 0x50>; |
| 129 | interrupt-parent = <&gic>; | 129 | interrupt-parent = <&gic>; |
| 130 | interrupts = <0 7 0x4>; | 130 | interrupts = <0 7 0x4>; |
| 131 | #gpio-cells = <2>; | 131 | #gpio-cells = <2>; |
| @@ -135,9 +135,9 @@ | |||
| 135 | interrupt-controller; | 135 | interrupt-controller; |
| 136 | }; | 136 | }; |
| 137 | 137 | ||
| 138 | gpio4: gpio@ffc44000 { | 138 | gpio4: gpio@e6054000 { |
| 139 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 139 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
| 140 | reg = <0 0xffc44000 0 0x2c>; | 140 | reg = <0 0xe6054000 0 0x50>; |
| 141 | interrupt-parent = <&gic>; | 141 | interrupt-parent = <&gic>; |
| 142 | interrupts = <0 8 0x4>; | 142 | interrupts = <0 8 0x4>; |
| 143 | #gpio-cells = <2>; | 143 | #gpio-cells = <2>; |
| @@ -147,9 +147,9 @@ | |||
| 147 | interrupt-controller; | 147 | interrupt-controller; |
| 148 | }; | 148 | }; |
| 149 | 149 | ||
| 150 | gpio5: gpio@ffc45000 { | 150 | gpio5: gpio@e6055000 { |
| 151 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 151 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
| 152 | reg = <0 0xffc45000 0 0x2c>; | 152 | reg = <0 0xe6055000 0 0x50>; |
| 153 | interrupt-parent = <&gic>; | 153 | interrupt-parent = <&gic>; |
| 154 | interrupts = <0 9 0x4>; | 154 | interrupts = <0 9 0x4>; |
| 155 | #gpio-cells = <2>; | 155 | #gpio-cells = <2>; |
| @@ -241,7 +241,7 @@ | |||
| 241 | 241 | ||
| 242 | sdhi0: sdhi@ee100000 { | 242 | sdhi0: sdhi@ee100000 { |
| 243 | compatible = "renesas,sdhi-r8a7790"; | 243 | compatible = "renesas,sdhi-r8a7790"; |
| 244 | reg = <0 0xee100000 0 0x100>; | 244 | reg = <0 0xee100000 0 0x200>; |
| 245 | interrupt-parent = <&gic>; | 245 | interrupt-parent = <&gic>; |
| 246 | interrupts = <0 165 4>; | 246 | interrupts = <0 165 4>; |
| 247 | cap-sd-highspeed; | 247 | cap-sd-highspeed; |
| @@ -250,7 +250,7 @@ | |||
| 250 | 250 | ||
| 251 | sdhi1: sdhi@ee120000 { | 251 | sdhi1: sdhi@ee120000 { |
| 252 | compatible = "renesas,sdhi-r8a7790"; | 252 | compatible = "renesas,sdhi-r8a7790"; |
| 253 | reg = <0 0xee120000 0 0x100>; | 253 | reg = <0 0xee120000 0 0x200>; |
| 254 | interrupt-parent = <&gic>; | 254 | interrupt-parent = <&gic>; |
| 255 | interrupts = <0 166 4>; | 255 | interrupts = <0 166 4>; |
| 256 | cap-sd-highspeed; | 256 | cap-sd-highspeed; |
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped index 64205d453260..71e5fc7cfb18 100644 --- a/arch/arm/crypto/aesbs-core.S_shipped +++ b/arch/arm/crypto/aesbs-core.S_shipped | |||
| @@ -58,7 +58,7 @@ | |||
| 58 | # define VFP_ABI_FRAME 0 | 58 | # define VFP_ABI_FRAME 0 |
| 59 | # define BSAES_ASM_EXTENDED_KEY | 59 | # define BSAES_ASM_EXTENDED_KEY |
| 60 | # define XTS_CHAIN_TWEAK | 60 | # define XTS_CHAIN_TWEAK |
| 61 | # define __ARM_ARCH__ __LINUX_ARM_ARCH__ | 61 | # define __ARM_ARCH__ 7 |
| 62 | #endif | 62 | #endif |
| 63 | 63 | ||
| 64 | #ifdef __thumb__ | 64 | #ifdef __thumb__ |
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl index f3d96d932573..be068db960ee 100644 --- a/arch/arm/crypto/bsaes-armv7.pl +++ b/arch/arm/crypto/bsaes-armv7.pl | |||
| @@ -701,7 +701,7 @@ $code.=<<___; | |||
| 701 | # define VFP_ABI_FRAME 0 | 701 | # define VFP_ABI_FRAME 0 |
| 702 | # define BSAES_ASM_EXTENDED_KEY | 702 | # define BSAES_ASM_EXTENDED_KEY |
| 703 | # define XTS_CHAIN_TWEAK | 703 | # define XTS_CHAIN_TWEAK |
| 704 | # define __ARM_ARCH__ __LINUX_ARM_ARCH__ | 704 | # define __ARM_ARCH__ 7 |
| 705 | #endif | 705 | #endif |
| 706 | 706 | ||
| 707 | #ifdef __thumb__ | 707 | #ifdef __thumb__ |
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 3c597c222ef2..fbeb39c869e9 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h | |||
| @@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t); | |||
| 329 | */ | 329 | */ |
| 330 | #define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) | 330 | #define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) |
| 331 | #define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) | 331 | #define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) |
| 332 | #define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) | 332 | #define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) |
| 333 | #define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) | 333 | #define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) |
| 334 | #define iounmap __arm_iounmap | 334 | #define iounmap __arm_iounmap |
| 335 | 335 | ||
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 6976b03e5213..8756e4bcdba0 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
| @@ -347,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x) | |||
| 347 | #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET | 347 | #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET |
| 348 | 348 | ||
| 349 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 349 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
| 350 | #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) | 350 | #define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \ |
| 351 | && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) ) | ||
| 351 | 352 | ||
| 352 | #endif | 353 | #endif |
| 353 | 354 | ||
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 75579a9d6f76..3759cacdd7f8 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h | |||
| @@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
| 117 | return __set_phys_to_machine(pfn, mfn); | 117 | return __set_phys_to_machine(pfn, mfn); |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | #define xen_remap(cookie, size) ioremap_cached((cookie), (size)); | 120 | #define xen_remap(cookie, size) ioremap_cache((cookie), (size)); |
| 121 | 121 | ||
| 122 | #endif /* _ASM_ARM_XEN_PAGE_H */ | 122 | #endif /* _ASM_ARM_XEN_PAGE_H */ |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 7940241f0576..6eda3bf85c52 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
| @@ -36,7 +36,13 @@ | |||
| 36 | #include <asm/system_misc.h> | 36 | #include <asm/system_misc.h> |
| 37 | #include <asm/opcodes.h> | 37 | #include <asm/opcodes.h> |
| 38 | 38 | ||
| 39 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; | 39 | static const char *handler[]= { |
| 40 | "prefetch abort", | ||
| 41 | "data abort", | ||
| 42 | "address exception", | ||
| 43 | "interrupt", | ||
| 44 | "undefined instruction", | ||
| 45 | }; | ||
| 40 | 46 | ||
| 41 | void *vectors_page; | 47 | void *vectors_page; |
| 42 | 48 | ||
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c index 9ee78f7b4990..782f6c71fa0a 100644 --- a/arch/arm/mach-footbridge/dc21285-timer.c +++ b/arch/arm/mach-footbridge/dc21285-timer.c | |||
| @@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = { | |||
| 96 | void __init footbridge_timer_init(void) | 96 | void __init footbridge_timer_init(void) |
| 97 | { | 97 | { |
| 98 | struct clock_event_device *ce = &ckevt_dc21285; | 98 | struct clock_event_device *ce = &ckevt_dc21285; |
| 99 | unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16); | ||
| 99 | 100 | ||
| 100 | clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16); | 101 | clocksource_register_hz(&cksrc_dc21285, rate); |
| 101 | 102 | ||
| 102 | setup_irq(ce->irq, &footbridge_timer_irq); | 103 | setup_irq(ce->irq, &footbridge_timer_irq); |
| 103 | 104 | ||
| 104 | ce->cpumask = cpumask_of(smp_processor_id()); | 105 | ce->cpumask = cpumask_of(smp_processor_id()); |
| 105 | clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff); | 106 | clockevents_config_and_register(ce, rate, 0x4, 0xffffff); |
| 106 | } | 107 | } |
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index 4ec8d82b0492..44a59c3abfb0 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c | |||
| @@ -242,12 +242,18 @@ static void __init ldp_display_init(void) | |||
| 242 | 242 | ||
| 243 | static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) | 243 | static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) |
| 244 | { | 244 | { |
| 245 | int res; | ||
| 246 | |||
| 245 | /* LCD enable GPIO */ | 247 | /* LCD enable GPIO */ |
| 246 | ldp_lcd_pdata.enable_gpio = gpio + 7; | 248 | ldp_lcd_pdata.enable_gpio = gpio + 7; |
| 247 | 249 | ||
| 248 | /* Backlight enable GPIO */ | 250 | /* Backlight enable GPIO */ |
| 249 | ldp_lcd_pdata.backlight_gpio = gpio + 15; | 251 | ldp_lcd_pdata.backlight_gpio = gpio + 15; |
| 250 | 252 | ||
| 253 | res = platform_device_register(&ldp_lcd_device); | ||
| 254 | if (res) | ||
| 255 | pr_err("Unable to register LCD: %d\n", res); | ||
| 256 | |||
| 251 | return 0; | 257 | return 0; |
| 252 | } | 258 | } |
| 253 | 259 | ||
| @@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = { | |||
| 346 | 352 | ||
| 347 | static struct platform_device *ldp_devices[] __initdata = { | 353 | static struct platform_device *ldp_devices[] __initdata = { |
| 348 | &ldp_gpio_keys_device, | 354 | &ldp_gpio_keys_device, |
| 349 | &ldp_lcd_device, | ||
| 350 | }; | 355 | }; |
| 351 | 356 | ||
| 352 | #ifdef CONFIG_OMAP_MUX | 357 | #ifdef CONFIG_OMAP_MUX |
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 58347bb874a0..4cf165502b35 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
| @@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = { | |||
| 101 | { "dss_hdmi", "omapdss_hdmi", -1 }, | 101 | { "dss_hdmi", "omapdss_hdmi", -1 }, |
| 102 | }; | 102 | }; |
| 103 | 103 | ||
| 104 | static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) | ||
| 105 | { | ||
| 106 | u32 enable_mask, enable_shift; | ||
| 107 | u32 pipd_mask, pipd_shift; | ||
| 108 | u32 reg; | ||
| 109 | |||
| 110 | if (dsi_id == 0) { | ||
| 111 | enable_mask = OMAP4_DSI1_LANEENABLE_MASK; | ||
| 112 | enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT; | ||
| 113 | pipd_mask = OMAP4_DSI1_PIPD_MASK; | ||
| 114 | pipd_shift = OMAP4_DSI1_PIPD_SHIFT; | ||
| 115 | } else if (dsi_id == 1) { | ||
| 116 | enable_mask = OMAP4_DSI2_LANEENABLE_MASK; | ||
| 117 | enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT; | ||
| 118 | pipd_mask = OMAP4_DSI2_PIPD_MASK; | ||
| 119 | pipd_shift = OMAP4_DSI2_PIPD_SHIFT; | ||
| 120 | } else { | ||
| 121 | return -ENODEV; | ||
| 122 | } | ||
| 123 | |||
| 124 | reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); | ||
| 125 | |||
| 126 | reg &= ~enable_mask; | ||
| 127 | reg &= ~pipd_mask; | ||
| 128 | |||
| 129 | reg |= (lanes << enable_shift) & enable_mask; | ||
| 130 | reg |= (lanes << pipd_shift) & pipd_mask; | ||
| 131 | |||
| 132 | omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); | ||
| 133 | |||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | |||
| 104 | static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) | 137 | static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) |
| 105 | { | 138 | { |
| 139 | if (cpu_is_omap44xx()) | ||
| 140 | return omap4_dsi_mux_pads(dsi_id, lane_mask); | ||
| 141 | |||
| 106 | return 0; | 142 | return 0; |
| 107 | } | 143 | } |
| 108 | 144 | ||
| 109 | static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) | 145 | static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) |
| 110 | { | 146 | { |
| 147 | if (cpu_is_omap44xx()) | ||
| 148 | omap4_dsi_mux_pads(dsi_id, 0); | ||
| 111 | } | 149 | } |
| 112 | 150 | ||
| 113 | static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) | 151 | static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) |
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index 56cebb05509e..d23c77fadb31 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | |||
| @@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = { | |||
| 796 | 796 | ||
| 797 | /* gpmc */ | 797 | /* gpmc */ |
| 798 | static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = { | 798 | static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = { |
| 799 | { .irq = 20 }, | 799 | { .irq = 20 + OMAP_INTC_START, }, |
| 800 | { .irq = -1 } | 800 | { .irq = -1 } |
| 801 | }; | 801 | }; |
| 802 | 802 | ||
| @@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = { | |||
| 841 | }; | 841 | }; |
| 842 | 842 | ||
| 843 | static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = { | 843 | static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = { |
| 844 | { .irq = 52 }, | 844 | { .irq = 52 + OMAP_INTC_START, }, |
| 845 | { .irq = -1 } | 845 | { .irq = -1 } |
| 846 | }; | 846 | }; |
| 847 | 847 | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index d33742908f97..4c3b1e6df508 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
| @@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = { | |||
| 2165 | }; | 2165 | }; |
| 2166 | 2166 | ||
| 2167 | static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = { | 2167 | static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = { |
| 2168 | { .irq = 20 }, | 2168 | { .irq = 20 + OMAP_INTC_START, }, |
| 2169 | { .irq = -1 } | 2169 | { .irq = -1 } |
| 2170 | }; | 2170 | }; |
| 2171 | 2171 | ||
| @@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = { | |||
| 2999 | 2999 | ||
| 3000 | static struct omap_hwmod omap3xxx_mmu_isp_hwmod; | 3000 | static struct omap_hwmod omap3xxx_mmu_isp_hwmod; |
| 3001 | static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = { | 3001 | static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = { |
| 3002 | { .irq = 24 }, | 3002 | { .irq = 24 + OMAP_INTC_START, }, |
| 3003 | { .irq = -1 } | 3003 | { .irq = -1 } |
| 3004 | }; | 3004 | }; |
| 3005 | 3005 | ||
| @@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = { | |||
| 3041 | 3041 | ||
| 3042 | static struct omap_hwmod omap3xxx_mmu_iva_hwmod; | 3042 | static struct omap_hwmod omap3xxx_mmu_iva_hwmod; |
| 3043 | static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = { | 3043 | static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = { |
| 3044 | { .irq = 28 }, | 3044 | { .irq = 28 + OMAP_INTC_START, }, |
| 3045 | { .irq = -1 } | 3045 | { .irq = -1 } |
| 3046 | }; | 3046 | }; |
| 3047 | 3047 | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index db32d5380b11..18f333c440db 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c | |||
| @@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = { | |||
| 1637 | .class = &dra7xx_uart_hwmod_class, | 1637 | .class = &dra7xx_uart_hwmod_class, |
| 1638 | .clkdm_name = "l4per_clkdm", | 1638 | .clkdm_name = "l4per_clkdm", |
| 1639 | .main_clk = "uart1_gfclk_mux", | 1639 | .main_clk = "uart1_gfclk_mux", |
| 1640 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 1640 | .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS, |
| 1641 | .prcm = { | 1641 | .prcm = { |
| 1642 | .omap4 = { | 1642 | .omap4 = { |
| 1643 | .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, | 1643 | .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, |
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h index 2a086e8373eb..958cd6af9384 100644 --- a/arch/arm/mach-pxa/include/mach/lubbock.h +++ b/arch/arm/mach-pxa/include/mach/lubbock.h | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <mach/irqs.h> | ||
| 14 | |||
| 13 | #define LUBBOCK_ETH_PHYS PXA_CS3_PHYS | 15 | #define LUBBOCK_ETH_PHYS PXA_CS3_PHYS |
| 14 | 16 | ||
| 15 | #define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS | 17 | #define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS |
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c index 7eb9a10fc1af..2fddf38192df 100644 --- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c +++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c | |||
| @@ -8,8 +8,6 @@ | |||
| 8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/clk-provider.h> | ||
| 12 | #include <linux/irqchip.h> | ||
| 13 | #include <linux/of_platform.h> | 11 | #include <linux/of_platform.h> |
| 14 | 12 | ||
| 15 | #include <asm/mach/arch.h> | 13 | #include <asm/mach/arch.h> |
| @@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void) | |||
| 48 | panic("SoC is not S3C64xx!"); | 46 | panic("SoC is not S3C64xx!"); |
| 49 | } | 47 | } |
| 50 | 48 | ||
| 51 | static void __init s3c64xx_dt_init_irq(void) | ||
| 52 | { | ||
| 53 | of_clk_init(NULL); | ||
| 54 | samsung_wdt_reset_of_init(); | ||
| 55 | irqchip_init(); | ||
| 56 | }; | ||
| 57 | |||
| 58 | static void __init s3c64xx_dt_init_machine(void) | 49 | static void __init s3c64xx_dt_init_machine(void) |
| 59 | { | 50 | { |
| 51 | samsung_wdt_reset_of_init(); | ||
| 60 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 52 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
| 61 | } | 53 | } |
| 62 | 54 | ||
| @@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)") | |||
| 79 | /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */ | 71 | /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */ |
| 80 | .dt_compat = s3c64xx_dt_compat, | 72 | .dt_compat = s3c64xx_dt_compat, |
| 81 | .map_io = s3c64xx_dt_map_io, | 73 | .map_io = s3c64xx_dt_map_io, |
| 82 | .init_irq = s3c64xx_dt_init_irq, | ||
| 83 | .init_machine = s3c64xx_dt_init_machine, | 74 | .init_machine = s3c64xx_dt_init_machine, |
| 84 | .restart = s3c64xx_dt_restart, | 75 | .restart = s3c64xx_dt_restart, |
| 85 | MACHINE_END | 76 | MACHINE_END |
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 958e3cbf0ac2..8ea87bd45c33 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c | |||
| @@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = { | |||
| 483 | .id = 0, | 483 | .id = 0, |
| 484 | .dev = { | 484 | .dev = { |
| 485 | .platform_data = &lcdc0_info, | 485 | .platform_data = &lcdc0_info, |
| 486 | .coherent_dma_mask = ~0, | 486 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 487 | }, | 487 | }, |
| 488 | }; | 488 | }; |
| 489 | 489 | ||
| @@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = { | |||
| 580 | .id = 1, | 580 | .id = 1, |
| 581 | .dev = { | 581 | .dev = { |
| 582 | .platform_data = &hdmi_lcdc_info, | 582 | .platform_data = &hdmi_lcdc_info, |
| 583 | .coherent_dma_mask = ~0, | 583 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 584 | }, | 584 | }, |
| 585 | }; | 585 | }; |
| 586 | 586 | ||
| @@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = { | |||
| 614 | REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), | 614 | REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), |
| 615 | }; | 615 | }; |
| 616 | 616 | ||
| 617 | /* Fixed 5.0V regulator used by LCD backlight */ | ||
| 618 | static struct regulator_consumer_supply fixed5v0_power_consumers[] = { | ||
| 619 | REGULATOR_SUPPLY("power", "pwm-backlight.0"), | ||
| 620 | }; | ||
| 621 | |||
| 617 | /* Fixed 3.3V regulator to be used by SDHI0 */ | 622 | /* Fixed 3.3V regulator to be used by SDHI0 */ |
| 618 | static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { | 623 | static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { |
| 619 | REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), | 624 | REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), |
| @@ -1196,6 +1201,8 @@ static void __init eva_init(void) | |||
| 1196 | 1201 | ||
| 1197 | regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, | 1202 | regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, |
| 1198 | ARRAY_SIZE(fixed3v3_power_consumers), 3300000); | 1203 | ARRAY_SIZE(fixed3v3_power_consumers), 3300000); |
| 1204 | regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers, | ||
| 1205 | ARRAY_SIZE(fixed5v0_power_consumers), 5000000); | ||
| 1199 | 1206 | ||
| 1200 | pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); | 1207 | pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); |
| 1201 | pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); | 1208 | pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); |
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c index 38611526fe9a..3c4995aebd22 100644 --- a/arch/arm/mach-shmobile/board-bockw.c +++ b/arch/arm/mach-shmobile/board-bockw.c | |||
| @@ -679,7 +679,7 @@ static void __init bockw_init(void) | |||
| 679 | .id = i, | 679 | .id = i, |
| 680 | .data = &rsnd_card_info[i], | 680 | .data = &rsnd_card_info[i], |
| 681 | .size_data = sizeof(struct asoc_simple_card_info), | 681 | .size_data = sizeof(struct asoc_simple_card_info), |
| 682 | .dma_mask = ~0, | 682 | .dma_mask = DMA_BIT_MASK(32), |
| 683 | }; | 683 | }; |
| 684 | 684 | ||
| 685 | platform_device_register_full(&cardinfo); | 685 | platform_device_register_full(&cardinfo); |
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index fe689b7fdc9e..bc40b853ffd3 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c | |||
| @@ -334,7 +334,7 @@ static struct platform_device lcdc_device = { | |||
| 334 | .resource = lcdc_resources, | 334 | .resource = lcdc_resources, |
| 335 | .dev = { | 335 | .dev = { |
| 336 | .platform_data = &lcdc_info, | 336 | .platform_data = &lcdc_info, |
| 337 | .coherent_dma_mask = ~0, | 337 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 338 | }, | 338 | }, |
| 339 | }; | 339 | }; |
| 340 | 340 | ||
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index a8d3ce646fb9..e0406fd37390 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c | |||
| @@ -245,7 +245,9 @@ static void __init lager_init(void) | |||
| 245 | { | 245 | { |
| 246 | lager_add_standard_devices(); | 246 | lager_add_standard_devices(); |
| 247 | 247 | ||
| 248 | phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup); | 248 | if (IS_ENABLED(CONFIG_PHYLIB)) |
| 249 | phy_register_fixup_for_id("r8a7790-ether-ff:01", | ||
| 250 | lager_ksz8041_fixup); | ||
| 249 | } | 251 | } |
| 250 | 252 | ||
| 251 | static const char * const lager_boards_compat_dt[] __initconst = { | 253 | static const char * const lager_boards_compat_dt[] __initconst = { |
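The lager change above guards the PHY fixup registration with IS_ENABLED(CONFIG_PHYLIB), so the fixup call is only compiled in when phylib support is configured. A minimal sketch of the IS_ENABLED() pattern follows; the function name and message are illustrative, not taken from the patch:

#include <linux/kconfig.h>
#include <linux/printk.h>

static void example_phy_setup(void)
{
	/*
	 * IS_ENABLED(CONFIG_PHYLIB) expands to 1 when phylib is built in or
	 * modular and to 0 otherwise; the compiler drops the dead branch, so
	 * the guarded call disappears without an #ifdef while still being
	 * type-checked in every configuration.
	 */
	if (IS_ENABLED(CONFIG_PHYLIB))
		pr_info("phylib available, PHY fixups can be registered\n");
}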
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index af06753eb809..e721d2ccceae 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c | |||
| @@ -409,7 +409,7 @@ static struct platform_device lcdc_device = { | |||
| 409 | .resource = lcdc_resources, | 409 | .resource = lcdc_resources, |
| 410 | .dev = { | 410 | .dev = { |
| 411 | .platform_data = &lcdc_info, | 411 | .platform_data = &lcdc_info, |
| 412 | .coherent_dma_mask = ~0, | 412 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 413 | }, | 413 | }, |
| 414 | }; | 414 | }; |
| 415 | 415 | ||
| @@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = { | |||
| 499 | .id = 1, | 499 | .id = 1, |
| 500 | .dev = { | 500 | .dev = { |
| 501 | .platform_data = &hdmi_lcdc_info, | 501 | .platform_data = &hdmi_lcdc_info, |
| 502 | .coherent_dma_mask = ~0, | 502 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 503 | }, | 503 | }, |
| 504 | }; | 504 | }; |
| 505 | 505 | ||
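The board hunks above (armadillo800eva, bockw, kzm9g, mackerel) all replace an open-coded ~0 DMA mask with DMA_BIT_MASK(32), making the devices' 32-bit addressing limit explicit. A minimal sketch of what the macro yields, assuming its standard definition in <linux/dma-mapping.h>; the variable name is illustrative:

#include <linux/types.h>
#include <linux/dma-mapping.h>	/* DMA_BIT_MASK() */

/*
 * DMA_BIT_MASK(n) is ((1ULL << (n)) - 1) for n < 64 and ~0ULL for n == 64,
 * so DMA_BIT_MASK(32) is 0x00000000ffffffff: the device can only address
 * the low 4 GiB.  A bare ~0 is an int-width all-ones constant whose width
 * and signedness depend on the assignment context.
 */
static const u64 example_coherent_dma_mask = DMA_BIT_MASK(32);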
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 6d5ba9afb16a..3387e60e4ea3 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
| @@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) | |||
| 175 | unsigned long i; | 175 | unsigned long i; |
| 176 | if (cache_is_vipt_nonaliasing()) { | 176 | if (cache_is_vipt_nonaliasing()) { |
| 177 | for (i = 0; i < (1 << compound_order(page)); i++) { | 177 | for (i = 0; i < (1 << compound_order(page)); i++) { |
| 178 | void *addr = kmap_atomic(page); | 178 | void *addr = kmap_atomic(page + i); |
| 179 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 179 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
| 180 | kunmap_atomic(addr); | 180 | kunmap_atomic(addr); |
| 181 | } | 181 | } |
| 182 | } else { | 182 | } else { |
| 183 | for (i = 0; i < (1 << compound_order(page)); i++) { | 183 | for (i = 0; i < (1 << compound_order(page)); i++) { |
| 184 | void *addr = kmap_high_get(page); | 184 | void *addr = kmap_high_get(page + i); |
| 185 | if (addr) { | 185 | if (addr) { |
| 186 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 186 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
| 187 | kunmap_high(page); | 187 | kunmap_high(page + i); |
| 188 | } | 188 | } |
| 189 | } | 189 | } |
| 190 | } | 190 | } |
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 83e4f959ee47..85501238b425 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c | |||
| @@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, | |||
| 96 | struct remap_data *info = data; | 96 | struct remap_data *info = data; |
| 97 | struct page *page = info->pages[info->index++]; | 97 | struct page *page = info->pages[info->index++]; |
| 98 | unsigned long pfn = page_to_pfn(page); | 98 | unsigned long pfn = page_to_pfn(page); |
| 99 | pte_t pte = pfn_pte(pfn, info->prot); | 99 | pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); |
| 100 | 100 | ||
| 101 | if (map_foreign_page(pfn, info->fgmfn, info->domid)) | 101 | if (map_foreign_page(pfn, info->fgmfn, info->domid)) |
| 102 | return -EFAULT; | 102 | return -EFAULT; |
| @@ -224,10 +224,10 @@ static int __init xen_guest_init(void) | |||
| 224 | } | 224 | } |
| 225 | if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res)) | 225 | if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res)) |
| 226 | return 0; | 226 | return 0; |
| 227 | xen_hvm_resume_frames = res.start >> PAGE_SHIFT; | 227 | xen_hvm_resume_frames = res.start; |
| 228 | xen_events_irq = irq_of_parse_and_map(node, 0); | 228 | xen_events_irq = irq_of_parse_and_map(node, 0); |
| 229 | pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n", | 229 | pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n", |
| 230 | version, xen_events_irq, xen_hvm_resume_frames); | 230 | version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT)); |
| 231 | xen_domain_type = XEN_HVM_DOMAIN; | 231 | xen_domain_type = XEN_HVM_DOMAIN; |
| 232 | 232 | ||
| 233 | xen_setup_features(); | 233 | xen_setup_features(); |
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h index 2820f1a6eebe..dde3fc9c49f0 100644 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ b/arch/arm64/include/asm/xen/page-coherent.h | |||
| @@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | |||
| 23 | unsigned long offset, size_t size, enum dma_data_direction dir, | 23 | unsigned long offset, size_t size, enum dma_data_direction dir, |
| 24 | struct dma_attrs *attrs) | 24 | struct dma_attrs *attrs) |
| 25 | { | 25 | { |
| 26 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | ||
| 27 | } | 26 | } |
| 28 | 27 | ||
| 29 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 28 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
| 30 | size_t size, enum dma_data_direction dir, | 29 | size_t size, enum dma_data_direction dir, |
| 31 | struct dma_attrs *attrs) | 30 | struct dma_attrs *attrs) |
| 32 | { | 31 | { |
| 33 | __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); | ||
| 34 | } | 32 | } |
| 35 | 33 | ||
| 36 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | 34 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, |
| 37 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | 35 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
| 38 | { | 36 | { |
| 39 | __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); | ||
| 40 | } | 37 | } |
| 41 | 38 | ||
| 42 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, | 39 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, |
| 43 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | 40 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
| 44 | { | 41 | { |
| 45 | __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); | ||
| 46 | } | 42 | } |
| 47 | #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ | 43 | #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ |
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6777a2192b83..6a8928bba03c 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
| @@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, | |||
| 214 | { | 214 | { |
| 215 | int err, len, type, disabled = !ctrl.enabled; | 215 | int err, len, type, disabled = !ctrl.enabled; |
| 216 | 216 | ||
| 217 | if (disabled) { | 217 | attr->disabled = disabled; |
| 218 | len = 0; | 218 | if (disabled) |
| 219 | type = HW_BREAKPOINT_EMPTY; | 219 | return 0; |
| 220 | } else { | 220 | |
| 221 | err = arch_bp_generic_fields(ctrl, &len, &type); | 221 | err = arch_bp_generic_fields(ctrl, &len, &type); |
| 222 | if (err) | 222 | if (err) |
| 223 | return err; | 223 | return err; |
| 224 | 224 | ||
| 225 | switch (note_type) { | 225 | switch (note_type) { |
| 226 | case NT_ARM_HW_BREAK: | 226 | case NT_ARM_HW_BREAK: |
| 227 | if ((type & HW_BREAKPOINT_X) != type) | 227 | if ((type & HW_BREAKPOINT_X) != type) |
| 228 | return -EINVAL; | ||
| 229 | break; | ||
| 230 | case NT_ARM_HW_WATCH: | ||
| 231 | if ((type & HW_BREAKPOINT_RW) != type) | ||
| 232 | return -EINVAL; | ||
| 233 | break; | ||
| 234 | default: | ||
| 235 | return -EINVAL; | 228 | return -EINVAL; |
| 236 | } | 229 | break; |
| 230 | case NT_ARM_HW_WATCH: | ||
| 231 | if ((type & HW_BREAKPOINT_RW) != type) | ||
| 232 | return -EINVAL; | ||
| 233 | break; | ||
| 234 | default: | ||
| 235 | return -EINVAL; | ||
| 237 | } | 236 | } |
| 238 | 237 | ||
| 239 | attr->bp_len = len; | 238 | attr->bp_len = len; |
| 240 | attr->bp_type = type; | 239 | attr->bp_type = type; |
| 241 | attr->disabled = disabled; | ||
| 242 | 240 | ||
| 243 | return 0; | 241 | return 0; |
| 244 | } | 242 | } |
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index f0e2784e7cca..2f9b751878ba 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h | |||
| @@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma | |||
| 125 | void mark_rodata_ro(void); | 125 | void mark_rodata_ro(void); |
| 126 | #endif | 126 | #endif |
| 127 | 127 | ||
| 128 | #ifdef CONFIG_PA8X00 | ||
| 129 | /* Only pa8800, pa8900 needs this */ | ||
| 130 | |||
| 131 | #include <asm/kmap_types.h> | 128 | #include <asm/kmap_types.h> |
| 132 | 129 | ||
| 133 | #define ARCH_HAS_KMAP | 130 | #define ARCH_HAS_KMAP |
| 134 | 131 | ||
| 135 | void kunmap_parisc(void *addr); | ||
| 136 | |||
| 137 | static inline void *kmap(struct page *page) | 132 | static inline void *kmap(struct page *page) |
| 138 | { | 133 | { |
| 139 | might_sleep(); | 134 | might_sleep(); |
| 135 | flush_dcache_page(page); | ||
| 140 | return page_address(page); | 136 | return page_address(page); |
| 141 | } | 137 | } |
| 142 | 138 | ||
| 143 | static inline void kunmap(struct page *page) | 139 | static inline void kunmap(struct page *page) |
| 144 | { | 140 | { |
| 145 | kunmap_parisc(page_address(page)); | 141 | flush_kernel_dcache_page_addr(page_address(page)); |
| 146 | } | 142 | } |
| 147 | 143 | ||
| 148 | static inline void *kmap_atomic(struct page *page) | 144 | static inline void *kmap_atomic(struct page *page) |
| 149 | { | 145 | { |
| 150 | pagefault_disable(); | 146 | pagefault_disable(); |
| 147 | flush_dcache_page(page); | ||
| 151 | return page_address(page); | 148 | return page_address(page); |
| 152 | } | 149 | } |
| 153 | 150 | ||
| 154 | static inline void __kunmap_atomic(void *addr) | 151 | static inline void __kunmap_atomic(void *addr) |
| 155 | { | 152 | { |
| 156 | kunmap_parisc(addr); | 153 | flush_kernel_dcache_page_addr(addr); |
| 157 | pagefault_enable(); | 154 | pagefault_enable(); |
| 158 | } | 155 | } |
| 159 | 156 | ||
| 160 | #define kmap_atomic_prot(page, prot) kmap_atomic(page) | 157 | #define kmap_atomic_prot(page, prot) kmap_atomic(page) |
| 161 | #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) | 158 | #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) |
| 162 | #define kmap_atomic_to_page(ptr) virt_to_page(ptr) | 159 | #define kmap_atomic_to_page(ptr) virt_to_page(ptr) |
| 163 | #endif | ||
| 164 | 160 | ||
| 165 | #endif /* _PARISC_CACHEFLUSH_H */ | 161 | #endif /* _PARISC_CACHEFLUSH_H */ |
| 166 | 162 | ||
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index b7adb2ac049c..c53fc63149e8 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h | |||
| @@ -28,9 +28,8 @@ struct page; | |||
| 28 | 28 | ||
| 29 | void clear_page_asm(void *page); | 29 | void clear_page_asm(void *page); |
| 30 | void copy_page_asm(void *to, void *from); | 30 | void copy_page_asm(void *to, void *from); |
| 31 | void clear_user_page(void *vto, unsigned long vaddr, struct page *pg); | 31 | #define clear_user_page(vto, vaddr, page) clear_page_asm(vto) |
| 32 | void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | 32 | #define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom) |
| 33 | struct page *pg); | ||
| 34 | 33 | ||
| 35 | /* #define CONFIG_PARISC_TMPALIAS */ | 34 | /* #define CONFIG_PARISC_TMPALIAS */ |
| 36 | 35 | ||
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index c035673209f7..a72545554a31 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
| @@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr) | |||
| 388 | } | 388 | } |
| 389 | EXPORT_SYMBOL(flush_kernel_dcache_page_addr); | 389 | EXPORT_SYMBOL(flush_kernel_dcache_page_addr); |
| 390 | 390 | ||
| 391 | void clear_user_page(void *vto, unsigned long vaddr, struct page *page) | ||
| 392 | { | ||
| 393 | clear_page_asm(vto); | ||
| 394 | if (!parisc_requires_coherency()) | ||
| 395 | flush_kernel_dcache_page_asm(vto); | ||
| 396 | } | ||
| 397 | EXPORT_SYMBOL(clear_user_page); | ||
| 398 | |||
| 399 | void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | ||
| 400 | struct page *pg) | ||
| 401 | { | ||
| 402 | /* Copy using kernel mapping. No coherency is needed | ||
| 403 | (all in kmap/kunmap) on machines that don't support | ||
| 404 | non-equivalent aliasing. However, the `from' page | ||
| 405 | needs to be flushed before it can be accessed through | ||
| 406 | the kernel mapping. */ | ||
| 407 | preempt_disable(); | ||
| 408 | flush_dcache_page_asm(__pa(vfrom), vaddr); | ||
| 409 | preempt_enable(); | ||
| 410 | copy_page_asm(vto, vfrom); | ||
| 411 | if (!parisc_requires_coherency()) | ||
| 412 | flush_kernel_dcache_page_asm(vto); | ||
| 413 | } | ||
| 414 | EXPORT_SYMBOL(copy_user_page); | ||
| 415 | |||
| 416 | #ifdef CONFIG_PA8X00 | ||
| 417 | |||
| 418 | void kunmap_parisc(void *addr) | ||
| 419 | { | ||
| 420 | if (parisc_requires_coherency()) | ||
| 421 | flush_kernel_dcache_page_addr(addr); | ||
| 422 | } | ||
| 423 | EXPORT_SYMBOL(kunmap_parisc); | ||
| 424 | #endif | ||
| 425 | |||
| 426 | void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | 391 | void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) |
| 427 | { | 392 | { |
| 428 | unsigned long flags; | 393 | unsigned long flags; |
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts index 4177b62240c2..a618dfc13e4c 100644 --- a/arch/powerpc/boot/dts/mpc5125twr.dts +++ b/arch/powerpc/boot/dts/mpc5125twr.dts | |||
| @@ -58,7 +58,6 @@ | |||
| 58 | compatible = "fsl,mpc5121-immr"; | 58 | compatible = "fsl,mpc5121-immr"; |
| 59 | #address-cells = <1>; | 59 | #address-cells = <1>; |
| 60 | #size-cells = <1>; | 60 | #size-cells = <1>; |
| 61 | #interrupt-cells = <2>; | ||
| 62 | ranges = <0x0 0x80000000 0x400000>; | 61 | ranges = <0x0 0x80000000 0x400000>; |
| 63 | reg = <0x80000000 0x400000>; | 62 | reg = <0x80000000 0x400000>; |
| 64 | bus-frequency = <66000000>; // 66 MHz ips bus | 63 | bus-frequency = <66000000>; // 66 MHz ips bus |
| @@ -189,6 +188,10 @@ | |||
| 189 | reg = <0xA000 0x1000>; | 188 | reg = <0xA000 0x1000>; |
| 190 | }; | 189 | }; |
| 191 | 190 | ||
| 191 | // disable USB1 port | ||
| 192 | // TODO: | ||
| 193 | // correct pinmux config and fix USB3320 ulpi dependency | ||
| 194 | // before re-enabling it | ||
| 192 | usb@3000 { | 195 | usb@3000 { |
| 193 | compatible = "fsl,mpc5121-usb2-dr"; | 196 | compatible = "fsl,mpc5121-usb2-dr"; |
| 194 | reg = <0x3000 0x400>; | 197 | reg = <0x3000 0x400>; |
| @@ -197,6 +200,7 @@ | |||
| 197 | interrupts = <43 0x8>; | 200 | interrupts = <43 0x8>; |
| 198 | dr_mode = "host"; | 201 | dr_mode = "host"; |
| 199 | phy_type = "ulpi"; | 202 | phy_type = "ulpi"; |
| 203 | status = "disabled"; | ||
| 200 | }; | 204 | }; |
| 201 | 205 | ||
| 202 | // 5125 PSCs are not 52xx or 5121 PSC compatible | 206 | // 5125 PSCs are not 52xx or 5121 PSC compatible |
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 894662a5d4d5..243ce69ad685 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
| @@ -284,7 +284,7 @@ do_kvm_##n: \ | |||
| 284 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ | 284 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ |
| 285 | beq- 1f; \ | 285 | beq- 1f; \ |
| 286 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ | 286 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ |
| 287 | 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ | 287 | 1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \ |
| 288 | blt+ cr1,3f; /* abort if it is */ \ | 288 | blt+ cr1,3f; /* abort if it is */ \ |
| 289 | li r1,(n); /* will be reloaded later */ \ | 289 | li r1,(n); /* will be reloaded later */ \ |
| 290 | sth r1,PACA_TRAP_SAVE(r13); \ | 290 | sth r1,PACA_TRAP_SAVE(r13); \ |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4a594b76674d..bc23b1ba7980 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
| @@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void); | |||
| 192 | extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); | 192 | extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); |
| 193 | extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); | 193 | extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); |
| 194 | extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); | 194 | extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); |
| 195 | extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | ||
| 196 | struct kvm_vcpu *vcpu); | ||
| 197 | extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | ||
| 198 | struct kvmppc_book3s_shadow_vcpu *svcpu); | ||
| 195 | 199 | ||
| 196 | static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) | 200 | static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) |
| 197 | { | 201 | { |
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0bd9348a4db9..192917d2239c 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h | |||
| @@ -79,6 +79,7 @@ struct kvmppc_host_state { | |||
| 79 | ulong vmhandler; | 79 | ulong vmhandler; |
| 80 | ulong scratch0; | 80 | ulong scratch0; |
| 81 | ulong scratch1; | 81 | ulong scratch1; |
| 82 | ulong scratch2; | ||
| 82 | u8 in_guest; | 83 | u8 in_guest; |
| 83 | u8 restore_hid5; | 84 | u8 restore_hid5; |
| 84 | u8 napping; | 85 | u8 napping; |
| @@ -106,6 +107,7 @@ struct kvmppc_host_state { | |||
| 106 | }; | 107 | }; |
| 107 | 108 | ||
| 108 | struct kvmppc_book3s_shadow_vcpu { | 109 | struct kvmppc_book3s_shadow_vcpu { |
| 110 | bool in_use; | ||
| 109 | ulong gpr[14]; | 111 | ulong gpr[14]; |
| 110 | u32 cr; | 112 | u32 cr; |
| 111 | u32 xer; | 113 | u32 xer; |
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 033c06be1d84..7bdcf340016c 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h | |||
| @@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, | |||
| 720 | int64_t opal_pci_poll(uint64_t phb_id); | 720 | int64_t opal_pci_poll(uint64_t phb_id); |
| 721 | int64_t opal_return_cpu(void); | 721 | int64_t opal_return_cpu(void); |
| 722 | 722 | ||
| 723 | int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val); | 723 | int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); |
| 724 | int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); | 724 | int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); |
| 725 | 725 | ||
| 726 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, | 726 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, |
| 727 | uint32_t addr, uint32_t data, uint32_t sz); | 727 | uint32_t addr, uint32_t data, uint32_t sz); |
| 728 | int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, | 728 | int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, |
| 729 | uint32_t addr, uint32_t *data, uint32_t sz); | 729 | uint32_t addr, __be32 *data, uint32_t sz); |
| 730 | int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); | 730 | int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); |
| 731 | int64_t opal_manage_flash(uint8_t op); | 731 | int64_t opal_manage_flash(uint8_t op); |
| 732 | int64_t opal_update_flash(uint64_t blk_list); | 732 | int64_t opal_update_flash(uint64_t blk_list); |
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 9ee12610af02..aace90547614 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h | |||
| @@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *); | |||
| 35 | extern void enable_kernel_spe(void); | 35 | extern void enable_kernel_spe(void); |
| 36 | extern void giveup_spe(struct task_struct *); | 36 | extern void giveup_spe(struct task_struct *); |
| 37 | extern void load_up_spe(struct task_struct *); | 37 | extern void load_up_spe(struct task_struct *); |
| 38 | extern void switch_booke_debug_regs(struct thread_struct *new_thread); | 38 | extern void switch_booke_debug_regs(struct debug_reg *new_debug); |
| 39 | 39 | ||
| 40 | #ifndef CONFIG_SMP | 40 | #ifndef CONFIG_SMP |
| 41 | extern void discard_lazy_cpu_state(void); | 41 | extern void discard_lazy_cpu_state(void); |
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h index 5f1b1e3c2137..8296381ae432 100644 --- a/arch/powerpc/include/asm/unaligned.h +++ b/arch/powerpc/include/asm/unaligned.h | |||
| @@ -4,13 +4,18 @@ | |||
| 4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
| 5 | 5 | ||
| 6 | /* | 6 | /* |
| 7 | * The PowerPC can do unaligned accesses itself in big endian mode. | 7 | * The PowerPC can do unaligned accesses itself based on its endian mode. |
| 8 | */ | 8 | */ |
| 9 | #include <linux/unaligned/access_ok.h> | 9 | #include <linux/unaligned/access_ok.h> |
| 10 | #include <linux/unaligned/generic.h> | 10 | #include <linux/unaligned/generic.h> |
| 11 | 11 | ||
| 12 | #ifdef __LITTLE_ENDIAN__ | ||
| 13 | #define get_unaligned __get_unaligned_le | ||
| 14 | #define put_unaligned __put_unaligned_le | ||
| 15 | #else | ||
| 12 | #define get_unaligned __get_unaligned_be | 16 | #define get_unaligned __get_unaligned_be |
| 13 | #define put_unaligned __put_unaligned_be | 17 | #define put_unaligned __put_unaligned_be |
| 18 | #endif | ||
| 14 | 19 | ||
| 15 | #endif /* __KERNEL__ */ | 20 | #endif /* __KERNEL__ */ |
| 16 | #endif /* _ASM_POWERPC_UNALIGNED_H */ | 21 | #endif /* _ASM_POWERPC_UNALIGNED_H */ |
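The unaligned.h hunk above makes get_unaligned()/put_unaligned() follow the kernel's configured endianness rather than always using the big-endian helpers. A small usage sketch; the function name and buffer are illustrative:

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 example_read_u32(const void *buf)
{
	/*
	 * Reads a 32-bit value in CPU byte order from a possibly unaligned
	 * address; with the change above, a little-endian PowerPC build now
	 * resolves this to the __get_unaligned_le helper instead of the
	 * big-endian one.
	 */
	return get_unaligned((const u32 *)buf);
}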
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 2ea5cc033ec8..d3de01066f7d 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
| @@ -576,6 +576,7 @@ int main(void) | |||
| 576 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); | 576 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); |
| 577 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); | 577 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); |
| 578 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); | 578 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); |
| 579 | HSTATE_FIELD(HSTATE_SCRATCH2, scratch2); | ||
| 579 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); | 580 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); |
| 580 | HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); | 581 | HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); |
| 581 | HSTATE_FIELD(HSTATE_NAPPING, napping); | 582 | HSTATE_FIELD(HSTATE_NAPPING, napping); |
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 779a78c26435..11c1d069d920 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
| @@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
| 124 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) | 124 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) |
| 125 | { | 125 | { |
| 126 | unsigned long addr; | 126 | unsigned long addr; |
| 127 | const u32 *basep, *sizep; | 127 | const __be32 *basep, *sizep; |
| 128 | unsigned int rtas_start = 0, rtas_end = 0; | 128 | unsigned int rtas_start = 0, rtas_end = 0; |
| 129 | 129 | ||
| 130 | basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); | 130 | basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); |
| 131 | sizep = of_get_property(rtas.dev, "rtas-size", NULL); | 131 | sizep = of_get_property(rtas.dev, "rtas-size", NULL); |
| 132 | 132 | ||
| 133 | if (basep && sizep) { | 133 | if (basep && sizep) { |
| 134 | rtas_start = *basep; | 134 | rtas_start = be32_to_cpup(basep); |
| 135 | rtas_end = *basep + *sizep; | 135 | rtas_end = rtas_start + be32_to_cpup(sizep); |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | 138 | for (addr = begin; addr < end; addr += PAGE_SIZE) { |
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 2ae41aba4053..4f0946de2d5c 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
| @@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1) | |||
| 80 | * of the function that the cpu should jump to to continue | 80 | * of the function that the cpu should jump to to continue |
| 81 | * initialization. | 81 | * initialization. |
| 82 | */ | 82 | */ |
| 83 | .balign 8 | ||
| 83 | .globl __secondary_hold_spinloop | 84 | .globl __secondary_hold_spinloop |
| 84 | __secondary_hold_spinloop: | 85 | __secondary_hold_spinloop: |
| 85 | .llong 0x0 | 86 | .llong 0x0 |
| @@ -470,6 +471,7 @@ _STATIC(__after_prom_start) | |||
| 470 | mtctr r8 | 471 | mtctr r8 |
| 471 | bctr | 472 | bctr |
| 472 | 473 | ||
| 474 | .balign 8 | ||
| 473 | p_end: .llong _end - _stext | 475 | p_end: .llong _end - _stext |
| 474 | 476 | ||
| 475 | 4: /* Now copy the rest of the kernel up to _end */ | 477 | 4: /* Now copy the rest of the kernel up to _end */ |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3386d8ab7eb0..4a96556fd2d4 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
| 339 | #endif | 339 | #endif |
| 340 | } | 340 | } |
| 341 | 341 | ||
| 342 | static void prime_debug_regs(struct thread_struct *thread) | 342 | static void prime_debug_regs(struct debug_reg *debug) |
| 343 | { | 343 | { |
| 344 | /* | 344 | /* |
| 345 | * We could have inherited MSR_DE from userspace, since | 345 | * We could have inherited MSR_DE from userspace, since |
| @@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread) | |||
| 348 | */ | 348 | */ |
| 349 | mtmsr(mfmsr() & ~MSR_DE); | 349 | mtmsr(mfmsr() & ~MSR_DE); |
| 350 | 350 | ||
| 351 | mtspr(SPRN_IAC1, thread->debug.iac1); | 351 | mtspr(SPRN_IAC1, debug->iac1); |
| 352 | mtspr(SPRN_IAC2, thread->debug.iac2); | 352 | mtspr(SPRN_IAC2, debug->iac2); |
| 353 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 353 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
| 354 | mtspr(SPRN_IAC3, thread->debug.iac3); | 354 | mtspr(SPRN_IAC3, debug->iac3); |
| 355 | mtspr(SPRN_IAC4, thread->debug.iac4); | 355 | mtspr(SPRN_IAC4, debug->iac4); |
| 356 | #endif | 356 | #endif |
| 357 | mtspr(SPRN_DAC1, thread->debug.dac1); | 357 | mtspr(SPRN_DAC1, debug->dac1); |
| 358 | mtspr(SPRN_DAC2, thread->debug.dac2); | 358 | mtspr(SPRN_DAC2, debug->dac2); |
| 359 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | 359 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 |
| 360 | mtspr(SPRN_DVC1, thread->debug.dvc1); | 360 | mtspr(SPRN_DVC1, debug->dvc1); |
| 361 | mtspr(SPRN_DVC2, thread->debug.dvc2); | 361 | mtspr(SPRN_DVC2, debug->dvc2); |
| 362 | #endif | 362 | #endif |
| 363 | mtspr(SPRN_DBCR0, thread->debug.dbcr0); | 363 | mtspr(SPRN_DBCR0, debug->dbcr0); |
| 364 | mtspr(SPRN_DBCR1, thread->debug.dbcr1); | 364 | mtspr(SPRN_DBCR1, debug->dbcr1); |
| 365 | #ifdef CONFIG_BOOKE | 365 | #ifdef CONFIG_BOOKE |
| 366 | mtspr(SPRN_DBCR2, thread->debug.dbcr2); | 366 | mtspr(SPRN_DBCR2, debug->dbcr2); |
| 367 | #endif | 367 | #endif |
| 368 | } | 368 | } |
| 369 | /* | 369 | /* |
| @@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread) | |||
| 371 | * debug registers, set the debug registers from the values | 371 | * debug registers, set the debug registers from the values |
| 372 | * stored in the new thread. | 372 | * stored in the new thread. |
| 373 | */ | 373 | */ |
| 374 | void switch_booke_debug_regs(struct thread_struct *new_thread) | 374 | void switch_booke_debug_regs(struct debug_reg *new_debug) |
| 375 | { | 375 | { |
| 376 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) | 376 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) |
| 377 | || (new_thread->debug.dbcr0 & DBCR0_IDM)) | 377 | || (new_debug->dbcr0 & DBCR0_IDM)) |
| 378 | prime_debug_regs(new_thread); | 378 | prime_debug_regs(new_debug); |
| 379 | } | 379 | } |
| 380 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); | 380 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); |
| 381 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 381 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
| @@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
| 683 | #endif /* CONFIG_SMP */ | 683 | #endif /* CONFIG_SMP */ |
| 684 | 684 | ||
| 685 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 685 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 686 | switch_booke_debug_regs(&new->thread); | 686 | switch_booke_debug_regs(&new->thread.debug); |
| 687 | #else | 687 | #else |
| 688 | /* | 688 | /* |
| 689 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would | 689 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 75fb40498b41..2e3d2bf536c5 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
| @@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
| 1555 | 1555 | ||
| 1556 | flush_fp_to_thread(child); | 1556 | flush_fp_to_thread(child); |
| 1557 | if (fpidx < (PT_FPSCR - PT_FPR0)) | 1557 | if (fpidx < (PT_FPSCR - PT_FPR0)) |
| 1558 | memcpy(&tmp, &child->thread.fp_state.fpr, | 1558 | memcpy(&tmp, &child->thread.TS_FPR(fpidx), |
| 1559 | sizeof(long)); | 1559 | sizeof(long)); |
| 1560 | else | 1560 | else |
| 1561 | tmp = child->thread.fp_state.fpscr; | 1561 | tmp = child->thread.fp_state.fpscr; |
| @@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
| 1588 | 1588 | ||
| 1589 | flush_fp_to_thread(child); | 1589 | flush_fp_to_thread(child); |
| 1590 | if (fpidx < (PT_FPSCR - PT_FPR0)) | 1590 | if (fpidx < (PT_FPSCR - PT_FPR0)) |
| 1591 | memcpy(&child->thread.fp_state.fpr, &data, | 1591 | memcpy(&child->thread.TS_FPR(fpidx), &data, |
| 1592 | sizeof(long)); | 1592 | sizeof(long)); |
| 1593 | else | 1593 | else |
| 1594 | child->thread.fp_state.fpscr = data; | 1594 | child->thread.fp_state.fpscr = data; |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index febc80445d25..bc76cc6b419c 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
| @@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void) | |||
| 479 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && | 479 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && |
| 480 | (dn = of_find_node_by_path("/rtas"))) { | 480 | (dn = of_find_node_by_path("/rtas"))) { |
| 481 | int num_addr_cell, num_size_cell, maxcpus; | 481 | int num_addr_cell, num_size_cell, maxcpus; |
| 482 | const unsigned int *ireg; | 482 | const __be32 *ireg; |
| 483 | 483 | ||
| 484 | num_addr_cell = of_n_addr_cells(dn); | 484 | num_addr_cell = of_n_addr_cells(dn); |
| 485 | num_size_cell = of_n_size_cells(dn); | 485 | num_size_cell = of_n_size_cells(dn); |
| @@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void) | |||
| 489 | if (!ireg) | 489 | if (!ireg) |
| 490 | goto out; | 490 | goto out; |
| 491 | 491 | ||
| 492 | maxcpus = ireg[num_addr_cell + num_size_cell]; | 492 | maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell); |
| 493 | 493 | ||
| 494 | /* Double maxcpus for processors which have SMT capability */ | 494 | /* Double maxcpus for processors which have SMT capability */ |
| 495 | if (cpu_has_feature(CPU_FTR_SMT)) | 495 | if (cpu_has_feature(CPU_FTR_SMT)) |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a3b64f3bf9a2..c1cf4a1522d9 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
| @@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
| 580 | int cpu_to_core_id(int cpu) | 580 | int cpu_to_core_id(int cpu) |
| 581 | { | 581 | { |
| 582 | struct device_node *np; | 582 | struct device_node *np; |
| 583 | const int *reg; | 583 | const __be32 *reg; |
| 584 | int id = -1; | 584 | int id = -1; |
| 585 | 585 | ||
| 586 | np = of_get_cpu_node(cpu, NULL); | 586 | np = of_get_cpu_node(cpu, NULL); |
| @@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu) | |||
| 591 | if (!reg) | 591 | if (!reg) |
| 592 | goto out; | 592 | goto out; |
| 593 | 593 | ||
| 594 | id = *reg; | 594 | id = be32_to_cpup(reg); |
| 595 | out: | 595 | out: |
| 596 | of_node_put(np); | 596 | of_node_put(np); |
| 597 | return id; | 597 | return id; |
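The crash_dump.c, setup-common.c and smp.c hunks share one theme: flattened device tree properties are stored big-endian, so on a little-endian kernel they must be typed __be32 and converted with be32_to_cpup() instead of being dereferenced directly. A minimal sketch of the pattern; the property name and error handling are illustrative:

#include <linux/of.h>

static int example_read_dt_cell(struct device_node *np)
{
	const __be32 *prop = of_get_property(np, "reg", NULL);

	if (!prop)
		return -1;
	/*
	 * Device tree cells are big-endian in memory; be32_to_cpup()
	 * byte-swaps on little-endian kernels and is a no-op on big-endian
	 * ones, so the same code is correct for both.
	 */
	return be32_to_cpup(prop);
}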
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3ff587a8b7d..c5d148434c08 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
| @@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
| 469 | slb_v = vcpu->kvm->arch.vrma_slb_v; | 469 | slb_v = vcpu->kvm->arch.vrma_slb_v; |
| 470 | } | 470 | } |
| 471 | 471 | ||
| 472 | preempt_disable(); | ||
| 472 | /* Find the HPTE in the hash table */ | 473 | /* Find the HPTE in the hash table */ |
| 473 | index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, | 474 | index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, |
| 474 | HPTE_V_VALID | HPTE_V_ABSENT); | 475 | HPTE_V_VALID | HPTE_V_ABSENT); |
| 475 | if (index < 0) | 476 | if (index < 0) { |
| 477 | preempt_enable(); | ||
| 476 | return -ENOENT; | 478 | return -ENOENT; |
| 479 | } | ||
| 477 | hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); | 480 | hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); |
| 478 | v = hptep[0] & ~HPTE_V_HVLOCK; | 481 | v = hptep[0] & ~HPTE_V_HVLOCK; |
| 479 | gr = kvm->arch.revmap[index].guest_rpte; | 482 | gr = kvm->arch.revmap[index].guest_rpte; |
| @@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
| 481 | /* Unlock the HPTE */ | 484 | /* Unlock the HPTE */ |
| 482 | asm volatile("lwsync" : : : "memory"); | 485 | asm volatile("lwsync" : : : "memory"); |
| 483 | hptep[0] = v; | 486 | hptep[0] = v; |
| 487 | preempt_enable(); | ||
| 484 | 488 | ||
| 485 | gpte->eaddr = eaddr; | 489 | gpte->eaddr = eaddr; |
| 486 | gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); | 490 | gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); |
| @@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 665 | return -EFAULT; | 669 | return -EFAULT; |
| 666 | } else { | 670 | } else { |
| 667 | page = pages[0]; | 671 | page = pages[0]; |
| 672 | pfn = page_to_pfn(page); | ||
| 668 | if (PageHuge(page)) { | 673 | if (PageHuge(page)) { |
| 669 | page = compound_head(page); | 674 | page = compound_head(page); |
| 670 | pte_size <<= compound_order(page); | 675 | pte_size <<= compound_order(page); |
| @@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 689 | } | 694 | } |
| 690 | rcu_read_unlock_sched(); | 695 | rcu_read_unlock_sched(); |
| 691 | } | 696 | } |
| 692 | pfn = page_to_pfn(page); | ||
| 693 | } | 697 | } |
| 694 | 698 | ||
| 695 | ret = -EFAULT; | 699 | ret = -EFAULT; |
| @@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 707 | r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; | 711 | r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; |
| 708 | } | 712 | } |
| 709 | 713 | ||
| 710 | /* Set the HPTE to point to pfn */ | 714 | /* |
| 711 | r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT); | 715 | * Set the HPTE to point to pfn. |
| 716 | * Since the pfn is at PAGE_SIZE granularity, make sure we | ||
| 717 | * don't mask out lower-order bits if psize < PAGE_SIZE. | ||
| 718 | */ | ||
| 719 | if (psize < PAGE_SIZE) | ||
| 720 | psize = PAGE_SIZE; | ||
| 721 | r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1)); | ||
| 712 | if (hpte_is_writable(r) && !write_ok) | 722 | if (hpte_is_writable(r) && !write_ok) |
| 713 | r = hpte_make_readonly(r); | 723 | r = hpte_make_readonly(r); |
| 714 | ret = RESUME_GUEST; | 724 | ret = RESUME_GUEST; |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 072287f1c3bc..b51d5db78068 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
| @@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) | |||
| 131 | static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) | 131 | static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) |
| 132 | { | 132 | { |
| 133 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 133 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
| 134 | unsigned long flags; | ||
| 134 | 135 | ||
| 135 | spin_lock(&vcpu->arch.tbacct_lock); | 136 | spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); |
| 136 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && | 137 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && |
| 137 | vc->preempt_tb != TB_NIL) { | 138 | vc->preempt_tb != TB_NIL) { |
| 138 | vc->stolen_tb += mftb() - vc->preempt_tb; | 139 | vc->stolen_tb += mftb() - vc->preempt_tb; |
| @@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) | |||
| 143 | vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; | 144 | vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; |
| 144 | vcpu->arch.busy_preempt = TB_NIL; | 145 | vcpu->arch.busy_preempt = TB_NIL; |
| 145 | } | 146 | } |
| 146 | spin_unlock(&vcpu->arch.tbacct_lock); | 147 | spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); |
| 147 | } | 148 | } |
| 148 | 149 | ||
| 149 | static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) | 150 | static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) |
| 150 | { | 151 | { |
| 151 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 152 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
| 153 | unsigned long flags; | ||
| 152 | 154 | ||
| 153 | spin_lock(&vcpu->arch.tbacct_lock); | 155 | spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); |
| 154 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) | 156 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) |
| 155 | vc->preempt_tb = mftb(); | 157 | vc->preempt_tb = mftb(); |
| 156 | if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) | 158 | if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) |
| 157 | vcpu->arch.busy_preempt = mftb(); | 159 | vcpu->arch.busy_preempt = mftb(); |
| 158 | spin_unlock(&vcpu->arch.tbacct_lock); | 160 | spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); |
| 159 | } | 161 | } |
| 160 | 162 | ||
| 161 | static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) | 163 | static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) |
| @@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) | |||
| 486 | */ | 488 | */ |
| 487 | if (vc->vcore_state != VCORE_INACTIVE && | 489 | if (vc->vcore_state != VCORE_INACTIVE && |
| 488 | vc->runner->arch.run_task != current) { | 490 | vc->runner->arch.run_task != current) { |
| 489 | spin_lock(&vc->runner->arch.tbacct_lock); | 491 | spin_lock_irq(&vc->runner->arch.tbacct_lock); |
| 490 | p = vc->stolen_tb; | 492 | p = vc->stolen_tb; |
| 491 | if (vc->preempt_tb != TB_NIL) | 493 | if (vc->preempt_tb != TB_NIL) |
| 492 | p += now - vc->preempt_tb; | 494 | p += now - vc->preempt_tb; |
| 493 | spin_unlock(&vc->runner->arch.tbacct_lock); | 495 | spin_unlock_irq(&vc->runner->arch.tbacct_lock); |
| 494 | } else { | 496 | } else { |
| 495 | p = vc->stolen_tb; | 497 | p = vc->stolen_tb; |
| 496 | } | 498 | } |
| @@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, | |||
| 512 | core_stolen = vcore_stolen_time(vc, now); | 514 | core_stolen = vcore_stolen_time(vc, now); |
| 513 | stolen = core_stolen - vcpu->arch.stolen_logged; | 515 | stolen = core_stolen - vcpu->arch.stolen_logged; |
| 514 | vcpu->arch.stolen_logged = core_stolen; | 516 | vcpu->arch.stolen_logged = core_stolen; |
| 515 | spin_lock(&vcpu->arch.tbacct_lock); | 517 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
| 516 | stolen += vcpu->arch.busy_stolen; | 518 | stolen += vcpu->arch.busy_stolen; |
| 517 | vcpu->arch.busy_stolen = 0; | 519 | vcpu->arch.busy_stolen = 0; |
| 518 | spin_unlock(&vcpu->arch.tbacct_lock); | 520 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
| 519 | if (!dt || !vpa) | 521 | if (!dt || !vpa) |
| 520 | return; | 522 | return; |
| 521 | memset(dt, 0, sizeof(struct dtl_entry)); | 523 | memset(dt, 0, sizeof(struct dtl_entry)); |
| @@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
| 589 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) | 591 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) |
| 590 | return RESUME_HOST; | 592 | return RESUME_HOST; |
| 591 | 593 | ||
| 594 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
| 592 | rc = kvmppc_rtas_hcall(vcpu); | 595 | rc = kvmppc_rtas_hcall(vcpu); |
| 596 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
| 593 | 597 | ||
| 594 | if (rc == -ENOENT) | 598 | if (rc == -ENOENT) |
| 595 | return RESUME_HOST; | 599 | return RESUME_HOST; |
| @@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, | |||
| 1115 | 1119 | ||
| 1116 | if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) | 1120 | if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
| 1117 | return; | 1121 | return; |
| 1118 | spin_lock(&vcpu->arch.tbacct_lock); | 1122 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
| 1119 | now = mftb(); | 1123 | now = mftb(); |
| 1120 | vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - | 1124 | vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - |
| 1121 | vcpu->arch.stolen_logged; | 1125 | vcpu->arch.stolen_logged; |
| 1122 | vcpu->arch.busy_preempt = now; | 1126 | vcpu->arch.busy_preempt = now; |
| 1123 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; | 1127 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
| 1124 | spin_unlock(&vcpu->arch.tbacct_lock); | 1128 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
| 1125 | --vc->n_runnable; | 1129 | --vc->n_runnable; |
| 1126 | list_del(&vcpu->arch.run_list); | 1130 | list_del(&vcpu->arch.run_list); |
| 1127 | } | 1131 | } |
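The tbacct_lock changes above move from plain spin_lock() to the irq-disabling variants (spin_lock_irq/spin_lock_irqsave), so the lock is never taken with an inconsistent interrupt state across the paths that use it. A generic sketch of the spin_lock_irqsave() pattern; the lock, counter and function names are illustrative:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_acct_lock);
static u64 example_acct_total;

static void example_account(u64 delta)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() disables local interrupts while the lock is
	 * held, so nothing that runs with interrupts off (or in interrupt
	 * context) on this CPU can try to take the same lock and deadlock.
	 */
	spin_lock_irqsave(&example_acct_lock, flags);
	example_acct_total += delta;
	spin_unlock_irqrestore(&example_acct_lock, flags);
}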
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 9c515440ad1a..8689e2e30857 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
| @@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
| 225 | is_io = pa & (HPTE_R_I | HPTE_R_W); | 225 | is_io = pa & (HPTE_R_I | HPTE_R_W); |
| 226 | pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); | 226 | pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); |
| 227 | pa &= PAGE_MASK; | 227 | pa &= PAGE_MASK; |
| 228 | pa |= gpa & ~PAGE_MASK; | ||
| 228 | } else { | 229 | } else { |
| 229 | /* Translate to host virtual address */ | 230 | /* Translate to host virtual address */ |
| 230 | hva = __gfn_to_hva_memslot(memslot, gfn); | 231 | hva = __gfn_to_hva_memslot(memslot, gfn); |
| @@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
| 238 | ptel = hpte_make_readonly(ptel); | 239 | ptel = hpte_make_readonly(ptel); |
| 239 | is_io = hpte_cache_bits(pte_val(pte)); | 240 | is_io = hpte_cache_bits(pte_val(pte)); |
| 240 | pa = pte_pfn(pte) << PAGE_SHIFT; | 241 | pa = pte_pfn(pte) << PAGE_SHIFT; |
| 242 | pa |= hva & (pte_size - 1); | ||
| 243 | pa |= gpa & ~PAGE_MASK; | ||
| 241 | } | 244 | } |
| 242 | } | 245 | } |
| 243 | 246 | ||
| 244 | if (pte_size < psize) | 247 | if (pte_size < psize) |
| 245 | return H_PARAMETER; | 248 | return H_PARAMETER; |
| 246 | if (pa && pte_size > psize) | ||
| 247 | pa |= gpa & (pte_size - 1); | ||
| 248 | 249 | ||
| 249 | ptel &= ~(HPTE_R_PP0 - psize); | 250 | ptel &= ~(HPTE_R_PP0 - psize); |
| 250 | ptel |= pa; | 251 | ptel |= pa; |
| @@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = { | |||
| 749 | 20, /* 1M, unsupported */ | 750 | 20, /* 1M, unsupported */ |
| 750 | }; | 751 | }; |
| 751 | 752 | ||
| 753 | /* When called from virtmode, this func should be protected by | ||
| 754 | * preempt_disable(); otherwise, holding HPTE_V_HVLOCK | ||
| 755 | * can trigger a deadlock. | ||
| 756 | */ | ||
| 752 | long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | 757 | long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, |
| 753 | unsigned long valid) | 758 | unsigned long valid) |
| 754 | { | 759 | { |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bc8de75b1925..be4fa04a37c9 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
| @@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
| 153 | 153 | ||
| 154 | 13: b machine_check_fwnmi | 154 | 13: b machine_check_fwnmi |
| 155 | 155 | ||
| 156 | |||
| 157 | /* | 156 | /* |
| 158 | * We come in here when wakened from nap mode on a secondary hw thread. | 157 | * We come in here when wakened from nap mode on a secondary hw thread. |
| 159 | * Relocation is off and most register values are lost. | 158 | * Relocation is off and most register values are lost. |
| @@ -224,6 +223,11 @@ kvm_start_guest: | |||
| 224 | /* Clear our vcpu pointer so we don't come back in early */ | 223 | /* Clear our vcpu pointer so we don't come back in early */ |
| 225 | li r0, 0 | 224 | li r0, 0 |
| 226 | std r0, HSTATE_KVM_VCPU(r13) | 225 | std r0, HSTATE_KVM_VCPU(r13) |
| 226 | /* | ||
| 227 | * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing | ||
| 228 | * the nap_count, because once the increment to nap_count is | ||
| 229 | * visible we could be given another vcpu. | ||
| 230 | */ | ||
| 227 | lwsync | 231 | lwsync |
| 228 | /* Clear any pending IPI - we're an offline thread */ | 232 | /* Clear any pending IPI - we're an offline thread */ |
| 229 | ld r5, HSTATE_XICS_PHYS(r13) | 233 | ld r5, HSTATE_XICS_PHYS(r13) |
| @@ -241,7 +245,6 @@ kvm_start_guest: | |||
| 241 | /* increment the nap count and then go to nap mode */ | 245 | /* increment the nap count and then go to nap mode */ |
| 242 | ld r4, HSTATE_KVM_VCORE(r13) | 246 | ld r4, HSTATE_KVM_VCORE(r13) |
| 243 | addi r4, r4, VCORE_NAP_COUNT | 247 | addi r4, r4, VCORE_NAP_COUNT |
| 244 | lwsync /* make previous updates visible */ | ||
| 245 | 51: lwarx r3, 0, r4 | 248 | 51: lwarx r3, 0, r4 |
| 246 | addi r3, r3, 1 | 249 | addi r3, r3, 1 |
| 247 | stwcx. r3, 0, r4 | 250 | stwcx. r3, 0, r4 |
| @@ -751,15 +754,14 @@ kvmppc_interrupt_hv: | |||
| 751 | * guest CR, R12 saved in shadow VCPU SCRATCH1/0 | 754 | * guest CR, R12 saved in shadow VCPU SCRATCH1/0 |
| 752 | * guest R13 saved in SPRN_SCRATCH0 | 755 | * guest R13 saved in SPRN_SCRATCH0 |
| 753 | */ | 756 | */ |
| 754 | /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ | 757 | std r9, HSTATE_SCRATCH2(r13) |
| 755 | std r9, HSTATE_HOST_R2(r13) | ||
| 756 | 758 | ||
| 757 | lbz r9, HSTATE_IN_GUEST(r13) | 759 | lbz r9, HSTATE_IN_GUEST(r13) |
| 758 | cmpwi r9, KVM_GUEST_MODE_HOST_HV | 760 | cmpwi r9, KVM_GUEST_MODE_HOST_HV |
| 759 | beq kvmppc_bad_host_intr | 761 | beq kvmppc_bad_host_intr |
| 760 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | 762 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE |
| 761 | cmpwi r9, KVM_GUEST_MODE_GUEST | 763 | cmpwi r9, KVM_GUEST_MODE_GUEST |
| 762 | ld r9, HSTATE_HOST_R2(r13) | 764 | ld r9, HSTATE_SCRATCH2(r13) |
| 763 | beq kvmppc_interrupt_pr | 765 | beq kvmppc_interrupt_pr |
| 764 | #endif | 766 | #endif |
| 765 | /* We're now back in the host but in guest MMU context */ | 767 | /* We're now back in the host but in guest MMU context */ |
| @@ -779,7 +781,7 @@ kvmppc_interrupt_hv: | |||
| 779 | std r6, VCPU_GPR(R6)(r9) | 781 | std r6, VCPU_GPR(R6)(r9) |
| 780 | std r7, VCPU_GPR(R7)(r9) | 782 | std r7, VCPU_GPR(R7)(r9) |
| 781 | std r8, VCPU_GPR(R8)(r9) | 783 | std r8, VCPU_GPR(R8)(r9) |
| 782 | ld r0, HSTATE_HOST_R2(r13) | 784 | ld r0, HSTATE_SCRATCH2(r13) |
| 783 | std r0, VCPU_GPR(R9)(r9) | 785 | std r0, VCPU_GPR(R9)(r9) |
| 784 | std r10, VCPU_GPR(R10)(r9) | 786 | std r10, VCPU_GPR(R10)(r9) |
| 785 | std r11, VCPU_GPR(R11)(r9) | 787 | std r11, VCPU_GPR(R11)(r9) |
| @@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
| 990 | */ | 992 | */ |
| 991 | /* Increment the threads-exiting-guest count in the 0xff00 | 993 | /* Increment the threads-exiting-guest count in the 0xff00 |
| 992 | bits of vcore->entry_exit_count */ | 994 | bits of vcore->entry_exit_count */ |
| 993 | lwsync | ||
| 994 | ld r5,HSTATE_KVM_VCORE(r13) | 995 | ld r5,HSTATE_KVM_VCORE(r13) |
| 995 | addi r6,r5,VCORE_ENTRY_EXIT | 996 | addi r6,r5,VCORE_ENTRY_EXIT |
| 996 | 41: lwarx r3,0,r6 | 997 | 41: lwarx r3,0,r6 |
| 997 | addi r0,r3,0x100 | 998 | addi r0,r3,0x100 |
| 998 | stwcx. r0,0,r6 | 999 | stwcx. r0,0,r6 |
| 999 | bne 41b | 1000 | bne 41b |
| 1000 | lwsync | 1001 | isync /* order stwcx. vs. reading napping_threads */ |
| 1001 | 1002 | ||
| 1002 | /* | 1003 | /* |
| 1003 | * At this point we have an interrupt that we have to pass | 1004 | * At this point we have an interrupt that we have to pass |
| @@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
| 1030 | sld r0,r0,r4 | 1031 | sld r0,r0,r4 |
| 1031 | andc. r3,r3,r0 /* no sense IPI'ing ourselves */ | 1032 | andc. r3,r3,r0 /* no sense IPI'ing ourselves */ |
| 1032 | beq 43f | 1033 | beq 43f |
| 1034 | /* Order entry/exit update vs. IPIs */ | ||
| 1035 | sync | ||
| 1033 | mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ | 1036 | mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ |
| 1034 | subf r6,r4,r13 | 1037 | subf r6,r4,r13 |
| 1035 | 42: andi. r0,r3,1 | 1038 | 42: andi. r0,r3,1 |
| @@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
| 1638 | bge kvm_cede_exit | 1641 | bge kvm_cede_exit |
| 1639 | stwcx. r4,0,r6 | 1642 | stwcx. r4,0,r6 |
| 1640 | bne 31b | 1643 | bne 31b |
| 1644 | /* order napping_threads update vs testing entry_exit_count */ | ||
| 1645 | isync | ||
| 1641 | li r0,1 | 1646 | li r0,1 |
| 1642 | stb r0,HSTATE_NAPPING(r13) | 1647 | stb r0,HSTATE_NAPPING(r13) |
| 1643 | /* order napping_threads update vs testing entry_exit_count */ | ||
| 1644 | lwsync | ||
| 1645 | mr r4,r3 | 1648 | mr r4,r3 |
| 1646 | lwz r7,VCORE_ENTRY_EXIT(r5) | 1649 | lwz r7,VCORE_ENTRY_EXIT(r5) |
| 1647 | cmpwi r7,0x100 | 1650 | cmpwi r7,0x100 |
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index f4dd041c14ea..f779450cb07c 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S | |||
| @@ -129,29 +129,32 @@ kvm_start_lightweight: | |||
| 129 | * R12 = exit handler id | 129 | * R12 = exit handler id |
| 130 | * R13 = PACA | 130 | * R13 = PACA |
| 131 | * SVCPU.* = guest * | 131 | * SVCPU.* = guest * |
| 132 | * MSR.EE = 1 | ||
| 132 | * | 133 | * |
| 133 | */ | 134 | */ |
| 134 | 135 | ||
| 136 | PPC_LL r3, GPR4(r1) /* vcpu pointer */ | ||
| 137 | |||
| 138 | /* | ||
| 139 | * kvmppc_copy_from_svcpu can clobber volatile registers, save | ||
| 140 | * the exit handler id to the vcpu and restore it from there later. | ||
| 141 | */ | ||
| 142 | stw r12, VCPU_TRAP(r3) | ||
| 143 | |||
| 135 | /* Transfer reg values from shadow vcpu back to vcpu struct */ | 144 | /* Transfer reg values from shadow vcpu back to vcpu struct */ |
| 136 | /* On 64-bit, interrupts are still off at this point */ | 145 | /* On 64-bit, interrupts are still off at this point */ |
| 137 | PPC_LL r3, GPR4(r1) /* vcpu pointer */ | 146 | |
| 138 | GET_SHADOW_VCPU(r4) | 147 | GET_SHADOW_VCPU(r4) |
| 139 | bl FUNC(kvmppc_copy_from_svcpu) | 148 | bl FUNC(kvmppc_copy_from_svcpu) |
| 140 | nop | 149 | nop |
| 141 | 150 | ||
| 142 | #ifdef CONFIG_PPC_BOOK3S_64 | 151 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 143 | /* Re-enable interrupts */ | ||
| 144 | ld r3, HSTATE_HOST_MSR(r13) | ||
| 145 | ori r3, r3, MSR_EE | ||
| 146 | MTMSR_EERI(r3) | ||
| 147 | |||
| 148 | /* | 152 | /* |
| 149 | * Reload kernel SPRG3 value. | 153 | * Reload kernel SPRG3 value. |
| 150 | * No need to save guest value as usermode can't modify SPRG3. | 154 | * No need to save guest value as usermode can't modify SPRG3. |
| 151 | */ | 155 | */ |
| 152 | ld r3, PACA_SPRG3(r13) | 156 | ld r3, PACA_SPRG3(r13) |
| 153 | mtspr SPRN_SPRG3, r3 | 157 | mtspr SPRN_SPRG3, r3 |
| 154 | |||
| 155 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 158 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
| 156 | 159 | ||
| 157 | /* R7 = vcpu */ | 160 | /* R7 = vcpu */ |
| @@ -177,7 +180,7 @@ kvm_start_lightweight: | |||
| 177 | PPC_STL r31, VCPU_GPR(R31)(r7) | 180 | PPC_STL r31, VCPU_GPR(R31)(r7) |
| 178 | 181 | ||
| 179 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | 182 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ |
| 180 | mr r5, r12 | 183 | lwz r5, VCPU_TRAP(r7) |
| 181 | 184 | ||
| 182 | /* Restore r3 (kvm_run) and r4 (vcpu) */ | 185 | /* Restore r3 (kvm_run) and r4 (vcpu) */ |
| 183 | REST_2GPRS(3, r1) | 186 | REST_2GPRS(3, r1) |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index fe14ca3dd171..5b9e9063cfaf 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
| @@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) | |||
| 66 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 66 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
| 67 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); | 67 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); |
| 68 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; | 68 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; |
| 69 | svcpu->in_use = 0; | ||
| 69 | svcpu_put(svcpu); | 70 | svcpu_put(svcpu); |
| 70 | #endif | 71 | #endif |
| 71 | vcpu->cpu = smp_processor_id(); | 72 | vcpu->cpu = smp_processor_id(); |
| @@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | |||
| 78 | { | 79 | { |
| 79 | #ifdef CONFIG_PPC_BOOK3S_64 | 80 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 80 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 81 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
| 82 | if (svcpu->in_use) { | ||
| 83 | kvmppc_copy_from_svcpu(vcpu, svcpu); | ||
| 84 | } | ||
| 81 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); | 85 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); |
| 82 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; | 86 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; |
| 83 | svcpu_put(svcpu); | 87 | svcpu_put(svcpu); |
| @@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | |||
| 110 | svcpu->ctr = vcpu->arch.ctr; | 114 | svcpu->ctr = vcpu->arch.ctr; |
| 111 | svcpu->lr = vcpu->arch.lr; | 115 | svcpu->lr = vcpu->arch.lr; |
| 112 | svcpu->pc = vcpu->arch.pc; | 116 | svcpu->pc = vcpu->arch.pc; |
| 117 | svcpu->in_use = true; | ||
| 113 | } | 118 | } |
| 114 | 119 | ||
| 115 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | 120 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ |
| 116 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | 121 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, |
| 117 | struct kvmppc_book3s_shadow_vcpu *svcpu) | 122 | struct kvmppc_book3s_shadow_vcpu *svcpu) |
| 118 | { | 123 | { |
| 124 | /* | ||
| 125 | * vcpu_put would just call us again because in_use hasn't | ||
| 126 | * been updated yet. | ||
| 127 | */ | ||
| 128 | preempt_disable(); | ||
| 129 | |||
| 130 | /* | ||
| 131 | * Maybe we were already preempted and synced the svcpu from | ||
| 132 | * our preempt notifiers. Don't bother touching this svcpu then. | ||
| 133 | */ | ||
| 134 | if (!svcpu->in_use) | ||
| 135 | goto out; | ||
| 136 | |||
| 119 | vcpu->arch.gpr[0] = svcpu->gpr[0]; | 137 | vcpu->arch.gpr[0] = svcpu->gpr[0]; |
| 120 | vcpu->arch.gpr[1] = svcpu->gpr[1]; | 138 | vcpu->arch.gpr[1] = svcpu->gpr[1]; |
| 121 | vcpu->arch.gpr[2] = svcpu->gpr[2]; | 139 | vcpu->arch.gpr[2] = svcpu->gpr[2]; |
| @@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | |||
| 139 | vcpu->arch.fault_dar = svcpu->fault_dar; | 157 | vcpu->arch.fault_dar = svcpu->fault_dar; |
| 140 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | 158 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; |
| 141 | vcpu->arch.last_inst = svcpu->last_inst; | 159 | vcpu->arch.last_inst = svcpu->last_inst; |
| 160 | svcpu->in_use = false; | ||
| 161 | |||
| 162 | out: | ||
| 163 | preempt_enable(); | ||
| 142 | } | 164 | } |
| 143 | 165 | ||
| 144 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) | 166 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) |
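
The book3s_pr.c hunks above introduce the svcpu->in_use flag so that the shadow-vcpu state is copied back exactly once, whether that happens in the exit path or earlier from the preempt notifier (kvmppc_core_vcpu_put_pr). The stand-alone C sketch below models only that idempotent copy-back idea; the structure and field names are invented for illustration, and the preempt_disable()/preempt_enable() bracket used by the real kvmppc_copy_from_svcpu() is omitted.

/* Stand-alone model (not kernel code) of the "copy back from the shadow
 * area at most once" pattern: copy_to_shadow() sets the flag, the first
 * copy_from_shadow() clears it, and any later call becomes a no-op. */
#include <stdbool.h>
#include <stdio.h>

struct shadow_state {
	int gpr0;	/* stand-in for the registers touched by real-mode code */
	bool in_use;	/* true while the shadow area holds live guest state */
};

struct vcpu_state {
	int gpr0;
};

static void copy_to_shadow(struct shadow_state *s, const struct vcpu_state *v)
{
	s->gpr0 = v->gpr0;
	s->in_use = true;
}

static void copy_from_shadow(struct vcpu_state *v, struct shadow_state *s)
{
	if (!s->in_use)		/* already synced elsewhere, nothing to do */
		return;
	v->gpr0 = s->gpr0;
	s->in_use = false;	/* make a second copy-back a no-op */
}

int main(void)
{
	struct vcpu_state v = { .gpr0 = 1 };
	struct shadow_state s = { 0 };

	copy_to_shadow(&s, &v);
	s.gpr0 = 42;			/* "real mode" modifies the shadow copy */
	copy_from_shadow(&v, &s);	/* exit path syncs once */
	copy_from_shadow(&v, &s);	/* later put path finds in_use == false */
	printf("gpr0 = %d, in_use = %d\n", v.gpr0, s.in_use);
	return 0;
}

Run as-is, the second copy_from_shadow() call leaves the state untouched because the first one already cleared in_use, which mirrors how kvmppc_core_vcpu_put_pr() skips the copy when the exit path has already done it.
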
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index a38c4c9edab8..c3c5231adade 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
| @@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline) | |||
| 153 | 153 | ||
| 154 | li r6, MSR_IR | MSR_DR | 154 | li r6, MSR_IR | MSR_DR |
| 155 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ | 155 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ |
| 156 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
| 157 | /* | 156 | /* |
| 158 | * Set EE in HOST_MSR so that it's enabled when we get into our | 157 | * Set EE in HOST_MSR so that it's enabled when we get into our |
| 159 | * C exit handler function. On 64-bit we delay enabling | 158 | * C exit handler function. |
| 160 | * interrupts until we have finished transferring stuff | ||
| 161 | * to or from the PACA. | ||
| 162 | */ | 159 | */ |
| 163 | ori r5, r5, MSR_EE | 160 | ori r5, r5, MSR_EE |
| 164 | #endif | ||
| 165 | mtsrr0 r7 | 161 | mtsrr0 r7 |
| 166 | mtsrr1 r6 | 162 | mtsrr1 r6 |
| 167 | RFI | 163 | RFI |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53e65a210b9a..0591e05db74b 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
| @@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) | |||
| 681 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 681 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
| 682 | { | 682 | { |
| 683 | int ret, s; | 683 | int ret, s; |
| 684 | struct thread_struct thread; | 684 | struct debug_reg debug; |
| 685 | #ifdef CONFIG_PPC_FPU | 685 | #ifdef CONFIG_PPC_FPU |
| 686 | struct thread_fp_state fp; | 686 | struct thread_fp_state fp; |
| 687 | int fpexc_mode; | 687 | int fpexc_mode; |
| @@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
| 723 | #endif | 723 | #endif |
| 724 | 724 | ||
| 725 | /* Switch to guest debug context */ | 725 | /* Switch to guest debug context */ |
| 726 | thread.debug = vcpu->arch.shadow_dbg_reg; | 726 | debug = vcpu->arch.shadow_dbg_reg; |
| 727 | switch_booke_debug_regs(&thread); | 727 | switch_booke_debug_regs(&debug); |
| 728 | thread.debug = current->thread.debug; | 728 | debug = current->thread.debug; |
| 729 | current->thread.debug = vcpu->arch.shadow_dbg_reg; | 729 | current->thread.debug = vcpu->arch.shadow_dbg_reg; |
| 730 | 730 | ||
| 731 | kvmppc_fix_ee_before_entry(); | 731 | kvmppc_fix_ee_before_entry(); |
| @@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
| 736 | We also get here with interrupts enabled. */ | 736 | We also get here with interrupts enabled. */ |
| 737 | 737 | ||
| 738 | /* Switch back to user space debug context */ | 738 | /* Switch back to user space debug context */ |
| 739 | switch_booke_debug_regs(&thread); | 739 | switch_booke_debug_regs(&debug); |
| 740 | current->thread.debug = thread.debug; | 740 | current->thread.debug = debug; |
| 741 | 741 | ||
| 742 | #ifdef CONFIG_PPC_FPU | 742 | #ifdef CONFIG_PPC_FPU |
| 743 | kvmppc_save_guest_fp(vcpu); | 743 | kvmppc_save_guest_fp(vcpu); |
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index d73a59014900..596a285c0755 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S | |||
| @@ -9,6 +9,14 @@ | |||
| 9 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
| 10 | #include <asm/ppc_asm.h> | 10 | #include <asm/ppc_asm.h> |
| 11 | 11 | ||
| 12 | #ifdef __BIG_ENDIAN__ | ||
| 13 | #define sLd sld /* Shift towards low-numbered address. */ | ||
| 14 | #define sHd srd /* Shift towards high-numbered address. */ | ||
| 15 | #else | ||
| 16 | #define sLd srd /* Shift towards low-numbered address. */ | ||
| 17 | #define sHd sld /* Shift towards high-numbered address. */ | ||
| 18 | #endif | ||
| 19 | |||
| 12 | .align 7 | 20 | .align 7 |
| 13 | _GLOBAL(__copy_tofrom_user) | 21 | _GLOBAL(__copy_tofrom_user) |
| 14 | BEGIN_FTR_SECTION | 22 | BEGIN_FTR_SECTION |
| @@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
| 118 | 126 | ||
| 119 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ | 127 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ |
| 120 | 25: ld r0,8(r4) | 128 | 25: ld r0,8(r4) |
| 121 | sld r6,r9,r10 | 129 | sLd r6,r9,r10 |
| 122 | 26: ldu r9,16(r4) | 130 | 26: ldu r9,16(r4) |
| 123 | srd r7,r0,r11 | 131 | sHd r7,r0,r11 |
| 124 | sld r8,r0,r10 | 132 | sLd r8,r0,r10 |
| 125 | or r7,r7,r6 | 133 | or r7,r7,r6 |
| 126 | blt cr6,79f | 134 | blt cr6,79f |
| 127 | 27: ld r0,8(r4) | 135 | 27: ld r0,8(r4) |
| @@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
| 129 | 137 | ||
| 130 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ | 138 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ |
| 131 | 29: ldu r9,8(r4) | 139 | 29: ldu r9,8(r4) |
| 132 | sld r8,r0,r10 | 140 | sLd r8,r0,r10 |
| 133 | addi r3,r3,-8 | 141 | addi r3,r3,-8 |
| 134 | blt cr6,5f | 142 | blt cr6,5f |
| 135 | 30: ld r0,8(r4) | 143 | 30: ld r0,8(r4) |
| 136 | srd r12,r9,r11 | 144 | sHd r12,r9,r11 |
| 137 | sld r6,r9,r10 | 145 | sLd r6,r9,r10 |
| 138 | 31: ldu r9,16(r4) | 146 | 31: ldu r9,16(r4) |
| 139 | or r12,r8,r12 | 147 | or r12,r8,r12 |
| 140 | srd r7,r0,r11 | 148 | sHd r7,r0,r11 |
| 141 | sld r8,r0,r10 | 149 | sLd r8,r0,r10 |
| 142 | addi r3,r3,16 | 150 | addi r3,r3,16 |
| 143 | beq cr6,78f | 151 | beq cr6,78f |
| 144 | 152 | ||
| 145 | 1: or r7,r7,r6 | 153 | 1: or r7,r7,r6 |
| 146 | 32: ld r0,8(r4) | 154 | 32: ld r0,8(r4) |
| 147 | 76: std r12,8(r3) | 155 | 76: std r12,8(r3) |
| 148 | 2: srd r12,r9,r11 | 156 | 2: sHd r12,r9,r11 |
| 149 | sld r6,r9,r10 | 157 | sLd r6,r9,r10 |
| 150 | 33: ldu r9,16(r4) | 158 | 33: ldu r9,16(r4) |
| 151 | or r12,r8,r12 | 159 | or r12,r8,r12 |
| 152 | 77: stdu r7,16(r3) | 160 | 77: stdu r7,16(r3) |
| 153 | srd r7,r0,r11 | 161 | sHd r7,r0,r11 |
| 154 | sld r8,r0,r10 | 162 | sLd r8,r0,r10 |
| 155 | bdnz 1b | 163 | bdnz 1b |
| 156 | 164 | ||
| 157 | 78: std r12,8(r3) | 165 | 78: std r12,8(r3) |
| 158 | or r7,r7,r6 | 166 | or r7,r7,r6 |
| 159 | 79: std r7,16(r3) | 167 | 79: std r7,16(r3) |
| 160 | 5: srd r12,r9,r11 | 168 | 5: sHd r12,r9,r11 |
| 161 | or r12,r8,r12 | 169 | or r12,r8,r12 |
| 162 | 80: std r12,24(r3) | 170 | 80: std r12,24(r3) |
| 163 | bne 6f | 171 | bne 6f |
| @@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
| 165 | blr | 173 | blr |
| 166 | 6: cmpwi cr1,r5,8 | 174 | 6: cmpwi cr1,r5,8 |
| 167 | addi r3,r3,32 | 175 | addi r3,r3,32 |
| 168 | sld r9,r9,r10 | 176 | sLd r9,r9,r10 |
| 169 | ble cr1,7f | 177 | ble cr1,7f |
| 170 | 34: ld r0,8(r4) | 178 | 34: ld r0,8(r4) |
| 171 | srd r7,r0,r11 | 179 | sHd r7,r0,r11 |
| 172 | or r9,r7,r9 | 180 | or r9,r7,r9 |
| 173 | 7: | 181 | 7: |
| 174 | bf cr7*4+1,1f | 182 | bf cr7*4+1,1f |
| 183 | #ifdef __BIG_ENDIAN__ | ||
| 175 | rotldi r9,r9,32 | 184 | rotldi r9,r9,32 |
| 185 | #endif | ||
| 176 | 94: stw r9,0(r3) | 186 | 94: stw r9,0(r3) |
| 187 | #ifdef __LITTLE_ENDIAN__ | ||
| 188 | rotrdi r9,r9,32 | ||
| 189 | #endif | ||
| 177 | addi r3,r3,4 | 190 | addi r3,r3,4 |
| 178 | 1: bf cr7*4+2,2f | 191 | 1: bf cr7*4+2,2f |
| 192 | #ifdef __BIG_ENDIAN__ | ||
| 179 | rotldi r9,r9,16 | 193 | rotldi r9,r9,16 |
| 194 | #endif | ||
| 180 | 95: sth r9,0(r3) | 195 | 95: sth r9,0(r3) |
| 196 | #ifdef __LITTLE_ENDIAN__ | ||
| 197 | rotrdi r9,r9,16 | ||
| 198 | #endif | ||
| 181 | addi r3,r3,2 | 199 | addi r3,r3,2 |
| 182 | 2: bf cr7*4+3,3f | 200 | 2: bf cr7*4+3,3f |
| 201 | #ifdef __BIG_ENDIAN__ | ||
| 183 | rotldi r9,r9,8 | 202 | rotldi r9,r9,8 |
| 203 | #endif | ||
| 184 | 96: stb r9,0(r3) | 204 | 96: stb r9,0(r3) |
| 205 | #ifdef __LITTLE_ENDIAN__ | ||
| 206 | rotrdi r9,r9,8 | ||
| 207 | #endif | ||
| 185 | 3: li r3,0 | 208 | 3: li r3,0 |
| 186 | blr | 209 | blr |
| 187 | 210 | ||
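
The sLd/sHd macros added to copyuser_64.S encode the direction a doubleword must be shifted so that its bytes move towards lower memory addresses: a left shift on big-endian, a right shift on little-endian. The stand-alone program below (plain C, not kernel code) demonstrates that relationship on whatever host it is compiled for; the test value and the helper name dump() are illustrative only.

/* Why sLd/sHd swap sld/srd between endiannesses: "shift towards the
 * low-numbered address" means moving bytes to lower memory addresses,
 * which is a left shift of the value on big-endian but a right shift
 * on little-endian. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void dump(const char *tag, uint64_t v)
{
	unsigned char b[8];
	int i;

	memcpy(b, &v, sizeof(v));	/* bytes in memory order */
	printf("%-24s", tag);
	for (i = 0; i < 8; i++)
		printf(" %02x", b[i]);
	printf("\n");
}

int main(void)
{
	uint64_t v = 0x0102030405060708ULL;
	int host_le = (*(unsigned char *)&v == 0x08);

	dump("original", v);
	/* Move every byte one position towards address 0: */
	dump("toward low addresses", host_le ? v >> 8 : v << 8);
	printf("host is %s-endian\n", host_le ? "little" : "big");
	return 0;
}

The same byte-order asymmetry is why the partial-store tail (stw/sth/stb) needs rotldi only on big-endian and a compensating rotrdi on little-endian in the hunk above.
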
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 02245cee7818..d7ddcee7feb8 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c | |||
| @@ -36,7 +36,6 @@ | |||
| 36 | #include "powernv.h" | 36 | #include "powernv.h" |
| 37 | #include "pci.h" | 37 | #include "pci.h" |
| 38 | 38 | ||
| 39 | static char *hub_diag = NULL; | ||
| 40 | static int ioda_eeh_nb_init = 0; | 39 | static int ioda_eeh_nb_init = 0; |
| 41 | 40 | ||
| 42 | static int ioda_eeh_event(struct notifier_block *nb, | 41 | static int ioda_eeh_event(struct notifier_block *nb, |
| @@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose) | |||
| 140 | ioda_eeh_nb_init = 1; | 139 | ioda_eeh_nb_init = 1; |
| 141 | } | 140 | } |
| 142 | 141 | ||
| 143 | /* We needn't HUB diag-data on PHB3 */ | ||
| 144 | if (phb->type == PNV_PHB_IODA1 && !hub_diag) { | ||
| 145 | hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO); | ||
| 146 | if (!hub_diag) { | ||
| 147 | pr_err("%s: Out of memory !\n", __func__); | ||
| 148 | return -ENOMEM; | ||
| 149 | } | ||
| 150 | } | ||
| 151 | |||
| 152 | #ifdef CONFIG_DEBUG_FS | 142 | #ifdef CONFIG_DEBUG_FS |
| 153 | if (phb->dbgfs) { | 143 | if (phb->dbgfs) { |
| 154 | debugfs_create_file("err_injct_outbound", 0600, | 144 | debugfs_create_file("err_injct_outbound", 0600, |
| @@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) | |||
| 633 | static void ioda_eeh_hub_diag(struct pci_controller *hose) | 623 | static void ioda_eeh_hub_diag(struct pci_controller *hose) |
| 634 | { | 624 | { |
| 635 | struct pnv_phb *phb = hose->private_data; | 625 | struct pnv_phb *phb = hose->private_data; |
| 636 | struct OpalIoP7IOCErrorData *data; | 626 | struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag; |
| 637 | long rc; | 627 | long rc; |
| 638 | 628 | ||
| 639 | data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag; | 629 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); |
| 640 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE); | ||
| 641 | if (rc != OPAL_SUCCESS) { | 630 | if (rc != OPAL_SUCCESS) { |
| 642 | pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", | 631 | pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", |
| 643 | __func__, phb->hub_id, rc); | 632 | __func__, phb->hub_id, rc); |
| @@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose) | |||
| 820 | struct OpalIoPhbErrorCommon *common; | 809 | struct OpalIoPhbErrorCommon *common; |
| 821 | long rc; | 810 | long rc; |
| 822 | 811 | ||
| 823 | common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; | 812 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, |
| 824 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE); | 813 | PNV_PCI_DIAG_BUF_SIZE); |
| 825 | if (rc != OPAL_SUCCESS) { | 814 | if (rc != OPAL_SUCCESS) { |
| 826 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", | 815 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", |
| 827 | __func__, hose->global_number, rc); | 816 | __func__, hose->global_number, rc); |
| 828 | return; | 817 | return; |
| 829 | } | 818 | } |
| 830 | 819 | ||
| 820 | common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; | ||
| 831 | switch (common->ioType) { | 821 | switch (common->ioType) { |
| 832 | case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: | 822 | case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: |
| 833 | ioda_eeh_p7ioc_phb_diag(hose, common); | 823 | ioda_eeh_p7ioc_phb_diag(hose, common); |
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index e7e59e4f9892..79d83cad3d67 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c | |||
| @@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1; | |||
| 24 | static u8 opal_lpc_inb(unsigned long port) | 24 | static u8 opal_lpc_inb(unsigned long port) |
| 25 | { | 25 | { |
| 26 | int64_t rc; | 26 | int64_t rc; |
| 27 | uint32_t data; | 27 | __be32 data; |
| 28 | 28 | ||
| 29 | if (opal_lpc_chip_id < 0 || port > 0xffff) | 29 | if (opal_lpc_chip_id < 0 || port > 0xffff) |
| 30 | return 0xff; | 30 | return 0xff; |
| 31 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); | 31 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); |
| 32 | return rc ? 0xff : data; | 32 | return rc ? 0xff : be32_to_cpu(data); |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | static __le16 __opal_lpc_inw(unsigned long port) | 35 | static __le16 __opal_lpc_inw(unsigned long port) |
| 36 | { | 36 | { |
| 37 | int64_t rc; | 37 | int64_t rc; |
| 38 | uint32_t data; | 38 | __be32 data; |
| 39 | 39 | ||
| 40 | if (opal_lpc_chip_id < 0 || port > 0xfffe) | 40 | if (opal_lpc_chip_id < 0 || port > 0xfffe) |
| 41 | return 0xffff; | 41 | return 0xffff; |
| 42 | if (port & 1) | 42 | if (port & 1) |
| 43 | return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); | 43 | return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); |
| 44 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); | 44 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); |
| 45 | return rc ? 0xffff : data; | 45 | return rc ? 0xffff : be32_to_cpu(data); |
| 46 | } | 46 | } |
| 47 | static u16 opal_lpc_inw(unsigned long port) | 47 | static u16 opal_lpc_inw(unsigned long port) |
| 48 | { | 48 | { |
| @@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port) | |||
| 52 | static __le32 __opal_lpc_inl(unsigned long port) | 52 | static __le32 __opal_lpc_inl(unsigned long port) |
| 53 | { | 53 | { |
| 54 | int64_t rc; | 54 | int64_t rc; |
| 55 | uint32_t data; | 55 | __be32 data; |
| 56 | 56 | ||
| 57 | if (opal_lpc_chip_id < 0 || port > 0xfffc) | 57 | if (opal_lpc_chip_id < 0 || port > 0xfffc) |
| 58 | return 0xffffffff; | 58 | return 0xffffffff; |
| @@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port) | |||
| 62 | (__le32)opal_lpc_inb(port + 2) << 8 | | 62 | (__le32)opal_lpc_inb(port + 2) << 8 | |
| 63 | opal_lpc_inb(port + 3); | 63 | opal_lpc_inb(port + 3); |
| 64 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); | 64 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); |
| 65 | return rc ? 0xffffffff : data; | 65 | return rc ? 0xffffffff : be32_to_cpu(data); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | static u32 opal_lpc_inl(unsigned long port) | 68 | static u32 opal_lpc_inl(unsigned long port) |
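
The opal-lpc.c and opal-xscom.c hunks above rest on the same point: OPAL hands the data back in big-endian byte order, so the local buffer is typed __be32/__be64 and converted with be32_to_cpu()/be64_to_cpu() before being returned to the caller. The self-contained sketch below illustrates why the conversion matters on a little-endian host; be32_to_cpu_portable() is a hypothetical open-coded stand-in for the kernel helper, used only to keep the example free of kernel headers.

/* A big-endian buffer read back as a native integer is wrong on a
 * little-endian host; rebuilding the value byte by byte (MSB first)
 * recovers the intended 0x12345678 on either endianness. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t be32_to_cpu_portable(uint32_t be)
{
	unsigned char b[4];

	memcpy(b, &be, 4);	/* bytes in memory order, MSB first on the wire */
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	/* Pretend the firmware wrote 0x12345678 into the buffer in
	 * big-endian byte order (most significant byte first). */
	unsigned char wire[4] = { 0x12, 0x34, 0x56, 0x78 };
	uint32_t raw;

	memcpy(&raw, wire, 4);
	printf("raw host interpretation: 0x%08x\n", raw);
	printf("after be32_to_cpu:       0x%08x\n", be32_to_cpu_portable(raw));
	return 0;
}
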
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4d99a8fd55ac..4fbf276ac99e 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c | |||
| @@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) | |||
| 96 | { | 96 | { |
| 97 | struct opal_scom_map *m = map; | 97 | struct opal_scom_map *m = map; |
| 98 | int64_t rc; | 98 | int64_t rc; |
| 99 | __be64 v; | ||
| 99 | 100 | ||
| 100 | reg = opal_scom_unmangle(reg); | 101 | reg = opal_scom_unmangle(reg); |
| 101 | rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); | 102 | rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); |
| 103 | *value = be64_to_cpu(v); | ||
| 102 | return opal_xscom_err_xlate(rc); | 104 | return opal_xscom_err_xlate(rc); |
| 103 | } | 105 | } |
| 104 | 106 | ||
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 911c24ef033e..1ed8d5f40f5a 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
| @@ -172,11 +172,13 @@ struct pnv_phb { | |||
| 172 | } ioda; | 172 | } ioda; |
| 173 | }; | 173 | }; |
| 174 | 174 | ||
| 175 | /* PHB status structure */ | 175 | /* PHB and hub status structure */ |
| 176 | union { | 176 | union { |
| 177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; | 177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; |
| 178 | struct OpalIoP7IOCPhbErrorData p7ioc; | 178 | struct OpalIoP7IOCPhbErrorData p7ioc; |
| 179 | struct OpalIoP7IOCErrorData hub_diag; | ||
| 179 | } diag; | 180 | } diag; |
| 181 | |||
| 180 | }; | 182 | }; |
| 181 | 183 | ||
| 182 | extern struct pci_ops pnv_pci_ops; | 184 | extern struct pci_ops pnv_pci_ops; |
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e738007eae64..c9fecf09b8fa 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c | |||
| @@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m) | |||
| 157 | { | 157 | { |
| 158 | struct hvcall_ppp_data ppp_data; | 158 | struct hvcall_ppp_data ppp_data; |
| 159 | struct device_node *root; | 159 | struct device_node *root; |
| 160 | const int *perf_level; | 160 | const __be32 *perf_level; |
| 161 | int rc; | 161 | int rc; |
| 162 | 162 | ||
| 163 | rc = h_get_ppp(&ppp_data); | 163 | rc = h_get_ppp(&ppp_data); |
| @@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m) | |||
| 201 | perf_level = of_get_property(root, | 201 | perf_level = of_get_property(root, |
| 202 | "ibm,partition-performance-parameters-level", | 202 | "ibm,partition-performance-parameters-level", |
| 203 | NULL); | 203 | NULL); |
| 204 | if (perf_level && (*perf_level >= 1)) { | 204 | if (perf_level && (be32_to_cpup(perf_level) >= 1)) { |
| 205 | seq_printf(m, | 205 | seq_printf(m, |
| 206 | "physical_procs_allocated_to_virtualization=%d\n", | 206 | "physical_procs_allocated_to_virtualization=%d\n", |
| 207 | ppp_data.phys_platform_procs); | 207 | ppp_data.phys_platform_procs); |
| @@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
| 435 | int partition_potential_processors; | 435 | int partition_potential_processors; |
| 436 | int partition_active_processors; | 436 | int partition_active_processors; |
| 437 | struct device_node *rtas_node; | 437 | struct device_node *rtas_node; |
| 438 | const int *lrdrp = NULL; | 438 | const __be32 *lrdrp = NULL; |
| 439 | 439 | ||
| 440 | rtas_node = of_find_node_by_path("/rtas"); | 440 | rtas_node = of_find_node_by_path("/rtas"); |
| 441 | if (rtas_node) | 441 | if (rtas_node) |
| @@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
| 444 | if (lrdrp == NULL) { | 444 | if (lrdrp == NULL) { |
| 445 | partition_potential_processors = vdso_data->processorCount; | 445 | partition_potential_processors = vdso_data->processorCount; |
| 446 | } else { | 446 | } else { |
| 447 | partition_potential_processors = *(lrdrp + 4); | 447 | partition_potential_processors = be32_to_cpup(lrdrp + 4); |
| 448 | } | 448 | } |
| 449 | of_node_put(rtas_node); | 449 | of_node_put(rtas_node); |
| 450 | 450 | ||
| @@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
| 654 | const char *model = ""; | 654 | const char *model = ""; |
| 655 | const char *system_id = ""; | 655 | const char *system_id = ""; |
| 656 | const char *tmp; | 656 | const char *tmp; |
| 657 | const unsigned int *lp_index_ptr; | 657 | const __be32 *lp_index_ptr; |
| 658 | unsigned int lp_index = 0; | 658 | unsigned int lp_index = 0; |
| 659 | 659 | ||
| 660 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); | 660 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); |
| @@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
| 670 | lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", | 670 | lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", |
| 671 | NULL); | 671 | NULL); |
| 672 | if (lp_index_ptr) | 672 | if (lp_index_ptr) |
| 673 | lp_index = *lp_index_ptr; | 673 | lp_index = be32_to_cpup(lp_index_ptr); |
| 674 | of_node_put(rootdn); | 674 | of_node_put(rootdn); |
| 675 | } | 675 | } |
| 676 | seq_printf(m, "serial_number=%s\n", system_id); | 676 | seq_printf(m, "serial_number=%s\n", system_id); |
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6d2f0abce6fa..0c882e83c4ce 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
| @@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | |||
| 130 | { | 130 | { |
| 131 | struct device_node *dn; | 131 | struct device_node *dn; |
| 132 | struct pci_dn *pdn; | 132 | struct pci_dn *pdn; |
| 133 | const u32 *req_msi; | 133 | const __be32 *p; |
| 134 | u32 req_msi; | ||
| 134 | 135 | ||
| 135 | pdn = pci_get_pdn(pdev); | 136 | pdn = pci_get_pdn(pdev); |
| 136 | if (!pdn) | 137 | if (!pdn) |
| @@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | |||
| 138 | 139 | ||
| 139 | dn = pdn->node; | 140 | dn = pdn->node; |
| 140 | 141 | ||
| 141 | req_msi = of_get_property(dn, prop_name, NULL); | 142 | p = of_get_property(dn, prop_name, NULL); |
| 142 | if (!req_msi) { | 143 | if (!p) { |
| 143 | pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); | 144 | pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); |
| 144 | return -ENOENT; | 145 | return -ENOENT; |
| 145 | } | 146 | } |
| 146 | 147 | ||
| 147 | if (*req_msi < nvec) { | 148 | req_msi = be32_to_cpup(p); |
| 149 | if (req_msi < nvec) { | ||
| 148 | pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); | 150 | pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); |
| 149 | 151 | ||
| 150 | if (*req_msi == 0) /* Be paranoid */ | 152 | if (req_msi == 0) /* Be paranoid */ |
| 151 | return -ENOSPC; | 153 | return -ENOSPC; |
| 152 | 154 | ||
| 153 | return *req_msi; | 155 | return req_msi; |
| 154 | } | 156 | } |
| 155 | 157 | ||
| 156 | return 0; | 158 | return 0; |
| @@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec) | |||
| 171 | static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | 173 | static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) |
| 172 | { | 174 | { |
| 173 | struct device_node *dn; | 175 | struct device_node *dn; |
| 174 | const u32 *p; | 176 | const __be32 *p; |
| 175 | 177 | ||
| 176 | dn = of_node_get(pci_device_to_OF_node(dev)); | 178 | dn = of_node_get(pci_device_to_OF_node(dev)); |
| 177 | while (dn) { | 179 | while (dn) { |
| @@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | |||
| 179 | if (p) { | 181 | if (p) { |
| 180 | pr_debug("rtas_msi: found prop on dn %s\n", | 182 | pr_debug("rtas_msi: found prop on dn %s\n", |
| 181 | dn->full_name); | 183 | dn->full_name); |
| 182 | *total = *p; | 184 | *total = be32_to_cpup(p); |
| 183 | return dn; | 185 | return dn; |
| 184 | } | 186 | } |
| 185 | 187 | ||
| @@ -232,13 +234,13 @@ struct msi_counts { | |||
| 232 | static void *count_non_bridge_devices(struct device_node *dn, void *data) | 234 | static void *count_non_bridge_devices(struct device_node *dn, void *data) |
| 233 | { | 235 | { |
| 234 | struct msi_counts *counts = data; | 236 | struct msi_counts *counts = data; |
| 235 | const u32 *p; | 237 | const __be32 *p; |
| 236 | u32 class; | 238 | u32 class; |
| 237 | 239 | ||
| 238 | pr_debug("rtas_msi: counting %s\n", dn->full_name); | 240 | pr_debug("rtas_msi: counting %s\n", dn->full_name); |
| 239 | 241 | ||
| 240 | p = of_get_property(dn, "class-code", NULL); | 242 | p = of_get_property(dn, "class-code", NULL); |
| 241 | class = p ? *p : 0; | 243 | class = p ? be32_to_cpup(p) : 0; |
| 242 | 244 | ||
| 243 | if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) | 245 | if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) |
| 244 | counts->num_devices++; | 246 | counts->num_devices++; |
| @@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data) | |||
| 249 | static void *count_spare_msis(struct device_node *dn, void *data) | 251 | static void *count_spare_msis(struct device_node *dn, void *data) |
| 250 | { | 252 | { |
| 251 | struct msi_counts *counts = data; | 253 | struct msi_counts *counts = data; |
| 252 | const u32 *p; | 254 | const __be32 *p; |
| 253 | int req; | 255 | int req; |
| 254 | 256 | ||
| 255 | if (dn == counts->requestor) | 257 | if (dn == counts->requestor) |
| @@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data) | |||
| 260 | req = 0; | 262 | req = 0; |
| 261 | p = of_get_property(dn, "ibm,req#msi", NULL); | 263 | p = of_get_property(dn, "ibm,req#msi", NULL); |
| 262 | if (p) | 264 | if (p) |
| 263 | req = *p; | 265 | req = be32_to_cpup(p); |
| 264 | 266 | ||
| 265 | p = of_get_property(dn, "ibm,req#msi-x", NULL); | 267 | p = of_get_property(dn, "ibm,req#msi-x", NULL); |
| 266 | if (p) | 268 | if (p) |
| 267 | req = max(req, (int)*p); | 269 | req = max(req, (int)be32_to_cpup(p)); |
| 268 | } | 270 | } |
| 269 | 271 | ||
| 270 | if (req < counts->quota) | 272 | if (req < counts->quota) |
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 7bfaf58d4664..d7096f2f7751 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
| @@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ | |||
| 43 | static DEFINE_SPINLOCK(nvram_lock); | 43 | static DEFINE_SPINLOCK(nvram_lock); |
| 44 | 44 | ||
| 45 | struct err_log_info { | 45 | struct err_log_info { |
| 46 | int error_type; | 46 | __be32 error_type; |
| 47 | unsigned int seq_num; | 47 | __be32 seq_num; |
| 48 | }; | 48 | }; |
| 49 | 49 | ||
| 50 | struct nvram_os_partition { | 50 | struct nvram_os_partition { |
| @@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = { | |||
| 79 | }; | 79 | }; |
| 80 | 80 | ||
| 81 | struct oops_log_info { | 81 | struct oops_log_info { |
| 82 | u16 version; | 82 | __be16 version; |
| 83 | u16 report_length; | 83 | __be16 report_length; |
| 84 | u64 timestamp; | 84 | __be64 timestamp; |
| 85 | } __attribute__((packed)); | 85 | } __attribute__((packed)); |
| 86 | 86 | ||
| 87 | static void oops_to_nvram(struct kmsg_dumper *dumper, | 87 | static void oops_to_nvram(struct kmsg_dumper *dumper, |
| @@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, | |||
| 291 | length = part->size; | 291 | length = part->size; |
| 292 | } | 292 | } |
| 293 | 293 | ||
| 294 | info.error_type = err_type; | 294 | info.error_type = cpu_to_be32(err_type); |
| 295 | info.seq_num = error_log_cnt; | 295 | info.seq_num = cpu_to_be32(error_log_cnt); |
| 296 | 296 | ||
| 297 | tmp_index = part->index; | 297 | tmp_index = part->index; |
| 298 | 298 | ||
| @@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff, | |||
| 364 | } | 364 | } |
| 365 | 365 | ||
| 366 | if (part->os_partition) { | 366 | if (part->os_partition) { |
| 367 | *error_log_cnt = info.seq_num; | 367 | *error_log_cnt = be32_to_cpu(info.seq_num); |
| 368 | *err_type = info.error_type; | 368 | *err_type = be32_to_cpu(info.error_type); |
| 369 | } | 369 | } |
| 370 | 370 | ||
| 371 | return 0; | 371 | return 0; |
| @@ -529,9 +529,9 @@ static int zip_oops(size_t text_len) | |||
| 529 | pr_err("nvram: logging uncompressed oops/panic report\n"); | 529 | pr_err("nvram: logging uncompressed oops/panic report\n"); |
| 530 | return -1; | 530 | return -1; |
| 531 | } | 531 | } |
| 532 | oops_hdr->version = OOPS_HDR_VERSION; | 532 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
| 533 | oops_hdr->report_length = (u16) zipped_len; | 533 | oops_hdr->report_length = cpu_to_be16(zipped_len); |
| 534 | oops_hdr->timestamp = get_seconds(); | 534 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
| 535 | return 0; | 535 | return 0; |
| 536 | } | 536 | } |
| 537 | 537 | ||
| @@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type, | |||
| 574 | clobbering_unread_rtas_event()) | 574 | clobbering_unread_rtas_event()) |
| 575 | return -1; | 575 | return -1; |
| 576 | 576 | ||
| 577 | oops_hdr->version = OOPS_HDR_VERSION; | 577 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
| 578 | oops_hdr->report_length = (u16) size; | 578 | oops_hdr->report_length = cpu_to_be16(size); |
| 579 | oops_hdr->timestamp = get_seconds(); | 579 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
| 580 | 580 | ||
| 581 | if (compressed) | 581 | if (compressed) |
| 582 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; | 582 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; |
| @@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, | |||
| 670 | size_t length, hdr_size; | 670 | size_t length, hdr_size; |
| 671 | 671 | ||
| 672 | oops_hdr = (struct oops_log_info *)buff; | 672 | oops_hdr = (struct oops_log_info *)buff; |
| 673 | if (oops_hdr->version < OOPS_HDR_VERSION) { | 673 | if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { |
| 674 | /* Old format oops header had 2-byte record size */ | 674 | /* Old format oops header had 2-byte record size */ |
| 675 | hdr_size = sizeof(u16); | 675 | hdr_size = sizeof(u16); |
| 676 | length = oops_hdr->version; | 676 | length = be16_to_cpu(oops_hdr->version); |
| 677 | time->tv_sec = 0; | 677 | time->tv_sec = 0; |
| 678 | time->tv_nsec = 0; | 678 | time->tv_nsec = 0; |
| 679 | } else { | 679 | } else { |
| 680 | hdr_size = sizeof(*oops_hdr); | 680 | hdr_size = sizeof(*oops_hdr); |
| 681 | length = oops_hdr->report_length; | 681 | length = be16_to_cpu(oops_hdr->report_length); |
| 682 | time->tv_sec = oops_hdr->timestamp; | 682 | time->tv_sec = be64_to_cpu(oops_hdr->timestamp); |
| 683 | time->tv_nsec = 0; | 683 | time->tv_nsec = 0; |
| 684 | } | 684 | } |
| 685 | *buf = kmalloc(length, GFP_KERNEL); | 685 | *buf = kmalloc(length, GFP_KERNEL); |
| @@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, | |||
| 889 | kmsg_dump_get_buffer(dumper, false, | 889 | kmsg_dump_get_buffer(dumper, false, |
| 890 | oops_data, oops_data_sz, &text_len); | 890 | oops_data, oops_data_sz, &text_len); |
| 891 | err_type = ERR_TYPE_KERNEL_PANIC; | 891 | err_type = ERR_TYPE_KERNEL_PANIC; |
| 892 | oops_hdr->version = OOPS_HDR_VERSION; | 892 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
| 893 | oops_hdr->report_length = (u16) text_len; | 893 | oops_hdr->report_length = cpu_to_be16(text_len); |
| 894 | oops_hdr->timestamp = get_seconds(); | 894 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
| 895 | } | 895 | } |
| 896 | 896 | ||
| 897 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, | 897 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, |
| 898 | (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, | 898 | (int) (sizeof(*oops_hdr) + text_len), err_type, |
| 899 | ++oops_count); | 899 | ++oops_count); |
| 900 | 900 | ||
| 901 | spin_unlock_irqrestore(&lock, flags); | 901 | spin_unlock_irqrestore(&lock, flags); |
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 5f93856cdf47..70670a2d9cf2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
| @@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
| 113 | { | 113 | { |
| 114 | struct device_node *dn, *pdn; | 114 | struct device_node *dn, *pdn; |
| 115 | struct pci_bus *bus; | 115 | struct pci_bus *bus; |
| 116 | const uint32_t *pcie_link_speed_stats; | 116 | const __be32 *pcie_link_speed_stats; |
| 117 | 117 | ||
| 118 | bus = bridge->bus; | 118 | bus = bridge->bus; |
| 119 | 119 | ||
| @@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
| 122 | return 0; | 122 | return 0; |
| 123 | 123 | ||
| 124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { | 124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { |
| 125 | pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, | 125 | pcie_link_speed_stats = of_get_property(pdn, |
| 126 | "ibm,pcie-link-speed-stats", NULL); | 126 | "ibm,pcie-link-speed-stats", NULL); |
| 127 | if (pcie_link_speed_stats) | 127 | if (pcie_link_speed_stats) |
| 128 | break; | 128 | break; |
| @@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
| 135 | return 0; | 135 | return 0; |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | switch (pcie_link_speed_stats[0]) { | 138 | switch (be32_to_cpup(pcie_link_speed_stats)) { |
| 139 | case 0x01: | 139 | case 0x01: |
| 140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; | 140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; |
| 141 | break; | 141 | break; |
| @@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
| 147 | break; | 147 | break; |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | switch (pcie_link_speed_stats[1]) { | 150 | switch (be32_to_cpup(pcie_link_speed_stats)) { |
| 151 | case 0x01: | 151 | case 0x01: |
| 152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; | 152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; |
| 153 | break; | 153 | break; |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 1e1a03d2d19f..e9f312532526 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
| @@ -135,7 +135,6 @@ config S390 | |||
| 135 | select HAVE_SYSCALL_TRACEPOINTS | 135 | select HAVE_SYSCALL_TRACEPOINTS |
| 136 | select HAVE_UID16 if 32BIT | 136 | select HAVE_UID16 if 32BIT |
| 137 | select HAVE_VIRT_CPU_ACCOUNTING | 137 | select HAVE_VIRT_CPU_ACCOUNTING |
| 138 | select INIT_ALL_POSSIBLE | ||
| 139 | select KTIME_SCALAR if 32BIT | 138 | select KTIME_SCALAR if 32BIT |
| 140 | select MODULES_USE_ELF_RELA | 139 | select MODULES_USE_ELF_RELA |
| 141 | select OLD_SIGACTION | 140 | select OLD_SIGACTION |
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index ac9bed8e103f..160779394096 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h | |||
| @@ -31,6 +31,7 @@ extern void smp_yield(void); | |||
| 31 | extern void smp_stop_cpu(void); | 31 | extern void smp_stop_cpu(void); |
| 32 | extern void smp_cpu_set_polarization(int cpu, int val); | 32 | extern void smp_cpu_set_polarization(int cpu, int val); |
| 33 | extern int smp_cpu_get_polarization(int cpu); | 33 | extern int smp_cpu_get_polarization(int cpu); |
| 34 | extern void smp_fill_possible_mask(void); | ||
| 34 | 35 | ||
| 35 | #else /* CONFIG_SMP */ | 36 | #else /* CONFIG_SMP */ |
| 36 | 37 | ||
| @@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; } | |||
| 50 | static inline void smp_yield_cpu(int cpu) { } | 51 | static inline void smp_yield_cpu(int cpu) { } |
| 51 | static inline void smp_yield(void) { } | 52 | static inline void smp_yield(void) { } |
| 52 | static inline void smp_stop_cpu(void) { } | 53 | static inline void smp_stop_cpu(void) { } |
| 54 | static inline void smp_fill_possible_mask(void) { } | ||
| 53 | 55 | ||
| 54 | #endif /* CONFIG_SMP */ | 56 | #endif /* CONFIG_SMP */ |
| 55 | 57 | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 4444875266ee..0f3d44ecbfc6 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
| @@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p) | |||
| 1023 | setup_vmcoreinfo(); | 1023 | setup_vmcoreinfo(); |
| 1024 | setup_lowcore(); | 1024 | setup_lowcore(); |
| 1025 | 1025 | ||
| 1026 | smp_fill_possible_mask(); | ||
| 1026 | cpu_init(); | 1027 | cpu_init(); |
| 1027 | s390_init_cpu_topology(); | 1028 | s390_init_cpu_topology(); |
| 1028 | 1029 | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index dc4a53465060..958704798f4a 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
| @@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
| 721 | return 0; | 721 | return 0; |
| 722 | } | 722 | } |
| 723 | 723 | ||
| 724 | static int __init setup_possible_cpus(char *s) | 724 | static unsigned int setup_possible_cpus __initdata; |
| 725 | { | ||
| 726 | int max, cpu; | ||
| 727 | 725 | ||
| 728 | if (kstrtoint(s, 0, &max) < 0) | 726 | static int __init _setup_possible_cpus(char *s) |
| 729 | return 0; | 727 | { |
| 730 | init_cpu_possible(cpumask_of(0)); | 728 | get_option(&s, &setup_possible_cpus); |
| 731 | for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) | ||
| 732 | set_cpu_possible(cpu, true); | ||
| 733 | return 0; | 729 | return 0; |
| 734 | } | 730 | } |
| 735 | early_param("possible_cpus", setup_possible_cpus); | 731 | early_param("possible_cpus", _setup_possible_cpus); |
| 736 | 732 | ||
| 737 | #ifdef CONFIG_HOTPLUG_CPU | 733 | #ifdef CONFIG_HOTPLUG_CPU |
| 738 | 734 | ||
| @@ -775,6 +771,17 @@ void __noreturn cpu_die(void) | |||
| 775 | 771 | ||
| 776 | #endif /* CONFIG_HOTPLUG_CPU */ | 772 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 777 | 773 | ||
| 774 | void __init smp_fill_possible_mask(void) | ||
| 775 | { | ||
| 776 | unsigned int possible, cpu; | ||
| 777 | |||
| 778 | possible = setup_possible_cpus; | ||
| 779 | if (!possible) | ||
| 780 | possible = MACHINE_IS_VM ? 64 : nr_cpu_ids; | ||
| 781 | for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) | ||
| 782 | set_cpu_possible(cpu, true); | ||
| 783 | } | ||
| 784 | |||
| 778 | void __init smp_prepare_cpus(unsigned int max_cpus) | 785 | void __init smp_prepare_cpus(unsigned int max_cpus) |
| 779 | { | 786 | { |
| 780 | /* request the 0x1201 emergency signal external interrupt */ | 787 | /* request the 0x1201 emergency signal external interrupt */ |
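
smp_fill_possible_mask() above replaces the old early_param loop and the INIT_ALL_POSSIBLE Kconfig selection: the parsed possible_cpus value (or a machine-dependent default) is turned into the possible-CPU mask once, from setup_arch(). A minimal user-space model of that calculation, with made-up constants standing in for nr_cpu_ids and MACHINE_IS_VM, might look like this:

/* Toy model of smp_fill_possible_mask(): an optional possible_cpus=
 * value (0 meaning "not given") is expanded into a possible-CPU bitmap
 * capped at nr_cpu_ids, with a default that depends on the machine. */
#include <stdio.h>

#define NR_CPU_IDS	8	/* stand-in for nr_cpu_ids */

int main(void)
{
	unsigned int setup_possible_cpus = 0;	/* as parsed by get_option() */
	int machine_is_vm = 1;			/* stand-in for MACHINE_IS_VM */
	unsigned long possible_mask = 0;
	unsigned int possible, cpu;

	possible = setup_possible_cpus;
	if (!possible)
		possible = machine_is_vm ? 64 : NR_CPU_IDS;
	for (cpu = 0; cpu < possible && cpu < NR_CPU_IDS; cpu++)
		possible_mask |= 1UL << cpu;

	printf("possible mask: 0x%lx\n", possible_mask);
	return 0;
}
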
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 800f064b0da7..069607209a30 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c | |||
| @@ -75,6 +75,7 @@ void zpci_event_availability(void *data) | |||
| 75 | if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) | 75 | if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) |
| 76 | break; | 76 | break; |
| 77 | zdev->state = ZPCI_FN_STATE_CONFIGURED; | 77 | zdev->state = ZPCI_FN_STATE_CONFIGURED; |
| 78 | zdev->fh = ccdf->fh; | ||
| 78 | ret = zpci_enable_device(zdev); | 79 | ret = zpci_enable_device(zdev); |
| 79 | if (ret) | 80 | if (ret) |
| 80 | break; | 81 | break; |
| @@ -101,6 +102,7 @@ void zpci_event_availability(void *data) | |||
| 101 | if (pdev) | 102 | if (pdev) |
| 102 | pci_stop_and_remove_bus_device(pdev); | 103 | pci_stop_and_remove_bus_device(pdev); |
| 103 | 104 | ||
| 105 | zdev->fh = ccdf->fh; | ||
| 104 | zpci_disable_device(zdev); | 106 | zpci_disable_device(zdev); |
| 105 | zdev->state = ZPCI_FN_STATE_STANDBY; | 107 | zdev->state = ZPCI_FN_STATE_STANDBY; |
| 106 | break; | 108 | break; |
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 2a0a596ebf67..d77f2f6c7ff0 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c | |||
| @@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic); | |||
| 20 | EXPORT_SYMBOL(copy_page); | 20 | EXPORT_SYMBOL(copy_page); |
| 21 | EXPORT_SYMBOL(__clear_user); | 21 | EXPORT_SYMBOL(__clear_user); |
| 22 | EXPORT_SYMBOL(empty_zero_page); | 22 | EXPORT_SYMBOL(empty_zero_page); |
| 23 | #ifdef CONFIG_FLATMEM | ||
| 24 | /* need in pfn_valid macro */ | ||
| 25 | EXPORT_SYMBOL(min_low_pfn); | ||
| 26 | EXPORT_SYMBOL(max_low_pfn); | ||
| 27 | #endif | ||
| 23 | 28 | ||
| 24 | #define DECLARE_EXPORT(name) \ | 29 | #define DECLARE_EXPORT(name) \ |
| 25 | extern void name(void);EXPORT_SYMBOL(name) | 30 | extern void name(void);EXPORT_SYMBOL(name) |
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 7b95f29e3174..3baff31e58cf 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile | |||
| @@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \ | |||
| 6 | checksum.o strlen.o div64.o div64-generic.o | 6 | checksum.o strlen.o div64.o div64-generic.o |
| 7 | 7 | ||
| 8 | # Extracted from libgcc | 8 | # Extracted from libgcc |
| 9 | lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ | 9 | obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ |
| 10 | ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ | 10 | ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ |
| 11 | udiv_qrnnd.o | 11 | udiv_qrnnd.o |
| 12 | 12 | ||
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 8358dc144959..0f9e94537eee 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
| @@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte) | |||
| 619 | } | 619 | } |
| 620 | 620 | ||
| 621 | #define pte_accessible pte_accessible | 621 | #define pte_accessible pte_accessible |
| 622 | static inline unsigned long pte_accessible(pte_t a) | 622 | static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) |
| 623 | { | 623 | { |
| 624 | return pte_val(a) & _PAGE_VALID; | 624 | return pte_val(a) & _PAGE_VALID; |
| 625 | } | 625 | } |
| @@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
| 847 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U | 847 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U |
| 848 | * and SUN4V pte layout, so this inline test is fine. | 848 | * and SUN4V pte layout, so this inline test is fine. |
| 849 | */ | 849 | */ |
| 850 | if (likely(mm != &init_mm) && pte_accessible(orig)) | 850 | if (likely(mm != &init_mm) && pte_accessible(mm, orig)) |
| 851 | tlb_batch_add(mm, addr, ptep, orig, fullmm); | 851 | tlb_batch_add(mm, addr, ptep, orig, fullmm); |
| 852 | } | 852 | } |
| 853 | 853 | ||
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index e562d3caee57..ad7e178337f1 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h | |||
| @@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long); | |||
| 262 | extern __must_check long strlen_user(const char __user *str); | 262 | extern __must_check long strlen_user(const char __user *str); |
| 263 | extern __must_check long strnlen_user(const char __user *str, long n); | 263 | extern __must_check long strnlen_user(const char __user *str, long n); |
| 264 | 264 | ||
| 265 | #define __copy_to_user_inatomic ___copy_to_user | 265 | #define __copy_to_user_inatomic __copy_to_user |
| 266 | #define __copy_from_user_inatomic ___copy_from_user | 266 | #define __copy_from_user_inatomic __copy_from_user |
| 267 | 267 | ||
| 268 | struct pt_regs; | 268 | struct pt_regs; |
| 269 | extern unsigned long compute_effective_address(struct pt_regs *, | 269 | extern unsigned long compute_effective_address(struct pt_regs *, |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 070ed141aac7..76663b019eb5 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
| @@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask) | |||
| 854 | return 1; | 854 | return 1; |
| 855 | 855 | ||
| 856 | #ifdef CONFIG_PCI | 856 | #ifdef CONFIG_PCI |
| 857 | if (dev->bus == &pci_bus_type) | 857 | if (dev_is_pci(dev)) |
| 858 | return pci64_dma_supported(to_pci_dev(dev), device_mask); | 858 | return pci64_dma_supported(to_pci_dev(dev), device_mask); |
| 859 | #endif | 859 | #endif |
| 860 | 860 | ||
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 2096468de9b2..e7e215dfa866 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
| @@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops); | |||
| 666 | */ | 666 | */ |
| 667 | int dma_supported(struct device *dev, u64 mask) | 667 | int dma_supported(struct device *dev, u64 mask) |
| 668 | { | 668 | { |
| 669 | #ifdef CONFIG_PCI | 669 | if (dev_is_pci(dev)) |
| 670 | if (dev->bus == &pci_bus_type) | ||
| 671 | return 1; | 670 | return 1; |
| 672 | #endif | 671 | |
| 673 | return 0; | 672 | return 0; |
| 674 | } | 673 | } |
| 675 | EXPORT_SYMBOL(dma_supported); | 674 | EXPORT_SYMBOL(dma_supported); |
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 60b19f50c80a..b45fe3fb4d2c 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include <linux/kgdb.h> | 6 | #include <linux/kgdb.h> |
| 7 | #include <linux/kdebug.h> | 7 | #include <linux/kdebug.h> |
| 8 | #include <linux/ftrace.h> | 8 | #include <linux/ftrace.h> |
| 9 | #include <linux/context_tracking.h> | ||
| 9 | 10 | ||
| 10 | #include <asm/cacheflush.h> | 11 | #include <asm/cacheflush.h> |
| 11 | #include <asm/kdebug.h> | 12 | #include <asm/kdebug.h> |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b66a5338231e..b085311dcd0e 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
| @@ -123,11 +123,12 @@ void smp_callin(void) | |||
| 123 | rmb(); | 123 | rmb(); |
| 124 | 124 | ||
| 125 | set_cpu_online(cpuid, true); | 125 | set_cpu_online(cpuid, true); |
| 126 | local_irq_enable(); | ||
| 127 | 126 | ||
| 128 | /* idle thread is expected to have preempt disabled */ | 127 | /* idle thread is expected to have preempt disabled */ |
| 129 | preempt_disable(); | 128 | preempt_disable(); |
| 130 | 129 | ||
| 130 | local_irq_enable(); | ||
| 131 | |||
| 131 | cpu_startup_entry(CPUHP_ONLINE); | 132 | cpu_startup_entry(CPUHP_ONLINE); |
| 132 | } | 133 | } |
| 133 | 134 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e903c71f7e69..0952ecd60eca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -26,6 +26,7 @@ config X86 | |||
| 26 | select HAVE_AOUT if X86_32 | 26 | select HAVE_AOUT if X86_32 |
| 27 | select HAVE_UNSTABLE_SCHED_CLOCK | 27 | select HAVE_UNSTABLE_SCHED_CLOCK |
| 28 | select ARCH_SUPPORTS_NUMA_BALANCING | 28 | select ARCH_SUPPORTS_NUMA_BALANCING |
| 29 | select ARCH_SUPPORTS_INT128 if X86_64 | ||
| 29 | select ARCH_WANTS_PROT_NUMA_PROT_NONE | 30 | select ARCH_WANTS_PROT_NUMA_PROT_NONE |
| 30 | select HAVE_IDE | 31 | select HAVE_IDE |
| 31 | select HAVE_OPROFILE | 32 | select HAVE_OPROFILE |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3d1999458709..bbc8b12fa443 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
| @@ -452,9 +452,16 @@ static inline int pte_present(pte_t a) | |||
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | #define pte_accessible pte_accessible | 454 | #define pte_accessible pte_accessible |
| 455 | static inline int pte_accessible(pte_t a) | 455 | static inline bool pte_accessible(struct mm_struct *mm, pte_t a) |
| 456 | { | 456 | { |
| 457 | return pte_flags(a) & _PAGE_PRESENT; | 457 | if (pte_flags(a) & _PAGE_PRESENT) |
| 458 | return true; | ||
| 459 | |||
| 460 | if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && | ||
| 461 | mm_tlb_flush_pending(mm)) | ||
| 462 | return true; | ||
| 463 | |||
| 464 | return false; | ||
| 458 | } | 465 | } |
| 459 | 466 | ||
| 460 | static inline int pte_hidden(pte_t pte) | 467 | static inline int pte_hidden(pte_t pte) |
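
The x86 pte_accessible() change above is the interesting half of the pte_accessible(mm, pte) signature change also visible in the sparc hunk: a PTE may still be cached in some TLB not only while it is present, but also while it has just been converted to a PROTNONE/NUMA entry and the mm's deferred TLB flush is still pending. The stand-alone sketch below models that decision with invented flag values and a toy struct mm:

/* Simplified model of the new pte_accessible(): treat the PTE as
 * possibly live in a TLB if it is present, or if it is a
 * PROTNONE/NUMA entry whose batched TLB flush has not completed. */
#include <stdbool.h>
#include <stdio.h>

#define _PAGE_PRESENT	0x1
#define _PAGE_PROTNONE	0x2
#define _PAGE_NUMA	0x4

struct mm {
	int tlb_flush_pending;	/* set while a deferred TLB flush is queued */
};

static bool pte_accessible(const struct mm *mm, unsigned int pte_flags)
{
	if (pte_flags & _PAGE_PRESENT)
		return true;
	if ((pte_flags & (_PAGE_PROTNONE | _PAGE_NUMA)) && mm->tlb_flush_pending)
		return true;
	return false;
}

int main(void)
{
	struct mm mm = { .tlb_flush_pending = 1 };

	printf("%d\n", pte_accessible(&mm, _PAGE_PRESENT));	/* 1 */
	printf("%d\n", pte_accessible(&mm, _PAGE_NUMA));	/* 1: flush pending */
	mm.tlb_flush_pending = 0;
	printf("%d\n", pte_accessible(&mm, _PAGE_NUMA));	/* 0 */
	return 0;
}
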
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 8729723636fd..c8b051933b1b 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h | |||
| @@ -8,6 +8,12 @@ | |||
| 8 | DECLARE_PER_CPU(int, __preempt_count); | 8 | DECLARE_PER_CPU(int, __preempt_count); |
| 9 | 9 | ||
| 10 | /* | 10 | /* |
| 11 | * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such | ||
| 12 | * that a decrement hitting 0 means we can and should reschedule. | ||
| 13 | */ | ||
| 14 | #define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) | ||
| 15 | |||
| 16 | /* | ||
| 11 | * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users | 17 | * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users |
| 12 | * that think a non-zero value indicates we cannot preempt. | 18 | * that think a non-zero value indicates we cannot preempt. |
| 13 | */ | 19 | */ |
| @@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val) | |||
| 74 | __this_cpu_add_4(__preempt_count, -val); | 80 | __this_cpu_add_4(__preempt_count, -val); |
| 75 | } | 81 | } |
| 76 | 82 | ||
| 83 | /* | ||
| 84 | * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule | ||
| 85 | * a decrement which hits zero means we have no preempt_count and should | ||
| 86 | * reschedule. | ||
| 87 | */ | ||
| 77 | static __always_inline bool __preempt_count_dec_and_test(void) | 88 | static __always_inline bool __preempt_count_dec_and_test(void) |
| 78 | { | 89 | { |
| 79 | GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); | 90 | GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); |
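
The PREEMPT_ENABLED definition and the comment added to __preempt_count_dec_and_test() describe the inverted PREEMPT_NEED_RESCHED convention: the bit stays set while no reschedule is needed, so a single decrement reaching zero means both that the preempt count dropped to zero and that a reschedule is pending. A compilable toy model of that convention (plain C, made-up bit value, no per-CPU storage) follows:

/* Inverted NEED_RESCHED: the bit is kept set while nothing is pending,
 * so preempt_enable()'s one decrement can test for zero to mean
 * "preemption allowed again AND a reschedule is wanted". */
#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u
#define PREEMPT_ENABLED		(0 + PREEMPT_NEED_RESCHED)

static unsigned int preempt_count = PREEMPT_ENABLED;	/* per-CPU in reality */

static void set_need_resched(void)   { preempt_count &= ~PREEMPT_NEED_RESCHED; }
static void clear_need_resched(void) { preempt_count |= PREEMPT_NEED_RESCHED; }

static void preempt_disable(void) { preempt_count++; }

/* Mirrors __preempt_count_dec_and_test(): true => call the scheduler. */
static bool preempt_enable_test(void) { return --preempt_count == 0; }

int main(void)
{
	preempt_disable();
	set_need_resched();	/* a wakeup marks a pending reschedule */
	printf("should resched: %d\n", preempt_enable_test());	/* prints 1 */

	clear_need_resched();
	preempt_disable();
	printf("should resched: %d\n", preempt_enable_test());	/* prints 0 */
	return 0;
}
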
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index dc1ec0dff939..ea04b342c026 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c) | |||
| 387 | set_cpu_cap(c, X86_FEATURE_PEBS); | 387 | set_cpu_cap(c, X86_FEATURE_PEBS); |
| 388 | } | 388 | } |
| 389 | 389 | ||
| 390 | if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) | 390 | if (c->x86 == 6 && cpu_has_clflush && |
| 391 | (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) | ||
| 391 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | 392 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); |
| 392 | 393 | ||
| 393 | #ifdef CONFIG_X86_64 | 394 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index fd00bb29425d..c1a861829d81 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
| @@ -262,11 +262,20 @@ struct cpu_hw_events { | |||
| 262 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | 262 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ |
| 263 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) | 263 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) |
| 264 | 264 | ||
| 265 | #define EVENT_CONSTRAINT_END \ | 265 | /* |
| 266 | EVENT_CONSTRAINT(0, 0, 0) | 266 | * We define the end marker as having a weight of -1 |
| 267 | * to enable blacklisting of events using a counter bitmask | ||
| 268 | * of zero and thus a weight of zero. | ||
| 269 | * The end marker has a weight that cannot possibly be | ||
| 270 | * obtained from counting the bits in the bitmask. | ||
| 271 | */ | ||
| 272 | #define EVENT_CONSTRAINT_END { .weight = -1 } | ||
| 267 | 273 | ||
| 274 | /* | ||
| 275 | * Check for end marker with weight == -1 | ||
| 276 | */ | ||
| 268 | #define for_each_event_constraint(e, c) \ | 277 | #define for_each_event_constraint(e, c) \ |
| 269 | for ((e) = (c); (e)->weight; (e)++) | 278 | for ((e) = (c); (e)->weight != -1; (e)++) |
| 270 | 279 | ||
| 271 | /* | 280 | /* |
| 272 | * Extra registers for specific events. | 281 | * Extra registers for specific events. |
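
The EVENT_CONSTRAINT_END change above switches the iteration sentinel from weight 0 to weight -1, so a real constraint with an empty counter bitmask (weight 0, i.e. a blacklisted event) no longer terminates the walk early. A self-contained sketch of the new sentinel behaviour, using a simplified struct event_constraint and a two-argument EVENT_CONSTRAINT() macro invented for the example:

/* With a -1 sentinel, a zero-weight (blacklisted) entry is an ordinary
 * list member and the loop only stops at the explicit end marker. */
#include <stdio.h>

struct event_constraint {
	unsigned long idxmsk;	/* counter bitmask */
	int weight;		/* number of bits set in idxmsk */
};

#define EVENT_CONSTRAINT(mask, w)	{ .idxmsk = (mask), .weight = (w) }
#define EVENT_CONSTRAINT_END		{ .weight = -1 }

#define for_each_event_constraint(e, c) \
	for ((e) = (c); (e)->weight != -1; (e)++)

static const struct event_constraint constraints[] = {
	EVENT_CONSTRAINT(0x3, 2),	/* may use counters 0 and 1 */
	EVENT_CONSTRAINT(0x0, 0),	/* blacklisted: no counter allowed */
	EVENT_CONSTRAINT(0xc, 2),	/* may use counters 2 and 3 */
	EVENT_CONSTRAINT_END,
};

int main(void)
{
	const struct event_constraint *e;

	for_each_event_constraint(e, constraints)
		printf("idxmsk=0x%lx weight=%d\n", e->idxmsk, e->weight);
	return 0;
}

With the old weight-0 sentinel the loop above would have stopped at the blacklisted entry and never visited the third constraint.
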
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index dec48bfaddb8..1673940cf9c3 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
| @@ -1350,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
| 1350 | return; | 1350 | return; |
| 1351 | } | 1351 | } |
| 1352 | 1352 | ||
| 1353 | if (!kvm_vcpu_is_bsp(apic->vcpu)) | ||
| 1354 | value &= ~MSR_IA32_APICBASE_BSP; | ||
| 1355 | vcpu->arch.apic_base = value; | ||
| 1356 | |||
| 1353 | /* update jump label if enable bit changes */ | 1357 | /* update jump label if enable bit changes */ |
| 1354 | if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { | 1358 | if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { |
| 1355 | if (value & MSR_IA32_APICBASE_ENABLE) | 1359 | if (value & MSR_IA32_APICBASE_ENABLE) |
| @@ -1359,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
| 1359 | recalculate_apic_map(vcpu->kvm); | 1363 | recalculate_apic_map(vcpu->kvm); |
| 1360 | } | 1364 | } |
| 1361 | 1365 | ||
| 1362 | if (!kvm_vcpu_is_bsp(apic->vcpu)) | ||
| 1363 | value &= ~MSR_IA32_APICBASE_BSP; | ||
| 1364 | |||
| 1365 | vcpu->arch.apic_base = value; | ||
| 1366 | if ((old_value ^ value) & X2APIC_ENABLE) { | 1366 | if ((old_value ^ value) & X2APIC_ENABLE) { |
| 1367 | if (value & X2APIC_ENABLE) { | 1367 | if (value & X2APIC_ENABLE) { |
| 1368 | u32 id = kvm_apic_id(apic); | 1368 | u32 id = kvm_apic_id(apic); |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b2fe1c252f35..da7837e1349d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, | |||
| 8283 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); | 8283 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); |
| 8284 | kvm_set_cr4(vcpu, vmcs12->host_cr4); | 8284 | kvm_set_cr4(vcpu, vmcs12->host_cr4); |
| 8285 | 8285 | ||
| 8286 | if (nested_cpu_has_ept(vmcs12)) | 8286 | nested_ept_uninit_mmu_context(vcpu); |
| 8287 | nested_ept_uninit_mmu_context(vcpu); | ||
| 8288 | 8287 | ||
| 8289 | kvm_set_cr3(vcpu, vmcs12->host_cr3); | 8288 | kvm_set_cr3(vcpu, vmcs12->host_cr3); |
| 8290 | kvm_mmu_reset_context(vcpu); | 8289 | kvm_mmu_reset_context(vcpu); |
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index dd74e46828c0..0596e8e0cc19 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
| @@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
| 83 | pte_t pte = gup_get_pte(ptep); | 83 | pte_t pte = gup_get_pte(ptep); |
| 84 | struct page *page; | 84 | struct page *page; |
| 85 | 85 | ||
| 86 | /* Similar to the PMD case, NUMA hinting must take slow path */ | ||
| 87 | if (pte_numa(pte)) { | ||
| 88 | pte_unmap(ptep); | ||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | |||
| 86 | if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { | 92 | if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { |
| 87 | pte_unmap(ptep); | 93 | pte_unmap(ptep); |
| 88 | return 0; | 94 | return 0; |
| @@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
| 167 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) | 173 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) |
| 168 | return 0; | 174 | return 0; |
| 169 | if (unlikely(pmd_large(pmd))) { | 175 | if (unlikely(pmd_large(pmd))) { |
| 176 | /* | ||
| 177 | * NUMA hinting faults need to be handled in the GUP | ||
| 178 | * slowpath for accounting purposes and so that they | ||
| 179 | * can be serialised against THP migration. | ||
| 180 | */ | ||
| 181 | if (pmd_numa(pmd)) | ||
| 182 | return 0; | ||
| 170 | if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) | 183 | if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) |
| 171 | return 0; | 184 | return 0; |
| 172 | } else { | 185 | } else { |
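The two gup.c hunks make the lockless fast path give up (return 0) whenever a PTE or PMD carries a NUMA hinting marker, so the access is retried through the slow path where the hinting fault can be accounted and serialised against THP migration. The sketch below is only a userspace model of that "bail out on a special bit" pattern, with invented flag names; it is not the kernel's page-table code.

    #include <stdio.h>

    #define PG_PRESENT      (1u << 0)
    #define PG_NUMA_HINT    (1u << 1)  /* invented stand-in for pte_numa()/pmd_numa() */

    /* model of the fast path: 1 = handled here, 0 = caller takes the slow path */
    static int fast_walk_one(unsigned int pte_flags)
    {
        /* NUMA hinting entries must go through the slow path so the
         * hinting fault is accounted and serialised against migration */
        if (pte_flags & PG_NUMA_HINT)
            return 0;

        if (!(pte_flags & PG_PRESENT))
            return 0;

        return 1;   /* page can be grabbed without falling back */
    }

    int main(void)
    {
        unsigned int entries[] = {
            PG_PRESENT,                     /* stays on the fast path */
            PG_PRESENT | PG_NUMA_HINT,      /* forced to the slow path */
        };

        for (unsigned int i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
            printf("entry %u -> %s\n", i,
                   fast_walk_one(entries[i]) ? "fast path" : "slow path");
        return 0;
    }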
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index ba6cf8e9aa0a..b91ce75bd35d 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c | |||
| @@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = { | |||
| 335 | void blk_mq_unregister_disk(struct gendisk *disk) | 335 | void blk_mq_unregister_disk(struct gendisk *disk) |
| 336 | { | 336 | { |
| 337 | struct request_queue *q = disk->queue; | 337 | struct request_queue *q = disk->queue; |
| 338 | struct blk_mq_hw_ctx *hctx; | ||
| 339 | struct blk_mq_ctx *ctx; | ||
| 340 | int i, j; | ||
| 341 | |||
| 342 | queue_for_each_hw_ctx(q, hctx, i) { | ||
| 343 | hctx_for_each_ctx(hctx, ctx, j) { | ||
| 344 | kobject_del(&ctx->kobj); | ||
| 345 | kobject_put(&ctx->kobj); | ||
| 346 | } | ||
| 347 | kobject_del(&hctx->kobj); | ||
| 348 | kobject_put(&hctx->kobj); | ||
| 349 | } | ||
| 338 | 350 | ||
| 339 | kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); | 351 | kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); |
| 340 | kobject_del(&q->mq_kobj); | 352 | kobject_del(&q->mq_kobj); |
| 353 | kobject_put(&q->mq_kobj); | ||
| 341 | 354 | ||
| 342 | kobject_put(&disk_to_dev(disk)->kobj); | 355 | kobject_put(&disk_to_dev(disk)->kobj); |
| 343 | } | 356 | } |
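blk_mq_unregister_disk() now walks every hardware context and its software contexts, deleting and dropping each kobject, and also drops the reference on q->mq_kobj that registration took. The fragment below is a small userspace model of that teardown order, children before parent, with a made-up refcounted type standing in for struct kobject.

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        const char *name;
        int refs;
    };

    static struct node *node_new(const char *name)
    {
        struct node *n = malloc(sizeof(*n));

        n->name = name;
        n->refs = 1;
        return n;
    }

    static void node_put(struct node *n)
    {
        if (--n->refs == 0) {
            printf("released %s\n", n->name);
            free(n);
        }
    }

    int main(void)
    {
        struct node *parent = node_new("mq");
        struct node *child = node_new("hctx0/cpu0");

        /* children first: unlink ("del"), then drop the registration ref */
        printf("unlink %s\n", child->name);
        node_put(child);

        /* finally the parent, including the put the hunk adds on q->mq_kobj */
        printf("unlink %s\n", parent->name);
        node_put(parent);
        return 0;
    }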
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 5d9248526d78..4770de5707b9 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
| @@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig" | |||
| 348 | config ACPI_EXTLOG | 348 | config ACPI_EXTLOG |
| 349 | tristate "Extended Error Log support" | 349 | tristate "Extended Error Log support" |
| 350 | depends on X86_MCE && X86_LOCAL_APIC | 350 | depends on X86_MCE && X86_LOCAL_APIC |
| 351 | select EFI | ||
| 352 | select UEFI_CPER | 351 | select UEFI_CPER |
| 353 | default n | 352 | default n |
| 354 | help | 353 | help |
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 8711e3797165..3c2e4aa529c4 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c | |||
| @@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev) | |||
| 207 | goto end; | 207 | goto end; |
| 208 | 208 | ||
| 209 | result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), | 209 | result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), |
| 210 | ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac); | 210 | ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac); |
| 211 | if (result) { | 211 | if (result) { |
| 212 | power_supply_unregister(&ac->charger); | 212 | power_supply_unregister(&ac->charger); |
| 213 | goto end; | 213 | goto end; |
| @@ -255,7 +255,7 @@ static int acpi_ac_remove(struct platform_device *pdev) | |||
| 255 | return -EINVAL; | 255 | return -EINVAL; |
| 256 | 256 | ||
| 257 | acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), | 257 | acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), |
| 258 | ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler); | 258 | ACPI_ALL_NOTIFY, acpi_ac_notify_handler); |
| 259 | 259 | ||
| 260 | ac = platform_get_drvdata(pdev); | 260 | ac = platform_get_drvdata(pdev); |
| 261 | if (ac->charger.dev) | 261 | if (ac->charger.dev) |
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 6745fe137b9e..e60390597372 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
| @@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { | |||
| 162 | { "80860F14", (unsigned long)&byt_sdio_dev_desc }, | 162 | { "80860F14", (unsigned long)&byt_sdio_dev_desc }, |
| 163 | { "80860F41", (unsigned long)&byt_i2c_dev_desc }, | 163 | { "80860F41", (unsigned long)&byt_i2c_dev_desc }, |
| 164 | { "INT33B2", }, | 164 | { "INT33B2", }, |
| 165 | { "INT33FC", }, | ||
| 165 | 166 | ||
| 166 | { "INT3430", (unsigned long)&lpt_dev_desc }, | 167 | { "INT3430", (unsigned long)&lpt_dev_desc }, |
| 167 | { "INT3431", (unsigned long)&lpt_dev_desc }, | 168 | { "INT3431", (unsigned long)&lpt_dev_desc }, |
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 786294bb682c..3650b2183227 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig | |||
| @@ -2,7 +2,6 @@ config ACPI_APEI | |||
| 2 | bool "ACPI Platform Error Interface (APEI)" | 2 | bool "ACPI Platform Error Interface (APEI)" |
| 3 | select MISC_FILESYSTEMS | 3 | select MISC_FILESYSTEMS |
| 4 | select PSTORE | 4 | select PSTORE |
| 5 | select EFI | ||
| 6 | select UEFI_CPER | 5 | select UEFI_CPER |
| 7 | depends on X86 | 6 | depends on X86 |
| 8 | help | 7 | help |
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 26311f23c824..cb1d557fc22c 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c | |||
| @@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count, | |||
| 942 | static struct pstore_info erst_info = { | 942 | static struct pstore_info erst_info = { |
| 943 | .owner = THIS_MODULE, | 943 | .owner = THIS_MODULE, |
| 944 | .name = "erst", | 944 | .name = "erst", |
| 945 | .flags = PSTORE_FLAGS_FRAGILE, | ||
| 945 | .open = erst_open_pstore, | 946 | .open = erst_open_pstore, |
| 946 | .close = erst_close_pstore, | 947 | .close = erst_close_pstore, |
| 947 | .read = erst_reader, | 948 | .read = erst_reader, |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index fbf1aceda8b8..5876a49dfd38 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
| @@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>"); | |||
| 62 | MODULE_DESCRIPTION("ACPI Battery Driver"); | 62 | MODULE_DESCRIPTION("ACPI Battery Driver"); |
| 63 | MODULE_LICENSE("GPL"); | 63 | MODULE_LICENSE("GPL"); |
| 64 | 64 | ||
| 65 | static int battery_bix_broken_package; | ||
| 65 | static unsigned int cache_time = 1000; | 66 | static unsigned int cache_time = 1000; |
| 66 | module_param(cache_time, uint, 0644); | 67 | module_param(cache_time, uint, 0644); |
| 67 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); | 68 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); |
| @@ -416,7 +417,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery) | |||
| 416 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name)); | 417 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name)); |
| 417 | return -ENODEV; | 418 | return -ENODEV; |
| 418 | } | 419 | } |
| 419 | if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)) | 420 | |
| 421 | if (battery_bix_broken_package) | ||
| 422 | result = extract_package(battery, buffer.pointer, | ||
| 423 | extended_info_offsets + 1, | ||
| 424 | ARRAY_SIZE(extended_info_offsets) - 1); | ||
| 425 | else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)) | ||
| 420 | result = extract_package(battery, buffer.pointer, | 426 | result = extract_package(battery, buffer.pointer, |
| 421 | extended_info_offsets, | 427 | extended_info_offsets, |
| 422 | ARRAY_SIZE(extended_info_offsets)); | 428 | ARRAY_SIZE(extended_info_offsets)); |
| @@ -754,6 +760,17 @@ static int battery_notify(struct notifier_block *nb, | |||
| 754 | return 0; | 760 | return 0; |
| 755 | } | 761 | } |
| 756 | 762 | ||
| 763 | static struct dmi_system_id bat_dmi_table[] = { | ||
| 764 | { | ||
| 765 | .ident = "NEC LZ750/LS", | ||
| 766 | .matches = { | ||
| 767 | DMI_MATCH(DMI_SYS_VENDOR, "NEC"), | ||
| 768 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"), | ||
| 769 | }, | ||
| 770 | }, | ||
| 771 | {}, | ||
| 772 | }; | ||
| 773 | |||
| 757 | static int acpi_battery_add(struct acpi_device *device) | 774 | static int acpi_battery_add(struct acpi_device *device) |
| 758 | { | 775 | { |
| 759 | int result = 0; | 776 | int result = 0; |
| @@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) | |||
| 846 | { | 863 | { |
| 847 | if (acpi_disabled) | 864 | if (acpi_disabled) |
| 848 | return; | 865 | return; |
| 866 | |||
| 867 | if (dmi_check_system(bat_dmi_table)) | ||
| 868 | battery_bix_broken_package = 1; | ||
| 849 | acpi_bus_register_driver(&acpi_battery_driver); | 869 | acpi_bus_register_driver(&acpi_battery_driver); |
| 850 | } | 870 | } |
| 851 | 871 | ||
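The battery.c change adds a DMI quirk: on the listed NEC machine the _BIX package apparently omits its leading revision member, so extract_package() is pointed at extended_info_offsets + 1 with one fewer entry and the remaining fields line up again. Below is a tiny userspace illustration of that pointer-plus-size trick on an invented offset table; the field names are illustrative only.

    #include <stdio.h>

    #define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

    struct offset { const char *name; };

    /* stand-in for extended_info_offsets[]: the first entry describes the
     * member some firmware does not emit at all */
    static const struct offset info_offsets[] = {
        { "revision" },
        { "power_unit" },
        { "design_capacity" },
        { "full_charge_capacity" },
    };

    static void extract(const struct offset *offs, int num)
    {
        for (int i = 0; i < num; i++)
            printf("field %d -> %s\n", i, offs[i].name);
    }

    int main(void)
    {
        int broken_package = 1; /* set when the DMI quirk matches */

        if (broken_package)
            /* skip the first descriptor so the shorter package still lines up */
            extract(info_offsets + 1, ARRAY_SIZE(info_offsets) - 1);
        else
            extract(info_offsets, ARRAY_SIZE(info_offsets));
        return 0;
    }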
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index bba9b72e25f8..0710004055c8 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data) | |||
| 156 | } | 156 | } |
| 157 | EXPORT_SYMBOL(acpi_bus_get_private_data); | 157 | EXPORT_SYMBOL(acpi_bus_get_private_data); |
| 158 | 158 | ||
| 159 | void acpi_bus_no_hotplug(acpi_handle handle) | ||
| 160 | { | ||
| 161 | struct acpi_device *adev = NULL; | ||
| 162 | |||
| 163 | acpi_bus_get_device(handle, &adev); | ||
| 164 | if (adev) | ||
| 165 | adev->flags.no_hotplug = true; | ||
| 166 | } | ||
| 167 | EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug); | ||
| 168 | |||
| 159 | static void acpi_print_osc_error(acpi_handle handle, | 169 | static void acpi_print_osc_error(acpi_handle handle, |
| 160 | struct acpi_osc_context *context, char *error) | 170 | struct acpi_osc_context *context, char *error) |
| 161 | { | 171 | { |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 14f1e9506338..e3a92a6da39a 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
| @@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
| 427 | .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ | 427 | .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ |
| 428 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125), | 428 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125), |
| 429 | .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ | 429 | .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ |
| 430 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178, | ||
| 431 | PCI_VENDOR_ID_MARVELL_EXT, 0x9170), | ||
| 432 | .driver_data = board_ahci_yes_fbs }, /* 88se9170 */ | ||
| 430 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), | 433 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), |
| 431 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ | 434 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ |
| 432 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), | 435 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), |
| @@ -1238,15 +1241,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1238 | if (rc) | 1241 | if (rc) |
| 1239 | return rc; | 1242 | return rc; |
| 1240 | 1243 | ||
| 1241 | /* AHCI controllers often implement SFF compatible interface. | ||
| 1242 | * Grab all PCI BARs just in case. | ||
| 1243 | */ | ||
| 1244 | rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); | ||
| 1245 | if (rc == -EBUSY) | ||
| 1246 | pcim_pin_device(pdev); | ||
| 1247 | if (rc) | ||
| 1248 | return rc; | ||
| 1249 | |||
| 1250 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 1244 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
| 1251 | (pdev->device == 0x2652 || pdev->device == 0x2653)) { | 1245 | (pdev->device == 0x2652 || pdev->device == 0x2653)) { |
| 1252 | u8 map; | 1246 | u8 map; |
| @@ -1263,6 +1257,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1263 | } | 1257 | } |
| 1264 | } | 1258 | } |
| 1265 | 1259 | ||
| 1260 | /* AHCI controllers often implement SFF compatible interface. | ||
| 1261 | * Grab all PCI BARs just in case. | ||
| 1262 | */ | ||
| 1263 | rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); | ||
| 1264 | if (rc == -EBUSY) | ||
| 1265 | pcim_pin_device(pdev); | ||
| 1266 | if (rc) | ||
| 1267 | return rc; | ||
| 1268 | |||
| 1266 | hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); | 1269 | hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); |
| 1267 | if (!hpriv) | 1270 | if (!hpriv) |
| 1268 | return -ENOMEM; | 1271 | return -ENOMEM; |
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index ae2d73fe321e..3e23e9941dad 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c | |||
| @@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio) | |||
| 113 | /* | 113 | /* |
| 114 | * set PHY Paremeters, two steps to configure the GPR13, | 114 | * set PHY Paremeters, two steps to configure the GPR13, |
| 115 | * one write for rest of parameters, mask of first write | 115 | * one write for rest of parameters, mask of first write |
| 116 | * is 0x07fffffd, and the other one write for setting | 116 | * is 0x07ffffff, and the other one write for setting |
| 117 | * the mpll_clk_en. | 117 | * the mpll_clk_en. |
| 118 | */ | 118 | */ |
| 119 | regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK | 119 | regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
| @@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio) | |||
| 124 | | IMX6Q_GPR13_SATA_TX_ATTEN_MASK | 124 | | IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
| 125 | | IMX6Q_GPR13_SATA_TX_BOOST_MASK | 125 | | IMX6Q_GPR13_SATA_TX_BOOST_MASK |
| 126 | | IMX6Q_GPR13_SATA_TX_LVL_MASK | 126 | | IMX6Q_GPR13_SATA_TX_LVL_MASK |
| 127 | | IMX6Q_GPR13_SATA_MPLL_CLK_EN | ||
| 127 | | IMX6Q_GPR13_SATA_TX_EDGE_RATE | 128 | | IMX6Q_GPR13_SATA_TX_EDGE_RATE |
| 128 | , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB | 129 | , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
| 129 | | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M | 130 | | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 75b93678bbcd..1393a5890ed5 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev, | |||
| 2149 | "failed to get NCQ Send/Recv Log Emask 0x%x\n", | 2149 | "failed to get NCQ Send/Recv Log Emask 0x%x\n", |
| 2150 | err_mask); | 2150 | err_mask); |
| 2151 | } else { | 2151 | } else { |
| 2152 | u8 *cmds = dev->ncq_send_recv_cmds; | ||
| 2153 | |||
| 2152 | dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; | 2154 | dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; |
| 2153 | memcpy(dev->ncq_send_recv_cmds, ap->sector_buf, | 2155 | memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); |
| 2154 | ATA_LOG_NCQ_SEND_RECV_SIZE); | 2156 | |
| 2157 | if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { | ||
| 2158 | ata_dev_dbg(dev, "disabling queued TRIM support\n"); | ||
| 2159 | cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= | ||
| 2160 | ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; | ||
| 2161 | } | ||
| 2155 | } | 2162 | } |
| 2156 | } | 2163 | } |
| 2157 | 2164 | ||
| @@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4156 | { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | | 4163 | { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | |
| 4157 | ATA_HORKAGE_FIRMWARE_WARN }, | 4164 | ATA_HORKAGE_FIRMWARE_WARN }, |
| 4158 | 4165 | ||
| 4166 | /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ | ||
| 4167 | { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, | ||
| 4168 | |||
| 4159 | /* Blacklist entries taken from Silicon Image 3124/3132 | 4169 | /* Blacklist entries taken from Silicon Image 3124/3132 |
| 4160 | Windows driver .inf file - also several Linux problem reports */ | 4170 | Windows driver .inf file - also several Linux problem reports */ |
| 4161 | { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, | 4171 | { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, |
| @@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4202 | { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, | 4212 | { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, |
| 4203 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, | 4213 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, |
| 4204 | 4214 | ||
| 4215 | /* devices that don't properly handle queued TRIM commands */ | ||
| 4216 | { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | ||
| 4217 | { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | ||
| 4218 | |||
| 4205 | /* End Marker */ | 4219 | /* End Marker */ |
| 4206 | { } | 4220 | { } |
| 4207 | }; | 4221 | }; |
| @@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur, | |||
| 6519 | { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, | 6533 | { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, |
| 6520 | { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, | 6534 | { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, |
| 6521 | { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, | 6535 | { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, |
| 6536 | { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, | ||
| 6522 | }; | 6537 | }; |
| 6523 | char *start = *cur, *p = *cur; | 6538 | char *start = *cur, *p = *cur; |
| 6524 | char *id, *val, *endp; | 6539 | char *id, *val, *endp; |
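The libata-core.c hunks above do two related things: when a device is marked ATA_HORKAGE_NO_NCQ_TRIM, the DSM TRIM bit is masked out of the copied NCQ Send/Recv log so queued TRIM is never issued, and the blacklist gains wildcard entries for the affected SSD models. The program below only demonstrates which model strings the new patterns are meant to cover, using fnmatch(3) as a stand-in; libata does the matching with its own in-kernel helper, and the model strings here are examples, not an exhaustive list.

    #include <fnmatch.h>
    #include <stdio.h>

    int main(void)
    {
        /* patterns taken from the new blacklist entries above */
        const char *patterns[] = { "Micron_M500*", "Crucial_CT???M500SSD1" };
        /* illustrative model strings */
        const char *models[] = {
            "Micron_M500_MTFDDAK240MAV",
            "Crucial_CT240M500SSD1",     /* "???" matches the 3-char size field */
            "Crucial_CT1024M500SSD1",    /* 4 chars, so the second pattern misses */
            "ST1000LM024 HN-M101MBB",
        };

        for (unsigned int p = 0; p < sizeof(patterns) / sizeof(patterns[0]); p++)
            for (unsigned int m = 0; m < sizeof(models) / sizeof(models[0]); m++)
                printf("%-22s vs %-26s -> %s\n", patterns[p], models[m],
                       fnmatch(patterns[p], models[m], 0) == 0 ?
                       "match" : "no match");
        return 0;
    }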
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index ab58556d347c..377eb889f555 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
| @@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work) | |||
| 3872 | return; | 3872 | return; |
| 3873 | } | 3873 | } |
| 3874 | 3874 | ||
| 3875 | /* | ||
| 3876 | * XXX - UGLY HACK | ||
| 3877 | * | ||
| 3878 | * The block layer suspend/resume path is fundamentally broken due | ||
| 3879 | * to freezable kthreads and workqueue and may deadlock if a block | ||
| 3880 | * device gets removed while resume is in progress. I don't know | ||
| 3881 | * what the solution is short of removing freezable kthreads and | ||
| 3882 | * workqueues altogether. | ||
| 3883 | * | ||
| 3884 | * The following is an ugly hack to avoid kicking off device | ||
| 3885 | * removal while freezer is active. This is a joke but does avoid | ||
| 3886 | * this particular deadlock scenario. | ||
| 3887 | * | ||
| 3888 | * https://bugzilla.kernel.org/show_bug.cgi?id=62801 | ||
| 3889 | * http://marc.info/?l=linux-kernel&m=138695698516487 | ||
| 3890 | */ | ||
| 3891 | #ifdef CONFIG_FREEZER | ||
| 3892 | while (pm_freezing) | ||
| 3893 | msleep(10); | ||
| 3894 | #endif | ||
| 3895 | |||
| 3875 | DPRINTK("ENTER\n"); | 3896 | DPRINTK("ENTER\n"); |
| 3876 | mutex_lock(&ap->scsi_scan_mutex); | 3897 | mutex_lock(&ap->scsi_scan_mutex); |
| 3877 | 3898 | ||
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index fe3ca0989b14..1ad2f62d34b9 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c | |||
| @@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = { | |||
| 83 | .id_table = sis_pci_tbl, | 83 | .id_table = sis_pci_tbl, |
| 84 | .probe = sis_init_one, | 84 | .probe = sis_init_one, |
| 85 | .remove = ata_pci_remove_one, | 85 | .remove = ata_pci_remove_one, |
| 86 | #ifdef CONFIG_PM | ||
| 87 | .suspend = ata_pci_device_suspend, | ||
| 88 | .resume = ata_pci_device_resume, | ||
| 89 | #endif | ||
| 86 | }; | 90 | }; |
| 87 | 91 | ||
| 88 | static struct scsi_host_template sis_sht = { | 92 | static struct scsi_host_template sis_sht = { |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index f370fc13aea5..83a598ebb65a 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
| 2 | |||
| 2 | #include <linux/moduleparam.h> | 3 | #include <linux/moduleparam.h> |
| 3 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
| 4 | #include <linux/fs.h> | 5 | #include <linux/fs.h> |
| @@ -65,7 +66,7 @@ enum { | |||
| 65 | NULL_Q_MQ = 2, | 66 | NULL_Q_MQ = 2, |
| 66 | }; | 67 | }; |
| 67 | 68 | ||
| 68 | static int submit_queues = 1; | 69 | static int submit_queues; |
| 69 | module_param(submit_queues, int, S_IRUGO); | 70 | module_param(submit_queues, int, S_IRUGO); |
| 70 | MODULE_PARM_DESC(submit_queues, "Number of submission queues"); | 71 | MODULE_PARM_DESC(submit_queues, "Number of submission queues"); |
| 71 | 72 | ||
| @@ -101,9 +102,9 @@ static int hw_queue_depth = 64; | |||
| 101 | module_param(hw_queue_depth, int, S_IRUGO); | 102 | module_param(hw_queue_depth, int, S_IRUGO); |
| 102 | MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); | 103 | MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); |
| 103 | 104 | ||
| 104 | static bool use_per_node_hctx = true; | 105 | static bool use_per_node_hctx = false; |
| 105 | module_param(use_per_node_hctx, bool, S_IRUGO); | 106 | module_param(use_per_node_hctx, bool, S_IRUGO); |
| 106 | MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true"); | 107 | MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); |
| 107 | 108 | ||
| 108 | static void put_tag(struct nullb_queue *nq, unsigned int tag) | 109 | static void put_tag(struct nullb_queue *nq, unsigned int tag) |
| 109 | { | 110 | { |
| @@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) | |||
| 346 | 347 | ||
| 347 | static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) | 348 | static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) |
| 348 | { | 349 | { |
| 349 | return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, | 350 | int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); |
| 350 | hctx_index); | 351 | int tip = (reg->nr_hw_queues % nr_online_nodes); |
| 352 | int node = 0, i, n; | ||
| 353 | |||
| 354 | /* | ||
| 355 | * Split submit queues evenly wrt to the number of nodes. If uneven, | ||
| 356 | * fill the first buckets with one extra, until the rest is filled with | ||
| 357 | * no extra. | ||
| 358 | */ | ||
| 359 | for (i = 0, n = 1; i < hctx_index; i++, n++) { | ||
| 360 | if (n % b_size == 0) { | ||
| 361 | n = 0; | ||
| 362 | node++; | ||
| 363 | |||
| 364 | tip--; | ||
| 365 | if (!tip) | ||
| 366 | b_size = reg->nr_hw_queues / nr_online_nodes; | ||
| 367 | } | ||
| 368 | } | ||
| 369 | |||
| 370 | /* | ||
| 371 | * A node might not be online, therefore map the relative node id to the | ||
| 372 | * real node id. | ||
| 373 | */ | ||
| 374 | for_each_online_node(n) { | ||
| 375 | if (!node) | ||
| 376 | break; | ||
| 377 | node--; | ||
| 378 | } | ||
| 379 | |||
| 380 | return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); | ||
| 351 | } | 381 | } |
| 352 | 382 | ||
| 353 | static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) | 383 | static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) |
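The new null_alloc_hctx() spreads the hardware contexts across NUMA nodes: queues are split into buckets of DIV_ROUND_UP(nr_hw_queues, nr_online_nodes), and once the remainder is used up the bucket size drops to the even share, so the first few nodes take one extra queue. The program below re-implements just that arithmetic in userspace with made-up queue and node counts, to show which relative node each queue index lands on; mapping the relative index to a real online node is left out.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* reproduce the bucket arithmetic from null_alloc_hctx() */
    static int node_for_queue(int index, int nr_queues, int nr_nodes)
    {
        int b_size = DIV_ROUND_UP(nr_queues, nr_nodes);
        int tip = nr_queues % nr_nodes;
        int node = 0, i, n;

        for (i = 0, n = 1; i < index; i++, n++) {
            if (n % b_size == 0) {
                n = 0;
                node++;
                tip--;
                if (!tip)
                    b_size = nr_queues / nr_nodes;
            }
        }
        return node;    /* relative node index */
    }

    int main(void)
    {
        int nr_queues = 7, nr_nodes = 3;    /* example sizes only */

        for (int q = 0; q < nr_queues; q++)
            printf("hw queue %d -> node %d\n", q,
                   node_for_queue(q, nr_queues, nr_nodes));
        return 0;   /* prints 3 queues on node 0, then 2 on node 1, 2 on node 2 */
    }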
| @@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) | |||
| 355 | kfree(hctx); | 385 | kfree(hctx); |
| 356 | } | 386 | } |
| 357 | 387 | ||
| 388 | static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) | ||
| 389 | { | ||
| 390 | BUG_ON(!nullb); | ||
| 391 | BUG_ON(!nq); | ||
| 392 | |||
| 393 | init_waitqueue_head(&nq->wait); | ||
| 394 | nq->queue_depth = nullb->queue_depth; | ||
| 395 | } | ||
| 396 | |||
| 358 | static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, | 397 | static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, |
| 359 | unsigned int index) | 398 | unsigned int index) |
| 360 | { | 399 | { |
| 361 | struct nullb *nullb = data; | 400 | struct nullb *nullb = data; |
| 362 | struct nullb_queue *nq = &nullb->queues[index]; | 401 | struct nullb_queue *nq = &nullb->queues[index]; |
| 363 | 402 | ||
| 364 | init_waitqueue_head(&nq->wait); | ||
| 365 | nq->queue_depth = nullb->queue_depth; | ||
| 366 | nullb->nr_queues++; | ||
| 367 | hctx->driver_data = nq; | 403 | hctx->driver_data = nq; |
| 404 | null_init_queue(nullb, nq); | ||
| 405 | nullb->nr_queues++; | ||
| 368 | 406 | ||
| 369 | return 0; | 407 | return 0; |
| 370 | } | 408 | } |
| @@ -387,10 +425,7 @@ static void null_del_dev(struct nullb *nullb) | |||
| 387 | list_del_init(&nullb->list); | 425 | list_del_init(&nullb->list); |
| 388 | 426 | ||
| 389 | del_gendisk(nullb->disk); | 427 | del_gendisk(nullb->disk); |
| 390 | if (queue_mode == NULL_Q_MQ) | 428 | blk_cleanup_queue(nullb->q); |
| 391 | blk_mq_free_queue(nullb->q); | ||
| 392 | else | ||
| 393 | blk_cleanup_queue(nullb->q); | ||
| 394 | put_disk(nullb->disk); | 429 | put_disk(nullb->disk); |
| 395 | kfree(nullb); | 430 | kfree(nullb); |
| 396 | } | 431 | } |
| @@ -417,13 +452,13 @@ static int setup_commands(struct nullb_queue *nq) | |||
| 417 | 452 | ||
| 418 | nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); | 453 | nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); |
| 419 | if (!nq->cmds) | 454 | if (!nq->cmds) |
| 420 | return 1; | 455 | return -ENOMEM; |
| 421 | 456 | ||
| 422 | tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; | 457 | tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; |
| 423 | nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); | 458 | nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); |
| 424 | if (!nq->tag_map) { | 459 | if (!nq->tag_map) { |
| 425 | kfree(nq->cmds); | 460 | kfree(nq->cmds); |
| 426 | return 1; | 461 | return -ENOMEM; |
| 427 | } | 462 | } |
| 428 | 463 | ||
| 429 | for (i = 0; i < nq->queue_depth; i++) { | 464 | for (i = 0; i < nq->queue_depth; i++) { |
| @@ -454,33 +489,37 @@ static void cleanup_queues(struct nullb *nullb) | |||
| 454 | 489 | ||
| 455 | static int setup_queues(struct nullb *nullb) | 490 | static int setup_queues(struct nullb *nullb) |
| 456 | { | 491 | { |
| 457 | struct nullb_queue *nq; | 492 | nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), |
| 458 | int i; | 493 | GFP_KERNEL); |
| 459 | |||
| 460 | nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL); | ||
| 461 | if (!nullb->queues) | 494 | if (!nullb->queues) |
| 462 | return 1; | 495 | return -ENOMEM; |
| 463 | 496 | ||
| 464 | nullb->nr_queues = 0; | 497 | nullb->nr_queues = 0; |
| 465 | nullb->queue_depth = hw_queue_depth; | 498 | nullb->queue_depth = hw_queue_depth; |
| 466 | 499 | ||
| 467 | if (queue_mode == NULL_Q_MQ) | 500 | return 0; |
| 468 | return 0; | 501 | } |
| 502 | |||
| 503 | static int init_driver_queues(struct nullb *nullb) | ||
| 504 | { | ||
| 505 | struct nullb_queue *nq; | ||
| 506 | int i, ret = 0; | ||
| 469 | 507 | ||
| 470 | for (i = 0; i < submit_queues; i++) { | 508 | for (i = 0; i < submit_queues; i++) { |
| 471 | nq = &nullb->queues[i]; | 509 | nq = &nullb->queues[i]; |
| 472 | init_waitqueue_head(&nq->wait); | 510 | |
| 473 | nq->queue_depth = hw_queue_depth; | 511 | null_init_queue(nullb, nq); |
| 474 | if (setup_commands(nq)) | 512 | |
| 475 | break; | 513 | ret = setup_commands(nq); |
| 514 | if (ret) | ||
| 515 | goto err_queue; | ||
| 476 | nullb->nr_queues++; | 516 | nullb->nr_queues++; |
| 477 | } | 517 | } |
| 478 | 518 | ||
| 479 | if (i == submit_queues) | 519 | return 0; |
| 480 | return 0; | 520 | err_queue: |
| 481 | |||
| 482 | cleanup_queues(nullb); | 521 | cleanup_queues(nullb); |
| 483 | return 1; | 522 | return ret; |
| 484 | } | 523 | } |
| 485 | 524 | ||
| 486 | static int null_add_dev(void) | 525 | static int null_add_dev(void) |
| @@ -518,11 +557,13 @@ static int null_add_dev(void) | |||
| 518 | } else if (queue_mode == NULL_Q_BIO) { | 557 | } else if (queue_mode == NULL_Q_BIO) { |
| 519 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); | 558 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); |
| 520 | blk_queue_make_request(nullb->q, null_queue_bio); | 559 | blk_queue_make_request(nullb->q, null_queue_bio); |
| 560 | init_driver_queues(nullb); | ||
| 521 | } else { | 561 | } else { |
| 522 | nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); | 562 | nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); |
| 523 | blk_queue_prep_rq(nullb->q, null_rq_prep_fn); | 563 | blk_queue_prep_rq(nullb->q, null_rq_prep_fn); |
| 524 | if (nullb->q) | 564 | if (nullb->q) |
| 525 | blk_queue_softirq_done(nullb->q, null_softirq_done_fn); | 565 | blk_queue_softirq_done(nullb->q, null_softirq_done_fn); |
| 566 | init_driver_queues(nullb); | ||
| 526 | } | 567 | } |
| 527 | 568 | ||
| 528 | if (!nullb->q) | 569 | if (!nullb->q) |
| @@ -534,10 +575,7 @@ static int null_add_dev(void) | |||
| 534 | disk = nullb->disk = alloc_disk_node(1, home_node); | 575 | disk = nullb->disk = alloc_disk_node(1, home_node); |
| 535 | if (!disk) { | 576 | if (!disk) { |
| 536 | queue_fail: | 577 | queue_fail: |
| 537 | if (queue_mode == NULL_Q_MQ) | 578 | blk_cleanup_queue(nullb->q); |
| 538 | blk_mq_free_queue(nullb->q); | ||
| 539 | else | ||
| 540 | blk_cleanup_queue(nullb->q); | ||
| 541 | cleanup_queues(nullb); | 579 | cleanup_queues(nullb); |
| 542 | err: | 580 | err: |
| 543 | kfree(nullb); | 581 | kfree(nullb); |
| @@ -579,7 +617,13 @@ static int __init null_init(void) | |||
| 579 | } | 617 | } |
| 580 | #endif | 618 | #endif |
| 581 | 619 | ||
| 582 | if (submit_queues > nr_cpu_ids) | 620 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { |
| 621 | if (submit_queues < nr_online_nodes) { | ||
| 622 | pr_warn("null_blk: submit_queues param is set to %u.", | ||
| 623 | nr_online_nodes); | ||
| 624 | submit_queues = nr_online_nodes; | ||
| 625 | } | ||
| 626 | } else if (submit_queues > nr_cpu_ids) | ||
| 583 | submit_queues = nr_cpu_ids; | 627 | submit_queues = nr_cpu_ids; |
| 584 | else if (!submit_queues) | 628 | else if (!submit_queues) |
| 585 | submit_queues = 1; | 629 | submit_queues = 1; |
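The null_init() hunk just above also adjusts the submit_queues module parameter at load time: in multi-queue mode with per-node contexts it is raised to at least one queue per online node, otherwise it is capped at the CPU count and defaults to 1 when left unset. A small userspace model of that fixup, with an invented machine size purely for illustration:

    #include <stdio.h>

    enum { NULL_Q_BIO = 0, NULL_Q_RQ = 1, NULL_Q_MQ = 2 };

    /* model of the submit_queues fixup done at module init */
    static int fixup_submit_queues(int requested, int queue_mode,
                                   int use_per_node_hctx,
                                   int nr_online_nodes, int nr_cpu_ids)
    {
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
            if (requested < nr_online_nodes)
                requested = nr_online_nodes;    /* at least one per node */
        } else if (requested > nr_cpu_ids) {
            requested = nr_cpu_ids;
        } else if (!requested) {
            requested = 1;
        }
        return requested;
    }

    int main(void)
    {
        /* pretend 2-node, 8-CPU machine */
        printf("%d\n", fixup_submit_queues(0, NULL_Q_MQ, 1, 2, 8));   /* 2 */
        printf("%d\n", fixup_submit_queues(0, NULL_Q_MQ, 0, 2, 8));   /* 1 */
        printf("%d\n", fixup_submit_queues(64, NULL_Q_RQ, 0, 2, 8));  /* 8 */
        return 0;
    }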
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 9199c93be926..eb6e1e0e8db2 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c | |||
| @@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state) | |||
| 5269 | } | 5269 | } |
| 5270 | } | 5270 | } |
| 5271 | 5271 | ||
| 5272 | const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) | 5272 | static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) |
| 5273 | { | 5273 | { |
| 5274 | switch (state) { | 5274 | switch (state) { |
| 5275 | case SKD_MSG_STATE_IDLE: | 5275 | case SKD_MSG_STATE_IDLE: |
| @@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) | |||
| 5281 | } | 5281 | } |
| 5282 | } | 5282 | } |
| 5283 | 5283 | ||
| 5284 | const char *skd_skreq_state_to_str(enum skd_req_state state) | 5284 | static const char *skd_skreq_state_to_str(enum skd_req_state state) |
| 5285 | { | 5285 | { |
| 5286 | switch (state) { | 5286 | switch (state) { |
| 5287 | case SKD_REQ_STATE_IDLE: | 5287 | case SKD_REQ_STATE_IDLE: |
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 6bfc1bb318f6..dceb85f8d9a8 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
| @@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = { | |||
| 87 | { USB_DEVICE(0x0CF3, 0xE004) }, | 87 | { USB_DEVICE(0x0CF3, 0xE004) }, |
| 88 | { USB_DEVICE(0x0CF3, 0xE005) }, | 88 | { USB_DEVICE(0x0CF3, 0xE005) }, |
| 89 | { USB_DEVICE(0x0930, 0x0219) }, | 89 | { USB_DEVICE(0x0930, 0x0219) }, |
| 90 | { USB_DEVICE(0x0930, 0x0220) }, | ||
| 90 | { USB_DEVICE(0x0489, 0xe057) }, | 91 | { USB_DEVICE(0x0489, 0xe057) }, |
| 91 | { USB_DEVICE(0x13d3, 0x3393) }, | 92 | { USB_DEVICE(0x13d3, 0x3393) }, |
| 92 | { USB_DEVICE(0x0489, 0xe04e) }, | 93 | { USB_DEVICE(0x0489, 0xe04e) }, |
| @@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { | |||
| 129 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, | 130 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
| 130 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, | 131 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, |
| 131 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, | 132 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
| 133 | { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, | ||
| 132 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, | 134 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
| 133 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, | 135 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
| 134 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, | 136 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c0ff34f2d2df..3980fd18f6ea 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
| @@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = { | |||
| 154 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, | 154 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
| 155 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, | 155 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, |
| 156 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, | 156 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
| 157 | { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, | ||
| 157 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, | 158 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
| 158 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, | 159 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
| 159 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, | 160 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index 8e562dc65601..e1f3337a0cf9 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c | |||
| @@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM"; | |||
| 27 | static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context, | 27 | static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context, |
| 28 | void **return_value) | 28 | void **return_value) |
| 29 | { | 29 | { |
| 30 | acpi_status status; | 30 | acpi_status status = AE_OK; |
| 31 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 31 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 32 | status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | 32 | |
| 33 | if (strstr(buffer.pointer, context) != NULL) { | 33 | if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) { |
| 34 | *return_value = handle; | 34 | if (strstr(buffer.pointer, context) != NULL) { |
| 35 | *return_value = handle; | ||
| 36 | status = AE_CTRL_TERMINATE; | ||
| 37 | } | ||
| 35 | kfree(buffer.pointer); | 38 | kfree(buffer.pointer); |
| 36 | return AE_CTRL_TERMINATE; | ||
| 37 | } | 39 | } |
| 38 | return AE_OK; | 40 | |
| 41 | return status; | ||
| 39 | } | 42 | } |
| 40 | 43 | ||
| 41 | static inline void ppi_assign_params(union acpi_object params[4], | 44 | static inline void ppi_assign_params(union acpi_object params[4], |
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 8d3009e44fba..5543b7df8e16 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c | |||
| @@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table, | |||
| 87 | return 0; | 87 | return 0; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static unsigned int _get_val(struct clk_divider *divider, u8 div) | 90 | static unsigned int _get_val(struct clk_divider *divider, unsigned int div) |
| 91 | { | 91 | { |
| 92 | if (divider->flags & CLK_DIVIDER_ONE_BASED) | 92 | if (divider->flags & CLK_DIVIDER_ONE_BASED) |
| 93 | return div; | 93 | return div; |
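The clk-divider.c change widens _get_val()'s parameter from u8 to unsigned int because divider values can exceed 255 and were silently truncated on the way in. The snippet below only demonstrates the truncation effect of a u8 parameter; the real helper additionally handles table-based and one-based dividers.

    #include <stdio.h>

    typedef unsigned char u8;

    static unsigned int through_u8(u8 div)              { return div; }
    static unsigned int through_uint(unsigned int div)  { return div; }

    int main(void)
    {
        unsigned int div = 300; /* a divider wider than 8 bits */

        printf("u8 parameter:   %u\n", through_u8(div));    /* 44: high bits lost */
        printf("uint parameter: %u\n", through_uint(div));  /* 300 */
        return 0;
    }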
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index 39b40aaede2b..68e515d093d8 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c | |||
| @@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data; | |||
| 26 | #define ASS_CLK_DIV 0x4 | 26 | #define ASS_CLK_DIV 0x4 |
| 27 | #define ASS_CLK_GATE 0x8 | 27 | #define ASS_CLK_GATE 0x8 |
| 28 | 28 | ||
| 29 | /* list of all parent clock list */ | ||
| 30 | static const char *mout_audss_p[] = { "fin_pll", "fout_epll" }; | ||
| 31 | static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" }; | ||
| 32 | |||
| 33 | #ifdef CONFIG_PM_SLEEP | ||
| 29 | static unsigned long reg_save[][2] = { | 34 | static unsigned long reg_save[][2] = { |
| 30 | {ASS_CLK_SRC, 0}, | 35 | {ASS_CLK_SRC, 0}, |
| 31 | {ASS_CLK_DIV, 0}, | 36 | {ASS_CLK_DIV, 0}, |
| 32 | {ASS_CLK_GATE, 0}, | 37 | {ASS_CLK_GATE, 0}, |
| 33 | }; | 38 | }; |
| 34 | 39 | ||
| 35 | /* list of all parent clock list */ | ||
| 36 | static const char *mout_audss_p[] = { "fin_pll", "fout_epll" }; | ||
| 37 | static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" }; | ||
| 38 | |||
| 39 | #ifdef CONFIG_PM_SLEEP | ||
| 40 | static int exynos_audss_clk_suspend(void) | 40 | static int exynos_audss_clk_suspend(void) |
| 41 | { | 41 | { |
| 42 | int i; | 42 | int i; |
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index ad5ff50c5f28..1a7c1b929c69 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c | |||
| @@ -39,7 +39,7 @@ | |||
| 39 | #define SRC_TOP1 0xc214 | 39 | #define SRC_TOP1 0xc214 |
| 40 | #define SRC_CAM 0xc220 | 40 | #define SRC_CAM 0xc220 |
| 41 | #define SRC_TV 0xc224 | 41 | #define SRC_TV 0xc224 |
| 42 | #define SRC_MFC 0xcc28 | 42 | #define SRC_MFC 0xc228 |
| 43 | #define SRC_G3D 0xc22c | 43 | #define SRC_G3D 0xc22c |
| 44 | #define E4210_SRC_IMAGE 0xc230 | 44 | #define E4210_SRC_IMAGE 0xc230 |
| 45 | #define SRC_LCD0 0xc234 | 45 | #define SRC_LCD0 0xc234 |
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index adf32343c9f9..e52359cf9b6f 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #define MPLL_LOCK 0x4000 | 25 | #define MPLL_LOCK 0x4000 |
| 26 | #define MPLL_CON0 0x4100 | 26 | #define MPLL_CON0 0x4100 |
| 27 | #define SRC_CORE1 0x4204 | 27 | #define SRC_CORE1 0x4204 |
| 28 | #define GATE_IP_ACP 0x8800 | ||
| 28 | #define CPLL_LOCK 0x10020 | 29 | #define CPLL_LOCK 0x10020 |
| 29 | #define EPLL_LOCK 0x10030 | 30 | #define EPLL_LOCK 0x10030 |
| 30 | #define VPLL_LOCK 0x10040 | 31 | #define VPLL_LOCK 0x10040 |
| @@ -75,7 +76,6 @@ | |||
| 75 | #define SRC_CDREX 0x20200 | 76 | #define SRC_CDREX 0x20200 |
| 76 | #define PLL_DIV2_SEL 0x20a24 | 77 | #define PLL_DIV2_SEL 0x20a24 |
| 77 | #define GATE_IP_DISP1 0x10928 | 78 | #define GATE_IP_DISP1 0x10928 |
| 78 | #define GATE_IP_ACP 0x10000 | ||
| 79 | 79 | ||
| 80 | /* list of PLLs to be registered */ | 80 | /* list of PLLs to be registered */ |
| 81 | enum exynos5250_plls { | 81 | enum exynos5250_plls { |
| @@ -120,7 +120,8 @@ enum exynos5250_clks { | |||
| 120 | spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2, | 120 | spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2, |
| 121 | hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1, | 121 | hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1, |
| 122 | tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct, | 122 | tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct, |
| 123 | wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, | 123 | wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0, |
| 124 | smmu_mdma0, | ||
| 124 | 125 | ||
| 125 | /* mux clocks */ | 126 | /* mux clocks */ |
| 126 | mout_hdmi = 1024, | 127 | mout_hdmi = 1024, |
| @@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { | |||
| 354 | GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0), | 355 | GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0), |
| 355 | GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0), | 356 | GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0), |
| 356 | GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0), | 357 | GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0), |
| 357 | GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0), | 358 | GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0), |
| 358 | GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0), | 359 | GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0), |
| 359 | GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0), | 360 | GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0), |
| 360 | GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0), | 361 | GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0), |
| 361 | GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0), | 362 | GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0), |
| @@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { | |||
| 406 | GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0), | 407 | GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0), |
| 407 | GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), | 408 | GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), |
| 408 | GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), | 409 | GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), |
| 409 | GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0), | 410 | GATE(sysreg, "sysreg", "aclk66", |
| 411 | GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0), | ||
| 410 | GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0), | 412 | GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0), |
| 411 | GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), | 413 | GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), |
| 412 | GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), | 414 | GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), |
| @@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { | |||
| 492 | GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0), | 494 | GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0), |
| 493 | GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0), | 495 | GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0), |
| 494 | GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0), | 496 | GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0), |
| 497 | GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0), | ||
| 498 | GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0), | ||
| 495 | }; | 499 | }; |
| 496 | 500 | ||
| 497 | static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { | 501 | static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 5c07a56962db..634c4d6dd45a 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
| @@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK | |||
| 75 | config CLKSRC_EFM32 | 75 | config CLKSRC_EFM32 |
| 76 | bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 | 76 | bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 |
| 77 | depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) | 77 | depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) |
| 78 | select CLKSRC_MMIO | ||
| 78 | default ARCH_EFM32 | 79 | default ARCH_EFM32 |
| 79 | help | 80 | help |
| 80 | Support to use the timers of EFM32 SoCs as clock source and clock | 81 | Support to use the timers of EFM32 SoCs as clock source and clock |
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c index 35639cf4e5a2..b9ddd9e3a2f5 100644 --- a/drivers/clocksource/clksrc-of.c +++ b/drivers/clocksource/clksrc-of.c | |||
| @@ -35,6 +35,5 @@ void __init clocksource_of_init(void) | |||
| 35 | 35 | ||
| 36 | init_func = match->data; | 36 | init_func = match->data; |
| 37 | init_func(np); | 37 | init_func(np); |
| 38 | of_node_put(np); | ||
| 39 | } | 38 | } |
| 40 | } | 39 | } |
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index 45ba8aecc729..2a2ea2717f3a 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c | |||
| @@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer) | |||
| 108 | 108 | ||
| 109 | static u64 read_sched_clock(void) | 109 | static u64 read_sched_clock(void) |
| 110 | { | 110 | { |
| 111 | return __raw_readl(sched_io_base); | 111 | return ~__raw_readl(sched_io_base); |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | static const struct of_device_id sptimer_ids[] __initconst = { | 114 | static const struct of_device_id sptimer_ids[] __initconst = { |
| 115 | { .compatible = "picochip,pc3x2-rtc" }, | 115 | { .compatible = "picochip,pc3x2-rtc" }, |
| 116 | { .compatible = "snps,dw-apb-timer-sp" }, | ||
| 117 | { /* Sentinel */ }, | 116 | { /* Sentinel */ }, |
| 118 | }; | 117 | }; |
| 119 | 118 | ||
| @@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer) | |||
| 151 | num_called++; | 150 | num_called++; |
| 152 | } | 151 | } |
| 153 | CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); | 152 | CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); |
| 154 | CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init); | 153 | CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init); |
| 154 | CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init); | ||
| 155 | CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init); | ||
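The dw_apb_timer_of.c hunk makes read_sched_clock() return the bitwise inverse of the register because this timer counts down, while the sched_clock core expects a value that increases over time. A short userspace demonstration of why inverting a down counter gives a monotonically increasing tick count; the counter here is simulated, not a real register read.

    #include <stdio.h>
    #include <stdint.h>

    /* a free-running timer that counts DOWN from 0xffffffff */
    static uint32_t down_counter = 0xffffffffu;

    static uint64_t read_sched_clock(void)
    {
        /* inverting a down counter yields an up-counting value */
        return (uint32_t)~down_counter;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            printf("raw=%#010x sched=%llu\n", (unsigned int)down_counter,
                   (unsigned long long)read_sched_clock());
            down_counter -= 1000;   /* pretend 1000 ticks elapsed */
        }
        return 0;
    }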
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index 2fb4695a28d8..a4f6119aafd8 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c | |||
| @@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node) | |||
| 179 | writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), | 179 | writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), |
| 180 | timer_base + TIMER_CTL_REG(0)); | 180 | timer_base + TIMER_CTL_REG(0)); |
| 181 | 181 | ||
| 182 | /* Make sure timer is stopped before playing with interrupts */ | ||
| 183 | sun4i_clkevt_time_stop(0); | ||
| 184 | |||
| 182 | ret = setup_irq(irq, &sun4i_timer_irq); | 185 | ret = setup_irq(irq, &sun4i_timer_irq); |
| 183 | if (ret) | 186 | if (ret) |
| 184 | pr_warn("failed to setup irq %d\n", irq); | 187 | pr_warn("failed to setup irq %d\n", irq); |
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index d8e47e502785..4e7f6802e840 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c | |||
| @@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) | |||
| 256 | ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; | 256 | ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; |
| 257 | 257 | ||
| 258 | /* | 258 | /* |
| 259 | * Set scale and timer for sched_clock. | ||
| 260 | */ | ||
| 261 | sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); | ||
| 262 | |||
| 263 | /* | ||
| 264 | * Setup free-running clocksource timer (interrupts | 259 | * Setup free-running clocksource timer (interrupts |
| 265 | * disabled). | 260 | * disabled). |
| 266 | */ | 261 | */ |
| @@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) | |||
| 270 | timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | | 265 | timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | |
| 271 | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); | 266 | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); |
| 272 | 267 | ||
| 268 | /* | ||
| 269 | * Set scale and timer for sched_clock. | ||
| 270 | */ | ||
| 271 | sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); | ||
| 272 | |||
| 273 | clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, | 273 | clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, |
| 274 | "armada_370_xp_clocksource", | 274 | "armada_370_xp_clocksource", |
| 275 | timer_clk, 300, 32, clocksource_mmio_readl_down); | 275 | timer_clk, 300, 32, clocksource_mmio_readl_down); |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 02d534da22dd..8d19f7c06010 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -828,14 +828,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) | |||
| 828 | int ret = 0; | 828 | int ret = 0; |
| 829 | 829 | ||
| 830 | memcpy(&new_policy, policy, sizeof(*policy)); | 830 | memcpy(&new_policy, policy, sizeof(*policy)); |
| 831 | |||
| 832 | /* Use the default policy if its valid. */ | ||
| 833 | if (cpufreq_driver->setpolicy) | ||
| 834 | cpufreq_parse_governor(policy->governor->name, | ||
| 835 | &new_policy.policy, NULL); | ||
| 836 | |||
| 831 | /* assure that the starting sequence is run in cpufreq_set_policy */ | 837 | /* assure that the starting sequence is run in cpufreq_set_policy */ |
| 832 | policy->governor = NULL; | 838 | policy->governor = NULL; |
| 833 | 839 | ||
| 834 | /* set default policy */ | 840 | /* set default policy */ |
| 835 | ret = cpufreq_set_policy(policy, &new_policy); | 841 | ret = cpufreq_set_policy(policy, &new_policy); |
| 836 | policy->user_policy.policy = policy->policy; | ||
| 837 | policy->user_policy.governor = policy->governor; | ||
| 838 | |||
| 839 | if (ret) { | 842 | if (ret) { |
| 840 | pr_debug("setting policy failed\n"); | 843 | pr_debug("setting policy failed\n"); |
| 841 | if (cpufreq_driver->exit) | 844 | if (cpufreq_driver->exit) |
| @@ -845,8 +848,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) | |||
| 845 | 848 | ||
| 846 | #ifdef CONFIG_HOTPLUG_CPU | 849 | #ifdef CONFIG_HOTPLUG_CPU |
| 847 | static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | 850 | static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, |
| 848 | unsigned int cpu, struct device *dev, | 851 | unsigned int cpu, struct device *dev) |
| 849 | bool frozen) | ||
| 850 | { | 852 | { |
| 851 | int ret = 0; | 853 | int ret = 0; |
| 852 | unsigned long flags; | 854 | unsigned long flags; |
| @@ -877,11 +879,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | |||
| 877 | } | 879 | } |
| 878 | } | 880 | } |
| 879 | 881 | ||
| 880 | /* Don't touch sysfs links during light-weight init */ | 882 | return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); |
| 881 | if (!frozen) | ||
| 882 | ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); | ||
| 883 | |||
| 884 | return ret; | ||
| 885 | } | 883 | } |
| 886 | #endif | 884 | #endif |
| 887 | 885 | ||
| @@ -926,6 +924,27 @@ err_free_policy: | |||
| 926 | return NULL; | 924 | return NULL; |
| 927 | } | 925 | } |
| 928 | 926 | ||
| 927 | static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) | ||
| 928 | { | ||
| 929 | struct kobject *kobj; | ||
| 930 | struct completion *cmp; | ||
| 931 | |||
| 932 | down_read(&policy->rwsem); | ||
| 933 | kobj = &policy->kobj; | ||
| 934 | cmp = &policy->kobj_unregister; | ||
| 935 | up_read(&policy->rwsem); | ||
| 936 | kobject_put(kobj); | ||
| 937 | |||
| 938 | /* | ||
| 939 | * We need to make sure that the underlying kobj is | ||
| 940 | * actually not referenced anymore by anybody before we | ||
| 941 | * proceed with unloading. | ||
| 942 | */ | ||
| 943 | pr_debug("waiting for dropping of refcount\n"); | ||
| 944 | wait_for_completion(cmp); | ||
| 945 | pr_debug("wait complete\n"); | ||
| 946 | } | ||
| 947 | |||
| 929 | static void cpufreq_policy_free(struct cpufreq_policy *policy) | 948 | static void cpufreq_policy_free(struct cpufreq_policy *policy) |
| 930 | { | 949 | { |
| 931 | free_cpumask_var(policy->related_cpus); | 950 | free_cpumask_var(policy->related_cpus); |
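The new cpufreq_policy_put_kobj() helper factors out the sequence "drop our kobject reference, then block on the kobj_unregister completion until the last holder is gone", so the error path added later in __cpufreq_add_dev() can reuse it before freeing the policy. The pthreads program below is only an analogy for that pattern (a reference count plus a completion-style wait), not the kernel's kobject machinery; all names are invented.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct obj {
        int refs;                   /* protected by lock */
        pthread_mutex_t lock;
        pthread_cond_t released;    /* stands in for the completion */
    };

    static void obj_put(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        if (--o->refs == 0)
            pthread_cond_signal(&o->released);
        pthread_mutex_unlock(&o->lock);
    }

    static void *late_user(void *arg)
    {
        usleep(100000);     /* pretend sysfs still holds a reference */
        obj_put(arg);
        return NULL;
    }

    int main(void)
    {
        struct obj o = { .refs = 2,
                         .lock = PTHREAD_MUTEX_INITIALIZER,
                         .released = PTHREAD_COND_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, late_user, &o);

        obj_put(&o);                        /* kobject_put() equivalent */
        pthread_mutex_lock(&o.lock);
        while (o.refs)                      /* wait_for_completion() equivalent */
            pthread_cond_wait(&o.released, &o.lock);
        pthread_mutex_unlock(&o.lock);

        printf("all references dropped, safe to free\n");
        pthread_join(t, NULL);
        return 0;   /* build with -pthread */
    }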
| @@ -986,7 +1005,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
| 986 | list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { | 1005 | list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { |
| 987 | if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { | 1006 | if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { |
| 988 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1007 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
| 989 | ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen); | 1008 | ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev); |
| 990 | up_read(&cpufreq_rwsem); | 1009 | up_read(&cpufreq_rwsem); |
| 991 | return ret; | 1010 | return ret; |
| 992 | } | 1011 | } |
| @@ -994,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
| 994 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1013 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
| 995 | #endif | 1014 | #endif |
| 996 | 1015 | ||
| 997 | if (frozen) | 1016 | /* |
| 998 | /* Restore the saved policy when doing light-weight init */ | 1017 | * Restore the saved policy when doing light-weight init and fall back |
| 999 | policy = cpufreq_policy_restore(cpu); | 1018 | * to the full init if that fails. |
| 1000 | else | 1019 | */ |
| 1020 | policy = frozen ? cpufreq_policy_restore(cpu) : NULL; | ||
| 1021 | if (!policy) { | ||
| 1022 | frozen = false; | ||
| 1001 | policy = cpufreq_policy_alloc(); | 1023 | policy = cpufreq_policy_alloc(); |
| 1002 | 1024 | if (!policy) | |
| 1003 | if (!policy) | 1025 | goto nomem_out; |
| 1004 | goto nomem_out; | 1026 | } |
| 1005 | |||
| 1006 | 1027 | ||
| 1007 | /* | 1028 | /* |
| 1008 | * In the resume path, since we restore a saved policy, the assignment | 1029 | * In the resume path, since we restore a saved policy, the assignment |
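The hunk above replaces the hard "restore on resume, allocate otherwise" split with a fallback: if the saved policy cannot be restored, frozen is cleared and a full allocation plus full init is done instead. Below is a generic userspace model of that restore-or-allocate pattern; the types and helpers are invented for illustration and carry none of the real cpufreq locking.

    #include <stdio.h>
    #include <stdlib.h>

    struct policy { int cpu; };

    static struct policy *saved[4]; /* per-CPU stash written at suspend time */

    static struct policy *policy_restore(int cpu)   { return saved[cpu]; }

    static struct policy *policy_alloc(int cpu)
    {
        struct policy *p = calloc(1, sizeof(*p));

        if (p)
            p->cpu = cpu;
        return p;
    }

    /* mirror of the fallback: try the saved policy on the resume path,
     * drop back to a full init when it is gone */
    static struct policy *get_policy(int cpu, int *frozen)
    {
        struct policy *p = *frozen ? policy_restore(cpu) : NULL;

        if (!p) {
            *frozen = 0;    /* run the full-init paths afterwards */
            p = policy_alloc(cpu);
        }
        return p;
    }

    int main(void)
    {
        int frozen = 1;
        struct policy *p = get_policy(2, &frozen);

        printf("cpu2: %s init (policy %p)\n",
               frozen ? "light-weight" : "full", (void *)p);
        free(p);
        return 0;
    }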
| @@ -1047,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
| 1047 | */ | 1068 | */ |
| 1048 | cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); | 1069 | cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); |
| 1049 | 1070 | ||
| 1050 | policy->user_policy.min = policy->min; | 1071 | if (!frozen) { |
| 1051 | policy->user_policy.max = policy->max; | 1072 | policy->user_policy.min = policy->min; |
| 1073 | policy->user_policy.max = policy->max; | ||
| 1074 | } | ||
| 1052 | 1075 | ||
| 1053 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 1076 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
| 1054 | CPUFREQ_START, policy); | 1077 | CPUFREQ_START, policy); |
| @@ -1079,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
| 1079 | 1102 | ||
| 1080 | cpufreq_init_policy(policy); | 1103 | cpufreq_init_policy(policy); |
| 1081 | 1104 | ||
| 1105 | if (!frozen) { | ||
| 1106 | policy->user_policy.policy = policy->policy; | ||
| 1107 | policy->user_policy.governor = policy->governor; | ||
| 1108 | } | ||
| 1109 | |||
| 1082 | kobject_uevent(&policy->kobj, KOBJ_ADD); | 1110 | kobject_uevent(&policy->kobj, KOBJ_ADD); |
| 1083 | up_read(&cpufreq_rwsem); | 1111 | up_read(&cpufreq_rwsem); |
| 1084 | 1112 | ||
| @@ -1096,7 +1124,13 @@ err_get_freq: | |||
| 1096 | if (cpufreq_driver->exit) | 1124 | if (cpufreq_driver->exit) |
| 1097 | cpufreq_driver->exit(policy); | 1125 | cpufreq_driver->exit(policy); |
| 1098 | err_set_policy_cpu: | 1126 | err_set_policy_cpu: |
| 1127 | if (frozen) { | ||
| 1128 | /* Do not leave stale fallback data behind. */ | ||
| 1129 | per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL; | ||
| 1130 | cpufreq_policy_put_kobj(policy); | ||
| 1131 | } | ||
| 1099 | cpufreq_policy_free(policy); | 1132 | cpufreq_policy_free(policy); |
| 1133 | |||
| 1100 | nomem_out: | 1134 | nomem_out: |
| 1101 | up_read(&cpufreq_rwsem); | 1135 | up_read(&cpufreq_rwsem); |
| 1102 | 1136 | ||
| @@ -1118,7 +1152,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | |||
| 1118 | } | 1152 | } |
| 1119 | 1153 | ||
| 1120 | static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, | 1154 | static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, |
| 1121 | unsigned int old_cpu, bool frozen) | 1155 | unsigned int old_cpu) |
| 1122 | { | 1156 | { |
| 1123 | struct device *cpu_dev; | 1157 | struct device *cpu_dev; |
| 1124 | int ret; | 1158 | int ret; |
| @@ -1126,10 +1160,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, | |||
| 1126 | /* first sibling now owns the new sysfs dir */ | 1160 | /* first sibling now owns the new sysfs dir */ |
| 1127 | cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); | 1161 | cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); |
| 1128 | 1162 | ||
| 1129 | /* Don't touch sysfs files during light-weight tear-down */ | ||
| 1130 | if (frozen) | ||
| 1131 | return cpu_dev->id; | ||
| 1132 | |||
| 1133 | sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); | 1163 | sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); |
| 1134 | ret = kobject_move(&policy->kobj, &cpu_dev->kobj); | 1164 | ret = kobject_move(&policy->kobj, &cpu_dev->kobj); |
| 1135 | if (ret) { | 1165 | if (ret) { |
| @@ -1196,7 +1226,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, | |||
| 1196 | if (!frozen) | 1226 | if (!frozen) |
| 1197 | sysfs_remove_link(&dev->kobj, "cpufreq"); | 1227 | sysfs_remove_link(&dev->kobj, "cpufreq"); |
| 1198 | } else if (cpus > 1) { | 1228 | } else if (cpus > 1) { |
| 1199 | new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); | 1229 | new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu); |
| 1200 | if (new_cpu >= 0) { | 1230 | if (new_cpu >= 0) { |
| 1201 | update_policy_cpu(policy, new_cpu); | 1231 | update_policy_cpu(policy, new_cpu); |
| 1202 | 1232 | ||
| @@ -1218,8 +1248,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
| 1218 | int ret; | 1248 | int ret; |
| 1219 | unsigned long flags; | 1249 | unsigned long flags; |
| 1220 | struct cpufreq_policy *policy; | 1250 | struct cpufreq_policy *policy; |
| 1221 | struct kobject *kobj; | ||
| 1222 | struct completion *cmp; | ||
| 1223 | 1251 | ||
| 1224 | read_lock_irqsave(&cpufreq_driver_lock, flags); | 1252 | read_lock_irqsave(&cpufreq_driver_lock, flags); |
| 1225 | policy = per_cpu(cpufreq_cpu_data, cpu); | 1253 | policy = per_cpu(cpufreq_cpu_data, cpu); |
| @@ -1249,22 +1277,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
| 1249 | } | 1277 | } |
| 1250 | } | 1278 | } |
| 1251 | 1279 | ||
| 1252 | if (!frozen) { | 1280 | if (!frozen) |
| 1253 | down_read(&policy->rwsem); | 1281 | cpufreq_policy_put_kobj(policy); |
| 1254 | kobj = &policy->kobj; | ||
| 1255 | cmp = &policy->kobj_unregister; | ||
| 1256 | up_read(&policy->rwsem); | ||
| 1257 | kobject_put(kobj); | ||
| 1258 | |||
| 1259 | /* | ||
| 1260 | * We need to make sure that the underlying kobj is | ||
| 1261 | * actually not referenced anymore by anybody before we | ||
| 1262 | * proceed with unloading. | ||
| 1263 | */ | ||
| 1264 | pr_debug("waiting for dropping of refcount\n"); | ||
| 1265 | wait_for_completion(cmp); | ||
| 1266 | pr_debug("wait complete\n"); | ||
| 1267 | } | ||
| 1268 | 1282 | ||
| 1269 | /* | 1283 | /* |
| 1270 | * Perform the ->exit() even during light-weight tear-down, | 1284 | * Perform the ->exit() even during light-weight tear-down, |
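Note on the __cpufreq_add_dev() hunk above: the old if/else ("restore when frozen, allocate otherwise") becomes a restore-with-fallback, so a failed light-weight resume degrades into a full init instead of erroring out. A minimal sketch of the resulting control flow, using hypothetical helper names (restore_saved_policy(), alloc_new_policy()) rather than the real cpufreq internals:

	struct cpufreq_policy *policy;

	/* Try to reuse the policy saved at suspend time; NULL means either
	 * this is not a resume (frozen == false) or the saved copy is gone. */
	policy = frozen ? restore_saved_policy(cpu) : NULL;
	if (!policy) {
		frozen = false;			/* fall back to a full init */
		policy = alloc_new_policy();
		if (!policy)
			return -ENOMEM;
	}
	/* From here on, "frozen" also tells the error path whether stale
	 * fallback data and the saved kobject need cleaning up before freeing. */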
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 5f1cbae36961..d51f17ed691e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -581,7 +581,8 @@ static void intel_pstate_timer_func(unsigned long __data) | |||
| 581 | } | 581 | } |
| 582 | 582 | ||
| 583 | #define ICPU(model, policy) \ | 583 | #define ICPU(model, policy) \ |
| 584 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } | 584 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ |
| 585 | (unsigned long)&policy } | ||
| 585 | 586 | ||
| 586 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | 587 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { |
| 587 | ICPU(0x2a, core_params), | 588 | ICPU(0x2a, core_params), |
| @@ -614,6 +615,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
| 614 | cpu = all_cpu_data[cpunum]; | 615 | cpu = all_cpu_data[cpunum]; |
| 615 | 616 | ||
| 616 | intel_pstate_get_cpu_pstates(cpu); | 617 | intel_pstate_get_cpu_pstates(cpu); |
| 618 | if (!cpu->pstate.current_pstate) { | ||
| 619 | all_cpu_data[cpunum] = NULL; | ||
| 620 | kfree(cpu); | ||
| 621 | return -ENODATA; | ||
| 622 | } | ||
| 617 | 623 | ||
| 618 | cpu->cpu = cpunum; | 624 | cpu->cpu = cpunum; |
| 619 | 625 | ||
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c index 36795639df0d..6e51114057d0 100644 --- a/drivers/cpuidle/cpuidle-calxeda.c +++ b/drivers/cpuidle/cpuidle-calxeda.c | |||
| @@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = { | |||
| 65 | .state_count = 2, | 65 | .state_count = 2, |
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | static int __init calxeda_cpuidle_probe(struct platform_device *pdev) | 68 | static int calxeda_cpuidle_probe(struct platform_device *pdev) |
| 69 | { | 69 | { |
| 70 | return cpuidle_register(&calxeda_idle_driver, NULL); | 70 | return cpuidle_register(&calxeda_idle_driver, NULL); |
| 71 | } | 71 | } |
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 9dd6e01eac33..f757a0f428bd 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
| @@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = { | |||
| 1410 | static int __init ixp_module_init(void) | 1410 | static int __init ixp_module_init(void) |
| 1411 | { | 1411 | { |
| 1412 | int num = ARRAY_SIZE(ixp4xx_algos); | 1412 | int num = ARRAY_SIZE(ixp4xx_algos); |
| 1413 | int i, err ; | 1413 | int i, err; |
| 1414 | 1414 | ||
| 1415 | pdev = platform_device_register_full(&ixp_dev_info); | 1415 | pdev = platform_device_register_full(&ixp_dev_info); |
| 1416 | if (IS_ERR(pdev)) | 1416 | if (IS_ERR(pdev)) |
| 1417 | return PTR_ERR(pdev); | 1417 | return PTR_ERR(pdev); |
| 1418 | 1418 | ||
| 1419 | dev = &pdev->dev; | ||
| 1420 | |||
| 1421 | spin_lock_init(&desc_lock); | 1419 | spin_lock_init(&desc_lock); |
| 1422 | spin_lock_init(&emerg_lock); | 1420 | spin_lock_init(&emerg_lock); |
| 1423 | 1421 | ||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 446687cc2334..c823daaf9043 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
| @@ -62,6 +62,7 @@ config INTEL_IOATDMA | |||
| 62 | tristate "Intel I/OAT DMA support" | 62 | tristate "Intel I/OAT DMA support" |
| 63 | depends on PCI && X86 | 63 | depends on PCI && X86 |
| 64 | select DMA_ENGINE | 64 | select DMA_ENGINE |
| 65 | select DMA_ENGINE_RAID | ||
| 65 | select DCA | 66 | select DCA |
| 66 | help | 67 | help |
| 67 | Enable support for the Intel(R) I/OAT DMA engine present | 68 | Enable support for the Intel(R) I/OAT DMA engine present |
| @@ -112,6 +113,7 @@ config MV_XOR | |||
| 112 | bool "Marvell XOR engine support" | 113 | bool "Marvell XOR engine support" |
| 113 | depends on PLAT_ORION | 114 | depends on PLAT_ORION |
| 114 | select DMA_ENGINE | 115 | select DMA_ENGINE |
| 116 | select DMA_ENGINE_RAID | ||
| 115 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 117 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
| 116 | ---help--- | 118 | ---help--- |
| 117 | Enable support for the Marvell XOR engine. | 119 | Enable support for the Marvell XOR engine. |
| @@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA | |||
| 187 | tristate "AMCC PPC440SPe ADMA support" | 189 | tristate "AMCC PPC440SPe ADMA support" |
| 188 | depends on 440SPe || 440SP | 190 | depends on 440SPe || 440SP |
| 189 | select DMA_ENGINE | 191 | select DMA_ENGINE |
| 192 | select DMA_ENGINE_RAID | ||
| 190 | select ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 193 | select ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
| 191 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 194 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
| 192 | help | 195 | help |
| @@ -352,6 +355,7 @@ config NET_DMA | |||
| 352 | bool "Network: TCP receive copy offload" | 355 | bool "Network: TCP receive copy offload" |
| 353 | depends on DMA_ENGINE && NET | 356 | depends on DMA_ENGINE && NET |
| 354 | default (INTEL_IOATDMA || FSL_DMA) | 357 | default (INTEL_IOATDMA || FSL_DMA) |
| 358 | depends on BROKEN | ||
| 355 | help | 359 | help |
| 356 | This enables the use of DMA engines in the network stack to | 360 | This enables the use of DMA engines in the network stack to |
| 357 | offload receive copy-to-user operations, freeing CPU cycles. | 361 | offload receive copy-to-user operations, freeing CPU cycles. |
| @@ -377,4 +381,7 @@ config DMATEST | |||
| 377 | Simple DMA test client. Say N unless you're debugging a | 381 | Simple DMA test client. Say N unless you're debugging a |
| 378 | DMA Device driver. | 382 | DMA Device driver. |
| 379 | 383 | ||
| 384 | config DMA_ENGINE_RAID | ||
| 385 | bool | ||
| 386 | |||
| 380 | endif | 387 | endif |
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index f31d647acdfa..2787aba60c6b 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
| @@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan) | |||
| 347 | { | 347 | { |
| 348 | return &chan->dev->device; | 348 | return &chan->dev->device; |
| 349 | } | 349 | } |
| 350 | static struct device *chan2parent(struct dma_chan *chan) | ||
| 351 | { | ||
| 352 | return chan->dev->device.parent; | ||
| 353 | } | ||
| 354 | 350 | ||
| 355 | #if defined(VERBOSE_DEBUG) | 351 | #if defined(VERBOSE_DEBUG) |
| 356 | static void vdbg_dump_regs(struct at_dma_chan *atchan) | 352 | static void vdbg_dump_regs(struct at_dma_chan *atchan) |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index ea806bdc12ef..ef63b9058f3c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
| @@ -912,7 +912,7 @@ struct dmaengine_unmap_pool { | |||
| 912 | #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } | 912 | #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } |
| 913 | static struct dmaengine_unmap_pool unmap_pool[] = { | 913 | static struct dmaengine_unmap_pool unmap_pool[] = { |
| 914 | __UNMAP_POOL(2), | 914 | __UNMAP_POOL(2), |
| 915 | #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) | 915 | #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) |
| 916 | __UNMAP_POOL(16), | 916 | __UNMAP_POOL(16), |
| 917 | __UNMAP_POOL(128), | 917 | __UNMAP_POOL(128), |
| 918 | __UNMAP_POOL(256), | 918 | __UNMAP_POOL(256), |
| @@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
| 1054 | dma_cookie_t cookie; | 1054 | dma_cookie_t cookie; |
| 1055 | unsigned long flags; | 1055 | unsigned long flags; |
| 1056 | 1056 | ||
| 1057 | unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); | 1057 | unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT); |
| 1058 | if (!unmap) | 1058 | if (!unmap) |
| 1059 | return -ENOMEM; | 1059 | return -ENOMEM; |
| 1060 | 1060 | ||
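A note on the GFP change above: this copy path can be reached from contexts that must not block on I/O, so GFP_NOIO (which may still sleep) gives way to GFP_NOWAIT. A rough sketch of how the unmap-data helpers are paired in such a path; the field semantics are inferred from the other hunks in this diff, so treat the details as an assumption:

	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;		/* caller falls back to a plain memcpy */

	unmap->to_cnt   = 1;		/* addr[0]: source page,      DMA_TO_DEVICE   */
	unmap->from_cnt = 1;		/* addr[1]: destination page, DMA_FROM_DEVICE */
	unmap->len      = len;
	/* ... map the pages, prep and submit the descriptor ... */
	dmaengine_unmap_put(unmap);	/* drop our ref; unmapping happens when it reaches zero */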
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 20f9a3aaf926..9dfcaf5c1288 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
| @@ -539,9 +539,9 @@ static int dmatest_func(void *data) | |||
| 539 | 539 | ||
| 540 | um->len = params->buf_size; | 540 | um->len = params->buf_size; |
| 541 | for (i = 0; i < src_cnt; i++) { | 541 | for (i = 0; i < src_cnt; i++) { |
| 542 | unsigned long buf = (unsigned long) thread->srcs[i]; | 542 | void *buf = thread->srcs[i]; |
| 543 | struct page *pg = virt_to_page(buf); | 543 | struct page *pg = virt_to_page(buf); |
| 544 | unsigned pg_off = buf & ~PAGE_MASK; | 544 | unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; |
| 545 | 545 | ||
| 546 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, | 546 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, |
| 547 | um->len, DMA_TO_DEVICE); | 547 | um->len, DMA_TO_DEVICE); |
| @@ -559,9 +559,9 @@ static int dmatest_func(void *data) | |||
| 559 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ | 559 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
| 560 | dsts = &um->addr[src_cnt]; | 560 | dsts = &um->addr[src_cnt]; |
| 561 | for (i = 0; i < dst_cnt; i++) { | 561 | for (i = 0; i < dst_cnt; i++) { |
| 562 | unsigned long buf = (unsigned long) thread->dsts[i]; | 562 | void *buf = thread->dsts[i]; |
| 563 | struct page *pg = virt_to_page(buf); | 563 | struct page *pg = virt_to_page(buf); |
| 564 | unsigned pg_off = buf & ~PAGE_MASK; | 564 | unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; |
| 565 | 565 | ||
| 566 | dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, | 566 | dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, |
| 567 | DMA_BIDIRECTIONAL); | 567 | DMA_BIDIRECTIONAL); |
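On the dmatest change above: keeping the buffer as a pointer and casting only where an integer is needed presumably sidesteps pointer/integer conversion warnings on some configurations. The same page-offset computation can also be written with the offset_in_page() helper, which is the more idiomatic spelling of "address modulo PAGE_SIZE":

	void *buf = thread->srcs[i];
	struct page *pg = virt_to_page(buf);
	unsigned int pg_off = offset_in_page(buf);	/* == (unsigned long)buf & ~PAGE_MASK */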
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 7086a16a55f2..f157c6f76b32 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
| @@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan, | |||
| 86 | hw->count = CPU_TO_DMA(chan, count, 32); | 86 | hw->count = CPU_TO_DMA(chan, count, 32); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) | ||
| 90 | { | ||
| 91 | return DMA_TO_CPU(chan, desc->hw.count, 32); | ||
| 92 | } | ||
| 93 | |||
| 94 | static void set_desc_src(struct fsldma_chan *chan, | 89 | static void set_desc_src(struct fsldma_chan *chan, |
| 95 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | 90 | struct fsl_dma_ld_hw *hw, dma_addr_t src) |
| 96 | { | 91 | { |
| @@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan, | |||
| 101 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); | 96 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); |
| 102 | } | 97 | } |
| 103 | 98 | ||
| 104 | static dma_addr_t get_desc_src(struct fsldma_chan *chan, | ||
| 105 | struct fsl_desc_sw *desc) | ||
| 106 | { | ||
| 107 | u64 snoop_bits; | ||
| 108 | |||
| 109 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
| 110 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
| 111 | return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; | ||
| 112 | } | ||
| 113 | |||
| 114 | static void set_desc_dst(struct fsldma_chan *chan, | 99 | static void set_desc_dst(struct fsldma_chan *chan, |
| 115 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) | 100 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) |
| 116 | { | 101 | { |
| @@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan, | |||
| 121 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); | 106 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); |
| 122 | } | 107 | } |
| 123 | 108 | ||
| 124 | static dma_addr_t get_desc_dst(struct fsldma_chan *chan, | ||
| 125 | struct fsl_desc_sw *desc) | ||
| 126 | { | ||
| 127 | u64 snoop_bits; | ||
| 128 | |||
| 129 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
| 130 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
| 131 | return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; | ||
| 132 | } | ||
| 133 | |||
| 134 | static void set_desc_next(struct fsldma_chan *chan, | 109 | static void set_desc_next(struct fsldma_chan *chan, |
| 135 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | 110 | struct fsl_dma_ld_hw *hw, dma_addr_t next) |
| 136 | { | 111 | { |
| @@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 408 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | 383 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); |
| 409 | struct fsl_desc_sw *child; | 384 | struct fsl_desc_sw *child; |
| 410 | unsigned long flags; | 385 | unsigned long flags; |
| 411 | dma_cookie_t cookie; | 386 | dma_cookie_t cookie = -EINVAL; |
| 412 | 387 | ||
| 413 | spin_lock_irqsave(&chan->desc_lock, flags); | 388 | spin_lock_irqsave(&chan->desc_lock, flags); |
| 414 | 389 | ||
| @@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, | |||
| 854 | struct fsl_desc_sw *desc) | 829 | struct fsl_desc_sw *desc) |
| 855 | { | 830 | { |
| 856 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | 831 | struct dma_async_tx_descriptor *txd = &desc->async_tx; |
| 857 | struct device *dev = chan->common.device->dev; | ||
| 858 | dma_addr_t src = get_desc_src(chan, desc); | ||
| 859 | dma_addr_t dst = get_desc_dst(chan, desc); | ||
| 860 | u32 len = get_desc_cnt(chan, desc); | ||
| 861 | 832 | ||
| 862 | /* Run the link descriptor callback function */ | 833 | /* Run the link descriptor callback function */ |
| 863 | if (txd->callback) { | 834 | if (txd->callback) { |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a49c777607c..87529181efcc 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
| @@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
| 817 | } | 817 | } |
| 818 | 818 | ||
| 819 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | 819 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); |
| 820 | if (dma_mapping_error(dev, dma_src)) { | ||
| 821 | dev_err(dev, "mapping src buffer failed\n"); | ||
| 822 | goto free_resources; | ||
| 823 | } | ||
| 820 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | 824 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); |
| 825 | if (dma_mapping_error(dev, dma_dest)) { | ||
| 826 | dev_err(dev, "mapping dest buffer failed\n"); | ||
| 827 | goto unmap_src; | ||
| 828 | } | ||
| 821 | flags = DMA_PREP_INTERRUPT; | 829 | flags = DMA_PREP_INTERRUPT; |
| 822 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | 830 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, |
| 823 | IOAT_TEST_SIZE, flags); | 831 | IOAT_TEST_SIZE, flags); |
| @@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
| 855 | } | 863 | } |
| 856 | 864 | ||
| 857 | unmap_dma: | 865 | unmap_dma: |
| 858 | dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
| 859 | dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | 866 | dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); |
| 867 | unmap_src: | ||
| 868 | dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
| 860 | free_resources: | 869 | free_resources: |
| 861 | dma->device_free_chan_resources(dma_chan); | 870 | dma->device_free_chan_resources(dma_chan); |
| 862 | out: | 871 | out: |
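The ioat self-test hunk above adds the standard streaming-DMA error discipline: every dma_map_single() is checked with dma_mapping_error(), and the unwind labels are ordered so that only what was actually mapped gets unmapped. A condensed sketch of the shape of that error path:

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src))
		goto free_resources;			/* nothing mapped yet */

	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest))
		goto unmap_src;				/* only the source is mapped */

	/* ... run and verify the copy ... */

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	/* release the channel, free the buffers */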
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 7807f0ef4e20..53fb0c8365b0 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
| @@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) | |||
| 54 | hw_desc->desc_command = (1 << 31); | 54 | hw_desc->desc_command = (1 << 31); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) | ||
| 58 | { | ||
| 59 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
| 60 | return hw_desc->phy_dest_addr; | ||
| 61 | } | ||
| 62 | |||
| 63 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, | 57 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, |
| 64 | u32 byte_count) | 58 | u32 byte_count) |
| 65 | { | 59 | { |
| @@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan) | |||
| 787 | /* | 781 | /* |
| 788 | * Perform a transaction to verify the HW works. | 782 | * Perform a transaction to verify the HW works. |
| 789 | */ | 783 | */ |
| 790 | #define MV_XOR_TEST_SIZE 2000 | ||
| 791 | 784 | ||
| 792 | static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | 785 | static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) |
| 793 | { | 786 | { |
| @@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
| 797 | struct dma_chan *dma_chan; | 790 | struct dma_chan *dma_chan; |
| 798 | dma_cookie_t cookie; | 791 | dma_cookie_t cookie; |
| 799 | struct dma_async_tx_descriptor *tx; | 792 | struct dma_async_tx_descriptor *tx; |
| 793 | struct dmaengine_unmap_data *unmap; | ||
| 800 | int err = 0; | 794 | int err = 0; |
| 801 | 795 | ||
| 802 | src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); | 796 | src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); |
| 803 | if (!src) | 797 | if (!src) |
| 804 | return -ENOMEM; | 798 | return -ENOMEM; |
| 805 | 799 | ||
| 806 | dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); | 800 | dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); |
| 807 | if (!dest) { | 801 | if (!dest) { |
| 808 | kfree(src); | 802 | kfree(src); |
| 809 | return -ENOMEM; | 803 | return -ENOMEM; |
| 810 | } | 804 | } |
| 811 | 805 | ||
| 812 | /* Fill in src buffer */ | 806 | /* Fill in src buffer */ |
| 813 | for (i = 0; i < MV_XOR_TEST_SIZE; i++) | 807 | for (i = 0; i < PAGE_SIZE; i++) |
| 814 | ((u8 *) src)[i] = (u8)i; | 808 | ((u8 *) src)[i] = (u8)i; |
| 815 | 809 | ||
| 816 | dma_chan = &mv_chan->dmachan; | 810 | dma_chan = &mv_chan->dmachan; |
| @@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
| 819 | goto out; | 813 | goto out; |
| 820 | } | 814 | } |
| 821 | 815 | ||
| 822 | dest_dma = dma_map_single(dma_chan->device->dev, dest, | 816 | unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL); |
| 823 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); | 817 | if (!unmap) { |
| 818 | err = -ENOMEM; | ||
| 819 | goto free_resources; | ||
| 820 | } | ||
| 821 | |||
| 822 | src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, | ||
| 823 | PAGE_SIZE, DMA_TO_DEVICE); | ||
| 824 | unmap->to_cnt = 1; | ||
| 825 | unmap->addr[0] = src_dma; | ||
| 824 | 826 | ||
| 825 | src_dma = dma_map_single(dma_chan->device->dev, src, | 827 | dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, |
| 826 | MV_XOR_TEST_SIZE, DMA_TO_DEVICE); | 828 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 829 | unmap->from_cnt = 1; | ||
| 830 | unmap->addr[1] = dest_dma; | ||
| 831 | |||
| 832 | unmap->len = PAGE_SIZE; | ||
| 827 | 833 | ||
| 828 | tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, | 834 | tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, |
| 829 | MV_XOR_TEST_SIZE, 0); | 835 | PAGE_SIZE, 0); |
| 830 | cookie = mv_xor_tx_submit(tx); | 836 | cookie = mv_xor_tx_submit(tx); |
| 831 | mv_xor_issue_pending(dma_chan); | 837 | mv_xor_issue_pending(dma_chan); |
| 832 | async_tx_ack(tx); | 838 | async_tx_ack(tx); |
| @@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
| 841 | } | 847 | } |
| 842 | 848 | ||
| 843 | dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, | 849 | dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, |
| 844 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); | 850 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 845 | if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { | 851 | if (memcmp(src, dest, PAGE_SIZE)) { |
| 846 | dev_err(dma_chan->device->dev, | 852 | dev_err(dma_chan->device->dev, |
| 847 | "Self-test copy failed compare, disabling\n"); | 853 | "Self-test copy failed compare, disabling\n"); |
| 848 | err = -ENODEV; | 854 | err = -ENODEV; |
| @@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
| 850 | } | 856 | } |
| 851 | 857 | ||
| 852 | free_resources: | 858 | free_resources: |
| 859 | dmaengine_unmap_put(unmap); | ||
| 853 | mv_xor_free_chan_resources(dma_chan); | 860 | mv_xor_free_chan_resources(dma_chan); |
| 854 | out: | 861 | out: |
| 855 | kfree(src); | 862 | kfree(src); |
| @@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
| 867 | dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; | 874 | dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; |
| 868 | dma_addr_t dest_dma; | 875 | dma_addr_t dest_dma; |
| 869 | struct dma_async_tx_descriptor *tx; | 876 | struct dma_async_tx_descriptor *tx; |
| 877 | struct dmaengine_unmap_data *unmap; | ||
| 870 | struct dma_chan *dma_chan; | 878 | struct dma_chan *dma_chan; |
| 871 | dma_cookie_t cookie; | 879 | dma_cookie_t cookie; |
| 872 | u8 cmp_byte = 0; | 880 | u8 cmp_byte = 0; |
| 873 | u32 cmp_word; | 881 | u32 cmp_word; |
| 874 | int err = 0; | 882 | int err = 0; |
| 883 | int src_count = MV_XOR_NUM_SRC_TEST; | ||
| 875 | 884 | ||
| 876 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { | 885 | for (src_idx = 0; src_idx < src_count; src_idx++) { |
| 877 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | 886 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); |
| 878 | if (!xor_srcs[src_idx]) { | 887 | if (!xor_srcs[src_idx]) { |
| 879 | while (src_idx--) | 888 | while (src_idx--) |
| @@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
| 890 | } | 899 | } |
| 891 | 900 | ||
| 892 | /* Fill in src buffers */ | 901 | /* Fill in src buffers */ |
| 893 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { | 902 | for (src_idx = 0; src_idx < src_count; src_idx++) { |
| 894 | u8 *ptr = page_address(xor_srcs[src_idx]); | 903 | u8 *ptr = page_address(xor_srcs[src_idx]); |
| 895 | for (i = 0; i < PAGE_SIZE; i++) | 904 | for (i = 0; i < PAGE_SIZE; i++) |
| 896 | ptr[i] = (1 << src_idx); | 905 | ptr[i] = (1 << src_idx); |
| 897 | } | 906 | } |
| 898 | 907 | ||
| 899 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) | 908 | for (src_idx = 0; src_idx < src_count; src_idx++) |
| 900 | cmp_byte ^= (u8) (1 << src_idx); | 909 | cmp_byte ^= (u8) (1 << src_idx); |
| 901 | 910 | ||
| 902 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | | 911 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | |
| @@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
| 910 | goto out; | 919 | goto out; |
| 911 | } | 920 | } |
| 912 | 921 | ||
| 922 | unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1, | ||
| 923 | GFP_KERNEL); | ||
| 924 | if (!unmap) { | ||
| 925 | err = -ENOMEM; | ||
| 926 | goto free_resources; | ||
| 927 | } | ||
| 928 | |||
| 913 | /* test xor */ | 929 | /* test xor */ |
| 914 | dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, | 930 | for (i = 0; i < src_count; i++) { |
| 915 | DMA_FROM_DEVICE); | 931 | unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], |
| 932 | 0, PAGE_SIZE, DMA_TO_DEVICE); | ||
| 933 | dma_srcs[i] = unmap->addr[i]; | ||
| 934 | unmap->to_cnt++; | ||
| 935 | } | ||
| 916 | 936 | ||
| 917 | for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) | 937 | unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, |
| 918 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], | 938 | DMA_FROM_DEVICE); |
| 919 | 0, PAGE_SIZE, DMA_TO_DEVICE); | 939 | dest_dma = unmap->addr[src_count]; |
| 940 | unmap->from_cnt = 1; | ||
| 941 | unmap->len = PAGE_SIZE; | ||
| 920 | 942 | ||
| 921 | tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | 943 | tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, |
| 922 | MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); | 944 | src_count, PAGE_SIZE, 0); |
| 923 | 945 | ||
| 924 | cookie = mv_xor_tx_submit(tx); | 946 | cookie = mv_xor_tx_submit(tx); |
| 925 | mv_xor_issue_pending(dma_chan); | 947 | mv_xor_issue_pending(dma_chan); |
| @@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
| 948 | } | 970 | } |
| 949 | 971 | ||
| 950 | free_resources: | 972 | free_resources: |
| 973 | dmaengine_unmap_put(unmap); | ||
| 951 | mv_xor_free_chan_resources(dma_chan); | 974 | mv_xor_free_chan_resources(dma_chan); |
| 952 | out: | 975 | out: |
| 953 | src_idx = MV_XOR_NUM_SRC_TEST; | 976 | src_idx = src_count; |
| 954 | while (src_idx--) | 977 | while (src_idx--) |
| 955 | __free_page(xor_srcs[src_idx]); | 978 | __free_page(xor_srcs[src_idx]); |
| 956 | __free_page(dest); | 979 | __free_page(dest); |
| @@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
| 1176 | int i = 0; | 1199 | int i = 0; |
| 1177 | 1200 | ||
| 1178 | for_each_child_of_node(pdev->dev.of_node, np) { | 1201 | for_each_child_of_node(pdev->dev.of_node, np) { |
| 1202 | struct mv_xor_chan *chan; | ||
| 1179 | dma_cap_mask_t cap_mask; | 1203 | dma_cap_mask_t cap_mask; |
| 1180 | int irq; | 1204 | int irq; |
| 1181 | 1205 | ||
| @@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
| 1193 | goto err_channel_add; | 1217 | goto err_channel_add; |
| 1194 | } | 1218 | } |
| 1195 | 1219 | ||
| 1196 | xordev->channels[i] = | 1220 | chan = mv_xor_channel_add(xordev, pdev, i, |
| 1197 | mv_xor_channel_add(xordev, pdev, i, | 1221 | cap_mask, irq); |
| 1198 | cap_mask, irq); | 1222 | if (IS_ERR(chan)) { |
| 1199 | if (IS_ERR(xordev->channels[i])) { | 1223 | ret = PTR_ERR(chan); |
| 1200 | ret = PTR_ERR(xordev->channels[i]); | ||
| 1201 | xordev->channels[i] = NULL; | ||
| 1202 | irq_dispose_mapping(irq); | 1224 | irq_dispose_mapping(irq); |
| 1203 | goto err_channel_add; | 1225 | goto err_channel_add; |
| 1204 | } | 1226 | } |
| 1205 | 1227 | ||
| 1228 | xordev->channels[i] = chan; | ||
| 1206 | i++; | 1229 | i++; |
| 1207 | } | 1230 | } |
| 1208 | } else if (pdata && pdata->channels) { | 1231 | } else if (pdata && pdata->channels) { |
| 1209 | for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { | 1232 | for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { |
| 1210 | struct mv_xor_channel_data *cd; | 1233 | struct mv_xor_channel_data *cd; |
| 1234 | struct mv_xor_chan *chan; | ||
| 1211 | int irq; | 1235 | int irq; |
| 1212 | 1236 | ||
| 1213 | cd = &pdata->channels[i]; | 1237 | cd = &pdata->channels[i]; |
| @@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
| 1222 | goto err_channel_add; | 1246 | goto err_channel_add; |
| 1223 | } | 1247 | } |
| 1224 | 1248 | ||
| 1225 | xordev->channels[i] = | 1249 | chan = mv_xor_channel_add(xordev, pdev, i, |
| 1226 | mv_xor_channel_add(xordev, pdev, i, | 1250 | cd->cap_mask, irq); |
| 1227 | cd->cap_mask, irq); | 1251 | if (IS_ERR(chan)) { |
| 1228 | if (IS_ERR(xordev->channels[i])) { | 1252 | ret = PTR_ERR(chan); |
| 1229 | ret = PTR_ERR(xordev->channels[i]); | ||
| 1230 | goto err_channel_add; | 1253 | goto err_channel_add; |
| 1231 | } | 1254 | } |
| 1255 | |||
| 1256 | xordev->channels[i] = chan; | ||
| 1232 | } | 1257 | } |
| 1233 | } | 1258 | } |
| 1234 | 1259 | ||
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cdf0483b8f2d..536632f6479c 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 2492 | 2492 | ||
| 2493 | static inline void _init_desc(struct dma_pl330_desc *desc) | 2493 | static inline void _init_desc(struct dma_pl330_desc *desc) |
| 2494 | { | 2494 | { |
| 2495 | desc->pchan = NULL; | ||
| 2496 | desc->req.x = &desc->px; | 2495 | desc->req.x = &desc->px; |
| 2497 | desc->req.token = desc; | 2496 | desc->req.token = desc; |
| 2498 | desc->rqcfg.swap = SWAP_NO; | 2497 | desc->rqcfg.swap = SWAP_NO; |
| 2499 | desc->rqcfg.privileged = 0; | ||
| 2500 | desc->rqcfg.insnaccess = 0; | ||
| 2501 | desc->rqcfg.scctl = SCCTRL0; | 2498 | desc->rqcfg.scctl = SCCTRL0; |
| 2502 | desc->rqcfg.dcctl = DCCTRL0; | 2499 | desc->rqcfg.dcctl = DCCTRL0; |
| 2503 | desc->req.cfg = &desc->rqcfg; | 2500 | desc->req.cfg = &desc->rqcfg; |
| @@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) | |||
| 2517 | if (!pdmac) | 2514 | if (!pdmac) |
| 2518 | return 0; | 2515 | return 0; |
| 2519 | 2516 | ||
| 2520 | desc = kmalloc(count * sizeof(*desc), flg); | 2517 | desc = kcalloc(count, sizeof(*desc), flg); |
| 2521 | if (!desc) | 2518 | if (!desc) |
| 2522 | return 0; | 2519 | return 0; |
| 2523 | 2520 | ||
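On the pl330 hunk: kcalloc() both guards the count * sizeof(*desc) multiplication against overflow and returns zeroed memory, which is why _init_desc() can stop clearing pchan, privileged and insnaccess by hand. A short sketch of the equivalence; the zeroing is the part the removed assignments relied on:

	/* Before: kmalloc() followed by manual zeroing of several fields.
	 * After:  kcalloc() == overflow-checked kmalloc_array() + zeroed memory. */
	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;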
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 8da48c6b2a38..8bba298535b0 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
| @@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc, | |||
| 533 | } | 533 | } |
| 534 | 534 | ||
| 535 | /** | 535 | /** |
| 536 | * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation | ||
| 537 | */ | ||
| 538 | static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc, | ||
| 539 | int value, unsigned long flags) | ||
| 540 | { | ||
| 541 | struct dma_cdb *hw_desc = desc->hw_desc; | ||
| 542 | |||
| 543 | memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); | ||
| 544 | desc->hw_next = NULL; | ||
| 545 | desc->src_cnt = 1; | ||
| 546 | desc->dst_cnt = 1; | ||
| 547 | |||
| 548 | if (flags & DMA_PREP_INTERRUPT) | ||
| 549 | set_bit(PPC440SPE_DESC_INT, &desc->flags); | ||
| 550 | else | ||
| 551 | clear_bit(PPC440SPE_DESC_INT, &desc->flags); | ||
| 552 | |||
| 553 | hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value); | ||
| 554 | hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value); | ||
| 555 | hw_desc->opc = DMA_CDB_OPC_DFILL128; | ||
| 556 | } | ||
| 557 | |||
| 558 | /** | ||
| 559 | * ppc440spe_desc_set_src_addr - set source address into the descriptor | 536 | * ppc440spe_desc_set_src_addr - set source address into the descriptor |
| 560 | */ | 537 | */ |
| 561 | static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, | 538 | static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, |
| @@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( | |||
| 1504 | struct ppc440spe_adma_chan *chan, | 1481 | struct ppc440spe_adma_chan *chan, |
| 1505 | dma_cookie_t cookie) | 1482 | dma_cookie_t cookie) |
| 1506 | { | 1483 | { |
| 1507 | int i; | ||
| 1508 | |||
| 1509 | BUG_ON(desc->async_tx.cookie < 0); | 1484 | BUG_ON(desc->async_tx.cookie < 0); |
| 1510 | if (desc->async_tx.cookie > 0) { | 1485 | if (desc->async_tx.cookie > 0) { |
| 1511 | cookie = desc->async_tx.cookie; | 1486 | cookie = desc->async_tx.cookie; |
| @@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) | |||
| 3898 | ppc440spe_adma_prep_dma_interrupt; | 3873 | ppc440spe_adma_prep_dma_interrupt; |
| 3899 | } | 3874 | } |
| 3900 | pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " | 3875 | pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " |
| 3901 | "( %s%s%s%s%s%s%s)\n", | 3876 | "( %s%s%s%s%s%s)\n", |
| 3902 | dev_name(adev->dev), | 3877 | dev_name(adev->dev), |
| 3903 | dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", | 3878 | dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", |
| 3904 | dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", | 3879 | dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", |
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index bae6c29f5502..17686caf64d5 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
| @@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, | |||
| 406 | dma_async_tx_callback callback; | 406 | dma_async_tx_callback callback; |
| 407 | void *param; | 407 | void *param; |
| 408 | struct dma_async_tx_descriptor *txd = &desc->txd; | 408 | struct dma_async_tx_descriptor *txd = &desc->txd; |
| 409 | struct txx9dmac_slave *ds = dc->chan.private; | ||
| 410 | 409 | ||
| 411 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", | 410 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", |
| 412 | txd->cookie, desc); | 411 | txd->cookie, desc); |
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index b0bb056458a3..281029daf98c 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
| @@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = { | |||
| 1623 | .cmd_per_lun = 1, | 1623 | .cmd_per_lun = 1, |
| 1624 | .can_queue = 1, | 1624 | .can_queue = 1, |
| 1625 | .sdev_attrs = sbp2_scsi_sysfs_attrs, | 1625 | .sdev_attrs = sbp2_scsi_sysfs_attrs, |
| 1626 | .no_write_same = 1, | ||
| 1627 | }; | 1626 | }; |
| 1628 | 1627 | ||
| 1629 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); | 1628 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); |
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 299fad6b5867..5373dc5b6011 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile | |||
| @@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o | |||
| 14 | 14 | ||
| 15 | obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ | 15 | obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ |
| 16 | obj-$(CONFIG_EFI) += efi/ | 16 | obj-$(CONFIG_EFI) += efi/ |
| 17 | obj-$(CONFIG_UEFI_CPER) += efi/ | ||
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 3150aa4874e8..6aecbc86ec94 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig | |||
| @@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE | |||
| 36 | backend for pstore by default. This setting can be overridden | 36 | backend for pstore by default. This setting can be overridden |
| 37 | using the efivars module's pstore_disable parameter. | 37 | using the efivars module's pstore_disable parameter. |
| 38 | 38 | ||
| 39 | config UEFI_CPER | ||
| 40 | def_bool n | ||
| 41 | |||
| 42 | endmenu | 39 | endmenu |
| 40 | |||
| 41 | config UEFI_CPER | ||
| 42 | bool | ||
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 9ba156d3c775..6c2a41ec21ba 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for linux kernel | 2 | # Makefile for linux kernel |
| 3 | # | 3 | # |
| 4 | obj-y += efi.o vars.o | 4 | obj-$(CONFIG_EFI) += efi.o vars.o |
| 5 | obj-$(CONFIG_EFI_VARS) += efivars.o | 5 | obj-$(CONFIG_EFI_VARS) += efivars.o |
| 6 | obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o | 6 | obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o |
| 7 | obj-$(CONFIG_UEFI_CPER) += cper.o | 7 | obj-$(CONFIG_UEFI_CPER) += cper.o |
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 743fd426f21b..4b9dc836dcf9 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c | |||
| @@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, | |||
| 356 | static struct pstore_info efi_pstore_info = { | 356 | static struct pstore_info efi_pstore_info = { |
| 357 | .owner = THIS_MODULE, | 357 | .owner = THIS_MODULE, |
| 358 | .name = "efi", | 358 | .name = "efi", |
| 359 | .flags = PSTORE_FLAGS_FRAGILE, | ||
| 359 | .open = efi_pstore_open, | 360 | .open = efi_pstore_open, |
| 360 | .close = efi_pstore_close, | 361 | .close = efi_pstore_close, |
| 361 | .read = efi_pstore_read, | 362 | .read = efi_pstore_read, |
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index 7b37300973db..2baf0ddf7e02 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c | |||
| @@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) | |||
| 252 | 252 | ||
| 253 | spin_lock_irqsave(&tlmm_lock, irq_flags); | 253 | spin_lock_irqsave(&tlmm_lock, irq_flags); |
| 254 | writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); | 254 | writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); |
| 255 | clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); | 255 | clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); |
| 256 | __clear_bit(gpio, msm_gpio.enabled_irqs); | 256 | __clear_bit(gpio, msm_gpio.enabled_irqs); |
| 257 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | 257 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); |
| 258 | } | 258 | } |
| @@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) | |||
| 264 | 264 | ||
| 265 | spin_lock_irqsave(&tlmm_lock, irq_flags); | 265 | spin_lock_irqsave(&tlmm_lock, irq_flags); |
| 266 | __set_bit(gpio, msm_gpio.enabled_irqs); | 266 | __set_bit(gpio, msm_gpio.enabled_irqs); |
| 267 | set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); | 267 | set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); |
| 268 | writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); | 268 | writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); |
| 269 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | 269 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); |
| 270 | } | 270 | } |
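The gpio-msm-v2 change is a functional fix, not a style one: INTR_RAW_STATUS_EN and INTR_ENABLE are bit positions, and the old code ORed the positions themselves into the register instead of the corresponding masks. A small illustration with hypothetical values (the real definitions live in the driver):

	#define INTR_ENABLE		0	/* hypothetical bit numbers, for illustration only */
	#define INTR_RAW_STATUS_EN	4

	u32 wrong = INTR_RAW_STATUS_EN | INTR_ENABLE;		/* 0x04 - sets an unrelated bit  */
	u32 right = BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE);	/* 0x11 - bits 4 and 0, as meant */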
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index fe088a30567a..8b7e719a68c3 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c | |||
| @@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) | |||
| 169 | u32 pending; | 169 | u32 pending; |
| 170 | unsigned int offset, irqs_handled = 0; | 170 | unsigned int offset, irqs_handled = 0; |
| 171 | 171 | ||
| 172 | while ((pending = gpio_rcar_read(p, INTDT))) { | 172 | while ((pending = gpio_rcar_read(p, INTDT) & |
| 173 | gpio_rcar_read(p, INTMSK))) { | ||
| 173 | offset = __ffs(pending); | 174 | offset = __ffs(pending); |
| 174 | gpio_rcar_write(p, INTCLR, BIT(offset)); | 175 | gpio_rcar_write(p, INTCLR, BIT(offset)); |
| 175 | generic_handle_irq(irq_find_mapping(p->irq_domain, offset)); | 176 | generic_handle_irq(irq_find_mapping(p->irq_domain, offset)); |
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c index b97d6a6577b9..f9996899c1f2 100644 --- a/drivers/gpio/gpio-twl4030.c +++ b/drivers/gpio/gpio-twl4030.c | |||
| @@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset) | |||
| 300 | if (offset < TWL4030_GPIO_MAX) | 300 | if (offset < TWL4030_GPIO_MAX) |
| 301 | ret = twl4030_set_gpio_direction(offset, 1); | 301 | ret = twl4030_set_gpio_direction(offset, 1); |
| 302 | else | 302 | else |
| 303 | ret = -EINVAL; | 303 | ret = -EINVAL; /* LED outputs can't be set as input */ |
| 304 | 304 | ||
| 305 | if (!ret) | 305 | if (!ret) |
| 306 | priv->direction &= ~BIT(offset); | 306 | priv->direction &= ~BIT(offset); |
| @@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value) | |||
| 354 | static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) | 354 | static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) |
| 355 | { | 355 | { |
| 356 | struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); | 356 | struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); |
| 357 | int ret = -EINVAL; | 357 | int ret = 0; |
| 358 | 358 | ||
| 359 | mutex_lock(&priv->mutex); | 359 | mutex_lock(&priv->mutex); |
| 360 | if (offset < TWL4030_GPIO_MAX) | 360 | if (offset < TWL4030_GPIO_MAX) { |
| 361 | ret = twl4030_set_gpio_direction(offset, 0); | 361 | ret = twl4030_set_gpio_direction(offset, 0); |
| 362 | if (ret) { | ||
| 363 | mutex_unlock(&priv->mutex); | ||
| 364 | return ret; | ||
| 365 | } | ||
| 366 | } | ||
| 367 | |||
| 368 | /* | ||
| 369 | * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output | ||
| 370 | */ | ||
| 362 | 371 | ||
| 363 | priv->direction |= BIT(offset); | 372 | priv->direction |= BIT(offset); |
| 364 | mutex_unlock(&priv->mutex); | 373 | mutex_unlock(&priv->mutex); |
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h index eef09ec9a5ff..a72cae03b99b 100644 --- a/drivers/gpu/drm/armada/armada_drm.h +++ b/drivers/gpu/drm/armada/armada_drm.h | |||
| @@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *, | |||
| 103 | extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs; | 103 | extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs; |
| 104 | 104 | ||
| 105 | int armada_fbdev_init(struct drm_device *); | 105 | int armada_fbdev_init(struct drm_device *); |
| 106 | void armada_fbdev_lastclose(struct drm_device *); | ||
| 106 | void armada_fbdev_fini(struct drm_device *); | 107 | void armada_fbdev_fini(struct drm_device *); |
| 107 | 108 | ||
| 108 | int armada_overlay_plane_create(struct drm_device *, unsigned long); | 109 | int armada_overlay_plane_create(struct drm_device *, unsigned long); |
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 4f2b28354915..62d0ff3efddf 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c | |||
| @@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = { | |||
| 321 | DRM_UNLOCKED), | 321 | DRM_UNLOCKED), |
| 322 | }; | 322 | }; |
| 323 | 323 | ||
| 324 | static void armada_drm_lastclose(struct drm_device *dev) | ||
| 325 | { | ||
| 326 | armada_fbdev_lastclose(dev); | ||
| 327 | } | ||
| 328 | |||
| 324 | static const struct file_operations armada_drm_fops = { | 329 | static const struct file_operations armada_drm_fops = { |
| 325 | .owner = THIS_MODULE, | 330 | .owner = THIS_MODULE, |
| 326 | .llseek = no_llseek, | 331 | .llseek = no_llseek, |
| @@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = { | |||
| 337 | .open = NULL, | 342 | .open = NULL, |
| 338 | .preclose = NULL, | 343 | .preclose = NULL, |
| 339 | .postclose = NULL, | 344 | .postclose = NULL, |
| 340 | .lastclose = NULL, | 345 | .lastclose = armada_drm_lastclose, |
| 341 | .unload = armada_drm_unload, | 346 | .unload = armada_drm_unload, |
| 342 | .get_vblank_counter = drm_vblank_count, | 347 | .get_vblank_counter = drm_vblank_count, |
| 343 | .enable_vblank = armada_drm_enable_vblank, | 348 | .enable_vblank = armada_drm_enable_vblank, |
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index dd5ea77dac96..948cb14c561e 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c | |||
| @@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh, | |||
| 105 | drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); | 105 | drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); |
| 106 | drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); | 106 | drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); |
| 107 | 107 | ||
| 108 | DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n", | 108 | DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n", |
| 109 | dfb->fb.width, dfb->fb.height, | 109 | dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel, |
| 110 | dfb->fb.bits_per_pixel, obj->phys_addr); | 110 | (unsigned long long)obj->phys_addr); |
| 111 | 111 | ||
| 112 | return 0; | 112 | return 0; |
| 113 | 113 | ||
| @@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev) | |||
| 177 | return ret; | 177 | return ret; |
| 178 | } | 178 | } |
| 179 | 179 | ||
| 180 | void armada_fbdev_lastclose(struct drm_device *dev) | ||
| 181 | { | ||
| 182 | struct armada_private *priv = dev->dev_private; | ||
| 183 | |||
| 184 | drm_modeset_lock_all(dev); | ||
| 185 | if (priv->fbdev) | ||
| 186 | drm_fb_helper_restore_fbdev_mode(priv->fbdev); | ||
| 187 | drm_modeset_unlock_all(dev); | ||
| 188 | } | ||
| 189 | |||
| 180 | void armada_fbdev_fini(struct drm_device *dev) | 190 | void armada_fbdev_fini(struct drm_device *dev) |
| 181 | { | 191 | { |
| 182 | struct armada_private *priv = dev->dev_private; | 192 | struct armada_private *priv = dev->dev_private; |
| @@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev) | |||
| 192 | framebuffer_release(info); | 202 | framebuffer_release(info); |
| 193 | } | 203 | } |
| 194 | 204 | ||
| 205 | drm_fb_helper_fini(fbh); | ||
| 206 | |||
| 195 | if (fbh->fb) | 207 | if (fbh->fb) |
| 196 | fbh->fb->funcs->destroy(fbh->fb); | 208 | fbh->fb->funcs->destroy(fbh->fb); |
| 197 | 209 | ||
| 198 | drm_fb_helper_fini(fbh); | ||
| 199 | |||
| 200 | priv->fbdev = NULL; | 210 | priv->fbdev = NULL; |
| 201 | } | 211 | } |
| 202 | } | 212 | } |
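The format-string changes in the armada hunks follow the usual rule for phys_addr_t and dma_addr_t: the types are 32 or 64 bits wide depending on the configuration (LPAE, for instance), so the portable spelling is a cast to unsigned long long printed with %llx:

	/* Works whether phys_addr_t is 32-bit or 64-bit on this config. */
	pr_debug("fb at %#llx\n", (unsigned long long)obj->phys_addr);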
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 9f2356bae7fd..887816f43476 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c | |||
| @@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) | |||
| 172 | obj->dev_addr = obj->linear->start; | 172 | obj->dev_addr = obj->linear->start; |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n", | 175 | DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj, |
| 176 | obj, obj->phys_addr, obj->dev_addr); | 176 | (unsigned long long)obj->phys_addr, |
| 177 | (unsigned long long)obj->dev_addr); | ||
| 177 | 178 | ||
| 178 | return 0; | 179 | return 0; |
| 179 | } | 180 | } |
| @@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) | |||
| 557 | * refcount on the gem object itself. | 558 | * refcount on the gem object itself. |
| 558 | */ | 559 | */ |
| 559 | drm_gem_object_reference(obj); | 560 | drm_gem_object_reference(obj); |
| 560 | dma_buf_put(buf); | ||
| 561 | return obj; | 561 | return obj; |
| 562 | } | 562 | } |
| 563 | } | 563 | } |
| @@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) | |||
| 573 | } | 573 | } |
| 574 | 574 | ||
| 575 | dobj->obj.import_attach = attach; | 575 | dobj->obj.import_attach = attach; |
| 576 | get_dma_buf(buf); | ||
| 576 | 577 | ||
| 577 | /* | 578 | /* |
| 578 | * Don't call dma_buf_map_attachment() here - it maps the | 579 | * Don't call dma_buf_map_attachment() here - it maps the |
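As I read the armada_gem_prime_import() hunk, the refcount change is about ownership: the import path keeps using the dma_buf after returning, so it must take its own reference with get_dma_buf() (dropped again when the attachment is torn down) rather than consuming the caller's, and the self-import shortcut must stop calling dma_buf_put() on a reference it never took. A compressed sketch of that rule, leaving out the driver's self-import fast path:

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);	/* balanced by dma_buf_put() when the object is released */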
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 0a1e4a5f4234..8835dcddfac3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
| @@ -68,6 +68,8 @@ | |||
| 68 | #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) | 68 | #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) |
| 69 | /* Force reduced-blanking timings for detailed modes */ | 69 | /* Force reduced-blanking timings for detailed modes */ |
| 70 | #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) | 70 | #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) |
| 71 | /* Force 8bpc */ | ||
| 72 | #define EDID_QUIRK_FORCE_8BPC (1 << 8) | ||
| 71 | 73 | ||
| 72 | struct detailed_mode_closure { | 74 | struct detailed_mode_closure { |
| 73 | struct drm_connector *connector; | 75 | struct drm_connector *connector; |
| @@ -128,6 +130,9 @@ static struct edid_quirk { | |||
| 128 | 130 | ||
| 129 | /* Medion MD 30217 PG */ | 131 | /* Medion MD 30217 PG */ |
| 130 | { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, | 132 | { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, |
| 133 | |||
| 134 | /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ | ||
| 135 | { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, | ||
| 131 | }; | 136 | }; |
| 132 | 137 | ||
| 133 | /* | 138 | /* |
| @@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) | |||
| 3435 | 3440 | ||
| 3436 | drm_add_display_info(edid, &connector->display_info); | 3441 | drm_add_display_info(edid, &connector->display_info); |
| 3437 | 3442 | ||
| 3443 | if (quirks & EDID_QUIRK_FORCE_8BPC) | ||
| 3444 | connector->display_info.bpc = 8; | ||
| 3445 | |||
| 3438 | return num_modes; | 3446 | return num_modes; |
| 3439 | } | 3447 | } |
| 3440 | EXPORT_SYMBOL(drm_add_edid_modes); | 3448 | EXPORT_SYMBOL(drm_add_edid_modes); |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index f53d5246979c..66dd3a001cf1 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
| @@ -566,11 +566,11 @@ err_unload: | |||
| 566 | if (dev->driver->unload) | 566 | if (dev->driver->unload) |
| 567 | dev->driver->unload(dev); | 567 | dev->driver->unload(dev); |
| 568 | err_primary_node: | 568 | err_primary_node: |
| 569 | drm_put_minor(dev->primary); | 569 | drm_unplug_minor(dev->primary); |
| 570 | err_render_node: | 570 | err_render_node: |
| 571 | drm_put_minor(dev->render); | 571 | drm_unplug_minor(dev->render); |
| 572 | err_control_node: | 572 | err_control_node: |
| 573 | drm_put_minor(dev->control); | 573 | drm_unplug_minor(dev->control); |
| 574 | err_agp: | 574 | err_agp: |
| 575 | if (dev->driver->bus->agp_destroy) | 575 | if (dev->driver->bus->agp_destroy) |
| 576 | dev->driver->bus->agp_destroy(dev); | 576 | dev->driver->bus->agp_destroy(dev); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0cab2d045135..5c648425c1e0 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev) | |||
| 83 | drm_i915_private_t *dev_priv = dev->dev_private; | 83 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 84 | struct drm_i915_master_private *master_priv; | 84 | struct drm_i915_master_private *master_priv; |
| 85 | 85 | ||
| 86 | /* | ||
| 87 | * The dri breadcrumb update races against the drm master disappearing. | ||
| 88 | * Instead of trying to fix this (this is by far not the only ums issue) | ||
| 89 | * just don't do the update in kms mode. | ||
| 90 | */ | ||
| 91 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 92 | return; | ||
| 93 | |||
| 86 | if (dev->primary->master) { | 94 | if (dev->primary->master) { |
| 87 | master_priv = dev->primary->master->driver_priv; | 95 | master_priv = dev->primary->master->driver_priv; |
| 88 | if (master_priv->sarea_priv) | 96 | if (master_priv->sarea_priv) |
| @@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1490 | spin_lock_init(&dev_priv->uncore.lock); | 1498 | spin_lock_init(&dev_priv->uncore.lock); |
| 1491 | spin_lock_init(&dev_priv->mm.object_stat_lock); | 1499 | spin_lock_init(&dev_priv->mm.object_stat_lock); |
| 1492 | mutex_init(&dev_priv->dpio_lock); | 1500 | mutex_init(&dev_priv->dpio_lock); |
| 1493 | mutex_init(&dev_priv->rps.hw_lock); | ||
| 1494 | mutex_init(&dev_priv->modeset_restore_lock); | 1501 | mutex_init(&dev_priv->modeset_restore_lock); |
| 1495 | 1502 | ||
| 1496 | mutex_init(&dev_priv->pc8.lock); | 1503 | intel_pm_setup(dev); |
| 1497 | dev_priv->pc8.requirements_met = false; | ||
| 1498 | dev_priv->pc8.gpu_idle = false; | ||
| 1499 | dev_priv->pc8.irqs_disabled = false; | ||
| 1500 | dev_priv->pc8.enabled = false; | ||
| 1501 | dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ | ||
| 1502 | INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); | ||
| 1503 | 1504 | ||
| 1504 | intel_display_crc_init(dev); | 1505 | intel_display_crc_init(dev); |
| 1505 | 1506 | ||
| @@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1603 | } | 1604 | } |
| 1604 | 1605 | ||
| 1605 | intel_irq_init(dev); | 1606 | intel_irq_init(dev); |
| 1606 | intel_pm_init(dev); | ||
| 1607 | intel_uncore_sanitize(dev); | 1607 | intel_uncore_sanitize(dev); |
| 1608 | 1608 | ||
| 1609 | /* Try to make sure MCHBAR is enabled before poking at it */ | 1609 | /* Try to make sure MCHBAR is enabled before poking at it */ |
| @@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev) | |||
| 1848 | 1848 | ||
| 1849 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | 1849 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) |
| 1850 | { | 1850 | { |
| 1851 | mutex_lock(&dev->struct_mutex); | ||
| 1851 | i915_gem_context_close(dev, file_priv); | 1852 | i915_gem_context_close(dev, file_priv); |
| 1852 | i915_gem_release(dev, file_priv); | 1853 | i915_gem_release(dev, file_priv); |
| 1854 | mutex_unlock(&dev->struct_mutex); | ||
| 1853 | } | 1855 | } |
| 1854 | 1856 | ||
| 1855 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) | 1857 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2e367a1c6a64..5b7b7e06cb3a 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -651,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) | |||
| 651 | intel_modeset_init_hw(dev); | 651 | intel_modeset_init_hw(dev); |
| 652 | 652 | ||
| 653 | drm_modeset_lock_all(dev); | 653 | drm_modeset_lock_all(dev); |
| 654 | drm_mode_config_reset(dev); | ||
| 654 | intel_modeset_setup_hw_state(dev, true); | 655 | intel_modeset_setup_hw_state(dev, true); |
| 655 | drm_modeset_unlock_all(dev); | 656 | drm_modeset_unlock_all(dev); |
| 656 | 657 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ccdbecca070d..90fcccba17b0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -1755,8 +1755,13 @@ struct drm_i915_file_private { | |||
| 1755 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | 1755 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
| 1756 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ | 1756 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
| 1757 | ((dev)->pdev->device & 0xFF00) == 0x0C00) | 1757 | ((dev)->pdev->device & 0xFF00) == 0x0C00) |
| 1758 | #define IS_ULT(dev) (IS_HASWELL(dev) && \ | 1758 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
| 1759 | (((dev)->pdev->device & 0xf) == 0x2 || \ | ||
| 1760 | ((dev)->pdev->device & 0xf) == 0x6 || \ | ||
| 1761 | ((dev)->pdev->device & 0xf) == 0xe)) | ||
| 1762 | #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ | ||
| 1759 | ((dev)->pdev->device & 0xFF00) == 0x0A00) | 1763 | ((dev)->pdev->device & 0xFF00) == 0x0A00) |
| 1764 | #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) | ||
| 1760 | #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ | 1765 | #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ |
| 1761 | ((dev)->pdev->device & 0x00F0) == 0x0020) | 1766 | ((dev)->pdev->device & 0x00F0) == 0x0020) |
| 1762 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) | 1767 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) |
| @@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev); | |||
| 1901 | void i915_handle_error(struct drm_device *dev, bool wedged); | 1906 | void i915_handle_error(struct drm_device *dev, bool wedged); |
| 1902 | 1907 | ||
| 1903 | extern void intel_irq_init(struct drm_device *dev); | 1908 | extern void intel_irq_init(struct drm_device *dev); |
| 1904 | extern void intel_pm_init(struct drm_device *dev); | ||
| 1905 | extern void intel_hpd_init(struct drm_device *dev); | 1909 | extern void intel_hpd_init(struct drm_device *dev); |
| 1906 | extern void intel_pm_init(struct drm_device *dev); | ||
| 1907 | 1910 | ||
| 1908 | extern void intel_uncore_sanitize(struct drm_device *dev); | 1911 | extern void intel_uncore_sanitize(struct drm_device *dev); |
| 1909 | extern void intel_uncore_early_sanitize(struct drm_device *dev); | 1912 | extern void intel_uncore_early_sanitize(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 621c7c67a643..76d3d1ab73c6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request) | |||
| 2343 | kfree(request); | 2343 | kfree(request); |
| 2344 | } | 2344 | } |
| 2345 | 2345 | ||
| 2346 | static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | 2346 | static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, |
| 2347 | struct intel_ring_buffer *ring) | 2347 | struct intel_ring_buffer *ring) |
| 2348 | { | 2348 | { |
| 2349 | u32 completed_seqno; | 2349 | u32 completed_seqno = ring->get_seqno(ring, false); |
| 2350 | u32 acthd; | 2350 | u32 acthd = intel_ring_get_active_head(ring); |
| 2351 | struct drm_i915_gem_request *request; | ||
| 2352 | |||
| 2353 | list_for_each_entry(request, &ring->request_list, list) { | ||
| 2354 | if (i915_seqno_passed(completed_seqno, request->seqno)) | ||
| 2355 | continue; | ||
| 2351 | 2356 | ||
| 2352 | acthd = intel_ring_get_active_head(ring); | 2357 | i915_set_reset_status(ring, request, acthd); |
| 2353 | completed_seqno = ring->get_seqno(ring, false); | 2358 | } |
| 2359 | } | ||
| 2354 | 2360 | ||
| 2361 | static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | ||
| 2362 | struct intel_ring_buffer *ring) | ||
| 2363 | { | ||
| 2355 | while (!list_empty(&ring->request_list)) { | 2364 | while (!list_empty(&ring->request_list)) { |
| 2356 | struct drm_i915_gem_request *request; | 2365 | struct drm_i915_gem_request *request; |
| 2357 | 2366 | ||
| @@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | |||
| 2359 | struct drm_i915_gem_request, | 2368 | struct drm_i915_gem_request, |
| 2360 | list); | 2369 | list); |
| 2361 | 2370 | ||
| 2362 | if (request->seqno > completed_seqno) | ||
| 2363 | i915_set_reset_status(ring, request, acthd); | ||
| 2364 | |||
| 2365 | i915_gem_free_request(request); | 2371 | i915_gem_free_request(request); |
| 2366 | } | 2372 | } |
| 2367 | 2373 | ||
| @@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev) | |||
| 2403 | struct intel_ring_buffer *ring; | 2409 | struct intel_ring_buffer *ring; |
| 2404 | int i; | 2410 | int i; |
| 2405 | 2411 | ||
| 2412 | /* | ||
| 2413 | * Before we free the objects from the requests, we need to inspect | ||
| 2414 | * them for finding the guilty party. As the requests only borrow | ||
| 2415 | * their reference to the objects, the inspection must be done first. | ||
| 2416 | */ | ||
| 2417 | for_each_ring(ring, dev_priv, i) | ||
| 2418 | i915_gem_reset_ring_status(dev_priv, ring); | ||
| 2419 | |||
| 2406 | for_each_ring(ring, dev_priv, i) | 2420 | for_each_ring(ring, dev_priv, i) |
| 2407 | i915_gem_reset_ring_lists(dev_priv, ring); | 2421 | i915_gem_reset_ring_cleanup(dev_priv, ring); |
| 2408 | 2422 | ||
| 2409 | i915_gem_cleanup_ringbuffer(dev); | 2423 | i915_gem_cleanup_ringbuffer(dev); |
| 2410 | 2424 | ||
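The i915_gem.c hunks split the old reset helper into a status pass and a cleanup pass: blame must be assigned while the requests (which only borrow their object references) still exist, and only afterwards is everything freed. A standalone sketch of the same two-pass walk over a request list; the struct layout and the seqno helper are assumptions for illustration, not the driver's types.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>

struct request {
	uint32_t seqno;
	bool guilty;
	struct request *next;
};

/* seqno comparison that tolerates wrap-around, like the driver's helper. */
static bool seqno_passed(uint32_t completed, uint32_t seqno)
{
	return (int32_t)(completed - seqno) >= 0;
}

/* Pass 1: inspect only -- mark requests the hardware never completed. */
static void reset_status(struct request *head, uint32_t completed)
{
	for (struct request *req = head; req; req = req->next)
		if (!seqno_passed(completed, req->seqno))
			req->guilty = true;
}

/* Pass 2: tear everything down, now that blame has been recorded. */
static void reset_cleanup(struct request **head)
{
	while (*head) {
		struct request *req = *head;
		*head = req->next;
		printf("freeing seqno %u%s\n", (unsigned)req->seqno,
		       req->guilty ? " (guilty)" : "");
		free(req);
	}
}

int main(void)
{
	struct request *head = NULL;
	for (uint32_t s = 5; s >= 1; s--) {	/* build list 1..5 */
		struct request *req = malloc(sizeof(*req));
		req->seqno = s;
		req->guilty = false;
		req->next = head;
		head = req;
	}
	reset_status(head, 3);	/* pretend the GPU completed up to seqno 3 */
	reset_cleanup(&head);
	return 0;
}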
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 72a3df32292f..b0f42b9ca037 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
| @@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) | |||
| 347 | { | 347 | { |
| 348 | struct drm_i915_file_private *file_priv = file->driver_priv; | 348 | struct drm_i915_file_private *file_priv = file->driver_priv; |
| 349 | 349 | ||
| 350 | mutex_lock(&dev->struct_mutex); | ||
| 351 | idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); | 350 | idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); |
| 352 | idr_destroy(&file_priv->context_idr); | 351 | idr_destroy(&file_priv->context_idr); |
| 353 | mutex_unlock(&dev->struct_mutex); | ||
| 354 | } | 352 | } |
| 355 | 353 | ||
| 356 | static struct i915_hw_context * | 354 | static struct i915_hw_context * |
| @@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to) | |||
| 423 | if (ret) | 421 | if (ret) |
| 424 | return ret; | 422 | return ret; |
| 425 | 423 | ||
| 426 | /* Clear this page out of any CPU caches for coherent swap-in/out. Note | 424 | /* |
| 425 | * Pin can switch back to the default context if we end up calling into | ||
| 426 | * evict_everything - as a last ditch gtt defrag effort that also | ||
| 427 | * switches to the default context. Hence we need to reload from here. | ||
| 428 | */ | ||
| 429 | from = ring->last_context; | ||
| 430 | |||
| 431 | /* | ||
| 432 | * Clear this page out of any CPU caches for coherent swap-in/out. Note | ||
| 427 | * that thanks to write = false in this call and us not setting any gpu | 433 | * that thanks to write = false in this call and us not setting any gpu |
| 428 | * write domains when putting a context object onto the active list | 434 | * write domains when putting a context object onto the active list |
| 429 | * (when switching away from it), this won't block. | 435 | * (when switching away from it), this won't block. |
| 430 | * XXX: We need a real interface to do this instead of trickery. */ | 436 | * |
| 437 | * XXX: We need a real interface to do this instead of trickery. | ||
| 438 | */ | ||
| 431 | ret = i915_gem_object_set_to_gtt_domain(to->obj, false); | 439 | ret = i915_gem_object_set_to_gtt_domain(to->obj, false); |
| 432 | if (ret) { | 440 | if (ret) { |
| 433 | i915_gem_object_unpin(to->obj); | 441 | i915_gem_object_unpin(to->obj); |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index b7376533633d..8f3adc7d0dc8 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
| @@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, | |||
| 88 | } else | 88 | } else |
| 89 | drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); | 89 | drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); |
| 90 | 90 | ||
| 91 | search_again: | ||
| 91 | /* First see if there is a large enough contiguous idle region... */ | 92 | /* First see if there is a large enough contiguous idle region... */ |
| 92 | list_for_each_entry(vma, &vm->inactive_list, mm_list) { | 93 | list_for_each_entry(vma, &vm->inactive_list, mm_list) { |
| 93 | if (mark_free(vma, &unwind_list)) | 94 | if (mark_free(vma, &unwind_list)) |
| @@ -115,10 +116,17 @@ none: | |||
| 115 | list_del_init(&vma->exec_list); | 116 | list_del_init(&vma->exec_list); |
| 116 | } | 117 | } |
| 117 | 118 | ||
| 118 | /* We expect the caller to unpin, evict all and try again, or give up. | 119 | /* Can we unpin some objects such as idle hw contexts, |
| 119 | * So calling i915_gem_evict_vm() is unnecessary. | 120 | * or pending flips? |
| 120 | */ | 121 | */ |
| 121 | return -ENOSPC; | 122 | ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev); |
| 123 | if (ret) | ||
| 124 | return ret; | ||
| 125 | |||
| 126 | /* Only idle the GPU and repeat the search once */ | ||
| 127 | i915_gem_retire_requests(dev); | ||
| 128 | nonblocking = true; | ||
| 129 | goto search_again; | ||
| 122 | 130 | ||
| 123 | found: | 131 | found: |
| 124 | /* drm_mm doesn't allow any other operations while | 132 | /* drm_mm doesn't allow any other operations while |
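The eviction hunk replaces the immediate -ENOSPC with a single retry: if the first scan finds no hole and the caller allows blocking, the GPU is idled, requests are retired, and the scan runs once more. A hedged sketch of that scan / reclaim-once / rescan control flow; scan_for_space and reclaim_idle_space are made-up stand-ins for the drm_mm scan and i915_gpu_idle.

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Stand-ins for the real scan and for "idle the GPU and retire requests". */
static bool scan_for_space(int free_blocks, int needed)
{
	return free_blocks >= needed;
}

static int reclaim_idle_space(int *free_blocks)
{
	*free_blocks += 2;	/* pretend retiring requests freed two blocks */
	return 0;
}

static int evict_something(int free_blocks, int needed, bool nonblocking)
{
	int ret;

search_again:
	if (scan_for_space(free_blocks, needed))
		return 0;		/* found a hole */

	/* No hole: either give up (nonblocking) or idle once and retry. */
	ret = nonblocking ? -ENOSPC : reclaim_idle_space(&free_blocks);
	if (ret)
		return ret;

	nonblocking = true;	/* only retry the search once */
	goto search_again;
}

int main(void)
{
	printf("blocking: %d\n", evict_something(1, 3, false));
	printf("nonblocking: %d\n", evict_something(1, 3, true));
	return 0;
}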
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b7e787fb4649..a3ba9a8cd687 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 93 | { | 93 | { |
| 94 | struct drm_i915_gem_object *obj; | 94 | struct drm_i915_gem_object *obj; |
| 95 | struct list_head objects; | 95 | struct list_head objects; |
| 96 | int i, ret = 0; | 96 | int i, ret; |
| 97 | 97 | ||
| 98 | INIT_LIST_HEAD(&objects); | 98 | INIT_LIST_HEAD(&objects); |
| 99 | spin_lock(&file->table_lock); | 99 | spin_lock(&file->table_lock); |
| @@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 106 | DRM_DEBUG("Invalid object handle %d at index %d\n", | 106 | DRM_DEBUG("Invalid object handle %d at index %d\n", |
| 107 | exec[i].handle, i); | 107 | exec[i].handle, i); |
| 108 | ret = -ENOENT; | 108 | ret = -ENOENT; |
| 109 | goto out; | 109 | goto err; |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | if (!list_empty(&obj->obj_exec_link)) { | 112 | if (!list_empty(&obj->obj_exec_link)) { |
| @@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 114 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", | 114 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", |
| 115 | obj, exec[i].handle, i); | 115 | obj, exec[i].handle, i); |
| 116 | ret = -EINVAL; | 116 | ret = -EINVAL; |
| 117 | goto out; | 117 | goto err; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | drm_gem_object_reference(&obj->base); | 120 | drm_gem_object_reference(&obj->base); |
| @@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 123 | spin_unlock(&file->table_lock); | 123 | spin_unlock(&file->table_lock); |
| 124 | 124 | ||
| 125 | i = 0; | 125 | i = 0; |
| 126 | list_for_each_entry(obj, &objects, obj_exec_link) { | 126 | while (!list_empty(&objects)) { |
| 127 | struct i915_vma *vma; | 127 | struct i915_vma *vma; |
| 128 | 128 | ||
| 129 | obj = list_first_entry(&objects, | ||
| 130 | struct drm_i915_gem_object, | ||
| 131 | obj_exec_link); | ||
| 132 | |||
| 129 | /* | 133 | /* |
| 130 | * NOTE: We can leak any vmas created here when something fails | 134 | * NOTE: We can leak any vmas created here when something fails |
| 131 | * later on. But that's no issue since vma_unbind can deal with | 135 | * later on. But that's no issue since vma_unbind can deal with |
| @@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 138 | if (IS_ERR(vma)) { | 142 | if (IS_ERR(vma)) { |
| 139 | DRM_DEBUG("Failed to lookup VMA\n"); | 143 | DRM_DEBUG("Failed to lookup VMA\n"); |
| 140 | ret = PTR_ERR(vma); | 144 | ret = PTR_ERR(vma); |
| 141 | goto out; | 145 | goto err; |
| 142 | } | 146 | } |
| 143 | 147 | ||
| 148 | /* Transfer ownership from the objects list to the vmas list. */ | ||
| 144 | list_add_tail(&vma->exec_list, &eb->vmas); | 149 | list_add_tail(&vma->exec_list, &eb->vmas); |
| 150 | list_del_init(&obj->obj_exec_link); | ||
| 145 | 151 | ||
| 146 | vma->exec_entry = &exec[i]; | 152 | vma->exec_entry = &exec[i]; |
| 147 | if (eb->and < 0) { | 153 | if (eb->and < 0) { |
| @@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 155 | ++i; | 161 | ++i; |
| 156 | } | 162 | } |
| 157 | 163 | ||
| 164 | return 0; | ||
| 165 | |||
| 158 | 166 | ||
| 159 | out: | 167 | err: |
| 160 | while (!list_empty(&objects)) { | 168 | while (!list_empty(&objects)) { |
| 161 | obj = list_first_entry(&objects, | 169 | obj = list_first_entry(&objects, |
| 162 | struct drm_i915_gem_object, | 170 | struct drm_i915_gem_object, |
| 163 | obj_exec_link); | 171 | obj_exec_link); |
| 164 | list_del_init(&obj->obj_exec_link); | 172 | list_del_init(&obj->obj_exec_link); |
| 165 | if (ret) | 173 | drm_gem_object_unreference(&obj->base); |
| 166 | drm_gem_object_unreference(&obj->base); | ||
| 167 | } | 174 | } |
| 175 | /* | ||
| 176 | * Objects already transferred to the vmas list will be unreferenced by | ||
| 177 | * eb_destroy. | ||
| 178 | */ | ||
| 179 | |||
| 168 | return ret; | 180 | return ret; |
| 169 | } | 181 | } |
| 170 | 182 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 38cb8d44a013..d3c3b5b15824 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -337,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | |||
| 337 | kfree(ppgtt->gen8_pt_dma_addr[i]); | 337 | kfree(ppgtt->gen8_pt_dma_addr[i]); |
| 338 | } | 338 | } |
| 339 | 339 | ||
| 340 | __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT); | 340 | __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT)); |
| 341 | __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT); | 341 | __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); |
| 342 | } | 342 | } |
| 343 | 343 | ||
| 344 | /** | 344 | /** |
| @@ -906,14 +906,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, | |||
| 906 | WARN_ON(readq(&gtt_entries[i-1]) | 906 | WARN_ON(readq(&gtt_entries[i-1]) |
| 907 | != gen8_pte_encode(addr, level, true)); | 907 | != gen8_pte_encode(addr, level, true)); |
| 908 | 908 | ||
| 909 | #if 0 /* TODO: Still needed on GEN8? */ | ||
| 910 | /* This next bit makes the above posting read even more important. We | 909 | /* This next bit makes the above posting read even more important. We |
| 911 | * want to flush the TLBs only after we're certain all the PTE updates | 910 | * want to flush the TLBs only after we're certain all the PTE updates |
| 912 | * have finished. | 911 | * have finished. |
| 913 | */ | 912 | */ |
| 914 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | 913 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
| 915 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | 914 | POSTING_READ(GFX_FLSH_CNTL_GEN6); |
| 916 | #endif | ||
| 917 | } | 915 | } |
| 918 | 916 | ||
| 919 | /* | 917 | /* |
| @@ -1241,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) | |||
| 1241 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; | 1239 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; |
| 1242 | if (bdw_gmch_ctl) | 1240 | if (bdw_gmch_ctl) |
| 1243 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; | 1241 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; |
| 1242 | if (bdw_gmch_ctl > 4) { | ||
| 1243 | WARN_ON(!i915_preliminary_hw_support); | ||
| 1244 | return 4<<20; | ||
| 1245 | } | ||
| 1246 | |||
| 1244 | return bdw_gmch_ctl << 20; | 1247 | return bdw_gmch_ctl << 20; |
| 1245 | } | 1248 | } |
| 1246 | 1249 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 080f6fd4e839..769b864465a9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) | |||
| 6303 | uint32_t val; | 6303 | uint32_t val; |
| 6304 | 6304 | ||
| 6305 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) | 6305 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) |
| 6306 | WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", | 6306 | WARN(crtc->active, "CRTC for pipe %c enabled\n", |
| 6307 | pipe_name(crtc->pipe)); | 6307 | pipe_name(crtc->pipe)); |
| 6308 | 6308 | ||
| 6309 | WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); | 6309 | WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); |
| @@ -9135,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 9135 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) | 9135 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) |
| 9136 | PIPE_CONF_CHECK_I(pipe_bpp); | 9136 | PIPE_CONF_CHECK_I(pipe_bpp); |
| 9137 | 9137 | ||
| 9138 | if (!IS_HASWELL(dev)) { | 9138 | if (!HAS_DDI(dev)) { |
| 9139 | PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); | 9139 | PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); |
| 9140 | PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); | 9140 | PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); |
| 9141 | } | 9141 | } |
| @@ -10541,11 +10541,20 @@ static struct intel_quirk intel_quirks[] = { | |||
| 10541 | /* Sony Vaio Y cannot use SSC on LVDS */ | 10541 | /* Sony Vaio Y cannot use SSC on LVDS */ |
| 10542 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, | 10542 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, |
| 10543 | 10543 | ||
| 10544 | /* | 10544 | /* Acer Aspire 5734Z must invert backlight brightness */ |
| 10545 | * All GM45 Acer (and its brands eMachines and Packard Bell) laptops | 10545 | { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, |
| 10546 | * seem to use inverted backlight PWM. | 10546 | |
| 10547 | */ | 10547 | /* Acer/eMachines G725 */ |
| 10548 | { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, | 10548 | { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, |
| 10549 | |||
| 10550 | /* Acer/eMachines e725 */ | ||
| 10551 | { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, | ||
| 10552 | |||
| 10553 | /* Acer/Packard Bell NCL20 */ | ||
| 10554 | { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, | ||
| 10555 | |||
| 10556 | /* Acer Aspire 4736Z */ | ||
| 10557 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, | ||
| 10549 | 10558 | ||
| 10550 | /* Dell XPS13 HD Sandy Bridge */ | 10559 | /* Dell XPS13 HD Sandy Bridge */ |
| 10551 | { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, | 10560 | { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, |
| @@ -11036,8 +11045,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
| 11036 | } | 11045 | } |
| 11037 | 11046 | ||
| 11038 | intel_modeset_check_state(dev); | 11047 | intel_modeset_check_state(dev); |
| 11039 | |||
| 11040 | drm_mode_config_reset(dev); | ||
| 11041 | } | 11048 | } |
| 11042 | 11049 | ||
| 11043 | void intel_modeset_gem_init(struct drm_device *dev) | 11050 | void intel_modeset_gem_init(struct drm_device *dev) |
| @@ -11046,7 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev) | |||
| 11046 | 11053 | ||
| 11047 | intel_setup_overlay(dev); | 11054 | intel_setup_overlay(dev); |
| 11048 | 11055 | ||
| 11056 | drm_modeset_lock_all(dev); | ||
| 11057 | drm_mode_config_reset(dev); | ||
| 11049 | intel_modeset_setup_hw_state(dev, false); | 11058 | intel_modeset_setup_hw_state(dev, false); |
| 11059 | drm_modeset_unlock_all(dev); | ||
| 11050 | } | 11060 | } |
| 11051 | 11061 | ||
| 11052 | void intel_modeset_cleanup(struct drm_device *dev) | 11062 | void intel_modeset_cleanup(struct drm_device *dev) |
| @@ -11125,14 +11135,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector, | |||
| 11125 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) | 11135 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) |
| 11126 | { | 11136 | { |
| 11127 | struct drm_i915_private *dev_priv = dev->dev_private; | 11137 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 11138 | unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; | ||
| 11128 | u16 gmch_ctrl; | 11139 | u16 gmch_ctrl; |
| 11129 | 11140 | ||
| 11130 | pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); | 11141 | pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl); |
| 11131 | if (state) | 11142 | if (state) |
| 11132 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; | 11143 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; |
| 11133 | else | 11144 | else |
| 11134 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; | 11145 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; |
| 11135 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); | 11146 | pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl); |
| 11136 | return 0; | 11147 | return 0; |
| 11137 | } | 11148 | } |
| 11138 | 11149 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a18e88b3e425..79f91f26e288 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane, | |||
| 821 | uint32_t sprite_width, int pixel_size, | 821 | uint32_t sprite_width, int pixel_size, |
| 822 | bool enabled, bool scaled); | 822 | bool enabled, bool scaled); |
| 823 | void intel_init_pm(struct drm_device *dev); | 823 | void intel_init_pm(struct drm_device *dev); |
| 824 | void intel_pm_setup(struct drm_device *dev); | ||
| 824 | bool intel_fbc_enabled(struct drm_device *dev); | 825 | bool intel_fbc_enabled(struct drm_device *dev); |
| 825 | void intel_update_fbc(struct drm_device *dev); | 826 | void intel_update_fbc(struct drm_device *dev); |
| 826 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | 827 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index f161ac02c4f6..e6f782d1c669 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
| @@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev, | |||
| 451 | 451 | ||
| 452 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 452 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); |
| 453 | 453 | ||
| 454 | if (HAS_PCH_SPLIT(dev)) { | 454 | if (IS_BROADWELL(dev)) { |
| 455 | val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; | ||
| 456 | } else if (HAS_PCH_SPLIT(dev)) { | ||
| 455 | val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | 457 | val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
| 456 | } else { | 458 | } else { |
| 457 | if (IS_VALLEYVIEW(dev)) | 459 | if (IS_VALLEYVIEW(dev)) |
| @@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev, | |||
| 479 | return val; | 481 | return val; |
| 480 | } | 482 | } |
| 481 | 483 | ||
| 484 | static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level) | ||
| 485 | { | ||
| 486 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 487 | u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
| 488 | I915_WRITE(BLC_PWM_PCH_CTL2, val | level); | ||
| 489 | } | ||
| 490 | |||
| 482 | static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) | 491 | static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) |
| 483 | { | 492 | { |
| 484 | struct drm_i915_private *dev_priv = dev->dev_private; | 493 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, | |||
| 496 | DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); | 505 | DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); |
| 497 | level = intel_panel_compute_brightness(dev, pipe, level); | 506 | level = intel_panel_compute_brightness(dev, pipe, level); |
| 498 | 507 | ||
| 499 | if (HAS_PCH_SPLIT(dev)) | 508 | if (IS_BROADWELL(dev)) |
| 509 | return intel_bdw_panel_set_backlight(dev, level); | ||
| 510 | else if (HAS_PCH_SPLIT(dev)) | ||
| 500 | return intel_pch_panel_set_backlight(dev, level); | 511 | return intel_pch_panel_set_backlight(dev, level); |
| 501 | 512 | ||
| 502 | if (is_backlight_combination_mode(dev)) { | 513 | if (is_backlight_combination_mode(dev)) { |
| @@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector) | |||
| 666 | POSTING_READ(reg); | 677 | POSTING_READ(reg); |
| 667 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); | 678 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); |
| 668 | 679 | ||
| 669 | if (HAS_PCH_SPLIT(dev) && | 680 | if (IS_BROADWELL(dev)) { |
| 681 | /* | ||
| 682 | * Broadwell requires PCH override to drive the PCH | ||
| 683 | * backlight pin. The above will configure the CPU | ||
| 684 | * backlight pin, which we don't plan to use. | ||
| 685 | */ | ||
| 686 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | ||
| 687 | tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE; | ||
| 688 | I915_WRITE(BLC_PWM_PCH_CTL1, tmp); | ||
| 689 | } else if (HAS_PCH_SPLIT(dev) && | ||
| 670 | !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { | 690 | !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { |
| 671 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | 691 | tmp = I915_READ(BLC_PWM_PCH_CTL1); |
| 672 | tmp |= BLM_PCH_PWM_ENABLE; | 692 | tmp |= BLM_PCH_PWM_ENABLE; |
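The backlight hunks make Broadwell program the PCH PWM registers directly: the duty cycle is a read-modify-write of BLC_PWM_PCH_CTL2 that preserves the other bits, and enabling requires ORing the PCH override and enable bits into BLC_PWM_PCH_CTL1. A toy sketch of those two register operations on plain variables; the bit positions here are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout: low 16 bits = duty cycle, as in the hunk's mask. */
#define DUTY_CYCLE_MASK   0x0000ffffu
#define PWM_ENABLE        (1u << 31)
#define OVERRIDE_ENABLE   (1u << 30)

static uint32_t pch_ctl2;	/* stand-in for BLC_PWM_PCH_CTL2 */
static uint32_t pch_ctl1;	/* stand-in for BLC_PWM_PCH_CTL1 */

/* Set the backlight level without disturbing the other bits. */
static void set_backlight(uint32_t level)
{
	uint32_t val = pch_ctl2 & ~DUTY_CYCLE_MASK;
	pch_ctl2 = val | (level & DUTY_CYCLE_MASK);
}

/* BDW-style enable: keep whatever is programmed, OR in override + enable. */
static void enable_pch_backlight(void)
{
	pch_ctl1 |= OVERRIDE_ENABLE | PWM_ENABLE;
}

int main(void)
{
	pch_ctl2 = 0x12340000;	/* frequency bits that must be preserved */
	set_backlight(0x0abc);
	enable_pch_backlight();
	printf("CTL2 = 0x%08x, CTL1 = 0x%08x\n", pch_ctl2, pch_ctl1);
	return 0;
}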
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6e0d5e075b15..26c29c173221 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -5685,8 +5685,11 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
| 5685 | { | 5685 | { |
| 5686 | struct drm_i915_private *dev_priv = dev->dev_private; | 5686 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5687 | bool is_enabled, enable_requested; | 5687 | bool is_enabled, enable_requested; |
| 5688 | unsigned long irqflags; | ||
| 5688 | uint32_t tmp; | 5689 | uint32_t tmp; |
| 5689 | 5690 | ||
| 5691 | WARN_ON(dev_priv->pc8.enabled); | ||
| 5692 | |||
| 5690 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); | 5693 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); |
| 5691 | is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; | 5694 | is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; |
| 5692 | enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; | 5695 | enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; |
| @@ -5702,9 +5705,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
| 5702 | HSW_PWR_WELL_STATE_ENABLED), 20)) | 5705 | HSW_PWR_WELL_STATE_ENABLED), 20)) |
| 5703 | DRM_ERROR("Timeout enabling power well\n"); | 5706 | DRM_ERROR("Timeout enabling power well\n"); |
| 5704 | } | 5707 | } |
| 5708 | |||
| 5709 | if (IS_BROADWELL(dev)) { | ||
| 5710 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
| 5711 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B), | ||
| 5712 | dev_priv->de_irq_mask[PIPE_B]); | ||
| 5713 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B), | ||
| 5714 | ~dev_priv->de_irq_mask[PIPE_B] | | ||
| 5715 | GEN8_PIPE_VBLANK); | ||
| 5716 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C), | ||
| 5717 | dev_priv->de_irq_mask[PIPE_C]); | ||
| 5718 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C), | ||
| 5719 | ~dev_priv->de_irq_mask[PIPE_C] | | ||
| 5720 | GEN8_PIPE_VBLANK); | ||
| 5721 | POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C)); | ||
| 5722 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
| 5723 | } | ||
| 5705 | } else { | 5724 | } else { |
| 5706 | if (enable_requested) { | 5725 | if (enable_requested) { |
| 5707 | unsigned long irqflags; | ||
| 5708 | enum pipe p; | 5726 | enum pipe p; |
| 5709 | 5727 | ||
| 5710 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); | 5728 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); |
| @@ -5731,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
| 5731 | static void __intel_power_well_get(struct drm_device *dev, | 5749 | static void __intel_power_well_get(struct drm_device *dev, |
| 5732 | struct i915_power_well *power_well) | 5750 | struct i915_power_well *power_well) |
| 5733 | { | 5751 | { |
| 5734 | if (!power_well->count++) | 5752 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5753 | |||
| 5754 | if (!power_well->count++) { | ||
| 5755 | hsw_disable_package_c8(dev_priv); | ||
| 5735 | __intel_set_power_well(dev, true); | 5756 | __intel_set_power_well(dev, true); |
| 5757 | } | ||
| 5736 | } | 5758 | } |
| 5737 | 5759 | ||
| 5738 | static void __intel_power_well_put(struct drm_device *dev, | 5760 | static void __intel_power_well_put(struct drm_device *dev, |
| 5739 | struct i915_power_well *power_well) | 5761 | struct i915_power_well *power_well) |
| 5740 | { | 5762 | { |
| 5763 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5764 | |||
| 5741 | WARN_ON(!power_well->count); | 5765 | WARN_ON(!power_well->count); |
| 5742 | if (!--power_well->count && i915_disable_power_well) | 5766 | if (!--power_well->count && i915_disable_power_well) { |
| 5743 | __intel_set_power_well(dev, false); | 5767 | __intel_set_power_well(dev, false); |
| 5768 | hsw_enable_package_c8(dev_priv); | ||
| 5769 | } | ||
| 5744 | } | 5770 | } |
| 5745 | 5771 | ||
| 5746 | void intel_display_power_get(struct drm_device *dev, | 5772 | void intel_display_power_get(struct drm_device *dev, |
| @@ -6130,10 +6156,19 @@ int vlv_freq_opcode(int ddr_freq, int val) | |||
| 6130 | return val; | 6156 | return val; |
| 6131 | } | 6157 | } |
| 6132 | 6158 | ||
| 6133 | void intel_pm_init(struct drm_device *dev) | 6159 | void intel_pm_setup(struct drm_device *dev) |
| 6134 | { | 6160 | { |
| 6135 | struct drm_i915_private *dev_priv = dev->dev_private; | 6161 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 6136 | 6162 | ||
| 6163 | mutex_init(&dev_priv->rps.hw_lock); | ||
| 6164 | |||
| 6165 | mutex_init(&dev_priv->pc8.lock); | ||
| 6166 | dev_priv->pc8.requirements_met = false; | ||
| 6167 | dev_priv->pc8.gpu_idle = false; | ||
| 6168 | dev_priv->pc8.irqs_disabled = false; | ||
| 6169 | dev_priv->pc8.enabled = false; | ||
| 6170 | dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ | ||
| 6171 | INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); | ||
| 6137 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | 6172 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, |
| 6138 | intel_gen6_powersave_work); | 6173 | intel_gen6_powersave_work); |
| 6139 | } | 6174 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b620337e6d67..c2f09d456300 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |||
| 965 | } else if (IS_GEN6(ring->dev)) { | 965 | } else if (IS_GEN6(ring->dev)) { |
| 966 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); | 966 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); |
| 967 | } else { | 967 | } else { |
| 968 | /* XXX: gen8 returns to sanity */ | ||
| 968 | mmio = RING_HWS_PGA(ring->mmio_base); | 969 | mmio = RING_HWS_PGA(ring->mmio_base); |
| 969 | } | 970 | } |
| 970 | 971 | ||
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 0b02078a0b84..25cbe073c388 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
| @@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev) | |||
| 784 | int intel_gpu_reset(struct drm_device *dev) | 784 | int intel_gpu_reset(struct drm_device *dev) |
| 785 | { | 785 | { |
| 786 | switch (INTEL_INFO(dev)->gen) { | 786 | switch (INTEL_INFO(dev)->gen) { |
| 787 | case 8: | ||
| 787 | case 7: | 788 | case 7: |
| 788 | case 6: return gen6_do_reset(dev); | 789 | case 6: return gen6_do_reset(dev); |
| 789 | case 5: return ironlake_do_reset(dev); | 790 | case 5: return ironlake_do_reset(dev); |
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c index 48f06378d3f9..2ea5568b6cf5 100644 --- a/drivers/gpu/drm/nouveau/core/core/subdev.c +++ b/drivers/gpu/drm/nouveau/core/core/subdev.c | |||
| @@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent, | |||
| 104 | 104 | ||
| 105 | if (parent) { | 105 | if (parent) { |
| 106 | struct nouveau_device *device = nv_device(parent); | 106 | struct nouveau_device *device = nv_device(parent); |
| 107 | int subidx = nv_hclass(subdev) & 0xff; | ||
| 108 | |||
| 109 | subdev->debug = nouveau_dbgopt(device->dbgopt, subname); | 107 | subdev->debug = nouveau_dbgopt(device->dbgopt, subname); |
| 110 | subdev->mmio = nv_subdev(device)->mmio; | 108 | subdev->mmio = nv_subdev(device)->mmio; |
| 111 | device->subdev[subidx] = *pobject; | ||
| 112 | } | 109 | } |
| 113 | 110 | ||
| 114 | return 0; | 111 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c index 9135b25a29d0..dd01c6c435d6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c | |||
| @@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent, | |||
| 268 | if (ret) | 268 | if (ret) |
| 269 | return ret; | 269 | return ret; |
| 270 | 270 | ||
| 271 | device->subdev[i] = devobj->subdev[i]; | ||
| 272 | |||
| 271 | /* note: can't init *any* subdevs until devinit has been run | 273 | /* note: can't init *any* subdevs until devinit has been run |
| 272 | * due to not knowing exactly what the vbios init tables will | 274 | * due to not knowing exactly what the vbios init tables will |
| 273 | * mess with. devinit also can't be run until all of its | 275 | * mess with. devinit also can't be run until all of its |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index 8d06eef2b9ee..dbc5e33de94f 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | |||
| @@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device) | |||
| 161 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 161 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 162 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 162 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 163 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 163 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; |
| 164 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 164 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; |
| 165 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 165 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 166 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 166 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 167 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 167 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c index 434bb4b0fa2e..5c8a63dc506a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | |||
| @@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds) | |||
| 334 | while ((mthd = &mthds[i++]) && (init = mthd->init)) { | 334 | while ((mthd = &mthds[i++]) && (init = mthd->init)) { |
| 335 | u32 addr = 0x80000000 | mthd->oclass; | 335 | u32 addr = 0x80000000 | mthd->oclass; |
| 336 | for (data = 0; init->count; init++) { | 336 | for (data = 0; init->count; init++) { |
| 337 | if (data != init->data) { | 337 | if (init == mthd->init || data != init->data) { |
| 338 | nv_wr32(priv, 0x40448c, init->data); | 338 | nv_wr32(priv, 0x40448c, init->data); |
| 339 | data = init->data; | 339 | data = init->data; |
| 340 | } | 340 | } |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h index 8541aa382ff2..d89dbdf39b0d 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h | |||
| @@ -75,6 +75,11 @@ struct nouveau_fb { | |||
| 75 | static inline struct nouveau_fb * | 75 | static inline struct nouveau_fb * |
| 76 | nouveau_fb(void *obj) | 76 | nouveau_fb(void *obj) |
| 77 | { | 77 | { |
| 78 | /* fbram uses this before device subdev pointer is valid */ | ||
| 79 | if (nv_iclass(obj, NV_SUBDEV_CLASS) && | ||
| 80 | nv_subidx(obj) == NVDEV_SUBDEV_FB) | ||
| 81 | return obj; | ||
| 82 | |||
| 78 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; | 83 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; |
| 79 | } | 84 | } |
| 80 | 85 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index 420908cb82b6..df1b1b423093 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c | |||
| @@ -365,13 +365,13 @@ static u16 | |||
| 365 | init_script(struct nouveau_bios *bios, int index) | 365 | init_script(struct nouveau_bios *bios, int index) |
| 366 | { | 366 | { |
| 367 | struct nvbios_init init = { .bios = bios }; | 367 | struct nvbios_init init = { .bios = bios }; |
| 368 | u16 data; | 368 | u16 bmp_ver = bmp_version(bios), data; |
| 369 | 369 | ||
| 370 | if (bmp_version(bios) && bmp_version(bios) < 0x0510) { | 370 | if (bmp_ver && bmp_ver < 0x0510) { |
| 371 | if (index > 1) | 371 | if (index > 1 || bmp_ver < 0x0100) |
| 372 | return 0x0000; | 372 | return 0x0000; |
| 373 | 373 | ||
| 374 | data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); | 374 | data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18); |
| 375 | return nv_ro16(bios, data + (index * 2)); | 375 | return nv_ro16(bios, data + (index * 2)); |
| 376 | } | 376 | } |
| 377 | 377 | ||
| @@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init) | |||
| 1294 | u16 offset = nv_ro16(bios, init->offset + 1); | 1294 | u16 offset = nv_ro16(bios, init->offset + 1); |
| 1295 | 1295 | ||
| 1296 | trace("JUMP\t0x%04x\n", offset); | 1296 | trace("JUMP\t0x%04x\n", offset); |
| 1297 | init->offset = offset; | 1297 | |
| 1298 | if (init_exec(init)) | ||
| 1299 | init->offset = offset; | ||
| 1300 | else | ||
| 1301 | init->offset += 3; | ||
| 1298 | } | 1302 | } |
| 1299 | 1303 | ||
| 1300 | /** | 1304 | /** |
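The init_jump() fix makes the VBIOS init-script interpreter follow the jump target only while the script is actually executing; when it is merely parsing (init_exec() false) it steps over the 3-byte opcode so the offset stays in sync. A toy interpreter step showing the same choice; the opcode byte and script contents below are placeholders, not real VBIOS data.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct init_ctx {
	const uint8_t *script;
	uint16_t offset;
	bool exec;	/* are we currently executing, or just parsing? */
};

static uint16_t read16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* JUMP: 1 opcode byte followed by a 16-bit little-endian target offset. */
static void init_jump(struct init_ctx *init)
{
	uint16_t target = read16(init->script + init->offset + 1);

	if (init->exec)
		init->offset = target;	/* follow the jump */
	else
		init->offset += 3;	/* skip the opcode, keep parsing */
}

int main(void)
{
	const uint8_t script[] = { 0x5c, 0x10, 0x00 };	/* jump to 0x0010 */
	struct init_ctx a = { script, 0, true };
	struct init_ctx b = { script, 0, false };

	init_jump(&a);
	init_jump(&b);
	printf("executing: offset 0x%04x, parsing only: offset 0x%04x\n",
	       (unsigned)a.offset, (unsigned)b.offset);
	return 0;
}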
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 6828d81ed7b9..900fae01793e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c | |||
| @@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | |||
| 447 | if (ret) | 447 | if (ret) |
| 448 | goto done; | 448 | goto done; |
| 449 | 449 | ||
| 450 | info->offset = ntfy->node->offset; | ||
| 451 | |||
| 450 | done: | 452 | done: |
| 451 | if (ret) | 453 | if (ret) |
| 452 | nouveau_abi16_ntfy_fini(chan, ntfy); | 454 | nouveau_abi16_ntfy_fini(chan, ntfy); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 95c740454049..ba0183fb84f3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
| @@ -51,6 +51,7 @@ static struct nouveau_dsm_priv { | |||
| 51 | bool dsm_detected; | 51 | bool dsm_detected; |
| 52 | bool optimus_detected; | 52 | bool optimus_detected; |
| 53 | acpi_handle dhandle; | 53 | acpi_handle dhandle; |
| 54 | acpi_handle other_handle; | ||
| 54 | acpi_handle rom_handle; | 55 | acpi_handle rom_handle; |
| 55 | } nouveau_dsm_priv; | 56 | } nouveau_dsm_priv; |
| 56 | 57 | ||
| @@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) | |||
| 260 | if (!dhandle) | 261 | if (!dhandle) |
| 261 | return false; | 262 | return false; |
| 262 | 263 | ||
| 263 | if (!acpi_has_method(dhandle, "_DSM")) | 264 | if (!acpi_has_method(dhandle, "_DSM")) { |
| 265 | nouveau_dsm_priv.other_handle = dhandle; | ||
| 264 | return false; | 266 | return false; |
| 265 | 267 | } | |
| 266 | if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) | 268 | if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) |
| 267 | retval |= NOUVEAU_DSM_HAS_MUX; | 269 | retval |= NOUVEAU_DSM_HAS_MUX; |
| 268 | 270 | ||
| @@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void) | |||
| 338 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", | 340 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", |
| 339 | acpi_method_name); | 341 | acpi_method_name); |
| 340 | nouveau_dsm_priv.dsm_detected = true; | 342 | nouveau_dsm_priv.dsm_detected = true; |
| 343 | /* | ||
| 344 | * On some systems hotplug events are generated for the device | ||
| 345 | * being switched off when _DSM is executed. They cause ACPI | ||
| 346 | * hotplug to trigger and attempt to remove the device from | ||
| 347 | * the system, which causes it to break down. Prevent that from | ||
| 348 | * happening by setting the no_hotplug flag for the involved | ||
| 349 | * ACPI device objects. | ||
| 350 | */ | ||
| 351 | acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle); | ||
| 352 | acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle); | ||
| 341 | ret = true; | 353 | ret = true; |
| 342 | } | 354 | } |
| 343 | 355 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 29c3efdfc7dd..25ea82f8def3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
| @@ -610,7 +610,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 610 | ret = nouveau_fence_sync(fence, chan); | 610 | ret = nouveau_fence_sync(fence, chan); |
| 611 | nouveau_fence_unref(&fence); | 611 | nouveau_fence_unref(&fence); |
| 612 | if (ret) | 612 | if (ret) |
| 613 | return ret; | 613 | goto fail_free; |
| 614 | 614 | ||
| 615 | if (new_bo != old_bo) { | 615 | if (new_bo != old_bo) { |
| 616 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | 616 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 7a3759f1c41a..98a22e6e27a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev) | |||
| 858 | if (nouveau_runtime_pm == 0) | 858 | if (nouveau_runtime_pm == 0) |
| 859 | return -EINVAL; | 859 | return -EINVAL; |
| 860 | 860 | ||
| 861 | /* are we optimus enabled? */ | ||
| 862 | if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { | ||
| 863 | DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); | ||
| 864 | return -EINVAL; | ||
| 865 | } | ||
| 866 | |||
| 861 | nv_debug_level(SILENT); | 867 | nv_debug_level(SILENT); |
| 862 | drm_kms_helper_poll_disable(drm_dev); | 868 | drm_kms_helper_poll_disable(drm_dev); |
| 863 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); | 869 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); |
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index 037d324bf58f..66ac0ff95f5a 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig | |||
| @@ -8,5 +8,6 @@ config DRM_QXL | |||
| 8 | select DRM_KMS_HELPER | 8 | select DRM_KMS_HELPER |
| 9 | select DRM_KMS_FB_HELPER | 9 | select DRM_KMS_FB_HELPER |
| 10 | select DRM_TTM | 10 | select DRM_TTM |
| 11 | select CRC32 | ||
| 11 | help | 12 | help |
| 12 | QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. | 13 | QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. |
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 5e827c29d194..d70aafb83307 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | */ | 24 | */ |
| 25 | 25 | ||
| 26 | 26 | ||
| 27 | #include "linux/crc32.h" | 27 | #include <linux/crc32.h> |
| 28 | 28 | ||
| 29 | #include "qxl_drv.h" | 29 | #include "qxl_drv.h" |
| 30 | #include "qxl_object.h" | 30 | #include "qxl_object.h" |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 80a20120e625..0b9621c9aeea 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
| 1143 | } | 1143 | } |
| 1144 | 1144 | ||
| 1145 | if (tiling_flags & RADEON_TILING_MACRO) { | 1145 | if (tiling_flags & RADEON_TILING_MACRO) { |
| 1146 | if (rdev->family >= CHIP_BONAIRE) | 1146 | evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); |
| 1147 | tmp = rdev->config.cik.tile_config; | ||
| 1148 | else if (rdev->family >= CHIP_TAHITI) | ||
| 1149 | tmp = rdev->config.si.tile_config; | ||
| 1150 | else if (rdev->family >= CHIP_CAYMAN) | ||
| 1151 | tmp = rdev->config.cayman.tile_config; | ||
| 1152 | else | ||
| 1153 | tmp = rdev->config.evergreen.tile_config; | ||
| 1154 | 1147 | ||
| 1155 | switch ((tmp & 0xf0) >> 4) { | 1148 | /* Set NUM_BANKS. */ |
| 1156 | case 0: /* 4 banks */ | 1149 | if (rdev->family >= CHIP_BONAIRE) { |
| 1157 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); | 1150 | unsigned tileb, index, num_banks, tile_split_bytes; |
| 1158 | break; | 1151 | |
| 1159 | case 1: /* 8 banks */ | 1152 | /* Calculate the macrotile mode index. */ |
| 1160 | default: | 1153 | tile_split_bytes = 64 << tile_split; |
| 1161 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); | 1154 | tileb = 8 * 8 * target_fb->bits_per_pixel / 8; |
| 1162 | break; | 1155 | tileb = min(tile_split_bytes, tileb); |
| 1163 | case 2: /* 16 banks */ | 1156 | |
| 1164 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); | 1157 | for (index = 0; tileb > 64; index++) { |
| 1165 | break; | 1158 | tileb >>= 1; |
| 1159 | } | ||
| 1160 | |||
| 1161 | if (index >= 16) { | ||
| 1162 | DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", | ||
| 1163 | target_fb->bits_per_pixel, tile_split); | ||
| 1164 | return -EINVAL; | ||
| 1165 | } | ||
| 1166 | |||
| 1167 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; | ||
| 1168 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); | ||
| 1169 | } else { | ||
| 1170 | /* SI and older. */ | ||
| 1171 | if (rdev->family >= CHIP_TAHITI) | ||
| 1172 | tmp = rdev->config.si.tile_config; | ||
| 1173 | else if (rdev->family >= CHIP_CAYMAN) | ||
| 1174 | tmp = rdev->config.cayman.tile_config; | ||
| 1175 | else | ||
| 1176 | tmp = rdev->config.evergreen.tile_config; | ||
| 1177 | |||
| 1178 | switch ((tmp & 0xf0) >> 4) { | ||
| 1179 | case 0: /* 4 banks */ | ||
| 1180 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); | ||
| 1181 | break; | ||
| 1182 | case 1: /* 8 banks */ | ||
| 1183 | default: | ||
| 1184 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); | ||
| 1185 | break; | ||
| 1186 | case 2: /* 16 banks */ | ||
| 1187 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); | ||
| 1188 | break; | ||
| 1189 | } | ||
| 1166 | } | 1190 | } |
| 1167 | 1191 | ||
| 1168 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); | 1192 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); |
| 1169 | |||
| 1170 | evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); | ||
| 1171 | fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); | 1193 | fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); |
| 1172 | fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); | 1194 | fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); |
| 1173 | fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); | 1195 | fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); |
| @@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
| 1180 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); | 1202 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); |
| 1181 | 1203 | ||
| 1182 | if (rdev->family >= CHIP_BONAIRE) { | 1204 | if (rdev->family >= CHIP_BONAIRE) { |
| 1183 | u32 num_pipe_configs = rdev->config.cik.max_tile_pipes; | 1205 | /* Read the pipe config from the 2D TILED SCANOUT mode. |
| 1184 | u32 num_rb = rdev->config.cik.max_backends_per_se; | 1206 | * It should be the same for the other modes too, but not all |
| 1185 | if (num_pipe_configs > 8) | 1207 | * modes set the pipe config field. */ |
| 1186 | num_pipe_configs = 8; | 1208 | u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f; |
| 1187 | if (num_pipe_configs == 8) | 1209 | |
| 1188 | fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16); | 1210 | fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config); |
| 1189 | else if (num_pipe_configs == 4) { | ||
| 1190 | if (num_rb == 4) | ||
| 1191 | fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16); | ||
| 1192 | else if (num_rb < 4) | ||
| 1193 | fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16); | ||
| 1194 | } else if (num_pipe_configs == 2) | ||
| 1195 | fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2); | ||
| 1196 | } else if ((rdev->family == CHIP_TAHITI) || | 1211 | } else if ((rdev->family == CHIP_TAHITI) || |
| 1197 | (rdev->family == CHIP_PITCAIRN)) | 1212 | (rdev->family == CHIP_PITCAIRN)) |
| 1198 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); | 1213 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); |
| 1199 | else if (rdev->family == CHIP_VERDE) | 1214 | else if ((rdev->family == CHIP_VERDE) || |
| 1215 | (rdev->family == CHIP_OLAND) || | ||
| 1216 | (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */ | ||
| 1200 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); | 1217 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); |
| 1201 | 1218 | ||
| 1202 | switch (radeon_crtc->crtc_id) { | 1219 | switch (radeon_crtc->crtc_id) { |
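For CIK (Bonaire and newer) the scanout setup above now derives NUM_BANKS from the macrotile mode array instead of tile_config: the index is the byte size of one 8x8 tile (8*8*bpp/8), capped at the tile-split size, reduced to a power-of-two index, and rejected if it exceeds 15. A standalone sketch of just that index arithmetic.

#include <stdio.h>

/* Compute the macrotile mode index the way the hunk above does. */
static int macrotile_index(unsigned bits_per_pixel, unsigned tile_split)
{
	unsigned tile_split_bytes = 64u << tile_split;
	unsigned tileb = 8 * 8 * bits_per_pixel / 8;	/* bytes per 8x8 tile */
	int index;

	if (tileb > tile_split_bytes)
		tileb = tile_split_bytes;

	for (index = 0; tileb > 64; index++)
		tileb >>= 1;

	return index >= 16 ? -1 : index;	/* -1 mirrors the -EINVAL path */
}

int main(void)
{
	/* e.g. 32 bpp, tile_split=4 (1 KiB split): 256-byte tiles -> index 2 */
	printf("32 bpp -> index %d\n", macrotile_index(32, 4));
	printf("16 bpp -> index %d\n", macrotile_index(16, 4));
	printf("64 bpp -> index %d\n", macrotile_index(64, 4));
	return 0;
}

In the hunk the resulting index then selects the bank count via (macrotile_mode_array[index] >> 6) & 0x3.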
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index b43a3a3c9067..e950fabd7f5e 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width) | |||
| 3057 | * Returns the disabled RB bitmask. | 3057 | * Returns the disabled RB bitmask. |
| 3058 | */ | 3058 | */ |
| 3059 | static u32 cik_get_rb_disabled(struct radeon_device *rdev, | 3059 | static u32 cik_get_rb_disabled(struct radeon_device *rdev, |
| 3060 | u32 max_rb_num, u32 se_num, | 3060 | u32 max_rb_num_per_se, |
| 3061 | u32 sh_per_se) | 3061 | u32 sh_per_se) |
| 3062 | { | 3062 | { |
| 3063 | u32 data, mask; | 3063 | u32 data, mask; |
| @@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev, | |||
| 3071 | 3071 | ||
| 3072 | data >>= BACKEND_DISABLE_SHIFT; | 3072 | data >>= BACKEND_DISABLE_SHIFT; |
| 3073 | 3073 | ||
| 3074 | mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se); | 3074 | mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se); |
| 3075 | 3075 | ||
| 3076 | return data & mask; | 3076 | return data & mask; |
| 3077 | } | 3077 | } |
| @@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev, | |||
| 3088 | */ | 3088 | */ |
| 3089 | static void cik_setup_rb(struct radeon_device *rdev, | 3089 | static void cik_setup_rb(struct radeon_device *rdev, |
| 3090 | u32 se_num, u32 sh_per_se, | 3090 | u32 se_num, u32 sh_per_se, |
| 3091 | u32 max_rb_num) | 3091 | u32 max_rb_num_per_se) |
| 3092 | { | 3092 | { |
| 3093 | int i, j; | 3093 | int i, j; |
| 3094 | u32 data, mask; | 3094 | u32 data, mask; |
| @@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev, | |||
| 3098 | for (i = 0; i < se_num; i++) { | 3098 | for (i = 0; i < se_num; i++) { |
| 3099 | for (j = 0; j < sh_per_se; j++) { | 3099 | for (j = 0; j < sh_per_se; j++) { |
| 3100 | cik_select_se_sh(rdev, i, j); | 3100 | cik_select_se_sh(rdev, i, j); |
| 3101 | data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); | 3101 | data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); |
| 3102 | if (rdev->family == CHIP_HAWAII) | 3102 | if (rdev->family == CHIP_HAWAII) |
| 3103 | disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); | 3103 | disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); |
| 3104 | else | 3104 | else |
| @@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev, | |||
| 3108 | cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); | 3108 | cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
| 3109 | 3109 | ||
| 3110 | mask = 1; | 3110 | mask = 1; |
| 3111 | for (i = 0; i < max_rb_num; i++) { | 3111 | for (i = 0; i < max_rb_num_per_se * se_num; i++) { |
| 3112 | if (!(disabled_rbs & mask)) | 3112 | if (!(disabled_rbs & mask)) |
| 3113 | enabled_rbs |= mask; | 3113 | enabled_rbs |= mask; |
| 3114 | mask <<= 1; | 3114 | mask <<= 1; |
| 3115 | } | 3115 | } |
| 3116 | 3116 | ||
| 3117 | rdev->config.cik.backend_enable_mask = enabled_rbs; | ||
| 3118 | |||
| 3117 | for (i = 0; i < se_num; i++) { | 3119 | for (i = 0; i < se_num; i++) { |
| 3118 | cik_select_se_sh(rdev, i, 0xffffffff); | 3120 | cik_select_se_sh(rdev, i, 0xffffffff); |
| 3119 | data = 0; | 3121 | data = 0; |
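cik_setup_rb() now treats the RB count as per shader engine, so the enabled-RB mask is built over max_rb_num_per_se * se_num bits and stashed in backend_enable_mask for later use. A small sketch of building that mask from a disabled-RB bitmap; the example counts are arbitrary.

#include <stdio.h>
#include <stdint.h>

static uint32_t enabled_rb_mask(uint32_t disabled_rbs,
				unsigned se_num, unsigned rb_per_se)
{
	uint32_t enabled = 0, mask = 1;

	/* One bit per RB, counted per SE and summed over all SEs. */
	for (unsigned i = 0; i < rb_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled |= mask;
		mask <<= 1;
	}
	return enabled;
}

int main(void)
{
	/* 2 SEs with 2 RBs each; RB 1 of SE 0 fused off -> bit 1 disabled. */
	printf("enabled mask = 0x%x\n",
	       (unsigned)enabled_rb_mask(0x2, 2, 2));
	return 0;
}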
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 0300727a4f70..d08b83c6267b 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
| @@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev, | |||
| 458 | radeon_ring_write(ring, 0); /* src/dst endian swap */ | 458 | radeon_ring_write(ring, 0); /* src/dst endian swap */ |
| 459 | radeon_ring_write(ring, src_offset & 0xffffffff); | 459 | radeon_ring_write(ring, src_offset & 0xffffffff); |
| 460 | radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); | 460 | radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); |
| 461 | radeon_ring_write(ring, dst_offset & 0xfffffffc); | 461 | radeon_ring_write(ring, dst_offset & 0xffffffff); |
| 462 | radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); | 462 | radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); |
| 463 | src_offset += cur_size_in_bytes; | 463 | src_offset += cur_size_in_bytes; |
| 464 | dst_offset += cur_size_in_bytes; | 464 | dst_offset += cur_size_in_bytes; |
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index de86493cbc44..713a5d359901 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c | |||
| @@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) | |||
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); | 176 | sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); |
| 177 | if (sad_count < 0) { | 177 | if (sad_count <= 0) { |
| 178 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); | 178 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); |
| 179 | return; | 179 | return; |
| 180 | } | 180 | } |
| @@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) | |||
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); | 237 | sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); |
| 238 | if (sad_count < 0) { | 238 | if (sad_count <= 0) { |
| 239 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); | 239 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); |
| 240 | return; | 240 | return; |
| 241 | } | 241 | } |
| @@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev) | |||
| 308 | rdev->audio.enabled = true; | 308 | rdev->audio.enabled = true; |
| 309 | 309 | ||
| 310 | if (ASIC_IS_DCE8(rdev)) | 310 | if (ASIC_IS_DCE8(rdev)) |
| 311 | rdev->audio.num_pins = 7; | 311 | rdev->audio.num_pins = 6; |
| 312 | else if (ASIC_IS_DCE61(rdev)) | ||
| 313 | rdev->audio.num_pins = 4; | ||
| 312 | else | 314 | else |
| 313 | rdev->audio.num_pins = 6; | 315 | rdev->audio.num_pins = 6; |
| 314 | 316 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index aa695c4feb3d..0c6d5cef4cf1 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
| @@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) | |||
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); | 120 | sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); |
| 121 | if (sad_count < 0) { | 121 | if (sad_count <= 0) { |
| 122 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); | 122 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); |
| 123 | return; | 123 | return; |
| 124 | } | 124 | } |
| @@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder) | |||
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); | 175 | sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); |
| 176 | if (sad_count < 0) { | 176 | if (sad_count <= 0) { |
| 177 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); | 177 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); |
| 178 | return; | 178 | return; |
| 179 | } | 179 | } |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 11aab2ab54ce..f59a9e9fccf8 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
| 895 | (rdev->pdev->device == 0x999C)) { | 895 | (rdev->pdev->device == 0x999C)) { |
| 896 | rdev->config.cayman.max_simds_per_se = 6; | 896 | rdev->config.cayman.max_simds_per_se = 6; |
| 897 | rdev->config.cayman.max_backends_per_se = 2; | 897 | rdev->config.cayman.max_backends_per_se = 2; |
| 898 | rdev->config.cayman.max_hw_contexts = 8; | ||
| 899 | rdev->config.cayman.sx_max_export_size = 256; | ||
| 900 | rdev->config.cayman.sx_max_export_pos_size = 64; | ||
| 901 | rdev->config.cayman.sx_max_export_smx_size = 192; | ||
| 898 | } else if ((rdev->pdev->device == 0x9903) || | 902 | } else if ((rdev->pdev->device == 0x9903) || |
| 899 | (rdev->pdev->device == 0x9904) || | 903 | (rdev->pdev->device == 0x9904) || |
| 900 | (rdev->pdev->device == 0x990A) || | 904 | (rdev->pdev->device == 0x990A) || |
| @@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
| 905 | (rdev->pdev->device == 0x999D)) { | 909 | (rdev->pdev->device == 0x999D)) { |
| 906 | rdev->config.cayman.max_simds_per_se = 4; | 910 | rdev->config.cayman.max_simds_per_se = 4; |
| 907 | rdev->config.cayman.max_backends_per_se = 2; | 911 | rdev->config.cayman.max_backends_per_se = 2; |
| 912 | rdev->config.cayman.max_hw_contexts = 8; | ||
| 913 | rdev->config.cayman.sx_max_export_size = 256; | ||
| 914 | rdev->config.cayman.sx_max_export_pos_size = 64; | ||
| 915 | rdev->config.cayman.sx_max_export_smx_size = 192; | ||
| 908 | } else if ((rdev->pdev->device == 0x9919) || | 916 | } else if ((rdev->pdev->device == 0x9919) || |
| 909 | (rdev->pdev->device == 0x9990) || | 917 | (rdev->pdev->device == 0x9990) || |
| 910 | (rdev->pdev->device == 0x9991) || | 918 | (rdev->pdev->device == 0x9991) || |
| @@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
| 915 | (rdev->pdev->device == 0x99A0)) { | 923 | (rdev->pdev->device == 0x99A0)) { |
| 916 | rdev->config.cayman.max_simds_per_se = 3; | 924 | rdev->config.cayman.max_simds_per_se = 3; |
| 917 | rdev->config.cayman.max_backends_per_se = 1; | 925 | rdev->config.cayman.max_backends_per_se = 1; |
| 926 | rdev->config.cayman.max_hw_contexts = 4; | ||
| 927 | rdev->config.cayman.sx_max_export_size = 128; | ||
| 928 | rdev->config.cayman.sx_max_export_pos_size = 32; | ||
| 929 | rdev->config.cayman.sx_max_export_smx_size = 96; | ||
| 918 | } else { | 930 | } else { |
| 919 | rdev->config.cayman.max_simds_per_se = 2; | 931 | rdev->config.cayman.max_simds_per_se = 2; |
| 920 | rdev->config.cayman.max_backends_per_se = 1; | 932 | rdev->config.cayman.max_backends_per_se = 1; |
| 933 | rdev->config.cayman.max_hw_contexts = 4; | ||
| 934 | rdev->config.cayman.sx_max_export_size = 128; | ||
| 935 | rdev->config.cayman.sx_max_export_pos_size = 32; | ||
| 936 | rdev->config.cayman.sx_max_export_smx_size = 96; | ||
| 921 | } | 937 | } |
| 922 | rdev->config.cayman.max_texture_channel_caches = 2; | 938 | rdev->config.cayman.max_texture_channel_caches = 2; |
| 923 | rdev->config.cayman.max_gprs = 256; | 939 | rdev->config.cayman.max_gprs = 256; |
| @@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
| 925 | rdev->config.cayman.max_gs_threads = 32; | 941 | rdev->config.cayman.max_gs_threads = 32; |
| 926 | rdev->config.cayman.max_stack_entries = 512; | 942 | rdev->config.cayman.max_stack_entries = 512; |
| 927 | rdev->config.cayman.sx_num_of_sets = 8; | 943 | rdev->config.cayman.sx_num_of_sets = 8; |
| 928 | rdev->config.cayman.sx_max_export_size = 256; | ||
| 929 | rdev->config.cayman.sx_max_export_pos_size = 64; | ||
| 930 | rdev->config.cayman.sx_max_export_smx_size = 192; | ||
| 931 | rdev->config.cayman.max_hw_contexts = 8; | ||
| 932 | rdev->config.cayman.sq_num_cf_insts = 2; | 944 | rdev->config.cayman.sq_num_cf_insts = 2; |
| 933 | 945 | ||
| 934 | rdev->config.cayman.sc_prim_fifo_size = 0x40; | 946 | rdev->config.cayman.sc_prim_fifo_size = 0x40; |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b1f990d0eaa1..45e1f447bc79 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -1940,7 +1940,7 @@ struct si_asic { | |||
| 1940 | unsigned sc_earlyz_tile_fifo_size; | 1940 | unsigned sc_earlyz_tile_fifo_size; |
| 1941 | 1941 | ||
| 1942 | unsigned num_tile_pipes; | 1942 | unsigned num_tile_pipes; |
| 1943 | unsigned num_backends_per_se; | 1943 | unsigned backend_enable_mask; |
| 1944 | unsigned backend_disable_mask_per_asic; | 1944 | unsigned backend_disable_mask_per_asic; |
| 1945 | unsigned backend_map; | 1945 | unsigned backend_map; |
| 1946 | unsigned num_texture_channel_caches; | 1946 | unsigned num_texture_channel_caches; |
| @@ -1970,7 +1970,7 @@ struct cik_asic { | |||
| 1970 | unsigned sc_earlyz_tile_fifo_size; | 1970 | unsigned sc_earlyz_tile_fifo_size; |
| 1971 | 1971 | ||
| 1972 | unsigned num_tile_pipes; | 1972 | unsigned num_tile_pipes; |
| 1973 | unsigned num_backends_per_se; | 1973 | unsigned backend_enable_mask; |
| 1974 | unsigned backend_disable_mask_per_asic; | 1974 | unsigned backend_disable_mask_per_asic; |
| 1975 | unsigned backend_map; | 1975 | unsigned backend_map; |
| 1976 | unsigned num_texture_channel_caches; | 1976 | unsigned num_texture_channel_caches; |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e354ce94cdd1..c0425bb6223a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = { | |||
| 2021 | .hdmi_setmode = &evergreen_hdmi_setmode, | 2021 | .hdmi_setmode = &evergreen_hdmi_setmode, |
| 2022 | }, | 2022 | }, |
| 2023 | .copy = { | 2023 | .copy = { |
| 2024 | .blit = NULL, | 2024 | .blit = &cik_copy_cpdma, |
| 2025 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, | 2025 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
| 2026 | .dma = &cik_copy_dma, | 2026 | .dma = &cik_copy_dma, |
| 2027 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, | 2027 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
| @@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = { | |||
| 2122 | .hdmi_setmode = &evergreen_hdmi_setmode, | 2122 | .hdmi_setmode = &evergreen_hdmi_setmode, |
| 2123 | }, | 2123 | }, |
| 2124 | .copy = { | 2124 | .copy = { |
| 2125 | .blit = NULL, | 2125 | .blit = &cik_copy_cpdma, |
| 2126 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, | 2126 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
| 2127 | .dma = &cik_copy_dma, | 2127 | .dma = &cik_copy_dma, |
| 2128 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, | 2128 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 9d302eaeea15..485848f889f5 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c | |||
| @@ -33,6 +33,7 @@ static struct radeon_atpx_priv { | |||
| 33 | bool atpx_detected; | 33 | bool atpx_detected; |
| 34 | /* handle for device - and atpx */ | 34 | /* handle for device - and atpx */ |
| 35 | acpi_handle dhandle; | 35 | acpi_handle dhandle; |
| 36 | acpi_handle other_handle; | ||
| 36 | struct radeon_atpx atpx; | 37 | struct radeon_atpx atpx; |
| 37 | } radeon_atpx_priv; | 38 | } radeon_atpx_priv; |
| 38 | 39 | ||
| @@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) | |||
| 451 | return false; | 452 | return false; |
| 452 | 453 | ||
| 453 | status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); | 454 | status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); |
| 454 | if (ACPI_FAILURE(status)) | 455 | if (ACPI_FAILURE(status)) { |
| 456 | radeon_atpx_priv.other_handle = dhandle; | ||
| 455 | return false; | 457 | return false; |
| 456 | 458 | } | |
| 457 | radeon_atpx_priv.dhandle = dhandle; | 459 | radeon_atpx_priv.dhandle = dhandle; |
| 458 | radeon_atpx_priv.atpx.handle = atpx_handle; | 460 | radeon_atpx_priv.atpx.handle = atpx_handle; |
| 459 | return true; | 461 | return true; |
| @@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void) | |||
| 530 | printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", | 532 | printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", |
| 531 | acpi_method_name); | 533 | acpi_method_name); |
| 532 | radeon_atpx_priv.atpx_detected = true; | 534 | radeon_atpx_priv.atpx_detected = true; |
| 535 | /* | ||
| 536 | * On some systems hotplug events are generated for the device | ||
| 537 | * being switched off when ATPX is executed. They cause ACPI | ||
| 538 | * hotplug to trigger and attempt to remove the device from | ||
| 539 | * the system, which causes it to break down. Prevent that from | ||
| 540 | * happening by setting the no_hotplug flag for the involved | ||
| 541 | * ACPI device objects. | ||
| 542 | */ | ||
| 543 | acpi_bus_no_hotplug(radeon_atpx_priv.dhandle); | ||
| 544 | acpi_bus_no_hotplug(radeon_atpx_priv.other_handle); | ||
| 533 | return true; | 545 | return true; |
| 534 | } | 546 | } |
| 535 | return false; | 547 | return false; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 9f5ff28864f6..db39ea36bf22 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -77,9 +77,10 @@ | |||
| 77 | * 2.33.0 - Add SI tiling mode array query | 77 | * 2.33.0 - Add SI tiling mode array query |
| 78 | * 2.34.0 - Add CIK tiling mode array query | 78 | * 2.34.0 - Add CIK tiling mode array query |
| 79 | * 2.35.0 - Add CIK macrotile mode array query | 79 | * 2.35.0 - Add CIK macrotile mode array query |
| 80 | * 2.36.0 - Fix CIK DCE tiling setup | ||
| 80 | */ | 81 | */ |
| 81 | #define KMS_DRIVER_MAJOR 2 | 82 | #define KMS_DRIVER_MAJOR 2 |
| 82 | #define KMS_DRIVER_MINOR 35 | 83 | #define KMS_DRIVER_MINOR 36 |
| 83 | #define KMS_DRIVER_PATCHLEVEL 0 | 84 | #define KMS_DRIVER_PATCHLEVEL 0 |
| 84 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 85 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
| 85 | int radeon_driver_unload_kms(struct drm_device *dev); | 86 | int radeon_driver_unload_kms(struct drm_device *dev); |
| @@ -508,15 +509,6 @@ static const struct file_operations radeon_driver_kms_fops = { | |||
| 508 | #endif | 509 | #endif |
| 509 | }; | 510 | }; |
| 510 | 511 | ||
| 511 | |||
| 512 | static void | ||
| 513 | radeon_pci_shutdown(struct pci_dev *pdev) | ||
| 514 | { | ||
| 515 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
| 516 | |||
| 517 | radeon_driver_unload_kms(dev); | ||
| 518 | } | ||
| 519 | |||
| 520 | static struct drm_driver kms_driver = { | 512 | static struct drm_driver kms_driver = { |
| 521 | .driver_features = | 513 | .driver_features = |
| 522 | DRIVER_USE_AGP | | 514 | DRIVER_USE_AGP | |
| @@ -586,7 +578,6 @@ static struct pci_driver radeon_kms_pci_driver = { | |||
| 586 | .probe = radeon_pci_probe, | 578 | .probe = radeon_pci_probe, |
| 587 | .remove = radeon_pci_remove, | 579 | .remove = radeon_pci_remove, |
| 588 | .driver.pm = &radeon_pm_ops, | 580 | .driver.pm = &radeon_pm_ops, |
| 589 | .shutdown = radeon_pci_shutdown, | ||
| 590 | }; | 581 | }; |
| 591 | 582 | ||
| 592 | static int __init radeon_init(void) | 583 | static int __init radeon_init(void) |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 55d0b474bd37..21d593c0ecaf 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
| @@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 461 | case RADEON_INFO_SI_CP_DMA_COMPUTE: | 461 | case RADEON_INFO_SI_CP_DMA_COMPUTE: |
| 462 | *value = 1; | 462 | *value = 1; |
| 463 | break; | 463 | break; |
| 464 | case RADEON_INFO_SI_BACKEND_ENABLED_MASK: | ||
| 465 | if (rdev->family >= CHIP_BONAIRE) { | ||
| 466 | *value = rdev->config.cik.backend_enable_mask; | ||
| 467 | } else if (rdev->family >= CHIP_TAHITI) { | ||
| 468 | *value = rdev->config.si.backend_enable_mask; | ||
| 469 | } else { | ||
| 470 | DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); | ||
| 471 | } | ||
| 472 | break; | ||
| 464 | default: | 473 | default: |
| 465 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); | 474 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); |
| 466 | return -EINVAL; | 475 | return -EINVAL; |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 373d088bac66..b9c0529b4a2e 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
| @@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
| 473 | return -EINVAL; | 473 | return -EINVAL; |
| 474 | } | 474 | } |
| 475 | 475 | ||
| 476 | if ((start >> 28) != (end >> 28)) { | 476 | if ((start >> 28) != ((end - 1) >> 28)) { |
| 477 | DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", | 477 | DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", |
| 478 | start, end); | 478 | start, end); |
| 479 | return -EINVAL; | 479 | return -EINVAL; |
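
The reloc check above rejected buffers that merely end on a 256MB boundary; comparing against end - 1 looks at the last byte actually addressed instead. A minimal standalone sketch (userspace C, example addresses only, not part of the patch) of the difference:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Buffer occupying 0x0ff00000..0x0fffffff, i.e. ending exactly
		 * at the 256MB mark without crossing it. */
		uint64_t start = 0x0ff00000, end = 0x10000000;

		printf("old check: %d\n", (start >> 28) != (end >> 28));       /* 1: rejected */
		printf("new check: %d\n", (start >> 28) != ((end - 1) >> 28)); /* 0: accepted */
		return 0;
	}
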
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 1c560629575a..e7dab069cccf 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev) | |||
| 162 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | 162 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); |
| 163 | base = G_000100_MC_FB_START(base) << 16; | 163 | base = G_000100_MC_FB_START(base) << 16; |
| 164 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 164 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
| 165 | /* Some boards seem to be configured for 128MB of sideport memory, | ||
| 166 | * but really only have 64MB. Just skip the sideport and use | ||
| 167 | * UMA memory. | ||
| 168 | */ | ||
| 169 | if (rdev->mc.igp_sideport_enabled && | ||
| 170 | (rdev->mc.real_vram_size == (384 * 1024 * 1024))) { | ||
| 171 | base += 128 * 1024 * 1024; | ||
| 172 | rdev->mc.real_vram_size -= 128 * 1024 * 1024; | ||
| 173 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
| 174 | } | ||
| 165 | 175 | ||
| 166 | /* Use K8 direct mapping for fast fb access. */ | 176 | /* Use K8 direct mapping for fast fb access. */ |
| 167 | rdev->fastfb_working = false; | 177 | rdev->fastfb_working = false; |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 913b025ae9b3..374499db20c7 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
| @@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) | |||
| 2328 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | 2328 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
| 2329 | ASIC_INTERNAL_MEMORY_SS, 0); | 2329 | ASIC_INTERNAL_MEMORY_SS, 0); |
| 2330 | 2330 | ||
| 2331 | /* disable ss, causes hangs on some cayman boards */ | ||
| 2332 | if (rdev->family == CHIP_CAYMAN) { | ||
| 2333 | pi->sclk_ss = false; | ||
| 2334 | pi->mclk_ss = false; | ||
| 2335 | } | ||
| 2336 | |||
| 2331 | if (pi->sclk_ss || pi->mclk_ss) | 2337 | if (pi->sclk_ss || pi->mclk_ss) |
| 2332 | pi->dynamic_ss = true; | 2338 | pi->dynamic_ss = true; |
| 2333 | else | 2339 | else |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index a36736dab5e0..85e1edfaa3be 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev, | |||
| 2811 | } | 2811 | } |
| 2812 | 2812 | ||
| 2813 | static u32 si_get_rb_disabled(struct radeon_device *rdev, | 2813 | static u32 si_get_rb_disabled(struct radeon_device *rdev, |
| 2814 | u32 max_rb_num, u32 se_num, | 2814 | u32 max_rb_num_per_se, |
| 2815 | u32 sh_per_se) | 2815 | u32 sh_per_se) |
| 2816 | { | 2816 | { |
| 2817 | u32 data, mask; | 2817 | u32 data, mask; |
| @@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev, | |||
| 2825 | 2825 | ||
| 2826 | data >>= BACKEND_DISABLE_SHIFT; | 2826 | data >>= BACKEND_DISABLE_SHIFT; |
| 2827 | 2827 | ||
| 2828 | mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); | 2828 | mask = si_create_bitmask(max_rb_num_per_se / sh_per_se); |
| 2829 | 2829 | ||
| 2830 | return data & mask; | 2830 | return data & mask; |
| 2831 | } | 2831 | } |
| 2832 | 2832 | ||
| 2833 | static void si_setup_rb(struct radeon_device *rdev, | 2833 | static void si_setup_rb(struct radeon_device *rdev, |
| 2834 | u32 se_num, u32 sh_per_se, | 2834 | u32 se_num, u32 sh_per_se, |
| 2835 | u32 max_rb_num) | 2835 | u32 max_rb_num_per_se) |
| 2836 | { | 2836 | { |
| 2837 | int i, j; | 2837 | int i, j; |
| 2838 | u32 data, mask; | 2838 | u32 data, mask; |
| @@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev, | |||
| 2842 | for (i = 0; i < se_num; i++) { | 2842 | for (i = 0; i < se_num; i++) { |
| 2843 | for (j = 0; j < sh_per_se; j++) { | 2843 | for (j = 0; j < sh_per_se; j++) { |
| 2844 | si_select_se_sh(rdev, i, j); | 2844 | si_select_se_sh(rdev, i, j); |
| 2845 | data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); | 2845 | data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); |
| 2846 | disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); | 2846 | disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); |
| 2847 | } | 2847 | } |
| 2848 | } | 2848 | } |
| 2849 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | 2849 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
| 2850 | 2850 | ||
| 2851 | mask = 1; | 2851 | mask = 1; |
| 2852 | for (i = 0; i < max_rb_num; i++) { | 2852 | for (i = 0; i < max_rb_num_per_se * se_num; i++) { |
| 2853 | if (!(disabled_rbs & mask)) | 2853 | if (!(disabled_rbs & mask)) |
| 2854 | enabled_rbs |= mask; | 2854 | enabled_rbs |= mask; |
| 2855 | mask <<= 1; | 2855 | mask <<= 1; |
| 2856 | } | 2856 | } |
| 2857 | 2857 | ||
| 2858 | rdev->config.si.backend_enable_mask = enabled_rbs; | ||
| 2859 | |||
| 2858 | for (i = 0; i < se_num; i++) { | 2860 | for (i = 0; i < se_num; i++) { |
| 2859 | si_select_se_sh(rdev, i, 0xffffffff); | 2861 | si_select_se_sh(rdev, i, 0xffffffff); |
| 2860 | data = 0; | 2862 | data = 0; |
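
Both the SI and CIK paths now cache the enabled render-backend mask so it can be reported through the new RADEON_INFO_SI_BACKEND_ENABLED_MASK query. A minimal standalone sketch (userspace C, example values only) of how the mask is derived from the per-SE disable bits:

	#include <stdio.h>

	int main(void)
	{
		unsigned se_num = 2, max_rb_num_per_se = 2;
		unsigned disabled_rbs = 0x4;	/* e.g. RB 2 fused off */
		unsigned enabled_rbs = 0, mask = 1;
		unsigned i;

		/* Walk every RB across all shader engines, keeping the bits
		 * that are not marked disabled. */
		for (i = 0; i < max_rb_num_per_se * se_num; i++) {
			if (!(disabled_rbs & mask))
				enabled_rbs |= mask;
			mask <<= 1;
		}
		printf("backend_enable_mask = 0x%x\n", enabled_rbs);	/* 0xb */
		return 0;
	}
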
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 15b86a94949d..406152152315 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
| @@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
| 353 | * Don't move nonexistent data. Clear destination instead. | 353 | * Don't move nonexistent data. Clear destination instead. |
| 354 | */ | 354 | */ |
| 355 | if (old_iomap == NULL && | 355 | if (old_iomap == NULL && |
| 356 | (ttm == NULL || ttm->state == tt_unpopulated)) { | 356 | (ttm == NULL || (ttm->state == tt_unpopulated && |
| 357 | !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) { | ||
| 357 | memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); | 358 | memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); |
| 358 | goto out2; | 359 | goto out2; |
| 359 | } | 360 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index b249ab9b1eb2..6440eeac22d2 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
| @@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + | 171 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + |
| 172 | drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; | 172 | vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); |
| 173 | page_last = vma_pages(vma) + | 173 | page_last = vma_pages(vma) + vma->vm_pgoff - |
| 174 | drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; | 174 | drm_vma_node_start(&bo->vma_node); |
| 175 | 175 | ||
| 176 | if (unlikely(page_offset >= bo->num_pages)) { | 176 | if (unlikely(page_offset >= bo->num_pages)) { |
| 177 | retval = VM_FAULT_SIGBUS; | 177 | retval = VM_FAULT_SIGBUS; |
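
The fault handler above had the offset arithmetic inverted: the page index inside the buffer object is the fault offset within the VMA plus vm_pgoff minus the object's start in the address-space manager, not the other way around. A standalone sketch (example page numbers only) showing how the old form underflows:

	#include <stdio.h>

	int main(void)
	{
		unsigned long node_start = 0x1000;	/* BO start in the vma manager (pages) */
		unsigned long vm_pgoff   = 0x1004;	/* mmap() began 4 pages into the BO */
		unsigned long fault_page = 2;		/* fault offset within the VMA (pages) */

		unsigned long old   = fault_page + node_start - vm_pgoff;	/* underflows */
		unsigned long fixed = fault_page + vm_pgoff - node_start;	/* page 6 of the BO */

		printf("old=%lu fixed=%lu\n", old, fixed);
		return 0;
	}
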
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index a51f48e3e917..45d5b5ab6ca9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
| @@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 68 | SVGA_FIFO_3D_HWVERSION)); | 68 | SVGA_FIFO_3D_HWVERSION)); |
| 69 | break; | 69 | break; |
| 70 | } | 70 | } |
| 71 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: | ||
| 72 | param->value = dev_priv->memory_size; | ||
| 73 | break; | ||
| 71 | default: | 74 | default: |
| 72 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 75 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
| 73 | param->param); | 76 | param->param); |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 92d1206482a6..797ed29a36ea 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
| @@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table; | |||
| 123 | * which is also the index into the MWAIT hint array. | 123 | * which is also the index into the MWAIT hint array. |
| 124 | * Thus C0 is a dummy. | 124 | * Thus C0 is a dummy. |
| 125 | */ | 125 | */ |
| 126 | static struct cpuidle_state nehalem_cstates[] __initdata = { | 126 | static struct cpuidle_state nehalem_cstates[] = { |
| 127 | { | 127 | { |
| 128 | .name = "C1-NHM", | 128 | .name = "C1-NHM", |
| 129 | .desc = "MWAIT 0x00", | 129 | .desc = "MWAIT 0x00", |
| @@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = { | |||
| 156 | .enter = NULL } | 156 | .enter = NULL } |
| 157 | }; | 157 | }; |
| 158 | 158 | ||
| 159 | static struct cpuidle_state snb_cstates[] __initdata = { | 159 | static struct cpuidle_state snb_cstates[] = { |
| 160 | { | 160 | { |
| 161 | .name = "C1-SNB", | 161 | .name = "C1-SNB", |
| 162 | .desc = "MWAIT 0x00", | 162 | .desc = "MWAIT 0x00", |
| @@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = { | |||
| 196 | .enter = NULL } | 196 | .enter = NULL } |
| 197 | }; | 197 | }; |
| 198 | 198 | ||
| 199 | static struct cpuidle_state ivb_cstates[] __initdata = { | 199 | static struct cpuidle_state ivb_cstates[] = { |
| 200 | { | 200 | { |
| 201 | .name = "C1-IVB", | 201 | .name = "C1-IVB", |
| 202 | .desc = "MWAIT 0x00", | 202 | .desc = "MWAIT 0x00", |
| @@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = { | |||
| 236 | .enter = NULL } | 236 | .enter = NULL } |
| 237 | }; | 237 | }; |
| 238 | 238 | ||
| 239 | static struct cpuidle_state hsw_cstates[] __initdata = { | 239 | static struct cpuidle_state hsw_cstates[] = { |
| 240 | { | 240 | { |
| 241 | .name = "C1-HSW", | 241 | .name = "C1-HSW", |
| 242 | .desc = "MWAIT 0x00", | 242 | .desc = "MWAIT 0x00", |
| @@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { | |||
| 297 | .enter = NULL } | 297 | .enter = NULL } |
| 298 | }; | 298 | }; |
| 299 | 299 | ||
| 300 | static struct cpuidle_state atom_cstates[] __initdata = { | 300 | static struct cpuidle_state atom_cstates[] = { |
| 301 | { | 301 | { |
| 302 | .name = "C1E-ATM", | 302 | .name = "C1E-ATM", |
| 303 | .desc = "MWAIT 0x00", | 303 | .desc = "MWAIT 0x00", |
| @@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = { | |||
| 329 | { | 329 | { |
| 330 | .enter = NULL } | 330 | .enter = NULL } |
| 331 | }; | 331 | }; |
| 332 | static struct cpuidle_state avn_cstates[] __initdata = { | 332 | static struct cpuidle_state avn_cstates[] = { |
| 333 | { | 333 | { |
| 334 | .name = "C1-AVN", | 334 | .name = "C1-AVN", |
| 335 | .desc = "MWAIT 0x00", | 335 | .desc = "MWAIT 0x00", |
| @@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = { | |||
| 344 | .exit_latency = 15, | 344 | .exit_latency = 15, |
| 345 | .target_residency = 45, | 345 | .target_residency = 45, |
| 346 | .enter = &intel_idle }, | 346 | .enter = &intel_idle }, |
| 347 | { | ||
| 348 | .enter = NULL } | ||
| 347 | }; | 349 | }; |
| 348 | 350 | ||
| 349 | /** | 351 | /** |
| @@ -377,6 +379,9 @@ static int intel_idle(struct cpuidle_device *dev, | |||
| 377 | 379 | ||
| 378 | if (!current_set_polling_and_test()) { | 380 | if (!current_set_polling_and_test()) { |
| 379 | 381 | ||
| 382 | if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) | ||
| 383 | clflush((void *)¤t_thread_info()->flags); | ||
| 384 | |||
| 380 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 385 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
| 381 | smp_mb(); | 386 | smp_mb(); |
| 382 | if (!need_resched()) | 387 | if (!need_resched()) |
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c index acb7f90359a3..749a6cadab8b 100644 --- a/drivers/iio/adc/ad7887.c +++ b/drivers/iio/adc/ad7887.c | |||
| @@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = { | |||
| 200 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), | 200 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), |
| 201 | .address = 1, | 201 | .address = 1, |
| 202 | .scan_index = 1, | 202 | .scan_index = 1, |
| 203 | .scan_type = IIO_ST('u', 12, 16, 0), | 203 | .scan_type = { |
| 204 | .sign = 'u', | ||
| 205 | .realbits = 12, | ||
| 206 | .storagebits = 16, | ||
| 207 | .shift = 0, | ||
| 208 | .endianness = IIO_BE, | ||
| 209 | }, | ||
| 204 | }, | 210 | }, |
| 205 | .channel[1] = { | 211 | .channel[1] = { |
| 206 | .type = IIO_VOLTAGE, | 212 | .type = IIO_VOLTAGE, |
| @@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = { | |||
| 210 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), | 216 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), |
| 211 | .address = 0, | 217 | .address = 0, |
| 212 | .scan_index = 0, | 218 | .scan_index = 0, |
| 213 | .scan_type = IIO_ST('u', 12, 16, 0), | 219 | .scan_type = { |
| 220 | .sign = 'u', | ||
| 221 | .realbits = 12, | ||
| 222 | .storagebits = 16, | ||
| 223 | .shift = 0, | ||
| 224 | .endianness = IIO_BE, | ||
| 225 | }, | ||
| 214 | }, | 226 | }, |
| 215 | .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2), | 227 | .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2), |
| 216 | .int_vref_mv = 2500, | 228 | .int_vref_mv = 2500, |
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 3fb7757a1028..368660dfe135 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c | |||
| @@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = { | |||
| 651 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), | 651 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), |
| 652 | .address = ADIS16448_BARO_OUT, | 652 | .address = ADIS16448_BARO_OUT, |
| 653 | .scan_index = ADIS16400_SCAN_BARO, | 653 | .scan_index = ADIS16400_SCAN_BARO, |
| 654 | .scan_type = IIO_ST('s', 16, 16, 0), | 654 | .scan_type = { |
| 655 | .sign = 's', | ||
| 656 | .realbits = 16, | ||
| 657 | .storagebits = 16, | ||
| 658 | .endianness = IIO_BE, | ||
| 659 | }, | ||
| 655 | }, | 660 | }, |
| 656 | ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), | 661 | ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), |
| 657 | IIO_CHAN_SOFT_TIMESTAMP(11) | 662 | IIO_CHAN_SOFT_TIMESTAMP(11) |
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c index 21df57130018..0922e39b0ea9 100644 --- a/drivers/iio/light/cm36651.c +++ b/drivers/iio/light/cm36651.c | |||
| @@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651, | |||
| 387 | return -EINVAL; | 387 | return -EINVAL; |
| 388 | } | 388 | } |
| 389 | 389 | ||
| 390 | return IIO_VAL_INT_PLUS_MICRO; | 390 | return IIO_VAL_INT; |
| 391 | } | 391 | } |
| 392 | 392 | ||
| 393 | static int cm36651_write_int_time(struct cm36651_data *cm36651, | 393 | static int cm36651_write_int_time(struct cm36651_data *cm36651, |
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index c47c2034ca71..0717940ec3b5 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
| @@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id) | |||
| 181 | static void rem_ref(struct iw_cm_id *cm_id) | 181 | static void rem_ref(struct iw_cm_id *cm_id) |
| 182 | { | 182 | { |
| 183 | struct iwcm_id_private *cm_id_priv; | 183 | struct iwcm_id_private *cm_id_priv; |
| 184 | int cb_destroy; | ||
| 185 | |||
| 184 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | 186 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); |
| 185 | if (iwcm_deref_id(cm_id_priv) && | 187 | |
| 186 | test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) { | 188 | /* |
| 189 | * Test bit before deref in case the cm_id gets freed on another | ||
| 190 | * thread. | ||
| 191 | */ | ||
| 192 | cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); | ||
| 193 | if (iwcm_deref_id(cm_id_priv) && cb_destroy) { | ||
| 187 | BUG_ON(!list_empty(&cm_id_priv->work_list)); | 194 | BUG_ON(!list_empty(&cm_id_priv->work_list)); |
| 188 | free_cm_id(cm_id_priv); | 195 | free_cm_id(cm_id_priv); |
| 189 | } | 196 | } |
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index bdc842e9faef..a283274a5a09 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h | |||
| @@ -49,12 +49,20 @@ | |||
| 49 | 49 | ||
| 50 | #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ | 50 | #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ |
| 51 | do { \ | 51 | do { \ |
| 52 | (udata)->inbuf = (void __user *) (ibuf); \ | 52 | (udata)->inbuf = (const void __user *) (ibuf); \ |
| 53 | (udata)->outbuf = (void __user *) (obuf); \ | 53 | (udata)->outbuf = (void __user *) (obuf); \ |
| 54 | (udata)->inlen = (ilen); \ | 54 | (udata)->inlen = (ilen); \ |
| 55 | (udata)->outlen = (olen); \ | 55 | (udata)->outlen = (olen); \ |
| 56 | } while (0) | 56 | } while (0) |
| 57 | 57 | ||
| 58 | #define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \ | ||
| 59 | do { \ | ||
| 60 | (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \ | ||
| 61 | (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \ | ||
| 62 | (udata)->inlen = (ilen); \ | ||
| 63 | (udata)->outlen = (olen); \ | ||
| 64 | } while (0) | ||
| 65 | |||
| 58 | /* | 66 | /* |
| 59 | * Our lifetime rules for these structs are the following: | 67 | * Our lifetime rules for these structs are the following: |
| 60 | * | 68 | * |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 65f6e7dc380c..f1cc83855af6 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -2593,6 +2593,9 @@ out_put: | |||
| 2593 | static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, | 2593 | static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, |
| 2594 | union ib_flow_spec *ib_spec) | 2594 | union ib_flow_spec *ib_spec) |
| 2595 | { | 2595 | { |
| 2596 | if (kern_spec->reserved) | ||
| 2597 | return -EINVAL; | ||
| 2598 | |||
| 2596 | ib_spec->type = kern_spec->type; | 2599 | ib_spec->type = kern_spec->type; |
| 2597 | 2600 | ||
| 2598 | switch (ib_spec->type) { | 2601 | switch (ib_spec->type) { |
| @@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
| 2646 | void *ib_spec; | 2649 | void *ib_spec; |
| 2647 | int i; | 2650 | int i; |
| 2648 | 2651 | ||
| 2652 | if (ucore->inlen < sizeof(cmd)) | ||
| 2653 | return -EINVAL; | ||
| 2654 | |||
| 2649 | if (ucore->outlen < sizeof(resp)) | 2655 | if (ucore->outlen < sizeof(resp)) |
| 2650 | return -ENOSPC; | 2656 | return -ENOSPC; |
| 2651 | 2657 | ||
| @@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
| 2671 | (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) | 2677 | (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) |
| 2672 | return -EINVAL; | 2678 | return -EINVAL; |
| 2673 | 2679 | ||
| 2680 | if (cmd.flow_attr.reserved[0] || | ||
| 2681 | cmd.flow_attr.reserved[1]) | ||
| 2682 | return -EINVAL; | ||
| 2683 | |||
| 2674 | if (cmd.flow_attr.num_of_specs) { | 2684 | if (cmd.flow_attr.num_of_specs) { |
| 2675 | kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, | 2685 | kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, |
| 2676 | GFP_KERNEL); | 2686 | GFP_KERNEL); |
| @@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
| 2731 | if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { | 2741 | if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { |
| 2732 | pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", | 2742 | pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", |
| 2733 | i, cmd.flow_attr.size); | 2743 | i, cmd.flow_attr.size); |
| 2744 | err = -EINVAL; | ||
| 2734 | goto err_free; | 2745 | goto err_free; |
| 2735 | } | 2746 | } |
| 2736 | flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); | 2747 | flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); |
| @@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, | |||
| 2791 | struct ib_uobject *uobj; | 2802 | struct ib_uobject *uobj; |
| 2792 | int ret; | 2803 | int ret; |
| 2793 | 2804 | ||
| 2805 | if (ucore->inlen < sizeof(cmd)) | ||
| 2806 | return -EINVAL; | ||
| 2807 | |||
| 2794 | ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); | 2808 | ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); |
| 2795 | if (ret) | 2809 | if (ret) |
| 2796 | return ret; | 2810 | return ret; |
| 2797 | 2811 | ||
| 2812 | if (cmd.comp_mask) | ||
| 2813 | return -EINVAL; | ||
| 2814 | |||
| 2798 | uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, | 2815 | uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, |
| 2799 | file->ucontext); | 2816 | file->ucontext); |
| 2800 | if (!uobj) | 2817 | if (!uobj) |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 34386943ebcf..08219fb3338b 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
| 668 | if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) | 668 | if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) |
| 669 | return -EINVAL; | 669 | return -EINVAL; |
| 670 | 670 | ||
| 671 | if (ex_hdr.cmd_hdr_reserved) | ||
| 672 | return -EINVAL; | ||
| 673 | |||
| 671 | if (ex_hdr.response) { | 674 | if (ex_hdr.response) { |
| 672 | if (!hdr.out_words && !ex_hdr.provider_out_words) | 675 | if (!hdr.out_words && !ex_hdr.provider_out_words) |
| 673 | return -EINVAL; | 676 | return -EINVAL; |
| 677 | |||
| 678 | if (!access_ok(VERIFY_WRITE, | ||
| 679 | (void __user *) (unsigned long) ex_hdr.response, | ||
| 680 | (hdr.out_words + ex_hdr.provider_out_words) * 8)) | ||
| 681 | return -EFAULT; | ||
| 674 | } else { | 682 | } else { |
| 675 | if (hdr.out_words || ex_hdr.provider_out_words) | 683 | if (hdr.out_words || ex_hdr.provider_out_words) |
| 676 | return -EINVAL; | 684 | return -EINVAL; |
| 677 | } | 685 | } |
| 678 | 686 | ||
| 679 | INIT_UDATA(&ucore, | 687 | INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response, |
| 680 | (hdr.in_words) ? buf : 0, | 688 | hdr.in_words * 8, hdr.out_words * 8); |
| 681 | (unsigned long)ex_hdr.response, | 689 | |
| 682 | hdr.in_words * 8, | 690 | INIT_UDATA_BUF_OR_NULL(&uhw, |
| 683 | hdr.out_words * 8); | 691 | buf + ucore.inlen, |
| 684 | 692 | (unsigned long) ex_hdr.response + ucore.outlen, | |
| 685 | INIT_UDATA(&uhw, | 693 | ex_hdr.provider_in_words * 8, |
| 686 | (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0, | 694 | ex_hdr.provider_out_words * 8); |
| 687 | (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0, | ||
| 688 | ex_hdr.provider_in_words * 8, | ||
| 689 | ex_hdr.provider_out_words * 8); | ||
| 690 | 695 | ||
| 691 | err = uverbs_ex_cmd_table[command](file, | 696 | err = uverbs_ex_cmd_table[command](file, |
| 692 | &ucore, | 697 | &ucore, |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 12fef76c791c..45126879ad28 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
| @@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | |||
| 524 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | 524 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
| 525 | } | 525 | } |
| 526 | 526 | ||
| 527 | #define VLAN_NONE 0xfff | ||
| 528 | #define FILTER_SEL_VLAN_NONE 0xffff | ||
| 529 | #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ | ||
| 530 | #define FILTER_SEL_WIDTH_VIN_P_FC \ | ||
| 531 | (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ | ||
| 532 | #define FILTER_SEL_WIDTH_TAG_P_FC \ | ||
| 533 | (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ | ||
| 534 | #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) | ||
| 535 | |||
| 536 | static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst, | ||
| 537 | struct l2t_entry *l2t) | ||
| 538 | { | ||
| 539 | unsigned int ntuple = 0; | ||
| 540 | u32 viid; | ||
| 541 | |||
| 542 | switch (dev->rdev.lldi.filt_mode) { | ||
| 543 | |||
| 544 | /* default filter mode */ | ||
| 545 | case HW_TPL_FR_MT_PR_IV_P_FC: | ||
| 546 | if (l2t->vlan == VLAN_NONE) | ||
| 547 | ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; | ||
| 548 | else { | ||
| 549 | ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; | ||
| 550 | ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC; | ||
| 551 | } | ||
| 552 | ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << | ||
| 553 | FILTER_SEL_WIDTH_VLD_TAG_P_FC; | ||
| 554 | break; | ||
| 555 | case HW_TPL_FR_MT_PR_OV_P_FC: { | ||
| 556 | viid = cxgb4_port_viid(l2t->neigh->dev); | ||
| 557 | |||
| 558 | ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; | ||
| 559 | ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; | ||
| 560 | ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; | ||
| 561 | ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << | ||
| 562 | FILTER_SEL_WIDTH_VLD_TAG_P_FC; | ||
| 563 | break; | ||
| 564 | } | ||
| 565 | default: | ||
| 566 | break; | ||
| 567 | } | ||
| 568 | return ntuple; | ||
| 569 | } | ||
| 570 | |||
| 571 | static int send_connect(struct c4iw_ep *ep) | 527 | static int send_connect(struct c4iw_ep *ep) |
| 572 | { | 528 | { |
| 573 | struct cpl_act_open_req *req; | 529 | struct cpl_act_open_req *req; |
| @@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep) | |||
| 641 | req->local_ip = la->sin_addr.s_addr; | 597 | req->local_ip = la->sin_addr.s_addr; |
| 642 | req->peer_ip = ra->sin_addr.s_addr; | 598 | req->peer_ip = ra->sin_addr.s_addr; |
| 643 | req->opt0 = cpu_to_be64(opt0); | 599 | req->opt0 = cpu_to_be64(opt0); |
| 644 | req->params = cpu_to_be32(select_ntuple(ep->com.dev, | 600 | req->params = cpu_to_be32(cxgb4_select_ntuple( |
| 645 | ep->dst, ep->l2t)); | 601 | ep->com.dev->rdev.lldi.ports[0], |
| 602 | ep->l2t)); | ||
| 646 | req->opt2 = cpu_to_be32(opt2); | 603 | req->opt2 = cpu_to_be32(opt2); |
| 647 | } else { | 604 | } else { |
| 648 | req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); | 605 | req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); |
| @@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep) | |||
| 662 | req6->peer_ip_lo = *((__be64 *) | 619 | req6->peer_ip_lo = *((__be64 *) |
| 663 | (ra6->sin6_addr.s6_addr + 8)); | 620 | (ra6->sin6_addr.s6_addr + 8)); |
| 664 | req6->opt0 = cpu_to_be64(opt0); | 621 | req6->opt0 = cpu_to_be64(opt0); |
| 665 | req6->params = cpu_to_be32( | 622 | req6->params = cpu_to_be32(cxgb4_select_ntuple( |
| 666 | select_ntuple(ep->com.dev, ep->dst, | 623 | ep->com.dev->rdev.lldi.ports[0], |
| 667 | ep->l2t)); | 624 | ep->l2t)); |
| 668 | req6->opt2 = cpu_to_be32(opt2); | 625 | req6->opt2 = cpu_to_be32(opt2); |
| 669 | } | 626 | } |
| 670 | } else { | 627 | } else { |
| @@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep) | |||
| 681 | t5_req->peer_ip = ra->sin_addr.s_addr; | 638 | t5_req->peer_ip = ra->sin_addr.s_addr; |
| 682 | t5_req->opt0 = cpu_to_be64(opt0); | 639 | t5_req->opt0 = cpu_to_be64(opt0); |
| 683 | t5_req->params = cpu_to_be64(V_FILTER_TUPLE( | 640 | t5_req->params = cpu_to_be64(V_FILTER_TUPLE( |
| 684 | select_ntuple(ep->com.dev, | 641 | cxgb4_select_ntuple( |
| 685 | ep->dst, ep->l2t))); | 642 | ep->com.dev->rdev.lldi.ports[0], |
| 643 | ep->l2t))); | ||
| 686 | t5_req->opt2 = cpu_to_be32(opt2); | 644 | t5_req->opt2 = cpu_to_be32(opt2); |
| 687 | } else { | 645 | } else { |
| 688 | t5_req6 = (struct cpl_t5_act_open_req6 *) | 646 | t5_req6 = (struct cpl_t5_act_open_req6 *) |
| @@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep) | |||
| 703 | (ra6->sin6_addr.s6_addr + 8)); | 661 | (ra6->sin6_addr.s6_addr + 8)); |
| 704 | t5_req6->opt0 = cpu_to_be64(opt0); | 662 | t5_req6->opt0 = cpu_to_be64(opt0); |
| 705 | t5_req6->params = (__force __be64)cpu_to_be32( | 663 | t5_req6->params = (__force __be64)cpu_to_be32( |
| 706 | select_ntuple(ep->com.dev, ep->dst, ep->l2t)); | 664 | cxgb4_select_ntuple( |
| 665 | ep->com.dev->rdev.lldi.ports[0], | ||
| 666 | ep->l2t)); | ||
| 707 | t5_req6->opt2 = cpu_to_be32(opt2); | 667 | t5_req6->opt2 = cpu_to_be32(opt2); |
| 708 | } | 668 | } |
| 709 | } | 669 | } |
| @@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) | |||
| 1630 | memset(req, 0, sizeof(*req)); | 1590 | memset(req, 0, sizeof(*req)); |
| 1631 | req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); | 1591 | req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); |
| 1632 | req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); | 1592 | req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); |
| 1633 | req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, | 1593 | req->le.filter = cpu_to_be32(cxgb4_select_ntuple( |
| 1594 | ep->com.dev->rdev.lldi.ports[0], | ||
| 1634 | ep->l2t)); | 1595 | ep->l2t)); |
| 1635 | sin = (struct sockaddr_in *)&ep->com.local_addr; | 1596 | sin = (struct sockaddr_in *)&ep->com.local_addr; |
| 1636 | req->le.lport = sin->sin_port; | 1597 | req->le.lport = sin->sin_port; |
| @@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
| 2938 | /* | 2899 | /* |
| 2939 | * Allocate a server TID. | 2900 | * Allocate a server TID. |
| 2940 | */ | 2901 | */ |
| 2941 | if (dev->rdev.lldi.enable_fw_ofld_conn) | 2902 | if (dev->rdev.lldi.enable_fw_ofld_conn && |
| 2903 | ep->com.local_addr.ss_family == AF_INET) | ||
| 2942 | ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, | 2904 | ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, |
| 2943 | cm_id->local_addr.ss_family, ep); | 2905 | cm_id->local_addr.ss_family, ep); |
| 2944 | else | 2906 | else |
| @@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 3323 | /* | 3285 | /* |
| 3324 | * Calculate the server tid from filter hit index from cpl_rx_pkt. | 3286 | * Calculate the server tid from filter hit index from cpl_rx_pkt. |
| 3325 | */ | 3287 | */ |
| 3326 | stid = (__force int) cpu_to_be32((__force u32) rss->hash_val) | 3288 | stid = (__force int) cpu_to_be32((__force u32) rss->hash_val); |
| 3327 | - dev->rdev.lldi.tids->sftid_base | ||
| 3328 | + dev->rdev.lldi.tids->nstids; | ||
| 3329 | 3289 | ||
| 3330 | lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); | 3290 | lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); |
| 3331 | if (!lep) { | 3291 | if (!lep) { |
| @@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 3397 | window = (__force u16) htons((__force u16)tcph->window); | 3357 | window = (__force u16) htons((__force u16)tcph->window); |
| 3398 | 3358 | ||
| 3399 | /* Calculate filter portion for LE region. */ | 3359 | /* Calculate filter portion for LE region. */ |
| 3400 | filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e)); | 3360 | filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple( |
| 3361 | dev->rdev.lldi.ports[0], | ||
| 3362 | e)); | ||
| 3401 | 3363 | ||
| 3402 | /* | 3364 | /* |
| 3403 | * Synthesize the cpl_pass_accept_req. We have everything except the | 3365 | * Synthesize the cpl_pass_accept_req. We have everything except the |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 4cb8eb24497c..84e45006451c 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
| @@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, | |||
| 173 | return ret; | 173 | return ret; |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) | 176 | static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) |
| 177 | { | 177 | { |
| 178 | u32 remain = len; | 178 | u32 remain = len; |
| 179 | u32 dmalen; | 179 | u32 dmalen; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index c29b5c838833..cdc7df4fdb8a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | */ | 31 | */ |
| 32 | 32 | ||
| 33 | #include <linux/netdevice.h> | 33 | #include <linux/netdevice.h> |
| 34 | #include <linux/if_arp.h> /* For ARPHRD_xxx */ | ||
| 34 | #include <linux/module.h> | 35 | #include <linux/module.h> |
| 35 | #include <net/rtnetlink.h> | 36 | #include <net/rtnetlink.h> |
| 36 | #include "ipoib.h" | 37 | #include "ipoib.h" |
| @@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, | |||
| 103 | return -EINVAL; | 104 | return -EINVAL; |
| 104 | 105 | ||
| 105 | pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); | 106 | pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); |
| 106 | if (!pdev) | 107 | if (!pdev || pdev->type != ARPHRD_INFINIBAND) |
| 107 | return -ENODEV; | 108 | return -ENODEV; |
| 108 | 109 | ||
| 109 | ppriv = netdev_priv(pdev); | 110 | ppriv = netdev_priv(pdev); |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 6be57c38638d..9804fca6bf06 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
| @@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn) | |||
| 207 | isert_conn->conn_rx_descs = NULL; | 207 | isert_conn->conn_rx_descs = NULL; |
| 208 | } | 208 | } |
| 209 | 209 | ||
| 210 | static void isert_cq_tx_work(struct work_struct *); | ||
| 210 | static void isert_cq_tx_callback(struct ib_cq *, void *); | 211 | static void isert_cq_tx_callback(struct ib_cq *, void *); |
| 212 | static void isert_cq_rx_work(struct work_struct *); | ||
| 211 | static void isert_cq_rx_callback(struct ib_cq *, void *); | 213 | static void isert_cq_rx_callback(struct ib_cq *, void *); |
| 212 | 214 | ||
| 213 | static int | 215 | static int |
| @@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device) | |||
| 259 | cq_desc[i].device = device; | 261 | cq_desc[i].device = device; |
| 260 | cq_desc[i].cq_index = i; | 262 | cq_desc[i].cq_index = i; |
| 261 | 263 | ||
| 264 | INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work); | ||
| 262 | device->dev_rx_cq[i] = ib_create_cq(device->ib_device, | 265 | device->dev_rx_cq[i] = ib_create_cq(device->ib_device, |
| 263 | isert_cq_rx_callback, | 266 | isert_cq_rx_callback, |
| 264 | isert_cq_event_callback, | 267 | isert_cq_event_callback, |
| 265 | (void *)&cq_desc[i], | 268 | (void *)&cq_desc[i], |
| 266 | ISER_MAX_RX_CQ_LEN, i); | 269 | ISER_MAX_RX_CQ_LEN, i); |
| 267 | if (IS_ERR(device->dev_rx_cq[i])) | 270 | if (IS_ERR(device->dev_rx_cq[i])) { |
| 271 | ret = PTR_ERR(device->dev_rx_cq[i]); | ||
| 272 | device->dev_rx_cq[i] = NULL; | ||
| 268 | goto out_cq; | 273 | goto out_cq; |
| 274 | } | ||
| 269 | 275 | ||
| 276 | INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work); | ||
| 270 | device->dev_tx_cq[i] = ib_create_cq(device->ib_device, | 277 | device->dev_tx_cq[i] = ib_create_cq(device->ib_device, |
| 271 | isert_cq_tx_callback, | 278 | isert_cq_tx_callback, |
| 272 | isert_cq_event_callback, | 279 | isert_cq_event_callback, |
| 273 | (void *)&cq_desc[i], | 280 | (void *)&cq_desc[i], |
| 274 | ISER_MAX_TX_CQ_LEN, i); | 281 | ISER_MAX_TX_CQ_LEN, i); |
| 275 | if (IS_ERR(device->dev_tx_cq[i])) | 282 | if (IS_ERR(device->dev_tx_cq[i])) { |
| 283 | ret = PTR_ERR(device->dev_tx_cq[i]); | ||
| 284 | device->dev_tx_cq[i] = NULL; | ||
| 276 | goto out_cq; | 285 | goto out_cq; |
| 286 | } | ||
| 277 | 287 | ||
| 278 | if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP)) | 288 | ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP); |
| 289 | if (ret) | ||
| 279 | goto out_cq; | 290 | goto out_cq; |
| 280 | 291 | ||
| 281 | if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP)) | 292 | ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP); |
| 293 | if (ret) | ||
| 282 | goto out_cq; | 294 | goto out_cq; |
| 283 | } | 295 | } |
| 284 | 296 | ||
| @@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context) | |||
| 1724 | { | 1736 | { |
| 1725 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; | 1737 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; |
| 1726 | 1738 | ||
| 1727 | INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work); | ||
| 1728 | queue_work(isert_comp_wq, &cq_desc->cq_tx_work); | 1739 | queue_work(isert_comp_wq, &cq_desc->cq_tx_work); |
| 1729 | } | 1740 | } |
| 1730 | 1741 | ||
| @@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context) | |||
| 1768 | { | 1779 | { |
| 1769 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; | 1780 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; |
| 1770 | 1781 | ||
| 1771 | INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work); | ||
| 1772 | queue_work(isert_rx_wq, &cq_desc->cq_rx_work); | 1782 | queue_work(isert_rx_wq, &cq_desc->cq_rx_work); |
| 1773 | } | 1783 | } |
| 1774 | 1784 | ||
diff --git a/drivers/input/input.c b/drivers/input/input.c index 846ccdd905b1..d2965e4b3224 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
| @@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int | |||
| 1871 | break; | 1871 | break; |
| 1872 | 1872 | ||
| 1873 | case EV_ABS: | 1873 | case EV_ABS: |
| 1874 | input_alloc_absinfo(dev); | ||
| 1875 | if (!dev->absinfo) | ||
| 1876 | return; | ||
| 1877 | |||
| 1874 | __set_bit(code, dev->absbit); | 1878 | __set_bit(code, dev->absbit); |
| 1875 | break; | 1879 | break; |
| 1876 | 1880 | ||
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index 75762d6ff3ba..aa127ba392a4 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c | |||
| @@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result) | |||
| 455 | } | 455 | } |
| 456 | } | 456 | } |
| 457 | 457 | ||
| 458 | static irqreturn_t zforce_interrupt(int irq, void *dev_id) | 458 | static irqreturn_t zforce_irq(int irq, void *dev_id) |
| 459 | { | ||
| 460 | struct zforce_ts *ts = dev_id; | ||
| 461 | struct i2c_client *client = ts->client; | ||
| 462 | |||
| 463 | if (ts->suspended && device_may_wakeup(&client->dev)) | ||
| 464 | pm_wakeup_event(&client->dev, 500); | ||
| 465 | |||
| 466 | return IRQ_WAKE_THREAD; | ||
| 467 | } | ||
| 468 | |||
| 469 | static irqreturn_t zforce_irq_thread(int irq, void *dev_id) | ||
| 459 | { | 470 | { |
| 460 | struct zforce_ts *ts = dev_id; | 471 | struct zforce_ts *ts = dev_id; |
| 461 | struct i2c_client *client = ts->client; | 472 | struct i2c_client *client = ts->client; |
| @@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id) | |||
| 465 | u8 *payload; | 476 | u8 *payload; |
| 466 | 477 | ||
| 467 | /* | 478 | /* |
| 468 | * When suspended, emit a wakeup signal if necessary and return. | 479 | * When still suspended, return. |
| 469 | * Due to the level-interrupt we will get re-triggered later. | 480 | * Due to the level-interrupt we will get re-triggered later. |
| 470 | */ | 481 | */ |
| 471 | if (ts->suspended) { | 482 | if (ts->suspended) { |
| 472 | if (device_may_wakeup(&client->dev)) | ||
| 473 | pm_wakeup_event(&client->dev, 500); | ||
| 474 | msleep(20); | 483 | msleep(20); |
| 475 | return IRQ_HANDLED; | 484 | return IRQ_HANDLED; |
| 476 | } | 485 | } |
| @@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client, | |||
| 763 | * Therefore we can trigger the interrupt anytime it is low and do | 772 | * Therefore we can trigger the interrupt anytime it is low and do |
| 764 | * not need to limit it to the interrupt edge. | 773 | * not need to limit it to the interrupt edge. |
| 765 | */ | 774 | */ |
| 766 | ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, | 775 | ret = devm_request_threaded_irq(&client->dev, client->irq, |
| 767 | zforce_interrupt, | 776 | zforce_irq, zforce_irq_thread, |
| 768 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, | 777 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, |
| 769 | input_dev->name, ts); | 778 | input_dev->name, ts); |
| 770 | if (ret) { | 779 | if (ret) { |
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 82cec63a9011..3ee78f02e5d7 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c | |||
| @@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p, | |||
| 149 | static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, | 149 | static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, |
| 150 | int irq, int do_mask) | 150 | int irq, int do_mask) |
| 151 | { | 151 | { |
| 152 | int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */ | 152 | /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */ |
| 153 | int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */ | 153 | int bitfield_width = 4; |
| 154 | int shift = 32 - (irq + 1) * bitfield_width; | ||
| 154 | 155 | ||
| 155 | intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, | 156 | intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, |
| 156 | shift, bitfield_width, | 157 | shift, bitfield_width, |
| @@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, | |||
| 159 | 160 | ||
| 160 | static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) | 161 | static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) |
| 161 | { | 162 | { |
| 163 | /* The SENSE register is assumed to be 32-bit. */ | ||
| 162 | int bitfield_width = p->config.sense_bitfield_width; | 164 | int bitfield_width = p->config.sense_bitfield_width; |
| 163 | int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */ | 165 | int shift = 32 - (irq + 1) * bitfield_width; |
| 164 | 166 | ||
| 165 | dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); | 167 | dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); |
| 166 | 168 | ||
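The new shift expression anchors each bitfield at the top of a 32-bit register instead of assuming exactly eight fields. For the fixed 4-bit PRIO fields the two formulas coincide, e.g. irq 0 gives (7 - 0) * 4 = 28 and 32 - (0 + 1) * 4 = 28, but once the width differs from four they diverge: with a 2-bit field, the old expression would put irq 0 at bit 14 while the new one correctly yields 32 - (0 + 1) * 2 = 30. A small plain-C check of the two formulas:

	/* shift of field 'irq' in a 32-bit register, 'width' bits per field */
	static int old_shift(int irq, int width) { return (7 - irq) * width; }
	static int new_shift(int irq, int width) { return 32 - (irq + 1) * width; }

	/* width 4: old_shift(0, 4) == new_shift(0, 4) == 28  (identical)
	 * width 2: old_shift(0, 2) == 14, new_shift(0, 2) == 30  (old is wrong) */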
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 497bd026c237..4a4825528188 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c | |||
| @@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card) | |||
| 1643 | int i; | 1643 | int i; |
| 1644 | struct pci_dev *tmp_hfcpci = NULL; | 1644 | struct pci_dev *tmp_hfcpci = NULL; |
| 1645 | 1645 | ||
| 1646 | #ifdef __BIG_ENDIAN | ||
| 1647 | #error "not running on big endian machines now" | ||
| 1648 | #endif | ||
| 1649 | |||
| 1650 | strcpy(tmp, hfcpci_revision); | 1646 | strcpy(tmp, hfcpci_revision); |
| 1651 | printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); | 1647 | printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); |
| 1652 | 1648 | ||
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c index f6ab63aa6995..33eeb4602c7e 100644 --- a/drivers/isdn/hisax/telespci.c +++ b/drivers/isdn/hisax/telespci.c | |||
| @@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card) | |||
| 290 | struct IsdnCardState *cs = card->cs; | 290 | struct IsdnCardState *cs = card->cs; |
| 291 | char tmp[64]; | 291 | char tmp[64]; |
| 292 | 292 | ||
| 293 | #ifdef __BIG_ENDIAN | ||
| 294 | #error "not running on big endian machines now" | ||
| 295 | #endif | ||
| 296 | |||
| 297 | strcpy(tmp, telespci_revision); | 293 | strcpy(tmp, telespci_revision); |
| 298 | printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp)); | 294 | printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp)); |
| 299 | if (cs->typ != ISDN_CTYPE_TELESPCI) | 295 | if (cs->typ != ISDN_CTYPE_TELESPCI) |
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 05188351711d..a97263e902ff 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c | |||
| @@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip, | |||
| 244 | if (i % 2) | 244 | if (i % 2) |
| 245 | goto err; | 245 | goto err; |
| 246 | 246 | ||
| 247 | mutex_lock(&chip->lock); | ||
| 248 | |||
| 249 | for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) { | 247 | for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) { |
| 250 | ret = lp55xx_write(chip, addr[idx] + i, pattern[i]); | 248 | ret = lp55xx_write(chip, addr[idx] + i, pattern[i]); |
| 251 | if (ret) { | 249 | if (ret) |
| 252 | mutex_unlock(&chip->lock); | ||
| 253 | return -EINVAL; | 250 | return -EINVAL; |
| 254 | } | ||
| 255 | } | 251 | } |
| 256 | 252 | ||
| 257 | mutex_unlock(&chip->lock); | ||
| 258 | |||
| 259 | return size; | 253 | return size; |
| 260 | 254 | ||
| 261 | err: | 255 | err: |
| @@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev, | |||
| 427 | { | 421 | { |
| 428 | struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); | 422 | struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); |
| 429 | struct lp55xx_chip *chip = led->chip; | 423 | struct lp55xx_chip *chip = led->chip; |
| 424 | int ret; | ||
| 430 | 425 | ||
| 431 | mutex_lock(&chip->lock); | 426 | mutex_lock(&chip->lock); |
| 432 | 427 | ||
| 433 | chip->engine_idx = nr; | 428 | chip->engine_idx = nr; |
| 434 | lp5521_load_engine(chip); | 429 | lp5521_load_engine(chip); |
| 430 | ret = lp5521_update_program_memory(chip, buf, len); | ||
| 435 | 431 | ||
| 436 | mutex_unlock(&chip->lock); | 432 | mutex_unlock(&chip->lock); |
| 437 | 433 | ||
| 438 | return lp5521_update_program_memory(chip, buf, len); | 434 | return ret; |
| 439 | } | 435 | } |
| 440 | store_load(1) | 436 | store_load(1) |
| 441 | store_load(2) | 437 | store_load(2) |
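The lp5521 hunks (and the matching lp5523 ones below) move chip->lock out of the program-memory helper and into the sysfs store path, so engine selection, engine load and the program write all happen inside one critical section, and the helper no longer has to unlock a mutex it did not take on its error paths. A hedged sketch of the resulting caller-owns-the-lock shape:

	static ssize_t example_store_engine(struct example_chip *chip,
					    const char *buf, size_t len)
	{
		ssize_t ret;

		mutex_lock(&chip->lock);
		select_engine(chip);				/* hypothetical helpers */
		ret = write_program_memory(chip, buf, len);	/* does no locking itself */
		mutex_unlock(&chip->lock);

		return ret;	/* capture the result first, never return while locked */
	}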
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 6b553d9f4266..fd9ab5f61441 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c | |||
| @@ -337,18 +337,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip, | |||
| 337 | if (i % 2) | 337 | if (i % 2) |
| 338 | goto err; | 338 | goto err; |
| 339 | 339 | ||
| 340 | mutex_lock(&chip->lock); | ||
| 341 | |||
| 342 | for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) { | 340 | for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) { |
| 343 | ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]); | 341 | ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]); |
| 344 | if (ret) { | 342 | if (ret) |
| 345 | mutex_unlock(&chip->lock); | ||
| 346 | return -EINVAL; | 343 | return -EINVAL; |
| 347 | } | ||
| 348 | } | 344 | } |
| 349 | 345 | ||
| 350 | mutex_unlock(&chip->lock); | ||
| 351 | |||
| 352 | return size; | 346 | return size; |
| 353 | 347 | ||
| 354 | err: | 348 | err: |
| @@ -548,15 +542,17 @@ static ssize_t store_engine_load(struct device *dev, | |||
| 548 | { | 542 | { |
| 549 | struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); | 543 | struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); |
| 550 | struct lp55xx_chip *chip = led->chip; | 544 | struct lp55xx_chip *chip = led->chip; |
| 545 | int ret; | ||
| 551 | 546 | ||
| 552 | mutex_lock(&chip->lock); | 547 | mutex_lock(&chip->lock); |
| 553 | 548 | ||
| 554 | chip->engine_idx = nr; | 549 | chip->engine_idx = nr; |
| 555 | lp5523_load_engine_and_select_page(chip); | 550 | lp5523_load_engine_and_select_page(chip); |
| 551 | ret = lp5523_update_program_memory(chip, buf, len); | ||
| 556 | 552 | ||
| 557 | mutex_unlock(&chip->lock); | 553 | mutex_unlock(&chip->lock); |
| 558 | 554 | ||
| 559 | return lp5523_update_program_memory(chip, buf, len); | 555 | return ret; |
| 560 | } | 556 | } |
| 561 | store_load(1) | 557 | store_load(1) |
| 562 | store_load(2) | 558 | store_load(2) |
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 2b46bf1d7e40..4c9852d92b0a 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c | |||
| @@ -421,9 +421,11 @@ out: | |||
| 421 | 421 | ||
| 422 | if (watermark <= WATERMARK_METADATA) { | 422 | if (watermark <= WATERMARK_METADATA) { |
| 423 | SET_GC_MARK(b, GC_MARK_METADATA); | 423 | SET_GC_MARK(b, GC_MARK_METADATA); |
| 424 | SET_GC_MOVE(b, 0); | ||
| 424 | b->prio = BTREE_PRIO; | 425 | b->prio = BTREE_PRIO; |
| 425 | } else { | 426 | } else { |
| 426 | SET_GC_MARK(b, GC_MARK_RECLAIMABLE); | 427 | SET_GC_MARK(b, GC_MARK_RECLAIMABLE); |
| 428 | SET_GC_MOVE(b, 0); | ||
| 427 | b->prio = INITIAL_PRIO; | 429 | b->prio = INITIAL_PRIO; |
| 428 | } | 430 | } |
| 429 | 431 | ||
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 4beb55a0ff30..754f43177483 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
| @@ -197,7 +197,7 @@ struct bucket { | |||
| 197 | uint8_t disk_gen; | 197 | uint8_t disk_gen; |
| 198 | uint8_t last_gc; /* Most out of date gen in the btree */ | 198 | uint8_t last_gc; /* Most out of date gen in the btree */ |
| 199 | uint8_t gc_gen; | 199 | uint8_t gc_gen; |
| 200 | uint16_t gc_mark; | 200 | uint16_t gc_mark; /* Bitfield used by GC. See below for field */ |
| 201 | }; | 201 | }; |
| 202 | 202 | ||
| 203 | /* | 203 | /* |
| @@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); | |||
| 209 | #define GC_MARK_RECLAIMABLE 0 | 209 | #define GC_MARK_RECLAIMABLE 0 |
| 210 | #define GC_MARK_DIRTY 1 | 210 | #define GC_MARK_DIRTY 1 |
| 211 | #define GC_MARK_METADATA 2 | 211 | #define GC_MARK_METADATA 2 |
| 212 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); | 212 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); |
| 213 | BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); | ||
| 213 | 214 | ||
| 214 | #include "journal.h" | 215 | #include "journal.h" |
| 215 | #include "stats.h" | 216 | #include "stats.h" |
| @@ -372,14 +373,14 @@ struct cached_dev { | |||
| 372 | unsigned char writeback_percent; | 373 | unsigned char writeback_percent; |
| 373 | unsigned writeback_delay; | 374 | unsigned writeback_delay; |
| 374 | 375 | ||
| 375 | int writeback_rate_change; | ||
| 376 | int64_t writeback_rate_derivative; | ||
| 377 | uint64_t writeback_rate_target; | 376 | uint64_t writeback_rate_target; |
| 377 | int64_t writeback_rate_proportional; | ||
| 378 | int64_t writeback_rate_derivative; | ||
| 379 | int64_t writeback_rate_change; | ||
| 378 | 380 | ||
| 379 | unsigned writeback_rate_update_seconds; | 381 | unsigned writeback_rate_update_seconds; |
| 380 | unsigned writeback_rate_d_term; | 382 | unsigned writeback_rate_d_term; |
| 381 | unsigned writeback_rate_p_term_inverse; | 383 | unsigned writeback_rate_p_term_inverse; |
| 382 | unsigned writeback_rate_d_smooth; | ||
| 383 | }; | 384 | }; |
| 384 | 385 | ||
| 385 | enum alloc_watermarks { | 386 | enum alloc_watermarks { |
| @@ -445,7 +446,6 @@ struct cache { | |||
| 445 | * call prio_write() to keep gens from wrapping. | 446 | * call prio_write() to keep gens from wrapping. |
| 446 | */ | 447 | */ |
| 447 | uint8_t need_save_prio; | 448 | uint8_t need_save_prio; |
| 448 | unsigned gc_move_threshold; | ||
| 449 | 449 | ||
| 450 | /* | 450 | /* |
| 451 | * If nonzero, we know we aren't going to find any buckets to invalidate | 451 | * If nonzero, we know we aren't going to find any buckets to invalidate |
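After this change the 16-bit gc_mark word carries three fields: a 2-bit mark, a 13-bit sector count and a single GC_MOVE flag in the top bit; the previous 2 + 14 split left no room for the flag. A sketch of the layout implied by the BITMASK() lines above:

	/*
	 *  bit 15      bits 14..2               bits 1..0
	 * +---------+-------------------------+----------+
	 * | GC_MOVE | GC_SECTORS_USED (13 bit)| GC_MARK  |
	 * +---------+-------------------------+----------+
	 */

The writeback fields in struct cached_dev are likewise regrouped so that the controller terms (target, proportional, derivative, change) sit together as signed 64-bit values, matching the new __update_writeback_rate() further down.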
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 5e2765aadce1..31bb53fcc67a 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
| @@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c) | |||
| 1561 | SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), | 1561 | SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), |
| 1562 | GC_MARK_METADATA); | 1562 | GC_MARK_METADATA); |
| 1563 | 1563 | ||
| 1564 | /* don't reclaim buckets to which writeback keys point */ | ||
| 1565 | rcu_read_lock(); | ||
| 1566 | for (i = 0; i < c->nr_uuids; i++) { | ||
| 1567 | struct bcache_device *d = c->devices[i]; | ||
| 1568 | struct cached_dev *dc; | ||
| 1569 | struct keybuf_key *w, *n; | ||
| 1570 | unsigned j; | ||
| 1571 | |||
| 1572 | if (!d || UUID_FLASH_ONLY(&c->uuids[i])) | ||
| 1573 | continue; | ||
| 1574 | dc = container_of(d, struct cached_dev, disk); | ||
| 1575 | |||
| 1576 | spin_lock(&dc->writeback_keys.lock); | ||
| 1577 | rbtree_postorder_for_each_entry_safe(w, n, | ||
| 1578 | &dc->writeback_keys.keys, node) | ||
| 1579 | for (j = 0; j < KEY_PTRS(&w->key); j++) | ||
| 1580 | SET_GC_MARK(PTR_BUCKET(c, &w->key, j), | ||
| 1581 | GC_MARK_DIRTY); | ||
| 1582 | spin_unlock(&dc->writeback_keys.lock); | ||
| 1583 | } | ||
| 1584 | rcu_read_unlock(); | ||
| 1585 | |||
| 1564 | for_each_cache(ca, c, i) { | 1586 | for_each_cache(ca, c, i) { |
| 1565 | uint64_t *i; | 1587 | uint64_t *i; |
| 1566 | 1588 | ||
| @@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert, | |||
| 1817 | if (KEY_START(k) > KEY_START(insert) + sectors_found) | 1839 | if (KEY_START(k) > KEY_START(insert) + sectors_found) |
| 1818 | goto check_failed; | 1840 | goto check_failed; |
| 1819 | 1841 | ||
| 1820 | if (KEY_PTRS(replace_key) != KEY_PTRS(k)) | 1842 | if (KEY_PTRS(k) != KEY_PTRS(replace_key) || |
| 1843 | KEY_DIRTY(k) != KEY_DIRTY(replace_key)) | ||
| 1821 | goto check_failed; | 1844 | goto check_failed; |
| 1822 | 1845 | ||
| 1823 | /* skip past gen */ | 1846 | /* skip past gen */ |
| @@ -2217,7 +2240,7 @@ struct btree_insert_op { | |||
| 2217 | struct bkey *replace_key; | 2240 | struct bkey *replace_key; |
| 2218 | }; | 2241 | }; |
| 2219 | 2242 | ||
| 2220 | int btree_insert_fn(struct btree_op *b_op, struct btree *b) | 2243 | static int btree_insert_fn(struct btree_op *b_op, struct btree *b) |
| 2221 | { | 2244 | { |
| 2222 | struct btree_insert_op *op = container_of(b_op, | 2245 | struct btree_insert_op *op = container_of(b_op, |
| 2223 | struct btree_insert_op, op); | 2246 | struct btree_insert_op, op); |
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 7c1275e66025..f2f0998c4a91 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c | |||
| @@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) | |||
| 25 | unsigned i; | 25 | unsigned i; |
| 26 | 26 | ||
| 27 | for (i = 0; i < KEY_PTRS(k); i++) { | 27 | for (i = 0; i < KEY_PTRS(k); i++) { |
| 28 | struct cache *ca = PTR_CACHE(c, k, i); | ||
| 29 | struct bucket *g = PTR_BUCKET(c, k, i); | 28 | struct bucket *g = PTR_BUCKET(c, k, i); |
| 30 | 29 | ||
| 31 | if (GC_SECTORS_USED(g) < ca->gc_move_threshold) | 30 | if (GC_MOVE(g)) |
| 32 | return true; | 31 | return true; |
| 33 | } | 32 | } |
| 34 | 33 | ||
| @@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl) | |||
| 65 | 64 | ||
| 66 | static void read_moving_endio(struct bio *bio, int error) | 65 | static void read_moving_endio(struct bio *bio, int error) |
| 67 | { | 66 | { |
| 67 | struct bbio *b = container_of(bio, struct bbio, bio); | ||
| 68 | struct moving_io *io = container_of(bio->bi_private, | 68 | struct moving_io *io = container_of(bio->bi_private, |
| 69 | struct moving_io, cl); | 69 | struct moving_io, cl); |
| 70 | 70 | ||
| 71 | if (error) | 71 | if (error) |
| 72 | io->op.error = error; | 72 | io->op.error = error; |
| 73 | else if (!KEY_DIRTY(&b->key) && | ||
| 74 | ptr_stale(io->op.c, &b->key, 0)) { | ||
| 75 | io->op.error = -EINTR; | ||
| 76 | } | ||
| 73 | 77 | ||
| 74 | bch_bbio_endio(io->op.c, bio, error, "reading data to move"); | 78 | bch_bbio_endio(io->op.c, bio, error, "reading data to move"); |
| 75 | } | 79 | } |
| @@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c) | |||
| 141 | if (!w) | 145 | if (!w) |
| 142 | break; | 146 | break; |
| 143 | 147 | ||
| 148 | if (ptr_stale(c, &w->key, 0)) { | ||
| 149 | bch_keybuf_del(&c->moving_gc_keys, w); | ||
| 150 | continue; | ||
| 151 | } | ||
| 152 | |||
| 144 | io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) | 153 | io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) |
| 145 | * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), | 154 | * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), |
| 146 | GFP_KERNEL); | 155 | GFP_KERNEL); |
| @@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r) | |||
| 184 | 193 | ||
| 185 | static unsigned bucket_heap_top(struct cache *ca) | 194 | static unsigned bucket_heap_top(struct cache *ca) |
| 186 | { | 195 | { |
| 187 | return GC_SECTORS_USED(heap_peek(&ca->heap)); | 196 | struct bucket *b; |
| 197 | return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; | ||
| 188 | } | 198 | } |
| 189 | 199 | ||
| 190 | void bch_moving_gc(struct cache_set *c) | 200 | void bch_moving_gc(struct cache_set *c) |
| @@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c) | |||
| 226 | sectors_to_move -= GC_SECTORS_USED(b); | 236 | sectors_to_move -= GC_SECTORS_USED(b); |
| 227 | } | 237 | } |
| 228 | 238 | ||
| 229 | ca->gc_move_threshold = bucket_heap_top(ca); | 239 | while (heap_pop(&ca->heap, b, bucket_cmp)) |
| 230 | 240 | SET_GC_MOVE(b, 1); | |
| 231 | pr_debug("threshold %u", ca->gc_move_threshold); | ||
| 232 | } | 241 | } |
| 233 | 242 | ||
| 234 | mutex_unlock(&c->bucket_lock); | 243 | mutex_unlock(&c->bucket_lock); |
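Moving GC now records its decision per bucket: bch_moving_gc() sets GC_MOVE on every bucket left in the heap and moving_pred() simply tests that flag, replacing the old per-cache gc_move_threshold comparison. bucket_heap_top() also guards against heap_peek() returning NULL once the heap is empty, and read_moving()/read_moving_endio() skip or fail keys whose pointer has gone stale. A hedged sketch of the NULL-safe peek:

	/* heap_peek() yields NULL on an empty heap, so check before using it. */
	static unsigned bucket_heap_top(struct cache *ca)
	{
		struct bucket *b = heap_peek(&ca->heap);

		return b ? GC_SECTORS_USED(b) : 0;
	}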
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index dec15cd2d797..c57bfa071a57 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
| @@ -1676,7 +1676,7 @@ err: | |||
| 1676 | static bool can_attach_cache(struct cache *ca, struct cache_set *c) | 1676 | static bool can_attach_cache(struct cache *ca, struct cache_set *c) |
| 1677 | { | 1677 | { |
| 1678 | return ca->sb.block_size == c->sb.block_size && | 1678 | return ca->sb.block_size == c->sb.block_size && |
| 1679 | ca->sb.bucket_size == c->sb.block_size && | 1679 | ca->sb.bucket_size == c->sb.bucket_size && |
| 1680 | ca->sb.nr_in_set == c->sb.nr_in_set; | 1680 | ca->sb.nr_in_set == c->sb.nr_in_set; |
| 1681 | } | 1681 | } |
| 1682 | 1682 | ||
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 80d4c2bee18a..a1f85612f0b3 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c | |||
| @@ -83,7 +83,6 @@ rw_attribute(writeback_rate); | |||
| 83 | rw_attribute(writeback_rate_update_seconds); | 83 | rw_attribute(writeback_rate_update_seconds); |
| 84 | rw_attribute(writeback_rate_d_term); | 84 | rw_attribute(writeback_rate_d_term); |
| 85 | rw_attribute(writeback_rate_p_term_inverse); | 85 | rw_attribute(writeback_rate_p_term_inverse); |
| 86 | rw_attribute(writeback_rate_d_smooth); | ||
| 87 | read_attribute(writeback_rate_debug); | 86 | read_attribute(writeback_rate_debug); |
| 88 | 87 | ||
| 89 | read_attribute(stripe_size); | 88 | read_attribute(stripe_size); |
| @@ -129,31 +128,41 @@ SHOW(__bch_cached_dev) | |||
| 129 | var_printf(writeback_running, "%i"); | 128 | var_printf(writeback_running, "%i"); |
| 130 | var_print(writeback_delay); | 129 | var_print(writeback_delay); |
| 131 | var_print(writeback_percent); | 130 | var_print(writeback_percent); |
| 132 | sysfs_print(writeback_rate, dc->writeback_rate.rate); | 131 | sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9); |
| 133 | 132 | ||
| 134 | var_print(writeback_rate_update_seconds); | 133 | var_print(writeback_rate_update_seconds); |
| 135 | var_print(writeback_rate_d_term); | 134 | var_print(writeback_rate_d_term); |
| 136 | var_print(writeback_rate_p_term_inverse); | 135 | var_print(writeback_rate_p_term_inverse); |
| 137 | var_print(writeback_rate_d_smooth); | ||
| 138 | 136 | ||
| 139 | if (attr == &sysfs_writeback_rate_debug) { | 137 | if (attr == &sysfs_writeback_rate_debug) { |
| 138 | char rate[20]; | ||
| 140 | char dirty[20]; | 139 | char dirty[20]; |
| 141 | char derivative[20]; | ||
| 142 | char target[20]; | 140 | char target[20]; |
| 143 | bch_hprint(dirty, | 141 | char proportional[20]; |
| 144 | bcache_dev_sectors_dirty(&dc->disk) << 9); | 142 | char derivative[20]; |
| 145 | bch_hprint(derivative, dc->writeback_rate_derivative << 9); | 143 | char change[20]; |
| 144 | s64 next_io; | ||
| 145 | |||
| 146 | bch_hprint(rate, dc->writeback_rate.rate << 9); | ||
| 147 | bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); | ||
| 146 | bch_hprint(target, dc->writeback_rate_target << 9); | 148 | bch_hprint(target, dc->writeback_rate_target << 9); |
| 149 | bch_hprint(proportional,dc->writeback_rate_proportional << 9); | ||
| 150 | bch_hprint(derivative, dc->writeback_rate_derivative << 9); | ||
| 151 | bch_hprint(change, dc->writeback_rate_change << 9); | ||
| 152 | |||
| 153 | next_io = div64_s64(dc->writeback_rate.next - local_clock(), | ||
| 154 | NSEC_PER_MSEC); | ||
| 147 | 155 | ||
| 148 | return sprintf(buf, | 156 | return sprintf(buf, |
| 149 | "rate:\t\t%u\n" | 157 | "rate:\t\t%s/sec\n" |
| 150 | "change:\t\t%i\n" | ||
| 151 | "dirty:\t\t%s\n" | 158 | "dirty:\t\t%s\n" |
| 159 | "target:\t\t%s\n" | ||
| 160 | "proportional:\t%s\n" | ||
| 152 | "derivative:\t%s\n" | 161 | "derivative:\t%s\n" |
| 153 | "target:\t\t%s\n", | 162 | "change:\t\t%s/sec\n" |
| 154 | dc->writeback_rate.rate, | 163 | "next io:\t%llims\n", |
| 155 | dc->writeback_rate_change, | 164 | rate, dirty, target, proportional, |
| 156 | dirty, derivative, target); | 165 | derivative, change, next_io); |
| 157 | } | 166 | } |
| 158 | 167 | ||
| 159 | sysfs_hprint(dirty_data, | 168 | sysfs_hprint(dirty_data, |
| @@ -189,6 +198,7 @@ STORE(__cached_dev) | |||
| 189 | struct kobj_uevent_env *env; | 198 | struct kobj_uevent_env *env; |
| 190 | 199 | ||
| 191 | #define d_strtoul(var) sysfs_strtoul(var, dc->var) | 200 | #define d_strtoul(var) sysfs_strtoul(var, dc->var) |
| 201 | #define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX) | ||
| 192 | #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) | 202 | #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) |
| 193 | 203 | ||
| 194 | sysfs_strtoul(data_csum, dc->disk.data_csum); | 204 | sysfs_strtoul(data_csum, dc->disk.data_csum); |
| @@ -197,16 +207,15 @@ STORE(__cached_dev) | |||
| 197 | d_strtoul(writeback_metadata); | 207 | d_strtoul(writeback_metadata); |
| 198 | d_strtoul(writeback_running); | 208 | d_strtoul(writeback_running); |
| 199 | d_strtoul(writeback_delay); | 209 | d_strtoul(writeback_delay); |
| 200 | sysfs_strtoul_clamp(writeback_rate, | 210 | |
| 201 | dc->writeback_rate.rate, 1, 1000000); | ||
| 202 | sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); | 211 | sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); |
| 203 | 212 | ||
| 204 | d_strtoul(writeback_rate_update_seconds); | 213 | sysfs_strtoul_clamp(writeback_rate, |
| 214 | dc->writeback_rate.rate, 1, INT_MAX); | ||
| 215 | |||
| 216 | d_strtoul_nonzero(writeback_rate_update_seconds); | ||
| 205 | d_strtoul(writeback_rate_d_term); | 217 | d_strtoul(writeback_rate_d_term); |
| 206 | d_strtoul(writeback_rate_p_term_inverse); | 218 | d_strtoul_nonzero(writeback_rate_p_term_inverse); |
| 207 | sysfs_strtoul_clamp(writeback_rate_p_term_inverse, | ||
| 208 | dc->writeback_rate_p_term_inverse, 1, INT_MAX); | ||
| 209 | d_strtoul(writeback_rate_d_smooth); | ||
| 210 | 219 | ||
| 211 | d_strtoi_h(sequential_cutoff); | 220 | d_strtoi_h(sequential_cutoff); |
| 212 | d_strtoi_h(readahead); | 221 | d_strtoi_h(readahead); |
| @@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = { | |||
| 313 | &sysfs_writeback_rate_update_seconds, | 322 | &sysfs_writeback_rate_update_seconds, |
| 314 | &sysfs_writeback_rate_d_term, | 323 | &sysfs_writeback_rate_d_term, |
| 315 | &sysfs_writeback_rate_p_term_inverse, | 324 | &sysfs_writeback_rate_p_term_inverse, |
| 316 | &sysfs_writeback_rate_d_smooth, | ||
| 317 | &sysfs_writeback_rate_debug, | 325 | &sysfs_writeback_rate_debug, |
| 318 | &sysfs_dirty_data, | 326 | &sysfs_dirty_data, |
| 319 | &sysfs_stripe_size, | 327 | &sysfs_stripe_size, |
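With the new format string, writeback_rate_debug reports every controller term in bch_hprint()'s human-readable units plus the time until the next write-back I/O. Reading the attribute should produce output shaped roughly like the following (values invented purely for illustration, and the exact suffix formatting depends on bch_hprint()):

	rate:		488.0k/sec
	dirty:		3.9G
	target:		4.0G
	proportional:	-16.0k
	derivative:	-1.2k
	change:		-17.2k/sec
	next io:	14ms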
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 462214eeacbe..bb37618e7664 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c | |||
| @@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) | |||
| 209 | { | 209 | { |
| 210 | uint64_t now = local_clock(); | 210 | uint64_t now = local_clock(); |
| 211 | 211 | ||
| 212 | d->next += div_u64(done, d->rate); | 212 | d->next += div_u64(done * NSEC_PER_SEC, d->rate); |
| 213 | |||
| 214 | if (time_before64(now + NSEC_PER_SEC, d->next)) | ||
| 215 | d->next = now + NSEC_PER_SEC; | ||
| 216 | |||
| 217 | if (time_after64(now - NSEC_PER_SEC * 2, d->next)) | ||
| 218 | d->next = now - NSEC_PER_SEC * 2; | ||
| 213 | 219 | ||
| 214 | return time_after64(d->next, now) | 220 | return time_after64(d->next, now) |
| 215 | ? div_u64(d->next - now, NSEC_PER_SEC / HZ) | 221 | ? div_u64(d->next - now, NSEC_PER_SEC / HZ) |
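bch_next_delay() now treats d->rate as units per second (hence the done * NSEC_PER_SEC scaling) and clamps the schedule so it can neither run more than one second ahead of the clock nor fall more than two seconds behind it, which keeps a long idle period from being converted into an unthrottled burst. A hedged, self-contained plain-C sketch of the same idea:

	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	struct ratelimit {
		uint64_t next;		/* absolute time of the next allowed op, ns */
		uint64_t rate;		/* units per second */
	};

	/* Returns how long (ns) the caller should wait after accounting 'done'. */
	static uint64_t next_delay(struct ratelimit *d, uint64_t done, uint64_t now)
	{
		d->next += done * NSEC_PER_SEC / d->rate;

		if (d->next > now + NSEC_PER_SEC)		/* never >1s ahead */
			d->next = now + NSEC_PER_SEC;
		if (now >= 2 * NSEC_PER_SEC && d->next < now - 2 * NSEC_PER_SEC)
			d->next = now - 2 * NSEC_PER_SEC;	/* forget old debt */

		return d->next > now ? d->next - now : 0;
	}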
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 362c4b3f8b4a..1030c6020e98 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h | |||
| @@ -110,7 +110,7 @@ do { \ | |||
| 110 | _r; \ | 110 | _r; \ |
| 111 | }) | 111 | }) |
| 112 | 112 | ||
| 113 | #define heap_peek(h) ((h)->size ? (h)->data[0] : NULL) | 113 | #define heap_peek(h) ((h)->used ? (h)->data[0] : NULL) |
| 114 | 114 | ||
| 115 | #define heap_full(h) ((h)->used == (h)->size) | 115 | #define heap_full(h) ((h)->used == (h)->size) |
| 116 | 116 | ||
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 99053b1251be..6c44fe059c27 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c | |||
| @@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc) | |||
| 30 | 30 | ||
| 31 | /* PD controller */ | 31 | /* PD controller */ |
| 32 | 32 | ||
| 33 | int change = 0; | ||
| 34 | int64_t error; | ||
| 35 | int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); | 33 | int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); |
| 36 | int64_t derivative = dirty - dc->disk.sectors_dirty_last; | 34 | int64_t derivative = dirty - dc->disk.sectors_dirty_last; |
| 35 | int64_t proportional = dirty - target; | ||
| 36 | int64_t change; | ||
| 37 | 37 | ||
| 38 | dc->disk.sectors_dirty_last = dirty; | 38 | dc->disk.sectors_dirty_last = dirty; |
| 39 | 39 | ||
| 40 | derivative *= dc->writeback_rate_d_term; | 40 | /* Scale to sectors per second */ |
| 41 | derivative = clamp(derivative, -dirty, dirty); | ||
| 42 | 41 | ||
| 43 | derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, | 42 | proportional *= dc->writeback_rate_update_seconds; |
| 44 | dc->writeback_rate_d_smooth, 0); | 43 | proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse); |
| 45 | 44 | ||
| 46 | /* Avoid divide by zero */ | 45 | derivative = div_s64(derivative, dc->writeback_rate_update_seconds); |
| 47 | if (!target) | ||
| 48 | goto out; | ||
| 49 | 46 | ||
| 50 | error = div64_s64((dirty + derivative - target) << 8, target); | 47 | derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, |
| 48 | (dc->writeback_rate_d_term / | ||
| 49 | dc->writeback_rate_update_seconds) ?: 1, 0); | ||
| 50 | |||
| 51 | derivative *= dc->writeback_rate_d_term; | ||
| 52 | derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse); | ||
| 51 | 53 | ||
| 52 | change = div_s64((dc->writeback_rate.rate * error) >> 8, | 54 | change = proportional + derivative; |
| 53 | dc->writeback_rate_p_term_inverse); | ||
| 54 | 55 | ||
| 55 | /* Don't increase writeback rate if the device isn't keeping up */ | 56 | /* Don't increase writeback rate if the device isn't keeping up */ |
| 56 | if (change > 0 && | 57 | if (change > 0 && |
| 57 | time_after64(local_clock(), | 58 | time_after64(local_clock(), |
| 58 | dc->writeback_rate.next + 10 * NSEC_PER_MSEC)) | 59 | dc->writeback_rate.next + NSEC_PER_MSEC)) |
| 59 | change = 0; | 60 | change = 0; |
| 60 | 61 | ||
| 61 | dc->writeback_rate.rate = | 62 | dc->writeback_rate.rate = |
| 62 | clamp_t(int64_t, dc->writeback_rate.rate + change, | 63 | clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change, |
| 63 | 1, NSEC_PER_MSEC); | 64 | 1, NSEC_PER_MSEC); |
| 64 | out: | 65 | |
| 66 | dc->writeback_rate_proportional = proportional; | ||
| 65 | dc->writeback_rate_derivative = derivative; | 67 | dc->writeback_rate_derivative = derivative; |
| 66 | dc->writeback_rate_change = change; | 68 | dc->writeback_rate_change = change; |
| 67 | dc->writeback_rate_target = target; | 69 | dc->writeback_rate_target = target; |
| @@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work) | |||
| 87 | 89 | ||
| 88 | static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) | 90 | static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) |
| 89 | { | 91 | { |
| 90 | uint64_t ret; | ||
| 91 | |||
| 92 | if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || | 92 | if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || |
| 93 | !dc->writeback_percent) | 93 | !dc->writeback_percent) |
| 94 | return 0; | 94 | return 0; |
| 95 | 95 | ||
| 96 | ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); | 96 | return bch_next_delay(&dc->writeback_rate, sectors); |
| 97 | |||
| 98 | return min_t(uint64_t, ret, HZ); | ||
| 99 | } | 97 | } |
| 100 | 98 | ||
| 101 | struct dirty_io { | 99 | struct dirty_io { |
| @@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc) | |||
| 241 | if (KEY_START(&w->key) != dc->last_read || | 239 | if (KEY_START(&w->key) != dc->last_read || |
| 242 | jiffies_to_msecs(delay) > 50) | 240 | jiffies_to_msecs(delay) > 50) |
| 243 | while (!kthread_should_stop() && delay) | 241 | while (!kthread_should_stop() && delay) |
| 244 | delay = schedule_timeout_interruptible(delay); | 242 | delay = schedule_timeout_uninterruptible(delay); |
| 245 | 243 | ||
| 246 | dc->last_read = KEY_OFFSET(&w->key); | 244 | dc->last_read = KEY_OFFSET(&w->key); |
| 247 | 245 | ||
| @@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg) | |||
| 438 | while (delay && | 436 | while (delay && |
| 439 | !kthread_should_stop() && | 437 | !kthread_should_stop() && |
| 440 | !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) | 438 | !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) |
| 441 | delay = schedule_timeout_interruptible(delay); | 439 | delay = schedule_timeout_uninterruptible(delay); |
| 442 | } | 440 | } |
| 443 | } | 441 | } |
| 444 | 442 | ||
| @@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc) | |||
| 476 | 474 | ||
| 477 | bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), | 475 | bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), |
| 478 | sectors_dirty_init_fn, 0); | 476 | sectors_dirty_init_fn, 0); |
| 477 | |||
| 478 | dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); | ||
| 479 | } | 479 | } |
| 480 | 480 | ||
| 481 | int bch_cached_dev_writeback_init(struct cached_dev *dc) | 481 | int bch_cached_dev_writeback_init(struct cached_dev *dc) |
| @@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc) | |||
| 490 | dc->writeback_delay = 30; | 490 | dc->writeback_delay = 30; |
| 491 | dc->writeback_rate.rate = 1024; | 491 | dc->writeback_rate.rate = 1024; |
| 492 | 492 | ||
| 493 | dc->writeback_rate_update_seconds = 30; | 493 | dc->writeback_rate_update_seconds = 5; |
| 494 | dc->writeback_rate_d_term = 16; | 494 | dc->writeback_rate_d_term = 30; |
| 495 | dc->writeback_rate_p_term_inverse = 64; | 495 | dc->writeback_rate_p_term_inverse = 6000; |
| 496 | dc->writeback_rate_d_smooth = 8; | ||
| 497 | 496 | ||
| 498 | dc->writeback_thread = kthread_create(bch_writeback_thread, dc, | 497 | dc->writeback_thread = kthread_create(bch_writeback_thread, dc, |
| 499 | "bcache_writeback"); | 498 | "bcache_writeback"); |
| 500 | if (IS_ERR(dc->writeback_thread)) | 499 | if (IS_ERR(dc->writeback_thread)) |
| 501 | return PTR_ERR(dc->writeback_thread); | 500 | return PTR_ERR(dc->writeback_thread); |
| 502 | 501 | ||
| 503 | set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE); | ||
| 504 | |||
| 505 | INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); | 502 | INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); |
| 506 | schedule_delayed_work(&dc->writeback_rate_update, | 503 | schedule_delayed_work(&dc->writeback_rate_update, |
| 507 | dc->writeback_rate_update_seconds * HZ); | 504 | dc->writeback_rate_update_seconds * HZ); |
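The rewritten __update_writeback_rate() is a plain proportional-derivative controller: the proportional term is (dirty - target) scaled by update_seconds / p_term_inverse, the derivative is the change in dirty sectors per second smoothed with an EWMA and scaled by d_term / p_term_inverse, and their sum adjusts a rate clamped to [1, NSEC_PER_MSEC] sectors per second. A hedged, compilable sketch of one control step, using the new defaults from bch_cached_dev_writeback_init() (5 s updates, d_term 30, p_term_inverse 6000); the EWMA below is a simple stand-in for the kernel's ewma_add() helper:

	#include <stdint.h>

	struct wb_state {
		int64_t rate;		/* sectors per second, clamped to [1, 1000000] */
		int64_t last_dirty;	/* dirty sectors seen at the previous update */
		int64_t d_smoothed;	/* EWMA of the raw derivative */
	};

	static const int64_t update_seconds = 5, d_term = 30, p_inverse = 6000;

	static void update_rate(struct wb_state *s, int64_t dirty, int64_t target)
	{
		int64_t proportional = (dirty - target) * update_seconds / p_inverse;
		int64_t derivative = (dirty - s->last_dirty) / update_seconds;

		s->last_dirty = dirty;

		/* exponential moving average with weight d_term / update_seconds */
		s->d_smoothed += (derivative - s->d_smoothed) / (d_term / update_seconds);
		derivative = s->d_smoothed * d_term / p_inverse;

		s->rate += proportional + derivative;
		if (s->rate < 1)
			s->rate = 1;
		if (s->rate > 1000000)		/* NSEC_PER_MSEC upper bound */
			s->rate = 1000000;
	}

Because writeback_delay() now hands raw sector counts to bch_next_delay(), the rate and the limiter finally use the same unit, which is what lets the old 10000000ULL fudge factor and the HZ cap disappear.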
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 6841d6805fd6..41ab5e34d2ac 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c | |||
| @@ -245,7 +245,7 @@ static int pcf50633_probe(struct i2c_client *client, | |||
| 245 | for (i = 0; i < PCF50633_NUM_REGULATORS; i++) { | 245 | for (i = 0; i < PCF50633_NUM_REGULATORS; i++) { |
| 246 | struct platform_device *pdev; | 246 | struct platform_device *pdev; |
| 247 | 247 | ||
| 248 | pdev = platform_device_alloc("pcf50633-regltr", i); | 248 | pdev = platform_device_alloc("pcf50633-regulator", i); |
| 249 | if (!pdev) { | 249 | if (!pdev) { |
| 250 | dev_err(pcf->dev, "Cannot create regulator %d\n", i); | 250 | dev_err(pcf->dev, "Cannot create regulator %d\n", i); |
| 251 | continue; | 251 | continue; |
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index 11e20afbdcac..705698fd2c7e 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c | |||
| @@ -1228,8 +1228,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) | |||
| 1228 | 1228 | ||
| 1229 | pcr->remove_pci = true; | 1229 | pcr->remove_pci = true; |
| 1230 | 1230 | ||
| 1231 | cancel_delayed_work(&pcr->carddet_work); | 1231 | /* Disable interrupts at the pcr level */ |
| 1232 | cancel_delayed_work(&pcr->idle_work); | 1232 | spin_lock_irq(&pcr->lock); |
| 1233 | rtsx_pci_writel(pcr, RTSX_BIER, 0); | ||
| 1234 | pcr->bier = 0; | ||
| 1235 | spin_unlock_irq(&pcr->lock); | ||
| 1236 | |||
| 1237 | cancel_delayed_work_sync(&pcr->carddet_work); | ||
| 1238 | cancel_delayed_work_sync(&pcr->idle_work); | ||
| 1233 | 1239 | ||
| 1234 | mfd_remove_devices(&pcidev->dev); | 1240 | mfd_remove_devices(&pcidev->dev); |
| 1235 | 1241 | ||
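The remove path now masks the interrupt sources at the controller (BIER = 0) before cancelling the delayed work, and switches to cancel_delayed_work_sync() so a handler that is still running, or that the ISR is just about to requeue, cannot outlive the device. A hedged sketch of that teardown ordering:

	static void example_remove(struct example_pcr *pcr)
	{
		/* 1. Stop the hardware from generating new interrupts, so the
		 *    ISR can no longer schedule fresh work items. */
		spin_lock_irq(&pcr->lock);
		mask_device_interrupts(pcr);		/* hypothetical helper */
		spin_unlock_irq(&pcr->lock);

		/* 2. Cancel and wait: the _sync variants guarantee no work item
		 *    is still executing when teardown proceeds. */
		cancel_delayed_work_sync(&pcr->carddet_work);
		cancel_delayed_work_sync(&pcr->idle_work);
	}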
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index d210d131fef2..0f55589a56b8 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c | |||
| @@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) | |||
| 73 | return -ENOMEM; | 73 | return -ENOMEM; |
| 74 | } | 74 | } |
| 75 | info->map.cached = | 75 | info->map.cached = |
| 76 | ioremap_cached(info->map.phys, info->map.size); | 76 | ioremap_cache(info->map.phys, info->map.size); |
| 77 | if (!info->map.cached) | 77 | if (!info->map.cached) |
| 78 | printk(KERN_WARNING "Failed to ioremap cached %s\n", | 78 | printk(KERN_WARNING "Failed to ioremap cached %s\n", |
| 79 | info->map.name); | 79 | info->map.name); |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 187b1b7772ef..4ced59436558 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
| @@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) | |||
| 2201 | 2201 | ||
| 2202 | port = &(SLAVE_AD_INFO(slave).port); | 2202 | port = &(SLAVE_AD_INFO(slave).port); |
| 2203 | 2203 | ||
| 2204 | // if slave is null, the whole port is not initialized | 2204 | /* if slave is null, the whole port is not initialized */ |
| 2205 | if (!port->slave) { | 2205 | if (!port->slave) { |
| 2206 | pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", | 2206 | pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", |
| 2207 | slave->bond->dev->name, slave->dev->name); | 2207 | slave->bond->dev->name, slave->dev->name); |
| 2208 | return; | 2208 | return; |
| 2209 | } | 2209 | } |
| 2210 | 2210 | ||
| 2211 | __get_state_machine_lock(port); | ||
| 2212 | |||
| 2211 | port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; | 2213 | port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; |
| 2212 | port->actor_oper_port_key = port->actor_admin_port_key |= | 2214 | port->actor_oper_port_key = port->actor_admin_port_key |= |
| 2213 | (__get_link_speed(port) << 1); | 2215 | (__get_link_speed(port) << 1); |
| 2214 | pr_debug("Port %d changed speed\n", port->actor_port_number); | 2216 | pr_debug("Port %d changed speed\n", port->actor_port_number); |
| 2215 | // there is no need to reselect a new aggregator, just signal the | 2217 | /* there is no need to reselect a new aggregator, just signal the |
| 2216 | // state machines to reinitialize | 2218 | * state machines to reinitialize |
| 2219 | */ | ||
| 2217 | port->sm_vars |= AD_PORT_BEGIN; | 2220 | port->sm_vars |= AD_PORT_BEGIN; |
| 2221 | |||
| 2222 | __release_state_machine_lock(port); | ||
| 2218 | } | 2223 | } |
| 2219 | 2224 | ||
| 2220 | /** | 2225 | /** |
| @@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) | |||
| 2229 | 2234 | ||
| 2230 | port = &(SLAVE_AD_INFO(slave).port); | 2235 | port = &(SLAVE_AD_INFO(slave).port); |
| 2231 | 2236 | ||
| 2232 | // if slave is null, the whole port is not initialized | 2237 | /* if slave is null, the whole port is not initialized */ |
| 2233 | if (!port->slave) { | 2238 | if (!port->slave) { |
| 2234 | pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", | 2239 | pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", |
| 2235 | slave->bond->dev->name, slave->dev->name); | 2240 | slave->bond->dev->name, slave->dev->name); |
| 2236 | return; | 2241 | return; |
| 2237 | } | 2242 | } |
| 2238 | 2243 | ||
| 2244 | __get_state_machine_lock(port); | ||
| 2245 | |||
| 2239 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; | 2246 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; |
| 2240 | port->actor_oper_port_key = port->actor_admin_port_key |= | 2247 | port->actor_oper_port_key = port->actor_admin_port_key |= |
| 2241 | __get_duplex(port); | 2248 | __get_duplex(port); |
| 2242 | pr_debug("Port %d changed duplex\n", port->actor_port_number); | 2249 | pr_debug("Port %d changed duplex\n", port->actor_port_number); |
| 2243 | // there is no need to reselect a new aggregator, just signal the | 2250 | /* there is no need to reselect a new aggregator, just signal the |
| 2244 | // state machines to reinitialize | 2251 | * state machines to reinitialize |
| 2252 | */ | ||
| 2245 | port->sm_vars |= AD_PORT_BEGIN; | 2253 | port->sm_vars |= AD_PORT_BEGIN; |
| 2254 | |||
| 2255 | __release_state_machine_lock(port); | ||
| 2246 | } | 2256 | } |
| 2247 | 2257 | ||
| 2248 | /** | 2258 | /** |
| @@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) | |||
| 2258 | 2268 | ||
| 2259 | port = &(SLAVE_AD_INFO(slave).port); | 2269 | port = &(SLAVE_AD_INFO(slave).port); |
| 2260 | 2270 | ||
| 2261 | // if slave is null, the whole port is not initialized | 2271 | /* if slave is null, the whole port is not initialized */ |
| 2262 | if (!port->slave) { | 2272 | if (!port->slave) { |
| 2263 | pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", | 2273 | pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", |
| 2264 | slave->bond->dev->name, slave->dev->name); | 2274 | slave->bond->dev->name, slave->dev->name); |
| 2265 | return; | 2275 | return; |
| 2266 | } | 2276 | } |
| 2267 | 2277 | ||
| 2268 | // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed) | 2278 | __get_state_machine_lock(port); |
| 2269 | // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report | 2279 | /* on link down we are zeroing duplex and speed since |
| 2280 | * some of the adaptors(ce1000.lan) report full duplex/speed | ||
| 2281 | * instead of N/A(duplex) / 0(speed). | ||
| 2282 | * | ||
| 2283 | * on link up we are forcing recheck on the duplex and speed since | ||
| 2284 | * some of he adaptors(ce1000.lan) report. | ||
| 2285 | */ | ||
| 2270 | if (link == BOND_LINK_UP) { | 2286 | if (link == BOND_LINK_UP) { |
| 2271 | port->is_enabled = true; | 2287 | port->is_enabled = true; |
| 2272 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; | 2288 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; |
| @@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) | |||
| 2282 | port->actor_oper_port_key = (port->actor_admin_port_key &= | 2298 | port->actor_oper_port_key = (port->actor_admin_port_key &= |
| 2283 | ~AD_SPEED_KEY_BITS); | 2299 | ~AD_SPEED_KEY_BITS); |
| 2284 | } | 2300 | } |
| 2285 | //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); | 2301 | pr_debug("Port %d changed link status to %s", |
| 2286 | // there is no need to reselect a new aggregator, just signal the | 2302 | port->actor_port_number, |
| 2287 | // state machines to reinitialize | 2303 | (link == BOND_LINK_UP) ? "UP" : "DOWN"); |
| 2304 | /* there is no need to reselect a new aggregator, just signal the | ||
| 2305 | * state machines to reinitialize | ||
| 2306 | */ | ||
| 2288 | port->sm_vars |= AD_PORT_BEGIN; | 2307 | port->sm_vars |= AD_PORT_BEGIN; |
| 2308 | |||
| 2309 | __release_state_machine_lock(port); | ||
| 2289 | } | 2310 | } |
| 2290 | 2311 | ||
| 2291 | /* | 2312 | /* |
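Besides converting the C99 comments to kernel style and turning the commented-out BOND_PRINT_DBG into a pr_debug(), all three notification paths (speed, duplex, link state) now take the per-port state-machine lock around the key update and the AD_PORT_BEGIN signal, so the periodic state machine can never observe a half-updated actor key. A hedged sketch of the pattern shared by the three hunks:

	static void example_port_changed(struct port *port)
	{
		__get_state_machine_lock(port);

		/* The key update and the reinitialise request must be seen
		 * atomically by the 802.3ad state machines. */
		port->actor_oper_port_key = recompute_key(port);	/* hypothetical */
		port->sm_vars |= AD_PORT_BEGIN;

		__release_state_machine_lock(port);
	}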
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 398e299ee1bd..4b8c58b0ec24 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond, | |||
| 3732 | } | 3732 | } |
| 3733 | 3733 | ||
| 3734 | 3734 | ||
| 3735 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) | 3735 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 3736 | void *accel_priv) | ||
| 3736 | { | 3737 | { |
| 3737 | /* | 3738 | /* |
| 3738 | * This helper function exists to help dev_pick_tx get the correct | 3739 | * This helper function exists to help dev_pick_tx get the correct |
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 5f9a7ad9b964..8aeec0b4601a 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c | |||
| @@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev) | |||
| 625 | usb_unanchor_urb(urb); | 625 | usb_unanchor_urb(urb); |
| 626 | usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, | 626 | usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, |
| 627 | urb->transfer_dma); | 627 | urb->transfer_dma); |
| 628 | usb_free_urb(urb); | ||
| 628 | break; | 629 | break; |
| 629 | } | 630 | } |
| 630 | 631 | ||
| @@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne | |||
| 798 | * allowed (MAX_TX_URBS). | 799 | * allowed (MAX_TX_URBS). |
| 799 | */ | 800 | */ |
| 800 | if (!context) { | 801 | if (!context) { |
| 801 | usb_unanchor_urb(urb); | ||
| 802 | usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); | 802 | usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); |
| 803 | usb_free_urb(urb); | ||
| 803 | 804 | ||
| 804 | netdev_warn(netdev, "couldn't find free context\n"); | 805 | netdev_warn(netdev, "couldn't find free context\n"); |
| 805 | 806 | ||
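Both ems_usb error paths now also drop the reference handed out by usb_alloc_urb(), closing an URB leak: the coherent transfer buffer was already freed, but the URB object itself was not. A hedged sketch of the receive-start error path after the change:

	urb = usb_alloc_urb(0, GFP_KERNEL);
	buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
				 &urb->transfer_dma);
	/* ... fill, anchor and submit the urb ... */
	if (err) {
		usb_unanchor_urb(urb);
		usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
				  urb->transfer_dma);
		usb_free_urb(urb);	/* drop the allocation reference too */
		break;
	}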
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 8ee9d1556e6e..263dd921edc4 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c | |||
| @@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) | |||
| 927 | /* set LED in default state (end of init phase) */ | 927 | /* set LED in default state (end of init phase) */ |
| 928 | pcan_usb_pro_set_led(dev, 0, 1); | 928 | pcan_usb_pro_set_led(dev, 0, 1); |
| 929 | 929 | ||
| 930 | kfree(bi); | ||
| 931 | kfree(fi); | ||
| 932 | |||
| 930 | return 0; | 933 | return 0; |
| 931 | 934 | ||
| 932 | err_out: | 935 | err_out: |
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index b2ffad1304d2..248baf6273fb 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
| @@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
| 565 | /* Make sure pointer to data buffer is set */ | 565 | /* Make sure pointer to data buffer is set */ |
| 566 | wmb(); | 566 | wmb(); |
| 567 | 567 | ||
| 568 | skb_tx_timestamp(skb); | ||
| 569 | |||
| 568 | *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); | 570 | *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); |
| 569 | 571 | ||
| 570 | /* Increment index to point to the next BD */ | 572 | /* Increment index to point to the next BD */ |
| @@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
| 579 | 581 | ||
| 580 | arc_reg_set(priv, R_STATUS, TXPL_MASK); | 582 | arc_reg_set(priv, R_STATUS, TXPL_MASK); |
| 581 | 583 | ||
| 582 | skb_tx_timestamp(skb); | ||
| 583 | |||
| 584 | return NETDEV_TX_OK; | 584 | return NETDEV_TX_OK; |
| 585 | } | 585 | } |
| 586 | 586 | ||
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index a36a760ada28..29801750f239 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c | |||
| @@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) | |||
| 145 | * Mask some pcie error bits | 145 | * Mask some pcie error bits |
| 146 | */ | 146 | */ |
| 147 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); | 147 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); |
| 148 | pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); | 148 | if (pos) { |
| 149 | data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); | 149 | pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); |
| 150 | pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); | 150 | data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); |
| 151 | pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); | ||
| 152 | } | ||
| 151 | /* clear error status */ | 153 | /* clear error status */ |
| 152 | pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, | 154 | pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, |
| 153 | PCI_EXP_DEVSTA_NFED | | 155 | PCI_EXP_DEVSTA_NFED | |
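pci_find_ext_capability() returns 0 when the requested extended capability does not exist, and the old code would then have read and rewritten config space at the bogus offset 0 + PCI_ERR_UNCOR_SEVER on parts without AER. The fix applies the usual guard for optional PCI capabilities:

	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);

	if (pos) {	/* only touch AER registers when they are present */
		/* read, mask and write back the uncorrectable-error severities */
	}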
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index a1f66e2c9a86..ec6119089b82 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
| @@ -520,10 +520,12 @@ struct bnx2x_fastpath { | |||
| 520 | #define BNX2X_FP_STATE_IDLE 0 | 520 | #define BNX2X_FP_STATE_IDLE 0 |
| 521 | #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ | 521 | #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ |
| 522 | #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ | 522 | #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ |
| 523 | #define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ | 523 | #define BNX2X_FP_STATE_DISABLED (1 << 2) |
| 524 | #define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ | 524 | #define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ |
| 525 | #define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ | ||
| 526 | #define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) | ||
| 525 | #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) | 527 | #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) |
| 526 | #define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) | 528 | #define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) |
| 527 | #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) | 529 | #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) |
| 528 | /* protect state */ | 530 | /* protect state */ |
| 529 | spinlock_t lock; | 531 | spinlock_t lock; |
| @@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) | |||
| 613 | { | 615 | { |
| 614 | bool rc = true; | 616 | bool rc = true; |
| 615 | 617 | ||
| 616 | spin_lock(&fp->lock); | 618 | spin_lock_bh(&fp->lock); |
| 617 | if (fp->state & BNX2X_FP_LOCKED) { | 619 | if (fp->state & BNX2X_FP_LOCKED) { |
| 618 | WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); | 620 | WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); |
| 619 | fp->state |= BNX2X_FP_STATE_NAPI_YIELD; | 621 | fp->state |= BNX2X_FP_STATE_NAPI_YIELD; |
| @@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) | |||
| 622 | /* we don't care if someone yielded */ | 624 | /* we don't care if someone yielded */ |
| 623 | fp->state = BNX2X_FP_STATE_NAPI; | 625 | fp->state = BNX2X_FP_STATE_NAPI; |
| 624 | } | 626 | } |
| 625 | spin_unlock(&fp->lock); | 627 | spin_unlock_bh(&fp->lock); |
| 626 | return rc; | 628 | return rc; |
| 627 | } | 629 | } |
| 628 | 630 | ||
| @@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) | |||
| 631 | { | 633 | { |
| 632 | bool rc = false; | 634 | bool rc = false; |
| 633 | 635 | ||
| 634 | spin_lock(&fp->lock); | 636 | spin_lock_bh(&fp->lock); |
| 635 | WARN_ON(fp->state & | 637 | WARN_ON(fp->state & |
| 636 | (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); | 638 | (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); |
| 637 | 639 | ||
| 638 | if (fp->state & BNX2X_FP_STATE_POLL_YIELD) | 640 | if (fp->state & BNX2X_FP_STATE_POLL_YIELD) |
| 639 | rc = true; | 641 | rc = true; |
| 640 | fp->state = BNX2X_FP_STATE_IDLE; | 642 | |
| 641 | spin_unlock(&fp->lock); | 643 | /* state ==> idle, unless currently disabled */ |
| 644 | fp->state &= BNX2X_FP_STATE_DISABLED; | ||
| 645 | spin_unlock_bh(&fp->lock); | ||
| 642 | return rc; | 646 | return rc; |
| 643 | } | 647 | } |
| 644 | 648 | ||
| @@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) | |||
| 669 | 673 | ||
| 670 | if (fp->state & BNX2X_FP_STATE_POLL_YIELD) | 674 | if (fp->state & BNX2X_FP_STATE_POLL_YIELD) |
| 671 | rc = true; | 675 | rc = true; |
| 672 | fp->state = BNX2X_FP_STATE_IDLE; | 676 | |
| 677 | /* state ==> idle, unless currently disabled */ | ||
| 678 | fp->state &= BNX2X_FP_STATE_DISABLED; | ||
| 673 | spin_unlock_bh(&fp->lock); | 679 | spin_unlock_bh(&fp->lock); |
| 674 | return rc; | 680 | return rc; |
| 675 | } | 681 | } |
| @@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) | |||
| 677 | /* true if a socket is polling, even if it did not get the lock */ | 683 | /* true if a socket is polling, even if it did not get the lock */ |
| 678 | static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) | 684 | static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) |
| 679 | { | 685 | { |
| 680 | WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); | 686 | WARN_ON(!(fp->state & BNX2X_FP_OWNED)); |
| 681 | return fp->state & BNX2X_FP_USER_PEND; | 687 | return fp->state & BNX2X_FP_USER_PEND; |
| 682 | } | 688 | } |
| 689 | |||
| 690 | /* false if fp is currently owned */ | ||
| 691 | static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) | ||
| 692 | { | ||
| 693 | int rc = true; | ||
| 694 | |||
| 695 | spin_lock_bh(&fp->lock); | ||
| 696 | if (fp->state & BNX2X_FP_OWNED) | ||
| 697 | rc = false; | ||
| 698 | fp->state |= BNX2X_FP_STATE_DISABLED; | ||
| 699 | spin_unlock_bh(&fp->lock); | ||
| 700 | |||
| 701 | return rc; | ||
| 702 | } | ||
| 683 | #else | 703 | #else |
| 684 | static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) | 704 | static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) |
| 685 | { | 705 | { |
| @@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) | |||
| 709 | { | 729 | { |
| 710 | return false; | 730 | return false; |
| 711 | } | 731 | } |
| 732 | static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) | ||
| 733 | { | ||
| 734 | return true; | ||
| 735 | } | ||
| 712 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | 736 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 713 | 737 | ||
| 714 | /* Use 2500 as a mini-jumbo MTU for FCoE */ | 738 | /* Use 2500 as a mini-jumbo MTU for FCoE */ |
| @@ -1250,7 +1274,10 @@ struct bnx2x_slowpath { | |||
| 1250 | * Therefore, if they would have been defined in the same union, | 1274 | * Therefore, if they would have been defined in the same union, |
| 1251 | * data can get corrupted. | 1275 | * data can get corrupted. |
| 1252 | */ | 1276 | */ |
| 1253 | struct afex_vif_list_ramrod_data func_afex_rdata; | 1277 | union { |
| 1278 | struct afex_vif_list_ramrod_data viflist_data; | ||
| 1279 | struct function_update_data func_update; | ||
| 1280 | } func_afex_rdata; | ||
| 1254 | 1281 | ||
| 1255 | /* used by dmae command executer */ | 1282 | /* used by dmae command executer */ |
| 1256 | struct dmae_command dmae[MAX_DMAE_C]; | 1283 | struct dmae_command dmae[MAX_DMAE_C]; |
| @@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp); | |||
| 2499 | #define MCPR_SCRATCH_BASE(bp) \ | 2526 | #define MCPR_SCRATCH_BASE(bp) \ |
| 2500 | (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) | 2527 | (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) |
| 2501 | 2528 | ||
| 2529 | #define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) | ||
| 2530 | |||
| 2502 | #endif /* bnx2x.h */ | 2531 | #endif /* bnx2x.h */ |
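The busy-poll rework introduces an explicit DISABLED state for each fastpath ring: bnx2x_fp_ll_disable() refuses to succeed while NAPI or a busy-polling socket owns the ring and then latches the bit, the unlock helpers preserve it (fp->state &= BNX2X_FP_STATE_DISABLED) instead of resetting to IDLE, and every lock/unlock pair now uses the _bh spinlock variants because the socket-side poller runs in process context. A short recap of the state word, sketched from the definitions above:

	/* fp->state bits after the change:
	 *   NAPI       (1 << 0)  - NAPI owns the ring
	 *   POLL       (1 << 1)  - a busy-polling socket owns the ring
	 *   DISABLED   (1 << 2)  - ring is being torn down; refuse new owners
	 *   NAPI_YIELD (1 << 3), POLL_YIELD (1 << 4) - contention markers
	 *
	 * OWNED  = NAPI | POLL
	 * LOCKED = OWNED | DISABLED, so lock attempts fail for a disabled ring
	 * and the napi-disable paths below can spin on bnx2x_fp_ll_disable()
	 * instead of holding bottom halves off around the whole loop.
	 */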
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ec96130533cc..bf811565ee24 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
| @@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, | |||
| 160 | struct sk_buff *skb = tx_buf->skb; | 160 | struct sk_buff *skb = tx_buf->skb; |
| 161 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; | 161 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; |
| 162 | int nbd; | 162 | int nbd; |
| 163 | u16 split_bd_len = 0; | ||
| 163 | 164 | ||
| 164 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ | 165 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ |
| 165 | prefetch(&skb->end); | 166 | prefetch(&skb->end); |
| @@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, | |||
| 167 | DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", | 168 | DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", |
| 168 | txdata->txq_index, idx, tx_buf, skb); | 169 | txdata->txq_index, idx, tx_buf, skb); |
| 169 | 170 | ||
| 170 | /* unmap first bd */ | ||
| 171 | tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; | 171 | tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; |
| 172 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), | ||
| 173 | BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); | ||
| 174 | 172 | ||
| 175 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; | 173 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; |
| 176 | #ifdef BNX2X_STOP_ON_ERROR | 174 | #ifdef BNX2X_STOP_ON_ERROR |
| @@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, | |||
| 188 | --nbd; | 186 | --nbd; |
| 189 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | 187 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
| 190 | 188 | ||
| 191 | /* ...and the TSO split header bd since they have no mapping */ | 189 | /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ |
| 192 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { | 190 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { |
| 191 | tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; | ||
| 192 | split_bd_len = BD_UNMAP_LEN(tx_data_bd); | ||
| 193 | --nbd; | 193 | --nbd; |
| 194 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | 194 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | /* unmap first bd */ | ||
| 198 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), | ||
| 199 | BD_UNMAP_LEN(tx_start_bd) + split_bd_len, | ||
| 200 | DMA_TO_DEVICE); | ||
| 201 | |||
| 197 | /* now free frags */ | 202 | /* now free frags */ |
| 198 | while (nbd > 0) { | 203 | while (nbd > 0) { |
| 199 | 204 | ||
| @@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp) | |||
| 1790 | { | 1795 | { |
| 1791 | int i; | 1796 | int i; |
| 1792 | 1797 | ||
| 1793 | local_bh_disable(); | ||
| 1794 | for_each_rx_queue_cnic(bp, i) { | 1798 | for_each_rx_queue_cnic(bp, i) { |
| 1795 | napi_disable(&bnx2x_fp(bp, i, napi)); | 1799 | napi_disable(&bnx2x_fp(bp, i, napi)); |
| 1796 | while (!bnx2x_fp_lock_napi(&bp->fp[i])) | 1800 | while (!bnx2x_fp_ll_disable(&bp->fp[i])) |
| 1797 | mdelay(1); | 1801 | usleep_range(1000, 2000); |
| 1798 | } | 1802 | } |
| 1799 | local_bh_enable(); | ||
| 1800 | } | 1803 | } |
| 1801 | 1804 | ||
| 1802 | static void bnx2x_napi_disable(struct bnx2x *bp) | 1805 | static void bnx2x_napi_disable(struct bnx2x *bp) |
| 1803 | { | 1806 | { |
| 1804 | int i; | 1807 | int i; |
| 1805 | 1808 | ||
| 1806 | local_bh_disable(); | ||
| 1807 | for_each_eth_queue(bp, i) { | 1809 | for_each_eth_queue(bp, i) { |
| 1808 | napi_disable(&bnx2x_fp(bp, i, napi)); | 1810 | napi_disable(&bnx2x_fp(bp, i, napi)); |
| 1809 | while (!bnx2x_fp_lock_napi(&bp->fp[i])) | 1811 | while (!bnx2x_fp_ll_disable(&bp->fp[i])) |
| 1810 | mdelay(1); | 1812 | usleep_range(1000, 2000); |
| 1811 | } | 1813 | } |
| 1812 | local_bh_enable(); | ||
| 1813 | } | 1814 | } |
| 1814 | 1815 | ||
| 1815 | void bnx2x_netif_start(struct bnx2x *bp) | 1816 | void bnx2x_netif_start(struct bnx2x *bp) |
| @@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | |||
| 1832 | bnx2x_napi_disable_cnic(bp); | 1833 | bnx2x_napi_disable_cnic(bp); |
| 1833 | } | 1834 | } |
| 1834 | 1835 | ||
| 1835 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) | 1836 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 1837 | void *accel_priv) | ||
| 1836 | { | 1838 | { |
| 1837 | struct bnx2x *bp = netdev_priv(dev); | 1839 | struct bnx2x *bp = netdev_priv(dev); |
| 1838 | 1840 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index da8fcaa74495..41f3ca5ad972 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | |||
| @@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); | |||
| 524 | int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); | 524 | int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); |
| 525 | 525 | ||
| 526 | /* select_queue callback */ | 526 | /* select_queue callback */ |
| 527 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); | 527 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 528 | void *accel_priv); | ||
| 528 | 529 | ||
| 529 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | 530 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, |
| 530 | struct bnx2x_fastpath *fp, | 531 | struct bnx2x_fastpath *fp, |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 20dcc02431ca..11fc79585491 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
| @@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
| 3865 | 3865 | ||
| 3866 | bnx2x_warpcore_enable_AN_KR2(phy, params, vars); | 3866 | bnx2x_warpcore_enable_AN_KR2(phy, params, vars); |
| 3867 | } else { | 3867 | } else { |
| 3868 | /* Enable Auto-Detect to support 1G over CL37 as well */ | ||
| 3869 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
| 3870 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); | ||
| 3871 | |||
| 3872 | /* Force cl48 sync_status LOW to avoid getting stuck in CL73 | ||
| 3873 | * parallel-detect loop when CL73 and CL37 are enabled. | ||
| 3874 | */ | ||
| 3875 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, | ||
| 3876 | MDIO_AER_BLOCK_AER_REG, 0); | ||
| 3877 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
| 3878 | MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800); | ||
| 3879 | bnx2x_set_aer_mmd(params, phy); | ||
| 3880 | |||
| 3868 | bnx2x_disable_kr2(params, vars, phy); | 3881 | bnx2x_disable_kr2(params, vars, phy); |
| 3869 | } | 3882 | } |
| 3870 | 3883 | ||
| @@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
| 8120 | *edc_mode = EDC_MODE_ACTIVE_DAC; | 8133 | *edc_mode = EDC_MODE_ACTIVE_DAC; |
| 8121 | else | 8134 | else |
| 8122 | check_limiting_mode = 1; | 8135 | check_limiting_mode = 1; |
| 8123 | } else if (copper_module_type & | 8136 | } else { |
| 8124 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { | 8137 | *edc_mode = EDC_MODE_PASSIVE_DAC; |
| 8138 | /* Even in case PASSIVE_DAC indication is not set, | ||
| 8139 | * treat it as a passive DAC cable, since some cables | ||
| 8140 | * don't have this indication. | ||
| 8141 | */ | ||
| 8142 | if (copper_module_type & | ||
| 8143 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { | ||
| 8125 | DP(NETIF_MSG_LINK, | 8144 | DP(NETIF_MSG_LINK, |
| 8126 | "Passive Copper cable detected\n"); | 8145 | "Passive Copper cable detected\n"); |
| 8127 | *edc_mode = | 8146 | } else { |
| 8128 | EDC_MODE_PASSIVE_DAC; | 8147 | DP(NETIF_MSG_LINK, |
| 8129 | } else { | 8148 | "Unknown copper-cable-type\n"); |
| 8130 | DP(NETIF_MSG_LINK, | 8149 | } |
| 8131 | "Unknown copper-cable-type 0x%x !!!\n", | ||
| 8132 | copper_module_type); | ||
| 8133 | return -EINVAL; | ||
| 8134 | } | 8150 | } |
| 8135 | break; | 8151 | break; |
| 8136 | } | 8152 | } |
| @@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, | |||
| 10825 | (1<<11)); | 10841 | (1<<11)); |
| 10826 | 10842 | ||
| 10827 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | 10843 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && |
| 10828 | (phy->speed_cap_mask & | 10844 | (phy->speed_cap_mask & |
| 10829 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || | 10845 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || |
| 10830 | (phy->req_line_speed == SPEED_1000)) { | 10846 | (phy->req_line_speed == SPEED_1000)) { |
| 10831 | an_1000_val |= (1<<8); | 10847 | an_1000_val |= (1<<8); |
| 10832 | autoneg_val |= (1<<9 | 1<<12); | 10848 | autoneg_val |= (1<<9 | 1<<12); |
| 10833 | if (phy->req_duplex == DUPLEX_FULL) | 10849 | if (phy->req_duplex == DUPLEX_FULL) |
| @@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, | |||
| 10843 | 0x09, | 10859 | 0x09, |
| 10844 | &an_1000_val); | 10860 | &an_1000_val); |
| 10845 | 10861 | ||
| 10846 | /* Set 100 speed advertisement */ | 10862 | /* Advertise 10/100 link speed */ |
| 10847 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | 10863 | if (phy->req_line_speed == SPEED_AUTO_NEG) { |
| 10848 | (phy->speed_cap_mask & | 10864 | if (phy->speed_cap_mask & |
| 10849 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | | 10865 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { |
| 10850 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { | 10866 | an_10_100_val |= (1<<5); |
| 10851 | an_10_100_val |= (1<<7); | 10867 | autoneg_val |= (1<<9 | 1<<12); |
| 10852 | /* Enable autoneg and restart autoneg for legacy speeds */ | 10868 | DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); |
| 10853 | autoneg_val |= (1<<9 | 1<<12); | 10869 | } |
| 10854 | 10870 | if (phy->speed_cap_mask & | |
| 10855 | if (phy->req_duplex == DUPLEX_FULL) | 10871 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) { |
| 10856 | an_10_100_val |= (1<<8); | ||
| 10857 | DP(NETIF_MSG_LINK, "Advertising 100M\n"); | ||
| 10858 | } | ||
| 10859 | |||
| 10860 | /* Set 10 speed advertisement */ | ||
| 10861 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | ||
| 10862 | (phy->speed_cap_mask & | ||
| 10863 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | | ||
| 10864 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { | ||
| 10865 | an_10_100_val |= (1<<5); | ||
| 10866 | autoneg_val |= (1<<9 | 1<<12); | ||
| 10867 | if (phy->req_duplex == DUPLEX_FULL) | ||
| 10868 | an_10_100_val |= (1<<6); | 10872 | an_10_100_val |= (1<<6); |
| 10869 | DP(NETIF_MSG_LINK, "Advertising 10M\n"); | 10873 | autoneg_val |= (1<<9 | 1<<12); |
| 10874 | DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); | ||
| 10875 | } | ||
| 10876 | if (phy->speed_cap_mask & | ||
| 10877 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { | ||
| 10878 | an_10_100_val |= (1<<7); | ||
| 10879 | autoneg_val |= (1<<9 | 1<<12); | ||
| 10880 | DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); | ||
| 10881 | } | ||
| 10882 | if (phy->speed_cap_mask & | ||
| 10883 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { | ||
| 10884 | an_10_100_val |= (1<<8); | ||
| 10885 | autoneg_val |= (1<<9 | 1<<12); | ||
| 10886 | DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); | ||
| 10887 | } | ||
| 10870 | } | 10888 | } |
| 10871 | 10889 | ||
| 10872 | /* Only 10/100 are allowed to work in FORCE mode */ | 10890 | /* Only 10/100 are allowed to work in FORCE mode */ |
| @@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, | |||
| 13342 | DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, | 13360 | DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, |
| 13343 | old_status, status); | 13361 | old_status, status); |
| 13344 | 13362 | ||
| 13363 | /* Do not touch the link in case physical link down */ | ||
| 13364 | if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) | ||
| 13365 | return 1; | ||
| 13366 | |||
| 13345 | /* a. Update shmem->link_status accordingly | 13367 | /* a. Update shmem->link_status accordingly |
| 13346 | * b. Update link_vars->link_up | 13368 | * b. Update link_vars->link_up |
| 13347 | */ | 13369 | */ |
| @@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
| 13550 | */ | 13572 | */ |
| 13551 | not_kr2_device = (((base_page & 0x8000) == 0) || | 13573 | not_kr2_device = (((base_page & 0x8000) == 0) || |
| 13552 | (((base_page & 0x8000) && | 13574 | (((base_page & 0x8000) && |
| 13553 | ((next_page & 0xe0) == 0x2)))); | 13575 | ((next_page & 0xe0) == 0x20)))); |
| 13554 | 13576 | ||
| 13555 | /* In case KR2 is already disabled, check if we need to re-enable it */ | 13577 | /* In case KR2 is already disabled, check if we need to re-enable it */ |
| 13556 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | 13578 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { |
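
The next_page comparison above is a pure bit-arithmetic fix: 0xe0 masks bits 7:5, so the masked value is always a multiple of 0x20 and could never equal 0x2, which means the next-page clause of the not_kr2_device test could never fire. A standalone illustration (the next_page value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int next_page = 0x23;	/* made-up link-partner next-page word */

		/* old test: (next_page & 0xe0) is 0x20, which can never equal 0x2 */
		printf("old check fires: %d\n", (next_page & 0xe0) == 0x2);
		/* fixed test compares against a value the 0xe0 mask can produce */
		printf("new check fires: %d\n", (next_page & 0xe0) == 0x20);
		return 0;
	}
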
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 814d0eca9b33..8b3107b2fcc1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) | |||
| 11447 | } | 11447 | } |
| 11448 | } | 11448 | } |
| 11449 | 11449 | ||
| 11450 | /* adjust igu_sb_cnt to MF for E1x */ | 11450 | /* adjust igu_sb_cnt to MF for E1H */ |
| 11451 | if (CHIP_IS_E1x(bp) && IS_MF(bp)) | 11451 | if (CHIP_IS_E1H(bp) && IS_MF(bp)) |
| 11452 | bp->igu_sb_cnt /= E1HVN_MAX; | 11452 | bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); |
| 11453 | 11453 | ||
| 11454 | /* port info */ | 11454 | /* port info */ |
| 11455 | bnx2x_get_port_hwinfo(bp); | 11455 | bnx2x_get_port_hwinfo(bp); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 3efbb35267c8..14ffb6e56e59 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | |||
| @@ -7179,6 +7179,7 @@ The other bits are reserved and should be zero*/ | |||
| 7179 | #define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca | 7179 | #define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca |
| 7180 | #define MDIO_WC_REG_RX2_PCI_CTRL 0x80da | 7180 | #define MDIO_WC_REG_RX2_PCI_CTRL 0x80da |
| 7181 | #define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea | 7181 | #define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea |
| 7182 | #define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa | ||
| 7182 | #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 | 7183 | #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 |
| 7183 | #define MDIO_WC_REG_XGXS_STATUS3 0x8129 | 7184 | #define MDIO_WC_REG_XGXS_STATUS3 0x8129 |
| 7184 | #define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 | 7185 | #define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 32c92abf5094..18438a504d57 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | |||
| @@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, | |||
| 2038 | struct bnx2x_vlan_mac_ramrod_params p; | 2038 | struct bnx2x_vlan_mac_ramrod_params p; |
| 2039 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | 2039 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; |
| 2040 | struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; | 2040 | struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; |
| 2041 | unsigned long flags; | ||
| 2041 | int read_lock; | 2042 | int read_lock; |
| 2042 | int rc = 0; | 2043 | int rc = 0; |
| 2043 | 2044 | ||
| @@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, | |||
| 2046 | spin_lock_bh(&exeq->lock); | 2047 | spin_lock_bh(&exeq->lock); |
| 2047 | 2048 | ||
| 2048 | list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { | 2049 | list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { |
| 2049 | if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == | 2050 | flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; |
| 2050 | *vlan_mac_flags) { | 2051 | if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == |
| 2052 | BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { | ||
| 2051 | rc = exeq->remove(bp, exeq->owner, exeq_pos); | 2053 | rc = exeq->remove(bp, exeq->owner, exeq_pos); |
| 2052 | if (rc) { | 2054 | if (rc) { |
| 2053 | BNX2X_ERR("Failed to remove command\n"); | 2055 | BNX2X_ERR("Failed to remove command\n"); |
| @@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, | |||
| 2080 | return read_lock; | 2082 | return read_lock; |
| 2081 | 2083 | ||
| 2082 | list_for_each_entry(pos, &o->head, link) { | 2084 | list_for_each_entry(pos, &o->head, link) { |
| 2083 | if (pos->vlan_mac_flags == *vlan_mac_flags) { | 2085 | flags = pos->vlan_mac_flags; |
| 2086 | if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == | ||
| 2087 | BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { | ||
| 2084 | p.user_req.vlan_mac_flags = pos->vlan_mac_flags; | 2088 | p.user_req.vlan_mac_flags = pos->vlan_mac_flags; |
| 2085 | memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); | 2089 | memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); |
| 2086 | rc = bnx2x_config_vlan_mac(bp, &p); | 2090 | rc = bnx2x_config_vlan_mac(bp, &p); |
| @@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp, | |||
| 4382 | struct bnx2x_raw_obj *r = &o->raw; | 4386 | struct bnx2x_raw_obj *r = &o->raw; |
| 4383 | 4387 | ||
| 4384 | /* Do nothing if only driver cleanup was requested */ | 4388 | /* Do nothing if only driver cleanup was requested */ |
| 4385 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) | 4389 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { |
| 4390 | DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n", | ||
| 4391 | p->ramrod_flags); | ||
| 4386 | return 0; | 4392 | return 0; |
| 4393 | } | ||
| 4387 | 4394 | ||
| 4388 | r->set_pending(r); | 4395 | r->set_pending(r); |
| 4389 | 4396 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 658f4e33abf9..6a53c15c85a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | |||
| @@ -266,6 +266,13 @@ enum { | |||
| 266 | BNX2X_DONT_CONSUME_CAM_CREDIT, | 266 | BNX2X_DONT_CONSUME_CAM_CREDIT, |
| 267 | BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, | 267 | BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, |
| 268 | }; | 268 | }; |
| 269 | /* When looking for matching filters, some flags are not interesting */ | ||
| 270 | #define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ | ||
| 271 | 1 << BNX2X_ETH_MAC | \ | ||
| 272 | 1 << BNX2X_ISCSI_ETH_MAC | \ | ||
| 273 | 1 << BNX2X_NETQ_ETH_MAC) | ||
| 274 | #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ | ||
| 275 | ((flags) & BNX2X_VLAN_MAC_CMP_MASK) | ||
| 269 | 276 | ||
| 270 | struct bnx2x_vlan_mac_ramrod_params { | 277 | struct bnx2x_vlan_mac_ramrod_params { |
| 271 | /* Object to run the command from */ | 278 | /* Object to run the command from */ |
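
A minimal sketch of what the new comparison macros buy in bnx2x_vlan_mac_del_all(): flag words that differ only in bits outside BNX2X_VLAN_MAC_CMP_MASK (for example the DONT_CONSUME_CAM_CREDIT bits) now compare as the same filter class. The concrete flag values below are hypothetical:

	unsigned long a = 1 << BNX2X_ETH_MAC;
	unsigned long b = (1 << BNX2X_ETH_MAC) |
			  (1 << BNX2X_DONT_CONSUME_CAM_CREDIT);

	/* a != b, yet both reduce to the same value under the mask,
	 * so a delete-all request matches entries queued with either word
	 */
	bool same = BNX2X_VLAN_MAC_CMP_FLAGS(a) == BNX2X_VLAN_MAC_CMP_FLAGS(b);
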
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 2e46c28fc601..e7845e5be1c7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
| @@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
| 1209 | /* next state */ | 1209 | /* next state */ |
| 1210 | vfop->state = BNX2X_VFOP_RXMODE_DONE; | 1210 | vfop->state = BNX2X_VFOP_RXMODE_DONE; |
| 1211 | 1211 | ||
| 1212 | /* record the accept flags in vfdb so hypervisor can modify them | ||
| 1213 | * if necessary | ||
| 1214 | */ | ||
| 1215 | bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) = | ||
| 1216 | ramrod->rx_accept_flags; | ||
| 1212 | vfop->rc = bnx2x_config_rx_mode(bp, ramrod); | 1217 | vfop->rc = bnx2x_config_rx_mode(bp, ramrod); |
| 1213 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | 1218 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); |
| 1214 | op_err: | 1219 | op_err: |
| @@ -1224,39 +1229,43 @@ op_pending: | |||
| 1224 | return; | 1229 | return; |
| 1225 | } | 1230 | } |
| 1226 | 1231 | ||
| 1232 | static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, | ||
| 1233 | struct bnx2x_rx_mode_ramrod_params *ramrod, | ||
| 1234 | struct bnx2x_virtf *vf, | ||
| 1235 | unsigned long accept_flags) | ||
| 1236 | { | ||
| 1237 | struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); | ||
| 1238 | |||
| 1239 | memset(ramrod, 0, sizeof(*ramrod)); | ||
| 1240 | ramrod->cid = vfq->cid; | ||
| 1241 | ramrod->cl_id = vfq_cl_id(vf, vfq); | ||
| 1242 | ramrod->rx_mode_obj = &bp->rx_mode_obj; | ||
| 1243 | ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); | ||
| 1244 | ramrod->rx_accept_flags = accept_flags; | ||
| 1245 | ramrod->tx_accept_flags = accept_flags; | ||
| 1246 | ramrod->pstate = &vf->filter_state; | ||
| 1247 | ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; | ||
| 1248 | |||
| 1249 | set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); | ||
| 1250 | set_bit(RAMROD_RX, &ramrod->ramrod_flags); | ||
| 1251 | set_bit(RAMROD_TX, &ramrod->ramrod_flags); | ||
| 1252 | |||
| 1253 | ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); | ||
| 1254 | ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); | ||
| 1255 | } | ||
| 1256 | |||
| 1227 | int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, | 1257 | int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, |
| 1228 | struct bnx2x_virtf *vf, | 1258 | struct bnx2x_virtf *vf, |
| 1229 | struct bnx2x_vfop_cmd *cmd, | 1259 | struct bnx2x_vfop_cmd *cmd, |
| 1230 | int qid, unsigned long accept_flags) | 1260 | int qid, unsigned long accept_flags) |
| 1231 | { | 1261 | { |
| 1232 | struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); | ||
| 1233 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | 1262 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); |
| 1234 | 1263 | ||
| 1235 | if (vfop) { | 1264 | if (vfop) { |
| 1236 | struct bnx2x_rx_mode_ramrod_params *ramrod = | 1265 | struct bnx2x_rx_mode_ramrod_params *ramrod = |
| 1237 | &vf->op_params.rx_mode; | 1266 | &vf->op_params.rx_mode; |
| 1238 | 1267 | ||
| 1239 | memset(ramrod, 0, sizeof(*ramrod)); | 1268 | bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags); |
| 1240 | |||
| 1241 | /* Prepare ramrod parameters */ | ||
| 1242 | ramrod->cid = vfq->cid; | ||
| 1243 | ramrod->cl_id = vfq_cl_id(vf, vfq); | ||
| 1244 | ramrod->rx_mode_obj = &bp->rx_mode_obj; | ||
| 1245 | ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); | ||
| 1246 | |||
| 1247 | ramrod->rx_accept_flags = accept_flags; | ||
| 1248 | ramrod->tx_accept_flags = accept_flags; | ||
| 1249 | ramrod->pstate = &vf->filter_state; | ||
| 1250 | ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; | ||
| 1251 | |||
| 1252 | set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); | ||
| 1253 | set_bit(RAMROD_RX, &ramrod->ramrod_flags); | ||
| 1254 | set_bit(RAMROD_TX, &ramrod->ramrod_flags); | ||
| 1255 | |||
| 1256 | ramrod->rdata = | ||
| 1257 | bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); | ||
| 1258 | ramrod->rdata_mapping = | ||
| 1259 | bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); | ||
| 1260 | 1269 | ||
| 1261 | bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, | 1270 | bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, |
| 1262 | bnx2x_vfop_rxmode, cmd->done); | 1271 | bnx2x_vfop_rxmode, cmd->done); |
| @@ -3202,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp) | |||
| 3202 | bnx2x_iov_static_resc(bp, vf); | 3211 | bnx2x_iov_static_resc(bp, vf); |
| 3203 | } | 3212 | } |
| 3204 | 3213 | ||
| 3205 | /* prepare msix vectors in VF configuration space */ | 3214 | /* prepare msix vectors in VF configuration space - the value in the |
| 3215 | * PCI configuration space should be the index of the last entry, | ||
| 3216 | * namely one less than the actual size of the table | ||
| 3217 | */ | ||
| 3206 | for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { | 3218 | for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { |
| 3207 | bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); | 3219 | bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); |
| 3208 | REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, | 3220 | REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, |
| 3209 | num_vf_queues); | 3221 | num_vf_queues - 1); |
| 3210 | DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", | 3222 | DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", |
| 3211 | vf_idx, num_vf_queues); | 3223 | vf_idx, num_vf_queues - 1); |
| 3212 | } | 3224 | } |
| 3213 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); | 3225 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); |
| 3214 | 3226 | ||
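
The MSI-X tweak above is the standard PCI N-1 encoding: the MSI-X Table Size field holds the index of the last valid entry, not the number of entries. For instance, with the register name from the hunk and an illustrative queue count:

	u8 num_vf_queues = 4;	/* VF is given 4 queues, hence 4 MSI-X vectors */

	/* table size is encoded as "last valid index", i.e. count - 1 */
	REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
	       num_vf_queues - 1);	/* writes 3, not 4 */
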
| @@ -3436,10 +3448,18 @@ out: | |||
| 3436 | 3448 | ||
| 3437 | int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | 3449 | int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) |
| 3438 | { | 3450 | { |
| 3451 | struct bnx2x_queue_state_params q_params = {NULL}; | ||
| 3452 | struct bnx2x_vlan_mac_ramrod_params ramrod_param; | ||
| 3453 | struct bnx2x_queue_update_params *update_params; | ||
| 3454 | struct pf_vf_bulletin_content *bulletin = NULL; | ||
| 3455 | struct bnx2x_rx_mode_ramrod_params rx_ramrod; | ||
| 3439 | struct bnx2x *bp = netdev_priv(dev); | 3456 | struct bnx2x *bp = netdev_priv(dev); |
| 3440 | int rc, q_logical_state; | 3457 | struct bnx2x_vlan_mac_obj *vlan_obj; |
| 3458 | unsigned long vlan_mac_flags = 0; | ||
| 3459 | unsigned long ramrod_flags = 0; | ||
| 3441 | struct bnx2x_virtf *vf = NULL; | 3460 | struct bnx2x_virtf *vf = NULL; |
| 3442 | struct pf_vf_bulletin_content *bulletin = NULL; | 3461 | unsigned long accept_flags; |
| 3462 | int rc; | ||
| 3443 | 3463 | ||
| 3444 | /* sanity and init */ | 3464 | /* sanity and init */ |
| 3445 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); | 3465 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
| @@ -3457,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | |||
| 3457 | /* update PF's copy of the VF's bulletin. No point in posting the vlan | 3477 | /* update PF's copy of the VF's bulletin. No point in posting the vlan |
| 3458 | * to the VF since it doesn't have anything to do with it. But it is useful | 3478 | * to the VF since it doesn't have anything to do with it. But it is useful |
| 3459 | * to store it here in case the VF is not up yet and we can only | 3479 | * to store it here in case the VF is not up yet and we can only |
| 3460 | * configure the vlan later when it does. | 3480 | * configure the vlan later when it does. Treat vlan id 0 as remove the |
| 3481 | * Host tag. | ||
| 3461 | */ | 3482 | */ |
| 3462 | bulletin->valid_bitmap |= 1 << VLAN_VALID; | 3483 | if (vlan > 0) |
| 3484 | bulletin->valid_bitmap |= 1 << VLAN_VALID; | ||
| 3485 | else | ||
| 3486 | bulletin->valid_bitmap &= ~(1 << VLAN_VALID); | ||
| 3463 | bulletin->vlan = vlan; | 3487 | bulletin->vlan = vlan; |
| 3464 | 3488 | ||
| 3465 | /* is vf initialized and queue set up? */ | 3489 | /* is vf initialized and queue set up? */ |
| 3466 | q_logical_state = | 3490 | if (vf->state != VF_ENABLED || |
| 3467 | bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); | 3491 | bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != |
| 3468 | if (vf->state == VF_ENABLED && | 3492 | BNX2X_Q_LOGICAL_STATE_ACTIVE) |
| 3469 | q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { | 3493 | return rc; |
| 3470 | /* configure the vlan in device on this vf's queue */ | ||
| 3471 | unsigned long ramrod_flags = 0; | ||
| 3472 | unsigned long vlan_mac_flags = 0; | ||
| 3473 | struct bnx2x_vlan_mac_obj *vlan_obj = | ||
| 3474 | &bnx2x_leading_vfq(vf, vlan_obj); | ||
| 3475 | struct bnx2x_vlan_mac_ramrod_params ramrod_param; | ||
| 3476 | struct bnx2x_queue_state_params q_params = {NULL}; | ||
| 3477 | struct bnx2x_queue_update_params *update_params; | ||
| 3478 | 3494 | ||
| 3479 | rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); | 3495 | /* configure the vlan in device on this vf's queue */ |
| 3480 | if (rc) | 3496 | vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); |
| 3481 | return rc; | 3497 | rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); |
| 3482 | memset(&ramrod_param, 0, sizeof(ramrod_param)); | 3498 | if (rc) |
| 3499 | return rc; | ||
| 3483 | 3500 | ||
| 3484 | /* must lock vfpf channel to protect against vf flows */ | 3501 | /* must lock vfpf channel to protect against vf flows */ |
| 3485 | bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); | 3502 | bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); |
| 3486 | 3503 | ||
| 3487 | /* remove existing vlans */ | 3504 | /* remove existing vlans */ |
| 3488 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | 3505 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); |
| 3489 | rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, | 3506 | rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, |
| 3490 | &ramrod_flags); | 3507 | &ramrod_flags); |
| 3491 | if (rc) { | 3508 | if (rc) { |
| 3492 | BNX2X_ERR("failed to delete vlans\n"); | 3509 | BNX2X_ERR("failed to delete vlans\n"); |
| 3493 | rc = -EINVAL; | 3510 | rc = -EINVAL; |
| 3494 | goto out; | 3511 | goto out; |
| 3495 | } | 3512 | } |
| 3513 | |||
| 3514 | /* need to remove/add the VF's accept_any_vlan bit */ | ||
| 3515 | accept_flags = bnx2x_leading_vfq(vf, accept_flags); | ||
| 3516 | if (vlan) | ||
| 3517 | clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); | ||
| 3518 | else | ||
| 3519 | set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); | ||
| 3520 | |||
| 3521 | bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, | ||
| 3522 | accept_flags); | ||
| 3523 | bnx2x_leading_vfq(vf, accept_flags) = accept_flags; | ||
| 3524 | bnx2x_config_rx_mode(bp, &rx_ramrod); | ||
| 3525 | |||
| 3526 | /* configure the new vlan to device */ | ||
| 3527 | memset(&ramrod_param, 0, sizeof(ramrod_param)); | ||
| 3528 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
| 3529 | ramrod_param.vlan_mac_obj = vlan_obj; | ||
| 3530 | ramrod_param.ramrod_flags = ramrod_flags; | ||
| 3531 | set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
| 3532 | &ramrod_param.user_req.vlan_mac_flags); | ||
| 3533 | ramrod_param.user_req.u.vlan.vlan = vlan; | ||
| 3534 | ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; | ||
| 3535 | rc = bnx2x_config_vlan_mac(bp, &ramrod_param); | ||
| 3536 | if (rc) { | ||
| 3537 | BNX2X_ERR("failed to configure vlan\n"); | ||
| 3538 | rc = -EINVAL; | ||
| 3539 | goto out; | ||
| 3540 | } | ||
| 3496 | 3541 | ||
| 3497 | /* send queue update ramrod to configure default vlan and silent | 3542 | /* send queue update ramrod to configure default vlan and silent |
| 3498 | * vlan removal | 3543 | * vlan removal |
| 3544 | */ | ||
| 3545 | __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); | ||
| 3546 | q_params.cmd = BNX2X_Q_CMD_UPDATE; | ||
| 3547 | q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); | ||
| 3548 | update_params = &q_params.params.update; | ||
| 3549 | __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, | ||
| 3550 | &update_params->update_flags); | ||
| 3551 | __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, | ||
| 3552 | &update_params->update_flags); | ||
| 3553 | if (vlan == 0) { | ||
| 3554 | /* if vlan is 0 then we want to leave the VF traffic | ||
| 3555 | * untagged, and leave the incoming traffic untouched | ||
| 3556 | * (i.e. do not remove any vlan tags). | ||
| 3499 | */ | 3557 | */ |
| 3500 | __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); | 3558 | __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, |
| 3501 | q_params.cmd = BNX2X_Q_CMD_UPDATE; | 3559 | &update_params->update_flags); |
| 3502 | q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); | 3560 | __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, |
| 3503 | update_params = &q_params.params.update; | 3561 | &update_params->update_flags); |
| 3504 | __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, | 3562 | } else { |
| 3563 | /* configure default vlan to vf queue and set silent | ||
| 3564 | * vlan removal (the vf remains unaware of this vlan). | ||
| 3565 | */ | ||
| 3566 | __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, | ||
| 3505 | &update_params->update_flags); | 3567 | &update_params->update_flags); |
| 3506 | __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, | 3568 | __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, |
| 3507 | &update_params->update_flags); | 3569 | &update_params->update_flags); |
| 3570 | update_params->def_vlan = vlan; | ||
| 3571 | update_params->silent_removal_value = | ||
| 3572 | vlan & VLAN_VID_MASK; | ||
| 3573 | update_params->silent_removal_mask = VLAN_VID_MASK; | ||
| 3574 | } | ||
| 3508 | 3575 | ||
| 3509 | if (vlan == 0) { | 3576 | /* Update the Queue state */ |
| 3510 | /* if vlan is 0 then we want to leave the VF traffic | 3577 | rc = bnx2x_queue_state_change(bp, &q_params); |
| 3511 | * untagged, and leave the incoming traffic untouched | 3578 | if (rc) { |
| 3512 | * (i.e. do not remove any vlan tags). | 3579 | BNX2X_ERR("Failed to configure default VLAN\n"); |
| 3513 | */ | 3580 | goto out; |
| 3514 | __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, | 3581 | } |
| 3515 | &update_params->update_flags); | ||
| 3516 | __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, | ||
| 3517 | &update_params->update_flags); | ||
| 3518 | } else { | ||
| 3519 | /* configure the new vlan to device */ | ||
| 3520 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
| 3521 | ramrod_param.vlan_mac_obj = vlan_obj; | ||
| 3522 | ramrod_param.ramrod_flags = ramrod_flags; | ||
| 3523 | ramrod_param.user_req.u.vlan.vlan = vlan; | ||
| 3524 | ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; | ||
| 3525 | rc = bnx2x_config_vlan_mac(bp, &ramrod_param); | ||
| 3526 | if (rc) { | ||
| 3527 | BNX2X_ERR("failed to configure vlan\n"); | ||
| 3528 | rc = -EINVAL; | ||
| 3529 | goto out; | ||
| 3530 | } | ||
| 3531 | |||
| 3532 | /* configure default vlan to vf queue and set silent | ||
| 3533 | * vlan removal (the vf remains unaware of this vlan). | ||
| 3534 | */ | ||
| 3535 | update_params = &q_params.params.update; | ||
| 3536 | __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, | ||
| 3537 | &update_params->update_flags); | ||
| 3538 | __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, | ||
| 3539 | &update_params->update_flags); | ||
| 3540 | update_params->def_vlan = vlan; | ||
| 3541 | } | ||
| 3542 | 3582 | ||
| 3543 | /* Update the Queue state */ | ||
| 3544 | rc = bnx2x_queue_state_change(bp, &q_params); | ||
| 3545 | if (rc) { | ||
| 3546 | BNX2X_ERR("Failed to configure default VLAN\n"); | ||
| 3547 | goto out; | ||
| 3548 | } | ||
| 3549 | 3583 | ||
| 3550 | /* clear the flag indicating that this VF needs its vlan | 3584 | /* clear the flag indicating that this VF needs its vlan |
| 3551 | * (will only be set if the HV configured the Vlan before vf was | 3585 | * (will only be set if the HV configured the Vlan before vf was |
| 3552 | * up and we were called because the VF came up later | 3586 | * up and we were called because the VF came up later |
| 3553 | */ | 3587 | */ |
| 3554 | out: | 3588 | out: |
| 3555 | vf->cfg_flags &= ~VF_CFG_VLAN; | 3589 | vf->cfg_flags &= ~VF_CFG_VLAN; |
| 3556 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); | 3590 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); |
| 3557 | } | 3591 | |
| 3558 | return rc; | 3592 | return rc; |
| 3559 | } | 3593 | } |
| 3560 | 3594 | ||
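
For context, bnx2x_set_vf_vlan() is the driver's .ndo_set_vf_vlan callback, reached from the hypervisor side via something like "ip link set <pf-ifname> vf <n> vlan <id>"; with the rework above, passing vlan id 0 clears VLAN_VALID in the bulletin and restores the BNX2X_ACCEPT_ANY_VLAN accept flag instead of programming a host tag on the VF queue.
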
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 1ff6a9366629..8c213fa52174 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | |||
| @@ -74,6 +74,7 @@ struct bnx2x_vf_queue { | |||
| 74 | /* VLANs object */ | 74 | /* VLANs object */ |
| 75 | struct bnx2x_vlan_mac_obj vlan_obj; | 75 | struct bnx2x_vlan_mac_obj vlan_obj; |
| 76 | atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ | 76 | atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ |
| 77 | unsigned long accept_flags; /* last accept flags configured */ | ||
| 77 | 78 | ||
| 78 | /* Queue Slow-path State object */ | 79 | /* Queue Slow-path State object */ |
| 79 | struct bnx2x_queue_sp_obj sp_obj; | 80 | struct bnx2x_queue_sp_obj sp_obj; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index efa8a151d789..0756d7dabdd5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | |||
| @@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) | |||
| 208 | return -EINVAL; | 208 | return -EINVAL; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); | 211 | DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg); |
| 212 | 212 | ||
| 213 | *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; | 213 | *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; |
| 214 | 214 | ||
| @@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
| 1598 | 1598 | ||
| 1599 | if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { | 1599 | if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { |
| 1600 | unsigned long accept = 0; | 1600 | unsigned long accept = 0; |
| 1601 | struct pf_vf_bulletin_content *bulletin = | ||
| 1602 | BP_VF_BULLETIN(bp, vf->index); | ||
| 1601 | 1603 | ||
| 1602 | /* convert VF-PF if mask to bnx2x accept flags | 1604 | /* convert VF-PF if mask to bnx2x accept flags |
| 1603 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) | 1605 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) |
| @@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
| 1617 | __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); | 1619 | __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); |
| 1618 | 1620 | ||
| 1619 | /* A packet arriving at the vf's mac should be accepted | 1621 | /* A packet arriving at the vf's mac should be accepted |
| 1620 | * with any vlan | 1622 | * with any vlan, unless a vlan has already been |
| 1623 | * configured. | ||
| 1621 | */ | 1624 | */ |
| 1622 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); | 1625 | if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) |
| 1626 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); | ||
| 1623 | 1627 | ||
| 1624 | /* set rx-mode */ | 1628 | /* set rx-mode */ |
| 1625 | rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, | 1629 | rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, |
| @@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, | |||
| 1710 | goto response; | 1714 | goto response; |
| 1711 | } | 1715 | } |
| 1712 | } | 1716 | } |
| 1717 | /* if vlan was set by hypervisor we don't allow guest to config vlan */ | ||
| 1718 | if (bulletin->valid_bitmap & 1 << VLAN_VALID) { | ||
| 1719 | int i; | ||
| 1720 | |||
| 1721 | /* search for vlan filters */ | ||
| 1722 | for (i = 0; i < filters->n_mac_vlan_filters; i++) { | ||
| 1723 | if (filters->filters[i].flags & | ||
| 1724 | VFPF_Q_FILTER_VLAN_TAG_VALID) { | ||
| 1725 | BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", | ||
| 1726 | vf->abs_vfid); | ||
| 1727 | vf->op_rc = -EPERM; | ||
| 1728 | goto response; | ||
| 1729 | } | ||
| 1730 | } | ||
| 1731 | } | ||
| 1713 | 1732 | ||
| 1714 | /* verify vf_qid */ | 1733 | /* verify vf_qid */ |
| 1715 | if (filters->vf_qid > vf_rxq_count(vf)) | 1734 | if (filters->vf_qid > vf_rxq_count(vf)) |
| @@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
| 1805 | vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; | 1824 | vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; |
| 1806 | 1825 | ||
| 1807 | /* flags handled individually for backward/forward compatibility */ | 1826 | /* flags handled individually for backward/forward compatibility */ |
| 1827 | vf_op_params->rss_flags = 0; | ||
| 1828 | vf_op_params->ramrod_flags = 0; | ||
| 1829 | |||
| 1808 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) | 1830 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) |
| 1809 | __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); | 1831 | __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); |
| 1810 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) | 1832 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index f3dd93b4aeaa..15a66e4b1f57 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) | |||
| 7622 | { | 7622 | { |
| 7623 | u32 base = (u32) mapping & 0xffffffff; | 7623 | u32 base = (u32) mapping & 0xffffffff; |
| 7624 | 7624 | ||
| 7625 | return (base > 0xffffdcc0) && (base + len + 8 < base); | 7625 | return base + len + 8 < base; |
| 7626 | } | 7626 | } |
| 7627 | 7627 | ||
| 7628 | /* Test for TSO DMA buffers that cross into regions which are within MSS bytes | 7628 | /* Test for TSO DMA buffers that cross into regions which are within MSS bytes |
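
The simplified tg3 test keeps only the condition that matters: in 32-bit arithmetic, base + len + 8 < base is true exactly when the buffer (plus the 8 bytes of slack the workaround accounts for) wraps a 4 GB boundary, whereas the dropped base > 0xffffdcc0 pre-check meant long buffers starting below that address could escape detection. A standalone illustration with made-up addresses:

	#include <stdint.h>
	#include <stdio.h>

	static int wraps_4g(uint32_t base, uint32_t len)
	{
		return base + len + 8 < base;	/* u32 wrap == crosses a 4 GB boundary */
	}

	int main(void)
	{
		printf("%d\n", wraps_4g(0xffffff00u, 0x200));	/* 1: 0xffffff00 + 0x208 wraps */
		printf("%d\n", wraps_4g(0x10000000u, 0x200));	/* 0: stays below the boundary */
		return 0;
	}
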
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 6c9308850453..56e0415f8cdf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
| @@ -228,6 +228,25 @@ struct tp_params { | |||
| 228 | 228 | ||
| 229 | uint32_t dack_re; /* DACK timer resolution */ | 229 | uint32_t dack_re; /* DACK timer resolution */ |
| 230 | unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ | 230 | unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ |
| 231 | |||
| 232 | u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ | ||
| 233 | u32 ingress_config; /* cached TP_INGRESS_CONFIG */ | ||
| 234 | |||
| 235 | /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a | ||
| 236 | * subset of the set of fields which may be present in the Compressed | ||
| 237 | * Filter Tuple portion of filters and TCP TCB connections. The | ||
| 238 | * fields which are present are controlled by the TP_VLAN_PRI_MAP. | ||
| 239 | * Since a variable number of fields may or may not be present, their | ||
| 240 | * shifted field positions within the Compressed Filter Tuple may | ||
| 241 | * vary, or not even be present if the field isn't selected in | ||
| 242 | * TP_VLAN_PRI_MAP. Since some of these fields are needed in various | ||
| 243 | * places we store their offsets here, or a -1 if the field isn't | ||
| 244 | * present. | ||
| 245 | */ | ||
| 246 | int vlan_shift; | ||
| 247 | int vnic_shift; | ||
| 248 | int port_shift; | ||
| 249 | int protocol_shift; | ||
| 231 | }; | 250 | }; |
| 232 | 251 | ||
| 233 | struct vpd_params { | 252 | struct vpd_params { |
| @@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, | |||
| 926 | const u8 *fw_data, unsigned int fw_size, | 945 | const u8 *fw_data, unsigned int fw_size, |
| 927 | struct fw_hdr *card_fw, enum dev_state state, int *reset); | 946 | struct fw_hdr *card_fw, enum dev_state state, int *reset); |
| 928 | int t4_prep_adapter(struct adapter *adapter); | 947 | int t4_prep_adapter(struct adapter *adapter); |
| 948 | int t4_init_tp_params(struct adapter *adap); | ||
| 949 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel); | ||
| 929 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); | 950 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); |
| 930 | void t4_fatal_err(struct adapter *adapter); | 951 | void t4_fatal_err(struct adapter *adapter); |
| 931 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, | 952 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index d6b12e035a7d..fff02ed1295e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) | |||
| 2986 | if (stid >= 0) { | 2986 | if (stid >= 0) { |
| 2987 | t->stid_tab[stid].data = data; | 2987 | t->stid_tab[stid].data = data; |
| 2988 | stid += t->stid_base; | 2988 | stid += t->stid_base; |
| 2989 | t->stids_in_use++; | 2989 | /* IPv6 requires max of 520 bits or 16 cells in TCAM |
| 2990 | * This is equivalent to 4 TIDs. With CLIP enabled it | ||
| 2991 | * needs 2 TIDs. | ||
| 2992 | */ | ||
| 2993 | if (family == PF_INET) | ||
| 2994 | t->stids_in_use++; | ||
| 2995 | else | ||
| 2996 | t->stids_in_use += 4; | ||
| 2990 | } | 2997 | } |
| 2991 | spin_unlock_bh(&t->stid_lock); | 2998 | spin_unlock_bh(&t->stid_lock); |
| 2992 | return stid; | 2999 | return stid; |
| @@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) | |||
| 3012 | } | 3019 | } |
| 3013 | if (stid >= 0) { | 3020 | if (stid >= 0) { |
| 3014 | t->stid_tab[stid].data = data; | 3021 | t->stid_tab[stid].data = data; |
| 3015 | stid += t->stid_base; | 3022 | stid -= t->nstids; |
| 3023 | stid += t->sftid_base; | ||
| 3016 | t->stids_in_use++; | 3024 | t->stids_in_use++; |
| 3017 | } | 3025 | } |
| 3018 | spin_unlock_bh(&t->stid_lock); | 3026 | spin_unlock_bh(&t->stid_lock); |
| @@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid); | |||
| 3024 | */ | 3032 | */ |
| 3025 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) | 3033 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) |
| 3026 | { | 3034 | { |
| 3027 | stid -= t->stid_base; | 3035 | /* Is it a server filter TID? */ |
| 3036 | if (t->nsftids && (stid >= t->sftid_base)) { | ||
| 3037 | stid -= t->sftid_base; | ||
| 3038 | stid += t->nstids; | ||
| 3039 | } else { | ||
| 3040 | stid -= t->stid_base; | ||
| 3041 | } | ||
| 3042 | |||
| 3028 | spin_lock_bh(&t->stid_lock); | 3043 | spin_lock_bh(&t->stid_lock); |
| 3029 | if (family == PF_INET) | 3044 | if (family == PF_INET) |
| 3030 | __clear_bit(stid, t->stid_bmap); | 3045 | __clear_bit(stid, t->stid_bmap); |
| 3031 | else | 3046 | else |
| 3032 | bitmap_release_region(t->stid_bmap, stid, 2); | 3047 | bitmap_release_region(t->stid_bmap, stid, 2); |
| 3033 | t->stid_tab[stid].data = NULL; | 3048 | t->stid_tab[stid].data = NULL; |
| 3034 | t->stids_in_use--; | 3049 | if (family == PF_INET) |
| 3050 | t->stids_in_use--; | ||
| 3051 | else | ||
| 3052 | t->stids_in_use -= 4; | ||
| 3035 | spin_unlock_bh(&t->stid_lock); | 3053 | spin_unlock_bh(&t->stid_lock); |
| 3036 | } | 3054 | } |
| 3037 | EXPORT_SYMBOL(cxgb4_free_stid); | 3055 | EXPORT_SYMBOL(cxgb4_free_stid); |
| @@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t) | |||
| 3134 | size_t size; | 3152 | size_t size; |
| 3135 | unsigned int stid_bmap_size; | 3153 | unsigned int stid_bmap_size; |
| 3136 | unsigned int natids = t->natids; | 3154 | unsigned int natids = t->natids; |
| 3155 | struct adapter *adap = container_of(t, struct adapter, tids); | ||
| 3137 | 3156 | ||
| 3138 | stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); | 3157 | stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); |
| 3139 | size = t->ntids * sizeof(*t->tid_tab) + | 3158 | size = t->ntids * sizeof(*t->tid_tab) + |
| @@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t) | |||
| 3167 | t->afree = t->atid_tab; | 3186 | t->afree = t->atid_tab; |
| 3168 | } | 3187 | } |
| 3169 | bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); | 3188 | bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); |
| 3189 | /* Reserve stid 0 for T4/T5 adapters */ | ||
| 3190 | if (!t->stid_base && | ||
| 3191 | (is_t4(adap->params.chip) || is_t5(adap->params.chip))) | ||
| 3192 | __set_bit(0, t->stid_bmap); | ||
| 3193 | |||
| 3170 | return 0; | 3194 | return 0; |
| 3171 | } | 3195 | } |
| 3172 | 3196 | ||
| @@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) | |||
| 3731 | lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( | 3755 | lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( |
| 3732 | t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> | 3756 | t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> |
| 3733 | (adap->fn * 4)); | 3757 | (adap->fn * 4)); |
| 3734 | lli.filt_mode = adap->filter_mode; | 3758 | lli.filt_mode = adap->params.tp.vlan_pri_map; |
| 3735 | /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ | 3759 | /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ |
| 3736 | for (i = 0; i < NCHAN; i++) | 3760 | for (i = 0; i < NCHAN; i++) |
| 3737 | lli.tx_modq[i] = i; | 3761 | lli.tx_modq[i] = i; |
| @@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, | |||
| 4179 | adap = netdev2adap(dev); | 4203 | adap = netdev2adap(dev); |
| 4180 | 4204 | ||
| 4181 | /* Adjust stid to correct filter index */ | 4205 | /* Adjust stid to correct filter index */ |
| 4182 | stid -= adap->tids.nstids; | 4206 | stid -= adap->tids.sftid_base; |
| 4183 | stid += adap->tids.nftids; | 4207 | stid += adap->tids.nftids; |
| 4184 | 4208 | ||
| 4185 | /* Check to make sure the filter requested is writable ... | 4209 | /* Check to make sure the filter requested is writable ... |
| @@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, | |||
| 4205 | f->fs.val.lip[i] = val[i]; | 4229 | f->fs.val.lip[i] = val[i]; |
| 4206 | f->fs.mask.lip[i] = ~0; | 4230 | f->fs.mask.lip[i] = ~0; |
| 4207 | } | 4231 | } |
| 4208 | if (adap->filter_mode & F_PORT) { | 4232 | if (adap->params.tp.vlan_pri_map & F_PORT) { |
| 4209 | f->fs.val.iport = port; | 4233 | f->fs.val.iport = port; |
| 4210 | f->fs.mask.iport = mask; | 4234 | f->fs.mask.iport = mask; |
| 4211 | } | 4235 | } |
| 4212 | } | 4236 | } |
| 4213 | 4237 | ||
| 4238 | if (adap->params.tp.vlan_pri_map & F_PROTOCOL) { | ||
| 4239 | f->fs.val.proto = IPPROTO_TCP; | ||
| 4240 | f->fs.mask.proto = ~0; | ||
| 4241 | } | ||
| 4242 | |||
| 4214 | f->fs.dirsteer = 1; | 4243 | f->fs.dirsteer = 1; |
| 4215 | f->fs.iq = queue; | 4244 | f->fs.iq = queue; |
| 4216 | /* Mark filter as locked */ | 4245 | /* Mark filter as locked */ |
| @@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, | |||
| 4237 | adap = netdev2adap(dev); | 4266 | adap = netdev2adap(dev); |
| 4238 | 4267 | ||
| 4239 | /* Adjust stid to correct filter index */ | 4268 | /* Adjust stid to correct filter index */ |
| 4240 | stid -= adap->tids.nstids; | 4269 | stid -= adap->tids.sftid_base; |
| 4241 | stid += adap->tids.nftids; | 4270 | stid += adap->tids.nftids; |
| 4242 | 4271 | ||
| 4243 | f = &adap->tids.ftid_tab[stid]; | 4272 | f = &adap->tids.ftid_tab[stid]; |
| @@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap) | |||
| 5092 | enum dev_state state; | 5121 | enum dev_state state; |
| 5093 | u32 params[7], val[7]; | 5122 | u32 params[7], val[7]; |
| 5094 | struct fw_caps_config_cmd caps_cmd; | 5123 | struct fw_caps_config_cmd caps_cmd; |
| 5095 | int reset = 1, j; | 5124 | int reset = 1; |
| 5096 | 5125 | ||
| 5097 | /* | 5126 | /* |
| 5098 | * Contact FW, advertising Master capability (and potentially forcing | 5127 | * Contact FW, advertising Master capability (and potentially forcing |
| @@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap) | |||
| 5434 | /* | 5463 | /* |
| 5435 | * These are finalized by FW initialization, load their values now. | 5464 | * These are finalized by FW initialization, load their values now. |
| 5436 | */ | 5465 | */ |
| 5437 | v = t4_read_reg(adap, TP_TIMER_RESOLUTION); | ||
| 5438 | adap->params.tp.tre = TIMERRESOLUTION_GET(v); | ||
| 5439 | adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); | ||
| 5440 | t4_read_mtu_tbl(adap, adap->params.mtus, NULL); | 5466 | t4_read_mtu_tbl(adap, adap->params.mtus, NULL); |
| 5441 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, | 5467 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, |
| 5442 | adap->params.b_wnd); | 5468 | adap->params.b_wnd); |
| 5443 | 5469 | ||
| 5444 | /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ | 5470 | t4_init_tp_params(adap); |
| 5445 | for (j = 0; j < NCHAN; j++) | ||
| 5446 | adap->params.tp.tx_modq[j] = j; | ||
| 5447 | |||
| 5448 | t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, | ||
| 5449 | &adap->filter_mode, 1, | ||
| 5450 | TP_VLAN_PRI_MAP); | ||
| 5451 | |||
| 5452 | adap->flags |= FW_OK; | 5471 | adap->flags |= FW_OK; |
| 5453 | return 0; | 5472 | return 0; |
| 5454 | 5473 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 6f21f2451c30..4dd0a82533e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | |||
| @@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) | |||
| 131 | 131 | ||
| 132 | static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) | 132 | static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) |
| 133 | { | 133 | { |
| 134 | stid -= t->stid_base; | 134 | /* Is it a server filter TID? */ |
| 135 | if (t->nsftids && (stid >= t->sftid_base)) { | ||
| 136 | stid -= t->sftid_base; | ||
| 137 | stid += t->nstids; | ||
| 138 | } else { | ||
| 139 | stid -= t->stid_base; | ||
| 140 | } | ||
| 141 | |||
| 135 | return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; | 142 | return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; |
| 136 | } | 143 | } |
| 137 | 144 | ||
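
The index math used in lookup_stid() above (and mirrored in cxgb4_free_stid()) packs both ranges into one table: regular server TIDs map to indexes [0, nstids) and server-filter TIDs are stored after them at [nstids, nstids + nsftids). A small sketch with made-up range bases:

	/* illustrative values; the real bases come from the adapter parameters */
	unsigned int stid_base = 512, nstids = 256;
	unsigned int sftid_base = 1024, nsftids = 64;

	unsigned int idx_stid  = 520 - stid_base;		/* regular stid -> 8    */
	unsigned int idx_sftid = (1030 - sftid_base) + nstids;	/* server-filter -> 262 */
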
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 29878098101e..cb05be905def 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | #include "l2t.h" | 45 | #include "l2t.h" |
| 46 | #include "t4_msg.h" | 46 | #include "t4_msg.h" |
| 47 | #include "t4fw_api.h" | 47 | #include "t4fw_api.h" |
| 48 | #include "t4_regs.h" | ||
| 48 | 49 | ||
| 49 | #define VLAN_NONE 0xfff | 50 | #define VLAN_NONE 0xfff |
| 50 | 51 | ||
| @@ -411,6 +412,40 @@ done: | |||
| 411 | } | 412 | } |
| 412 | EXPORT_SYMBOL(cxgb4_l2t_get); | 413 | EXPORT_SYMBOL(cxgb4_l2t_get); |
| 413 | 414 | ||
| 415 | u64 cxgb4_select_ntuple(struct net_device *dev, | ||
| 416 | const struct l2t_entry *l2t) | ||
| 417 | { | ||
| 418 | struct adapter *adap = netdev2adap(dev); | ||
| 419 | struct tp_params *tp = &adap->params.tp; | ||
| 420 | u64 ntuple = 0; | ||
| 421 | |||
| 422 | /* Initialize each of the fields which we care about which are present | ||
| 423 | * in the Compressed Filter Tuple. | ||
| 424 | */ | ||
| 425 | if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) | ||
| 426 | ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; | ||
| 427 | |||
| 428 | if (tp->port_shift >= 0) | ||
| 429 | ntuple |= (u64)l2t->lport << tp->port_shift; | ||
| 430 | |||
| 431 | if (tp->protocol_shift >= 0) | ||
| 432 | ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; | ||
| 433 | |||
| 434 | if (tp->vnic_shift >= 0) { | ||
| 435 | u32 viid = cxgb4_port_viid(dev); | ||
| 436 | u32 vf = FW_VIID_VIN_GET(viid); | ||
| 437 | u32 pf = FW_VIID_PFN_GET(viid); | ||
| 438 | u32 vld = FW_VIID_VIVLD_GET(viid); | ||
| 439 | |||
| 440 | ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | | ||
| 441 | V_FT_VNID_ID_PF(pf) | | ||
| 442 | V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; | ||
| 443 | } | ||
| 444 | |||
| 445 | return ntuple; | ||
| 446 | } | ||
| 447 | EXPORT_SYMBOL(cxgb4_select_ntuple); | ||
| 448 | |||
| 414 | /* | 449 | /* |
| 415 | * Called when address resolution fails for an L2T entry to handle packets | 450 | * Called when address resolution fails for an L2T entry to handle packets |
| 416 | * on the arpq head. If a packet specifies a failure handler it is invoked, | 451 | * on the arpq head. If a packet specifies a failure handler it is invoked, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index 108c0f1fce1c..85eb5c71358d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h | |||
| @@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, | |||
| 98 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, | 98 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, |
| 99 | const struct net_device *physdev, | 99 | const struct net_device *physdev, |
| 100 | unsigned int priority); | 100 | unsigned int priority); |
| 101 | 101 | u64 cxgb4_select_ntuple(struct net_device *dev, | |
| 102 | const struct l2t_entry *l2t); | ||
| 102 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); | 103 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); |
| 103 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); | 104 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); |
| 104 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, | 105 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index cc380c36e1a8..cc3511a5cd0c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
| @@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap) | |||
| 2581 | #undef READ_FL_BUF | 2581 | #undef READ_FL_BUF |
| 2582 | 2582 | ||
| 2583 | if (fl_small_pg != PAGE_SIZE || | 2583 | if (fl_small_pg != PAGE_SIZE || |
| 2584 | (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || | 2584 | (fl_large_pg != 0 && (fl_large_pg < fl_small_pg || |
| 2585 | (fl_large_pg & (fl_large_pg-1)) != 0))) { | 2585 | (fl_large_pg & (fl_large_pg-1)) != 0))) { |
| 2586 | dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", | 2586 | dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", |
| 2587 | fl_small_pg, fl_large_pg); | 2587 | fl_small_pg, fl_large_pg); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 74a6fce5a15a..e1413eacdbd2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
| @@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter) | |||
| 3808 | return 0; | 3808 | return 0; |
| 3809 | } | 3809 | } |
| 3810 | 3810 | ||
| 3811 | /** | ||
| 3812 | * t4_init_tp_params - initialize adap->params.tp | ||
| 3813 | * @adap: the adapter | ||
| 3814 | * | ||
| 3815 | * Initialize various fields of the adapter's TP Parameters structure. | ||
| 3816 | */ | ||
| 3817 | int t4_init_tp_params(struct adapter *adap) | ||
| 3818 | { | ||
| 3819 | int chan; | ||
| 3820 | u32 v; | ||
| 3821 | |||
| 3822 | v = t4_read_reg(adap, TP_TIMER_RESOLUTION); | ||
| 3823 | adap->params.tp.tre = TIMERRESOLUTION_GET(v); | ||
| 3824 | adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); | ||
| 3825 | |||
| 3826 | /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ | ||
| 3827 | for (chan = 0; chan < NCHAN; chan++) | ||
| 3828 | adap->params.tp.tx_modq[chan] = chan; | ||
| 3829 | |||
| 3830 | /* Cache the adapter's Compressed Filter Mode and global Ingress | ||
| 3831 | * Configuration. | ||
| 3832 | */ | ||
| 3833 | t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, | ||
| 3834 | &adap->params.tp.vlan_pri_map, 1, | ||
| 3835 | TP_VLAN_PRI_MAP); | ||
| 3836 | t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, | ||
| 3837 | &adap->params.tp.ingress_config, 1, | ||
| 3838 | TP_INGRESS_CONFIG); | ||
| 3839 | |||
| 3840 | /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field | ||
| 3841 | * shift positions of several elements of the Compressed Filter Tuple | ||
| 3842 | * for this adapter which we need frequently ... | ||
| 3843 | */ | ||
| 3844 | adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); | ||
| 3845 | adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); | ||
| 3846 | adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); | ||
| 3847 | adap->params.tp.protocol_shift = t4_filter_field_shift(adap, | ||
| 3848 | F_PROTOCOL); | ||
| 3849 | |||
| 3850 | /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID | ||
| 3851 | * represents the presence of an Outer VLAN instead of a VNIC ID. | ||
| 3852 | */ | ||
| 3853 | if ((adap->params.tp.ingress_config & F_VNIC) == 0) | ||
| 3854 | adap->params.tp.vnic_shift = -1; | ||
| 3855 | |||
| 3856 | return 0; | ||
| 3857 | } | ||
| 3858 | |||
| 3859 | /** | ||
| 3860 | * t4_filter_field_shift - calculate filter field shift | ||
| 3861 | * @adap: the adapter | ||
| 3862 | * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) | ||
| 3863 | * | ||
| 3864 | * Return the shift position of a filter field within the Compressed | ||
| 3865 | * Filter Tuple. The filter field is specified via its selection bit | ||
| 3866 | * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN. | ||
| 3867 | */ | ||
| 3868 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel) | ||
| 3869 | { | ||
| 3870 | unsigned int filter_mode = adap->params.tp.vlan_pri_map; | ||
| 3871 | unsigned int sel; | ||
| 3872 | int field_shift; | ||
| 3873 | |||
| 3874 | if ((filter_mode & filter_sel) == 0) | ||
| 3875 | return -1; | ||
| 3876 | |||
| 3877 | for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { | ||
| 3878 | switch (filter_mode & sel) { | ||
| 3879 | case F_FCOE: | ||
| 3880 | field_shift += W_FT_FCOE; | ||
| 3881 | break; | ||
| 3882 | case F_PORT: | ||
| 3883 | field_shift += W_FT_PORT; | ||
| 3884 | break; | ||
| 3885 | case F_VNIC_ID: | ||
| 3886 | field_shift += W_FT_VNIC_ID; | ||
| 3887 | break; | ||
| 3888 | case F_VLAN: | ||
| 3889 | field_shift += W_FT_VLAN; | ||
| 3890 | break; | ||
| 3891 | case F_TOS: | ||
| 3892 | field_shift += W_FT_TOS; | ||
| 3893 | break; | ||
| 3894 | case F_PROTOCOL: | ||
| 3895 | field_shift += W_FT_PROTOCOL; | ||
| 3896 | break; | ||
| 3897 | case F_ETHERTYPE: | ||
| 3898 | field_shift += W_FT_ETHERTYPE; | ||
| 3899 | break; | ||
| 3900 | case F_MACMATCH: | ||
| 3901 | field_shift += W_FT_MACMATCH; | ||
| 3902 | break; | ||
| 3903 | case F_MPSHITTYPE: | ||
| 3904 | field_shift += W_FT_MPSHITTYPE; | ||
| 3905 | break; | ||
| 3906 | case F_FRAGMENTATION: | ||
| 3907 | field_shift += W_FT_FRAGMENTATION; | ||
| 3908 | break; | ||
| 3909 | } | ||
| 3910 | } | ||
| 3911 | return field_shift; | ||
| 3912 | } | ||
| 3913 | |||
| 3811 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) | 3914 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) |
| 3812 | { | 3915 | { |
| 3813 | u8 addr[6]; | 3916 | u8 addr[6]; |
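
The new t4_filter_field_shift() derives the bit offset of a filter field inside the Compressed Filter Tuple by summing the widths of every lower-order field that is enabled in TP_VLAN_PRI_MAP. A minimal userspace sketch of that accumulation follows; the field masks and widths mirror the t4_regs.h additions below, but the filter_mode value used in main() is a made-up example rather than anything read from real hardware:

#include <stdio.h>

#define F_FCOE     (1U << 0)
#define F_PORT     (1U << 1)
#define F_VNIC_ID  (1U << 2)
#define F_VLAN     (1U << 3)
#define F_TOS      (1U << 4)

#define W_FT_FCOE     1
#define W_FT_PORT     3
#define W_FT_VNIC_ID  17
#define W_FT_VLAN     17
#define W_FT_TOS      8

/* Width of each selectable field, indexed by its TP_VLAN_PRI_MAP bit. */
static const int ft_width[] = {
	W_FT_FCOE, W_FT_PORT, W_FT_VNIC_ID, W_FT_VLAN, W_FT_TOS,
};

static int filter_field_shift(unsigned int filter_mode, unsigned int filter_sel)
{
	unsigned int sel;
	int bit, shift = 0;

	if (!(filter_mode & filter_sel))
		return -1;		/* field not part of the tuple */

	/* Sum the widths of every enabled field below the requested one. */
	for (bit = 0, sel = 1; sel < filter_sel; sel <<= 1, bit++)
		if (filter_mode & sel)
			shift += ft_width[bit];
	return shift;
}

int main(void)
{
	unsigned int mode = F_PORT | F_VLAN | F_TOS;	/* hypothetical filter mode */

	printf("VLAN shift = %d\n", filter_field_shift(mode, F_VLAN));	/* 3 */
	printf("TOS  shift = %d\n", filter_field_shift(mode, F_TOS));	/* 20 */
	return 0;
}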
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 0a8205d69d2c..4082522d8140 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
| @@ -1171,10 +1171,50 @@ | |||
| 1171 | 1171 | ||
| 1172 | #define A_TP_TX_SCHED_PCMD 0x25 | 1172 | #define A_TP_TX_SCHED_PCMD 0x25 |
| 1173 | 1173 | ||
| 1174 | #define S_VNIC 11 | ||
| 1175 | #define V_VNIC(x) ((x) << S_VNIC) | ||
| 1176 | #define F_VNIC V_VNIC(1U) | ||
| 1177 | |||
| 1178 | #define S_FRAGMENTATION 9 | ||
| 1179 | #define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) | ||
| 1180 | #define F_FRAGMENTATION V_FRAGMENTATION(1U) | ||
| 1181 | |||
| 1182 | #define S_MPSHITTYPE 8 | ||
| 1183 | #define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) | ||
| 1184 | #define F_MPSHITTYPE V_MPSHITTYPE(1U) | ||
| 1185 | |||
| 1186 | #define S_MACMATCH 7 | ||
| 1187 | #define V_MACMATCH(x) ((x) << S_MACMATCH) | ||
| 1188 | #define F_MACMATCH V_MACMATCH(1U) | ||
| 1189 | |||
| 1190 | #define S_ETHERTYPE 6 | ||
| 1191 | #define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) | ||
| 1192 | #define F_ETHERTYPE V_ETHERTYPE(1U) | ||
| 1193 | |||
| 1194 | #define S_PROTOCOL 5 | ||
| 1195 | #define V_PROTOCOL(x) ((x) << S_PROTOCOL) | ||
| 1196 | #define F_PROTOCOL V_PROTOCOL(1U) | ||
| 1197 | |||
| 1198 | #define S_TOS 4 | ||
| 1199 | #define V_TOS(x) ((x) << S_TOS) | ||
| 1200 | #define F_TOS V_TOS(1U) | ||
| 1201 | |||
| 1202 | #define S_VLAN 3 | ||
| 1203 | #define V_VLAN(x) ((x) << S_VLAN) | ||
| 1204 | #define F_VLAN V_VLAN(1U) | ||
| 1205 | |||
| 1206 | #define S_VNIC_ID 2 | ||
| 1207 | #define V_VNIC_ID(x) ((x) << S_VNIC_ID) | ||
| 1208 | #define F_VNIC_ID V_VNIC_ID(1U) | ||
| 1209 | |||
| 1174 | #define S_PORT 1 | 1210 | #define S_PORT 1 |
| 1175 | #define V_PORT(x) ((x) << S_PORT) | 1211 | #define V_PORT(x) ((x) << S_PORT) |
| 1176 | #define F_PORT V_PORT(1U) | 1212 | #define F_PORT V_PORT(1U) |
| 1177 | 1213 | ||
| 1214 | #define S_FCOE 0 | ||
| 1215 | #define V_FCOE(x) ((x) << S_FCOE) | ||
| 1216 | #define F_FCOE V_FCOE(1U) | ||
| 1217 | |||
| 1178 | #define NUM_MPS_CLS_SRAM_L_INSTANCES 336 | 1218 | #define NUM_MPS_CLS_SRAM_L_INSTANCES 336 |
| 1179 | #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 | 1219 | #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 |
| 1180 | 1220 | ||
| @@ -1213,4 +1253,37 @@ | |||
| 1213 | #define V_CHIPID(x) ((x) << S_CHIPID) | 1253 | #define V_CHIPID(x) ((x) << S_CHIPID) |
| 1214 | #define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) | 1254 | #define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) |
| 1215 | 1255 | ||
| 1256 | /* TP_VLAN_PRI_MAP controls which subset of fields will be present in the | ||
| 1257 | * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP | ||
| 1258 | * selects for a particular field being present. These fields, when present | ||
| 1259 | * in the Compressed Filter Tuple, have the following widths in bits. | ||
| 1260 | */ | ||
| 1261 | #define W_FT_FCOE 1 | ||
| 1262 | #define W_FT_PORT 3 | ||
| 1263 | #define W_FT_VNIC_ID 17 | ||
| 1264 | #define W_FT_VLAN 17 | ||
| 1265 | #define W_FT_TOS 8 | ||
| 1266 | #define W_FT_PROTOCOL 8 | ||
| 1267 | #define W_FT_ETHERTYPE 16 | ||
| 1268 | #define W_FT_MACMATCH 9 | ||
| 1269 | #define W_FT_MPSHITTYPE 3 | ||
| 1270 | #define W_FT_FRAGMENTATION 1 | ||
| 1271 | |||
| 1272 | /* Some of the Compressed Filter Tuple fields have internal structure. These | ||
| 1273 | * bit shifts/masks describe those structures. All shifts are relative to the | ||
| 1274 | * base position of the fields within the Compressed Filter Tuple | ||
| 1275 | */ | ||
| 1276 | #define S_FT_VLAN_VLD 16 | ||
| 1277 | #define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD) | ||
| 1278 | #define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U) | ||
| 1279 | |||
| 1280 | #define S_FT_VNID_ID_VF 0 | ||
| 1281 | #define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF) | ||
| 1282 | |||
| 1283 | #define S_FT_VNID_ID_PF 7 | ||
| 1284 | #define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF) | ||
| 1285 | |||
| 1286 | #define S_FT_VNID_ID_VLD 16 | ||
| 1287 | #define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD) | ||
| 1288 | |||
| 1216 | #endif /* __T4_REGS_H */ | 1289 | #endif /* __T4_REGS_H */ |
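
The S_FT_VNID_ID_* shifts describe the internal layout of the 17-bit VNIC_ID field: a VF number, a PF number, and a valid bit. A small sketch of composing that sub-field, using hypothetical PF/VF numbers purely for illustration:

#include <stdio.h>

#define S_FT_VNID_ID_VF    0
#define S_FT_VNID_ID_PF    7
#define S_FT_VNID_ID_VLD   16

int main(void)
{
	/* Hypothetical PF 4 / VF 10, marked valid. */
	unsigned int vnic = (10u << S_FT_VNID_ID_VF) |
			    (4u  << S_FT_VNID_ID_PF) |
			    (1u  << S_FT_VNID_ID_VLD);

	printf("VNIC_ID sub-field = 0x%05x\n", vnic);	/* 0x1020a */
	return 0;
}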
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 5878df619b53..4ccaf9af6fc9 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
| @@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev) | |||
| 104 | #define BE3_MAX_RSS_QS 16 | 104 | #define BE3_MAX_RSS_QS 16 |
| 105 | #define BE3_MAX_TX_QS 16 | 105 | #define BE3_MAX_TX_QS 16 |
| 106 | #define BE3_MAX_EVT_QS 16 | 106 | #define BE3_MAX_EVT_QS 16 |
| 107 | #define BE3_SRIOV_MAX_EVT_QS 8 | ||
| 107 | 108 | ||
| 108 | #define MAX_RX_QS 32 | 109 | #define MAX_RX_QS 32 |
| 109 | #define MAX_EVT_QS 32 | 110 | #define MAX_EVT_QS 32 |
| @@ -480,7 +481,7 @@ struct be_adapter { | |||
| 480 | struct list_head entry; | 481 | struct list_head entry; |
| 481 | 482 | ||
| 482 | u32 flash_status; | 483 | u32 flash_status; |
| 483 | struct completion flash_compl; | 484 | struct completion et_cmd_compl; |
| 484 | 485 | ||
| 485 | struct be_resources res; /* resources available for the func */ | 486 | struct be_resources res; /* resources available for the func */ |
| 486 | u16 num_vfs; /* Number of VFs provisioned by PF */ | 487 | u16 num_vfs; /* Number of VFs provisioned by PF */ |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index e0e8bc1ef14c..94c35c8d799d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
| @@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter, | |||
| 141 | subsystem = resp_hdr->subsystem; | 141 | subsystem = resp_hdr->subsystem; |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && | ||
| 145 | subsystem == CMD_SUBSYSTEM_LOWLEVEL) { | ||
| 146 | complete(&adapter->et_cmd_compl); | ||
| 147 | return 0; | ||
| 148 | } | ||
| 149 | |||
| 144 | if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || | 150 | if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || |
| 145 | (opcode == OPCODE_COMMON_WRITE_OBJECT)) && | 151 | (opcode == OPCODE_COMMON_WRITE_OBJECT)) && |
| 146 | (subsystem == CMD_SUBSYSTEM_COMMON)) { | 152 | (subsystem == CMD_SUBSYSTEM_COMMON)) { |
| 147 | adapter->flash_status = compl_status; | 153 | adapter->flash_status = compl_status; |
| 148 | complete(&adapter->flash_compl); | 154 | complete(&adapter->et_cmd_compl); |
| 149 | } | 155 | } |
| 150 | 156 | ||
| 151 | if (compl_status == MCC_STATUS_SUCCESS) { | 157 | if (compl_status == MCC_STATUS_SUCCESS) { |
| @@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, | |||
| 2017 | 0x3ea83c02, 0x4a110304}; | 2023 | 0x3ea83c02, 0x4a110304}; |
| 2018 | int status; | 2024 | int status; |
| 2019 | 2025 | ||
| 2026 | if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) | ||
| 2027 | return 0; | ||
| 2028 | |||
| 2020 | if (mutex_lock_interruptible(&adapter->mbox_lock)) | 2029 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
| 2021 | return -1; | 2030 | return -1; |
| 2022 | 2031 | ||
| @@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, | |||
| 2160 | be_mcc_notify(adapter); | 2169 | be_mcc_notify(adapter); |
| 2161 | spin_unlock_bh(&adapter->mcc_lock); | 2170 | spin_unlock_bh(&adapter->mcc_lock); |
| 2162 | 2171 | ||
| 2163 | if (!wait_for_completion_timeout(&adapter->flash_compl, | 2172 | if (!wait_for_completion_timeout(&adapter->et_cmd_compl, |
| 2164 | msecs_to_jiffies(60000))) | 2173 | msecs_to_jiffies(60000))) |
| 2165 | status = -1; | 2174 | status = -1; |
| 2166 | else | 2175 | else |
| @@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, | |||
| 2255 | be_mcc_notify(adapter); | 2264 | be_mcc_notify(adapter); |
| 2256 | spin_unlock_bh(&adapter->mcc_lock); | 2265 | spin_unlock_bh(&adapter->mcc_lock); |
| 2257 | 2266 | ||
| 2258 | if (!wait_for_completion_timeout(&adapter->flash_compl, | 2267 | if (!wait_for_completion_timeout(&adapter->et_cmd_compl, |
| 2259 | msecs_to_jiffies(40000))) | 2268 | msecs_to_jiffies(40000))) |
| 2260 | status = -1; | 2269 | status = -1; |
| 2261 | else | 2270 | else |
| 2262 | status = adapter->flash_status; | 2271 | status = adapter->flash_status; |
| @@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
| 2367 | { | 2376 | { |
| 2368 | struct be_mcc_wrb *wrb; | 2377 | struct be_mcc_wrb *wrb; |
| 2369 | struct be_cmd_req_loopback_test *req; | 2378 | struct be_cmd_req_loopback_test *req; |
| 2379 | struct be_cmd_resp_loopback_test *resp; | ||
| 2370 | int status; | 2380 | int status; |
| 2371 | 2381 | ||
| 2372 | spin_lock_bh(&adapter->mcc_lock); | 2382 | spin_lock_bh(&adapter->mcc_lock); |
| @@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
| 2381 | 2391 | ||
| 2382 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, | 2392 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, |
| 2383 | OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); | 2393 | OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); |
| 2384 | req->hdr.timeout = cpu_to_le32(4); | ||
| 2385 | 2394 | ||
| 2395 | req->hdr.timeout = cpu_to_le32(15); | ||
| 2386 | req->pattern = cpu_to_le64(pattern); | 2396 | req->pattern = cpu_to_le64(pattern); |
| 2387 | req->src_port = cpu_to_le32(port_num); | 2397 | req->src_port = cpu_to_le32(port_num); |
| 2388 | req->dest_port = cpu_to_le32(port_num); | 2398 | req->dest_port = cpu_to_le32(port_num); |
| @@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
| 2390 | req->num_pkts = cpu_to_le32(num_pkts); | 2400 | req->num_pkts = cpu_to_le32(num_pkts); |
| 2391 | req->loopback_type = cpu_to_le32(loopback_type); | 2401 | req->loopback_type = cpu_to_le32(loopback_type); |
| 2392 | 2402 | ||
| 2393 | status = be_mcc_notify_wait(adapter); | 2403 | be_mcc_notify(adapter); |
| 2394 | if (!status) { | 2404 | |
| 2395 | struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); | 2405 | spin_unlock_bh(&adapter->mcc_lock); |
| 2396 | status = le32_to_cpu(resp->status); | ||
| 2397 | } | ||
| 2398 | 2406 | ||
| 2407 | wait_for_completion(&adapter->et_cmd_compl); | ||
| 2408 | resp = embedded_payload(wrb); | ||
| 2409 | status = le32_to_cpu(resp->status); | ||
| 2410 | |||
| 2411 | return status; | ||
| 2399 | err: | 2412 | err: |
| 2400 | spin_unlock_bh(&adapter->mcc_lock); | 2413 | spin_unlock_bh(&adapter->mcc_lock); |
| 2401 | return status; | 2414 | return status; |
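
The loopback-test path above no longer polls for the MCC response with be_mcc_notify_wait(); it posts the command, drops the lock, and sleeps on et_cmd_compl, which be_mcc_compl_process() now completes when the LOWLEVEL loopback response arrives. A userspace sketch of that completion hand-off, using pthreads as a stand-in for the kernel's struct completion (the one-second sleep is an arbitrary placeholder for the firmware finishing the test):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace stand-in for the kernel completion the patch introduces. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion et_cmd_compl;

static void *mcc_irq_thread(void *arg)
{
	sleep(1);			/* firmware finishes the loopback test */
	complete(&et_cmd_compl);	/* what be_mcc_compl_process() now does */
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&et_cmd_compl);
	pthread_create(&t, NULL, mcc_irq_thread, NULL);
	/* The driver now posts the command and sleeps here instead of
	 * polling for the response with be_mcc_notify_wait().
	 */
	wait_for_completion(&et_cmd_compl);
	printf("loopback test completed\n");
	pthread_join(t, NULL);
	return 0;
}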
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0fde69d5cb6a..bf40fdaecfa3 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -2744,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter) | |||
| 2744 | if (!BEx_chip(adapter)) | 2744 | if (!BEx_chip(adapter)) |
| 2745 | adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | | 2745 | adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | |
| 2746 | RSS_ENABLE_UDP_IPV6; | 2746 | RSS_ENABLE_UDP_IPV6; |
| 2747 | } else { | ||
| 2748 | /* Disable RSS, if only default RX Q is created */ | ||
| 2749 | adapter->rss_flags = RSS_ENABLE_NONE; | ||
| 2750 | } | ||
| 2747 | 2751 | ||
| 2748 | rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, | 2752 | rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, |
| 2749 | 128); | 2753 | 128); |
| 2750 | if (rc) { | 2754 | if (rc) { |
| 2751 | adapter->rss_flags = 0; | 2755 | adapter->rss_flags = RSS_ENABLE_NONE; |
| 2752 | return rc; | 2756 | return rc; |
| 2753 | } | ||
| 2754 | } | 2757 | } |
| 2755 | 2758 | ||
| 2756 | /* First time posting */ | 2759 | /* First time posting */ |
| @@ -3124,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter, | |||
| 3124 | { | 3127 | { |
| 3125 | struct pci_dev *pdev = adapter->pdev; | 3128 | struct pci_dev *pdev = adapter->pdev; |
| 3126 | bool use_sriov = false; | 3129 | bool use_sriov = false; |
| 3130 | int max_vfs; | ||
| 3127 | 3131 | ||
| 3128 | if (BE3_chip(adapter) && sriov_want(adapter)) { | 3132 | max_vfs = pci_sriov_get_totalvfs(pdev); |
| 3129 | int max_vfs; | ||
| 3130 | 3133 | ||
| 3131 | max_vfs = pci_sriov_get_totalvfs(pdev); | 3134 | if (BE3_chip(adapter) && sriov_want(adapter)) { |
| 3132 | res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; | 3135 | res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; |
| 3133 | use_sriov = res->max_vfs; | 3136 | use_sriov = res->max_vfs; |
| 3134 | } | 3137 | } |
| @@ -3159,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter, | |||
| 3159 | BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; | 3162 | BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; |
| 3160 | res->max_rx_qs = res->max_rss_qs + 1; | 3163 | res->max_rx_qs = res->max_rss_qs + 1; |
| 3161 | 3164 | ||
| 3162 | res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; | 3165 | if (be_physfn(adapter)) |
| 3166 | res->max_evt_qs = (max_vfs > 0) ? | ||
| 3167 | BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; | ||
| 3168 | else | ||
| 3169 | res->max_evt_qs = 1; | ||
| 3163 | 3170 | ||
| 3164 | res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; | 3171 | res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; |
| 3165 | if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) | 3172 | if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) |
| @@ -4205,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter) | |||
| 4205 | spin_lock_init(&adapter->mcc_lock); | 4212 | spin_lock_init(&adapter->mcc_lock); |
| 4206 | spin_lock_init(&adapter->mcc_cq_lock); | 4213 | spin_lock_init(&adapter->mcc_cq_lock); |
| 4207 | 4214 | ||
| 4208 | init_completion(&adapter->flash_compl); | 4215 | init_completion(&adapter->et_cmd_compl); |
| 4209 | pci_save_state(adapter->pdev); | 4216 | pci_save_state(adapter->pdev); |
| 4210 | return 0; | 4217 | return 0; |
| 4211 | 4218 | ||
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index e7c8b749c5a5..50bb71c663e2 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 428 | /* If this was the last BD in the ring, start at the beginning again. */ | 428 | /* If this was the last BD in the ring, start at the beginning again. */ |
| 429 | bdp = fec_enet_get_nextdesc(bdp, fep); | 429 | bdp = fec_enet_get_nextdesc(bdp, fep); |
| 430 | 430 | ||
| 431 | skb_tx_timestamp(skb); | ||
| 432 | |||
| 431 | fep->cur_tx = bdp; | 433 | fep->cur_tx = bdp; |
| 432 | 434 | ||
| 433 | if (fep->cur_tx == fep->dirty_tx) | 435 | if (fep->cur_tx == fep->dirty_tx) |
| @@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 436 | /* Trigger transmission start */ | 438 | /* Trigger transmission start */ |
| 437 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); | 439 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); |
| 438 | 440 | ||
| 439 | skb_tx_timestamp(skb); | ||
| 440 | |||
| 441 | return NETDEV_TX_OK; | 441 | return NETDEV_TX_OK; |
| 442 | } | 442 | } |
| 443 | 443 | ||
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 895450e9bb3c..ff2d806eaef7 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c | |||
| @@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
| 718 | e1000_release_phy_80003es2lan(hw); | 718 | e1000_release_phy_80003es2lan(hw); |
| 719 | 719 | ||
| 720 | /* Disable IBIST slave mode (far-end loopback) */ | 720 | /* Disable IBIST slave mode (far-end loopback) */ |
| 721 | e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | 721 | ret_val = |
| 722 | &kum_reg_data); | 722 | e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, |
| 723 | &kum_reg_data); | ||
| 724 | if (ret_val) | ||
| 725 | return ret_val; | ||
| 723 | kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; | 726 | kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; |
| 724 | e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | 727 | e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, |
| 725 | kum_reg_data); | 728 | kum_reg_data); |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8d3945ab7334..c30d41d6e426 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
| @@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
| 6174 | return 0; | 6174 | return 0; |
| 6175 | } | 6175 | } |
| 6176 | 6176 | ||
| 6177 | #ifdef CONFIG_PM_SLEEP | 6177 | #ifdef CONFIG_PM |
| 6178 | static int e1000_suspend(struct device *dev) | 6178 | static int e1000_suspend(struct device *dev) |
| 6179 | { | 6179 | { |
| 6180 | struct pci_dev *pdev = to_pci_dev(dev); | 6180 | struct pci_dev *pdev = to_pci_dev(dev); |
| @@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev) | |||
| 6193 | 6193 | ||
| 6194 | return __e1000_resume(pdev); | 6194 | return __e1000_resume(pdev); |
| 6195 | } | 6195 | } |
| 6196 | #endif /* CONFIG_PM_SLEEP */ | 6196 | #endif /* CONFIG_PM */ |
| 6197 | 6197 | ||
| 6198 | #ifdef CONFIG_PM_RUNTIME | 6198 | #ifdef CONFIG_PM_RUNTIME |
| 6199 | static int e1000_runtime_suspend(struct device *dev) | 6199 | static int e1000_runtime_suspend(struct device *dev) |
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index da2be59505c0..20e71f4ca426 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c | |||
| @@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, | |||
| 1757 | * it across the board. | 1757 | * it across the board. |
| 1758 | */ | 1758 | */ |
| 1759 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | 1759 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); |
| 1760 | if (ret_val) | 1760 | if (ret_val) { |
| 1761 | /* If the first read fails, another entity may have | 1761 | /* If the first read fails, another entity may have |
| 1762 | * ownership of the resources, wait and try again to | 1762 | * ownership of the resources, wait and try again to |
| 1763 | * see if they have relinquished the resources yet. | 1763 | * see if they have relinquished the resources yet. |
| 1764 | */ | 1764 | */ |
| 1765 | udelay(usec_interval); | 1765 | if (usec_interval >= 1000) |
| 1766 | msleep(usec_interval / 1000); | ||
| 1767 | else | ||
| 1768 | udelay(usec_interval); | ||
| 1769 | } | ||
| 1766 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | 1770 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); |
| 1767 | if (ret_val) | 1771 | if (ret_val) |
| 1768 | break; | 1772 | break; |
| 1769 | if (phy_status & BMSR_LSTATUS) | 1773 | if (phy_status & BMSR_LSTATUS) |
| 1770 | break; | 1774 | break; |
| 1771 | if (usec_interval >= 1000) | 1775 | if (usec_interval >= 1000) |
| 1772 | mdelay(usec_interval / 1000); | 1776 | msleep(usec_interval / 1000); |
| 1773 | else | 1777 | else |
| 1774 | udelay(usec_interval); | 1778 | udelay(usec_interval); |
| 1775 | } | 1779 | } |
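
The PHY polling fix above stops busy-waiting for long intervals: when the caller asks for a poll interval of 1 ms or more, both the first-read retry and the loop body now sleep with msleep() instead of spinning in udelay()/mdelay(). A trivial sketch of that granularity decision, where msleep() and udelay() are printf stubs rather than the kernel primitives:

#include <stdio.h>

static void msleep(unsigned int ms) { printf("msleep(%u)\n", ms); }
static void udelay(unsigned int us) { printf("udelay(%u)\n", us); }

/* Sleep for one polling interval, picking the primitive by granularity. */
static void poll_interval_delay(unsigned int usec_interval)
{
	if (usec_interval >= 1000)
		msleep(usec_interval / 1000);	/* long interval: allow sleeping */
	else
		udelay(usec_interval);		/* short interval: brief busy-wait */
}

int main(void)
{
	poll_interval_delay(100);	/* -> udelay(100) */
	poll_interval_delay(10000);	/* -> msleep(10) */
	return 0;
}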
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cc06854296a3..5bcc870f8367 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) | |||
| 6827 | return __ixgbe_maybe_stop_tx(tx_ring, size); | 6827 | return __ixgbe_maybe_stop_tx(tx_ring, size); |
| 6828 | } | 6828 | } |
| 6829 | 6829 | ||
| 6830 | #ifdef IXGBE_FCOE | 6830 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 6831 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | 6831 | void *accel_priv) |
| 6832 | { | 6832 | { |
| 6833 | struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; | ||
| 6834 | #ifdef IXGBE_FCOE | ||
| 6833 | struct ixgbe_adapter *adapter; | 6835 | struct ixgbe_adapter *adapter; |
| 6834 | struct ixgbe_ring_feature *f; | 6836 | struct ixgbe_ring_feature *f; |
| 6835 | int txq; | 6837 | int txq; |
| 6838 | #endif | ||
| 6839 | |||
| 6840 | if (fwd_adapter) | ||
| 6841 | return skb->queue_mapping + fwd_adapter->tx_base_queue; | ||
| 6842 | |||
| 6843 | #ifdef IXGBE_FCOE | ||
| 6836 | 6844 | ||
| 6837 | /* | 6845 | /* |
| 6838 | * only execute the code below if protocol is FCoE | 6846 | * only execute the code below if protocol is FCoE |
| @@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
| 6858 | txq -= f->indices; | 6866 | txq -= f->indices; |
| 6859 | 6867 | ||
| 6860 | return txq + f->offset; | 6868 | return txq + f->offset; |
| 6869 | #else | ||
| 6870 | return __netdev_pick_tx(dev, skb); | ||
| 6871 | #endif | ||
| 6861 | } | 6872 | } |
| 6862 | 6873 | ||
| 6863 | #endif | ||
| 6864 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | 6874 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, |
| 6865 | struct ixgbe_adapter *adapter, | 6875 | struct ixgbe_adapter *adapter, |
| 6866 | struct ixgbe_ring *tx_ring) | 6876 | struct ixgbe_ring *tx_ring) |
| @@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) | |||
| 7629 | kfree(fwd_adapter); | 7639 | kfree(fwd_adapter); |
| 7630 | } | 7640 | } |
| 7631 | 7641 | ||
| 7632 | static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb, | ||
| 7633 | struct net_device *dev, | ||
| 7634 | void *priv) | ||
| 7635 | { | ||
| 7636 | struct ixgbe_fwd_adapter *fwd_adapter = priv; | ||
| 7637 | unsigned int queue; | ||
| 7638 | struct ixgbe_ring *tx_ring; | ||
| 7639 | |||
| 7640 | queue = skb->queue_mapping + fwd_adapter->tx_base_queue; | ||
| 7641 | tx_ring = fwd_adapter->real_adapter->tx_ring[queue]; | ||
| 7642 | |||
| 7643 | return __ixgbe_xmit_frame(skb, dev, tx_ring); | ||
| 7644 | } | ||
| 7645 | |||
| 7646 | static const struct net_device_ops ixgbe_netdev_ops = { | 7642 | static const struct net_device_ops ixgbe_netdev_ops = { |
| 7647 | .ndo_open = ixgbe_open, | 7643 | .ndo_open = ixgbe_open, |
| 7648 | .ndo_stop = ixgbe_close, | 7644 | .ndo_stop = ixgbe_close, |
| 7649 | .ndo_start_xmit = ixgbe_xmit_frame, | 7645 | .ndo_start_xmit = ixgbe_xmit_frame, |
| 7650 | #ifdef IXGBE_FCOE | ||
| 7651 | .ndo_select_queue = ixgbe_select_queue, | 7646 | .ndo_select_queue = ixgbe_select_queue, |
| 7652 | #endif | ||
| 7653 | .ndo_set_rx_mode = ixgbe_set_rx_mode, | 7647 | .ndo_set_rx_mode = ixgbe_set_rx_mode, |
| 7654 | .ndo_validate_addr = eth_validate_addr, | 7648 | .ndo_validate_addr = eth_validate_addr, |
| 7655 | .ndo_set_mac_address = ixgbe_set_mac, | 7649 | .ndo_set_mac_address = ixgbe_set_mac, |
| @@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
| 7689 | .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, | 7683 | .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, |
| 7690 | .ndo_dfwd_add_station = ixgbe_fwd_add, | 7684 | .ndo_dfwd_add_station = ixgbe_fwd_add, |
| 7691 | .ndo_dfwd_del_station = ixgbe_fwd_del, | 7685 | .ndo_dfwd_del_station = ixgbe_fwd_del, |
| 7692 | .ndo_dfwd_start_xmit = ixgbe_fwd_xmit, | ||
| 7693 | }; | 7686 | }; |
| 7694 | 7687 | ||
| 7695 | /** | 7688 | /** |
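
The ixgbe change folds the macvlan-offload transmit path into .ndo_select_queue: with the new accel_priv argument, the driver remaps offloaded traffic onto the forwarding adapter's queue range and can drop its private ndo_dfwd_start_xmit() hook. A simplified userspace sketch of that selection logic, with stand-in types and a hypothetical base queue of 16:

#include <stdio.h>

/* Simplified stand-ins, not the real kernel structures. */
struct fwd_adapter {
	unsigned int tx_base_queue;
};

struct sk_buff {
	unsigned int queue_mapping;
};

static unsigned int netdev_pick_tx(const struct sk_buff *skb)
{
	return skb->queue_mapping % 4;	/* pretend four regular Tx queues */
}

/* The offload path no longer needs its own transmit hook: select_queue
 * sees accel_priv and remaps onto the forwarding adapter's queue range.
 */
static unsigned int select_queue(const struct sk_buff *skb, void *accel_priv)
{
	struct fwd_adapter *fwd = accel_priv;

	if (fwd)
		return skb->queue_mapping + fwd->tx_base_queue;
	return netdev_pick_tx(skb);
}

int main(void)
{
	struct fwd_adapter fwd = { .tx_base_queue = 16 };
	struct sk_buff skb = { .queue_mapping = 2 };

	printf("regular: queue %u\n", select_queue(&skb, NULL));	/* 2 */
	printf("offload: queue %u\n", select_queue(&skb, &fwd));	/* 18 */
	return 0;
}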
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d6f0c0d8cf11..72084f70adbb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
| @@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) | |||
| 291 | { | 291 | { |
| 292 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); | 292 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); |
| 293 | int err; | 293 | int err; |
| 294 | #ifdef CONFIG_PCI_IOV | ||
| 294 | u32 current_flags = adapter->flags; | 295 | u32 current_flags = adapter->flags; |
| 296 | #endif | ||
| 295 | 297 | ||
| 296 | err = ixgbe_disable_sriov(adapter); | 298 | err = ixgbe_disable_sriov(adapter); |
| 297 | 299 | ||
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6a6c1f76d8e0..ec94a20d7099 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c | |||
| @@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev) | |||
| 619 | } | 619 | } |
| 620 | 620 | ||
| 621 | static u16 | 621 | static u16 |
| 622 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) | 622 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 623 | void *accel_priv) | ||
| 623 | { | 624 | { |
| 624 | /* we are currently only using the first queue */ | 625 | /* we are currently only using the first queue */ |
| 625 | return 0; | 626 | return 0; |
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 7354960b583b..c4eeb69a5bee 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c | |||
| @@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus) | |||
| 92 | if (time_is_before_jiffies(end)) | 92 | if (time_is_before_jiffies(end)) |
| 93 | ++timedout; | 93 | ++timedout; |
| 94 | } else { | 94 | } else { |
| 95 | /* wait_event_timeout does not guarantee a delay of at | ||
| 96 | * least one whole jiffy, so timeout must be no less | ||
| 97 | * than two. | ||
| 98 | */ | ||
| 99 | if (timeout < 2) | ||
| 100 | timeout = 2; | ||
| 95 | wait_event_timeout(dev->smi_busy_wait, | 101 | wait_event_timeout(dev->smi_busy_wait, |
| 96 | orion_mdio_smi_is_done(dev), | 102 | orion_mdio_smi_is_done(dev), |
| 97 | timeout); | 103 | timeout); |
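
The mvmdio fix clamps the wait_event_timeout() argument because a one-jiffy timeout can expire almost immediately. A one-line illustration of the clamp, with a hypothetical one-jiffy timeout constant:

#include <stdio.h>

#define MVMDIO_SMI_TIMEOUT 1	/* hypothetical value, in jiffies */

int main(void)
{
	unsigned long timeout = MVMDIO_SMI_TIMEOUT;

	/* wait_event_timeout() may return after less than one full jiffy
	 * when asked for 1, so guarantee at least two.
	 */
	if (timeout < 2)
		timeout = 2;

	printf("effective timeout = %lu jiffies\n", timeout);
	return 0;
}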
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f54ebd5a1702..a7fcd593b2db 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
| @@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk | |||
| 592 | } | 592 | } |
| 593 | } | 593 | } |
| 594 | 594 | ||
| 595 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) | 595 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 596 | void *accel_priv) | ||
| 596 | { | 597 | { |
| 597 | struct mlx4_en_priv *priv = netdev_priv(dev); | 598 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 598 | u16 rings_p_up = priv->num_tx_rings_p_up; | 599 | u16 rings_p_up = priv->num_tx_rings_p_up; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f3758de59c05..d5758adceaa2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
| @@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | |||
| 714 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 714 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); |
| 715 | 715 | ||
| 716 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); | 716 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); |
| 717 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); | 717 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 718 | void *accel_priv); | ||
| 718 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); | 719 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); |
| 719 | 720 | ||
| 720 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | 721 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 7692dfd4f262..cc68657f0536 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | |||
| @@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter, | |||
| 1604 | u32 seq_number; | 1604 | u32 seq_number; |
| 1605 | u8 vhdr_len = 0; | 1605 | u8 vhdr_len = 0; |
| 1606 | 1606 | ||
| 1607 | if (unlikely(ring > adapter->max_rds_rings)) | 1607 | if (unlikely(ring >= adapter->max_rds_rings)) |
| 1608 | return NULL; | 1608 | return NULL; |
| 1609 | 1609 | ||
| 1610 | rds_ring = &recv_ctx->rds_rings[ring]; | 1610 | rds_ring = &recv_ctx->rds_rings[ring]; |
| 1611 | 1611 | ||
| 1612 | index = netxen_get_lro_sts_refhandle(sts_data0); | 1612 | index = netxen_get_lro_sts_refhandle(sts_data0); |
| 1613 | if (unlikely(index > rds_ring->num_desc)) | 1613 | if (unlikely(index >= rds_ring->num_desc)) |
| 1614 | return NULL; | 1614 | return NULL; |
| 1615 | 1615 | ||
| 1616 | buffer = &rds_ring->rx_buf_arr[index]; | 1616 | buffer = &rds_ring->rx_buf_arr[index]; |
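
Both netxen checks above were off by one: for a table of N entries the valid indexes are 0..N-1, so the comparison must be >=, not >. A minimal demonstration of the corrected bound, with a made-up descriptor count:

#include <stdio.h>

#define NUM_DESC 4

static int rx_buf[NUM_DESC];

/* Valid indexes are 0..NUM_DESC-1, so index == NUM_DESC must be rejected;
 * the old "index > NUM_DESC" test let it through and read past the end.
 */
static int *get_buffer(unsigned int index)
{
	if (index >= NUM_DESC)
		return NULL;
	return &rx_buf[index];
}

int main(void)
{
	printf("index 3: %s\n", get_buffer(3) ? "ok" : "rejected");
	printf("index 4: %s\n", get_buffer(4) ? "ok" : "rejected");
	return 0;
}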
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 631ea0ac1cd8..f2a7c7166e24 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
| @@ -487,6 +487,7 @@ struct qlcnic_hardware_context { | |||
| 487 | struct qlcnic_mailbox *mailbox; | 487 | struct qlcnic_mailbox *mailbox; |
| 488 | u8 extend_lb_time; | 488 | u8 extend_lb_time; |
| 489 | u8 phys_port_id[ETH_ALEN]; | 489 | u8 phys_port_id[ETH_ALEN]; |
| 490 | u8 lb_mode; | ||
| 490 | }; | 491 | }; |
| 491 | 492 | ||
| 492 | struct qlcnic_adapter_stats { | 493 | struct qlcnic_adapter_stats { |
| @@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring { | |||
| 578 | dma_addr_t phys_addr; | 579 | dma_addr_t phys_addr; |
| 579 | dma_addr_t hw_cons_phys_addr; | 580 | dma_addr_t hw_cons_phys_addr; |
| 580 | struct netdev_queue *txq; | 581 | struct netdev_queue *txq; |
| 582 | /* Lock to protect Tx descriptors cleanup */ | ||
| 583 | spinlock_t tx_clean_lock; | ||
| 581 | } ____cacheline_internodealigned_in_smp; | 584 | } ____cacheline_internodealigned_in_smp; |
| 582 | 585 | ||
| 583 | /* | 586 | /* |
| @@ -808,6 +811,7 @@ struct qlcnic_mac_list_s { | |||
| 808 | 811 | ||
| 809 | #define QLCNIC_ILB_MODE 0x1 | 812 | #define QLCNIC_ILB_MODE 0x1 |
| 810 | #define QLCNIC_ELB_MODE 0x2 | 813 | #define QLCNIC_ELB_MODE 0x2 |
| 814 | #define QLCNIC_LB_MODE_MASK 0x3 | ||
| 811 | 815 | ||
| 812 | #define QLCNIC_LINKEVENT 0x1 | 816 | #define QLCNIC_LINKEVENT 0x1 |
| 813 | #define QLCNIC_LB_RESPONSE 0x2 | 817 | #define QLCNIC_LB_RESPONSE 0x2 |
| @@ -1093,7 +1097,6 @@ struct qlcnic_adapter { | |||
| 1093 | struct qlcnic_filter_hash rx_fhash; | 1097 | struct qlcnic_filter_hash rx_fhash; |
| 1094 | struct list_head vf_mc_list; | 1098 | struct list_head vf_mc_list; |
| 1095 | 1099 | ||
| 1096 | spinlock_t tx_clean_lock; | ||
| 1097 | spinlock_t mac_learn_lock; | 1100 | spinlock_t mac_learn_lock; |
| 1098 | /* spinlock for catching rcv filters for eswitch traffic */ | 1101 | /* spinlock for catching rcv filters for eswitch traffic */ |
| 1099 | spinlock_t rx_mac_learn_lock; | 1102 | spinlock_t rx_mac_learn_lock; |
| @@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *); | |||
| 1708 | void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); | 1711 | void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); |
| 1709 | void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); | 1712 | void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); |
| 1710 | void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); | 1713 | void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); |
| 1714 | void qlcnic_update_stats(struct qlcnic_adapter *); | ||
| 1711 | 1715 | ||
| 1712 | /* Adapter hardware abstraction */ | 1716 | /* Adapter hardware abstraction */ |
| 1713 | struct qlcnic_hardware_ops { | 1717 | struct qlcnic_hardware_ops { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index b1cb0ffb15c7..f776f99f7915 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
| @@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data) | |||
| 447 | 447 | ||
| 448 | qlcnic_83xx_poll_process_aen(adapter); | 448 | qlcnic_83xx_poll_process_aen(adapter); |
| 449 | 449 | ||
| 450 | if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { | 450 | if (ahw->diag_test) { |
| 451 | ahw->diag_cnt++; | 451 | if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) |
| 452 | ahw->diag_cnt++; | ||
| 452 | qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); | 453 | qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); |
| 453 | return IRQ_HANDLED; | 454 | return IRQ_HANDLED; |
| 454 | } | 455 | } |
| @@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, | |||
| 1345 | } | 1346 | } |
| 1346 | 1347 | ||
| 1347 | if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { | 1348 | if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { |
| 1348 | /* disable and free mailbox interrupt */ | ||
| 1349 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { | ||
| 1350 | qlcnic_83xx_enable_mbx_poll(adapter); | ||
| 1351 | qlcnic_83xx_free_mbx_intr(adapter); | ||
| 1352 | } | ||
| 1353 | adapter->ahw->loopback_state = 0; | 1349 | adapter->ahw->loopback_state = 0; |
| 1354 | adapter->ahw->hw_ops->setup_link_event(adapter, 1); | 1350 | adapter->ahw->hw_ops->setup_link_event(adapter, 1); |
| 1355 | } | 1351 | } |
| @@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, | |||
| 1363 | { | 1359 | { |
| 1364 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1360 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
| 1365 | struct qlcnic_host_sds_ring *sds_ring; | 1361 | struct qlcnic_host_sds_ring *sds_ring; |
| 1366 | int ring, err; | 1362 | int ring; |
| 1367 | 1363 | ||
| 1368 | clear_bit(__QLCNIC_DEV_UP, &adapter->state); | 1364 | clear_bit(__QLCNIC_DEV_UP, &adapter->state); |
| 1369 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { | 1365 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { |
| 1370 | for (ring = 0; ring < adapter->drv_sds_rings; ring++) { | 1366 | for (ring = 0; ring < adapter->drv_sds_rings; ring++) { |
| 1371 | sds_ring = &adapter->recv_ctx->sds_rings[ring]; | 1367 | sds_ring = &adapter->recv_ctx->sds_rings[ring]; |
| 1372 | qlcnic_83xx_disable_intr(adapter, sds_ring); | 1368 | if (adapter->flags & QLCNIC_MSIX_ENABLED) |
| 1373 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) | 1369 | qlcnic_83xx_disable_intr(adapter, sds_ring); |
| 1374 | qlcnic_83xx_enable_mbx_poll(adapter); | ||
| 1375 | } | 1370 | } |
| 1376 | } | 1371 | } |
| 1377 | 1372 | ||
| 1378 | qlcnic_fw_destroy_ctx(adapter); | 1373 | qlcnic_fw_destroy_ctx(adapter); |
| 1379 | qlcnic_detach(adapter); | 1374 | qlcnic_detach(adapter); |
| 1380 | 1375 | ||
| 1381 | if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { | ||
| 1382 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { | ||
| 1383 | err = qlcnic_83xx_setup_mbx_intr(adapter); | ||
| 1384 | qlcnic_83xx_disable_mbx_poll(adapter); | ||
| 1385 | if (err) { | ||
| 1386 | dev_err(&adapter->pdev->dev, | ||
| 1387 | "%s: failed to setup mbx interrupt\n", | ||
| 1388 | __func__); | ||
| 1389 | goto out; | ||
| 1390 | } | ||
| 1391 | } | ||
| 1392 | } | ||
| 1393 | adapter->ahw->diag_test = 0; | 1376 | adapter->ahw->diag_test = 0; |
| 1394 | adapter->drv_sds_rings = drv_sds_rings; | 1377 | adapter->drv_sds_rings = drv_sds_rings; |
| 1395 | 1378 | ||
| @@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, | |||
| 1399 | if (netif_running(netdev)) | 1382 | if (netif_running(netdev)) |
| 1400 | __qlcnic_up(adapter, netdev); | 1383 | __qlcnic_up(adapter, netdev); |
| 1401 | 1384 | ||
| 1402 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST && | ||
| 1403 | !(adapter->flags & QLCNIC_MSIX_ENABLED)) | ||
| 1404 | qlcnic_83xx_disable_mbx_poll(adapter); | ||
| 1405 | out: | 1385 | out: |
| 1406 | netif_device_attach(netdev); | 1386 | netif_device_attach(netdev); |
| 1407 | } | 1387 | } |
| @@ -1704,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) | |||
| 1704 | } | 1684 | } |
| 1705 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); | 1685 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); |
| 1706 | 1686 | ||
| 1707 | /* Make sure carrier is off and queue is stopped during loopback */ | ||
| 1708 | if (netif_running(netdev)) { | ||
| 1709 | netif_carrier_off(netdev); | ||
| 1710 | netif_tx_stop_all_queues(netdev); | ||
| 1711 | } | ||
| 1712 | |||
| 1713 | ret = qlcnic_do_lb_test(adapter, mode); | 1687 | ret = qlcnic_do_lb_test(adapter, mode); |
| 1714 | 1688 | ||
| 1715 | qlcnic_83xx_clear_lb_mode(adapter, mode); | 1689 | qlcnic_83xx_clear_lb_mode(adapter, mode); |
| @@ -2141,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, | |||
| 2141 | ahw->link_autoneg = MSB(MSW(data[3])); | 2115 | ahw->link_autoneg = MSB(MSW(data[3])); |
| 2142 | ahw->module_type = MSB(LSW(data[3])); | 2116 | ahw->module_type = MSB(LSW(data[3])); |
| 2143 | ahw->has_link_events = 1; | 2117 | ahw->has_link_events = 1; |
| 2118 | ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK; | ||
| 2144 | qlcnic_advert_link_change(adapter, link_status); | 2119 | qlcnic_advert_link_change(adapter, link_status); |
| 2145 | } | 2120 | } |
| 2146 | 2121 | ||
| @@ -3754,6 +3729,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter, | |||
| 3754 | return; | 3729 | return; |
| 3755 | } | 3730 | } |
| 3756 | 3731 | ||
| 3732 | static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter) | ||
| 3733 | { | ||
| 3734 | struct qlcnic_hardware_context *ahw = adapter->ahw; | ||
| 3735 | u32 offset; | ||
| 3736 | |||
| 3737 | offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); | ||
| 3738 | dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x", | ||
| 3739 | readl(ahw->pci_base0 + offset), | ||
| 3740 | QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL), | ||
| 3741 | QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL), | ||
| 3742 | QLCRDX(ahw, QLCNIC_FW_MBX_CTRL)); | ||
| 3743 | } | ||
| 3744 | |||
| 3757 | static void qlcnic_83xx_mailbox_worker(struct work_struct *work) | 3745 | static void qlcnic_83xx_mailbox_worker(struct work_struct *work) |
| 3758 | { | 3746 | { |
| 3759 | struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, | 3747 | struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, |
| @@ -3798,6 +3786,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) | |||
| 3798 | __func__, cmd->cmd_op, cmd->type, ahw->pci_func, | 3786 | __func__, cmd->cmd_op, cmd->type, ahw->pci_func, |
| 3799 | ahw->op_mode); | 3787 | ahw->op_mode); |
| 3800 | clear_bit(QLC_83XX_MBX_READY, &mbx->status); | 3788 | clear_bit(QLC_83XX_MBX_READY, &mbx->status); |
| 3789 | qlcnic_dump_mailbox_registers(adapter); | ||
| 3790 | qlcnic_83xx_get_mbx_data(adapter, cmd); | ||
| 3801 | qlcnic_dump_mbx(adapter, cmd); | 3791 | qlcnic_dump_mbx(adapter, cmd); |
| 3802 | qlcnic_83xx_idc_request_reset(adapter, | 3792 | qlcnic_83xx_idc_request_reset(adapter, |
| 3803 | QLCNIC_FORCE_FW_DUMP_KEY); | 3793 | QLCNIC_FORCE_FW_DUMP_KEY); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 4cae6caa6bfa..a6a33508e401 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | |||
| @@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *, | |||
| 662 | pci_channel_state_t); | 662 | pci_channel_state_t); |
| 663 | pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); | 663 | pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); |
| 664 | void qlcnic_83xx_io_resume(struct pci_dev *); | 664 | void qlcnic_83xx_io_resume(struct pci_dev *); |
| 665 | void qlcnic_83xx_stop_hw(struct qlcnic_adapter *); | ||
| 665 | #endif | 666 | #endif |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 89208e5b25d6..918e18ddf038 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
| @@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter) | |||
| 740 | adapter->ahw->idc.err_code = -EIO; | 740 | adapter->ahw->idc.err_code = -EIO; |
| 741 | dev_err(&adapter->pdev->dev, | 741 | dev_err(&adapter->pdev->dev, |
| 742 | "%s: Device in unknown state\n", __func__); | 742 | "%s: Device in unknown state\n", __func__); |
| 743 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | ||
| 743 | return 0; | 744 | return 0; |
| 744 | } | 745 | } |
| 745 | 746 | ||
| @@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) | |||
| 818 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 819 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
| 819 | struct qlcnic_mailbox *mbx = ahw->mailbox; | 820 | struct qlcnic_mailbox *mbx = ahw->mailbox; |
| 820 | int ret = 0; | 821 | int ret = 0; |
| 821 | u32 owner; | ||
| 822 | u32 val; | 822 | u32 val; |
| 823 | 823 | ||
| 824 | /* Perform NIC configuration based ready state entry actions */ | 824 | /* Perform NIC configuration based ready state entry actions */ |
| @@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) | |||
| 848 | set_bit(__QLCNIC_RESETTING, &adapter->state); | 848 | set_bit(__QLCNIC_RESETTING, &adapter->state); |
| 849 | qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); | 849 | qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); |
| 850 | } else { | 850 | } else { |
| 851 | owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); | 851 | netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", |
| 852 | if (ahw->pci_func == owner) | 852 | __func__); |
| 853 | qlcnic_dump_fw(adapter); | 853 | qlcnic_83xx_idc_enter_failed_state(adapter, 1); |
| 854 | } | 854 | } |
| 855 | return -EIO; | 855 | return -EIO; |
| 856 | } | 856 | } |
| @@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) | |||
| 948 | return 0; | 948 | return 0; |
| 949 | } | 949 | } |
| 950 | 950 | ||
| 951 | static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) | 951 | static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) |
| 952 | { | 952 | { |
| 953 | dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); | 953 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
| 954 | u32 val, owner; | ||
| 955 | |||
| 956 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); | ||
| 957 | if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { | ||
| 958 | owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); | ||
| 959 | if (ahw->pci_func == owner) { | ||
| 960 | qlcnic_83xx_stop_hw(adapter); | ||
| 961 | qlcnic_dump_fw(adapter); | ||
| 962 | } | ||
| 963 | } | ||
| 964 | |||
| 965 | netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n", | ||
| 966 | __func__); | ||
| 954 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 967 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
| 955 | adapter->ahw->idc.err_code = -EIO; | 968 | ahw->idc.err_code = -EIO; |
| 956 | 969 | ||
| 957 | return 0; | 970 | return; |
| 958 | } | 971 | } |
| 959 | 972 | ||
| 960 | static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) | 973 | static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) |
| @@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) | |||
| 1063 | adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; | 1076 | adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; |
| 1064 | qlcnic_83xx_periodic_tasks(adapter); | 1077 | qlcnic_83xx_periodic_tasks(adapter); |
| 1065 | 1078 | ||
| 1066 | /* Do not reschedule if firmware is in hung state and auto | ||
| 1067 | * recovery is disabled | ||
| 1068 | */ | ||
| 1069 | if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset) | ||
| 1070 | return; | ||
| 1071 | |||
| 1072 | /* Re-schedule the function */ | 1079 | /* Re-schedule the function */ |
| 1073 | if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) | 1080 | if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) |
| 1074 | qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, | 1081 | qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, |
| @@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) | |||
| 1219 | } | 1226 | } |
| 1220 | 1227 | ||
| 1221 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); | 1228 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); |
| 1222 | if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) || | 1229 | if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { |
| 1223 | !qlcnic_auto_fw_reset) { | 1230 | netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", |
| 1224 | dev_err(&adapter->pdev->dev, | 1231 | __func__); |
| 1225 | "%s:failed, device in non reset mode\n", __func__); | 1232 | qlcnic_83xx_idc_enter_failed_state(adapter, 0); |
| 1226 | qlcnic_83xx_unlock_driver(adapter); | 1233 | qlcnic_83xx_unlock_driver(adapter); |
| 1227 | return; | 1234 | return; |
| 1228 | } | 1235 | } |
| @@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) | |||
| 1254 | if (size & 0xF) | 1261 | if (size & 0xF) |
| 1255 | size = (size + 16) & ~0xF; | 1262 | size = (size + 16) & ~0xF; |
| 1256 | 1263 | ||
| 1257 | p_cache = kzalloc(size, GFP_KERNEL); | 1264 | p_cache = vzalloc(size); |
| 1258 | if (p_cache == NULL) | 1265 | if (p_cache == NULL) |
| 1259 | return -ENOMEM; | 1266 | return -ENOMEM; |
| 1260 | 1267 | ||
| 1261 | ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, | 1268 | ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, |
| 1262 | size / sizeof(u32)); | 1269 | size / sizeof(u32)); |
| 1263 | if (ret) { | 1270 | if (ret) { |
| 1264 | kfree(p_cache); | 1271 | vfree(p_cache); |
| 1265 | return ret; | 1272 | return ret; |
| 1266 | } | 1273 | } |
| 1267 | /* 16 byte write to MS memory */ | 1274 | /* 16 byte write to MS memory */ |
| 1268 | ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, | 1275 | ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, |
| 1269 | size / 16); | 1276 | size / 16); |
| 1270 | if (ret) { | 1277 | if (ret) { |
| 1271 | kfree(p_cache); | 1278 | vfree(p_cache); |
| 1272 | return ret; | 1279 | return ret; |
| 1273 | } | 1280 | } |
| 1274 | kfree(p_cache); | 1281 | vfree(p_cache); |
| 1275 | 1282 | ||
| 1276 | return ret; | 1283 | return ret; |
| 1277 | } | 1284 | } |
| @@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, | |||
| 1939 | p_dev->ahw->reset.seq_index = index; | 1946 | p_dev->ahw->reset.seq_index = index; |
| 1940 | } | 1947 | } |
| 1941 | 1948 | ||
| 1942 | static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) | 1949 | void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) |
| 1943 | { | 1950 | { |
| 1944 | p_dev->ahw->reset.seq_index = 0; | 1951 | p_dev->ahw->reset.seq_index = 0; |
| 1945 | 1952 | ||
| @@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) | |||
| 1994 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); | 2001 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); |
| 1995 | if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) | 2002 | if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) |
| 1996 | qlcnic_dump_fw(adapter); | 2003 | qlcnic_dump_fw(adapter); |
| 2004 | |||
| 2005 | if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { | ||
| 2006 | netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", | ||
| 2007 | __func__); | ||
| 2008 | qlcnic_83xx_idc_enter_failed_state(adapter, 1); | ||
| 2009 | return err; | ||
| 2010 | } | ||
| 2011 | |||
| 1997 | qlcnic_83xx_init_hw(adapter); | 2012 | qlcnic_83xx_init_hw(adapter); |
| 1998 | 2013 | ||
| 1999 | if (qlcnic_83xx_copy_bootloader(adapter)) | 2014 | if (qlcnic_83xx_copy_bootloader(adapter)) |
| @@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) | |||
| 2073 | ahw->nic_mode = QLCNIC_DEFAULT_MODE; | 2088 | ahw->nic_mode = QLCNIC_DEFAULT_MODE; |
| 2074 | adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; | 2089 | adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; |
| 2075 | ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; | 2090 | ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; |
| 2076 | adapter->max_sds_rings = ahw->max_rx_ques; | 2091 | adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; |
| 2077 | adapter->max_tx_rings = ahw->max_tx_ques; | 2092 | adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; |
| 2078 | } else { | 2093 | } else { |
| 2079 | return -EIO; | 2094 | return -EIO; |
| 2080 | } | 2095 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index b36c02fafcfd..6b08194aa0d4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
| @@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { | |||
| 167 | 167 | ||
| 168 | #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) | 168 | #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) |
| 169 | 169 | ||
| 170 | static inline int qlcnic_82xx_statistics(void) | 170 | static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter) |
| 171 | { | 171 | { |
| 172 | return ARRAY_SIZE(qlcnic_device_gstrings_stats) + | 172 | return ARRAY_SIZE(qlcnic_gstrings_stats) + |
| 173 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); | 173 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + |
| 174 | QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; | ||
| 174 | } | 175 | } |
| 175 | 176 | ||
| 176 | static inline int qlcnic_83xx_statistics(void) | 177 | static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter) |
| 177 | { | 178 | { |
| 178 | return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + | 179 | return ARRAY_SIZE(qlcnic_gstrings_stats) + |
| 180 | ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + | ||
| 179 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + | 181 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + |
| 180 | ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); | 182 | ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) + |
| 183 | QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; | ||
| 181 | } | 184 | } |
| 182 | 185 | ||
| 183 | static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) | 186 | static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) |
| 184 | { | 187 | { |
| 185 | if (qlcnic_82xx_check(adapter)) | 188 | int len = -1; |
| 186 | return qlcnic_82xx_statistics(); | 189 | |
| 187 | else if (qlcnic_83xx_check(adapter)) | 190 | if (qlcnic_82xx_check(adapter)) { |
| 188 | return qlcnic_83xx_statistics(); | 191 | len = qlcnic_82xx_statistics(adapter); |
| 189 | else | 192 | if (adapter->flags & QLCNIC_ESWITCH_ENABLED) |
| 190 | return -1; | 193 | len += ARRAY_SIZE(qlcnic_device_gstrings_stats); |
| 194 | } else if (qlcnic_83xx_check(adapter)) { | ||
| 195 | len = qlcnic_83xx_statistics(adapter); | ||
| 196 | } | ||
| 197 | |||
| 198 | return len; | ||
| 191 | } | 199 | } |
| 192 | 200 | ||
| 193 | #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 | 201 | #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 |
| @@ -667,30 +675,25 @@ qlcnic_set_ringparam(struct net_device *dev, | |||
| 667 | static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, | 675 | static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, |
| 668 | u8 rx_ring, u8 tx_ring) | 676 | u8 rx_ring, u8 tx_ring) |
| 669 | { | 677 | { |
| 678 | if (rx_ring == 0 || tx_ring == 0) | ||
| 679 | return -EINVAL; | ||
| 680 | |||
| 670 | if (rx_ring != 0) { | 681 | if (rx_ring != 0) { |
| 671 | if (rx_ring > adapter->max_sds_rings) { | 682 | if (rx_ring > adapter->max_sds_rings) { |
| 672 | netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", | 683 | netdev_err(adapter->netdev, |
| 684 | "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", | ||
| 673 | rx_ring, adapter->max_sds_rings); | 685 | rx_ring, adapter->max_sds_rings); |
| 674 | return -EINVAL; | 686 | return -EINVAL; |
| 675 | } | 687 | } |
| 676 | } | 688 | } |
| 677 | 689 | ||
| 678 | if (tx_ring != 0) { | 690 | if (tx_ring != 0) { |
| 679 | if (qlcnic_82xx_check(adapter) && | 691 | if (tx_ring > adapter->max_tx_rings) { |
| 680 | (tx_ring > adapter->max_tx_rings)) { | ||
| 681 | netdev_err(adapter->netdev, | 692 | netdev_err(adapter->netdev, |
| 682 | "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", | 693 | "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", |
| 683 | tx_ring, adapter->max_tx_rings); | 694 | tx_ring, adapter->max_tx_rings); |
| 684 | return -EINVAL; | 695 | return -EINVAL; |
| 685 | } | 696 | } |
| 686 | |||
| 687 | if (qlcnic_83xx_check(adapter) && | ||
| 688 | (tx_ring > QLCNIC_SINGLE_RING)) { | ||
| 689 | netdev_err(adapter->netdev, | ||
| 690 | "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n", | ||
| 691 | tx_ring, QLCNIC_SINGLE_RING); | ||
| 692 | return -EINVAL; | ||
| 693 | } | ||
| 694 | } | 697 | } |
| 695 | 698 | ||
| 696 | return 0; | 699 | return 0; |
| @@ -925,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev) | |||
| 925 | 928 | ||
| 926 | static int qlcnic_get_sset_count(struct net_device *dev, int sset) | 929 | static int qlcnic_get_sset_count(struct net_device *dev, int sset) |
| 927 | { | 930 | { |
| 928 | int len; | ||
| 929 | 931 | ||
| 930 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 932 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
| 931 | switch (sset) { | 933 | switch (sset) { |
| 932 | case ETH_SS_TEST: | 934 | case ETH_SS_TEST: |
| 933 | return QLCNIC_TEST_LEN; | 935 | return QLCNIC_TEST_LEN; |
| 934 | case ETH_SS_STATS: | 936 | case ETH_SS_STATS: |
| 935 | len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; | 937 | return qlcnic_dev_statistics_len(adapter); |
| 936 | if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || | ||
| 937 | qlcnic_83xx_check(adapter)) | ||
| 938 | return len; | ||
| 939 | return qlcnic_82xx_statistics(); | ||
| 940 | default: | 938 | default: |
| 941 | return -EOPNOTSUPP; | 939 | return -EOPNOTSUPP; |
| 942 | } | 940 | } |
| @@ -948,6 +946,7 @@ static int qlcnic_irq_test(struct net_device *netdev) | |||
| 948 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 946 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
| 949 | struct qlcnic_cmd_args cmd; | 947 | struct qlcnic_cmd_args cmd; |
| 950 | int ret, drv_sds_rings = adapter->drv_sds_rings; | 948 | int ret, drv_sds_rings = adapter->drv_sds_rings; |
| 949 | int drv_tx_rings = adapter->drv_tx_rings; | ||
| 951 | 950 | ||
| 952 | if (qlcnic_83xx_check(adapter)) | 951 | if (qlcnic_83xx_check(adapter)) |
| 953 | return qlcnic_83xx_interrupt_test(netdev); | 952 | return qlcnic_83xx_interrupt_test(netdev); |
| @@ -980,6 +979,7 @@ free_diag_res: | |||
| 980 | 979 | ||
| 981 | clear_diag_irq: | 980 | clear_diag_irq: |
| 982 | adapter->drv_sds_rings = drv_sds_rings; | 981 | adapter->drv_sds_rings = drv_sds_rings; |
| 982 | adapter->drv_tx_rings = drv_tx_rings; | ||
| 983 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 983 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
| 984 | 984 | ||
| 985 | return ret; | 985 | return ret; |
| @@ -1270,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) | |||
| 1270 | return data; | 1270 | return data; |
| 1271 | } | 1271 | } |
| 1272 | 1272 | ||
| 1273 | static void qlcnic_update_stats(struct qlcnic_adapter *adapter) | 1273 | void qlcnic_update_stats(struct qlcnic_adapter *adapter) |
| 1274 | { | 1274 | { |
| 1275 | struct qlcnic_host_tx_ring *tx_ring; | 1275 | struct qlcnic_host_tx_ring *tx_ring; |
| 1276 | int ring; | 1276 | int ring; |
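
The qlcnic ethtool changes above fold the per-Tx-ring counters into the statistics length reported to ethtool, so the string table and the stats array stay in sync as drv_tx_rings varies. A minimal standalone sketch of that arithmetic follows; the array names and the QLCNIC_TX_STATS_LEN value here are placeholders, not the driver's real tables.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define QLCNIC_TX_STATS_LEN 4          /* placeholder per-ring counter count */

static const char *gstrings_stats[]    = { "rx_pkts", "tx_pkts", "rx_bytes" };
static const char *tx_stats_strings[]  = { "xmit_on", "xmit_off" };
static const char *mac_stats_strings[] = { "mac_rx_frames" };
static const char *rx_stats_strings[]  = { "lro_pkts" };

/* Mirrors the 83xx path: base stats plus one block of Tx counters per ring. */
static int stats_len_83xx(int drv_tx_rings)
{
	return ARRAY_SIZE(gstrings_stats) +
	       ARRAY_SIZE(tx_stats_strings) +
	       ARRAY_SIZE(mac_stats_strings) +
	       ARRAY_SIZE(rx_stats_strings) +
	       QLCNIC_TX_STATS_LEN * drv_tx_rings;
}

int main(void)
{
	printf("sset count with 4 Tx rings: %d\n", stats_len_83xx(4));
	return 0;
}
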
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index e9c21e5d0ca9..c4262c23ed7c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | |||
| @@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, | |||
| 134 | struct qlcnic_skb_frag *buffrag; | 134 | struct qlcnic_skb_frag *buffrag; |
| 135 | int i, j; | 135 | int i, j; |
| 136 | 136 | ||
| 137 | spin_lock(&tx_ring->tx_clean_lock); | ||
| 138 | |||
| 137 | cmd_buf = tx_ring->cmd_buf_arr; | 139 | cmd_buf = tx_ring->cmd_buf_arr; |
| 138 | for (i = 0; i < tx_ring->num_desc; i++) { | 140 | for (i = 0; i < tx_ring->num_desc; i++) { |
| 139 | buffrag = cmd_buf->frag_array; | 141 | buffrag = cmd_buf->frag_array; |
| @@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, | |||
| 157 | } | 159 | } |
| 158 | cmd_buf++; | 160 | cmd_buf++; |
| 159 | } | 161 | } |
| 162 | |||
| 163 | spin_unlock(&tx_ring->tx_clean_lock); | ||
| 160 | } | 164 | } |
| 161 | 165 | ||
| 162 | void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) | 166 | void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) |
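
The buffer-release path now takes the ring's own tx_clean_lock, matching the move elsewhere in this series from a single adapter-wide lock to a per-Tx-ring lock. Below is a userspace model of that pattern only, with a pthread spinlock standing in for the kernel spinlock (build with cc -pthread).

#include <pthread.h>
#include <stdio.h>

#define NUM_RINGS  2
#define RING_DESCS 4

struct tx_ring {
	pthread_spinlock_t tx_clean_lock;   /* stands in for spinlock_t */
	int pending[RING_DESCS];
};

static struct tx_ring rings[NUM_RINGS];

/* Release path: walk the ring under its own lock, so other rings are
 * never blocked while one ring is being drained. */
static void release_tx_buffers(struct tx_ring *ring)
{
	pthread_spin_lock(&ring->tx_clean_lock);
	for (int i = 0; i < RING_DESCS; i++)
		ring->pending[i] = 0;
	pthread_spin_unlock(&ring->tx_clean_lock);
}

int main(void)
{
	for (int r = 0; r < NUM_RINGS; r++)
		pthread_spin_init(&rings[r].tx_clean_lock, PTHREAD_PROCESS_PRIVATE);

	release_tx_buffers(&rings[0]);
	printf("ring 0 drained under its own lock\n");
	return 0;
}
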
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0149c9495347..ad1531ae3aa8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
| @@ -687,17 +687,15 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) | |||
| 687 | if (adapter->ahw->linkup && !linkup) { | 687 | if (adapter->ahw->linkup && !linkup) { |
| 688 | netdev_info(netdev, "NIC Link is down\n"); | 688 | netdev_info(netdev, "NIC Link is down\n"); |
| 689 | adapter->ahw->linkup = 0; | 689 | adapter->ahw->linkup = 0; |
| 690 | if (netif_running(netdev)) { | 690 | netif_carrier_off(netdev); |
| 691 | netif_carrier_off(netdev); | ||
| 692 | netif_tx_stop_all_queues(netdev); | ||
| 693 | } | ||
| 694 | } else if (!adapter->ahw->linkup && linkup) { | 691 | } else if (!adapter->ahw->linkup && linkup) { |
| 692 | /* Do not advertise Link up if the port is in loopback mode */ | ||
| 693 | if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) | ||
| 694 | return; | ||
| 695 | |||
| 695 | netdev_info(netdev, "NIC Link is up\n"); | 696 | netdev_info(netdev, "NIC Link is up\n"); |
| 696 | adapter->ahw->linkup = 1; | 697 | adapter->ahw->linkup = 1; |
| 697 | if (netif_running(netdev)) { | 698 | netif_carrier_on(netdev); |
| 698 | netif_carrier_on(netdev); | ||
| 699 | netif_wake_queue(netdev); | ||
| 700 | } | ||
| 701 | } | 699 | } |
| 702 | } | 700 | } |
| 703 | 701 | ||
| @@ -784,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, | |||
| 784 | struct net_device *netdev = adapter->netdev; | 782 | struct net_device *netdev = adapter->netdev; |
| 785 | struct qlcnic_skb_frag *frag; | 783 | struct qlcnic_skb_frag *frag; |
| 786 | 784 | ||
| 787 | if (!spin_trylock(&adapter->tx_clean_lock)) | 785 | if (!spin_trylock(&tx_ring->tx_clean_lock)) |
| 788 | return 1; | 786 | return 1; |
| 789 | 787 | ||
| 790 | sw_consumer = tx_ring->sw_consumer; | 788 | sw_consumer = tx_ring->sw_consumer; |
| @@ -813,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, | |||
| 813 | break; | 811 | break; |
| 814 | } | 812 | } |
| 815 | 813 | ||
| 814 | tx_ring->sw_consumer = sw_consumer; | ||
| 815 | |||
| 816 | if (count && netif_running(netdev)) { | 816 | if (count && netif_running(netdev)) { |
| 817 | tx_ring->sw_consumer = sw_consumer; | ||
| 818 | smp_mb(); | 817 | smp_mb(); |
| 819 | if (netif_tx_queue_stopped(tx_ring->txq) && | 818 | if (netif_tx_queue_stopped(tx_ring->txq) && |
| 820 | netif_carrier_ok(netdev)) { | 819 | netif_carrier_ok(netdev)) { |
| @@ -840,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, | |||
| 840 | */ | 839 | */ |
| 841 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); | 840 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); |
| 842 | done = (sw_consumer == hw_consumer); | 841 | done = (sw_consumer == hw_consumer); |
| 843 | spin_unlock(&adapter->tx_clean_lock); | 842 | |
| 843 | spin_unlock(&tx_ring->tx_clean_lock); | ||
| 844 | 844 | ||
| 845 | return done; | 845 | return done; |
| 846 | } | 846 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 05c1eef8df13..550791b8fbae 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
| @@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) | |||
| 1178 | } else { | 1178 | } else { |
| 1179 | adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; | 1179 | adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; |
| 1180 | adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; | 1180 | adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; |
| 1181 | adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; | ||
| 1181 | adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; | 1182 | adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; |
| 1182 | } | 1183 | } |
| 1183 | 1184 | ||
| @@ -1755,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) | |||
| 1755 | if (qlcnic_sriov_vf_check(adapter)) | 1756 | if (qlcnic_sriov_vf_check(adapter)) |
| 1756 | qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); | 1757 | qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); |
| 1757 | smp_mb(); | 1758 | smp_mb(); |
| 1758 | spin_lock(&adapter->tx_clean_lock); | ||
| 1759 | netif_carrier_off(netdev); | 1759 | netif_carrier_off(netdev); |
| 1760 | adapter->ahw->linkup = 0; | 1760 | adapter->ahw->linkup = 0; |
| 1761 | netif_tx_disable(netdev); | 1761 | netif_tx_disable(netdev); |
| @@ -1776,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) | |||
| 1776 | 1776 | ||
| 1777 | for (ring = 0; ring < adapter->drv_tx_rings; ring++) | 1777 | for (ring = 0; ring < adapter->drv_tx_rings; ring++) |
| 1778 | qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); | 1778 | qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); |
| 1779 | spin_unlock(&adapter->tx_clean_lock); | ||
| 1780 | } | 1779 | } |
| 1781 | 1780 | ||
| 1782 | /* Usage: During suspend and firmware recovery module */ | 1781 | /* Usage: During suspend and firmware recovery module */ |
| @@ -1940,7 +1939,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) | |||
| 1940 | qlcnic_detach(adapter); | 1939 | qlcnic_detach(adapter); |
| 1941 | 1940 | ||
| 1942 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; | 1941 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; |
| 1943 | adapter->drv_tx_rings = QLCNIC_SINGLE_RING; | ||
| 1944 | adapter->ahw->diag_test = test; | 1942 | adapter->ahw->diag_test = test; |
| 1945 | adapter->ahw->linkup = 0; | 1943 | adapter->ahw->linkup = 0; |
| 1946 | 1944 | ||
| @@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, | |||
| 2172 | } | 2170 | } |
| 2173 | memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); | 2171 | memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); |
| 2174 | tx_ring->cmd_buf_arr = cmd_buf_arr; | 2172 | tx_ring->cmd_buf_arr = cmd_buf_arr; |
| 2173 | spin_lock_init(&tx_ring->tx_clean_lock); | ||
| 2175 | } | 2174 | } |
| 2176 | 2175 | ||
| 2177 | if (qlcnic_83xx_check(adapter) || | 2176 | if (qlcnic_83xx_check(adapter) || |
| @@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2299 | rwlock_init(&adapter->ahw->crb_lock); | 2298 | rwlock_init(&adapter->ahw->crb_lock); |
| 2300 | mutex_init(&adapter->ahw->mem_lock); | 2299 | mutex_init(&adapter->ahw->mem_lock); |
| 2301 | 2300 | ||
| 2302 | spin_lock_init(&adapter->tx_clean_lock); | ||
| 2303 | INIT_LIST_HEAD(&adapter->mac_list); | 2301 | INIT_LIST_HEAD(&adapter->mac_list); |
| 2304 | 2302 | ||
| 2305 | qlcnic_register_dcb(adapter); | 2303 | qlcnic_register_dcb(adapter); |
| @@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) | |||
| 2782 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 2780 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
| 2783 | struct net_device_stats *stats = &netdev->stats; | 2781 | struct net_device_stats *stats = &netdev->stats; |
| 2784 | 2782 | ||
| 2783 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) | ||
| 2784 | qlcnic_update_stats(adapter); | ||
| 2785 | |||
| 2785 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; | 2786 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; |
| 2786 | stats->tx_packets = adapter->stats.xmitfinished; | 2787 | stats->tx_packets = adapter->stats.xmitfinished; |
| 2787 | stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; | 2788 | stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 686f460b1502..024f8161d2fe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | |||
| @@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, | |||
| 75 | num_vfs = sriov->num_vfs; | 75 | num_vfs = sriov->num_vfs; |
| 76 | max = num_vfs + 1; | 76 | max = num_vfs + 1; |
| 77 | info->bit_offsets = 0xffff; | 77 | info->bit_offsets = 0xffff; |
| 78 | info->max_tx_ques = res->num_tx_queues / max; | ||
| 79 | info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; | 78 | info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; |
| 80 | num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; | 79 | num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; |
| 81 | 80 | ||
| @@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, | |||
| 86 | info->max_tx_mac_filters = temp; | 85 | info->max_tx_mac_filters = temp; |
| 87 | info->min_tx_bw = 0; | 86 | info->min_tx_bw = 0; |
| 88 | info->max_tx_bw = MAX_BW; | 87 | info->max_tx_bw = MAX_BW; |
| 88 | info->max_tx_ques = res->num_tx_queues - sriov->num_vfs; | ||
| 89 | } else { | 89 | } else { |
| 90 | id = qlcnic_sriov_func_to_index(adapter, func); | 90 | id = qlcnic_sriov_func_to_index(adapter, func); |
| 91 | if (id < 0) | 91 | if (id < 0) |
| @@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, | |||
| 95 | info->max_tx_bw = vp->max_tx_bw; | 95 | info->max_tx_bw = vp->max_tx_bw; |
| 96 | info->max_rx_ucast_mac_filters = num_vf_macs; | 96 | info->max_rx_ucast_mac_filters = num_vf_macs; |
| 97 | info->max_tx_mac_filters = num_vf_macs; | 97 | info->max_tx_mac_filters = num_vf_macs; |
| 98 | info->max_tx_ques = QLCNIC_SINGLE_RING; | ||
| 98 | } | 99 | } |
| 99 | 100 | ||
| 100 | info->max_rx_ip_addr = res->num_destip / max; | 101 | info->max_rx_ip_addr = res->num_destip / max; |
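
The SR-IOV resource-limit calculation now reserves a single Tx ring per VF and gives the PF the remainder, instead of dividing the Tx queues evenly. A quick standalone model of that split; the queue counts are illustrative only.

#include <stdio.h>

#define QLCNIC_SINGLE_RING 1

static void split_tx_queues(int num_tx_queues, int num_vfs)
{
	/* PF keeps whatever is left after each VF is guaranteed one ring. */
	int pf_tx_ques = num_tx_queues - num_vfs;
	int vf_tx_ques = QLCNIC_SINGLE_RING;

	printf("total=%d vfs=%d -> PF gets %d, each VF gets %d\n",
	       num_tx_queues, num_vfs, pf_tx_ques, vf_tx_ques);
}

int main(void)
{
	split_tx_queues(16, 7);   /* illustrative values */
	return 0;
}
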
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8a7a23a84ac5..797b56a0efc4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) | |||
| 622 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) | 622 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
| 623 | return -EOPNOTSUPP; | 623 | return -EOPNOTSUPP; |
| 624 | 624 | ||
| 625 | if (netif_msg_hw(priv)) { | 625 | priv->adv_ts = 0; |
| 626 | if (priv->dma_cap.time_stamp) { | 626 | if (priv->dma_cap.atime_stamp && priv->extend_desc) |
| 627 | pr_debug("IEEE 1588-2002 Time Stamp supported\n"); | 627 | priv->adv_ts = 1; |
| 628 | priv->adv_ts = 0; | 628 | |
| 629 | } | 629 | if (netif_msg_hw(priv) && priv->dma_cap.time_stamp) |
| 630 | if (priv->dma_cap.atime_stamp && priv->extend_desc) { | 630 | pr_debug("IEEE 1588-2002 Time Stamp supported\n"); |
| 631 | pr_debug | 631 | |
| 632 | ("IEEE 1588-2008 Advanced Time Stamp supported\n"); | 632 | if (netif_msg_hw(priv) && priv->adv_ts) |
| 633 | priv->adv_ts = 1; | 633 | pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n"); |
| 634 | } | ||
| 635 | } | ||
| 636 | 634 | ||
| 637 | priv->hw->ptp = &stmmac_ptp; | 635 | priv->hw->ptp = &stmmac_ptp; |
| 638 | priv->hwts_tx_en = 0; | 636 | priv->hwts_tx_en = 0; |
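
After this stmmac change, whether advanced (IEEE 1588-2008) timestamping is usable depends only on the DMA capabilities and extended descriptors; netif_msg_hw() now gates just the debug prints. A minimal model of the decision, with the capability flags reduced to plain ints for illustration.

#include <stdio.h>

struct dma_cap { int time_stamp; int atime_stamp; };

/* adv_ts no longer depends on whether hardware debug messages are enabled. */
static int want_adv_ts(const struct dma_cap *cap, int extend_desc)
{
	return cap->atime_stamp && extend_desc;
}

int main(void)
{
	struct dma_cap cap = { .time_stamp = 1, .atime_stamp = 1 };

	printf("adv_ts (extended descriptors): %d\n", want_adv_ts(&cap, 1));
	printf("adv_ts (normal descriptors):   %d\n", want_adv_ts(&cap, 0));
	return 0;
}
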
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b8b0eeed0f92..7680581ebe12 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | |||
| @@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) | |||
| 56 | 56 | ||
| 57 | priv->hw->ptp->config_addend(priv->ioaddr, addend); | 57 | priv->hw->ptp->config_addend(priv->ioaddr, addend); |
| 58 | 58 | ||
| 59 | spin_unlock_irqrestore(&priv->lock, flags); | 59 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
| 60 | 60 | ||
| 61 | return 0; | 61 | return 0; |
| 62 | } | 62 | } |
| @@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) | |||
| 91 | 91 | ||
| 92 | priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); | 92 | priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); |
| 93 | 93 | ||
| 94 | spin_unlock_irqrestore(&priv->lock, flags); | 94 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
| 95 | 95 | ||
| 96 | return 0; | 96 | return 0; |
| 97 | } | 97 | } |
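
Both PTP adjust callbacks appear to take priv->ptp_lock at the top of the function (not visible in this hunk), so the fix makes the unlock operate on the same lock that was taken. A tiny userspace illustration of the paired take/release, with a pthread spinlock standing in for the kernel spinlock and IRQ flags (build with cc -pthread).

#include <pthread.h>
#include <stdio.h>

struct stmmac_priv_model {
	pthread_spinlock_t lock;       /* general driver lock */
	pthread_spinlock_t ptp_lock;   /* protects PTP registers */
	unsigned int addend;
};

/* The lock released must be the lock taken: ptp_lock in, ptp_lock out. */
static void adjust_freq(struct stmmac_priv_model *priv, unsigned int addend)
{
	pthread_spin_lock(&priv->ptp_lock);
	priv->addend = addend;            /* stands in for config_addend() */
	pthread_spin_unlock(&priv->ptp_lock);
}

int main(void)
{
	struct stmmac_priv_model priv;

	pthread_spin_init(&priv.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&priv.ptp_lock, PTHREAD_PROCESS_PRIVATE);
	adjust_freq(&priv, 0x12345678);
	printf("addend=0x%x\n", priv.addend);
	return 0;
}
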
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 5120d9ce1dd4..5330fd298705 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, | |||
| 740 | /* set speed_in input in case RMII mode is used in 100Mbps */ | 740 | /* set speed_in input in case RMII mode is used in 100Mbps */ |
| 741 | if (phy->speed == 100) | 741 | if (phy->speed == 100) |
| 742 | mac_control |= BIT(15); | 742 | mac_control |= BIT(15); |
| 743 | else if (phy->speed == 10) | ||
| 744 | mac_control |= BIT(18); /* In Band mode */ | ||
| 743 | 745 | ||
| 744 | *link = true; | 746 | *link = true; |
| 745 | } else { | 747 | } else { |
| @@ -2106,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev) | |||
| 2106 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { | 2108 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { |
| 2107 | for (i = res->start; i <= res->end; i++) { | 2109 | for (i = res->start; i <= res->end; i++) { |
| 2108 | if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, | 2110 | if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, |
| 2109 | dev_name(priv->dev), priv)) { | 2111 | dev_name(&pdev->dev), priv)) { |
| 2110 | dev_err(priv->dev, "error attaching irq\n"); | 2112 | dev_err(priv->dev, "error attaching irq\n"); |
| 2111 | goto clean_ale_ret; | 2113 | goto clean_ale_ret; |
| 2112 | } | 2114 | } |
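
At 10 Mbps the CPSW MAC additionally needs the in-band bit set in its MAC control word, alongside the existing 100 Mbps handling. A standalone sketch of how the control word is assembled from the PHY speed; the bit positions are copied from the diff and all other bits are omitted.

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Bits taken from the cpsw change: BIT(15) covers the RMII 100 Mbps case,
 * BIT(18) enables in-band mode for 10 Mbps links. */
static unsigned int mac_control_for_speed(int speed)
{
	unsigned int mac_control = 0;

	if (speed == 100)
		mac_control |= BIT(15);
	else if (speed == 10)
		mac_control |= BIT(18);	/* in-band mode */

	return mac_control;
}

int main(void)
{
	printf("10M  -> 0x%05x\n", mac_control_for_speed(10));
	printf("100M -> 0x%05x\n", mac_control_for_speed(100));
	return 0;
}
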
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 628b736e5ae7..0e9fb3301b11 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c | |||
| @@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | |||
| 2080 | } | 2080 | } |
| 2081 | 2081 | ||
| 2082 | /* Return subqueue id on this core (one per core). */ | 2082 | /* Return subqueue id on this core (one per core). */ |
| 2083 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) | 2083 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 2084 | void *accel_priv) | ||
| 2084 | { | 2085 | { |
| 2085 | return smp_processor_id(); | 2086 | return smp_processor_id(); |
| 2086 | } | 2087 | } |
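
This is the first of several drivers in this section (tilegx, team, tun, mwifiex) updated for the ndo_select_queue callback gaining an accel_priv argument. A compile-standalone sketch of the new callback shape, with the kernel types reduced to stubs purely for illustration; drivers that do not use the new argument simply ignore it.

#include <stdio.h>

typedef unsigned short u16;

/* Stub types standing in for the kernel's structures. */
struct net_device { int dummy; };
struct sk_buff { int queue_hint; };

/* New callback shape: accel_priv carries the accelerated-forwarding
 * context (e.g. from an L2 forwarding offload station). */
typedef u16 (*select_queue_fn)(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv);

static u16 pick_queue(struct net_device *dev, struct sk_buff *skb,
		      void *accel_priv)
{
	(void)dev;
	(void)accel_priv;
	return (u16)skb->queue_hint;
}

int main(void)
{
	struct net_device dev = { 0 };
	struct sk_buff skb = { .queue_hint = 3 };
	select_queue_fn fn = pick_queue;

	printf("selected queue: %u\n", fn(&dev, &skb, NULL));
	return 0;
}
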
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 3169252613fa..5d78c1d08abd 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c | |||
| @@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 571 | case HDLCDRVCTL_CALIBRATE: | 571 | case HDLCDRVCTL_CALIBRATE: |
| 572 | if(!capable(CAP_SYS_RAWIO)) | 572 | if(!capable(CAP_SYS_RAWIO)) |
| 573 | return -EPERM; | 573 | return -EPERM; |
| 574 | if (bi.data.calibrate > INT_MAX / s->par.bitrate) | ||
| 575 | return -EINVAL; | ||
| 574 | s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; | 576 | s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; |
| 575 | return 0; | 577 | return 0; |
| 576 | 578 | ||
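
The added hdlcdrv check rejects calibrate values whose multiplication by the bitrate would overflow a signed int before the division by 16. A standalone demonstration of the guard; the numeric values are made up.

#include <limits.h>
#include <stdio.h>

/* Returns the scaled calibrate value, or -1 if the multiply would overflow. */
static int scale_calibrate(unsigned int calibrate, int bitrate)
{
	if (calibrate > INT_MAX / bitrate)
		return -1;		/* would overflow: reject as -EINVAL */

	return calibrate * bitrate / 16;
}

int main(void)
{
	printf("ok:       %d\n", scale_calibrate(100, 9600));
	printf("rejected: %d\n", scale_calibrate(4000000, 9600));
	return 0;
}
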
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 1971411574db..61dd2447e1bb 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c | |||
| @@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 1057 | break; | 1057 | break; |
| 1058 | 1058 | ||
| 1059 | case SIOCYAMGCFG: | 1059 | case SIOCYAMGCFG: |
| 1060 | memset(&yi, 0, sizeof(yi)); | ||
| 1060 | yi.cfg.mask = 0xffffffff; | 1061 | yi.cfg.mask = 0xffffffff; |
| 1061 | yi.cfg.iobase = yp->iobase; | 1062 | yi.cfg.iobase = yp->iobase; |
| 1062 | yi.cfg.irq = yp->irq; | 1063 | yi.cfg.irq = yp->irq; |
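
Zeroing the whole ioctl reply before filling individual fields keeps struct padding and any unset members from leaking stale kernel stack bytes to userspace. A userspace model of the same hygiene; the struct layout below is invented for illustration, not the real yamdrv structure.

#include <stdio.h>
#include <string.h>

struct yamcfg_model {
	unsigned int mask;
	unsigned int iobase;
	unsigned int irq;
	/* more fields and implicit padding may follow in the real struct */
	unsigned int unset;
};

int main(void)
{
	struct yamcfg_model yi;

	/* Without this memset, yi.unset and any padding would hold stale
	 * stack data and be copied out verbatim. */
	memset(&yi, 0, sizeof(yi));
	yi.mask = 0xffffffff;
	yi.iobase = 0x3f8;
	yi.irq = 7;

	printf("mask=%#x iobase=%#x irq=%u unset=%u\n",
	       yi.mask, yi.iobase, yi.irq, yi.unset);
	return 0;
}
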
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 524f713f6017..71baeb3ed905 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, | |||
| 261 | struct sk_buff *skb; | 261 | struct sk_buff *skb; |
| 262 | 262 | ||
| 263 | net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; | 263 | net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; |
| 264 | if (!net) { | 264 | if (!net || net->reg_state != NETREG_REGISTERED) { |
| 265 | netdev_err(net, "got receive callback but net device" | ||
| 266 | " not initialized yet\n"); | ||
| 267 | packet->status = NVSP_STAT_FAIL; | 265 | packet->status = NVSP_STAT_FAIL; |
| 268 | return 0; | 266 | return 0; |
| 269 | } | 267 | } |
| @@ -327,7 +325,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) | |||
| 327 | return -EINVAL; | 325 | return -EINVAL; |
| 328 | 326 | ||
| 329 | nvdev->start_remove = true; | 327 | nvdev->start_remove = true; |
| 330 | cancel_delayed_work_sync(&ndevctx->dwork); | ||
| 331 | cancel_work_sync(&ndevctx->work); | 328 | cancel_work_sync(&ndevctx->work); |
| 332 | netif_tx_disable(ndev); | 329 | netif_tx_disable(ndev); |
| 333 | rndis_filter_device_remove(hdev); | 330 | rndis_filter_device_remove(hdev); |
| @@ -436,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 436 | SET_ETHTOOL_OPS(net, ðtool_ops); | 433 | SET_ETHTOOL_OPS(net, ðtool_ops); |
| 437 | SET_NETDEV_DEV(net, &dev->device); | 434 | SET_NETDEV_DEV(net, &dev->device); |
| 438 | 435 | ||
| 439 | ret = register_netdev(net); | ||
| 440 | if (ret != 0) { | ||
| 441 | pr_err("Unable to register netdev.\n"); | ||
| 442 | free_netdev(net); | ||
| 443 | goto out; | ||
| 444 | } | ||
| 445 | |||
| 446 | /* Notify the netvsc driver of the new device */ | 436 | /* Notify the netvsc driver of the new device */ |
| 447 | device_info.ring_size = ring_size; | 437 | device_info.ring_size = ring_size; |
| 448 | ret = rndis_filter_device_add(dev, &device_info); | 438 | ret = rndis_filter_device_add(dev, &device_info); |
| 449 | if (ret != 0) { | 439 | if (ret != 0) { |
| 450 | netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); | 440 | netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); |
| 451 | unregister_netdev(net); | ||
| 452 | free_netdev(net); | 441 | free_netdev(net); |
| 453 | hv_set_drvdata(dev, NULL); | 442 | hv_set_drvdata(dev, NULL); |
| 454 | return ret; | 443 | return ret; |
| @@ -457,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 457 | 446 | ||
| 458 | netif_carrier_on(net); | 447 | netif_carrier_on(net); |
| 459 | 448 | ||
| 460 | out: | 449 | ret = register_netdev(net); |
| 450 | if (ret != 0) { | ||
| 451 | pr_err("Unable to register netdev.\n"); | ||
| 452 | rndis_filter_device_remove(dev); | ||
| 453 | free_netdev(net); | ||
| 454 | } | ||
| 455 | |||
| 461 | return ret; | 456 | return ret; |
| 462 | } | 457 | } |
| 463 | 458 | ||
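
The reordered netvsc probe registers the net_device only after the RNDIS backend is fully set up, so callbacks cannot race a half-initialized device, and the error path unwinds in reverse order. A skeleton of that ordering with everything reduced to stubs; none of these helpers are the real Hyper-V API, they only mark the sequence.

#include <stdio.h>

/* Stub steps; each returns 0 on success. */
static int  rndis_filter_device_add_stub(void)    { return 0; }
static void rndis_filter_device_remove_stub(void) { }
static int  register_netdev_stub(void)            { return 0; }
static void free_netdev_stub(void)                { }

static int probe_model(void)
{
	int ret;

	ret = rndis_filter_device_add_stub();	/* bring the backend up first */
	if (ret) {
		free_netdev_stub();
		return ret;
	}

	ret = register_netdev_stub();		/* expose the device last */
	if (ret) {
		rndis_filter_device_remove_stub();	/* unwind in reverse */
		free_netdev_stub();
	}

	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe_model());
	return 0;
}
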
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index acf93798dc67..bc8faaec33f5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, | |||
| 299 | 299 | ||
| 300 | if (vlan->fwd_priv) { | 300 | if (vlan->fwd_priv) { |
| 301 | skb->dev = vlan->lowerdev; | 301 | skb->dev = vlan->lowerdev; |
| 302 | ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv); | 302 | ret = dev_queue_xmit_accel(skb, vlan->fwd_priv); |
| 303 | } else { | 303 | } else { |
| 304 | ret = macvlan_queue_xmit(skb, dev); | 304 | ret = macvlan_queue_xmit(skb, dev); |
| 305 | } | 305 | } |
| @@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = { | |||
| 338 | .cache_update = eth_header_cache_update, | 338 | .cache_update = eth_header_cache_update, |
| 339 | }; | 339 | }; |
| 340 | 340 | ||
| 341 | static struct rtnl_link_ops macvlan_link_ops; | ||
| 342 | |||
| 341 | static int macvlan_open(struct net_device *dev) | 343 | static int macvlan_open(struct net_device *dev) |
| 342 | { | 344 | { |
| 343 | struct macvlan_dev *vlan = netdev_priv(dev); | 345 | struct macvlan_dev *vlan = netdev_priv(dev); |
| @@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev) | |||
| 353 | goto hash_add; | 355 | goto hash_add; |
| 354 | } | 356 | } |
| 355 | 357 | ||
| 356 | if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { | 358 | if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD && |
| 359 | dev->rtnl_link_ops == &macvlan_link_ops) { | ||
| 357 | vlan->fwd_priv = | 360 | vlan->fwd_priv = |
| 358 | lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); | 361 | lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); |
| 359 | 362 | ||
| @@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev) | |||
| 362 | */ | 365 | */ |
| 363 | if (IS_ERR_OR_NULL(vlan->fwd_priv)) { | 366 | if (IS_ERR_OR_NULL(vlan->fwd_priv)) { |
| 364 | vlan->fwd_priv = NULL; | 367 | vlan->fwd_priv = NULL; |
| 365 | } else { | 368 | } else |
| 366 | dev->features &= ~NETIF_F_LLTX; | ||
| 367 | return 0; | 369 | return 0; |
| 368 | } | ||
| 369 | } | 370 | } |
| 370 | 371 | ||
| 371 | err = -EBUSY; | 372 | err = -EBUSY; |
| @@ -690,8 +691,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, | |||
| 690 | netdev_features_t features) | 691 | netdev_features_t features) |
| 691 | { | 692 | { |
| 692 | struct macvlan_dev *vlan = netdev_priv(dev); | 693 | struct macvlan_dev *vlan = netdev_priv(dev); |
| 694 | netdev_features_t mask; | ||
| 695 | |||
| 696 | features |= NETIF_F_ALL_FOR_ALL; | ||
| 697 | features &= (vlan->set_features | ~MACVLAN_FEATURES); | ||
| 698 | mask = features; | ||
| 699 | |||
| 700 | features = netdev_increment_features(vlan->lowerdev->features, | ||
| 701 | features, | ||
| 702 | mask); | ||
| 703 | features |= NETIF_F_LLTX; | ||
| 693 | 704 | ||
| 694 | return features & (vlan->set_features | ~MACVLAN_FEATURES); | 705 | return features; |
| 695 | } | 706 | } |
| 696 | 707 | ||
| 697 | static const struct ethtool_ops macvlan_ethtool_ops = { | 708 | static const struct ethtool_ops macvlan_ethtool_ops = { |
| @@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused, | |||
| 1019 | break; | 1030 | break; |
| 1020 | case NETDEV_FEAT_CHANGE: | 1031 | case NETDEV_FEAT_CHANGE: |
| 1021 | list_for_each_entry(vlan, &port->vlans, list) { | 1032 | list_for_each_entry(vlan, &port->vlans, list) { |
| 1022 | vlan->dev->features = dev->features & MACVLAN_FEATURES; | ||
| 1023 | vlan->dev->gso_max_size = dev->gso_max_size; | 1033 | vlan->dev->gso_max_size = dev->gso_max_size; |
| 1024 | netdev_features_change(vlan->dev); | 1034 | netdev_update_features(vlan->dev); |
| 1025 | } | 1035 | } |
| 1026 | break; | 1036 | break; |
| 1027 | case NETDEV_UNREGISTER: | 1037 | case NETDEV_UNREGISTER: |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 36c6994436b7..98434b84f041 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev) | |||
| 565 | int err = 0; | 565 | int err = 0; |
| 566 | 566 | ||
| 567 | atomic_set(&phydev->irq_disable, 0); | 567 | atomic_set(&phydev->irq_disable, 0); |
| 568 | if (request_irq(phydev->irq, phy_interrupt, | 568 | if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", |
| 569 | IRQF_SHARED, | 569 | phydev) < 0) { |
| 570 | "phy_interrupt", | ||
| 571 | phydev) < 0) { | ||
| 572 | pr_warn("%s: Can't get IRQ %d (PHY)\n", | 570 | pr_warn("%s: Can't get IRQ %d (PHY)\n", |
| 573 | phydev->bus->name, phydev->irq); | 571 | phydev->bus->name, phydev->irq); |
| 574 | phydev->irq = PHY_POLL; | 572 | phydev->irq = PHY_POLL; |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 736050d6b451..b75ae5bde673 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1647 | return NETDEV_TX_OK; | 1647 | return NETDEV_TX_OK; |
| 1648 | } | 1648 | } |
| 1649 | 1649 | ||
| 1650 | static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb) | 1650 | static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 1651 | void *accel_priv) | ||
| 1651 | { | 1652 | { |
| 1652 | /* | 1653 | /* |
| 1653 | * This helper function exists to help dev_pick_tx get the correct | 1654 | * This helper function exists to help dev_pick_tx get the correct |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7c8343a4f918..ecec8029c5e8 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -348,7 +348,8 @@ unlock: | |||
| 348 | * different rxq no. here. If we could not get rxhash, then we would | 348 | * different rxq no. here. If we could not get rxhash, then we would |
| 349 | * hope the rxq no. may help here. | 349 | * hope the rxq no. may help here. |
| 350 | */ | 350 | */ |
| 351 | static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) | 351 | static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 352 | void *accel_priv) | ||
| 352 | { | 353 | { |
| 353 | struct tun_struct *tun = netdev_priv(dev); | 354 | struct tun_struct *tun = netdev_priv(dev); |
| 354 | struct tun_flow_entry *e; | 355 | struct tun_flow_entry *e; |
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 85e4a01670f0..47b0f732b0b1 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
| @@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM | |||
| 276 | module will be called cdc_mbim. | 276 | module will be called cdc_mbim. |
| 277 | 277 | ||
| 278 | config USB_NET_DM9601 | 278 | config USB_NET_DM9601 |
| 279 | tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" | 279 | tristate "Davicom DM96xx based USB 10/100 ethernet devices" |
| 280 | depends on USB_USBNET | 280 | depends on USB_USBNET |
| 281 | select CRC32 | 281 | select CRC32 |
| 282 | help | 282 | help |
| 283 | This option adds support for Davicom DM9601 based USB 1.1 | 283 | This option adds support for Davicom DM9601/DM9620/DM9621A |
| 284 | 10/100 Ethernet adapters. | 284 | based USB 10/100 Ethernet adapters. |
| 285 | 285 | ||
| 286 | config USB_NET_SR9700 | 286 | config USB_NET_SR9700 |
| 287 | tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" | 287 | tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" |
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index c6867f926cff..14aa48fa8d7e 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices | 2 | * Davicom DM96xx USB 10/100Mbps ethernet devices |
| 3 | * | 3 | * |
| 4 | * Peter Korsgaard <jacmet@sunsite.dk> | 4 | * Peter Korsgaard <jacmet@sunsite.dk> |
| 5 | * | 5 | * |
| @@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 364 | dev->net->ethtool_ops = &dm9601_ethtool_ops; | 364 | dev->net->ethtool_ops = &dm9601_ethtool_ops; |
| 365 | dev->net->hard_header_len += DM_TX_OVERHEAD; | 365 | dev->net->hard_header_len += DM_TX_OVERHEAD; |
| 366 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | 366 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; |
| 367 | dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; | 367 | |
| 368 | /* dm9620/21a require room for 4 byte padding, even in dm9601 | ||
| 369 | * mode, so we need +1 to be able to receive full size | ||
| 370 | * ethernet frames. | ||
| 371 | */ | ||
| 372 | dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1; | ||
| 368 | 373 | ||
| 369 | dev->mii.dev = dev->net; | 374 | dev->mii.dev = dev->net; |
| 370 | dev->mii.mdio_read = dm9601_mdio_read; | 375 | dev->mii.mdio_read = dm9601_mdio_read; |
| @@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
| 468 | static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | 473 | static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, |
| 469 | gfp_t flags) | 474 | gfp_t flags) |
| 470 | { | 475 | { |
| 471 | int len; | 476 | int len, pad; |
| 472 | 477 | ||
| 473 | /* format: | 478 | /* format: |
| 474 | b1: packet length low | 479 | b1: packet length low |
| @@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
| 476 | b3..n: packet data | 481 | b3..n: packet data |
| 477 | */ | 482 | */ |
| 478 | 483 | ||
| 479 | len = skb->len; | 484 | len = skb->len + DM_TX_OVERHEAD; |
| 485 | |||
| 486 | /* workaround for dm962x errata with tx fifo getting out of | ||
| 487 | * sync if a USB bulk transfer retry happens right after a | ||
| 488 | * packet with odd / maxpacket length by adding up to 3 bytes | ||
| 489 | * padding. | ||
| 490 | */ | ||
| 491 | while ((len & 1) || !(len % dev->maxpacket)) | ||
| 492 | len++; | ||
| 480 | 493 | ||
| 481 | if (skb_headroom(skb) < DM_TX_OVERHEAD) { | 494 | len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */ |
| 495 | pad = len - skb->len; | ||
| 496 | |||
| 497 | if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) { | ||
| 482 | struct sk_buff *skb2; | 498 | struct sk_buff *skb2; |
| 483 | 499 | ||
| 484 | skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags); | 500 | skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags); |
| 485 | dev_kfree_skb_any(skb); | 501 | dev_kfree_skb_any(skb); |
| 486 | skb = skb2; | 502 | skb = skb2; |
| 487 | if (!skb) | 503 | if (!skb) |
| @@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
| 490 | 506 | ||
| 491 | __skb_push(skb, DM_TX_OVERHEAD); | 507 | __skb_push(skb, DM_TX_OVERHEAD); |
| 492 | 508 | ||
| 493 | /* usbnet adds padding if length is a multiple of packet size | 509 | if (pad) { |
| 494 | if so, adjust length value in header */ | 510 | memset(skb->data + skb->len, 0, pad); |
| 495 | if ((skb->len % dev->maxpacket) == 0) | 511 | __skb_put(skb, pad); |
| 496 | len++; | 512 | } |
| 497 | 513 | ||
| 498 | skb->data[0] = len; | 514 | skb->data[0] = len; |
| 499 | skb->data[1] = len >> 8; | 515 | skb->data[1] = len >> 8; |
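
The dm9601 Tx fixup now grows the frame until its on-wire length (header included) is even and not an exact multiple of the USB bulk max packet size, then records the padded payload length (without the 2-byte header) in the header bytes. A standalone model of that length calculation; the maxpacket value is illustrative.

#include <stdio.h>

#define DM_TX_OVERHEAD 2	/* 2-byte length header, as in the driver */

/* Returns the number of zero bytes to append after the payload. */
static int dm96xx_tx_pad(int payload_len, int maxpacket)
{
	int len = payload_len + DM_TX_OVERHEAD;

	/* Avoid odd lengths and exact multiples of maxpacket; both can
	 * trigger the dm962x Tx FIFO errata on bulk transfer retries. */
	while ((len & 1) || !(len % maxpacket))
		len++;

	return len - DM_TX_OVERHEAD - payload_len;
}

int main(void)
{
	printf("payload 62, maxpacket 64 -> pad %d\n", dm96xx_tx_pad(62, 64));
	printf("payload 63, maxpacket 64 -> pad %d\n", dm96xx_tx_pad(63, 64));
	printf("payload 60, maxpacket 64 -> pad %d\n", dm96xx_tx_pad(60, 64));
	return 0;
}
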
| @@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev) | |||
| 543 | } | 559 | } |
| 544 | 560 | ||
| 545 | static const struct driver_info dm9601_info = { | 561 | static const struct driver_info dm9601_info = { |
| 546 | .description = "Davicom DM9601 USB Ethernet", | 562 | .description = "Davicom DM96xx USB 10/100 Ethernet", |
| 547 | .flags = FLAG_ETHER | FLAG_LINK_INTR, | 563 | .flags = FLAG_ETHER | FLAG_LINK_INTR, |
| 548 | .bind = dm9601_bind, | 564 | .bind = dm9601_bind, |
| 549 | .rx_fixup = dm9601_rx_fixup, | 565 | .rx_fixup = dm9601_rx_fixup, |
| @@ -594,6 +610,10 @@ static const struct usb_device_id products[] = { | |||
| 594 | USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ | 610 | USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ |
| 595 | .driver_info = (unsigned long)&dm9601_info, | 611 | .driver_info = (unsigned long)&dm9601_info, |
| 596 | }, | 612 | }, |
| 613 | { | ||
| 614 | USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */ | ||
| 615 | .driver_info = (unsigned long)&dm9601_info, | ||
| 616 | }, | ||
| 597 | {}, // END | 617 | {}, // END |
| 598 | }; | 618 | }; |
| 599 | 619 | ||
| @@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = { | |||
| 612 | module_usb_driver(dm9601_driver); | 632 | module_usb_driver(dm9601_driver); |
| 613 | 633 | ||
| 614 | MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>"); | 634 | MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>"); |
| 615 | MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices"); | 635 | MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices"); |
| 616 | MODULE_LICENSE("GPL"); | 636 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 86292e6aaf49..1a482344b3f5 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
| @@ -185,7 +185,6 @@ enum rx_ctrl_state{ | |||
| 185 | #define BM_REQUEST_TYPE (0xa1) | 185 | #define BM_REQUEST_TYPE (0xa1) |
| 186 | #define B_NOTIFICATION (0x20) | 186 | #define B_NOTIFICATION (0x20) |
| 187 | #define W_VALUE (0x0) | 187 | #define W_VALUE (0x0) |
| 188 | #define W_INDEX (0x2) | ||
| 189 | #define W_LENGTH (0x2) | 188 | #define W_LENGTH (0x2) |
| 190 | 189 | ||
| 191 | #define B_OVERRUN (0x1<<6) | 190 | #define B_OVERRUN (0x1<<6) |
| @@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb) | |||
| 1487 | struct uart_icount *icount; | 1486 | struct uart_icount *icount; |
| 1488 | struct hso_serial_state_notification *serial_state_notification; | 1487 | struct hso_serial_state_notification *serial_state_notification; |
| 1489 | struct usb_device *usb; | 1488 | struct usb_device *usb; |
| 1489 | int if_num; | ||
| 1490 | 1490 | ||
| 1491 | /* Sanity checks */ | 1491 | /* Sanity checks */ |
| 1492 | if (!serial) | 1492 | if (!serial) |
| @@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb) | |||
| 1495 | handle_usb_error(status, __func__, serial->parent); | 1495 | handle_usb_error(status, __func__, serial->parent); |
| 1496 | return; | 1496 | return; |
| 1497 | } | 1497 | } |
| 1498 | |||
| 1499 | /* tiocmget is only supported on HSO_PORT_MODEM */ | ||
| 1498 | tiocmget = serial->tiocmget; | 1500 | tiocmget = serial->tiocmget; |
| 1499 | if (!tiocmget) | 1501 | if (!tiocmget) |
| 1500 | return; | 1502 | return; |
| 1503 | BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); | ||
| 1504 | |||
| 1501 | usb = serial->parent->usb; | 1505 | usb = serial->parent->usb; |
| 1506 | if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; | ||
| 1507 | |||
| 1508 | /* wIndex should be the USB interface number of the port to which the | ||
| 1509 | * notification applies, which should always be the Modem port. | ||
| 1510 | */ | ||
| 1502 | serial_state_notification = &tiocmget->serial_state_notification; | 1511 | serial_state_notification = &tiocmget->serial_state_notification; |
| 1503 | if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || | 1512 | if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || |
| 1504 | serial_state_notification->bNotification != B_NOTIFICATION || | 1513 | serial_state_notification->bNotification != B_NOTIFICATION || |
| 1505 | le16_to_cpu(serial_state_notification->wValue) != W_VALUE || | 1514 | le16_to_cpu(serial_state_notification->wValue) != W_VALUE || |
| 1506 | le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || | 1515 | le16_to_cpu(serial_state_notification->wIndex) != if_num || |
| 1507 | le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { | 1516 | le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { |
| 1508 | dev_warn(&usb->dev, | 1517 | dev_warn(&usb->dev, |
| 1509 | "hso received invalid serial state notification\n"); | 1518 | "hso received invalid serial state notification\n"); |
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 03832d3780aa..f54637828574 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c | |||
| @@ -117,7 +117,6 @@ enum { | |||
| 117 | struct mcs7830_data { | 117 | struct mcs7830_data { |
| 118 | u8 multi_filter[8]; | 118 | u8 multi_filter[8]; |
| 119 | u8 config; | 119 | u8 config; |
| 120 | u8 link_counter; | ||
| 121 | }; | 120 | }; |
| 122 | 121 | ||
| 123 | static const char driver_name[] = "MOSCHIP usb-ethernet driver"; | 122 | static const char driver_name[] = "MOSCHIP usb-ethernet driver"; |
| @@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb) | |||
| 561 | { | 560 | { |
| 562 | u8 *buf = urb->transfer_buffer; | 561 | u8 *buf = urb->transfer_buffer; |
| 563 | bool link, link_changed; | 562 | bool link, link_changed; |
| 564 | struct mcs7830_data *data = mcs7830_get_data(dev); | ||
| 565 | 563 | ||
| 566 | if (urb->actual_length < 16) | 564 | if (urb->actual_length < 16) |
| 567 | return; | 565 | return; |
| 568 | 566 | ||
| 569 | link = !(buf[1] & 0x20); | 567 | link = !(buf[1] == 0x20); |
| 570 | link_changed = netif_carrier_ok(dev->net) != link; | 568 | link_changed = netif_carrier_ok(dev->net) != link; |
| 571 | if (link_changed) { | 569 | if (link_changed) { |
| 572 | data->link_counter++; | 570 | usbnet_link_change(dev, link, 0); |
| 573 | /* | 571 | netdev_dbg(dev->net, "Link Status is: %d\n", link); |
| 574 | track link state 20 times to guard against erroneous | 572 | } |
| 575 | link state changes reported sometimes by the chip | ||
| 576 | */ | ||
| 577 | if (data->link_counter > 20) { | ||
| 578 | data->link_counter = 0; | ||
| 579 | usbnet_link_change(dev, link, 0); | ||
| 580 | netdev_dbg(dev->net, "Link Status is: %d\n", link); | ||
| 581 | } | ||
| 582 | } else | ||
| 583 | data->link_counter = 0; | ||
| 584 | } | 573 | } |
| 585 | 574 | ||
| 586 | static const struct driver_info moschip_info = { | 575 | static const struct driver_info moschip_info = { |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d208f8604981..5d776447d9c3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -1797,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev) | |||
| 1797 | if (err) | 1797 | if (err) |
| 1798 | return err; | 1798 | return err; |
| 1799 | 1799 | ||
| 1800 | if (netif_running(vi->dev)) | 1800 | if (netif_running(vi->dev)) { |
| 1801 | for (i = 0; i < vi->curr_queue_pairs; i++) | ||
| 1802 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) | ||
| 1803 | schedule_delayed_work(&vi->refill, 0); | ||
| 1804 | |||
| 1801 | for (i = 0; i < vi->max_queue_pairs; i++) | 1805 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 1802 | virtnet_napi_enable(&vi->rq[i]); | 1806 | virtnet_napi_enable(&vi->rq[i]); |
| 1807 | } | ||
| 1803 | 1808 | ||
| 1804 | netif_device_attach(vi->dev); | 1809 | netif_device_attach(vi->dev); |
| 1805 | 1810 | ||
| 1806 | for (i = 0; i < vi->curr_queue_pairs; i++) | ||
| 1807 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) | ||
| 1808 | schedule_delayed_work(&vi->refill, 0); | ||
| 1809 | |||
| 1810 | mutex_lock(&vi->config_lock); | 1811 | mutex_lock(&vi->config_lock); |
| 1811 | vi->config_enable = true; | 1812 | vi->config_enable = true; |
| 1812 | mutex_unlock(&vi->config_lock); | 1813 | mutex_unlock(&vi->config_lock); |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 249e01c5600c..ed384fee76ac 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, | |||
| 2440 | /* update header length based on lower device */ | 2440 | /* update header length based on lower device */ |
| 2441 | dev->hard_header_len = lowerdev->hard_header_len + | 2441 | dev->hard_header_len = lowerdev->hard_header_len + |
| 2442 | (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); | 2442 | (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); |
| 2443 | } | 2443 | } else if (use_ipv6) |
| 2444 | vxlan->flags |= VXLAN_F_IPV6; | ||
| 2444 | 2445 | ||
| 2445 | if (data[IFLA_VXLAN_TOS]) | 2446 | if (data[IFLA_VXLAN_TOS]) |
| 2446 | vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); | 2447 | vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); |
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index 8d78253c26ce..a366d6b4626f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c | |||
| @@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) | |||
| 76 | mask2 |= ATH9K_INT_CST; | 76 | mask2 |= ATH9K_INT_CST; |
| 77 | if (isr2 & AR_ISR_S2_TSFOOR) | 77 | if (isr2 & AR_ISR_S2_TSFOOR) |
| 78 | mask2 |= ATH9K_INT_TSFOOR; | 78 | mask2 |= ATH9K_INT_TSFOOR; |
| 79 | |||
| 80 | if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { | ||
| 81 | REG_WRITE(ah, AR_ISR_S2, isr2); | ||
| 82 | isr &= ~AR_ISR_BCNMISC; | ||
| 83 | } | ||
| 79 | } | 84 | } |
| 80 | 85 | ||
| 81 | isr = REG_READ(ah, AR_ISR_RAC); | 86 | if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) |
| 87 | isr = REG_READ(ah, AR_ISR_RAC); | ||
| 88 | |||
| 82 | if (isr == 0xffffffff) { | 89 | if (isr == 0xffffffff) { |
| 83 | *masked = 0; | 90 | *masked = 0; |
| 84 | return false; | 91 | return false; |
| @@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) | |||
| 97 | 104 | ||
| 98 | *masked |= ATH9K_INT_TX; | 105 | *masked |= ATH9K_INT_TX; |
| 99 | 106 | ||
| 100 | s0_s = REG_READ(ah, AR_ISR_S0_S); | 107 | if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { |
| 108 | s0_s = REG_READ(ah, AR_ISR_S0_S); | ||
| 109 | s1_s = REG_READ(ah, AR_ISR_S1_S); | ||
| 110 | } else { | ||
| 111 | s0_s = REG_READ(ah, AR_ISR_S0); | ||
| 112 | REG_WRITE(ah, AR_ISR_S0, s0_s); | ||
| 113 | s1_s = REG_READ(ah, AR_ISR_S1); | ||
| 114 | REG_WRITE(ah, AR_ISR_S1, s1_s); | ||
| 115 | |||
| 116 | isr &= ~(AR_ISR_TXOK | | ||
| 117 | AR_ISR_TXDESC | | ||
| 118 | AR_ISR_TXERR | | ||
| 119 | AR_ISR_TXEOL); | ||
| 120 | } | ||
| 121 | |||
| 101 | ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); | 122 | ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); |
| 102 | ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); | 123 | ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); |
| 103 | |||
| 104 | s1_s = REG_READ(ah, AR_ISR_S1_S); | ||
| 105 | ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); | 124 | ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); |
| 106 | ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); | 125 | ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); |
| 107 | } | 126 | } |
| @@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) | |||
| 114 | *masked |= mask2; | 133 | *masked |= mask2; |
| 115 | } | 134 | } |
| 116 | 135 | ||
| 117 | if (AR_SREV_9100(ah)) | 136 | if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) { |
| 118 | return true; | ||
| 119 | |||
| 120 | if (isr & AR_ISR_GENTMR) { | ||
| 121 | u32 s5_s; | 137 | u32 s5_s; |
| 122 | 138 | ||
| 123 | s5_s = REG_READ(ah, AR_ISR_S5_S); | 139 | if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { |
| 140 | s5_s = REG_READ(ah, AR_ISR_S5_S); | ||
| 141 | } else { | ||
| 142 | s5_s = REG_READ(ah, AR_ISR_S5); | ||
| 143 | } | ||
| 144 | |||
| 124 | ah->intr_gen_timer_trigger = | 145 | ah->intr_gen_timer_trigger = |
| 125 | MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); | 146 | MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); |
| 126 | 147 | ||
| @@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) | |||
| 133 | if ((s5_s & AR_ISR_S5_TIM_TIMER) && | 154 | if ((s5_s & AR_ISR_S5_TIM_TIMER) && |
| 134 | !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) | 155 | !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) |
| 135 | *masked |= ATH9K_INT_TIM_TIMER; | 156 | *masked |= ATH9K_INT_TIM_TIMER; |
| 157 | |||
| 158 | if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { | ||
| 159 | REG_WRITE(ah, AR_ISR_S5, s5_s); | ||
| 160 | isr &= ~AR_ISR_GENTMR; | ||
| 161 | } | ||
| 136 | } | 162 | } |
| 137 | 163 | ||
| 164 | if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { | ||
| 165 | REG_WRITE(ah, AR_ISR, isr); | ||
| 166 | REG_READ(ah, AR_ISR); | ||
| 167 | } | ||
| 168 | |||
| 169 | if (AR_SREV_9100(ah)) | ||
| 170 | return true; | ||
| 171 | |||
| 138 | if (sync_cause) { | 172 | if (sync_cause) { |
| 139 | ath9k_debug_sync_cause(common, sync_cause); | 173 | ath9k_debug_sync_cause(common, sync_cause); |
| 140 | fatal_int = | 174 | fatal_int = |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 9a2657fdd9cc..608d739d1378 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c | |||
| @@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif) | |||
| 127 | struct ath9k_vif_iter_data *iter_data = data; | 127 | struct ath9k_vif_iter_data *iter_data = data; |
| 128 | int i; | 128 | int i; |
| 129 | 129 | ||
| 130 | for (i = 0; i < ETH_ALEN; i++) | 130 | if (iter_data->hw_macaddr != NULL) { |
| 131 | iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); | 131 | for (i = 0; i < ETH_ALEN; i++) |
| 132 | iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); | ||
| 133 | } else { | ||
| 134 | iter_data->hw_macaddr = mac; | ||
| 135 | } | ||
| 132 | } | 136 | } |
| 133 | 137 | ||
| 134 | static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, | 138 | static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv, |
| 135 | struct ieee80211_vif *vif) | 139 | struct ieee80211_vif *vif) |
| 136 | { | 140 | { |
| 137 | struct ath_common *common = ath9k_hw_common(priv->ah); | 141 | struct ath_common *common = ath9k_hw_common(priv->ah); |
| 138 | struct ath9k_vif_iter_data iter_data; | 142 | struct ath9k_vif_iter_data iter_data; |
| 139 | 143 | ||
| 140 | /* | 144 | /* |
| 141 | * Use the hardware MAC address as reference, the hardware uses it | 145 | * Pick the MAC address of the first interface as the new hardware |
| 142 | * together with the BSSID mask when matching addresses. | 146 | * MAC address. The hardware will use it together with the BSSID mask |
| 147 | * when matching addresses. | ||
| 143 | */ | 148 | */ |
| 144 | iter_data.hw_macaddr = common->macaddr; | 149 | iter_data.hw_macaddr = NULL; |
| 145 | memset(&iter_data.mask, 0xff, ETH_ALEN); | 150 | memset(&iter_data.mask, 0xff, ETH_ALEN); |
| 146 | 151 | ||
| 147 | if (vif) | 152 | if (vif) |
| @@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, | |||
| 153 | ath9k_htc_bssid_iter, &iter_data); | 158 | ath9k_htc_bssid_iter, &iter_data); |
| 154 | 159 | ||
| 155 | memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); | 160 | memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); |
| 161 | |||
| 162 | if (iter_data.hw_macaddr) | ||
| 163 | memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN); | ||
| 164 | |||
| 156 | ath_hw_setbssidmask(common); | 165 | ath_hw_setbssidmask(common); |
| 157 | } | 166 | } |
| 158 | 167 | ||
| @@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, | |||
| 1063 | goto out; | 1072 | goto out; |
| 1064 | } | 1073 | } |
| 1065 | 1074 | ||
| 1066 | ath9k_htc_set_bssid_mask(priv, vif); | 1075 | ath9k_htc_set_mac_bssid_mask(priv, vif); |
| 1067 | 1076 | ||
| 1068 | priv->vif_slot |= (1 << avp->index); | 1077 | priv->vif_slot |= (1 << avp->index); |
| 1069 | priv->nvifs++; | 1078 | priv->nvifs++; |
| @@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, | |||
| 1128 | 1137 | ||
| 1129 | ath9k_htc_set_opmode(priv); | 1138 | ath9k_htc_set_opmode(priv); |
| 1130 | 1139 | ||
| 1131 | ath9k_htc_set_bssid_mask(priv, vif); | 1140 | ath9k_htc_set_mac_bssid_mask(priv, vif); |
| 1132 | 1141 | ||
| 1133 | /* | 1142 | /* |
| 1134 | * Stop ANI only if there are no associated station interfaces. | 1143 | * Stop ANI only if there are no associated station interfaces. |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 74f452c7b166..21aa09e0e825 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw, | |||
| 965 | struct ath_common *common = ath9k_hw_common(ah); | 965 | struct ath_common *common = ath9k_hw_common(ah); |
| 966 | 966 | ||
| 967 | /* | 967 | /* |
| 968 | * Use the hardware MAC address as reference, the hardware uses it | 968 | * Pick the MAC address of the first interface as the new hardware |
| 969 | * together with the BSSID mask when matching addresses. | 969 | * MAC address. The hardware will use it together with the BSSID mask |
| 970 | * when matching addresses. | ||
| 970 | */ | 971 | */ |
| 971 | memset(iter_data, 0, sizeof(*iter_data)); | 972 | memset(iter_data, 0, sizeof(*iter_data)); |
| 972 | memset(&iter_data->mask, 0xff, ETH_ALEN); | 973 | memset(&iter_data->mask, 0xff, ETH_ALEN); |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 86605027c41d..e6272546395a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
| @@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | |||
| 357 | {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, | 357 | {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, |
| 358 | {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, | 358 | {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, |
| 359 | {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, | 359 | {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, |
| 360 | {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)}, | 360 | {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, |
| 361 | {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)}, | 361 | {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)}, |
| 362 | {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, | 362 | {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, |
| 363 | {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, | ||
| 363 | {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, | 364 | {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, |
| 364 | {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, | 365 | {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, |
| 365 | {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, | 366 | {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, |
| 366 | {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, | 367 | {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, |
| 367 | {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, | 368 | {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, |
| 368 | {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, | 369 | {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, |
| 370 | {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, | ||
| 369 | {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, | 371 | {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, |
| 372 | {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, | ||
| 373 | {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, | ||
| 370 | {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, | 374 | {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, |
| 371 | {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, | 375 | {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, |
| 372 | {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, | 376 | {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, |
| 373 | {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, | 377 | {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, |
| 374 | {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, | 378 | {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, |
| 379 | {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, | ||
| 380 | {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, | ||
| 375 | {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, | 381 | {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, |
| 376 | {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, | 382 | {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, |
| 377 | #endif /* CONFIG_IWLMVM */ | 383 | #endif /* CONFIG_IWLMVM */ |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index c72438bb2faf..a1b32ee9594a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, | |||
| 2011 | (hwsim_flags & HWSIM_TX_STAT_ACK)) { | 2011 | (hwsim_flags & HWSIM_TX_STAT_ACK)) { |
| 2012 | if (skb->len >= 16) { | 2012 | if (skb->len >= 16) { |
| 2013 | hdr = (struct ieee80211_hdr *) skb->data; | 2013 | hdr = (struct ieee80211_hdr *) skb->data; |
| 2014 | mac80211_hwsim_monitor_ack(txi->rate_driver_data[0], | 2014 | mac80211_hwsim_monitor_ack(data2->channel, |
| 2015 | hdr->addr2); | 2015 | hdr->addr2); |
| 2016 | } | 2016 | } |
| 2017 | txi->flags |= IEEE80211_TX_STAT_ACK; | 2017 | txi->flags |= IEEE80211_TX_STAT_ACK; |
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 78e8a6666cc6..8bb8988c435c 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c | |||
| @@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) | |||
| 746 | } | 746 | } |
| 747 | 747 | ||
| 748 | static u16 | 748 | static u16 |
| 749 | mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb) | 749 | mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, |
| 750 | void *accel_priv) | ||
| 750 | { | 751 | { |
| 751 | skb->priority = cfg80211_classify8021d(skb); | 752 | skb->priority = cfg80211_classify8021d(skb); |
| 752 | return mwifiex_1d_to_wmm_queue[skb->priority]; | 753 | return mwifiex_1d_to_wmm_queue[skb->priority]; |
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 0f494444bcd1..5a53195d016b 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c | |||
| @@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) | |||
| 740 | }; | 740 | }; |
| 741 | int index = rtlpci->rx_ring[rx_queue_idx].idx; | 741 | int index = rtlpci->rx_ring[rx_queue_idx].idx; |
| 742 | 742 | ||
| 743 | if (rtlpci->driver_is_goingto_unload) | ||
| 744 | return; | ||
| 743 | /*RX NORMAL PKT */ | 745 | /*RX NORMAL PKT */ |
| 744 | while (count--) { | 746 | while (count--) { |
| 745 | /*rx descriptor */ | 747 | /*rx descriptor */ |
| @@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) | |||
| 1636 | */ | 1638 | */ |
| 1637 | set_hal_stop(rtlhal); | 1639 | set_hal_stop(rtlhal); |
| 1638 | 1640 | ||
| 1641 | rtlpci->driver_is_goingto_unload = true; | ||
| 1639 | rtlpriv->cfg->ops->disable_interrupt(hw); | 1642 | rtlpriv->cfg->ops->disable_interrupt(hw); |
| 1640 | cancel_work_sync(&rtlpriv->works.lps_change_work); | 1643 | cancel_work_sync(&rtlpriv->works.lps_change_work); |
| 1641 | 1644 | ||
| @@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) | |||
| 1653 | ppsc->rfchange_inprogress = true; | 1656 | ppsc->rfchange_inprogress = true; |
| 1654 | spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); | 1657 | spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); |
| 1655 | 1658 | ||
| 1656 | rtlpci->driver_is_goingto_unload = true; | ||
| 1657 | rtlpriv->cfg->ops->hw_disable(hw); | 1659 | rtlpriv->cfg->ops->hw_disable(hw); |
| 1658 | /* some things are not needed if firmware not available */ | 1660 | /* some things are not needed if firmware not available */ |
| 1659 | if (!rtlpriv->max_fw_size) | 1661 | if (!rtlpriv->max_fw_size) |
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 08ae01b41c83..c47794b9d42f 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h | |||
| @@ -101,6 +101,13 @@ struct xenvif_rx_meta { | |||
| 101 | 101 | ||
| 102 | #define MAX_PENDING_REQS 256 | 102 | #define MAX_PENDING_REQS 256 |
| 103 | 103 | ||
| 104 | /* It's possible for an skb to have a maximal number of frags | ||
| 105 | * but still be less than MAX_BUFFER_OFFSET in size. Thus the | ||
| 106 | * worst-case number of copy operations is MAX_SKB_FRAGS per | ||
| 107 | * ring slot. | ||
| 108 | */ | ||
| 109 | #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) | ||
| 110 | |||
| 104 | struct xenvif { | 111 | struct xenvif { |
| 105 | /* Unique identifier for this interface. */ | 112 | /* Unique identifier for this interface. */ |
| 106 | domid_t domid; | 113 | domid_t domid; |
| @@ -143,13 +150,13 @@ struct xenvif { | |||
| 143 | */ | 150 | */ |
| 144 | RING_IDX rx_req_cons_peek; | 151 | RING_IDX rx_req_cons_peek; |
| 145 | 152 | ||
| 146 | /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each | 153 | /* This array is allocated separately as it is large */ |
| 147 | * head/fragment page uses 2 copy operations because it | 154 | struct gnttab_copy *grant_copy_op; |
| 148 | * straddles two buffers in the frontend. | ||
| 149 | */ | ||
| 150 | struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; | ||
| 151 | struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; | ||
| 152 | 155 | ||
| 156 | /* We create one meta structure per ring request we consume, so | ||
| 157 | * the maximum number is the same as the ring size. | ||
| 158 | */ | ||
| 159 | struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; | ||
| 153 | 160 | ||
| 154 | u8 fe_dev_addr[6]; | 161 | u8 fe_dev_addr[6]; |
| 155 | 162 | ||
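
The new MAX_GRANT_COPY_OPS bound is why grant_copy_op can no longer live inside struct xenvif and is instead vmalloc()ed in the interface.c hunk below: at MAX_SKB_FRAGS copy operations per ring slot the array is an order of magnitude larger than the old two-per-slot array. A minimal userspace sketch of the arithmetic; the concrete numbers (MAX_SKB_FRAGS = 17, a 256-entry RX ring, a 56-byte struct gnttab_copy) are assumptions for illustration, not taken from this patch:

    #include <stdio.h>

    /* Assumed values, for illustration only; the real ones come from
     * skbuff.h, xen/interface/io/netif.h and xen/interface/grant_table.h. */
    #define MAX_SKB_FRAGS          17
    #define XEN_NETIF_RX_RING_SIZE 256
    #define SIZEOF_GNTTAB_COPY     56

    #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)

    int main(void)
    {
            /* Old layout: two copy ops per ring slot, embedded in struct xenvif. */
            size_t old_bytes = (size_t)2 * XEN_NETIF_RX_RING_SIZE * SIZEOF_GNTTAB_COPY;
            /* New layout: worst case of MAX_SKB_FRAGS copy ops per ring slot,
             * allocated separately with vmalloc(). */
            size_t new_bytes = (size_t)MAX_GRANT_COPY_OPS * SIZEOF_GNTTAB_COPY;

            printf("embedded array (old): %zu bytes\n", old_bytes); /* 28672, ~28 KiB  */
            printf("vmalloc array (new):  %zu bytes\n", new_bytes); /* 243712, ~238 KiB */
            return 0;
    }
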
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 870f1fa58370..fff8cddfed81 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <linux/ethtool.h> | 34 | #include <linux/ethtool.h> |
| 35 | #include <linux/rtnetlink.h> | 35 | #include <linux/rtnetlink.h> |
| 36 | #include <linux/if_vlan.h> | 36 | #include <linux/if_vlan.h> |
| 37 | #include <linux/vmalloc.h> | ||
| 37 | 38 | ||
| 38 | #include <xen/events.h> | 39 | #include <xen/events.h> |
| 39 | #include <asm/xen/hypercall.h> | 40 | #include <asm/xen/hypercall.h> |
| @@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
| 307 | SET_NETDEV_DEV(dev, parent); | 308 | SET_NETDEV_DEV(dev, parent); |
| 308 | 309 | ||
| 309 | vif = netdev_priv(dev); | 310 | vif = netdev_priv(dev); |
| 311 | |||
| 312 | vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) * | ||
| 313 | MAX_GRANT_COPY_OPS); | ||
| 314 | if (vif->grant_copy_op == NULL) { | ||
| 315 | pr_warn("Could not allocate grant copy space for %s\n", name); | ||
| 316 | free_netdev(dev); | ||
| 317 | return ERR_PTR(-ENOMEM); | ||
| 318 | } | ||
| 319 | |||
| 310 | vif->domid = domid; | 320 | vif->domid = domid; |
| 311 | vif->handle = handle; | 321 | vif->handle = handle; |
| 312 | vif->can_sg = 1; | 322 | vif->can_sg = 1; |
| @@ -487,6 +497,7 @@ void xenvif_free(struct xenvif *vif) | |||
| 487 | 497 | ||
| 488 | unregister_netdev(vif->dev); | 498 | unregister_netdev(vif->dev); |
| 489 | 499 | ||
| 500 | vfree(vif->grant_copy_op); | ||
| 490 | free_netdev(vif->dev); | 501 | free_netdev(vif->dev); |
| 491 | 502 | ||
| 492 | module_put(THIS_MODULE); | 503 | module_put(THIS_MODULE); |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index e884ee1fe7ed..78425554a537 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif) | |||
| 608 | if (!npo.copy_prod) | 608 | if (!npo.copy_prod) |
| 609 | return; | 609 | return; |
| 610 | 610 | ||
| 611 | BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); | 611 | BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); |
| 612 | gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); | 612 | gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); |
| 613 | 613 | ||
| 614 | while ((skb = __skb_dequeue(&rxq)) != NULL) { | 614 | while ((skb = __skb_dequeue(&rxq)) != NULL) { |
| @@ -1197,6 +1197,9 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, | |||
| 1197 | 1197 | ||
| 1198 | err = -EPROTO; | 1198 | err = -EPROTO; |
| 1199 | 1199 | ||
| 1200 | if (fragment) | ||
| 1201 | goto out; | ||
| 1202 | |||
| 1200 | switch (ip_hdr(skb)->protocol) { | 1203 | switch (ip_hdr(skb)->protocol) { |
| 1201 | case IPPROTO_TCP: | 1204 | case IPPROTO_TCP: |
| 1202 | err = maybe_pull_tail(skb, | 1205 | err = maybe_pull_tail(skb, |
| @@ -1206,8 +1209,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, | |||
| 1206 | goto out; | 1209 | goto out; |
| 1207 | 1210 | ||
| 1208 | if (!skb_partial_csum_set(skb, off, | 1211 | if (!skb_partial_csum_set(skb, off, |
| 1209 | offsetof(struct tcphdr, check))) | 1212 | offsetof(struct tcphdr, check))) { |
| 1213 | err = -EPROTO; | ||
| 1210 | goto out; | 1214 | goto out; |
| 1215 | } | ||
| 1211 | 1216 | ||
| 1212 | if (recalculate_partial_csum) | 1217 | if (recalculate_partial_csum) |
| 1213 | tcp_hdr(skb)->check = | 1218 | tcp_hdr(skb)->check = |
| @@ -1224,8 +1229,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, | |||
| 1224 | goto out; | 1229 | goto out; |
| 1225 | 1230 | ||
| 1226 | if (!skb_partial_csum_set(skb, off, | 1231 | if (!skb_partial_csum_set(skb, off, |
| 1227 | offsetof(struct udphdr, check))) | 1232 | offsetof(struct udphdr, check))) { |
| 1233 | err = -EPROTO; | ||
| 1228 | goto out; | 1234 | goto out; |
| 1235 | } | ||
| 1229 | 1236 | ||
| 1230 | if (recalculate_partial_csum) | 1237 | if (recalculate_partial_csum) |
| 1231 | udp_hdr(skb)->check = | 1238 | udp_hdr(skb)->check = |
| @@ -1347,8 +1354,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, | |||
| 1347 | goto out; | 1354 | goto out; |
| 1348 | 1355 | ||
| 1349 | if (!skb_partial_csum_set(skb, off, | 1356 | if (!skb_partial_csum_set(skb, off, |
| 1350 | offsetof(struct tcphdr, check))) | 1357 | offsetof(struct tcphdr, check))) { |
| 1358 | err = -EPROTO; | ||
| 1351 | goto out; | 1359 | goto out; |
| 1360 | } | ||
| 1352 | 1361 | ||
| 1353 | if (recalculate_partial_csum) | 1362 | if (recalculate_partial_csum) |
| 1354 | tcp_hdr(skb)->check = | 1363 | tcp_hdr(skb)->check = |
| @@ -1365,8 +1374,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, | |||
| 1365 | goto out; | 1374 | goto out; |
| 1366 | 1375 | ||
| 1367 | if (!skb_partial_csum_set(skb, off, | 1376 | if (!skb_partial_csum_set(skb, off, |
| 1368 | offsetof(struct udphdr, check))) | 1377 | offsetof(struct udphdr, check))) { |
| 1378 | err = -EPROTO; | ||
| 1369 | goto out; | 1379 | goto out; |
| 1380 | } | ||
| 1370 | 1381 | ||
| 1371 | if (recalculate_partial_csum) | 1382 | if (recalculate_partial_csum) |
| 1372 | udp_hdr(skb)->check = | 1383 | udp_hdr(skb)->check = |
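
The err = -EPROTO additions above close a silent-success path: maybe_pull_tail() returns 0 on success, so err has already been cleared by the time skb_partial_csum_set() runs, and a failure there used to jump to out while still reporting success. A small userspace sketch of the corrected pattern; the two helpers are stand-ins, not the kernel functions:

    #include <stdio.h>
    #include <stdbool.h>

    #define EPROTO 71

    /* Stand-ins for maybe_pull_tail() and skb_partial_csum_set(). */
    static int  pull_step(void) { return 0; }     /* succeeds, returns 0 */
    static bool csum_step(void) { return false; } /* fails */

    static int checksum_setup_sketch(void)
    {
            int err = -EPROTO;

            err = pull_step();        /* success clears err to 0 ...        */
            if (err < 0)
                    goto out;

            if (!csum_step()) {
                    err = -EPROTO;    /* ... so the error code has to be    */
                    goto out;         /* re-set before bailing out, which   */
            }                         /* is exactly what the hunks add.     */

            err = 0;
    out:
            return err;
    }

    int main(void)
    {
            printf("%d\n", checksum_setup_sketch()); /* -71 (-EPROTO), not 0 */
            return 0;
    }
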
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index de6f8990246f..c6973f101a3e 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig | |||
| @@ -20,7 +20,7 @@ config OF_SELFTEST | |||
| 20 | depends on OF_IRQ | 20 | depends on OF_IRQ |
| 21 | help | 21 | help |
| 22 | This option builds in test cases for the device tree infrastructure | 22 | This option builds in test cases for the device tree infrastructure |
| 23 | that are executed one at boot time, and the results dumped to the | 23 | that are executed once at boot time, and the results dumped to the |
| 24 | console. | 24 | console. |
| 25 | 25 | ||
| 26 | If unsure, say N here, but this option is safe to enable. | 26 | If unsure, say N here, but this option is safe to enable. |
diff --git a/drivers/of/address.c b/drivers/of/address.c index 4b9317bdb81c..d3dd41c840f1 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c | |||
| @@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range, | |||
| 69 | (unsigned long long)cp, (unsigned long long)s, | 69 | (unsigned long long)cp, (unsigned long long)s, |
| 70 | (unsigned long long)da); | 70 | (unsigned long long)da); |
| 71 | 71 | ||
| 72 | /* | ||
| 73 | * If the number of address cells is larger than 2 we assume the | ||
| 74 | * mapping doesn't specify a physical address. Rather, the address | ||
| 75 | * specifies an identifier that must match exactly. | ||
| 76 | */ | ||
| 77 | if (na > 2 && memcmp(range, addr, na * 4) != 0) | ||
| 78 | return OF_BAD_ADDR; | ||
| 79 | |||
| 80 | if (da < cp || da >= (cp + s)) | 72 | if (da < cp || da >= (cp + s)) |
| 81 | return OF_BAD_ADDR; | 73 | return OF_BAD_ADDR; |
| 82 | return da - cp; | 74 | return da - cp; |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 2fa024b97c43..758b4f8b30b7 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
| @@ -922,8 +922,16 @@ void __init unflatten_device_tree(void) | |||
| 922 | */ | 922 | */ |
| 923 | void __init unflatten_and_copy_device_tree(void) | 923 | void __init unflatten_and_copy_device_tree(void) |
| 924 | { | 924 | { |
| 925 | int size = __be32_to_cpu(initial_boot_params->totalsize); | 925 | int size; |
| 926 | void *dt = early_init_dt_alloc_memory_arch(size, | 926 | void *dt; |
| 927 | |||
| 928 | if (!initial_boot_params) { | ||
| 929 | pr_warn("No valid device tree found, continuing without\n"); | ||
| 930 | return; | ||
| 931 | } | ||
| 932 | |||
| 933 | size = __be32_to_cpu(initial_boot_params->totalsize); | ||
| 934 | dt = early_init_dt_alloc_memory_arch(size, | ||
| 927 | __alignof__(struct boot_param_header)); | 935 | __alignof__(struct boot_param_header)); |
| 928 | 936 | ||
| 929 | if (dt) { | 937 | if (dt) { |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 786b0b47fae4..27212402c532 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
| @@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) | |||
| 165 | if (of_get_property(ipar, "interrupt-controller", NULL) != | 165 | if (of_get_property(ipar, "interrupt-controller", NULL) != |
| 166 | NULL) { | 166 | NULL) { |
| 167 | pr_debug(" -> got it !\n"); | 167 | pr_debug(" -> got it !\n"); |
| 168 | of_node_put(old); | ||
| 169 | return 0; | 168 | return 0; |
| 170 | } | 169 | } |
| 171 | 170 | ||
| @@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) | |||
| 250 | * Successfully parsed an interrupt-map translation; copy new | 249 | * Successfully parsed an interrupt-map translation; copy new |
| 251 | * interrupt specifier into the out_irq structure | 250 | * interrupt specifier into the out_irq structure |
| 252 | */ | 251 | */ |
| 253 | of_node_put(out_irq->np); | 252 | out_irq->np = newpar; |
| 254 | out_irq->np = of_node_get(newpar); | ||
| 255 | 253 | ||
| 256 | match_array = imap - newaddrsize - newintsize; | 254 | match_array = imap - newaddrsize - newintsize; |
| 257 | for (i = 0; i < newintsize; i++) | 255 | for (i = 0; i < newintsize; i++) |
| @@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) | |||
| 268 | } | 266 | } |
| 269 | fail: | 267 | fail: |
| 270 | of_node_put(ipar); | 268 | of_node_put(ipar); |
| 271 | of_node_put(out_irq->np); | ||
| 272 | of_node_put(newpar); | 269 | of_node_put(newpar); |
| 273 | 270 | ||
| 274 | return -EINVAL; | 271 | return -EINVAL; |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 1cf605f67673..e86439283a5d 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
| @@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, | |||
| 279 | 279 | ||
| 280 | status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); | 280 | status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); |
| 281 | if (ACPI_FAILURE(status)) { | 281 | if (ACPI_FAILURE(status)) { |
| 282 | acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status); | 282 | if (status != AE_NOT_FOUND) |
| 283 | acpi_handle_warn(handle, | ||
| 284 | "can't evaluate _ADR (%#x)\n", status); | ||
| 283 | return AE_OK; | 285 | return AE_OK; |
| 284 | } | 286 | } |
| 285 | 287 | ||
| @@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot) | |||
| 643 | slot->flags &= (~SLOT_ENABLED); | 645 | slot->flags &= (~SLOT_ENABLED); |
| 644 | } | 646 | } |
| 645 | 647 | ||
| 648 | static bool acpiphp_no_hotplug(acpi_handle handle) | ||
| 649 | { | ||
| 650 | struct acpi_device *adev = NULL; | ||
| 651 | |||
| 652 | acpi_bus_get_device(handle, &adev); | ||
| 653 | return adev && adev->flags.no_hotplug; | ||
| 654 | } | ||
| 655 | |||
| 656 | static bool slot_no_hotplug(struct acpiphp_slot *slot) | ||
| 657 | { | ||
| 658 | struct acpiphp_func *func; | ||
| 659 | |||
| 660 | list_for_each_entry(func, &slot->funcs, sibling) | ||
| 661 | if (acpiphp_no_hotplug(func_to_handle(func))) | ||
| 662 | return true; | ||
| 663 | |||
| 664 | return false; | ||
| 665 | } | ||
| 646 | 666 | ||
| 647 | /** | 667 | /** |
| 648 | * get_slot_status - get ACPI slot status | 668 | * get_slot_status - get ACPI slot status |
| @@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev) | |||
| 701 | unsigned long long sta; | 721 | unsigned long long sta; |
| 702 | 722 | ||
| 703 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); | 723 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); |
| 704 | alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL; | 724 | alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL) |
| 725 | || acpiphp_no_hotplug(handle); | ||
| 705 | } | 726 | } |
| 706 | if (!alive) { | 727 | if (!alive) { |
| 707 | u32 v; | 728 | u32 v; |
| @@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) | |||
| 741 | struct pci_dev *dev, *tmp; | 762 | struct pci_dev *dev, *tmp; |
| 742 | 763 | ||
| 743 | mutex_lock(&slot->crit_sect); | 764 | mutex_lock(&slot->crit_sect); |
| 744 | /* wake up all functions */ | 765 | if (slot_no_hotplug(slot)) { |
| 745 | if (get_slot_status(slot) == ACPI_STA_ALL) { | 766 | ; /* do nothing */ |
| 767 | } else if (get_slot_status(slot) == ACPI_STA_ALL) { | ||
| 746 | /* remove stale devices if any */ | 768 | /* remove stale devices if any */ |
| 747 | list_for_each_entry_safe(dev, tmp, &bus->devices, | 769 | list_for_each_entry_safe(dev, tmp, &bus->devices, |
| 748 | bus_list) | 770 | bus_list) |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 577074efbe62..f7ebdba14bde 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
| @@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) | |||
| 330 | static void pci_acpi_setup(struct device *dev) | 330 | static void pci_acpi_setup(struct device *dev) |
| 331 | { | 331 | { |
| 332 | struct pci_dev *pci_dev = to_pci_dev(dev); | 332 | struct pci_dev *pci_dev = to_pci_dev(dev); |
| 333 | acpi_handle handle = ACPI_HANDLE(dev); | 333 | struct acpi_device *adev = ACPI_COMPANION(dev); |
| 334 | struct acpi_device *adev; | ||
| 335 | 334 | ||
| 336 | if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid) | 335 | if (!adev) |
| 336 | return; | ||
| 337 | |||
| 338 | pci_acpi_add_pm_notifier(adev, pci_dev); | ||
| 339 | if (!adev->wakeup.flags.valid) | ||
| 337 | return; | 340 | return; |
| 338 | 341 | ||
| 339 | device_set_wakeup_capable(dev, true); | 342 | device_set_wakeup_capable(dev, true); |
| 340 | acpi_pci_sleep_wake(pci_dev, false); | 343 | acpi_pci_sleep_wake(pci_dev, false); |
| 341 | |||
| 342 | pci_acpi_add_pm_notifier(adev, pci_dev); | ||
| 343 | if (adev->wakeup.flags.run_wake) | 344 | if (adev->wakeup.flags.run_wake) |
| 344 | device_set_run_wake(dev, true); | 345 | device_set_run_wake(dev, true); |
| 345 | } | 346 | } |
| 346 | 347 | ||
| 347 | static void pci_acpi_cleanup(struct device *dev) | 348 | static void pci_acpi_cleanup(struct device *dev) |
| 348 | { | 349 | { |
| 349 | acpi_handle handle = ACPI_HANDLE(dev); | 350 | struct acpi_device *adev = ACPI_COMPANION(dev); |
| 350 | struct acpi_device *adev; | 351 | |
| 352 | if (!adev) | ||
| 353 | return; | ||
| 351 | 354 | ||
| 352 | if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) { | 355 | pci_acpi_remove_pm_notifier(adev); |
| 356 | if (adev->wakeup.flags.valid) { | ||
| 353 | device_set_wakeup_capable(dev, false); | 357 | device_set_wakeup_capable(dev, false); |
| 354 | device_set_run_wake(dev, false); | 358 | device_set_run_wake(dev, false); |
| 355 | pci_acpi_remove_pm_notifier(adev); | ||
| 356 | } | 359 | } |
| 357 | } | 360 | } |
| 358 | 361 | ||
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index a344f3d52361..330ef2d06567 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig | |||
| @@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO | |||
| 24 | config OMAP_USB2 | 24 | config OMAP_USB2 |
| 25 | tristate "OMAP USB2 PHY Driver" | 25 | tristate "OMAP USB2 PHY Driver" |
| 26 | depends on ARCH_OMAP2PLUS | 26 | depends on ARCH_OMAP2PLUS |
| 27 | depends on USB_PHY | ||
| 27 | select GENERIC_PHY | 28 | select GENERIC_PHY |
| 28 | select USB_PHY | ||
| 29 | select OMAP_CONTROL_USB | 29 | select OMAP_CONTROL_USB |
| 30 | help | 30 | help |
| 31 | Enable this to support the transceiver that is part of SOC. This | 31 | Enable this to support the transceiver that is part of SOC. This |
| @@ -36,8 +36,8 @@ config OMAP_USB2 | |||
| 36 | config TWL4030_USB | 36 | config TWL4030_USB |
| 37 | tristate "TWL4030 USB Transceiver Driver" | 37 | tristate "TWL4030 USB Transceiver Driver" |
| 38 | depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS | 38 | depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS |
| 39 | depends on USB_PHY | ||
| 39 | select GENERIC_PHY | 40 | select GENERIC_PHY |
| 40 | select USB_PHY | ||
| 41 | help | 41 | help |
| 42 | Enable this to support the USB OTG transceiver on TWL4030 | 42 | Enable this to support the USB OTG transceiver on TWL4030 |
| 43 | family chips (including the TWL5030 and TPS659x0 devices). | 43 | family chips (including the TWL5030 and TPS659x0 devices). |
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 03cf8fb81554..58e0e9739028 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
| @@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, | |||
| 437 | int id; | 437 | int id; |
| 438 | struct phy *phy; | 438 | struct phy *phy; |
| 439 | 439 | ||
| 440 | if (!dev) { | 440 | if (WARN_ON(!dev)) |
| 441 | dev_WARN(dev, "no device provided for PHY\n"); | 441 | return ERR_PTR(-EINVAL); |
| 442 | ret = -EINVAL; | ||
| 443 | goto err0; | ||
| 444 | } | ||
| 445 | 442 | ||
| 446 | phy = kzalloc(sizeof(*phy), GFP_KERNEL); | 443 | phy = kzalloc(sizeof(*phy), GFP_KERNEL); |
| 447 | if (!phy) { | 444 | if (!phy) |
| 448 | ret = -ENOMEM; | 445 | return ERR_PTR(-ENOMEM); |
| 449 | goto err0; | ||
| 450 | } | ||
| 451 | 446 | ||
| 452 | id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); | 447 | id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); |
| 453 | if (id < 0) { | 448 | if (id < 0) { |
| 454 | dev_err(dev, "unable to get id\n"); | 449 | dev_err(dev, "unable to get id\n"); |
| 455 | ret = id; | 450 | ret = id; |
| 456 | goto err0; | 451 | goto free_phy; |
| 457 | } | 452 | } |
| 458 | 453 | ||
| 459 | device_initialize(&phy->dev); | 454 | device_initialize(&phy->dev); |
| @@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, | |||
| 468 | 463 | ||
| 469 | ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); | 464 | ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); |
| 470 | if (ret) | 465 | if (ret) |
| 471 | goto err1; | 466 | goto put_dev; |
| 472 | 467 | ||
| 473 | ret = device_add(&phy->dev); | 468 | ret = device_add(&phy->dev); |
| 474 | if (ret) | 469 | if (ret) |
| 475 | goto err1; | 470 | goto put_dev; |
| 476 | 471 | ||
| 477 | if (pm_runtime_enabled(dev)) { | 472 | if (pm_runtime_enabled(dev)) { |
| 478 | pm_runtime_enable(&phy->dev); | 473 | pm_runtime_enable(&phy->dev); |
| @@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, | |||
| 481 | 476 | ||
| 482 | return phy; | 477 | return phy; |
| 483 | 478 | ||
| 484 | err1: | 479 | put_dev: |
| 485 | ida_remove(&phy_ida, phy->id); | ||
| 486 | put_device(&phy->dev); | 480 | put_device(&phy->dev); |
| 481 | ida_remove(&phy_ida, phy->id); | ||
| 482 | free_phy: | ||
| 487 | kfree(phy); | 483 | kfree(phy); |
| 488 | |||
| 489 | err0: | ||
| 490 | return ERR_PTR(ret); | 484 | return ERR_PTR(ret); |
| 491 | } | 485 | } |
| 492 | EXPORT_SYMBOL_GPL(phy_create); | 486 | EXPORT_SYMBOL_GPL(phy_create); |
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c index 2832576d8b12..114f5ef4b73a 100644 --- a/drivers/pinctrl/pinctrl-baytrail.c +++ b/drivers/pinctrl/pinctrl-baytrail.c | |||
| @@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = { | |||
| 512 | 512 | ||
| 513 | static const struct acpi_device_id byt_gpio_acpi_match[] = { | 513 | static const struct acpi_device_id byt_gpio_acpi_match[] = { |
| 514 | { "INT33B2", 0 }, | 514 | { "INT33B2", 0 }, |
| 515 | { "INT33FC", 0 }, | ||
| 515 | { } | 516 | { } |
| 516 | }; | 517 | }; |
| 517 | MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); | 518 | MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); |
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 11bd0d970a52..e2142956a8e5 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h | |||
| @@ -254,7 +254,7 @@ struct sh_pfc_soc_info { | |||
| 254 | #define PINMUX_GPIO(_pin) \ | 254 | #define PINMUX_GPIO(_pin) \ |
| 255 | [GPIO_##_pin] = { \ | 255 | [GPIO_##_pin] = { \ |
| 256 | .pin = (u16)-1, \ | 256 | .pin = (u16)-1, \ |
| 257 | .name = __stringify(name), \ | 257 | .name = __stringify(GPIO_##_pin), \ |
| 258 | .enum_id = _pin##_DATA, \ | 258 | .enum_id = _pin##_DATA, \ |
| 259 | } | 259 | } |
| 260 | 260 | ||
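
The sh_pfc change is easy to miss: __stringify(name) stringifies the literal token "name" (PINMUX_GPIO has no parameter called name), so every GPIO entry ended up named "name"; stringifying GPIO_##_pin produces the intended per-pin string. A standalone sketch of the two expansions, using the same two-level stringify trick as the kernel's __stringify():

    #include <stdio.h>

    #define __stringify_1(x...) #x
    #define __stringify(x...)   __stringify_1(x)

    struct entry { const char *name; };

    /* Broken form: stringifies the unrelated token 'name'. */
    #define PINMUX_GPIO_OLD(_pin) { .name = __stringify(name) }
    /* Fixed form: stringifies the pasted token GPIO_<pin>. */
    #define PINMUX_GPIO_NEW(_pin) { .name = __stringify(GPIO_##_pin) }

    int main(void)
    {
            struct entry before = PINMUX_GPIO_OLD(PORT0);
            struct entry after  = PINMUX_GPIO_NEW(PORT0);

            printf("before: %s\n", before.name); /* "name"       */
            printf("after:  %s\n", after.name);  /* "GPIO_PORT0" */
            return 0;
    }
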
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 5e2054afe840..85ad58c6da17 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig | |||
| @@ -196,6 +196,7 @@ config BATTERY_MAX17040 | |||
| 196 | config BATTERY_MAX17042 | 196 | config BATTERY_MAX17042 |
| 197 | tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge" | 197 | tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge" |
| 198 | depends on I2C | 198 | depends on I2C |
| 199 | select REGMAP_I2C | ||
| 199 | help | 200 | help |
| 200 | MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries | 201 | MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries |
| 201 | in handheld and portable equipment. The MAX17042 is configured | 202 | in handheld and portable equipment. The MAX17042 is configured |
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 00e667296360..557af943b2f5 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c | |||
| @@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy) | |||
| 511 | dev_set_drvdata(dev, psy); | 511 | dev_set_drvdata(dev, psy); |
| 512 | psy->dev = dev; | 512 | psy->dev = dev; |
| 513 | 513 | ||
| 514 | rc = dev_set_name(dev, "%s", psy->name); | ||
| 515 | if (rc) | ||
| 516 | goto dev_set_name_failed; | ||
| 517 | |||
| 514 | INIT_WORK(&psy->changed_work, power_supply_changed_work); | 518 | INIT_WORK(&psy->changed_work, power_supply_changed_work); |
| 515 | 519 | ||
| 516 | rc = power_supply_check_supplies(psy); | 520 | rc = power_supply_check_supplies(psy); |
| @@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy) | |||
| 524 | if (rc) | 528 | if (rc) |
| 525 | goto wakeup_init_failed; | 529 | goto wakeup_init_failed; |
| 526 | 530 | ||
| 527 | rc = kobject_set_name(&dev->kobj, "%s", psy->name); | ||
| 528 | if (rc) | ||
| 529 | goto kobject_set_name_failed; | ||
| 530 | |||
| 531 | rc = device_add(dev); | 531 | rc = device_add(dev); |
| 532 | if (rc) | 532 | if (rc) |
| 533 | goto device_add_failed; | 533 | goto device_add_failed; |
| @@ -553,11 +553,11 @@ create_triggers_failed: | |||
| 553 | register_cooler_failed: | 553 | register_cooler_failed: |
| 554 | psy_unregister_thermal(psy); | 554 | psy_unregister_thermal(psy); |
| 555 | register_thermal_failed: | 555 | register_thermal_failed: |
| 556 | wakeup_init_failed: | ||
| 557 | device_del(dev); | 556 | device_del(dev); |
| 558 | kobject_set_name_failed: | ||
| 559 | device_add_failed: | 557 | device_add_failed: |
| 558 | wakeup_init_failed: | ||
| 560 | check_supplies_failed: | 559 | check_supplies_failed: |
| 560 | dev_set_name_failed: | ||
| 561 | put_device(dev); | 561 | put_device(dev); |
| 562 | success: | 562 | success: |
| 563 | return rc; | 563 | return rc; |
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 2a786c504460..3c6768378a94 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
| @@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd, | |||
| 833 | return 0; | 833 | return 0; |
| 834 | } | 834 | } |
| 835 | 835 | ||
| 836 | static const struct x86_cpu_id energy_unit_quirk_ids[] = { | ||
| 837 | { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ | ||
| 838 | {} | ||
| 839 | }; | ||
| 840 | |||
| 836 | static int rapl_check_unit(struct rapl_package *rp, int cpu) | 841 | static int rapl_check_unit(struct rapl_package *rp, int cpu) |
| 837 | { | 842 | { |
| 838 | u64 msr_val; | 843 | u64 msr_val; |
| @@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu) | |||
| 853 | * time unit: 1/time_unit_divisor Seconds | 858 | * time unit: 1/time_unit_divisor Seconds |
| 854 | */ | 859 | */ |
| 855 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | 860 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; |
| 856 | rp->energy_unit_divisor = 1 << value; | 861 | /* some CPUs have different way to calculate energy unit */ |
| 857 | 862 | if (x86_match_cpu(energy_unit_quirk_ids)) | |
| 863 | rp->energy_unit_divisor = 1000000 / (1 << value); | ||
| 864 | else | ||
| 865 | rp->energy_unit_divisor = 1 << value; | ||
| 858 | 866 | ||
| 859 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | 867 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; |
| 860 | rp->power_unit_divisor = 1 << value; | 868 | rp->power_unit_divisor = 1 << value; |
| @@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id) | |||
| 941 | static const struct x86_cpu_id rapl_ids[] = { | 949 | static const struct x86_cpu_id rapl_ids[] = { |
| 942 | { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ | 950 | { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ |
| 943 | { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ | 951 | { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ |
| 952 | { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ | ||
| 944 | { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ | 953 | { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ |
| 945 | { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ | 954 | { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ |
| 946 | /* TODO: Add more CPU IDs after testing */ | 955 | /* TODO: Add more CPU IDs after testing */ |
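
For Valleyview the energy status unit is interpreted differently, so the quirk branch above derives the divisor as 1000000 / 2^value instead of 2^value. A small sketch of what the two rules give for the same ENERGY_UNIT field; the field value 14 is only an example (a common reading on big-core parts), not taken from this patch:

    #include <stdio.h>

    int main(void)
    {
            unsigned int value = 14; /* example ENERGY_UNIT field of MSR_RAPL_POWER_UNIT */

            /* Default rule, kept for SNB/IVB/HSW. */
            unsigned int generic_divisor = 1 << value;
            /* Valleyview quirk added by this patch. */
            unsigned int vlv_divisor = 1000000 / (1 << value);

            printf("generic: %u\n", generic_divisor); /* 16384 */
            printf("VLV:     %u\n", vlv_divisor);     /* 61    */
            return 0;
    }
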
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index ce785f481281..77711d4bd377 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig | |||
| @@ -70,6 +70,14 @@ config REGULATOR_88PM8607 | |||
| 70 | help | 70 | help |
| 71 | This driver supports 88PM8607 voltage regulator chips. | 71 | This driver supports 88PM8607 voltage regulator chips. |
| 72 | 72 | ||
| 73 | config REGULATOR_ACT8865 | ||
| 74 | tristate "Active-semi act8865 voltage regulator" | ||
| 75 | depends on I2C | ||
| 76 | select REGMAP_I2C | ||
| 77 | help | ||
| 78 | This driver controls an active-semi act8865 voltage output | ||
| 79 | regulator via I2C bus. | ||
| 80 | |||
| 73 | config REGULATOR_AD5398 | 81 | config REGULATOR_AD5398 |
| 74 | tristate "Analog Devices AD5398/AD5821 regulators" | 82 | tristate "Analog Devices AD5398/AD5821 regulators" |
| 75 | depends on I2C | 83 | depends on I2C |
| @@ -249,6 +257,13 @@ config REGULATOR_LP8788 | |||
| 249 | help | 257 | help |
| 250 | This driver supports LP8788 voltage regulator chip. | 258 | This driver supports LP8788 voltage regulator chip. |
| 251 | 259 | ||
| 260 | config REGULATOR_MAX14577 | ||
| 261 | tristate "Maxim 14577 regulator" | ||
| 262 | depends on MFD_MAX14577 | ||
| 263 | help | ||
| 264 | This driver controls a Maxim 14577 regulator via I2C bus. | ||
| 265 | The regulators include safeout LDO and current regulator 'CHARGER'. | ||
| 266 | |||
| 252 | config REGULATOR_MAX1586 | 267 | config REGULATOR_MAX1586 |
| 253 | tristate "Maxim 1586/1587 voltage regulator" | 268 | tristate "Maxim 1586/1587 voltage regulator" |
| 254 | depends on I2C | 269 | depends on I2C |
| @@ -384,7 +399,7 @@ config REGULATOR_PCF50633 | |||
| 384 | on PCF50633 | 399 | on PCF50633 |
| 385 | 400 | ||
| 386 | config REGULATOR_PFUZE100 | 401 | config REGULATOR_PFUZE100 |
| 387 | tristate "Support regulators on Freescale PFUZE100 PMIC" | 402 | tristate "Freescale PFUZE100 regulator driver" |
| 388 | depends on I2C | 403 | depends on I2C |
| 389 | select REGMAP_I2C | 404 | select REGMAP_I2C |
| 390 | help | 405 | help |
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 01c597ea1744..979f9ddcf259 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile | |||
| @@ -14,6 +14,7 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o | |||
| 14 | obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o | 14 | obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o |
| 15 | obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o | 15 | obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o |
| 16 | obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o | 16 | obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o |
| 17 | obj-$(CONFIG_REGULATOR_ACT8865) += act8865-regulator.o | ||
| 17 | obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o | 18 | obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o |
| 18 | obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o | 19 | obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o |
| 19 | obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o | 20 | obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o |
| @@ -35,6 +36,7 @@ obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o | |||
| 35 | obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o | 36 | obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o |
| 36 | obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o | 37 | obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o |
| 37 | obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o | 38 | obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o |
| 39 | obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o | ||
| 38 | obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o | 40 | obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o |
| 39 | obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o | 41 | obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o |
| 40 | obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o | 42 | obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o |
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index 603f192e84f1..c625468c7f2c 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c | |||
| @@ -2998,37 +2998,6 @@ static void abx500_get_regulator_info(struct ab8500 *ab8500) | |||
| 2998 | } | 2998 | } |
| 2999 | } | 2999 | } |
| 3000 | 3000 | ||
| 3001 | static int ab8500_regulator_init_registers(struct platform_device *pdev, | ||
| 3002 | int id, int mask, int value) | ||
| 3003 | { | ||
| 3004 | struct ab8500_reg_init *reg_init = abx500_regulator.init; | ||
| 3005 | int err; | ||
| 3006 | |||
| 3007 | BUG_ON(value & ~mask); | ||
| 3008 | BUG_ON(mask & ~reg_init[id].mask); | ||
| 3009 | |||
| 3010 | /* initialize register */ | ||
| 3011 | err = abx500_mask_and_set_register_interruptible( | ||
| 3012 | &pdev->dev, | ||
| 3013 | reg_init[id].bank, | ||
| 3014 | reg_init[id].addr, | ||
| 3015 | mask, value); | ||
| 3016 | if (err < 0) { | ||
| 3017 | dev_err(&pdev->dev, | ||
| 3018 | "Failed to initialize 0x%02x, 0x%02x.\n", | ||
| 3019 | reg_init[id].bank, | ||
| 3020 | reg_init[id].addr); | ||
| 3021 | return err; | ||
| 3022 | } | ||
| 3023 | dev_vdbg(&pdev->dev, | ||
| 3024 | " init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", | ||
| 3025 | reg_init[id].bank, | ||
| 3026 | reg_init[id].addr, | ||
| 3027 | mask, value); | ||
| 3028 | |||
| 3029 | return 0; | ||
| 3030 | } | ||
| 3031 | |||
| 3032 | static int ab8500_regulator_register(struct platform_device *pdev, | 3001 | static int ab8500_regulator_register(struct platform_device *pdev, |
| 3033 | struct regulator_init_data *init_data, | 3002 | struct regulator_init_data *init_data, |
| 3034 | int id, struct device_node *np) | 3003 | int id, struct device_node *np) |
| @@ -3036,7 +3005,6 @@ static int ab8500_regulator_register(struct platform_device *pdev, | |||
| 3036 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); | 3005 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); |
| 3037 | struct ab8500_regulator_info *info = NULL; | 3006 | struct ab8500_regulator_info *info = NULL; |
| 3038 | struct regulator_config config = { }; | 3007 | struct regulator_config config = { }; |
| 3039 | int err; | ||
| 3040 | 3008 | ||
| 3041 | /* assign per-regulator data */ | 3009 | /* assign per-regulator data */ |
| 3042 | info = &abx500_regulator.info[id]; | 3010 | info = &abx500_regulator.info[id]; |
| @@ -3058,17 +3026,12 @@ static int ab8500_regulator_register(struct platform_device *pdev, | |||
| 3058 | } | 3026 | } |
| 3059 | 3027 | ||
| 3060 | /* register regulator with framework */ | 3028 | /* register regulator with framework */ |
| 3061 | info->regulator = regulator_register(&info->desc, &config); | 3029 | info->regulator = devm_regulator_register(&pdev->dev, &info->desc, |
| 3030 | &config); | ||
| 3062 | if (IS_ERR(info->regulator)) { | 3031 | if (IS_ERR(info->regulator)) { |
| 3063 | err = PTR_ERR(info->regulator); | ||
| 3064 | dev_err(&pdev->dev, "failed to register regulator %s\n", | 3032 | dev_err(&pdev->dev, "failed to register regulator %s\n", |
| 3065 | info->desc.name); | 3033 | info->desc.name); |
| 3066 | /* when we fail, un-register all earlier regulators */ | 3034 | return PTR_ERR(info->regulator); |
| 3067 | while (--id >= 0) { | ||
| 3068 | info = &abx500_regulator.info[id]; | ||
| 3069 | regulator_unregister(info->regulator); | ||
| 3070 | } | ||
| 3071 | return err; | ||
| 3072 | } | 3035 | } |
| 3073 | 3036 | ||
| 3074 | return 0; | 3037 | return 0; |
| @@ -3095,9 +3058,7 @@ static int ab8500_regulator_probe(struct platform_device *pdev) | |||
| 3095 | { | 3058 | { |
| 3096 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); | 3059 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); |
| 3097 | struct device_node *np = pdev->dev.of_node; | 3060 | struct device_node *np = pdev->dev.of_node; |
| 3098 | struct ab8500_platform_data *ppdata; | 3061 | int err; |
| 3099 | struct ab8500_regulator_platform_data *pdata; | ||
| 3100 | int i, err; | ||
| 3101 | 3062 | ||
| 3102 | if (!ab8500) { | 3063 | if (!ab8500) { |
| 3103 | dev_err(&pdev->dev, "null mfd parent\n"); | 3064 | dev_err(&pdev->dev, "null mfd parent\n"); |
| @@ -3106,83 +3067,20 @@ static int ab8500_regulator_probe(struct platform_device *pdev) | |||
| 3106 | 3067 | ||
| 3107 | abx500_get_regulator_info(ab8500); | 3068 | abx500_get_regulator_info(ab8500); |
| 3108 | 3069 | ||
| 3109 | if (np) { | 3070 | err = of_regulator_match(&pdev->dev, np, |
| 3110 | err = of_regulator_match(&pdev->dev, np, | 3071 | abx500_regulator.match, |
| 3111 | abx500_regulator.match, | 3072 | abx500_regulator.match_size); |
| 3112 | abx500_regulator.match_size); | 3073 | if (err < 0) { |
| 3113 | if (err < 0) { | 3074 | dev_err(&pdev->dev, |
| 3114 | dev_err(&pdev->dev, | 3075 | "Error parsing regulator init data: %d\n", err); |
| 3115 | "Error parsing regulator init data: %d\n", err); | ||
| 3116 | return err; | ||
| 3117 | } | ||
| 3118 | |||
| 3119 | err = ab8500_regulator_of_probe(pdev, np); | ||
| 3120 | return err; | ||
| 3121 | } | ||
| 3122 | |||
| 3123 | ppdata = dev_get_platdata(ab8500->dev); | ||
| 3124 | if (!ppdata) { | ||
| 3125 | dev_err(&pdev->dev, "null parent pdata\n"); | ||
| 3126 | return -EINVAL; | ||
| 3127 | } | ||
| 3128 | |||
| 3129 | pdata = ppdata->regulator; | ||
| 3130 | if (!pdata) { | ||
| 3131 | dev_err(&pdev->dev, "null pdata\n"); | ||
| 3132 | return -EINVAL; | ||
| 3133 | } | ||
| 3134 | |||
| 3135 | /* make sure the platform data has the correct size */ | ||
| 3136 | if (pdata->num_regulator != abx500_regulator.info_size) { | ||
| 3137 | dev_err(&pdev->dev, "Configuration error: size mismatch.\n"); | ||
| 3138 | return -EINVAL; | ||
| 3139 | } | ||
| 3140 | |||
| 3141 | /* initialize debug (initial state is recorded with this call) */ | ||
| 3142 | err = ab8500_regulator_debug_init(pdev); | ||
| 3143 | if (err) | ||
| 3144 | return err; | 3076 | return err; |
| 3145 | |||
| 3146 | /* initialize registers */ | ||
| 3147 | for (i = 0; i < pdata->num_reg_init; i++) { | ||
| 3148 | int id, mask, value; | ||
| 3149 | |||
| 3150 | id = pdata->reg_init[i].id; | ||
| 3151 | mask = pdata->reg_init[i].mask; | ||
| 3152 | value = pdata->reg_init[i].value; | ||
| 3153 | |||
| 3154 | /* check for configuration errors */ | ||
| 3155 | BUG_ON(id >= abx500_regulator.init_size); | ||
| 3156 | |||
| 3157 | err = ab8500_regulator_init_registers(pdev, id, mask, value); | ||
| 3158 | if (err < 0) | ||
| 3159 | return err; | ||
| 3160 | } | 3077 | } |
| 3161 | 3078 | return ab8500_regulator_of_probe(pdev, np); | |
| 3162 | /* register all regulators */ | ||
| 3163 | for (i = 0; i < abx500_regulator.info_size; i++) { | ||
| 3164 | err = ab8500_regulator_register(pdev, &pdata->regulator[i], | ||
| 3165 | i, NULL); | ||
| 3166 | if (err < 0) | ||
| 3167 | return err; | ||
| 3168 | } | ||
| 3169 | |||
| 3170 | return 0; | ||
| 3171 | } | 3079 | } |
| 3172 | 3080 | ||
| 3173 | static int ab8500_regulator_remove(struct platform_device *pdev) | 3081 | static int ab8500_regulator_remove(struct platform_device *pdev) |
| 3174 | { | 3082 | { |
| 3175 | int i, err; | 3083 | int err; |
| 3176 | |||
| 3177 | for (i = 0; i < abx500_regulator.info_size; i++) { | ||
| 3178 | struct ab8500_regulator_info *info = NULL; | ||
| 3179 | info = &abx500_regulator.info[i]; | ||
| 3180 | |||
| 3181 | dev_vdbg(rdev_get_dev(info->regulator), | ||
| 3182 | "%s-remove\n", info->desc.name); | ||
| 3183 | |||
| 3184 | regulator_unregister(info->regulator); | ||
| 3185 | } | ||
| 3186 | 3084 | ||
| 3187 | /* remove regulator debug */ | 3085 | /* remove regulator debug */ |
| 3188 | err = ab8500_regulator_debug_exit(pdev); | 3086 | err = ab8500_regulator_debug_exit(pdev); |
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c new file mode 100644 index 000000000000..084cc0819a52 --- /dev/null +++ b/drivers/regulator/act8865-regulator.c | |||
| @@ -0,0 +1,349 @@ | |||
| 1 | /* | ||
| 2 | * act8865-regulator.c - Voltage regulation for the active-semi ACT8865 | ||
| 3 | * http://www.active-semi.com/sheets/ACT8865_Datasheet.pdf | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013 Atmel Corporation | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/i2c.h> | ||
| 21 | #include <linux/err.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/regulator/driver.h> | ||
| 24 | #include <linux/regulator/act8865.h> | ||
| 25 | #include <linux/of.h> | ||
| 26 | #include <linux/of_device.h> | ||
| 27 | #include <linux/regulator/of_regulator.h> | ||
| 28 | #include <linux/regmap.h> | ||
| 29 | |||
| 30 | /* | ||
| 31 | * ACT8865 Global Register Map. | ||
| 32 | */ | ||
| 33 | #define ACT8865_SYS_MODE 0x00 | ||
| 34 | #define ACT8865_SYS_CTRL 0x01 | ||
| 35 | #define ACT8865_DCDC1_VSET1 0x20 | ||
| 36 | #define ACT8865_DCDC1_VSET2 0x21 | ||
| 37 | #define ACT8865_DCDC1_CTRL 0x22 | ||
| 38 | #define ACT8865_DCDC2_VSET1 0x30 | ||
| 39 | #define ACT8865_DCDC2_VSET2 0x31 | ||
| 40 | #define ACT8865_DCDC2_CTRL 0x32 | ||
| 41 | #define ACT8865_DCDC3_VSET1 0x40 | ||
| 42 | #define ACT8865_DCDC3_VSET2 0x41 | ||
| 43 | #define ACT8865_DCDC3_CTRL 0x42 | ||
| 44 | #define ACT8865_LDO1_VSET 0x50 | ||
| 45 | #define ACT8865_LDO1_CTRL 0x51 | ||
| 46 | #define ACT8865_LDO2_VSET 0x54 | ||
| 47 | #define ACT8865_LDO2_CTRL 0x55 | ||
| 48 | #define ACT8865_LDO3_VSET 0x60 | ||
| 49 | #define ACT8865_LDO3_CTRL 0x61 | ||
| 50 | #define ACT8865_LDO4_VSET 0x64 | ||
| 51 | #define ACT8865_LDO4_CTRL 0x65 | ||
| 52 | |||
| 53 | /* | ||
| 54 | * Field Definitions. | ||
| 55 | */ | ||
| 56 | #define ACT8865_ENA 0x80 /* ON - [7] */ | ||
| 57 | #define ACT8865_VSEL_MASK 0x3F /* VSET - [5:0] */ | ||
| 58 | |||
| 59 | /* | ||
| 60 | * ACT8865 voltage number | ||
| 61 | */ | ||
| 62 | #define ACT8865_VOLTAGE_NUM 64 | ||
| 63 | |||
| 64 | struct act8865 { | ||
| 65 | struct regulator_dev *rdev[ACT8865_REG_NUM]; | ||
| 66 | struct regmap *regmap; | ||
| 67 | }; | ||
| 68 | |||
| 69 | static const struct regmap_config act8865_regmap_config = { | ||
| 70 | .reg_bits = 8, | ||
| 71 | .val_bits = 8, | ||
| 72 | }; | ||
| 73 | |||
| 74 | static const struct regulator_linear_range act8865_volatge_ranges[] = { | ||
| 75 | REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000), | ||
| 76 | REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000), | ||
| 77 | REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000), | ||
| 78 | }; | ||
| 79 | |||
| 80 | static struct regulator_ops act8865_ops = { | ||
| 81 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 82 | .map_voltage = regulator_map_voltage_linear_range, | ||
| 83 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | ||
| 84 | .set_voltage_sel = regulator_set_voltage_sel_regmap, | ||
| 85 | .enable = regulator_enable_regmap, | ||
| 86 | .disable = regulator_disable_regmap, | ||
| 87 | .is_enabled = regulator_is_enabled_regmap, | ||
| 88 | }; | ||
| 89 | |||
| 90 | static const struct regulator_desc act8865_reg[] = { | ||
| 91 | { | ||
| 92 | .name = "DCDC_REG1", | ||
| 93 | .id = ACT8865_ID_DCDC1, | ||
| 94 | .ops = &act8865_ops, | ||
| 95 | .type = REGULATOR_VOLTAGE, | ||
| 96 | .n_voltages = ACT8865_VOLTAGE_NUM, | ||
| 97 | .linear_ranges = act8865_volatge_ranges, | ||
| 98 | .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges), | ||
| 99 | .vsel_reg = ACT8865_DCDC1_VSET1, | ||
| 100 | .vsel_mask = ACT8865_VSEL_MASK, | ||
| 101 | .enable_reg = ACT8865_DCDC1_CTRL, | ||
| 102 | .enable_mask = ACT8865_ENA, | ||
| 103 | .owner = THIS_MODULE, | ||
| 104 | }, | ||
| 105 | { | ||
| 106 | .name = "DCDC_REG2", | ||
| 107 | .id = ACT8865_ID_DCDC2, | ||
| 108 | .ops = &act8865_ops, | ||
| 109 | .type = REGULATOR_VOLTAGE, | ||
| 110 | .n_voltages = ACT8865_VOLTAGE_NUM, | ||
| 111 | .linear_ranges = act8865_volatge_ranges, | ||
| 112 | .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges), | ||
| 113 | .vsel_reg = ACT8865_DCDC2_VSET1, | ||
| 114 | .vsel_mask = ACT8865_VSEL_MASK, | ||
| 115 | .enable_reg = ACT8865_DCDC2_CTRL, | ||
| 116 | .enable_mask = ACT8865_ENA, | ||
| 117 | .owner = THIS_MODULE, | ||
| 118 | }, | ||
| 119 | { | ||
| 120 | .name = "DCDC_REG3", | ||
| 121 | .id = ACT8865_ID_DCDC3, | ||
| 122 | .ops = &act8865_ops, | ||
| 123 | .type = REGULATOR_VOLTAGE, | ||
| 124 | .n_voltages = ACT8865_VOLTAGE_NUM, | ||
| 125 | .linear_ranges = act8865_volatge_ranges, | ||
| 126 | .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges), | ||
| 127 | .vsel_reg = ACT8865_DCDC3_VSET1, | ||
| 128 | .vsel_mask = ACT8865_VSEL_MASK, | ||
| 129 | .enable_reg = ACT8865_DCDC3_CTRL, | ||
| 130 | .enable_mask = ACT8865_ENA, | ||
| 131 | .owner = THIS_MODULE, | ||
| 132 | }, | ||
| 133 | { | ||
| 134 | .name = "LDO_REG1", | ||
| 135 | .id = ACT8865_ID_LDO1, | ||
| 136 | .ops = &act8865_ops, | ||
| 137 | .type = REGULATOR_VOLTAGE, | ||
| 138 | .n_voltages = ACT8865_VOLTAGE_NUM, | ||
| 139 | .linear_ranges = act8865_volatge_ranges, | ||
| 140 | .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges), | ||
| 141 | .vsel_reg = ACT8865_LDO1_VSET, | ||
| 142 | .vsel_mask = ACT8865_VSEL_MASK, | ||
| 143 | .enable_reg = ACT8865_LDO1_CTRL, | ||
| 144 | .enable_mask = ACT8865_ENA, | ||
| 145 | .owner = THIS_MODULE, | ||
| 146 | }, | ||
| 147 | { | ||
| 148 | .name = "LDO_REG2", | ||
| 149 | .id = ACT8865_ID_LDO2, | ||
| 150 | .ops = &act8865_ops, | ||
| 151 | .type = REGULATOR_VOLTAGE, | ||
| 152 | .n_voltages = ACT8865_VOLTAGE_NUM, | ||
| 153 | .linear_ranges = act8865_volatge_ranges, | ||
| 154 | .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges), | ||
| 155 | .vsel_reg = ACT8865_LDO2_VSET, | ||
| 156 | .vsel_mask = ACT8865_VSEL_MASK, | ||
| 157 | .enable_reg = ACT8865_LDO2_CTRL, | ||
| 158 | .enable_mask = ACT8865_ENA, | ||
| 159 | .owner = THIS_MODULE, | ||
| 160 | }, | ||
| 161 | { | ||
| 162 | .name = "LDO_REG3", | ||
| 163 | .id = ACT8865_ID_LDO3, | ||
| 164 | .ops = &act8865_ops, | ||
| 165 | .type = REGULATOR_VOLTAGE, | ||
| 166 | .n_voltages = ACT8865_VOLTAGE_NUM, | ||
| 167 | .linear_ranges = act8865_volatge_ranges, | ||
| 168 | .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges), | ||
| 169 | .vsel_reg = ACT8865_LDO3_VSET, | ||
| 170 | .vsel_mask = ACT8865_VSEL_MASK, | ||
| 171 | .enable_reg = ACT8865_LDO3_CTRL, | ||
| 172 | .enable_mask = ACT8865_ENA, | ||
| 173 | .owner = THIS_MODULE, | ||
| 174 | }, | ||
| 175 | { | ||
| 176 | .name = "LDO_REG4", | ||
| 177 | .id = ACT8865_ID_LDO4, | ||
| 178 | .ops = &act8865_ops, | ||
| 179 | .type = REGULATOR_VOLTAGE, | ||
| 180 | .n_voltages = ACT8865_VOLTAGE_NUM, | ||
| 181 | .linear_ranges = act8865_volatge_ranges, | ||
| 182 | .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges), | ||
| 183 | .vsel_reg = ACT8865_LDO4_VSET, | ||
| 184 | .vsel_mask = ACT8865_VSEL_MASK, | ||
| 185 | .enable_reg = ACT8865_LDO4_CTRL, | ||
| 186 | .enable_mask = ACT8865_ENA, | ||
| 187 | .owner = THIS_MODULE, | ||
| 188 | }, | ||
| 189 | }; | ||
| 190 | |||
| 191 | #ifdef CONFIG_OF | ||
| 192 | static const struct of_device_id act8865_dt_ids[] = { | ||
| 193 | { .compatible = "active-semi,act8865" }, | ||
| 194 | { } | ||
| 195 | }; | ||
| 196 | MODULE_DEVICE_TABLE(of, act8865_dt_ids); | ||
| 197 | |||
| 198 | static struct of_regulator_match act8865_matches[] = { | ||
| 199 | [ACT8865_ID_DCDC1] = { .name = "DCDC_REG1"}, | ||
| 200 | [ACT8865_ID_DCDC2] = { .name = "DCDC_REG2"}, | ||
| 201 | [ACT8865_ID_DCDC3] = { .name = "DCDC_REG3"}, | ||
| 202 | [ACT8865_ID_LDO1] = { .name = "LDO_REG1"}, | ||
| 203 | [ACT8865_ID_LDO2] = { .name = "LDO_REG2"}, | ||
| 204 | [ACT8865_ID_LDO3] = { .name = "LDO_REG3"}, | ||
| 205 | [ACT8865_ID_LDO4] = { .name = "LDO_REG4"}, | ||
| 206 | }; | ||
| 207 | |||
| 208 | static int act8865_pdata_from_dt(struct device *dev, | ||
| 209 | struct device_node **of_node, | ||
| 210 | struct act8865_platform_data *pdata) | ||
| 211 | { | ||
| 212 | int matched, i; | ||
| 213 | struct device_node *np; | ||
| 214 | struct act8865_regulator_data *regulator; | ||
| 215 | |||
| 216 | np = of_find_node_by_name(dev->of_node, "regulators"); | ||
| 217 | if (!np) { | ||
| 218 | dev_err(dev, "missing 'regulators' subnode in DT\n"); | ||
| 219 | return -EINVAL; | ||
| 220 | } | ||
| 221 | |||
| 222 | matched = of_regulator_match(dev, np, | ||
| 223 | act8865_matches, ARRAY_SIZE(act8865_matches)); | ||
| 224 | if (matched <= 0) | ||
| 225 | return matched; | ||
| 226 | |||
| 227 | pdata->regulators = devm_kzalloc(dev, | ||
| 228 | sizeof(struct act8865_regulator_data) * | ||
| 229 | ARRAY_SIZE(act8865_matches), GFP_KERNEL); | ||
| 230 | if (!pdata->regulators) { | ||
| 231 | dev_err(dev, "%s: failed to allocate act8865 regulator\n", | ||
| 232 | __func__); | ||
| 233 | return -ENOMEM; | ||
| 234 | } | ||
| 235 | |||
| 236 | pdata->num_regulators = matched; | ||
| 237 | regulator = pdata->regulators; | ||
| 238 | |||
| 239 | for (i = 0; i < ARRAY_SIZE(act8865_matches); i++) { | ||
| 240 | regulator->id = i; | ||
| 241 | regulator->name = act8865_matches[i].name; | ||
| 242 | regulator->platform_data = act8865_matches[i].init_data; | ||
| 243 | of_node[i] = act8865_matches[i].of_node; | ||
| 244 | regulator++; | ||
| 245 | } | ||
| 246 | |||
| 247 | return 0; | ||
| 248 | } | ||
| 249 | #else | ||
| 250 | static inline int act8865_pdata_from_dt(struct device *dev, | ||
| 251 | struct device_node **of_node, | ||
| 252 | struct act8865_platform_data *pdata) | ||
| 253 | { | ||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | #endif | ||
| 257 | |||
| 258 | static int act8865_pmic_probe(struct i2c_client *client, | ||
| 259 | const struct i2c_device_id *i2c_id) | ||
| 260 | { | ||
| 261 | struct regulator_dev **rdev; | ||
| 262 | struct device *dev = &client->dev; | ||
| 263 | struct act8865_platform_data *pdata = dev_get_platdata(dev); | ||
| 264 | struct regulator_config config = { }; | ||
| 265 | struct act8865 *act8865; | ||
| 266 | struct device_node *of_node[ACT8865_REG_NUM]; | ||
| 267 | int i, id; | ||
| 268 | int ret = -EINVAL; | ||
| 269 | int error; | ||
| 270 | |||
| 271 | if (dev->of_node && !pdata) { | ||
| 272 | const struct of_device_id *id; | ||
| 273 | struct act8865_platform_data pdata_of; | ||
| 274 | |||
| 275 | id = of_match_device(of_match_ptr(act8865_dt_ids), dev); | ||
| 276 | if (!id) | ||
| 277 | return -ENODEV; | ||
| 278 | |||
| 279 | ret = act8865_pdata_from_dt(dev, of_node, &pdata_of); | ||
| 280 | if (ret < 0) | ||
| 281 | return ret; | ||
| 282 | |||
| 283 | pdata = &pdata_of; | ||
| 284 | } | ||
| 285 | |||
| 286 | if (pdata->num_regulators > ACT8865_REG_NUM) { | ||
| 287 | dev_err(dev, "Too many regulators found!\n"); | ||
| 288 | return -EINVAL; | ||
| 289 | } | ||
| 290 | |||
| 291 | act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL); | ||
| 292 | if (!act8865) | ||
| 293 | return -ENOMEM; | ||
| 294 | |||
| 295 | rdev = act8865->rdev; | ||
| 296 | |||
| 297 | act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config); | ||
| 298 | if (IS_ERR(act8865->regmap)) { | ||
| 299 | error = PTR_ERR(act8865->regmap); | ||
| 300 | dev_err(&client->dev, "Failed to allocate register map: %d\n", | ||
| 301 | error); | ||
| 302 | return error; | ||
| 303 | } | ||
| 304 | |||
| 305 | /* Finally register devices */ | ||
| 306 | for (i = 0; i < ACT8865_REG_NUM; i++) { | ||
| 307 | |||
| 308 | id = pdata->regulators[i].id; | ||
| 309 | |||
| 310 | config.dev = dev; | ||
| 311 | config.init_data = pdata->regulators[i].platform_data; | ||
| 312 | config.of_node = of_node[i]; | ||
| 313 | config.driver_data = act8865; | ||
| 314 | config.regmap = act8865->regmap; | ||
| 315 | |||
| 316 | rdev[i] = devm_regulator_register(&client->dev, | ||
| 317 | &act8865_reg[i], &config); | ||
| 318 | if (IS_ERR(rdev[i])) { | ||
| 319 | dev_err(dev, "failed to register %s\n", | ||
| 320 | act8865_reg[id].name); | ||
| 321 | return PTR_ERR(rdev[i]); | ||
| 322 | } | ||
| 323 | } | ||
| 324 | |||
| 325 | i2c_set_clientdata(client, act8865); | ||
| 326 | |||
| 327 | return 0; | ||
| 328 | } | ||
| 329 | |||
| 330 | static const struct i2c_device_id act8865_ids[] = { | ||
| 331 | { "act8865", 0 }, | ||
| 332 | { }, | ||
| 333 | }; | ||
| 334 | MODULE_DEVICE_TABLE(i2c, act8865_ids); | ||
| 335 | |||
| 336 | static struct i2c_driver act8865_pmic_driver = { | ||
| 337 | .driver = { | ||
| 338 | .name = "act8865", | ||
| 339 | .owner = THIS_MODULE, | ||
| 340 | }, | ||
| 341 | .probe = act8865_pmic_probe, | ||
| 342 | .id_table = act8865_ids, | ||
| 343 | }; | ||
| 344 | |||
| 345 | module_i2c_driver(act8865_pmic_driver); | ||
| 346 | |||
| 347 | MODULE_DESCRIPTION("active-semi act8865 voltage regulator driver"); | ||
| 348 | MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>"); | ||
| 349 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index c734d0980826..862e63e451d0 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c | |||
| @@ -122,10 +122,8 @@ static int anatop_regulator_probe(struct platform_device *pdev) | |||
| 122 | if (!sreg) | 122 | if (!sreg) |
| 123 | return -ENOMEM; | 123 | return -ENOMEM; |
| 124 | sreg->initdata = initdata; | 124 | sreg->initdata = initdata; |
| 125 | sreg->name = kstrdup(of_get_property(np, "regulator-name", NULL), | 125 | sreg->name = of_get_property(np, "regulator-name", NULL); |
| 126 | GFP_KERNEL); | ||
| 127 | rdesc = &sreg->rdesc; | 126 | rdesc = &sreg->rdesc; |
| 128 | memset(rdesc, 0, sizeof(*rdesc)); | ||
| 129 | rdesc->name = sreg->name; | 127 | rdesc->name = sreg->name; |
| 130 | rdesc->ops = &anatop_rops; | 128 | rdesc->ops = &anatop_rops; |
| 131 | rdesc->type = REGULATOR_VOLTAGE; | 129 | rdesc->type = REGULATOR_VOLTAGE; |
| @@ -143,37 +141,37 @@ static int anatop_regulator_probe(struct platform_device *pdev) | |||
| 143 | &sreg->control_reg); | 141 | &sreg->control_reg); |
| 144 | if (ret) { | 142 | if (ret) { |
| 145 | dev_err(dev, "no anatop-reg-offset property set\n"); | 143 | dev_err(dev, "no anatop-reg-offset property set\n"); |
| 146 | goto anatop_probe_end; | 144 | return ret; |
| 147 | } | 145 | } |
| 148 | ret = of_property_read_u32(np, "anatop-vol-bit-width", | 146 | ret = of_property_read_u32(np, "anatop-vol-bit-width", |
| 149 | &sreg->vol_bit_width); | 147 | &sreg->vol_bit_width); |
| 150 | if (ret) { | 148 | if (ret) { |
| 151 | dev_err(dev, "no anatop-vol-bit-width property set\n"); | 149 | dev_err(dev, "no anatop-vol-bit-width property set\n"); |
| 152 | goto anatop_probe_end; | 150 | return ret; |
| 153 | } | 151 | } |
| 154 | ret = of_property_read_u32(np, "anatop-vol-bit-shift", | 152 | ret = of_property_read_u32(np, "anatop-vol-bit-shift", |
| 155 | &sreg->vol_bit_shift); | 153 | &sreg->vol_bit_shift); |
| 156 | if (ret) { | 154 | if (ret) { |
| 157 | dev_err(dev, "no anatop-vol-bit-shift property set\n"); | 155 | dev_err(dev, "no anatop-vol-bit-shift property set\n"); |
| 158 | goto anatop_probe_end; | 156 | return ret; |
| 159 | } | 157 | } |
| 160 | ret = of_property_read_u32(np, "anatop-min-bit-val", | 158 | ret = of_property_read_u32(np, "anatop-min-bit-val", |
| 161 | &sreg->min_bit_val); | 159 | &sreg->min_bit_val); |
| 162 | if (ret) { | 160 | if (ret) { |
| 163 | dev_err(dev, "no anatop-min-bit-val property set\n"); | 161 | dev_err(dev, "no anatop-min-bit-val property set\n"); |
| 164 | goto anatop_probe_end; | 162 | return ret; |
| 165 | } | 163 | } |
| 166 | ret = of_property_read_u32(np, "anatop-min-voltage", | 164 | ret = of_property_read_u32(np, "anatop-min-voltage", |
| 167 | &sreg->min_voltage); | 165 | &sreg->min_voltage); |
| 168 | if (ret) { | 166 | if (ret) { |
| 169 | dev_err(dev, "no anatop-min-voltage property set\n"); | 167 | dev_err(dev, "no anatop-min-voltage property set\n"); |
| 170 | goto anatop_probe_end; | 168 | return ret; |
| 171 | } | 169 | } |
| 172 | ret = of_property_read_u32(np, "anatop-max-voltage", | 170 | ret = of_property_read_u32(np, "anatop-max-voltage", |
| 173 | &sreg->max_voltage); | 171 | &sreg->max_voltage); |
| 174 | if (ret) { | 172 | if (ret) { |
| 175 | dev_err(dev, "no anatop-max-voltage property set\n"); | 173 | dev_err(dev, "no anatop-max-voltage property set\n"); |
| 176 | goto anatop_probe_end; | 174 | return ret; |
| 177 | } | 175 | } |
| 178 | 176 | ||
| 179 | /* read LDO ramp up setting, only for core reg */ | 177 | /* read LDO ramp up setting, only for core reg */ |
| @@ -204,27 +202,11 @@ static int anatop_regulator_probe(struct platform_device *pdev) | |||
| 204 | if (IS_ERR(rdev)) { | 202 | if (IS_ERR(rdev)) { |
| 205 | dev_err(dev, "failed to register %s\n", | 203 | dev_err(dev, "failed to register %s\n", |
| 206 | rdesc->name); | 204 | rdesc->name); |
| 207 | ret = PTR_ERR(rdev); | 205 | return PTR_ERR(rdev); |
| 208 | goto anatop_probe_end; | ||
| 209 | } | 206 | } |
| 210 | 207 | ||
| 211 | platform_set_drvdata(pdev, rdev); | 208 | platform_set_drvdata(pdev, rdev); |
| 212 | 209 | ||
| 213 | anatop_probe_end: | ||
| 214 | if (ret) | ||
| 215 | kfree(sreg->name); | ||
| 216 | |||
| 217 | return ret; | ||
| 218 | } | ||
| 219 | |||
| 220 | static int anatop_regulator_remove(struct platform_device *pdev) | ||
| 221 | { | ||
| 222 | struct regulator_dev *rdev = platform_get_drvdata(pdev); | ||
| 223 | struct anatop_regulator *sreg = rdev_get_drvdata(rdev); | ||
| 224 | const char *name = sreg->name; | ||
| 225 | |||
| 226 | kfree(name); | ||
| 227 | |||
| 228 | return 0; | 210 | return 0; |
| 229 | } | 211 | } |
| 230 | 212 | ||
| @@ -240,7 +222,6 @@ static struct platform_driver anatop_regulator_driver = { | |||
| 240 | .of_match_table = of_anatop_regulator_match_tbl, | 222 | .of_match_table = of_anatop_regulator_match_tbl, |
| 241 | }, | 223 | }, |
| 242 | .probe = anatop_regulator_probe, | 224 | .probe = anatop_regulator_probe, |
| 243 | .remove = anatop_regulator_remove, | ||
| 244 | }; | 225 | }; |
| 245 | 226 | ||
| 246 | static int __init anatop_regulator_init(void) | 227 | static int __init anatop_regulator_init(void) |
| @@ -259,3 +240,4 @@ MODULE_AUTHOR("Nancy Chen <Nancy.Chen@freescale.com>"); | |||
| 259 | MODULE_AUTHOR("Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>"); | 240 | MODULE_AUTHOR("Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>"); |
| 260 | MODULE_DESCRIPTION("ANATOP Regulator driver"); | 241 | MODULE_DESCRIPTION("ANATOP Regulator driver"); |
| 261 | MODULE_LICENSE("GPL v2"); | 242 | MODULE_LICENSE("GPL v2"); |
| 243 | MODULE_ALIAS("platform:anatop_regulator"); | ||
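The anatop change above stores the string returned by of_get_property() directly: that pointer references the device tree data itself and stays valid for the node's lifetime, so there is no copy to kstrdup() at probe time or kfree() at remove time, and the remove() callback that only did the freeing can go away. A minimal sketch of that assumption (illustrative helper, not driver code):

#include <linux/of.h>

/* Illustrative only: the returned string lives in the DT blob, so no
 * allocation or matching free is needed. */
static const char *example_regulator_name(struct device_node *np)
{
	return of_get_property(np, "regulator-name", NULL);
}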
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c index fd3154d86901..034ece707083 100644 --- a/drivers/regulator/arizona-micsupp.c +++ b/drivers/regulator/arizona-micsupp.c | |||
| @@ -28,8 +28,6 @@ | |||
| 28 | #include <linux/mfd/arizona/pdata.h> | 28 | #include <linux/mfd/arizona/pdata.h> |
| 29 | #include <linux/mfd/arizona/registers.h> | 29 | #include <linux/mfd/arizona/registers.h> |
| 30 | 30 | ||
| 31 | #define ARIZONA_MICSUPP_MAX_SELECTOR 0x1f | ||
| 32 | |||
| 33 | struct arizona_micsupp { | 31 | struct arizona_micsupp { |
| 34 | struct regulator_dev *regulator; | 32 | struct regulator_dev *regulator; |
| 35 | struct arizona *arizona; | 33 | struct arizona *arizona; |
| @@ -40,42 +38,6 @@ struct arizona_micsupp { | |||
| 40 | struct work_struct check_cp_work; | 38 | struct work_struct check_cp_work; |
| 41 | }; | 39 | }; |
| 42 | 40 | ||
| 43 | static int arizona_micsupp_list_voltage(struct regulator_dev *rdev, | ||
| 44 | unsigned int selector) | ||
| 45 | { | ||
| 46 | if (selector > ARIZONA_MICSUPP_MAX_SELECTOR) | ||
| 47 | return -EINVAL; | ||
| 48 | |||
| 49 | if (selector == ARIZONA_MICSUPP_MAX_SELECTOR) | ||
| 50 | return 3300000; | ||
| 51 | else | ||
| 52 | return (selector * 50000) + 1700000; | ||
| 53 | } | ||
| 54 | |||
| 55 | static int arizona_micsupp_map_voltage(struct regulator_dev *rdev, | ||
| 56 | int min_uV, int max_uV) | ||
| 57 | { | ||
| 58 | unsigned int voltage; | ||
| 59 | int selector; | ||
| 60 | |||
| 61 | if (min_uV < 1700000) | ||
| 62 | min_uV = 1700000; | ||
| 63 | |||
| 64 | if (min_uV > 3200000) | ||
| 65 | selector = ARIZONA_MICSUPP_MAX_SELECTOR; | ||
| 66 | else | ||
| 67 | selector = DIV_ROUND_UP(min_uV - 1700000, 50000); | ||
| 68 | |||
| 69 | if (selector < 0) | ||
| 70 | return -EINVAL; | ||
| 71 | |||
| 72 | voltage = arizona_micsupp_list_voltage(rdev, selector); | ||
| 73 | if (voltage < min_uV || voltage > max_uV) | ||
| 74 | return -EINVAL; | ||
| 75 | |||
| 76 | return selector; | ||
| 77 | } | ||
| 78 | |||
| 79 | static void arizona_micsupp_check_cp(struct work_struct *work) | 41 | static void arizona_micsupp_check_cp(struct work_struct *work) |
| 80 | { | 42 | { |
| 81 | struct arizona_micsupp *micsupp = | 43 | struct arizona_micsupp *micsupp = |
| @@ -145,8 +107,8 @@ static struct regulator_ops arizona_micsupp_ops = { | |||
| 145 | .disable = arizona_micsupp_disable, | 107 | .disable = arizona_micsupp_disable, |
| 146 | .is_enabled = regulator_is_enabled_regmap, | 108 | .is_enabled = regulator_is_enabled_regmap, |
| 147 | 109 | ||
| 148 | .list_voltage = arizona_micsupp_list_voltage, | 110 | .list_voltage = regulator_list_voltage_linear_range, |
| 149 | .map_voltage = arizona_micsupp_map_voltage, | 111 | .map_voltage = regulator_map_voltage_linear_range, |
| 150 | 112 | ||
| 151 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | 113 | .get_voltage_sel = regulator_get_voltage_sel_regmap, |
| 152 | .set_voltage_sel = regulator_set_voltage_sel_regmap, | 114 | .set_voltage_sel = regulator_set_voltage_sel_regmap, |
| @@ -155,11 +117,16 @@ static struct regulator_ops arizona_micsupp_ops = { | |||
| 155 | .set_bypass = arizona_micsupp_set_bypass, | 117 | .set_bypass = arizona_micsupp_set_bypass, |
| 156 | }; | 118 | }; |
| 157 | 119 | ||
| 120 | static const struct regulator_linear_range arizona_micsupp_ranges[] = { | ||
| 121 | REGULATOR_LINEAR_RANGE(1700000, 0, 0x1e, 50000), | ||
| 122 | REGULATOR_LINEAR_RANGE(3300000, 0x1f, 0x1f, 0), | ||
| 123 | }; | ||
| 124 | |||
| 158 | static const struct regulator_desc arizona_micsupp = { | 125 | static const struct regulator_desc arizona_micsupp = { |
| 159 | .name = "MICVDD", | 126 | .name = "MICVDD", |
| 160 | .supply_name = "CPVDD", | 127 | .supply_name = "CPVDD", |
| 161 | .type = REGULATOR_VOLTAGE, | 128 | .type = REGULATOR_VOLTAGE, |
| 162 | .n_voltages = ARIZONA_MICSUPP_MAX_SELECTOR + 1, | 129 | .n_voltages = 32, |
| 163 | .ops = &arizona_micsupp_ops, | 130 | .ops = &arizona_micsupp_ops, |
| 164 | 131 | ||
| 165 | .vsel_reg = ARIZONA_LDO2_CONTROL_1, | 132 | .vsel_reg = ARIZONA_LDO2_CONTROL_1, |
| @@ -169,6 +136,9 @@ static const struct regulator_desc arizona_micsupp = { | |||
| 169 | .bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1, | 136 | .bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1, |
| 170 | .bypass_mask = ARIZONA_CPMIC_BYPASS, | 137 | .bypass_mask = ARIZONA_CPMIC_BYPASS, |
| 171 | 138 | ||
| 139 | .linear_ranges = arizona_micsupp_ranges, | ||
| 140 | .n_linear_ranges = ARRAY_SIZE(arizona_micsupp_ranges), | ||
| 141 | |||
| 172 | .enable_time = 3000, | 142 | .enable_time = 3000, |
| 173 | 143 | ||
| 174 | .owner = THIS_MODULE, | 144 | .owner = THIS_MODULE, |
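The arizona-micsupp hunks above replace the open-coded list/map helpers with the core's linear-range helpers; the two REGULATOR_LINEAR_RANGE entries encode the same selector-to-voltage mapping the removed functions computed. A sketch of that mapping (illustrative helper, values taken from the ranges above):

#include <linux/errno.h>

static int example_micsupp_list_voltage(unsigned int sel)
{
	if (sel <= 0x1e)
		return 1700000 + sel * 50000;	/* 50 mV steps, 0x1e -> 3.2 V */
	if (sel == 0x1f)
		return 3300000;			/* single fixed top step */
	return -EINVAL;
}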
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c index b9f1d24c6812..8b17d786cb71 100644 --- a/drivers/regulator/as3722-regulator.c +++ b/drivers/regulator/as3722-regulator.c | |||
| @@ -99,7 +99,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = { | |||
| 99 | .sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK, | 99 | .sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK, |
| 100 | .control_reg = AS3722_SD0_CONTROL_REG, | 100 | .control_reg = AS3722_SD0_CONTROL_REG, |
| 101 | .mode_mask = AS3722_SD0_MODE_FAST, | 101 | .mode_mask = AS3722_SD0_MODE_FAST, |
| 102 | .n_voltages = AS3722_SD0_VSEL_MAX + 1, | ||
| 103 | }, | 102 | }, |
| 104 | { | 103 | { |
| 105 | .regulator_id = AS3722_REGULATOR_ID_SD1, | 104 | .regulator_id = AS3722_REGULATOR_ID_SD1, |
| @@ -112,7 +111,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = { | |||
| 112 | .sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK, | 111 | .sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK, |
| 113 | .control_reg = AS3722_SD1_CONTROL_REG, | 112 | .control_reg = AS3722_SD1_CONTROL_REG, |
| 114 | .mode_mask = AS3722_SD1_MODE_FAST, | 113 | .mode_mask = AS3722_SD1_MODE_FAST, |
| 115 | .n_voltages = AS3722_SD0_VSEL_MAX + 1, | ||
| 116 | }, | 114 | }, |
| 117 | { | 115 | { |
| 118 | .regulator_id = AS3722_REGULATOR_ID_SD2, | 116 | .regulator_id = AS3722_REGULATOR_ID_SD2, |
| @@ -181,7 +179,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = { | |||
| 181 | .sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK, | 179 | .sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK, |
| 182 | .control_reg = AS3722_SD6_CONTROL_REG, | 180 | .control_reg = AS3722_SD6_CONTROL_REG, |
| 183 | .mode_mask = AS3722_SD6_MODE_FAST, | 181 | .mode_mask = AS3722_SD6_MODE_FAST, |
| 184 | .n_voltages = AS3722_SD0_VSEL_MAX + 1, | ||
| 185 | }, | 182 | }, |
| 186 | { | 183 | { |
| 187 | .regulator_id = AS3722_REGULATOR_ID_LDO0, | 184 | .regulator_id = AS3722_REGULATOR_ID_LDO0, |
| @@ -595,6 +592,22 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev, | |||
| 595 | return as3722_update_bits(as3722, reg, mask, val); | 592 | return as3722_update_bits(as3722, reg, mask, val); |
| 596 | } | 593 | } |
| 597 | 594 | ||
| 595 | static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs) | ||
| 596 | { | ||
| 597 | int err; | ||
| 598 | unsigned val; | ||
| 599 | |||
| 600 | err = as3722_read(as3722_regs->as3722, AS3722_FUSE7_REG, &val); | ||
| 601 | if (err < 0) { | ||
| 602 | dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n", | ||
| 603 | AS3722_FUSE7_REG, err); | ||
| 604 | return false; | ||
| 605 | } | ||
| 606 | if (val & AS3722_FUSE7_SD0_LOW_VOLTAGE) | ||
| 607 | return true; | ||
| 608 | return false; | ||
| 609 | } | ||
| 610 | |||
| 598 | static const struct regulator_linear_range as3722_sd2345_ranges[] = { | 611 | static const struct regulator_linear_range as3722_sd2345_ranges[] = { |
| 599 | REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500), | 612 | REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500), |
| 600 | REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000), | 613 | REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000), |
| @@ -820,9 +833,19 @@ static int as3722_regulator_probe(struct platform_device *pdev) | |||
| 820 | ops = &as3722_sd016_extcntrl_ops; | 833 | ops = &as3722_sd016_extcntrl_ops; |
| 821 | else | 834 | else |
| 822 | ops = &as3722_sd016_ops; | 835 | ops = &as3722_sd016_ops; |
| 823 | as3722_regs->desc[id].min_uV = 610000; | 836 | if (id == AS3722_REGULATOR_ID_SD0 && |
| 837 | as3722_sd0_is_low_voltage(as3722_regs)) { | ||
| 838 | as3722_regs->desc[id].n_voltages = | ||
| 839 | AS3722_SD0_VSEL_LOW_VOL_MAX + 1; | ||
| 840 | as3722_regs->desc[id].min_uV = 410000; | ||
| 841 | } else { | ||
| 842 | as3722_regs->desc[id].n_voltages = | ||
| 843 | AS3722_SD0_VSEL_MAX + 1; | ||
| 844 | as3722_regs->desc[id].min_uV = 610000; | ||
| 845 | } | ||
| 824 | as3722_regs->desc[id].uV_step = 10000; | 846 | as3722_regs->desc[id].uV_step = 10000; |
| 825 | as3722_regs->desc[id].linear_min_sel = 1; | 847 | as3722_regs->desc[id].linear_min_sel = 1; |
| 848 | as3722_regs->desc[id].enable_time = 600; | ||
| 826 | break; | 849 | break; |
| 827 | case AS3722_REGULATOR_ID_SD2: | 850 | case AS3722_REGULATOR_ID_SD2: |
| 828 | case AS3722_REGULATOR_ID_SD3: | 851 | case AS3722_REGULATOR_ID_SD3: |
| @@ -842,9 +865,6 @@ static int as3722_regulator_probe(struct platform_device *pdev) | |||
| 842 | ops = &as3722_ldo_extcntrl_ops; | 865 | ops = &as3722_ldo_extcntrl_ops; |
| 843 | else | 866 | else |
| 844 | ops = &as3722_ldo_ops; | 867 | ops = &as3722_ldo_ops; |
| 845 | as3722_regs->desc[id].min_uV = 825000; | ||
| 846 | as3722_regs->desc[id].uV_step = 25000; | ||
| 847 | as3722_regs->desc[id].linear_min_sel = 1; | ||
| 848 | as3722_regs->desc[id].enable_time = 500; | 868 | as3722_regs->desc[id].enable_time = 500; |
| 849 | as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges; | 869 | as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges; |
| 850 | as3722_regs->desc[id].n_linear_ranges = | 870 | as3722_regs->desc[id].n_linear_ranges = |
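In the as3722 hunk above, SD0's range now depends on a fuse bit: parts fused for low-voltage operation get more selectors starting at 410 mV, other parts keep the 610 mV base, and both use a 10 mV step with linear_min_sel = 1. A worked sketch of the resulting mapping, assuming the core's usual linear formula voltage = min_uV + (sel - linear_min_sel) * uV_step (illustrative helper only):

#include <linux/types.h>

static int example_sd0_voltage(bool low_voltage_fused, unsigned int sel)
{
	int min_uv = low_voltage_fused ? 410000 : 610000;

	return min_uv + ((int)sel - 1) * 10000;	/* selector 1 is the first valid one */
}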
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d85f31385b24..b38a6b669e8c 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
| @@ -1334,9 +1334,8 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, | |||
| 1334 | * If we have return value from dev_lookup fail, we do not expect to | 1334 | * If we have return value from dev_lookup fail, we do not expect to |
| 1335 | * succeed, so, quit with appropriate error value | 1335 | * succeed, so, quit with appropriate error value |
| 1336 | */ | 1336 | */ |
| 1337 | if (ret && ret != -ENODEV) { | 1337 | if (ret && ret != -ENODEV) |
| 1338 | goto out; | 1338 | goto out; |
| 1339 | } | ||
| 1340 | 1339 | ||
| 1341 | if (!devname) | 1340 | if (!devname) |
| 1342 | devname = "deviceless"; | 1341 | devname = "deviceless"; |
| @@ -1351,7 +1350,8 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, | |||
| 1351 | 1350 | ||
| 1352 | rdev = dummy_regulator_rdev; | 1351 | rdev = dummy_regulator_rdev; |
| 1353 | goto found; | 1352 | goto found; |
| 1354 | } else { | 1353 | /* Don't log an error when called from regulator_get_optional() */ |
| 1354 | } else if (!have_full_constraints() || exclusive) { | ||
| 1355 | dev_err(dev, "dummy supplies not allowed\n"); | 1355 | dev_err(dev, "dummy supplies not allowed\n"); |
| 1356 | } | 1356 | } |
| 1357 | 1357 | ||
| @@ -2244,7 +2244,7 @@ int regulator_is_supported_voltage(struct regulator *regulator, | |||
| 2244 | if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { | 2244 | if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { |
| 2245 | ret = regulator_get_voltage(regulator); | 2245 | ret = regulator_get_voltage(regulator); |
| 2246 | if (ret >= 0) | 2246 | if (ret >= 0) |
| 2247 | return (min_uV <= ret && ret <= max_uV); | 2247 | return min_uV <= ret && ret <= max_uV; |
| 2248 | else | 2248 | else |
| 2249 | return ret; | 2249 | return ret; |
| 2250 | } | 2250 | } |
| @@ -2416,7 +2416,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) | |||
| 2416 | ret = regulator_check_voltage(rdev, &min_uV, &max_uV); | 2416 | ret = regulator_check_voltage(rdev, &min_uV, &max_uV); |
| 2417 | if (ret < 0) | 2417 | if (ret < 0) |
| 2418 | goto out; | 2418 | goto out; |
| 2419 | 2419 | ||
| 2420 | /* restore original values in case of error */ | 2420 | /* restore original values in case of error */ |
| 2421 | old_min_uV = regulator->min_uV; | 2421 | old_min_uV = regulator->min_uV; |
| 2422 | old_max_uV = regulator->max_uV; | 2422 | old_max_uV = regulator->max_uV; |
| @@ -2430,7 +2430,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) | |||
| 2430 | ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); | 2430 | ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); |
| 2431 | if (ret < 0) | 2431 | if (ret < 0) |
| 2432 | goto out2; | 2432 | goto out2; |
| 2433 | 2433 | ||
| 2434 | out: | 2434 | out: |
| 2435 | mutex_unlock(&rdev->mutex); | 2435 | mutex_unlock(&rdev->mutex); |
| 2436 | return ret; | 2436 | return ret; |
| @@ -3835,9 +3835,8 @@ static int __init regulator_init_complete(void) | |||
| 3835 | * goes wrong. */ | 3835 | * goes wrong. */ |
| 3836 | rdev_info(rdev, "disabling\n"); | 3836 | rdev_info(rdev, "disabling\n"); |
| 3837 | ret = ops->disable(rdev); | 3837 | ret = ops->disable(rdev); |
| 3838 | if (ret != 0) { | 3838 | if (ret != 0) |
| 3839 | rdev_err(rdev, "couldn't disable: %d\n", ret); | 3839 | rdev_err(rdev, "couldn't disable: %d\n", ret); |
| 3840 | } | ||
| 3841 | } else { | 3840 | } else { |
| 3842 | /* The intention is that in future we will | 3841 | /* The intention is that in future we will |
| 3843 | * assume that full constraints are provided | 3842 | * assume that full constraints are provided |
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c index a53c11a529d5..846acf240e48 100644 --- a/drivers/regulator/db8500-prcmu.c +++ b/drivers/regulator/db8500-prcmu.c | |||
| @@ -431,17 +431,11 @@ static int db8500_regulator_register(struct platform_device *pdev, | |||
| 431 | config.of_node = np; | 431 | config.of_node = np; |
| 432 | 432 | ||
| 433 | /* register with the regulator framework */ | 433 | /* register with the regulator framework */ |
| 434 | info->rdev = regulator_register(&info->desc, &config); | 434 | info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config); |
| 435 | if (IS_ERR(info->rdev)) { | 435 | if (IS_ERR(info->rdev)) { |
| 436 | err = PTR_ERR(info->rdev); | 436 | err = PTR_ERR(info->rdev); |
| 437 | dev_err(&pdev->dev, "failed to register %s: err %i\n", | 437 | dev_err(&pdev->dev, "failed to register %s: err %i\n", |
| 438 | info->desc.name, err); | 438 | info->desc.name, err); |
| 439 | |||
| 440 | /* if failing, unregister all earlier regulators */ | ||
| 441 | while (--id >= 0) { | ||
| 442 | info = &dbx500_regulator_info[id]; | ||
| 443 | regulator_unregister(info->rdev); | ||
| 444 | } | ||
| 445 | return err; | 439 | return err; |
| 446 | } | 440 | } |
| 447 | 441 | ||
| @@ -530,20 +524,8 @@ static int db8500_regulator_probe(struct platform_device *pdev) | |||
| 530 | 524 | ||
| 531 | static int db8500_regulator_remove(struct platform_device *pdev) | 525 | static int db8500_regulator_remove(struct platform_device *pdev) |
| 532 | { | 526 | { |
| 533 | int i; | ||
| 534 | |||
| 535 | ux500_regulator_debug_exit(); | 527 | ux500_regulator_debug_exit(); |
| 536 | 528 | ||
| 537 | for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) { | ||
| 538 | struct dbx500_regulator_info *info; | ||
| 539 | info = &dbx500_regulator_info[i]; | ||
| 540 | |||
| 541 | dev_vdbg(rdev_get_dev(info->rdev), | ||
| 542 | "regulator-%s-remove\n", info->desc.name); | ||
| 543 | |||
| 544 | regulator_unregister(info->rdev); | ||
| 545 | } | ||
| 546 | |||
| 547 | return 0; | 529 | return 0; |
| 548 | } | 530 | } |
| 549 | 531 | ||
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 234960dc9607..c0a1d00b78c9 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c | |||
| @@ -203,17 +203,18 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np) | |||
| 203 | } | 203 | } |
| 204 | config->nr_states = i; | 204 | config->nr_states = i; |
| 205 | 205 | ||
| 206 | config->type = REGULATOR_VOLTAGE; | ||
| 206 | ret = of_property_read_string(np, "regulator-type", ®type); | 207 | ret = of_property_read_string(np, "regulator-type", ®type); |
| 207 | if (ret < 0) { | 208 | if (ret >= 0) { |
| 208 | dev_err(dev, "Missing 'regulator-type' property\n"); | 209 | if (!strncmp("voltage", regtype, 7)) |
| 209 | return ERR_PTR(-EINVAL); | 210 | config->type = REGULATOR_VOLTAGE; |
| 211 | else if (!strncmp("current", regtype, 7)) | ||
| 212 | config->type = REGULATOR_CURRENT; | ||
| 213 | else | ||
| 214 | dev_warn(dev, "Unknown regulator-type '%s'\n", | ||
| 215 | regtype); | ||
| 210 | } | 216 | } |
| 211 | 217 | ||
| 212 | if (!strncmp("voltage", regtype, 7)) | ||
| 213 | config->type = REGULATOR_VOLTAGE; | ||
| 214 | else if (!strncmp("current", regtype, 7)) | ||
| 215 | config->type = REGULATOR_CURRENT; | ||
| 216 | |||
| 217 | return config; | 218 | return config; |
| 218 | } | 219 | } |
| 219 | 220 | ||
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index 947c05ffe0ab..3b1102b75071 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c | |||
| @@ -25,8 +25,6 @@ struct lp3971 { | |||
| 25 | struct device *dev; | 25 | struct device *dev; |
| 26 | struct mutex io_lock; | 26 | struct mutex io_lock; |
| 27 | struct i2c_client *i2c; | 27 | struct i2c_client *i2c; |
| 28 | int num_regulators; | ||
| 29 | struct regulator_dev **rdev; | ||
| 30 | }; | 28 | }; |
| 31 | 29 | ||
| 32 | static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg); | 30 | static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg); |
| @@ -383,42 +381,27 @@ static int setup_regulators(struct lp3971 *lp3971, | |||
| 383 | { | 381 | { |
| 384 | int i, err; | 382 | int i, err; |
| 385 | 383 | ||
| 386 | lp3971->num_regulators = pdata->num_regulators; | ||
| 387 | lp3971->rdev = kcalloc(pdata->num_regulators, | ||
| 388 | sizeof(struct regulator_dev *), GFP_KERNEL); | ||
| 389 | if (!lp3971->rdev) { | ||
| 390 | err = -ENOMEM; | ||
| 391 | goto err_nomem; | ||
| 392 | } | ||
| 393 | |||
| 394 | /* Instantiate the regulators */ | 384 | /* Instantiate the regulators */ |
| 395 | for (i = 0; i < pdata->num_regulators; i++) { | 385 | for (i = 0; i < pdata->num_regulators; i++) { |
| 396 | struct regulator_config config = { }; | 386 | struct regulator_config config = { }; |
| 397 | struct lp3971_regulator_subdev *reg = &pdata->regulators[i]; | 387 | struct lp3971_regulator_subdev *reg = &pdata->regulators[i]; |
| 388 | struct regulator_dev *rdev; | ||
| 398 | 389 | ||
| 399 | config.dev = lp3971->dev; | 390 | config.dev = lp3971->dev; |
| 400 | config.init_data = reg->initdata; | 391 | config.init_data = reg->initdata; |
| 401 | config.driver_data = lp3971; | 392 | config.driver_data = lp3971; |
| 402 | 393 | ||
| 403 | lp3971->rdev[i] = regulator_register(®ulators[reg->id], | 394 | rdev = devm_regulator_register(lp3971->dev, |
| 404 | &config); | 395 | ®ulators[reg->id], &config); |
| 405 | if (IS_ERR(lp3971->rdev[i])) { | 396 | if (IS_ERR(rdev)) { |
| 406 | err = PTR_ERR(lp3971->rdev[i]); | 397 | err = PTR_ERR(rdev); |
| 407 | dev_err(lp3971->dev, "regulator init failed: %d\n", | 398 | dev_err(lp3971->dev, "regulator init failed: %d\n", |
| 408 | err); | 399 | err); |
| 409 | goto error; | 400 | return err; |
| 410 | } | 401 | } |
| 411 | } | 402 | } |
| 412 | 403 | ||
| 413 | return 0; | 404 | return 0; |
| 414 | |||
| 415 | error: | ||
| 416 | while (--i >= 0) | ||
| 417 | regulator_unregister(lp3971->rdev[i]); | ||
| 418 | kfree(lp3971->rdev); | ||
| 419 | lp3971->rdev = NULL; | ||
| 420 | err_nomem: | ||
| 421 | return err; | ||
| 422 | } | 405 | } |
| 423 | 406 | ||
| 424 | static int lp3971_i2c_probe(struct i2c_client *i2c, | 407 | static int lp3971_i2c_probe(struct i2c_client *i2c, |
| @@ -460,19 +443,6 @@ static int lp3971_i2c_probe(struct i2c_client *i2c, | |||
| 460 | return 0; | 443 | return 0; |
| 461 | } | 444 | } |
| 462 | 445 | ||
| 463 | static int lp3971_i2c_remove(struct i2c_client *i2c) | ||
| 464 | { | ||
| 465 | struct lp3971 *lp3971 = i2c_get_clientdata(i2c); | ||
| 466 | int i; | ||
| 467 | |||
| 468 | for (i = 0; i < lp3971->num_regulators; i++) | ||
| 469 | regulator_unregister(lp3971->rdev[i]); | ||
| 470 | |||
| 471 | kfree(lp3971->rdev); | ||
| 472 | |||
| 473 | return 0; | ||
| 474 | } | ||
| 475 | |||
| 476 | static const struct i2c_device_id lp3971_i2c_id[] = { | 446 | static const struct i2c_device_id lp3971_i2c_id[] = { |
| 477 | { "lp3971", 0 }, | 447 | { "lp3971", 0 }, |
| 478 | { } | 448 | { } |
| @@ -485,7 +455,6 @@ static struct i2c_driver lp3971_i2c_driver = { | |||
| 485 | .owner = THIS_MODULE, | 455 | .owner = THIS_MODULE, |
| 486 | }, | 456 | }, |
| 487 | .probe = lp3971_i2c_probe, | 457 | .probe = lp3971_i2c_probe, |
| 488 | .remove = lp3971_i2c_remove, | ||
| 489 | .id_table = lp3971_i2c_id, | 458 | .id_table = lp3971_i2c_id, |
| 490 | }; | 459 | }; |
| 491 | 460 | ||
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c index 093e6f44ff8a..aea485afcc1a 100644 --- a/drivers/regulator/lp3972.c +++ b/drivers/regulator/lp3972.c | |||
| @@ -22,8 +22,6 @@ struct lp3972 { | |||
| 22 | struct device *dev; | 22 | struct device *dev; |
| 23 | struct mutex io_lock; | 23 | struct mutex io_lock; |
| 24 | struct i2c_client *i2c; | 24 | struct i2c_client *i2c; |
| 25 | int num_regulators; | ||
| 26 | struct regulator_dev **rdev; | ||
| 27 | }; | 25 | }; |
| 28 | 26 | ||
| 29 | /* LP3972 Control Registers */ | 27 | /* LP3972 Control Registers */ |
| @@ -478,41 +476,27 @@ static int setup_regulators(struct lp3972 *lp3972, | |||
| 478 | { | 476 | { |
| 479 | int i, err; | 477 | int i, err; |
| 480 | 478 | ||
| 481 | lp3972->num_regulators = pdata->num_regulators; | ||
| 482 | lp3972->rdev = kcalloc(pdata->num_regulators, | ||
| 483 | sizeof(struct regulator_dev *), GFP_KERNEL); | ||
| 484 | if (!lp3972->rdev) { | ||
| 485 | err = -ENOMEM; | ||
| 486 | goto err_nomem; | ||
| 487 | } | ||
| 488 | |||
| 489 | /* Instantiate the regulators */ | 479 | /* Instantiate the regulators */ |
| 490 | for (i = 0; i < pdata->num_regulators; i++) { | 480 | for (i = 0; i < pdata->num_regulators; i++) { |
| 491 | struct lp3972_regulator_subdev *reg = &pdata->regulators[i]; | 481 | struct lp3972_regulator_subdev *reg = &pdata->regulators[i]; |
| 492 | struct regulator_config config = { }; | 482 | struct regulator_config config = { }; |
| 483 | struct regulator_dev *rdev; | ||
| 493 | 484 | ||
| 494 | config.dev = lp3972->dev; | 485 | config.dev = lp3972->dev; |
| 495 | config.init_data = reg->initdata; | 486 | config.init_data = reg->initdata; |
| 496 | config.driver_data = lp3972; | 487 | config.driver_data = lp3972; |
| 497 | 488 | ||
| 498 | lp3972->rdev[i] = regulator_register(®ulators[reg->id], | 489 | rdev = devm_regulator_register(lp3972->dev, |
| 499 | &config); | 490 | ®ulators[reg->id], &config); |
| 500 | if (IS_ERR(lp3972->rdev[i])) { | 491 | if (IS_ERR(rdev)) { |
| 501 | err = PTR_ERR(lp3972->rdev[i]); | 492 | err = PTR_ERR(rdev); |
| 502 | dev_err(lp3972->dev, "regulator init failed: %d\n", | 493 | dev_err(lp3972->dev, "regulator init failed: %d\n", |
| 503 | err); | 494 | err); |
| 504 | goto error; | 495 | return err; |
| 505 | } | 496 | } |
| 506 | } | 497 | } |
| 507 | 498 | ||
| 508 | return 0; | 499 | return 0; |
| 509 | error: | ||
| 510 | while (--i >= 0) | ||
| 511 | regulator_unregister(lp3972->rdev[i]); | ||
| 512 | kfree(lp3972->rdev); | ||
| 513 | lp3972->rdev = NULL; | ||
| 514 | err_nomem: | ||
| 515 | return err; | ||
| 516 | } | 500 | } |
| 517 | 501 | ||
| 518 | static int lp3972_i2c_probe(struct i2c_client *i2c, | 502 | static int lp3972_i2c_probe(struct i2c_client *i2c, |
| @@ -557,18 +541,6 @@ static int lp3972_i2c_probe(struct i2c_client *i2c, | |||
| 557 | return 0; | 541 | return 0; |
| 558 | } | 542 | } |
| 559 | 543 | ||
| 560 | static int lp3972_i2c_remove(struct i2c_client *i2c) | ||
| 561 | { | ||
| 562 | struct lp3972 *lp3972 = i2c_get_clientdata(i2c); | ||
| 563 | int i; | ||
| 564 | |||
| 565 | for (i = 0; i < lp3972->num_regulators; i++) | ||
| 566 | regulator_unregister(lp3972->rdev[i]); | ||
| 567 | kfree(lp3972->rdev); | ||
| 568 | |||
| 569 | return 0; | ||
| 570 | } | ||
| 571 | |||
| 572 | static const struct i2c_device_id lp3972_i2c_id[] = { | 544 | static const struct i2c_device_id lp3972_i2c_id[] = { |
| 573 | { "lp3972", 0 }, | 545 | { "lp3972", 0 }, |
| 574 | { } | 546 | { } |
| @@ -581,7 +553,6 @@ static struct i2c_driver lp3972_i2c_driver = { | |||
| 581 | .owner = THIS_MODULE, | 553 | .owner = THIS_MODULE, |
| 582 | }, | 554 | }, |
| 583 | .probe = lp3972_i2c_probe, | 555 | .probe = lp3972_i2c_probe, |
| 584 | .remove = lp3972_i2c_remove, | ||
| 585 | .id_table = lp3972_i2c_id, | 556 | .id_table = lp3972_i2c_id, |
| 586 | }; | 557 | }; |
| 587 | 558 | ||
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c new file mode 100644 index 000000000000..b1078ba3f393 --- /dev/null +++ b/drivers/regulator/max14577.c | |||
| @@ -0,0 +1,273 @@ | |||
| 1 | /* | ||
| 2 | * max14577.c - Regulator driver for the Maxim 14577 | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Samsung Electronics | ||
| 5 | * Krzysztof Kozlowski <k.kozlowski@samsung.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/platform_device.h> | ||
| 20 | #include <linux/regulator/driver.h> | ||
| 21 | #include <linux/mfd/max14577.h> | ||
| 22 | #include <linux/mfd/max14577-private.h> | ||
| 23 | #include <linux/regulator/of_regulator.h> | ||
| 24 | |||
| 25 | struct max14577_regulator { | ||
| 26 | struct device *dev; | ||
| 27 | struct max14577 *max14577; | ||
| 28 | struct regulator_dev **regulators; | ||
| 29 | }; | ||
| 30 | |||
| 31 | static int max14577_reg_is_enabled(struct regulator_dev *rdev) | ||
| 32 | { | ||
| 33 | int rid = rdev_get_id(rdev); | ||
| 34 | struct regmap *rmap = rdev->regmap; | ||
| 35 | u8 reg_data; | ||
| 36 | |||
| 37 | switch (rid) { | ||
| 38 | case MAX14577_CHARGER: | ||
| 39 | max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL2, ®_data); | ||
| 40 | if ((reg_data & CHGCTRL2_MBCHOSTEN_MASK) == 0) | ||
| 41 | return 0; | ||
| 42 | max14577_read_reg(rmap, MAX14577_CHG_REG_STATUS3, ®_data); | ||
| 43 | if ((reg_data & STATUS3_CGMBC_MASK) == 0) | ||
| 44 | return 0; | ||
| 45 | /* MBCHOSTEN and CGMBC are on */ | ||
| 46 | return 1; | ||
| 47 | default: | ||
| 48 | return -EINVAL; | ||
| 49 | } | ||
| 50 | } | ||
| 51 | |||
| 52 | static int max14577_reg_get_current_limit(struct regulator_dev *rdev) | ||
| 53 | { | ||
| 54 | u8 reg_data; | ||
| 55 | struct regmap *rmap = rdev->regmap; | ||
| 56 | |||
| 57 | if (rdev_get_id(rdev) != MAX14577_CHARGER) | ||
| 58 | return -EINVAL; | ||
| 59 | |||
| 60 | max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, ®_data); | ||
| 61 | |||
| 62 | if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0) | ||
| 63 | return MAX14577_REGULATOR_CURRENT_LIMIT_MIN; | ||
| 64 | |||
| 65 | reg_data = ((reg_data & CHGCTRL4_MBCICHWRCH_MASK) >> | ||
| 66 | CHGCTRL4_MBCICHWRCH_SHIFT); | ||
| 67 | return MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START + | ||
| 68 | reg_data * MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP; | ||
| 69 | } | ||
| 70 | |||
| 71 | static int max14577_reg_set_current_limit(struct regulator_dev *rdev, | ||
| 72 | int min_uA, int max_uA) | ||
| 73 | { | ||
| 74 | int i, current_bits = 0xf; | ||
| 75 | u8 reg_data; | ||
| 76 | |||
| 77 | if (rdev_get_id(rdev) != MAX14577_CHARGER) | ||
| 78 | return -EINVAL; | ||
| 79 | |||
| 80 | if (min_uA > MAX14577_REGULATOR_CURRENT_LIMIT_MAX || | ||
| 81 | max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_MIN) | ||
| 82 | return -EINVAL; | ||
| 83 | |||
| 84 | if (max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START) { | ||
| 85 | /* Less than 200 mA, so set 90mA (turn only Low Bit off) */ | ||
| 86 | u8 reg_data = 0x0 << CHGCTRL4_MBCICHWRCL_SHIFT; | ||
| 87 | return max14577_update_reg(rdev->regmap, | ||
| 88 | MAX14577_CHG_REG_CHG_CTRL4, | ||
| 89 | CHGCTRL4_MBCICHWRCL_MASK, reg_data); | ||
| 90 | } | ||
| 91 | |||
| 92 | /* max_uA is in range: <LIMIT_HIGH_START, infinite>, so search for | ||
| 93 | * valid current starting from LIMIT_MAX. */ | ||
| 94 | for (i = MAX14577_REGULATOR_CURRENT_LIMIT_MAX; | ||
| 95 | i >= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START; | ||
| 96 | i -= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP) { | ||
| 97 | if (i <= max_uA) | ||
| 98 | break; | ||
| 99 | current_bits--; | ||
| 100 | } | ||
| 101 | BUG_ON(current_bits < 0); /* Cannot happen */ | ||
| 102 | /* Turn Low Bit on (use range 200mA-950 mA) */ | ||
| 103 | reg_data = 0x1 << CHGCTRL4_MBCICHWRCL_SHIFT; | ||
| 104 | /* and set proper High Bits */ | ||
| 105 | reg_data |= current_bits << CHGCTRL4_MBCICHWRCH_SHIFT; | ||
| 106 | |||
| 107 | return max14577_update_reg(rdev->regmap, MAX14577_CHG_REG_CHG_CTRL4, | ||
| 108 | CHGCTRL4_MBCICHWRCL_MASK | CHGCTRL4_MBCICHWRCH_MASK, | ||
| 109 | reg_data); | ||
| 110 | } | ||
| 111 | |||
| 112 | static struct regulator_ops max14577_safeout_ops = { | ||
| 113 | .is_enabled = regulator_is_enabled_regmap, | ||
| 114 | .enable = regulator_enable_regmap, | ||
| 115 | .disable = regulator_disable_regmap, | ||
| 116 | .list_voltage = regulator_list_voltage_linear, | ||
| 117 | }; | ||
| 118 | |||
| 119 | static struct regulator_ops max14577_charger_ops = { | ||
| 120 | .is_enabled = max14577_reg_is_enabled, | ||
| 121 | .enable = regulator_enable_regmap, | ||
| 122 | .disable = regulator_disable_regmap, | ||
| 123 | .get_current_limit = max14577_reg_get_current_limit, | ||
| 124 | .set_current_limit = max14577_reg_set_current_limit, | ||
| 125 | }; | ||
| 126 | |||
| 127 | static const struct regulator_desc supported_regulators[] = { | ||
| 128 | [MAX14577_SAFEOUT] = { | ||
| 129 | .name = "SAFEOUT", | ||
| 130 | .id = MAX14577_SAFEOUT, | ||
| 131 | .ops = &max14577_safeout_ops, | ||
| 132 | .type = REGULATOR_VOLTAGE, | ||
| 133 | .owner = THIS_MODULE, | ||
| 134 | .n_voltages = 1, | ||
| 135 | .min_uV = MAX14577_REGULATOR_SAFEOUT_VOLTAGE, | ||
| 136 | .enable_reg = MAX14577_REG_CONTROL2, | ||
| 137 | .enable_mask = CTRL2_SFOUTORD_MASK, | ||
| 138 | }, | ||
| 139 | [MAX14577_CHARGER] = { | ||
| 140 | .name = "CHARGER", | ||
| 141 | .id = MAX14577_CHARGER, | ||
| 142 | .ops = &max14577_charger_ops, | ||
| 143 | .type = REGULATOR_CURRENT, | ||
| 144 | .owner = THIS_MODULE, | ||
| 145 | .enable_reg = MAX14577_CHG_REG_CHG_CTRL2, | ||
| 146 | .enable_mask = CHGCTRL2_MBCHOSTEN_MASK, | ||
| 147 | }, | ||
| 148 | }; | ||
| 149 | |||
| 150 | #ifdef CONFIG_OF | ||
| 151 | static struct of_regulator_match max14577_regulator_matches[] = { | ||
| 152 | { .name = "SAFEOUT", }, | ||
| 153 | { .name = "CHARGER", }, | ||
| 154 | }; | ||
| 155 | |||
| 156 | static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) | ||
| 157 | { | ||
| 158 | int ret; | ||
| 159 | struct device_node *np; | ||
| 160 | |||
| 161 | np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); | ||
| 162 | if (!np) { | ||
| 163 | dev_err(&pdev->dev, "Failed to get child OF node for regulators\n"); | ||
| 164 | return -EINVAL; | ||
| 165 | } | ||
| 166 | |||
| 167 | ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches, | ||
| 168 | MAX14577_REG_MAX); | ||
| 169 | if (ret < 0) { | ||
| 170 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); | ||
| 171 | return ret; | ||
| 172 | } | ||
| 173 | |||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | static inline struct regulator_init_data *match_init_data(int index) | ||
| 178 | { | ||
| 179 | return max14577_regulator_matches[index].init_data; | ||
| 180 | } | ||
| 181 | |||
| 182 | static inline struct device_node *match_of_node(int index) | ||
| 183 | { | ||
| 184 | return max14577_regulator_matches[index].of_node; | ||
| 185 | } | ||
| 186 | #else /* CONFIG_OF */ | ||
| 187 | static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) | ||
| 188 | { | ||
| 189 | return 0; | ||
| 190 | } | ||
| 191 | static inline struct regulator_init_data *match_init_data(int index) | ||
| 192 | { | ||
| 193 | return NULL; | ||
| 194 | } | ||
| 195 | |||
| 196 | static inline struct device_node *match_of_node(int index) | ||
| 197 | { | ||
| 198 | return NULL; | ||
| 199 | } | ||
| 200 | #endif /* CONFIG_OF */ | ||
| 201 | |||
| 202 | |||
| 203 | static int max14577_regulator_probe(struct platform_device *pdev) | ||
| 204 | { | ||
| 205 | struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent); | ||
| 206 | struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); | ||
| 207 | int i, ret; | ||
| 208 | struct regulator_config config = {}; | ||
| 209 | |||
| 210 | ret = max14577_regulator_dt_parse_pdata(pdev); | ||
| 211 | if (ret) | ||
| 212 | return ret; | ||
| 213 | |||
| 214 | config.dev = &pdev->dev; | ||
| 215 | config.regmap = max14577->regmap; | ||
| 216 | |||
| 217 | for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) { | ||
| 218 | struct regulator_dev *regulator; | ||
| 219 | /* | ||
| 220 | * Index of supported_regulators[] is also the id and must | ||
| 221 | * match index of pdata->regulators[]. | ||
| 222 | */ | ||
| 223 | if (pdata && pdata->regulators) { | ||
| 224 | config.init_data = pdata->regulators[i].initdata; | ||
| 225 | config.of_node = pdata->regulators[i].of_node; | ||
| 226 | } else { | ||
| 227 | config.init_data = match_init_data(i); | ||
| 228 | config.of_node = match_of_node(i); | ||
| 229 | } | ||
| 230 | |||
| 231 | regulator = devm_regulator_register(&pdev->dev, | ||
| 232 | &supported_regulators[i], &config); | ||
| 233 | if (IS_ERR(regulator)) { | ||
| 234 | ret = PTR_ERR(regulator); | ||
| 235 | dev_err(&pdev->dev, | ||
| 236 | "Regulator init failed for ID %d with error: %d\n", | ||
| 237 | i, ret); | ||
| 238 | return ret; | ||
| 239 | } | ||
| 240 | } | ||
| 241 | |||
| 242 | return ret; | ||
| 243 | } | ||
| 244 | |||
| 245 | static struct platform_driver max14577_regulator_driver = { | ||
| 246 | .driver = { | ||
| 247 | .owner = THIS_MODULE, | ||
| 248 | .name = "max14577-regulator", | ||
| 249 | }, | ||
| 250 | .probe = max14577_regulator_probe, | ||
| 251 | }; | ||
| 252 | |||
| 253 | static int __init max14577_regulator_init(void) | ||
| 254 | { | ||
| 255 | BUILD_BUG_ON(MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START + | ||
| 256 | MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf != | ||
| 257 | MAX14577_REGULATOR_CURRENT_LIMIT_MAX); | ||
| 258 | BUILD_BUG_ON(ARRAY_SIZE(supported_regulators) != MAX14577_REG_MAX); | ||
| 259 | |||
| 260 | return platform_driver_register(&max14577_regulator_driver); | ||
| 261 | } | ||
| 262 | subsys_initcall(max14577_regulator_init); | ||
| 263 | |||
| 264 | static void __exit max14577_regulator_exit(void) | ||
| 265 | { | ||
| 266 | platform_driver_unregister(&max14577_regulator_driver); | ||
| 267 | } | ||
| 268 | module_exit(max14577_regulator_exit); | ||
| 269 | |||
| 270 | MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>"); | ||
| 271 | MODULE_DESCRIPTION("MAXIM 14577 regulator driver"); | ||
| 272 | MODULE_LICENSE("GPL"); | ||
| 273 | MODULE_ALIAS("platform:max14577-regulator"); | ||
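The charger current-limit helpers in the new max14577 driver map the CHGCTRL4 bits to microamps: with the low bit cleared the limit is the fixed minimum (90 mA per the comments above), otherwise the high bits select from 200 mA upwards in 50 mA steps to 950 mA. A worked sketch under those assumed constants (the MAX14577_REGULATOR_CURRENT_LIMIT_* values live in the mfd header and are not shown here):

#include <linux/types.h>

/* Assumed: MIN = 90000 uA, HIGH_START = 200000 uA, HIGH_STEP = 50000 uA,
 * MAX = 950000 uA, consistent with the BUILD_BUG_ON() above. */
static int example_charger_limit_ua(bool low_bit_set, unsigned int high_bits)
{
	if (!low_bit_set)
		return 90000;			/* low range only */

	return 200000 + high_bits * 50000;	/* high_bits = 0xf -> 950000 */
}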
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c index feb20bf4ccab..5fb899f461d0 100644 --- a/drivers/regulator/max77693.c +++ b/drivers/regulator/max77693.c | |||
| @@ -138,6 +138,7 @@ static struct regulator_ops max77693_charger_ops = { | |||
| 138 | .n_voltages = 4, \ | 138 | .n_voltages = 4, \ |
| 139 | .ops = &max77693_safeout_ops, \ | 139 | .ops = &max77693_safeout_ops, \ |
| 140 | .type = REGULATOR_VOLTAGE, \ | 140 | .type = REGULATOR_VOLTAGE, \ |
| 141 | .owner = THIS_MODULE, \ | ||
| 141 | .volt_table = max77693_safeout_table, \ | 142 | .volt_table = max77693_safeout_table, \ |
| 142 | .vsel_reg = MAX77693_CHG_REG_SAFEOUT_CTRL, \ | 143 | .vsel_reg = MAX77693_CHG_REG_SAFEOUT_CTRL, \ |
| 143 | .vsel_mask = SAFEOUT_CTRL_SAFEOUT##_num##_MASK, \ | 144 | .vsel_mask = SAFEOUT_CTRL_SAFEOUT##_num##_MASK, \ |
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c index 96c9f80d9550..f374fa57220f 100644 --- a/drivers/regulator/mc13892-regulator.c +++ b/drivers/regulator/mc13892-regulator.c | |||
| @@ -274,25 +274,25 @@ static struct mc13xxx_regulator mc13892_regulators[] = { | |||
| 274 | MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw), | 274 | MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw), |
| 275 | MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst), | 275 | MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst), |
| 276 | MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi), | 276 | MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi), |
| 277 | MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, \ | 277 | MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, |
| 278 | mc13892_vpll), | 278 | mc13892_vpll), |
| 279 | MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \ | 279 | MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, |
| 280 | mc13892_vdig), | 280 | mc13892_vdig), |
| 281 | MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, \ | 281 | MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, |
| 282 | mc13892_vsd), | 282 | mc13892_vsd), |
| 283 | MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, \ | 283 | MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, |
| 284 | mc13892_vusb2), | 284 | mc13892_vusb2), |
| 285 | MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, \ | 285 | MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, |
| 286 | mc13892_vvideo), | 286 | mc13892_vvideo), |
| 287 | MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, \ | 287 | MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, |
| 288 | mc13892_vaudio), | 288 | mc13892_vaudio), |
| 289 | MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \ | 289 | MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, |
| 290 | mc13892_vcam), | 290 | mc13892_vcam), |
| 291 | MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, \ | 291 | MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, |
| 292 | mc13892_vgen1), | 292 | mc13892_vgen1), |
| 293 | MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, \ | 293 | MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, |
| 294 | mc13892_vgen2), | 294 | mc13892_vgen2), |
| 295 | MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, \ | 295 | MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, |
| 296 | mc13892_vgen3), | 296 | mc13892_vgen3), |
| 297 | MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb), | 297 | MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb), |
| 298 | MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo), | 298 | MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo), |
| @@ -476,8 +476,8 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev, | |||
| 476 | } | 476 | } |
| 477 | 477 | ||
| 478 | mc13xxx_lock(priv->mc13xxx); | 478 | mc13xxx_lock(priv->mc13xxx); |
| 479 | ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, mask, | 479 | ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, |
| 480 | reg_value); | 480 | mask, reg_value); |
| 481 | mc13xxx_unlock(priv->mc13xxx); | 481 | mc13xxx_unlock(priv->mc13xxx); |
| 482 | 482 | ||
| 483 | return ret; | 483 | return ret; |
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c index d7da1c15a6da..134f90ec9ca1 100644 --- a/drivers/regulator/pcf50633-regulator.c +++ b/drivers/regulator/pcf50633-regulator.c | |||
| @@ -105,7 +105,7 @@ static int pcf50633_regulator_probe(struct platform_device *pdev) | |||
| 105 | 105 | ||
| 106 | static struct platform_driver pcf50633_regulator_driver = { | 106 | static struct platform_driver pcf50633_regulator_driver = { |
| 107 | .driver = { | 107 | .driver = { |
| 108 | .name = "pcf50633-regltr", | 108 | .name = "pcf50633-regulator", |
| 109 | }, | 109 | }, |
| 110 | .probe = pcf50633_regulator_probe, | 110 | .probe = pcf50633_regulator_probe, |
| 111 | }; | 111 | }; |
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 8b5e4c712a01..ab174f20ca11 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c | |||
| @@ -309,21 +309,24 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip) | |||
| 309 | return ret; | 309 | return ret; |
| 310 | 310 | ||
| 311 | switch (value & 0x0f) { | 311 | switch (value & 0x0f) { |
| 312 | /* Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 as ID=8 */ | 312 | /* |
| 313 | case 0x8: | 313 | * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 |
| 314 | dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); | 314 | * as ID=8 |
| 315 | case 0x0: | 315 | */ |
| 316 | break; | 316 | case 0x8: |
| 317 | default: | 317 | dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); |
| 318 | dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); | 318 | case 0x0: |
| 319 | return -ENODEV; | 319 | break; |
| 320 | default: | ||
| 321 | dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); | ||
| 322 | return -ENODEV; | ||
| 320 | } | 323 | } |
| 321 | 324 | ||
| 322 | ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value); | 325 | ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value); |
| 323 | if (ret) | 326 | if (ret) |
| 324 | return ret; | 327 | return ret; |
| 325 | dev_info(pfuze_chip->dev, | 328 | dev_info(pfuze_chip->dev, |
| 326 | "Full lay: %x, Metal lay: %x\n", | 329 | "Full layer: %x, Metal layer: %x\n", |
| 327 | (value & 0xf0) >> 4, value & 0x0f); | 330 | (value & 0xf0) >> 4, value & 0x0f); |
| 328 | 331 | ||
| 329 | ret = regmap_read(pfuze_chip->regmap, PFUZE100_FABID, &value); | 332 | ret = regmap_read(pfuze_chip->regmap, PFUZE100_FABID, &value); |
| @@ -408,31 +411,18 @@ static int pfuze100_regulator_probe(struct i2c_client *client, | |||
| 408 | config.driver_data = pfuze_chip; | 411 | config.driver_data = pfuze_chip; |
| 409 | config.of_node = match_of_node(i); | 412 | config.of_node = match_of_node(i); |
| 410 | 413 | ||
| 411 | pfuze_chip->regulators[i] = regulator_register(desc, &config); | 414 | pfuze_chip->regulators[i] = |
| 415 | devm_regulator_register(&client->dev, desc, &config); | ||
| 412 | if (IS_ERR(pfuze_chip->regulators[i])) { | 416 | if (IS_ERR(pfuze_chip->regulators[i])) { |
| 413 | dev_err(&client->dev, "register regulator%s failed\n", | 417 | dev_err(&client->dev, "register regulator%s failed\n", |
| 414 | pfuze100_regulators[i].desc.name); | 418 | pfuze100_regulators[i].desc.name); |
| 415 | ret = PTR_ERR(pfuze_chip->regulators[i]); | 419 | return PTR_ERR(pfuze_chip->regulators[i]); |
| 416 | while (--i >= 0) | ||
| 417 | regulator_unregister(pfuze_chip->regulators[i]); | ||
| 418 | return ret; | ||
| 419 | } | 420 | } |
| 420 | } | 421 | } |
| 421 | 422 | ||
| 422 | return 0; | 423 | return 0; |
| 423 | } | 424 | } |
| 424 | 425 | ||
| 425 | static int pfuze100_regulator_remove(struct i2c_client *client) | ||
| 426 | { | ||
| 427 | int i; | ||
| 428 | struct pfuze_chip *pfuze_chip = i2c_get_clientdata(client); | ||
| 429 | |||
| 430 | for (i = 0; i < PFUZE100_MAX_REGULATOR; i++) | ||
| 431 | regulator_unregister(pfuze_chip->regulators[i]); | ||
| 432 | |||
| 433 | return 0; | ||
| 434 | } | ||
| 435 | |||
| 436 | static struct i2c_driver pfuze_driver = { | 426 | static struct i2c_driver pfuze_driver = { |
| 437 | .id_table = pfuze_device_id, | 427 | .id_table = pfuze_device_id, |
| 438 | .driver = { | 428 | .driver = { |
| @@ -441,7 +431,6 @@ static struct i2c_driver pfuze_driver = { | |||
| 441 | .of_match_table = pfuze_dt_ids, | 431 | .of_match_table = pfuze_dt_ids, |
| 442 | }, | 432 | }, |
| 443 | .probe = pfuze100_regulator_probe, | 433 | .probe = pfuze100_regulator_probe, |
| 444 | .remove = pfuze100_regulator_remove, | ||
| 445 | }; | 434 | }; |
| 446 | module_i2c_driver(pfuze_driver); | 435 | module_i2c_driver(pfuze_driver); |
| 447 | 436 | ||
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 9e61922d8230..d9e557990577 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c | |||
| @@ -70,8 +70,6 @@ static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev, | |||
| 70 | ramp_delay = s2mps11->ramp_delay2; | 70 | ramp_delay = s2mps11->ramp_delay2; |
| 71 | break; | 71 | break; |
| 72 | case S2MPS11_BUCK3: | 72 | case S2MPS11_BUCK3: |
| 73 | ramp_delay = s2mps11->ramp_delay34; | ||
| 74 | break; | ||
| 75 | case S2MPS11_BUCK4: | 73 | case S2MPS11_BUCK4: |
| 76 | ramp_delay = s2mps11->ramp_delay34; | 74 | ramp_delay = s2mps11->ramp_delay34; |
| 77 | break; | 75 | break; |
diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c index f78857bd6a15..a7e152696a02 100644 --- a/drivers/regulator/stw481x-vmmc.c +++ b/drivers/regulator/stw481x-vmmc.c | |||
| @@ -74,7 +74,8 @@ static int stw481x_vmmc_regulator_probe(struct platform_device *pdev) | |||
| 74 | config.init_data = of_get_regulator_init_data(&pdev->dev, | 74 | config.init_data = of_get_regulator_init_data(&pdev->dev, |
| 75 | pdev->dev.of_node); | 75 | pdev->dev.of_node); |
| 76 | 76 | ||
| 77 | stw481x->vmmc_regulator = regulator_register(&vmmc_regulator, &config); | 77 | stw481x->vmmc_regulator = devm_regulator_register(&pdev->dev, |
| 78 | &vmmc_regulator, &config); | ||
| 78 | if (IS_ERR(stw481x->vmmc_regulator)) { | 79 | if (IS_ERR(stw481x->vmmc_regulator)) { |
| 79 | dev_err(&pdev->dev, | 80 | dev_err(&pdev->dev, |
| 80 | "error initializing STw481x VMMC regulator\n"); | 81 | "error initializing STw481x VMMC regulator\n"); |
| @@ -85,14 +86,6 @@ static int stw481x_vmmc_regulator_probe(struct platform_device *pdev) | |||
| 85 | return 0; | 86 | return 0; |
| 86 | } | 87 | } |
| 87 | 88 | ||
| 88 | static int stw481x_vmmc_regulator_remove(struct platform_device *pdev) | ||
| 89 | { | ||
| 90 | struct stw481x *stw481x = dev_get_platdata(&pdev->dev); | ||
| 91 | |||
| 92 | regulator_unregister(stw481x->vmmc_regulator); | ||
| 93 | return 0; | ||
| 94 | } | ||
| 95 | |||
| 96 | static const struct of_device_id stw481x_vmmc_match[] = { | 89 | static const struct of_device_id stw481x_vmmc_match[] = { |
| 97 | { .compatible = "st,stw481x-vmmc", }, | 90 | { .compatible = "st,stw481x-vmmc", }, |
| 98 | {}, | 91 | {}, |
| @@ -105,7 +98,6 @@ static struct platform_driver stw481x_vmmc_regulator_driver = { | |||
| 105 | .of_match_table = stw481x_vmmc_match, | 98 | .of_match_table = stw481x_vmmc_match, |
| 106 | }, | 99 | }, |
| 107 | .probe = stw481x_vmmc_regulator_probe, | 100 | .probe = stw481x_vmmc_regulator_probe, |
| 108 | .remove = stw481x_vmmc_regulator_remove, | ||
| 109 | }; | 101 | }; |
| 110 | 102 | ||
| 111 | module_platform_driver(stw481x_vmmc_regulator_driver); | 103 | module_platform_driver(stw481x_vmmc_regulator_driver); |
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c index b0a3f0917a27..b3764f594ee9 100644 --- a/drivers/regulator/tps51632-regulator.c +++ b/drivers/regulator/tps51632-regulator.c | |||
| @@ -70,16 +70,16 @@ | |||
| 70 | #define TPS51632_POWER_STATE_SINGLE_PHASE_CCM 0x1 | 70 | #define TPS51632_POWER_STATE_SINGLE_PHASE_CCM 0x1 |
| 71 | #define TPS51632_POWER_STATE_SINGLE_PHASE_DCM 0x2 | 71 | #define TPS51632_POWER_STATE_SINGLE_PHASE_DCM 0x2 |
| 72 | 72 | ||
| 73 | #define TPS51632_MIN_VOLATGE 500000 | 73 | #define TPS51632_MIN_VOLTAGE 500000 |
| 74 | #define TPS51632_MAX_VOLATGE 1520000 | 74 | #define TPS51632_MAX_VOLTAGE 1520000 |
| 75 | #define TPS51632_VOLATGE_STEP_10mV 10000 | 75 | #define TPS51632_VOLTAGE_STEP_10mV 10000 |
| 76 | #define TPS51632_VOLATGE_STEP_20mV 20000 | 76 | #define TPS51632_VOLTAGE_STEP_20mV 20000 |
| 77 | #define TPS51632_MAX_VSEL 0x7F | 77 | #define TPS51632_MAX_VSEL 0x7F |
| 78 | #define TPS51632_MIN_VSEL 0x19 | 78 | #define TPS51632_MIN_VSEL 0x19 |
| 79 | #define TPS51632_DEFAULT_RAMP_DELAY 6000 | 79 | #define TPS51632_DEFAULT_RAMP_DELAY 6000 |
| 80 | #define TPS51632_VOLT_VSEL(uV) \ | 80 | #define TPS51632_VOLT_VSEL(uV) \ |
| 81 | (DIV_ROUND_UP(uV - TPS51632_MIN_VOLATGE, \ | 81 | (DIV_ROUND_UP(uV - TPS51632_MIN_VOLTAGE, \ |
| 82 | TPS51632_VOLATGE_STEP_10mV) + \ | 82 | TPS51632_VOLTAGE_STEP_10mV) + \ |
| 83 | TPS51632_MIN_VSEL) | 83 | TPS51632_MIN_VSEL) |
| 84 | 84 | ||
| 85 | /* TPS51632 chip information */ | 85 | /* TPS51632 chip information */ |
| @@ -243,9 +243,9 @@ static struct tps51632_regulator_platform_data * | |||
| 243 | pdata->dvfs_step_20mV = of_property_read_bool(np, "ti,dvfs-step-20mV"); | 243 | pdata->dvfs_step_20mV = of_property_read_bool(np, "ti,dvfs-step-20mV"); |
| 244 | 244 | ||
| 245 | pdata->base_voltage_uV = pdata->reg_init_data->constraints.min_uV ? : | 245 | pdata->base_voltage_uV = pdata->reg_init_data->constraints.min_uV ? : |
| 246 | TPS51632_MIN_VOLATGE; | 246 | TPS51632_MIN_VOLTAGE; |
| 247 | pdata->max_voltage_uV = pdata->reg_init_data->constraints.max_uV ? : | 247 | pdata->max_voltage_uV = pdata->reg_init_data->constraints.max_uV ? : |
| 248 | TPS51632_MAX_VOLATGE; | 248 | TPS51632_MAX_VOLTAGE; |
| 249 | return pdata; | 249 | return pdata; |
| 250 | } | 250 | } |
| 251 | #else | 251 | #else |
| @@ -284,15 +284,15 @@ static int tps51632_probe(struct i2c_client *client, | |||
| 284 | } | 284 | } |
| 285 | 285 | ||
| 286 | if (pdata->enable_pwm_dvfs) { | 286 | if (pdata->enable_pwm_dvfs) { |
| 287 | if ((pdata->base_voltage_uV < TPS51632_MIN_VOLATGE) || | 287 | if ((pdata->base_voltage_uV < TPS51632_MIN_VOLTAGE) || |
| 288 | (pdata->base_voltage_uV > TPS51632_MAX_VOLATGE)) { | 288 | (pdata->base_voltage_uV > TPS51632_MAX_VOLTAGE)) { |
| 289 | dev_err(&client->dev, "Invalid base_voltage_uV setting\n"); | 289 | dev_err(&client->dev, "Invalid base_voltage_uV setting\n"); |
| 290 | return -EINVAL; | 290 | return -EINVAL; |
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | if ((pdata->max_voltage_uV) && | 293 | if ((pdata->max_voltage_uV) && |
| 294 | ((pdata->max_voltage_uV < TPS51632_MIN_VOLATGE) || | 294 | ((pdata->max_voltage_uV < TPS51632_MIN_VOLTAGE) || |
| 295 | (pdata->max_voltage_uV > TPS51632_MAX_VOLATGE))) { | 295 | (pdata->max_voltage_uV > TPS51632_MAX_VOLTAGE))) { |
| 296 | dev_err(&client->dev, "Invalid max_voltage_uV setting\n"); | 296 | dev_err(&client->dev, "Invalid max_voltage_uV setting\n"); |
| 297 | return -EINVAL; | 297 | return -EINVAL; |
| 298 | } | 298 | } |
| @@ -305,11 +305,11 @@ static int tps51632_probe(struct i2c_client *client, | |||
| 305 | } | 305 | } |
| 306 | 306 | ||
| 307 | tps->dev = &client->dev; | 307 | tps->dev = &client->dev; |
| 308 | tps->desc.name = id->name; | 308 | tps->desc.name = client->name; |
| 309 | tps->desc.id = 0; | 309 | tps->desc.id = 0; |
| 310 | tps->desc.ramp_delay = TPS51632_DEFAULT_RAMP_DELAY; | 310 | tps->desc.ramp_delay = TPS51632_DEFAULT_RAMP_DELAY; |
| 311 | tps->desc.min_uV = TPS51632_MIN_VOLATGE; | 311 | tps->desc.min_uV = TPS51632_MIN_VOLTAGE; |
| 312 | tps->desc.uV_step = TPS51632_VOLATGE_STEP_10mV; | 312 | tps->desc.uV_step = TPS51632_VOLTAGE_STEP_10mV; |
| 313 | tps->desc.linear_min_sel = TPS51632_MIN_VSEL; | 313 | tps->desc.linear_min_sel = TPS51632_MIN_VSEL; |
| 314 | tps->desc.n_voltages = TPS51632_MAX_VSEL + 1; | 314 | tps->desc.n_voltages = TPS51632_MAX_VSEL + 1; |
| 315 | tps->desc.ops = &tps51632_dcdc_ops; | 315 | tps->desc.ops = &tps51632_dcdc_ops; |
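Besides correcting the VOLATGE spelling, the tps51632 macros above encode a 10 mV step starting at 500 mV with a selector offset of 0x19, so TPS51632_VOLT_VSEL(1000000) works out to DIV_ROUND_UP(1000000 - 500000, 10000) + 0x19 = 50 + 25 = 0x4B. A sketch of the same arithmetic (illustrative helper, constants copied from the defines above):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

static unsigned int example_volt_to_vsel(int uv)
{
	/* 1000000 uV -> 0x4B, within [TPS51632_MIN_VSEL, TPS51632_MAX_VSEL] */
	return DIV_ROUND_UP(uv - 500000, 10000) + 0x19;
}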
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c index c2c0185a2dcd..c3fa15a299b1 100644 --- a/drivers/regulator/tps62360-regulator.c +++ b/drivers/regulator/tps62360-regulator.c | |||
| @@ -360,7 +360,7 @@ static int tps62360_probe(struct i2c_client *client, | |||
| 360 | dev_err(&client->dev, "Error: No device match found\n"); | 360 | dev_err(&client->dev, "Error: No device match found\n"); |
| 361 | return -ENODEV; | 361 | return -ENODEV; |
| 362 | } | 362 | } |
| 363 | chip_id = (int)match->data; | 363 | chip_id = (int)(long)match->data; |
| 364 | if (!pdata) | 364 | if (!pdata) |
| 365 | pdata = of_get_tps62360_platform_data(&client->dev); | 365 | pdata = of_get_tps62360_platform_data(&client->dev); |
| 366 | } else if (id) { | 366 | } else if (id) { |
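The tps62360 hunk is a 64-bit cleanliness fix: `match->data` is a pointer-sized field, so casting it straight to `int` truncates and warns on LP64 builds; widening through `long` first keeps the conversion explicit. A minimal sketch of the same two-step cast, using a stand-in value rather than the driver's real match table:

#include <stdio.h>

int main(void)
{
        const void *data = (const void *)(long)42;  /* an id stashed in a pointer */

        /* int id = (int)data;       -- truncates/warns on 64-bit targets   */
        int id = (int)(long)data;    /* two-step cast, as in the patch      */

        printf("chip id = %d\n", id);
        return 0;
}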
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index a00132e31ec7..f50dd847eebc 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c | |||
| @@ -88,6 +88,11 @@ static const unsigned int VMMC_VSEL_table[] = { | |||
| 88 | 1800000, 2800000, 3000000, 3300000, | 88 | 1800000, 2800000, 3000000, 3300000, |
| 89 | }; | 89 | }; |
| 90 | 90 | ||
| 91 | /* supported BBCH voltages in microvolts */ | ||
| 92 | static const unsigned int VBB_VSEL_table[] = { | ||
| 93 | 3000000, 2520000, 3150000, 5000000, | ||
| 94 | }; | ||
| 95 | |||
| 91 | struct tps_info { | 96 | struct tps_info { |
| 92 | const char *name; | 97 | const char *name; |
| 93 | const char *vin_name; | 98 | const char *vin_name; |
| @@ -183,6 +188,12 @@ static struct tps_info tps65910_regs[] = { | |||
| 183 | .voltage_table = VMMC_VSEL_table, | 188 | .voltage_table = VMMC_VSEL_table, |
| 184 | .enable_time_us = 100, | 189 | .enable_time_us = 100, |
| 185 | }, | 190 | }, |
| 191 | { | ||
| 192 | .name = "vbb", | ||
| 193 | .vin_name = "vcc7", | ||
| 194 | .n_voltages = ARRAY_SIZE(VBB_VSEL_table), | ||
| 195 | .voltage_table = VBB_VSEL_table, | ||
| 196 | }, | ||
| 186 | }; | 197 | }; |
| 187 | 198 | ||
| 188 | static struct tps_info tps65911_regs[] = { | 199 | static struct tps_info tps65911_regs[] = { |
| @@ -339,6 +350,8 @@ static int tps65910_get_ctrl_register(int id) | |||
| 339 | return TPS65910_VAUX33; | 350 | return TPS65910_VAUX33; |
| 340 | case TPS65910_REG_VMMC: | 351 | case TPS65910_REG_VMMC: |
| 341 | return TPS65910_VMMC; | 352 | return TPS65910_VMMC; |
| 353 | case TPS65910_REG_VBB: | ||
| 354 | return TPS65910_BBCH; | ||
| 342 | default: | 355 | default: |
| 343 | return -EINVAL; | 356 | return -EINVAL; |
| 344 | } | 357 | } |
| @@ -528,6 +541,10 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev) | |||
| 528 | value &= LDO_SEL_MASK; | 541 | value &= LDO_SEL_MASK; |
| 529 | value >>= LDO_SEL_SHIFT; | 542 | value >>= LDO_SEL_SHIFT; |
| 530 | break; | 543 | break; |
| 544 | case TPS65910_REG_VBB: | ||
| 545 | value &= BBCH_BBSEL_MASK; | ||
| 546 | value >>= BBCH_BBSEL_SHIFT; | ||
| 547 | break; | ||
| 531 | default: | 548 | default: |
| 532 | return -EINVAL; | 549 | return -EINVAL; |
| 533 | } | 550 | } |
| @@ -638,6 +655,9 @@ static int tps65910_set_voltage_sel(struct regulator_dev *dev, | |||
| 638 | case TPS65910_REG_VMMC: | 655 | case TPS65910_REG_VMMC: |
| 639 | return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK, | 656 | return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK, |
| 640 | selector << LDO_SEL_SHIFT); | 657 | selector << LDO_SEL_SHIFT); |
| 658 | case TPS65910_REG_VBB: | ||
| 659 | return tps65910_reg_update_bits(pmic->mfd, reg, BBCH_BBSEL_MASK, | ||
| 660 | selector << BBCH_BBSEL_SHIFT); | ||
| 641 | } | 661 | } |
| 642 | 662 | ||
| 643 | return -EINVAL; | 663 | return -EINVAL; |
| @@ -669,6 +689,9 @@ static int tps65911_set_voltage_sel(struct regulator_dev *dev, | |||
| 669 | case TPS65910_REG_VIO: | 689 | case TPS65910_REG_VIO: |
| 670 | return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK, | 690 | return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK, |
| 671 | selector << LDO_SEL_SHIFT); | 691 | selector << LDO_SEL_SHIFT); |
| 692 | case TPS65910_REG_VBB: | ||
| 693 | return tps65910_reg_update_bits(pmic->mfd, reg, BBCH_BBSEL_MASK, | ||
| 694 | selector << BBCH_BBSEL_SHIFT); | ||
| 672 | } | 695 | } |
| 673 | 696 | ||
| 674 | return -EINVAL; | 697 | return -EINVAL; |
| @@ -762,6 +785,18 @@ static struct regulator_ops tps65910_ops_vdd3 = { | |||
| 762 | .map_voltage = regulator_map_voltage_ascend, | 785 | .map_voltage = regulator_map_voltage_ascend, |
| 763 | }; | 786 | }; |
| 764 | 787 | ||
| 788 | static struct regulator_ops tps65910_ops_vbb = { | ||
| 789 | .is_enabled = regulator_is_enabled_regmap, | ||
| 790 | .enable = regulator_enable_regmap, | ||
| 791 | .disable = regulator_disable_regmap, | ||
| 792 | .set_mode = tps65910_set_mode, | ||
| 793 | .get_mode = tps65910_get_mode, | ||
| 794 | .get_voltage_sel = tps65910_get_voltage_sel, | ||
| 795 | .set_voltage_sel = tps65910_set_voltage_sel, | ||
| 796 | .list_voltage = regulator_list_voltage_table, | ||
| 797 | .map_voltage = regulator_map_voltage_iterate, | ||
| 798 | }; | ||
| 799 | |||
| 765 | static struct regulator_ops tps65910_ops = { | 800 | static struct regulator_ops tps65910_ops = { |
| 766 | .is_enabled = regulator_is_enabled_regmap, | 801 | .is_enabled = regulator_is_enabled_regmap, |
| 767 | .enable = regulator_enable_regmap, | 802 | .enable = regulator_enable_regmap, |
| @@ -944,6 +979,7 @@ static struct of_regulator_match tps65910_matches[] = { | |||
| 944 | { .name = "vaux2", .driver_data = (void *) &tps65910_regs[10] }, | 979 | { .name = "vaux2", .driver_data = (void *) &tps65910_regs[10] }, |
| 945 | { .name = "vaux33", .driver_data = (void *) &tps65910_regs[11] }, | 980 | { .name = "vaux33", .driver_data = (void *) &tps65910_regs[11] }, |
| 946 | { .name = "vmmc", .driver_data = (void *) &tps65910_regs[12] }, | 981 | { .name = "vmmc", .driver_data = (void *) &tps65910_regs[12] }, |
| 982 | { .name = "vbb", .driver_data = (void *) &tps65910_regs[13] }, | ||
| 947 | }; | 983 | }; |
| 948 | 984 | ||
| 949 | static struct of_regulator_match tps65911_matches[] = { | 985 | static struct of_regulator_match tps65911_matches[] = { |
| @@ -1145,6 +1181,10 @@ static int tps65910_probe(struct platform_device *pdev) | |||
| 1145 | pmic->desc[i].ops = &tps65910_ops_dcdc; | 1181 | pmic->desc[i].ops = &tps65910_ops_dcdc; |
| 1146 | pmic->desc[i].ramp_delay = 5000; | 1182 | pmic->desc[i].ramp_delay = 5000; |
| 1147 | } | 1183 | } |
| 1184 | } else if (i == TPS65910_REG_VBB && | ||
| 1185 | tps65910_chip_id(tps65910) == TPS65910) { | ||
| 1186 | pmic->desc[i].ops = &tps65910_ops_vbb; | ||
| 1187 | pmic->desc[i].volt_table = info->voltage_table; | ||
| 1148 | } else { | 1188 | } else { |
| 1149 | if (tps65910_chip_id(tps65910) == TPS65910) { | 1189 | if (tps65910_chip_id(tps65910) == TPS65910) { |
| 1150 | pmic->desc[i].ops = &tps65910_ops; | 1190 | pmic->desc[i].ops = &tps65910_ops; |
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 8ebd785485c7..fed28abef419 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c | |||
| @@ -58,7 +58,7 @@ struct twlreg_info { | |||
| 58 | struct regulator_desc desc; | 58 | struct regulator_desc desc; |
| 59 | 59 | ||
| 60 | /* chip specific features */ | 60 | /* chip specific features */ |
| 61 | unsigned long features; | 61 | unsigned long features; |
| 62 | 62 | ||
| 63 | /* | 63 | /* |
| 64 | * optional override functions for voltage set/get | 64 | * optional override functions for voltage set/get |
| @@ -1128,7 +1128,7 @@ static int twlreg_probe(struct platform_device *pdev) | |||
| 1128 | if (!initdata) | 1128 | if (!initdata) |
| 1129 | return -EINVAL; | 1129 | return -EINVAL; |
| 1130 | 1130 | ||
| 1131 | info = kmemdup(template, sizeof (*info), GFP_KERNEL); | 1131 | info = kmemdup(template, sizeof(*info), GFP_KERNEL); |
| 1132 | if (!info) | 1132 | if (!info) |
| 1133 | return -ENOMEM; | 1133 | return -ENOMEM; |
| 1134 | 1134 | ||
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 6823e6f2b88a..04cf9c16ef23 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c | |||
| @@ -762,8 +762,7 @@ static int wm831x_boostp_probe(struct platform_device *pdev) | |||
| 762 | res = platform_get_resource(pdev, IORESOURCE_REG, 0); | 762 | res = platform_get_resource(pdev, IORESOURCE_REG, 0); |
| 763 | if (res == NULL) { | 763 | if (res == NULL) { |
| 764 | dev_err(&pdev->dev, "No REG resource\n"); | 764 | dev_err(&pdev->dev, "No REG resource\n"); |
| 765 | ret = -EINVAL; | 765 | return -EINVAL; |
| 766 | goto err; | ||
| 767 | } | 766 | } |
| 768 | dcdc->base = res->start; | 767 | dcdc->base = res->start; |
| 769 | 768 | ||
| @@ -788,7 +787,7 @@ static int wm831x_boostp_probe(struct platform_device *pdev) | |||
| 788 | ret = PTR_ERR(dcdc->regulator); | 787 | ret = PTR_ERR(dcdc->regulator); |
| 789 | dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n", | 788 | dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n", |
| 790 | id + 1, ret); | 789 | id + 1, ret); |
| 791 | goto err; | 790 | return ret; |
| 792 | } | 791 | } |
| 793 | 792 | ||
| 794 | irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")); | 793 | irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")); |
| @@ -799,15 +798,12 @@ static int wm831x_boostp_probe(struct platform_device *pdev) | |||
| 799 | if (ret != 0) { | 798 | if (ret != 0) { |
| 800 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 799 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
| 801 | irq, ret); | 800 | irq, ret); |
| 802 | goto err; | 801 | return ret; |
| 803 | } | 802 | } |
| 804 | 803 | ||
| 805 | platform_set_drvdata(pdev, dcdc); | 804 | platform_set_drvdata(pdev, dcdc); |
| 806 | 805 | ||
| 807 | return 0; | 806 | return 0; |
| 808 | |||
| 809 | err: | ||
| 810 | return ret; | ||
| 811 | } | 807 | } |
| 812 | 808 | ||
| 813 | static struct platform_driver wm831x_boostp_driver = { | 809 | static struct platform_driver wm831x_boostp_driver = { |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 3f4ca4e09a4c..34629ea913d4 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
| @@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
| 942 | return rc; | 942 | return rc; |
| 943 | } | 943 | } |
| 944 | 944 | ||
| 945 | tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows); | 945 | tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols); |
| 946 | if (IS_ERR(tp->screen)) { | 946 | if (IS_ERR(tp->screen)) { |
| 947 | rc = PTR_ERR(tp->screen); | 947 | rc = PTR_ERR(tp->screen); |
| 948 | raw3270_put_view(&tp->view); | 948 | raw3270_put_view(&tp->view); |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 596480022b0a..38a1257e76e1 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
| @@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, | |||
| 471 | schedule_delayed_work(&tgt->sess_del_work, 0); | 471 | schedule_delayed_work(&tgt->sess_del_work, 0); |
| 472 | else | 472 | else |
| 473 | schedule_delayed_work(&tgt->sess_del_work, | 473 | schedule_delayed_work(&tgt->sess_del_work, |
| 474 | jiffies - sess->expires); | 474 | sess->expires - jiffies); |
| 475 | } | 475 | } |
| 476 | 476 | ||
| 477 | /* ha->hardware_lock supposed to be held on entry */ | 477 | /* ha->hardware_lock supposed to be held on entry */ |
| @@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) | |||
| 550 | struct scsi_qla_host *vha = tgt->vha; | 550 | struct scsi_qla_host *vha = tgt->vha; |
| 551 | struct qla_hw_data *ha = vha->hw; | 551 | struct qla_hw_data *ha = vha->hw; |
| 552 | struct qla_tgt_sess *sess; | 552 | struct qla_tgt_sess *sess; |
| 553 | unsigned long flags; | 553 | unsigned long flags, elapsed; |
| 554 | 554 | ||
| 555 | spin_lock_irqsave(&ha->hardware_lock, flags); | 555 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 556 | while (!list_empty(&tgt->del_sess_list)) { | 556 | while (!list_empty(&tgt->del_sess_list)) { |
| 557 | sess = list_entry(tgt->del_sess_list.next, typeof(*sess), | 557 | sess = list_entry(tgt->del_sess_list.next, typeof(*sess), |
| 558 | del_list_entry); | 558 | del_list_entry); |
| 559 | if (time_after_eq(jiffies, sess->expires)) { | 559 | elapsed = jiffies; |
| 560 | if (time_after_eq(elapsed, sess->expires)) { | ||
| 560 | qlt_undelete_sess(sess); | 561 | qlt_undelete_sess(sess); |
| 561 | 562 | ||
| 562 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, | 563 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, |
| @@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) | |||
| 566 | ha->tgt.tgt_ops->put_sess(sess); | 567 | ha->tgt.tgt_ops->put_sess(sess); |
| 567 | } else { | 568 | } else { |
| 568 | schedule_delayed_work(&tgt->sess_del_work, | 569 | schedule_delayed_work(&tgt->sess_del_work, |
| 569 | jiffies - sess->expires); | 570 | sess->expires - elapsed); |
| 570 | break; | 571 | break; |
| 571 | } | 572 | } |
| 572 | } | 573 | } |
| @@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, | |||
| 4290 | if (rc != 0) { | 4291 | if (rc != 0) { |
| 4291 | ha->tgt.tgt_ops = NULL; | 4292 | ha->tgt.tgt_ops = NULL; |
| 4292 | ha->tgt.target_lport_ptr = NULL; | 4293 | ha->tgt.target_lport_ptr = NULL; |
| 4294 | scsi_host_put(host); | ||
| 4293 | } | 4295 | } |
| 4294 | mutex_unlock(&qla_tgt_mutex); | 4296 | mutex_unlock(&qla_tgt_mutex); |
| 4295 | return rc; | 4297 | return rc; |
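The two qla_target hunks that touch `sess->expires` fix inverted jiffies arithmetic: `schedule_delayed_work()` takes a relative delay, and for a deadline still in the future `jiffies - sess->expires` underflows to a huge unsigned value, effectively parking the work forever. A plain-C model of the difference, with arbitrary tick values:

#include <stdio.h>

int main(void)
{
        unsigned long jiffies = 1000;              /* "now" */
        unsigned long expires = 1250;              /* deadline 250 ticks ahead */

        unsigned long wrong = jiffies - expires;   /* old code: underflows */
        unsigned long right = expires - jiffies;   /* new code: remaining ticks */

        printf("wrong delay: %lu\n", wrong);       /* ~ULONG_MAX - 249 */
        printf("right delay: %lu\n", right);       /* 250 */
        return 0;
}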
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c index 53fee2f9a498..8dfdd2732bdc 100644 --- a/drivers/staging/bcm/Bcmnet.c +++ b/drivers/staging/bcm/Bcmnet.c | |||
| @@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev) | |||
| 39 | return 0; | 39 | return 0; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb) | 42 | static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 43 | void *accel_priv) | ||
| 43 | { | 44 | { |
| 44 | return ClassifyPacket(netdev_priv(dev), skb); | 45 | return ClassifyPacket(netdev_priv(dev), skb); |
| 45 | } | 46 | } |
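The same mechanical change shows up again below for xlr_net.c and rtl8188eu's os_intfs.c: the `ndo_select_queue` callback prototype grew a third `accel_priv` argument this cycle, so staging drivers that implement it must be updated to match. A bare stub with the new shape, reduced to opaque types so it stands alone; the function name is ours, not a driver's:

#include <stdint.h>

struct net_device;      /* opaque stand-ins for the kernel structures */
struct sk_buff;

static uint16_t example_select_queue(struct net_device *dev,
                                     struct sk_buff *skb,
                                     void *accel_priv)
{
        (void)dev; (void)skb; (void)accel_priv;
        return 0;       /* always pick queue 0 in this sketch */
}

int main(void)
{
        return example_select_queue((struct net_device *)0,
                                    (struct sk_buff *)0, (void *)0);
}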
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 8f02bf66e20b..4964d2a2fc7d 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
| @@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev, | |||
| 446 | release_firmware(fw); | 446 | release_firmware(fw); |
| 447 | } | 447 | } |
| 448 | 448 | ||
| 449 | return ret; | 449 | return ret < 0 ? ret : 0; |
| 450 | } | 450 | } |
| 451 | EXPORT_SYMBOL_GPL(comedi_load_firmware); | 451 | EXPORT_SYMBOL_GPL(comedi_load_firmware); |
| 452 | 452 | ||
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c index 432e3f9c3301..c55f234b29e6 100644 --- a/drivers/staging/comedi/drivers/8255_pci.c +++ b/drivers/staging/comedi/drivers/8255_pci.c | |||
| @@ -63,7 +63,8 @@ enum pci_8255_boardid { | |||
| 63 | BOARD_ADLINK_PCI7296, | 63 | BOARD_ADLINK_PCI7296, |
| 64 | BOARD_CB_PCIDIO24, | 64 | BOARD_CB_PCIDIO24, |
| 65 | BOARD_CB_PCIDIO24H, | 65 | BOARD_CB_PCIDIO24H, |
| 66 | BOARD_CB_PCIDIO48H, | 66 | BOARD_CB_PCIDIO48H_OLD, |
| 67 | BOARD_CB_PCIDIO48H_NEW, | ||
| 67 | BOARD_CB_PCIDIO96H, | 68 | BOARD_CB_PCIDIO96H, |
| 68 | BOARD_NI_PCIDIO96, | 69 | BOARD_NI_PCIDIO96, |
| 69 | BOARD_NI_PCIDIO96B, | 70 | BOARD_NI_PCIDIO96B, |
| @@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = { | |||
| 106 | .dio_badr = 2, | 107 | .dio_badr = 2, |
| 107 | .n_8255 = 1, | 108 | .n_8255 = 1, |
| 108 | }, | 109 | }, |
| 109 | [BOARD_CB_PCIDIO48H] = { | 110 | [BOARD_CB_PCIDIO48H_OLD] = { |
| 110 | .name = "cb_pci-dio48h", | 111 | .name = "cb_pci-dio48h", |
| 111 | .dio_badr = 1, | 112 | .dio_badr = 1, |
| 112 | .n_8255 = 2, | 113 | .n_8255 = 2, |
| 113 | }, | 114 | }, |
| 115 | [BOARD_CB_PCIDIO48H_NEW] = { | ||
| 116 | .name = "cb_pci-dio48h", | ||
| 117 | .dio_badr = 2, | ||
| 118 | .n_8255 = 2, | ||
| 119 | }, | ||
| 114 | [BOARD_CB_PCIDIO96H] = { | 120 | [BOARD_CB_PCIDIO96H] = { |
| 115 | .name = "cb_pci-dio96h", | 121 | .name = "cb_pci-dio96h", |
| 116 | .dio_badr = 2, | 122 | .dio_badr = 2, |
| @@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = { | |||
| 263 | { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, | 269 | { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, |
| 264 | { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, | 270 | { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, |
| 265 | { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, | 271 | { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, |
| 266 | { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H }, | 272 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000), |
| 273 | .driver_data = BOARD_CB_PCIDIO48H_OLD }, | ||
| 274 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b), | ||
| 275 | .driver_data = BOARD_CB_PCIDIO48H_NEW }, | ||
| 267 | { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, | 276 | { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, |
| 268 | { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, | 277 | { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, |
| 269 | { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, | 278 | { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, |
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c index 99421f90d189..0485d7f39867 100644 --- a/drivers/staging/iio/magnetometer/hmc5843.c +++ b/drivers/staging/iio/magnetometer/hmc5843.c | |||
| @@ -451,7 +451,12 @@ done: | |||
| 451 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ | 451 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ |
| 452 | BIT(IIO_CHAN_INFO_SAMP_FREQ), \ | 452 | BIT(IIO_CHAN_INFO_SAMP_FREQ), \ |
| 453 | .scan_index = idx, \ | 453 | .scan_index = idx, \ |
| 454 | .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ | 454 | .scan_type = { \ |
| 455 | .sign = 's', \ | ||
| 456 | .realbits = 16, \ | ||
| 457 | .storagebits = 16, \ | ||
| 458 | .endianness = IIO_BE, \ | ||
| 459 | }, \ | ||
| 455 | } | 460 | } |
| 456 | 461 | ||
| 457 | static const struct iio_chan_spec hmc5843_channels[] = { | 462 | static const struct iio_chan_spec hmc5843_channels[] = { |
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 6bd015ac9d68..96e4eee344ef 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c | |||
| @@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm) | |||
| 88 | 88 | ||
| 89 | imx_drm_device_put(); | 89 | imx_drm_device_put(); |
| 90 | 90 | ||
| 91 | drm_mode_config_cleanup(imxdrm->drm); | 91 | drm_vblank_cleanup(imxdrm->drm); |
| 92 | drm_kms_helper_poll_fini(imxdrm->drm); | 92 | drm_kms_helper_poll_fini(imxdrm->drm); |
| 93 | drm_mode_config_cleanup(imxdrm->drm); | ||
| 93 | 94 | ||
| 94 | return 0; | 95 | return 0; |
| 95 | } | 96 | } |
| @@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm, | |||
| 199 | if (!file->is_master) | 200 | if (!file->is_master) |
| 200 | return; | 201 | return; |
| 201 | 202 | ||
| 202 | for (i = 0; i < 4; i++) | 203 | for (i = 0; i < MAX_CRTC; i++) |
| 203 | imx_drm_disable_vblank(drm , i); | 204 | imx_drm_disable_vblank(drm, i); |
| 204 | } | 205 | } |
| 205 | 206 | ||
| 206 | static const struct file_operations imx_drm_driver_fops = { | 207 | static const struct file_operations imx_drm_driver_fops = { |
| @@ -376,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) | |||
| 376 | struct imx_drm_device *imxdrm = __imx_drm_device(); | 377 | struct imx_drm_device *imxdrm = __imx_drm_device(); |
| 377 | int ret; | 378 | int ret; |
| 378 | 379 | ||
| 379 | drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, | ||
| 380 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | ||
| 381 | ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); | 380 | ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); |
| 382 | if (ret) | 381 | if (ret) |
| 383 | return ret; | 382 | return ret; |
| @@ -385,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) | |||
| 385 | drm_crtc_helper_add(imx_drm_crtc->crtc, | 384 | drm_crtc_helper_add(imx_drm_crtc->crtc, |
| 386 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); | 385 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); |
| 387 | 386 | ||
| 387 | drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, | ||
| 388 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | ||
| 389 | |||
| 388 | drm_mode_group_reinit(imxdrm->drm); | 390 | drm_mode_group_reinit(imxdrm->drm); |
| 389 | 391 | ||
| 390 | return 0; | 392 | return 0; |
| @@ -428,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
| 428 | ret = drm_mode_group_init_legacy_group(imxdrm->drm, | 430 | ret = drm_mode_group_init_legacy_group(imxdrm->drm, |
| 429 | &imxdrm->drm->primary->mode_group); | 431 | &imxdrm->drm->primary->mode_group); |
| 430 | if (ret) | 432 | if (ret) |
| 431 | goto err_init; | 433 | goto err_kms; |
| 432 | 434 | ||
| 433 | ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); | 435 | ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); |
| 434 | if (ret) | 436 | if (ret) |
| 435 | goto err_init; | 437 | goto err_kms; |
| 436 | 438 | ||
| 437 | /* | 439 | /* |
| 438 | * with vblank_disable_allowed = true, vblank interrupt will be disabled | 440 | * with vblank_disable_allowed = true, vblank interrupt will be disabled |
| @@ -441,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
| 441 | */ | 443 | */ |
| 442 | imxdrm->drm->vblank_disable_allowed = true; | 444 | imxdrm->drm->vblank_disable_allowed = true; |
| 443 | 445 | ||
| 444 | if (!imx_drm_device_get()) | 446 | if (!imx_drm_device_get()) { |
| 445 | ret = -EINVAL; | 447 | ret = -EINVAL; |
| 448 | goto err_vblank; | ||
| 449 | } | ||
| 446 | 450 | ||
| 447 | ret = 0; | 451 | mutex_unlock(&imxdrm->mutex); |
| 452 | return 0; | ||
| 448 | 453 | ||
| 449 | err_init: | 454 | err_vblank: |
| 455 | drm_vblank_cleanup(drm); | ||
| 456 | err_kms: | ||
| 457 | drm_kms_helper_poll_fini(drm); | ||
| 458 | drm_mode_config_cleanup(drm); | ||
| 450 | mutex_unlock(&imxdrm->mutex); | 459 | mutex_unlock(&imxdrm->mutex); |
| 451 | 460 | ||
| 452 | return ret; | 461 | return ret; |
| @@ -492,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, | |||
| 492 | 501 | ||
| 493 | mutex_lock(&imxdrm->mutex); | 502 | mutex_lock(&imxdrm->mutex); |
| 494 | 503 | ||
| 504 | /* | ||
| 505 | * The vblank arrays are dimensioned by MAX_CRTC - we can't | ||
| 506 | * pass IDs greater than this to those functions. | ||
| 507 | */ | ||
| 508 | if (imxdrm->pipes >= MAX_CRTC) { | ||
| 509 | ret = -EINVAL; | ||
| 510 | goto err_busy; | ||
| 511 | } | ||
| 512 | |||
| 495 | if (imxdrm->drm->open_count) { | 513 | if (imxdrm->drm->open_count) { |
| 496 | ret = -EBUSY; | 514 | ret = -EBUSY; |
| 497 | goto err_busy; | 515 | goto err_busy; |
| @@ -528,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, | |||
| 528 | return 0; | 546 | return 0; |
| 529 | 547 | ||
| 530 | err_register: | 548 | err_register: |
| 549 | list_del(&imx_drm_crtc->list); | ||
| 531 | kfree(imx_drm_crtc); | 550 | kfree(imx_drm_crtc); |
| 532 | err_alloc: | 551 | err_alloc: |
| 533 | err_busy: | 552 | err_busy: |
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index 680f4c8fa081..2c44fef8d58b 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c | |||
| @@ -114,7 +114,6 @@ struct imx_tve { | |||
| 114 | struct drm_encoder encoder; | 114 | struct drm_encoder encoder; |
| 115 | struct imx_drm_encoder *imx_drm_encoder; | 115 | struct imx_drm_encoder *imx_drm_encoder; |
| 116 | struct device *dev; | 116 | struct device *dev; |
| 117 | spinlock_t enable_lock; /* serializes tve_enable/disable */ | ||
| 118 | spinlock_t lock; /* register lock */ | 117 | spinlock_t lock; /* register lock */ |
| 119 | bool enabled; | 118 | bool enabled; |
| 120 | int mode; | 119 | int mode; |
| @@ -146,10 +145,8 @@ __releases(&tve->lock) | |||
| 146 | 145 | ||
| 147 | static void tve_enable(struct imx_tve *tve) | 146 | static void tve_enable(struct imx_tve *tve) |
| 148 | { | 147 | { |
| 149 | unsigned long flags; | ||
| 150 | int ret; | 148 | int ret; |
| 151 | 149 | ||
| 152 | spin_lock_irqsave(&tve->enable_lock, flags); | ||
| 153 | if (!tve->enabled) { | 150 | if (!tve->enabled) { |
| 154 | tve->enabled = true; | 151 | tve->enabled = true; |
| 155 | clk_prepare_enable(tve->clk); | 152 | clk_prepare_enable(tve->clk); |
| @@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve) | |||
| 169 | TVE_CD_SM_IEN | | 166 | TVE_CD_SM_IEN | |
| 170 | TVE_CD_LM_IEN | | 167 | TVE_CD_LM_IEN | |
| 171 | TVE_CD_MON_END_IEN); | 168 | TVE_CD_MON_END_IEN); |
| 172 | |||
| 173 | spin_unlock_irqrestore(&tve->enable_lock, flags); | ||
| 174 | } | 169 | } |
| 175 | 170 | ||
| 176 | static void tve_disable(struct imx_tve *tve) | 171 | static void tve_disable(struct imx_tve *tve) |
| 177 | { | 172 | { |
| 178 | unsigned long flags; | ||
| 179 | int ret; | 173 | int ret; |
| 180 | 174 | ||
| 181 | spin_lock_irqsave(&tve->enable_lock, flags); | ||
| 182 | if (tve->enabled) { | 175 | if (tve->enabled) { |
| 183 | tve->enabled = false; | 176 | tve->enabled = false; |
| 184 | ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, | 177 | ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, |
| 185 | TVE_IPU_CLK_EN | TVE_EN, 0); | 178 | TVE_IPU_CLK_EN | TVE_EN, 0); |
| 186 | clk_disable_unprepare(tve->clk); | 179 | clk_disable_unprepare(tve->clk); |
| 187 | } | 180 | } |
| 188 | spin_unlock_irqrestore(&tve->enable_lock, flags); | ||
| 189 | } | 181 | } |
| 190 | 182 | ||
| 191 | static int tve_setup_tvout(struct imx_tve *tve) | 183 | static int tve_setup_tvout(struct imx_tve *tve) |
| @@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev) | |||
| 601 | 593 | ||
| 602 | tve->dev = &pdev->dev; | 594 | tve->dev = &pdev->dev; |
| 603 | spin_lock_init(&tve->lock); | 595 | spin_lock_init(&tve->lock); |
| 604 | spin_lock_init(&tve->enable_lock); | ||
| 605 | 596 | ||
| 606 | ddc_node = of_parse_phandle(np, "ddc", 0); | 597 | ddc_node = of_parse_phandle(np, "ddc", 0); |
| 607 | if (ddc_node) { | 598 | if (ddc_node) { |
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c index 7a22ce619ed2..97ca6924dbb3 100644 --- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c +++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c | |||
| @@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = { | |||
| 996 | }, | 996 | }, |
| 997 | }; | 997 | }; |
| 998 | 998 | ||
| 999 | static DEFINE_MUTEX(ipu_client_id_mutex); | ||
| 999 | static int ipu_client_id; | 1000 | static int ipu_client_id; |
| 1000 | 1001 | ||
| 1001 | static int ipu_add_subdevice_pdata(struct device *dev, | ||
| 1002 | const struct ipu_platform_reg *reg) | ||
| 1003 | { | ||
| 1004 | struct platform_device *pdev; | ||
| 1005 | |||
| 1006 | pdev = platform_device_register_data(dev, reg->name, ipu_client_id++, | ||
| 1007 | ®->pdata, sizeof(struct ipu_platform_reg)); | ||
| 1008 | |||
| 1009 | return PTR_ERR_OR_ZERO(pdev); | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | static int ipu_add_client_devices(struct ipu_soc *ipu) | 1002 | static int ipu_add_client_devices(struct ipu_soc *ipu) |
| 1013 | { | 1003 | { |
| 1014 | int ret; | 1004 | struct device *dev = ipu->dev; |
| 1015 | int i; | 1005 | unsigned i; |
| 1006 | int id, ret; | ||
| 1007 | |||
| 1008 | mutex_lock(&ipu_client_id_mutex); | ||
| 1009 | id = ipu_client_id; | ||
| 1010 | ipu_client_id += ARRAY_SIZE(client_reg); | ||
| 1011 | mutex_unlock(&ipu_client_id_mutex); | ||
| 1016 | 1012 | ||
| 1017 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { | 1013 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { |
| 1018 | const struct ipu_platform_reg *reg = &client_reg[i]; | 1014 | const struct ipu_platform_reg *reg = &client_reg[i]; |
| 1019 | ret = ipu_add_subdevice_pdata(ipu->dev, reg); | 1015 | struct platform_device *pdev; |
| 1020 | if (ret) | 1016 | |
| 1017 | pdev = platform_device_register_data(dev, reg->name, | ||
| 1018 | id++, ®->pdata, sizeof(reg->pdata)); | ||
| 1019 | |||
| 1020 | if (IS_ERR(pdev)) | ||
| 1021 | goto err_register; | 1021 | goto err_register; |
| 1022 | } | 1022 | } |
| 1023 | 1023 | ||
| 1024 | return 0; | 1024 | return 0; |
| 1025 | 1025 | ||
| 1026 | err_register: | 1026 | err_register: |
| 1027 | platform_device_unregister_children(to_platform_device(ipu->dev)); | 1027 | platform_device_unregister_children(to_platform_device(dev)); |
| 1028 | 1028 | ||
| 1029 | return ret; | 1029 | return ret; |
| 1030 | } | 1030 | } |
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index 235d2b1ec593..eedffed17e39 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c | |||
| @@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, | |||
| 306 | return NETDEV_TX_OK; | 306 | return NETDEV_TX_OK; |
| 307 | } | 307 | } |
| 308 | 308 | ||
| 309 | static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb) | 309 | static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, |
| 310 | void *accel_priv) | ||
| 310 | { | 311 | { |
| 311 | return (u16)smp_processor_id(); | 312 | return (u16)smp_processor_id(); |
| 312 | } | 313 | } |
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index 17659bb04bef..dd69e344e409 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c | |||
| @@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) | |||
| 652 | return dscp >> 5; | 652 | return dscp >> 5; |
| 653 | } | 653 | } |
| 654 | 654 | ||
| 655 | static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb) | 655 | static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 656 | void *accel_priv) | ||
| 656 | { | 657 | { |
| 657 | struct adapter *padapter = rtw_netdev_priv(dev); | 658 | struct adapter *padapter = rtw_netdev_priv(dev); |
| 658 | struct mlme_priv *pmlmepriv = &padapter->mlmepriv; | 659 | struct mlme_priv *pmlmepriv = &padapter->mlmepriv; |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d70e9119e906..00867190413c 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np) | |||
| 465 | */ | 465 | */ |
| 466 | send_sig(SIGINT, np->np_thread, 1); | 466 | send_sig(SIGINT, np->np_thread, 1); |
| 467 | kthread_stop(np->np_thread); | 467 | kthread_stop(np->np_thread); |
| 468 | np->np_thread = NULL; | ||
| 468 | } | 469 | } |
| 469 | 470 | ||
| 470 | np->np_transport->iscsit_free_np(np); | 471 | np->np_transport->iscsit_free_np(np); |
| @@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
| 823 | if (((hdr->flags & ISCSI_FLAG_CMD_READ) || | 824 | if (((hdr->flags & ISCSI_FLAG_CMD_READ) || |
| 824 | (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { | 825 | (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { |
| 825 | /* | 826 | /* |
| 826 | * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) | 827 | * From RFC-3720 Section 10.3.1: |
| 827 | * that adds support for RESERVE/RELEASE. There is a bug | 828 | * |
| 828 | * add with this new functionality that sets R/W bits when | 829 | * "Either or both of R and W MAY be 1 when either the |
| 829 | * neither CDB carries any READ or WRITE datapayloads. | 830 | * Expected Data Transfer Length and/or Bidirectional Read |
| 831 | * Expected Data Transfer Length are 0" | ||
| 832 | * | ||
| 833 | * For this case, go ahead and clear the unnecssary bits | ||
| 834 | * to avoid any confusion with ->data_direction. | ||
| 830 | */ | 835 | */ |
| 831 | if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { | 836 | hdr->flags &= ~ISCSI_FLAG_CMD_READ; |
| 832 | hdr->flags &= ~ISCSI_FLAG_CMD_READ; | 837 | hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; |
| 833 | hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; | ||
| 834 | goto done; | ||
| 835 | } | ||
| 836 | 838 | ||
| 837 | pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" | 839 | pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" |
| 838 | " set when Expected Data Transfer Length is 0 for" | 840 | " set when Expected Data Transfer Length is 0 for" |
| 839 | " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); | 841 | " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]); |
| 840 | return iscsit_add_reject_cmd(cmd, | ||
| 841 | ISCSI_REASON_BOOKMARK_INVALID, buf); | ||
| 842 | } | 842 | } |
| 843 | done: | ||
| 844 | 843 | ||
| 845 | if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && | 844 | if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && |
| 846 | !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { | 845 | !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { |
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index e3318edb233d..1c0088fe9e99 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
| @@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \ | |||
| 474 | \ | 474 | \ |
| 475 | if (!capable(CAP_SYS_ADMIN)) \ | 475 | if (!capable(CAP_SYS_ADMIN)) \ |
| 476 | return -EPERM; \ | 476 | return -EPERM; \ |
| 477 | \ | 477 | if (count >= sizeof(auth->name)) \ |
| 478 | return -EINVAL; \ | ||
| 478 | snprintf(auth->name, sizeof(auth->name), "%s", page); \ | 479 | snprintf(auth->name, sizeof(auth->name), "%s", page); \ |
| 479 | if (!strncmp("NULL", auth->name, 4)) \ | 480 | if (!strncmp("NULL", auth->name, 4)) \ |
| 480 | auth->naf_flags &= ~flags; \ | 481 | auth->naf_flags &= ~flags; \ |
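The configfs change above adds a length check to the auth-attribute store macro: `snprintf()` can never overflow `auth->name`, but it silently truncates, so oversized input is now rejected with -EINVAL instead of being stored in clipped form. A small userspace model of the same guard, using an arbitrary 16-byte buffer:

#include <stdio.h>
#include <string.h>

static int store_value(char dst[16], const char *page, size_t count)
{
        if (count >= 16)
                return -22;             /* -EINVAL */
        snprintf(dst, 16, "%s", page);
        return 0;
}

int main(void)
{
        char buf[16];

        printf("%d\n", store_value(buf, "short", 5));                       /* 0 */
        printf("%d\n", store_value(buf, "way-too-long-password-here", 26)); /* -22 */
        return 0;
}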
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 4eb93b2b6473..e29279e6b577 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
| @@ -1403,11 +1403,6 @@ old_sess_out: | |||
| 1403 | 1403 | ||
| 1404 | out: | 1404 | out: |
| 1405 | stop = kthread_should_stop(); | 1405 | stop = kthread_should_stop(); |
| 1406 | if (!stop && signal_pending(current)) { | ||
| 1407 | spin_lock_bh(&np->np_thread_lock); | ||
| 1408 | stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN); | ||
| 1409 | spin_unlock_bh(&np->np_thread_lock); | ||
| 1410 | } | ||
| 1411 | /* Wait for another socket.. */ | 1406 | /* Wait for another socket.. */ |
| 1412 | if (!stop) | 1407 | if (!stop) |
| 1413 | return 1; | 1408 | return 1; |
| @@ -1415,7 +1410,6 @@ exit: | |||
| 1415 | iscsi_stop_login_thread_timer(np); | 1410 | iscsi_stop_login_thread_timer(np); |
| 1416 | spin_lock_bh(&np->np_thread_lock); | 1411 | spin_lock_bh(&np->np_thread_lock); |
| 1417 | np->np_thread_state = ISCSI_NP_THREAD_EXIT; | 1412 | np->np_thread_state = ISCSI_NP_THREAD_EXIT; |
| 1418 | np->np_thread = NULL; | ||
| 1419 | spin_unlock_bh(&np->np_thread_lock); | 1413 | spin_unlock_bh(&np->np_thread_lock); |
| 1420 | 1414 | ||
| 1421 | return 0; | 1415 | return 0; |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 207b340498a3..d06de84b069b 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
| @@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) | |||
| 1106 | dev->dev_attrib.block_size = block_size; | 1106 | dev->dev_attrib.block_size = block_size; |
| 1107 | pr_debug("dev[%p]: SE Device block_size changed to %u\n", | 1107 | pr_debug("dev[%p]: SE Device block_size changed to %u\n", |
| 1108 | dev, block_size); | 1108 | dev, block_size); |
| 1109 | |||
| 1110 | if (dev->dev_attrib.max_bytes_per_io) | ||
| 1111 | dev->dev_attrib.hw_max_sectors = | ||
| 1112 | dev->dev_attrib.max_bytes_per_io / block_size; | ||
| 1113 | |||
| 1109 | return 0; | 1114 | return 0; |
| 1110 | } | 1115 | } |
| 1111 | 1116 | ||
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 0e34cda3271e..78241a53b555 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
| @@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) | |||
| 66 | pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" | 66 | pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" |
| 67 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, | 67 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, |
| 68 | TARGET_CORE_MOD_VERSION); | 68 | TARGET_CORE_MOD_VERSION); |
| 69 | pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" | 69 | pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n", |
| 70 | " MaxSectors: %u\n", | 70 | hba->hba_id, fd_host->fd_host_id); |
| 71 | hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); | ||
| 72 | 71 | ||
| 73 | return 0; | 72 | return 0; |
| 74 | } | 73 | } |
| @@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev) | |||
| 220 | } | 219 | } |
| 221 | 220 | ||
| 222 | dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; | 221 | dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; |
| 223 | dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; | 222 | dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES; |
| 223 | dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size; | ||
| 224 | dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; | 224 | dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; |
| 225 | 225 | ||
| 226 | if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { | 226 | if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { |
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 37ffc5bd2399..d7772c167685 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h | |||
| @@ -7,7 +7,10 @@ | |||
| 7 | #define FD_DEVICE_QUEUE_DEPTH 32 | 7 | #define FD_DEVICE_QUEUE_DEPTH 32 |
| 8 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 | 8 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 |
| 9 | #define FD_BLOCKSIZE 512 | 9 | #define FD_BLOCKSIZE 512 |
| 10 | #define FD_MAX_SECTORS 2048 | 10 | /* |
| 11 | * Limited by the number of iovecs (2048) per vfs_[writev,readv] call | ||
| 12 | */ | ||
| 13 | #define FD_MAX_BYTES 8388608 | ||
| 11 | 14 | ||
| 12 | #define RRF_EMULATE_CDB 0x01 | 15 | #define RRF_EMULATE_CDB 0x01 |
| 13 | #define RRF_GOT_LBA 0x02 | 16 | #define RRF_GOT_LBA 0x02 |
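Taken together, the three FILEIO hunks (target_core_device.c, target_core_file.c and the header above) replace the fixed FD_MAX_SECTORS cap with a byte cap and re-derive `hw_max_sectors` whenever the block size changes. The 8388608-byte figure matches 2048 iovecs of one page each, assuming 4 KiB pages. Quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int max_bytes_per_io = 2048u * 4096u;  /* = 8388608, FD_MAX_BYTES */

        printf("512B blocks  -> %u sectors\n", max_bytes_per_io / 512);   /* 16384 */
        printf("4096B blocks -> %u sectors\n", max_bytes_per_io / 4096);  /*  2048 */
        return 0;
}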
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index f697f8baec54..2a573de19a9f 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
| @@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
| 278 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 278 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
| 279 | acl->se_tpg = tpg; | 279 | acl->se_tpg = tpg; |
| 280 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | 280 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); |
| 281 | spin_lock_init(&acl->stats_lock); | ||
| 282 | acl->dynamic_node_acl = 1; | 281 | acl->dynamic_node_acl = 1; |
| 283 | 282 | ||
| 284 | tpg->se_tpg_tfo->set_default_node_attributes(acl); | 283 | tpg->se_tpg_tfo->set_default_node_attributes(acl); |
| @@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
| 406 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 405 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
| 407 | acl->se_tpg = tpg; | 406 | acl->se_tpg = tpg; |
| 408 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | 407 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); |
| 409 | spin_lock_init(&acl->stats_lock); | ||
| 410 | 408 | ||
| 411 | tpg->se_tpg_tfo->set_default_node_attributes(acl); | 409 | tpg->se_tpg_tfo->set_default_node_attributes(acl); |
| 412 | 410 | ||
| @@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | |||
| 658 | spin_lock_init(&lun->lun_sep_lock); | 656 | spin_lock_init(&lun->lun_sep_lock); |
| 659 | init_completion(&lun->lun_ref_comp); | 657 | init_completion(&lun->lun_ref_comp); |
| 660 | 658 | ||
| 661 | ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release); | ||
| 662 | if (ret < 0) | ||
| 663 | return ret; | ||
| 664 | |||
| 665 | ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); | 659 | ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); |
| 666 | if (ret < 0) { | 660 | if (ret < 0) |
| 667 | percpu_ref_cancel_init(&lun->lun_ref); | ||
| 668 | return ret; | 661 | return ret; |
| 669 | } | ||
| 670 | 662 | ||
| 671 | return 0; | 663 | return 0; |
| 672 | } | 664 | } |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 268b62768f2b..34aacaaae14a 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
| @@ -93,6 +93,7 @@ struct n_tty_data { | |||
| 93 | size_t canon_head; | 93 | size_t canon_head; |
| 94 | size_t echo_head; | 94 | size_t echo_head; |
| 95 | size_t echo_commit; | 95 | size_t echo_commit; |
| 96 | size_t echo_mark; | ||
| 96 | DECLARE_BITMAP(char_map, 256); | 97 | DECLARE_BITMAP(char_map, 256); |
| 97 | 98 | ||
| 98 | /* private to n_tty_receive_overrun (single-threaded) */ | 99 | /* private to n_tty_receive_overrun (single-threaded) */ |
| @@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata) | |||
| 336 | { | 337 | { |
| 337 | ldata->read_head = ldata->canon_head = ldata->read_tail = 0; | 338 | ldata->read_head = ldata->canon_head = ldata->read_tail = 0; |
| 338 | ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; | 339 | ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; |
| 340 | ldata->echo_mark = 0; | ||
| 339 | ldata->line_start = 0; | 341 | ldata->line_start = 0; |
| 340 | 342 | ||
| 341 | ldata->erasing = 0; | 343 | ldata->erasing = 0; |
| @@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty) | |||
| 787 | size_t head; | 789 | size_t head; |
| 788 | 790 | ||
| 789 | head = ldata->echo_head; | 791 | head = ldata->echo_head; |
| 792 | ldata->echo_mark = head; | ||
| 790 | old = ldata->echo_commit - ldata->echo_tail; | 793 | old = ldata->echo_commit - ldata->echo_tail; |
| 791 | 794 | ||
| 792 | /* Process committed echoes if the accumulated # of bytes | 795 | /* Process committed echoes if the accumulated # of bytes |
| @@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty) | |||
| 811 | size_t echoed; | 814 | size_t echoed; |
| 812 | 815 | ||
| 813 | if ((!L_ECHO(tty) && !L_ECHONL(tty)) || | 816 | if ((!L_ECHO(tty) && !L_ECHONL(tty)) || |
| 814 | ldata->echo_commit == ldata->echo_tail) | 817 | ldata->echo_mark == ldata->echo_tail) |
| 815 | return; | 818 | return; |
| 816 | 819 | ||
| 817 | mutex_lock(&ldata->output_lock); | 820 | mutex_lock(&ldata->output_lock); |
| 821 | ldata->echo_commit = ldata->echo_mark; | ||
| 818 | echoed = __process_echoes(tty); | 822 | echoed = __process_echoes(tty); |
| 819 | mutex_unlock(&ldata->output_lock); | 823 | mutex_unlock(&ldata->output_lock); |
| 820 | 824 | ||
| @@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty) | |||
| 822 | tty->ops->flush_chars(tty); | 826 | tty->ops->flush_chars(tty); |
| 823 | } | 827 | } |
| 824 | 828 | ||
| 829 | /* NB: echo_mark and echo_head should be equivalent here */ | ||
| 825 | static void flush_echoes(struct tty_struct *tty) | 830 | static void flush_echoes(struct tty_struct *tty) |
| 826 | { | 831 | { |
| 827 | struct n_tty_data *ldata = tty->disc_data; | 832 | struct n_tty_data *ldata = tty->disc_data; |
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 4658e3e0ec42..06525f10e364 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
| @@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) | |||
| 96 | if (offset == UART_LCR) { | 96 | if (offset == UART_LCR) { |
| 97 | int tries = 1000; | 97 | int tries = 1000; |
| 98 | while (tries--) { | 98 | while (tries--) { |
| 99 | if (value == p->serial_in(p, UART_LCR)) | 99 | unsigned int lcr = p->serial_in(p, UART_LCR); |
| 100 | if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) | ||
| 100 | return; | 101 | return; |
| 101 | dw8250_force_idle(p); | 102 | dw8250_force_idle(p); |
| 102 | writeb(value, p->membase + (UART_LCR << p->regshift)); | 103 | writeb(value, p->membase + (UART_LCR << p->regshift)); |
| @@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) | |||
| 132 | if (offset == UART_LCR) { | 133 | if (offset == UART_LCR) { |
| 133 | int tries = 1000; | 134 | int tries = 1000; |
| 134 | while (tries--) { | 135 | while (tries--) { |
| 135 | if (value == p->serial_in(p, UART_LCR)) | 136 | unsigned int lcr = p->serial_in(p, UART_LCR); |
| 137 | if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) | ||
| 136 | return; | 138 | return; |
| 137 | dw8250_force_idle(p); | 139 | dw8250_force_idle(p); |
| 138 | writel(value, p->membase + (UART_LCR << p->regshift)); | 140 | writel(value, p->membase + (UART_LCR << p->regshift)); |
| @@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match); | |||
| 455 | static const struct acpi_device_id dw8250_acpi_match[] = { | 457 | static const struct acpi_device_id dw8250_acpi_match[] = { |
| 456 | { "INT33C4", 0 }, | 458 | { "INT33C4", 0 }, |
| 457 | { "INT33C5", 0 }, | 459 | { "INT33C5", 0 }, |
| 460 | { "INT3434", 0 }, | ||
| 461 | { "INT3435", 0 }, | ||
| 458 | { "80860F0A", 0 }, | 462 | { "80860F0A", 0 }, |
| 459 | { }, | 463 | { }, |
| 460 | }; | 464 | }; |
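In the 8250_dw hunks, the LCR write-verify loop now masks off the stick-parity bit before comparing what was written against what reads back, presumably because some DesignWare ports do not reflect that bit faithfully. UART_LCR_SPAR is bit 5 (0x20) in the standard 8250 register layout. A tiny model of the masked comparison:

#include <stdio.h>

#define UART_LCR_SPAR 0x20      /* stick parity */

static int lcr_write_took_effect(unsigned int wrote, unsigned int readback)
{
        return (wrote & ~UART_LCR_SPAR) == (readback & ~UART_LCR_SPAR);
}

int main(void)
{
        printf("%d\n", lcr_write_took_effect(0x03, 0x23));  /* 1: differs only in SPAR */
        printf("%d\n", lcr_write_took_effect(0x03, 0x07));  /* 0: real mismatch */
        return 0;
}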
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index e46e9f3f19b9..f619ad5b5eae 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c | |||
| @@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id) | |||
| 240 | continue; | 240 | continue; |
| 241 | } | 241 | } |
| 242 | 242 | ||
| 243 | #ifdef SUPPORT_SYSRQ | ||
| 243 | /* | 244 | /* |
| 244 | * uart_handle_sysrq_char() doesn't work if | 245 | * uart_handle_sysrq_char() doesn't work if |
| 245 | * spinlocked, for some reason | 246 | * spinlocked, for some reason |
| @@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id) | |||
| 253 | } | 254 | } |
| 254 | spin_lock(&port->lock); | 255 | spin_lock(&port->lock); |
| 255 | } | 256 | } |
| 257 | #endif | ||
| 256 | 258 | ||
| 257 | port->icount.rx++; | 259 | port->icount.rx++; |
| 258 | 260 | ||
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 22fad8ad5ac2..d8a55e87877f 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c | |||
| @@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem) | |||
| 86 | return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); | 86 | return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | /* | ||
| 90 | * ldsem_cmpxchg() updates @*old with the last-known sem->count value. | ||
| 91 | * Returns 1 if count was successfully changed; @*old will have @new value. | ||
| 92 | * Returns 0 if count was not changed; @*old will have most recent sem->count | ||
| 93 | */ | ||
| 89 | static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) | 94 | static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) |
| 90 | { | 95 | { |
| 91 | long tmp = *old; | 96 | long tmp = atomic_long_cmpxchg(&sem->count, *old, new); |
| 92 | *old = atomic_long_cmpxchg(&sem->count, *old, new); | 97 | if (tmp == *old) { |
| 93 | return *old == tmp; | 98 | *old = new; |
| 99 | return 1; | ||
| 100 | } else { | ||
| 101 | *old = tmp; | ||
| 102 | return 0; | ||
| 103 | } | ||
| 94 | } | 104 | } |
| 95 | 105 | ||
| 96 | /* | 106 | /* |
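With this rewrite, `ldsem_cmpxchg()` behaves like a conventional compare-exchange: on success `*old` is updated to the new value, on failure it is refreshed with the current count so the caller can simply retry. C11's `atomic_compare_exchange_strong()` has the same contract, which the sketch below uses as a userspace stand-in for the kind of retry loop callers build on this helper:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        atomic_long count = 0;
        long old = atomic_load(&count);

        /* try to add 1 until no other writer races us */
        while (!atomic_compare_exchange_strong(&count, &old, old + 1))
                ;       /* on failure 'old' was refreshed, just retry */

        printf("count = %ld\n", atomic_load(&count));
        return 0;
}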
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 5d8981c5235e..6e73f8cd60e5 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c | |||
| @@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev) | |||
| 642 | : CI_ROLE_GADGET; | 642 | : CI_ROLE_GADGET; |
| 643 | } | 643 | } |
| 644 | 644 | ||
| 645 | /* only update vbus status for peripheral */ | ||
| 646 | if (ci->role == CI_ROLE_GADGET) | ||
| 647 | ci_handle_vbus_change(ci); | ||
| 648 | |||
| 645 | ret = ci_role_start(ci, ci->role); | 649 | ret = ci_role_start(ci, ci->role); |
| 646 | if (ret) { | 650 | if (ret) { |
| 647 | dev_err(dev, "can't start %s role\n", ci_role(ci)->name); | 651 | dev_err(dev, "can't start %s role\n", ci_role(ci)->name); |
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 59e6020ea753..526cd77563d8 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c | |||
| @@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci) | |||
| 88 | return ret; | 88 | return ret; |
| 89 | 89 | ||
| 90 | disable_reg: | 90 | disable_reg: |
| 91 | regulator_disable(ci->platdata->reg_vbus); | 91 | if (ci->platdata->reg_vbus) |
| 92 | regulator_disable(ci->platdata->reg_vbus); | ||
| 92 | 93 | ||
| 93 | put_hcd: | 94 | put_hcd: |
| 94 | usb_put_hcd(hcd); | 95 | usb_put_hcd(hcd); |
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index b34c81969cba..69d20fbb38a2 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
| @@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci) | |||
| 1795 | pm_runtime_no_callbacks(&ci->gadget.dev); | 1795 | pm_runtime_no_callbacks(&ci->gadget.dev); |
| 1796 | pm_runtime_enable(&ci->gadget.dev); | 1796 | pm_runtime_enable(&ci->gadget.dev); |
| 1797 | 1797 | ||
| 1798 | /* Update ci->vbus_active */ | ||
| 1799 | ci_handle_vbus_change(ci); | ||
| 1800 | |||
| 1801 | return retval; | 1798 | return retval; |
| 1802 | 1799 | ||
| 1803 | destroy_eps: | 1800 | destroy_eps: |
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 4d387596f3f0..0b23a8639311 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
| @@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on) | |||
| 854 | { | 854 | { |
| 855 | /* need autopm_get/put here to ensure the usbcore sees the new value */ | 855 | /* need autopm_get/put here to ensure the usbcore sees the new value */ |
| 856 | int rv = usb_autopm_get_interface(intf); | 856 | int rv = usb_autopm_get_interface(intf); |
| 857 | if (rv < 0) | ||
| 858 | goto err; | ||
| 859 | 857 | ||
| 860 | intf->needs_remote_wakeup = on; | 858 | intf->needs_remote_wakeup = on; |
| 861 | usb_autopm_put_interface(intf); | 859 | if (!rv) |
| 862 | err: | 860 | usb_autopm_put_interface(intf); |
| 863 | return rv; | 861 | return 0; |
| 864 | } | 862 | } |
| 865 | 863 | ||
| 866 | static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) | 864 | static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) |
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 74f9cf02da07..a49217ae3533 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
| @@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev) | |||
| 455 | if (IS_ERR(regs)) | 455 | if (IS_ERR(regs)) |
| 456 | return PTR_ERR(regs); | 456 | return PTR_ERR(regs); |
| 457 | 457 | ||
| 458 | usb_phy_set_suspend(dwc->usb2_phy, 0); | ||
| 459 | usb_phy_set_suspend(dwc->usb3_phy, 0); | ||
| 460 | |||
| 461 | spin_lock_init(&dwc->lock); | 458 | spin_lock_init(&dwc->lock); |
| 462 | platform_set_drvdata(pdev, dwc); | 459 | platform_set_drvdata(pdev, dwc); |
| 463 | 460 | ||
| @@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev) | |||
| 488 | goto err0; | 485 | goto err0; |
| 489 | } | 486 | } |
| 490 | 487 | ||
| 488 | usb_phy_set_suspend(dwc->usb2_phy, 0); | ||
| 489 | usb_phy_set_suspend(dwc->usb3_phy, 0); | ||
| 490 | |||
| 491 | ret = dwc3_event_buffers_setup(dwc); | 491 | ret = dwc3_event_buffers_setup(dwc); |
| 492 | if (ret) { | 492 | if (ret) { |
| 493 | dev_err(dwc->dev, "failed to setup event buffers\n"); | 493 | dev_err(dwc->dev, "failed to setup event buffers\n"); |
| @@ -569,6 +569,8 @@ err2: | |||
| 569 | dwc3_event_buffers_cleanup(dwc); | 569 | dwc3_event_buffers_cleanup(dwc); |
| 570 | 570 | ||
| 571 | err1: | 571 | err1: |
| 572 | usb_phy_set_suspend(dwc->usb2_phy, 1); | ||
| 573 | usb_phy_set_suspend(dwc->usb3_phy, 1); | ||
| 572 | dwc3_core_exit(dwc); | 574 | dwc3_core_exit(dwc); |
| 573 | 575 | ||
| 574 | err0: | 576 | err0: |
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 418444ebb1b8..8c356af79409 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c | |||
| @@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, | |||
| 136 | struct ohci_hcd *ohci; | 136 | struct ohci_hcd *ohci; |
| 137 | int retval; | 137 | int retval; |
| 138 | struct usb_hcd *hcd = NULL; | 138 | struct usb_hcd *hcd = NULL; |
| 139 | 139 | struct device *dev = &pdev->dev; | |
| 140 | if (pdev->num_resources != 2) { | 140 | struct resource *res; |
| 141 | pr_debug("hcd probe: invalid num_resources"); | 141 | int irq; |
| 142 | return -ENODEV; | 142 | |
| 143 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 144 | if (!res) { | ||
| 145 | dev_dbg(dev, "hcd probe: missing memory resource\n"); | ||
| 146 | return -ENXIO; | ||
| 143 | } | 147 | } |
| 144 | 148 | ||
| 145 | if ((pdev->resource[0].flags != IORESOURCE_MEM) | 149 | irq = platform_get_irq(pdev, 0); |
| 146 | || (pdev->resource[1].flags != IORESOURCE_IRQ)) { | 150 | if (irq < 0) { |
| 147 | pr_debug("hcd probe: invalid resource type\n"); | 151 | dev_dbg(dev, "hcd probe: missing irq resource\n"); |
| 148 | return -ENODEV; | 152 | return irq; |
| 149 | } | 153 | } |
| 150 | 154 | ||
| 151 | hcd = usb_create_hcd(driver, &pdev->dev, "at91"); | 155 | hcd = usb_create_hcd(driver, &pdev->dev, "at91"); |
| 152 | if (!hcd) | 156 | if (!hcd) |
| 153 | return -ENOMEM; | 157 | return -ENOMEM; |
| 154 | hcd->rsrc_start = pdev->resource[0].start; | 158 | hcd->rsrc_start = res->start; |
| 155 | hcd->rsrc_len = resource_size(&pdev->resource[0]); | 159 | hcd->rsrc_len = resource_size(res); |
| 156 | 160 | ||
| 157 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { | 161 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { |
| 158 | pr_debug("request_mem_region failed\n"); | 162 | pr_debug("request_mem_region failed\n"); |
| @@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, | |||
| 199 | ohci->num_ports = board->ports; | 203 | ohci->num_ports = board->ports; |
| 200 | at91_start_hc(pdev); | 204 | at91_start_hc(pdev); |
| 201 | 205 | ||
| 202 | retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); | 206 | retval = usb_add_hcd(hcd, irq, IRQF_SHARED); |
| 203 | if (retval == 0) | 207 | if (retval == 0) |
| 204 | return retval; | 208 | return retval; |
| 205 | 209 | ||
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index b8dffd59eb25..73f5208714a4 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 128 | * any other sleep) on Haswell machines with LPT and LPT-LP | 128 | * any other sleep) on Haswell machines with LPT and LPT-LP |
| 129 | * with the new Intel BIOS | 129 | * with the new Intel BIOS |
| 130 | */ | 130 | */ |
| 131 | xhci->quirks |= XHCI_SPURIOUS_WAKEUP; | 131 | /* Limit the quirk to only known vendors, as this triggers |
| 132 | * yet another BIOS bug on some other machines | ||
| 133 | * https://bugzilla.kernel.org/show_bug.cgi?id=66171 | ||
| 134 | */ | ||
| 135 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) | ||
| 136 | xhci->quirks |= XHCI_SPURIOUS_WAKEUP; | ||
| 132 | } | 137 | } |
| 133 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && | 138 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && |
| 134 | pdev->device == PCI_DEVICE_ID_ASROCK_P67) { | 139 | pdev->device == PCI_DEVICE_ID_ASROCK_P67) { |
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 08e2f39027ec..2b41c636a52a 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig | |||
| @@ -19,8 +19,9 @@ config AB8500_USB | |||
| 19 | in host mode, low speed. | 19 | in host mode, low speed. |
| 20 | 20 | ||
| 21 | config FSL_USB2_OTG | 21 | config FSL_USB2_OTG |
| 22 | bool "Freescale USB OTG Transceiver Driver" | 22 | tristate "Freescale USB OTG Transceiver Driver" |
| 23 | depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME | 23 | depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME |
| 24 | depends on USB | ||
| 24 | select USB_OTG | 25 | select USB_OTG |
| 25 | select USB_PHY | 26 | select USB_PHY |
| 26 | help | 27 | help |
| @@ -29,6 +30,7 @@ config FSL_USB2_OTG | |||
| 29 | config ISP1301_OMAP | 30 | config ISP1301_OMAP |
| 30 | tristate "Philips ISP1301 with OMAP OTG" | 31 | tristate "Philips ISP1301 with OMAP OTG" |
| 31 | depends on I2C && ARCH_OMAP_OTG | 32 | depends on I2C && ARCH_OMAP_OTG |
| 33 | depends on USB | ||
| 32 | select USB_PHY | 34 | select USB_PHY |
| 33 | help | 35 | help |
| 34 | If you say yes here you get support for the Philips ISP1301 | 36 | If you say yes here you get support for the Philips ISP1301 |
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 82232acf1ab6..bbe4f8e6e8d7 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c | |||
| @@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy, | |||
| 876 | 876 | ||
| 877 | tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, | 877 | tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, |
| 878 | resource_size(res)); | 878 | resource_size(res)); |
| 879 | if (!tegra_phy->regs) { | 879 | if (!tegra_phy->pad_regs) { |
| 880 | dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); | 880 | dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); |
| 881 | return -ENOMEM; | 881 | return -ENOMEM; |
| 882 | } | 882 | } |
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index 30e8a61552d4..bad57ce77ba5 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c | |||
| @@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module, | |||
| 127 | 127 | ||
| 128 | static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) | 128 | static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) |
| 129 | { | 129 | { |
| 130 | u8 data, ret = 0; | 130 | u8 data; |
| 131 | int ret; | ||
| 131 | 132 | ||
| 132 | ret = twl_i2c_read_u8(module, &data, address); | 133 | ret = twl_i2c_read_u8(module, &data, address); |
| 133 | if (ret >= 0) | 134 | if (ret >= 0) |
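The phy-twl6030-usb hunk above is a signedness fix: twl_i2c_read_u8() returns a negative errno in an int, and storing that result in a u8 makes the following "ret >= 0" test always pass. A tiny standalone C illustration of the truncation (userspace, not kernel code):

#include <errno.h>
#include <stdio.h>

int main(void)
{
	unsigned char as_u8 = (unsigned char)-EIO;	/* truncates to a small positive value */
	int as_int = -EIO;				/* keeps its sign */

	printf("-EIO stored in a u8 : %u  (>= 0, the error is missed)\n", as_u8);
	printf("-EIO stored in an int: %d  (< 0, the error path runs)\n", as_int);
	return 0;
}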
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 496b7e39d5be..cc7a24154490 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 251 | #define ZTE_PRODUCT_MF628 0x0015 | 251 | #define ZTE_PRODUCT_MF628 0x0015 |
| 252 | #define ZTE_PRODUCT_MF626 0x0031 | 252 | #define ZTE_PRODUCT_MF626 0x0031 |
| 253 | #define ZTE_PRODUCT_MC2718 0xffe8 | 253 | #define ZTE_PRODUCT_MC2718 0xffe8 |
| 254 | #define ZTE_PRODUCT_AC2726 0xfff1 | ||
| 254 | 255 | ||
| 255 | #define BENQ_VENDOR_ID 0x04a5 | 256 | #define BENQ_VENDOR_ID 0x04a5 |
| 256 | #define BENQ_PRODUCT_H10 0x4068 | 257 | #define BENQ_PRODUCT_H10 0x4068 |
| @@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = { | |||
| 1453 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, | 1454 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, |
| 1454 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, | 1455 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, |
| 1455 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, | 1456 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, |
| 1457 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, | ||
| 1456 | 1458 | ||
| 1457 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, | 1459 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, |
| 1458 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, | 1460 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, |
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c index fca4c752a4ed..eae2c873b39f 100644 --- a/drivers/usb/serial/zte_ev.c +++ b/drivers/usb/serial/zte_ev.c | |||
| @@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = { | |||
| 281 | { USB_DEVICE(0x19d2, 0xfffd) }, | 281 | { USB_DEVICE(0x19d2, 0xfffd) }, |
| 282 | { USB_DEVICE(0x19d2, 0xfffc) }, | 282 | { USB_DEVICE(0x19d2, 0xfffc) }, |
| 283 | { USB_DEVICE(0x19d2, 0xfffb) }, | 283 | { USB_DEVICE(0x19d2, 0xfffb) }, |
| 284 | /* AC2726, AC8710_V3 */ | 284 | /* AC8710_V3 */ |
| 285 | { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) }, | ||
| 286 | { USB_DEVICE(0x19d2, 0xfff6) }, | 285 | { USB_DEVICE(0x19d2, 0xfff6) }, |
| 287 | { USB_DEVICE(0x19d2, 0xfff7) }, | 286 | { USB_DEVICE(0x19d2, 0xfff7) }, |
| 288 | { USB_DEVICE(0x19d2, 0xfff8) }, | 287 | { USB_DEVICE(0x19d2, 0xfff8) }, |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index c444654fc33f..5c4a95b516cf 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
| @@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb) | |||
| 285 | { | 285 | { |
| 286 | __le32 actual = cpu_to_le32(vb->num_pages); | 286 | __le32 actual = cpu_to_le32(vb->num_pages); |
| 287 | 287 | ||
| 288 | virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages, | 288 | virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual, |
| 289 | &actual); | 289 | &actual); |
| 290 | } | 290 | } |
| 291 | 291 | ||
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 55ea73f7c70b..4c02e2b94103 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
| @@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages) | |||
| 350 | 350 | ||
| 351 | pfn = page_to_pfn(page); | 351 | pfn = page_to_pfn(page); |
| 352 | 352 | ||
| 353 | set_phys_to_machine(pfn, frame_list[i]); | ||
| 354 | |||
| 355 | #ifdef CONFIG_XEN_HAVE_PVMMU | 353 | #ifdef CONFIG_XEN_HAVE_PVMMU |
| 356 | /* Link back into the page tables if not highmem. */ | 354 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
| 357 | if (xen_pv_domain() && !PageHighMem(page)) { | 355 | set_phys_to_machine(pfn, frame_list[i]); |
| 358 | int ret; | 356 | |
| 359 | ret = HYPERVISOR_update_va_mapping( | 357 | /* Link back into the page tables if not highmem. */ |
| 360 | (unsigned long)__va(pfn << PAGE_SHIFT), | 358 | if (!PageHighMem(page)) { |
| 361 | mfn_pte(frame_list[i], PAGE_KERNEL), | 359 | int ret; |
| 362 | 0); | 360 | ret = HYPERVISOR_update_va_mapping( |
| 363 | BUG_ON(ret); | 361 | (unsigned long)__va(pfn << PAGE_SHIFT), |
| 362 | mfn_pte(frame_list[i], PAGE_KERNEL), | ||
| 363 | 0); | ||
| 364 | BUG_ON(ret); | ||
| 365 | } | ||
| 364 | } | 366 | } |
| 365 | #endif | 367 | #endif |
| 366 | 368 | ||
| @@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
| 378 | enum bp_state state = BP_DONE; | 380 | enum bp_state state = BP_DONE; |
| 379 | unsigned long pfn, i; | 381 | unsigned long pfn, i; |
| 380 | struct page *page; | 382 | struct page *page; |
| 381 | struct page *scratch_page; | ||
| 382 | int ret; | 383 | int ret; |
| 383 | struct xen_memory_reservation reservation = { | 384 | struct xen_memory_reservation reservation = { |
| 384 | .address_bits = 0, | 385 | .address_bits = 0, |
| @@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
| 411 | 412 | ||
| 412 | scrub_page(page); | 413 | scrub_page(page); |
| 413 | 414 | ||
| 415 | #ifdef CONFIG_XEN_HAVE_PVMMU | ||
| 414 | /* | 416 | /* |
| 415 | * Ballooned out frames are effectively replaced with | 417 | * Ballooned out frames are effectively replaced with |
| 416 | * a scratch frame. Ensure direct mappings and the | 418 | * a scratch frame. Ensure direct mappings and the |
| 417 | * p2m are consistent. | 419 | * p2m are consistent. |
| 418 | */ | 420 | */ |
| 419 | scratch_page = get_balloon_scratch_page(); | ||
| 420 | #ifdef CONFIG_XEN_HAVE_PVMMU | ||
| 421 | if (xen_pv_domain() && !PageHighMem(page)) { | ||
| 422 | ret = HYPERVISOR_update_va_mapping( | ||
| 423 | (unsigned long)__va(pfn << PAGE_SHIFT), | ||
| 424 | pfn_pte(page_to_pfn(scratch_page), | ||
| 425 | PAGE_KERNEL_RO), 0); | ||
| 426 | BUG_ON(ret); | ||
| 427 | } | ||
| 428 | #endif | ||
| 429 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | 421 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
| 430 | unsigned long p; | 422 | unsigned long p; |
| 423 | struct page *scratch_page = get_balloon_scratch_page(); | ||
| 424 | |||
| 425 | if (!PageHighMem(page)) { | ||
| 426 | ret = HYPERVISOR_update_va_mapping( | ||
| 427 | (unsigned long)__va(pfn << PAGE_SHIFT), | ||
| 428 | pfn_pte(page_to_pfn(scratch_page), | ||
| 429 | PAGE_KERNEL_RO), 0); | ||
| 430 | BUG_ON(ret); | ||
| 431 | } | ||
| 431 | p = page_to_pfn(scratch_page); | 432 | p = page_to_pfn(scratch_page); |
| 432 | __set_phys_to_machine(pfn, pfn_to_mfn(p)); | 433 | __set_phys_to_machine(pfn, pfn_to_mfn(p)); |
| 434 | |||
| 435 | put_balloon_scratch_page(); | ||
| 433 | } | 436 | } |
| 434 | put_balloon_scratch_page(); | 437 | #endif |
| 435 | 438 | ||
| 436 | balloon_append(pfn_to_page(pfn)); | 439 | balloon_append(pfn_to_page(pfn)); |
| 437 | } | 440 | } |
| @@ -627,15 +630,17 @@ static int __init balloon_init(void) | |||
| 627 | if (!xen_domain()) | 630 | if (!xen_domain()) |
| 628 | return -ENODEV; | 631 | return -ENODEV; |
| 629 | 632 | ||
| 630 | for_each_online_cpu(cpu) | 633 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
| 631 | { | 634 | for_each_online_cpu(cpu) |
| 632 | per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); | 635 | { |
| 633 | if (per_cpu(balloon_scratch_page, cpu) == NULL) { | 636 | per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); |
| 634 | pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); | 637 | if (per_cpu(balloon_scratch_page, cpu) == NULL) { |
| 635 | return -ENOMEM; | 638 | pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); |
| 639 | return -ENOMEM; | ||
| 640 | } | ||
| 636 | } | 641 | } |
| 642 | register_cpu_notifier(&balloon_cpu_notifier); | ||
| 637 | } | 643 | } |
| 638 | register_cpu_notifier(&balloon_cpu_notifier); | ||
| 639 | 644 | ||
| 640 | pr_info("Initialising balloon driver\n"); | 645 | pr_info("Initialising balloon driver\n"); |
| 641 | 646 | ||
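Reading aid for the xen/balloon hunks above: the PV-only bookkeeping (p2m updates, kernel VA remapping, and the per-CPU scratch pages) is now done only when the guest is not auto-translated, since HVM/PVH domains have their pseudo-physical-to-machine mapping maintained by the hypervisor and have nothing to fix up. The guard, shown as a simplified orientation fragment rather than verbatim patch code:

#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_phys_to_machine(pfn, frame_list[i]);	/* update the p2m */
		if (!PageHighMem(page)) {
			int ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL), 0);
			BUG_ON(ret);	/* relink the lowmem 1:1 mapping */
		}
	}
#endif
	/* auto-translated (HVM/PVH) guest: nothing to do here */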
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 028387192b60..aa846a48f400 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
| @@ -1176,7 +1176,8 @@ static int gnttab_setup(void) | |||
| 1176 | gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, | 1176 | gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, |
| 1177 | PAGE_SIZE * max_nr_gframes); | 1177 | PAGE_SIZE * max_nr_gframes); |
| 1178 | if (gnttab_shared.addr == NULL) { | 1178 | if (gnttab_shared.addr == NULL) { |
| 1179 | pr_warn("Failed to ioremap gnttab share frames!\n"); | 1179 | pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n", |
| 1180 | xen_hvm_resume_frames); | ||
| 1180 | return -ENOMEM; | 1181 | return -ENOMEM; |
| 1181 | } | 1182 | } |
| 1182 | } | 1183 | } |
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 8e74590fa1bb..569a13b9e856 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
| @@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma) | |||
| 533 | { | 533 | { |
| 534 | struct page **pages = vma->vm_private_data; | 534 | struct page **pages = vma->vm_private_data; |
| 535 | int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 535 | int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
| 536 | int rc; | ||
| 536 | 537 | ||
| 537 | if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) | 538 | if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) |
| 538 | return; | 539 | return; |
| 539 | 540 | ||
| 540 | xen_unmap_domain_mfn_range(vma, numpgs, pages); | 541 | rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); |
| 541 | free_xenballooned_pages(numpgs, pages); | 542 | if (rc == 0) |
| 543 | free_xenballooned_pages(numpgs, pages); | ||
| 544 | else | ||
| 545 | pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n", | ||
| 546 | numpgs, rc); | ||
| 542 | kfree(pages); | 547 | kfree(pages); |
| 543 | } | 548 | } |
| 544 | 549 | ||
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
| @@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx) | |||
| 244 | int i; | 244 | int i; |
| 245 | 245 | ||
| 246 | for (i = 0; i < ctx->nr_pages; i++) { | 246 | for (i = 0; i < ctx->nr_pages; i++) { |
| 247 | struct page *page; | ||
| 247 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, | 248 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, |
| 248 | page_count(ctx->ring_pages[i])); | 249 | page_count(ctx->ring_pages[i])); |
| 249 | put_page(ctx->ring_pages[i]); | 250 | page = ctx->ring_pages[i]; |
| 251 | if (!page) | ||
| 252 | continue; | ||
| 253 | ctx->ring_pages[i] = NULL; | ||
| 254 | put_page(page); | ||
| 250 | } | 255 | } |
| 251 | 256 | ||
| 252 | put_aio_ring_file(ctx); | 257 | put_aio_ring_file(ctx); |
| @@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, | |||
| 280 | unsigned long flags; | 285 | unsigned long flags; |
| 281 | int rc; | 286 | int rc; |
| 282 | 287 | ||
| 288 | rc = 0; | ||
| 289 | |||
| 290 | /* Make sure the old page hasn't already been changed */ | ||
| 291 | spin_lock(&mapping->private_lock); | ||
| 292 | ctx = mapping->private_data; | ||
| 293 | if (ctx) { | ||
| 294 | pgoff_t idx; | ||
| 295 | spin_lock_irqsave(&ctx->completion_lock, flags); | ||
| 296 | idx = old->index; | ||
| 297 | if (idx < (pgoff_t)ctx->nr_pages) { | ||
| 298 | if (ctx->ring_pages[idx] != old) | ||
| 299 | rc = -EAGAIN; | ||
| 300 | } else | ||
| 301 | rc = -EINVAL; | ||
| 302 | spin_unlock_irqrestore(&ctx->completion_lock, flags); | ||
| 303 | } else | ||
| 304 | rc = -EINVAL; | ||
| 305 | spin_unlock(&mapping->private_lock); | ||
| 306 | |||
| 307 | if (rc != 0) | ||
| 308 | return rc; | ||
| 309 | |||
| 283 | /* Writeback must be complete */ | 310 | /* Writeback must be complete */ |
| 284 | BUG_ON(PageWriteback(old)); | 311 | BUG_ON(PageWriteback(old)); |
| 285 | put_page(old); | 312 | get_page(new); |
| 286 | 313 | ||
| 287 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode); | 314 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1); |
| 288 | if (rc != MIGRATEPAGE_SUCCESS) { | 315 | if (rc != MIGRATEPAGE_SUCCESS) { |
| 289 | get_page(old); | 316 | put_page(new); |
| 290 | return rc; | 317 | return rc; |
| 291 | } | 318 | } |
| 292 | 319 | ||
| 293 | get_page(new); | ||
| 294 | |||
| 295 | /* We can potentially race against kioctx teardown here. Use the | 320 | /* We can potentially race against kioctx teardown here. Use the |
| 296 | * address_space's private data lock to protect the mapping's | 321 | * address_space's private data lock to protect the mapping's |
| 297 | * private_data. | 322 | * private_data. |
| @@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, | |||
| 303 | spin_lock_irqsave(&ctx->completion_lock, flags); | 328 | spin_lock_irqsave(&ctx->completion_lock, flags); |
| 304 | migrate_page_copy(new, old); | 329 | migrate_page_copy(new, old); |
| 305 | idx = old->index; | 330 | idx = old->index; |
| 306 | if (idx < (pgoff_t)ctx->nr_pages) | 331 | if (idx < (pgoff_t)ctx->nr_pages) { |
| 307 | ctx->ring_pages[idx] = new; | 332 | /* And only do the move if things haven't changed */ |
| 333 | if (ctx->ring_pages[idx] == old) | ||
| 334 | ctx->ring_pages[idx] = new; | ||
| 335 | else | ||
| 336 | rc = -EAGAIN; | ||
| 337 | } else | ||
| 338 | rc = -EINVAL; | ||
| 308 | spin_unlock_irqrestore(&ctx->completion_lock, flags); | 339 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 309 | } else | 340 | } else |
| 310 | rc = -EBUSY; | 341 | rc = -EBUSY; |
| 311 | spin_unlock(&mapping->private_lock); | 342 | spin_unlock(&mapping->private_lock); |
| 312 | 343 | ||
| 344 | if (rc == MIGRATEPAGE_SUCCESS) | ||
| 345 | put_page(old); | ||
| 346 | else | ||
| 347 | put_page(new); | ||
| 348 | |||
| 313 | return rc; | 349 | return rc; |
| 314 | } | 350 | } |
| 315 | #endif | 351 | #endif |
| @@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
| 326 | struct aio_ring *ring; | 362 | struct aio_ring *ring; |
| 327 | unsigned nr_events = ctx->max_reqs; | 363 | unsigned nr_events = ctx->max_reqs; |
| 328 | struct mm_struct *mm = current->mm; | 364 | struct mm_struct *mm = current->mm; |
| 329 | unsigned long size, populate; | 365 | unsigned long size, unused; |
| 330 | int nr_pages; | 366 | int nr_pages; |
| 331 | int i; | 367 | int i; |
| 332 | struct file *file; | 368 | struct file *file; |
| @@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
| 347 | return -EAGAIN; | 383 | return -EAGAIN; |
| 348 | } | 384 | } |
| 349 | 385 | ||
| 386 | ctx->aio_ring_file = file; | ||
| 387 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) | ||
| 388 | / sizeof(struct io_event); | ||
| 389 | |||
| 390 | ctx->ring_pages = ctx->internal_pages; | ||
| 391 | if (nr_pages > AIO_RING_PAGES) { | ||
| 392 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), | ||
| 393 | GFP_KERNEL); | ||
| 394 | if (!ctx->ring_pages) { | ||
| 395 | put_aio_ring_file(ctx); | ||
| 396 | return -ENOMEM; | ||
| 397 | } | ||
| 398 | } | ||
| 399 | |||
| 350 | for (i = 0; i < nr_pages; i++) { | 400 | for (i = 0; i < nr_pages; i++) { |
| 351 | struct page *page; | 401 | struct page *page; |
| 352 | page = find_or_create_page(file->f_inode->i_mapping, | 402 | page = find_or_create_page(file->f_inode->i_mapping, |
| @@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
| 358 | SetPageUptodate(page); | 408 | SetPageUptodate(page); |
| 359 | SetPageDirty(page); | 409 | SetPageDirty(page); |
| 360 | unlock_page(page); | 410 | unlock_page(page); |
| 411 | |||
| 412 | ctx->ring_pages[i] = page; | ||
| 361 | } | 413 | } |
| 362 | ctx->aio_ring_file = file; | 414 | ctx->nr_pages = i; |
| 363 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) | ||
| 364 | / sizeof(struct io_event); | ||
| 365 | 415 | ||
| 366 | ctx->ring_pages = ctx->internal_pages; | 416 | if (unlikely(i != nr_pages)) { |
| 367 | if (nr_pages > AIO_RING_PAGES) { | 417 | aio_free_ring(ctx); |
| 368 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), | 418 | return -EAGAIN; |
| 369 | GFP_KERNEL); | ||
| 370 | if (!ctx->ring_pages) { | ||
| 371 | put_aio_ring_file(ctx); | ||
| 372 | return -ENOMEM; | ||
| 373 | } | ||
| 374 | } | 419 | } |
| 375 | 420 | ||
| 376 | ctx->mmap_size = nr_pages * PAGE_SIZE; | 421 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
| @@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
| 379 | down_write(&mm->mmap_sem); | 424 | down_write(&mm->mmap_sem); |
| 380 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, | 425 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, |
| 381 | PROT_READ | PROT_WRITE, | 426 | PROT_READ | PROT_WRITE, |
| 382 | MAP_SHARED | MAP_POPULATE, 0, &populate); | 427 | MAP_SHARED, 0, &unused); |
| 428 | up_write(&mm->mmap_sem); | ||
| 383 | if (IS_ERR((void *)ctx->mmap_base)) { | 429 | if (IS_ERR((void *)ctx->mmap_base)) { |
| 384 | up_write(&mm->mmap_sem); | ||
| 385 | ctx->mmap_size = 0; | 430 | ctx->mmap_size = 0; |
| 386 | aio_free_ring(ctx); | 431 | aio_free_ring(ctx); |
| 387 | return -EAGAIN; | 432 | return -EAGAIN; |
| @@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
| 389 | 434 | ||
| 390 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); | 435 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
| 391 | 436 | ||
| 392 | /* We must do this while still holding mmap_sem for write, as we | ||
| 393 | * need to be protected against userspace attempting to mremap() | ||
| 394 | * or munmap() the ring buffer. | ||
| 395 | */ | ||
| 396 | ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, | ||
| 397 | 1, 0, ctx->ring_pages, NULL); | ||
| 398 | |||
| 399 | /* Dropping the reference here is safe as the page cache will hold | ||
| 400 | * onto the pages for us. It is also required so that page migration | ||
| 401 | * can unmap the pages and get the right reference count. | ||
| 402 | */ | ||
| 403 | for (i = 0; i < ctx->nr_pages; i++) | ||
| 404 | put_page(ctx->ring_pages[i]); | ||
| 405 | |||
| 406 | up_write(&mm->mmap_sem); | ||
| 407 | |||
| 408 | if (unlikely(ctx->nr_pages != nr_pages)) { | ||
| 409 | aio_free_ring(ctx); | ||
| 410 | return -EAGAIN; | ||
| 411 | } | ||
| 412 | |||
| 413 | ctx->user_id = ctx->mmap_base; | 437 | ctx->user_id = ctx->mmap_base; |
| 414 | ctx->nr_events = nr_events; /* trusted copy */ | 438 | ctx->nr_events = nr_events; /* trusted copy */ |
| 415 | 439 | ||
| @@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
| 652 | aio_nr += ctx->max_reqs; | 676 | aio_nr += ctx->max_reqs; |
| 653 | spin_unlock(&aio_nr_lock); | 677 | spin_unlock(&aio_nr_lock); |
| 654 | 678 | ||
| 655 | percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ | 679 | percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ |
| 680 | percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ | ||
| 656 | 681 | ||
| 657 | err = ioctx_add_table(ctx, mm); | 682 | err = ioctx_add_table(ctx, mm); |
| 658 | if (err) | 683 | if (err) |
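The fs/aio.c changes above make ring-page migration safe against a racing kioctx teardown: the ring slot is validated before the copy and again under completion_lock before the swap is committed, the reference on the new page is taken up front, and the old page is only dropped on success. A small userspace model of that check/recheck pattern (a simplified assumption, not the kernel code):

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *ring_slot;			/* stands in for ctx->ring_pages[idx] */

/* Returns 0 on success, -EAGAIN if the slot changed under us. */
int migrate_slot(void *old, void *new)
{
	int rc = 0;

	pthread_mutex_lock(&lock);
	if (ring_slot != old)		/* early check: did teardown race us? */
		rc = -EAGAIN;
	pthread_mutex_unlock(&lock);
	if (rc)
		return rc;

	/* ... the expensive copy of old into new happens here, unlocked ... */

	pthread_mutex_lock(&lock);
	if (ring_slot == old)		/* recheck before committing the swap */
		ring_slot = new;
	else
		rc = -EAGAIN;		/* caller retries, nothing was changed */
	pthread_mutex_unlock(&lock);
	return rc;
}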
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 1e561c059539..ec3ba43b9faa 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
| @@ -210,9 +210,13 @@ static int readpage_nounlock(struct file *filp, struct page *page) | |||
| 210 | if (err < 0) { | 210 | if (err < 0) { |
| 211 | SetPageError(page); | 211 | SetPageError(page); |
| 212 | goto out; | 212 | goto out; |
| 213 | } else if (err < PAGE_CACHE_SIZE) { | 213 | } else { |
| 214 | if (err < PAGE_CACHE_SIZE) { | ||
| 214 | /* zero fill remainder of page */ | 215 | /* zero fill remainder of page */ |
| 215 | zero_user_segment(page, err, PAGE_CACHE_SIZE); | 216 | zero_user_segment(page, err, PAGE_CACHE_SIZE); |
| 217 | } else { | ||
| 218 | flush_dcache_page(page); | ||
| 219 | } | ||
| 216 | } | 220 | } |
| 217 | SetPageUptodate(page); | 221 | SetPageUptodate(page); |
| 218 | 222 | ||
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 9a8e396aed89..278fd2891288 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
| @@ -978,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
| 978 | struct ceph_mds_reply_inode *ininfo; | 978 | struct ceph_mds_reply_inode *ininfo; |
| 979 | struct ceph_vino vino; | 979 | struct ceph_vino vino; |
| 980 | struct ceph_fs_client *fsc = ceph_sb_to_client(sb); | 980 | struct ceph_fs_client *fsc = ceph_sb_to_client(sb); |
| 981 | int i = 0; | ||
| 982 | int err = 0; | 981 | int err = 0; |
| 983 | 982 | ||
| 984 | dout("fill_trace %p is_dentry %d is_target %d\n", req, | 983 | dout("fill_trace %p is_dentry %d is_target %d\n", req, |
| @@ -1039,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
| 1039 | } | 1038 | } |
| 1040 | } | 1039 | } |
| 1041 | 1040 | ||
| 1041 | if (rinfo->head->is_target) { | ||
| 1042 | vino.ino = le64_to_cpu(rinfo->targeti.in->ino); | ||
| 1043 | vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); | ||
| 1044 | |||
| 1045 | in = ceph_get_inode(sb, vino); | ||
| 1046 | if (IS_ERR(in)) { | ||
| 1047 | err = PTR_ERR(in); | ||
| 1048 | goto done; | ||
| 1049 | } | ||
| 1050 | req->r_target_inode = in; | ||
| 1051 | |||
| 1052 | err = fill_inode(in, &rinfo->targeti, NULL, | ||
| 1053 | session, req->r_request_started, | ||
| 1054 | (le32_to_cpu(rinfo->head->result) == 0) ? | ||
| 1055 | req->r_fmode : -1, | ||
| 1056 | &req->r_caps_reservation); | ||
| 1057 | if (err < 0) { | ||
| 1058 | pr_err("fill_inode badness %p %llx.%llx\n", | ||
| 1059 | in, ceph_vinop(in)); | ||
| 1060 | goto done; | ||
| 1061 | } | ||
| 1062 | } | ||
| 1063 | |||
| 1042 | /* | 1064 | /* |
| 1043 | * ignore null lease/binding on snapdir ENOENT, or else we | 1065 | * ignore null lease/binding on snapdir ENOENT, or else we |
| 1044 | * will have trouble splicing in the virtual snapdir later | 1066 | * will have trouble splicing in the virtual snapdir later |
| @@ -1108,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
| 1108 | ceph_dentry(req->r_old_dentry)->offset); | 1130 | ceph_dentry(req->r_old_dentry)->offset); |
| 1109 | 1131 | ||
| 1110 | dn = req->r_old_dentry; /* use old_dentry */ | 1132 | dn = req->r_old_dentry; /* use old_dentry */ |
| 1111 | in = dn->d_inode; | ||
| 1112 | } | 1133 | } |
| 1113 | 1134 | ||
| 1114 | /* null dentry? */ | 1135 | /* null dentry? */ |
| @@ -1130,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
| 1130 | } | 1151 | } |
| 1131 | 1152 | ||
| 1132 | /* attach proper inode */ | 1153 | /* attach proper inode */ |
| 1133 | ininfo = rinfo->targeti.in; | 1154 | if (!dn->d_inode) { |
| 1134 | vino.ino = le64_to_cpu(ininfo->ino); | 1155 | ihold(in); |
| 1135 | vino.snap = le64_to_cpu(ininfo->snapid); | ||
| 1136 | in = dn->d_inode; | ||
| 1137 | if (!in) { | ||
| 1138 | in = ceph_get_inode(sb, vino); | ||
| 1139 | if (IS_ERR(in)) { | ||
| 1140 | pr_err("fill_trace bad get_inode " | ||
| 1141 | "%llx.%llx\n", vino.ino, vino.snap); | ||
| 1142 | err = PTR_ERR(in); | ||
| 1143 | d_drop(dn); | ||
| 1144 | goto done; | ||
| 1145 | } | ||
| 1146 | dn = splice_dentry(dn, in, &have_lease, true); | 1156 | dn = splice_dentry(dn, in, &have_lease, true); |
| 1147 | if (IS_ERR(dn)) { | 1157 | if (IS_ERR(dn)) { |
| 1148 | err = PTR_ERR(dn); | 1158 | err = PTR_ERR(dn); |
| 1149 | goto done; | 1159 | goto done; |
| 1150 | } | 1160 | } |
| 1151 | req->r_dentry = dn; /* may have spliced */ | 1161 | req->r_dentry = dn; /* may have spliced */ |
| 1152 | ihold(in); | 1162 | } else if (dn->d_inode && dn->d_inode != in) { |
| 1153 | } else if (ceph_ino(in) == vino.ino && | ||
| 1154 | ceph_snap(in) == vino.snap) { | ||
| 1155 | ihold(in); | ||
| 1156 | } else { | ||
| 1157 | dout(" %p links to %p %llx.%llx, not %llx.%llx\n", | 1163 | dout(" %p links to %p %llx.%llx, not %llx.%llx\n", |
| 1158 | dn, in, ceph_ino(in), ceph_snap(in), | 1164 | dn, dn->d_inode, ceph_vinop(dn->d_inode), |
| 1159 | vino.ino, vino.snap); | 1165 | ceph_vinop(in)); |
| 1160 | have_lease = false; | 1166 | have_lease = false; |
| 1161 | in = NULL; | ||
| 1162 | } | 1167 | } |
| 1163 | 1168 | ||
| 1164 | if (have_lease) | 1169 | if (have_lease) |
| 1165 | update_dentry_lease(dn, rinfo->dlease, session, | 1170 | update_dentry_lease(dn, rinfo->dlease, session, |
| 1166 | req->r_request_started); | 1171 | req->r_request_started); |
| 1167 | dout(" final dn %p\n", dn); | 1172 | dout(" final dn %p\n", dn); |
| 1168 | i++; | 1173 | } else if (!req->r_aborted && |
| 1169 | } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || | 1174 | (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || |
| 1170 | req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) { | 1175 | req->r_op == CEPH_MDS_OP_MKSNAP)) { |
| 1171 | struct dentry *dn = req->r_dentry; | 1176 | struct dentry *dn = req->r_dentry; |
| 1172 | 1177 | ||
| 1173 | /* fill out a snapdir LOOKUPSNAP dentry */ | 1178 | /* fill out a snapdir LOOKUPSNAP dentry */ |
| @@ -1177,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
| 1177 | ininfo = rinfo->targeti.in; | 1182 | ininfo = rinfo->targeti.in; |
| 1178 | vino.ino = le64_to_cpu(ininfo->ino); | 1183 | vino.ino = le64_to_cpu(ininfo->ino); |
| 1179 | vino.snap = le64_to_cpu(ininfo->snapid); | 1184 | vino.snap = le64_to_cpu(ininfo->snapid); |
| 1180 | in = ceph_get_inode(sb, vino); | ||
| 1181 | if (IS_ERR(in)) { | ||
| 1182 | pr_err("fill_inode get_inode badness %llx.%llx\n", | ||
| 1183 | vino.ino, vino.snap); | ||
| 1184 | err = PTR_ERR(in); | ||
| 1185 | d_delete(dn); | ||
| 1186 | goto done; | ||
| 1187 | } | ||
| 1188 | dout(" linking snapped dir %p to dn %p\n", in, dn); | 1185 | dout(" linking snapped dir %p to dn %p\n", in, dn); |
| 1186 | ihold(in); | ||
| 1189 | dn = splice_dentry(dn, in, NULL, true); | 1187 | dn = splice_dentry(dn, in, NULL, true); |
| 1190 | if (IS_ERR(dn)) { | 1188 | if (IS_ERR(dn)) { |
| 1191 | err = PTR_ERR(dn); | 1189 | err = PTR_ERR(dn); |
| 1192 | goto done; | 1190 | goto done; |
| 1193 | } | 1191 | } |
| 1194 | req->r_dentry = dn; /* may have spliced */ | 1192 | req->r_dentry = dn; /* may have spliced */ |
| 1195 | ihold(in); | ||
| 1196 | rinfo->head->is_dentry = 1; /* fool notrace handlers */ | ||
| 1197 | } | ||
| 1198 | |||
| 1199 | if (rinfo->head->is_target) { | ||
| 1200 | vino.ino = le64_to_cpu(rinfo->targeti.in->ino); | ||
| 1201 | vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); | ||
| 1202 | |||
| 1203 | if (in == NULL || ceph_ino(in) != vino.ino || | ||
| 1204 | ceph_snap(in) != vino.snap) { | ||
| 1205 | in = ceph_get_inode(sb, vino); | ||
| 1206 | if (IS_ERR(in)) { | ||
| 1207 | err = PTR_ERR(in); | ||
| 1208 | goto done; | ||
| 1209 | } | ||
| 1210 | } | ||
| 1211 | req->r_target_inode = in; | ||
| 1212 | |||
| 1213 | err = fill_inode(in, | ||
| 1214 | &rinfo->targeti, NULL, | ||
| 1215 | session, req->r_request_started, | ||
| 1216 | (le32_to_cpu(rinfo->head->result) == 0) ? | ||
| 1217 | req->r_fmode : -1, | ||
| 1218 | &req->r_caps_reservation); | ||
| 1219 | if (err < 0) { | ||
| 1220 | pr_err("fill_inode badness %p %llx.%llx\n", | ||
| 1221 | in, ceph_vinop(in)); | ||
| 1222 | goto done; | ||
| 1223 | } | ||
| 1224 | } | 1193 | } |
| 1225 | |||
| 1226 | done: | 1194 | done: |
| 1227 | dout("fill_trace done err=%d\n", err); | 1195 | dout("fill_trace done err=%d\n", err); |
| 1228 | return err; | 1196 | return err; |
| @@ -1272,7 +1240,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, | |||
| 1272 | struct qstr dname; | 1240 | struct qstr dname; |
| 1273 | struct dentry *dn; | 1241 | struct dentry *dn; |
| 1274 | struct inode *in; | 1242 | struct inode *in; |
| 1275 | int err = 0, i; | 1243 | int err = 0, ret, i; |
| 1276 | struct inode *snapdir = NULL; | 1244 | struct inode *snapdir = NULL; |
| 1277 | struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; | 1245 | struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; |
| 1278 | struct ceph_dentry_info *di; | 1246 | struct ceph_dentry_info *di; |
| @@ -1305,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, | |||
| 1305 | ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); | 1273 | ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); |
| 1306 | } | 1274 | } |
| 1307 | 1275 | ||
| 1276 | /* FIXME: release caps/leases if error occurs */ | ||
| 1308 | for (i = 0; i < rinfo->dir_nr; i++) { | 1277 | for (i = 0; i < rinfo->dir_nr; i++) { |
| 1309 | struct ceph_vino vino; | 1278 | struct ceph_vino vino; |
| 1310 | 1279 | ||
| @@ -1329,9 +1298,10 @@ retry_lookup: | |||
| 1329 | err = -ENOMEM; | 1298 | err = -ENOMEM; |
| 1330 | goto out; | 1299 | goto out; |
| 1331 | } | 1300 | } |
| 1332 | err = ceph_init_dentry(dn); | 1301 | ret = ceph_init_dentry(dn); |
| 1333 | if (err < 0) { | 1302 | if (ret < 0) { |
| 1334 | dput(dn); | 1303 | dput(dn); |
| 1304 | err = ret; | ||
| 1335 | goto out; | 1305 | goto out; |
| 1336 | } | 1306 | } |
| 1337 | } else if (dn->d_inode && | 1307 | } else if (dn->d_inode && |
| @@ -1351,9 +1321,6 @@ retry_lookup: | |||
| 1351 | spin_unlock(&parent->d_lock); | 1321 | spin_unlock(&parent->d_lock); |
| 1352 | } | 1322 | } |
| 1353 | 1323 | ||
| 1354 | di = dn->d_fsdata; | ||
| 1355 | di->offset = ceph_make_fpos(frag, i + r_readdir_offset); | ||
| 1356 | |||
| 1357 | /* inode */ | 1324 | /* inode */ |
| 1358 | if (dn->d_inode) { | 1325 | if (dn->d_inode) { |
| 1359 | in = dn->d_inode; | 1326 | in = dn->d_inode; |
| @@ -1366,26 +1333,39 @@ retry_lookup: | |||
| 1366 | err = PTR_ERR(in); | 1333 | err = PTR_ERR(in); |
| 1367 | goto out; | 1334 | goto out; |
| 1368 | } | 1335 | } |
| 1369 | dn = splice_dentry(dn, in, NULL, false); | ||
| 1370 | if (IS_ERR(dn)) | ||
| 1371 | dn = NULL; | ||
| 1372 | } | 1336 | } |
| 1373 | 1337 | ||
| 1374 | if (fill_inode(in, &rinfo->dir_in[i], NULL, session, | 1338 | if (fill_inode(in, &rinfo->dir_in[i], NULL, session, |
| 1375 | req->r_request_started, -1, | 1339 | req->r_request_started, -1, |
| 1376 | &req->r_caps_reservation) < 0) { | 1340 | &req->r_caps_reservation) < 0) { |
| 1377 | pr_err("fill_inode badness on %p\n", in); | 1341 | pr_err("fill_inode badness on %p\n", in); |
| 1342 | if (!dn->d_inode) | ||
| 1343 | iput(in); | ||
| 1344 | d_drop(dn); | ||
| 1378 | goto next_item; | 1345 | goto next_item; |
| 1379 | } | 1346 | } |
| 1380 | if (dn) | 1347 | |
| 1381 | update_dentry_lease(dn, rinfo->dir_dlease[i], | 1348 | if (!dn->d_inode) { |
| 1382 | req->r_session, | 1349 | dn = splice_dentry(dn, in, NULL, false); |
| 1383 | req->r_request_started); | 1350 | if (IS_ERR(dn)) { |
| 1351 | err = PTR_ERR(dn); | ||
| 1352 | dn = NULL; | ||
| 1353 | goto next_item; | ||
| 1354 | } | ||
| 1355 | } | ||
| 1356 | |||
| 1357 | di = dn->d_fsdata; | ||
| 1358 | di->offset = ceph_make_fpos(frag, i + r_readdir_offset); | ||
| 1359 | |||
| 1360 | update_dentry_lease(dn, rinfo->dir_dlease[i], | ||
| 1361 | req->r_session, | ||
| 1362 | req->r_request_started); | ||
| 1384 | next_item: | 1363 | next_item: |
| 1385 | if (dn) | 1364 | if (dn) |
| 1386 | dput(dn); | 1365 | dput(dn); |
| 1387 | } | 1366 | } |
| 1388 | req->r_did_prepopulate = true; | 1367 | if (err == 0) |
| 1368 | req->r_did_prepopulate = true; | ||
| 1389 | 1369 | ||
| 1390 | out: | 1370 | out: |
| 1391 | if (snapdir) { | 1371 | if (snapdir) { |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index aa3397620342..2c29db6a247e 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
| @@ -477,9 +477,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 477 | const int netfid, __u64 *pExtAttrBits, __u64 *pMask); | 477 | const int netfid, __u64 *pExtAttrBits, __u64 *pMask); |
| 478 | extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); | 478 | extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); |
| 479 | extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr); | 479 | extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr); |
| 480 | extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr, | 480 | extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon, |
| 481 | const unsigned char *path, | 481 | struct cifs_sb_info *cifs_sb, |
| 482 | struct cifs_sb_info *cifs_sb, unsigned int xid); | 482 | struct cifs_fattr *fattr, |
| 483 | const unsigned char *path); | ||
| 483 | extern int mdfour(unsigned char *, unsigned char *, int); | 484 | extern int mdfour(unsigned char *, unsigned char *, int); |
| 484 | extern int E_md4hash(const unsigned char *passwd, unsigned char *p16, | 485 | extern int E_md4hash(const unsigned char *passwd, unsigned char *p16, |
| 485 | const struct nls_table *codepage); | 486 | const struct nls_table *codepage); |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 124aa0230c1b..d707edb6b852 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
| @@ -4010,7 +4010,7 @@ QFileInfoRetry: | |||
| 4010 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4010 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
| 4011 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4011 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
| 4012 | if (rc) { | 4012 | if (rc) { |
| 4013 | cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); | 4013 | cifs_dbg(FYI, "Send error in QFileInfo = %d", rc); |
| 4014 | } else { /* decode response */ | 4014 | } else { /* decode response */ |
| 4015 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4015 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
| 4016 | 4016 | ||
| @@ -4179,7 +4179,7 @@ UnixQFileInfoRetry: | |||
| 4179 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4179 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
| 4180 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4180 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
| 4181 | if (rc) { | 4181 | if (rc) { |
| 4182 | cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); | 4182 | cifs_dbg(FYI, "Send error in UnixQFileInfo = %d", rc); |
| 4183 | } else { /* decode response */ | 4183 | } else { /* decode response */ |
| 4184 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4184 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
| 4185 | 4185 | ||
| @@ -4263,7 +4263,7 @@ UnixQPathInfoRetry: | |||
| 4263 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4263 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
| 4264 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4264 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
| 4265 | if (rc) { | 4265 | if (rc) { |
| 4266 | cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); | 4266 | cifs_dbg(FYI, "Send error in UnixQPathInfo = %d", rc); |
| 4267 | } else { /* decode response */ | 4267 | } else { /* decode response */ |
| 4268 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4268 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
| 4269 | 4269 | ||
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 11ff5f116b20..a514e0a65f69 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
| @@ -193,7 +193,7 @@ check_name(struct dentry *direntry) | |||
| 193 | static int | 193 | static int |
| 194 | cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, | 194 | cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, |
| 195 | struct tcon_link *tlink, unsigned oflags, umode_t mode, | 195 | struct tcon_link *tlink, unsigned oflags, umode_t mode, |
| 196 | __u32 *oplock, struct cifs_fid *fid, int *created) | 196 | __u32 *oplock, struct cifs_fid *fid) |
| 197 | { | 197 | { |
| 198 | int rc = -ENOENT; | 198 | int rc = -ENOENT; |
| 199 | int create_options = CREATE_NOT_DIR; | 199 | int create_options = CREATE_NOT_DIR; |
| @@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, | |||
| 349 | .device = 0, | 349 | .device = 0, |
| 350 | }; | 350 | }; |
| 351 | 351 | ||
| 352 | *created |= FILE_CREATED; | ||
| 353 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { | 352 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { |
| 354 | args.uid = current_fsuid(); | 353 | args.uid = current_fsuid(); |
| 355 | if (inode->i_mode & S_ISGID) | 354 | if (inode->i_mode & S_ISGID) |
| @@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, | |||
| 480 | cifs_add_pending_open(&fid, tlink, &open); | 479 | cifs_add_pending_open(&fid, tlink, &open); |
| 481 | 480 | ||
| 482 | rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, | 481 | rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, |
| 483 | &oplock, &fid, opened); | 482 | &oplock, &fid); |
| 484 | 483 | ||
| 485 | if (rc) { | 484 | if (rc) { |
| 486 | cifs_del_pending_open(&open); | 485 | cifs_del_pending_open(&open); |
| 487 | goto out; | 486 | goto out; |
| 488 | } | 487 | } |
| 489 | 488 | ||
| 489 | if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) | ||
| 490 | *opened |= FILE_CREATED; | ||
| 491 | |||
| 490 | rc = finish_open(file, direntry, generic_file_open, opened); | 492 | rc = finish_open(file, direntry, generic_file_open, opened); |
| 491 | if (rc) { | 493 | if (rc) { |
| 492 | if (server->ops->close) | 494 | if (server->ops->close) |
| @@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode, | |||
| 529 | struct TCP_Server_Info *server; | 531 | struct TCP_Server_Info *server; |
| 530 | struct cifs_fid fid; | 532 | struct cifs_fid fid; |
| 531 | __u32 oplock; | 533 | __u32 oplock; |
| 532 | int created = FILE_CREATED; | ||
| 533 | 534 | ||
| 534 | cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n", | 535 | cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n", |
| 535 | inode, direntry->d_name.name, direntry); | 536 | inode, direntry->d_name.name, direntry); |
| @@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode, | |||
| 546 | server->ops->new_lease_key(&fid); | 547 | server->ops->new_lease_key(&fid); |
| 547 | 548 | ||
| 548 | rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, | 549 | rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, |
| 549 | &oplock, &fid, &created); | 550 | &oplock, &fid); |
| 550 | if (!rc && server->ops->close) | 551 | if (!rc && server->ops->close) |
| 551 | server->ops->close(xid, tcon, &fid); | 552 | server->ops->close(xid, tcon, &fid); |
| 552 | 553 | ||
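In the cifs/dir.c hunks above, FILE_CREATED is no longer set unconditionally inside cifs_do_create(); cifs_atomic_open() now reports it only when both O_CREAT and O_EXCL were given, presumably because only then does a successful open prove the file did not already exist. A small userspace demonstration of that O_CREAT|O_EXCL guarantee (illustrative path name):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* With O_CREAT|O_EXCL, success proves the file was created; with
	 * plain O_CREAT it may simply have existed already. */
	int fd = open("/tmp/excl_demo", O_CREAT | O_EXCL | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");			/* EEXIST: somebody beat us to it */
	} else {
		printf("file was definitely created by us\n");
		close(fd);
		unlink("/tmp/excl_demo");
	}
	return 0;
}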
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 36f9ebb93ceb..49719b8228e5 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
| @@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
| 383 | 383 | ||
| 384 | /* check for Minshall+French symlinks */ | 384 | /* check for Minshall+French symlinks */ |
| 385 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { | 385 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { |
| 386 | int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid); | 386 | int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr, |
| 387 | full_path); | ||
| 387 | if (tmprc) | 388 | if (tmprc) |
| 388 | cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); | 389 | cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); |
| 389 | } | 390 | } |
| @@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, | |||
| 799 | 800 | ||
| 800 | /* check for Minshall+French symlinks */ | 801 | /* check for Minshall+French symlinks */ |
| 801 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { | 802 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { |
| 802 | tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid); | 803 | tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr, |
| 804 | full_path); | ||
| 803 | if (tmprc) | 805 | if (tmprc) |
| 804 | cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); | 806 | cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); |
| 805 | } | 807 | } |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index cc0234710ddb..92aee08483a5 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
| @@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, | |||
| 354 | 354 | ||
| 355 | 355 | ||
| 356 | int | 356 | int |
| 357 | CIFSCheckMFSymlink(struct cifs_fattr *fattr, | 357 | CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon, |
| 358 | const unsigned char *path, | 358 | struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, |
| 359 | struct cifs_sb_info *cifs_sb, unsigned int xid) | 359 | const unsigned char *path) |
| 360 | { | 360 | { |
| 361 | int rc = 0; | 361 | int rc; |
| 362 | u8 *buf = NULL; | 362 | u8 *buf = NULL; |
| 363 | unsigned int link_len = 0; | 363 | unsigned int link_len = 0; |
| 364 | unsigned int bytes_read = 0; | 364 | unsigned int bytes_read = 0; |
| 365 | struct cifs_tcon *ptcon; | ||
| 366 | 365 | ||
| 367 | if (!CIFSCouldBeMFSymlink(fattr)) | 366 | if (!CIFSCouldBeMFSymlink(fattr)) |
| 368 | /* it's not a symlink */ | 367 | /* it's not a symlink */ |
| 369 | return 0; | 368 | return 0; |
| 370 | 369 | ||
| 371 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); | 370 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); |
| 372 | if (!buf) { | 371 | if (!buf) |
| 373 | rc = -ENOMEM; | 372 | return -ENOMEM; |
| 374 | goto out; | ||
| 375 | } | ||
| 376 | 373 | ||
| 377 | ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); | 374 | if (tcon->ses->server->ops->query_mf_symlink) |
| 378 | if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink)) | 375 | rc = tcon->ses->server->ops->query_mf_symlink(path, buf, |
| 379 | rc = ptcon->ses->server->ops->query_mf_symlink(path, buf, | 376 | &bytes_read, cifs_sb, xid); |
| 380 | &bytes_read, cifs_sb, xid); | ||
| 381 | else | 377 | else |
| 382 | goto out; | 378 | rc = -ENOSYS; |
| 383 | 379 | ||
| 384 | if (rc != 0) | 380 | if (rc) |
| 385 | goto out; | 381 | goto out; |
| 386 | 382 | ||
| 387 | if (bytes_read == 0) /* not a symlink */ | 383 | if (bytes_read == 0) /* not a symlink */ |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 8b5e2584c840..af903128891c 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
| @@ -1907,10 +1907,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
| 1907 | } | 1907 | } |
| 1908 | } | 1908 | } |
| 1909 | } | 1909 | } |
| 1910 | if (op == EPOLL_CTL_DEL && is_file_epoll(tf.file)) { | ||
| 1911 | tep = tf.file->private_data; | ||
| 1912 | mutex_lock_nested(&tep->mtx, 1); | ||
| 1913 | } | ||
| 1914 | 1910 | ||
| 1915 | /* | 1911 | /* |
| 1916 | * Try to lookup the file inside our RB tree, Since we grabbed "mtx" | 1912 | * Try to lookup the file inside our RB tree, Since we grabbed "mtx" |
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 288534920fe5..20d6697bd638 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
| @@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type, | |||
| 1493 | sb->s_blocksize - offset : towrite; | 1493 | sb->s_blocksize - offset : towrite; |
| 1494 | 1494 | ||
| 1495 | tmp_bh.b_state = 0; | 1495 | tmp_bh.b_state = 0; |
| 1496 | tmp_bh.b_size = sb->s_blocksize; | ||
| 1496 | err = ext2_get_block(inode, blk, &tmp_bh, 1); | 1497 | err = ext2_get_block(inode, blk, &tmp_bh, 1); |
| 1497 | if (err < 0) | 1498 | if (err < 0) |
| 1498 | goto out; | 1499 | goto out; |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index e6185031c1cc..ece55565b9cd 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -268,6 +268,16 @@ struct ext4_io_submit { | |||
| 268 | /* Translate # of blks to # of clusters */ | 268 | /* Translate # of blks to # of clusters */ |
| 269 | #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ | 269 | #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ |
| 270 | (sbi)->s_cluster_bits) | 270 | (sbi)->s_cluster_bits) |
| 271 | /* Mask out the low bits to get the starting block of the cluster */ | ||
| 272 | #define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \ | ||
| 273 | ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) | ||
| 274 | #define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \ | ||
| 275 | ~((ext4_lblk_t) (s)->s_cluster_ratio - 1)) | ||
| 276 | /* Get the cluster offset */ | ||
| 277 | #define EXT4_PBLK_COFF(s, pblk) ((pblk) & \ | ||
| 278 | ((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) | ||
| 279 | #define EXT4_LBLK_COFF(s, lblk) ((lblk) & \ | ||
| 280 | ((ext4_lblk_t) (s)->s_cluster_ratio - 1)) | ||
| 271 | 281 | ||
| 272 | /* | 282 | /* |
| 273 | * Structure of a blocks group descriptor | 283 | * Structure of a blocks group descriptor |
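The four helpers added to ext4.h above package the bigalloc mask arithmetic that later hunks in this series stop open-coding. A standalone worked example, assuming a cluster ratio of 16 (the ratio is always a power of two, so ratio minus one acts as a low-bit mask):

#include <stdio.h>

int main(void)
{
	unsigned long long ratio = 16;		/* assumed s_cluster_ratio */
	unsigned long long pblk  = 0x1235;	/* any physical block number */

	/* EXT4_PBLK_CMASK: clear the low bits, giving the cluster's first block */
	printf("cluster start : 0x%llx\n", pblk & ~(ratio - 1));	/* 0x1230 */
	/* EXT4_PBLK_COFF: keep the low bits, giving the offset inside the cluster */
	printf("cluster offset: 0x%llx\n", pblk & (ratio - 1));		/* 0x5 */
	return 0;
}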
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 17ac112ab101..3fe29de832c8 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c | |||
| @@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, | |||
| 259 | if (WARN_ON_ONCE(err)) { | 259 | if (WARN_ON_ONCE(err)) { |
| 260 | ext4_journal_abort_handle(where, line, __func__, bh, | 260 | ext4_journal_abort_handle(where, line, __func__, bh, |
| 261 | handle, err); | 261 | handle, err); |
| 262 | ext4_error_inode(inode, where, line, | ||
| 263 | bh->b_blocknr, | ||
| 264 | "journal_dirty_metadata failed: " | ||
| 265 | "handle type %u started at line %u, " | ||
| 266 | "credits %u/%u, errcode %d", | ||
| 267 | handle->h_type, | ||
| 268 | handle->h_line_no, | ||
| 269 | handle->h_requested_credits, | ||
| 270 | handle->h_buffer_credits, err); | ||
| 262 | } | 271 | } |
| 263 | } else { | 272 | } else { |
| 264 | if (inode) | 273 | if (inode) |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 35f65cf4f318..3384dc4bed40 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) | |||
| 360 | { | 360 | { |
| 361 | ext4_fsblk_t block = ext4_ext_pblock(ext); | 361 | ext4_fsblk_t block = ext4_ext_pblock(ext); |
| 362 | int len = ext4_ext_get_actual_len(ext); | 362 | int len = ext4_ext_get_actual_len(ext); |
| 363 | ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); | ||
| 364 | ext4_lblk_t last = lblock + len - 1; | ||
| 363 | 365 | ||
| 364 | if (len == 0) | 366 | if (lblock > last) |
| 365 | return 0; | 367 | return 0; |
| 366 | return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); | 368 | return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); |
| 367 | } | 369 | } |
| @@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode, | |||
| 387 | if (depth == 0) { | 389 | if (depth == 0) { |
| 388 | /* leaf entries */ | 390 | /* leaf entries */ |
| 389 | struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); | 391 | struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); |
| 392 | struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; | ||
| 393 | ext4_fsblk_t pblock = 0; | ||
| 394 | ext4_lblk_t lblock = 0; | ||
| 395 | ext4_lblk_t prev = 0; | ||
| 396 | int len = 0; | ||
| 390 | while (entries) { | 397 | while (entries) { |
| 391 | if (!ext4_valid_extent(inode, ext)) | 398 | if (!ext4_valid_extent(inode, ext)) |
| 392 | return 0; | 399 | return 0; |
| 400 | |||
| 401 | /* Check for overlapping extents */ | ||
| 402 | lblock = le32_to_cpu(ext->ee_block); | ||
| 403 | len = ext4_ext_get_actual_len(ext); | ||
| 404 | if ((lblock <= prev) && prev) { | ||
| 405 | pblock = ext4_ext_pblock(ext); | ||
| 406 | es->s_last_error_block = cpu_to_le64(pblock); | ||
| 407 | return 0; | ||
| 408 | } | ||
| 393 | ext++; | 409 | ext++; |
| 394 | entries--; | 410 | entries--; |
| 411 | prev = lblock + len - 1; | ||
| 395 | } | 412 | } |
| 396 | } else { | 413 | } else { |
| 397 | struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); | 414 | struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); |
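The overlap check added in the hunk above relies on leaf extents being stored in ascending logical-block order: each extent after the first must start strictly beyond the previous extent's last block, tracked in "prev". A quick worked example of the corrupt layout it now rejects (numbers are illustrative):

#include <stdio.h>

int main(void)
{
	/* extents from a hypothetical corrupt leaf: {logical start, length} */
	unsigned int start1 = 100, len1 = 10;		/* covers blocks 100..109 */
	unsigned int start2 = 108;			/* begins inside the first extent */
	unsigned int prev = start1 + len1 - 1;		/* 109, last block seen so far */

	if (start2 <= prev && prev)
		printf("overlap: extent at %u starts at or before block %u\n",
		       start2, prev);
	return 0;
}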
| @@ -1834,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, | |||
| 1834 | depth = ext_depth(inode); | 1851 | depth = ext_depth(inode); |
| 1835 | if (!path[depth].p_ext) | 1852 | if (!path[depth].p_ext) |
| 1836 | goto out; | 1853 | goto out; |
| 1837 | b2 = le32_to_cpu(path[depth].p_ext->ee_block); | 1854 | b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); |
| 1838 | b2 &= ~(sbi->s_cluster_ratio - 1); | ||
| 1839 | 1855 | ||
| 1840 | /* | 1856 | /* |
| 1841 | * get the next allocated block if the extent in the path | 1857 | * get the next allocated block if the extent in the path |
| @@ -1845,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, | |||
| 1845 | b2 = ext4_ext_next_allocated_block(path); | 1861 | b2 = ext4_ext_next_allocated_block(path); |
| 1846 | if (b2 == EXT_MAX_BLOCKS) | 1862 | if (b2 == EXT_MAX_BLOCKS) |
| 1847 | goto out; | 1863 | goto out; |
| 1848 | b2 &= ~(sbi->s_cluster_ratio - 1); | 1864 | b2 = EXT4_LBLK_CMASK(sbi, b2); |
| 1849 | } | 1865 | } |
| 1850 | 1866 | ||
| 1851 | /* check for wrap through zero on extent logical start block*/ | 1867 | /* check for wrap through zero on extent logical start block*/ |
| @@ -2504,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, | |||
| 2504 | * extent, we have to mark the cluster as used (store negative | 2520 | * extent, we have to mark the cluster as used (store negative |
| 2505 | * cluster number in partial_cluster). | 2521 | * cluster number in partial_cluster). |
| 2506 | */ | 2522 | */ |
| 2507 | unaligned = pblk & (sbi->s_cluster_ratio - 1); | 2523 | unaligned = EXT4_PBLK_COFF(sbi, pblk); |
| 2508 | if (unaligned && (ee_len == num) && | 2524 | if (unaligned && (ee_len == num) && |
| 2509 | (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) | 2525 | (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) |
| 2510 | *partial_cluster = EXT4_B2C(sbi, pblk); | 2526 | *partial_cluster = EXT4_B2C(sbi, pblk); |
| @@ -2598,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
| 2598 | * accidentally freeing it later on | 2614 | * accidentally freeing it later on |
| 2599 | */ | 2615 | */ |
| 2600 | pblk = ext4_ext_pblock(ex); | 2616 | pblk = ext4_ext_pblock(ex); |
| 2601 | if (pblk & (sbi->s_cluster_ratio - 1)) | 2617 | if (EXT4_PBLK_COFF(sbi, pblk)) |
| 2602 | *partial_cluster = | 2618 | *partial_cluster = |
| 2603 | -((long long)EXT4_B2C(sbi, pblk)); | 2619 | -((long long)EXT4_B2C(sbi, pblk)); |
| 2604 | ex--; | 2620 | ex--; |
| @@ -3753,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) | |||
| 3753 | { | 3769 | { |
| 3754 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 3770 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 3755 | ext4_lblk_t lblk_start, lblk_end; | 3771 | ext4_lblk_t lblk_start, lblk_end; |
| 3756 | lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); | 3772 | lblk_start = EXT4_LBLK_CMASK(sbi, lblk); |
| 3757 | lblk_end = lblk_start + sbi->s_cluster_ratio - 1; | 3773 | lblk_end = lblk_start + sbi->s_cluster_ratio - 1; |
| 3758 | 3774 | ||
| 3759 | return ext4_find_delalloc_range(inode, lblk_start, lblk_end); | 3775 | return ext4_find_delalloc_range(inode, lblk_start, lblk_end); |
| @@ -3812,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, | |||
| 3812 | trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); | 3828 | trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); |
| 3813 | 3829 | ||
| 3814 | /* Check towards left side */ | 3830 | /* Check towards left side */ |
| 3815 | c_offset = lblk_start & (sbi->s_cluster_ratio - 1); | 3831 | c_offset = EXT4_LBLK_COFF(sbi, lblk_start); |
| 3816 | if (c_offset) { | 3832 | if (c_offset) { |
| 3817 | lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); | 3833 | lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); |
| 3818 | lblk_to = lblk_from + c_offset - 1; | 3834 | lblk_to = lblk_from + c_offset - 1; |
| 3819 | 3835 | ||
| 3820 | if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) | 3836 | if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) |
| @@ -3822,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, | |||
| 3822 | } | 3838 | } |
| 3823 | 3839 | ||
| 3824 | /* Now check towards right. */ | 3840 | /* Now check towards right. */ |
| 3825 | c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); | 3841 | c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); |
| 3826 | if (allocated_clusters && c_offset) { | 3842 | if (allocated_clusters && c_offset) { |
| 3827 | lblk_from = lblk_start + num_blks; | 3843 | lblk_from = lblk_start + num_blks; |
| 3828 | lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; | 3844 | lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; |
| @@ -4030,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, | |||
| 4030 | struct ext4_ext_path *path) | 4046 | struct ext4_ext_path *path) |
| 4031 | { | 4047 | { |
| 4032 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 4048 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
| 4033 | ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); | 4049 | ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
| 4034 | ext4_lblk_t ex_cluster_start, ex_cluster_end; | 4050 | ext4_lblk_t ex_cluster_start, ex_cluster_end; |
| 4035 | ext4_lblk_t rr_cluster_start; | 4051 | ext4_lblk_t rr_cluster_start; |
| 4036 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); | 4052 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
| @@ -4048,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, | |||
| 4048 | (rr_cluster_start == ex_cluster_start)) { | 4064 | (rr_cluster_start == ex_cluster_start)) { |
| 4049 | if (rr_cluster_start == ex_cluster_end) | 4065 | if (rr_cluster_start == ex_cluster_end) |
| 4050 | ee_start += ee_len - 1; | 4066 | ee_start += ee_len - 1; |
| 4051 | map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + | 4067 | map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; |
| 4052 | c_offset; | ||
| 4053 | map->m_len = min(map->m_len, | 4068 | map->m_len = min(map->m_len, |
| 4054 | (unsigned) sbi->s_cluster_ratio - c_offset); | 4069 | (unsigned) sbi->s_cluster_ratio - c_offset); |
| 4055 | /* | 4070 | /* |
| @@ -4203,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 4203 | */ | 4218 | */ |
| 4204 | map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; | 4219 | map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; |
| 4205 | newex.ee_block = cpu_to_le32(map->m_lblk); | 4220 | newex.ee_block = cpu_to_le32(map->m_lblk); |
| 4206 | cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); | 4221 | cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
| 4207 | 4222 | ||
| 4208 | /* | 4223 | /* |
| 4209 | * If we are doing bigalloc, check to see if the extent returned | 4224 | * If we are doing bigalloc, check to see if the extent returned |
| @@ -4271,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 4271 | * needed so that future calls to get_implied_cluster_alloc() | 4286 | * needed so that future calls to get_implied_cluster_alloc() |
| 4272 | * work correctly. | 4287 | * work correctly. |
| 4273 | */ | 4288 | */ |
| 4274 | offset = map->m_lblk & (sbi->s_cluster_ratio - 1); | 4289 | offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
| 4275 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); | 4290 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); |
| 4276 | ar.goal -= offset; | 4291 | ar.goal -= offset; |
| 4277 | ar.logical -= offset; | 4292 | ar.logical -= offset; |
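The extents.c hunks above replace the open-coded cluster arithmetic (lblk & (s_cluster_ratio - 1) and its complement) with the EXT4_LBLK_COFF/EXT4_LBLK_CMASK helpers; the EXT4_PBLK_* variants used later in mballoc.c apply the same idea to physical block numbers. A minimal standalone sketch of the intended arithmetic, assuming the cluster ratio is a power of two as with bigalloc (the helper names mirror the patch, but the types and functions here are simplified stand-ins, not the ext4 macros):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t ext4_lblk_t;

    /* COFF = offset of a block within its cluster, CMASK = first block of
     * the cluster.  Both assume cluster_ratio is a power of two. */
    static ext4_lblk_t lblk_coff(unsigned cluster_ratio, ext4_lblk_t lblk)
    {
        return lblk & (cluster_ratio - 1);
    }

    static ext4_lblk_t lblk_cmask(unsigned cluster_ratio, ext4_lblk_t lblk)
    {
        return lblk & ~((ext4_lblk_t)cluster_ratio - 1);
    }

    int main(void)
    {
        unsigned ratio = 16;        /* e.g. 64k clusters on 4k blocks */
        ext4_lblk_t lblk = 37;

        /* 37 = 2 * 16 + 5: the cluster starts at block 32, offset is 5 */
        printf("cmask=%u coff=%u\n",
               (unsigned)lblk_cmask(ratio, lblk),
               (unsigned)lblk_coff(ratio, lblk));
        return 0;
    }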
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 075763474118..61d49ff22c81 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file, | |||
| 1206 | */ | 1206 | */ |
| 1207 | static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) | 1207 | static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) |
| 1208 | { | 1208 | { |
| 1209 | int retries = 0; | ||
| 1210 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1209 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 1211 | struct ext4_inode_info *ei = EXT4_I(inode); | 1210 | struct ext4_inode_info *ei = EXT4_I(inode); |
| 1212 | unsigned int md_needed; | 1211 | unsigned int md_needed; |
| @@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) | |||
| 1218 | * in order to allocate nrblocks | 1217 | * in order to allocate nrblocks |
| 1219 | * worst case is one extent per block | 1218 | * worst case is one extent per block |
| 1220 | */ | 1219 | */ |
| 1221 | repeat: | ||
| 1222 | spin_lock(&ei->i_block_reservation_lock); | 1220 | spin_lock(&ei->i_block_reservation_lock); |
| 1223 | /* | 1221 | /* |
| 1224 | * ext4_calc_metadata_amount() has side effects, which we have | 1222 | * ext4_calc_metadata_amount() has side effects, which we have |
| @@ -1238,10 +1236,6 @@ repeat: | |||
| 1238 | ei->i_da_metadata_calc_len = save_len; | 1236 | ei->i_da_metadata_calc_len = save_len; |
| 1239 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | 1237 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; |
| 1240 | spin_unlock(&ei->i_block_reservation_lock); | 1238 | spin_unlock(&ei->i_block_reservation_lock); |
| 1241 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | ||
| 1242 | cond_resched(); | ||
| 1243 | goto repeat; | ||
| 1244 | } | ||
| 1245 | return -ENOSPC; | 1239 | return -ENOSPC; |
| 1246 | } | 1240 | } |
| 1247 | ei->i_reserved_meta_blocks += md_needed; | 1241 | ei->i_reserved_meta_blocks += md_needed; |
| @@ -1255,7 +1249,6 @@ repeat: | |||
| 1255 | */ | 1249 | */ |
| 1256 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) | 1250 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) |
| 1257 | { | 1251 | { |
| 1258 | int retries = 0; | ||
| 1259 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1252 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 1260 | struct ext4_inode_info *ei = EXT4_I(inode); | 1253 | struct ext4_inode_info *ei = EXT4_I(inode); |
| 1261 | unsigned int md_needed; | 1254 | unsigned int md_needed; |
| @@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) | |||
| 1277 | * in order to allocate nrblocks | 1270 | * in order to allocate nrblocks |
| 1278 | * worse case is one extent per block | 1271 | * worse case is one extent per block |
| 1279 | */ | 1272 | */ |
| 1280 | repeat: | ||
| 1281 | spin_lock(&ei->i_block_reservation_lock); | 1273 | spin_lock(&ei->i_block_reservation_lock); |
| 1282 | /* | 1274 | /* |
| 1283 | * ext4_calc_metadata_amount() has side effects, which we have | 1275 | * ext4_calc_metadata_amount() has side effects, which we have |
| @@ -1297,10 +1289,6 @@ repeat: | |||
| 1297 | ei->i_da_metadata_calc_len = save_len; | 1289 | ei->i_da_metadata_calc_len = save_len; |
| 1298 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | 1290 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; |
| 1299 | spin_unlock(&ei->i_block_reservation_lock); | 1291 | spin_unlock(&ei->i_block_reservation_lock); |
| 1300 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | ||
| 1301 | cond_resched(); | ||
| 1302 | goto repeat; | ||
| 1303 | } | ||
| 1304 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); | 1292 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); |
| 1305 | return -ENOSPC; | 1293 | return -ENOSPC; |
| 1306 | } | 1294 | } |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4d113efa024c..04a5c7504be9 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
| @@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head) | |||
| 3442 | { | 3442 | { |
| 3443 | struct ext4_prealloc_space *pa; | 3443 | struct ext4_prealloc_space *pa; |
| 3444 | pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); | 3444 | pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); |
| 3445 | |||
| 3446 | BUG_ON(atomic_read(&pa->pa_count)); | ||
| 3447 | BUG_ON(pa->pa_deleted == 0); | ||
| 3445 | kmem_cache_free(ext4_pspace_cachep, pa); | 3448 | kmem_cache_free(ext4_pspace_cachep, pa); |
| 3446 | } | 3449 | } |
| 3447 | 3450 | ||
| @@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, | |||
| 3455 | ext4_group_t grp; | 3458 | ext4_group_t grp; |
| 3456 | ext4_fsblk_t grp_blk; | 3459 | ext4_fsblk_t grp_blk; |
| 3457 | 3460 | ||
| 3458 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) | ||
| 3459 | return; | ||
| 3460 | |||
| 3461 | /* in this short window concurrent discard can set pa_deleted */ | 3461 | /* in this short window concurrent discard can set pa_deleted */ |
| 3462 | spin_lock(&pa->pa_lock); | 3462 | spin_lock(&pa->pa_lock); |
| 3463 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { | ||
| 3464 | spin_unlock(&pa->pa_lock); | ||
| 3465 | return; | ||
| 3466 | } | ||
| 3467 | |||
| 3463 | if (pa->pa_deleted == 1) { | 3468 | if (pa->pa_deleted == 1) { |
| 3464 | spin_unlock(&pa->pa_lock); | 3469 | spin_unlock(&pa->pa_lock); |
| 3465 | return; | 3470 | return; |
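This ext4_mb_put_pa change moves the atomic_dec_and_test() of pa_count inside pa_lock, so the final reference drop and the pa_deleted check happen atomically with respect to a concurrent discard; the new BUG_ONs in ext4_mb_pa_callback then assert that the RCU callback only ever sees a fully dropped, deleted preallocation. A rough userspace analogue of the "drop the last reference under the object lock" pattern (pthread/stdatomic names are illustrative, not the ext4 code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct pa {
        pthread_mutex_t lock;
        atomic_int count;
        int deleted;        /* set by a concurrent "discard" path */
    };

    /* Returns true when the caller dropped the last reference and the object
     * was not already marked deleted, i.e. the caller now owns teardown.
     * Taking the lock before the decrement closes the window in which another
     * thread could mark the object deleted between the decrement and the
     * check. */
    static bool put_pa(struct pa *pa)
    {
        bool last;

        pthread_mutex_lock(&pa->lock);
        if (atomic_fetch_sub(&pa->count, 1) != 1) {
            pthread_mutex_unlock(&pa->lock);
            return false;
        }
        last = !pa->deleted;
        if (last)
            pa->deleted = 1;
        pthread_mutex_unlock(&pa->lock);
        return last;
    }

    int main(void)
    {
        struct pa pa = { .lock = PTHREAD_MUTEX_INITIALIZER, .count = 2, .deleted = 0 };

        printf("%d %d\n", put_pa(&pa), put_pa(&pa));    /* 0 then 1 */
        return 0;
    }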
| @@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, | |||
| 4121 | ext4_get_group_no_and_offset(sb, goal, &group, &block); | 4126 | ext4_get_group_no_and_offset(sb, goal, &group, &block); |
| 4122 | 4127 | ||
| 4123 | /* set up allocation goals */ | 4128 | /* set up allocation goals */ |
| 4124 | ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1); | 4129 | ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); |
| 4125 | ac->ac_status = AC_STATUS_CONTINUE; | 4130 | ac->ac_status = AC_STATUS_CONTINUE; |
| 4126 | ac->ac_sb = sb; | 4131 | ac->ac_sb = sb; |
| 4127 | ac->ac_inode = ar->inode; | 4132 | ac->ac_inode = ar->inode; |
| @@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
| 4663 | * blocks at the beginning or the end unless we are explicitly | 4668 | * blocks at the beginning or the end unless we are explicitly |
| 4664 | * requested to avoid doing so. | 4669 | * requested to avoid doing so. |
| 4665 | */ | 4670 | */ |
| 4666 | overflow = block & (sbi->s_cluster_ratio - 1); | 4671 | overflow = EXT4_PBLK_COFF(sbi, block); |
| 4667 | if (overflow) { | 4672 | if (overflow) { |
| 4668 | if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { | 4673 | if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { |
| 4669 | overflow = sbi->s_cluster_ratio - overflow; | 4674 | overflow = sbi->s_cluster_ratio - overflow; |
| @@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
| 4677 | count += overflow; | 4682 | count += overflow; |
| 4678 | } | 4683 | } |
| 4679 | } | 4684 | } |
| 4680 | overflow = count & (sbi->s_cluster_ratio - 1); | 4685 | overflow = EXT4_LBLK_COFF(sbi, count); |
| 4681 | if (overflow) { | 4686 | if (overflow) { |
| 4682 | if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { | 4687 | if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { |
| 4683 | if (count > overflow) | 4688 | if (count > overflow) |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c977f4e4e63b..1f7784de05b6 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb) | |||
| 792 | } | 792 | } |
| 793 | 793 | ||
| 794 | ext4_es_unregister_shrinker(sbi); | 794 | ext4_es_unregister_shrinker(sbi); |
| 795 | del_timer(&sbi->s_err_report); | 795 | del_timer_sync(&sbi->s_err_report); |
| 796 | ext4_release_system_zone(sb); | 796 | ext4_release_system_zone(sb); |
| 797 | ext4_mb_release(sb); | 797 | ext4_mb_release(sb); |
| 798 | ext4_ext_release(sb); | 798 | ext4_ext_release(sb); |
| @@ -3316,11 +3316,19 @@ int ext4_calculate_overhead(struct super_block *sb) | |||
| 3316 | } | 3316 | } |
| 3317 | 3317 | ||
| 3318 | 3318 | ||
| 3319 | static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) | 3319 | static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb) |
| 3320 | { | 3320 | { |
| 3321 | ext4_fsblk_t resv_clusters; | 3321 | ext4_fsblk_t resv_clusters; |
| 3322 | 3322 | ||
| 3323 | /* | 3323 | /* |
| 3324 | * There's no need to reserve anything when we aren't using extents. | ||
| 3325 | * The space estimates are exact, there are no unwritten extents, | ||
| 3326 | * hole punching doesn't need new metadata... This is needed especially | ||
| 3327 | * to keep ext2/3 backward compatibility. | ||
| 3328 | */ | ||
| 3329 | if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) | ||
| 3330 | return 0; | ||
| 3331 | /* | ||
| 3324 | * By default we reserve 2% or 4096 clusters, whichever is smaller. | 3332 | * By default we reserve 2% or 4096 clusters, whichever is smaller. |
| 3325 | * This should cover the situations where we can not afford to run | 3333 | * This should cover the situations where we can not afford to run |
| 3326 | * out of space like for example punch hole, or converting | 3334 | * out of space like for example punch hole, or converting |
| @@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) | |||
| 3328 | * allocation would require 1, or 2 blocks, higher numbers are | 3336 | * allocation would require 1, or 2 blocks, higher numbers are |
| 3329 | * very rare. | 3337 | * very rare. |
| 3330 | */ | 3338 | */ |
| 3331 | resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits; | 3339 | resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >> |
| 3340 | EXT4_SB(sb)->s_cluster_bits; | ||
| 3332 | 3341 | ||
| 3333 | do_div(resv_clusters, 50); | 3342 | do_div(resv_clusters, 50); |
| 3334 | resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); | 3343 | resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); |
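ext4_calculate_resv_clusters() now takes the superblock so it can skip the reservation entirely on non-extent (ext2/3 compatible) filesystems; otherwise it still reserves 2% of the clusters, capped at 4096. A small standalone check of that arithmetic, with simplified types (not the ext4 code itself):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t resv_clusters(uint64_t blocks_count, unsigned cluster_bits,
                                  int has_extents)
    {
        uint64_t resv;

        if (!has_extents)
            return 0;                           /* nothing to reserve for ext2/3 */
        resv = blocks_count >> cluster_bits;    /* total clusters */
        resv /= 50;                             /* 2% */
        if (resv > 4096)
            resv = 4096;                        /* capped at 4096 clusters */
        return resv;
    }

    int main(void)
    {
        /* 1 TiB of 4k blocks, cluster ratio 1: 2% would be millions of
         * clusters, so the 4096 cap applies; a small 100k-block fs gets 2000. */
        printf("%llu %llu\n",
               (unsigned long long)resv_clusters(268435456ULL, 0, 1),
               (unsigned long long)resv_clusters(100000ULL, 0, 1));
        return 0;
    }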
| @@ -4071,10 +4080,10 @@ no_journal: | |||
| 4071 | "available"); | 4080 | "available"); |
| 4072 | } | 4081 | } |
| 4073 | 4082 | ||
| 4074 | err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi)); | 4083 | err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb)); |
| 4075 | if (err) { | 4084 | if (err) { |
| 4076 | ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " | 4085 | ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " |
| 4077 | "reserved pool", ext4_calculate_resv_clusters(sbi)); | 4086 | "reserved pool", ext4_calculate_resv_clusters(sb)); |
| 4078 | goto failed_mount4a; | 4087 | goto failed_mount4a; |
| 4079 | } | 4088 | } |
| 4080 | 4089 | ||
| @@ -4184,7 +4193,7 @@ failed_mount_wq: | |||
| 4184 | } | 4193 | } |
| 4185 | failed_mount3: | 4194 | failed_mount3: |
| 4186 | ext4_es_unregister_shrinker(sbi); | 4195 | ext4_es_unregister_shrinker(sbi); |
| 4187 | del_timer(&sbi->s_err_report); | 4196 | del_timer_sync(&sbi->s_err_report); |
| 4188 | if (sbi->s_flex_groups) | 4197 | if (sbi->s_flex_groups) |
| 4189 | ext4_kvfree(sbi->s_flex_groups); | 4198 | ext4_kvfree(sbi->s_flex_groups); |
| 4190 | percpu_counter_destroy(&sbi->s_freeclusters_counter); | 4199 | percpu_counter_destroy(&sbi->s_freeclusters_counter); |
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index b7fc035a6943..73f3e4ee4037 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
| @@ -986,6 +986,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, | |||
| 986 | { | 986 | { |
| 987 | struct file *file = iocb->ki_filp; | 987 | struct file *file = iocb->ki_filp; |
| 988 | struct inode *inode = file->f_mapping->host; | 988 | struct inode *inode = file->f_mapping->host; |
| 989 | struct address_space *mapping = inode->i_mapping; | ||
| 989 | struct gfs2_inode *ip = GFS2_I(inode); | 990 | struct gfs2_inode *ip = GFS2_I(inode); |
| 990 | struct gfs2_holder gh; | 991 | struct gfs2_holder gh; |
| 991 | int rv; | 992 | int rv; |
| @@ -1006,6 +1007,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, | |||
| 1006 | if (rv != 1) | 1007 | if (rv != 1) |
| 1007 | goto out; /* dio not valid, fall back to buffered i/o */ | 1008 | goto out; /* dio not valid, fall back to buffered i/o */ |
| 1008 | 1009 | ||
| 1010 | /* | ||
| 1011 | * Now since we are holding a deferred (CW) lock at this point, you | ||
| 1012 | * might be wondering why this is ever needed. There is a case however | ||
| 1013 | * where we've granted a deferred local lock against a cached exclusive | ||
| 1014 | * glock. That is ok provided all granted local locks are deferred, but | ||
| 1015 | * it also means that it is possible to encounter pages which are | ||
| 1016 | * cached and possibly also mapped. So here we check for that and sort | ||
| 1017 | * them out ahead of the dio. The glock state machine will take care of | ||
| 1018 | * everything else. | ||
| 1019 | * | ||
| 1020 | * If in fact the cached glock state (gl->gl_state) is deferred (CW) in | ||
| 1021 | * the first place, mapping->nr_pages will always be zero. | ||
| 1022 | * the first place, mapping->nrpages will always be zero. | ||
| 1023 | if (mapping->nrpages) { | ||
| 1024 | loff_t lstart = offset & (PAGE_CACHE_SIZE - 1); | ||
| 1025 | loff_t len = iov_length(iov, nr_segs); | ||
| 1026 | loff_t end = PAGE_ALIGN(offset + len) - 1; | ||
| 1027 | |||
| 1028 | rv = 0; | ||
| 1029 | if (len == 0) | ||
| 1030 | goto out; | ||
| 1031 | if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) | ||
| 1032 | unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len); | ||
| 1033 | rv = filemap_write_and_wait_range(mapping, lstart, end); | ||
| 1034 | if (rv) | ||
| 1035 | return rv; | ||
| 1036 | truncate_inode_pages_range(mapping, lstart, end); | ||
| 1037 | } | ||
| 1038 | |||
| 1009 | rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 1039 | rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
| 1010 | offset, nr_segs, gfs2_get_block_direct, | 1040 | offset, nr_segs, gfs2_get_block_direct, |
| 1011 | NULL, NULL, 0); | 1041 | NULL, NULL, 0); |
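The new gfs2_direct_IO block writes back and truncates any cached pages over the I/O range before handing off to __blockdev_direct_IO, since a deferred glock can coexist with pages cached under a previously held exclusive lock. A standalone sketch of the page-rounding such a flush needs to cover the request, using PAGE_SIZE in place of PAGE_CACHE_SIZE (this illustrates the intent, not the exact expressions in the hunk):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL

    /* Page-aligned byte range covering [offset, offset + len): the span a
     * writeback-and-invalidate pass would need to hit before direct I/O. */
    static void dio_flush_range(uint64_t offset, uint64_t len,
                                uint64_t *start, uint64_t *end)
    {
        *start = offset & ~(PAGE_SIZE - 1);                              /* round down */
        *end = ((offset + len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) - 1;  /* round up, inclusive */
    }

    int main(void)
    {
        uint64_t start, end;

        dio_flush_range(5000, 10000, &start, &end);     /* crosses page boundaries */
        printf("flush [%llu, %llu]\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }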
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index c8420f7e4db6..6f7a47c05259 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
| @@ -1655,6 +1655,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh) | |||
| 1655 | struct task_struct *gh_owner = NULL; | 1655 | struct task_struct *gh_owner = NULL; |
| 1656 | char flags_buf[32]; | 1656 | char flags_buf[32]; |
| 1657 | 1657 | ||
| 1658 | rcu_read_lock(); | ||
| 1658 | if (gh->gh_owner_pid) | 1659 | if (gh->gh_owner_pid) |
| 1659 | gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); | 1660 | gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); |
| 1660 | gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n", | 1661 | gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n", |
| @@ -1664,6 +1665,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh) | |||
| 1664 | gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, | 1665 | gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, |
| 1665 | gh_owner ? gh_owner->comm : "(ended)", | 1666 | gh_owner ? gh_owner->comm : "(ended)", |
| 1666 | (void *)gh->gh_ip); | 1667 | (void *)gh->gh_ip); |
| 1668 | rcu_read_unlock(); | ||
| 1667 | return 0; | 1669 | return 0; |
| 1668 | } | 1670 | } |
| 1669 | 1671 | ||
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index db908f697139..f88dcd925010 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
| @@ -192,8 +192,11 @@ static void inode_go_sync(struct gfs2_glock *gl) | |||
| 192 | 192 | ||
| 193 | if (ip && !S_ISREG(ip->i_inode.i_mode)) | 193 | if (ip && !S_ISREG(ip->i_inode.i_mode)) |
| 194 | ip = NULL; | 194 | ip = NULL; |
| 195 | if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) | 195 | if (ip) { |
| 196 | unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); | 196 | if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) |
| 197 | unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); | ||
| 198 | inode_dio_wait(&ip->i_inode); | ||
| 199 | } | ||
| 197 | if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) | 200 | if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) |
| 198 | return; | 201 | return; |
| 199 | 202 | ||
| @@ -410,6 +413,9 @@ static int inode_go_lock(struct gfs2_holder *gh) | |||
| 410 | return error; | 413 | return error; |
| 411 | } | 414 | } |
| 412 | 415 | ||
| 416 | if (gh->gh_state != LM_ST_DEFERRED) | ||
| 417 | inode_dio_wait(&ip->i_inode); | ||
| 418 | |||
| 413 | if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && | 419 | if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && |
| 414 | (gl->gl_state == LM_ST_EXCLUSIVE) && | 420 | (gl->gl_state == LM_ST_EXCLUSIVE) && |
| 415 | (gh->gh_state == LM_ST_EXCLUSIVE)) { | 421 | (gh->gh_state == LM_ST_EXCLUSIVE)) { |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 610613fb65b5..9dcb9777a5f8 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
| @@ -551,10 +551,10 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) | |||
| 551 | struct buffer_head *bh = bd->bd_bh; | 551 | struct buffer_head *bh = bd->bd_bh; |
| 552 | struct gfs2_glock *gl = bd->bd_gl; | 552 | struct gfs2_glock *gl = bd->bd_gl; |
| 553 | 553 | ||
| 554 | gfs2_remove_from_ail(bd); | ||
| 555 | bd->bd_bh = NULL; | ||
| 556 | bh->b_private = NULL; | 554 | bh->b_private = NULL; |
| 557 | bd->bd_blkno = bh->b_blocknr; | 555 | bd->bd_blkno = bh->b_blocknr; |
| 556 | gfs2_remove_from_ail(bd); /* drops ref on bh */ | ||
| 557 | bd->bd_bh = NULL; | ||
| 558 | bd->bd_ops = &gfs2_revoke_lops; | 558 | bd->bd_ops = &gfs2_revoke_lops; |
| 559 | sdp->sd_log_num_revoke++; | 559 | sdp->sd_log_num_revoke++; |
| 560 | atomic_inc(&gl->gl_revokes); | 560 | atomic_inc(&gl->gl_revokes); |
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 932415050540..52f177be3bf8 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c | |||
| @@ -258,6 +258,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int | |||
| 258 | struct address_space *mapping = bh->b_page->mapping; | 258 | struct address_space *mapping = bh->b_page->mapping; |
| 259 | struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); | 259 | struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); |
| 260 | struct gfs2_bufdata *bd = bh->b_private; | 260 | struct gfs2_bufdata *bd = bh->b_private; |
| 261 | int was_pinned = 0; | ||
| 261 | 262 | ||
| 262 | if (test_clear_buffer_pinned(bh)) { | 263 | if (test_clear_buffer_pinned(bh)) { |
| 263 | trace_gfs2_pin(bd, 0); | 264 | trace_gfs2_pin(bd, 0); |
| @@ -273,12 +274,16 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int | |||
| 273 | tr->tr_num_databuf_rm++; | 274 | tr->tr_num_databuf_rm++; |
| 274 | } | 275 | } |
| 275 | tr->tr_touched = 1; | 276 | tr->tr_touched = 1; |
| 277 | was_pinned = 1; | ||
| 276 | brelse(bh); | 278 | brelse(bh); |
| 277 | } | 279 | } |
| 278 | if (bd) { | 280 | if (bd) { |
| 279 | spin_lock(&sdp->sd_ail_lock); | 281 | spin_lock(&sdp->sd_ail_lock); |
| 280 | if (bd->bd_tr) { | 282 | if (bd->bd_tr) { |
| 281 | gfs2_trans_add_revoke(sdp, bd); | 283 | gfs2_trans_add_revoke(sdp, bd); |
| 284 | } else if (was_pinned) { | ||
| 285 | bh->b_private = NULL; | ||
| 286 | kmem_cache_free(gfs2_bufdata_cachep, bd); | ||
| 282 | } | 287 | } |
| 283 | spin_unlock(&sdp->sd_ail_lock); | 288 | spin_unlock(&sdp->sd_ail_lock); |
| 284 | } | 289 | } |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 82303b474958..52fa88314f5c 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
| @@ -1366,8 +1366,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, | |||
| 1366 | if (IS_ERR(s)) | 1366 | if (IS_ERR(s)) |
| 1367 | goto error_bdev; | 1367 | goto error_bdev; |
| 1368 | 1368 | ||
| 1369 | if (s->s_root) | 1369 | if (s->s_root) { |
| 1370 | /* | ||
| 1371 | * s_umount nests inside bd_mutex during | ||
| 1372 | * __invalidate_device(). blkdev_put() acquires | ||
| 1373 | * bd_mutex and can't be called under s_umount. Drop | ||
| 1374 | * s_umount temporarily. This is safe as we're | ||
| 1375 | * holding an active reference. | ||
| 1376 | */ | ||
| 1377 | up_write(&s->s_umount); | ||
| 1370 | blkdev_put(bdev, mode); | 1378 | blkdev_put(bdev, mode); |
| 1379 | down_write(&s->s_umount); | ||
| 1380 | } | ||
| 1371 | 1381 | ||
| 1372 | memset(&args, 0, sizeof(args)); | 1382 | memset(&args, 0, sizeof(args)); |
| 1373 | args.ar_quota = GFS2_QUOTA_DEFAULT; | 1383 | args.ar_quota = GFS2_QUOTA_DEFAULT; |
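The gfs2_mount change documents a lock-ordering constraint: blkdev_put() takes bd_mutex, which nests outside s_umount, so s_umount is dropped around the call and re-taken afterwards; this is safe because an active superblock reference is held. A rough userspace analogue of "release the held lock before taking one that must nest outside it" (pthread names are illustrative, not the VFS locks):

    #include <pthread.h>

    static pthread_rwlock_t s_umount = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t bd_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Called with s_umount held for write.  Another path takes bd_mutex first
     * and s_umount second, so taking bd_mutex here while still holding
     * s_umount would invert the order and could deadlock.  Drop s_umount
     * around the bd_mutex section and re-acquire it afterwards. */
    static void put_bdev_under_s_umount(void)
    {
        pthread_rwlock_unlock(&s_umount);

        pthread_mutex_lock(&bd_mutex);
        /* blkdev_put()-style work would happen here */
        pthread_mutex_unlock(&bd_mutex);

        pthread_rwlock_wrlock(&s_umount);
    }

    int main(void)
    {
        pthread_rwlock_wrlock(&s_umount);
        put_bdev_under_s_umount();
        pthread_rwlock_unlock(&s_umount);
        return 0;
    }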
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 52032647dd4a..5fa344afb49a 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
| @@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) | |||
| 702 | read_lock(&journal->j_state_lock); | 702 | read_lock(&journal->j_state_lock); |
| 703 | #ifdef CONFIG_JBD2_DEBUG | 703 | #ifdef CONFIG_JBD2_DEBUG |
| 704 | if (!tid_geq(journal->j_commit_request, tid)) { | 704 | if (!tid_geq(journal->j_commit_request, tid)) { |
| 705 | printk(KERN_EMERG | 705 | printk(KERN_ERR |
| 706 | "%s: error: j_commit_request=%d, tid=%d\n", | 706 | "%s: error: j_commit_request=%d, tid=%d\n", |
| 707 | __func__, journal->j_commit_request, tid); | 707 | __func__, journal->j_commit_request, tid); |
| 708 | } | 708 | } |
| @@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) | |||
| 718 | } | 718 | } |
| 719 | read_unlock(&journal->j_state_lock); | 719 | read_unlock(&journal->j_state_lock); |
| 720 | 720 | ||
| 721 | if (unlikely(is_journal_aborted(journal))) { | 721 | if (unlikely(is_journal_aborted(journal))) |
| 722 | printk(KERN_EMERG "journal commit I/O error\n"); | ||
| 723 | err = -EIO; | 722 | err = -EIO; |
| 724 | } | ||
| 725 | return err; | 723 | return err; |
| 726 | } | 724 | } |
| 727 | 725 | ||
| @@ -1527,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal) | |||
| 1527 | if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && | 1525 | if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && |
| 1528 | JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { | 1526 | JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { |
| 1529 | /* Can't have checksum v1 and v2 on at the same time! */ | 1527 | /* Can't have checksum v1 and v2 on at the same time! */ |
| 1530 | printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 " | 1528 | printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 " |
| 1531 | "at the same time!\n"); | 1529 | "at the same time!\n"); |
| 1532 | goto out; | 1530 | goto out; |
| 1533 | } | 1531 | } |
| 1534 | 1532 | ||
| 1535 | if (!jbd2_verify_csum_type(journal, sb)) { | 1533 | if (!jbd2_verify_csum_type(journal, sb)) { |
| 1536 | printk(KERN_ERR "JBD: Unknown checksum type\n"); | 1534 | printk(KERN_ERR "JBD2: Unknown checksum type\n"); |
| 1537 | goto out; | 1535 | goto out; |
| 1538 | } | 1536 | } |
| 1539 | 1537 | ||
| @@ -1541,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal) | |||
| 1541 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { | 1539 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { |
| 1542 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); | 1540 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); |
| 1543 | if (IS_ERR(journal->j_chksum_driver)) { | 1541 | if (IS_ERR(journal->j_chksum_driver)) { |
| 1544 | printk(KERN_ERR "JBD: Cannot load crc32c driver.\n"); | 1542 | printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); |
| 1545 | err = PTR_ERR(journal->j_chksum_driver); | 1543 | err = PTR_ERR(journal->j_chksum_driver); |
| 1546 | journal->j_chksum_driver = NULL; | 1544 | journal->j_chksum_driver = NULL; |
| 1547 | goto out; | 1545 | goto out; |
| @@ -1550,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal) | |||
| 1550 | 1548 | ||
| 1551 | /* Check superblock checksum */ | 1549 | /* Check superblock checksum */ |
| 1552 | if (!jbd2_superblock_csum_verify(journal, sb)) { | 1550 | if (!jbd2_superblock_csum_verify(journal, sb)) { |
| 1553 | printk(KERN_ERR "JBD: journal checksum error\n"); | 1551 | printk(KERN_ERR "JBD2: journal checksum error\n"); |
| 1554 | goto out; | 1552 | goto out; |
| 1555 | } | 1553 | } |
| 1556 | 1554 | ||
| @@ -1836,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat, | |||
| 1836 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", | 1834 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", |
| 1837 | 0, 0); | 1835 | 0, 0); |
| 1838 | if (IS_ERR(journal->j_chksum_driver)) { | 1836 | if (IS_ERR(journal->j_chksum_driver)) { |
| 1839 | printk(KERN_ERR "JBD: Cannot load crc32c " | 1837 | printk(KERN_ERR "JBD2: Cannot load crc32c " |
| 1840 | "driver.\n"); | 1838 | "driver.\n"); |
| 1841 | journal->j_chksum_driver = NULL; | 1839 | journal->j_chksum_driver = NULL; |
| 1842 | return 0; | 1840 | return 0; |
| @@ -2645,7 +2643,7 @@ static void __exit journal_exit(void) | |||
| 2645 | #ifdef CONFIG_JBD2_DEBUG | 2643 | #ifdef CONFIG_JBD2_DEBUG |
| 2646 | int n = atomic_read(&nr_journal_heads); | 2644 | int n = atomic_read(&nr_journal_heads); |
| 2647 | if (n) | 2645 | if (n) |
| 2648 | printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n); | 2646 | printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n); |
| 2649 | #endif | 2647 | #endif |
| 2650 | jbd2_remove_jbd_stats_proc_entry(); | 2648 | jbd2_remove_jbd_stats_proc_entry(); |
| 2651 | jbd2_journal_destroy_caches(); | 2649 | jbd2_journal_destroy_caches(); |
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 3929c50428b1..3b6bb19d60b1 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c | |||
| @@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal, | |||
| 594 | be32_to_cpu(tmp->h_sequence))) { | 594 | be32_to_cpu(tmp->h_sequence))) { |
| 595 | brelse(obh); | 595 | brelse(obh); |
| 596 | success = -EIO; | 596 | success = -EIO; |
| 597 | printk(KERN_ERR "JBD: Invalid " | 597 | printk(KERN_ERR "JBD2: Invalid " |
| 598 | "checksum recovering " | 598 | "checksum recovering " |
| 599 | "block %llu in log\n", | 599 | "block %llu in log\n", |
| 600 | blocknr); | 600 | blocknr); |
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 7aa9a32573bb..8360674c85bc 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
| @@ -932,7 +932,7 @@ repeat: | |||
| 932 | jbd2_alloc(jh2bh(jh)->b_size, | 932 | jbd2_alloc(jh2bh(jh)->b_size, |
| 933 | GFP_NOFS); | 933 | GFP_NOFS); |
| 934 | if (!frozen_buffer) { | 934 | if (!frozen_buffer) { |
| 935 | printk(KERN_EMERG | 935 | printk(KERN_ERR |
| 936 | "%s: OOM for frozen_buffer\n", | 936 | "%s: OOM for frozen_buffer\n", |
| 937 | __func__); | 937 | __func__); |
| 938 | JBUFFER_TRACE(jh, "oom!"); | 938 | JBUFFER_TRACE(jh, "oom!"); |
| @@ -1166,7 +1166,7 @@ repeat: | |||
| 1166 | if (!jh->b_committed_data) { | 1166 | if (!jh->b_committed_data) { |
| 1167 | committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); | 1167 | committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); |
| 1168 | if (!committed_data) { | 1168 | if (!committed_data) { |
| 1169 | printk(KERN_EMERG "%s: No memory for committed data\n", | 1169 | printk(KERN_ERR "%s: No memory for committed data\n", |
| 1170 | __func__); | 1170 | __func__); |
| 1171 | err = -ENOMEM; | 1171 | err = -ENOMEM; |
| 1172 | goto out; | 1172 | goto out; |
| @@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
| 1290 | * once a transaction -bzzz | 1290 | * once a transaction -bzzz |
| 1291 | */ | 1291 | */ |
| 1292 | jh->b_modified = 1; | 1292 | jh->b_modified = 1; |
| 1293 | J_ASSERT_JH(jh, handle->h_buffer_credits > 0); | 1293 | if (handle->h_buffer_credits <= 0) { |
| 1294 | ret = -ENOSPC; | ||
| 1295 | goto out_unlock_bh; | ||
| 1296 | } | ||
| 1294 | handle->h_buffer_credits--; | 1297 | handle->h_buffer_credits--; |
| 1295 | } | 1298 | } |
| 1296 | 1299 | ||
| @@ -1305,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
| 1305 | JBUFFER_TRACE(jh, "fastpath"); | 1308 | JBUFFER_TRACE(jh, "fastpath"); |
| 1306 | if (unlikely(jh->b_transaction != | 1309 | if (unlikely(jh->b_transaction != |
| 1307 | journal->j_running_transaction)) { | 1310 | journal->j_running_transaction)) { |
| 1308 | printk(KERN_EMERG "JBD: %s: " | 1311 | printk(KERN_ERR "JBD2: %s: " |
| 1309 | "jh->b_transaction (%llu, %p, %u) != " | 1312 | "jh->b_transaction (%llu, %p, %u) != " |
| 1310 | "journal->j_running_transaction (%p, %u)", | 1313 | "journal->j_running_transaction (%p, %u)", |
| 1311 | journal->j_devname, | 1314 | journal->j_devname, |
| @@ -1332,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
| 1332 | JBUFFER_TRACE(jh, "already on other transaction"); | 1335 | JBUFFER_TRACE(jh, "already on other transaction"); |
| 1333 | if (unlikely(jh->b_transaction != | 1336 | if (unlikely(jh->b_transaction != |
| 1334 | journal->j_committing_transaction)) { | 1337 | journal->j_committing_transaction)) { |
| 1335 | printk(KERN_EMERG "JBD: %s: " | 1338 | printk(KERN_ERR "JBD2: %s: " |
| 1336 | "jh->b_transaction (%llu, %p, %u) != " | 1339 | "jh->b_transaction (%llu, %p, %u) != " |
| 1337 | "journal->j_committing_transaction (%p, %u)", | 1340 | "journal->j_committing_transaction (%p, %u)", |
| 1338 | journal->j_devname, | 1341 | journal->j_devname, |
| @@ -1345,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
| 1345 | ret = -EINVAL; | 1348 | ret = -EINVAL; |
| 1346 | } | 1349 | } |
| 1347 | if (unlikely(jh->b_next_transaction != transaction)) { | 1350 | if (unlikely(jh->b_next_transaction != transaction)) { |
| 1348 | printk(KERN_EMERG "JBD: %s: " | 1351 | printk(KERN_ERR "JBD2: %s: " |
| 1349 | "jh->b_next_transaction (%llu, %p, %u) != " | 1352 | "jh->b_next_transaction (%llu, %p, %u) != " |
| 1350 | "transaction (%p, %u)", | 1353 | "transaction (%p, %u)", |
| 1351 | journal->j_devname, | 1354 | journal->j_devname, |
| @@ -1373,7 +1376,6 @@ out_unlock_bh: | |||
| 1373 | jbd2_journal_put_journal_head(jh); | 1376 | jbd2_journal_put_journal_head(jh); |
| 1374 | out: | 1377 | out: |
| 1375 | JBUFFER_TRACE(jh, "exit"); | 1378 | JBUFFER_TRACE(jh, "exit"); |
| 1376 | WARN_ON(ret); /* All errors are bugs, so dump the stack */ | ||
| 1377 | return ret; | 1379 | return ret; |
| 1378 | } | 1380 | } |
| 1379 | 1381 | ||
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index b8e93a40a5d3..78c3c2097787 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c | |||
| @@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi) | |||
| 443 | pstore_get_records(0); | 443 | pstore_get_records(0); |
| 444 | 444 | ||
| 445 | kmsg_dump_register(&pstore_dumper); | 445 | kmsg_dump_register(&pstore_dumper); |
| 446 | pstore_register_console(); | 446 | |
| 447 | pstore_register_ftrace(); | 447 | if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) { |
| 448 | pstore_register_console(); | ||
| 449 | pstore_register_ftrace(); | ||
| 450 | } | ||
| 448 | 451 | ||
| 449 | if (pstore_update_ms >= 0) { | 452 | if (pstore_update_ms >= 0) { |
| 450 | pstore_timer.expires = jiffies + | 453 | pstore_timer.expires = jiffies + |
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index b94f93685093..35e7d08fe629 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c | |||
| @@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
| 609 | struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; | 609 | struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; |
| 610 | struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; | 610 | struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; |
| 611 | struct sysfs_open_file *of; | 611 | struct sysfs_open_file *of; |
| 612 | bool has_read, has_write, has_mmap; | 612 | bool has_read, has_write; |
| 613 | int error = -EACCES; | 613 | int error = -EACCES; |
| 614 | 614 | ||
| 615 | /* need attr_sd for attr and ops, its parent for kobj */ | 615 | /* need attr_sd for attr and ops, its parent for kobj */ |
| @@ -621,7 +621,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
| 621 | 621 | ||
| 622 | has_read = battr->read || battr->mmap; | 622 | has_read = battr->read || battr->mmap; |
| 623 | has_write = battr->write || battr->mmap; | 623 | has_write = battr->write || battr->mmap; |
| 624 | has_mmap = battr->mmap; | ||
| 625 | } else { | 624 | } else { |
| 626 | const struct sysfs_ops *ops = sysfs_file_ops(attr_sd); | 625 | const struct sysfs_ops *ops = sysfs_file_ops(attr_sd); |
| 627 | 626 | ||
| @@ -633,7 +632,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
| 633 | 632 | ||
| 634 | has_read = ops->show; | 633 | has_read = ops->show; |
| 635 | has_write = ops->store; | 634 | has_write = ops->store; |
| 636 | has_mmap = false; | ||
| 637 | } | 635 | } |
| 638 | 636 | ||
| 639 | /* check perms and supported operations */ | 637 | /* check perms and supported operations */ |
| @@ -661,9 +659,9 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
| 661 | * open file has a separate mutex, it's okay as long as those don't | 659 | * open file has a separate mutex, it's okay as long as those don't |
| 662 | * happen on the same file. At this point, we can't easily give | 660 | * happen on the same file. At this point, we can't easily give |
| 663 | * each file a separate locking class. Let's differentiate on | 661 | * each file a separate locking class. Let's differentiate on |
| 664 | * whether the file has mmap or not for now. | 662 | * whether the file is bin or not for now. |
| 665 | */ | 663 | */ |
| 666 | if (has_mmap) | 664 | if (sysfs_is_bin(attr_sd)) |
| 667 | mutex_init(&of->mutex); | 665 | mutex_init(&of->mutex); |
| 668 | else | 666 | else |
| 669 | mutex_init(&of->mutex); | 667 | mutex_init(&of->mutex); |
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c index 739e0a52deda..5549d69ddb45 100644 --- a/fs/xfs/xfs_attr_remote.c +++ b/fs/xfs/xfs_attr_remote.c | |||
| @@ -110,7 +110,7 @@ xfs_attr3_rmt_verify( | |||
| 110 | if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt)) | 110 | if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt)) |
| 111 | return false; | 111 | return false; |
| 112 | if (be32_to_cpu(rmt->rm_offset) + | 112 | if (be32_to_cpu(rmt->rm_offset) + |
| 113 | be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX) | 113 | be32_to_cpu(rmt->rm_bytes) > XATTR_SIZE_MAX) |
| 114 | return false; | 114 | return false; |
| 115 | if (rmt->rm_owner == 0) | 115 | if (rmt->rm_owner == 0) |
| 116 | return false; | 116 | return false; |
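The xfs_attr3_rmt_verify change relaxes the bound from ">=" to ">": a remote attribute whose offset plus length lands exactly on XATTR_SIZE_MAX is still valid, and only values that actually exceed the limit should be rejected. A tiny standalone illustration of that boundary, assuming the usual 64k limit (the helper is hypothetical, not the XFS verifier):

    #include <stdio.h>
    #include <stdint.h>

    #define XATTR_SIZE_MAX 65536u

    /* Reject only when the remote value extends past the limit; a value that
     * ends exactly at XATTR_SIZE_MAX is the largest legal one. */
    static int rmt_range_ok(uint32_t offset, uint32_t bytes)
    {
        return offset + bytes <= XATTR_SIZE_MAX;
    }

    int main(void)
    {
        printf("%d %d\n",
               rmt_range_ok(65536 - 4096, 4096),    /* ok: ends at the limit */
               rmt_range_ok(65536 - 4096, 4097));   /* rejected: one byte over */
        return 0;
    }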
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3ef11b22e750..3b2c14b6f0fb 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
| @@ -1635,7 +1635,7 @@ xfs_bmap_last_extent( | |||
| 1635 | * blocks at the end of the file which do not start at the previous data block, | 1635 | * blocks at the end of the file which do not start at the previous data block, |
| 1636 | * we will try to align the new blocks at stripe unit boundaries. | 1636 | * we will try to align the new blocks at stripe unit boundaries. |
| 1637 | * | 1637 | * |
| 1638 | * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be | 1638 | * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be |
| 1639 | * at, or past the EOF. | 1639 | * at, or past the EOF. |
| 1640 | */ | 1640 | */ |
| 1641 | STATIC int | 1641 | STATIC int |
| @@ -1650,9 +1650,14 @@ xfs_bmap_isaeof( | |||
| 1650 | bma->aeof = 0; | 1650 | bma->aeof = 0; |
| 1651 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, | 1651 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, |
| 1652 | &is_empty); | 1652 | &is_empty); |
| 1653 | if (error || is_empty) | 1653 | if (error) |
| 1654 | return error; | 1654 | return error; |
| 1655 | 1655 | ||
| 1656 | if (is_empty) { | ||
| 1657 | bma->aeof = 1; | ||
| 1658 | return 0; | ||
| 1659 | } | ||
| 1660 | |||
| 1656 | /* | 1661 | /* |
| 1657 | * Check if we are allocating or past the last extent, or at least into | 1662 | * Check if we are allocating or past the last extent, or at least into |
| 1658 | * the last delayed allocated extent. | 1663 | * the last delayed allocated extent. |
| @@ -3643,10 +3648,19 @@ xfs_bmap_btalloc( | |||
| 3643 | int isaligned; | 3648 | int isaligned; |
| 3644 | int tryagain; | 3649 | int tryagain; |
| 3645 | int error; | 3650 | int error; |
| 3651 | int stripe_align; | ||
| 3646 | 3652 | ||
| 3647 | ASSERT(ap->length); | 3653 | ASSERT(ap->length); |
| 3648 | 3654 | ||
| 3649 | mp = ap->ip->i_mount; | 3655 | mp = ap->ip->i_mount; |
| 3656 | |||
| 3657 | /* stripe alignment for allocation is determined by mount parameters */ | ||
| 3658 | stripe_align = 0; | ||
| 3659 | if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) | ||
| 3660 | stripe_align = mp->m_swidth; | ||
| 3661 | else if (mp->m_dalign) | ||
| 3662 | stripe_align = mp->m_dalign; | ||
| 3663 | |||
| 3650 | align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; | 3664 | align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; |
| 3651 | if (unlikely(align)) { | 3665 | if (unlikely(align)) { |
| 3652 | error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, | 3666 | error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, |
| @@ -3655,6 +3669,8 @@ xfs_bmap_btalloc( | |||
| 3655 | ASSERT(!error); | 3669 | ASSERT(!error); |
| 3656 | ASSERT(ap->length); | 3670 | ASSERT(ap->length); |
| 3657 | } | 3671 | } |
| 3672 | |||
| 3673 | |||
| 3658 | nullfb = *ap->firstblock == NULLFSBLOCK; | 3674 | nullfb = *ap->firstblock == NULLFSBLOCK; |
| 3659 | fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); | 3675 | fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); |
| 3660 | if (nullfb) { | 3676 | if (nullfb) { |
| @@ -3730,7 +3746,7 @@ xfs_bmap_btalloc( | |||
| 3730 | */ | 3746 | */ |
| 3731 | if (!ap->flist->xbf_low && ap->aeof) { | 3747 | if (!ap->flist->xbf_low && ap->aeof) { |
| 3732 | if (!ap->offset) { | 3748 | if (!ap->offset) { |
| 3733 | args.alignment = mp->m_dalign; | 3749 | args.alignment = stripe_align; |
| 3734 | atype = args.type; | 3750 | atype = args.type; |
| 3735 | isaligned = 1; | 3751 | isaligned = 1; |
| 3736 | /* | 3752 | /* |
| @@ -3755,13 +3771,13 @@ xfs_bmap_btalloc( | |||
| 3755 | * of minlen+alignment+slop doesn't go up | 3771 | * of minlen+alignment+slop doesn't go up |
| 3756 | * between the calls. | 3772 | * between the calls. |
| 3757 | */ | 3773 | */ |
| 3758 | if (blen > mp->m_dalign && blen <= args.maxlen) | 3774 | if (blen > stripe_align && blen <= args.maxlen) |
| 3759 | nextminlen = blen - mp->m_dalign; | 3775 | nextminlen = blen - stripe_align; |
| 3760 | else | 3776 | else |
| 3761 | nextminlen = args.minlen; | 3777 | nextminlen = args.minlen; |
| 3762 | if (nextminlen + mp->m_dalign > args.minlen + 1) | 3778 | if (nextminlen + stripe_align > args.minlen + 1) |
| 3763 | args.minalignslop = | 3779 | args.minalignslop = |
| 3764 | nextminlen + mp->m_dalign - | 3780 | nextminlen + stripe_align - |
| 3765 | args.minlen - 1; | 3781 | args.minlen - 1; |
| 3766 | else | 3782 | else |
| 3767 | args.minalignslop = 0; | 3783 | args.minalignslop = 0; |
| @@ -3783,7 +3799,7 @@ xfs_bmap_btalloc( | |||
| 3783 | */ | 3799 | */ |
| 3784 | args.type = atype; | 3800 | args.type = atype; |
| 3785 | args.fsbno = ap->blkno; | 3801 | args.fsbno = ap->blkno; |
| 3786 | args.alignment = mp->m_dalign; | 3802 | args.alignment = stripe_align; |
| 3787 | args.minlen = nextminlen; | 3803 | args.minlen = nextminlen; |
| 3788 | args.minalignslop = 0; | 3804 | args.minalignslop = 0; |
| 3789 | isaligned = 1; | 3805 | isaligned = 1; |
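xfs_bmap_btalloc now computes a single stripe_align value up front, preferring the stripe width when the swalloc mount option is in effect and falling back to the stripe unit, and uses it wherever m_dalign was previously hard-coded. A standalone sketch of just that selection (the field and flag names mirror the patch, but the struct below is a stand-in, not xfs_mount):

    #include <stdio.h>

    #define MOUNT_SWALLOC 0x1   /* stand-in for XFS_MOUNT_SWALLOC */

    struct mount_geom {
        unsigned swidth;    /* stripe width, in fs blocks */
        unsigned dalign;    /* stripe unit, in fs blocks */
        unsigned flags;
    };

    static unsigned stripe_align(const struct mount_geom *mp)
    {
        if (mp->swidth && (mp->flags & MOUNT_SWALLOC))
            return mp->swidth;  /* swalloc: align to the full stripe width */
        if (mp->dalign)
            return mp->dalign;  /* default: align to the stripe unit */
        return 0;               /* no alignment hint */
    }

    int main(void)
    {
        struct mount_geom raid  = { .swidth = 128, .dalign = 16, .flags = MOUNT_SWALLOC };
        struct mount_geom plain = { .swidth = 128, .dalign = 16, .flags = 0 };

        printf("%u %u\n", stripe_align(&raid), stripe_align(&plain));  /* 128 16 */
        return 0;
    }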
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 5887e41c0323..82e0dab46ee5 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
| @@ -287,6 +287,7 @@ xfs_bmapi_allocate( | |||
| 287 | INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker); | 287 | INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker); |
| 288 | queue_work(xfs_alloc_wq, &args->work); | 288 | queue_work(xfs_alloc_wq, &args->work); |
| 289 | wait_for_completion(&done); | 289 | wait_for_completion(&done); |
| 290 | destroy_work_on_stack(&args->work); | ||
| 290 | return args->result; | 291 | return args->result; |
| 291 | } | 292 | } |
| 292 | 293 | ||
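The one-line xfs_bmapi_allocate fix calls destroy_work_on_stack() only after wait_for_completion(), so the on-stack work item is torn down once the worker can no longer touch it. A rough userspace analogue of the lifetime rule being enforced: a stack object handed to another thread must stay alive until that thread is known to be finished with it (pthread names are illustrative, not the workqueue API):

    #include <pthread.h>
    #include <stdio.h>

    struct stack_work {
        int arg;
        int result;
    };

    static void *worker(void *p)
    {
        struct stack_work *w = p;   /* points into the submitter's stack frame */

        w->result = w->arg * 2;
        return NULL;
    }

    int main(void)
    {
        struct stack_work w = { .arg = 21 };
        pthread_t tid;

        pthread_create(&tid, NULL, worker, &w);
        /* The "wait_for_completion" step: only after the join may the
         * on-stack object be reused, destroyed, or go out of scope. */
        pthread_join(&tid, NULL);
        printf("%d\n", w.result);
        return 0;
    }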
| @@ -1187,7 +1188,12 @@ xfs_zero_remaining_bytes( | |||
| 1187 | XFS_BUF_UNWRITE(bp); | 1188 | XFS_BUF_UNWRITE(bp); |
| 1188 | XFS_BUF_READ(bp); | 1189 | XFS_BUF_READ(bp); |
| 1189 | XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); | 1190 | XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); |
| 1190 | xfsbdstrat(mp, bp); | 1191 | |
| 1192 | if (XFS_FORCED_SHUTDOWN(mp)) { | ||
| 1193 | error = XFS_ERROR(EIO); | ||
| 1194 | break; | ||
| 1195 | } | ||
| 1196 | xfs_buf_iorequest(bp); | ||
| 1191 | error = xfs_buf_iowait(bp); | 1197 | error = xfs_buf_iowait(bp); |
| 1192 | if (error) { | 1198 | if (error) { |
| 1193 | xfs_buf_ioerror_alert(bp, | 1199 | xfs_buf_ioerror_alert(bp, |
| @@ -1200,7 +1206,12 @@ xfs_zero_remaining_bytes( | |||
| 1200 | XFS_BUF_UNDONE(bp); | 1206 | XFS_BUF_UNDONE(bp); |
| 1201 | XFS_BUF_UNREAD(bp); | 1207 | XFS_BUF_UNREAD(bp); |
| 1202 | XFS_BUF_WRITE(bp); | 1208 | XFS_BUF_WRITE(bp); |
| 1203 | xfsbdstrat(mp, bp); | 1209 | |
| 1210 | if (XFS_FORCED_SHUTDOWN(mp)) { | ||
| 1211 | error = XFS_ERROR(EIO); | ||
| 1212 | break; | ||
| 1213 | } | ||
| 1214 | xfs_buf_iorequest(bp); | ||
| 1204 | error = xfs_buf_iowait(bp); | 1215 | error = xfs_buf_iowait(bp); |
| 1205 | if (error) { | 1216 | if (error) { |
| 1206 | xfs_buf_ioerror_alert(bp, | 1217 | xfs_buf_ioerror_alert(bp, |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index c7f0b77dcb00..afe7645e4b2b 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
| @@ -698,7 +698,11 @@ xfs_buf_read_uncached( | |||
| 698 | bp->b_flags |= XBF_READ; | 698 | bp->b_flags |= XBF_READ; |
| 699 | bp->b_ops = ops; | 699 | bp->b_ops = ops; |
| 700 | 700 | ||
| 701 | xfsbdstrat(target->bt_mount, bp); | 701 | if (XFS_FORCED_SHUTDOWN(target->bt_mount)) { |
| 702 | xfs_buf_relse(bp); | ||
| 703 | return NULL; | ||
| 704 | } | ||
| 705 | xfs_buf_iorequest(bp); | ||
| 702 | xfs_buf_iowait(bp); | 706 | xfs_buf_iowait(bp); |
| 703 | return bp; | 707 | return bp; |
| 704 | } | 708 | } |
| @@ -1089,7 +1093,7 @@ xfs_bioerror( | |||
| 1089 | * This is meant for userdata errors; metadata bufs come with | 1093 | * This is meant for userdata errors; metadata bufs come with |
| 1090 | * iodone functions attached, so that we can track down errors. | 1094 | * iodone functions attached, so that we can track down errors. |
| 1091 | */ | 1095 | */ |
| 1092 | STATIC int | 1096 | int |
| 1093 | xfs_bioerror_relse( | 1097 | xfs_bioerror_relse( |
| 1094 | struct xfs_buf *bp) | 1098 | struct xfs_buf *bp) |
| 1095 | { | 1099 | { |
| @@ -1152,7 +1156,7 @@ xfs_bwrite( | |||
| 1152 | ASSERT(xfs_buf_islocked(bp)); | 1156 | ASSERT(xfs_buf_islocked(bp)); |
| 1153 | 1157 | ||
| 1154 | bp->b_flags |= XBF_WRITE; | 1158 | bp->b_flags |= XBF_WRITE; |
| 1155 | bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); | 1159 | bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL); |
| 1156 | 1160 | ||
| 1157 | xfs_bdstrat_cb(bp); | 1161 | xfs_bdstrat_cb(bp); |
| 1158 | 1162 | ||
| @@ -1164,25 +1168,6 @@ xfs_bwrite( | |||
| 1164 | return error; | 1168 | return error; |
| 1165 | } | 1169 | } |
| 1166 | 1170 | ||
| 1167 | /* | ||
| 1168 | * Wrapper around bdstrat so that we can stop data from going to disk in case | ||
| 1169 | * we are shutting down the filesystem. Typically user data goes thru this | ||
| 1170 | * path; one of the exceptions is the superblock. | ||
| 1171 | */ | ||
| 1172 | void | ||
| 1173 | xfsbdstrat( | ||
| 1174 | struct xfs_mount *mp, | ||
| 1175 | struct xfs_buf *bp) | ||
| 1176 | { | ||
| 1177 | if (XFS_FORCED_SHUTDOWN(mp)) { | ||
| 1178 | trace_xfs_bdstrat_shut(bp, _RET_IP_); | ||
| 1179 | xfs_bioerror_relse(bp); | ||
| 1180 | return; | ||
| 1181 | } | ||
| 1182 | |||
| 1183 | xfs_buf_iorequest(bp); | ||
| 1184 | } | ||
| 1185 | |||
| 1186 | STATIC void | 1171 | STATIC void |
| 1187 | _xfs_buf_ioend( | 1172 | _xfs_buf_ioend( |
| 1188 | xfs_buf_t *bp, | 1173 | xfs_buf_t *bp, |
| @@ -1516,6 +1501,12 @@ xfs_wait_buftarg( | |||
| 1516 | struct xfs_buf *bp; | 1501 | struct xfs_buf *bp; |
| 1517 | bp = list_first_entry(&dispose, struct xfs_buf, b_lru); | 1502 | bp = list_first_entry(&dispose, struct xfs_buf, b_lru); |
| 1518 | list_del_init(&bp->b_lru); | 1503 | list_del_init(&bp->b_lru); |
| 1504 | if (bp->b_flags & XBF_WRITE_FAIL) { | ||
| 1505 | xfs_alert(btp->bt_mount, | ||
| 1506 | "Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n" | ||
| 1507 | "Please run xfs_repair to determine the extent of the problem.", | ||
| 1508 | (long long)bp->b_bn); | ||
| 1509 | } | ||
| 1519 | xfs_buf_rele(bp); | 1510 | xfs_buf_rele(bp); |
| 1520 | } | 1511 | } |
| 1521 | if (loop++ != 0) | 1512 | if (loop++ != 0) |
| @@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit( | |||
| 1799 | 1790 | ||
| 1800 | blk_start_plug(&plug); | 1791 | blk_start_plug(&plug); |
| 1801 | list_for_each_entry_safe(bp, n, io_list, b_list) { | 1792 | list_for_each_entry_safe(bp, n, io_list, b_list) { |
| 1802 | bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); | 1793 | bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL); |
| 1803 | bp->b_flags |= XBF_WRITE; | 1794 | bp->b_flags |= XBF_WRITE; |
| 1804 | 1795 | ||
| 1805 | if (!wait) { | 1796 | if (!wait) { |
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index e65683361017..1cf21a4a9f22 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h | |||
| @@ -45,6 +45,7 @@ typedef enum { | |||
| 45 | #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ | 45 | #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ |
| 46 | #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ | 46 | #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ |
| 47 | #define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ | 47 | #define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ |
| 48 | #define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */ | ||
| 48 | 49 | ||
| 49 | /* I/O hints for the BIO layer */ | 50 | /* I/O hints for the BIO layer */ |
| 50 | #define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ | 51 | #define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ |
| @@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t; | |||
| 70 | { XBF_ASYNC, "ASYNC" }, \ | 71 | { XBF_ASYNC, "ASYNC" }, \ |
| 71 | { XBF_DONE, "DONE" }, \ | 72 | { XBF_DONE, "DONE" }, \ |
| 72 | { XBF_STALE, "STALE" }, \ | 73 | { XBF_STALE, "STALE" }, \ |
| 74 | { XBF_WRITE_FAIL, "WRITE_FAIL" }, \ | ||
| 73 | { XBF_SYNCIO, "SYNCIO" }, \ | 75 | { XBF_SYNCIO, "SYNCIO" }, \ |
| 74 | { XBF_FUA, "FUA" }, \ | 76 | { XBF_FUA, "FUA" }, \ |
| 75 | { XBF_FLUSH, "FLUSH" }, \ | 77 | { XBF_FLUSH, "FLUSH" }, \ |
| @@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t; | |||
| 80 | { _XBF_DELWRI_Q, "DELWRI_Q" }, \ | 82 | { _XBF_DELWRI_Q, "DELWRI_Q" }, \ |
| 81 | { _XBF_COMPOUND, "COMPOUND" } | 83 | { _XBF_COMPOUND, "COMPOUND" } |
| 82 | 84 | ||
| 85 | |||
| 83 | /* | 86 | /* |
| 84 | * Internal state flags. | 87 | * Internal state flags. |
| 85 | */ | 88 | */ |
| @@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *); | |||
| 269 | 272 | ||
| 270 | /* Buffer Read and Write Routines */ | 273 | /* Buffer Read and Write Routines */ |
| 271 | extern int xfs_bwrite(struct xfs_buf *bp); | 274 | extern int xfs_bwrite(struct xfs_buf *bp); |
| 272 | |||
| 273 | extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); | ||
| 274 | |||
| 275 | extern void xfs_buf_ioend(xfs_buf_t *, int); | 275 | extern void xfs_buf_ioend(xfs_buf_t *, int); |
| 276 | extern void xfs_buf_ioerror(xfs_buf_t *, int); | 276 | extern void xfs_buf_ioerror(xfs_buf_t *, int); |
| 277 | extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); | 277 | extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); |
| @@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, | |||
| 282 | #define xfs_buf_zero(bp, off, len) \ | 282 | #define xfs_buf_zero(bp, off, len) \ |
| 283 | xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) | 283 | xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) |
| 284 | 284 | ||
| 285 | extern int xfs_bioerror_relse(struct xfs_buf *); | ||
| 286 | |||
| 285 | static inline int xfs_buf_geterror(xfs_buf_t *bp) | 287 | static inline int xfs_buf_geterror(xfs_buf_t *bp) |
| 286 | { | 288 | { |
| 287 | return bp ? bp->b_error : ENOMEM; | 289 | return bp ? bp->b_error : ENOMEM; |
| @@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void); | |||
| 301 | 303 | ||
| 302 | #define XFS_BUF_ZEROFLAGS(bp) \ | 304 | #define XFS_BUF_ZEROFLAGS(bp) \ |
| 303 | ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ | 305 | ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ |
| 304 | XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) | 306 | XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \ |
| 307 | XBF_WRITE_FAIL)) | ||
| 305 | 308 | ||
| 306 | void xfs_buf_stale(struct xfs_buf *bp); | 309 | void xfs_buf_stale(struct xfs_buf *bp); |
| 307 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) | 310 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) |
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index a64f67ba25d3..2227b9b050bb 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
| @@ -496,6 +496,14 @@ xfs_buf_item_unpin( | |||
| 496 | } | 496 | } |
| 497 | } | 497 | } |
| 498 | 498 | ||
| 499 | /* | ||
| 500 | * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30 | ||
| 501 | * seconds so as to not spam logs too much on repeated detection of the same | ||
| 502 | * buffer being bad. | ||
| 503 | */ | ||
| 504 | |||
| 505 | DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10); | ||
| 506 | |||
| 499 | STATIC uint | 507 | STATIC uint |
| 500 | xfs_buf_item_push( | 508 | xfs_buf_item_push( |
| 501 | struct xfs_log_item *lip, | 509 | struct xfs_log_item *lip, |
| @@ -524,6 +532,14 @@ xfs_buf_item_push( | |||
| 524 | 532 | ||
| 525 | trace_xfs_buf_item_push(bip); | 533 | trace_xfs_buf_item_push(bip); |
| 526 | 534 | ||
| 535 | /* has a previous flush failed due to IO errors? */ | ||
| 536 | if ((bp->b_flags & XBF_WRITE_FAIL) && | ||
| 537 | ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) { | ||
| 538 | xfs_warn(bp->b_target->bt_mount, | ||
| 539 | "Detected failing async write on buffer block 0x%llx. Retrying async write.\n", | ||
| 540 | (long long)bp->b_bn); | ||
| 541 | } | ||
| 542 | |||
| 527 | if (!xfs_buf_delwri_queue(bp, buffer_list)) | 543 | if (!xfs_buf_delwri_queue(bp, buffer_list)) |
| 528 | rval = XFS_ITEM_FLUSHING; | 544 | rval = XFS_ITEM_FLUSHING; |
| 529 | xfs_buf_unlock(bp); | 545 | xfs_buf_unlock(bp); |
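The xfs_buf_item_push hunk warns when it re-queues a buffer whose previous async write failed, and the DEFINE_RATELIMIT_STATE above bounds that to roughly 10 messages per 30 seconds. A standalone sketch of a simple interval/burst limiter in that spirit (an illustration only, not the kernel's ___ratelimit implementation):

    #include <stdio.h>
    #include <time.h>

    struct ratelimit {
        time_t window_start;    /* start of the current interval */
        int interval;           /* interval length, in seconds */
        int burst;              /* max messages per interval */
        int printed;            /* messages emitted in this interval */
    };

    /* Returns 1 when the caller may print, 0 when the message should be
     * suppressed; the budget resets whenever a new interval starts. */
    static int ratelimit_ok(struct ratelimit *rl)
    {
        time_t now = time(NULL);

        if (now - rl->window_start >= rl->interval) {
            rl->window_start = now;
            rl->printed = 0;
        }
        if (rl->printed >= rl->burst)
            return 0;
        rl->printed++;
        return 1;
    }

    int main(void)
    {
        struct ratelimit rl = { .window_start = 0, .interval = 30, .burst = 10 };
        int i, shown = 0;

        for (i = 0; i < 100; i++)
            if (ratelimit_ok(&rl))
                shown++;
        printf("shown %d of 100\n", shown);     /* 10 within one interval */
        return 0;
    }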
| @@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks( | |||
| 1096 | 1112 | ||
| 1097 | xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ | 1113 | xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ |
| 1098 | 1114 | ||
| 1099 | if (!XFS_BUF_ISSTALE(bp)) { | 1115 | if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) { |
| 1100 | bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE; | 1116 | bp->b_flags |= XBF_WRITE | XBF_ASYNC | |
| 1117 | XBF_DONE | XBF_WRITE_FAIL; | ||
| 1101 | xfs_buf_iorequest(bp); | 1118 | xfs_buf_iorequest(bp); |
| 1102 | } else { | 1119 | } else { |
| 1103 | xfs_buf_relse(bp); | 1120 | xfs_buf_relse(bp); |
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 56369d4509d5..48c7d18f68c3 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c | |||
| @@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup( | |||
| 2067 | */ | 2067 | */ |
| 2068 | int /* error */ | 2068 | int /* error */ |
| 2069 | xfs_dir2_node_removename( | 2069 | xfs_dir2_node_removename( |
| 2070 | xfs_da_args_t *args) /* operation arguments */ | 2070 | struct xfs_da_args *args) /* operation arguments */ |
| 2071 | { | 2071 | { |
| 2072 | xfs_da_state_blk_t *blk; /* leaf block */ | 2072 | struct xfs_da_state_blk *blk; /* leaf block */ |
| 2073 | int error; /* error return value */ | 2073 | int error; /* error return value */ |
| 2074 | int rval; /* operation return value */ | 2074 | int rval; /* operation return value */ |
| 2075 | xfs_da_state_t *state; /* btree cursor */ | 2075 | struct xfs_da_state *state; /* btree cursor */ |
| 2076 | 2076 | ||
| 2077 | trace_xfs_dir2_node_removename(args); | 2077 | trace_xfs_dir2_node_removename(args); |
| 2078 | 2078 | ||
| @@ -2084,19 +2084,18 @@ xfs_dir2_node_removename( | |||
| 2084 | state->mp = args->dp->i_mount; | 2084 | state->mp = args->dp->i_mount; |
| 2085 | state->blocksize = state->mp->m_dirblksize; | 2085 | state->blocksize = state->mp->m_dirblksize; |
| 2086 | state->node_ents = state->mp->m_dir_node_ents; | 2086 | state->node_ents = state->mp->m_dir_node_ents; |
| 2087 | /* | 2087 | |
| 2088 | * Look up the entry we're deleting, set up the cursor. | 2088 | /* Look up the entry we're deleting, set up the cursor. */ |
| 2089 | */ | ||
| 2090 | error = xfs_da3_node_lookup_int(state, &rval); | 2089 | error = xfs_da3_node_lookup_int(state, &rval); |
| 2091 | if (error) | 2090 | if (error) |
| 2092 | rval = error; | 2091 | goto out_free; |
| 2093 | /* | 2092 | |
| 2094 | * Didn't find it, upper layer screwed up. | 2093 | /* Didn't find it, upper layer screwed up. */ |
| 2095 | */ | ||
| 2096 | if (rval != EEXIST) { | 2094 | if (rval != EEXIST) { |
| 2097 | xfs_da_state_free(state); | 2095 | error = rval; |
| 2098 | return rval; | 2096 | goto out_free; |
| 2099 | } | 2097 | } |
| 2098 | |||
| 2100 | blk = &state->path.blk[state->path.active - 1]; | 2099 | blk = &state->path.blk[state->path.active - 1]; |
| 2101 | ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); | 2100 | ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); |
| 2102 | ASSERT(state->extravalid); | 2101 | ASSERT(state->extravalid); |
| @@ -2107,7 +2106,7 @@ xfs_dir2_node_removename( | |||
| 2107 | error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, | 2106 | error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, |
| 2108 | &state->extrablk, &rval); | 2107 | &state->extrablk, &rval); |
| 2109 | if (error) | 2108 | if (error) |
| 2110 | return error; | 2109 | goto out_free; |
| 2111 | /* | 2110 | /* |
| 2112 | * Fix the hash values up the btree. | 2111 | * Fix the hash values up the btree. |
| 2113 | */ | 2112 | */ |
| @@ -2122,6 +2121,7 @@ xfs_dir2_node_removename( | |||
| 2122 | */ | 2121 | */ |
| 2123 | if (!error) | 2122 | if (!error) |
| 2124 | error = xfs_dir2_node_to_leaf(state); | 2123 | error = xfs_dir2_node_to_leaf(state); |
| 2124 | out_free: | ||
| 2125 | xfs_da_state_free(state); | 2125 | xfs_da_state_free(state); |
| 2126 | return error; | 2126 | return error; |
| 2127 | } | 2127 | } |
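The xfs_dir2_node_removename rework funnels every error path through a single out_free label so the btree cursor state is always freed exactly once, instead of mixing early returns with a trailing free. A minimal standalone sketch of that goto-cleanup idiom (the helpers below are placeholders, not the XFS functions):

    #include <stdio.h>
    #include <stdlib.h>

    struct cursor { int dummy; };

    static int lookup(struct cursor *c)       { (void)c; return 0; }   /* placeholder */
    static int remove_entry(struct cursor *c) { (void)c; return 0; }   /* placeholder */

    static int removename(void)
    {
        struct cursor *state;
        int error;

        state = calloc(1, sizeof(*state));
        if (!state)
            return -1;

        error = lookup(state);
        if (error)
            goto out_free;          /* every failure takes the same exit */

        error = remove_entry(state);
        if (error)
            goto out_free;

    out_free:
        free(state);                /* freed exactly once on all paths */
        return error;
    }

    int main(void)
    {
        printf("%d\n", removename());
        return 0;
    }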
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 27e0e544e963..104455b8046c 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c | |||
| @@ -618,7 +618,8 @@ xfs_setattr_nonsize( | |||
| 618 | } | 618 | } |
| 619 | if (!gid_eq(igid, gid)) { | 619 | if (!gid_eq(igid, gid)) { |
| 620 | if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { | 620 | if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { |
| 621 | ASSERT(!XFS_IS_PQUOTA_ON(mp)); | 621 | ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) || |
| 622 | !XFS_IS_PQUOTA_ON(mp)); | ||
| 622 | ASSERT(mask & ATTR_GID); | 623 | ASSERT(mask & ATTR_GID); |
| 623 | ASSERT(gdqp); | 624 | ASSERT(gdqp); |
| 624 | olddquot2 = xfs_qm_vop_chown(tp, ip, | 625 | olddquot2 = xfs_qm_vop_chown(tp, ip, |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b6b669df40f3..eae16920655b 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
| @@ -193,7 +193,10 @@ xlog_bread_noalign( | |||
| 193 | bp->b_io_length = nbblks; | 193 | bp->b_io_length = nbblks; |
| 194 | bp->b_error = 0; | 194 | bp->b_error = 0; |
| 195 | 195 | ||
| 196 | xfsbdstrat(log->l_mp, bp); | 196 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) |
| 197 | return XFS_ERROR(EIO); | ||
| 198 | |||
| 199 | xfs_buf_iorequest(bp); | ||
| 197 | error = xfs_buf_iowait(bp); | 200 | error = xfs_buf_iowait(bp); |
| 198 | if (error) | 201 | if (error) |
| 199 | xfs_buf_ioerror_alert(bp, __func__); | 202 | xfs_buf_ioerror_alert(bp, __func__); |
| @@ -4397,7 +4400,13 @@ xlog_do_recover( | |||
| 4397 | XFS_BUF_READ(bp); | 4400 | XFS_BUF_READ(bp); |
| 4398 | XFS_BUF_UNASYNC(bp); | 4401 | XFS_BUF_UNASYNC(bp); |
| 4399 | bp->b_ops = &xfs_sb_buf_ops; | 4402 | bp->b_ops = &xfs_sb_buf_ops; |
| 4400 | xfsbdstrat(log->l_mp, bp); | 4403 | |
| 4404 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) { | ||
| 4405 | xfs_buf_relse(bp); | ||
| 4406 | return XFS_ERROR(EIO); | ||
| 4407 | } | ||
| 4408 | |||
| 4409 | xfs_buf_iorequest(bp); | ||
| 4401 | error = xfs_buf_iowait(bp); | 4410 | error = xfs_buf_iowait(bp); |
| 4402 | if (error) { | 4411 | if (error) { |
| 4403 | xfs_buf_ioerror_alert(bp, __func__); | 4412 | xfs_buf_ioerror_alert(bp, __func__); |
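
Both log-recovery hunks above drop the xfsbdstrat() wrapper and open-code its behaviour: return EIO immediately if the filesystem is already shut down, otherwise submit the buffer I/O and wait for it synchronously. A rough user-space sketch of that shutdown-check, submit, then wait shape; the names are illustrative only and not the XFS buffer API:

#include <errno.h>
#include <stdbool.h>

struct fs     { bool shut_down; };
struct buffer { int error; };

static bool fs_is_shut_down(const struct fs *fs) { return fs->shut_down; }
static void submit_io(struct buffer *bp)         { bp->error = 0; /* queue the I/O */ }
static int  wait_for_io(const struct buffer *bp) { return bp->error; }

/* Bail out before issuing I/O against a filesystem that is already dead. */
static int read_buffer_sync(const struct fs *fs, struct buffer *bp)
{
        if (fs_is_shut_down(fs))
                return -EIO;

        submit_io(bp);
        return wait_for_io(bp);
}

int main(void)
{
        struct fs fs = { .shut_down = false };
        struct buffer bp = { 0 };

        return read_buffer_sync(&fs, &bp);
}
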
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 14a4996cfec6..dd88f0e27bd8 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
| @@ -134,8 +134,6 @@ xfs_qm_dqpurge( | |||
| 134 | { | 134 | { |
| 135 | struct xfs_mount *mp = dqp->q_mount; | 135 | struct xfs_mount *mp = dqp->q_mount; |
| 136 | struct xfs_quotainfo *qi = mp->m_quotainfo; | 136 | struct xfs_quotainfo *qi = mp->m_quotainfo; |
| 137 | struct xfs_dquot *gdqp = NULL; | ||
| 138 | struct xfs_dquot *pdqp = NULL; | ||
| 139 | 137 | ||
| 140 | xfs_dqlock(dqp); | 138 | xfs_dqlock(dqp); |
| 141 | if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { | 139 | if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { |
| @@ -143,21 +141,6 @@ xfs_qm_dqpurge( | |||
| 143 | return EAGAIN; | 141 | return EAGAIN; |
| 144 | } | 142 | } |
| 145 | 143 | ||
| 146 | /* | ||
| 147 | * If this quota has a hint attached, prepare for releasing it now. | ||
| 148 | */ | ||
| 149 | gdqp = dqp->q_gdquot; | ||
| 150 | if (gdqp) { | ||
| 151 | xfs_dqlock(gdqp); | ||
| 152 | dqp->q_gdquot = NULL; | ||
| 153 | } | ||
| 154 | |||
| 155 | pdqp = dqp->q_pdquot; | ||
| 156 | if (pdqp) { | ||
| 157 | xfs_dqlock(pdqp); | ||
| 158 | dqp->q_pdquot = NULL; | ||
| 159 | } | ||
| 160 | |||
| 161 | dqp->dq_flags |= XFS_DQ_FREEING; | 144 | dqp->dq_flags |= XFS_DQ_FREEING; |
| 162 | 145 | ||
| 163 | xfs_dqflock(dqp); | 146 | xfs_dqflock(dqp); |
| @@ -206,11 +189,47 @@ xfs_qm_dqpurge( | |||
| 206 | XFS_STATS_DEC(xs_qm_dquot_unused); | 189 | XFS_STATS_DEC(xs_qm_dquot_unused); |
| 207 | 190 | ||
| 208 | xfs_qm_dqdestroy(dqp); | 191 | xfs_qm_dqdestroy(dqp); |
| 192 | return 0; | ||
| 193 | } | ||
| 194 | |||
| 195 | /* | ||
| 196 | * Release the group or project dquot pointers the user dquots may be carrying | ||
| 197 | * around as a hint, and proceed to purge the user dquot cache if requested. | ||
| 198 | */ | ||
| 199 | STATIC int | ||
| 200 | xfs_qm_dqpurge_hints( | ||
| 201 | struct xfs_dquot *dqp, | ||
| 202 | void *data) | ||
| 203 | { | ||
| 204 | struct xfs_dquot *gdqp = NULL; | ||
| 205 | struct xfs_dquot *pdqp = NULL; | ||
| 206 | uint flags = *((uint *)data); | ||
| 207 | |||
| 208 | xfs_dqlock(dqp); | ||
| 209 | if (dqp->dq_flags & XFS_DQ_FREEING) { | ||
| 210 | xfs_dqunlock(dqp); | ||
| 211 | return EAGAIN; | ||
| 212 | } | ||
| 213 | |||
| 214 | /* If this quota has a hint attached, prepare for releasing it now */ | ||
| 215 | gdqp = dqp->q_gdquot; | ||
| 216 | if (gdqp) | ||
| 217 | dqp->q_gdquot = NULL; | ||
| 218 | |||
| 219 | pdqp = dqp->q_pdquot; | ||
| 220 | if (pdqp) | ||
| 221 | dqp->q_pdquot = NULL; | ||
| 222 | |||
| 223 | xfs_dqunlock(dqp); | ||
| 209 | 224 | ||
| 210 | if (gdqp) | 225 | if (gdqp) |
| 211 | xfs_qm_dqput(gdqp); | 226 | xfs_qm_dqrele(gdqp); |
| 212 | if (pdqp) | 227 | if (pdqp) |
| 213 | xfs_qm_dqput(pdqp); | 228 | xfs_qm_dqrele(pdqp); |
| 229 | |||
| 230 | if (flags & XFS_QMOPT_UQUOTA) | ||
| 231 | return xfs_qm_dqpurge(dqp, NULL); | ||
| 232 | |||
| 214 | return 0; | 233 | return 0; |
| 215 | } | 234 | } |
| 216 | 235 | ||
| @@ -222,8 +241,18 @@ xfs_qm_dqpurge_all( | |||
| 222 | struct xfs_mount *mp, | 241 | struct xfs_mount *mp, |
| 223 | uint flags) | 242 | uint flags) |
| 224 | { | 243 | { |
| 225 | if (flags & XFS_QMOPT_UQUOTA) | 244 | /* |
| 226 | xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL); | 245 | * We have to release the group/project dquot hint(s) from the user dquot
| 246 | * first if they are there; otherwise we would run into an infinite | ||
| 247 | * loop while walking through the radix tree to purge other types of dquots, | ||
| 248 | * since their refcount is not zero if the user dquot refers to them | ||
| 249 | * as a hint. | ||
| 250 | * | ||
| 251 | * Calling the special xfs_qm_dqpurge_hints() will end up going through the | ||
| 252 | * general xfs_qm_dqpurge() against the user dquot cache if requested. | ||
| 253 | */ | ||
| 254 | xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags); | ||
| 255 | |||
| 227 | if (flags & XFS_QMOPT_GQUOTA) | 256 | if (flags & XFS_QMOPT_GQUOTA) |
| 228 | xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); | 257 | xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); |
| 229 | if (flags & XFS_QMOPT_PQUOTA) | 258 | if (flags & XFS_QMOPT_PQUOTA) |
| @@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach( | |||
| 2082 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 2111 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
| 2083 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 2112 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
| 2084 | 2113 | ||
| 2085 | if (udqp) { | 2114 | if (udqp && XFS_IS_UQUOTA_ON(mp)) { |
| 2086 | ASSERT(ip->i_udquot == NULL); | 2115 | ASSERT(ip->i_udquot == NULL); |
| 2087 | ASSERT(XFS_IS_UQUOTA_ON(mp)); | ||
| 2088 | ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); | 2116 | ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); |
| 2089 | 2117 | ||
| 2090 | ip->i_udquot = xfs_qm_dqhold(udqp); | 2118 | ip->i_udquot = xfs_qm_dqhold(udqp); |
| 2091 | xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); | 2119 | xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); |
| 2092 | } | 2120 | } |
| 2093 | if (gdqp) { | 2121 | if (gdqp && XFS_IS_GQUOTA_ON(mp)) { |
| 2094 | ASSERT(ip->i_gdquot == NULL); | 2122 | ASSERT(ip->i_gdquot == NULL); |
| 2095 | ASSERT(XFS_IS_GQUOTA_ON(mp)); | ||
| 2096 | ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); | 2123 | ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); |
| 2097 | ip->i_gdquot = xfs_qm_dqhold(gdqp); | 2124 | ip->i_gdquot = xfs_qm_dqhold(gdqp); |
| 2098 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); | 2125 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); |
| 2099 | } | 2126 | } |
| 2100 | if (pdqp) { | 2127 | if (pdqp && XFS_IS_PQUOTA_ON(mp)) { |
| 2101 | ASSERT(ip->i_pdquot == NULL); | 2128 | ASSERT(ip->i_pdquot == NULL); |
| 2102 | ASSERT(XFS_IS_PQUOTA_ON(mp)); | ||
| 2103 | ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); | 2129 | ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); |
| 2104 | 2130 | ||
| 2105 | ip->i_pdquot = xfs_qm_dqhold(pdqp); | 2131 | ip->i_pdquot = xfs_qm_dqhold(pdqp); |
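
The new xfs_qm_dqpurge_hints() earlier in this file's diff is driven through xfs_qm_dquot_walk(), with the quota flags handed over via the opaque void *data parameter. A generic, self-contained illustration of that walker-plus-context callback pattern; the names here are made up for illustration and are not the XFS API:

#include <stdio.h>

typedef int (*walk_fn)(int item, void *data);

/* Walk a set of items, passing an opaque context pointer to the callback. */
static int walk(const int *items, int n, walk_fn fn, void *data)
{
        for (int i = 0; i < n; i++) {
                int err = fn(items[i], data);

                if (err)
                        return err;
        }
        return 0;
}

static int purge_if_flagged(int item, void *data)
{
        unsigned int flags = *(unsigned int *)data;

        if (flags & 0x1)
                printf("purging item %d\n", item);
        return 0;
}

int main(void)
{
        int items[] = { 1, 2, 3 };
        unsigned int flags = 0x1;

        return walk(items, 3, purge_if_flagged, &flags);
}
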
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index c035d11b7734..647b6f1d8923 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
| @@ -314,7 +314,18 @@ xfs_trans_read_buf_map( | |||
| 314 | ASSERT(bp->b_iodone == NULL); | 314 | ASSERT(bp->b_iodone == NULL); |
| 315 | XFS_BUF_READ(bp); | 315 | XFS_BUF_READ(bp); |
| 316 | bp->b_ops = ops; | 316 | bp->b_ops = ops; |
| 317 | xfsbdstrat(tp->t_mountp, bp); | 317 | |
| 318 | /* | ||
| 319 | * XXX(hch): clean up the error handling here to be less | ||
| 320 | * of a mess.. | ||
| 321 | */ | ||
| 322 | if (XFS_FORCED_SHUTDOWN(mp)) { | ||
| 323 | trace_xfs_bdstrat_shut(bp, _RET_IP_); | ||
| 324 | xfs_bioerror_relse(bp); | ||
| 325 | } else { | ||
| 326 | xfs_buf_iorequest(bp); | ||
| 327 | } | ||
| 328 | |||
| 318 | error = xfs_buf_iowait(bp); | 329 | error = xfs_buf_iowait(bp); |
| 319 | if (error) { | 330 | if (error) { |
| 320 | xfs_buf_ioerror_alert(bp, __func__); | 331 | xfs_buf_ioerror_alert(bp, __func__); |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c602c7718421..ddabed1f51c2 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
| @@ -169,7 +169,8 @@ struct acpi_device_flags { | |||
| 169 | u32 ejectable:1; | 169 | u32 ejectable:1; |
| 170 | u32 power_manageable:1; | 170 | u32 power_manageable:1; |
| 171 | u32 match_driver:1; | 171 | u32 match_driver:1; |
| 172 | u32 reserved:27; | 172 | u32 no_hotplug:1; |
| 173 | u32 reserved:26; | ||
| 173 | }; | 174 | }; |
| 174 | 175 | ||
| 175 | /* File System */ | 176 | /* File System */ |
| @@ -344,6 +345,7 @@ extern struct kobject *acpi_kobj; | |||
| 344 | extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); | 345 | extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); |
| 345 | void acpi_bus_private_data_handler(acpi_handle, void *); | 346 | void acpi_bus_private_data_handler(acpi_handle, void *); |
| 346 | int acpi_bus_get_private_data(acpi_handle, void **); | 347 | int acpi_bus_get_private_data(acpi_handle, void **); |
| 348 | void acpi_bus_no_hotplug(acpi_handle handle); | ||
| 347 | extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); | 349 | extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); |
| 348 | extern int register_acpi_notifier(struct notifier_block *); | 350 | extern int register_acpi_notifier(struct notifier_block *); |
| 349 | extern int unregister_acpi_notifier(struct notifier_block *); | 351 | extern int unregister_acpi_notifier(struct notifier_block *); |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f330d28e4d0e..db0923458940 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
| @@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) | |||
| 217 | #endif | 217 | #endif |
| 218 | 218 | ||
| 219 | #ifndef pte_accessible | 219 | #ifndef pte_accessible |
| 220 | # define pte_accessible(pte) ((void)(pte),1) | 220 | # define pte_accessible(mm, pte) ((void)(pte), 1) |
| 221 | #endif | 221 | #endif |
| 222 | 222 | ||
| 223 | #ifndef flush_tlb_fix_spurious_fault | 223 | #ifndef flush_tlb_fix_spurious_fault |
| @@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) | |||
| 599 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 599 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 600 | barrier(); | 600 | barrier(); |
| 601 | #endif | 601 | #endif |
| 602 | if (pmd_none(pmdval)) | 602 | if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) |
| 603 | return 1; | 603 | return 1; |
| 604 | if (unlikely(pmd_bad(pmdval))) { | 604 | if (unlikely(pmd_bad(pmdval))) { |
| 605 | if (!pmd_trans_huge(pmdval)) | 605 | pmd_clear_bad(pmd); |
| 606 | pmd_clear_bad(pmd); | ||
| 607 | return 1; | 606 | return 1; |
| 608 | } | 607 | } |
| 609 | return 0; | 608 | return 0; |
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index ddf2b420ac8f..1cd3f5d767a8 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h | |||
| @@ -3,13 +3,11 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/thread_info.h> | 4 | #include <linux/thread_info.h> |
| 5 | 5 | ||
| 6 | /* | 6 | #define PREEMPT_ENABLED (0) |
| 7 | * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users | 7 | |
| 8 | * that think a non-zero value indicates we cannot preempt. | ||
| 9 | */ | ||
| 10 | static __always_inline int preempt_count(void) | 8 | static __always_inline int preempt_count(void) |
| 11 | { | 9 | { |
| 12 | return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; | 10 | return current_thread_info()->preempt_count; |
| 13 | } | 11 | } |
| 14 | 12 | ||
| 15 | static __always_inline int *preempt_count_ptr(void) | 13 | static __always_inline int *preempt_count_ptr(void) |
| @@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void) | |||
| 17 | return ¤t_thread_info()->preempt_count; | 15 | return ¤t_thread_info()->preempt_count; |
| 18 | } | 16 | } |
| 19 | 17 | ||
| 20 | /* | ||
| 21 | * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the | ||
| 22 | * alternative is loosing a reschedule. Better schedule too often -- also this | ||
| 23 | * should be a very rare operation. | ||
| 24 | */ | ||
| 25 | static __always_inline void preempt_count_set(int pc) | 18 | static __always_inline void preempt_count_set(int pc) |
| 26 | { | 19 | { |
| 27 | *preempt_count_ptr() = pc; | 20 | *preempt_count_ptr() = pc; |
| @@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc) | |||
| 41 | task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ | 34 | task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ |
| 42 | } while (0) | 35 | } while (0) |
| 43 | 36 | ||
| 44 | /* | ||
| 45 | * We fold the NEED_RESCHED bit into the preempt count such that | ||
| 46 | * preempt_enable() can decrement and test for needing to reschedule with a | ||
| 47 | * single instruction. | ||
| 48 | * | ||
| 49 | * We invert the actual bit, so that when the decrement hits 0 we know we both | ||
| 50 | * need to resched (the bit is cleared) and can resched (no preempt count). | ||
| 51 | */ | ||
| 52 | |||
| 53 | static __always_inline void set_preempt_need_resched(void) | 37 | static __always_inline void set_preempt_need_resched(void) |
| 54 | { | 38 | { |
| 55 | *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED; | ||
| 56 | } | 39 | } |
| 57 | 40 | ||
| 58 | static __always_inline void clear_preempt_need_resched(void) | 41 | static __always_inline void clear_preempt_need_resched(void) |
| 59 | { | 42 | { |
| 60 | *preempt_count_ptr() |= PREEMPT_NEED_RESCHED; | ||
| 61 | } | 43 | } |
| 62 | 44 | ||
| 63 | static __always_inline bool test_preempt_need_resched(void) | 45 | static __always_inline bool test_preempt_need_resched(void) |
| 64 | { | 46 | { |
| 65 | return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); | 47 | return false; |
| 66 | } | 48 | } |
| 67 | 49 | ||
| 68 | /* | 50 | /* |
| @@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val) | |||
| 81 | 63 | ||
| 82 | static __always_inline bool __preempt_count_dec_and_test(void) | 64 | static __always_inline bool __preempt_count_dec_and_test(void) |
| 83 | { | 65 | { |
| 84 | return !--*preempt_count_ptr(); | 66 | /* |
| 67 | * Because load-store architectures cannot do per-cpu atomic | ||
| 68 | * operations, we cannot use PREEMPT_NEED_RESCHED because it might get | ||
| 69 | * lost. | ||
| 70 | */ | ||
| 71 | return !--*preempt_count_ptr() && tif_need_resched(); | ||
| 85 | } | 72 | } |
| 86 | 73 | ||
| 87 | /* | 74 | /* |
| @@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void) | |||
| 89 | */ | 76 | */ |
| 90 | static __always_inline bool should_resched(void) | 77 | static __always_inline bool should_resched(void) |
| 91 | { | 78 | { |
| 92 | return unlikely(!*preempt_count_ptr()); | 79 | return unlikely(!preempt_count() && tif_need_resched()); |
| 93 | } | 80 | } |
| 94 | 81 | ||
| 95 | #ifdef CONFIG_PREEMPT | 82 | #ifdef CONFIG_PREEMPT |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 87578c109e48..49376aec2fbb 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
| @@ -600,7 +600,7 @@ | |||
| 600 | {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 600 | {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 601 | {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 601 | {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
| 602 | {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 602 | {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
| 603 | {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 603 | {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
| 604 | {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 604 | {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 605 | {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 605 | {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 606 | {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 606 | {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index 669fef5c745a..3e0fbe441763 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h | |||
| @@ -3,6 +3,6 @@ | |||
| 3 | 3 | ||
| 4 | #include <uapi/linux/auxvec.h> | 4 | #include <uapi/linux/auxvec.h> |
| 5 | 5 | ||
| 6 | #define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ | 6 | #define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */ |
| 7 | /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ | 7 | /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ |
| 8 | #endif /* _LINUX_AUXVEC_H */ | 8 | #endif /* _LINUX_AUXVEC_H */ |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 0e23c26485f4..9b503376738f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -418,6 +418,7 @@ enum { | |||
| 418 | ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ | 418 | ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ |
| 419 | ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ | 419 | ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ |
| 420 | ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ | 420 | ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ |
| 421 | ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ | ||
| 421 | 422 | ||
| 422 | /* DMA mask for user DMA control: User visible values; DO NOT | 423 | /* DMA mask for user DMA control: User visible values; DO NOT |
| 423 | renumber */ | 424 | renumber */ |
diff --git a/include/linux/lockref.h b/include/linux/lockref.h index c8929c3832db..4bfde0e99ed5 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | 19 | ||
| 20 | #define USE_CMPXCHG_LOCKREF \ | 20 | #define USE_CMPXCHG_LOCKREF \ |
| 21 | (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ | 21 | (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ |
| 22 | IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) | 22 | IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4) |
| 23 | 23 | ||
| 24 | struct lockref { | 24 | struct lockref { |
| 25 | union { | 25 | union { |
diff --git a/include/linux/math64.h b/include/linux/math64.h index 69ed5f5e9f6e..c45c089bfdac 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h | |||
| @@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) | |||
| 133 | return ret; | 133 | return ret; |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) | ||
| 137 | |||
| 138 | #ifndef mul_u64_u32_shr | ||
| 139 | static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) | ||
| 140 | { | ||
| 141 | return (u64)(((unsigned __int128)a * mul) >> shift); | ||
| 142 | } | ||
| 143 | #endif /* mul_u64_u32_shr */ | ||
| 144 | |||
| 145 | #else | ||
| 146 | |||
| 147 | #ifndef mul_u64_u32_shr | ||
| 148 | static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) | ||
| 149 | { | ||
| 150 | u32 ah, al; | ||
| 151 | u64 ret; | ||
| 152 | |||
| 153 | al = a; | ||
| 154 | ah = a >> 32; | ||
| 155 | |||
| 156 | ret = ((u64)al * mul) >> shift; | ||
| 157 | if (ah) | ||
| 158 | ret += ((u64)ah * mul) << (32 - shift); | ||
| 159 | |||
| 160 | return ret; | ||
| 161 | } | ||
| 162 | #endif /* mul_u64_u32_shr */ | ||
| 163 | |||
| 164 | #endif | ||
| 165 | |||
| 136 | #endif /* _LINUX_MATH64_H */ | 166 | #endif /* _LINUX_MATH64_H */ |
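
The new mul_u64_u32_shr() helper computes (a * mul) >> shift without overflowing 64 bits, using __int128 where the architecture supports it and a 32-bit split otherwise. Below is a quick user-space re-implementation of the fallback path for experimentation; the header above is the authoritative version, and typical callers pass a clocksource-style mult/shift pair:

#include <stdint.h>
#include <stdio.h>

/* Fallback arithmetic from the hunk above, retyped with <stdint.h> types. */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
        uint32_t ah = a >> 32, al = (uint32_t)a;
        uint64_t ret;

        ret = ((uint64_t)al * mul) >> shift;
        if (ah)
                ret += ((uint64_t)ah * mul) << (32 - shift);
        return ret;
}

int main(void)
{
        /* e.g. scale a large cycle count by a fixed-point factor of 3/4 */
        uint64_t cycles = 123456789012345ULL;

        printf("%llu\n", (unsigned long long)mul_u64_u32_shr(cycles, 3, 2));
        return 0;
}
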
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h index 16bf8a0dcd97..bb9616dc0efa 100644 --- a/include/linux/mfd/as3722.h +++ b/include/linux/mfd/as3722.h | |||
| @@ -151,6 +151,7 @@ | |||
| 151 | #define AS3722_ASIC_ID1_REG 0x90 | 151 | #define AS3722_ASIC_ID1_REG 0x90 |
| 152 | #define AS3722_ASIC_ID2_REG 0x91 | 152 | #define AS3722_ASIC_ID2_REG 0x91 |
| 153 | #define AS3722_LOCK_REG 0x9E | 153 | #define AS3722_LOCK_REG 0x9E |
| 154 | #define AS3722_FUSE7_REG 0xA7 | ||
| 154 | #define AS3722_MAX_REGISTER 0xF4 | 155 | #define AS3722_MAX_REGISTER 0xF4 |
| 155 | 156 | ||
| 156 | #define AS3722_SD0_EXT_ENABLE_MASK 0x03 | 157 | #define AS3722_SD0_EXT_ENABLE_MASK 0x03 |
| @@ -224,6 +225,7 @@ | |||
| 224 | #define AS3722_SD_VSEL_MASK 0x7F | 225 | #define AS3722_SD_VSEL_MASK 0x7F |
| 225 | #define AS3722_SD0_VSEL_MIN 0x01 | 226 | #define AS3722_SD0_VSEL_MIN 0x01 |
| 226 | #define AS3722_SD0_VSEL_MAX 0x5A | 227 | #define AS3722_SD0_VSEL_MAX 0x5A |
| 228 | #define AS3722_SD0_VSEL_LOW_VOL_MAX 0x6E | ||
| 227 | #define AS3722_SD2_VSEL_MIN 0x01 | 229 | #define AS3722_SD2_VSEL_MIN 0x01 |
| 228 | #define AS3722_SD2_VSEL_MAX 0x7F | 230 | #define AS3722_SD2_VSEL_MAX 0x7F |
| 229 | 231 | ||
| @@ -341,6 +343,8 @@ | |||
| 341 | #define AS3722_EXT_CONTROL_ENABLE2 0x2 | 343 | #define AS3722_EXT_CONTROL_ENABLE2 0x2 |
| 342 | #define AS3722_EXT_CONTROL_ENABLE3 0x3 | 344 | #define AS3722_EXT_CONTROL_ENABLE3 0x3 |
| 343 | 345 | ||
| 346 | #define AS3722_FUSE7_SD0_LOW_VOLTAGE BIT(4) | ||
| 347 | |||
| 344 | /* Interrupt IDs */ | 348 | /* Interrupt IDs */ |
| 345 | enum as3722_irq { | 349 | enum as3722_irq { |
| 346 | AS3722_IRQ_LID, | 350 | AS3722_IRQ_LID, |
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h index 20e433e551e3..16c2335c2856 100644 --- a/include/linux/mfd/tps65910.h +++ b/include/linux/mfd/tps65910.h | |||
| @@ -358,8 +358,6 @@ | |||
| 358 | /*Register BBCH (0x80) register.RegisterDescription */ | 358 | /*Register BBCH (0x80) register.RegisterDescription */ |
| 359 | #define BBCH_BBSEL_MASK 0x06 | 359 | #define BBCH_BBSEL_MASK 0x06 |
| 360 | #define BBCH_BBSEL_SHIFT 1 | 360 | #define BBCH_BBSEL_SHIFT 1 |
| 361 | #define BBCH_BBCHEN_MASK 0x01 | ||
| 362 | #define BBCH_BBCHEN_SHIFT 0 | ||
| 363 | 361 | ||
| 364 | 362 | ||
| 365 | /*Register DCDCCTRL (0x80) register.RegisterDescription */ | 363 | /*Register DCDCCTRL (0x80) register.RegisterDescription */ |
| @@ -833,6 +831,7 @@ | |||
| 833 | #define TPS65910_REG_VAUX2 10 | 831 | #define TPS65910_REG_VAUX2 10 |
| 834 | #define TPS65910_REG_VAUX33 11 | 832 | #define TPS65910_REG_VAUX33 11 |
| 835 | #define TPS65910_REG_VMMC 12 | 833 | #define TPS65910_REG_VMMC 12 |
| 834 | #define TPS65910_REG_VBB 13 | ||
| 836 | 835 | ||
| 837 | #define TPS65911_REG_VDDCTRL 4 | 836 | #define TPS65911_REG_VDDCTRL 4 |
| 838 | #define TPS65911_REG_LDO1 5 | 837 | #define TPS65911_REG_LDO1 5 |
| @@ -845,7 +844,7 @@ | |||
| 845 | #define TPS65911_REG_LDO8 12 | 844 | #define TPS65911_REG_LDO8 12 |
| 846 | 845 | ||
| 847 | /* Max number of TPS65910/11 regulators */ | 846 | /* Max number of TPS65910/11 regulators */ |
| 848 | #define TPS65910_NUM_REGS 13 | 847 | #define TPS65910_NUM_REGS 14 |
| 849 | 848 | ||
| 850 | /* External sleep controls through EN1/EN2/EN3/SLEEP inputs */ | 849 | /* External sleep controls through EN1/EN2/EN3/SLEEP inputs */ |
| 851 | #define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1 0x1 | 850 | #define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1 0x1 |
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f5096b58b20d..f015c059e159 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
| @@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, | |||
| 55 | struct page *newpage, struct page *page); | 55 | struct page *newpage, struct page *page); |
| 56 | extern int migrate_page_move_mapping(struct address_space *mapping, | 56 | extern int migrate_page_move_mapping(struct address_space *mapping, |
| 57 | struct page *newpage, struct page *page, | 57 | struct page *newpage, struct page *page, |
| 58 | struct buffer_head *head, enum migrate_mode mode); | 58 | struct buffer_head *head, enum migrate_mode mode, |
| 59 | int extra_count); | ||
| 59 | #else | 60 | #else |
| 60 | 61 | ||
| 61 | static inline void putback_lru_pages(struct list_head *l) {} | 62 | static inline void putback_lru_pages(struct list_head *l) {} |
| @@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, | |||
| 90 | #endif /* CONFIG_MIGRATION */ | 91 | #endif /* CONFIG_MIGRATION */ |
| 91 | 92 | ||
| 92 | #ifdef CONFIG_NUMA_BALANCING | 93 | #ifdef CONFIG_NUMA_BALANCING |
| 94 | extern bool pmd_trans_migrating(pmd_t pmd); | ||
| 95 | extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd); | ||
| 93 | extern int migrate_misplaced_page(struct page *page, | 96 | extern int migrate_misplaced_page(struct page *page, |
| 94 | struct vm_area_struct *vma, int node); | 97 | struct vm_area_struct *vma, int node); |
| 95 | extern bool migrate_ratelimited(int node); | 98 | extern bool migrate_ratelimited(int node); |
| 96 | #else | 99 | #else |
| 100 | static inline bool pmd_trans_migrating(pmd_t pmd) | ||
| 101 | { | ||
| 102 | return false; | ||
| 103 | } | ||
| 104 | static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) | ||
| 105 | { | ||
| 106 | } | ||
| 97 | static inline int migrate_misplaced_page(struct page *page, | 107 | static inline int migrate_misplaced_page(struct page *page, |
| 98 | struct vm_area_struct *vma, int node) | 108 | struct vm_area_struct *vma, int node) |
| 99 | { | 109 | { |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 1cedd000cf29..35527173cf50 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a | |||
| 1317 | #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ | 1317 | #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ |
| 1318 | 1318 | ||
| 1319 | #if USE_SPLIT_PTE_PTLOCKS | 1319 | #if USE_SPLIT_PTE_PTLOCKS |
| 1320 | #if BLOATED_SPINLOCKS | 1320 | #if ALLOC_SPLIT_PTLOCKS |
| 1321 | extern bool ptlock_alloc(struct page *page); | 1321 | extern bool ptlock_alloc(struct page *page); |
| 1322 | extern void ptlock_free(struct page *page); | 1322 | extern void ptlock_free(struct page *page); |
| 1323 | 1323 | ||
| @@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) | |||
| 1325 | { | 1325 | { |
| 1326 | return page->ptl; | 1326 | return page->ptl; |
| 1327 | } | 1327 | } |
| 1328 | #else /* BLOATED_SPINLOCKS */ | 1328 | #else /* ALLOC_SPLIT_PTLOCKS */ |
| 1329 | static inline bool ptlock_alloc(struct page *page) | 1329 | static inline bool ptlock_alloc(struct page *page) |
| 1330 | { | 1330 | { |
| 1331 | return true; | 1331 | return true; |
| @@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) | |||
| 1339 | { | 1339 | { |
| 1340 | return &page->ptl; | 1340 | return &page->ptl; |
| 1341 | } | 1341 | } |
| 1342 | #endif /* BLOATED_SPINLOCKS */ | 1342 | #endif /* ALLOC_SPLIT_PTLOCKS */ |
| 1343 | 1343 | ||
| 1344 | static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) | 1344 | static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) |
| 1345 | { | 1345 | { |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bd299418a934..290901a8c1de 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -26,6 +26,7 @@ struct address_space; | |||
| 26 | #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) | 26 | #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) |
| 27 | #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ | 27 | #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ |
| 28 | IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) | 28 | IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) |
| 29 | #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) | ||
| 29 | 30 | ||
| 30 | /* | 31 | /* |
| 31 | * Each physical page in the system has a struct page associated with | 32 | * Each physical page in the system has a struct page associated with |
| @@ -155,7 +156,7 @@ struct page { | |||
| 155 | * system if PG_buddy is set. | 156 | * system if PG_buddy is set. |
| 156 | */ | 157 | */ |
| 157 | #if USE_SPLIT_PTE_PTLOCKS | 158 | #if USE_SPLIT_PTE_PTLOCKS |
| 158 | #if BLOATED_SPINLOCKS | 159 | #if ALLOC_SPLIT_PTLOCKS |
| 159 | spinlock_t *ptl; | 160 | spinlock_t *ptl; |
| 160 | #else | 161 | #else |
| 161 | spinlock_t ptl; | 162 | spinlock_t ptl; |
| @@ -443,6 +444,14 @@ struct mm_struct { | |||
| 443 | /* numa_scan_seq prevents two threads setting pte_numa */ | 444 | /* numa_scan_seq prevents two threads setting pte_numa */ |
| 444 | int numa_scan_seq; | 445 | int numa_scan_seq; |
| 445 | #endif | 446 | #endif |
| 447 | #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) | ||
| 448 | /* | ||
| 449 | * An operation with batched TLB flushing is going on. Anything that | ||
| 450 | * can move process memory needs to flush the TLB when moving a | ||
| 451 | * PROT_NONE or PROT_NUMA mapped page. | ||
| 452 | */ | ||
| 453 | bool tlb_flush_pending; | ||
| 454 | #endif | ||
| 446 | struct uprobes_state uprobes_state; | 455 | struct uprobes_state uprobes_state; |
| 447 | }; | 456 | }; |
| 448 | 457 | ||
| @@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) | |||
| 459 | return mm->cpu_vm_mask_var; | 468 | return mm->cpu_vm_mask_var; |
| 460 | } | 469 | } |
| 461 | 470 | ||
| 471 | #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) | ||
| 472 | /* | ||
| 473 | * Memory barriers to keep this state in sync are graciously provided by | ||
| 474 | * the page table locks, outside of which no page table modifications happen. | ||
| 475 | * The barriers below prevent the compiler from re-ordering the instructions | ||
| 476 | * around the memory barriers that are already present in the code. | ||
| 477 | */ | ||
| 478 | static inline bool mm_tlb_flush_pending(struct mm_struct *mm) | ||
| 479 | { | ||
| 480 | barrier(); | ||
| 481 | return mm->tlb_flush_pending; | ||
| 482 | } | ||
| 483 | static inline void set_tlb_flush_pending(struct mm_struct *mm) | ||
| 484 | { | ||
| 485 | mm->tlb_flush_pending = true; | ||
| 486 | |||
| 487 | /* | ||
| 488 | * Guarantee that the tlb_flush_pending store does not leak into the | ||
| 489 | * critical section updating the page tables | ||
| 490 | */ | ||
| 491 | smp_mb__before_spinlock(); | ||
| 492 | } | ||
| 493 | /* Clearing is done after a TLB flush, which also provides a barrier. */ | ||
| 494 | static inline void clear_tlb_flush_pending(struct mm_struct *mm) | ||
| 495 | { | ||
| 496 | barrier(); | ||
| 497 | mm->tlb_flush_pending = false; | ||
| 498 | } | ||
| 499 | #else | ||
| 500 | static inline bool mm_tlb_flush_pending(struct mm_struct *mm) | ||
| 501 | { | ||
| 502 | return false; | ||
| 503 | } | ||
| 504 | static inline void set_tlb_flush_pending(struct mm_struct *mm) | ||
| 505 | { | ||
| 506 | } | ||
| 507 | static inline void clear_tlb_flush_pending(struct mm_struct *mm) | ||
| 508 | { | ||
| 509 | } | ||
| 510 | #endif | ||
| 511 | |||
| 462 | #endif /* _LINUX_MM_TYPES_H */ | 512 | #endif /* _LINUX_MM_TYPES_H */ |
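
The tlb_flush_pending flag added above is meant to bracket batched page-table updates: the updater sets it before rewriting PTEs, flushes the TLB, and only then clears it, so concurrent code that migrates PROT_NONE/PROT_NUMA pages can tell that a flush is still outstanding. A simplified stand-alone sketch of that bracketing, with stub types rather than the real mm code:

#include <stdbool.h>

struct mm_struct { bool tlb_flush_pending; };

static void set_tlb_flush_pending(struct mm_struct *mm)   { mm->tlb_flush_pending = true; }
static void clear_tlb_flush_pending(struct mm_struct *mm) { mm->tlb_flush_pending = false; }
static void rewrite_ptes(struct mm_struct *mm)            { (void)mm; /* batched PTE updates */ }
static void flush_tlb_range(struct mm_struct *mm)         { (void)mm; /* flush hardware TLBs */ }

static void change_protection_batched(struct mm_struct *mm)
{
        set_tlb_flush_pending(mm);      /* movers now treat these PTEs as stale */
        rewrite_ptes(mm);
        flush_tlb_range(mm);
        clear_tlb_flush_pending(mm);    /* safe only after the flush completes */
}

int main(void)
{
        struct mm_struct mm = { false };

        change_protection_batched(&mm);
        return 0;
}
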
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d9a550bf3e8e..ce2a1f5f9a1e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -769,7 +769,8 @@ struct netdev_phys_port_id { | |||
| 769 | * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) | 769 | * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) |
| 770 | * Required can not be NULL. | 770 | * Required can not be NULL. |
| 771 | * | 771 | * |
| 772 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); | 772 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
| 773 | * void *accel_priv); | ||
| 773 | * Called to decide which queue to use when the device supports multiple | 774 | * Called to decide which queue to use when the device supports multiple
| 774 | * transmit queues. | 775 | * transmit queues. |
| 775 | * | 776 | * |
| @@ -990,7 +991,8 @@ struct net_device_ops { | |||
| 990 | netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, | 991 | netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, |
| 991 | struct net_device *dev); | 992 | struct net_device *dev); |
| 992 | u16 (*ndo_select_queue)(struct net_device *dev, | 993 | u16 (*ndo_select_queue)(struct net_device *dev, |
| 993 | struct sk_buff *skb); | 994 | struct sk_buff *skb, |
| 995 | void *accel_priv); | ||
| 994 | void (*ndo_change_rx_flags)(struct net_device *dev, | 996 | void (*ndo_change_rx_flags)(struct net_device *dev, |
| 995 | int flags); | 997 | int flags); |
| 996 | void (*ndo_set_rx_mode)(struct net_device *dev); | 998 | void (*ndo_set_rx_mode)(struct net_device *dev); |
| @@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, | |||
| 1529 | } | 1531 | } |
| 1530 | 1532 | ||
| 1531 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, | 1533 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
| 1532 | struct sk_buff *skb); | 1534 | struct sk_buff *skb, |
| 1535 | void *accel_priv); | ||
| 1533 | u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); | 1536 | u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); |
| 1534 | 1537 | ||
| 1535 | /* | 1538 | /* |
| @@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev); | |||
| 1819 | void dev_disable_lro(struct net_device *dev); | 1822 | void dev_disable_lro(struct net_device *dev); |
| 1820 | int dev_loopback_xmit(struct sk_buff *newskb); | 1823 | int dev_loopback_xmit(struct sk_buff *newskb); |
| 1821 | int dev_queue_xmit(struct sk_buff *skb); | 1824 | int dev_queue_xmit(struct sk_buff *skb); |
| 1825 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); | ||
| 1822 | int register_netdevice(struct net_device *dev); | 1826 | int register_netdevice(struct net_device *dev); |
| 1823 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); | 1827 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); |
| 1824 | void unregister_netdevice_many(struct list_head *head); | 1828 | void unregister_netdevice_many(struct list_head *head); |
| @@ -1912,6 +1916,15 @@ static inline int dev_parse_header(const struct sk_buff *skb, | |||
| 1912 | return dev->header_ops->parse(skb, haddr); | 1916 | return dev->header_ops->parse(skb, haddr); |
| 1913 | } | 1917 | } |
| 1914 | 1918 | ||
| 1919 | static inline int dev_rebuild_header(struct sk_buff *skb) | ||
| 1920 | { | ||
| 1921 | const struct net_device *dev = skb->dev; | ||
| 1922 | |||
| 1923 | if (!dev->header_ops || !dev->header_ops->rebuild) | ||
| 1924 | return 0; | ||
| 1925 | return dev->header_ops->rebuild(skb); | ||
| 1926 | } | ||
| 1927 | |||
| 1915 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); | 1928 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); |
| 1916 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); | 1929 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); |
| 1917 | static inline int unregister_gifconf(unsigned int family) | 1930 | static inline int unregister_gifconf(unsigned int family) |
| @@ -2417,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier); | |||
| 2417 | int dev_get_phys_port_id(struct net_device *dev, | 2430 | int dev_get_phys_port_id(struct net_device *dev, |
| 2418 | struct netdev_phys_port_id *ppid); | 2431 | struct netdev_phys_port_id *ppid); |
| 2419 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 2432 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
| 2420 | struct netdev_queue *txq, void *accel_priv); | 2433 | struct netdev_queue *txq); |
| 2421 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | 2434 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
| 2422 | 2435 | ||
| 2423 | extern int netdev_budget; | 2436 | extern int netdev_budget; |
| @@ -3008,6 +3021,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev, | |||
| 3008 | dev->gso_max_size = size; | 3021 | dev->gso_max_size = size; |
| 3009 | } | 3022 | } |
| 3010 | 3023 | ||
| 3024 | static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, | ||
| 3025 | int pulled_hlen, u16 mac_offset, | ||
| 3026 | int mac_len) | ||
| 3027 | { | ||
| 3028 | skb->protocol = protocol; | ||
| 3029 | skb->encapsulation = 1; | ||
| 3030 | skb_push(skb, pulled_hlen); | ||
| 3031 | skb_reset_transport_header(skb); | ||
| 3032 | skb->mac_header = mac_offset; | ||
| 3033 | skb->network_header = skb->mac_header + mac_len; | ||
| 3034 | skb->mac_len = mac_len; | ||
| 3035 | } | ||
| 3036 | |||
| 3011 | static inline bool netif_is_macvlan(struct net_device *dev) | 3037 | static inline bool netif_is_macvlan(struct net_device *dev) |
| 3012 | { | 3038 | { |
| 3013 | return dev->priv_flags & IFF_MACVLAN; | 3039 | return dev->priv_flags & IFF_MACVLAN; |
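
With the signature change above, every ndo_select_queue implementation gains an accel_priv argument, presumably carrying lower-device context for the new dev_queue_xmit_accel() path; drivers with no special policy can ignore it. A hedged sketch of a driver callback against the new prototype, assuming a kernel build environment and a hypothetical driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical callback matching the three-argument prototype above. */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
                                void *accel_priv)
{
        /* No driver-specific policy here: ignore accel_priv and let the
         * core helper pick a queue as before. */
        return __netdev_pick_tx(dev, skb);
}

Such a callback would be wired up through the driver's net_device_ops as .ndo_select_queue = example_select_queue.
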
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 57e890abe1f0..a5fc7d01aad6 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
| @@ -69,6 +69,7 @@ | |||
| 69 | __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ | 69 | __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ |
| 70 | extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ | 70 | extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ |
| 71 | __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ | 71 | __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ |
| 72 | extern __PCPU_ATTRS(sec) __typeof__(type) name; \ | ||
| 72 | __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ | 73 | __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ |
| 73 | __typeof__(type) name | 74 | __typeof__(type) name |
| 74 | #else | 75 | #else |
diff --git a/include/linux/pstore.h b/include/linux/pstore.h index abd437d0a8a7..ece0c6bbfcc5 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h | |||
| @@ -51,6 +51,7 @@ struct pstore_info { | |||
| 51 | char *buf; | 51 | char *buf; |
| 52 | size_t bufsize; | 52 | size_t bufsize; |
| 53 | struct mutex read_mutex; /* serialize open/read/close */ | 53 | struct mutex read_mutex; /* serialize open/read/close */ |
| 54 | int flags; | ||
| 54 | int (*open)(struct pstore_info *psi); | 55 | int (*open)(struct pstore_info *psi); |
| 55 | int (*close)(struct pstore_info *psi); | 56 | int (*close)(struct pstore_info *psi); |
| 56 | ssize_t (*read)(u64 *id, enum pstore_type_id *type, | 57 | ssize_t (*read)(u64 *id, enum pstore_type_id *type, |
| @@ -70,6 +71,8 @@ struct pstore_info { | |||
| 70 | void *data; | 71 | void *data; |
| 71 | }; | 72 | }; |
| 72 | 73 | ||
| 74 | #define PSTORE_FLAGS_FRAGILE 1 | ||
| 75 | |||
| 73 | #ifdef CONFIG_PSTORE | 76 | #ifdef CONFIG_PSTORE |
| 74 | extern int pstore_register(struct pstore_info *); | 77 | extern int pstore_register(struct pstore_info *); |
| 75 | extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); | 78 | extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); |
diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 8e00f9f6f963..9e7db9e73cc1 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h | |||
| @@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *); | |||
| 43 | * Architecture-specific implementations of sys_reboot commands. | 43 | * Architecture-specific implementations of sys_reboot commands. |
| 44 | */ | 44 | */ |
| 45 | 45 | ||
| 46 | extern void migrate_to_reboot_cpu(void); | ||
| 46 | extern void machine_restart(char *cmd); | 47 | extern void machine_restart(char *cmd); |
| 47 | extern void machine_halt(void); | 48 | extern void machine_halt(void); |
| 48 | extern void machine_power_off(void); | 49 | extern void machine_power_off(void); |
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h new file mode 100644 index 000000000000..49206c1b4905 --- /dev/null +++ b/include/linux/regulator/act8865.h | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | /* | ||
| 2 | * act8865.h -- Voltage regulation for the active-semi act8865 | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Atmel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; version 2 of the License. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef __LINUX_REGULATOR_ACT8865_H | ||
| 17 | #define __LINUX_REGULATOR_ACT8865_H | ||
| 18 | |||
| 19 | #include <linux/regulator/machine.h> | ||
| 20 | |||
| 21 | enum { | ||
| 22 | ACT8865_ID_DCDC1, | ||
| 23 | ACT8865_ID_DCDC2, | ||
| 24 | ACT8865_ID_DCDC3, | ||
| 25 | ACT8865_ID_LDO1, | ||
| 26 | ACT8865_ID_LDO2, | ||
| 27 | ACT8865_ID_LDO3, | ||
| 28 | ACT8865_ID_LDO4, | ||
| 29 | ACT8865_REG_NUM, | ||
| 30 | }; | ||
| 31 | |||
| 32 | /** | ||
| 33 | * act8865_regulator_data - regulator data | ||
| 34 | * @id: regulator id | ||
| 35 | * @name: regulator name | ||
| 36 | * @platform_data: regulator init data | ||
| 37 | */ | ||
| 38 | struct act8865_regulator_data { | ||
| 39 | int id; | ||
| 40 | const char *name; | ||
| 41 | struct regulator_init_data *platform_data; | ||
| 42 | }; | ||
| 43 | |||
| 44 | /** | ||
| 45 | * act8865_platform_data - platform data for act8865 | ||
| 46 | * @num_regulators: number of regulators used | ||
| 47 | * @regulators: pointer to regulators used | ||
| 48 | */ | ||
| 49 | struct act8865_platform_data { | ||
| 50 | int num_regulators; | ||
| 51 | struct act8865_regulator_data *regulators; | ||
| 52 | }; | ||
| 53 | #endif | ||
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 939428ad25ac..8e3e66ac0a52 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
| @@ -24,6 +24,11 @@ extern int rtnl_trylock(void); | |||
| 24 | extern int rtnl_is_locked(void); | 24 | extern int rtnl_is_locked(void); |
| 25 | #ifdef CONFIG_PROVE_LOCKING | 25 | #ifdef CONFIG_PROVE_LOCKING |
| 26 | extern int lockdep_rtnl_is_held(void); | 26 | extern int lockdep_rtnl_is_held(void); |
| 27 | #else | ||
| 28 | static inline int lockdep_rtnl_is_held(void) | ||
| 29 | { | ||
| 30 | return 1; | ||
| 31 | } | ||
| 27 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | 32 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
| 28 | 33 | ||
| 29 | /** | 34 | /** |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 768b037dfacb..53f97eb8dbc7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -440,8 +440,6 @@ struct task_cputime { | |||
| 440 | .sum_exec_runtime = 0, \ | 440 | .sum_exec_runtime = 0, \ |
| 441 | } | 441 | } |
| 442 | 442 | ||
| 443 | #define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED) | ||
| 444 | |||
| 445 | #ifdef CONFIG_PREEMPT_COUNT | 443 | #ifdef CONFIG_PREEMPT_COUNT |
| 446 | #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) | 444 | #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) |
| 447 | #else | 445 | #else |
| @@ -932,7 +930,8 @@ struct pipe_inode_info; | |||
| 932 | struct uts_namespace; | 930 | struct uts_namespace; |
| 933 | 931 | ||
| 934 | struct load_weight { | 932 | struct load_weight { |
| 935 | unsigned long weight, inv_weight; | 933 | unsigned long weight; |
| 934 | u32 inv_weight; | ||
| 936 | }; | 935 | }; |
| 937 | 936 | ||
| 938 | struct sched_avg { | 937 | struct sched_avg { |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 215b5ea1cb30..6f69b3f914fb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) | |||
| 1638 | skb->mac_header += offset; | 1638 | skb->mac_header += offset; |
| 1639 | } | 1639 | } |
| 1640 | 1640 | ||
| 1641 | static inline void skb_pop_mac_header(struct sk_buff *skb) | ||
| 1642 | { | ||
| 1643 | skb->mac_header = skb->network_header; | ||
| 1644 | } | ||
| 1645 | |||
| 1641 | static inline void skb_probe_transport_header(struct sk_buff *skb, | 1646 | static inline void skb_probe_transport_header(struct sk_buff *skb, |
| 1642 | const int offset_hint) | 1647 | const int offset_hint) |
| 1643 | { | 1648 | { |
| @@ -2526,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb) | |||
| 2526 | * Ethernet MAC Drivers should call this function in their hard_xmit() | 2531 | * Ethernet MAC Drivers should call this function in their hard_xmit() |
| 2527 | * function immediately before giving the sk_buff to the MAC hardware. | 2532 | * function immediately before giving the sk_buff to the MAC hardware. |
| 2528 | * | 2533 | * |
| 2534 | * Specifically, one should make absolutely sure that this function is | ||
| 2535 | * called before TX completion of this packet can trigger. Otherwise | ||
| 2536 | * the packet could potentially already be freed. | ||
| 2537 | * | ||
| 2529 | * @skb: A socket buffer. | 2538 | * @skb: A socket buffer. |
| 2530 | */ | 2539 | */ |
| 2531 | static inline void skb_tx_timestamp(struct sk_buff *skb) | 2540 | static inline void skb_tx_timestamp(struct sk_buff *skb) |
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 31e2de7d57c5..c0f0a13ed818 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h | |||
| @@ -142,7 +142,7 @@ | |||
| 142 | #define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) | 142 | #define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) |
| 143 | 143 | ||
| 144 | #define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) | 144 | #define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) |
| 145 | #define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO) | 145 | #define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO) |
| 146 | 146 | ||
| 147 | /* FRMR information field macros */ | 147 | /* FRMR information field macros */ |
| 148 | 148 | ||
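
The PDU_GET_NEXT_Vr() fix above replaces ++sn with ((sn) + 1), so the macro no longer modifies its argument as a side effect and the parameter is properly parenthesized. A small user-space demonstration of the difference; the modulo constant is an assumed value for the demo, not taken from the header:

#include <stdio.h>

#define LLC_2_SEQ_NBR_MODULO 0x80U      /* assumed value, for illustration only */

#define PDU_GET_NEXT_Vr_OLD(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO)
#define PDU_GET_NEXT_Vr_NEW(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)

int main(void)
{
        unsigned int a = 5, b = 5;
        unsigned int next_old, next_new;

        next_old = PDU_GET_NEXT_Vr_OLD(a);      /* increments a as a side effect */
        next_new = PDU_GET_NEXT_Vr_NEW(b);      /* leaves b untouched */

        printf("old: next=%u a=%u\n", next_old, a);     /* a has become 6 */
        printf("new: next=%u b=%u\n", next_new, b);     /* b is still 5  */
        return 0;
}
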
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 67b5d0068273..0a248b323d87 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
| @@ -1046,9 +1046,6 @@ struct sctp_outq { | |||
| 1046 | 1046 | ||
| 1047 | /* Corked? */ | 1047 | /* Corked? */ |
| 1048 | char cork; | 1048 | char cork; |
| 1049 | |||
| 1050 | /* Is this structure empty? */ | ||
| 1051 | char empty; | ||
| 1052 | }; | 1049 | }; |
| 1053 | 1050 | ||
| 1054 | void sctp_outq_init(struct sctp_association *, struct sctp_outq *); | 1051 | void sctp_outq_init(struct sctp_association *, struct sctp_outq *); |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 979874c627ee..61e1935c91b1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -978,7 +978,7 @@ struct ib_uobject { | |||
| 978 | }; | 978 | }; |
| 979 | 979 | ||
| 980 | struct ib_udata { | 980 | struct ib_udata { |
| 981 | void __user *inbuf; | 981 | const void __user *inbuf; |
| 982 | void __user *outbuf; | 982 | void __user *outbuf; |
| 983 | size_t inlen; | 983 | size_t inlen; |
| 984 | size_t outlen; | 984 | size_t outlen; |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 45412a6afa69..321301c0a643 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
| @@ -517,10 +517,6 @@ struct se_node_acl { | |||
| 517 | u32 acl_index; | 517 | u32 acl_index; |
| 518 | #define MAX_ACL_TAG_SIZE 64 | 518 | #define MAX_ACL_TAG_SIZE 64 |
| 519 | char acl_tag[MAX_ACL_TAG_SIZE]; | 519 | char acl_tag[MAX_ACL_TAG_SIZE]; |
| 520 | u64 num_cmds; | ||
| 521 | u64 read_bytes; | ||
| 522 | u64 write_bytes; | ||
| 523 | spinlock_t stats_lock; | ||
| 524 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 520 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
| 525 | atomic_t acl_pr_ref_count; | 521 | atomic_t acl_pr_ref_count; |
| 526 | struct se_dev_entry **device_list; | 522 | struct se_dev_entry **device_list; |
| @@ -624,6 +620,7 @@ struct se_dev_attrib { | |||
| 624 | u32 unmap_granularity; | 620 | u32 unmap_granularity; |
| 625 | u32 unmap_granularity_alignment; | 621 | u32 unmap_granularity_alignment; |
| 626 | u32 max_write_same_len; | 622 | u32 max_write_same_len; |
| 623 | u32 max_bytes_per_io; | ||
| 627 | struct se_device *da_dev; | 624 | struct se_device *da_dev; |
| 628 | struct config_group da_group; | 625 | struct config_group da_group; |
| 629 | }; | 626 | }; |
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 2f3f7ea8c77b..fe421e8a431b 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h | |||
| @@ -983,6 +983,8 @@ struct drm_radeon_cs { | |||
| 983 | #define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 | 983 | #define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 |
| 984 | /* CIK macrotile mode array */ | 984 | /* CIK macrotile mode array */ |
| 985 | #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 | 985 | #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 |
| 986 | /* query the number of render backends */ | ||
| 987 | #define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19 | ||
| 986 | 988 | ||
| 987 | 989 | ||
| 988 | struct drm_radeon_info { | 990 | struct drm_radeon_info { |
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index bcb0912afe7a..f854ca4a1372 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
| @@ -75,6 +75,7 @@ | |||
| 75 | #define DRM_VMW_PARAM_FIFO_CAPS 4 | 75 | #define DRM_VMW_PARAM_FIFO_CAPS 4 |
| 76 | #define DRM_VMW_PARAM_MAX_FB_SIZE 5 | 76 | #define DRM_VMW_PARAM_MAX_FB_SIZE 5 |
| 77 | #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 | 77 | #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 |
| 78 | #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 | ||
| 78 | 79 | ||
| 79 | /** | 80 | /** |
| 80 | * struct drm_vmw_getparam_arg | 81 | * struct drm_vmw_getparam_arg |
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index ecc88592ecbe..bd24470d24a2 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h | |||
| @@ -464,7 +464,8 @@ struct input_keymap_entry { | |||
| 464 | #define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */ | 464 | #define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */ |
| 465 | #define KEY_DISPLAY_OFF 245 /* display device to off state */ | 465 | #define KEY_DISPLAY_OFF 245 /* display device to off state */ |
| 466 | 466 | ||
| 467 | #define KEY_WIMAX 246 | 467 | #define KEY_WWAN 246 /* Wireless WAN (LTE, UMTS, GSM, etc.) */ |
| 468 | #define KEY_WIMAX KEY_WWAN | ||
| 468 | #define KEY_RFKILL 247 /* Key that controls all radios */ | 469 | #define KEY_RFKILL 247 /* Key that controls all radios */ |
| 469 | 470 | ||
| 470 | #define KEY_MICMUTE 248 /* Mute / unmute the microphone */ | 471 | #define KEY_MICMUTE 248 /* Mute / unmute the microphone */ |
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e1802d6153ae..959d454f76a1 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h | |||
| @@ -679,6 +679,7 @@ enum perf_event_type { | |||
| 679 | * | 679 | * |
| 680 | * { u64 weight; } && PERF_SAMPLE_WEIGHT | 680 | * { u64 weight; } && PERF_SAMPLE_WEIGHT |
| 681 | * { u64 data_src; } && PERF_SAMPLE_DATA_SRC | 681 | * { u64 data_src; } && PERF_SAMPLE_DATA_SRC |
| 682 | * { u64 transaction; } && PERF_SAMPLE_TRANSACTION | ||
| 682 | * }; | 683 | * }; |
| 683 | */ | 684 | */ |
| 684 | PERF_RECORD_SAMPLE = 9, | 685 | PERF_RECORD_SAMPLE = 9, |
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 65e12099ef89..ae665ac59c36 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h | |||
| @@ -146,7 +146,7 @@ struct blkif_request_segment_aligned { | |||
| 146 | struct blkif_request_rw { | 146 | struct blkif_request_rw { |
| 147 | uint8_t nr_segments; /* number of segments */ | 147 | uint8_t nr_segments; /* number of segments */ |
| 148 | blkif_vdev_t handle; /* only for read/write requests */ | 148 | blkif_vdev_t handle; /* only for read/write requests */ |
| 149 | #ifdef CONFIG_X86_64 | 149 | #ifndef CONFIG_X86_32 |
| 150 | uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ | 150 | uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ |
| 151 | #endif | 151 | #endif |
| 152 | uint64_t id; /* private guest value, echoed in resp */ | 152 | uint64_t id; /* private guest value, echoed in resp */ |
| @@ -163,7 +163,7 @@ struct blkif_request_discard { | |||
| 163 | uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ | 163 | uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ |
| 164 | #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ | 164 | #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ |
| 165 | blkif_vdev_t _pad1; /* only for read/write requests */ | 165 | blkif_vdev_t _pad1; /* only for read/write requests */ |
| 166 | #ifdef CONFIG_X86_64 | 166 | #ifndef CONFIG_X86_32 |
| 167 | uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ | 167 | uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ |
| 168 | #endif | 168 | #endif |
| 169 | uint64_t id; /* private guest value, echoed in resp */ | 169 | uint64_t id; /* private guest value, echoed in resp */ |
| @@ -175,7 +175,7 @@ struct blkif_request_discard { | |||
| 175 | struct blkif_request_other { | 175 | struct blkif_request_other { |
| 176 | uint8_t _pad1; | 176 | uint8_t _pad1; |
| 177 | blkif_vdev_t _pad2; /* only for read/write requests */ | 177 | blkif_vdev_t _pad2; /* only for read/write requests */ |
| 178 | #ifdef CONFIG_X86_64 | 178 | #ifndef CONFIG_X86_32 |
| 179 | uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ | 179 | uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ |
| 180 | #endif | 180 | #endif |
| 181 | uint64_t id; /* private guest value, echoed in resp */ | 181 | uint64_t id; /* private guest value, echoed in resp */ |
| @@ -184,7 +184,7 @@ struct blkif_request_other { | |||
| 184 | struct blkif_request_indirect { | 184 | struct blkif_request_indirect { |
| 185 | uint8_t indirect_op; | 185 | uint8_t indirect_op; |
| 186 | uint16_t nr_segments; | 186 | uint16_t nr_segments; |
| 187 | #ifdef CONFIG_X86_64 | 187 | #ifndef CONFIG_X86_32 |
| 188 | uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ | 188 | uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ |
| 189 | #endif | 189 | #endif |
| 190 | uint64_t id; | 190 | uint64_t id; |
| @@ -192,7 +192,7 @@ struct blkif_request_indirect { | |||
| 192 | blkif_vdev_t handle; | 192 | blkif_vdev_t handle; |
| 193 | uint16_t _pad2; | 193 | uint16_t _pad2; |
| 194 | grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; | 194 | grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; |
| 195 | #ifdef CONFIG_X86_64 | 195 | #ifndef CONFIG_X86_32 |
| 196 | uint32_t _pad3; /* make it 64 byte aligned */ | 196 | uint32_t _pad3; /* make it 64 byte aligned */ |
| 197 | #else | 197 | #else |
| 198 | uint64_t _pad3; /* make it 64 byte aligned */ | 198 | uint64_t _pad3; /* make it 64 byte aligned */ |
diff --git a/init/Kconfig b/init/Kconfig index 79383d3aa5dc..4e5d96ab2034 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK | |||
| 809 | config ARCH_SUPPORTS_NUMA_BALANCING | 809 | config ARCH_SUPPORTS_NUMA_BALANCING |
| 810 | bool | 810 | bool |
| 811 | 811 | ||
| 812 | # | ||
| 813 | # For architectures that know their GCC __int128 support is sound | ||
| 814 | # | ||
| 815 | config ARCH_SUPPORTS_INT128 | ||
| 816 | bool | ||
| 817 | |||
| 812 | # For architectures that (ab)use NUMA to represent different memory regions | 818 | # For architectures that (ab)use NUMA to represent different memory regions |
| 813 | # all cpu-local but of different latencies, such as SuperH. | 819 | # all cpu-local but of different latencies, such as SuperH. |
| 814 | # | 820 | # |
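ARCH_SUPPORTS_INT128 lets an architecture declare that GCC's __int128 is sound, which allows 64-bit multiplies with a wide intermediate at little cost; the kernel/sched/fair.c rewrite later in this section leans on a mul_u64_u32_shr()-style helper of that kind. A userspace sketch of such a helper follows; it mirrors the shape of the idea and is not claimed to be the in-tree implementation:

/* Sketch of the kind of helper ARCH_SUPPORTS_INT128 enables: a full
 * 64x32-bit multiply followed by a shift, done via GCC's __int128.
 * Build: cc -std=c11 -O2 -o int128_demo int128_demo.c   (64-bit GCC/Clang)
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t mul_u64_u32_shr_demo(uint64_t a, uint32_t mul,
					    unsigned int shift)
{
	/* the 128-bit intermediate cannot overflow */
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
	/* scale a large 64-bit value by 3/4 without losing precision */
	uint64_t v = 0xffffffffffffull;

	printf("%llu\n",
	       (unsigned long long)mul_u64_u32_shr_demo(v, 3u << 30, 32));
	return 0;
}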
diff --git a/kernel/Makefile b/kernel/Makefile index bbaf7d59c1bb..bc010ee272b6 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
| @@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE | |||
| 137 | ############################################################################### | 137 | ############################################################################### |
| 138 | ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) | 138 | ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) |
| 139 | X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) | 139 | X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) |
| 140 | X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509 | 140 | X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509 |
| 141 | X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ | 141 | X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ |
| 142 | $(or $(realpath $(CERT)),$(CERT)))) | 142 | $(or $(realpath $(CERT)),$(CERT)))) |
| 143 | X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw)) | ||
| 143 | 144 | ||
| 144 | ifeq ($(X509_CERTIFICATES),) | 145 | ifeq ($(X509_CERTIFICATES),) |
| 145 | $(warning *** No X.509 certificates found ***) | 146 | $(warning *** No X.509 certificates found ***) |
| @@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list | |||
| 164 | targets += $(obj)/.x509.list | 165 | targets += $(obj)/.x509.list |
| 165 | $(obj)/.x509.list: | 166 | $(obj)/.x509.list: |
| 166 | @echo $(X509_CERTIFICATES) >$@ | 167 | @echo $(X509_CERTIFICATES) >$@ |
| 168 | endif | ||
| 167 | 169 | ||
| 168 | clean-files := x509_certificate_list .x509.list | 170 | clean-files := x509_certificate_list .x509.list |
| 169 | endif | ||
| 170 | 171 | ||
| 171 | ifeq ($(CONFIG_MODULE_SIG),y) | 172 | ifeq ($(CONFIG_MODULE_SIG),y) |
| 172 | ############################################################################### | 173 | ############################################################################### |
diff --git a/kernel/bounds.c b/kernel/bounds.c index 5253204afdca..9fd4246b04b8 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c | |||
| @@ -22,6 +22,6 @@ void foo(void) | |||
| 22 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
| 23 | DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); | 23 | DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); |
| 24 | #endif | 24 | #endif |
| 25 | DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int)); | 25 | DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t)); |
| 26 | /* End of constants */ | 26 | /* End of constants */ |
| 27 | } | 27 | } |
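Exporting the raw SPINLOCK_SIZE instead of the precomputed BLOATED_SPINLOCKS boolean lets each consumer form its own comparison; the mm/memory.c hunk further down switches to an ALLOC_SPLIT_PTLOCKS test accordingly. A userspace sketch of that consumer-side decision, with an illustrative spinlock stand-in and threshold (the real condition lives in the mm headers and may differ):

/* Why export the size rather than a boolean: consumers can decide for
 * themselves when a separately allocated page-table lock is needed.
 * Build: cc -std=c11 -o ptlock_demo ptlock_demo.c
 */
#include <stdio.h>

struct demo_spinlock { unsigned int slock; };	/* stand-in for spinlock_t */

#define SPINLOCK_SIZE		sizeof(struct demo_spinlock)
/* mm/memory.c-style consumer: allocate the lock separately only when it
 * no longer fits the space reserved in struct page (threshold assumed).
 */
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > sizeof(long))

int main(void)
{
	printf("spinlock size: %zu, split ptlocks need allocation: %s\n",
	       SPINLOCK_SIZE, ALLOC_SPLIT_PTLOCKS ? "yes" : "no");
	return 0;
}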
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 8b729c278b64..bc1dcabe9217 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
| @@ -890,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) | |||
| 890 | struct cgroup *cgrp = dentry->d_fsdata; | 890 | struct cgroup *cgrp = dentry->d_fsdata; |
| 891 | 891 | ||
| 892 | BUG_ON(!(cgroup_is_dead(cgrp))); | 892 | BUG_ON(!(cgroup_is_dead(cgrp))); |
| 893 | |||
| 894 | /* | ||
| 895 | * XXX: cgrp->id is only used to look up css's. As cgroup | ||
| 896 | * and css's lifetimes will be decoupled, it should be made | ||
| 897 | * per-subsystem and moved to css->id so that lookups are | ||
| 898 | * successful until the target css is released. | ||
| 899 | */ | ||
| 900 | idr_remove(&cgrp->root->cgroup_idr, cgrp->id); | ||
| 901 | cgrp->id = -1; | ||
| 902 | |||
| 893 | call_rcu(&cgrp->rcu_head, cgroup_free_rcu); | 903 | call_rcu(&cgrp->rcu_head, cgroup_free_rcu); |
| 894 | } else { | 904 | } else { |
| 895 | struct cfent *cfe = __d_cfe(dentry); | 905 | struct cfent *cfe = __d_cfe(dentry); |
| @@ -4268,6 +4278,7 @@ static void css_release(struct percpu_ref *ref) | |||
| 4268 | struct cgroup_subsys_state *css = | 4278 | struct cgroup_subsys_state *css = |
| 4269 | container_of(ref, struct cgroup_subsys_state, refcnt); | 4279 | container_of(ref, struct cgroup_subsys_state, refcnt); |
| 4270 | 4280 | ||
| 4281 | rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL); | ||
| 4271 | call_rcu(&css->rcu_head, css_free_rcu_fn); | 4282 | call_rcu(&css->rcu_head, css_free_rcu_fn); |
| 4272 | } | 4283 | } |
| 4273 | 4284 | ||
| @@ -4426,14 +4437,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4426 | list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); | 4437 | list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); |
| 4427 | root->number_of_cgroups++; | 4438 | root->number_of_cgroups++; |
| 4428 | 4439 | ||
| 4429 | /* each css holds a ref to the cgroup's dentry and the parent css */ | ||
| 4430 | for_each_root_subsys(root, ss) { | ||
| 4431 | struct cgroup_subsys_state *css = css_ar[ss->subsys_id]; | ||
| 4432 | |||
| 4433 | dget(dentry); | ||
| 4434 | css_get(css->parent); | ||
| 4435 | } | ||
| 4436 | |||
| 4437 | /* hold a ref to the parent's dentry */ | 4440 | /* hold a ref to the parent's dentry */ |
| 4438 | dget(parent->dentry); | 4441 | dget(parent->dentry); |
| 4439 | 4442 | ||
| @@ -4445,6 +4448,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4445 | if (err) | 4448 | if (err) |
| 4446 | goto err_destroy; | 4449 | goto err_destroy; |
| 4447 | 4450 | ||
| 4451 | /* each css holds a ref to the cgroup's dentry and parent css */ | ||
| 4452 | dget(dentry); | ||
| 4453 | css_get(css->parent); | ||
| 4454 | |||
| 4455 | /* mark it consumed for error path */ | ||
| 4456 | css_ar[ss->subsys_id] = NULL; | ||
| 4457 | |||
| 4448 | if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && | 4458 | if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && |
| 4449 | parent->parent) { | 4459 | parent->parent) { |
| 4450 | pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", | 4460 | pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", |
| @@ -4491,6 +4501,14 @@ err_free_cgrp: | |||
| 4491 | return err; | 4501 | return err; |
| 4492 | 4502 | ||
| 4493 | err_destroy: | 4503 | err_destroy: |
| 4504 | for_each_root_subsys(root, ss) { | ||
| 4505 | struct cgroup_subsys_state *css = css_ar[ss->subsys_id]; | ||
| 4506 | |||
| 4507 | if (css) { | ||
| 4508 | percpu_ref_cancel_init(&css->refcnt); | ||
| 4509 | ss->css_free(css); | ||
| 4510 | } | ||
| 4511 | } | ||
| 4494 | cgroup_destroy_locked(cgrp); | 4512 | cgroup_destroy_locked(cgrp); |
| 4495 | mutex_unlock(&cgroup_mutex); | 4513 | mutex_unlock(&cgroup_mutex); |
| 4496 | mutex_unlock(&dentry->d_inode->i_mutex); | 4514 | mutex_unlock(&dentry->d_inode->i_mutex); |
| @@ -4652,8 +4670,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
| 4652 | * will be invoked to perform the rest of destruction once the | 4670 | * will be invoked to perform the rest of destruction once the |
| 4653 | * percpu refs of all css's are confirmed to be killed. | 4671 | * percpu refs of all css's are confirmed to be killed. |
| 4654 | */ | 4672 | */ |
| 4655 | for_each_root_subsys(cgrp->root, ss) | 4673 | for_each_root_subsys(cgrp->root, ss) { |
| 4656 | kill_css(cgroup_css(cgrp, ss)); | 4674 | struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); |
| 4675 | |||
| 4676 | if (css) | ||
| 4677 | kill_css(css); | ||
| 4678 | } | ||
| 4657 | 4679 | ||
| 4658 | /* | 4680 | /* |
| 4659 | * Mark @cgrp dead. This prevents further task migration and child | 4681 | * Mark @cgrp dead. This prevents further task migration and child |
| @@ -4722,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp) | |||
| 4722 | /* delete this cgroup from parent->children */ | 4744 | /* delete this cgroup from parent->children */ |
| 4723 | list_del_rcu(&cgrp->sibling); | 4745 | list_del_rcu(&cgrp->sibling); |
| 4724 | 4746 | ||
| 4725 | /* | ||
| 4726 | * We should remove the cgroup object from idr before its grace | ||
| 4727 | * period starts, so we won't be looking up a cgroup while the | ||
| 4728 | * cgroup is being freed. | ||
| 4729 | */ | ||
| 4730 | idr_remove(&cgrp->root->cgroup_idr, cgrp->id); | ||
| 4731 | cgrp->id = -1; | ||
| 4732 | |||
| 4733 | dput(d); | 4747 | dput(d); |
| 4734 | 4748 | ||
| 4735 | set_bit(CGRP_RELEASABLE, &parent->flags); | 4749 | set_bit(CGRP_RELEASABLE, &parent->flags); |
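The cgroup_create()/cgroup_destroy_locked() changes above follow a consume-and-clear pattern: once a css has been handed over (dentry and parent references taken), its css_ar[] slot is set to NULL so the err_destroy loop only cancels and frees what was never attached, and the destroy path skips slots that are already gone. A generic userspace sketch of that error-path discipline, with illustrative names:

/* Each resource slot is cleared once ownership is handed over, so the
 * unwind loop releases only what was never consumed.
 * Build: cc -std=c11 -o consume_demo consume_demo.c
 */
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 3

static char *consumed[NSLOTS];		/* resources the "cgroup" now owns */

/* Simulate cgroup_create(): attaching the third resource fails. */
static int attach(int i, char **slot)
{
	if (i == 2)
		return -1;
	consumed[i] = *slot;		/* ownership transferred ... */
	*slot = NULL;			/* ... mark it consumed for the error path */
	return 0;
}

int main(void)
{
	char *slots[NSLOTS];
	int i, err = 0;

	for (i = 0; i < NSLOTS; i++) {
		slots[i] = malloc(8);
		snprintf(slots[i], 8, "css%d", i);
	}

	for (i = 0; i < NSLOTS; i++)
		if ((err = attach(i, &slots[i])))
			break;

	if (err) {
		/* err_destroy: release only what was never consumed */
		for (i = 0; i < NSLOTS; i++)
			if (slots[i]) {
				printf("cancelling %s\n", slots[i]);
				free(slots[i]);
			}
		/* ... normal teardown then releases the consumed ones */
		for (i = 0; i < NSLOTS; i++)
			free(consumed[i]);
	}
	return err ? 1 : 0;
}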
diff --git a/kernel/events/core.c b/kernel/events/core.c index 72348dc192c1..f5744010a8d2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event, | |||
| 1396 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 1396 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 1397 | return; | 1397 | return; |
| 1398 | 1398 | ||
| 1399 | perf_pmu_disable(event->pmu); | ||
| 1400 | |||
| 1399 | event->state = PERF_EVENT_STATE_INACTIVE; | 1401 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 1400 | if (event->pending_disable) { | 1402 | if (event->pending_disable) { |
| 1401 | event->pending_disable = 0; | 1403 | event->pending_disable = 0; |
| @@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event, | |||
| 1412 | ctx->nr_freq--; | 1414 | ctx->nr_freq--; |
| 1413 | if (event->attr.exclusive || !cpuctx->active_oncpu) | 1415 | if (event->attr.exclusive || !cpuctx->active_oncpu) |
| 1414 | cpuctx->exclusive = 0; | 1416 | cpuctx->exclusive = 0; |
| 1417 | |||
| 1418 | perf_pmu_enable(event->pmu); | ||
| 1415 | } | 1419 | } |
| 1416 | 1420 | ||
| 1417 | static void | 1421 | static void |
| @@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event, | |||
| 1652 | struct perf_event_context *ctx) | 1656 | struct perf_event_context *ctx) |
| 1653 | { | 1657 | { |
| 1654 | u64 tstamp = perf_event_time(event); | 1658 | u64 tstamp = perf_event_time(event); |
| 1659 | int ret = 0; | ||
| 1655 | 1660 | ||
| 1656 | if (event->state <= PERF_EVENT_STATE_OFF) | 1661 | if (event->state <= PERF_EVENT_STATE_OFF) |
| 1657 | return 0; | 1662 | return 0; |
| @@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event, | |||
| 1674 | */ | 1679 | */ |
| 1675 | smp_wmb(); | 1680 | smp_wmb(); |
| 1676 | 1681 | ||
| 1682 | perf_pmu_disable(event->pmu); | ||
| 1683 | |||
| 1677 | if (event->pmu->add(event, PERF_EF_START)) { | 1684 | if (event->pmu->add(event, PERF_EF_START)) { |
| 1678 | event->state = PERF_EVENT_STATE_INACTIVE; | 1685 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 1679 | event->oncpu = -1; | 1686 | event->oncpu = -1; |
| 1680 | return -EAGAIN; | 1687 | ret = -EAGAIN; |
| 1688 | goto out; | ||
| 1681 | } | 1689 | } |
| 1682 | 1690 | ||
| 1683 | event->tstamp_running += tstamp - event->tstamp_stopped; | 1691 | event->tstamp_running += tstamp - event->tstamp_stopped; |
| @@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event, | |||
| 1693 | if (event->attr.exclusive) | 1701 | if (event->attr.exclusive) |
| 1694 | cpuctx->exclusive = 1; | 1702 | cpuctx->exclusive = 1; |
| 1695 | 1703 | ||
| 1696 | return 0; | 1704 | out: |
| 1705 | perf_pmu_enable(event->pmu); | ||
| 1706 | |||
| 1707 | return ret; | ||
| 1697 | } | 1708 | } |
| 1698 | 1709 | ||
| 1699 | static int | 1710 | static int |
| @@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
| 2743 | if (!event_filter_match(event)) | 2754 | if (!event_filter_match(event)) |
| 2744 | continue; | 2755 | continue; |
| 2745 | 2756 | ||
| 2757 | perf_pmu_disable(event->pmu); | ||
| 2758 | |||
| 2746 | hwc = &event->hw; | 2759 | hwc = &event->hw; |
| 2747 | 2760 | ||
| 2748 | if (hwc->interrupts == MAX_INTERRUPTS) { | 2761 | if (hwc->interrupts == MAX_INTERRUPTS) { |
| @@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
| 2752 | } | 2765 | } |
| 2753 | 2766 | ||
| 2754 | if (!event->attr.freq || !event->attr.sample_freq) | 2767 | if (!event->attr.freq || !event->attr.sample_freq) |
| 2755 | continue; | 2768 | goto next; |
| 2756 | 2769 | ||
| 2757 | /* | 2770 | /* |
| 2758 | * stop the event and update event->count | 2771 | * stop the event and update event->count |
| @@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
| 2774 | perf_adjust_period(event, period, delta, false); | 2787 | perf_adjust_period(event, period, delta, false); |
| 2775 | 2788 | ||
| 2776 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); | 2789 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); |
| 2790 | next: | ||
| 2791 | perf_pmu_enable(event->pmu); | ||
| 2777 | } | 2792 | } |
| 2778 | 2793 | ||
| 2779 | perf_pmu_enable(ctx->pmu); | 2794 | perf_pmu_enable(ctx->pmu); |
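The perf changes above bracket per-event programming in perf_pmu_disable()/perf_pmu_enable() and turn early exits into goto next / goto out so the matching enable is never skipped. A minimal userspace sketch of the bracketing pattern; the counters and names are stand-ins, not perf API:

/* Every path out of the critical section must pass the matching enable,
 * so early returns become "goto out".
 * Build: cc -std=c11 -o bracket_demo bracket_demo.c
 */
#include <stdio.h>

static int depth;

static void pmu_disable(void) { depth++; }
static void pmu_enable(void)  { depth--; }

static int program_event(int id)
{
	int ret = 0;

	pmu_disable();

	if (id == 1) {		/* early bail-out: previously a bare return */
		ret = -1;
		goto out;
	}
	printf("programmed event %d\n", id);
out:
	pmu_enable();
	return ret;
}

int main(void)
{
	int id;

	for (id = 0; id < 3; id++)
		program_event(id);

	/* depth == 0 shows disable/enable stayed balanced on every path */
	printf("disable depth after loop: %d\n", depth);
	return depth != 0;
}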
diff --git a/kernel/fork.c b/kernel/fork.c index 728d5be9548c..5721f0e3f2da 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) | |||
| 537 | spin_lock_init(&mm->page_table_lock); | 537 | spin_lock_init(&mm->page_table_lock); |
| 538 | mm_init_aio(mm); | 538 | mm_init_aio(mm); |
| 539 | mm_init_owner(mm, p); | 539 | mm_init_owner(mm, p); |
| 540 | clear_tlb_flush_pending(mm); | ||
| 540 | 541 | ||
| 541 | if (likely(!mm_alloc_pgd(mm))) { | 542 | if (likely(!mm_alloc_pgd(mm))) { |
| 542 | mm->def_flags = 0; | 543 | mm->def_flags = 0; |
diff --git a/kernel/freezer.c b/kernel/freezer.c index b462fa197517..aa6a8aadb911 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c | |||
| @@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt); | |||
| 19 | bool pm_freezing; | 19 | bool pm_freezing; |
| 20 | bool pm_nosig_freezing; | 20 | bool pm_nosig_freezing; |
| 21 | 21 | ||
| 22 | /* | ||
| 23 | * Temporary export for the deadlock workaround in ata_scsi_hotplug(). | ||
| 24 | * Remove once the hack becomes unnecessary. | ||
| 25 | */ | ||
| 26 | EXPORT_SYMBOL_GPL(pm_freezing); | ||
| 27 | |||
| 22 | /* protects freezing and frozen transitions */ | 28 | /* protects freezing and frozen transitions */ |
| 23 | static DEFINE_SPINLOCK(freezer_lock); | 29 | static DEFINE_SPINLOCK(freezer_lock); |
| 24 | 30 | ||
diff --git a/kernel/kexec.c b/kernel/kexec.c index d0d8fca54065..9c970167e402 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
| @@ -1680,6 +1680,7 @@ int kernel_kexec(void) | |||
| 1680 | { | 1680 | { |
| 1681 | kexec_in_progress = true; | 1681 | kexec_in_progress = true; |
| 1682 | kernel_restart_prepare(NULL); | 1682 | kernel_restart_prepare(NULL); |
| 1683 | migrate_to_reboot_cpu(); | ||
| 1683 | printk(KERN_EMERG "Starting new kernel\n"); | 1684 | printk(KERN_EMERG "Starting new kernel\n"); |
| 1684 | machine_shutdown(); | 1685 | machine_shutdown(); |
| 1685 | } | 1686 | } |
diff --git a/kernel/power/console.c b/kernel/power/console.c index 463aa6736751..eacb8bd8cab4 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c | |||
| @@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev) | |||
| 81 | list_for_each_entry(tmp, &pm_vt_switch_list, head) { | 81 | list_for_each_entry(tmp, &pm_vt_switch_list, head) { |
| 82 | if (tmp->dev == dev) { | 82 | if (tmp->dev == dev) { |
| 83 | list_del(&tmp->head); | 83 | list_del(&tmp->head); |
| 84 | kfree(tmp); | ||
| 84 | break; | 85 | break; |
| 85 | } | 86 | } |
| 86 | } | 87 | } |
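The one-line kfree() above fixes a leak: pm_vt_switch_unregister() unlinked the matching entry but never released it. A tiny userspace illustration of the same unlink-then-free requirement on a hand-rolled list:

/* Removing an entry from a list is not enough; the node must also be freed.
 * Build: cc -std=c11 -o unreg_demo unreg_demo.c
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int dev;
	struct entry *next;
};

static struct entry *head;

static void reg(int dev)
{
	struct entry *e = malloc(sizeof(*e));

	e->dev = dev;
	e->next = head;
	head = e;
}

static void unreg(int dev)
{
	struct entry **pp;

	for (pp = &head; *pp; pp = &(*pp)->next) {
		if ((*pp)->dev == dev) {
			struct entry *tmp = *pp;

			*pp = tmp->next;	/* list_del() equivalent */
			free(tmp);		/* the previously missing kfree() */
			break;
		}
	}
}

int main(void)
{
	reg(1); reg(2);
	unreg(1); unreg(2);
	printf("list empty: %s\n", head ? "no" : "yes");
	return 0;
}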
diff --git a/kernel/reboot.c b/kernel/reboot.c index f813b3474646..662c83fc16b7 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c | |||
| @@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb) | |||
| 104 | } | 104 | } |
| 105 | EXPORT_SYMBOL(unregister_reboot_notifier); | 105 | EXPORT_SYMBOL(unregister_reboot_notifier); |
| 106 | 106 | ||
| 107 | static void migrate_to_reboot_cpu(void) | 107 | void migrate_to_reboot_cpu(void) |
| 108 | { | 108 | { |
| 109 | /* The boot cpu is always logical cpu 0 */ | 109 | /* The boot cpu is always logical cpu 0 */ |
| 110 | int cpu = reboot_cpu; | 110 | int cpu = reboot_cpu; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e85cda20ab2b..a88f4a485c5e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym); | |||
| 4902 | static void update_top_cache_domain(int cpu) | 4902 | static void update_top_cache_domain(int cpu) |
| 4903 | { | 4903 | { |
| 4904 | struct sched_domain *sd; | 4904 | struct sched_domain *sd; |
| 4905 | struct sched_domain *busy_sd = NULL; | ||
| 4905 | int id = cpu; | 4906 | int id = cpu; |
| 4906 | int size = 1; | 4907 | int size = 1; |
| 4907 | 4908 | ||
| @@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu) | |||
| 4909 | if (sd) { | 4910 | if (sd) { |
| 4910 | id = cpumask_first(sched_domain_span(sd)); | 4911 | id = cpumask_first(sched_domain_span(sd)); |
| 4911 | size = cpumask_weight(sched_domain_span(sd)); | 4912 | size = cpumask_weight(sched_domain_span(sd)); |
| 4912 | sd = sd->parent; /* sd_busy */ | 4913 | busy_sd = sd->parent; /* sd_busy */ |
| 4913 | } | 4914 | } |
| 4914 | rcu_assign_pointer(per_cpu(sd_busy, cpu), sd); | 4915 | rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); |
| 4915 | 4916 | ||
| 4916 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); | 4917 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); |
| 4917 | per_cpu(sd_llc_size, cpu) = size; | 4918 | per_cpu(sd_llc_size, cpu) = size; |
| @@ -5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) | |||
| 5112 | * die on a /0 trap. | 5113 | * die on a /0 trap. |
| 5113 | */ | 5114 | */ |
| 5114 | sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); | 5115 | sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); |
| 5116 | sg->sgp->power_orig = sg->sgp->power; | ||
| 5115 | 5117 | ||
| 5116 | /* | 5118 | /* |
| 5117 | * Make sure the first group of this domain contains the | 5119 | * Make sure the first group of this domain contains the |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fd773ade1a31..c7395d97e4cb 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -178,59 +178,61 @@ void sched_init_granularity(void) | |||
| 178 | update_sysctl(); | 178 | update_sysctl(); |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | #if BITS_PER_LONG == 32 | 181 | #define WMULT_CONST (~0U) |
| 182 | # define WMULT_CONST (~0UL) | ||
| 183 | #else | ||
| 184 | # define WMULT_CONST (1UL << 32) | ||
| 185 | #endif | ||
| 186 | |||
| 187 | #define WMULT_SHIFT 32 | 182 | #define WMULT_SHIFT 32 |
| 188 | 183 | ||
| 189 | /* | 184 | static void __update_inv_weight(struct load_weight *lw) |
| 190 | * Shift right and round: | 185 | { |
| 191 | */ | 186 | unsigned long w; |
| 192 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) | 187 | |
| 188 | if (likely(lw->inv_weight)) | ||
| 189 | return; | ||
| 190 | |||
| 191 | w = scale_load_down(lw->weight); | ||
| 192 | |||
| 193 | if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) | ||
| 194 | lw->inv_weight = 1; | ||
| 195 | else if (unlikely(!w)) | ||
| 196 | lw->inv_weight = WMULT_CONST; | ||
| 197 | else | ||
| 198 | lw->inv_weight = WMULT_CONST / w; | ||
| 199 | } | ||
| 193 | 200 | ||
| 194 | /* | 201 | /* |
| 195 | * delta *= weight / lw | 202 | * delta_exec * weight / lw.weight |
| 203 | * OR | ||
| 204 | * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT | ||
| 205 | * | ||
| 206 | * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case | ||
| 207 | * we're guaranteed shift stays positive because inv_weight is guaranteed to | ||
| 208 | * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22. | ||
| 209 | * | ||
| 210 | * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus | ||
| 211 | * weight/lw.weight <= 1, and therefore our shift will also be positive. | ||
| 196 | */ | 212 | */ |
| 197 | static unsigned long | 213 | static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw) |
| 198 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, | ||
| 199 | struct load_weight *lw) | ||
| 200 | { | 214 | { |
| 201 | u64 tmp; | 215 | u64 fact = scale_load_down(weight); |
| 202 | 216 | int shift = WMULT_SHIFT; | |
| 203 | /* | ||
| 204 | * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched | ||
| 205 | * entities since MIN_SHARES = 2. Treat weight as 1 if less than | ||
| 206 | * 2^SCHED_LOAD_RESOLUTION. | ||
| 207 | */ | ||
| 208 | if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) | ||
| 209 | tmp = (u64)delta_exec * scale_load_down(weight); | ||
| 210 | else | ||
| 211 | tmp = (u64)delta_exec; | ||
| 212 | 217 | ||
| 213 | if (!lw->inv_weight) { | 218 | __update_inv_weight(lw); |
| 214 | unsigned long w = scale_load_down(lw->weight); | ||
| 215 | 219 | ||
| 216 | if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) | 220 | if (unlikely(fact >> 32)) { |
| 217 | lw->inv_weight = 1; | 221 | while (fact >> 32) { |
| 218 | else if (unlikely(!w)) | 222 | fact >>= 1; |
| 219 | lw->inv_weight = WMULT_CONST; | 223 | shift--; |
| 220 | else | 224 | } |
| 221 | lw->inv_weight = WMULT_CONST / w; | ||
| 222 | } | 225 | } |
| 223 | 226 | ||
| 224 | /* | 227 | /* hint to use a 32x32->64 mul */ |
| 225 | * Check whether we'd overflow the 64-bit multiplication: | 228 | fact = (u64)(u32)fact * lw->inv_weight; |
| 226 | */ | 229 | |
| 227 | if (unlikely(tmp > WMULT_CONST)) | 230 | while (fact >> 32) { |
| 228 | tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, | 231 | fact >>= 1; |
| 229 | WMULT_SHIFT/2); | 232 | shift--; |
| 230 | else | 233 | } |
| 231 | tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); | ||
| 232 | 234 | ||
| 233 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); | 235 | return mul_u64_u32_shr(delta_exec, fact, shift); |
| 234 | } | 236 | } |
| 235 | 237 | ||
| 236 | 238 | ||
| @@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) | |||
| 443 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 445 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 444 | 446 | ||
| 445 | static __always_inline | 447 | static __always_inline |
| 446 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec); | 448 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); |
| 447 | 449 | ||
| 448 | /************************************************************** | 450 | /************************************************************** |
| 449 | * Scheduling class tree data structure manipulation methods: | 451 | * Scheduling class tree data structure manipulation methods: |
| @@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write, | |||
| 612 | /* | 614 | /* |
| 613 | * delta /= w | 615 | * delta /= w |
| 614 | */ | 616 | */ |
| 615 | static inline unsigned long | 617 | static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) |
| 616 | calc_delta_fair(unsigned long delta, struct sched_entity *se) | ||
| 617 | { | 618 | { |
| 618 | if (unlikely(se->load.weight != NICE_0_LOAD)) | 619 | if (unlikely(se->load.weight != NICE_0_LOAD)) |
| 619 | delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load); | 620 | delta = __calc_delta(delta, NICE_0_LOAD, &se->load); |
| 620 | 621 | ||
| 621 | return delta; | 622 | return delta; |
| 622 | } | 623 | } |
| @@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
| 665 | update_load_add(&lw, se->load.weight); | 666 | update_load_add(&lw, se->load.weight); |
| 666 | load = &lw; | 667 | load = &lw; |
| 667 | } | 668 | } |
| 668 | slice = calc_delta_mine(slice, se->load.weight, load); | 669 | slice = __calc_delta(slice, se->load.weight, load); |
| 669 | } | 670 | } |
| 670 | return slice; | 671 | return slice; |
| 671 | } | 672 | } |
| @@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p) | |||
| 703 | #endif | 704 | #endif |
| 704 | 705 | ||
| 705 | /* | 706 | /* |
| 706 | * Update the current task's runtime statistics. Skip current tasks that | 707 | * Update the current task's runtime statistics. |
| 707 | * are not in our scheduling class. | ||
| 708 | */ | 708 | */ |
| 709 | static inline void | ||
| 710 | __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, | ||
| 711 | unsigned long delta_exec) | ||
| 712 | { | ||
| 713 | unsigned long delta_exec_weighted; | ||
| 714 | |||
| 715 | schedstat_set(curr->statistics.exec_max, | ||
| 716 | max((u64)delta_exec, curr->statistics.exec_max)); | ||
| 717 | |||
| 718 | curr->sum_exec_runtime += delta_exec; | ||
| 719 | schedstat_add(cfs_rq, exec_clock, delta_exec); | ||
| 720 | delta_exec_weighted = calc_delta_fair(delta_exec, curr); | ||
| 721 | |||
| 722 | curr->vruntime += delta_exec_weighted; | ||
| 723 | update_min_vruntime(cfs_rq); | ||
| 724 | } | ||
| 725 | |||
| 726 | static void update_curr(struct cfs_rq *cfs_rq) | 709 | static void update_curr(struct cfs_rq *cfs_rq) |
| 727 | { | 710 | { |
| 728 | struct sched_entity *curr = cfs_rq->curr; | 711 | struct sched_entity *curr = cfs_rq->curr; |
| 729 | u64 now = rq_clock_task(rq_of(cfs_rq)); | 712 | u64 now = rq_clock_task(rq_of(cfs_rq)); |
| 730 | unsigned long delta_exec; | 713 | u64 delta_exec; |
| 731 | 714 | ||
| 732 | if (unlikely(!curr)) | 715 | if (unlikely(!curr)) |
| 733 | return; | 716 | return; |
| 734 | 717 | ||
| 735 | /* | 718 | delta_exec = now - curr->exec_start; |
| 736 | * Get the amount of time the current task was running | 719 | if (unlikely((s64)delta_exec <= 0)) |
| 737 | * since the last time we changed load (this cannot | ||
| 738 | * overflow on 32 bits): | ||
| 739 | */ | ||
| 740 | delta_exec = (unsigned long)(now - curr->exec_start); | ||
| 741 | if (!delta_exec) | ||
| 742 | return; | 720 | return; |
| 743 | 721 | ||
| 744 | __update_curr(cfs_rq, curr, delta_exec); | ||
| 745 | curr->exec_start = now; | 722 | curr->exec_start = now; |
| 746 | 723 | ||
| 724 | schedstat_set(curr->statistics.exec_max, | ||
| 725 | max(delta_exec, curr->statistics.exec_max)); | ||
| 726 | |||
| 727 | curr->sum_exec_runtime += delta_exec; | ||
| 728 | schedstat_add(cfs_rq, exec_clock, delta_exec); | ||
| 729 | |||
| 730 | curr->vruntime += calc_delta_fair(delta_exec, curr); | ||
| 731 | update_min_vruntime(cfs_rq); | ||
| 732 | |||
| 747 | if (entity_is_task(curr)) { | 733 | if (entity_is_task(curr)) { |
| 748 | struct task_struct *curtask = task_of(curr); | 734 | struct task_struct *curtask = task_of(curr); |
| 749 | 735 | ||
| @@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work) | |||
| 1752 | (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) | 1738 | (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) |
| 1753 | continue; | 1739 | continue; |
| 1754 | 1740 | ||
| 1741 | /* | ||
| 1742 | * Skip inaccessible VMAs to avoid any confusion between | ||
| 1743 | * PROT_NONE and NUMA hinting ptes | ||
| 1744 | */ | ||
| 1745 | if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) | ||
| 1746 | continue; | ||
| 1747 | |||
| 1755 | do { | 1748 | do { |
| 1756 | start = max(start, vma->vm_start); | 1749 | start = max(start, vma->vm_start); |
| 1757 | end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); | 1750 | end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); |
| @@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) | |||
| 3015 | } | 3008 | } |
| 3016 | } | 3009 | } |
| 3017 | 3010 | ||
| 3018 | static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | 3011 | static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) |
| 3019 | unsigned long delta_exec) | ||
| 3020 | { | 3012 | { |
| 3021 | /* dock delta_exec before expiring quota (as it could span periods) */ | 3013 | /* dock delta_exec before expiring quota (as it could span periods) */ |
| 3022 | cfs_rq->runtime_remaining -= delta_exec; | 3014 | cfs_rq->runtime_remaining -= delta_exec; |
| @@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | |||
| 3034 | } | 3026 | } |
| 3035 | 3027 | ||
| 3036 | static __always_inline | 3028 | static __always_inline |
| 3037 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) | 3029 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) |
| 3038 | { | 3030 | { |
| 3039 | if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) | 3031 | if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) |
| 3040 | return; | 3032 | return; |
| @@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) | |||
| 3574 | return rq_clock_task(rq_of(cfs_rq)); | 3566 | return rq_clock_task(rq_of(cfs_rq)); |
| 3575 | } | 3567 | } |
| 3576 | 3568 | ||
| 3577 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | 3569 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} |
| 3578 | unsigned long delta_exec) {} | ||
| 3579 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | 3570 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
| 3580 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} | 3571 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} |
| 3581 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | 3572 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
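The calc_delta_mine() to __calc_delta() rewrite above replaces the 32/64-bit special cases with one fixed-point scheme: cache inv_weight = 2^32 / lw->weight, keep the weight factor within 32 bits by trading shift for precision, and finish with a single 64x32 multiply-shift. A userspace sketch of the arithmetic follows; the load scaling and the w >= WMULT_CONST / w == 0 corner cases are simplified away, and inv_weight is recomputed per call rather than cached, so results only approximate the kernel's:

/* delta_exec * weight / lw_weight evaluated as shifts plus one 64x32 multiply.
 * Build: cc -std=c11 -O2 -o calc_delta calc_delta.c   (64-bit GCC/Clang)
 */
#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

static uint64_t calc_delta(uint64_t delta_exec, unsigned long weight,
			   unsigned long lw_weight)
{
	uint32_t inv_weight = lw_weight ? WMULT_CONST / lw_weight : WMULT_CONST;
	uint64_t fact = weight;
	int shift = WMULT_SHIFT;

	/* keep fact within 32 bits so the final multiply is 64x32 */
	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	fact = (uint64_t)(uint32_t)fact * inv_weight;

	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	return mul_u64_u32_shr(delta_exec, (uint32_t)fact, shift);
}

int main(void)
{
	/* 10ms of runtime for a task at twice the NICE_0 weight advances
	 * vruntime by roughly 5ms (10000000 * 1024 / 2048).
	 */
	printf("%llu\n",
	       (unsigned long long)calc_delta(10000000ULL, 1024, 2048));
	return 0;
}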
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 7d57275fc396..1c4065575fa2 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
| @@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) | |||
| 901 | { | 901 | { |
| 902 | struct rq *rq = rq_of_rt_rq(rt_rq); | 902 | struct rq *rq = rq_of_rt_rq(rt_rq); |
| 903 | 903 | ||
| 904 | #ifdef CONFIG_RT_GROUP_SCHED | ||
| 905 | /* | ||
| 906 | * Change rq's cpupri only if rt_rq is the top queue. | ||
| 907 | */ | ||
| 908 | if (&rq->rt != rt_rq) | ||
| 909 | return; | ||
| 910 | #endif | ||
| 904 | if (rq->online && prio < prev_prio) | 911 | if (rq->online && prio < prev_prio) |
| 905 | cpupri_set(&rq->rd->cpupri, rq->cpu, prio); | 912 | cpupri_set(&rq->rd->cpupri, rq->cpu, prio); |
| 906 | } | 913 | } |
| @@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) | |||
| 910 | { | 917 | { |
| 911 | struct rq *rq = rq_of_rt_rq(rt_rq); | 918 | struct rq *rq = rq_of_rt_rq(rt_rq); |
| 912 | 919 | ||
| 920 | #ifdef CONFIG_RT_GROUP_SCHED | ||
| 921 | /* | ||
| 922 | * Change rq's cpupri only if rt_rq is the top queue. | ||
| 923 | */ | ||
| 924 | if (&rq->rt != rt_rq) | ||
| 925 | return; | ||
| 926 | #endif | ||
| 913 | if (rq->online && rt_rq->highest_prio.curr != prev_prio) | 927 | if (rq->online && rt_rq->highest_prio.curr != prev_prio) |
| 914 | cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); | 928 | cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); |
| 915 | } | 929 | } |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0e9f9eaade2f..72a0f81dc5a8 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -775,7 +775,7 @@ static int ftrace_profile_init(void) | |||
| 775 | int cpu; | 775 | int cpu; |
| 776 | int ret = 0; | 776 | int ret = 0; |
| 777 | 777 | ||
| 778 | for_each_online_cpu(cpu) { | 778 | for_each_possible_cpu(cpu) { |
| 779 | ret = ftrace_profile_init_cpu(cpu); | 779 | ret = ftrace_profile_init_cpu(cpu); |
| 780 | if (ret) | 780 | if (ret) |
| 781 | break; | 781 | break; |
diff --git a/kernel/user.c b/kernel/user.c index a3a0dbfda329..c006131beb77 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
| @@ -51,9 +51,9 @@ struct user_namespace init_user_ns = { | |||
| 51 | .owner = GLOBAL_ROOT_UID, | 51 | .owner = GLOBAL_ROOT_UID, |
| 52 | .group = GLOBAL_ROOT_GID, | 52 | .group = GLOBAL_ROOT_GID, |
| 53 | .proc_inum = PROC_USER_INIT_INO, | 53 | .proc_inum = PROC_USER_INIT_INO, |
| 54 | #ifdef CONFIG_KEYS_KERBEROS_CACHE | 54 | #ifdef CONFIG_PERSISTENT_KEYRINGS |
| 55 | .krb_cache_register_sem = | 55 | .persistent_keyring_register_sem = |
| 56 | __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem), | 56 | __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem), |
| 57 | #endif | 57 | #endif |
| 58 | }; | 58 | }; |
| 59 | EXPORT_SYMBOL_GPL(init_user_ns); | 59 | EXPORT_SYMBOL_GPL(init_user_ns); |
diff --git a/mm/Kconfig b/mm/Kconfig index eb69f352401d..723bbe04a0b0 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -543,7 +543,7 @@ config ZSWAP | |||
| 543 | 543 | ||
| 544 | config MEM_SOFT_DIRTY | 544 | config MEM_SOFT_DIRTY |
| 545 | bool "Track memory changes" | 545 | bool "Track memory changes" |
| 546 | depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY | 546 | depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS |
| 547 | select PROC_PAGE_MONITOR | 547 | select PROC_PAGE_MONITOR |
| 548 | help | 548 | help |
| 549 | This option enables memory changes tracking by introducing a | 549 | This option enables memory changes tracking by introducing a |
diff --git a/mm/compaction.c b/mm/compaction.c index 805165bcd3dd..f58bcd016f43 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc, | |||
| 134 | bool migrate_scanner) | 134 | bool migrate_scanner) |
| 135 | { | 135 | { |
| 136 | struct zone *zone = cc->zone; | 136 | struct zone *zone = cc->zone; |
| 137 | |||
| 138 | if (cc->ignore_skip_hint) | ||
| 139 | return; | ||
| 140 | |||
| 137 | if (!page) | 141 | if (!page) |
| 138 | return; | 142 | return; |
| 139 | 143 | ||
diff --git a/mm/fremap.c b/mm/fremap.c index 5bff08147768..bbc4d660221a 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
| @@ -208,9 +208,10 @@ get_write_lock: | |||
| 208 | if (mapping_cap_account_dirty(mapping)) { | 208 | if (mapping_cap_account_dirty(mapping)) { |
| 209 | unsigned long addr; | 209 | unsigned long addr; |
| 210 | struct file *file = get_file(vma->vm_file); | 210 | struct file *file = get_file(vma->vm_file); |
| 211 | /* mmap_region may free vma; grab the info now */ | ||
| 212 | vm_flags = vma->vm_flags; | ||
| 211 | 213 | ||
| 212 | addr = mmap_region(file, start, size, | 214 | addr = mmap_region(file, start, size, vm_flags, pgoff); |
| 213 | vma->vm_flags, pgoff); | ||
| 214 | fput(file); | 215 | fput(file); |
| 215 | if (IS_ERR_VALUE(addr)) { | 216 | if (IS_ERR_VALUE(addr)) { |
| 216 | err = addr; | 217 | err = addr; |
| @@ -218,7 +219,7 @@ get_write_lock: | |||
| 218 | BUG_ON(addr != start); | 219 | BUG_ON(addr != start); |
| 219 | err = 0; | 220 | err = 0; |
| 220 | } | 221 | } |
| 221 | goto out; | 222 | goto out_freed; |
| 222 | } | 223 | } |
| 223 | mutex_lock(&mapping->i_mmap_mutex); | 224 | mutex_lock(&mapping->i_mmap_mutex); |
| 224 | flush_dcache_mmap_lock(mapping); | 225 | flush_dcache_mmap_lock(mapping); |
| @@ -253,6 +254,7 @@ get_write_lock: | |||
| 253 | out: | 254 | out: |
| 254 | if (vma) | 255 | if (vma) |
| 255 | vm_flags = vma->vm_flags; | 256 | vm_flags = vma->vm_flags; |
| 257 | out_freed: | ||
| 256 | if (likely(!has_write_lock)) | 258 | if (likely(!has_write_lock)) |
| 257 | up_read(&mm->mmap_sem); | 259 | up_read(&mm->mmap_sem); |
| 258 | else | 260 | else |
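The fremap.c fix above copies vma->vm_flags before calling mmap_region(), which may free the vma, and routes the return through out_freed so nothing dereferences the stale pointer. A small userspace sketch of the save-before-the-call discipline, with illustrative names:

/* Copy the fields you still need out of an object before calling something
 * that may free it.
 * Build: cc -std=c11 -o save_first save_first.c
 */
#include <stdio.h>
#include <stdlib.h>

struct vma_like {
	unsigned long flags;
};

/* Stand-in for mmap_region(): may free the object it was derived from. */
static long remap(struct vma_like **vmap, unsigned long flags)
{
	free(*vmap);
	*vmap = NULL;
	return (long)flags;		/* pretend this is the mapped address */
}

int main(void)
{
	struct vma_like *vma = malloc(sizeof(*vma));
	unsigned long vm_flags;
	long addr;

	vma->flags = 0x73;

	vm_flags = vma->flags;		/* grab the info now: vma may not survive */
	addr = remap(&vma, vm_flags);	/* must not touch vma->flags after this */

	printf("addr=%ld flags=%#lx\n", addr, vm_flags);
	return 0;
}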
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 33a5dc492810..95d1acb0f3d2 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -882,6 +882,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
| 882 | ret = 0; | 882 | ret = 0; |
| 883 | goto out_unlock; | 883 | goto out_unlock; |
| 884 | } | 884 | } |
| 885 | |||
| 885 | if (unlikely(pmd_trans_splitting(pmd))) { | 886 | if (unlikely(pmd_trans_splitting(pmd))) { |
| 886 | /* split huge page running from under us */ | 887 | /* split huge page running from under us */ |
| 887 | spin_unlock(src_ptl); | 888 | spin_unlock(src_ptl); |
| @@ -1153,7 +1154,7 @@ alloc: | |||
| 1153 | new_page = NULL; | 1154 | new_page = NULL; |
| 1154 | 1155 | ||
| 1155 | if (unlikely(!new_page)) { | 1156 | if (unlikely(!new_page)) { |
| 1156 | if (is_huge_zero_pmd(orig_pmd)) { | 1157 | if (!page) { |
| 1157 | ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, | 1158 | ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, |
| 1158 | address, pmd, orig_pmd, haddr); | 1159 | address, pmd, orig_pmd, haddr); |
| 1159 | } else { | 1160 | } else { |
| @@ -1180,7 +1181,7 @@ alloc: | |||
| 1180 | 1181 | ||
| 1181 | count_vm_event(THP_FAULT_ALLOC); | 1182 | count_vm_event(THP_FAULT_ALLOC); |
| 1182 | 1183 | ||
| 1183 | if (is_huge_zero_pmd(orig_pmd)) | 1184 | if (!page) |
| 1184 | clear_huge_page(new_page, haddr, HPAGE_PMD_NR); | 1185 | clear_huge_page(new_page, haddr, HPAGE_PMD_NR); |
| 1185 | else | 1186 | else |
| 1186 | copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); | 1187 | copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); |
| @@ -1206,7 +1207,7 @@ alloc: | |||
| 1206 | page_add_new_anon_rmap(new_page, vma, haddr); | 1207 | page_add_new_anon_rmap(new_page, vma, haddr); |
| 1207 | set_pmd_at(mm, haddr, pmd, entry); | 1208 | set_pmd_at(mm, haddr, pmd, entry); |
| 1208 | update_mmu_cache_pmd(vma, address, pmd); | 1209 | update_mmu_cache_pmd(vma, address, pmd); |
| 1209 | if (is_huge_zero_pmd(orig_pmd)) { | 1210 | if (!page) { |
| 1210 | add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); | 1211 | add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); |
| 1211 | put_huge_zero_page(); | 1212 | put_huge_zero_page(); |
| 1212 | } else { | 1213 | } else { |
| @@ -1243,6 +1244,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | |||
| 1243 | if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) | 1244 | if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) |
| 1244 | return ERR_PTR(-EFAULT); | 1245 | return ERR_PTR(-EFAULT); |
| 1245 | 1246 | ||
| 1247 | /* Full NUMA hinting faults to serialise migration in fault paths */ | ||
| 1248 | if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) | ||
| 1249 | goto out; | ||
| 1250 | |||
| 1246 | page = pmd_page(*pmd); | 1251 | page = pmd_page(*pmd); |
| 1247 | VM_BUG_ON(!PageHead(page)); | 1252 | VM_BUG_ON(!PageHead(page)); |
| 1248 | if (flags & FOLL_TOUCH) { | 1253 | if (flags & FOLL_TOUCH) { |
| @@ -1295,6 +1300,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1295 | if (unlikely(!pmd_same(pmd, *pmdp))) | 1300 | if (unlikely(!pmd_same(pmd, *pmdp))) |
| 1296 | goto out_unlock; | 1301 | goto out_unlock; |
| 1297 | 1302 | ||
| 1303 | /* | ||
| 1304 | * If there are potential migrations, wait for completion and retry | ||
| 1305 | * without disrupting NUMA hinting information. Do not relock and | ||
| 1306 | * check_same as the page may no longer be mapped. | ||
| 1307 | */ | ||
| 1308 | if (unlikely(pmd_trans_migrating(*pmdp))) { | ||
| 1309 | spin_unlock(ptl); | ||
| 1310 | wait_migrate_huge_page(vma->anon_vma, pmdp); | ||
| 1311 | goto out; | ||
| 1312 | } | ||
| 1313 | |||
| 1298 | page = pmd_page(pmd); | 1314 | page = pmd_page(pmd); |
| 1299 | BUG_ON(is_huge_zero_page(page)); | 1315 | BUG_ON(is_huge_zero_page(page)); |
| 1300 | page_nid = page_to_nid(page); | 1316 | page_nid = page_to_nid(page); |
| @@ -1323,23 +1339,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1323 | /* If the page was locked, there are no parallel migrations */ | 1339 | /* If the page was locked, there are no parallel migrations */ |
| 1324 | if (page_locked) | 1340 | if (page_locked) |
| 1325 | goto clear_pmdnuma; | 1341 | goto clear_pmdnuma; |
| 1342 | } | ||
| 1326 | 1343 | ||
| 1327 | /* | 1344 | /* Migration could have started since the pmd_trans_migrating check */ |
| 1328 | * Otherwise wait for potential migrations and retry. We do | 1345 | if (!page_locked) { |
| 1329 | * relock and check_same as the page may no longer be mapped. | ||
| 1330 | * As the fault is being retried, do not account for it. | ||
| 1331 | */ | ||
| 1332 | spin_unlock(ptl); | 1346 | spin_unlock(ptl); |
| 1333 | wait_on_page_locked(page); | 1347 | wait_on_page_locked(page); |
| 1334 | page_nid = -1; | 1348 | page_nid = -1; |
| 1335 | goto out; | 1349 | goto out; |
| 1336 | } | 1350 | } |
| 1337 | 1351 | ||
| 1338 | /* Page is misplaced, serialise migrations and parallel THP splits */ | 1352 | /* |
| 1353 | * Page is misplaced. Page lock serialises migrations. Acquire anon_vma | ||
| 1354 | * to serialises splits | ||
| 1355 | */ | ||
| 1339 | get_page(page); | 1356 | get_page(page); |
| 1340 | spin_unlock(ptl); | 1357 | spin_unlock(ptl); |
| 1341 | if (!page_locked) | ||
| 1342 | lock_page(page); | ||
| 1343 | anon_vma = page_lock_anon_vma_read(page); | 1358 | anon_vma = page_lock_anon_vma_read(page); |
| 1344 | 1359 | ||
| 1345 | /* Confirm the PMD did not change while page_table_lock was released */ | 1360 | /* Confirm the PMD did not change while page_table_lock was released */ |
| @@ -1351,6 +1366,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1351 | goto out_unlock; | 1366 | goto out_unlock; |
| 1352 | } | 1367 | } |
| 1353 | 1368 | ||
| 1369 | /* Bail if we fail to protect against THP splits for any reason */ | ||
| 1370 | if (unlikely(!anon_vma)) { | ||
| 1371 | put_page(page); | ||
| 1372 | page_nid = -1; | ||
| 1373 | goto clear_pmdnuma; | ||
| 1374 | } | ||
| 1375 | |||
| 1354 | /* | 1376 | /* |
| 1355 | * Migrate the THP to the requested node, returns with page unlocked | 1377 | * Migrate the THP to the requested node, returns with page unlocked |
| 1356 | * and pmd_numa cleared. | 1378 | * and pmd_numa cleared. |
| @@ -1517,6 +1539,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 1517 | ret = 1; | 1539 | ret = 1; |
| 1518 | if (!prot_numa) { | 1540 | if (!prot_numa) { |
| 1519 | entry = pmdp_get_and_clear(mm, addr, pmd); | 1541 | entry = pmdp_get_and_clear(mm, addr, pmd); |
| 1542 | if (pmd_numa(entry)) | ||
| 1543 | entry = pmd_mknonnuma(entry); | ||
| 1520 | entry = pmd_modify(entry, newprot); | 1544 | entry = pmd_modify(entry, newprot); |
| 1521 | ret = HPAGE_PMD_NR; | 1545 | ret = HPAGE_PMD_NR; |
| 1522 | BUG_ON(pmd_write(entry)); | 1546 | BUG_ON(pmd_write(entry)); |
| @@ -1531,7 +1555,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 1531 | */ | 1555 | */ |
| 1532 | if (!is_huge_zero_page(page) && | 1556 | if (!is_huge_zero_page(page) && |
| 1533 | !pmd_numa(*pmd)) { | 1557 | !pmd_numa(*pmd)) { |
| 1534 | entry = pmdp_get_and_clear(mm, addr, pmd); | 1558 | entry = *pmd; |
| 1535 | entry = pmd_mknuma(entry); | 1559 | entry = pmd_mknuma(entry); |
| 1536 | ret = HPAGE_PMD_NR; | 1560 | ret = HPAGE_PMD_NR; |
| 1537 | } | 1561 | } |
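Several of the huge_memory.c hunks above make the NUMA fault path back off while a THP is mid-migration: check a migration marker under the page table lock and, if it is set, drop the lock, wait, and let the fault be retried rather than touching a page that is being moved. A loose userspace analogue of that wait-then-retry flow, using a pthread flag in place of the page lock and pmd checks:

/* One thread "migrates" while the fault path waits for the marker to clear
 * before proceeding.
 * Build: cc -pthread -o numa_retry numa_retry.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int migrating = 1;		/* stand-in for pmd_trans_migrating() */

static void *migrator(void *arg)
{
	/* pretend to copy the huge page, then clear the marker */
	pthread_mutex_lock(&ptl);
	migrating = 0;
	pthread_cond_broadcast(&done);
	pthread_mutex_unlock(&ptl);
	return arg;
}

static void numa_fault(void)
{
	pthread_mutex_lock(&ptl);
	while (migrating) {
		/* drop the "page table lock", wait, then retry */
		pthread_cond_wait(&done, &ptl);
	}
	pthread_mutex_unlock(&ptl);
	printf("fault handled after migration completed\n");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, migrator, NULL);
	numa_fault();
	pthread_join(t, NULL);
	return 0;
}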
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index bf5e89457149..7f1a356153c0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -338,7 +338,7 @@ struct mem_cgroup { | |||
| 338 | static size_t memcg_size(void) | 338 | static size_t memcg_size(void) |
| 339 | { | 339 | { |
| 340 | return sizeof(struct mem_cgroup) + | 340 | return sizeof(struct mem_cgroup) + |
| 341 | nr_node_ids * sizeof(struct mem_cgroup_per_node); | 341 | nr_node_ids * sizeof(struct mem_cgroup_per_node *); |
| 342 | } | 342 | } |
| 343 | 343 | ||
| 344 | /* internal only representation about the status of kmem accounting. */ | 344 | /* internal only representation about the status of kmem accounting. */ |
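The memcg_size() fix above is the classic sizing bug: the structure ends in an array of per-node pointers, so the allocation must grow by sizeof(pointer) per node, not by the size of the pointed-to structure. A standalone sketch of the difference, with illustrative names:

/* Trailing flexible array of pointers: size the allocation per pointer,
 * not per pointed-to struct.
 * Build: cc -std=c11 -o memcg_size memcg_size.c
 */
#include <stdio.h>
#include <stdlib.h>

struct per_node_info {
	char stats[256];
};

struct container {
	int id;
	struct per_node_info *nodeinfo[];	/* flexible array of pointers */
};

int main(void)
{
	int nr_node_ids = 4;
	size_t wrong = sizeof(struct container) +
		       nr_node_ids * sizeof(struct per_node_info);
	size_t right = sizeof(struct container) +
		       nr_node_ids * sizeof(struct per_node_info *);
	struct container *c;

	printf("over-allocated: %zu bytes, needed: %zu bytes\n", wrong, right);

	c = calloc(1, right);
	free(c);
	return 0;
}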
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b7c171602ba1..fabe55046c1d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
| @@ -938,6 +938,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
| 938 | BUG_ON(!PageHWPoison(p)); | 938 | BUG_ON(!PageHWPoison(p)); |
| 939 | return SWAP_FAIL; | 939 | return SWAP_FAIL; |
| 940 | } | 940 | } |
| 941 | /* | ||
| 942 | * We pinned the head page for hwpoison handling, | ||
| 943 | * now we split the thp and we are interested in | ||
| 944 | * the hwpoisoned raw page, so move the refcount | ||
| 945 | * to it. | ||
| 946 | */ | ||
| 947 | if (hpage != p) { | ||
| 948 | put_page(hpage); | ||
| 949 | get_page(p); | ||
| 950 | } | ||
| 941 | /* THP is split, so ppage should be the real poisoned page. */ | 951 | /* THP is split, so ppage should be the real poisoned page. */ |
| 942 | ppage = p; | 952 | ppage = p; |
| 943 | } | 953 | } |
| @@ -1505,10 +1515,16 @@ static int soft_offline_huge_page(struct page *page, int flags) | |||
| 1505 | if (ret > 0) | 1515 | if (ret > 0) |
| 1506 | ret = -EIO; | 1516 | ret = -EIO; |
| 1507 | } else { | 1517 | } else { |
| 1508 | set_page_hwpoison_huge_page(hpage); | 1518 | /* overcommit hugetlb page will be freed to buddy */ |
| 1509 | dequeue_hwpoisoned_huge_page(hpage); | 1519 | if (PageHuge(page)) { |
| 1510 | atomic_long_add(1 << compound_order(hpage), | 1520 | set_page_hwpoison_huge_page(hpage); |
| 1511 | &num_poisoned_pages); | 1521 | dequeue_hwpoisoned_huge_page(hpage); |
| 1522 | atomic_long_add(1 << compound_order(hpage), | ||
| 1523 | &num_poisoned_pages); | ||
| 1524 | } else { | ||
| 1525 | SetPageHWPoison(page); | ||
| 1526 | atomic_long_inc(&num_poisoned_pages); | ||
| 1527 | } | ||
| 1512 | } | 1528 | } |
| 1513 | return ret; | 1529 | return ret; |
| 1514 | } | 1530 | } |
diff --git a/mm/memory.c b/mm/memory.c index 5d9025f3b3e1..6768ce9e57d2 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src, | |||
| 4271 | } | 4271 | } |
| 4272 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ | 4272 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ |
| 4273 | 4273 | ||
| 4274 | #if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS | 4274 | #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS |
| 4275 | bool ptlock_alloc(struct page *page) | 4275 | bool ptlock_alloc(struct page *page) |
| 4276 | { | 4276 | { |
| 4277 | spinlock_t *ptl; | 4277 | spinlock_t *ptl; |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index eca4a3129129..0cd2c4d4e270 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int * | |||
| 1197 | break; | 1197 | break; |
| 1198 | vma = vma->vm_next; | 1198 | vma = vma->vm_next; |
| 1199 | } | 1199 | } |
| 1200 | |||
| 1201 | if (PageHuge(page)) { | ||
| 1202 | if (vma) | ||
| 1203 | return alloc_huge_page_noerr(vma, address, 1); | ||
| 1204 | else | ||
| 1205 | return NULL; | ||
| 1206 | } | ||
| 1200 | /* | 1207 | /* |
| 1201 | * queue_pages_range() confirms that @page belongs to some vma, | 1208 | * if !vma, alloc_page_vma() will use task or system default policy |
| 1202 | * so vma shouldn't be NULL. | ||
| 1203 | */ | 1209 | */ |
| 1204 | BUG_ON(!vma); | ||
| 1205 | |||
| 1206 | if (PageHuge(page)) | ||
| 1207 | return alloc_huge_page_noerr(vma, address, 1); | ||
| 1208 | return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); | 1210 | return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
| 1209 | } | 1211 | } |
| 1210 | #else | 1212 | #else |
| @@ -1318,7 +1320,7 @@ static long do_mbind(unsigned long start, unsigned long len, | |||
| 1318 | if (nr_failed && (flags & MPOL_MF_STRICT)) | 1320 | if (nr_failed && (flags & MPOL_MF_STRICT)) |
| 1319 | err = -EIO; | 1321 | err = -EIO; |
| 1320 | } else | 1322 | } else |
| 1321 | putback_lru_pages(&pagelist); | 1323 | putback_movable_pages(&pagelist); |
| 1322 | 1324 | ||
| 1323 | up_write(&mm->mmap_sem); | 1325 | up_write(&mm->mmap_sem); |
| 1324 | mpol_out: | 1326 | mpol_out: |
diff --git a/mm/migrate.c b/mm/migrate.c index bb940045fe85..9194375b2307 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/hugetlb_cgroup.h> | 36 | #include <linux/hugetlb_cgroup.h> |
| 37 | #include <linux/gfp.h> | 37 | #include <linux/gfp.h> |
| 38 | #include <linux/balloon_compaction.h> | 38 | #include <linux/balloon_compaction.h> |
| 39 | #include <linux/mmu_notifier.h> | ||
| 39 | 40 | ||
| 40 | #include <asm/tlbflush.h> | 41 | #include <asm/tlbflush.h> |
| 41 | 42 | ||
| @@ -316,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head, | |||
| 316 | */ | 317 | */ |
| 317 | int migrate_page_move_mapping(struct address_space *mapping, | 318 | int migrate_page_move_mapping(struct address_space *mapping, |
| 318 | struct page *newpage, struct page *page, | 319 | struct page *newpage, struct page *page, |
| 319 | struct buffer_head *head, enum migrate_mode mode) | 320 | struct buffer_head *head, enum migrate_mode mode, |
| 321 | int extra_count) | ||
| 320 | { | 322 | { |
| 321 | int expected_count = 0; | 323 | int expected_count = 1 + extra_count; |
| 322 | void **pslot; | 324 | void **pslot; |
| 323 | 325 | ||
| 324 | if (!mapping) { | 326 | if (!mapping) { |
| 325 | /* Anonymous page without mapping */ | 327 | /* Anonymous page without mapping */ |
| 326 | if (page_count(page) != 1) | 328 | if (page_count(page) != expected_count) |
| 327 | return -EAGAIN; | 329 | return -EAGAIN; |
| 328 | return MIGRATEPAGE_SUCCESS; | 330 | return MIGRATEPAGE_SUCCESS; |
| 329 | } | 331 | } |
| @@ -333,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
| 333 | pslot = radix_tree_lookup_slot(&mapping->page_tree, | 335 | pslot = radix_tree_lookup_slot(&mapping->page_tree, |
| 334 | page_index(page)); | 336 | page_index(page)); |
| 335 | 337 | ||
| 336 | expected_count = 2 + page_has_private(page); | 338 | expected_count += 1 + page_has_private(page); |
| 337 | if (page_count(page) != expected_count || | 339 | if (page_count(page) != expected_count || |
| 338 | radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { | 340 | radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { |
| 339 | spin_unlock_irq(&mapping->tree_lock); | 341 | spin_unlock_irq(&mapping->tree_lock); |
| @@ -583,7 +585,7 @@ int migrate_page(struct address_space *mapping, | |||
| 583 | 585 | ||
| 584 | BUG_ON(PageWriteback(page)); /* Writeback must be complete */ | 586 | BUG_ON(PageWriteback(page)); /* Writeback must be complete */ |
| 585 | 587 | ||
| 586 | rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode); | 588 | rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); |
| 587 | 589 | ||
| 588 | if (rc != MIGRATEPAGE_SUCCESS) | 590 | if (rc != MIGRATEPAGE_SUCCESS) |
| 589 | return rc; | 591 | return rc; |
| @@ -610,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping, | |||
| 610 | 612 | ||
| 611 | head = page_buffers(page); | 613 | head = page_buffers(page); |
| 612 | 614 | ||
| 613 | rc = migrate_page_move_mapping(mapping, newpage, page, head, mode); | 615 | rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); |
| 614 | 616 | ||
| 615 | if (rc != MIGRATEPAGE_SUCCESS) | 617 | if (rc != MIGRATEPAGE_SUCCESS) |
| 616 | return rc; | 618 | return rc; |
| @@ -1654,6 +1656,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) | |||
| 1654 | return 1; | 1656 | return 1; |
| 1655 | } | 1657 | } |
| 1656 | 1658 | ||
| 1659 | bool pmd_trans_migrating(pmd_t pmd) | ||
| 1660 | { | ||
| 1661 | struct page *page = pmd_page(pmd); | ||
| 1662 | return PageLocked(page); | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) | ||
| 1666 | { | ||
| 1667 | struct page *page = pmd_page(*pmd); | ||
| 1668 | wait_on_page_locked(page); | ||
| 1669 | } | ||
| 1670 | |||
| 1657 | /* | 1671 | /* |
| 1658 | * Attempt to migrate a misplaced page to the specified destination | 1672 | * Attempt to migrate a misplaced page to the specified destination |
| 1659 | * node. Caller is expected to have an elevated reference count on | 1673 | * node. Caller is expected to have an elevated reference count on |
| @@ -1716,12 +1730,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
| 1716 | struct page *page, int node) | 1730 | struct page *page, int node) |
| 1717 | { | 1731 | { |
| 1718 | spinlock_t *ptl; | 1732 | spinlock_t *ptl; |
| 1719 | unsigned long haddr = address & HPAGE_PMD_MASK; | ||
| 1720 | pg_data_t *pgdat = NODE_DATA(node); | 1733 | pg_data_t *pgdat = NODE_DATA(node); |
| 1721 | int isolated = 0; | 1734 | int isolated = 0; |
| 1722 | struct page *new_page = NULL; | 1735 | struct page *new_page = NULL; |
| 1723 | struct mem_cgroup *memcg = NULL; | 1736 | struct mem_cgroup *memcg = NULL; |
| 1724 | int page_lru = page_is_file_cache(page); | 1737 | int page_lru = page_is_file_cache(page); |
| 1738 | unsigned long mmun_start = address & HPAGE_PMD_MASK; | ||
| 1739 | unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; | ||
| 1740 | pmd_t orig_entry; | ||
| 1725 | 1741 | ||
| 1726 | /* | 1742 | /* |
| 1727 | * Rate-limit the amount of data that is being migrated to a node. | 1743 | * Rate-limit the amount of data that is being migrated to a node. |
| @@ -1744,6 +1760,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
| 1744 | goto out_fail; | 1760 | goto out_fail; |
| 1745 | } | 1761 | } |
| 1746 | 1762 | ||
| 1763 | if (mm_tlb_flush_pending(mm)) | ||
| 1764 | flush_tlb_range(vma, mmun_start, mmun_end); | ||
| 1765 | |||
| 1747 | /* Prepare a page as a migration target */ | 1766 | /* Prepare a page as a migration target */ |
| 1748 | __set_page_locked(new_page); | 1767 | __set_page_locked(new_page); |
| 1749 | SetPageSwapBacked(new_page); | 1768 | SetPageSwapBacked(new_page); |
| @@ -1755,9 +1774,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
| 1755 | WARN_ON(PageLRU(new_page)); | 1774 | WARN_ON(PageLRU(new_page)); |
| 1756 | 1775 | ||
| 1757 | /* Recheck the target PMD */ | 1776 | /* Recheck the target PMD */ |
| 1777 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | ||
| 1758 | ptl = pmd_lock(mm, pmd); | 1778 | ptl = pmd_lock(mm, pmd); |
| 1759 | if (unlikely(!pmd_same(*pmd, entry))) { | 1779 | if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { |
| 1780 | fail_putback: | ||
| 1760 | spin_unlock(ptl); | 1781 | spin_unlock(ptl); |
| 1782 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | ||
| 1761 | 1783 | ||
| 1762 | /* Reverse changes made by migrate_page_copy() */ | 1784 | /* Reverse changes made by migrate_page_copy() */ |
| 1763 | if (TestClearPageActive(new_page)) | 1785 | if (TestClearPageActive(new_page)) |
| @@ -1774,7 +1796,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
| 1774 | putback_lru_page(page); | 1796 | putback_lru_page(page); |
| 1775 | mod_zone_page_state(page_zone(page), | 1797 | mod_zone_page_state(page_zone(page), |
| 1776 | NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); | 1798 | NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); |
| 1777 | goto out_fail; | 1799 | |
| 1800 | goto out_unlock; | ||
| 1778 | } | 1801 | } |
| 1779 | 1802 | ||
| 1780 | /* | 1803 | /* |
| @@ -1786,16 +1809,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
| 1786 | */ | 1809 | */ |
| 1787 | mem_cgroup_prepare_migration(page, new_page, &memcg); | 1810 | mem_cgroup_prepare_migration(page, new_page, &memcg); |
| 1788 | 1811 | ||
| 1812 | orig_entry = *pmd; | ||
| 1789 | entry = mk_pmd(new_page, vma->vm_page_prot); | 1813 | entry = mk_pmd(new_page, vma->vm_page_prot); |
| 1790 | entry = pmd_mknonnuma(entry); | ||
| 1791 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); | ||
| 1792 | entry = pmd_mkhuge(entry); | 1814 | entry = pmd_mkhuge(entry); |
| 1815 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); | ||
| 1793 | 1816 | ||
| 1794 | pmdp_clear_flush(vma, haddr, pmd); | 1817 | /* |
| 1795 | set_pmd_at(mm, haddr, pmd, entry); | 1818 | * Clear the old entry under pagetable lock and establish the new PTE. |
| 1796 | page_add_new_anon_rmap(new_page, vma, haddr); | 1819 | * Any parallel GUP will either observe the old page blocking on the |
| 1820 | * page lock, block on the page table lock or observe the new page. | ||
| 1821 | * The SetPageUptodate on the new page and page_add_new_anon_rmap | ||
| 1822 | * guarantee the copy is visible before the pagetable update. | ||
| 1823 | */ | ||
| 1824 | flush_cache_range(vma, mmun_start, mmun_end); | ||
| 1825 | page_add_new_anon_rmap(new_page, vma, mmun_start); | ||
| 1826 | pmdp_clear_flush(vma, mmun_start, pmd); | ||
| 1827 | set_pmd_at(mm, mmun_start, pmd, entry); | ||
| 1828 | flush_tlb_range(vma, mmun_start, mmun_end); | ||
| 1797 | update_mmu_cache_pmd(vma, address, &entry); | 1829 | update_mmu_cache_pmd(vma, address, &entry); |
| 1830 | |||
| 1831 | if (page_count(page) != 2) { | ||
| 1832 | set_pmd_at(mm, mmun_start, pmd, orig_entry); | ||
| 1833 | flush_tlb_range(vma, mmun_start, mmun_end); | ||
| 1834 | update_mmu_cache_pmd(vma, address, &entry); | ||
| 1835 | page_remove_rmap(new_page); | ||
| 1836 | goto fail_putback; | ||
| 1837 | } | ||
| 1838 | |||
| 1798 | page_remove_rmap(page); | 1839 | page_remove_rmap(page); |
| 1840 | |||
| 1799 | /* | 1841 | /* |
| 1800 | * Finish the charge transaction under the page table lock to | 1842 | * Finish the charge transaction under the page table lock to |
| 1801 | * prevent split_huge_page() from dividing up the charge | 1843 | * prevent split_huge_page() from dividing up the charge |
| @@ -1803,6 +1845,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
| 1803 | */ | 1845 | */ |
| 1804 | mem_cgroup_end_migration(memcg, page, new_page, true); | 1846 | mem_cgroup_end_migration(memcg, page, new_page, true); |
| 1805 | spin_unlock(ptl); | 1847 | spin_unlock(ptl); |
| 1848 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | ||
| 1806 | 1849 | ||
| 1807 | unlock_page(new_page); | 1850 | unlock_page(new_page); |
| 1808 | unlock_page(page); | 1851 | unlock_page(page); |
| @@ -1820,10 +1863,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
| 1820 | out_fail: | 1863 | out_fail: |
| 1821 | count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); | 1864 | count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); |
| 1822 | out_dropref: | 1865 | out_dropref: |
| 1823 | entry = pmd_mknonnuma(entry); | 1866 | ptl = pmd_lock(mm, pmd); |
| 1824 | set_pmd_at(mm, haddr, pmd, entry); | 1867 | if (pmd_same(*pmd, entry)) { |
| 1825 | update_mmu_cache_pmd(vma, address, &entry); | 1868 | entry = pmd_mknonnuma(entry); |
| 1869 | set_pmd_at(mm, mmun_start, pmd, entry); | ||
| 1870 | update_mmu_cache_pmd(vma, address, &entry); | ||
| 1871 | } | ||
| 1872 | spin_unlock(ptl); | ||
| 1826 | 1873 | ||
| 1874 | out_unlock: | ||
| 1827 | unlock_page(page); | 1875 | unlock_page(page); |
| 1828 | put_page(page); | 1876 | put_page(page); |
| 1829 | return 0; | 1877 | return 0; |
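The mm/migrate.c changes above harden migrate_misplaced_transhuge_page() against races: a TLB flush left pending by a concurrent mprotect() is performed before the huge page is copied, the PMD switch is bracketed by mmu_notifier_invalidate_range_start()/end(), and page_count(page) is rechecked under the page table lock both before and after installing the new PMD, so that a page pinned by a parallel get_user_pages() is handed back instead of being migrated; the out_dropref path likewise retakes the PMD lock and only restores the non-NUMA PMD if it is still the entry we saw. A condensed outline of the resulting ordering, using only calls that appear in the hunk (error handling, memcg and cache-flush bookkeeping omitted, so this is a sketch, not compilable on its own):

	/* 1. Settle any TLB flush a concurrent mprotect() left pending. */
	if (mm_tlb_flush_pending(mm))
		flush_tlb_range(vma, mmun_start, mmun_end);

	/* ... allocate the new huge page and copy the old one into it ... */

	/* 2. Recheck the PMD and the pin count under the page table lock. */
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	ptl = pmd_lock(mm, pmd);
	if (!pmd_same(*pmd, entry) || page_count(page) != 2)
		goto fail_putback;	/* lost a race: undo and give the old page back */

	/* entry is then rebuilt from new_page (mk_pmd/pmd_mkhuge/maybe_pmd_mkwrite). */

	/* 3. Switch the mapping over, then verify nobody pinned the old page meanwhile. */
	page_add_new_anon_rmap(new_page, vma, mmun_start);
	pmdp_clear_flush(vma, mmun_start, pmd);
	set_pmd_at(mm, mmun_start, pmd, entry);
	flush_tlb_range(vma, mmun_start, mmun_end);
	if (page_count(page) != 2) {
		set_pmd_at(mm, mmun_start, pmd, orig_entry);	/* roll back */
		goto fail_putback;
	}
	page_remove_rmap(page);		/* the old page is no longer mapped */

	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
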
diff --git a/mm/mlock.c b/mm/mlock.c index d480cd6fc475..192e6eebe4f2 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
| @@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page) | |||
| 133 | 133 | ||
| 134 | /** | 134 | /** |
| 135 | * munlock_vma_page - munlock a vma page | 135 | * munlock_vma_page - munlock a vma page |
| 136 | * @page - page to be unlocked | 136 | * @page - page to be unlocked, either a normal page or THP page head |
| 137 | * | ||
| 138 | * returns the size of the page as a page mask (0 for normal page, | ||
| 139 | * HPAGE_PMD_NR - 1 for THP head page) | ||
| 137 | * | 140 | * |
| 138 | * called from munlock()/munmap() path with page supposedly on the LRU. | 141 | * called from munlock()/munmap() path with page supposedly on the LRU. |
| 139 | * When we munlock a page, because the vma where we found the page is being | 142 | * When we munlock a page, because the vma where we found the page is being |
| @@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page) | |||
| 148 | */ | 151 | */ |
| 149 | unsigned int munlock_vma_page(struct page *page) | 152 | unsigned int munlock_vma_page(struct page *page) |
| 150 | { | 153 | { |
| 151 | unsigned int page_mask = 0; | 154 | unsigned int nr_pages; |
| 152 | 155 | ||
| 153 | BUG_ON(!PageLocked(page)); | 156 | BUG_ON(!PageLocked(page)); |
| 154 | 157 | ||
| 155 | if (TestClearPageMlocked(page)) { | 158 | if (TestClearPageMlocked(page)) { |
| 156 | unsigned int nr_pages = hpage_nr_pages(page); | 159 | nr_pages = hpage_nr_pages(page); |
| 157 | mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); | 160 | mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); |
| 158 | page_mask = nr_pages - 1; | ||
| 159 | if (!isolate_lru_page(page)) | 161 | if (!isolate_lru_page(page)) |
| 160 | __munlock_isolated_page(page); | 162 | __munlock_isolated_page(page); |
| 161 | else | 163 | else |
| 162 | __munlock_isolation_failed(page); | 164 | __munlock_isolation_failed(page); |
| 165 | } else { | ||
| 166 | nr_pages = hpage_nr_pages(page); | ||
| 163 | } | 167 | } |
| 164 | 168 | ||
| 165 | return page_mask; | 169 | /* |
| 170 | * Regardless of the original PageMlocked flag, we determine nr_pages | ||
| 171 | * after touching the flag. This leaves a possible race with a THP page | ||
| 172 | * split, such that a whole THP page was munlocked, but nr_pages == 1. | ||
| 173 | * Returning a smaller mask due to that is OK, the worst that can | ||
| 174 | * happen is subsequent useless scanning of the former tail pages. | ||
| 175 | * The NR_MLOCK accounting can however become broken. | ||
| 176 | */ | ||
| 177 | return nr_pages - 1; | ||
| 166 | } | 178 | } |
| 167 | 179 | ||
| 168 | /** | 180 | /** |
| @@ -286,10 +298,12 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) | |||
| 286 | { | 298 | { |
| 287 | int i; | 299 | int i; |
| 288 | int nr = pagevec_count(pvec); | 300 | int nr = pagevec_count(pvec); |
| 289 | int delta_munlocked = -nr; | 301 | int delta_munlocked; |
| 290 | struct pagevec pvec_putback; | 302 | struct pagevec pvec_putback; |
| 291 | int pgrescued = 0; | 303 | int pgrescued = 0; |
| 292 | 304 | ||
| 305 | pagevec_init(&pvec_putback, 0); | ||
| 306 | |||
| 293 | /* Phase 1: page isolation */ | 307 | /* Phase 1: page isolation */ |
| 294 | spin_lock_irq(&zone->lru_lock); | 308 | spin_lock_irq(&zone->lru_lock); |
| 295 | for (i = 0; i < nr; i++) { | 309 | for (i = 0; i < nr; i++) { |
| @@ -318,18 +332,21 @@ skip_munlock: | |||
| 318 | /* | 332 | /* |
| 319 | * We won't be munlocking this page in the next phase | 333 | * We won't be munlocking this page in the next phase |
| 320 | * but we still need to release the follow_page_mask() | 334 | * but we still need to release the follow_page_mask() |
| 321 | * pin. | 335 | * pin. We cannot do it under lru_lock however. If it's |
| 336 | * the last pin, __page_cache_release would deadlock. | ||
| 322 | */ | 337 | */ |
| 338 | pagevec_add(&pvec_putback, pvec->pages[i]); | ||
| 323 | pvec->pages[i] = NULL; | 339 | pvec->pages[i] = NULL; |
| 324 | put_page(page); | ||
| 325 | delta_munlocked++; | ||
| 326 | } | 340 | } |
| 327 | } | 341 | } |
| 342 | delta_munlocked = -nr + pagevec_count(&pvec_putback); | ||
| 328 | __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); | 343 | __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); |
| 329 | spin_unlock_irq(&zone->lru_lock); | 344 | spin_unlock_irq(&zone->lru_lock); |
| 330 | 345 | ||
| 346 | /* Now we can release pins of pages that we are not munlocking */ | ||
| 347 | pagevec_release(&pvec_putback); | ||
| 348 | |||
| 331 | /* Phase 2: page munlock */ | 349 | /* Phase 2: page munlock */ |
| 332 | pagevec_init(&pvec_putback, 0); | ||
| 333 | for (i = 0; i < nr; i++) { | 350 | for (i = 0; i < nr; i++) { |
| 334 | struct page *page = pvec->pages[i]; | 351 | struct page *page = pvec->pages[i]; |
| 335 | 352 | ||
| @@ -440,7 +457,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
| 440 | 457 | ||
| 441 | while (start < end) { | 458 | while (start < end) { |
| 442 | struct page *page = NULL; | 459 | struct page *page = NULL; |
| 443 | unsigned int page_mask, page_increm; | 460 | unsigned int page_mask; |
| 461 | unsigned long page_increm; | ||
| 444 | struct pagevec pvec; | 462 | struct pagevec pvec; |
| 445 | struct zone *zone; | 463 | struct zone *zone; |
| 446 | int zoneid; | 464 | int zoneid; |
| @@ -490,7 +508,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
| 490 | goto next; | 508 | goto next; |
| 491 | } | 509 | } |
| 492 | } | 510 | } |
| 493 | page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask); | 511 | /* It's a bug to munlock in the middle of a THP page */ |
| 512 | VM_BUG_ON((start >> PAGE_SHIFT) & page_mask); | ||
| 513 | page_increm = 1 + page_mask; | ||
| 494 | start += page_increm * PAGE_SIZE; | 514 | start += page_increm * PAGE_SIZE; |
| 495 | next: | 515 | next: |
| 496 | cond_resched(); | 516 | cond_resched(); |
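The mm/mlock.c hunks change munlock_vma_page() to always return hpage_nr_pages() - 1, even when the page was not mlocked, and __munlock_pagevec() now collects the pages it skips into a second pagevec that is released only after lru_lock is dropped, because dropping the last pin under the spinlock could end up in __page_cache_release() and deadlock; NR_MLOCK is adjusted once, by the number of pages actually munlocked. In munlock_vma_pages_range() the stride computation is simplified to 1 + page_mask, which is only valid because start can never point into the middle of a THP, as the new VM_BUG_ON documents. A small standalone check that the old and new expressions agree for a THP-aligned start (PAGE_SHIFT, HPAGE_PMD_NR and the sample address are the usual x86-64 values, used here purely for illustration):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define HPAGE_PMD_NR 512			/* 2 MB THP / 4 KB base pages */

int main(void)
{
	unsigned long page_mask = HPAGE_PMD_NR - 1;	/* what munlock_vma_page() now returns */
	unsigned long start = 0x200000;			/* THP-aligned user address */

	unsigned long old_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
	unsigned long new_increm = 1 + page_mask;

	printf("old=%lu new=%lu\n", old_increm, new_increm);
	assert(((start >> PAGE_SHIFT) & page_mask) == 0);	/* the new VM_BUG_ON condition */
	assert(old_increm == new_increm);			/* both advance by a full THP */
	return 0;
}
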
diff --git a/mm/mprotect.c b/mm/mprotect.c index 26667971c824..bb53a6591aea 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
| @@ -52,17 +52,21 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 52 | pte_t ptent; | 52 | pte_t ptent; |
| 53 | bool updated = false; | 53 | bool updated = false; |
| 54 | 54 | ||
| 55 | ptent = ptep_modify_prot_start(mm, addr, pte); | ||
| 56 | if (!prot_numa) { | 55 | if (!prot_numa) { |
| 56 | ptent = ptep_modify_prot_start(mm, addr, pte); | ||
| 57 | if (pte_numa(ptent)) | ||
| 58 | ptent = pte_mknonnuma(ptent); | ||
| 57 | ptent = pte_modify(ptent, newprot); | 59 | ptent = pte_modify(ptent, newprot); |
| 58 | updated = true; | 60 | updated = true; |
| 59 | } else { | 61 | } else { |
| 60 | struct page *page; | 62 | struct page *page; |
| 61 | 63 | ||
| 64 | ptent = *pte; | ||
| 62 | page = vm_normal_page(vma, addr, oldpte); | 65 | page = vm_normal_page(vma, addr, oldpte); |
| 63 | if (page) { | 66 | if (page) { |
| 64 | if (!pte_numa(oldpte)) { | 67 | if (!pte_numa(oldpte)) { |
| 65 | ptent = pte_mknuma(ptent); | 68 | ptent = pte_mknuma(ptent); |
| 69 | set_pte_at(mm, addr, pte, ptent); | ||
| 66 | updated = true; | 70 | updated = true; |
| 67 | } | 71 | } |
| 68 | } | 72 | } |
| @@ -79,7 +83,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 79 | 83 | ||
| 80 | if (updated) | 84 | if (updated) |
| 81 | pages++; | 85 | pages++; |
| 82 | ptep_modify_prot_commit(mm, addr, pte, ptent); | 86 | |
| 87 | /* Only !prot_numa always clears the pte */ | ||
| 88 | if (!prot_numa) | ||
| 89 | ptep_modify_prot_commit(mm, addr, pte, ptent); | ||
| 83 | } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { | 90 | } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { |
| 84 | swp_entry_t entry = pte_to_swp_entry(oldpte); | 91 | swp_entry_t entry = pte_to_swp_entry(oldpte); |
| 85 | 92 | ||
| @@ -181,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, | |||
| 181 | BUG_ON(addr >= end); | 188 | BUG_ON(addr >= end); |
| 182 | pgd = pgd_offset(mm, addr); | 189 | pgd = pgd_offset(mm, addr); |
| 183 | flush_cache_range(vma, addr, end); | 190 | flush_cache_range(vma, addr, end); |
| 191 | set_tlb_flush_pending(mm); | ||
| 184 | do { | 192 | do { |
| 185 | next = pgd_addr_end(addr, end); | 193 | next = pgd_addr_end(addr, end); |
| 186 | if (pgd_none_or_clear_bad(pgd)) | 194 | if (pgd_none_or_clear_bad(pgd)) |
| @@ -192,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, | |||
| 192 | /* Only flush the TLB if we actually modified any entries: */ | 200 | /* Only flush the TLB if we actually modified any entries: */ |
| 193 | if (pages) | 201 | if (pages) |
| 194 | flush_tlb_range(vma, start, end); | 202 | flush_tlb_range(vma, start, end); |
| 203 | clear_tlb_flush_pending(mm); | ||
| 195 | 204 | ||
| 196 | return pages; | 205 | return pages; |
| 197 | } | 206 | } |
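In mm/mprotect.c, change_pte_range() now keeps the two cases separate: the !prot_numa path still goes through ptep_modify_prot_start()/ptep_modify_prot_commit(), and additionally clears a stray NUMA bit before applying the new protection, while the prot_numa path reads the PTE, marks it with pte_mknuma() and writes it back with set_pte_at() without ever clearing it. change_protection_range() also brackets the whole walk with set_tlb_flush_pending()/clear_tlb_flush_pending(), which is the flag the migration path above tests with mm_tlb_flush_pending(). A minimal sketch of the writer/reader pairing, condensed from these hunks (kernel context assumed, not standalone code):

	/* writer: change_protection_range() */
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	/* ... walk the page tables, apply pte_mknuma()/new protections ... */
	if (pages)
		flush_tlb_range(vma, start, end);	/* only if entries were modified */
	clear_tlb_flush_pending(mm);

	/* reader: migrate_misplaced_transhuge_page(), see mm/migrate.c above */
	if (mm_tlb_flush_pending(mm))
		flush_tlb_range(vma, mmun_start, mmun_end);
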
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 580a5f075ed0..5248fe070aa4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist) | |||
| 1816 | 1816 | ||
| 1817 | static bool zone_local(struct zone *local_zone, struct zone *zone) | 1817 | static bool zone_local(struct zone *local_zone, struct zone *zone) |
| 1818 | { | 1818 | { |
| 1819 | return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE; | 1819 | return local_zone->node == zone->node; |
| 1820 | } | 1820 | } |
| 1821 | 1821 | ||
| 1822 | static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) | 1822 | static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) |
| @@ -1913,18 +1913,17 @@ zonelist_scan: | |||
| 1913 | * page was allocated in should have no effect on the | 1913 | * page was allocated in should have no effect on the |
| 1914 | * time the page has in memory before being reclaimed. | 1914 | * time the page has in memory before being reclaimed. |
| 1915 | * | 1915 | * |
| 1916 | * When zone_reclaim_mode is enabled, try to stay in | 1916 | * Try to stay in local zones in the fastpath. If |
| 1917 | * local zones in the fastpath. If that fails, the | 1917 | * that fails, the slowpath is entered, which will do |
| 1918 | * slowpath is entered, which will do another pass | 1918 | * another pass starting with the local zones, but |
| 1919 | * starting with the local zones, but ultimately fall | 1919 | * ultimately fall back to remote zones that do not |
| 1920 | * back to remote zones that do not partake in the | 1920 | * partake in the fairness round-robin cycle of this |
| 1921 | * fairness round-robin cycle of this zonelist. | 1921 | * zonelist. |
| 1922 | */ | 1922 | */ |
| 1923 | if (alloc_flags & ALLOC_WMARK_LOW) { | 1923 | if (alloc_flags & ALLOC_WMARK_LOW) { |
| 1924 | if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) | 1924 | if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) |
| 1925 | continue; | 1925 | continue; |
| 1926 | if (zone_reclaim_mode && | 1926 | if (!zone_local(preferred_zone, zone)) |
| 1927 | !zone_local(preferred_zone, zone)) | ||
| 1928 | continue; | 1927 | continue; |
| 1929 | } | 1928 | } |
| 1930 | /* | 1929 | /* |
| @@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order, | |||
| 2390 | * thrash fairness information for zones that are not | 2389 | * thrash fairness information for zones that are not |
| 2391 | * actually part of this zonelist's round-robin cycle. | 2390 | * actually part of this zonelist's round-robin cycle. |
| 2392 | */ | 2391 | */ |
| 2393 | if (zone_reclaim_mode && !zone_local(preferred_zone, zone)) | 2392 | if (!zone_local(preferred_zone, zone)) |
| 2394 | continue; | 2393 | continue; |
| 2395 | mod_zone_page_state(zone, NR_ALLOC_BATCH, | 2394 | mod_zone_page_state(zone, NR_ALLOC_BATCH, |
| 2396 | high_wmark_pages(zone) - | 2395 | high_wmark_pages(zone) - |
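mm/page_alloc.c simplifies zone_local() to a plain node id comparison and drops the zone_reclaim_mode conditions, so the fair NR_ALLOC_BATCH round-robin is always restricted to zones on the local node. Assuming the usual convention that a node's distance to itself is LOCAL_DISTANCE, the old and new predicates can only disagree when firmware reports LOCAL_DISTANCE between two distinct nodes; the standalone snippet below illustrates that corner case with a made-up distance table (the node count, table values and the use of 10 for LOCAL_DISTANCE are illustrative assumptions, not taken from this patch):

#include <stdbool.h>
#include <stdio.h>

#define LOCAL_DISTANCE 10			/* ACPI SLIT convention */

static const int distance[2][2] = {		/* hypothetical table where node 1   */
	{ 10, 10 },				/* reports distance 10 to node 0 too */
	{ 10, 10 },
};

static bool zone_local_old(int local, int other)
{
	return distance[local][other] == LOCAL_DISTANCE;
}

static bool zone_local_new(int local, int other)
{
	return local == other;
}

int main(void)
{
	/* old predicate treats node 1 as local to node 0, the new one does not */
	printf("old: %d, new: %d\n", zone_local_old(0, 1), zone_local_new(0, 1));
	return 0;
}
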
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index cbb38545d9d6..a8b919925934 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c | |||
| @@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma, | |||
| 110 | pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, | 110 | pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, |
| 111 | pte_t *ptep) | 111 | pte_t *ptep) |
| 112 | { | 112 | { |
| 113 | struct mm_struct *mm = (vma)->vm_mm; | ||
| 113 | pte_t pte; | 114 | pte_t pte; |
| 114 | pte = ptep_get_and_clear((vma)->vm_mm, address, ptep); | 115 | pte = ptep_get_and_clear(mm, address, ptep); |
| 115 | if (pte_accessible(pte)) | 116 | if (pte_accessible(mm, pte)) |
| 116 | flush_tlb_page(vma, address); | 117 | flush_tlb_page(vma, address); |
| 117 | return pte; | 118 | return pte; |
| 118 | } | 119 | } |
| @@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) | |||
| 191 | void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, | 192 | void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, |
| 192 | pmd_t *pmdp) | 193 | pmd_t *pmdp) |
| 193 | { | 194 | { |
| 195 | pmd_t entry = *pmdp; | ||
| 196 | if (pmd_numa(entry)) | ||
| 197 | entry = pmd_mknonnuma(entry); | ||
| 194 | set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); | 198 | set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); |
| 195 | flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); | 199 | flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); |
| 196 | } | 200 | } |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
| @@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm, | |||
| 600 | spinlock_t *ptl; | 600 | spinlock_t *ptl; |
| 601 | 601 | ||
| 602 | if (unlikely(PageHuge(page))) { | 602 | if (unlikely(PageHuge(page))) { |
| 603 | /* when pud is not present, pte will be NULL */ | ||
| 603 | pte = huge_pte_offset(mm, address); | 604 | pte = huge_pte_offset(mm, address); |
| 605 | if (!pte) | ||
| 606 | return NULL; | ||
| 607 | |||
| 604 | ptl = huge_pte_lockptr(page_hstate(page), mm, pte); | 608 | ptl = huge_pte_lockptr(page_hstate(page), mm, pte); |
| 605 | goto check; | 609 | goto check; |
| 606 | } | 610 | } |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 762896ebfcf5..47c908f1f626 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
| @@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = { | |||
| 530 | .parse = eth_header_parse, | 530 | .parse = eth_header_parse, |
| 531 | }; | 531 | }; |
| 532 | 532 | ||
| 533 | static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev, | ||
| 534 | unsigned short type, | ||
| 535 | const void *daddr, const void *saddr, | ||
| 536 | unsigned int len) | ||
| 537 | { | ||
| 538 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); | ||
| 539 | struct net_device *real_dev = vlan->real_dev; | ||
| 540 | |||
| 541 | return dev_hard_header(skb, real_dev, type, daddr, saddr, len); | ||
| 542 | } | ||
| 543 | |||
| 544 | static const struct header_ops vlan_passthru_header_ops = { | ||
| 545 | .create = vlan_passthru_hard_header, | ||
| 546 | .rebuild = dev_rebuild_header, | ||
| 547 | .parse = eth_header_parse, | ||
| 548 | }; | ||
| 549 | |||
| 533 | static struct device_type vlan_type = { | 550 | static struct device_type vlan_type = { |
| 534 | .name = "vlan", | 551 | .name = "vlan", |
| 535 | }; | 552 | }; |
| @@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev) | |||
| 573 | 590 | ||
| 574 | dev->needed_headroom = real_dev->needed_headroom; | 591 | dev->needed_headroom = real_dev->needed_headroom; |
| 575 | if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { | 592 | if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { |
| 576 | dev->header_ops = real_dev->header_ops; | 593 | dev->header_ops = &vlan_passthru_header_ops; |
| 577 | dev->hard_header_len = real_dev->hard_header_len; | 594 | dev->hard_header_len = real_dev->hard_header_len; |
| 578 | } else { | 595 | } else { |
| 579 | dev->header_ops = &vlan_header_ops; | 596 | dev->header_ops = &vlan_header_ops; |
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a2b480a90872..b9c8a6eedf45 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
| @@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) | |||
| 307 | hard_iface->bat_iv.ogm_buff = ogm_buff; | 307 | hard_iface->bat_iv.ogm_buff = ogm_buff; |
| 308 | 308 | ||
| 309 | batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; | 309 | batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; |
| 310 | batadv_ogm_packet->header.packet_type = BATADV_IV_OGM; | 310 | batadv_ogm_packet->packet_type = BATADV_IV_OGM; |
| 311 | batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION; | 311 | batadv_ogm_packet->version = BATADV_COMPAT_VERSION; |
| 312 | batadv_ogm_packet->header.ttl = 2; | 312 | batadv_ogm_packet->ttl = 2; |
| 313 | batadv_ogm_packet->flags = BATADV_NO_FLAGS; | 313 | batadv_ogm_packet->flags = BATADV_NO_FLAGS; |
| 314 | batadv_ogm_packet->reserved = 0; | 314 | batadv_ogm_packet->reserved = 0; |
| 315 | batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; | 315 | batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; |
| @@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface) | |||
| 346 | 346 | ||
| 347 | batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; | 347 | batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; |
| 348 | batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP; | 348 | batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP; |
| 349 | batadv_ogm_packet->header.ttl = BATADV_TTL; | 349 | batadv_ogm_packet->ttl = BATADV_TTL; |
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | /* when do we schedule our own ogm to be sent */ | 352 | /* when do we schedule our own ogm to be sent */ |
| @@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, | |||
| 435 | fwd_str, (packet_num > 0 ? "aggregated " : ""), | 435 | fwd_str, (packet_num > 0 ? "aggregated " : ""), |
| 436 | batadv_ogm_packet->orig, | 436 | batadv_ogm_packet->orig, |
| 437 | ntohl(batadv_ogm_packet->seqno), | 437 | ntohl(batadv_ogm_packet->seqno), |
| 438 | batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl, | 438 | batadv_ogm_packet->tq, batadv_ogm_packet->ttl, |
| 439 | (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? | 439 | (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? |
| 440 | "on" : "off"), | 440 | "on" : "off"), |
| 441 | hard_iface->net_dev->name, | 441 | hard_iface->net_dev->name, |
| @@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) | |||
| 491 | /* multihomed peer assumed | 491 | /* multihomed peer assumed |
| 492 | * non-primary OGMs are only broadcasted on their interface | 492 | * non-primary OGMs are only broadcasted on their interface |
| 493 | */ | 493 | */ |
| 494 | if ((directlink && (batadv_ogm_packet->header.ttl == 1)) || | 494 | if ((directlink && (batadv_ogm_packet->ttl == 1)) || |
| 495 | (forw_packet->own && (forw_packet->if_incoming != primary_if))) { | 495 | (forw_packet->own && (forw_packet->if_incoming != primary_if))) { |
| 496 | /* FIXME: what about aggregated packets ? */ | 496 | /* FIXME: what about aggregated packets ? */ |
| 497 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 497 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
| @@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) | |||
| 499 | (forw_packet->own ? "Sending own" : "Forwarding"), | 499 | (forw_packet->own ? "Sending own" : "Forwarding"), |
| 500 | batadv_ogm_packet->orig, | 500 | batadv_ogm_packet->orig, |
| 501 | ntohl(batadv_ogm_packet->seqno), | 501 | ntohl(batadv_ogm_packet->seqno), |
| 502 | batadv_ogm_packet->header.ttl, | 502 | batadv_ogm_packet->ttl, |
| 503 | forw_packet->if_incoming->net_dev->name, | 503 | forw_packet->if_incoming->net_dev->name, |
| 504 | forw_packet->if_incoming->net_dev->dev_addr); | 504 | forw_packet->if_incoming->net_dev->dev_addr); |
| 505 | 505 | ||
| @@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet, | |||
| 572 | */ | 572 | */ |
| 573 | if ((!directlink) && | 573 | if ((!directlink) && |
| 574 | (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) && | 574 | (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) && |
| 575 | (batadv_ogm_packet->header.ttl != 1) && | 575 | (batadv_ogm_packet->ttl != 1) && |
| 576 | 576 | ||
| 577 | /* own packets originating non-primary | 577 | /* own packets originating non-primary |
| 578 | * interfaces leave only that interface | 578 | * interfaces leave only that interface |
| @@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet, | |||
| 587 | * interface only - we still can aggregate | 587 | * interface only - we still can aggregate |
| 588 | */ | 588 | */ |
| 589 | if ((directlink) && | 589 | if ((directlink) && |
| 590 | (new_bat_ogm_packet->header.ttl == 1) && | 590 | (new_bat_ogm_packet->ttl == 1) && |
| 591 | (forw_packet->if_incoming == if_incoming) && | 591 | (forw_packet->if_incoming == if_incoming) && |
| 592 | 592 | ||
| 593 | /* packets from direct neighbors or | 593 | /* packets from direct neighbors or |
| @@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, | |||
| 778 | struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 778 | struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
| 779 | uint16_t tvlv_len; | 779 | uint16_t tvlv_len; |
| 780 | 780 | ||
| 781 | if (batadv_ogm_packet->header.ttl <= 1) { | 781 | if (batadv_ogm_packet->ttl <= 1) { |
| 782 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); | 782 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); |
| 783 | return; | 783 | return; |
| 784 | } | 784 | } |
| @@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, | |||
| 798 | 798 | ||
| 799 | tvlv_len = ntohs(batadv_ogm_packet->tvlv_len); | 799 | tvlv_len = ntohs(batadv_ogm_packet->tvlv_len); |
| 800 | 800 | ||
| 801 | batadv_ogm_packet->header.ttl--; | 801 | batadv_ogm_packet->ttl--; |
| 802 | memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); | 802 | memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); |
| 803 | 803 | ||
| 804 | /* apply hop penalty */ | 804 | /* apply hop penalty */ |
| @@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, | |||
| 807 | 807 | ||
| 808 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 808 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
| 809 | "Forwarding packet: tq: %i, ttl: %i\n", | 809 | "Forwarding packet: tq: %i, ttl: %i\n", |
| 810 | batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl); | 810 | batadv_ogm_packet->tq, batadv_ogm_packet->ttl); |
| 811 | 811 | ||
| 812 | /* switch of primaries first hop flag when forwarding */ | 812 | /* switch of primaries first hop flag when forwarding */ |
| 813 | batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP; | 813 | batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP; |
| @@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, | |||
| 972 | spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock); | 972 | spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock); |
| 973 | 973 | ||
| 974 | if (dup_status == BATADV_NO_DUP) { | 974 | if (dup_status == BATADV_NO_DUP) { |
| 975 | orig_node->last_ttl = batadv_ogm_packet->header.ttl; | 975 | orig_node->last_ttl = batadv_ogm_packet->ttl; |
| 976 | neigh_node->last_ttl = batadv_ogm_packet->header.ttl; | 976 | neigh_node->last_ttl = batadv_ogm_packet->ttl; |
| 977 | } | 977 | } |
| 978 | 978 | ||
| 979 | batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node); | 979 | batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node); |
| @@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, | |||
| 1247 | * packet in an aggregation. Here we expect that the padding | 1247 | * packet in an aggregation. Here we expect that the padding |
| 1248 | * is always zero (or not 0x01) | 1248 | * is always zero (or not 0x01) |
| 1249 | */ | 1249 | */ |
| 1250 | if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM) | 1250 | if (batadv_ogm_packet->packet_type != BATADV_IV_OGM) |
| 1251 | return; | 1251 | return; |
| 1252 | 1252 | ||
| 1253 | /* could be changed by schedule_own_packet() */ | 1253 | /* could be changed by schedule_own_packet() */ |
| @@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, | |||
| 1267 | if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig, | 1267 | if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig, |
| 1268 | batadv_ogm_packet->prev_sender, | 1268 | batadv_ogm_packet->prev_sender, |
| 1269 | ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq, | 1269 | ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq, |
| 1270 | batadv_ogm_packet->header.ttl, | 1270 | batadv_ogm_packet->ttl, |
| 1271 | batadv_ogm_packet->header.version, has_directlink_flag); | 1271 | batadv_ogm_packet->version, has_directlink_flag); |
| 1272 | 1272 | ||
| 1273 | rcu_read_lock(); | 1273 | rcu_read_lock(); |
| 1274 | list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { | 1274 | list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { |
| @@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, | |||
| 1433 | * seqno and similar ttl as the non-duplicate | 1433 | * seqno and similar ttl as the non-duplicate |
| 1434 | */ | 1434 | */ |
| 1435 | sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); | 1435 | sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); |
| 1436 | similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; | 1436 | similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl; |
| 1437 | if (is_bidirect && ((dup_status == BATADV_NO_DUP) || | 1437 | if (is_bidirect && ((dup_status == BATADV_NO_DUP) || |
| 1438 | (sameseq && similar_ttl))) | 1438 | (sameseq && similar_ttl))) |
| 1439 | batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, | 1439 | batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, |
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 6c8c3934bd7b..b316a4cb6f14 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
| @@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
| 349 | 349 | ||
| 350 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | 350 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; |
| 351 | 351 | ||
| 352 | switch (unicast_4addr_packet->u.header.packet_type) { | 352 | switch (unicast_4addr_packet->u.packet_type) { |
| 353 | case BATADV_UNICAST: | 353 | case BATADV_UNICAST: |
| 354 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 354 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
| 355 | "* encapsulated within a UNICAST packet\n"); | 355 | "* encapsulated within a UNICAST packet\n"); |
| @@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
| 374 | break; | 374 | break; |
| 375 | default: | 375 | default: |
| 376 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n", | 376 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n", |
| 377 | unicast_4addr_packet->u.header.packet_type); | 377 | unicast_4addr_packet->u.packet_type); |
| 378 | } | 378 | } |
| 379 | break; | 379 | break; |
| 380 | case BATADV_BCAST: | 380 | case BATADV_BCAST: |
| @@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
| 387 | default: | 387 | default: |
| 388 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 388 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
| 389 | "* encapsulated within an unknown packet type (0x%x)\n", | 389 | "* encapsulated within an unknown packet type (0x%x)\n", |
| 390 | unicast_4addr_packet->u.header.packet_type); | 390 | unicast_4addr_packet->u.packet_type); |
| 391 | } | 391 | } |
| 392 | } | 392 | } |
| 393 | 393 | ||
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 271d321b3a04..6ddb6145ffb5 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c | |||
| @@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb, | |||
| 355 | batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES, | 355 | batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES, |
| 356 | skb->len + ETH_HLEN); | 356 | skb->len + ETH_HLEN); |
| 357 | 357 | ||
| 358 | packet->header.ttl--; | 358 | packet->ttl--; |
| 359 | batadv_send_skb_packet(skb, neigh_node->if_incoming, | 359 | batadv_send_skb_packet(skb, neigh_node->if_incoming, |
| 360 | neigh_node->addr); | 360 | neigh_node->addr); |
| 361 | ret = true; | 361 | ret = true; |
| @@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb, | |||
| 444 | goto out_err; | 444 | goto out_err; |
| 445 | 445 | ||
| 446 | /* Create one header to be copied to all fragments */ | 446 | /* Create one header to be copied to all fragments */ |
| 447 | frag_header.header.packet_type = BATADV_UNICAST_FRAG; | 447 | frag_header.packet_type = BATADV_UNICAST_FRAG; |
| 448 | frag_header.header.version = BATADV_COMPAT_VERSION; | 448 | frag_header.version = BATADV_COMPAT_VERSION; |
| 449 | frag_header.header.ttl = BATADV_TTL; | 449 | frag_header.ttl = BATADV_TTL; |
| 450 | frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno)); | 450 | frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno)); |
| 451 | frag_header.reserved = 0; | 451 | frag_header.reserved = 0; |
| 452 | frag_header.no = 0; | 452 | frag_header.no = 0; |
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index 29ae4efe3543..130cc3217e2b 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c | |||
| @@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff, | |||
| 194 | goto free_skb; | 194 | goto free_skb; |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | if (icmp_header->header.packet_type != BATADV_ICMP) { | 197 | if (icmp_header->packet_type != BATADV_ICMP) { |
| 198 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 198 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
| 199 | "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n"); | 199 | "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n"); |
| 200 | len = -EINVAL; | 200 | len = -EINVAL; |
| @@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff, | |||
| 243 | 243 | ||
| 244 | icmp_header->uid = socket_client->index; | 244 | icmp_header->uid = socket_client->index; |
| 245 | 245 | ||
| 246 | if (icmp_header->header.version != BATADV_COMPAT_VERSION) { | 246 | if (icmp_header->version != BATADV_COMPAT_VERSION) { |
| 247 | icmp_header->msg_type = BATADV_PARAMETER_PROBLEM; | 247 | icmp_header->msg_type = BATADV_PARAMETER_PROBLEM; |
| 248 | icmp_header->header.version = BATADV_COMPAT_VERSION; | 248 | icmp_header->version = BATADV_COMPAT_VERSION; |
| 249 | batadv_socket_add_packet(socket_client, icmp_header, | 249 | batadv_socket_add_packet(socket_client, icmp_header, |
| 250 | packet_len); | 250 | packet_len); |
| 251 | goto free_skb; | 251 | goto free_skb; |
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index c51a5e568f0a..1511f64a6cea 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
| @@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
| 383 | 383 | ||
| 384 | batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data; | 384 | batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data; |
| 385 | 385 | ||
| 386 | if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) { | 386 | if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) { |
| 387 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 387 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
| 388 | "Drop packet: incompatible batman version (%i)\n", | 388 | "Drop packet: incompatible batman version (%i)\n", |
| 389 | batadv_ogm_packet->header.version); | 389 | batadv_ogm_packet->version); |
| 390 | goto err_free; | 390 | goto err_free; |
| 391 | } | 391 | } |
| 392 | 392 | ||
| 393 | /* all receive handlers return whether they received or reused | 393 | /* all receive handlers return whether they received or reused |
| 394 | * the supplied skb. if not, we have to free the skb. | 394 | * the supplied skb. if not, we have to free the skb. |
| 395 | */ | 395 | */ |
| 396 | idx = batadv_ogm_packet->header.packet_type; | 396 | idx = batadv_ogm_packet->packet_type; |
| 397 | ret = (*batadv_rx_handler[idx])(skb, hard_iface); | 397 | ret = (*batadv_rx_handler[idx])(skb, hard_iface); |
| 398 | 398 | ||
| 399 | if (ret == NET_RX_DROP) | 399 | if (ret == NET_RX_DROP) |
| @@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void) | |||
| 426 | BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4); | 426 | BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4); |
| 427 | BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4); | 427 | BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4); |
| 428 | BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4); | 428 | BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4); |
| 429 | BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4); | 429 | BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4); |
| 430 | BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4); | 430 | BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4); |
| 431 | 431 | ||
| 432 | /* broadcast packet */ | 432 | /* broadcast packet */ |
| 433 | batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet; | 433 | batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet; |
| @@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src, | |||
| 1119 | skb_reserve(skb, ETH_HLEN); | 1119 | skb_reserve(skb, ETH_HLEN); |
| 1120 | tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len); | 1120 | tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len); |
| 1121 | unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff; | 1121 | unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff; |
| 1122 | unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV; | 1122 | unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV; |
| 1123 | unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION; | 1123 | unicast_tvlv_packet->version = BATADV_COMPAT_VERSION; |
| 1124 | unicast_tvlv_packet->header.ttl = BATADV_TTL; | 1124 | unicast_tvlv_packet->ttl = BATADV_TTL; |
| 1125 | unicast_tvlv_packet->reserved = 0; | 1125 | unicast_tvlv_packet->reserved = 0; |
| 1126 | unicast_tvlv_packet->tvlv_len = htons(tvlv_len); | 1126 | unicast_tvlv_packet->tvlv_len = htons(tvlv_len); |
| 1127 | unicast_tvlv_packet->align = 0; | 1127 | unicast_tvlv_packet->align = 0; |
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 351e199bc0af..511d7e1eea38 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c | |||
| @@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv, | |||
| 722 | { | 722 | { |
| 723 | if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno)) | 723 | if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno)) |
| 724 | return false; | 724 | return false; |
| 725 | if (orig_node->last_ttl != ogm_packet->header.ttl + 1) | 725 | if (orig_node->last_ttl != ogm_packet->ttl + 1) |
| 726 | return false; | 726 | return false; |
| 727 | if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender)) | 727 | if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender)) |
| 728 | return false; | 728 | return false; |
| @@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv, | |||
| 1082 | coded_packet = (struct batadv_coded_packet *)skb_dest->data; | 1082 | coded_packet = (struct batadv_coded_packet *)skb_dest->data; |
| 1083 | skb_reset_mac_header(skb_dest); | 1083 | skb_reset_mac_header(skb_dest); |
| 1084 | 1084 | ||
| 1085 | coded_packet->header.packet_type = BATADV_CODED; | 1085 | coded_packet->packet_type = BATADV_CODED; |
| 1086 | coded_packet->header.version = BATADV_COMPAT_VERSION; | 1086 | coded_packet->version = BATADV_COMPAT_VERSION; |
| 1087 | coded_packet->header.ttl = packet1->header.ttl; | 1087 | coded_packet->ttl = packet1->ttl; |
| 1088 | 1088 | ||
| 1089 | /* Info about first unicast packet */ | 1089 | /* Info about first unicast packet */ |
| 1090 | memcpy(coded_packet->first_source, first_source, ETH_ALEN); | 1090 | memcpy(coded_packet->first_source, first_source, ETH_ALEN); |
| @@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv, | |||
| 1097 | memcpy(coded_packet->second_source, second_source, ETH_ALEN); | 1097 | memcpy(coded_packet->second_source, second_source, ETH_ALEN); |
| 1098 | memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN); | 1098 | memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN); |
| 1099 | coded_packet->second_crc = packet_id2; | 1099 | coded_packet->second_crc = packet_id2; |
| 1100 | coded_packet->second_ttl = packet2->header.ttl; | 1100 | coded_packet->second_ttl = packet2->ttl; |
| 1101 | coded_packet->second_ttvn = packet2->ttvn; | 1101 | coded_packet->second_ttvn = packet2->ttvn; |
| 1102 | coded_packet->coded_len = htons(coding_len); | 1102 | coded_packet->coded_len = htons(coding_len); |
| 1103 | 1103 | ||
| @@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb, | |||
| 1452 | /* We only handle unicast packets */ | 1452 | /* We only handle unicast packets */ |
| 1453 | payload = skb_network_header(skb); | 1453 | payload = skb_network_header(skb); |
| 1454 | packet = (struct batadv_unicast_packet *)payload; | 1454 | packet = (struct batadv_unicast_packet *)payload; |
| 1455 | if (packet->header.packet_type != BATADV_UNICAST) | 1455 | if (packet->packet_type != BATADV_UNICAST) |
| 1456 | goto out; | 1456 | goto out; |
| 1457 | 1457 | ||
| 1458 | /* Try to find a coding opportunity and send the skb if one is found */ | 1458 | /* Try to find a coding opportunity and send the skb if one is found */ |
| @@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv, | |||
| 1505 | /* Check for supported packet type */ | 1505 | /* Check for supported packet type */ |
| 1506 | payload = skb_network_header(skb); | 1506 | payload = skb_network_header(skb); |
| 1507 | packet = (struct batadv_unicast_packet *)payload; | 1507 | packet = (struct batadv_unicast_packet *)payload; |
| 1508 | if (packet->header.packet_type != BATADV_UNICAST) | 1508 | if (packet->packet_type != BATADV_UNICAST) |
| 1509 | goto out; | 1509 | goto out; |
| 1510 | 1510 | ||
| 1511 | /* Find existing nc_path or create a new */ | 1511 | /* Find existing nc_path or create a new */ |
| @@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
| 1623 | ttvn = coded_packet_tmp.second_ttvn; | 1623 | ttvn = coded_packet_tmp.second_ttvn; |
| 1624 | } else { | 1624 | } else { |
| 1625 | orig_dest = coded_packet_tmp.first_orig_dest; | 1625 | orig_dest = coded_packet_tmp.first_orig_dest; |
| 1626 | ttl = coded_packet_tmp.header.ttl; | 1626 | ttl = coded_packet_tmp.ttl; |
| 1627 | ttvn = coded_packet_tmp.first_ttvn; | 1627 | ttvn = coded_packet_tmp.first_ttvn; |
| 1628 | } | 1628 | } |
| 1629 | 1629 | ||
| @@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
| 1648 | 1648 | ||
| 1649 | /* Create decoded unicast packet */ | 1649 | /* Create decoded unicast packet */ |
| 1650 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 1650 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
| 1651 | unicast_packet->header.packet_type = BATADV_UNICAST; | 1651 | unicast_packet->packet_type = BATADV_UNICAST; |
| 1652 | unicast_packet->header.version = BATADV_COMPAT_VERSION; | 1652 | unicast_packet->version = BATADV_COMPAT_VERSION; |
| 1653 | unicast_packet->header.ttl = ttl; | 1653 | unicast_packet->ttl = ttl; |
| 1654 | memcpy(unicast_packet->dest, orig_dest, ETH_ALEN); | 1654 | memcpy(unicast_packet->dest, orig_dest, ETH_ALEN); |
| 1655 | unicast_packet->ttvn = ttvn; | 1655 | unicast_packet->ttvn = ttvn; |
| 1656 | 1656 | ||
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 207459b62966..2dd8f2422550 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h | |||
| @@ -155,6 +155,7 @@ enum batadv_tvlv_type { | |||
| 155 | BATADV_TVLV_ROAM = 0x05, | 155 | BATADV_TVLV_ROAM = 0x05, |
| 156 | }; | 156 | }; |
| 157 | 157 | ||
| 158 | #pragma pack(2) | ||
| 158 | /* the destination hardware field in the ARP frame is used to | 159 | /* the destination hardware field in the ARP frame is used to |
| 159 | * transport the claim type and the group id | 160 | * transport the claim type and the group id |
| 160 | */ | 161 | */ |
| @@ -163,24 +164,20 @@ struct batadv_bla_claim_dst { | |||
| 163 | uint8_t type; /* bla_claimframe */ | 164 | uint8_t type; /* bla_claimframe */ |
| 164 | __be16 group; /* group id */ | 165 | __be16 group; /* group id */ |
| 165 | }; | 166 | }; |
| 166 | 167 | #pragma pack() | |
| 167 | struct batadv_header { | ||
| 168 | uint8_t packet_type; | ||
| 169 | uint8_t version; /* batman version field */ | ||
| 170 | uint8_t ttl; | ||
| 171 | /* the parent struct has to add a byte after the header to make | ||
| 172 | * everything 4 bytes aligned again | ||
| 173 | */ | ||
| 174 | }; | ||
| 175 | 168 | ||
| 176 | /** | 169 | /** |
| 177 | * struct batadv_ogm_packet - ogm (routing protocol) packet | 170 | * struct batadv_ogm_packet - ogm (routing protocol) packet |
| 178 | * @header: common batman packet header | 171 | * @packet_type: batman-adv packet type, part of the general header |
| 172 | * @version: batman-adv protocol version, part of the genereal header | ||
| 173 | * @ttl: time to live for this packet, part of the genereal header | ||
| 179 | * @flags: contains routing relevant flags - see enum batadv_iv_flags | 174 | * @flags: contains routing relevant flags - see enum batadv_iv_flags |
| 180 | * @tvlv_len: length of tvlv data following the ogm header | 175 | * @tvlv_len: length of tvlv data following the ogm header |
| 181 | */ | 176 | */ |
| 182 | struct batadv_ogm_packet { | 177 | struct batadv_ogm_packet { |
| 183 | struct batadv_header header; | 178 | uint8_t packet_type; |
| 179 | uint8_t version; | ||
| 180 | uint8_t ttl; | ||
| 184 | uint8_t flags; | 181 | uint8_t flags; |
| 185 | __be32 seqno; | 182 | __be32 seqno; |
| 186 | uint8_t orig[ETH_ALEN]; | 183 | uint8_t orig[ETH_ALEN]; |
| @@ -196,29 +193,51 @@ struct batadv_ogm_packet { | |||
| 196 | #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet) | 193 | #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet) |
| 197 | 194 | ||
| 198 | /** | 195 | /** |
| 199 | * batadv_icmp_header - common ICMP header | 196 | * batadv_icmp_header - common members among all the ICMP packets |
| 200 | * @header: common batman header | 197 | * @packet_type: batman-adv packet type, part of the general header |
| 198 | * @version: batman-adv protocol version, part of the genereal header | ||
| 199 | * @ttl: time to live for this packet, part of the genereal header | ||
| 201 | * @msg_type: ICMP packet type | 200 | * @msg_type: ICMP packet type |
| 202 | * @dst: address of the destination node | 201 | * @dst: address of the destination node |
| 203 | * @orig: address of the source node | 202 | * @orig: address of the source node |
| 204 | * @uid: local ICMP socket identifier | 203 | * @uid: local ICMP socket identifier |
| 204 | * @align: not used - useful for alignment purposes only | ||
| 205 | * | ||
| 206 | * This structure is used for ICMP packets parsing only and it is never sent | ||
| 207 | * over the wire. The alignment field at the end is there to ensure that | ||
| 208 | * members are padded the same way as they are in real packets. | ||
| 205 | */ | 209 | */ |
| 206 | struct batadv_icmp_header { | 210 | struct batadv_icmp_header { |
| 207 | struct batadv_header header; | 211 | uint8_t packet_type; |
| 212 | uint8_t version; | ||
| 213 | uint8_t ttl; | ||
| 208 | uint8_t msg_type; /* see ICMP message types above */ | 214 | uint8_t msg_type; /* see ICMP message types above */ |
| 209 | uint8_t dst[ETH_ALEN]; | 215 | uint8_t dst[ETH_ALEN]; |
| 210 | uint8_t orig[ETH_ALEN]; | 216 | uint8_t orig[ETH_ALEN]; |
| 211 | uint8_t uid; | 217 | uint8_t uid; |
| 218 | uint8_t align[3]; | ||
| 212 | }; | 219 | }; |
| 213 | 220 | ||
| 214 | /** | 221 | /** |
| 215 | * batadv_icmp_packet - ICMP packet | 222 | * batadv_icmp_packet - ICMP packet |
| 216 | * @icmph: common ICMP header | 223 | * @packet_type: batman-adv packet type, part of the general header |
| 224 | * @version: batman-adv protocol version, part of the genereal header | ||
| 225 | * @ttl: time to live for this packet, part of the genereal header | ||
| 226 | * @msg_type: ICMP packet type | ||
| 227 | * @dst: address of the destination node | ||
| 228 | * @orig: address of the source node | ||
| 229 | * @uid: local ICMP socket identifier | ||
| 217 | * @reserved: not used - useful for alignment | 230 | * @reserved: not used - useful for alignment |
| 218 | * @seqno: ICMP sequence number | 231 | * @seqno: ICMP sequence number |
| 219 | */ | 232 | */ |
| 220 | struct batadv_icmp_packet { | 233 | struct batadv_icmp_packet { |
| 221 | struct batadv_icmp_header icmph; | 234 | uint8_t packet_type; |
| 235 | uint8_t version; | ||
| 236 | uint8_t ttl; | ||
| 237 | uint8_t msg_type; /* see ICMP message types above */ | ||
| 238 | uint8_t dst[ETH_ALEN]; | ||
| 239 | uint8_t orig[ETH_ALEN]; | ||
| 240 | uint8_t uid; | ||
| 222 | uint8_t reserved; | 241 | uint8_t reserved; |
| 223 | __be16 seqno; | 242 | __be16 seqno; |
| 224 | }; | 243 | }; |
| @@ -227,13 +246,25 @@ struct batadv_icmp_packet { | |||
| 227 | 246 | ||
| 228 | /** | 247 | /** |
| 229 | * batadv_icmp_packet_rr - ICMP RouteRecord packet | 248 | * batadv_icmp_packet_rr - ICMP RouteRecord packet |
| 230 | * @icmph: common ICMP header | 249 | * @packet_type: batman-adv packet type, part of the general header |
| 250 | * @version: batman-adv protocol version, part of the genereal header | ||
| 251 | * @ttl: time to live for this packet, part of the genereal header | ||
| 252 | * @msg_type: ICMP packet type | ||
| 253 | * @dst: address of the destination node | ||
| 254 | * @orig: address of the source node | ||
| 255 | * @uid: local ICMP socket identifier | ||
| 231 | * @rr_cur: number of entries the rr array | 256 | * @rr_cur: number of entries the rr array |
| 232 | * @seqno: ICMP sequence number | 257 | * @seqno: ICMP sequence number |
| 233 | * @rr: route record array | 258 | * @rr: route record array |
| 234 | */ | 259 | */ |
| 235 | struct batadv_icmp_packet_rr { | 260 | struct batadv_icmp_packet_rr { |
| 236 | struct batadv_icmp_header icmph; | 261 | uint8_t packet_type; |
| 262 | uint8_t version; | ||
| 263 | uint8_t ttl; | ||
| 264 | uint8_t msg_type; /* see ICMP message types above */ | ||
| 265 | uint8_t dst[ETH_ALEN]; | ||
| 266 | uint8_t orig[ETH_ALEN]; | ||
| 267 | uint8_t uid; | ||
| 237 | uint8_t rr_cur; | 268 | uint8_t rr_cur; |
| 238 | __be16 seqno; | 269 | __be16 seqno; |
| 239 | uint8_t rr[BATADV_RR_LEN][ETH_ALEN]; | 270 | uint8_t rr[BATADV_RR_LEN][ETH_ALEN]; |
| @@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr { | |||
| 253 | */ | 284 | */ |
| 254 | #pragma pack(2) | 285 | #pragma pack(2) |
| 255 | 286 | ||
| 287 | /** | ||
| 288 | * struct batadv_unicast_packet - unicast packet for network payload | ||
| 289 | * @packet_type: batman-adv packet type, part of the general header | ||
| 290 | * @version: batman-adv protocol version, part of the genereal header | ||
| 291 | * @ttl: time to live for this packet, part of the genereal header | ||
| 292 | * @ttvn: translation table version number | ||
| 293 | * @dest: originator destination of the unicast packet | ||
| 294 | */ | ||
| 256 | struct batadv_unicast_packet { | 295 | struct batadv_unicast_packet { |
| 257 | struct batadv_header header; | 296 | uint8_t packet_type; |
| 297 | uint8_t version; | ||
| 298 | uint8_t ttl; | ||
| 258 | uint8_t ttvn; /* destination translation table version number */ | 299 | uint8_t ttvn; /* destination translation table version number */ |
| 259 | uint8_t dest[ETH_ALEN]; | 300 | uint8_t dest[ETH_ALEN]; |
| 260 | /* "4 bytes boundary + 2 bytes" long to make the payload after the | 301 | /* "4 bytes boundary + 2 bytes" long to make the payload after the |
| @@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet { | |||
| 280 | 321 | ||
| 281 | /** | 322 | /** |
| 282 | * struct batadv_frag_packet - fragmented packet | 323 | * struct batadv_frag_packet - fragmented packet |
| 283 | * @header: common batman packet header with type, compatversion, and ttl | 324 | * @packet_type: batman-adv packet type, part of the general header |
| 325 | * @version: batman-adv protocol version, part of the genereal header | ||
| 326 | * @ttl: time to live for this packet, part of the genereal header | ||
| 284 | * @dest: final destination used when routing fragments | 327 | * @dest: final destination used when routing fragments |
| 285 | * @orig: originator of the fragment used when merging the packet | 328 | * @orig: originator of the fragment used when merging the packet |
| 286 | * @no: fragment number within this sequence | 329 | * @no: fragment number within this sequence |
| @@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet { | |||
| 289 | * @total_size: size of the merged packet | 332 | * @total_size: size of the merged packet |
| 290 | */ | 333 | */ |
| 291 | struct batadv_frag_packet { | 334 | struct batadv_frag_packet { |
| 292 | struct batadv_header header; | 335 | uint8_t packet_type; |
| 336 | uint8_t version; /* batman version field */ | ||
| 337 | uint8_t ttl; | ||
| 293 | #if defined(__BIG_ENDIAN_BITFIELD) | 338 | #if defined(__BIG_ENDIAN_BITFIELD) |
| 294 | uint8_t no:4; | 339 | uint8_t no:4; |
| 295 | uint8_t reserved:4; | 340 | uint8_t reserved:4; |
| @@ -305,8 +350,19 @@ struct batadv_frag_packet { | |||
| 305 | __be16 total_size; | 350 | __be16 total_size; |
| 306 | }; | 351 | }; |
| 307 | 352 | ||
| 353 | /** | ||
| 354 | * struct batadv_bcast_packet - broadcast packet for network payload | ||
| 355 | * @packet_type: batman-adv packet type, part of the general header | ||
| 356 | * @version: batman-adv protocol version, part of the genereal header | ||
| 357 | * @ttl: time to live for this packet, part of the genereal header | ||
| 358 | * @reserved: reserved byte for alignment | ||
| 359 | * @seqno: sequence identification | ||
| 360 | * @orig: originator of the broadcast packet | ||
| 361 | */ | ||
| 308 | struct batadv_bcast_packet { | 362 | struct batadv_bcast_packet { |
| 309 | struct batadv_header header; | 363 | uint8_t packet_type; |
| 364 | uint8_t version; /* batman version field */ | ||
| 365 | uint8_t ttl; | ||
| 310 | uint8_t reserved; | 366 | uint8_t reserved; |
| 311 | __be32 seqno; | 367 | __be32 seqno; |
| 312 | uint8_t orig[ETH_ALEN]; | 368 | uint8_t orig[ETH_ALEN]; |
| @@ -315,11 +371,11 @@ struct batadv_bcast_packet { | |||
| 315 | */ | 371 | */ |
| 316 | }; | 372 | }; |
| 317 | 373 | ||
| 318 | #pragma pack() | ||
| 319 | |||
| 320 | /** | 374 | /** |
| 321 | * struct batadv_coded_packet - network coded packet | 375 | * struct batadv_coded_packet - network coded packet |
| 322 | * @header: common batman packet header and ttl of first included packet | 376 | * @packet_type: batman-adv packet type, part of the general header |
| 377 | * @version: batman-adv protocol version, part of the genereal header | ||
| 378 | * @ttl: time to live for this packet, part of the genereal header | ||
| 323 | * @reserved: Align following fields to 2-byte boundaries | 379 | * @reserved: Align following fields to 2-byte boundaries |
| 324 | * @first_source: original source of first included packet | 380 | * @first_source: original source of first included packet |
| 325 | * @first_orig_dest: original destinal of first included packet | 381 | * @first_orig_dest: original destinal of first included packet |
| @@ -334,7 +390,9 @@ struct batadv_bcast_packet { | |||
| 334 | * @coded_len: length of network coded part of the payload | 390 | * @coded_len: length of network coded part of the payload |
| 335 | */ | 391 | */ |
| 336 | struct batadv_coded_packet { | 392 | struct batadv_coded_packet { |
| 337 | struct batadv_header header; | 393 | uint8_t packet_type; |
| 394 | uint8_t version; /* batman version field */ | ||
| 395 | uint8_t ttl; | ||
| 338 | uint8_t first_ttvn; | 396 | uint8_t first_ttvn; |
| 339 | /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */ | 397 | /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */ |
| 340 | uint8_t first_source[ETH_ALEN]; | 398 | uint8_t first_source[ETH_ALEN]; |
| @@ -349,9 +407,13 @@ struct batadv_coded_packet { | |||
| 349 | __be16 coded_len; | 407 | __be16 coded_len; |
| 350 | }; | 408 | }; |
| 351 | 409 | ||
| 410 | #pragma pack() | ||
| 411 | |||
| 352 | /** | 412 | /** |
| 353 | * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload | 413 | * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload |
| 354 | * @header: common batman packet header | 414 | * @packet_type: batman-adv packet type, part of the general header |
| 415 | * @version: batman-adv protocol version, part of the genereal header | ||
| 416 | * @ttl: time to live for this packet, part of the genereal header | ||
| 355 | * @reserved: reserved field (for packet alignment) | 417 | * @reserved: reserved field (for packet alignment) |
| 356 | * @src: address of the source | 418 | * @src: address of the source |
| 357 | * @dst: address of the destination | 419 | * @dst: address of the destination |
| @@ -359,7 +421,9 @@ struct batadv_coded_packet { | |||
| 359 | * @align: 2 bytes to align the header to a 4 byte boundary | 421 | * @align: 2 bytes to align the header to a 4 byte boundary |
| 360 | */ | 422 | */ |
| 361 | struct batadv_unicast_tvlv_packet { | 423 | struct batadv_unicast_tvlv_packet { |
| 362 | struct batadv_header header; | 424 | uint8_t packet_type; |
| 425 | uint8_t version; /* batman version field */ | ||
| 426 | uint8_t ttl; | ||
| 363 | uint8_t reserved; | 427 | uint8_t reserved; |
| 364 | uint8_t dst[ETH_ALEN]; | 428 | uint8_t dst[ETH_ALEN]; |
| 365 | uint8_t src[ETH_ALEN]; | 429 | uint8_t src[ETH_ALEN]; |
| @@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data { | |||
| 420 | * struct batadv_tvlv_tt_change - translation table diff data | 484 | * struct batadv_tvlv_tt_change - translation table diff data |
| 421 | * @flags: status indicators concerning the non-mesh client (see | 485 | * @flags: status indicators concerning the non-mesh client (see |
| 422 | * batadv_tt_client_flags) | 486 | * batadv_tt_client_flags) |
| 423 | * @reserved: reserved field | 487 | * @reserved: reserved field - useful for alignment purposes only |
| 424 | * @addr: mac address of non-mesh client that triggered this tt change | 488 | * @addr: mac address of non-mesh client that triggered this tt change |
| 425 | * @vid: VLAN identifier | 489 | * @vid: VLAN identifier |
| 426 | */ | 490 | */ |
| 427 | struct batadv_tvlv_tt_change { | 491 | struct batadv_tvlv_tt_change { |
| 428 | uint8_t flags; | 492 | uint8_t flags; |
| 429 | uint8_t reserved; | 493 | uint8_t reserved[3]; |
| 430 | uint8_t addr[ETH_ALEN]; | 494 | uint8_t addr[ETH_ALEN]; |
| 431 | __be16 vid; | 495 | __be16 vid; |
| 432 | }; | 496 | }; |
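
The packet.h hunks above remove the shared struct batadv_header and inline its three bytes (packet_type, version, ttl) at the front of every packet struct, so the on-wire layout stays identical while the C access path loses one level of nesting. A minimal userspace sketch of that equivalence, with field names taken from the hunks and __attribute__((packed)) standing in for the surrounding #pragma pack region:

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

#define ETH_ALEN 6

/* old layout: common bytes wrapped in a sub-struct */
struct old_header { uint8_t packet_type, version, ttl; };
struct old_bcast_packet {
	struct old_header header;
	uint8_t reserved;
	uint32_t seqno;			/* __be32 in the kernel */
	uint8_t orig[ETH_ALEN];
} __attribute__((packed));

/* new layout: the same three bytes inlined at the front */
struct new_bcast_packet {
	uint8_t packet_type;
	uint8_t version;
	uint8_t ttl;
	uint8_t reserved;
	uint32_t seqno;
	uint8_t orig[ETH_ALEN];
} __attribute__((packed));

int main(void)
{
	/* the wire format does not change, only the access path does */
	assert(sizeof(struct old_bcast_packet) == sizeof(struct new_bcast_packet));
	assert(offsetof(struct old_bcast_packet, seqno) ==
	       offsetof(struct new_bcast_packet, seqno));
	return 0;
}

This is why the later hunks in routing.c, send.c and soft-interface.c can simply drop the "header." prefix from every field access.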
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index d4114d775ad6..46278bfb8fdb 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
| @@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv, | |||
| 308 | memcpy(icmph->dst, icmph->orig, ETH_ALEN); | 308 | memcpy(icmph->dst, icmph->orig, ETH_ALEN); |
| 309 | memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN); | 309 | memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN); |
| 310 | icmph->msg_type = BATADV_ECHO_REPLY; | 310 | icmph->msg_type = BATADV_ECHO_REPLY; |
| 311 | icmph->header.ttl = BATADV_TTL; | 311 | icmph->ttl = BATADV_TTL; |
| 312 | 312 | ||
| 313 | res = batadv_send_skb_to_orig(skb, orig_node, NULL); | 313 | res = batadv_send_skb_to_orig(skb, orig_node, NULL); |
| 314 | if (res != NET_XMIT_DROP) | 314 | if (res != NET_XMIT_DROP) |
| @@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, | |||
| 338 | icmp_packet = (struct batadv_icmp_packet *)skb->data; | 338 | icmp_packet = (struct batadv_icmp_packet *)skb->data; |
| 339 | 339 | ||
| 340 | /* send TTL exceeded if packet is an echo request (traceroute) */ | 340 | /* send TTL exceeded if packet is an echo request (traceroute) */ |
| 341 | if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) { | 341 | if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) { |
| 342 | pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", | 342 | pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", |
| 343 | icmp_packet->icmph.orig, icmp_packet->icmph.dst); | 343 | icmp_packet->orig, icmp_packet->dst); |
| 344 | goto out; | 344 | goto out; |
| 345 | } | 345 | } |
| 346 | 346 | ||
| @@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, | |||
| 349 | goto out; | 349 | goto out; |
| 350 | 350 | ||
| 351 | /* get routing information */ | 351 | /* get routing information */ |
| 352 | orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig); | 352 | orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig); |
| 353 | if (!orig_node) | 353 | if (!orig_node) |
| 354 | goto out; | 354 | goto out; |
| 355 | 355 | ||
| @@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, | |||
| 359 | 359 | ||
| 360 | icmp_packet = (struct batadv_icmp_packet *)skb->data; | 360 | icmp_packet = (struct batadv_icmp_packet *)skb->data; |
| 361 | 361 | ||
| 362 | memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN); | 362 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); |
| 363 | memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr, | 363 | memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, |
| 364 | ETH_ALEN); | 364 | ETH_ALEN); |
| 365 | icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED; | 365 | icmp_packet->msg_type = BATADV_TTL_EXCEEDED; |
| 366 | icmp_packet->icmph.header.ttl = BATADV_TTL; | 366 | icmp_packet->ttl = BATADV_TTL; |
| 367 | 367 | ||
| 368 | if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) | 368 | if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) |
| 369 | ret = NET_RX_SUCCESS; | 369 | ret = NET_RX_SUCCESS; |
| @@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
| 434 | return batadv_recv_my_icmp_packet(bat_priv, skb); | 434 | return batadv_recv_my_icmp_packet(bat_priv, skb); |
| 435 | 435 | ||
| 436 | /* TTL exceeded */ | 436 | /* TTL exceeded */ |
| 437 | if (icmph->header.ttl < 2) | 437 | if (icmph->ttl < 2) |
| 438 | return batadv_recv_icmp_ttl_exceeded(bat_priv, skb); | 438 | return batadv_recv_icmp_ttl_exceeded(bat_priv, skb); |
| 439 | 439 | ||
| 440 | /* get routing information */ | 440 | /* get routing information */ |
| @@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
| 449 | icmph = (struct batadv_icmp_header *)skb->data; | 449 | icmph = (struct batadv_icmp_header *)skb->data; |
| 450 | 450 | ||
| 451 | /* decrement ttl */ | 451 | /* decrement ttl */ |
| 452 | icmph->header.ttl--; | 452 | icmph->ttl--; |
| 453 | 453 | ||
| 454 | /* route it */ | 454 | /* route it */ |
| 455 | if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP) | 455 | if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP) |
| @@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, | |||
| 709 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 709 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
| 710 | 710 | ||
| 711 | /* TTL exceeded */ | 711 | /* TTL exceeded */ |
| 712 | if (unicast_packet->header.ttl < 2) { | 712 | if (unicast_packet->ttl < 2) { |
| 713 | pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n", | 713 | pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n", |
| 714 | ethhdr->h_source, unicast_packet->dest); | 714 | ethhdr->h_source, unicast_packet->dest); |
| 715 | goto out; | 715 | goto out; |
| @@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, | |||
| 727 | 727 | ||
| 728 | /* decrement ttl */ | 728 | /* decrement ttl */ |
| 729 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 729 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
| 730 | unicast_packet->header.ttl--; | 730 | unicast_packet->ttl--; |
| 731 | 731 | ||
| 732 | switch (unicast_packet->header.packet_type) { | 732 | switch (unicast_packet->packet_type) { |
| 733 | case BATADV_UNICAST_4ADDR: | 733 | case BATADV_UNICAST_4ADDR: |
| 734 | hdr_len = sizeof(struct batadv_unicast_4addr_packet); | 734 | hdr_len = sizeof(struct batadv_unicast_4addr_packet); |
| 735 | break; | 735 | break; |
| @@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
| 970 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 970 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
| 971 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | 971 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; |
| 972 | 972 | ||
| 973 | is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR; | 973 | is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR; |
| 974 | /* the caller function should have already pulled 2 bytes */ | 974 | /* the caller function should have already pulled 2 bytes */ |
| 975 | if (is4addr) | 975 | if (is4addr) |
| 976 | hdr_size = sizeof(*unicast_4addr_packet); | 976 | hdr_size = sizeof(*unicast_4addr_packet); |
| @@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, | |||
| 1160 | if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) | 1160 | if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) |
| 1161 | goto out; | 1161 | goto out; |
| 1162 | 1162 | ||
| 1163 | if (bcast_packet->header.ttl < 2) | 1163 | if (bcast_packet->ttl < 2) |
| 1164 | goto out; | 1164 | goto out; |
| 1165 | 1165 | ||
| 1166 | orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig); | 1166 | orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig); |
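
The routing.c hunks above all touch the same forwarding rule: a packet whose remaining ttl cannot survive one more hop is answered or dropped, otherwise the ttl is decremented before the skb is re-routed. A toy rendition of that rule (the helper name is illustrative, not a kernel symbol):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* hypothetical helper mirroring the "ttl < 2" checks in the hunks above */
static bool ttl_allows_forward(uint8_t ttl)
{
	return ttl >= 2;	/* one hop consumed here, at least one left */
}

int main(void)
{
	uint8_t ttl = 2;

	if (!ttl_allows_forward(ttl)) {
		puts("ttl exceeded - reply with BATADV_TTL_EXCEEDED or drop");
		return 0;
	}
	ttl--;	/* decrement before handing the skb back to the routing path */
	printf("forward with ttl=%u\n", ttl);
	return 0;
}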
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index c83be5ebaa28..fba4dcfcfac2 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
| @@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size, | |||
| 161 | return false; | 161 | return false; |
| 162 | 162 | ||
| 163 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 163 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
| 164 | unicast_packet->header.version = BATADV_COMPAT_VERSION; | 164 | unicast_packet->version = BATADV_COMPAT_VERSION; |
| 165 | /* batman packet type: unicast */ | 165 | /* batman packet type: unicast */ |
| 166 | unicast_packet->header.packet_type = BATADV_UNICAST; | 166 | unicast_packet->packet_type = BATADV_UNICAST; |
| 167 | /* set unicast ttl */ | 167 | /* set unicast ttl */ |
| 168 | unicast_packet->header.ttl = BATADV_TTL; | 168 | unicast_packet->ttl = BATADV_TTL; |
| 169 | /* copy the destination for faster routing */ | 169 | /* copy the destination for faster routing */ |
| 170 | memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); | 170 | memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); |
| 171 | /* set the destination tt version number */ | 171 | /* set the destination tt version number */ |
| @@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv, | |||
| 221 | goto out; | 221 | goto out; |
| 222 | 222 | ||
| 223 | uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | 223 | uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; |
| 224 | uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR; | 224 | uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR; |
| 225 | memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); | 225 | memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); |
| 226 | uc_4addr_packet->subtype = packet_subtype; | 226 | uc_4addr_packet->subtype = packet_subtype; |
| 227 | uc_4addr_packet->reserved = 0; | 227 | uc_4addr_packet->reserved = 0; |
| @@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, | |||
| 436 | 436 | ||
| 437 | /* as we have a copy now, it is safe to decrease the TTL */ | 437 | /* as we have a copy now, it is safe to decrease the TTL */ |
| 438 | bcast_packet = (struct batadv_bcast_packet *)newskb->data; | 438 | bcast_packet = (struct batadv_bcast_packet *)newskb->data; |
| 439 | bcast_packet->header.ttl--; | 439 | bcast_packet->ttl--; |
| 440 | 440 | ||
| 441 | skb_reset_mac_header(newskb); | 441 | skb_reset_mac_header(newskb); |
| 442 | 442 | ||
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 36f050876f82..a8f99d1486c0 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
| @@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
| 264 | goto dropped; | 264 | goto dropped; |
| 265 | 265 | ||
| 266 | bcast_packet = (struct batadv_bcast_packet *)skb->data; | 266 | bcast_packet = (struct batadv_bcast_packet *)skb->data; |
| 267 | bcast_packet->header.version = BATADV_COMPAT_VERSION; | 267 | bcast_packet->version = BATADV_COMPAT_VERSION; |
| 268 | bcast_packet->header.ttl = BATADV_TTL; | 268 | bcast_packet->ttl = BATADV_TTL; |
| 269 | 269 | ||
| 270 | /* batman packet type: broadcast */ | 270 | /* batman packet type: broadcast */ |
| 271 | bcast_packet->header.packet_type = BATADV_BCAST; | 271 | bcast_packet->packet_type = BATADV_BCAST; |
| 272 | bcast_packet->reserved = 0; | 272 | bcast_packet->reserved = 0; |
| 273 | 273 | ||
| 274 | /* hw address of first interface is the orig mac because only | 274 | /* hw address of first interface is the orig mac because only |
| @@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
| 328 | struct sk_buff *skb, struct batadv_hard_iface *recv_if, | 328 | struct sk_buff *skb, struct batadv_hard_iface *recv_if, |
| 329 | int hdr_size, struct batadv_orig_node *orig_node) | 329 | int hdr_size, struct batadv_orig_node *orig_node) |
| 330 | { | 330 | { |
| 331 | struct batadv_header *batadv_header = (struct batadv_header *)skb->data; | 331 | struct batadv_bcast_packet *batadv_bcast_packet; |
| 332 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); | 332 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); |
| 333 | __be16 ethertype = htons(ETH_P_BATMAN); | 333 | __be16 ethertype = htons(ETH_P_BATMAN); |
| 334 | struct vlan_ethhdr *vhdr; | 334 | struct vlan_ethhdr *vhdr; |
| @@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
| 336 | unsigned short vid; | 336 | unsigned short vid; |
| 337 | bool is_bcast; | 337 | bool is_bcast; |
| 338 | 338 | ||
| 339 | is_bcast = (batadv_header->packet_type == BATADV_BCAST); | 339 | batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; |
| 340 | is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); | ||
| 340 | 341 | ||
| 341 | /* check if enough space is available for pulling, and pull */ | 342 | /* check if enough space is available for pulling, and pull */ |
| 342 | if (!pskb_may_pull(skb, hdr_size)) | 343 | if (!pskb_may_pull(skb, hdr_size)) |
| @@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
| 345 | skb_pull_rcsum(skb, hdr_size); | 346 | skb_pull_rcsum(skb, hdr_size); |
| 346 | skb_reset_mac_header(skb); | 347 | skb_reset_mac_header(skb); |
| 347 | 348 | ||
| 348 | vid = batadv_get_vid(skb, hdr_size); | 349 | /* clean the netfilter state now that the batman-adv header has been |
| 350 | * removed | ||
| 351 | */ | ||
| 352 | nf_reset(skb); | ||
| 353 | |||
| 354 | vid = batadv_get_vid(skb, 0); | ||
| 349 | ethhdr = eth_hdr(skb); | 355 | ethhdr = eth_hdr(skb); |
| 350 | 356 | ||
| 351 | switch (ntohs(ethhdr->h_proto)) { | 357 | switch (ntohs(ethhdr->h_proto)) { |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 4add57d4857f..ff625fedbc5e 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, | |||
| 333 | return; | 333 | return; |
| 334 | 334 | ||
| 335 | tt_change_node->change.flags = flags; | 335 | tt_change_node->change.flags = flags; |
| 336 | tt_change_node->change.reserved = 0; | 336 | memset(tt_change_node->change.reserved, 0, |
| 337 | sizeof(tt_change_node->change.reserved)); | ||
| 337 | memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN); | 338 | memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN); |
| 338 | tt_change_node->change.vid = htons(common->vid); | 339 | tt_change_node->change.vid = htons(common->vid); |
| 339 | 340 | ||
| @@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, | |||
| 2221 | ETH_ALEN); | 2222 | ETH_ALEN); |
| 2222 | tt_change->flags = tt_common_entry->flags; | 2223 | tt_change->flags = tt_common_entry->flags; |
| 2223 | tt_change->vid = htons(tt_common_entry->vid); | 2224 | tt_change->vid = htons(tt_common_entry->vid); |
| 2224 | tt_change->reserved = 0; | 2225 | memset(tt_change->reserved, 0, |
| 2226 | sizeof(tt_change->reserved)); | ||
| 2225 | 2227 | ||
| 2226 | tt_num_entries++; | 2228 | tt_num_entries++; |
| 2227 | tt_change++; | 2229 | tt_change++; |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 6a6c8bb4fd72..7552f9e3089c 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
| @@ -940,8 +940,22 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 940 | bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); | 940 | bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); |
| 941 | skb_pull(skb, 1); | 941 | skb_pull(skb, 1); |
| 942 | 942 | ||
| 943 | if (hci_pi(sk)->channel == HCI_CHANNEL_RAW && | 943 | if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { |
| 944 | bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { | 944 | /* No permission check is needed for user channel |
| 945 | * since that gets enforced when binding the socket. | ||
| 946 | * | ||
| 947 | * However check that the packet type is valid. | ||
| 948 | */ | ||
| 949 | if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT && | ||
| 950 | bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && | ||
| 951 | bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { | ||
| 952 | err = -EINVAL; | ||
| 953 | goto drop; | ||
| 954 | } | ||
| 955 | |||
| 956 | skb_queue_tail(&hdev->raw_q, skb); | ||
| 957 | queue_work(hdev->workqueue, &hdev->tx_work); | ||
| 958 | } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { | ||
| 945 | u16 opcode = get_unaligned_le16(skb->data); | 959 | u16 opcode = get_unaligned_le16(skb->data); |
| 946 | u16 ogf = hci_opcode_ogf(opcode); | 960 | u16 ogf = hci_opcode_ogf(opcode); |
| 947 | u16 ocf = hci_opcode_ocf(opcode); | 961 | u16 ocf = hci_opcode_ocf(opcode); |
| @@ -972,14 +986,6 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 972 | goto drop; | 986 | goto drop; |
| 973 | } | 987 | } |
| 974 | 988 | ||
| 975 | if (hci_pi(sk)->channel == HCI_CHANNEL_USER && | ||
| 976 | bt_cb(skb)->pkt_type != HCI_COMMAND_PKT && | ||
| 977 | bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && | ||
| 978 | bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { | ||
| 979 | err = -EINVAL; | ||
| 980 | goto drop; | ||
| 981 | } | ||
| 982 | |||
| 983 | skb_queue_tail(&hdev->raw_q, skb); | 989 | skb_queue_tail(&hdev->raw_q, skb); |
| 984 | queue_work(hdev->workqueue, &hdev->tx_work); | 990 | queue_work(hdev->workqueue, &hdev->tx_work); |
| 985 | } | 991 | } |
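
The hci_sock.c hunk moves the user-channel handling in front of the raw-channel command filtering: permission was already enforced when the socket was bound, so the only remaining guard is a whitelist of packet types before the skb is queued to hdev->raw_q. A standalone sketch of that whitelist (the constants are the usual HCI H:4 type values; the helper itself is illustrative, not a kernel function):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define HCI_COMMAND_PKT		0x01
#define HCI_ACLDATA_PKT		0x02
#define HCI_SCODATA_PKT		0x03
#define HCI_EVENT_PKT		0x04

static bool user_channel_pkt_ok(uint8_t pkt_type)
{
	switch (pkt_type) {
	case HCI_COMMAND_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		return true;	/* queue to the driver as-is */
	default:
		return false;	/* -EINVAL, drop the skb */
	}
}

int main(void)
{
	printf("ACL: %d, EVT: %d\n",
	       user_channel_pkt_ok(HCI_ACLDATA_PKT),
	       user_channel_pkt_ok(HCI_EVENT_PKT));
	return 0;
}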
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 4c214b2b88ef..ef66365b7354 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
| @@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) | |||
| 1998 | u32 old; | 1998 | u32 old; |
| 1999 | struct net_bridge_mdb_htable *mdb; | 1999 | struct net_bridge_mdb_htable *mdb; |
| 2000 | 2000 | ||
| 2001 | spin_lock(&br->multicast_lock); | 2001 | spin_lock_bh(&br->multicast_lock); |
| 2002 | if (!netif_running(br->dev)) | 2002 | if (!netif_running(br->dev)) |
| 2003 | goto unlock; | 2003 | goto unlock; |
| 2004 | 2004 | ||
| @@ -2030,7 +2030,7 @@ rollback: | |||
| 2030 | } | 2030 | } |
| 2031 | 2031 | ||
| 2032 | unlock: | 2032 | unlock: |
| 2033 | spin_unlock(&br->multicast_lock); | 2033 | spin_unlock_bh(&br->multicast_lock); |
| 2034 | 2034 | ||
| 2035 | return err; | 2035 | return err; |
| 2036 | } | 2036 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index ba3b7ea5ebb3..0ce469e5ec80 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb, | |||
| 2539 | } | 2539 | } |
| 2540 | 2540 | ||
| 2541 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 2541 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
| 2542 | struct netdev_queue *txq, void *accel_priv) | 2542 | struct netdev_queue *txq) |
| 2543 | { | 2543 | { |
| 2544 | const struct net_device_ops *ops = dev->netdev_ops; | 2544 | const struct net_device_ops *ops = dev->netdev_ops; |
| 2545 | int rc = NETDEV_TX_OK; | 2545 | int rc = NETDEV_TX_OK; |
| @@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
| 2605 | dev_queue_xmit_nit(skb, dev); | 2605 | dev_queue_xmit_nit(skb, dev); |
| 2606 | 2606 | ||
| 2607 | skb_len = skb->len; | 2607 | skb_len = skb->len; |
| 2608 | if (accel_priv) | ||
| 2609 | rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv); | ||
| 2610 | else | ||
| 2611 | rc = ops->ndo_start_xmit(skb, dev); | 2608 | rc = ops->ndo_start_xmit(skb, dev); |
| 2612 | 2609 | ||
| 2613 | trace_net_dev_xmit(skb, rc, dev, skb_len); | 2610 | trace_net_dev_xmit(skb, rc, dev, skb_len); |
| 2614 | if (rc == NETDEV_TX_OK && txq) | 2611 | if (rc == NETDEV_TX_OK) |
| 2615 | txq_trans_update(txq); | 2612 | txq_trans_update(txq); |
| 2616 | return rc; | 2613 | return rc; |
| 2617 | } | 2614 | } |
| @@ -2627,10 +2624,7 @@ gso: | |||
| 2627 | dev_queue_xmit_nit(nskb, dev); | 2624 | dev_queue_xmit_nit(nskb, dev); |
| 2628 | 2625 | ||
| 2629 | skb_len = nskb->len; | 2626 | skb_len = nskb->len; |
| 2630 | if (accel_priv) | 2627 | rc = ops->ndo_start_xmit(nskb, dev); |
| 2631 | rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv); | ||
| 2632 | else | ||
| 2633 | rc = ops->ndo_start_xmit(nskb, dev); | ||
| 2634 | trace_net_dev_xmit(nskb, rc, dev, skb_len); | 2628 | trace_net_dev_xmit(nskb, rc, dev, skb_len); |
| 2635 | if (unlikely(rc != NETDEV_TX_OK)) { | 2629 | if (unlikely(rc != NETDEV_TX_OK)) { |
| 2636 | if (rc & ~NETDEV_TX_MASK) | 2630 | if (rc & ~NETDEV_TX_MASK) |
| @@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit); | |||
| 2811 | * the BH enable code must have IRQs enabled so that it will not deadlock. | 2805 | * the BH enable code must have IRQs enabled so that it will not deadlock. |
| 2812 | * --BLG | 2806 | * --BLG |
| 2813 | */ | 2807 | */ |
| 2814 | int dev_queue_xmit(struct sk_buff *skb) | 2808 | int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) |
| 2815 | { | 2809 | { |
| 2816 | struct net_device *dev = skb->dev; | 2810 | struct net_device *dev = skb->dev; |
| 2817 | struct netdev_queue *txq; | 2811 | struct netdev_queue *txq; |
| @@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb) | |||
| 2827 | 2821 | ||
| 2828 | skb_update_prio(skb); | 2822 | skb_update_prio(skb); |
| 2829 | 2823 | ||
| 2830 | txq = netdev_pick_tx(dev, skb); | 2824 | txq = netdev_pick_tx(dev, skb, accel_priv); |
| 2831 | q = rcu_dereference_bh(txq->qdisc); | 2825 | q = rcu_dereference_bh(txq->qdisc); |
| 2832 | 2826 | ||
| 2833 | #ifdef CONFIG_NET_CLS_ACT | 2827 | #ifdef CONFIG_NET_CLS_ACT |
| @@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb) | |||
| 2863 | 2857 | ||
| 2864 | if (!netif_xmit_stopped(txq)) { | 2858 | if (!netif_xmit_stopped(txq)) { |
| 2865 | __this_cpu_inc(xmit_recursion); | 2859 | __this_cpu_inc(xmit_recursion); |
| 2866 | rc = dev_hard_start_xmit(skb, dev, txq, NULL); | 2860 | rc = dev_hard_start_xmit(skb, dev, txq); |
| 2867 | __this_cpu_dec(xmit_recursion); | 2861 | __this_cpu_dec(xmit_recursion); |
| 2868 | if (dev_xmit_complete(rc)) { | 2862 | if (dev_xmit_complete(rc)) { |
| 2869 | HARD_TX_UNLOCK(dev, txq); | 2863 | HARD_TX_UNLOCK(dev, txq); |
| @@ -2892,8 +2886,19 @@ out: | |||
| 2892 | rcu_read_unlock_bh(); | 2886 | rcu_read_unlock_bh(); |
| 2893 | return rc; | 2887 | return rc; |
| 2894 | } | 2888 | } |
| 2889 | |||
| 2890 | int dev_queue_xmit(struct sk_buff *skb) | ||
| 2891 | { | ||
| 2892 | return __dev_queue_xmit(skb, NULL); | ||
| 2893 | } | ||
| 2895 | EXPORT_SYMBOL(dev_queue_xmit); | 2894 | EXPORT_SYMBOL(dev_queue_xmit); |
| 2896 | 2895 | ||
| 2896 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) | ||
| 2897 | { | ||
| 2898 | return __dev_queue_xmit(skb, accel_priv); | ||
| 2899 | } | ||
| 2900 | EXPORT_SYMBOL(dev_queue_xmit_accel); | ||
| 2901 | |||
| 2897 | 2902 | ||
| 2898 | /*======================================================================= | 2903 | /*======================================================================= |
| 2899 | Receiver routines | 2904 | Receiver routines |
| @@ -4500,7 +4505,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, | |||
| 4500 | { | 4505 | { |
| 4501 | struct netdev_adjacent *upper; | 4506 | struct netdev_adjacent *upper; |
| 4502 | 4507 | ||
| 4503 | WARN_ON_ONCE(!rcu_read_lock_held()); | 4508 | WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); |
| 4504 | 4509 | ||
| 4505 | upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); | 4510 | upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); |
| 4506 | 4511 | ||
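
The net/core/dev.c hunks shift the ndo_dfwd_start_xmit() acceleration plumbing out of dev_hard_start_xmit() and into queue selection: accel_priv now only influences which tx queue is picked, and the two exported entry points become thin wrappers around one __dev_queue_xmit(). A toy userspace rendition of that control flow (all names below are illustrative stand-ins, not kernel symbols):

#include <stdio.h>
#include <stddef.h>

struct pkt { int len; };

/* stand-ins for ndo_select_queue() and ndo_start_xmit() */
static int select_queue(const struct pkt *p, void *accel_priv)
{
	(void)p;
	return accel_priv ? 1 : 0;	/* accelerated traffic gets its own queue */
}

static int start_xmit(const struct pkt *p, int queue)
{
	printf("xmit len=%d on queue %d\n", p->len, queue);
	return 0;
}

/* __dev_queue_xmit() equivalent: accel_priv only affects queue choice */
static int queue_xmit(const struct pkt *p, void *accel_priv)
{
	return start_xmit(p, select_queue(p, accel_priv));
}

int main(void)
{
	struct pkt p = { .len = 64 };
	int token = 1;

	queue_xmit(&p, NULL);	/* dev_queue_xmit() path */
	queue_xmit(&p, &token);	/* dev_queue_xmit_accel() path */
	return 0;
}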
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index d6ef17322500..2fc5beaf5783 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
| @@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) | |||
| 395 | EXPORT_SYMBOL(__netdev_pick_tx); | 395 | EXPORT_SYMBOL(__netdev_pick_tx); |
| 396 | 396 | ||
| 397 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, | 397 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
| 398 | struct sk_buff *skb) | 398 | struct sk_buff *skb, |
| 399 | void *accel_priv) | ||
| 399 | { | 400 | { |
| 400 | int queue_index = 0; | 401 | int queue_index = 0; |
| 401 | 402 | ||
| 402 | if (dev->real_num_tx_queues != 1) { | 403 | if (dev->real_num_tx_queues != 1) { |
| 403 | const struct net_device_ops *ops = dev->netdev_ops; | 404 | const struct net_device_ops *ops = dev->netdev_ops; |
| 404 | if (ops->ndo_select_queue) | 405 | if (ops->ndo_select_queue) |
| 405 | queue_index = ops->ndo_select_queue(dev, skb); | 406 | queue_index = ops->ndo_select_queue(dev, skb, |
| 407 | accel_priv); | ||
| 406 | else | 408 | else |
| 407 | queue_index = __netdev_pick_tx(dev, skb); | 409 | queue_index = __netdev_pick_tx(dev, skb); |
| 408 | queue_index = dev_cap_txqueue(dev, queue_index); | 410 | |
| 411 | if (!accel_priv) | ||
| 412 | queue_index = dev_cap_txqueue(dev, queue_index); | ||
| 409 | } | 413 | } |
| 410 | 414 | ||
| 411 | skb_set_queue_mapping(skb, queue_index); | 415 | skb_set_queue_mapping(skb, queue_index); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ca15f32821fb..932c6d7cf666 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
| 1161 | neigh->parms->reachable_time : | 1161 | neigh->parms->reachable_time : |
| 1162 | 0))); | 1162 | 0))); |
| 1163 | neigh->nud_state = new; | 1163 | neigh->nud_state = new; |
| 1164 | notify = 1; | ||
| 1164 | } | 1165 | } |
| 1165 | 1166 | ||
| 1166 | if (lladdr != neigh->ha) { | 1167 | if (lladdr != neigh->ha) { |
| @@ -1274,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb) | |||
| 1274 | 1275 | ||
| 1275 | if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, | 1276 | if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, |
| 1276 | skb->len) < 0 && | 1277 | skb->len) < 0 && |
| 1277 | dev->header_ops->rebuild(skb)) | 1278 | dev_rebuild_header(skb)) |
| 1278 | return 0; | 1279 | return 0; |
| 1279 | 1280 | ||
| 1280 | return dev_queue_xmit(skb); | 1281 | return dev_queue_xmit(skb); |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 8f971990677c..19fe9c717ced 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
| 375 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { | 375 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { |
| 376 | struct netdev_queue *txq; | 376 | struct netdev_queue *txq; |
| 377 | 377 | ||
| 378 | txq = netdev_pick_tx(dev, skb); | 378 | txq = netdev_pick_tx(dev, skb, NULL); |
| 379 | 379 | ||
| 380 | /* try until next clock tick */ | 380 | /* try until next clock tick */ |
| 381 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; | 381 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; |
| @@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
| 386 | !vlan_hw_offload_capable(netif_skb_features(skb), | 386 | !vlan_hw_offload_capable(netif_skb_features(skb), |
| 387 | skb->vlan_proto)) { | 387 | skb->vlan_proto)) { |
| 388 | skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); | 388 | skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); |
| 389 | if (unlikely(!skb)) | 389 | if (unlikely(!skb)) { |
| 390 | break; | 390 | /* This is actually a packet drop, but we |
| 391 | * don't want the code at the end of this | ||
| 392 | * function to try and re-queue a NULL skb. | ||
| 393 | */ | ||
| 394 | status = NETDEV_TX_OK; | ||
| 395 | goto unlock_txq; | ||
| 396 | } | ||
| 391 | skb->vlan_tci = 0; | 397 | skb->vlan_tci = 0; |
| 392 | } | 398 | } |
| 393 | 399 | ||
| @@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
| 395 | if (status == NETDEV_TX_OK) | 401 | if (status == NETDEV_TX_OK) |
| 396 | txq_trans_update(txq); | 402 | txq_trans_update(txq); |
| 397 | } | 403 | } |
| 404 | unlock_txq: | ||
| 398 | __netif_tx_unlock(txq); | 405 | __netif_tx_unlock(txq); |
| 399 | 406 | ||
| 400 | if (status == NETDEV_TX_OK) | 407 | if (status == NETDEV_TX_OK) |
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 4c6bdf97a657..595ddf0459db 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
| @@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = { | |||
| 152 | .llseek = noop_llseek, | 152 | .llseek = noop_llseek, |
| 153 | }; | 153 | }; |
| 154 | 154 | ||
| 155 | static __init int setup_jprobe(void) | ||
| 156 | { | ||
| 157 | int ret = register_jprobe(&dccp_send_probe); | ||
| 158 | |||
| 159 | if (ret) { | ||
| 160 | request_module("dccp"); | ||
| 161 | ret = register_jprobe(&dccp_send_probe); | ||
| 162 | } | ||
| 163 | return ret; | ||
| 164 | } | ||
| 165 | |||
| 166 | static __init int dccpprobe_init(void) | 155 | static __init int dccpprobe_init(void) |
| 167 | { | 156 | { |
| 168 | int ret = -ENOMEM; | 157 | int ret = -ENOMEM; |
| @@ -174,7 +163,13 @@ static __init int dccpprobe_init(void) | |||
| 174 | if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops)) | 163 | if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops)) |
| 175 | goto err0; | 164 | goto err0; |
| 176 | 165 | ||
| 177 | ret = setup_jprobe(); | 166 | ret = register_jprobe(&dccp_send_probe); |
| 167 | if (ret) { | ||
| 168 | ret = request_module("dccp"); | ||
| 169 | if (!ret) | ||
| 170 | ret = register_jprobe(&dccp_send_probe); | ||
| 171 | } | ||
| 172 | |||
| 178 | if (ret) | 173 | if (ret) |
| 179 | goto err1; | 174 | goto err1; |
| 180 | 175 | ||
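
The dccp/probe.c hunk inlines the old setup_jprobe() helper and, more importantly, stops discarding the request_module() return value: the retry only happens when loading the dependency actually succeeded. A toy version of that try/load/retry shape (the functions below are placeholders, not kernel APIs):

#include <stdio.h>

static int attempts;

static int try_register(void)	 { return attempts++ ? 0 : -1; }	/* fails the first time */
static int load_dependency(void) { return 0; }				/* request_module() stand-in */

int main(void)
{
	int ret = try_register();

	if (ret) {
		ret = load_dependency();	/* error is now propagated */
		if (!ret)
			ret = try_register();
	}
	printf("ret=%d\n", ret);
	return ret;
}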
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 459e200c08a4..a2d2456a557a 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c | |||
| @@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb, | |||
| 547 | hc06_ptr += 3; | 547 | hc06_ptr += 3; |
| 548 | } else { | 548 | } else { |
| 549 | /* compress nothing */ | 549 | /* compress nothing */ |
| 550 | memcpy(hc06_ptr, &hdr, 4); | 550 | memcpy(hc06_ptr, hdr, 4); |
| 551 | /* replace the top byte with new ECN | DSCP format */ | 551 | /* replace the top byte with new ECN | DSCP format */ |
| 552 | *hc06_ptr = tmp; | 552 | *hc06_ptr = tmp; |
| 553 | hc06_ptr += 4; | 553 | hc06_ptr += 4; |
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index e5d436188464..2cd02f32f99f 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
| @@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
| 28 | netdev_features_t enc_features; | 28 | netdev_features_t enc_features; |
| 29 | int ghl = GRE_HEADER_SECTION; | 29 | int ghl = GRE_HEADER_SECTION; |
| 30 | struct gre_base_hdr *greh; | 30 | struct gre_base_hdr *greh; |
| 31 | u16 mac_offset = skb->mac_header; | ||
| 31 | int mac_len = skb->mac_len; | 32 | int mac_len = skb->mac_len; |
| 32 | __be16 protocol = skb->protocol; | 33 | __be16 protocol = skb->protocol; |
| 33 | int tnl_hlen; | 34 | int tnl_hlen; |
| @@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
| 58 | } else | 59 | } else |
| 59 | csum = false; | 60 | csum = false; |
| 60 | 61 | ||
| 62 | if (unlikely(!pskb_may_pull(skb, ghl))) | ||
| 63 | goto out; | ||
| 64 | |||
| 61 | /* setup inner skb. */ | 65 | /* setup inner skb. */ |
| 62 | skb->protocol = greh->protocol; | 66 | skb->protocol = greh->protocol; |
| 63 | skb->encapsulation = 0; | 67 | skb->encapsulation = 0; |
| 64 | 68 | ||
| 65 | if (unlikely(!pskb_may_pull(skb, ghl))) | ||
| 66 | goto out; | ||
| 67 | |||
| 68 | __skb_pull(skb, ghl); | 69 | __skb_pull(skb, ghl); |
| 69 | skb_reset_mac_header(skb); | 70 | skb_reset_mac_header(skb); |
| 70 | skb_set_network_header(skb, skb_inner_network_offset(skb)); | 71 | skb_set_network_header(skb, skb_inner_network_offset(skb)); |
| @@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
| 73 | /* segment inner packet. */ | 74 | /* segment inner packet. */ |
| 74 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); | 75 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); |
| 75 | segs = skb_mac_gso_segment(skb, enc_features); | 76 | segs = skb_mac_gso_segment(skb, enc_features); |
| 76 | if (!segs || IS_ERR(segs)) | 77 | if (!segs || IS_ERR(segs)) { |
| 78 | skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); | ||
| 77 | goto out; | 79 | goto out; |
| 80 | } | ||
| 78 | 81 | ||
| 79 | skb = segs; | 82 | skb = segs; |
| 80 | tnl_hlen = skb_tnl_header_len(skb); | 83 | tnl_hlen = skb_tnl_header_len(skb); |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 56a964a553d2..a0f52dac8940 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
| @@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
| 106 | 106 | ||
| 107 | r->id.idiag_sport = inet->inet_sport; | 107 | r->id.idiag_sport = inet->inet_sport; |
| 108 | r->id.idiag_dport = inet->inet_dport; | 108 | r->id.idiag_dport = inet->inet_dport; |
| 109 | |||
| 110 | memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); | ||
| 111 | memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); | ||
| 112 | |||
| 109 | r->id.idiag_src[0] = inet->inet_rcv_saddr; | 113 | r->id.idiag_src[0] = inet->inet_rcv_saddr; |
| 110 | r->id.idiag_dst[0] = inet->inet_daddr; | 114 | r->id.idiag_dst[0] = inet->inet_daddr; |
| 111 | 115 | ||
| @@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, | |||
| 240 | 244 | ||
| 241 | r->idiag_family = tw->tw_family; | 245 | r->idiag_family = tw->tw_family; |
| 242 | r->idiag_retrans = 0; | 246 | r->idiag_retrans = 0; |
| 247 | |||
| 243 | r->id.idiag_if = tw->tw_bound_dev_if; | 248 | r->id.idiag_if = tw->tw_bound_dev_if; |
| 244 | sock_diag_save_cookie(tw, r->id.idiag_cookie); | 249 | sock_diag_save_cookie(tw, r->id.idiag_cookie); |
| 250 | |||
| 245 | r->id.idiag_sport = tw->tw_sport; | 251 | r->id.idiag_sport = tw->tw_sport; |
| 246 | r->id.idiag_dport = tw->tw_dport; | 252 | r->id.idiag_dport = tw->tw_dport; |
| 253 | |||
| 254 | memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); | ||
| 255 | memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); | ||
| 256 | |||
| 247 | r->id.idiag_src[0] = tw->tw_rcv_saddr; | 257 | r->id.idiag_src[0] = tw->tw_rcv_saddr; |
| 248 | r->id.idiag_dst[0] = tw->tw_daddr; | 258 | r->id.idiag_dst[0] = tw->tw_daddr; |
| 259 | |||
| 249 | r->idiag_state = tw->tw_substate; | 260 | r->idiag_state = tw->tw_substate; |
| 250 | r->idiag_timer = 3; | 261 | r->idiag_timer = 3; |
| 251 | r->idiag_expires = jiffies_to_msecs(tmo); | 262 | r->idiag_expires = jiffies_to_msecs(tmo); |
| @@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | |||
| 726 | 737 | ||
| 727 | r->id.idiag_sport = inet->inet_sport; | 738 | r->id.idiag_sport = inet->inet_sport; |
| 728 | r->id.idiag_dport = ireq->ir_rmt_port; | 739 | r->id.idiag_dport = ireq->ir_rmt_port; |
| 740 | |||
| 741 | memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); | ||
| 742 | memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); | ||
| 743 | |||
| 729 | r->id.idiag_src[0] = ireq->ir_loc_addr; | 744 | r->id.idiag_src[0] = ireq->ir_loc_addr; |
| 730 | r->id.idiag_dst[0] = ireq->ir_rmt_addr; | 745 | r->id.idiag_dst[0] = ireq->ir_rmt_addr; |
| 746 | |||
| 731 | r->idiag_expires = jiffies_to_msecs(tmo); | 747 | r->idiag_expires = jiffies_to_msecs(tmo); |
| 732 | r->idiag_rqueue = 0; | 748 | r->idiag_rqueue = 0; |
| 733 | r->idiag_wqueue = 0; | 749 | r->idiag_wqueue = 0; |
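
The inet_diag.c hunks all add the same pair of memset() calls because idiag_src and idiag_dst are sized for IPv6 (four __be32 words each) while the IPv4 paths only ever wrote word zero; without the memset the remaining twelve bytes of uninitialized kernel memory were copied out to userspace in the netlink reply. A simplified sketch of the fix (the struct below keeps only the two address arrays of the real inet_diag_sockid):

#include <stdint.h>
#include <string.h>

typedef uint32_t be32;	/* __be32 stand-in */

struct diag_sockid {
	be32 idiag_src[4];	/* sized for IPv6 */
	be32 idiag_dst[4];
};

static void fill_ipv4(struct diag_sockid *id, be32 saddr, be32 daddr)
{
	/* zero the whole arrays first: only word 0 carries the IPv4
	 * address, the rest would otherwise leak uninitialized data */
	memset(id->idiag_src, 0, sizeof(id->idiag_src));
	memset(id->idiag_dst, 0, sizeof(id->idiag_dst));
	id->idiag_src[0] = saddr;
	id->idiag_dst[0] = daddr;
}

int main(void)
{
	struct diag_sockid id;

	fill_ipv4(&id, 0, 0);
	return 0;
}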
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d7aea4c5b940..e560ef34cf4b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) | |||
| 217 | iph->saddr, iph->daddr, tpi->key); | 217 | iph->saddr, iph->daddr, tpi->key); |
| 218 | 218 | ||
| 219 | if (tunnel) { | 219 | if (tunnel) { |
| 220 | skb_pop_mac_header(skb); | ||
| 220 | ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); | 221 | ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); |
| 221 | return PACKET_RCVD; | 222 | return PACKET_RCVD; |
| 222 | } | 223 | } |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 912402752f2f..df184616493f 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk, | |||
| 828 | 828 | ||
| 829 | if (cork->length + length > maxnonfragsize - fragheaderlen) { | 829 | if (cork->length + length > maxnonfragsize - fragheaderlen) { |
| 830 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, | 830 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, |
| 831 | mtu-exthdrlen); | 831 | mtu - (opt ? opt->optlen : 0)); |
| 832 | return -EMSGSIZE; | 832 | return -EMSGSIZE; |
| 833 | } | 833 | } |
| 834 | 834 | ||
| @@ -1151,7 +1151,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, | |||
| 1151 | mtu : 0xFFFF; | 1151 | mtu : 0xFFFF; |
| 1152 | 1152 | ||
| 1153 | if (cork->length + size > maxnonfragsize - fragheaderlen) { | 1153 | if (cork->length + size > maxnonfragsize - fragheaderlen) { |
| 1154 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu); | 1154 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, |
| 1155 | mtu - (opt ? opt->optlen : 0)); | ||
| 1155 | return -EMSGSIZE; | 1156 | return -EMSGSIZE; |
| 1156 | } | 1157 | } |
| 1157 | 1158 | ||
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index f13bd91d9a56..a313c3fbeb46 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c | |||
| @@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par) | |||
| 423 | static struct xt_target synproxy_tg4_reg __read_mostly = { | 423 | static struct xt_target synproxy_tg4_reg __read_mostly = { |
| 424 | .name = "SYNPROXY", | 424 | .name = "SYNPROXY", |
| 425 | .family = NFPROTO_IPV4, | 425 | .family = NFPROTO_IPV4, |
| 426 | .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), | ||
| 426 | .target = synproxy_tg4, | 427 | .target = synproxy_tg4, |
| 427 | .targetsize = sizeof(struct xt_synproxy_info), | 428 | .targetsize = sizeof(struct xt_synproxy_info), |
| 428 | .checkentry = synproxy_tg4_check, | 429 | .checkentry = synproxy_tg4_check, |
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index fff5ba1a33b7..4a5e94ac314a 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c | |||
| @@ -72,7 +72,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
| 72 | { | 72 | { |
| 73 | const struct nft_reject *priv = nft_expr_priv(expr); | 73 | const struct nft_reject *priv = nft_expr_priv(expr); |
| 74 | 74 | ||
| 75 | if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type)) | 75 | if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type))) |
| 76 | goto nla_put_failure; | 76 | goto nla_put_failure; |
| 77 | 77 | ||
| 78 | switch (priv->type) { | 78 | switch (priv->type) { |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 62c19fdd102d..a7e4729e974b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -1600,20 +1600,15 @@ static void flush_stack(struct sock **stack, unsigned int count, | |||
| 1600 | } | 1600 | } |
| 1601 | 1601 | ||
| 1602 | /* For TCP sockets, sk_rx_dst is protected by socket lock | 1602 | /* For TCP sockets, sk_rx_dst is protected by socket lock |
| 1603 | * For UDP, we use sk_dst_lock to guard against concurrent changes. | 1603 | * For UDP, we use xchg() to guard against concurrent changes. |
| 1604 | */ | 1604 | */ |
| 1605 | static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) | 1605 | static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) |
| 1606 | { | 1606 | { |
| 1607 | struct dst_entry *old; | 1607 | struct dst_entry *old; |
| 1608 | 1608 | ||
| 1609 | spin_lock(&sk->sk_dst_lock); | 1609 | dst_hold(dst); |
| 1610 | old = sk->sk_rx_dst; | 1610 | old = xchg(&sk->sk_rx_dst, dst); |
| 1611 | if (likely(old != dst)) { | 1611 | dst_release(old); |
| 1612 | dst_hold(dst); | ||
| 1613 | sk->sk_rx_dst = dst; | ||
| 1614 | dst_release(old); | ||
| 1615 | } | ||
| 1616 | spin_unlock(&sk->sk_dst_lock); | ||
| 1617 | } | 1612 | } |
| 1618 | 1613 | ||
| 1619 | /* | 1614 | /* |
| @@ -2483,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, | |||
| 2483 | netdev_features_t features) | 2478 | netdev_features_t features) |
| 2484 | { | 2479 | { |
| 2485 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 2480 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
| 2481 | u16 mac_offset = skb->mac_header; | ||
| 2486 | int mac_len = skb->mac_len; | 2482 | int mac_len = skb->mac_len; |
| 2487 | int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); | 2483 | int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); |
| 2488 | __be16 protocol = skb->protocol; | 2484 | __be16 protocol = skb->protocol; |
| @@ -2502,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, | |||
| 2502 | /* segment inner packet. */ | 2498 | /* segment inner packet. */ |
| 2503 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); | 2499 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); |
| 2504 | segs = skb_mac_gso_segment(skb, enc_features); | 2500 | segs = skb_mac_gso_segment(skb, enc_features); |
| 2505 | if (!segs || IS_ERR(segs)) | 2501 | if (!segs || IS_ERR(segs)) { |
| 2502 | skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, | ||
| 2503 | mac_len); | ||
| 2506 | goto out; | 2504 | goto out; |
| 2505 | } | ||
| 2507 | 2506 | ||
| 2508 | outer_hlen = skb_tnl_header_len(skb); | 2507 | outer_hlen = skb_tnl_header_len(skb); |
| 2509 | skb = segs; | 2508 | skb = segs; |
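
Two things happen in the udp.c hunks above: udp_sk_rx_dst_set() drops the sk_dst_lock in favour of an unconditional hold + xchg() + release, and skb_udp_tunnel_segment() learns to unwind the partially rewritten skb when inner segmentation fails. The first is the interesting concurrency pattern; a userspace sketch with C11 atomics (the refcounted object is a toy stand-in for a dst_entry):

#include <stdatomic.h>
#include <stdio.h>

struct dst { atomic_int refcnt; };

static void dst_hold(struct dst *d)	{ atomic_fetch_add(&d->refcnt, 1); }
static void dst_release(struct dst *d)	{ if (d) atomic_fetch_sub(&d->refcnt, 1); }

static _Atomic(struct dst *) sk_rx_dst;

/* mirrors the rewritten udp_sk_rx_dst_set(): take a reference, publish
 * the new pointer atomically, drop the reference of whatever was there
 * before (which also handles the old == new case without a special check) */
static void rx_dst_set(struct dst *dst)
{
	struct dst *old;

	dst_hold(dst);
	old = atomic_exchange(&sk_rx_dst, dst);
	dst_release(old);
}

int main(void)
{
	struct dst a = { 1 }, b = { 1 };

	rx_dst_set(&a);
	rx_dst_set(&b);		/* the extra reference on a is dropped here */
	printf("a=%d b=%d\n", atomic_load(&a.refcnt), atomic_load(&b.refcnt));
	return 0;
}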
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 83206de2bc76..79c62bdcd3c5 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
| @@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, | |||
| 41 | { | 41 | { |
| 42 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 42 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
| 43 | unsigned int mss; | 43 | unsigned int mss; |
| 44 | int offset; | ||
| 45 | __wsum csum; | ||
| 46 | |||
| 47 | if (skb->encapsulation && | ||
| 48 | skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) { | ||
| 49 | segs = skb_udp_tunnel_segment(skb, features); | ||
| 50 | goto out; | ||
| 51 | } | ||
| 44 | 52 | ||
| 45 | mss = skb_shinfo(skb)->gso_size; | 53 | mss = skb_shinfo(skb)->gso_size; |
| 46 | if (unlikely(skb->len <= mss)) | 54 | if (unlikely(skb->len <= mss)) |
| @@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, | |||
| 63 | goto out; | 71 | goto out; |
| 64 | } | 72 | } |
| 65 | 73 | ||
| 74 | /* Do software UFO. Complete and fill in the UDP checksum as | ||
| 75 | * HW cannot do checksum of UDP packets sent as multiple | ||
| 76 | * IP fragments. | ||
| 77 | */ | ||
| 78 | offset = skb_checksum_start_offset(skb); | ||
| 79 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
| 80 | offset += skb->csum_offset; | ||
| 81 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
| 82 | skb->ip_summed = CHECKSUM_NONE; | ||
| 83 | |||
| 66 | /* Fragment the skb. IP headers of the fragments are updated in | 84 | /* Fragment the skb. IP headers of the fragments are updated in |
| 67 | * inet_gso_segment() | 85 | * inet_gso_segment() |
| 68 | */ | 86 | */ |
| 69 | if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) | 87 | segs = skb_segment(skb, features); |
| 70 | segs = skb_udp_tunnel_segment(skb, features); | ||
| 71 | else { | ||
| 72 | int offset; | ||
| 73 | __wsum csum; | ||
| 74 | |||
| 75 | /* Do software UFO. Complete and fill in the UDP checksum as | ||
| 76 | * HW cannot do checksum of UDP packets sent as multiple | ||
| 77 | * IP fragments. | ||
| 78 | */ | ||
| 79 | offset = skb_checksum_start_offset(skb); | ||
| 80 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
| 81 | offset += skb->csum_offset; | ||
| 82 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
| 83 | skb->ip_summed = CHECKSUM_NONE; | ||
| 84 | |||
| 85 | segs = skb_segment(skb, features); | ||
| 86 | } | ||
| 87 | out: | 88 | out: |
| 88 | return segs; | 89 | return segs; |
| 89 | } | 90 | } |
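
After the udp_offload.c reordering, the tunnel case returns early and the plain UFO path always finishes the UDP checksum in software before skb_segment(), since the resulting IP fragments cannot each be checksummed by hardware. For reference, the arithmetic being completed is the standard 16-bit ones' complement sum that csum_fold() finishes; a tiny self-contained illustration (not the kernel implementation):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* 16-bit ones' complement checksum over a byte buffer, padding an odd
 * tail byte with zero, then folding the carries and complementing */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += (uint32_t)data[i] << ((i & 1) ? 0 : 8);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };

	printf("checksum = 0x%04x\n", csum16(payload, sizeof(payload)));
	return 0;
}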
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d5fa5b8c443e..abe46a4228ce 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -1671,7 +1671,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) | |||
| 1671 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | 1671 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) |
| 1672 | { | 1672 | { |
| 1673 | struct in6_addr addr; | 1673 | struct in6_addr addr; |
| 1674 | if (ifp->prefix_len == 127) /* RFC 6164 */ | 1674 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
| 1675 | return; | 1675 | return; |
| 1676 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1676 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
| 1677 | if (ipv6_addr_any(&addr)) | 1677 | if (ipv6_addr_any(&addr)) |
| @@ -1682,7 +1682,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | |||
| 1682 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) | 1682 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) |
| 1683 | { | 1683 | { |
| 1684 | struct in6_addr addr; | 1684 | struct in6_addr addr; |
| 1685 | if (ifp->prefix_len == 127) /* RFC 6164 */ | 1685 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
| 1686 | return; | 1686 | return; |
| 1687 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1687 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
| 1688 | if (ipv6_addr_any(&addr)) | 1688 | if (ipv6_addr_any(&addr)) |
| @@ -2509,7 +2509,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, | |||
| 2509 | struct inet6_ifaddr *ifp; | 2509 | struct inet6_ifaddr *ifp; |
| 2510 | 2510 | ||
| 2511 | ifp = ipv6_add_addr(idev, addr, NULL, plen, | 2511 | ifp = ipv6_add_addr(idev, addr, NULL, plen, |
| 2512 | scope, IFA_F_PERMANENT, 0, 0); | 2512 | scope, IFA_F_PERMANENT, |
| 2513 | INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); | ||
| 2513 | if (!IS_ERR(ifp)) { | 2514 | if (!IS_ERR(ifp)) { |
| 2514 | spin_lock_bh(&ifp->lock); | 2515 | spin_lock_bh(&ifp->lock); |
| 2515 | ifp->flags &= ~IFA_F_TENTATIVE; | 2516 | ifp->flags &= ~IFA_F_TENTATIVE; |
| @@ -2637,7 +2638,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr | |||
| 2637 | #endif | 2638 | #endif |
| 2638 | 2639 | ||
| 2639 | 2640 | ||
| 2640 | ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0); | 2641 | ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, |
| 2642 | INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); | ||
| 2641 | if (!IS_ERR(ifp)) { | 2643 | if (!IS_ERR(ifp)) { |
| 2642 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); | 2644 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); |
| 2643 | addrconf_dad_start(ifp); | 2645 | addrconf_dad_start(ifp); |
| @@ -3456,7 +3458,12 @@ restart: | |||
| 3456 | &inet6_addr_lst[i], addr_lst) { | 3458 | &inet6_addr_lst[i], addr_lst) { |
| 3457 | unsigned long age; | 3459 | unsigned long age; |
| 3458 | 3460 | ||
| 3459 | if (ifp->flags & IFA_F_PERMANENT) | 3461 | /* When preferred_lft is set to a value that is neither zero |
| 3462 | * nor infinity while valid_lft is infinite, an IFA_F_PERMANENT | ||
| 3463 | * address has a finite preferred lifetime. | ||
| 3464 | */ | ||
| 3465 | if ((ifp->flags & IFA_F_PERMANENT) && | ||
| 3466 | (ifp->prefered_lft == INFINITY_LIFE_TIME)) | ||
| 3460 | continue; | 3467 | continue; |
| 3461 | 3468 | ||
| 3462 | spin_lock(&ifp->lock); | 3469 | spin_lock(&ifp->lock); |
| @@ -3481,7 +3488,8 @@ restart: | |||
| 3481 | ifp->flags |= IFA_F_DEPRECATED; | 3488 | ifp->flags |= IFA_F_DEPRECATED; |
| 3482 | } | 3489 | } |
| 3483 | 3490 | ||
| 3484 | if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)) | 3491 | if ((ifp->valid_lft != INFINITY_LIFE_TIME) && |
| 3492 | (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))) | ||
| 3485 | next = ifp->tstamp + ifp->valid_lft * HZ; | 3493 | next = ifp->tstamp + ifp->valid_lft * HZ; |
| 3486 | 3494 | ||
| 3487 | spin_unlock(&ifp->lock); | 3495 | spin_unlock(&ifp->lock); |
| @@ -3761,7 +3769,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, | |||
| 3761 | put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), | 3769 | put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), |
| 3762 | ifa->idev->dev->ifindex); | 3770 | ifa->idev->dev->ifindex); |
| 3763 | 3771 | ||
| 3764 | if (!(ifa->flags&IFA_F_PERMANENT)) { | 3772 | if (!((ifa->flags&IFA_F_PERMANENT) && |
| 3773 | (ifa->prefered_lft == INFINITY_LIFE_TIME))) { | ||
| 3765 | preferred = ifa->prefered_lft; | 3774 | preferred = ifa->prefered_lft; |
| 3766 | valid = ifa->valid_lft; | 3775 | valid = ifa->valid_lft; |
| 3767 | if (preferred != INFINITY_LIFE_TIME) { | 3776 | if (preferred != INFINITY_LIFE_TIME) { |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 4acdb63495db..e6f931997996 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -1193,11 +1193,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
| 1193 | 1193 | ||
| 1194 | fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + | 1194 | fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + |
| 1195 | (opt ? opt->opt_nflen : 0); | 1195 | (opt ? opt->opt_nflen : 0); |
| 1196 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); | 1196 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - |
| 1197 | sizeof(struct frag_hdr); | ||
| 1197 | 1198 | ||
| 1198 | if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { | 1199 | if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { |
| 1199 | if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { | 1200 | unsigned int maxnonfragsize, headersize; |
| 1200 | ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); | 1201 | |
| 1202 | headersize = sizeof(struct ipv6hdr) + | ||
| 1203 | (opt ? opt->tot_len : 0) + | ||
| 1204 | (dst_allfrag(&rt->dst) ? | ||
| 1205 | sizeof(struct frag_hdr) : 0) + | ||
| 1206 | rt->rt6i_nfheader_len; | ||
| 1207 | |||
| 1208 | maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ? | ||
| 1209 | mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN; | ||
| 1210 | |||
| 1211 | /* dontfrag active */ | ||
| 1212 | if ((cork->length + length > mtu - headersize) && dontfrag && | ||
| 1213 | (sk->sk_protocol == IPPROTO_UDP || | ||
| 1214 | sk->sk_protocol == IPPROTO_RAW)) { | ||
| 1215 | ipv6_local_rxpmtu(sk, fl6, mtu - headersize + | ||
| 1216 | sizeof(struct ipv6hdr)); | ||
| 1217 | goto emsgsize; | ||
| 1218 | } | ||
| 1219 | |||
| 1220 | if (cork->length + length > maxnonfragsize - headersize) { | ||
| 1221 | emsgsize: | ||
| 1222 | ipv6_local_error(sk, EMSGSIZE, fl6, | ||
| 1223 | mtu - headersize + | ||
| 1224 | sizeof(struct ipv6hdr)); | ||
| 1201 | return -EMSGSIZE; | 1225 | return -EMSGSIZE; |
| 1202 | } | 1226 | } |
| 1203 | } | 1227 | } |
| @@ -1222,12 +1246,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
| 1222 | * --yoshfuji | 1246 | * --yoshfuji |
| 1223 | */ | 1247 | */ |
| 1224 | 1248 | ||
| 1225 | if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP || | ||
| 1226 | sk->sk_protocol == IPPROTO_RAW)) { | ||
| 1227 | ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); | ||
| 1228 | return -EMSGSIZE; | ||
| 1229 | } | ||
| 1230 | |||
| 1231 | skb = skb_peek_tail(&sk->sk_write_queue); | 1249 | skb = skb_peek_tail(&sk->sk_write_queue); |
| 1232 | cork->length += length; | 1250 | cork->length += length; |
| 1233 | if (((length > mtu) || | 1251 | if (((length > mtu) || |
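
The ip6_append_data() hunk computes the per-packet overhead once ("headersize") and uses it both for the dontfrag/RXPMTU path and for the EMSGSIZE limit; the value reported back to the socket is the usable payload plus the basic IPv6 header. A small recomputation of those quantities under stated assumptions (opt_tot_len and nfheader_len are placeholders for opt->tot_len and rt->rt6i_nfheader_len):

#include <stdio.h>
#include <stdbool.h>

#define IPV6_HDRLEN	40	/* sizeof(struct ipv6hdr) */
#define FRAG_HDRLEN	 8	/* sizeof(struct frag_hdr) */

static unsigned int headersize(unsigned int opt_tot_len, bool allfrag,
			       unsigned int nfheader_len)
{
	return IPV6_HDRLEN + opt_tot_len +
	       (allfrag ? FRAG_HDRLEN : 0) + nfheader_len;
}

int main(void)
{
	unsigned int mtu = 1280;
	unsigned int hdr = headersize(0, false, 0);

	/* both error paths report mtu - headersize + sizeof(struct ipv6hdr),
	 * i.e. the largest datagram, base header included, that still fits */
	printf("headersize=%u reported_mtu=%u\n", hdr, mtu - hdr + IPV6_HDRLEN);
	return 0;
}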
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index d6062325db08..7881965a8248 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -103,16 +103,25 @@ struct ip6_tnl_net { | |||
| 103 | 103 | ||
| 104 | static struct net_device_stats *ip6_get_stats(struct net_device *dev) | 104 | static struct net_device_stats *ip6_get_stats(struct net_device *dev) |
| 105 | { | 105 | { |
| 106 | struct pcpu_tstats sum = { 0 }; | 106 | struct pcpu_tstats tmp, sum = { 0 }; |
| 107 | int i; | 107 | int i; |
| 108 | 108 | ||
| 109 | for_each_possible_cpu(i) { | 109 | for_each_possible_cpu(i) { |
| 110 | unsigned int start; | ||
| 110 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); | 111 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); |
| 111 | 112 | ||
| 112 | sum.rx_packets += tstats->rx_packets; | 113 | do { |
| 113 | sum.rx_bytes += tstats->rx_bytes; | 114 | start = u64_stats_fetch_begin_bh(&tstats->syncp); |
| 114 | sum.tx_packets += tstats->tx_packets; | 115 | tmp.rx_packets = tstats->rx_packets; |
| 115 | sum.tx_bytes += tstats->tx_bytes; | 116 | tmp.rx_bytes = tstats->rx_bytes; |
| 117 | tmp.tx_packets = tstats->tx_packets; | ||
| 118 | tmp.tx_bytes = tstats->tx_bytes; | ||
| 119 | } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); | ||
| 120 | |||
| 121 | sum.rx_packets += tmp.rx_packets; | ||
| 122 | sum.rx_bytes += tmp.rx_bytes; | ||
| 123 | sum.tx_packets += tmp.tx_packets; | ||
| 124 | sum.tx_bytes += tmp.tx_bytes; | ||
| 116 | } | 125 | } |
| 117 | dev->stats.rx_packets = sum.rx_packets; | 126 | dev->stats.rx_packets = sum.rx_packets; |
| 118 | dev->stats.rx_bytes = sum.rx_bytes; | 127 | dev->stats.rx_bytes = sum.rx_bytes; |
| @@ -824,8 +833,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
| 824 | } | 833 | } |
| 825 | 834 | ||
| 826 | tstats = this_cpu_ptr(t->dev->tstats); | 835 | tstats = this_cpu_ptr(t->dev->tstats); |
| 836 | u64_stats_update_begin(&tstats->syncp); | ||
| 827 | tstats->rx_packets++; | 837 | tstats->rx_packets++; |
| 828 | tstats->rx_bytes += skb->len; | 838 | tstats->rx_bytes += skb->len; |
| 839 | u64_stats_update_end(&tstats->syncp); | ||
| 829 | 840 | ||
| 830 | netif_rx(skb); | 841 | netif_rx(skb); |
| 831 | 842 | ||
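
The ip6_tunnel.c hunks wrap the per-cpu counters in the u64_stats sequence counter: the writer brackets its increments with update_begin/end and the reader retries until it sees an even, unchanged sequence, so a 32-bit reader never observes a torn 64-bit value. A single-writer userspace sketch of the same discipline (toy code, not the kernel u64_stats API, with simplified memory ordering):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct tstats {
	atomic_uint seq;		/* odd while an update is in flight */
	uint64_t rx_packets, rx_bytes;
};

static void stats_add(struct tstats *s, uint64_t bytes)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	s->rx_packets++;
	s->rx_bytes += bytes;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static void stats_read(struct tstats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct tstats s = { 0 };
	uint64_t p, b;

	stats_add(&s, 1500);
	stats_read(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}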
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index ed94ba61dda0..7b42d5ef868d 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
| @@ -75,26 +75,6 @@ struct vti6_net { | |||
| 75 | struct ip6_tnl __rcu **tnls[2]; | 75 | struct ip6_tnl __rcu **tnls[2]; |
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | static struct net_device_stats *vti6_get_stats(struct net_device *dev) | ||
| 79 | { | ||
| 80 | struct pcpu_tstats sum = { 0 }; | ||
| 81 | int i; | ||
| 82 | |||
| 83 | for_each_possible_cpu(i) { | ||
| 84 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); | ||
| 85 | |||
| 86 | sum.rx_packets += tstats->rx_packets; | ||
| 87 | sum.rx_bytes += tstats->rx_bytes; | ||
| 88 | sum.tx_packets += tstats->tx_packets; | ||
| 89 | sum.tx_bytes += tstats->tx_bytes; | ||
| 90 | } | ||
| 91 | dev->stats.rx_packets = sum.rx_packets; | ||
| 92 | dev->stats.rx_bytes = sum.rx_bytes; | ||
| 93 | dev->stats.tx_packets = sum.tx_packets; | ||
| 94 | dev->stats.tx_bytes = sum.tx_bytes; | ||
| 95 | return &dev->stats; | ||
| 96 | } | ||
| 97 | |||
| 98 | #define for_each_vti6_tunnel_rcu(start) \ | 78 | #define for_each_vti6_tunnel_rcu(start) \ |
| 99 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | 79 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) |
| 100 | 80 | ||
| @@ -331,8 +311,10 @@ static int vti6_rcv(struct sk_buff *skb) | |||
| 331 | } | 311 | } |
| 332 | 312 | ||
| 333 | tstats = this_cpu_ptr(t->dev->tstats); | 313 | tstats = this_cpu_ptr(t->dev->tstats); |
| 314 | u64_stats_update_begin(&tstats->syncp); | ||
| 334 | tstats->rx_packets++; | 315 | tstats->rx_packets++; |
| 335 | tstats->rx_bytes += skb->len; | 316 | tstats->rx_bytes += skb->len; |
| 317 | u64_stats_update_end(&tstats->syncp); | ||
| 336 | 318 | ||
| 337 | skb->mark = 0; | 319 | skb->mark = 0; |
| 338 | secpath_reset(skb); | 320 | secpath_reset(skb); |
| @@ -716,7 +698,7 @@ static const struct net_device_ops vti6_netdev_ops = { | |||
| 716 | .ndo_start_xmit = vti6_tnl_xmit, | 698 | .ndo_start_xmit = vti6_tnl_xmit, |
| 717 | .ndo_do_ioctl = vti6_ioctl, | 699 | .ndo_do_ioctl = vti6_ioctl, |
| 718 | .ndo_change_mtu = vti6_change_mtu, | 700 | .ndo_change_mtu = vti6_change_mtu, |
| 719 | .ndo_get_stats = vti6_get_stats, | 701 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
| 720 | }; | 702 | }; |
| 721 | 703 | ||
| 722 | /** | 704 | /** |
| @@ -750,12 +732,18 @@ static void vti6_dev_setup(struct net_device *dev) | |||
| 750 | static inline int vti6_dev_init_gen(struct net_device *dev) | 732 | static inline int vti6_dev_init_gen(struct net_device *dev) |
| 751 | { | 733 | { |
| 752 | struct ip6_tnl *t = netdev_priv(dev); | 734 | struct ip6_tnl *t = netdev_priv(dev); |
| 735 | int i; | ||
| 753 | 736 | ||
| 754 | t->dev = dev; | 737 | t->dev = dev; |
| 755 | t->net = dev_net(dev); | 738 | t->net = dev_net(dev); |
| 756 | dev->tstats = alloc_percpu(struct pcpu_tstats); | 739 | dev->tstats = alloc_percpu(struct pcpu_tstats); |
| 757 | if (!dev->tstats) | 740 | if (!dev->tstats) |
| 758 | return -ENOMEM; | 741 | return -ENOMEM; |
| 742 | for_each_possible_cpu(i) { | ||
| 743 | struct pcpu_tstats *stats; | ||
| 744 | stats = per_cpu_ptr(dev->tstats, i); | ||
| 745 | u64_stats_init(&stats->syncp); | ||
| 746 | } | ||
| 759 | return 0; | 747 | return 0; |
| 760 | } | 748 | } |
| 761 | 749 | ||
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index f78f41aca8e9..a0d17270117c 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c | |||
| @@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par) | |||
| 446 | static struct xt_target synproxy_tg6_reg __read_mostly = { | 446 | static struct xt_target synproxy_tg6_reg __read_mostly = { |
| 447 | .name = "SYNPROXY", | 447 | .name = "SYNPROXY", |
| 448 | .family = NFPROTO_IPV6, | 448 | .family = NFPROTO_IPV6, |
| 449 | .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), | ||
| 449 | .target = synproxy_tg6, | 450 | .target = synproxy_tg6, |
| 450 | .targetsize = sizeof(struct xt_synproxy_info), | 451 | .targetsize = sizeof(struct xt_synproxy_info), |
| 451 | .checkentry = synproxy_tg6_check, | 452 | .checkentry = synproxy_tg6_check, |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a0a48ac3403f..4b4944c3e4c4 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -1905,9 +1905,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, | |||
| 1905 | else | 1905 | else |
| 1906 | rt->rt6i_gateway = *dest; | 1906 | rt->rt6i_gateway = *dest; |
| 1907 | rt->rt6i_flags = ort->rt6i_flags; | 1907 | rt->rt6i_flags = ort->rt6i_flags; |
| 1908 | if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == | 1908 | rt6_set_from(rt, ort); |
| 1909 | (RTF_DEFAULT | RTF_ADDRCONF)) | ||
| 1910 | rt6_set_from(rt, ort); | ||
| 1911 | rt->rt6i_metric = 0; | 1909 | rt->rt6i_metric = 0; |
| 1912 | 1910 | ||
| 1913 | #ifdef CONFIG_IPV6_SUBTREES | 1911 | #ifdef CONFIG_IPV6_SUBTREES |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 366fbba3359a..d3005b34476a 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
| @@ -702,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
| 702 | } | 702 | } |
| 703 | 703 | ||
| 704 | tstats = this_cpu_ptr(tunnel->dev->tstats); | 704 | tstats = this_cpu_ptr(tunnel->dev->tstats); |
| 705 | u64_stats_update_begin(&tstats->syncp); | ||
| 705 | tstats->rx_packets++; | 706 | tstats->rx_packets++; |
| 706 | tstats->rx_bytes += skb->len; | 707 | tstats->rx_bytes += skb->len; |
| 708 | u64_stats_update_end(&tstats->syncp); | ||
| 707 | 709 | ||
| 708 | netif_rx(skb); | 710 | netif_rx(skb); |
| 709 | 711 | ||
| @@ -924,7 +926,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
| 924 | if (tunnel->parms.iph.daddr && skb_dst(skb)) | 926 | if (tunnel->parms.iph.daddr && skb_dst(skb)) |
| 925 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); | 927 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); |
| 926 | 928 | ||
| 927 | if (skb->len > mtu) { | 929 | if (skb->len > mtu && !skb_is_gso(skb)) { |
| 928 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 930 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
| 929 | ip_rt_put(rt); | 931 | ip_rt_put(rt); |
| 930 | goto tx_error; | 932 | goto tx_error; |
| @@ -966,8 +968,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
| 966 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); | 968 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); |
| 967 | 969 | ||
| 968 | skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); | 970 | skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); |
| 969 | if (IS_ERR(skb)) | 971 | if (IS_ERR(skb)) { |
| 972 | ip_rt_put(rt); | ||
| 970 | goto out; | 973 | goto out; |
| 974 | } | ||
| 971 | 975 | ||
| 972 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, | 976 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, |
| 973 | ttl, df, !net_eq(tunnel->net, dev_net(dev))); | 977 | ttl, df, !net_eq(tunnel->net, dev_net(dev))); |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 7b01b9f5846c..c71b699eb555 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
| @@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 715 | unsigned long cpu_flags; | 715 | unsigned long cpu_flags; |
| 716 | size_t copied = 0; | 716 | size_t copied = 0; |
| 717 | u32 peek_seq = 0; | 717 | u32 peek_seq = 0; |
| 718 | u32 *seq; | 718 | u32 *seq, skb_len; |
| 719 | unsigned long used; | 719 | unsigned long used; |
| 720 | int target; /* Read at least this many bytes */ | 720 | int target; /* Read at least this many bytes */ |
| 721 | long timeo; | 721 | long timeo; |
| @@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 812 | } | 812 | } |
| 813 | continue; | 813 | continue; |
| 814 | found_ok_skb: | 814 | found_ok_skb: |
| 815 | skb_len = skb->len; | ||
| 815 | /* Ok so how much can we use? */ | 816 | /* Ok so how much can we use? */ |
| 816 | used = skb->len - offset; | 817 | used = skb->len - offset; |
| 817 | if (len < used) | 818 | if (len < used) |
| @@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 844 | } | 845 | } |
| 845 | 846 | ||
| 846 | /* Partial read */ | 847 | /* Partial read */ |
| 847 | if (used + offset < skb->len) | 848 | if (used + offset < skb_len) |
| 848 | continue; | 849 | continue; |
| 849 | } while (len > 0); | 850 | } while (len > 0); |
| 850 | 851 | ||
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 36c3a4cbcabf..a0757913046e 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
| @@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev) | |||
| 1061 | } | 1061 | } |
| 1062 | 1062 | ||
| 1063 | static u16 ieee80211_netdev_select_queue(struct net_device *dev, | 1063 | static u16 ieee80211_netdev_select_queue(struct net_device *dev, |
| 1064 | struct sk_buff *skb) | 1064 | struct sk_buff *skb, |
| 1065 | void *accel_priv) | ||
| 1065 | { | 1066 | { |
| 1066 | return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); | 1067 | return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); |
| 1067 | } | 1068 | } |
| @@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = { | |||
| 1078 | }; | 1079 | }; |
| 1079 | 1080 | ||
| 1080 | static u16 ieee80211_monitor_select_queue(struct net_device *dev, | 1081 | static u16 ieee80211_monitor_select_queue(struct net_device *dev, |
| 1081 | struct sk_buff *skb) | 1082 | struct sk_buff *skb, |
| 1083 | void *accel_priv) | ||
| 1082 | { | 1084 | { |
| 1083 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1085 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
| 1084 | struct ieee80211_local *local = sdata->local; | 1086 | struct ieee80211_local *local = sdata->local; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c558b246ef00..ca7fa7f0613d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
| 463 | { | 463 | { |
| 464 | struct sta_info *sta = tx->sta; | 464 | struct sta_info *sta = tx->sta; |
| 465 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 465 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
| 466 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
| 467 | struct ieee80211_local *local = tx->local; | 466 | struct ieee80211_local *local = tx->local; |
| 468 | 467 | ||
| 469 | if (unlikely(!sta)) | 468 | if (unlikely(!sta)) |
| @@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
| 474 | !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { | 473 | !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { |
| 475 | int ac = skb_get_queue_mapping(tx->skb); | 474 | int ac = skb_get_queue_mapping(tx->skb); |
| 476 | 475 | ||
| 477 | /* only deauth, disassoc and action are bufferable MMPDUs */ | ||
| 478 | if (ieee80211_is_mgmt(hdr->frame_control) && | ||
| 479 | !ieee80211_is_deauth(hdr->frame_control) && | ||
| 480 | !ieee80211_is_disassoc(hdr->frame_control) && | ||
| 481 | !ieee80211_is_action(hdr->frame_control)) { | ||
| 482 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; | ||
| 483 | return TX_CONTINUE; | ||
| 484 | } | ||
| 485 | |||
| 486 | ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", | 476 | ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", |
| 487 | sta->sta.addr, sta->sta.aid, ac); | 477 | sta->sta.addr, sta->sta.aid, ac); |
| 488 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) | 478 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) |
| @@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
| 525 | static ieee80211_tx_result debug_noinline | 515 | static ieee80211_tx_result debug_noinline |
| 526 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) | 516 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) |
| 527 | { | 517 | { |
| 518 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
| 519 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
| 520 | |||
| 528 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) | 521 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) |
| 529 | return TX_CONTINUE; | 522 | return TX_CONTINUE; |
| 530 | 523 | ||
| 524 | /* only deauth, disassoc and action are bufferable MMPDUs */ | ||
| 525 | if (ieee80211_is_mgmt(hdr->frame_control) && | ||
| 526 | !ieee80211_is_deauth(hdr->frame_control) && | ||
| 527 | !ieee80211_is_disassoc(hdr->frame_control) && | ||
| 528 | !ieee80211_is_action(hdr->frame_control)) { | ||
| 529 | if (tx->flags & IEEE80211_TX_UNICAST) | ||
| 530 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; | ||
| 531 | return TX_CONTINUE; | ||
| 532 | } | ||
| 533 | |||
| 531 | if (tx->flags & IEEE80211_TX_UNICAST) | 534 | if (tx->flags & IEEE80211_TX_UNICAST) |
| 532 | return ieee80211_tx_h_unicast_ps_buf(tx); | 535 | return ieee80211_tx_h_unicast_ps_buf(tx); |
| 533 | else | 536 | else |
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c index c8beafd401aa..5a355a46d1dc 100644 --- a/net/netfilter/ipvs/ip_vs_nfct.c +++ b/net/netfilter/ipvs/ip_vs_nfct.c | |||
| @@ -63,6 +63,7 @@ | |||
| 63 | #include <net/ip_vs.h> | 63 | #include <net/ip_vs.h> |
| 64 | #include <net/netfilter/nf_conntrack_core.h> | 64 | #include <net/netfilter/nf_conntrack_core.h> |
| 65 | #include <net/netfilter/nf_conntrack_expect.h> | 65 | #include <net/netfilter/nf_conntrack_expect.h> |
| 66 | #include <net/netfilter/nf_conntrack_seqadj.h> | ||
| 66 | #include <net/netfilter/nf_conntrack_helper.h> | 67 | #include <net/netfilter/nf_conntrack_helper.h> |
| 67 | #include <net/netfilter/nf_conntrack_zones.h> | 68 | #include <net/netfilter/nf_conntrack_zones.h> |
| 68 | 69 | ||
| @@ -97,6 +98,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) | |||
| 97 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) | 98 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) |
| 98 | return; | 99 | return; |
| 99 | 100 | ||
| 101 | /* Applications may adjust TCP seqs */ | ||
| 102 | if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP && | ||
| 103 | !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct)) | ||
| 104 | return; | ||
| 105 | |||
| 100 | /* | 106 | /* |
| 101 | * The connection is not yet in the hashtable, so we update it. | 107 | * The connection is not yet in the hashtable, so we update it. |
| 102 | * CIP->VIP will remain the same, so leave the tuple in | 108 | * CIP->VIP will remain the same, so leave the tuple in |
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c index 17c1bcb182c6..f6e2ae91a80b 100644 --- a/net/netfilter/nf_conntrack_seqadj.c +++ b/net/netfilter/nf_conntrack_seqadj.c | |||
| @@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, | |||
| 36 | if (off == 0) | 36 | if (off == 0) |
| 37 | return 0; | 37 | return 0; |
| 38 | 38 | ||
| 39 | if (unlikely(!seqadj)) { | ||
| 40 | WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n"); | ||
| 41 | return 0; | ||
| 42 | } | ||
| 43 | |||
| 39 | set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); | 44 | set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); |
| 40 | 45 | ||
| 41 | spin_lock_bh(&ct->lock); | 46 | spin_lock_bh(&ct->lock); |
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c index 902fb0a6b38a..7a394df0deb7 100644 --- a/net/netfilter/nf_conntrack_timestamp.c +++ b/net/netfilter/nf_conntrack_timestamp.c | |||
| @@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net) | |||
| 97 | void nf_conntrack_tstamp_pernet_fini(struct net *net) | 97 | void nf_conntrack_tstamp_pernet_fini(struct net *net) |
| 98 | { | 98 | { |
| 99 | nf_conntrack_tstamp_fini_sysctl(net); | 99 | nf_conntrack_tstamp_fini_sysctl(net); |
| 100 | nf_ct_extend_unregister(&tstamp_extend); | ||
| 101 | } | 100 | } |
| 102 | 101 | ||
| 103 | int nf_conntrack_tstamp_init(void) | 102 | int nf_conntrack_tstamp_init(void) |
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c index f02b3605823e..1fb2258c3535 100644 --- a/net/netfilter/nf_nat_irc.c +++ b/net/netfilter/nf_nat_irc.c | |||
| @@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb, | |||
| 34 | struct nf_conntrack_expect *exp) | 34 | struct nf_conntrack_expect *exp) |
| 35 | { | 35 | { |
| 36 | char buffer[sizeof("4294967296 65635")]; | 36 | char buffer[sizeof("4294967296 65635")]; |
| 37 | struct nf_conn *ct = exp->master; | ||
| 38 | union nf_inet_addr newaddr; | ||
| 37 | u_int16_t port; | 39 | u_int16_t port; |
| 38 | unsigned int ret; | 40 | unsigned int ret; |
| 39 | 41 | ||
| 40 | /* Reply comes from server. */ | 42 | /* Reply comes from server. */ |
| 43 | newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; | ||
| 44 | |||
| 41 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | 45 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; |
| 42 | exp->dir = IP_CT_DIR_REPLY; | 46 | exp->dir = IP_CT_DIR_REPLY; |
| 43 | exp->expectfn = nf_nat_follow_master; | 47 | exp->expectfn = nf_nat_follow_master; |
| @@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb, | |||
| 57 | } | 61 | } |
| 58 | 62 | ||
| 59 | if (port == 0) { | 63 | if (port == 0) { |
| 60 | nf_ct_helper_log(skb, exp->master, "all ports in use"); | 64 | nf_ct_helper_log(skb, ct, "all ports in use"); |
| 61 | return NF_DROP; | 65 | return NF_DROP; |
| 62 | } | 66 | } |
| 63 | 67 | ||
| 64 | ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, | 68 | /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 |
| 65 | protoff, matchoff, matchlen, buffer, | 69 | * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 |
| 66 | strlen(buffer)); | 70 | * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26 |
| 71 | * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26 | ||
| 72 | * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27 | ||
| 73 | * | ||
| 74 | * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits, | ||
| 75 | * 255.255.255.255==4294967296, 10 digits) | ||
| 76 | * P: bound port (min 1 d, max 5d (65635)) | ||
| 77 | * F: filename (min 1 d ) | ||
| 78 | * S: size (min 1 d ) | ||
| 79 | * 0x01, \n: terminators | ||
| 80 | */ | ||
| 81 | /* AAA = "us", ie. where server normally talks to. */ | ||
| 82 | snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port); | ||
| 83 | pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", | ||
| 84 | buffer, &newaddr.ip, port); | ||
| 85 | |||
| 86 | ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, | ||
| 87 | matchlen, buffer, strlen(buffer)); | ||
| 67 | if (ret != NF_ACCEPT) { | 88 | if (ret != NF_ACCEPT) { |
| 68 | nf_ct_helper_log(skb, exp->master, "cannot mangle packet"); | 89 | nf_ct_helper_log(skb, ct, "cannot mangle packet"); |
| 69 | nf_ct_unexpect_related(exp); | 90 | nf_ct_unexpect_related(exp); |
| 70 | } | 91 | } |
| 92 | |||
| 71 | return ret; | 93 | return ret; |
| 72 | } | 94 | } |
| 73 | 95 | ||
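In the reworked IRC NAT helper above, the mangled DCC payload carries the NATed endpoint as "<address> <port>", where the address is the IPv4 reply address printed as a single host-byte-order decimal number (which is why the buffer-size comment budgets up to ten digits for it). A small userspace sketch of that encoding, using an illustrative address and port rather than values taken from the patch:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	struct in_addr addr;
	unsigned int port = 6667;	/* illustrative port, not from the patch */
	char buffer[32];

	/* Illustrative address; DCC sends it as one decimal number. */
	inet_pton(AF_INET, "192.168.1.2", &addr);
	snprintf(buffer, sizeof(buffer), "%u %u", ntohl(addr.s_addr), port);
	printf("%s\n", buffer);		/* prints "3232235778 6667" */
	return 0;
}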
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index f93b7d06f4be..71a9f49a768b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -312,6 +312,9 @@ static int nf_tables_table_enable(struct nft_table *table) | |||
| 312 | int err, i = 0; | 312 | int err, i = 0; |
| 313 | 313 | ||
| 314 | list_for_each_entry(chain, &table->chains, list) { | 314 | list_for_each_entry(chain, &table->chains, list) { |
| 315 | if (!(chain->flags & NFT_BASE_CHAIN)) | ||
| 316 | continue; | ||
| 317 | |||
| 315 | err = nf_register_hook(&nft_base_chain(chain)->ops); | 318 | err = nf_register_hook(&nft_base_chain(chain)->ops); |
| 316 | if (err < 0) | 319 | if (err < 0) |
| 317 | goto err; | 320 | goto err; |
| @@ -321,6 +324,9 @@ static int nf_tables_table_enable(struct nft_table *table) | |||
| 321 | return 0; | 324 | return 0; |
| 322 | err: | 325 | err: |
| 323 | list_for_each_entry(chain, &table->chains, list) { | 326 | list_for_each_entry(chain, &table->chains, list) { |
| 327 | if (!(chain->flags & NFT_BASE_CHAIN)) | ||
| 328 | continue; | ||
| 329 | |||
| 324 | if (i-- <= 0) | 330 | if (i-- <= 0) |
| 325 | break; | 331 | break; |
| 326 | 332 | ||
| @@ -333,8 +339,10 @@ static int nf_tables_table_disable(struct nft_table *table) | |||
| 333 | { | 339 | { |
| 334 | struct nft_chain *chain; | 340 | struct nft_chain *chain; |
| 335 | 341 | ||
| 336 | list_for_each_entry(chain, &table->chains, list) | 342 | list_for_each_entry(chain, &table->chains, list) { |
| 337 | nf_unregister_hook(&nft_base_chain(chain)->ops); | 343 | if (chain->flags & NFT_BASE_CHAIN) |
| 344 | nf_unregister_hook(&nft_base_chain(chain)->ops); | ||
| 345 | } | ||
| 338 | 346 | ||
| 339 | return 0; | 347 | return 0; |
| 340 | } | 348 | } |
| @@ -2098,17 +2106,21 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb, | |||
| 2098 | struct netlink_callback *cb) | 2106 | struct netlink_callback *cb) |
| 2099 | { | 2107 | { |
| 2100 | const struct nft_set *set; | 2108 | const struct nft_set *set; |
| 2101 | unsigned int idx = 0, s_idx = cb->args[0]; | 2109 | unsigned int idx, s_idx = cb->args[0]; |
| 2102 | struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; | 2110 | struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; |
| 2103 | 2111 | ||
| 2104 | if (cb->args[1]) | 2112 | if (cb->args[1]) |
| 2105 | return skb->len; | 2113 | return skb->len; |
| 2106 | 2114 | ||
| 2107 | list_for_each_entry(table, &ctx->afi->tables, list) { | 2115 | list_for_each_entry(table, &ctx->afi->tables, list) { |
| 2108 | if (cur_table && cur_table != table) | 2116 | if (cur_table) { |
| 2109 | continue; | 2117 | if (cur_table != table) |
| 2118 | continue; | ||
| 2110 | 2119 | ||
| 2120 | cur_table = NULL; | ||
| 2121 | } | ||
| 2111 | ctx->table = table; | 2122 | ctx->table = table; |
| 2123 | idx = 0; | ||
| 2112 | list_for_each_entry(set, &ctx->table->sets, list) { | 2124 | list_for_each_entry(set, &ctx->table->sets, list) { |
| 2113 | if (idx < s_idx) | 2125 | if (idx < s_idx) |
| 2114 | goto cont; | 2126 | goto cont; |
| @@ -2370,7 +2382,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, | |||
| 2370 | enum nft_registers dreg; | 2382 | enum nft_registers dreg; |
| 2371 | 2383 | ||
| 2372 | dreg = nft_type_to_reg(set->dtype); | 2384 | dreg = nft_type_to_reg(set->dtype); |
| 2373 | return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype); | 2385 | return nft_validate_data_load(ctx, dreg, &elem->data, |
| 2386 | set->dtype == NFT_DATA_VERDICT ? | ||
| 2387 | NFT_DATA_VERDICT : NFT_DATA_VALUE); | ||
| 2374 | } | 2388 | } |
| 2375 | 2389 | ||
| 2376 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | 2390 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 3c4b69e5fe17..a155d19a225e 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
| @@ -1053,6 +1053,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net) | |||
| 1053 | #ifdef CONFIG_PROC_FS | 1053 | #ifdef CONFIG_PROC_FS |
| 1054 | remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); | 1054 | remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); |
| 1055 | #endif | 1055 | #endif |
| 1056 | nf_log_unset(net, &nfulnl_logger); | ||
| 1056 | } | 1057 | } |
| 1057 | 1058 | ||
| 1058 | static struct pernet_operations nfnl_log_net_ops = { | 1059 | static struct pernet_operations nfnl_log_net_ops = { |
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index 8e0bb75e7c51..55c939f5371f 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c | |||
| @@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr, | |||
| 31 | { | 31 | { |
| 32 | struct nft_exthdr *priv = nft_expr_priv(expr); | 32 | struct nft_exthdr *priv = nft_expr_priv(expr); |
| 33 | struct nft_data *dest = &data[priv->dreg]; | 33 | struct nft_data *dest = &data[priv->dreg]; |
| 34 | unsigned int offset; | 34 | unsigned int offset = 0; |
| 35 | int err; | 35 | int err; |
| 36 | 36 | ||
| 37 | err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); | 37 | err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); |
diff --git a/net/nfc/core.c b/net/nfc/core.c index 872529105abc..83b9927e7d19 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c | |||
| @@ -384,7 +384,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, | |||
| 384 | { | 384 | { |
| 385 | dev->dep_link_up = true; | 385 | dev->dep_link_up = true; |
| 386 | 386 | ||
| 387 | if (!dev->active_target) { | 387 | if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) { |
| 388 | struct nfc_target *target; | 388 | struct nfc_target *target; |
| 389 | 389 | ||
| 390 | target = nfc_find_target(dev, target_idx); | 390 | target = nfc_find_target(dev, target_idx); |
diff --git a/net/rds/ib.c b/net/rds/ib.c index b4c8b0022fee..ba2dffeff608 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
| @@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr) | |||
| 338 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); | 338 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); |
| 339 | /* due to this, we will claim to support iWARP devices unless we | 339 | /* due to this, we will claim to support iWARP devices unless we |
| 340 | check node_type. */ | 340 | check node_type. */ |
| 341 | if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA) | 341 | if (ret || !cm_id->device || |
| 342 | cm_id->device->node_type != RDMA_NODE_IB_CA) | ||
| 342 | ret = -EADDRNOTAVAIL; | 343 | ret = -EADDRNOTAVAIL; |
| 343 | 344 | ||
| 344 | rdsdebug("addr %pI4 ret %d node type %d\n", | 345 | rdsdebug("addr %pI4 ret %d node type %d\n", |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 33af77246bfe..62ced6516c58 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
| @@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1253 | 1253 | ||
| 1254 | if (msg->msg_name) { | 1254 | if (msg->msg_name) { |
| 1255 | struct sockaddr_rose *srose; | 1255 | struct sockaddr_rose *srose; |
| 1256 | struct full_sockaddr_rose *full_srose = msg->msg_name; | ||
| 1256 | 1257 | ||
| 1257 | memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); | 1258 | memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); |
| 1258 | srose = msg->msg_name; | 1259 | srose = msg->msg_name; |
| @@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1260 | srose->srose_addr = rose->dest_addr; | 1261 | srose->srose_addr = rose->dest_addr; |
| 1261 | srose->srose_call = rose->dest_call; | 1262 | srose->srose_call = rose->dest_call; |
| 1262 | srose->srose_ndigis = rose->dest_ndigis; | 1263 | srose->srose_ndigis = rose->dest_ndigis; |
| 1263 | if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { | 1264 | for (n = 0 ; n < rose->dest_ndigis ; n++) |
| 1264 | struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; | 1265 | full_srose->srose_digis[n] = rose->dest_digis[n]; |
| 1265 | for (n = 0 ; n < rose->dest_ndigis ; n++) | 1266 | msg->msg_namelen = sizeof(struct full_sockaddr_rose); |
| 1266 | full_srose->srose_digis[n] = rose->dest_digis[n]; | ||
| 1267 | msg->msg_namelen = sizeof(struct full_sockaddr_rose); | ||
| 1268 | } else { | ||
| 1269 | if (rose->dest_ndigis >= 1) { | ||
| 1270 | srose->srose_ndigis = 1; | ||
| 1271 | srose->srose_digi = rose->dest_digis[0]; | ||
| 1272 | } | ||
| 1273 | msg->msg_namelen = sizeof(struct sockaddr_rose); | ||
| 1274 | } | ||
| 1275 | } | 1267 | } |
| 1276 | 1268 | ||
| 1277 | skb_free_datagram(sk, skb); | 1269 | skb_free_datagram(sk, skb); |
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 5c5edf56adbd..11fe1a416433 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
| @@ -77,16 +77,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est, | |||
| 77 | &csum_idx_gen, &csum_hash_info); | 77 | &csum_idx_gen, &csum_hash_info); |
| 78 | if (IS_ERR(pc)) | 78 | if (IS_ERR(pc)) |
| 79 | return PTR_ERR(pc); | 79 | return PTR_ERR(pc); |
| 80 | p = to_tcf_csum(pc); | ||
| 81 | ret = ACT_P_CREATED; | 80 | ret = ACT_P_CREATED; |
| 82 | } else { | 81 | } else { |
| 83 | p = to_tcf_csum(pc); | 82 | if (bind)/* dont override defaults */ |
| 84 | if (!ovr) { | 83 | return 0; |
| 85 | tcf_hash_release(pc, bind, &csum_hash_info); | 84 | tcf_hash_release(pc, bind, &csum_hash_info); |
| 85 | if (!ovr) | ||
| 86 | return -EEXIST; | 86 | return -EEXIST; |
| 87 | } | ||
| 88 | } | 87 | } |
| 89 | 88 | ||
| 89 | p = to_tcf_csum(pc); | ||
| 90 | spin_lock_bh(&p->tcf_lock); | 90 | spin_lock_bh(&p->tcf_lock); |
| 91 | p->tcf_action = parm->action; | 91 | p->tcf_action = parm->action; |
| 92 | p->update_flags = parm->update_flags; | 92 | p->update_flags = parm->update_flags; |
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 5645a4d32abd..eb9ba60ebab4 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
| @@ -102,10 +102,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, | |||
| 102 | return PTR_ERR(pc); | 102 | return PTR_ERR(pc); |
| 103 | ret = ACT_P_CREATED; | 103 | ret = ACT_P_CREATED; |
| 104 | } else { | 104 | } else { |
| 105 | if (!ovr) { | 105 | if (bind)/* dont override defaults */ |
| 106 | tcf_hash_release(pc, bind, &gact_hash_info); | 106 | return 0; |
| 107 | tcf_hash_release(pc, bind, &gact_hash_info); | ||
| 108 | if (!ovr) | ||
| 107 | return -EEXIST; | 109 | return -EEXIST; |
| 108 | } | ||
| 109 | } | 110 | } |
| 110 | 111 | ||
| 111 | gact = to_gact(pc); | 112 | gact = to_gact(pc); |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 882a89762f77..dcbfe8ce04a6 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
| @@ -141,10 +141,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est, | |||
| 141 | return PTR_ERR(pc); | 141 | return PTR_ERR(pc); |
| 142 | ret = ACT_P_CREATED; | 142 | ret = ACT_P_CREATED; |
| 143 | } else { | 143 | } else { |
| 144 | if (!ovr) { | 144 | if (bind)/* dont override defaults */ |
| 145 | tcf_ipt_release(to_ipt(pc), bind); | 145 | return 0; |
| 146 | tcf_ipt_release(to_ipt(pc), bind); | ||
| 147 | |||
| 148 | if (!ovr) | ||
| 146 | return -EEXIST; | 149 | return -EEXIST; |
| 147 | } | ||
| 148 | } | 150 | } |
| 149 | ipt = to_ipt(pc); | 151 | ipt = to_ipt(pc); |
| 150 | 152 | ||
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 6a15ace00241..76869538d028 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
| @@ -70,15 +70,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, | |||
| 70 | &nat_idx_gen, &nat_hash_info); | 70 | &nat_idx_gen, &nat_hash_info); |
| 71 | if (IS_ERR(pc)) | 71 | if (IS_ERR(pc)) |
| 72 | return PTR_ERR(pc); | 72 | return PTR_ERR(pc); |
| 73 | p = to_tcf_nat(pc); | ||
| 74 | ret = ACT_P_CREATED; | 73 | ret = ACT_P_CREATED; |
| 75 | } else { | 74 | } else { |
| 76 | p = to_tcf_nat(pc); | 75 | if (bind) |
| 77 | if (!ovr) { | 76 | return 0; |
| 78 | tcf_hash_release(pc, bind, &nat_hash_info); | 77 | tcf_hash_release(pc, bind, &nat_hash_info); |
| 78 | if (!ovr) | ||
| 79 | return -EEXIST; | 79 | return -EEXIST; |
| 80 | } | ||
| 81 | } | 80 | } |
| 81 | p = to_tcf_nat(pc); | ||
| 82 | 82 | ||
| 83 | spin_lock_bh(&p->tcf_lock); | 83 | spin_lock_bh(&p->tcf_lock); |
| 84 | p->old_addr = parm->old_addr; | 84 | p->old_addr = parm->old_addr; |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 03b67674169c..7aa2dcd989f8 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
| @@ -84,10 +84,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, | |||
| 84 | ret = ACT_P_CREATED; | 84 | ret = ACT_P_CREATED; |
| 85 | } else { | 85 | } else { |
| 86 | p = to_pedit(pc); | 86 | p = to_pedit(pc); |
| 87 | if (!ovr) { | 87 | tcf_hash_release(pc, bind, &pedit_hash_info); |
| 88 | tcf_hash_release(pc, bind, &pedit_hash_info); | 88 | if (bind) |
| 89 | return 0; | ||
| 90 | if (!ovr) | ||
| 89 | return -EEXIST; | 91 | return -EEXIST; |
| 90 | } | 92 | |
| 91 | if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { | 93 | if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { |
| 92 | keys = kmalloc(ksize, GFP_KERNEL); | 94 | keys = kmalloc(ksize, GFP_KERNEL); |
| 93 | if (keys == NULL) | 95 | if (keys == NULL) |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 16a62c36928a..ef246d87e68b 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -177,10 +177,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, | |||
| 177 | if (bind) { | 177 | if (bind) { |
| 178 | police->tcf_bindcnt += 1; | 178 | police->tcf_bindcnt += 1; |
| 179 | police->tcf_refcnt += 1; | 179 | police->tcf_refcnt += 1; |
| 180 | return 0; | ||
| 180 | } | 181 | } |
| 181 | if (ovr) | 182 | if (ovr) |
| 182 | goto override; | 183 | goto override; |
| 183 | return ret; | 184 | /* not replacing */ |
| 185 | return -EEXIST; | ||
| 184 | } | 186 | } |
| 185 | } | 187 | } |
| 186 | 188 | ||
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 31157d3e729c..f7b45ab85388 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
| @@ -142,10 +142,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, | |||
| 142 | ret = ACT_P_CREATED; | 142 | ret = ACT_P_CREATED; |
| 143 | } else { | 143 | } else { |
| 144 | d = to_defact(pc); | 144 | d = to_defact(pc); |
| 145 | if (!ovr) { | 145 | |
| 146 | tcf_simp_release(d, bind); | 146 | if (bind) |
| 147 | return 0; | ||
| 148 | tcf_simp_release(d, bind); | ||
| 149 | if (!ovr) | ||
| 147 | return -EEXIST; | 150 | return -EEXIST; |
| 148 | } | 151 | |
| 149 | reset_policy(d, defdata, parm); | 152 | reset_policy(d, defdata, parm); |
| 150 | } | 153 | } |
| 151 | 154 | ||
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 35ea643b4325..8fe9d25c3008 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
| @@ -120,10 +120,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, | |||
| 120 | ret = ACT_P_CREATED; | 120 | ret = ACT_P_CREATED; |
| 121 | } else { | 121 | } else { |
| 122 | d = to_skbedit(pc); | 122 | d = to_skbedit(pc); |
| 123 | if (!ovr) { | 123 | if (bind) |
| 124 | tcf_hash_release(pc, bind, &skbedit_hash_info); | 124 | return 0; |
| 125 | tcf_hash_release(pc, bind, &skbedit_hash_info); | ||
| 126 | if (!ovr) | ||
| 125 | return -EEXIST; | 127 | return -EEXIST; |
| 126 | } | ||
| 127 | } | 128 | } |
| 128 | 129 | ||
| 129 | spin_lock_bh(&d->tcf_lock); | 130 | spin_lock_bh(&d->tcf_lock); |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 922a09406ba7..7fc899a943a8 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
| @@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, | |||
| 126 | 126 | ||
| 127 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | 127 | HARD_TX_LOCK(dev, txq, smp_processor_id()); |
| 128 | if (!netif_xmit_frozen_or_stopped(txq)) | 128 | if (!netif_xmit_frozen_or_stopped(txq)) |
| 129 | ret = dev_hard_start_xmit(skb, dev, txq, NULL); | 129 | ret = dev_hard_start_xmit(skb, dev, txq); |
| 130 | 130 | ||
| 131 | HARD_TX_UNLOCK(dev, txq); | 131 | HARD_TX_UNLOCK(dev, txq); |
| 132 | 132 | ||
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index f51ba985a36e..59268f6e2c36 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
| @@ -208,8 +208,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) | |||
| 208 | INIT_LIST_HEAD(&q->retransmit); | 208 | INIT_LIST_HEAD(&q->retransmit); |
| 209 | INIT_LIST_HEAD(&q->sacked); | 209 | INIT_LIST_HEAD(&q->sacked); |
| 210 | INIT_LIST_HEAD(&q->abandoned); | 210 | INIT_LIST_HEAD(&q->abandoned); |
| 211 | |||
| 212 | q->empty = 1; | ||
| 213 | } | 211 | } |
| 214 | 212 | ||
| 215 | /* Free the outqueue structure and any related pending chunks. | 213 | /* Free the outqueue structure and any related pending chunks. |
| @@ -332,7 +330,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
| 332 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); | 330 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); |
| 333 | else | 331 | else |
| 334 | SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); | 332 | SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); |
| 335 | q->empty = 0; | ||
| 336 | break; | 333 | break; |
| 337 | } | 334 | } |
| 338 | } else { | 335 | } else { |
| @@ -654,7 +651,6 @@ redo: | |||
| 654 | if (chunk->fast_retransmit == SCTP_NEED_FRTX) | 651 | if (chunk->fast_retransmit == SCTP_NEED_FRTX) |
| 655 | chunk->fast_retransmit = SCTP_DONT_FRTX; | 652 | chunk->fast_retransmit = SCTP_DONT_FRTX; |
| 656 | 653 | ||
| 657 | q->empty = 0; | ||
| 658 | q->asoc->stats.rtxchunks++; | 654 | q->asoc->stats.rtxchunks++; |
| 659 | break; | 655 | break; |
| 660 | } | 656 | } |
| @@ -1065,8 +1061,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
| 1065 | 1061 | ||
| 1066 | sctp_transport_reset_timers(transport); | 1062 | sctp_transport_reset_timers(transport); |
| 1067 | 1063 | ||
| 1068 | q->empty = 0; | ||
| 1069 | |||
| 1070 | /* Only let one DATA chunk get bundled with a | 1064 | /* Only let one DATA chunk get bundled with a |
| 1071 | * COOKIE-ECHO chunk. | 1065 | * COOKIE-ECHO chunk. |
| 1072 | */ | 1066 | */ |
| @@ -1275,29 +1269,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
| 1275 | "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, | 1269 | "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, |
| 1276 | asoc->adv_peer_ack_point); | 1270 | asoc->adv_peer_ack_point); |
| 1277 | 1271 | ||
| 1278 | /* See if all chunks are acked. | 1272 | return sctp_outq_is_empty(q); |
| 1279 | * Make sure the empty queue handler will get run later. | ||
| 1280 | */ | ||
| 1281 | q->empty = (list_empty(&q->out_chunk_list) && | ||
| 1282 | list_empty(&q->retransmit)); | ||
| 1283 | if (!q->empty) | ||
| 1284 | goto finish; | ||
| 1285 | |||
| 1286 | list_for_each_entry(transport, transport_list, transports) { | ||
| 1287 | q->empty = q->empty && list_empty(&transport->transmitted); | ||
| 1288 | if (!q->empty) | ||
| 1289 | goto finish; | ||
| 1290 | } | ||
| 1291 | |||
| 1292 | pr_debug("%s: sack queue is empty\n", __func__); | ||
| 1293 | finish: | ||
| 1294 | return q->empty; | ||
| 1295 | } | 1273 | } |
| 1296 | 1274 | ||
| 1297 | /* Is the outqueue empty? */ | 1275 | /* Is the outqueue empty? |
| 1276 | * The queue is empty when we have not pending data, no in-flight data | ||
| 1277 | * and nothing pending retransmissions. | ||
| 1278 | */ | ||
| 1298 | int sctp_outq_is_empty(const struct sctp_outq *q) | 1279 | int sctp_outq_is_empty(const struct sctp_outq *q) |
| 1299 | { | 1280 | { |
| 1300 | return q->empty; | 1281 | return q->out_qlen == 0 && q->outstanding_bytes == 0 && |
| 1282 | list_empty(&q->retransmit); | ||
| 1301 | } | 1283 | } |
| 1302 | 1284 | ||
| 1303 | /******************************************************************** | 1285 | /******************************************************************** |
diff --git a/net/sctp/probe.c b/net/sctp/probe.c index 53c452efb40b..5e68b94ee640 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <net/sctp/sctp.h> | 38 | #include <net/sctp/sctp.h> |
| 39 | #include <net/sctp/sm.h> | 39 | #include <net/sctp/sm.h> |
| 40 | 40 | ||
| 41 | MODULE_SOFTDEP("pre: sctp"); | ||
| 41 | MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); | 42 | MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); |
| 42 | MODULE_DESCRIPTION("SCTP snooper"); | 43 | MODULE_DESCRIPTION("SCTP snooper"); |
| 43 | MODULE_LICENSE("GPL"); | 44 | MODULE_LICENSE("GPL"); |
| @@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = { | |||
| 182 | .entry = jsctp_sf_eat_sack, | 183 | .entry = jsctp_sf_eat_sack, |
| 183 | }; | 184 | }; |
| 184 | 185 | ||
| 186 | static __init int sctp_setup_jprobe(void) | ||
| 187 | { | ||
| 188 | int ret = register_jprobe(&sctp_recv_probe); | ||
| 189 | |||
| 190 | if (ret) { | ||
| 191 | if (request_module("sctp")) | ||
| 192 | goto out; | ||
| 193 | ret = register_jprobe(&sctp_recv_probe); | ||
| 194 | } | ||
| 195 | |||
| 196 | out: | ||
| 197 | return ret; | ||
| 198 | } | ||
| 199 | |||
| 185 | static __init int sctpprobe_init(void) | 200 | static __init int sctpprobe_init(void) |
| 186 | { | 201 | { |
| 187 | int ret = -ENOMEM; | 202 | int ret = -ENOMEM; |
| @@ -202,7 +217,7 @@ static __init int sctpprobe_init(void) | |||
| 202 | &sctpprobe_fops)) | 217 | &sctpprobe_fops)) |
| 203 | goto free_kfifo; | 218 | goto free_kfifo; |
| 204 | 219 | ||
| 205 | ret = register_jprobe(&sctp_recv_probe); | 220 | ret = sctp_setup_jprobe(); |
| 206 | if (ret) | 221 | if (ret) |
| 207 | goto remove_proc; | 222 | goto remove_proc; |
| 208 | 223 | ||
diff --git a/net/tipc/link.c b/net/tipc/link.c index 69cd9bf3f561..13b987745820 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
| @@ -1498,6 +1498,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) | |||
| 1498 | int type; | 1498 | int type; |
| 1499 | 1499 | ||
| 1500 | head = head->next; | 1500 | head = head->next; |
| 1501 | buf->next = NULL; | ||
| 1501 | 1502 | ||
| 1502 | /* Ensure bearer is still enabled */ | 1503 | /* Ensure bearer is still enabled */ |
| 1503 | if (unlikely(!b_ptr->active)) | 1504 | if (unlikely(!b_ptr->active)) |
diff --git a/net/tipc/port.c b/net/tipc/port.c index c081a7632302..d43f3182b1d4 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c | |||
| @@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk, | |||
| 251 | return p_ptr; | 251 | return p_ptr; |
| 252 | } | 252 | } |
| 253 | 253 | ||
| 254 | int tipc_deleteport(u32 ref) | 254 | int tipc_deleteport(struct tipc_port *p_ptr) |
| 255 | { | 255 | { |
| 256 | struct tipc_port *p_ptr; | ||
| 257 | struct sk_buff *buf = NULL; | 256 | struct sk_buff *buf = NULL; |
| 258 | 257 | ||
| 259 | tipc_withdraw(ref, 0, NULL); | 258 | tipc_withdraw(p_ptr, 0, NULL); |
| 260 | p_ptr = tipc_port_lock(ref); | ||
| 261 | if (!p_ptr) | ||
| 262 | return -EINVAL; | ||
| 263 | 259 | ||
| 264 | tipc_ref_discard(ref); | 260 | spin_lock_bh(p_ptr->lock); |
| 265 | tipc_port_unlock(p_ptr); | 261 | tipc_ref_discard(p_ptr->ref); |
| 262 | spin_unlock_bh(p_ptr->lock); | ||
| 266 | 263 | ||
| 267 | k_cancel_timer(&p_ptr->timer); | 264 | k_cancel_timer(&p_ptr->timer); |
| 268 | if (p_ptr->connected) { | 265 | if (p_ptr->connected) { |
| @@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp) | |||
| 704 | } | 701 | } |
| 705 | 702 | ||
| 706 | 703 | ||
| 707 | int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | 704 | int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, |
| 705 | struct tipc_name_seq const *seq) | ||
| 708 | { | 706 | { |
| 709 | struct tipc_port *p_ptr; | ||
| 710 | struct publication *publ; | 707 | struct publication *publ; |
| 711 | u32 key; | 708 | u32 key; |
| 712 | int res = -EINVAL; | ||
| 713 | 709 | ||
| 714 | p_ptr = tipc_port_lock(ref); | 710 | if (p_ptr->connected) |
| 715 | if (!p_ptr) | ||
| 716 | return -EINVAL; | 711 | return -EINVAL; |
| 712 | key = p_ptr->ref + p_ptr->pub_count + 1; | ||
| 713 | if (key == p_ptr->ref) | ||
| 714 | return -EADDRINUSE; | ||
| 717 | 715 | ||
| 718 | if (p_ptr->connected) | ||
| 719 | goto exit; | ||
| 720 | key = ref + p_ptr->pub_count + 1; | ||
| 721 | if (key == ref) { | ||
| 722 | res = -EADDRINUSE; | ||
| 723 | goto exit; | ||
| 724 | } | ||
| 725 | publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, | 716 | publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, |
| 726 | scope, p_ptr->ref, key); | 717 | scope, p_ptr->ref, key); |
| 727 | if (publ) { | 718 | if (publ) { |
| 728 | list_add(&publ->pport_list, &p_ptr->publications); | 719 | list_add(&publ->pport_list, &p_ptr->publications); |
| 729 | p_ptr->pub_count++; | 720 | p_ptr->pub_count++; |
| 730 | p_ptr->published = 1; | 721 | p_ptr->published = 1; |
| 731 | res = 0; | 722 | return 0; |
| 732 | } | 723 | } |
| 733 | exit: | 724 | return -EINVAL; |
| 734 | tipc_port_unlock(p_ptr); | ||
| 735 | return res; | ||
| 736 | } | 725 | } |
| 737 | 726 | ||
| 738 | int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | 727 | int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, |
| 728 | struct tipc_name_seq const *seq) | ||
| 739 | { | 729 | { |
| 740 | struct tipc_port *p_ptr; | ||
| 741 | struct publication *publ; | 730 | struct publication *publ; |
| 742 | struct publication *tpubl; | 731 | struct publication *tpubl; |
| 743 | int res = -EINVAL; | 732 | int res = -EINVAL; |
| 744 | 733 | ||
| 745 | p_ptr = tipc_port_lock(ref); | ||
| 746 | if (!p_ptr) | ||
| 747 | return -EINVAL; | ||
| 748 | if (!seq) { | 734 | if (!seq) { |
| 749 | list_for_each_entry_safe(publ, tpubl, | 735 | list_for_each_entry_safe(publ, tpubl, |
| 750 | &p_ptr->publications, pport_list) { | 736 | &p_ptr->publications, pport_list) { |
| @@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | |||
| 771 | } | 757 | } |
| 772 | if (list_empty(&p_ptr->publications)) | 758 | if (list_empty(&p_ptr->publications)) |
| 773 | p_ptr->published = 0; | 759 | p_ptr->published = 0; |
| 774 | tipc_port_unlock(p_ptr); | ||
| 775 | return res; | 760 | return res; |
| 776 | } | 761 | } |
| 777 | 762 | ||
diff --git a/net/tipc/port.h b/net/tipc/port.h index 912253597343..34f12bd4074e 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h | |||
| @@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err); | |||
| 116 | 116 | ||
| 117 | void tipc_acknowledge(u32 port_ref, u32 ack); | 117 | void tipc_acknowledge(u32 port_ref, u32 ack); |
| 118 | 118 | ||
| 119 | int tipc_deleteport(u32 portref); | 119 | int tipc_deleteport(struct tipc_port *p_ptr); |
| 120 | 120 | ||
| 121 | int tipc_portimportance(u32 portref, unsigned int *importance); | 121 | int tipc_portimportance(u32 portref, unsigned int *importance); |
| 122 | int tipc_set_portimportance(u32 portref, unsigned int importance); | 122 | int tipc_set_portimportance(u32 portref, unsigned int importance); |
| @@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable); | |||
| 127 | int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable); | 127 | int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable); |
| 128 | int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable); | 128 | int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable); |
| 129 | 129 | ||
| 130 | int tipc_publish(u32 portref, unsigned int scope, | 130 | int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, |
| 131 | struct tipc_name_seq const *name_seq); | 131 | struct tipc_name_seq const *name_seq); |
| 132 | int tipc_withdraw(u32 portref, unsigned int scope, | 132 | int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, |
| 133 | struct tipc_name_seq const *name_seq); | 133 | struct tipc_name_seq const *name_seq); |
| 134 | 134 | ||
| 135 | int tipc_connect(u32 portref, struct tipc_portid const *port); | 135 | int tipc_connect(u32 portref, struct tipc_portid const *port); |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3b61851bb927..e741416d1d24 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -354,7 +354,7 @@ static int release(struct socket *sock) | |||
| 354 | * Delete TIPC port; this ensures no more messages are queued | 354 | * Delete TIPC port; this ensures no more messages are queued |
| 355 | * (also disconnects an active connection & sends a 'FIN-' to peer) | 355 | * (also disconnects an active connection & sends a 'FIN-' to peer) |
| 356 | */ | 356 | */ |
| 357 | res = tipc_deleteport(tport->ref); | 357 | res = tipc_deleteport(tport); |
| 358 | 358 | ||
| 359 | /* Discard any remaining (connection-based) messages in receive queue */ | 359 | /* Discard any remaining (connection-based) messages in receive queue */ |
| 360 | __skb_queue_purge(&sk->sk_receive_queue); | 360 | __skb_queue_purge(&sk->sk_receive_queue); |
| @@ -386,30 +386,46 @@ static int release(struct socket *sock) | |||
| 386 | */ | 386 | */ |
| 387 | static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) | 387 | static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) |
| 388 | { | 388 | { |
| 389 | struct sock *sk = sock->sk; | ||
| 389 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; | 390 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; |
| 390 | u32 portref = tipc_sk_port(sock->sk)->ref; | 391 | struct tipc_port *tport = tipc_sk_port(sock->sk); |
| 392 | int res = -EINVAL; | ||
| 391 | 393 | ||
| 392 | if (unlikely(!uaddr_len)) | 394 | lock_sock(sk); |
| 393 | return tipc_withdraw(portref, 0, NULL); | 395 | if (unlikely(!uaddr_len)) { |
| 396 | res = tipc_withdraw(tport, 0, NULL); | ||
| 397 | goto exit; | ||
| 398 | } | ||
| 394 | 399 | ||
| 395 | if (uaddr_len < sizeof(struct sockaddr_tipc)) | 400 | if (uaddr_len < sizeof(struct sockaddr_tipc)) { |
| 396 | return -EINVAL; | 401 | res = -EINVAL; |
| 397 | if (addr->family != AF_TIPC) | 402 | goto exit; |
| 398 | return -EAFNOSUPPORT; | 403 | } |
| 404 | if (addr->family != AF_TIPC) { | ||
| 405 | res = -EAFNOSUPPORT; | ||
| 406 | goto exit; | ||
| 407 | } | ||
| 399 | 408 | ||
| 400 | if (addr->addrtype == TIPC_ADDR_NAME) | 409 | if (addr->addrtype == TIPC_ADDR_NAME) |
| 401 | addr->addr.nameseq.upper = addr->addr.nameseq.lower; | 410 | addr->addr.nameseq.upper = addr->addr.nameseq.lower; |
| 402 | else if (addr->addrtype != TIPC_ADDR_NAMESEQ) | 411 | else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { |
| 403 | return -EAFNOSUPPORT; | 412 | res = -EAFNOSUPPORT; |
| 413 | goto exit; | ||
| 414 | } | ||
| 404 | 415 | ||
| 405 | if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && | 416 | if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && |
| 406 | (addr->addr.nameseq.type != TIPC_TOP_SRV) && | 417 | (addr->addr.nameseq.type != TIPC_TOP_SRV) && |
| 407 | (addr->addr.nameseq.type != TIPC_CFG_SRV)) | 418 | (addr->addr.nameseq.type != TIPC_CFG_SRV)) { |
| 408 | return -EACCES; | 419 | res = -EACCES; |
| 420 | goto exit; | ||
| 421 | } | ||
| 409 | 422 | ||
| 410 | return (addr->scope > 0) ? | 423 | res = (addr->scope > 0) ? |
| 411 | tipc_publish(portref, addr->scope, &addr->addr.nameseq) : | 424 | tipc_publish(tport, addr->scope, &addr->addr.nameseq) : |
| 412 | tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq); | 425 | tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq); |
| 426 | exit: | ||
| 427 | release_sock(sk); | ||
| 428 | return res; | ||
| 413 | } | 429 | } |
| 414 | 430 | ||
| 415 | /** | 431 | /** |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index a0ca162e5bd5..a427623ee574 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -718,7 +718,9 @@ static int unix_autobind(struct socket *sock) | |||
| 718 | int err; | 718 | int err; |
| 719 | unsigned int retries = 0; | 719 | unsigned int retries = 0; |
| 720 | 720 | ||
| 721 | mutex_lock(&u->readlock); | 721 | err = mutex_lock_interruptible(&u->readlock); |
| 722 | if (err) | ||
| 723 | return err; | ||
| 722 | 724 | ||
| 723 | err = 0; | 725 | err = 0; |
| 724 | if (u->addr) | 726 | if (u->addr) |
| @@ -877,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
| 877 | goto out; | 879 | goto out; |
| 878 | addr_len = err; | 880 | addr_len = err; |
| 879 | 881 | ||
| 880 | mutex_lock(&u->readlock); | 882 | err = mutex_lock_interruptible(&u->readlock); |
| 883 | if (err) | ||
| 884 | goto out; | ||
| 881 | 885 | ||
| 882 | err = -EINVAL; | 886 | err = -EINVAL; |
| 883 | if (u->addr) | 887 | if (u->addr) |
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c index a271c27fac77..722da616438c 100644 --- a/net/wireless/radiotap.c +++ b/net/wireless/radiotap.c | |||
| @@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init( | |||
| 124 | /* find payload start allowing for extended bitmap(s) */ | 124 | /* find payload start allowing for extended bitmap(s) */ |
| 125 | 125 | ||
| 126 | if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { | 126 | if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { |
| 127 | if ((unsigned long)iterator->_arg - | ||
| 128 | (unsigned long)iterator->_rtheader + sizeof(uint32_t) > | ||
| 129 | (unsigned long)iterator->_max_length) | ||
| 130 | return -EINVAL; | ||
| 127 | while (get_unaligned_le32(iterator->_arg) & | 131 | while (get_unaligned_le32(iterator->_arg) & |
| 128 | (1 << IEEE80211_RADIOTAP_EXT)) { | 132 | (1 << IEEE80211_RADIOTAP_EXT)) { |
| 129 | iterator->_arg += sizeof(uint32_t); | 133 | iterator->_arg += sizeof(uint32_t); |
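The radiotap fix above refuses to walk the extended it_present bitmaps unless another 32-bit word actually fits inside the declared header length, closing an out-of-bounds read on truncated headers. The same check-before-read discipline in a generic, self-contained form (a hypothetical helper, not the mac80211 iterator itself):

#include <stddef.h>
#include <stdint.h>

/* Read a little-endian u32 at offset pos, but only if it lies entirely
 * inside buf[0..len). Returns 1 on success, 0 if the field would overrun.
 */
static int read_le32_checked(const uint8_t *buf, size_t len, size_t pos,
			     uint32_t *out)
{
	if (pos > len || len - pos < sizeof(uint32_t))
		return 0;
	*out = (uint32_t)buf[pos] |
	       ((uint32_t)buf[pos + 1] << 8) |
	       ((uint32_t)buf[pos + 2] << 16) |
	       ((uint32_t)buf[pos + 3] << 24);
	return 1;
}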
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 65f800890d70..d3c5bd7c6b51 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
| @@ -632,6 +632,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
| 632 | } | 632 | } |
| 633 | #endif | 633 | #endif |
| 634 | 634 | ||
| 635 | if (!bss && (status == WLAN_STATUS_SUCCESS)) { | ||
| 636 | WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect); | ||
| 637 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, | ||
| 638 | wdev->ssid, wdev->ssid_len, | ||
| 639 | WLAN_CAPABILITY_ESS, | ||
| 640 | WLAN_CAPABILITY_ESS); | ||
| 641 | if (bss) | ||
| 642 | cfg80211_hold_bss(bss_from_pub(bss)); | ||
| 643 | } | ||
| 644 | |||
| 635 | if (wdev->current_bss) { | 645 | if (wdev->current_bss) { |
| 636 | cfg80211_unhold_bss(wdev->current_bss); | 646 | cfg80211_unhold_bss(wdev->current_bss); |
| 637 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); | 647 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); |
| @@ -649,16 +659,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
| 649 | return; | 659 | return; |
| 650 | } | 660 | } |
| 651 | 661 | ||
| 652 | if (!bss) { | 662 | if (WARN_ON(!bss)) |
| 653 | WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect); | 663 | return; |
| 654 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, | ||
| 655 | wdev->ssid, wdev->ssid_len, | ||
| 656 | WLAN_CAPABILITY_ESS, | ||
| 657 | WLAN_CAPABILITY_ESS); | ||
| 658 | if (WARN_ON(!bss)) | ||
| 659 | return; | ||
| 660 | cfg80211_hold_bss(bss_from_pub(bss)); | ||
| 661 | } | ||
| 662 | 664 | ||
| 663 | wdev->current_bss = bss_from_pub(bss); | 665 | wdev->current_bss = bss_from_pub(bss); |
| 664 | 666 | ||
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 32b10f53d0b4..2dcb37736d84 100644 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh | |||
| @@ -82,7 +82,9 @@ kallsyms() | |||
| 82 | kallsymopt="${kallsymopt} --all-symbols" | 82 | kallsymopt="${kallsymopt} --all-symbols" |
| 83 | fi | 83 | fi |
| 84 | 84 | ||
| 85 | kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET" | 85 | if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then |
| 86 | kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET" | ||
| 87 | fi | ||
| 86 | 88 | ||
| 87 | local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ | 89 | local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ |
| 88 | ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" | 90 | ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 419491d8e7d2..57b0b49f4e6e 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
| @@ -234,6 +234,14 @@ static int inode_alloc_security(struct inode *inode) | |||
| 234 | return 0; | 234 | return 0; |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | static void inode_free_rcu(struct rcu_head *head) | ||
| 238 | { | ||
| 239 | struct inode_security_struct *isec; | ||
| 240 | |||
| 241 | isec = container_of(head, struct inode_security_struct, rcu); | ||
| 242 | kmem_cache_free(sel_inode_cache, isec); | ||
| 243 | } | ||
| 244 | |||
| 237 | static void inode_free_security(struct inode *inode) | 245 | static void inode_free_security(struct inode *inode) |
| 238 | { | 246 | { |
| 239 | struct inode_security_struct *isec = inode->i_security; | 247 | struct inode_security_struct *isec = inode->i_security; |
| @@ -244,8 +252,16 @@ static void inode_free_security(struct inode *inode) | |||
| 244 | list_del_init(&isec->list); | 252 | list_del_init(&isec->list); |
| 245 | spin_unlock(&sbsec->isec_lock); | 253 | spin_unlock(&sbsec->isec_lock); |
| 246 | 254 | ||
| 247 | inode->i_security = NULL; | 255 | /* |
| 248 | kmem_cache_free(sel_inode_cache, isec); | 256 | * The inode may still be referenced in a path walk and |
| 257 | * a call to selinux_inode_permission() can be made | ||
| 258 | * after inode_free_security() is called. Ideally, the VFS | ||
| 259 | * wouldn't do this, but fixing that is a much harder | ||
| 260 | * job. For now, simply free the i_security via RCU, and | ||
| 261 | * leave the current inode->i_security pointer intact. | ||
| 262 | * The inode will be freed after the RCU grace period too. | ||
| 263 | */ | ||
| 264 | call_rcu(&isec->rcu, inode_free_rcu); | ||
| 249 | } | 265 | } |
| 250 | 266 | ||
| 251 | static int file_alloc_security(struct file *file) | 267 | static int file_alloc_security(struct file *file) |
| @@ -4334,8 +4350,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 4334 | } | 4350 | } |
| 4335 | err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, | 4351 | err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, |
| 4336 | PEER__RECV, &ad); | 4352 | PEER__RECV, &ad); |
| 4337 | if (err) | 4353 | if (err) { |
| 4338 | selinux_netlbl_err(skb, err, 0); | 4354 | selinux_netlbl_err(skb, err, 0); |
| 4355 | return err; | ||
| 4356 | } | ||
| 4339 | } | 4357 | } |
| 4340 | 4358 | ||
| 4341 | if (secmark_active) { | 4359 | if (secmark_active) { |
| @@ -5586,11 +5604,11 @@ static int selinux_setprocattr(struct task_struct *p, | |||
| 5586 | /* Check for ptracing, and update the task SID if ok. | 5604 | /* Check for ptracing, and update the task SID if ok. |
| 5587 | Otherwise, leave SID unchanged and fail. */ | 5605 | Otherwise, leave SID unchanged and fail. */ |
| 5588 | ptsid = 0; | 5606 | ptsid = 0; |
| 5589 | task_lock(p); | 5607 | rcu_read_lock(); |
| 5590 | tracer = ptrace_parent(p); | 5608 | tracer = ptrace_parent(p); |
| 5591 | if (tracer) | 5609 | if (tracer) |
| 5592 | ptsid = task_sid(tracer); | 5610 | ptsid = task_sid(tracer); |
| 5593 | task_unlock(p); | 5611 | rcu_read_unlock(); |
| 5594 | 5612 | ||
| 5595 | if (tracer) { | 5613 | if (tracer) { |
| 5596 | error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, | 5614 | error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, |
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index b1dfe1049450..078e553f52f2 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h | |||
| @@ -38,7 +38,10 @@ struct task_security_struct { | |||
| 38 | 38 | ||
| 39 | struct inode_security_struct { | 39 | struct inode_security_struct { |
| 40 | struct inode *inode; /* back pointer to inode object */ | 40 | struct inode *inode; /* back pointer to inode object */ |
| 41 | struct list_head list; /* list of inode_security_struct */ | 41 | union { |
| 42 | struct list_head list; /* list of inode_security_struct */ | ||
| 43 | struct rcu_head rcu; /* for freeing the inode_security_struct */ | ||
| 44 | }; | ||
| 42 | u32 task_sid; /* SID of creating task */ | 45 | u32 task_sid; /* SID of creating task */ |
| 43 | u32 sid; /* SID of this object */ | 46 | u32 sid; /* SID of this object */ |
| 44 | u16 sclass; /* security class of this object */ | 47 | u16 sclass; /* security class of this object */ |
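The two SELinux hunks above belong together: hooks.c defers freeing of the inode_security_struct with call_rcu() so a racing path walk that still dereferences inode->i_security reads valid memory, and objsec.h overlays the list head and the rcu_head in a union because the object is never on the superblock list and queued for freeing at the same time. The two mechanics worth seeing in isolation are container_of() (recovering the outer structure from the embedded rcu_head) and that union layout. A standalone userspace sketch of just those mechanics follows; struct isec and its field names are illustrative, not the kernel's.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Same idea as the kernel's container_of(): recover a pointer to the
 * embedding structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_stub  { void (*func)(struct rcu_head_stub *); };
struct list_head_stub { struct list_head_stub *next, *prev; };

/* Mirrors the union added to objsec.h: the object is either linked on the
 * superblock list (alive) or queued for deferred freeing (dying), never
 * both, so the two heads can share storage. */
struct isec {
	int sid;
	union {
		struct list_head_stub list;
		struct rcu_head_stub  rcu;
	};
};

/* Stand-in for inode_free_rcu(): this is what would run once the RCU
 * grace period has elapsed. */
static void isec_free_rcu(struct rcu_head_stub *head)
{
	struct isec *isec = container_of(head, struct isec, rcu);

	printf("freeing isec with sid=%d\n", isec->sid);
	free(isec);
}

int main(void)
{
	struct isec *isec = calloc(1, sizeof(*isec));

	if (!isec)
		return 1;
	isec->sid = 42;
	/* The kernel would do call_rcu(&isec->rcu, inode_free_rcu); here the
	 * callback is invoked directly just to show the pointer math. */
	isec->rcu.func = isec_free_rcu;
	isec->rcu.func(&isec->rcu);
	return 0;
}

Because inode->i_security is left pointing at the structure until the grace period ends, a selinux_inode_permission() call from a lingering path walk dereferences memory that is still valid, which is exactly what the comment in the hunk is guarding against.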
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 6e03b465e44e..a2104671f51d 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
| @@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream, | |||
| 1937 | case SNDRV_PCM_STATE_DISCONNECTED: | 1937 | case SNDRV_PCM_STATE_DISCONNECTED: |
| 1938 | err = -EBADFD; | 1938 | err = -EBADFD; |
| 1939 | goto _endloop; | 1939 | goto _endloop; |
| 1940 | case SNDRV_PCM_STATE_PAUSED: | ||
| 1941 | continue; | ||
| 1940 | } | 1942 | } |
| 1941 | if (!tout) { | 1943 | if (!tout) { |
| 1942 | snd_printd("%s write error (DMA or IRQ trouble?)\n", | 1944 | snd_printd("%s write error (DMA or IRQ trouble?)\n", |
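The pcm_lib.c hunk makes the avail-wait loop treat SNDRV_PCM_STATE_PAUSED as benign: the continue jumps back to waiting without running the timeout check below the switch, so a long user-initiated pause no longer gets reported as "DMA or IRQ trouble". A toy loop with the same control flow is below; the state names and the budget handling are invented, not the ALSA code.

#include <stdio.h>

enum state { RUNNING, PAUSED, DISCONNECTED };

/* Toy version of the wait loop: only iterations where the stream is not
 * paused consume the timeout budget; PAUSED just keeps waiting. */
static int wait_for_data(const enum state *trace, int n, int tout)
{
	for (int i = 0; i < n; i++) {
		switch (trace[i]) {
		case DISCONNECTED:
			return -1;		/* hard error, like -EBADFD */
		case PAUSED:
			continue;		/* skip the timeout check below */
		case RUNNING:
			break;
		}
		if (!tout--) {
			fprintf(stderr, "timeout (DMA or IRQ trouble?)\n");
			return -2;
		}
	}
	return 0;
}

int main(void)
{
	enum state trace[] = { RUNNING, PAUSED, PAUSED, PAUSED, RUNNING };

	/* Succeeds with a budget of 2 because the paused iterations are free;
	 * if PAUSED consumed the budget, this same trace would time out. */
	printf("result: %d\n", wait_for_data(trace, 5, 2));
	return 0;
}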
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 27aa14007cbd..956871d8b3d2 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -3433,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev) | |||
| 3433 | * white/black-list for enable_msi | 3433 | * white/black-list for enable_msi |
| 3434 | */ | 3434 | */ |
| 3435 | static struct snd_pci_quirk msi_black_list[] = { | 3435 | static struct snd_pci_quirk msi_black_list[] = { |
| 3436 | SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */ | ||
| 3437 | SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */ | ||
| 3438 | SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */ | ||
| 3439 | SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */ | ||
| 3436 | SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ | 3440 | SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ |
| 3437 | SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ | 3441 | SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ |
| 3438 | SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ | 3442 | SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 34de5dc2fe9b..c5646941539a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -4247,12 +4247,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 4247 | SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4247 | SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 4248 | SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4248 | SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 4249 | SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4249 | SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 4250 | SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | ||
| 4250 | SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4251 | SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 4251 | SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4252 | SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 4252 | SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS), | 4253 | SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS), |
| 4253 | SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), | 4254 | SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 4255 | SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | ||
| 4254 | SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS), | 4256 | SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS), |
| 4257 | SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | ||
| 4255 | SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), | 4258 | SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 4259 | SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), | ||
| 4256 | SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 4260 | SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
| 4257 | SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 4261 | SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
| 4258 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), | 4262 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index 8697cedccd21..1ead3c977a51 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c | |||
| @@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream, | |||
| 648 | 648 | ||
| 649 | dma_params = ssc_p->dma_params[dir]; | 649 | dma_params = ssc_p->dma_params[dir]; |
| 650 | 650 | ||
| 651 | ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable); | 651 | ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable); |
| 652 | ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error); | 652 | ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error); |
| 653 | 653 | ||
| 654 | pr_debug("%s enabled SSC_SR=0x%08x\n", | 654 | pr_debug("%s enabled SSC_SR=0x%08x\n", |
| @@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream, | |||
| 657 | return 0; | 657 | return 0; |
| 658 | } | 658 | } |
| 659 | 659 | ||
| 660 | static int atmel_ssc_trigger(struct snd_pcm_substream *substream, | ||
| 661 | int cmd, struct snd_soc_dai *dai) | ||
| 662 | { | ||
| 663 | struct atmel_ssc_info *ssc_p = &ssc_info[dai->id]; | ||
| 664 | struct atmel_pcm_dma_params *dma_params; | ||
| 665 | int dir; | ||
| 666 | |||
| 667 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | ||
| 668 | dir = 0; | ||
| 669 | else | ||
| 670 | dir = 1; | ||
| 671 | |||
| 672 | dma_params = ssc_p->dma_params[dir]; | ||
| 673 | |||
| 674 | switch (cmd) { | ||
| 675 | case SNDRV_PCM_TRIGGER_START: | ||
| 676 | case SNDRV_PCM_TRIGGER_RESUME: | ||
| 677 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | ||
| 678 | ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable); | ||
| 679 | break; | ||
| 680 | default: | ||
| 681 | ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable); | ||
| 682 | break; | ||
| 683 | } | ||
| 684 | |||
| 685 | return 0; | ||
| 686 | } | ||
| 660 | 687 | ||
| 661 | #ifdef CONFIG_PM | 688 | #ifdef CONFIG_PM |
| 662 | static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai) | 689 | static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai) |
| @@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = { | |||
| 731 | .startup = atmel_ssc_startup, | 758 | .startup = atmel_ssc_startup, |
| 732 | .shutdown = atmel_ssc_shutdown, | 759 | .shutdown = atmel_ssc_shutdown, |
| 733 | .prepare = atmel_ssc_prepare, | 760 | .prepare = atmel_ssc_prepare, |
| 761 | .trigger = atmel_ssc_trigger, | ||
| 734 | .hw_params = atmel_ssc_hw_params, | 762 | .hw_params = atmel_ssc_hw_params, |
| 735 | .set_fmt = atmel_ssc_set_dai_fmt, | 763 | .set_fmt = atmel_ssc_set_dai_fmt, |
| 736 | .set_clkdiv = atmel_ssc_set_dai_clkdiv, | 764 | .set_clkdiv = atmel_ssc_set_dai_clkdiv, |
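Taken together, the atmel_ssc_dai.c hunks move the SSC enable out of prepare() (which now writes the disable mask) and into the new trigger() callback, so the serial controller only runs while the stream is actually started. The switch follows the usual ALSA grouping: START/RESUME/PAUSE_RELEASE start the hardware, every other command stops it. A driver-agnostic sketch of that grouping is shown here; the enum and the ssc_start()/ssc_stop() helpers are stand-ins, not the driver API.

#include <stdio.h>

/* Canonical grouping of the ALSA trigger commands: the three "go"
 * commands start the hardware, everything else stops it. */
enum trigger_cmd {
	TRIGGER_START, TRIGGER_RESUME, TRIGGER_PAUSE_RELEASE,
	TRIGGER_STOP, TRIGGER_SUSPEND, TRIGGER_PAUSE_PUSH,
};

static void ssc_start(void) { printf("CR <- ssc_enable\n"); }
static void ssc_stop(void)  { printf("CR <- ssc_disable\n"); }

static int my_trigger(enum trigger_cmd cmd)
{
	switch (cmd) {
	case TRIGGER_START:
	case TRIGGER_RESUME:
	case TRIGGER_PAUSE_RELEASE:
		ssc_start();
		break;
	default:
		ssc_stop();
		break;
	}
	return 0;
}

int main(void)
{
	my_trigger(TRIGGER_START);	/* stream starts */
	my_trigger(TRIGGER_PAUSE_PUSH);	/* any non-start command stops it */
	return 0;
}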
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c index 1b372283bd01..7d6a9055874b 100644 --- a/sound/soc/atmel/sam9x5_wm8731.c +++ b/sound/soc/atmel/sam9x5_wm8731.c | |||
| @@ -109,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) | |||
| 109 | dai->stream_name = "WM8731 PCM"; | 109 | dai->stream_name = "WM8731 PCM"; |
| 110 | dai->codec_dai_name = "wm8731-hifi"; | 110 | dai->codec_dai_name = "wm8731-hifi"; |
| 111 | dai->init = sam9x5_wm8731_init; | 111 | dai->init = sam9x5_wm8731_init; |
| 112 | dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | 112 | dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF |
| 113 | | SND_SOC_DAIFMT_CBM_CFM; | 113 | | SND_SOC_DAIFMT_CBM_CFM; |
| 114 | 114 | ||
| 115 | ret = snd_soc_of_parse_card_name(card, "atmel,model"); | 115 | ret = snd_soc_of_parse_card_name(card, "atmel,model"); |
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 99b359e19d35..0ab2dc296474 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c | |||
| @@ -1012,7 +1012,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = { | |||
| 1012 | { "AEC Loopback", "HPOUT3L", "OUT3L" }, | 1012 | { "AEC Loopback", "HPOUT3L", "OUT3L" }, |
| 1013 | { "AEC Loopback", "HPOUT3R", "OUT3R" }, | 1013 | { "AEC Loopback", "HPOUT3R", "OUT3R" }, |
| 1014 | { "HPOUT3L", NULL, "OUT3L" }, | 1014 | { "HPOUT3L", NULL, "OUT3L" }, |
| 1015 | { "HPOUT3R", NULL, "OUT3L" }, | 1015 | { "HPOUT3R", NULL, "OUT3R" }, |
| 1016 | 1016 | ||
| 1017 | { "AEC Loopback", "SPKOUTL", "OUT4L" }, | 1017 | { "AEC Loopback", "SPKOUTL", "OUT4L" }, |
| 1018 | { "SPKOUTLN", NULL, "OUT4L" }, | 1018 | { "SPKOUTLN", NULL, "OUT4L" }, |
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 3938fb1c203e..53bbfac6a83a 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c | |||
| @@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) | |||
| 1444 | 1444 | ||
| 1445 | switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { | 1445 | switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { |
| 1446 | case SND_SOC_DAIFMT_DSP_B: | 1446 | case SND_SOC_DAIFMT_DSP_B: |
| 1447 | aif1 |= WM8904_AIF_LRCLK_INV; | 1447 | aif1 |= 0x3 | WM8904_AIF_LRCLK_INV; |
| 1448 | case SND_SOC_DAIFMT_DSP_A: | 1448 | case SND_SOC_DAIFMT_DSP_A: |
| 1449 | aif1 |= 0x3; | 1449 | aif1 |= 0x3; |
| 1450 | break; | 1450 | break; |
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 543c5c2631b6..0f17ed3e29f4 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c | |||
| @@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec) | |||
| 2439 | snd_soc_update_bits(codec, WM8962_CLOCKING_4, | 2439 | snd_soc_update_bits(codec, WM8962_CLOCKING_4, |
| 2440 | WM8962_SYSCLK_RATE_MASK, clocking4); | 2440 | WM8962_SYSCLK_RATE_MASK, clocking4); |
| 2441 | 2441 | ||
| 2442 | /* DSPCLK_DIV can be only generated correctly after enabling SYSCLK. | ||
| 2443 | * So we here provisionally enable it and then disable it afterward | ||
| 2444 | * if current bias_level hasn't reached SND_SOC_BIAS_ON. | ||
| 2445 | */ | ||
| 2446 | if (codec->dapm.bias_level != SND_SOC_BIAS_ON) | ||
| 2447 | snd_soc_update_bits(codec, WM8962_CLOCKING2, | ||
| 2448 | WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA); | ||
| 2449 | |||
| 2442 | dspclk = snd_soc_read(codec, WM8962_CLOCKING1); | 2450 | dspclk = snd_soc_read(codec, WM8962_CLOCKING1); |
| 2451 | |||
| 2452 | if (codec->dapm.bias_level != SND_SOC_BIAS_ON) | ||
| 2453 | snd_soc_update_bits(codec, WM8962_CLOCKING2, | ||
| 2454 | WM8962_SYSCLK_ENA_MASK, 0); | ||
| 2455 | |||
| 2443 | if (dspclk < 0) { | 2456 | if (dspclk < 0) { |
| 2444 | dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk); | 2457 | dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk); |
| 2445 | return; | 2458 | return; |
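The wm8962 hunk brackets the DSPCLK read: the read-only DSPCLK_DIV field is only computed while SYSCLK is running, so SYSCLK is switched on just for the read and switched back off unless the codec has already reached SND_SOC_BIAS_ON. The sketch below isolates that bracketing pattern; the register model and names are invented.

#include <stdbool.h>
#include <stdio.h>

static bool sysclk_on;
static int  dspclk_div = -1;	/* invalid until SYSCLK has been enabled */

/* Invented register model: enabling SYSCLK lets the hardware latch a
 * valid divider value. */
static void set_sysclk(bool on)
{
	sysclk_on = on;
	if (on)
		dspclk_div = 4;
}

/* Bracketed read: enable the clock only if it is not already on, read the
 * dependent field, then undo the temporary enable. */
static int read_dspclk_div(void)
{
	bool bracket = !sysclk_on;
	int div;

	if (bracket)
		set_sysclk(true);
	div = dspclk_div;
	if (bracket)
		set_sysclk(false);
	return div;
}

int main(void)
{
	printf("div = %d\n", read_dspclk_div());
	return 0;
}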
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 46ec0e9744d4..4fbcab63e61f 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c | |||
| @@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp) | |||
| 1474 | return ret; | 1474 | return ret; |
| 1475 | 1475 | ||
| 1476 | /* Wait for the RAM to start, should be near instantaneous */ | 1476 | /* Wait for the RAM to start, should be near instantaneous */ |
| 1477 | count = 0; | 1477 | for (count = 0; count < 10; ++count) { |
| 1478 | do { | ||
| 1479 | ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, | 1478 | ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, |
| 1480 | &val); | 1479 | &val); |
| 1481 | if (ret != 0) | 1480 | if (ret != 0) |
| 1482 | return ret; | 1481 | return ret; |
| 1483 | } while (!(val & ADSP2_RAM_RDY) && ++count < 10); | 1482 | |
| 1483 | if (val & ADSP2_RAM_RDY) | ||
| 1484 | break; | ||
| 1485 | |||
| 1486 | msleep(1); | ||
| 1487 | } | ||
| 1484 | 1488 | ||
| 1485 | if (!(val & ADSP2_RAM_RDY)) { | 1489 | if (!(val & ADSP2_RAM_RDY)) { |
| 1486 | adsp_err(dsp, "Failed to start DSP RAM\n"); | 1490 | adsp_err(dsp, "Failed to start DSP RAM\n"); |
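The wm_adsp change turns a tight do/while poll into a bounded for loop that sleeps 1 ms between reads, bails out on a read error, and only reports failure once all attempts still show the ready bit clear. A userspace sketch of the same loop shape is below; fake_read_status() merely stands in for regmap_read().

#include <stdio.h>
#include <unistd.h>

/* Pretend hardware: the "RAM ready" bit appears on the third poll. */
static int fake_read_status(unsigned int *val)
{
	static int calls;

	*val = (++calls >= 3) ? 1 : 0;
	return 0;
}

int main(void)
{
	unsigned int val = 0;
	int count;

	for (count = 0; count < 10; ++count) {
		if (fake_read_status(&val) != 0)
			return 1;		/* propagate the read error */
		if (val & 1)
			break;			/* ready bit set */
		usleep(1000);			/* msleep(1) equivalent */
	}

	if (!(val & 1)) {
		fprintf(stderr, "device never became ready\n");
		return 1;
	}
	printf("ready after %d polls\n", count + 1);
	return 0;
}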
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c index 61e48852b9e8..3fd76bc391de 100644 --- a/sound/soc/fsl/imx-wm8962.c +++ b/sound/soc/fsl/imx-wm8962.c | |||
| @@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card, | |||
| 130 | break; | 130 | break; |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | dapm->bias_level = level; | ||
| 134 | |||
| 135 | return 0; | 133 | return 0; |
| 136 | } | 134 | } |
| 137 | 135 | ||
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index 0b18f654b413..3920a5e8125f 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c | |||
| @@ -473,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = { | |||
| 473 | .playback = { | 473 | .playback = { |
| 474 | .channels_min = 1, | 474 | .channels_min = 1, |
| 475 | .channels_max = 2, | 475 | .channels_max = 2, |
| 476 | .rates = SNDRV_PCM_RATE_8000_192000 | | 476 | .rates = SNDRV_PCM_RATE_CONTINUOUS, |
| 477 | SNDRV_PCM_RATE_CONTINUOUS | | 477 | .rate_min = 5512, |
| 478 | SNDRV_PCM_RATE_KNOT, | 478 | .rate_max = 192000, |
| 479 | .formats = KIRKWOOD_I2S_FORMATS, | 479 | .formats = KIRKWOOD_I2S_FORMATS, |
| 480 | }, | 480 | }, |
| 481 | .capture = { | 481 | .capture = { |
| 482 | .channels_min = 1, | 482 | .channels_min = 1, |
| 483 | .channels_max = 2, | 483 | .channels_max = 2, |
| 484 | .rates = SNDRV_PCM_RATE_8000_192000 | | 484 | .rates = SNDRV_PCM_RATE_CONTINUOUS, |
| 485 | SNDRV_PCM_RATE_CONTINUOUS | | 485 | .rate_min = 5512, |
| 486 | SNDRV_PCM_RATE_KNOT, | 486 | .rate_max = 192000, |
| 487 | .formats = KIRKWOOD_I2S_FORMATS, | 487 | .formats = KIRKWOOD_I2S_FORMATS, |
| 488 | }, | 488 | }, |
| 489 | .ops = &kirkwood_i2s_dai_ops, | 489 | .ops = &kirkwood_i2s_dai_ops, |
| @@ -494,17 +494,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = { | |||
| 494 | .playback = { | 494 | .playback = { |
| 495 | .channels_min = 1, | 495 | .channels_min = 1, |
| 496 | .channels_max = 2, | 496 | .channels_max = 2, |
| 497 | .rates = SNDRV_PCM_RATE_8000_192000 | | 497 | .rates = SNDRV_PCM_RATE_CONTINUOUS, |
| 498 | SNDRV_PCM_RATE_CONTINUOUS | | 498 | .rate_min = 5512, |
| 499 | SNDRV_PCM_RATE_KNOT, | 499 | .rate_max = 192000, |
| 500 | .formats = KIRKWOOD_SPDIF_FORMATS, | 500 | .formats = KIRKWOOD_SPDIF_FORMATS, |
| 501 | }, | 501 | }, |
| 502 | .capture = { | 502 | .capture = { |
| 503 | .channels_min = 1, | 503 | .channels_min = 1, |
| 504 | .channels_max = 2, | 504 | .channels_max = 2, |
| 505 | .rates = SNDRV_PCM_RATE_8000_192000 | | 505 | .rates = SNDRV_PCM_RATE_CONTINUOUS, |
| 506 | SNDRV_PCM_RATE_CONTINUOUS | | 506 | .rate_min = 5512, |
| 507 | SNDRV_PCM_RATE_KNOT, | 507 | .rate_max = 192000, |
| 508 | .formats = KIRKWOOD_SPDIF_FORMATS, | 508 | .formats = KIRKWOOD_SPDIF_FORMATS, |
| 509 | }, | 509 | }, |
| 510 | .ops = &kirkwood_i2s_dai_ops, | 510 | .ops = &kirkwood_i2s_dai_ops, |
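The kirkwood-i2s hunks stop OR-ing discrete rate bits (plus SNDRV_PCM_RATE_KNOT) and instead declare SNDRV_PCM_RATE_CONTINUOUS with explicit rate_min/rate_max, which is the natural description for a DAI whose clock can be dialed to any rate in a range. A small illustration of the two models follows; the rate constants and the struct are invented for the example.

#include <stdbool.h>
#include <stdio.h>

/* Discrete model: one bit per "known" rate. */
#define RATE_44100 (1u << 0)
#define RATE_48000 (1u << 1)
#define RATE_96000 (1u << 2)

/* Continuous model, as the hunk switches to: anything inside [min, max]. */
struct rate_range { unsigned int min, max; };

static bool rate_in_range(struct rate_range r, unsigned int hz)
{
	return hz >= r.min && hz <= r.max;
}

int main(void)
{
	unsigned int discrete = RATE_44100 | RATE_48000 | RATE_96000;
	struct rate_range cont = { 5512, 192000 };

	/* 64000 Hz has no bit in the discrete mask at all, but is trivially
	 * expressed by the continuous range. */
	printf("discrete mask: 0x%x (no bit for 64000 Hz)\n", discrete);
	printf("64000 Hz in [%u, %u]: %d\n",
	       cont.min, cont.max, rate_in_range(cont, 64000));
	return 0;
}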
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index cbc9c96ce1f4..41949af3baae 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c | |||
| @@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm, | |||
| 305 | } | 305 | } |
| 306 | } | 306 | } |
| 307 | 307 | ||
| 308 | static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm) | ||
| 309 | { | ||
| 310 | unsigned int i; | ||
| 311 | |||
| 312 | for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; | ||
| 313 | i++) { | ||
| 314 | if (!pcm->chan[i]) | ||
| 315 | continue; | ||
| 316 | dma_release_channel(pcm->chan[i]); | ||
| 317 | if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) | ||
| 318 | break; | ||
| 319 | } | ||
| 320 | } | ||
| 321 | |||
| 308 | /** | 322 | /** |
| 309 | * snd_dmaengine_pcm_register - Register a dmaengine based PCM device | 323 | * snd_dmaengine_pcm_register - Register a dmaengine based PCM device |
| 310 | * @dev: The parent device for the PCM device | 324 | * @dev: The parent device for the PCM device |
| @@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev, | |||
| 315 | const struct snd_dmaengine_pcm_config *config, unsigned int flags) | 329 | const struct snd_dmaengine_pcm_config *config, unsigned int flags) |
| 316 | { | 330 | { |
| 317 | struct dmaengine_pcm *pcm; | 331 | struct dmaengine_pcm *pcm; |
| 332 | int ret; | ||
| 318 | 333 | ||
| 319 | pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); | 334 | pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); |
| 320 | if (!pcm) | 335 | if (!pcm) |
| @@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev, | |||
| 326 | dmaengine_pcm_request_chan_of(pcm, dev); | 341 | dmaengine_pcm_request_chan_of(pcm, dev); |
| 327 | 342 | ||
| 328 | if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE) | 343 | if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE) |
| 329 | return snd_soc_add_platform(dev, &pcm->platform, | 344 | ret = snd_soc_add_platform(dev, &pcm->platform, |
| 330 | &dmaengine_no_residue_pcm_platform); | 345 | &dmaengine_no_residue_pcm_platform); |
| 331 | else | 346 | else |
| 332 | return snd_soc_add_platform(dev, &pcm->platform, | 347 | ret = snd_soc_add_platform(dev, &pcm->platform, |
| 333 | &dmaengine_pcm_platform); | 348 | &dmaengine_pcm_platform); |
| 349 | if (ret) | ||
| 350 | goto err_free_dma; | ||
| 351 | |||
| 352 | return 0; | ||
| 353 | |||
| 354 | err_free_dma: | ||
| 355 | dmaengine_pcm_release_chan(pcm); | ||
| 356 | kfree(pcm); | ||
| 357 | return ret; | ||
| 334 | } | 358 | } |
| 335 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register); | 359 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register); |
| 336 | 360 | ||
| @@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev) | |||
| 345 | { | 369 | { |
| 346 | struct snd_soc_platform *platform; | 370 | struct snd_soc_platform *platform; |
| 347 | struct dmaengine_pcm *pcm; | 371 | struct dmaengine_pcm *pcm; |
| 348 | unsigned int i; | ||
| 349 | 372 | ||
| 350 | platform = snd_soc_lookup_platform(dev); | 373 | platform = snd_soc_lookup_platform(dev); |
| 351 | if (!platform) | 374 | if (!platform) |
| @@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev) | |||
| 353 | 376 | ||
| 354 | pcm = soc_platform_to_pcm(platform); | 377 | pcm = soc_platform_to_pcm(platform); |
| 355 | 378 | ||
| 356 | for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) { | ||
| 357 | if (pcm->chan[i]) { | ||
| 358 | dma_release_channel(pcm->chan[i]); | ||
| 359 | if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) | ||
| 360 | break; | ||
| 361 | } | ||
| 362 | } | ||
| 363 | |||
| 364 | snd_soc_remove_platform(platform); | 379 | snd_soc_remove_platform(platform); |
| 380 | dmaengine_pcm_release_chan(pcm); | ||
| 365 | kfree(pcm); | 381 | kfree(pcm); |
| 366 | } | 382 | } |
| 367 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister); | 383 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister); |
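The soc-generic-dmaengine-pcm change factors channel teardown into dmaengine_pcm_release_chan() and uses it from two places: the new error path in snd_dmaengine_pcm_register(), so a failed snd_soc_add_platform() no longer leaks the requested DMA channels or the pcm allocation, and the existing unregister path. A self-contained sketch of that acquire/fail/unwind shape is below; the types are invented and malloc() merely stands in for requesting a channel.

#include <stdio.h>
#include <stdlib.h>

struct pcm {
	void *chan[2];	/* stand-ins for the requested DMA channels */
};

/* Single release helper, reused by both the failed-register unwind and
 * the normal unregister path, exactly the factoring the patch introduces. */
static void pcm_release_channels(struct pcm *pcm)
{
	for (int i = 0; i < 2; i++) {
		free(pcm->chan[i]);
		pcm->chan[i] = NULL;
	}
}

static int add_platform_stub(int fail)
{
	return fail ? -1 : 0;	/* models the platform registration failing */
}

static struct pcm *pcm_register(int simulate_failure)
{
	struct pcm *pcm = calloc(1, sizeof(*pcm));

	if (!pcm)
		return NULL;
	pcm->chan[0] = malloc(16);	/* "request" the channels up front */
	pcm->chan[1] = malloc(16);

	if (add_platform_stub(simulate_failure))
		goto err_free_dma;	/* late failure: unwind everything */

	return pcm;

err_free_dma:
	pcm_release_channels(pcm);
	free(pcm);
	return NULL;
}

static void pcm_unregister(struct pcm *pcm)
{
	pcm_release_channels(pcm);	/* same helper as the error path */
	free(pcm);
}

int main(void)
{
	struct pcm *p = pcm_register(0);

	if (p)
		pcm_unregister(p);
	printf("failing register returns %p without leaking\n",
	       (void *)pcm_register(1));
	return 0;
}

Reusing one helper keeps the failure unwind and the normal teardown from drifting apart, which is why the patch also deletes the open-coded channel loop from snd_dmaengine_pcm_unregister().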
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 11a90cd027fa..891b9a9bcbf8 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c | |||
| @@ -600,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) | |||
| 600 | struct snd_soc_platform *platform = rtd->platform; | 600 | struct snd_soc_platform *platform = rtd->platform; |
| 601 | struct snd_soc_dai *cpu_dai = rtd->cpu_dai; | 601 | struct snd_soc_dai *cpu_dai = rtd->cpu_dai; |
| 602 | struct snd_soc_dai *codec_dai = rtd->codec_dai; | 602 | struct snd_soc_dai *codec_dai = rtd->codec_dai; |
| 603 | struct snd_soc_codec *codec = rtd->codec; | 603 | bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; |
| 604 | 604 | ||
| 605 | mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass); | 605 | mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass); |
| 606 | 606 | ||
| 607 | /* apply codec digital mute */ | 607 | /* apply codec digital mute */ |
| 608 | if (!codec->active) | 608 | if ((playback && codec_dai->playback_active == 1) || |
| 609 | (!playback && codec_dai->capture_active == 1)) | ||
| 609 | snd_soc_dai_digital_mute(codec_dai, 1, substream->stream); | 610 | snd_soc_dai_digital_mute(codec_dai, 1, substream->stream); |
| 610 | 611 | ||
| 611 | /* free any machine hw params */ | 612 | /* free any machine hw params */ |
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c index 364bf6a907e1..8c819f811470 100644 --- a/sound/soc/tegra/tegra20_i2s.c +++ b/sound/soc/tegra/tegra20_i2s.c | |||
| @@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai, | |||
| 74 | unsigned int fmt) | 74 | unsigned int fmt) |
| 75 | { | 75 | { |
| 76 | struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai); | 76 | struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai); |
| 77 | unsigned int mask, val; | 77 | unsigned int mask = 0, val = 0; |
| 78 | 78 | ||
| 79 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { | 79 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { |
| 80 | case SND_SOC_DAIFMT_NB_NF: | 80 | case SND_SOC_DAIFMT_NB_NF: |
| @@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai, | |||
| 83 | return -EINVAL; | 83 | return -EINVAL; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | mask = TEGRA20_I2S_CTRL_MASTER_ENABLE; | 86 | mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE; |
| 87 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { | 87 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { |
| 88 | case SND_SOC_DAIFMT_CBS_CFS: | 88 | case SND_SOC_DAIFMT_CBS_CFS: |
| 89 | val = TEGRA20_I2S_CTRL_MASTER_ENABLE; | 89 | val |= TEGRA20_I2S_CTRL_MASTER_ENABLE; |
| 90 | break; | 90 | break; |
| 91 | case SND_SOC_DAIFMT_CBM_CFM: | 91 | case SND_SOC_DAIFMT_CBM_CFM: |
| 92 | break; | 92 | break; |
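The tegra20_i2s change (and the matching tegra20_spdif and tegra30_i2s hunks that follow) initializes mask and val to 0 and accumulates with |=, so a stage that contributes no bits still leaves the variables defined and a later stage cannot silently clobber bits set by an earlier one. A minimal illustration of the accumulate pattern, with invented flag names, is below.

#include <stdio.h>

#define FLAG_A (1u << 0)
#define FLAG_B (1u << 1)

/* Building a register value across several decision points: start from 0
 * and accumulate with |=, so no stage overwrites what an earlier stage
 * set and nothing is left uninitialized when a stage adds no bits. */
static unsigned int build(int want_a, int want_b)
{
	unsigned int val = 0;

	if (want_a)
		val |= FLAG_A;	/* stage 1 */
	if (want_b)
		val |= FLAG_B;	/* stage 2: plain '=' here would drop FLAG_A */
	return val;
}

int main(void)
{
	printf("0x%x\n", build(1, 1));	/* prints 0x3 */
	return 0;
}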
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c index 08bc6931c7c7..8c7c1028e579 100644 --- a/sound/soc/tegra/tegra20_spdif.c +++ b/sound/soc/tegra/tegra20_spdif.c | |||
| @@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream, | |||
| 67 | { | 67 | { |
| 68 | struct device *dev = dai->dev; | 68 | struct device *dev = dai->dev; |
| 69 | struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai); | 69 | struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai); |
| 70 | unsigned int mask, val; | 70 | unsigned int mask = 0, val = 0; |
| 71 | int ret, spdifclock; | 71 | int ret, spdifclock; |
| 72 | 72 | ||
| 73 | mask = TEGRA20_SPDIF_CTRL_PACK | | 73 | mask |= TEGRA20_SPDIF_CTRL_PACK | |
| 74 | TEGRA20_SPDIF_CTRL_BIT_MODE_MASK; | 74 | TEGRA20_SPDIF_CTRL_BIT_MODE_MASK; |
| 75 | switch (params_format(params)) { | 75 | switch (params_format(params)) { |
| 76 | case SNDRV_PCM_FORMAT_S16_LE: | 76 | case SNDRV_PCM_FORMAT_S16_LE: |
| 77 | val = TEGRA20_SPDIF_CTRL_PACK | | 77 | val |= TEGRA20_SPDIF_CTRL_PACK | |
| 78 | TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT; | 78 | TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT; |
| 79 | break; | 79 | break; |
| 80 | default: | 80 | default: |
| 81 | return -EINVAL; | 81 | return -EINVAL; |
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index 231a785b3921..02247fee1cf7 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c | |||
| @@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai, | |||
| 118 | unsigned int fmt) | 118 | unsigned int fmt) |
| 119 | { | 119 | { |
| 120 | struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai); | 120 | struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai); |
| 121 | unsigned int mask, val; | 121 | unsigned int mask = 0, val = 0; |
| 122 | 122 | ||
| 123 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { | 123 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { |
| 124 | case SND_SOC_DAIFMT_NB_NF: | 124 | case SND_SOC_DAIFMT_NB_NF: |
| @@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai, | |||
| 127 | return -EINVAL; | 127 | return -EINVAL; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | mask = TEGRA30_I2S_CTRL_MASTER_ENABLE; | 130 | mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE; |
| 131 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { | 131 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { |
| 132 | case SND_SOC_DAIFMT_CBS_CFS: | 132 | case SND_SOC_DAIFMT_CBS_CFS: |
| 133 | val = TEGRA30_I2S_CTRL_MASTER_ENABLE; | 133 | val |= TEGRA30_I2S_CTRL_MASTER_ENABLE; |
| 134 | break; | 134 | break; |
| 135 | case SND_SOC_DAIFMT_CBM_CFM: | 135 | case SND_SOC_DAIFMT_CBM_CFM: |
| 136 | break; | 136 | break; |
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c index dc4de3762111..bcf1d2f0b791 100644 --- a/tools/power/cpupower/utils/cpupower-set.c +++ b/tools/power/cpupower/utils/cpupower-set.c | |||
| @@ -18,9 +18,9 @@ | |||
| 18 | #include "helpers/bitmask.h" | 18 | #include "helpers/bitmask.h" |
| 19 | 19 | ||
| 20 | static struct option set_opts[] = { | 20 | static struct option set_opts[] = { |
| 21 | { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, | 21 | { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'}, |
| 22 | { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, | 22 | { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'}, |
| 23 | { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, | 23 | { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'}, |
| 24 | { }, | 24 | { }, |
| 25 | }; | 25 | }; |
| 26 | 26 | ||
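The cpupower change switches the long options from optional_argument to required_argument. With optional_argument, getopt_long() only picks up a value written as --perf-bias=5; the space-separated form --perf-bias 5 leaves optarg NULL, so the setting was silently dropped. A minimal sketch with the same option-table shape is below; only the single --perf-bias option is kept for brevity.

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

/* Declaring the argument as required makes both "--perf-bias 5" and
 * "--perf-bias=5" (and "-b 5") populate optarg. */
static struct option set_opts[] = {
	{ .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b' },
	{ },
};

int main(int argc, char **argv)
{
	int c;

	while ((c = getopt_long(argc, argv, "b:", set_opts, NULL)) != -1) {
		switch (c) {
		case 'b':
			printf("perf-bias = %d\n", atoi(optarg));
			break;
		default:
			return 1;
		}
	}
	return 0;
}

Running the sketch as ./a.out --perf-bias 5 or ./a.out --perf-bias=5 prints the value either way once the option is declared required_argument.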
