304 files changed, 2831 insertions, 1381 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index e124847443f8..f0b4cd72411d 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
| @@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4 | |||
| 19 | (DSA_MAX_SWITCHES). | 19 | (DSA_MAX_SWITCHES). |
| 20 | Each of these switch child nodes should have the following required properties: | 20 | Each of these switch child nodes should have the following required properties: |
| 21 | 21 | ||
| 22 | - reg : Describes the switch address on the MII bus | 22 | - reg : Contains two fields. The first one describes the |
| 23 | address on the MII bus. The second is the switch | ||
| 24 | number that must be unique in cascaded configurations | ||
| 23 | - #address-cells : Must be 1 | 25 | - #address-cells : Must be 1 |
| 24 | - #size-cells : Must be 0 | 26 | - #size-cells : Must be 0 |
| 25 | 27 | ||
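To make the two-cell reg concrete, here is a minimal sketch of a switch child node; the MII address (16) and switch number (0) are made-up values for illustration, not taken from the patch:

	switch@0 {
		#address-cells = <1>;
		#size-cells = <0>;
		/* first cell: address on the MII bus, second cell: switch number */
		reg = <16 0>;
	};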
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index a63e5e013a8c..92ae734c00c3 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
| @@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2 | |||
| 114 | byte 4: 0 y6 y5 y4 y3 y2 y1 y0 | 114 | byte 4: 0 y6 y5 y4 y3 y2 y1 y0 |
| 115 | byte 5: 0 z6 z5 z4 z3 z2 z1 z0 | 115 | byte 5: 0 z6 z5 z4 z3 z2 z1 z0 |
| 116 | 116 | ||
| 117 | Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for | ||
| 118 | the DualPoint Stick. | ||
| 119 | |||
| 117 | Dualpoint device -- interleaved packet format | 120 | Dualpoint device -- interleaved packet format |
| 118 | --------------------------------------------- | 121 | --------------------------------------------- |
| 119 | 122 | ||
| @@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format | |||
| 127 | byte 7: 0 y6 y5 y4 y3 y2 y1 y0 | 130 | byte 7: 0 y6 y5 y4 y3 y2 y1 y0 |
| 128 | byte 8: 0 z6 z5 z4 z3 z2 z1 z0 | 131 | byte 8: 0 z6 z5 z4 z3 z2 z1 z0 |
| 129 | 132 | ||
| 133 | Devices which use the interleaving format normally send standard PS/2 mouse | ||
| 134 | packets for the DualPoint Stick + ALPS Absolute Mode packets for the | ||
| 135 | touchpad, switching to the interleaved packet format when both the stick and | ||
| 136 | the touchpad are used at the same time. | ||
| 137 | |||
| 130 | ALPS Absolute Mode - Protocol Version 3 | 138 | ALPS Absolute Mode - Protocol Version 3 |
| 131 | --------------------------------------- | 139 | --------------------------------------- |
| 132 | 140 | ||
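As a rough illustration, and using only the byte layout quoted above (bytes 4 and 5 of an ALPS Absolute Mode v2 packet; the other fields are not reproduced here), a decoder for the Y and Z fields might look like this hypothetical helper:

	/* Extract the Y and Z fields of an Absolute Mode v2 packet. */
	static void alps_v2_partial_decode(const unsigned char *p, int *y, int *z)
	{
		*y = p[4] & 0x7f;	/* byte 4: 0 y6 y5 y4 y3 y2 y1 y0 */
		*z = p[5] & 0x7f;	/* byte 5: 0 z6 z5 z4 z3 z2 z1 z0 */
	}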
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index c587a966413e..96705616f582 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
| @@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior. | |||
| 294 | The kernel does not provide button emulation for such devices but treats | 294 | The kernel does not provide button emulation for such devices but treats |
| 295 | them as any other INPUT_PROP_BUTTONPAD device. | 295 | them as any other INPUT_PROP_BUTTONPAD device. |
| 296 | 296 | ||
| 297 | INPUT_PROP_ACCELEROMETER | ||
| 298 | ------------------------- | ||
| 299 | Directional axes on this device (absolute and/or relative x, y, z) represent | ||
| 300 | accelerometer data. All other axes retain their meaning. A device must not mix | ||
| 301 | regular directional axes and accelerometer axes on the same event node. | ||
| 302 | |||
| 297 | Guidelines: | 303 | Guidelines: |
| 298 | ========== | 304 | ========== |
| 299 | The guidelines below ensure proper single-touch and multi-finger functionality. | 305 | The guidelines below ensure proper single-touch and multi-finger functionality. |
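A userspace sketch of how the new property could be queried on an evdev node (the helper name and error handling are illustrative, not part of the patch):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/input.h>

	/* Return 1 if the device's axes carry accelerometer data, 0 if not,
	 * -1 on error. */
	static int is_accelerometer(const char *devnode)
	{
		unsigned char props[INPUT_PROP_MAX / 8 + 1] = { 0 };
		int ret, fd = open(devnode, O_RDONLY);

		if (fd < 0)
			return -1;
		ret = ioctl(fd, EVIOCGPROP(sizeof(props)), props);
		close(fd);
		if (ret < 0)
			return -1;
		return !!(props[INPUT_PROP_ACCELEROMETER / 8] &
			  (1 << (INPUT_PROP_ACCELEROMETER % 8)));
	}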
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 7b4f59c09ee2..b85d000faeb4 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
| @@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE | |||
| 312 | 312 | ||
| 313 | The type of approaching tool. A lot of kernel drivers cannot distinguish | 313 | The type of approaching tool. A lot of kernel drivers cannot distinguish |
| 314 | between different tool types, such as a finger or a pen. In such cases, the | 314 | between different tool types, such as a finger or a pen. In such cases, the |
| 315 | event should be omitted. The protocol currently supports MT_TOOL_FINGER and | 315 | event should be omitted. The protocol currently supports MT_TOOL_FINGER, |
| 316 | MT_TOOL_PEN [2]. For type B devices, this event is handled by input core; | 316 | MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled |
| 317 | drivers should instead use input_mt_report_slot_state(). | 317 | by input core; drivers should instead use input_mt_report_slot_state(). |
| 318 | A contact's ABS_MT_TOOL_TYPE may change over time while still touching the | ||
| 319 | device, because the firmware may not be able to determine which tool is being | ||
| 320 | used when it first appears. | ||
| 318 | 321 | ||
| 319 | ABS_MT_BLOB_ID | 322 | ABS_MT_BLOB_ID |
| 320 | 323 | ||
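For type B devices the paragraph above means the driver does not emit ABS_MT_TOOL_TYPE itself but reports the tool through the slot helpers, and the input core generates the event. A rough per-contact sketch (function and variable names are illustrative):

	#include <linux/input/mt.h>

	static void report_contact(struct input_dev *dev, int slot,
				   bool active, int x, int y)
	{
		input_mt_slot(dev, slot);
		/* input core turns this into ABS_MT_TOOL_TYPE for the slot */
		input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
		if (active) {
			input_report_abs(dev, ABS_MT_POSITION_X, x);
			input_report_abs(dev, ABS_MT_POSITION_Y, y);
		}
	}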
diff --git a/MAINTAINERS b/MAINTAINERS
index 036284f6cf45..e2642f39971f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
| @@ -637,8 +637,7 @@ F: drivers/gpu/drm/radeon/radeon_kfd.h | |||
| 637 | F: include/uapi/linux/kfd_ioctl.h | 637 | F: include/uapi/linux/kfd_ioctl.h |
| 638 | 638 | ||
| 639 | AMD MICROCODE UPDATE SUPPORT | 639 | AMD MICROCODE UPDATE SUPPORT |
| 640 | M: Andreas Herrmann <herrmann.der.user@googlemail.com> | 640 | M: Borislav Petkov <bp@alien8.de> |
| 641 | L: amd64-microcode@amd64.org | ||
| 642 | S: Maintained | 641 | S: Maintained |
| 643 | F: arch/x86/kernel/cpu/microcode/amd* | 642 | F: arch/x86/kernel/cpu/microcode/amd* |
| 644 | 643 | ||
| @@ -1186,7 +1185,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> | |||
| 1186 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1185 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 1187 | S: Maintained | 1186 | S: Maintained |
| 1188 | F: arch/arm/mach-mvebu/ | 1187 | F: arch/arm/mach-mvebu/ |
| 1189 | F: drivers/rtc/armada38x-rtc | 1188 | F: drivers/rtc/rtc-armada38x.c |
| 1190 | 1189 | ||
| 1191 | ARM/Marvell Berlin SoC support | 1190 | ARM/Marvell Berlin SoC support |
| 1192 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> | 1191 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> |
| @@ -1362,6 +1361,7 @@ F: drivers/i2c/busses/i2c-rk3x.c | |||
| 1362 | F: drivers/*/*rockchip* | 1361 | F: drivers/*/*rockchip* |
| 1363 | F: drivers/*/*/*rockchip* | 1362 | F: drivers/*/*/*rockchip* |
| 1364 | F: sound/soc/rockchip/ | 1363 | F: sound/soc/rockchip/ |
| 1364 | N: rockchip | ||
| 1365 | 1365 | ||
| 1366 | ARM/SAMSUNG EXYNOS ARM ARCHITECTURES | 1366 | ARM/SAMSUNG EXYNOS ARM ARCHITECTURES |
| 1367 | M: Kukjin Kim <kgene@kernel.org> | 1367 | M: Kukjin Kim <kgene@kernel.org> |
| @@ -1675,8 +1675,8 @@ F: drivers/misc/eeprom/at24.c | |||
| 1675 | F: include/linux/platform_data/at24.h | 1675 | F: include/linux/platform_data/at24.h |
| 1676 | 1676 | ||
| 1677 | ATA OVER ETHERNET (AOE) DRIVER | 1677 | ATA OVER ETHERNET (AOE) DRIVER |
| 1678 | M: "Ed L. Cashin" <ecashin@coraid.com> | 1678 | M: "Ed L. Cashin" <ed.cashin@acm.org> |
| 1679 | W: http://support.coraid.com/support/linux | 1679 | W: http://www.openaoe.org/ |
| 1680 | S: Supported | 1680 | S: Supported |
| 1681 | F: Documentation/aoe/ | 1681 | F: Documentation/aoe/ |
| 1682 | F: drivers/block/aoe/ | 1682 | F: drivers/block/aoe/ |
| @@ -3252,6 +3252,13 @@ S: Maintained | |||
| 3252 | F: Documentation/hwmon/dme1737 | 3252 | F: Documentation/hwmon/dme1737 |
| 3253 | F: drivers/hwmon/dme1737.c | 3253 | F: drivers/hwmon/dme1737.c |
| 3254 | 3254 | ||
| 3255 | DMI/SMBIOS SUPPORT | ||
| 3256 | M: Jean Delvare <jdelvare@suse.de> | ||
| 3257 | S: Maintained | ||
| 3258 | F: drivers/firmware/dmi-id.c | ||
| 3259 | F: drivers/firmware/dmi_scan.c | ||
| 3260 | F: include/linux/dmi.h | ||
| 3261 | |||
| 3255 | DOCKING STATION DRIVER | 3262 | DOCKING STATION DRIVER |
| 3256 | M: Shaohua Li <shaohua.li@intel.com> | 3263 | M: Shaohua Li <shaohua.li@intel.com> |
| 3257 | L: linux-acpi@vger.kernel.org | 3264 | L: linux-acpi@vger.kernel.org |
| @@ -5094,7 +5101,7 @@ S: Supported | |||
| 5094 | F: drivers/platform/x86/intel_menlow.c | 5101 | F: drivers/platform/x86/intel_menlow.c |
| 5095 | 5102 | ||
| 5096 | INTEL IA32 MICROCODE UPDATE SUPPORT | 5103 | INTEL IA32 MICROCODE UPDATE SUPPORT |
| 5097 | M: Tigran Aivazian <tigran@aivazian.fsnet.co.uk> | 5104 | M: Borislav Petkov <bp@alien8.de> |
| 5098 | S: Maintained | 5105 | S: Maintained |
| 5099 | F: arch/x86/kernel/cpu/microcode/core* | 5106 | F: arch/x86/kernel/cpu/microcode/core* |
| 5100 | F: arch/x86/kernel/cpu/microcode/intel* | 5107 | F: arch/x86/kernel/cpu/microcode/intel* |
| @@ -5135,22 +5142,21 @@ M: Deepak Saxena <dsaxena@plexity.net> | |||
| 5135 | S: Maintained | 5142 | S: Maintained |
| 5136 | F: drivers/char/hw_random/ixp4xx-rng.c | 5143 | F: drivers/char/hw_random/ixp4xx-rng.c |
| 5137 | 5144 | ||
| 5138 | INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf) | 5145 | INTEL ETHERNET DRIVERS |
| 5139 | M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 5146 | M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> |
| 5140 | M: Jesse Brandeburg <jesse.brandeburg@intel.com> | 5147 | R: Jesse Brandeburg <jesse.brandeburg@intel.com> |
| 5141 | M: Bruce Allan <bruce.w.allan@intel.com> | 5148 | R: Shannon Nelson <shannon.nelson@intel.com> |
| 5142 | M: Carolyn Wyborny <carolyn.wyborny@intel.com> | 5149 | R: Carolyn Wyborny <carolyn.wyborny@intel.com> |
| 5143 | M: Don Skidmore <donald.c.skidmore@intel.com> | 5150 | R: Don Skidmore <donald.c.skidmore@intel.com> |
| 5144 | M: Greg Rose <gregory.v.rose@intel.com> | 5151 | R: Matthew Vick <matthew.vick@intel.com> |
| 5145 | M: Matthew Vick <matthew.vick@intel.com> | 5152 | R: John Ronciak <john.ronciak@intel.com> |
| 5146 | M: John Ronciak <john.ronciak@intel.com> | 5153 | R: Mitch Williams <mitch.a.williams@intel.com> |
| 5147 | M: Mitch Williams <mitch.a.williams@intel.com> | 5154 | L: intel-wired-lan@lists.osuosl.org |
| 5148 | M: Linux NICS <linux.nics@intel.com> | ||
| 5149 | L: e1000-devel@lists.sourceforge.net | ||
| 5150 | W: http://www.intel.com/support/feedback.htm | 5155 | W: http://www.intel.com/support/feedback.htm |
| 5151 | W: http://e1000.sourceforge.net/ | 5156 | W: http://e1000.sourceforge.net/ |
| 5152 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git | 5157 | Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/ |
| 5153 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git | 5158 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git |
| 5159 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git | ||
| 5154 | S: Supported | 5160 | S: Supported |
| 5155 | F: Documentation/networking/e100.txt | 5161 | F: Documentation/networking/e100.txt |
| 5156 | F: Documentation/networking/e1000.txt | 5162 | F: Documentation/networking/e1000.txt |
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
| 1 | VERSION = 4 | 1 | VERSION = 4 |
| 2 | PATCHLEVEL = 0 | 2 | PATCHLEVEL = 0 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc5 | 4 | EXTRAVERSION = -rc7 |
| 5 | NAME = Hurr durr I'ma sheep | 5 | NAME = Hurr durr I'ma sheep |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 114234e83caa..edda76fae83f 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
| @@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, | |||
| 67 | sigset_t *set) | 67 | sigset_t *set) |
| 68 | { | 68 | { |
| 69 | int err; | 69 | int err; |
| 70 | err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs, | 70 | err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs, |
| 71 | sizeof(sf->uc.uc_mcontext.regs.scratch)); | 71 | sizeof(sf->uc.uc_mcontext.regs.scratch)); |
| 72 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); | 72 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); |
| 73 | 73 | ||
| @@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) | |||
| 83 | if (!err) | 83 | if (!err) |
| 84 | set_current_blocked(&set); | 84 | set_current_blocked(&set); |
| 85 | 85 | ||
| 86 | err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), | 86 | err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch), |
| 87 | sizeof(sf->uc.uc_mcontext.regs.scratch)); | 87 | sizeof(sf->uc.uc_mcontext.regs.scratch)); |
| 88 | 88 | ||
| 89 | return err; | 89 | return err; |
| @@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn) | |||
| 131 | /* Don't restart from sigreturn */ | 131 | /* Don't restart from sigreturn */ |
| 132 | syscall_wont_restart(regs); | 132 | syscall_wont_restart(regs); |
| 133 | 133 | ||
| 134 | /* | ||
| 135 | * Ensure that sigreturn always returns to user mode (in case the | ||
| 136 | * regs saved on user stack got fudged between save and sigreturn) | ||
| 137 | * Otherwise it is easy to panic the kernel with a custom | ||
| 138 | * signal handler and/or restorer which clobberes the status32/ret | ||
| 139 | * to return to a bogus location in kernel mode. | ||
| 140 | */ | ||
| 141 | regs->status32 |= STATUS_U_MASK; | ||
| 142 | |||
| 134 | return regs->r0; | 143 | return regs->r0; |
| 135 | 144 | ||
| 136 | badframe: | 145 | badframe: |
| @@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) | |||
| 229 | 238 | ||
| 230 | /* | 239 | /* |
| 231 | * handler returns using sigreturn stub provided already by userpsace | 240 | * handler returns using sigreturn stub provided already by userpsace |
| 241 | * If not, nuke the process right away | ||
| 232 | */ | 242 | */ |
| 233 | BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER)); | 243 | if(!(ksig->ka.sa.sa_flags & SA_RESTORER)) |
| 244 | return 1; | ||
| 245 | |||
| 234 | regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; | 246 | regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; |
| 235 | 247 | ||
| 236 | /* User Stack for signal handler will be above the frame just carved */ | 248 | /* User Stack for signal handler will be above the frame just carved */ |
| @@ -296,12 +308,12 @@ static void | |||
| 296 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) | 308 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
| 297 | { | 309 | { |
| 298 | sigset_t *oldset = sigmask_to_save(); | 310 | sigset_t *oldset = sigmask_to_save(); |
| 299 | int ret; | 311 | int failed; |
| 300 | 312 | ||
| 301 | /* Set up the stack frame */ | 313 | /* Set up the stack frame */ |
| 302 | ret = setup_rt_frame(ksig, oldset, regs); | 314 | failed = setup_rt_frame(ksig, oldset, regs); |
| 303 | 315 | ||
| 304 | signal_setup_done(ret, ksig, 0); | 316 | signal_setup_done(failed, ksig, 0); |
| 305 | } | 317 | } |
| 306 | 318 | ||
| 307 | void do_signal(struct pt_regs *regs) | 319 | void do_signal(struct pt_regs *regs) |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9f1f09a2bc9b..cf4c0c99aa25 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
| @@ -619,6 +619,7 @@ config ARCH_PXA | |||
| 619 | select GENERIC_CLOCKEVENTS | 619 | select GENERIC_CLOCKEVENTS |
| 620 | select GPIO_PXA | 620 | select GPIO_PXA |
| 621 | select HAVE_IDE | 621 | select HAVE_IDE |
| 622 | select IRQ_DOMAIN | ||
| 622 | select MULTI_IRQ_HANDLER | 623 | select MULTI_IRQ_HANDLER |
| 623 | select PLAT_PXA | 624 | select PLAT_PXA |
| 624 | select SPARSE_IRQ | 625 | select SPARSE_IRQ |
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index d3a29c1b8417..afe678f6d2e9 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
| @@ -36,6 +36,20 @@ | |||
| 36 | >; | 36 | >; |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | mmc_pins: pinmux_mmc_pins { | ||
| 40 | pinctrl-single,pins = < | ||
| 41 | DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */ | ||
| 42 | DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */ | ||
| 43 | DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */ | ||
| 44 | DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */ | ||
| 45 | DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */ | ||
| 46 | DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */ | ||
| 47 | DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT2 */ | ||
| 48 | DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */ | ||
| 49 | DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */ | ||
| 50 | >; | ||
| 51 | }; | ||
| 52 | |||
| 39 | usb0_pins: pinmux_usb0_pins { | 53 | usb0_pins: pinmux_usb0_pins { |
| 40 | pinctrl-single,pins = < | 54 | pinctrl-single,pins = < |
| 41 | DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ | 55 | DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ |
| @@ -137,7 +151,12 @@ | |||
| 137 | }; | 151 | }; |
| 138 | 152 | ||
| 139 | &mmc1 { | 153 | &mmc1 { |
| 154 | pinctrl-names = "default"; | ||
| 155 | pinctrl-0 = <&mmc_pins>; | ||
| 140 | vmmc-supply = <&vmmcsd_fixed>; | 156 | vmmc-supply = <&vmmcsd_fixed>; |
| 157 | bus-width = <4>; | ||
| 158 | cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; | ||
| 159 | wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; | ||
| 141 | }; | 160 | }; |
| 142 | 161 | ||
| 143 | /* At least dm8168-evm rev c won't support multipoint, later may */ | 162 | /* At least dm8168-evm rev c won't support multipoint, later may */ |
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c97b5f2addc..f35715bc6992 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
| @@ -150,17 +150,27 @@ | |||
| 150 | }; | 150 | }; |
| 151 | 151 | ||
| 152 | gpio1: gpio@48032000 { | 152 | gpio1: gpio@48032000 { |
| 153 | compatible = "ti,omap3-gpio"; | 153 | compatible = "ti,omap4-gpio"; |
| 154 | ti,hwmods = "gpio1"; | 154 | ti,hwmods = "gpio1"; |
| 155 | ti,gpio-always-on; | ||
| 155 | reg = <0x48032000 0x1000>; | 156 | reg = <0x48032000 0x1000>; |
| 156 | interrupts = <97>; | 157 | interrupts = <96>; |
| 158 | gpio-controller; | ||
| 159 | #gpio-cells = <2>; | ||
| 160 | interrupt-controller; | ||
| 161 | #interrupt-cells = <2>; | ||
| 157 | }; | 162 | }; |
| 158 | 163 | ||
| 159 | gpio2: gpio@4804c000 { | 164 | gpio2: gpio@4804c000 { |
| 160 | compatible = "ti,omap3-gpio"; | 165 | compatible = "ti,omap4-gpio"; |
| 161 | ti,hwmods = "gpio2"; | 166 | ti,hwmods = "gpio2"; |
| 167 | ti,gpio-always-on; | ||
| 162 | reg = <0x4804c000 0x1000>; | 168 | reg = <0x4804c000 0x1000>; |
| 163 | interrupts = <99>; | 169 | interrupts = <98>; |
| 170 | gpio-controller; | ||
| 171 | #gpio-cells = <2>; | ||
| 172 | interrupt-controller; | ||
| 173 | #interrupt-cells = <2>; | ||
| 164 | }; | 174 | }; |
| 165 | 175 | ||
| 166 | gpmc: gpmc@50000000 { | 176 | gpmc: gpmc@50000000 { |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 127608d79033..c4659a979c41 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
| @@ -1111,7 +1111,6 @@ | |||
| 1111 | "wkupclk", "refclk", | 1111 | "wkupclk", "refclk", |
| 1112 | "div-clk", "phy-div"; | 1112 | "div-clk", "phy-div"; |
| 1113 | #phy-cells = <0>; | 1113 | #phy-cells = <0>; |
| 1114 | ti,hwmods = "pcie1-phy"; | ||
| 1115 | }; | 1114 | }; |
| 1116 | 1115 | ||
| 1117 | pcie2_phy: pciephy@4a095000 { | 1116 | pcie2_phy: pciephy@4a095000 { |
| @@ -1130,7 +1129,6 @@ | |||
| 1130 | "wkupclk", "refclk", | 1129 | "wkupclk", "refclk", |
| 1131 | "div-clk", "phy-div"; | 1130 | "div-clk", "phy-div"; |
| 1132 | #phy-cells = <0>; | 1131 | #phy-cells = <0>; |
| 1133 | ti,hwmods = "pcie2-phy"; | ||
| 1134 | status = "disabled"; | 1132 | status = "disabled"; |
| 1135 | }; | 1133 | }; |
| 1136 | }; | 1134 | }; |
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f4f78c40b564..3fdc84fddb70 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
| @@ -92,6 +92,8 @@ | |||
| 92 | ti,hwmods = "aes"; | 92 | ti,hwmods = "aes"; |
| 93 | reg = <0x480c5000 0x50>; | 93 | reg = <0x480c5000 0x50>; |
| 94 | interrupts = <0>; | 94 | interrupts = <0>; |
| 95 | dmas = <&sdma 65 &sdma 66>; | ||
| 96 | dma-names = "tx", "rx"; | ||
| 95 | }; | 97 | }; |
| 96 | 98 | ||
| 97 | prm: prm@48306000 { | 99 | prm: prm@48306000 { |
| @@ -550,6 +552,8 @@ | |||
| 550 | ti,hwmods = "sham"; | 552 | ti,hwmods = "sham"; |
| 551 | reg = <0x480c3000 0x64>; | 553 | reg = <0x480c3000 0x64>; |
| 552 | interrupts = <49>; | 554 | interrupts = <49>; |
| 555 | dmas = <&sdma 69>; | ||
| 556 | dma-names = "rx"; | ||
| 553 | }; | 557 | }; |
| 554 | 558 | ||
| 555 | smartreflex_core: smartreflex@480cb000 { | 559 | smartreflex_core: smartreflex@480cb000 { |
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index d771f687a13b..eccc78d3220b 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
| @@ -411,6 +411,7 @@ | |||
| 411 | "mac_clk_rx", "mac_clk_tx", | 411 | "mac_clk_rx", "mac_clk_tx", |
| 412 | "clk_mac_ref", "clk_mac_refout", | 412 | "clk_mac_ref", "clk_mac_refout", |
| 413 | "aclk_mac", "pclk_mac"; | 413 | "aclk_mac", "pclk_mac"; |
| 414 | status = "disabled"; | ||
| 414 | }; | 415 | }; |
| 415 | 416 | ||
| 416 | usb_host0_ehci: usb@ff500000 { | 417 | usb_host0_ehci: usb@ff500000 { |
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9d8760956752..d9176e606173 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
| @@ -660,7 +660,7 @@ | |||
| 660 | #address-cells = <1>; | 660 | #address-cells = <1>; |
| 661 | #size-cells = <0>; | 661 | #size-cells = <0>; |
| 662 | reg = <0xfff01000 0x1000>; | 662 | reg = <0xfff01000 0x1000>; |
| 663 | interrupts = <0 156 4>; | 663 | interrupts = <0 155 4>; |
| 664 | num-cs = <4>; | 664 | num-cs = <4>; |
| 665 | clocks = <&spi_m_clk>; | 665 | clocks = <&spi_m_clk>; |
| 666 | status = "disabled"; | 666 | status = "disabled"; |
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index ab7891c43231..75742f8f96f3 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
| @@ -56,6 +56,22 @@ | |||
| 56 | model = "Olimex A10-OLinuXino-LIME"; | 56 | model = "Olimex A10-OLinuXino-LIME"; |
| 57 | compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; | 57 | compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; |
| 58 | 58 | ||
| 59 | cpus { | ||
| 60 | cpu0: cpu@0 { | ||
| 61 | /* | ||
| 62 | * The A10-Lime is known to be unstable | ||
| 63 | * when running at 1008 MHz | ||
| 64 | */ | ||
| 65 | operating-points = < | ||
| 66 | /* kHz uV */ | ||
| 67 | 912000 1350000 | ||
| 68 | 864000 1300000 | ||
| 69 | 624000 1250000 | ||
| 70 | >; | ||
| 71 | cooling-max-level = <2>; | ||
| 72 | }; | ||
| 73 | }; | ||
| 74 | |||
| 59 | soc@01c00000 { | 75 | soc@01c00000 { |
| 60 | emac: ethernet@01c0b000 { | 76 | emac: ethernet@01c0b000 { |
| 61 | pinctrl-names = "default"; | 77 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5c2925831f20..eebb7853e00b 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
| @@ -75,7 +75,6 @@ | |||
| 75 | clock-latency = <244144>; /* 8 32k periods */ | 75 | clock-latency = <244144>; /* 8 32k periods */ |
| 76 | operating-points = < | 76 | operating-points = < |
| 77 | /* kHz uV */ | 77 | /* kHz uV */ |
| 78 | 1056000 1500000 | ||
| 79 | 1008000 1400000 | 78 | 1008000 1400000 |
| 80 | 912000 1350000 | 79 | 912000 1350000 |
| 81 | 864000 1300000 | 80 | 864000 1300000 |
| @@ -83,7 +82,7 @@ | |||
| 83 | >; | 82 | >; |
| 84 | #cooling-cells = <2>; | 83 | #cooling-cells = <2>; |
| 85 | cooling-min-level = <0>; | 84 | cooling-min-level = <0>; |
| 86 | cooling-max-level = <4>; | 85 | cooling-max-level = <3>; |
| 87 | }; | 86 | }; |
| 88 | }; | 87 | }; |
| 89 | 88 | ||
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index f8818f1edbbe..883cb4873688 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
| @@ -47,7 +47,6 @@ | |||
| 47 | clock-latency = <244144>; /* 8 32k periods */ | 47 | clock-latency = <244144>; /* 8 32k periods */ |
| 48 | operating-points = < | 48 | operating-points = < |
| 49 | /* kHz uV */ | 49 | /* kHz uV */ |
| 50 | 1104000 1500000 | ||
| 51 | 1008000 1400000 | 50 | 1008000 1400000 |
| 52 | 912000 1350000 | 51 | 912000 1350000 |
| 53 | 864000 1300000 | 52 | 864000 1300000 |
| @@ -57,7 +56,7 @@ | |||
| 57 | >; | 56 | >; |
| 58 | #cooling-cells = <2>; | 57 | #cooling-cells = <2>; |
| 59 | cooling-min-level = <0>; | 58 | cooling-min-level = <0>; |
| 60 | cooling-max-level = <6>; | 59 | cooling-max-level = <5>; |
| 61 | }; | 60 | }; |
| 62 | }; | 61 | }; |
| 63 | 62 | ||
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 3a8530b79f1c..fdd181792b4b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
| @@ -105,7 +105,6 @@ | |||
| 105 | clock-latency = <244144>; /* 8 32k periods */ | 105 | clock-latency = <244144>; /* 8 32k periods */ |
| 106 | operating-points = < | 106 | operating-points = < |
| 107 | /* kHz uV */ | 107 | /* kHz uV */ |
| 108 | 1008000 1450000 | ||
| 109 | 960000 1400000 | 108 | 960000 1400000 |
| 110 | 912000 1400000 | 109 | 912000 1400000 |
| 111 | 864000 1300000 | 110 | 864000 1300000 |
| @@ -116,7 +115,7 @@ | |||
| 116 | >; | 115 | >; |
| 117 | #cooling-cells = <2>; | 116 | #cooling-cells = <2>; |
| 118 | cooling-min-level = <0>; | 117 | cooling-min-level = <0>; |
| 119 | cooling-max-level = <7>; | 118 | cooling-max-level = <6>; |
| 120 | }; | 119 | }; |
| 121 | 120 | ||
| 122 | cpu@1 { | 121 | cpu@1 { |
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2a2f4d56e4c8..25f1beea453e 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
| @@ -720,6 +720,8 @@ static const char * __init omap_get_family(void) | |||
| 720 | return kasprintf(GFP_KERNEL, "OMAP4"); | 720 | return kasprintf(GFP_KERNEL, "OMAP4"); |
| 721 | else if (soc_is_omap54xx()) | 721 | else if (soc_is_omap54xx()) |
| 722 | return kasprintf(GFP_KERNEL, "OMAP5"); | 722 | return kasprintf(GFP_KERNEL, "OMAP5"); |
| 723 | else if (soc_is_am33xx() || soc_is_am335x()) | ||
| 724 | return kasprintf(GFP_KERNEL, "AM33xx"); | ||
| 723 | else if (soc_is_am43xx()) | 725 | else if (soc_is_am43xx()) |
| 724 | return kasprintf(GFP_KERNEL, "AM43xx"); | 726 | return kasprintf(GFP_KERNEL, "AM43xx"); |
| 725 | else if (soc_is_dra7xx()) | 727 | else if (soc_is_dra7xx()) |
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 0eecd83c624e..89a7c06570d3 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
| @@ -11,6 +11,7 @@ | |||
| 11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
| 12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
| 13 | */ | 13 | */ |
| 14 | #include <linux/bitops.h> | ||
| 14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
| 15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 16 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
| @@ -40,7 +41,6 @@ | |||
| 40 | #define ICHP_VAL_IRQ (1 << 31) | 41 | #define ICHP_VAL_IRQ (1 << 31) |
| 41 | #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) | 42 | #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) |
| 42 | #define IPR_VALID (1 << 31) | 43 | #define IPR_VALID (1 << 31) |
| 43 | #define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f) | ||
| 44 | 44 | ||
| 45 | #define MAX_INTERNAL_IRQS 128 | 45 | #define MAX_INTERNAL_IRQS 128 |
| 46 | 46 | ||
| @@ -51,6 +51,7 @@ | |||
| 51 | static void __iomem *pxa_irq_base; | 51 | static void __iomem *pxa_irq_base; |
| 52 | static int pxa_internal_irq_nr; | 52 | static int pxa_internal_irq_nr; |
| 53 | static bool cpu_has_ipr; | 53 | static bool cpu_has_ipr; |
| 54 | static struct irq_domain *pxa_irq_domain; | ||
| 54 | 55 | ||
| 55 | static inline void __iomem *irq_base(int i) | 56 | static inline void __iomem *irq_base(int i) |
| 56 | { | 57 | { |
| @@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i) | |||
| 66 | void pxa_mask_irq(struct irq_data *d) | 67 | void pxa_mask_irq(struct irq_data *d) |
| 67 | { | 68 | { |
| 68 | void __iomem *base = irq_data_get_irq_chip_data(d); | 69 | void __iomem *base = irq_data_get_irq_chip_data(d); |
| 70 | irq_hw_number_t irq = irqd_to_hwirq(d); | ||
| 69 | uint32_t icmr = __raw_readl(base + ICMR); | 71 | uint32_t icmr = __raw_readl(base + ICMR); |
| 70 | 72 | ||
| 71 | icmr &= ~(1 << IRQ_BIT(d->irq)); | 73 | icmr &= ~BIT(irq & 0x1f); |
| 72 | __raw_writel(icmr, base + ICMR); | 74 | __raw_writel(icmr, base + ICMR); |
| 73 | } | 75 | } |
| 74 | 76 | ||
| 75 | void pxa_unmask_irq(struct irq_data *d) | 77 | void pxa_unmask_irq(struct irq_data *d) |
| 76 | { | 78 | { |
| 77 | void __iomem *base = irq_data_get_irq_chip_data(d); | 79 | void __iomem *base = irq_data_get_irq_chip_data(d); |
| 80 | irq_hw_number_t irq = irqd_to_hwirq(d); | ||
| 78 | uint32_t icmr = __raw_readl(base + ICMR); | 81 | uint32_t icmr = __raw_readl(base + ICMR); |
| 79 | 82 | ||
| 80 | icmr |= 1 << IRQ_BIT(d->irq); | 83 | icmr |= BIT(irq & 0x1f); |
| 81 | __raw_writel(icmr, base + ICMR); | 84 | __raw_writel(icmr, base + ICMR); |
| 82 | } | 85 | } |
| 83 | 86 | ||
| @@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs) | |||
| 118 | } while (1); | 121 | } while (1); |
| 119 | } | 122 | } |
| 120 | 123 | ||
| 121 | void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) | 124 | static int pxa_irq_map(struct irq_domain *h, unsigned int virq, |
| 125 | irq_hw_number_t hw) | ||
| 122 | { | 126 | { |
| 123 | int irq, i, n; | 127 | void __iomem *base = irq_base(hw / 32); |
| 124 | 128 | ||
| 125 | BUG_ON(irq_nr > MAX_INTERNAL_IRQS); | 129 | /* initialize interrupt priority */ |
| 130 | if (cpu_has_ipr) | ||
| 131 | __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); | ||
| 132 | |||
| 133 | irq_set_chip_and_handler(virq, &pxa_internal_irq_chip, | ||
| 134 | handle_level_irq); | ||
| 135 | irq_set_chip_data(virq, base); | ||
| 136 | set_irq_flags(virq, IRQF_VALID); | ||
| 137 | |||
| 138 | return 0; | ||
| 139 | } | ||
| 140 | |||
| 141 | static struct irq_domain_ops pxa_irq_ops = { | ||
| 142 | .map = pxa_irq_map, | ||
| 143 | .xlate = irq_domain_xlate_onecell, | ||
| 144 | }; | ||
| 145 | |||
| 146 | static __init void | ||
| 147 | pxa_init_irq_common(struct device_node *node, int irq_nr, | ||
| 148 | int (*fn)(struct irq_data *, unsigned int)) | ||
| 149 | { | ||
| 150 | int n; | ||
| 126 | 151 | ||
| 127 | pxa_internal_irq_nr = irq_nr; | 152 | pxa_internal_irq_nr = irq_nr; |
| 128 | cpu_has_ipr = !cpu_is_pxa25x(); | 153 | pxa_irq_domain = irq_domain_add_legacy(node, irq_nr, |
| 129 | pxa_irq_base = io_p2v(0x40d00000); | 154 | PXA_IRQ(0), 0, |
| 155 | &pxa_irq_ops, NULL); | ||
| 156 | if (!pxa_irq_domain) | ||
| 157 | panic("Unable to add PXA IRQ domain\n"); | ||
| 158 | irq_set_default_host(pxa_irq_domain); | ||
| 130 | 159 | ||
| 131 | for (n = 0; n < irq_nr; n += 32) { | 160 | for (n = 0; n < irq_nr; n += 32) { |
| 132 | void __iomem *base = irq_base(n >> 5); | 161 | void __iomem *base = irq_base(n >> 5); |
| 133 | 162 | ||
| 134 | __raw_writel(0, base + ICMR); /* disable all IRQs */ | 163 | __raw_writel(0, base + ICMR); /* disable all IRQs */ |
| 135 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ | 164 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ |
| 136 | for (i = n; (i < (n + 32)) && (i < irq_nr); i++) { | ||
| 137 | /* initialize interrupt priority */ | ||
| 138 | if (cpu_has_ipr) | ||
| 139 | __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i)); | ||
| 140 | |||
| 141 | irq = PXA_IRQ(i); | ||
| 142 | irq_set_chip_and_handler(irq, &pxa_internal_irq_chip, | ||
| 143 | handle_level_irq); | ||
| 144 | irq_set_chip_data(irq, base); | ||
| 145 | set_irq_flags(irq, IRQF_VALID); | ||
| 146 | } | ||
| 147 | } | 165 | } |
| 148 | |||
| 149 | /* only unmasked interrupts kick us out of idle */ | 166 | /* only unmasked interrupts kick us out of idle */ |
| 150 | __raw_writel(1, irq_base(0) + ICCR); | 167 | __raw_writel(1, irq_base(0) + ICCR); |
| 151 | 168 | ||
| 152 | pxa_internal_irq_chip.irq_set_wake = fn; | 169 | pxa_internal_irq_chip.irq_set_wake = fn; |
| 153 | } | 170 | } |
| 154 | 171 | ||
| 172 | void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) | ||
| 173 | { | ||
| 174 | BUG_ON(irq_nr > MAX_INTERNAL_IRQS); | ||
| 175 | |||
| 176 | pxa_irq_base = io_p2v(0x40d00000); | ||
| 177 | cpu_has_ipr = !cpu_is_pxa25x(); | ||
| 178 | pxa_init_irq_common(NULL, irq_nr, fn); | ||
| 179 | } | ||
| 180 | |||
| 155 | #ifdef CONFIG_PM | 181 | #ifdef CONFIG_PM |
| 156 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; | 182 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; |
| 157 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; | 183 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; |
| @@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = { | |||
| 203 | }; | 229 | }; |
| 204 | 230 | ||
| 205 | #ifdef CONFIG_OF | 231 | #ifdef CONFIG_OF |
| 206 | static struct irq_domain *pxa_irq_domain; | ||
| 207 | |||
| 208 | static int pxa_irq_map(struct irq_domain *h, unsigned int virq, | ||
| 209 | irq_hw_number_t hw) | ||
| 210 | { | ||
| 211 | void __iomem *base = irq_base(hw / 32); | ||
| 212 | |||
| 213 | /* initialize interrupt priority */ | ||
| 214 | if (cpu_has_ipr) | ||
| 215 | __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); | ||
| 216 | |||
| 217 | irq_set_chip_and_handler(hw, &pxa_internal_irq_chip, | ||
| 218 | handle_level_irq); | ||
| 219 | irq_set_chip_data(hw, base); | ||
| 220 | set_irq_flags(hw, IRQF_VALID); | ||
| 221 | |||
| 222 | return 0; | ||
| 223 | } | ||
| 224 | |||
| 225 | static struct irq_domain_ops pxa_irq_ops = { | ||
| 226 | .map = pxa_irq_map, | ||
| 227 | .xlate = irq_domain_xlate_onecell, | ||
| 228 | }; | ||
| 229 | |||
| 230 | static const struct of_device_id intc_ids[] __initconst = { | 232 | static const struct of_device_id intc_ids[] __initconst = { |
| 231 | { .compatible = "marvell,pxa-intc", }, | 233 | { .compatible = "marvell,pxa-intc", }, |
| 232 | {} | 234 | {} |
| @@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) | |||
| 236 | { | 238 | { |
| 237 | struct device_node *node; | 239 | struct device_node *node; |
| 238 | struct resource res; | 240 | struct resource res; |
| 239 | int n, ret; | 241 | int ret; |
| 240 | 242 | ||
| 241 | node = of_find_matching_node(NULL, intc_ids); | 243 | node = of_find_matching_node(NULL, intc_ids); |
| 242 | if (!node) { | 244 | if (!node) { |
| @@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) | |||
| 267 | return; | 269 | return; |
| 268 | } | 270 | } |
| 269 | 271 | ||
| 270 | pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0, | 272 | pxa_init_irq_common(node, pxa_internal_irq_nr, fn); |
| 271 | &pxa_irq_ops, NULL); | ||
| 272 | if (!pxa_irq_domain) | ||
| 273 | panic("Unable to add PXA IRQ domain\n"); | ||
| 274 | |||
| 275 | irq_set_default_host(pxa_irq_domain); | ||
| 276 | |||
| 277 | for (n = 0; n < pxa_internal_irq_nr; n += 32) { | ||
| 278 | void __iomem *base = irq_base(n >> 5); | ||
| 279 | |||
| 280 | __raw_writel(0, base + ICMR); /* disable all IRQs */ | ||
| 281 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ | ||
| 282 | } | ||
| 283 | |||
| 284 | /* only unmasked interrupts kick us out of idle */ | ||
| 285 | __raw_writel(1, irq_base(0) + ICCR); | ||
| 286 | |||
| 287 | pxa_internal_irq_chip.irq_set_wake = fn; | ||
| 288 | } | 273 | } |
| 289 | #endif /* CONFIG_OF */ | 274 | #endif /* CONFIG_OF */ |
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 205f9bf3821e..ac2ae5c71ab4 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
| @@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = { | |||
| 412 | }; | 412 | }; |
| 413 | 413 | ||
| 414 | static struct platform_device can_regulator_device = { | 414 | static struct platform_device can_regulator_device = { |
| 415 | .name = "reg-fixed-volage", | 415 | .name = "reg-fixed-voltage", |
| 416 | .id = 0, | 416 | .id = 0, |
| 417 | .dev = { | 417 | .dev = { |
| 418 | .platform_data = &can_regulator_pdata, | 418 | .platform_data = &can_regulator_pdata, |
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index a77604fbaf25..81502b90dd91 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
| @@ -1,10 +1,12 @@ | |||
| 1 | menuconfig ARCH_SUNXI | 1 | menuconfig ARCH_SUNXI |
| 2 | bool "Allwinner SoCs" if ARCH_MULTI_V7 | 2 | bool "Allwinner SoCs" if ARCH_MULTI_V7 |
| 3 | select ARCH_REQUIRE_GPIOLIB | 3 | select ARCH_REQUIRE_GPIOLIB |
| 4 | select ARCH_HAS_RESET_CONTROLLER | ||
| 4 | select CLKSRC_MMIO | 5 | select CLKSRC_MMIO |
| 5 | select GENERIC_IRQ_CHIP | 6 | select GENERIC_IRQ_CHIP |
| 6 | select PINCTRL | 7 | select PINCTRL |
| 7 | select SUN4I_TIMER | 8 | select SUN4I_TIMER |
| 9 | select RESET_CONTROLLER | ||
| 8 | 10 | ||
| 9 | if ARCH_SUNXI | 11 | if ARCH_SUNXI |
| 10 | 12 | ||
| @@ -20,10 +22,8 @@ config MACH_SUN5I | |||
| 20 | config MACH_SUN6I | 22 | config MACH_SUN6I |
| 21 | bool "Allwinner A31 (sun6i) SoCs support" | 23 | bool "Allwinner A31 (sun6i) SoCs support" |
| 22 | default ARCH_SUNXI | 24 | default ARCH_SUNXI |
| 23 | select ARCH_HAS_RESET_CONTROLLER | ||
| 24 | select ARM_GIC | 25 | select ARM_GIC |
| 25 | select MFD_SUN6I_PRCM | 26 | select MFD_SUN6I_PRCM |
| 26 | select RESET_CONTROLLER | ||
| 27 | select SUN5I_HSTIMER | 27 | select SUN5I_HSTIMER |
| 28 | 28 | ||
| 29 | config MACH_SUN7I | 29 | config MACH_SUN7I |
| @@ -37,16 +37,12 @@ config MACH_SUN7I | |||
| 37 | config MACH_SUN8I | 37 | config MACH_SUN8I |
| 38 | bool "Allwinner A23 (sun8i) SoCs support" | 38 | bool "Allwinner A23 (sun8i) SoCs support" |
| 39 | default ARCH_SUNXI | 39 | default ARCH_SUNXI |
| 40 | select ARCH_HAS_RESET_CONTROLLER | ||
| 41 | select ARM_GIC | 40 | select ARM_GIC |
| 42 | select MFD_SUN6I_PRCM | 41 | select MFD_SUN6I_PRCM |
| 43 | select RESET_CONTROLLER | ||
| 44 | 42 | ||
| 45 | config MACH_SUN9I | 43 | config MACH_SUN9I |
| 46 | bool "Allwinner (sun9i) SoCs support" | 44 | bool "Allwinner (sun9i) SoCs support" |
| 47 | default ARCH_SUNXI | 45 | default ARCH_SUNXI |
| 48 | select ARCH_HAS_RESET_CONTROLLER | ||
| 49 | select ARM_GIC | 46 | select ARM_GIC |
| 50 | select RESET_CONTROLLER | ||
| 51 | 47 | ||
| 52 | endif | 48 | endif |
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index db10169a08de..8ca94d379bc3 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
| @@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
| 799 | struct device *dev = &pdev->dev; | 799 | struct device *dev = &pdev->dev; |
| 800 | const struct of_device_id *match; | 800 | const struct of_device_id *match; |
| 801 | const struct dmtimer_platform_data *pdata; | 801 | const struct dmtimer_platform_data *pdata; |
| 802 | int ret; | ||
| 802 | 803 | ||
| 803 | match = of_match_device(of_match_ptr(omap_timer_match), dev); | 804 | match = of_match_device(of_match_ptr(omap_timer_match), dev); |
| 804 | pdata = match ? match->data : dev->platform_data; | 805 | pdata = match ? match->data : dev->platform_data; |
| @@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
| 860 | } | 861 | } |
| 861 | 862 | ||
| 862 | if (!timer->reserved) { | 863 | if (!timer->reserved) { |
| 863 | pm_runtime_get_sync(dev); | 864 | ret = pm_runtime_get_sync(dev); |
| 865 | if (ret < 0) { | ||
| 866 | dev_err(dev, "%s: pm_runtime_get_sync failed!\n", | ||
| 867 | __func__); | ||
| 868 | goto err_get_sync; | ||
| 869 | } | ||
| 864 | __omap_dm_timer_init_regs(timer); | 870 | __omap_dm_timer_init_regs(timer); |
| 865 | pm_runtime_put(dev); | 871 | pm_runtime_put(dev); |
| 866 | } | 872 | } |
| @@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
| 873 | dev_dbg(dev, "Device Probed.\n"); | 879 | dev_dbg(dev, "Device Probed.\n"); |
| 874 | 880 | ||
| 875 | return 0; | 881 | return 0; |
| 882 | |||
| 883 | err_get_sync: | ||
| 884 | pm_runtime_put_noidle(dev); | ||
| 885 | pm_runtime_disable(dev); | ||
| 886 | return ret; | ||
| 876 | } | 887 | } |
| 877 | 888 | ||
| 878 | /** | 889 | /** |
| @@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev) | |||
| 899 | } | 910 | } |
| 900 | spin_unlock_irqrestore(&dm_timer_lock, flags); | 911 | spin_unlock_irqrestore(&dm_timer_lock, flags); |
| 901 | 912 | ||
| 913 | pm_runtime_disable(&pdev->dev); | ||
| 914 | |||
| 902 | return ret; | 915 | return ret; |
| 903 | } | 916 | } |
| 904 | 917 | ||
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
index ea2b5666a16f..c9b89efe0f56 100644
--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
| @@ -8,7 +8,7 @@ | |||
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | /* SoC fixed clocks */ | 10 | /* SoC fixed clocks */ |
| 11 | soc_uartclk: refclk72738khz { | 11 | soc_uartclk: refclk7273800hz { |
| 12 | compatible = "fixed-clock"; | 12 | compatible = "fixed-clock"; |
| 13 | #clock-cells = <0>; | 13 | #clock-cells = <0>; |
| 14 | clock-frequency = <7273800>; | 14 | clock-frequency = <7273800>; |
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb9593079f29..d8c25b7b18fb 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
| @@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, | |||
| 246 | __ret; \ | 246 | __ret; \ |
| 247 | }) | 247 | }) |
| 248 | 248 | ||
| 249 | #define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 249 | #define _protect_cmpxchg_local(pcp, o, n) \ |
| 250 | #define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 250 | ({ \ |
| 251 | #define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 251 | typeof(*raw_cpu_ptr(&(pcp))) __ret; \ |
| 252 | #define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 252 | preempt_disable(); \ |
| 253 | 253 | __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ | |
| 254 | #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ | 254 | preempt_enable(); \ |
| 255 | cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \ | 255 | __ret; \ |
| 256 | o1, o2, n1, n2) | 256 | }) |
| 257 | |||
| 258 | #define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
| 259 | #define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
| 260 | #define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
| 261 | #define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
| 262 | |||
| 263 | #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ | ||
| 264 | ({ \ | ||
| 265 | int __ret; \ | ||
| 266 | preempt_disable(); \ | ||
| 267 | __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ | ||
| 268 | raw_cpu_ptr(&(ptr2)), \ | ||
| 269 | o1, o2, n1, n2); \ | ||
| 270 | preempt_enable(); \ | ||
| 271 | __ret; \ | ||
| 272 | }) | ||
| 257 | 273 | ||
| 258 | #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) | 274 | #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) |
| 259 | #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) | 275 | #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) |
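The reason for the preempt_disable()/preempt_enable() pairs above is that raw_cpu_ptr() and the subsequent cmpxchg must operate on the same CPU's copy; if the task migrated in between, it would update another CPU's variable. A small usage sketch with a hypothetical per-CPU variable:

	#include <linux/percpu.h>
	#include <linux/printk.h>

	static DEFINE_PER_CPU(unsigned long, claim_flag);	/* hypothetical */

	static void try_claim_this_cpu(void)
	{
		/* read, compare and exchange all happen on the local CPU's copy */
		if (this_cpu_cmpxchg(claim_flag, 0, 1) == 0)
			pr_debug("claimed flag on this CPU\n");
	}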
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33dfa62..101a42bde728 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
| @@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
| 151 | { | 151 | { |
| 152 | unsigned int cpu = smp_processor_id(); | 152 | unsigned int cpu = smp_processor_id(); |
| 153 | 153 | ||
| 154 | /* | ||
| 155 | * init_mm.pgd does not contain any user mappings and it is always | ||
| 156 | * active for kernel addresses in TTBR1. Just set the reserved TTBR0. | ||
| 157 | */ | ||
| 158 | if (next == &init_mm) { | ||
| 159 | cpu_set_reserved_ttbr0(); | ||
| 160 | return; | ||
| 161 | } | ||
| 162 | |||
| 154 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) | 163 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) |
| 155 | check_and_switch_context(next, tsk); | 164 | check_and_switch_context(next, tsk); |
| 156 | } | 165 | } |
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25bc596f..4fde8c1df97f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
| @@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, | |||
| 204 | return ret; | 204 | return ret; |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | #define _percpu_read(pcp) \ | ||
| 208 | ({ \ | ||
| 209 | typeof(pcp) __retval; \ | ||
| 210 | preempt_disable(); \ | ||
| 211 | __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \ | ||
| 212 | sizeof(pcp)); \ | ||
| 213 | preempt_enable(); \ | ||
| 214 | __retval; \ | ||
| 215 | }) | ||
| 216 | |||
| 217 | #define _percpu_write(pcp, val) \ | ||
| 218 | do { \ | ||
| 219 | preempt_disable(); \ | ||
| 220 | __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \ | ||
| 221 | sizeof(pcp)); \ | ||
| 222 | preempt_enable(); \ | ||
| 223 | } while(0) \ | ||
| 224 | |||
| 225 | #define _pcp_protect(operation, pcp, val) \ | ||
| 226 | ({ \ | ||
| 227 | typeof(pcp) __retval; \ | ||
| 228 | preempt_disable(); \ | ||
| 229 | __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \ | ||
| 230 | (val), sizeof(pcp)); \ | ||
| 231 | preempt_enable(); \ | ||
| 232 | __retval; \ | ||
| 233 | }) | ||
| 234 | |||
| 207 | #define _percpu_add(pcp, val) \ | 235 | #define _percpu_add(pcp, val) \ |
| 208 | __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | 236 | _pcp_protect(__percpu_add, pcp, val) |
| 209 | 237 | ||
| 210 | #define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) | 238 | #define _percpu_add_return(pcp, val) _percpu_add(pcp, val) |
| 211 | 239 | ||
| 212 | #define _percpu_and(pcp, val) \ | 240 | #define _percpu_and(pcp, val) \ |
| 213 | __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | 241 | _pcp_protect(__percpu_and, pcp, val) |
| 214 | 242 | ||
| 215 | #define _percpu_or(pcp, val) \ | 243 | #define _percpu_or(pcp, val) \ |
| 216 | __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | 244 | _pcp_protect(__percpu_or, pcp, val) |
| 217 | |||
| 218 | #define _percpu_read(pcp) (typeof(pcp)) \ | ||
| 219 | (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp))) | ||
| 220 | |||
| 221 | #define _percpu_write(pcp, val) \ | ||
| 222 | __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)) | ||
| 223 | 245 | ||
| 224 | #define _percpu_xchg(pcp, val) (typeof(pcp)) \ | 246 | #define _percpu_xchg(pcp, val) (typeof(pcp)) \ |
| 225 | (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))) | 247 | _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)) |
| 226 | 248 | ||
| 227 | #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) | 249 | #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) |
| 228 | #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) | 250 | #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) |
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h index 9359e5048442..d5779b0ec573 100644 --- a/arch/metag/include/asm/io.h +++ b/arch/metag/include/asm/io.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define _ASM_METAG_IO_H | 2 | #define _ASM_METAG_IO_H |
| 3 | 3 | ||
| 4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 5 | #include <asm/pgtable-bits.h> | ||
| 5 | 6 | ||
| 6 | #define IO_SPACE_LIMIT 0 | 7 | #define IO_SPACE_LIMIT 0 |
| 7 | 8 | ||
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..25ba6729f496
--- /dev/null
+++ b/arch/metag/include/asm/pgtable-bits.h
| @@ -0,0 +1,104 @@ | |||
| 1 | /* | ||
| 2 | * Meta page table definitions. | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef _METAG_PGTABLE_BITS_H | ||
| 6 | #define _METAG_PGTABLE_BITS_H | ||
| 7 | |||
| 8 | #include <asm/metag_mem.h> | ||
| 9 | |||
| 10 | /* | ||
| 11 | * Definitions for MMU descriptors | ||
| 12 | * | ||
| 13 | * These are the hardware bits in the MMCU pte entries. | ||
| 14 | * Derived from the Meta toolkit headers. | ||
| 15 | */ | ||
| 16 | #define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT | ||
| 17 | #define _PAGE_WRITE MMCU_ENTRY_WR_BIT | ||
| 18 | #define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT | ||
| 19 | /* Write combine bit - this can cause writes to occur out of order */ | ||
| 20 | #define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT | ||
| 21 | /* Sys coherent bit - this bit is never used by Linux */ | ||
| 22 | #define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT | ||
| 23 | #define _PAGE_ALWAYS_ZERO_1 0x020 | ||
| 24 | #define _PAGE_CACHE_CTRL0 0x040 | ||
| 25 | #define _PAGE_CACHE_CTRL1 0x080 | ||
| 26 | #define _PAGE_ALWAYS_ZERO_2 0x100 | ||
| 27 | #define _PAGE_ALWAYS_ZERO_3 0x200 | ||
| 28 | #define _PAGE_ALWAYS_ZERO_4 0x400 | ||
| 29 | #define _PAGE_ALWAYS_ZERO_5 0x800 | ||
| 30 | |||
| 31 | /* These are software bits that we stuff into the gaps in the hardware | ||
| 32 | * pte entries that are not used. Note, these DO get stored in the actual | ||
| 33 | * hardware, but the hardware just does not use them. | ||
| 34 | */ | ||
| 35 | #define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1 | ||
| 36 | #define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2 | ||
| 37 | |||
| 38 | /* Pages owned, and protected by, the kernel. */ | ||
| 39 | #define _PAGE_KERNEL _PAGE_PRIV | ||
| 40 | |||
| 41 | /* No cacheing of this page */ | ||
| 42 | #define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S) | ||
| 43 | /* burst cacheing - good for data streaming */ | ||
| 44 | #define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S) | ||
| 45 | /* One cache way per thread */ | ||
| 46 | #define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S) | ||
| 47 | /* Full on cacheing */ | ||
| 48 | #define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) | ||
| 49 | |||
| 50 | #define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE) | ||
| 51 | |||
| 52 | /* which bits are used for cache control ... */ | ||
| 53 | #define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \ | ||
| 54 | _PAGE_WR_COMBINE) | ||
| 55 | |||
| 56 | /* This is a mask of the bits that pte_modify is allowed to change. */ | ||
| 57 | #define _PAGE_CHG_MASK (PAGE_MASK) | ||
| 58 | |||
| 59 | #define _PAGE_SZ_SHIFT 1 | ||
| 60 | #define _PAGE_SZ_4K (0x0) | ||
| 61 | #define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT) | ||
| 62 | #define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT) | ||
| 63 | #define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT) | ||
| 64 | #define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT) | ||
| 65 | #define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT) | ||
| 66 | #define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT) | ||
| 67 | #define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT) | ||
| 68 | #define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT) | ||
| 69 | #define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT) | ||
| 70 | #define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT) | ||
| 71 | #define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT) | ||
| 72 | |||
| 73 | #if defined(CONFIG_PAGE_SIZE_4K) | ||
| 74 | #define _PAGE_SZ (_PAGE_SZ_4K) | ||
| 75 | #elif defined(CONFIG_PAGE_SIZE_8K) | ||
| 76 | #define _PAGE_SZ (_PAGE_SZ_8K) | ||
| 77 | #elif defined(CONFIG_PAGE_SIZE_16K) | ||
| 78 | #define _PAGE_SZ (_PAGE_SZ_16K) | ||
| 79 | #endif | ||
| 80 | #define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT) | ||
| 81 | |||
| 82 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) | ||
| 83 | # define _PAGE_SZHUGE (_PAGE_SZ_8K) | ||
| 84 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) | ||
| 85 | # define _PAGE_SZHUGE (_PAGE_SZ_16K) | ||
| 86 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) | ||
| 87 | # define _PAGE_SZHUGE (_PAGE_SZ_32K) | ||
| 88 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
| 89 | # define _PAGE_SZHUGE (_PAGE_SZ_64K) | ||
| 90 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) | ||
| 91 | # define _PAGE_SZHUGE (_PAGE_SZ_128K) | ||
| 92 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
| 93 | # define _PAGE_SZHUGE (_PAGE_SZ_256K) | ||
| 94 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | ||
| 95 | # define _PAGE_SZHUGE (_PAGE_SZ_512K) | ||
| 96 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) | ||
| 97 | # define _PAGE_SZHUGE (_PAGE_SZ_1M) | ||
| 98 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) | ||
| 99 | # define _PAGE_SZHUGE (_PAGE_SZ_2M) | ||
| 100 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) | ||
| 101 | # define _PAGE_SZHUGE (_PAGE_SZ_4M) | ||
| 102 | #endif | ||
| 103 | |||
| 104 | #endif /* _METAG_PGTABLE_BITS_H */ | ||
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index d0604c0a8702..ffa3a3a2ecad 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
| @@ -5,6 +5,7 @@ | |||
| 5 | #ifndef _METAG_PGTABLE_H | 5 | #ifndef _METAG_PGTABLE_H |
| 6 | #define _METAG_PGTABLE_H | 6 | #define _METAG_PGTABLE_H |
| 7 | 7 | ||
| 8 | #include <asm/pgtable-bits.h> | ||
| 8 | #include <asm-generic/pgtable-nopmd.h> | 9 | #include <asm-generic/pgtable-nopmd.h> |
| 9 | 10 | ||
| 10 | /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ | 11 | /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ |
| @@ -21,100 +22,6 @@ | |||
| 21 | #endif | 22 | #endif |
| 22 | 23 | ||
| 23 | /* | 24 | /* |
| 24 | * Definitions for MMU descriptors | ||
| 25 | * | ||
| 26 | * These are the hardware bits in the MMCU pte entries. | ||
| 27 | * Derived from the Meta toolkit headers. | ||
| 28 | */ | ||
| 29 | #define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT | ||
| 30 | #define _PAGE_WRITE MMCU_ENTRY_WR_BIT | ||
| 31 | #define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT | ||
| 32 | /* Write combine bit - this can cause writes to occur out of order */ | ||
| 33 | #define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT | ||
| 34 | /* Sys coherent bit - this bit is never used by Linux */ | ||
| 35 | #define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT | ||
| 36 | #define _PAGE_ALWAYS_ZERO_1 0x020 | ||
| 37 | #define _PAGE_CACHE_CTRL0 0x040 | ||
| 38 | #define _PAGE_CACHE_CTRL1 0x080 | ||
| 39 | #define _PAGE_ALWAYS_ZERO_2 0x100 | ||
| 40 | #define _PAGE_ALWAYS_ZERO_3 0x200 | ||
| 41 | #define _PAGE_ALWAYS_ZERO_4 0x400 | ||
| 42 | #define _PAGE_ALWAYS_ZERO_5 0x800 | ||
| 43 | |||
| 44 | /* These are software bits that we stuff into the gaps in the hardware | ||
| 45 | * pte entries that are not used. Note, these DO get stored in the actual | ||
| 46 | * hardware, but the hardware just does not use them. | ||
| 47 | */ | ||
| 48 | #define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1 | ||
| 49 | #define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2 | ||
| 50 | |||
| 51 | /* Pages owned, and protected by, the kernel. */ | ||
| 52 | #define _PAGE_KERNEL _PAGE_PRIV | ||
| 53 | |||
| 54 | /* No cacheing of this page */ | ||
| 55 | #define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S) | ||
| 56 | /* burst cacheing - good for data streaming */ | ||
| 57 | #define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S) | ||
| 58 | /* One cache way per thread */ | ||
| 59 | #define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S) | ||
| 60 | /* Full on cacheing */ | ||
| 61 | #define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) | ||
| 62 | |||
| 63 | #define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE) | ||
| 64 | |||
| 65 | /* which bits are used for cache control ... */ | ||
| 66 | #define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \ | ||
| 67 | _PAGE_WR_COMBINE) | ||
| 68 | |||
| 69 | /* This is a mask of the bits that pte_modify is allowed to change. */ | ||
| 70 | #define _PAGE_CHG_MASK (PAGE_MASK) | ||
| 71 | |||
| 72 | #define _PAGE_SZ_SHIFT 1 | ||
| 73 | #define _PAGE_SZ_4K (0x0) | ||
| 74 | #define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT) | ||
| 75 | #define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT) | ||
| 76 | #define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT) | ||
| 77 | #define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT) | ||
| 78 | #define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT) | ||
| 79 | #define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT) | ||
| 80 | #define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT) | ||
| 81 | #define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT) | ||
| 82 | #define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT) | ||
| 83 | #define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT) | ||
| 84 | #define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT) | ||
| 85 | |||
| 86 | #if defined(CONFIG_PAGE_SIZE_4K) | ||
| 87 | #define _PAGE_SZ (_PAGE_SZ_4K) | ||
| 88 | #elif defined(CONFIG_PAGE_SIZE_8K) | ||
| 89 | #define _PAGE_SZ (_PAGE_SZ_8K) | ||
| 90 | #elif defined(CONFIG_PAGE_SIZE_16K) | ||
| 91 | #define _PAGE_SZ (_PAGE_SZ_16K) | ||
| 92 | #endif | ||
| 93 | #define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT) | ||
| 94 | |||
| 95 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) | ||
| 96 | # define _PAGE_SZHUGE (_PAGE_SZ_8K) | ||
| 97 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) | ||
| 98 | # define _PAGE_SZHUGE (_PAGE_SZ_16K) | ||
| 99 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) | ||
| 100 | # define _PAGE_SZHUGE (_PAGE_SZ_32K) | ||
| 101 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
| 102 | # define _PAGE_SZHUGE (_PAGE_SZ_64K) | ||
| 103 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) | ||
| 104 | # define _PAGE_SZHUGE (_PAGE_SZ_128K) | ||
| 105 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
| 106 | # define _PAGE_SZHUGE (_PAGE_SZ_256K) | ||
| 107 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | ||
| 108 | # define _PAGE_SZHUGE (_PAGE_SZ_512K) | ||
| 109 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) | ||
| 110 | # define _PAGE_SZHUGE (_PAGE_SZ_1M) | ||
| 111 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) | ||
| 112 | # define _PAGE_SZHUGE (_PAGE_SZ_2M) | ||
| 113 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) | ||
| 114 | # define _PAGE_SZHUGE (_PAGE_SZ_4M) | ||
| 115 | #endif | ||
| 116 | |||
| 117 | /* | ||
| 118 | * The Linux memory management assumes a three-level page table setup. On | 25 | * The Linux memory management assumes a three-level page table setup. On |
| 119 | * Meta, we use that, but "fold" the mid level into the top-level page | 26 | * Meta, we use that, but "fold" the mid level into the top-level page |
| 120 | * table. | 27 | * table. |
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index f213f5b4c423..d17437238a2c 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h | |||
| @@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
| 26 | 26 | ||
| 27 | if (likely(pgd != NULL)) { | 27 | if (likely(pgd != NULL)) { |
| 28 | memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); | 28 | memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); |
| 29 | #ifdef CONFIG_64BIT | 29 | #if PT_NLEVELS == 3 |
| 30 | actual_pgd += PTRS_PER_PGD; | 30 | actual_pgd += PTRS_PER_PGD; |
| 31 | /* Populate first pmd with allocated memory. We mark it | 31 | /* Populate first pmd with allocated memory. We mark it |
| 32 | * with PxD_FLAG_ATTACHED as a signal to the system that this | 32 | * with PxD_FLAG_ATTACHED as a signal to the system that this |
| @@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
| 45 | 45 | ||
| 46 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 46 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
| 47 | { | 47 | { |
| 48 | #ifdef CONFIG_64BIT | 48 | #if PT_NLEVELS == 3 |
| 49 | pgd -= PTRS_PER_PGD; | 49 | pgd -= PTRS_PER_PGD; |
| 50 | #endif | 50 | #endif |
| 51 | free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); | 51 | free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); |
| @@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | |||
| 72 | 72 | ||
| 73 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 73 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
| 74 | { | 74 | { |
| 75 | #ifdef CONFIG_64BIT | ||
| 76 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | 75 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) |
| 77 | /* This is the permanent pmd attached to the pgd; | 76 | /* |
| 78 | * cannot free it */ | 77 | * This is the permanent pmd attached to the pgd; |
| 78 | * cannot free it. | ||
| 79 | * Increment the counter to compensate for the decrement | ||
| 80 | * done by generic mm code. | ||
| 81 | */ | ||
| 82 | mm_inc_nr_pmds(mm); | ||
| 79 | return; | 83 | return; |
| 80 | #endif | ||
| 81 | free_pages((unsigned long)pmd, PMD_ORDER); | 84 | free_pages((unsigned long)pmd, PMD_ORDER); |
| 82 | } | 85 | } |
| 83 | 86 | ||
| @@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |||
| 99 | static inline void | 102 | static inline void |
| 100 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) | 103 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) |
| 101 | { | 104 | { |
| 102 | #ifdef CONFIG_64BIT | 105 | #if PT_NLEVELS == 3 |
| 103 | /* preserve the gateway marker if this is the beginning of | 106 | /* preserve the gateway marker if this is the beginning of |
| 104 | * the permanent pmd */ | 107 | * the permanent pmd */ |
| 105 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | 108 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) |
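The pmd_free() change above exists because the generic mm code decrements the mm's pmd counter unconditionally when a pmd is released, while the pmd that is permanently attached to the pgd on three-level configurations must never actually be freed. A minimal sketch of how the fixed helper reads, assuming PxD_FLAG_ATTACHED marks that permanent pmd:

    /* Sketch only; mirrors the hunk above. The attached pmd lives inside
     * the pgd allocation, so it is never freed -- instead the counter the
     * generic mm code already decremented is bumped back up.
     */
    static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
    {
            if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
                    mm_inc_nr_pmds(mm);
                    return;
            }
            free_pages((unsigned long)pmd, PMD_ORDER);
    }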
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 5a8997d63899..8eefb12d1d33 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
| @@ -55,8 +55,8 @@ | |||
| 55 | #define ENTRY_COMP(_name_) .word sys_##_name_ | 55 | #define ENTRY_COMP(_name_) .word sys_##_name_ |
| 56 | #endif | 56 | #endif |
| 57 | 57 | ||
| 58 | ENTRY_SAME(restart_syscall) /* 0 */ | 58 | 90: ENTRY_SAME(restart_syscall) /* 0 */ |
| 59 | ENTRY_SAME(exit) | 59 | 91: ENTRY_SAME(exit) |
| 60 | ENTRY_SAME(fork_wrapper) | 60 | ENTRY_SAME(fork_wrapper) |
| 61 | ENTRY_SAME(read) | 61 | ENTRY_SAME(read) |
| 62 | ENTRY_SAME(write) | 62 | ENTRY_SAME(write) |
| @@ -439,7 +439,10 @@ | |||
| 439 | ENTRY_SAME(bpf) | 439 | ENTRY_SAME(bpf) |
| 440 | ENTRY_COMP(execveat) | 440 | ENTRY_COMP(execveat) |
| 441 | 441 | ||
| 442 | /* Nothing yet */ | 442 | |
| 443 | .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) | ||
| 444 | .error "size of syscall table does not fit value of __NR_Linux_syscalls" | ||
| 445 | .endif | ||
| 443 | 446 | ||
| 444 | #undef ENTRY_SAME | 447 | #undef ENTRY_SAME |
| 445 | #undef ENTRY_DIFF | 448 | #undef ENTRY_DIFF |
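The .ifne/.error pair added above is an assemble-time assertion: the byte distance from label 90 (the start of the table) must equal __NR_Linux_syscalls entries of the width measured between labels 90 and 91. For readers more used to C, the invariant is the same one BUILD_BUG_ON() enforces; the sketch below is purely illustrative, since the real parisc table is built in assembly and the names here are made up.

    #include <linux/bug.h>
    #include <linux/init.h>
    #include <linux/kernel.h>

    /* Hypothetical example of the same build-time invariant in C. */
    #define EXAMPLE_NR_SYSCALLS 4

    static void * const example_table[] = {
            (void *)1, (void *)2, (void *)3, (void *)4,
    };

    static void __init check_example_table(void)
    {
            /* fails the build if the entry count and the constant diverge */
            BUILD_BUG_ON(ARRAY_SIZE(example_table) != EXAMPLE_NR_SYSCALLS);
    }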
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h index 2bf8e9307be9..4c8ad592ae33 100644 --- a/arch/powerpc/include/asm/cputhreads.h +++ b/arch/powerpc/include/asm/cputhreads.h | |||
| @@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads) | |||
| 55 | 55 | ||
| 56 | static inline int cpu_nr_cores(void) | 56 | static inline int cpu_nr_cores(void) |
| 57 | { | 57 | { |
| 58 | return NR_CPUS >> threads_shift; | 58 | return nr_cpu_ids >> threads_shift; |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | static inline cpumask_t cpu_online_cores_map(void) | 61 | static inline cpumask_t cpu_online_cores_map(void) |
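The cpu_nr_cores() change is easiest to see with numbers: NR_CPUS is the compile-time maximum, while nr_cpu_ids is the number of possible CPU ids actually present at boot, so dividing NR_CPUS by the threads per core over-reports cores on any machine smaller than the build limit. The figures below are illustrative only, not taken from the patch.

    /* Illustrative numbers: a kernel built with NR_CPUS == 2048, booted on
     * a machine exposing 160 possible CPU ids, 8 threads per core
     * (threads_shift == 3):
     *
     *   NR_CPUS    >> threads_shift == 2048 >> 3 == 256   (over-counted)
     *   nr_cpu_ids >> threads_shift ==  160 >> 3 ==  20   (actual cores)
     */
    static inline int cpu_nr_cores(void)
    {
            return nr_cpu_ids >> threads_shift;
    }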
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 03cd858a401c..4cbe23af400a 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
| @@ -153,6 +153,7 @@ | |||
| 153 | #define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff | 153 | #define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff |
| 154 | #define PPC_INST_MFTMR 0x7c0002dc | 154 | #define PPC_INST_MFTMR 0x7c0002dc |
| 155 | #define PPC_INST_MSGSND 0x7c00019c | 155 | #define PPC_INST_MSGSND 0x7c00019c |
| 156 | #define PPC_INST_MSGCLR 0x7c0001dc | ||
| 156 | #define PPC_INST_MSGSNDP 0x7c00011c | 157 | #define PPC_INST_MSGSNDP 0x7c00011c |
| 157 | #define PPC_INST_MTTMR 0x7c0003dc | 158 | #define PPC_INST_MTTMR 0x7c0003dc |
| 158 | #define PPC_INST_NOP 0x60000000 | 159 | #define PPC_INST_NOP 0x60000000 |
| @@ -309,6 +310,8 @@ | |||
| 309 | ___PPC_RB(b) | __PPC_EH(eh)) | 310 | ___PPC_RB(b) | __PPC_EH(eh)) |
| 310 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ | 311 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ |
| 311 | ___PPC_RB(b)) | 312 | ___PPC_RB(b)) |
| 313 | #define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \ | ||
| 314 | ___PPC_RB(b)) | ||
| 312 | #define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ | 315 | #define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ |
| 313 | ___PPC_RB(b)) | 316 | ___PPC_RB(b)) |
| 314 | #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ | 317 | #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 1c874fb533bb..af56b5c6c81a 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
| @@ -608,13 +608,16 @@ | |||
| 608 | #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ | 608 | #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ |
| 609 | #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ | 609 | #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ |
| 610 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ | 610 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ |
| 611 | #define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */ | ||
| 611 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ | 612 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ |
| 612 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ | 613 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ |
| 613 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ | 614 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ |
| 614 | #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ | 615 | #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ |
| 615 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ | 616 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ |
| 617 | #define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */ | ||
| 616 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ | 618 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ |
| 617 | #define SRR1_WAKERESET 0x00100000 /* System reset */ | 619 | #define SRR1_WAKERESET 0x00100000 /* System reset */ |
| 620 | #define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */ | ||
| 618 | #define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ | 621 | #define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ |
| 619 | #define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, | 622 | #define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, |
| 620 | * may not be recoverable */ | 623 | * may not be recoverable */ |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index f337666768a7..f83046878336 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
| @@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
| 437 | .machine_check_early = __machine_check_early_realmode_p8, | 437 | .machine_check_early = __machine_check_early_realmode_p8, |
| 438 | .platform = "power8", | 438 | .platform = "power8", |
| 439 | }, | 439 | }, |
| 440 | { /* Power8NVL */ | ||
| 441 | .pvr_mask = 0xffff0000, | ||
| 442 | .pvr_value = 0x004c0000, | ||
| 443 | .cpu_name = "POWER8NVL (raw)", | ||
| 444 | .cpu_features = CPU_FTRS_POWER8, | ||
| 445 | .cpu_user_features = COMMON_USER_POWER8, | ||
| 446 | .cpu_user_features2 = COMMON_USER2_POWER8, | ||
| 447 | .mmu_features = MMU_FTRS_POWER8, | ||
| 448 | .icache_bsize = 128, | ||
| 449 | .dcache_bsize = 128, | ||
| 450 | .num_pmcs = 6, | ||
| 451 | .pmc_type = PPC_PMC_IBM, | ||
| 452 | .oprofile_cpu_type = "ppc64/power8", | ||
| 453 | .oprofile_type = PPC_OPROFILE_INVALID, | ||
| 454 | .cpu_setup = __setup_cpu_power8, | ||
| 455 | .cpu_restore = __restore_cpu_power8, | ||
| 456 | .flush_tlb = __flush_tlb_power8, | ||
| 457 | .machine_check_early = __machine_check_early_realmode_p8, | ||
| 458 | .platform = "power8", | ||
| 459 | }, | ||
| 440 | { /* Power8 DD1: Does not support doorbell IPIs */ | 460 | { /* Power8 DD1: Does not support doorbell IPIs */ |
| 441 | .pvr_mask = 0xffffff00, | 461 | .pvr_mask = 0xffffff00, |
| 442 | .pvr_value = 0x004d0100, | 462 | .pvr_value = 0x004d0100, |
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index f4217819cc31..2128f3a96c32 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include <asm/dbell.h> | 18 | #include <asm/dbell.h> |
| 19 | #include <asm/irq_regs.h> | 19 | #include <asm/irq_regs.h> |
| 20 | #include <asm/kvm_ppc.h> | ||
| 20 | 21 | ||
| 21 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
| 22 | void doorbell_setup_this_cpu(void) | 23 | void doorbell_setup_this_cpu(void) |
| @@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs) | |||
| 41 | 42 | ||
| 42 | may_hard_irq_enable(); | 43 | may_hard_irq_enable(); |
| 43 | 44 | ||
| 45 | kvmppc_set_host_ipi(smp_processor_id(), 0); | ||
| 44 | __this_cpu_inc(irq_stat.doorbell_irqs); | 46 | __this_cpu_inc(irq_stat.doorbell_irqs); |
| 45 | 47 | ||
| 46 | smp_ipi_demux(); | 48 | smp_ipi_demux(); |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c2df8150bd7a..9519e6bdc6d7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
| @@ -1408,7 +1408,7 @@ machine_check_handle_early: | |||
| 1408 | bne 9f /* continue in V mode if we are. */ | 1408 | bne 9f /* continue in V mode if we are. */ |
| 1409 | 1409 | ||
| 1410 | 5: | 1410 | 5: |
| 1411 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 1411 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
| 1412 | /* | 1412 | /* |
| 1413 | * We are coming from kernel context. Check if we are coming from | 1413 | * We are coming from kernel context. Check if we are coming from |
| 1414 | * guest. if yes, then we can continue. We will fall through | 1414 | * guest. if yes, then we can continue. We will fall through |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index de4018a1bc4b..de747563d29d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
| @@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) | |||
| 636 | spin_lock(&vcpu->arch.vpa_update_lock); | 636 | spin_lock(&vcpu->arch.vpa_update_lock); |
| 637 | lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; | 637 | lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; |
| 638 | if (lppaca) | 638 | if (lppaca) |
| 639 | yield_count = lppaca->yield_count; | 639 | yield_count = be32_to_cpu(lppaca->yield_count); |
| 640 | spin_unlock(&vcpu->arch.vpa_update_lock); | 640 | spin_unlock(&vcpu->arch.vpa_update_lock); |
| 641 | return yield_count; | 641 | return yield_count; |
| 642 | } | 642 | } |
| @@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, | |||
| 942 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | 942 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, |
| 943 | bool preserve_top32) | 943 | bool preserve_top32) |
| 944 | { | 944 | { |
| 945 | struct kvm *kvm = vcpu->kvm; | ||
| 945 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 946 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
| 946 | u64 mask; | 947 | u64 mask; |
| 947 | 948 | ||
| 949 | mutex_lock(&kvm->lock); | ||
| 948 | spin_lock(&vc->lock); | 950 | spin_lock(&vc->lock); |
| 949 | /* | 951 | /* |
| 950 | * If ILE (interrupt little-endian) has changed, update the | 952 | * If ILE (interrupt little-endian) has changed, update the |
| 951 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. | 953 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. |
| 952 | */ | 954 | */ |
| 953 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { | 955 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { |
| 954 | struct kvm *kvm = vcpu->kvm; | ||
| 955 | struct kvm_vcpu *vcpu; | 956 | struct kvm_vcpu *vcpu; |
| 956 | int i; | 957 | int i; |
| 957 | 958 | ||
| 958 | mutex_lock(&kvm->lock); | ||
| 959 | kvm_for_each_vcpu(i, vcpu, kvm) { | 959 | kvm_for_each_vcpu(i, vcpu, kvm) { |
| 960 | if (vcpu->arch.vcore != vc) | 960 | if (vcpu->arch.vcore != vc) |
| 961 | continue; | 961 | continue; |
| @@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | |||
| 964 | else | 964 | else |
| 965 | vcpu->arch.intr_msr &= ~MSR_LE; | 965 | vcpu->arch.intr_msr &= ~MSR_LE; |
| 966 | } | 966 | } |
| 967 | mutex_unlock(&kvm->lock); | ||
| 968 | } | 967 | } |
| 969 | 968 | ||
| 970 | /* | 969 | /* |
| @@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | |||
| 981 | mask &= 0xFFFFFFFF; | 980 | mask &= 0xFFFFFFFF; |
| 982 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); | 981 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); |
| 983 | spin_unlock(&vc->lock); | 982 | spin_unlock(&vc->lock); |
| 983 | mutex_unlock(&kvm->lock); | ||
| 984 | } | 984 | } |
| 985 | 985 | ||
| 986 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | 986 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
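The kvmppc_set_lpcr() hunk is a lock-ordering fix: kvm->lock is a mutex and may sleep, so it cannot be acquired inside the vcore spinlock as the old ILE path did. The new code takes the mutex first and releases it last, giving every path the same order. A simplified sketch, with the per-vcpu update elided:

    /* Simplified sketch of the corrected ordering; not the full function. */
    static void set_lpcr_sketch(struct kvm *kvm, struct kvmppc_vcore *vc,
                                u64 new_lpcr, u64 mask)
    {
            mutex_lock(&kvm->lock);         /* outer: may sleep */
            spin_lock(&vc->lock);           /* inner: atomic */

            /* per-vcpu MSR_LE fixup when LPCR_ILE changes goes here */

            vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);

            spin_unlock(&vc->lock);
            mutex_unlock(&kvm->lock);
    }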
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bb94e6f20c81..6cbf1630cb70 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
| @@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |||
| 1005 | /* Save HEIR (HV emulation assist reg) in emul_inst | 1005 | /* Save HEIR (HV emulation assist reg) in emul_inst |
| 1006 | if this is an HEI (HV emulation interrupt, e40) */ | 1006 | if this is an HEI (HV emulation interrupt, e40) */ |
| 1007 | li r3,KVM_INST_FETCH_FAILED | 1007 | li r3,KVM_INST_FETCH_FAILED |
| 1008 | stw r3,VCPU_LAST_INST(r9) | ||
| 1008 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST | 1009 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST |
| 1009 | bne 11f | 1010 | bne 11f |
| 1010 | mfspr r3,SPRN_HEIR | 1011 | mfspr r3,SPRN_HEIR |
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index fc34025ef822..38a45088f633 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c | |||
| @@ -33,6 +33,8 @@ | |||
| 33 | #include <asm/runlatch.h> | 33 | #include <asm/runlatch.h> |
| 34 | #include <asm/code-patching.h> | 34 | #include <asm/code-patching.h> |
| 35 | #include <asm/dbell.h> | 35 | #include <asm/dbell.h> |
| 36 | #include <asm/kvm_ppc.h> | ||
| 37 | #include <asm/ppc-opcode.h> | ||
| 36 | 38 | ||
| 37 | #include "powernv.h" | 39 | #include "powernv.h" |
| 38 | 40 | ||
| @@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void) | |||
| 149 | static void pnv_smp_cpu_kill_self(void) | 151 | static void pnv_smp_cpu_kill_self(void) |
| 150 | { | 152 | { |
| 151 | unsigned int cpu; | 153 | unsigned int cpu; |
| 152 | unsigned long srr1; | 154 | unsigned long srr1, wmask; |
| 153 | u32 idle_states; | 155 | u32 idle_states; |
| 154 | 156 | ||
| 155 | /* Standard hot unplug procedure */ | 157 | /* Standard hot unplug procedure */ |
| @@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void) | |||
| 161 | generic_set_cpu_dead(cpu); | 163 | generic_set_cpu_dead(cpu); |
| 162 | smp_wmb(); | 164 | smp_wmb(); |
| 163 | 165 | ||
| 166 | wmask = SRR1_WAKEMASK; | ||
| 167 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
| 168 | wmask = SRR1_WAKEMASK_P8; | ||
| 169 | |||
| 164 | idle_states = pnv_get_supported_cpuidle_states(); | 170 | idle_states = pnv_get_supported_cpuidle_states(); |
| 165 | /* We don't want to take decrementer interrupts while we are offline, | 171 | /* We don't want to take decrementer interrupts while we are offline, |
| 166 | * so clear LPCR:PECE1. We keep PECE2 enabled. | 172 | * so clear LPCR:PECE1. We keep PECE2 enabled. |
| @@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void) | |||
| 191 | * having finished executing in a KVM guest, then srr1 | 197 | * having finished executing in a KVM guest, then srr1 |
| 192 | * contains 0. | 198 | * contains 0. |
| 193 | */ | 199 | */ |
| 194 | if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) { | 200 | if ((srr1 & wmask) == SRR1_WAKEEE) { |
| 195 | icp_native_flush_interrupt(); | 201 | icp_native_flush_interrupt(); |
| 196 | local_paca->irq_happened &= PACA_IRQ_HARD_DIS; | 202 | local_paca->irq_happened &= PACA_IRQ_HARD_DIS; |
| 197 | smp_mb(); | 203 | smp_mb(); |
| 204 | } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { | ||
| 205 | unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); | ||
| 206 | asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); | ||
| 207 | kvmppc_set_host_ipi(cpu, 0); | ||
| 198 | } | 208 | } |
| 199 | 209 | ||
| 200 | if (cpu_core_split_required()) | 210 | if (cpu_core_split_required()) |
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 90cf3dcbd9f2..8f35d525cede 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c | |||
| @@ -25,10 +25,10 @@ | |||
| 25 | static struct kobject *mobility_kobj; | 25 | static struct kobject *mobility_kobj; |
| 26 | 26 | ||
| 27 | struct update_props_workarea { | 27 | struct update_props_workarea { |
| 28 | u32 phandle; | 28 | __be32 phandle; |
| 29 | u32 state; | 29 | __be32 state; |
| 30 | u64 reserved; | 30 | __be64 reserved; |
| 31 | u32 nprops; | 31 | __be32 nprops; |
| 32 | } __packed; | 32 | } __packed; |
| 33 | 33 | ||
| 34 | #define NODE_ACTION_MASK 0xff000000 | 34 | #define NODE_ACTION_MASK 0xff000000 |
| @@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope) | |||
| 54 | return rc; | 54 | return rc; |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static int delete_dt_node(u32 phandle) | 57 | static int delete_dt_node(__be32 phandle) |
| 58 | { | 58 | { |
| 59 | struct device_node *dn; | 59 | struct device_node *dn; |
| 60 | 60 | ||
| 61 | dn = of_find_node_by_phandle(phandle); | 61 | dn = of_find_node_by_phandle(be32_to_cpu(phandle)); |
| 62 | if (!dn) | 62 | if (!dn) |
| 63 | return -ENOENT; | 63 | return -ENOENT; |
| 64 | 64 | ||
| @@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop, | |||
| 127 | return 0; | 127 | return 0; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | static int update_dt_node(u32 phandle, s32 scope) | 130 | static int update_dt_node(__be32 phandle, s32 scope) |
| 131 | { | 131 | { |
| 132 | struct update_props_workarea *upwa; | 132 | struct update_props_workarea *upwa; |
| 133 | struct device_node *dn; | 133 | struct device_node *dn; |
| @@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
| 136 | char *prop_data; | 136 | char *prop_data; |
| 137 | char *rtas_buf; | 137 | char *rtas_buf; |
| 138 | int update_properties_token; | 138 | int update_properties_token; |
| 139 | u32 nprops; | ||
| 139 | u32 vd; | 140 | u32 vd; |
| 140 | 141 | ||
| 141 | update_properties_token = rtas_token("ibm,update-properties"); | 142 | update_properties_token = rtas_token("ibm,update-properties"); |
| @@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
| 146 | if (!rtas_buf) | 147 | if (!rtas_buf) |
| 147 | return -ENOMEM; | 148 | return -ENOMEM; |
| 148 | 149 | ||
| 149 | dn = of_find_node_by_phandle(phandle); | 150 | dn = of_find_node_by_phandle(be32_to_cpu(phandle)); |
| 150 | if (!dn) { | 151 | if (!dn) { |
| 151 | kfree(rtas_buf); | 152 | kfree(rtas_buf); |
| 152 | return -ENOENT; | 153 | return -ENOENT; |
| @@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
| 162 | break; | 163 | break; |
| 163 | 164 | ||
| 164 | prop_data = rtas_buf + sizeof(*upwa); | 165 | prop_data = rtas_buf + sizeof(*upwa); |
| 166 | nprops = be32_to_cpu(upwa->nprops); | ||
| 165 | 167 | ||
| 166 | /* On the first call to ibm,update-properties for a node the | 168 | /* On the first call to ibm,update-properties for a node the |
| 167 | * the first property value descriptor contains an empty | 169 | * the first property value descriptor contains an empty |
| @@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
| 170 | */ | 172 | */ |
| 171 | if (*prop_data == 0) { | 173 | if (*prop_data == 0) { |
| 172 | prop_data++; | 174 | prop_data++; |
| 173 | vd = *(u32 *)prop_data; | 175 | vd = be32_to_cpu(*(__be32 *)prop_data); |
| 174 | prop_data += vd + sizeof(vd); | 176 | prop_data += vd + sizeof(vd); |
| 175 | upwa->nprops--; | 177 | nprops--; |
| 176 | } | 178 | } |
| 177 | 179 | ||
| 178 | for (i = 0; i < upwa->nprops; i++) { | 180 | for (i = 0; i < nprops; i++) { |
| 179 | char *prop_name; | 181 | char *prop_name; |
| 180 | 182 | ||
| 181 | prop_name = prop_data; | 183 | prop_name = prop_data; |
| 182 | prop_data += strlen(prop_name) + 1; | 184 | prop_data += strlen(prop_name) + 1; |
| 183 | vd = *(u32 *)prop_data; | 185 | vd = be32_to_cpu(*(__be32 *)prop_data); |
| 184 | prop_data += sizeof(vd); | 186 | prop_data += sizeof(vd); |
| 185 | 187 | ||
| 186 | switch (vd) { | 188 | switch (vd) { |
| @@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
| 212 | return 0; | 214 | return 0; |
| 213 | } | 215 | } |
| 214 | 216 | ||
| 215 | static int add_dt_node(u32 parent_phandle, u32 drc_index) | 217 | static int add_dt_node(__be32 parent_phandle, __be32 drc_index) |
| 216 | { | 218 | { |
| 217 | struct device_node *dn; | 219 | struct device_node *dn; |
| 218 | struct device_node *parent_dn; | 220 | struct device_node *parent_dn; |
| 219 | int rc; | 221 | int rc; |
| 220 | 222 | ||
| 221 | parent_dn = of_find_node_by_phandle(parent_phandle); | 223 | parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle)); |
| 222 | if (!parent_dn) | 224 | if (!parent_dn) |
| 223 | return -ENOENT; | 225 | return -ENOENT; |
| 224 | 226 | ||
| @@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index) | |||
| 237 | int pseries_devicetree_update(s32 scope) | 239 | int pseries_devicetree_update(s32 scope) |
| 238 | { | 240 | { |
| 239 | char *rtas_buf; | 241 | char *rtas_buf; |
| 240 | u32 *data; | 242 | __be32 *data; |
| 241 | int update_nodes_token; | 243 | int update_nodes_token; |
| 242 | int rc; | 244 | int rc; |
| 243 | 245 | ||
| @@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope) | |||
| 254 | if (rc && rc != 1) | 256 | if (rc && rc != 1) |
| 255 | break; | 257 | break; |
| 256 | 258 | ||
| 257 | data = (u32 *)rtas_buf + 4; | 259 | data = (__be32 *)rtas_buf + 4; |
| 258 | while (*data & NODE_ACTION_MASK) { | 260 | while (be32_to_cpu(*data) & NODE_ACTION_MASK) { |
| 259 | int i; | 261 | int i; |
| 260 | u32 action = *data & NODE_ACTION_MASK; | 262 | u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK; |
| 261 | int node_count = *data & NODE_COUNT_MASK; | 263 | u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK; |
| 262 | 264 | ||
| 263 | data++; | 265 | data++; |
| 264 | 266 | ||
| 265 | for (i = 0; i < node_count; i++) { | 267 | for (i = 0; i < node_count; i++) { |
| 266 | u32 phandle = *data++; | 268 | __be32 phandle = *data++; |
| 267 | u32 drc_index; | 269 | __be32 drc_index; |
| 268 | 270 | ||
| 269 | switch (action) { | 271 | switch (action) { |
| 270 | case DELETE_DT_NODE: | 272 | case DELETE_DT_NODE: |
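All of the mobility.c changes follow one rule: the ibm,update-nodes / ibm,update-properties work area is big-endian regardless of kernel endianness, so every multi-byte field must be converted with be32_to_cpu() (or be64_to_cpu()) before use, and raw values must never drive loop bounds or arithmetic. A minimal sketch of the parsing pattern; the buffer name is illustrative:

    #include <asm/byteorder.h>

    /* Field layout mirrors the patched update_props_workarea above. */
    struct update_props_workarea {
            __be32 phandle;
            __be32 state;
            __be64 reserved;
            __be32 nprops;
    } __packed;

    static u32 workarea_nprops(const char *rtas_buf)
    {
            const struct update_props_workarea *upwa = (const void *)rtas_buf;

            /* convert once, then use the CPU-endian copy everywhere */
            return be32_to_cpu(upwa->nprops);
    }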
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index c9df40b5c0ac..c9c875d9ed31 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h | |||
| @@ -211,7 +211,7 @@ do { \ | |||
| 211 | 211 | ||
| 212 | extern unsigned long mmap_rnd_mask; | 212 | extern unsigned long mmap_rnd_mask; |
| 213 | 213 | ||
| 214 | #define STACK_RND_MASK (mmap_rnd_mask) | 214 | #define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask) |
| 215 | 215 | ||
| 216 | #define ARCH_DLINFO \ | 216 | #define ARCH_DLINFO \ |
| 217 | do { \ | 217 | do { \ |
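The STACK_RND_MASK change matters because the mask feeds generic stack randomization: a 31-bit compat task given the full 64-bit mmap_rnd_mask could have its stack randomized outside its 2 GB address space. The sketch below shows roughly how the mask is consumed; the pattern is assumed from the common randomize_stack_top() helper and is simplified here.

    /* Assumed/simplified consumer of STACK_RND_MASK; compat tasks need the
     * smaller 0x7ff mask so the PAGE_SHIFTed randomness stays addressable.
     */
    static unsigned long randomize_stack_top_sketch(unsigned long stack_top)
    {
            unsigned long rnd = 0;

            if (current->flags & PF_RANDOMIZE) {
                    rnd = (unsigned long)get_random_int() & STACK_RND_MASK;
                    rnd <<= PAGE_SHIFT;
            }
            return PAGE_ALIGN(stack_top) - rnd;
    }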
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 82c19899574f..6c79f1b44fe7 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
| @@ -57,6 +57,44 @@ | |||
| 57 | 57 | ||
| 58 | unsigned long ftrace_plt; | 58 | unsigned long ftrace_plt; |
| 59 | 59 | ||
| 60 | static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn) | ||
| 61 | { | ||
| 62 | #ifdef CC_USING_HOTPATCH | ||
| 63 | /* brcl 0,0 */ | ||
| 64 | insn->opc = 0xc004; | ||
| 65 | insn->disp = 0; | ||
| 66 | #else | ||
| 67 | /* stg r14,8(r15) */ | ||
| 68 | insn->opc = 0xe3e0; | ||
| 69 | insn->disp = 0xf0080024; | ||
| 70 | #endif | ||
| 71 | } | ||
| 72 | |||
| 73 | static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn) | ||
| 74 | { | ||
| 75 | #ifdef CONFIG_KPROBES | ||
| 76 | if (insn->opc == BREAKPOINT_INSTRUCTION) | ||
| 77 | return 1; | ||
| 78 | #endif | ||
| 79 | return 0; | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn) | ||
| 83 | { | ||
| 84 | #ifdef CONFIG_KPROBES | ||
| 85 | insn->opc = BREAKPOINT_INSTRUCTION; | ||
| 86 | insn->disp = KPROBE_ON_FTRACE_NOP; | ||
| 87 | #endif | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn) | ||
| 91 | { | ||
| 92 | #ifdef CONFIG_KPROBES | ||
| 93 | insn->opc = BREAKPOINT_INSTRUCTION; | ||
| 94 | insn->disp = KPROBE_ON_FTRACE_CALL; | ||
| 95 | #endif | ||
| 96 | } | ||
| 97 | |||
| 60 | int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, | 98 | int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, |
| 61 | unsigned long addr) | 99 | unsigned long addr) |
| 62 | { | 100 | { |
| @@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | |||
| 72 | return -EFAULT; | 110 | return -EFAULT; |
| 73 | if (addr == MCOUNT_ADDR) { | 111 | if (addr == MCOUNT_ADDR) { |
| 74 | /* Initial code replacement */ | 112 | /* Initial code replacement */ |
| 75 | #ifdef CC_USING_HOTPATCH | 113 | ftrace_generate_orig_insn(&orig); |
| 76 | /* We expect to see brcl 0,0 */ | ||
| 77 | ftrace_generate_nop_insn(&orig); | ||
| 78 | #else | ||
| 79 | /* We expect to see stg r14,8(r15) */ | ||
| 80 | orig.opc = 0xe3e0; | ||
| 81 | orig.disp = 0xf0080024; | ||
| 82 | #endif | ||
| 83 | ftrace_generate_nop_insn(&new); | 114 | ftrace_generate_nop_insn(&new); |
| 84 | } else if (old.opc == BREAKPOINT_INSTRUCTION) { | 115 | } else if (is_kprobe_on_ftrace(&old)) { |
| 85 | /* | 116 | /* |
| 86 | * If we find a breakpoint instruction, a kprobe has been | 117 | * If we find a breakpoint instruction, a kprobe has been |
| 87 | * placed at the beginning of the function. We write the | 118 | * placed at the beginning of the function. We write the |
| @@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | |||
| 89 | * bytes of the original instruction so that the kprobes | 120 | * bytes of the original instruction so that the kprobes |
| 90 | * handler can execute a nop, if it reaches this breakpoint. | 121 | * handler can execute a nop, if it reaches this breakpoint. |
| 91 | */ | 122 | */ |
| 92 | new.opc = orig.opc = BREAKPOINT_INSTRUCTION; | 123 | ftrace_generate_kprobe_call_insn(&orig); |
| 93 | orig.disp = KPROBE_ON_FTRACE_CALL; | 124 | ftrace_generate_kprobe_nop_insn(&new); |
| 94 | new.disp = KPROBE_ON_FTRACE_NOP; | ||
| 95 | } else { | 125 | } else { |
| 96 | /* Replace ftrace call with a nop. */ | 126 | /* Replace ftrace call with a nop. */ |
| 97 | ftrace_generate_call_insn(&orig, rec->ip); | 127 | ftrace_generate_call_insn(&orig, rec->ip); |
| @@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
| 111 | 141 | ||
| 112 | if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) | 142 | if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) |
| 113 | return -EFAULT; | 143 | return -EFAULT; |
| 114 | if (old.opc == BREAKPOINT_INSTRUCTION) { | 144 | if (is_kprobe_on_ftrace(&old)) { |
| 115 | /* | 145 | /* |
| 116 | * If we find a breakpoint instruction, a kprobe has been | 146 | * If we find a breakpoint instruction, a kprobe has been |
| 117 | * placed at the beginning of the function. We write the | 147 | * placed at the beginning of the function. We write the |
| @@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
| 119 | * bytes of the original instruction so that the kprobes | 149 | * bytes of the original instruction so that the kprobes |
| 120 | * handler can execute a brasl if it reaches this breakpoint. | 150 | * handler can execute a brasl if it reaches this breakpoint. |
| 121 | */ | 151 | */ |
| 122 | new.opc = orig.opc = BREAKPOINT_INSTRUCTION; | 152 | ftrace_generate_kprobe_nop_insn(&orig); |
| 123 | orig.disp = KPROBE_ON_FTRACE_NOP; | 153 | ftrace_generate_kprobe_call_insn(&new); |
| 124 | new.disp = KPROBE_ON_FTRACE_CALL; | ||
| 125 | } else { | 154 | } else { |
| 126 | /* Replace nop with an ftrace call. */ | 155 | /* Replace nop with an ftrace call. */ |
| 127 | ftrace_generate_nop_insn(&orig); | 156 | ftrace_generate_nop_insn(&orig); |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index c3f8d157cb0d..e6a1578fc000 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
| @@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); | |||
| 1415 | 1415 | ||
| 1416 | static struct attribute *cpumsf_pmu_events_attr[] = { | 1416 | static struct attribute *cpumsf_pmu_events_attr[] = { |
| 1417 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), | 1417 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), |
| 1418 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), | 1418 | NULL, |
| 1419 | NULL, | 1419 | NULL, |
| 1420 | }; | 1420 | }; |
| 1421 | 1421 | ||
| @@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void) | |||
| 1606 | return -EINVAL; | 1606 | return -EINVAL; |
| 1607 | } | 1607 | } |
| 1608 | 1608 | ||
| 1609 | if (si.ad) | 1609 | if (si.ad) { |
| 1610 | sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); | 1610 | sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); |
| 1611 | cpumsf_pmu_events_attr[1] = | ||
| 1612 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); | ||
| 1613 | } | ||
| 1611 | 1614 | ||
| 1612 | sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); | 1615 | sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); |
| 1613 | if (!sfdbg) | 1616 | if (!sfdbg) |
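The perf_cpum_sf.c change publishes the SF_CYCLES_BASIC_DIAG event through sysfs only when the sampling facility actually reports diagnostic-sampling support (si.ad): the attribute array keeps a NULL placeholder that init fills in. A condensed sketch of that pattern, with the capability passed in as a plain flag:

    /* Condensed sketch; in the real driver the flag is si.ad, queried from
     * the sampling facility earlier in init_cpum_sampling_pmu().
     */
    static struct attribute *cpumsf_pmu_events_attr[] = {
            CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
            NULL,   /* SF_CYCLES_BASIC_DIAG slot, filled in if supported */
            NULL,   /* terminator */
    };

    static void __init publish_diag_event(bool diag_supported)
    {
            if (diag_supported)
                    cpumsf_pmu_events_attr[1] =
                            CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
    }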
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 6b09fdffbd2f..ca6294645dd3 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
| @@ -177,6 +177,17 @@ restart_entry: | |||
| 177 | lhi %r1,1 | 177 | lhi %r1,1 |
| 178 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE | 178 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE |
| 179 | sam64 | 179 | sam64 |
| 180 | #ifdef CONFIG_SMP | ||
| 181 | larl %r1,smp_cpu_mt_shift | ||
| 182 | icm %r1,15,0(%r1) | ||
| 183 | jz smt_done | ||
| 184 | llgfr %r1,%r1 | ||
| 185 | smt_loop: | ||
| 186 | sigp %r1,%r0,SIGP_SET_MULTI_THREADING | ||
| 187 | brc 8,smt_done /* accepted */ | ||
| 188 | brc 2,smt_loop /* busy, try again */ | ||
| 189 | smt_done: | ||
| 190 | #endif | ||
| 180 | larl %r1,.Lnew_pgm_check_psw | 191 | larl %r1,.Lnew_pgm_check_psw |
| 181 | lpswe 0(%r1) | 192 | lpswe 0(%r1) |
| 182 | pgm_check_entry: | 193 | pgm_check_entry: |
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 4f6725ff4c33..f5b6537306f0 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h | |||
| @@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, | |||
| 2957 | unsigned long reg_val); | 2957 | unsigned long reg_val); |
| 2958 | #endif | 2958 | #endif |
| 2959 | 2959 | ||
| 2960 | |||
| 2961 | #define HV_FAST_M7_GET_PERFREG 0x43 | ||
| 2962 | #define HV_FAST_M7_SET_PERFREG 0x44 | ||
| 2963 | |||
| 2964 | #ifndef __ASSEMBLY__ | ||
| 2965 | unsigned long sun4v_m7_get_perfreg(unsigned long reg_num, | ||
| 2966 | unsigned long *reg_val); | ||
| 2967 | unsigned long sun4v_m7_set_perfreg(unsigned long reg_num, | ||
| 2968 | unsigned long reg_val); | ||
| 2969 | #endif | ||
| 2970 | |||
| 2960 | /* Function numbers for HV_CORE_TRAP. */ | 2971 | /* Function numbers for HV_CORE_TRAP. */ |
| 2961 | #define HV_CORE_SET_VER 0x00 | 2972 | #define HV_CORE_SET_VER 0x00 |
| 2962 | #define HV_CORE_PUTCHAR 0x01 | 2973 | #define HV_CORE_PUTCHAR 0x01 |
| @@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, | |||
| 2981 | #define HV_GRP_SDIO 0x0108 | 2992 | #define HV_GRP_SDIO 0x0108 |
| 2982 | #define HV_GRP_SDIO_ERR 0x0109 | 2993 | #define HV_GRP_SDIO_ERR 0x0109 |
| 2983 | #define HV_GRP_REBOOT_DATA 0x0110 | 2994 | #define HV_GRP_REBOOT_DATA 0x0110 |
| 2995 | #define HV_GRP_M7_PERF 0x0114 | ||
| 2984 | #define HV_GRP_NIAG_PERF 0x0200 | 2996 | #define HV_GRP_NIAG_PERF 0x0200 |
| 2985 | #define HV_GRP_FIRE_PERF 0x0201 | 2997 | #define HV_GRP_FIRE_PERF 0x0201 |
| 2986 | #define HV_GRP_N2_CPU 0x0202 | 2998 | #define HV_GRP_N2_CPU 0x0202 |
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 5c55145bfbf0..662500fa555f 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c | |||
| @@ -48,6 +48,7 @@ static struct api_info api_table[] = { | |||
| 48 | { .group = HV_GRP_VT_CPU, }, | 48 | { .group = HV_GRP_VT_CPU, }, |
| 49 | { .group = HV_GRP_T5_CPU, }, | 49 | { .group = HV_GRP_T5_CPU, }, |
| 50 | { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, | 50 | { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, |
| 51 | { .group = HV_GRP_M7_PERF, }, | ||
| 51 | }; | 52 | }; |
| 52 | 53 | ||
| 53 | static DEFINE_SPINLOCK(hvapi_lock); | 54 | static DEFINE_SPINLOCK(hvapi_lock); |
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index caedf8320416..afbaba52d2f1 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S | |||
| @@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg) | |||
| 837 | retl | 837 | retl |
| 838 | nop | 838 | nop |
| 839 | ENDPROC(sun4v_t5_set_perfreg) | 839 | ENDPROC(sun4v_t5_set_perfreg) |
| 840 | |||
| 841 | ENTRY(sun4v_m7_get_perfreg) | ||
| 842 | mov %o1, %o4 | ||
| 843 | mov HV_FAST_M7_GET_PERFREG, %o5 | ||
| 844 | ta HV_FAST_TRAP | ||
| 845 | stx %o1, [%o4] | ||
| 846 | retl | ||
| 847 | nop | ||
| 848 | ENDPROC(sun4v_m7_get_perfreg) | ||
| 849 | |||
| 850 | ENTRY(sun4v_m7_set_perfreg) | ||
| 851 | mov HV_FAST_M7_SET_PERFREG, %o5 | ||
| 852 | ta HV_FAST_TRAP | ||
| 853 | retl | ||
| 854 | nop | ||
| 855 | ENDPROC(sun4v_m7_set_perfreg) | ||
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 7e967c8018c8..eb978c77c76a 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
| @@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = { | |||
| 217 | .pcr_nmi_disable = PCR_N4_PICNPT, | 217 | .pcr_nmi_disable = PCR_N4_PICNPT, |
| 218 | }; | 218 | }; |
| 219 | 219 | ||
| 220 | static u64 m7_pcr_read(unsigned long reg_num) | ||
| 221 | { | ||
| 222 | unsigned long val; | ||
| 223 | |||
| 224 | (void) sun4v_m7_get_perfreg(reg_num, &val); | ||
| 225 | |||
| 226 | return val; | ||
| 227 | } | ||
| 228 | |||
| 229 | static void m7_pcr_write(unsigned long reg_num, u64 val) | ||
| 230 | { | ||
| 231 | (void) sun4v_m7_set_perfreg(reg_num, val); | ||
| 232 | } | ||
| 233 | |||
| 234 | static const struct pcr_ops m7_pcr_ops = { | ||
| 235 | .read_pcr = m7_pcr_read, | ||
| 236 | .write_pcr = m7_pcr_write, | ||
| 237 | .read_pic = n4_pic_read, | ||
| 238 | .write_pic = n4_pic_write, | ||
| 239 | .nmi_picl_value = n4_picl_value, | ||
| 240 | .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | | ||
| 241 | PCR_N4_UTRACE | PCR_N4_TOE | | ||
| 242 | (26 << PCR_N4_SL_SHIFT)), | ||
| 243 | .pcr_nmi_disable = PCR_N4_PICNPT, | ||
| 244 | }; | ||
| 220 | 245 | ||
| 221 | static unsigned long perf_hsvc_group; | 246 | static unsigned long perf_hsvc_group; |
| 222 | static unsigned long perf_hsvc_major; | 247 | static unsigned long perf_hsvc_major; |
| @@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void) | |||
| 248 | perf_hsvc_group = HV_GRP_T5_CPU; | 273 | perf_hsvc_group = HV_GRP_T5_CPU; |
| 249 | break; | 274 | break; |
| 250 | 275 | ||
| 276 | case SUN4V_CHIP_SPARC_M7: | ||
| 277 | perf_hsvc_group = HV_GRP_M7_PERF; | ||
| 278 | break; | ||
| 279 | |||
| 251 | default: | 280 | default: |
| 252 | return -ENODEV; | 281 | return -ENODEV; |
| 253 | } | 282 | } |
| @@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void) | |||
| 293 | pcr_ops = &n5_pcr_ops; | 322 | pcr_ops = &n5_pcr_ops; |
| 294 | break; | 323 | break; |
| 295 | 324 | ||
| 325 | case SUN4V_CHIP_SPARC_M7: | ||
| 326 | pcr_ops = &m7_pcr_ops; | ||
| 327 | break; | ||
| 328 | |||
| 296 | default: | 329 | default: |
| 297 | ret = -ENODEV; | 330 | ret = -ENODEV; |
| 298 | break; | 331 | break; |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 46a5e4508752..86eebfa3b158 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
| @@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = { | |||
| 792 | .num_pic_regs = 4, | 792 | .num_pic_regs = 4, |
| 793 | }; | 793 | }; |
| 794 | 794 | ||
| 795 | static void sparc_m7_write_pmc(int idx, u64 val) | ||
| 796 | { | ||
| 797 | u64 pcr; | ||
| 798 | |||
| 799 | pcr = pcr_ops->read_pcr(idx); | ||
| 800 | /* ensure ov and ntc are reset */ | ||
| 801 | pcr &= ~(PCR_N4_OV | PCR_N4_NTC); | ||
| 802 | |||
| 803 | pcr_ops->write_pic(idx, val & 0xffffffff); | ||
| 804 | |||
| 805 | pcr_ops->write_pcr(idx, pcr); | ||
| 806 | } | ||
| 807 | |||
| 808 | static const struct sparc_pmu sparc_m7_pmu = { | ||
| 809 | .event_map = niagara4_event_map, | ||
| 810 | .cache_map = &niagara4_cache_map, | ||
| 811 | .max_events = ARRAY_SIZE(niagara4_perfmon_event_map), | ||
| 812 | .read_pmc = sparc_vt_read_pmc, | ||
| 813 | .write_pmc = sparc_m7_write_pmc, | ||
| 814 | .upper_shift = 5, | ||
| 815 | .lower_shift = 5, | ||
| 816 | .event_mask = 0x7ff, | ||
| 817 | .user_bit = PCR_N4_UTRACE, | ||
| 818 | .priv_bit = PCR_N4_STRACE, | ||
| 819 | |||
| 820 | /* We explicitly don't support hypervisor tracing. */ | ||
| 821 | .hv_bit = 0, | ||
| 822 | |||
| 823 | .irq_bit = PCR_N4_TOE, | ||
| 824 | .upper_nop = 0, | ||
| 825 | .lower_nop = 0, | ||
| 826 | .flags = 0, | ||
| 827 | .max_hw_events = 4, | ||
| 828 | .num_pcrs = 4, | ||
| 829 | .num_pic_regs = 4, | ||
| 830 | }; | ||
| 795 | static const struct sparc_pmu *sparc_pmu __read_mostly; | 831 | static const struct sparc_pmu *sparc_pmu __read_mostly; |
| 796 | 832 | ||
| 797 | static u64 event_encoding(u64 event_id, int idx) | 833 | static u64 event_encoding(u64 event_id, int idx) |
| @@ -960,6 +996,8 @@ out: | |||
| 960 | cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; | 996 | cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; |
| 961 | } | 997 | } |
| 962 | 998 | ||
| 999 | static void sparc_pmu_start(struct perf_event *event, int flags); | ||
| 1000 | |||
| 963 | /* On this PMU each PIC has it's own PCR control register. */ | 1001 | /* On this PMU each PIC has it's own PCR control register. */ |
| 964 | static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) | 1002 | static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) |
| 965 | { | 1003 | { |
| @@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) | |||
| 972 | struct perf_event *cp = cpuc->event[i]; | 1010 | struct perf_event *cp = cpuc->event[i]; |
| 973 | struct hw_perf_event *hwc = &cp->hw; | 1011 | struct hw_perf_event *hwc = &cp->hw; |
| 974 | int idx = hwc->idx; | 1012 | int idx = hwc->idx; |
| 975 | u64 enc; | ||
| 976 | 1013 | ||
| 977 | if (cpuc->current_idx[i] != PIC_NO_INDEX) | 1014 | if (cpuc->current_idx[i] != PIC_NO_INDEX) |
| 978 | continue; | 1015 | continue; |
| 979 | 1016 | ||
| 980 | sparc_perf_event_set_period(cp, hwc, idx); | ||
| 981 | cpuc->current_idx[i] = idx; | 1017 | cpuc->current_idx[i] = idx; |
| 982 | 1018 | ||
| 983 | enc = perf_event_get_enc(cpuc->events[i]); | 1019 | sparc_pmu_start(cp, PERF_EF_RELOAD); |
| 984 | cpuc->pcr[idx] &= ~mask_for_index(idx); | ||
| 985 | if (hwc->state & PERF_HES_STOPPED) | ||
| 986 | cpuc->pcr[idx] |= nop_for_index(idx); | ||
| 987 | else | ||
| 988 | cpuc->pcr[idx] |= event_encoding(enc, idx); | ||
| 989 | } | 1020 | } |
| 990 | out: | 1021 | out: |
| 991 | for (i = 0; i < cpuc->n_events; i++) { | 1022 | for (i = 0; i < cpuc->n_events; i++) { |
| @@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) | |||
| 1101 | int i; | 1132 | int i; |
| 1102 | 1133 | ||
| 1103 | local_irq_save(flags); | 1134 | local_irq_save(flags); |
| 1104 | perf_pmu_disable(event->pmu); | ||
| 1105 | 1135 | ||
| 1106 | for (i = 0; i < cpuc->n_events; i++) { | 1136 | for (i = 0; i < cpuc->n_events; i++) { |
| 1107 | if (event == cpuc->event[i]) { | 1137 | if (event == cpuc->event[i]) { |
| @@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) | |||
| 1127 | } | 1157 | } |
| 1128 | } | 1158 | } |
| 1129 | 1159 | ||
| 1130 | perf_pmu_enable(event->pmu); | ||
| 1131 | local_irq_restore(flags); | 1160 | local_irq_restore(flags); |
| 1132 | } | 1161 | } |
| 1133 | 1162 | ||
| @@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) | |||
| 1361 | unsigned long flags; | 1390 | unsigned long flags; |
| 1362 | 1391 | ||
| 1363 | local_irq_save(flags); | 1392 | local_irq_save(flags); |
| 1364 | perf_pmu_disable(event->pmu); | ||
| 1365 | 1393 | ||
| 1366 | n0 = cpuc->n_events; | 1394 | n0 = cpuc->n_events; |
| 1367 | if (n0 >= sparc_pmu->max_hw_events) | 1395 | if (n0 >= sparc_pmu->max_hw_events) |
| @@ -1394,7 +1422,6 @@ nocheck: | |||
| 1394 | 1422 | ||
| 1395 | ret = 0; | 1423 | ret = 0; |
| 1396 | out: | 1424 | out: |
| 1397 | perf_pmu_enable(event->pmu); | ||
| 1398 | local_irq_restore(flags); | 1425 | local_irq_restore(flags); |
| 1399 | return ret; | 1426 | return ret; |
| 1400 | } | 1427 | } |
| @@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void) | |||
| 1667 | sparc_pmu = &niagara4_pmu; | 1694 | sparc_pmu = &niagara4_pmu; |
| 1668 | return true; | 1695 | return true; |
| 1669 | } | 1696 | } |
| 1697 | if (!strcmp(sparc_pmu_type, "sparc-m7")) { | ||
| 1698 | sparc_pmu = &sparc_m7_pmu; | ||
| 1699 | return true; | ||
| 1700 | } | ||
| 1670 | return false; | 1701 | return false; |
| 1671 | } | 1702 | } |
| 1672 | 1703 | ||
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 0be7bf978cb1..46a59643bb1c 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
| @@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self) | |||
| 287 | printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", | 287 | printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", |
| 288 | gp->tpc, gp->o7, gp->i7, gp->rpc); | 288 | gp->tpc, gp->o7, gp->i7, gp->rpc); |
| 289 | } | 289 | } |
| 290 | |||
| 291 | touch_nmi_watchdog(); | ||
| 290 | } | 292 | } |
| 291 | 293 | ||
| 292 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); | 294 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); |
| @@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void) | |||
| 362 | (cpu == this_cpu ? '*' : ' '), cpu, | 364 | (cpu == this_cpu ? '*' : ' '), cpu, |
| 363 | pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], | 365 | pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], |
| 364 | pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); | 366 | pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); |
| 367 | |||
| 368 | touch_nmi_watchdog(); | ||
| 365 | } | 369 | } |
| 366 | 370 | ||
| 367 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); | 371 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); |
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S index b7f6334e159f..857ad4f8905f 100644 --- a/arch/sparc/lib/memmove.S +++ b/arch/sparc/lib/memmove.S | |||
| @@ -8,9 +8,11 @@ | |||
| 8 | 8 | ||
| 9 | .text | 9 | .text |
| 10 | ENTRY(memmove) /* o0=dst o1=src o2=len */ | 10 | ENTRY(memmove) /* o0=dst o1=src o2=len */ |
| 11 | mov %o0, %g1 | 11 | brz,pn %o2, 99f |
| 12 | mov %o0, %g1 | ||
| 13 | |||
| 12 | cmp %o0, %o1 | 14 | cmp %o0, %o1 |
| 13 | bleu,pt %xcc, memcpy | 15 | bleu,pt %xcc, 2f |
| 14 | add %o1, %o2, %g7 | 16 | add %o1, %o2, %g7 |
| 15 | cmp %g7, %o0 | 17 | cmp %g7, %o0 |
| 16 | bleu,pt %xcc, memcpy | 18 | bleu,pt %xcc, memcpy |
| @@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */ | |||
| 24 | stb %g7, [%o0] | 26 | stb %g7, [%o0] |
| 25 | bne,pt %icc, 1b | 27 | bne,pt %icc, 1b |
| 26 | sub %o0, 1, %o0 | 28 | sub %o0, 1, %o0 |
| 27 | 29 | 99: | |
| 28 | retl | 30 | retl |
| 29 | mov %g1, %o0 | 31 | mov %g1, %o0 |
| 32 | |||
| 33 | /* We can't just call memcpy for these memmove cases. On some | ||
| 34 | * chips the memcpy uses cache initializing stores and when dst | ||
| 35 | * and src are close enough, those can clobber the source data | ||
| 36 | * before we've loaded it in. | ||
| 37 | */ | ||
| 38 | 2: or %o0, %o1, %g7 | ||
| 39 | or %o2, %g7, %g7 | ||
| 40 | andcc %g7, 0x7, %g0 | ||
| 41 | bne,pn %xcc, 4f | ||
| 42 | nop | ||
| 43 | |||
| 44 | 3: ldx [%o1], %g7 | ||
| 45 | add %o1, 8, %o1 | ||
| 46 | subcc %o2, 8, %o2 | ||
| 47 | add %o0, 8, %o0 | ||
| 48 | bne,pt %icc, 3b | ||
| 49 | stx %g7, [%o0 - 0x8] | ||
| 50 | ba,a,pt %xcc, 99b | ||
| 51 | |||
| 52 | 4: ldub [%o1], %g7 | ||
| 53 | add %o1, 1, %o1 | ||
| 54 | subcc %o2, 1, %o2 | ||
| 55 | add %o0, 1, %o0 | ||
| 56 | bne,pt %icc, 4b | ||
| 57 | stb %g7, [%o0 - 0x1] | ||
| 58 | ba,a,pt %xcc, 99b | ||
| 30 | ENDPROC(memmove) | 59 | ENDPROC(memmove) |
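The comment added to memmove() explains the core issue: the optimized sparc memcpy may use cache-initializing stores, which can overwrite source bytes before they are loaded when dst and src overlap, so overlapping forward copies must stay inside memmove rather than falling through to memcpy. In C, the overlap-safe logic amounts to picking the copy direction; the sketch below is a portable illustration, not the sparc implementation.

    #include <stddef.h>

    /* Portable sketch of overlap-safe copying. The assembly above does the
     * same forward copy (word loop when 8-byte aligned, byte loop otherwise)
     * without ever routing overlapping buffers through memcpy.
     */
    void *memmove_sketch(void *dst, const void *src, size_t len)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (d <= s || d >= s + len) {
                    while (len--)           /* safe to copy forward */
                            *d++ = *s++;
            } else {
                    d += len;               /* dst overlaps the tail of src */
                    s += len;
                    while (len--)           /* copy backward instead */
                            *--d = *--s;
            }
            return dst;
    }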
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 498b6d967138..258990688a5e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
| @@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = { | |||
| 212 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 212 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ |
| 213 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 213 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ |
| 214 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | 214 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ |
| 215 | INTEL_EVENT_CONSTRAINT(0x08a3, 0x4), | 215 | INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), |
| 216 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | 216 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ |
| 217 | INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4), | 217 | INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), |
| 218 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ | 218 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ |
| 219 | INTEL_EVENT_CONSTRAINT(0x04a3, 0xf), | 219 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), |
| 220 | EVENT_CONSTRAINT_END | 220 | EVENT_CONSTRAINT_END |
| 221 | }; | 221 | }; |
| 222 | 222 | ||
| @@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event | |||
| 1649 | if (c) | 1649 | if (c) |
| 1650 | return c; | 1650 | return c; |
| 1651 | 1651 | ||
| 1652 | c = intel_pebs_constraints(event); | 1652 | c = intel_shared_regs_constraints(cpuc, event); |
| 1653 | if (c) | 1653 | if (c) |
| 1654 | return c; | 1654 | return c; |
| 1655 | 1655 | ||
| 1656 | c = intel_shared_regs_constraints(cpuc, event); | 1656 | c = intel_pebs_constraints(event); |
| 1657 | if (c) | 1657 | if (c) |
| 1658 | return c; | 1658 | return c; |
| 1659 | 1659 | ||
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 1d74d161687c..f0095a76c182 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
| @@ -364,12 +364,21 @@ system_call_fastpath: | |||
| 364 | * Has incomplete stack frame and undefined top of stack. | 364 | * Has incomplete stack frame and undefined top of stack. |
| 365 | */ | 365 | */ |
| 366 | ret_from_sys_call: | 366 | ret_from_sys_call: |
| 367 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | ||
| 368 | jnz int_ret_from_sys_call_fixup /* Go the the slow path */ | ||
| 369 | |||
| 370 | LOCKDEP_SYS_EXIT | 367 | LOCKDEP_SYS_EXIT |
| 371 | DISABLE_INTERRUPTS(CLBR_NONE) | 368 | DISABLE_INTERRUPTS(CLBR_NONE) |
| 372 | TRACE_IRQS_OFF | 369 | TRACE_IRQS_OFF |
| 370 | |||
| 371 | /* | ||
| 372 | * We must check ti flags with interrupts (or at least preemption) | ||
| 373 | * off because we must *never* return to userspace without | ||
| 374 | * processing exit work that is enqueued if we're preempted here. | ||
| 375 | * In particular, returning to userspace with any of the one-shot | ||
| 376 | * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is | ||
| 377 | * very bad. | ||
| 378 | */ | ||
| 379 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | ||
| 380 | jnz int_ret_from_sys_call_fixup /* Go the the slow path */ | ||
| 381 | |||
| 373 | CFI_REMEMBER_STATE | 382 | CFI_REMEMBER_STATE |
| 374 | /* | 383 | /* |
| 375 | * sysretq will re-enable interrupts: | 384 | * sysretq will re-enable interrupts: |
| @@ -386,7 +395,7 @@ ret_from_sys_call: | |||
| 386 | 395 | ||
| 387 | int_ret_from_sys_call_fixup: | 396 | int_ret_from_sys_call_fixup: |
| 388 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET | 397 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET |
| 389 | jmp int_ret_from_sys_call | 398 | jmp int_ret_from_sys_call_irqs_off |
| 390 | 399 | ||
| 391 | /* Do syscall tracing */ | 400 | /* Do syscall tracing */ |
| 392 | tracesys: | 401 | tracesys: |
| @@ -432,6 +441,7 @@ tracesys_phase2: | |||
| 432 | GLOBAL(int_ret_from_sys_call) | 441 | GLOBAL(int_ret_from_sys_call) |
| 433 | DISABLE_INTERRUPTS(CLBR_NONE) | 442 | DISABLE_INTERRUPTS(CLBR_NONE) |
| 434 | TRACE_IRQS_OFF | 443 | TRACE_IRQS_OFF |
| 444 | int_ret_from_sys_call_irqs_off: | ||
| 435 | movl $_TIF_ALLWORK_MASK,%edi | 445 | movl $_TIF_ALLWORK_MASK,%edi |
| 436 | /* edi: mask to check */ | 446 | /* edi: mask to check */ |
| 437 | GLOBAL(int_with_check) | 447 | GLOBAL(int_with_check) |
| @@ -789,7 +799,21 @@ retint_swapgs: /* return to user-space */ | |||
| 789 | cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */ | 799 | cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */ |
| 790 | jne opportunistic_sysret_failed | 800 | jne opportunistic_sysret_failed |
| 791 | 801 | ||
| 792 | testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */ | 802 | /* |
| 803 | * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET, | ||
| 804 | * restoring TF results in a trap from userspace immediately after | ||
| 805 | * SYSRET. This would cause an infinite loop whenever #DB happens | ||
| 806 | * with register state that satisfies the opportunistic SYSRET | ||
| 807 | * conditions. For example, single-stepping this user code: | ||
| 808 | * | ||
| 809 | * movq $stuck_here,%rcx | ||
| 810 | * pushfq | ||
| 811 | * popq %r11 | ||
| 812 | * stuck_here: | ||
| 813 | * | ||
| 814 | * would never get past 'stuck_here'. | ||
| 815 | */ | ||
| 816 | testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 | ||
| 793 | jnz opportunistic_sysret_failed | 817 | jnz opportunistic_sysret_failed |
| 794 | 818 | ||
| 795 | /* nothing to check for RSP */ | 819 | /* nothing to check for RSP */ |
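Both entry_64.S changes guard the return-to-user path. Moving the _TIF_ALLWORK_MASK test below the interrupt disable means no exit work (signal, reschedule, one-shot TIF flags) can be queued between the check and the actual return, and the widened SYSRET test refuses the fast path whenever RF or TF is set, because SYSRET restoring TF would trap immediately and re-enter the same state forever. A C-flavoured sketch of the decision order; the helper names are hypothetical, the real code is the assembly above.

    /* Hypothetical helpers; sketch of the ordering only. */
    void ret_from_sys_call_sketch(struct pt_regs *regs)
    {
            local_irq_disable();            /* first: freeze the TIF flags */

            if (current_thread_info()->flags & _TIF_ALLWORK_MASK) {
                    exit_slow_path(regs);   /* signals, resched, notifiers */
                    return;
            }

            /* opportunistic SYSRET only if it can restore flags faithfully */
            if (regs->r11 != regs->flags ||
                (regs->r11 & (X86_EFLAGS_RF | X86_EFLAGS_TF))) {
                    exit_via_iret(regs);    /* IRET handles TF/RF correctly */
                    return;
            }

            exit_via_sysret(regs);
    }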
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 7ec1d5f8d283..25ecd56cefa8 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
| @@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = | |||
| 72 | { "bx", 8, offsetof(struct pt_regs, bx) }, | 72 | { "bx", 8, offsetof(struct pt_regs, bx) }, |
| 73 | { "cx", 8, offsetof(struct pt_regs, cx) }, | 73 | { "cx", 8, offsetof(struct pt_regs, cx) }, |
| 74 | { "dx", 8, offsetof(struct pt_regs, dx) }, | 74 | { "dx", 8, offsetof(struct pt_regs, dx) }, |
| 75 | { "si", 8, offsetof(struct pt_regs, dx) }, | 75 | { "si", 8, offsetof(struct pt_regs, si) }, |
| 76 | { "di", 8, offsetof(struct pt_regs, di) }, | 76 | { "di", 8, offsetof(struct pt_regs, di) }, |
| 77 | { "bp", 8, offsetof(struct pt_regs, bp) }, | 77 | { "bp", 8, offsetof(struct pt_regs, bp) }, |
| 78 | { "sp", 8, offsetof(struct pt_regs, sp) }, | 78 | { "sp", 8, offsetof(struct pt_regs, sp) }, |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index bae6c609888e..86db4bcd7ce5 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
| @@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
| 183 | }, | 183 | }, |
| 184 | }, | 184 | }, |
| 185 | 185 | ||
| 186 | /* ASRock */ | ||
| 187 | { /* Handle problems with rebooting on ASRock Q1900DC-ITX */ | ||
| 188 | .callback = set_pci_reboot, | ||
| 189 | .ident = "ASRock Q1900DC-ITX", | ||
| 190 | .matches = { | ||
| 191 | DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"), | ||
| 192 | DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"), | ||
| 193 | }, | ||
| 194 | }, | ||
| 195 | |||
| 186 | /* ASUS */ | 196 | /* ASUS */ |
| 187 | { /* Handle problems with rebooting on ASUS P4S800 */ | 197 | { /* Handle problems with rebooting on ASUS P4S800 */ |
| 188 | .callback = set_bios_reboot, | 198 | .callback = set_bios_reboot, |
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index b1947e0f3e10..46d4449772bc 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c | |||
| @@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, | |||
| 422 | struct kvm_ioapic *ioapic, int vector, int trigger_mode) | 422 | struct kvm_ioapic *ioapic, int vector, int trigger_mode) |
| 423 | { | 423 | { |
| 424 | int i; | 424 | int i; |
| 425 | struct kvm_lapic *apic = vcpu->arch.apic; | ||
| 425 | 426 | ||
| 426 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { | 427 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { |
| 427 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; | 428 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; |
| @@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, | |||
| 443 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); | 444 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); |
| 444 | spin_lock(&ioapic->lock); | 445 | spin_lock(&ioapic->lock); |
| 445 | 446 | ||
| 446 | if (trigger_mode != IOAPIC_LEVEL_TRIG) | 447 | if (trigger_mode != IOAPIC_LEVEL_TRIG || |
| 448 | kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) | ||
| 447 | continue; | 449 | continue; |
| 448 | 450 | ||
| 449 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); | 451 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index bd4e34de24c7..4ee827d7bf36 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
| @@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) | |||
| 833 | 833 | ||
| 834 | static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) | 834 | static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) |
| 835 | { | 835 | { |
| 836 | if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && | 836 | if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { |
| 837 | kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { | ||
| 838 | int trigger_mode; | 837 | int trigger_mode; |
| 839 | if (apic_test_vector(vector, apic->regs + APIC_TMR)) | 838 | if (apic_test_vector(vector, apic->regs + APIC_TMR)) |
| 840 | trigger_mode = IOAPIC_LEVEL_TRIG; | 839 | trigger_mode = IOAPIC_LEVEL_TRIG; |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 10a481b7674d..ae4f6d35d19c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -2479,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) | |||
| 2479 | if (enable_ept) { | 2479 | if (enable_ept) { |
| 2480 | /* nested EPT: emulate EPT also to L1 */ | 2480 | /* nested EPT: emulate EPT also to L1 */ |
| 2481 | vmx->nested.nested_vmx_secondary_ctls_high |= | 2481 | vmx->nested.nested_vmx_secondary_ctls_high |= |
| 2482 | SECONDARY_EXEC_ENABLE_EPT | | 2482 | SECONDARY_EXEC_ENABLE_EPT; |
| 2483 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | ||
| 2484 | vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | | 2483 | vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | |
| 2485 | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | | 2484 | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | |
| 2486 | VMX_EPT_INVEPT_BIT; | 2485 | VMX_EPT_INVEPT_BIT; |
| @@ -2494,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) | |||
| 2494 | } else | 2493 | } else |
| 2495 | vmx->nested.nested_vmx_ept_caps = 0; | 2494 | vmx->nested.nested_vmx_ept_caps = 0; |
| 2496 | 2495 | ||
| 2496 | if (enable_unrestricted_guest) | ||
| 2497 | vmx->nested.nested_vmx_secondary_ctls_high |= | ||
| 2498 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | ||
| 2499 | |||
| 2497 | /* miscellaneous data */ | 2500 | /* miscellaneous data */ |
| 2498 | rdmsr(MSR_IA32_VMX_MISC, | 2501 | rdmsr(MSR_IA32_VMX_MISC, |
| 2499 | vmx->nested.nested_vmx_misc_low, | 2502 | vmx->nested.nested_vmx_misc_low, |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 9f93af56a5fc..b47124d4cd67 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
| @@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size); | |||
| 91 | unsigned long xen_max_p2m_pfn __read_mostly; | 91 | unsigned long xen_max_p2m_pfn __read_mostly; |
| 92 | EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); | 92 | EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); |
| 93 | 93 | ||
| 94 | #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT | ||
| 95 | #define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT | ||
| 96 | #else | ||
| 97 | #define P2M_LIMIT 0 | ||
| 98 | #endif | ||
| 99 | |||
| 94 | static DEFINE_SPINLOCK(p2m_update_lock); | 100 | static DEFINE_SPINLOCK(p2m_update_lock); |
| 95 | 101 | ||
| 96 | static unsigned long *p2m_mid_missing_mfn; | 102 | static unsigned long *p2m_mid_missing_mfn; |
| @@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m) | |||
| 385 | void __init xen_vmalloc_p2m_tree(void) | 391 | void __init xen_vmalloc_p2m_tree(void) |
| 386 | { | 392 | { |
| 387 | static struct vm_struct vm; | 393 | static struct vm_struct vm; |
| 394 | unsigned long p2m_limit; | ||
| 388 | 395 | ||
| 396 | p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; | ||
| 389 | vm.flags = VM_ALLOC; | 397 | vm.flags = VM_ALLOC; |
| 390 | vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn, | 398 | vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), |
| 391 | PMD_SIZE * PMDS_PER_MID_PAGE); | 399 | PMD_SIZE * PMDS_PER_MID_PAGE); |
| 392 | vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); | 400 | vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); |
| 393 | pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size); | 401 | pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size); |
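A quick worked example of what the new limit buys (the config value is hypothetical): with CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT=512 (GiB) and 4 KiB pages,

	p2m_limit = 512 * 1024 * 1024 * 1024 / 4096 = 134217728 PFNs
	vm.size  >= 8 * 134217728 = 1 GiB of virtual space (on 64-bit, before the PMD-sized ALIGN)

so the p2m virtual area is sized for the memory-hotplug ceiling up front, even when xen_max_p2m_pfn is still small at boot.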
diff --git a/block/blk-merge.c b/block/blk-merge.c index fc1ff3b1ea1f..fd3fee81c23c 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
| @@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) | |||
| 592 | if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { | 592 | if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { |
| 593 | struct bio_vec *bprev; | 593 | struct bio_vec *bprev; |
| 594 | 594 | ||
| 595 | bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1]; | 595 | bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1]; |
| 596 | if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) | 596 | if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) |
| 597 | return false; | 597 | return false; |
| 598 | } | 598 | } |
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index d53a764b05ea..be3290cc0644 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
| @@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data, | |||
| 278 | /* | 278 | /* |
| 279 | * We're out of tags on this hardware queue, kick any | 279 | * We're out of tags on this hardware queue, kick any |
| 280 | * pending IO submits before going to sleep waiting for | 280 | * pending IO submits before going to sleep waiting for |
| 281 | * some to complete. | 281 | * some to complete. Note that hctx can be NULL here for |
| 282 | * reserved tag allocation. | ||
| 282 | */ | 283 | */ |
| 283 | blk_mq_run_hw_queue(hctx, false); | 284 | if (hctx) |
| 285 | blk_mq_run_hw_queue(hctx, false); | ||
| 284 | 286 | ||
| 285 | /* | 287 | /* |
| 286 | * Retry tag allocation after running the hardware queue, | 288 | * Retry tag allocation after running the hardware queue, |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 4f4bea21052e..b7b8933ec241 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) | |||
| 1938 | */ | 1938 | */ |
| 1939 | if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, | 1939 | if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, |
| 1940 | PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) | 1940 | PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) |
| 1941 | goto err_map; | 1941 | goto err_mq_usage; |
| 1942 | 1942 | ||
| 1943 | setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); | 1943 | setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); |
| 1944 | blk_queue_rq_timeout(q, 30000); | 1944 | blk_queue_rq_timeout(q, 30000); |
| @@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) | |||
| 1981 | blk_mq_init_cpu_queues(q, set->nr_hw_queues); | 1981 | blk_mq_init_cpu_queues(q, set->nr_hw_queues); |
| 1982 | 1982 | ||
| 1983 | if (blk_mq_init_hw_queues(q, set)) | 1983 | if (blk_mq_init_hw_queues(q, set)) |
| 1984 | goto err_hw; | 1984 | goto err_mq_usage; |
| 1985 | 1985 | ||
| 1986 | mutex_lock(&all_q_mutex); | 1986 | mutex_lock(&all_q_mutex); |
| 1987 | list_add_tail(&q->all_q_node, &all_q_list); | 1987 | list_add_tail(&q->all_q_node, &all_q_list); |
| @@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) | |||
| 1993 | 1993 | ||
| 1994 | return q; | 1994 | return q; |
| 1995 | 1995 | ||
| 1996 | err_hw: | 1996 | err_mq_usage: |
| 1997 | blk_cleanup_queue(q); | 1997 | blk_cleanup_queue(q); |
| 1998 | err_hctxs: | 1998 | err_hctxs: |
| 1999 | kfree(map); | 1999 | kfree(map); |
diff --git a/block/blk-settings.c b/block/blk-settings.c index 6ed2cbe5e8c9..12600bfffca9 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
| @@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
| 585 | b->physical_block_size); | 585 | b->physical_block_size); |
| 586 | 586 | ||
| 587 | t->io_min = max(t->io_min, b->io_min); | 587 | t->io_min = max(t->io_min, b->io_min); |
| 588 | t->io_opt = lcm(t->io_opt, b->io_opt); | 588 | t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); |
| 589 | 589 | ||
| 590 | t->cluster &= b->cluster; | 590 | t->cluster &= b->cluster; |
| 591 | t->discard_zeroes_data &= b->discard_zeroes_data; | 591 | t->discard_zeroes_data &= b->discard_zeroes_data; |
| @@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
| 616 | b->raid_partial_stripes_expensive); | 616 | b->raid_partial_stripes_expensive); |
| 617 | 617 | ||
| 618 | /* Find lowest common alignment_offset */ | 618 | /* Find lowest common alignment_offset */ |
| 619 | t->alignment_offset = lcm(t->alignment_offset, alignment) | 619 | t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment) |
| 620 | % max(t->physical_block_size, t->io_min); | 620 | % max(t->physical_block_size, t->io_min); |
| 621 | 621 | ||
| 622 | /* Verify that new alignment_offset is on a logical block boundary */ | 622 | /* Verify that new alignment_offset is on a logical block boundary */ |
| @@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
| 643 | b->max_discard_sectors); | 643 | b->max_discard_sectors); |
| 644 | t->discard_granularity = max(t->discard_granularity, | 644 | t->discard_granularity = max(t->discard_granularity, |
| 645 | b->discard_granularity); | 645 | b->discard_granularity); |
| 646 | t->discard_alignment = lcm(t->discard_alignment, alignment) % | 646 | t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) % |
| 647 | t->discard_granularity; | 647 | t->discard_granularity; |
| 648 | } | 648 | } |
| 649 | 649 | ||
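The reason for switching to lcm_not_zero() is that a zero io_opt or alignment means "no constraint", and plain lcm() would let that zero wipe out the other device's value when limits are stacked. A minimal sketch of the intended semantics, assuming lcm() behaves as in lib/lcm.c (returning 0 when either argument is 0):

	/* sketch: like lcm(), but a zero argument is treated as
	 * "unconstrained" and the other value is returned instead of 0 */
	static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
	{
		unsigned long l = lcm(a, b);

		if (l)
			return l;

		return b ? b : a;
	}

For example, stacking an io_opt of 0 on top of 65536 now yields 65536 rather than 0.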
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 4c35f0822d06..23dac3babfe3 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4204 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, | 4204 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, |
| 4205 | 4205 | ||
| 4206 | /* devices that don't properly handle queued TRIM commands */ | 4206 | /* devices that don't properly handle queued TRIM commands */ |
| 4207 | { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4207 | { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
| 4208 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4209 | { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | ||
| 4210 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4211 | { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | ||
| 4212 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4213 | { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | ||
| 4214 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4215 | { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | ||
| 4216 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4217 | { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | ||
| 4208 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4218 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| 4209 | { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | ||
| 4210 | 4219 | ||
| 4211 | /* | 4220 | /* |
| 4212 | * As defined, the DRAT (Deterministic Read After Trim) and RZAT | 4221 | * As defined, the DRAT (Deterministic Read After Trim) and RZAT |
| @@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4226 | */ | 4235 | */ |
| 4227 | { "INTEL*SSDSC2MH*", NULL, 0, }, | 4236 | { "INTEL*SSDSC2MH*", NULL, 0, }, |
| 4228 | 4237 | ||
| 4238 | { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4239 | { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4229 | { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4240 | { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| 4230 | { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4241 | { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| 4231 | { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4242 | { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| @@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) | |||
| 4737 | return NULL; | 4748 | return NULL; |
| 4738 | 4749 | ||
| 4739 | /* libsas case */ | 4750 | /* libsas case */ |
| 4740 | if (!ap->scsi_host) { | 4751 | if (ap->flags & ATA_FLAG_SAS_HOST) { |
| 4741 | tag = ata_sas_allocate_tag(ap); | 4752 | tag = ata_sas_allocate_tag(ap); |
| 4742 | if (tag < 0) | 4753 | if (tag < 0) |
| 4743 | return NULL; | 4754 | return NULL; |
| @@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) | |||
| 4776 | tag = qc->tag; | 4787 | tag = qc->tag; |
| 4777 | if (likely(ata_tag_valid(tag))) { | 4788 | if (likely(ata_tag_valid(tag))) { |
| 4778 | qc->tag = ATA_TAG_POISON; | 4789 | qc->tag = ATA_TAG_POISON; |
| 4779 | if (!ap->scsi_host) | 4790 | if (ap->flags & ATA_FLAG_SAS_HOST) |
| 4780 | ata_sas_free_tag(tag, ap); | 4791 | ata_sas_free_tag(tag, ap); |
| 4781 | } | 4792 | } |
| 4782 | } | 4793 | } |
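The blacklist model and firmware strings are shell-style globs, so the new entries can be read as follows (a hypothetical check mirroring how glob_match() from lib/glob.c is applied; the model string is made up):

	/* sketch: one of the new, more targeted entries matching an M550
	 * drive on firmware revision MU01 */
	if (glob_match("Micron_M5[15]0*", "Micron_M550_MTFDDAK256MAY") &&
	    glob_match("MU01", "MU01"))
		horkage |= ATA_HORKAGE_NO_NCQ_TRIM |
			   ATA_HORKAGE_ZERO_AFTER_TRIM;

So the M510/M550 and matching Crucial models are only flagged for queued-TRIM problems on the MU01 firmware, while the broad Micron*/Crucial* entries further down add just the zero-after-TRIM hint.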
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index beb8b27d4621..a13587b5c2be 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h | |||
| @@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops; | |||
| 243 | extern struct regcache_ops regcache_lzo_ops; | 243 | extern struct regcache_ops regcache_lzo_ops; |
| 244 | extern struct regcache_ops regcache_flat_ops; | 244 | extern struct regcache_ops regcache_flat_ops; |
| 245 | 245 | ||
| 246 | static inline const char *regmap_name(const struct regmap *map) | ||
| 247 | { | ||
| 248 | if (map->dev) | ||
| 249 | return dev_name(map->dev); | ||
| 250 | |||
| 251 | return map->name; | ||
| 252 | } | ||
| 253 | |||
| 246 | #endif | 254 | #endif |
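The rest of this series switches the regmap trace points from taking map->dev to taking the map itself, so a printable name is available even for maps that have no struct device attached; a hedged sketch of the intent (the pr_debug() line is purely illustrative):

	/* sketch: dev_name(map->dev) when a device is attached,
	 * the map's own name otherwise */
	const char *name = regmap_name(map);

	pr_debug("regmap %s: %x => %x\n", name, reg, val);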
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index da84f544c544..87db9893b463 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c | |||
| @@ -218,7 +218,7 @@ int regcache_read(struct regmap *map, | |||
| 218 | ret = map->cache_ops->read(map, reg, value); | 218 | ret = map->cache_ops->read(map, reg, value); |
| 219 | 219 | ||
| 220 | if (ret == 0) | 220 | if (ret == 0) |
| 221 | trace_regmap_reg_read_cache(map->dev, reg, *value); | 221 | trace_regmap_reg_read_cache(map, reg, *value); |
| 222 | 222 | ||
| 223 | return ret; | 223 | return ret; |
| 224 | } | 224 | } |
| @@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map) | |||
| 311 | dev_dbg(map->dev, "Syncing %s cache\n", | 311 | dev_dbg(map->dev, "Syncing %s cache\n", |
| 312 | map->cache_ops->name); | 312 | map->cache_ops->name); |
| 313 | name = map->cache_ops->name; | 313 | name = map->cache_ops->name; |
| 314 | trace_regcache_sync(map->dev, name, "start"); | 314 | trace_regcache_sync(map, name, "start"); |
| 315 | 315 | ||
| 316 | if (!map->cache_dirty) | 316 | if (!map->cache_dirty) |
| 317 | goto out; | 317 | goto out; |
| @@ -346,7 +346,7 @@ out: | |||
| 346 | 346 | ||
| 347 | regmap_async_complete(map); | 347 | regmap_async_complete(map); |
| 348 | 348 | ||
| 349 | trace_regcache_sync(map->dev, name, "stop"); | 349 | trace_regcache_sync(map, name, "stop"); |
| 350 | 350 | ||
| 351 | return ret; | 351 | return ret; |
| 352 | } | 352 | } |
| @@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min, | |||
| 381 | name = map->cache_ops->name; | 381 | name = map->cache_ops->name; |
| 382 | dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); | 382 | dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); |
| 383 | 383 | ||
| 384 | trace_regcache_sync(map->dev, name, "start region"); | 384 | trace_regcache_sync(map, name, "start region"); |
| 385 | 385 | ||
| 386 | if (!map->cache_dirty) | 386 | if (!map->cache_dirty) |
| 387 | goto out; | 387 | goto out; |
| @@ -401,7 +401,7 @@ out: | |||
| 401 | 401 | ||
| 402 | regmap_async_complete(map); | 402 | regmap_async_complete(map); |
| 403 | 403 | ||
| 404 | trace_regcache_sync(map->dev, name, "stop region"); | 404 | trace_regcache_sync(map, name, "stop region"); |
| 405 | 405 | ||
| 406 | return ret; | 406 | return ret; |
| 407 | } | 407 | } |
| @@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min, | |||
| 428 | 428 | ||
| 429 | map->lock(map->lock_arg); | 429 | map->lock(map->lock_arg); |
| 430 | 430 | ||
| 431 | trace_regcache_drop_region(map->dev, min, max); | 431 | trace_regcache_drop_region(map, min, max); |
| 432 | 432 | ||
| 433 | ret = map->cache_ops->drop(map, min, max); | 433 | ret = map->cache_ops->drop(map, min, max); |
| 434 | 434 | ||
| @@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable) | |||
| 455 | map->lock(map->lock_arg); | 455 | map->lock(map->lock_arg); |
| 456 | WARN_ON(map->cache_bypass && enable); | 456 | WARN_ON(map->cache_bypass && enable); |
| 457 | map->cache_only = enable; | 457 | map->cache_only = enable; |
| 458 | trace_regmap_cache_only(map->dev, enable); | 458 | trace_regmap_cache_only(map, enable); |
| 459 | map->unlock(map->lock_arg); | 459 | map->unlock(map->lock_arg); |
| 460 | } | 460 | } |
| 461 | EXPORT_SYMBOL_GPL(regcache_cache_only); | 461 | EXPORT_SYMBOL_GPL(regcache_cache_only); |
| @@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable) | |||
| 493 | map->lock(map->lock_arg); | 493 | map->lock(map->lock_arg); |
| 494 | WARN_ON(map->cache_only && enable); | 494 | WARN_ON(map->cache_only && enable); |
| 495 | map->cache_bypass = enable; | 495 | map->cache_bypass = enable; |
| 496 | trace_regmap_cache_bypass(map->dev, enable); | 496 | trace_regmap_cache_bypass(map, enable); |
| 497 | map->unlock(map->lock_arg); | 497 | map->unlock(map->lock_arg); |
| 498 | } | 498 | } |
| 499 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); | 499 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); |
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f99b098ddabf..dbfe6a69c3da 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
| @@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
| 1281 | if (map->async && map->bus->async_write) { | 1281 | if (map->async && map->bus->async_write) { |
| 1282 | struct regmap_async *async; | 1282 | struct regmap_async *async; |
| 1283 | 1283 | ||
| 1284 | trace_regmap_async_write_start(map->dev, reg, val_len); | 1284 | trace_regmap_async_write_start(map, reg, val_len); |
| 1285 | 1285 | ||
| 1286 | spin_lock_irqsave(&map->async_lock, flags); | 1286 | spin_lock_irqsave(&map->async_lock, flags); |
| 1287 | async = list_first_entry_or_null(&map->async_free, | 1287 | async = list_first_entry_or_null(&map->async_free, |
| @@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
| 1339 | return ret; | 1339 | return ret; |
| 1340 | } | 1340 | } |
| 1341 | 1341 | ||
| 1342 | trace_regmap_hw_write_start(map->dev, reg, | 1342 | trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes); |
| 1343 | val_len / map->format.val_bytes); | ||
| 1344 | 1343 | ||
| 1345 | /* If we're doing a single register write we can probably just | 1344 | /* If we're doing a single register write we can probably just |
| 1346 | * send the work_buf directly, otherwise try to do a gather | 1345 | * send the work_buf directly, otherwise try to do a gather |
| @@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
| 1372 | kfree(buf); | 1371 | kfree(buf); |
| 1373 | } | 1372 | } |
| 1374 | 1373 | ||
| 1375 | trace_regmap_hw_write_done(map->dev, reg, | 1374 | trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); |
| 1376 | val_len / map->format.val_bytes); | ||
| 1377 | 1375 | ||
| 1378 | return ret; | 1376 | return ret; |
| 1379 | } | 1377 | } |
| @@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg, | |||
| 1407 | 1405 | ||
| 1408 | map->format.format_write(map, reg, val); | 1406 | map->format.format_write(map, reg, val); |
| 1409 | 1407 | ||
| 1410 | trace_regmap_hw_write_start(map->dev, reg, 1); | 1408 | trace_regmap_hw_write_start(map, reg, 1); |
| 1411 | 1409 | ||
| 1412 | ret = map->bus->write(map->bus_context, map->work_buf, | 1410 | ret = map->bus->write(map->bus_context, map->work_buf, |
| 1413 | map->format.buf_size); | 1411 | map->format.buf_size); |
| 1414 | 1412 | ||
| 1415 | trace_regmap_hw_write_done(map->dev, reg, 1); | 1413 | trace_regmap_hw_write_done(map, reg, 1); |
| 1416 | 1414 | ||
| 1417 | return ret; | 1415 | return ret; |
| 1418 | } | 1416 | } |
| @@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg, | |||
| 1470 | dev_info(map->dev, "%x <= %x\n", reg, val); | 1468 | dev_info(map->dev, "%x <= %x\n", reg, val); |
| 1471 | #endif | 1469 | #endif |
| 1472 | 1470 | ||
| 1473 | trace_regmap_reg_write(map->dev, reg, val); | 1471 | trace_regmap_reg_write(map, reg, val); |
| 1474 | 1472 | ||
| 1475 | return map->reg_write(context, reg, val); | 1473 | return map->reg_write(context, reg, val); |
| 1476 | } | 1474 | } |
| @@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, | |||
| 1773 | for (i = 0; i < num_regs; i++) { | 1771 | for (i = 0; i < num_regs; i++) { |
| 1774 | int reg = regs[i].reg; | 1772 | int reg = regs[i].reg; |
| 1775 | int val = regs[i].def; | 1773 | int val = regs[i].def; |
| 1776 | trace_regmap_hw_write_start(map->dev, reg, 1); | 1774 | trace_regmap_hw_write_start(map, reg, 1); |
| 1777 | map->format.format_reg(u8, reg, map->reg_shift); | 1775 | map->format.format_reg(u8, reg, map->reg_shift); |
| 1778 | u8 += reg_bytes + pad_bytes; | 1776 | u8 += reg_bytes + pad_bytes; |
| 1779 | map->format.format_val(u8, val, 0); | 1777 | map->format.format_val(u8, val, 0); |
| @@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, | |||
| 1788 | 1786 | ||
| 1789 | for (i = 0; i < num_regs; i++) { | 1787 | for (i = 0; i < num_regs; i++) { |
| 1790 | int reg = regs[i].reg; | 1788 | int reg = regs[i].reg; |
| 1791 | trace_regmap_hw_write_done(map->dev, reg, 1); | 1789 | trace_regmap_hw_write_done(map, reg, 1); |
| 1792 | } | 1790 | } |
| 1793 | return ret; | 1791 | return ret; |
| 1794 | } | 1792 | } |
| @@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, | |||
| 2059 | */ | 2057 | */ |
| 2060 | u8[0] |= map->read_flag_mask; | 2058 | u8[0] |= map->read_flag_mask; |
| 2061 | 2059 | ||
| 2062 | trace_regmap_hw_read_start(map->dev, reg, | 2060 | trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); |
| 2063 | val_len / map->format.val_bytes); | ||
| 2064 | 2061 | ||
| 2065 | ret = map->bus->read(map->bus_context, map->work_buf, | 2062 | ret = map->bus->read(map->bus_context, map->work_buf, |
| 2066 | map->format.reg_bytes + map->format.pad_bytes, | 2063 | map->format.reg_bytes + map->format.pad_bytes, |
| 2067 | val, val_len); | 2064 | val, val_len); |
| 2068 | 2065 | ||
| 2069 | trace_regmap_hw_read_done(map->dev, reg, | 2066 | trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); |
| 2070 | val_len / map->format.val_bytes); | ||
| 2071 | 2067 | ||
| 2072 | return ret; | 2068 | return ret; |
| 2073 | } | 2069 | } |
| @@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg, | |||
| 2123 | dev_info(map->dev, "%x => %x\n", reg, *val); | 2119 | dev_info(map->dev, "%x => %x\n", reg, *val); |
| 2124 | #endif | 2120 | #endif |
| 2125 | 2121 | ||
| 2126 | trace_regmap_reg_read(map->dev, reg, *val); | 2122 | trace_regmap_reg_read(map, reg, *val); |
| 2127 | 2123 | ||
| 2128 | if (!map->cache_bypass) | 2124 | if (!map->cache_bypass) |
| 2129 | regcache_write(map, reg, *val); | 2125 | regcache_write(map, reg, *val); |
| @@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret) | |||
| 2480 | struct regmap *map = async->map; | 2476 | struct regmap *map = async->map; |
| 2481 | bool wake; | 2477 | bool wake; |
| 2482 | 2478 | ||
| 2483 | trace_regmap_async_io_complete(map->dev); | 2479 | trace_regmap_async_io_complete(map); |
| 2484 | 2480 | ||
| 2485 | spin_lock(&map->async_lock); | 2481 | spin_lock(&map->async_lock); |
| 2486 | list_move(&async->list, &map->async_free); | 2482 | list_move(&async->list, &map->async_free); |
| @@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map) | |||
| 2525 | if (!map->bus || !map->bus->async_write) | 2521 | if (!map->bus || !map->bus->async_write) |
| 2526 | return 0; | 2522 | return 0; |
| 2527 | 2523 | ||
| 2528 | trace_regmap_async_complete_start(map->dev); | 2524 | trace_regmap_async_complete_start(map); |
| 2529 | 2525 | ||
| 2530 | wait_event(map->async_waitq, regmap_async_is_done(map)); | 2526 | wait_event(map->async_waitq, regmap_async_is_done(map)); |
| 2531 | 2527 | ||
| @@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map) | |||
| 2534 | map->async_ret = 0; | 2530 | map->async_ret = 0; |
| 2535 | spin_unlock_irqrestore(&map->async_lock, flags); | 2531 | spin_unlock_irqrestore(&map->async_lock, flags); |
| 2536 | 2532 | ||
| 2537 | trace_regmap_async_complete_done(map->dev); | 2533 | trace_regmap_async_complete_done(map); |
| 2538 | 2534 | ||
| 2539 | return ret; | 2535 | return ret; |
| 2540 | } | 2536 | } |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 4bc2a5cb9935..a98c41f72c63 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -803,10 +803,6 @@ static int __init nbd_init(void) | |||
| 803 | return -EINVAL; | 803 | return -EINVAL; |
| 804 | } | 804 | } |
| 805 | 805 | ||
| 806 | nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); | ||
| 807 | if (!nbd_dev) | ||
| 808 | return -ENOMEM; | ||
| 809 | |||
| 810 | part_shift = 0; | 806 | part_shift = 0; |
| 811 | if (max_part > 0) { | 807 | if (max_part > 0) { |
| 812 | part_shift = fls(max_part); | 808 | part_shift = fls(max_part); |
| @@ -828,6 +824,10 @@ static int __init nbd_init(void) | |||
| 828 | if (nbds_max > 1UL << (MINORBITS - part_shift)) | 824 | if (nbds_max > 1UL << (MINORBITS - part_shift)) |
| 829 | return -EINVAL; | 825 | return -EINVAL; |
| 830 | 826 | ||
| 827 | nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); | ||
| 828 | if (!nbd_dev) | ||
| 829 | return -ENOMEM; | ||
| 830 | |||
| 831 | for (i = 0; i < nbds_max; i++) { | 831 | for (i = 0; i < nbds_max; i++) { |
| 832 | struct gendisk *disk = alloc_disk(1 << part_shift); | 832 | struct gendisk *disk = alloc_disk(1 << part_shift); |
| 833 | if (!disk) | 833 | if (!disk) |
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index ceb32dd52a6c..e23be20a3417 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c | |||
| @@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 3003 | } | 3003 | } |
| 3004 | get_device(dev->device); | 3004 | get_device(dev->device); |
| 3005 | 3005 | ||
| 3006 | INIT_LIST_HEAD(&dev->node); | ||
| 3006 | INIT_WORK(&dev->probe_work, nvme_async_probe); | 3007 | INIT_WORK(&dev->probe_work, nvme_async_probe); |
| 3007 | schedule_work(&dev->probe_work); | 3008 | schedule_work(&dev->probe_work); |
| 3008 | return 0; | 3009 | return 0; |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 68161f7a07d6..a0b036ccb118 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
| @@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI | |||
| 192 | config SH_TIMER_CMT | 192 | config SH_TIMER_CMT |
| 193 | bool "Renesas CMT timer driver" if COMPILE_TEST | 193 | bool "Renesas CMT timer driver" if COMPILE_TEST |
| 194 | depends on GENERIC_CLOCKEVENTS | 194 | depends on GENERIC_CLOCKEVENTS |
| 195 | depends on HAS_IOMEM | ||
| 195 | default SYS_SUPPORTS_SH_CMT | 196 | default SYS_SUPPORTS_SH_CMT |
| 196 | help | 197 | help |
| 197 | This enables build of a clocksource and clockevent driver for | 198 | This enables build of a clocksource and clockevent driver for |
| @@ -201,6 +202,7 @@ config SH_TIMER_CMT | |||
| 201 | config SH_TIMER_MTU2 | 202 | config SH_TIMER_MTU2 |
| 202 | bool "Renesas MTU2 timer driver" if COMPILE_TEST | 203 | bool "Renesas MTU2 timer driver" if COMPILE_TEST |
| 203 | depends on GENERIC_CLOCKEVENTS | 204 | depends on GENERIC_CLOCKEVENTS |
| 205 | depends on HAS_IOMEM | ||
| 204 | default SYS_SUPPORTS_SH_MTU2 | 206 | default SYS_SUPPORTS_SH_MTU2 |
| 205 | help | 207 | help |
| 206 | This enables build of a clockevent driver for the Multi-Function | 208 | This enables build of a clockevent driver for the Multi-Function |
| @@ -210,6 +212,7 @@ config SH_TIMER_MTU2 | |||
| 210 | config SH_TIMER_TMU | 212 | config SH_TIMER_TMU |
| 211 | bool "Renesas TMU timer driver" if COMPILE_TEST | 213 | bool "Renesas TMU timer driver" if COMPILE_TEST |
| 212 | depends on GENERIC_CLOCKEVENTS | 214 | depends on GENERIC_CLOCKEVENTS |
| 215 | depends on HAS_IOMEM | ||
| 213 | default SYS_SUPPORTS_SH_TMU | 216 | default SYS_SUPPORTS_SH_TMU |
| 214 | help | 217 | help |
| 215 | This enables build of a clocksource and clockevent driver for | 218 | This enables build of a clocksource and clockevent driver for |
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 5dcbf90b8015..58597fbcc046 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
| 18 | #include <linux/irqreturn.h> | 18 | #include <linux/irqreturn.h> |
| 19 | #include <linux/reset.h> | 19 | #include <linux/reset.h> |
| 20 | #include <linux/sched_clock.h> | ||
| 21 | #include <linux/of.h> | 20 | #include <linux/of.h> |
| 22 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
| 23 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
| @@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = { | |||
| 137 | .dev_id = &sun5i_clockevent, | 136 | .dev_id = &sun5i_clockevent, |
| 138 | }; | 137 | }; |
| 139 | 138 | ||
| 140 | static u64 sun5i_timer_sched_read(void) | ||
| 141 | { | ||
| 142 | return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1)); | ||
| 143 | } | ||
| 144 | |||
| 145 | static void __init sun5i_timer_init(struct device_node *node) | 139 | static void __init sun5i_timer_init(struct device_node *node) |
| 146 | { | 140 | { |
| 147 | struct reset_control *rstc; | 141 | struct reset_control *rstc; |
| @@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node) | |||
| 172 | writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, | 166 | writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, |
| 173 | timer_base + TIMER_CTL_REG(1)); | 167 | timer_base + TIMER_CTL_REG(1)); |
| 174 | 168 | ||
| 175 | sched_clock_register(sun5i_timer_sched_read, 32, rate); | ||
| 176 | clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, | 169 | clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, |
| 177 | rate, 340, 32, clocksource_mmio_readl_down); | 170 | rate, 340, 32, clocksource_mmio_readl_down); |
| 178 | 171 | ||
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 0723096fb50a..c92d6a70ccf3 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c | |||
| @@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) | |||
| 475 | * c->desc is NULL and exit.) | 475 | * c->desc is NULL and exit.) |
| 476 | */ | 476 | */ |
| 477 | if (c->desc) { | 477 | if (c->desc) { |
| 478 | bcm2835_dma_desc_free(&c->desc->vd); | ||
| 478 | c->desc = NULL; | 479 | c->desc = NULL; |
| 479 | bcm2835_dma_abort(c->chan_base); | 480 | bcm2835_dma_abort(c->chan_base); |
| 480 | 481 | ||
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 4527a3ebeac4..84884418fd30 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c | |||
| @@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc) | |||
| 511 | kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); | 511 | kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); |
| 512 | } | 512 | } |
| 513 | 513 | ||
| 514 | #define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
| 515 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
| 516 | |||
| 514 | static int jz4740_dma_probe(struct platform_device *pdev) | 517 | static int jz4740_dma_probe(struct platform_device *pdev) |
| 515 | { | 518 | { |
| 516 | struct jz4740_dmaengine_chan *chan; | 519 | struct jz4740_dmaengine_chan *chan; |
| @@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev) | |||
| 548 | dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; | 551 | dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; |
| 549 | dd->device_config = jz4740_dma_slave_config; | 552 | dd->device_config = jz4740_dma_slave_config; |
| 550 | dd->device_terminate_all = jz4740_dma_terminate_all; | 553 | dd->device_terminate_all = jz4740_dma_terminate_all; |
| 554 | dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS; | ||
| 555 | dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS; | ||
| 556 | dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
| 557 | dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
| 551 | dd->dev = &pdev->dev; | 558 | dd->dev = &pdev->dev; |
| 552 | INIT_LIST_HEAD(&dd->channels); | 559 | INIT_LIST_HEAD(&dd->channels); |
| 553 | 560 | ||
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 276157f22612..53dbd3b3384c 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
| @@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan) | |||
| 260 | */ | 260 | */ |
| 261 | if (echan->edesc) { | 261 | if (echan->edesc) { |
| 262 | int cyclic = echan->edesc->cyclic; | 262 | int cyclic = echan->edesc->cyclic; |
| 263 | |||
| 264 | /* | ||
| 265 | * free the running request descriptor | ||
| 266 | * since it is not in any of the vdesc lists | ||
| 267 | */ | ||
| 268 | edma_desc_free(&echan->edesc->vdesc); | ||
| 269 | |||
| 263 | echan->edesc = NULL; | 270 | echan->edesc = NULL; |
| 264 | edma_stop(echan->ch_num); | 271 | edma_stop(echan->ch_num); |
| 265 | /* Move the cyclic channel back to default queue */ | 272 | /* Move the cyclic channel back to default queue */ |
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 15cab7d79525..b4634109e010 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c | |||
| @@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan) | |||
| 193 | 193 | ||
| 194 | spin_lock_irqsave(&ch->vc.lock, flags); | 194 | spin_lock_irqsave(&ch->vc.lock, flags); |
| 195 | 195 | ||
| 196 | if (ch->desc) | 196 | if (ch->desc) { |
| 197 | moxart_dma_desc_free(&ch->desc->vd); | ||
| 197 | ch->desc = NULL; | 198 | ch->desc = NULL; |
| 199 | } | ||
| 198 | 200 | ||
| 199 | ctrl = readl(ch->base + REG_OFF_CTRL); | 201 | ctrl = readl(ch->base + REG_OFF_CTRL); |
| 200 | ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); | 202 | ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); |
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 7dd6dd121681..167dbaf65742 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
| @@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan) | |||
| 981 | * c->desc is NULL and exit.) | 981 | * c->desc is NULL and exit.) |
| 982 | */ | 982 | */ |
| 983 | if (c->desc) { | 983 | if (c->desc) { |
| 984 | omap_dma_desc_free(&c->desc->vd); | ||
| 984 | c->desc = NULL; | 985 | c->desc = NULL; |
| 985 | /* Avoid stopping the dma twice */ | 986 | /* Avoid stopping the dma twice */ |
| 986 | if (!c->paused) | 987 | if (!c->paused) |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 69fac068669f..2eebd28b4c40 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
| @@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num, | |||
| 86 | int i = 0; | 86 | int i = 0; |
| 87 | 87 | ||
| 88 | /* | 88 | /* |
| 89 | * Stop when we see all the items the table claimed to have | 89 | * Stop when we have seen all the items the table claimed to have |
| 90 | * OR we run off the end of the table (also happens) | 90 | * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run |
| 91 | * off the end of the table (should never happen but sometimes does | ||
| 92 | * on bogus implementations.) | ||
| 91 | */ | 93 | */ |
| 92 | while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) { | 94 | while ((!num || i < num) && |
| 95 | (data - buf + sizeof(struct dmi_header)) <= len) { | ||
| 93 | const struct dmi_header *dm = (const struct dmi_header *)data; | 96 | const struct dmi_header *dm = (const struct dmi_header *)data; |
| 94 | 97 | ||
| 95 | /* | 98 | /* |
| @@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf) | |||
| 529 | if (memcmp(buf, "_SM3_", 5) == 0 && | 532 | if (memcmp(buf, "_SM3_", 5) == 0 && |
| 530 | buf[6] < 32 && dmi_checksum(buf, buf[6])) { | 533 | buf[6] < 32 && dmi_checksum(buf, buf[6])) { |
| 531 | dmi_ver = get_unaligned_be16(buf + 7); | 534 | dmi_ver = get_unaligned_be16(buf + 7); |
| 535 | dmi_num = 0; /* No longer specified */ | ||
| 532 | dmi_len = get_unaligned_le32(buf + 12); | 536 | dmi_len = get_unaligned_le32(buf + 12); |
| 533 | dmi_base = get_unaligned_le64(buf + 16); | 537 | dmi_base = get_unaligned_le64(buf + 16); |
| 534 | 538 | ||
| 535 | /* | ||
| 536 | * The 64-bit SMBIOS 3.0 entry point no longer has a field | ||
| 537 | * containing the number of structures present in the table. | ||
| 538 | * Instead, it defines the table size as a maximum size, and | ||
| 539 | * relies on the end-of-table structure type (#127) to be used | ||
| 540 | * to signal the end of the table. | ||
| 541 | * So let's define dmi_num as an upper bound as well: each | ||
| 542 | * structure has a 4 byte header, so dmi_len / 4 is an upper | ||
| 543 | * bound for the number of structures in the table. | ||
| 544 | */ | ||
| 545 | dmi_num = dmi_len / 4; | ||
| 546 | |||
| 547 | if (dmi_walk_early(dmi_decode) == 0) { | 539 | if (dmi_walk_early(dmi_decode) == 0) { |
| 548 | pr_info("SMBIOS %d.%d present.\n", | 540 | pr_info("SMBIOS %d.%d present.\n", |
| 549 | dmi_ver >> 8, dmi_ver & 0xFF); | 541 | dmi_ver >> 8, dmi_ver & 0xFF); |
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index a6952ba343a8..a65b75161aa4 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c | |||
| @@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = { | |||
| 334 | .xlate = irq_domain_xlate_twocell, | 334 | .xlate = irq_domain_xlate_twocell, |
| 335 | }; | 335 | }; |
| 336 | 336 | ||
| 337 | static struct of_device_id mpc8xxx_gpio_ids[] __initdata = { | 337 | static struct of_device_id mpc8xxx_gpio_ids[] = { |
| 338 | { .compatible = "fsl,mpc8349-gpio", }, | 338 | { .compatible = "fsl,mpc8349-gpio", }, |
| 339 | { .compatible = "fsl,mpc8572-gpio", }, | 339 | { .compatible = "fsl,mpc8572-gpio", }, |
| 340 | { .compatible = "fsl,mpc8610-gpio", }, | 340 | { .compatible = "fsl,mpc8610-gpio", }, |
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 257e2989215c..045a952576c7 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c | |||
| @@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev) | |||
| 219 | ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2, | 219 | ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2, |
| 220 | &priv->dir_reg_offset); | 220 | &priv->dir_reg_offset); |
| 221 | if (ret) | 221 | if (ret) |
| 222 | dev_err(dev, "can't read the dir register offset!\n"); | 222 | dev_dbg(dev, "can't read the dir register offset!\n"); |
| 223 | 223 | ||
| 224 | priv->dir_reg_offset <<= 3; | 224 | priv->dir_reg_offset <<= 3; |
| 225 | } | 225 | } |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c0929d938ced..df990f29757a 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
| @@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 201 | if (!handler) | 201 | if (!handler) |
| 202 | return AE_BAD_PARAMETER; | 202 | return AE_BAD_PARAMETER; |
| 203 | 203 | ||
| 204 | pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin); | ||
| 205 | if (pin < 0) | ||
| 206 | return AE_BAD_PARAMETER; | ||
| 207 | |||
| 204 | desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event"); | 208 | desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event"); |
| 205 | if (IS_ERR(desc)) { | 209 | if (IS_ERR(desc)) { |
| 206 | dev_err(chip->dev, "Failed to request GPIO\n"); | 210 | dev_err(chip->dev, "Failed to request GPIO\n"); |
| @@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, | |||
| 551 | struct gpio_desc *desc; | 555 | struct gpio_desc *desc; |
| 552 | bool found; | 556 | bool found; |
| 553 | 557 | ||
| 558 | pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin); | ||
| 559 | if (pin < 0) { | ||
| 560 | status = AE_BAD_PARAMETER; | ||
| 561 | goto out; | ||
| 562 | } | ||
| 563 | |||
| 554 | mutex_lock(&achip->conn_lock); | 564 | mutex_lock(&achip->conn_lock); |
| 555 | 565 | ||
| 556 | found = false; | 566 | found = false; |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index f6d04c7b5115..679b10e34fb5 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -525,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb) | |||
| 525 | } | 525 | } |
| 526 | EXPORT_SYMBOL(drm_framebuffer_reference); | 526 | EXPORT_SYMBOL(drm_framebuffer_reference); |
| 527 | 527 | ||
| 528 | static void drm_framebuffer_free_bug(struct kref *kref) | ||
| 529 | { | ||
| 530 | BUG(); | ||
| 531 | } | ||
| 532 | |||
| 533 | static void __drm_framebuffer_unreference(struct drm_framebuffer *fb) | ||
| 534 | { | ||
| 535 | DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount)); | ||
| 536 | kref_put(&fb->refcount, drm_framebuffer_free_bug); | ||
| 537 | } | ||
| 538 | |||
| 539 | /** | 528 | /** |
| 540 | * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr | 529 | * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr |
| 541 | * @fb: fb to unregister | 530 | * @fb: fb to unregister |
| @@ -1320,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane) | |||
| 1320 | return; | 1309 | return; |
| 1321 | } | 1310 | } |
| 1322 | /* disconnect the plane from the fb and crtc: */ | 1311 | /* disconnect the plane from the fb and crtc: */ |
| 1323 | __drm_framebuffer_unreference(plane->old_fb); | 1312 | drm_framebuffer_unreference(plane->old_fb); |
| 1324 | plane->old_fb = NULL; | 1313 | plane->old_fb = NULL; |
| 1325 | plane->fb = NULL; | 1314 | plane->fb = NULL; |
| 1326 | plane->crtc = NULL; | 1315 | plane->crtc = NULL; |
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 732cb6f8e653..4c0aa97aaf03 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c | |||
| @@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) | |||
| 287 | 287 | ||
| 288 | drm_mode_connector_update_edid_property(connector, edid); | 288 | drm_mode_connector_update_edid_property(connector, edid); |
| 289 | ret = drm_add_edid_modes(connector, edid); | 289 | ret = drm_add_edid_modes(connector, edid); |
| 290 | drm_edid_to_eld(connector, edid); | ||
| 290 | kfree(edid); | 291 | kfree(edid); |
| 291 | 292 | ||
| 292 | return ret; | 293 | return ret; |
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 6591d48c1b9d..3fee587bc284 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c | |||
| @@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect | |||
| 174 | struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; | 174 | struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; |
| 175 | 175 | ||
| 176 | count = drm_add_edid_modes(connector, edid); | 176 | count = drm_add_edid_modes(connector, edid); |
| 177 | drm_edid_to_eld(connector, edid); | ||
| 177 | } else | 178 | } else |
| 178 | count = (*connector_funcs->get_modes)(connector); | 179 | count = (*connector_funcs->get_modes)(connector); |
| 179 | } | 180 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index c300e22da8ac..33a10ce967ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
| @@ -147,6 +147,7 @@ struct fimd_win_data { | |||
| 147 | unsigned int ovl_height; | 147 | unsigned int ovl_height; |
| 148 | unsigned int fb_width; | 148 | unsigned int fb_width; |
| 149 | unsigned int fb_height; | 149 | unsigned int fb_height; |
| 150 | unsigned int fb_pitch; | ||
| 150 | unsigned int bpp; | 151 | unsigned int bpp; |
| 151 | unsigned int pixel_format; | 152 | unsigned int pixel_format; |
| 152 | dma_addr_t dma_addr; | 153 | dma_addr_t dma_addr; |
| @@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc, | |||
| 532 | win_data->offset_y = plane->crtc_y; | 533 | win_data->offset_y = plane->crtc_y; |
| 533 | win_data->ovl_width = plane->crtc_width; | 534 | win_data->ovl_width = plane->crtc_width; |
| 534 | win_data->ovl_height = plane->crtc_height; | 535 | win_data->ovl_height = plane->crtc_height; |
| 536 | win_data->fb_pitch = plane->pitch; | ||
| 535 | win_data->fb_width = plane->fb_width; | 537 | win_data->fb_width = plane->fb_width; |
| 536 | win_data->fb_height = plane->fb_height; | 538 | win_data->fb_height = plane->fb_height; |
| 537 | win_data->dma_addr = plane->dma_addr[0] + offset; | 539 | win_data->dma_addr = plane->dma_addr[0] + offset; |
| 538 | win_data->bpp = plane->bpp; | 540 | win_data->bpp = plane->bpp; |
| 539 | win_data->pixel_format = plane->pixel_format; | 541 | win_data->pixel_format = plane->pixel_format; |
| 540 | win_data->buf_offsize = (plane->fb_width - plane->crtc_width) * | 542 | win_data->buf_offsize = |
| 541 | (plane->bpp >> 3); | 543 | plane->pitch - (plane->crtc_width * (plane->bpp >> 3)); |
| 542 | win_data->line_size = plane->crtc_width * (plane->bpp >> 3); | 544 | win_data->line_size = plane->crtc_width * (plane->bpp >> 3); |
| 543 | 545 | ||
| 544 | DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", | 546 | DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", |
| @@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos) | |||
| 704 | writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); | 706 | writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); |
| 705 | 707 | ||
| 706 | /* buffer end address */ | 708 | /* buffer end address */ |
| 707 | size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); | 709 | size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3); |
| 708 | val = (unsigned long)(win_data->dma_addr + size); | 710 | val = (unsigned long)(win_data->dma_addr + size); |
| 709 | writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); | 711 | writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); |
| 710 | 712 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 3518bc4654c5..2e3bc57ea50e 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
| @@ -55,6 +55,7 @@ struct hdmi_win_data { | |||
| 55 | unsigned int fb_x; | 55 | unsigned int fb_x; |
| 56 | unsigned int fb_y; | 56 | unsigned int fb_y; |
| 57 | unsigned int fb_width; | 57 | unsigned int fb_width; |
| 58 | unsigned int fb_pitch; | ||
| 58 | unsigned int fb_height; | 59 | unsigned int fb_height; |
| 59 | unsigned int src_width; | 60 | unsigned int src_width; |
| 60 | unsigned int src_height; | 61 | unsigned int src_height; |
| @@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) | |||
| 438 | } else { | 439 | } else { |
| 439 | luma_addr[0] = win_data->dma_addr; | 440 | luma_addr[0] = win_data->dma_addr; |
| 440 | chroma_addr[0] = win_data->dma_addr | 441 | chroma_addr[0] = win_data->dma_addr |
| 441 | + (win_data->fb_width * win_data->fb_height); | 442 | + (win_data->fb_pitch * win_data->fb_height); |
| 442 | } | 443 | } |
| 443 | 444 | ||
| 444 | if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { | 445 | if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { |
| @@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) | |||
| 447 | luma_addr[1] = luma_addr[0] + 0x40; | 448 | luma_addr[1] = luma_addr[0] + 0x40; |
| 448 | chroma_addr[1] = chroma_addr[0] + 0x40; | 449 | chroma_addr[1] = chroma_addr[0] + 0x40; |
| 449 | } else { | 450 | } else { |
| 450 | luma_addr[1] = luma_addr[0] + win_data->fb_width; | 451 | luma_addr[1] = luma_addr[0] + win_data->fb_pitch; |
| 451 | chroma_addr[1] = chroma_addr[0] + win_data->fb_width; | 452 | chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch; |
| 452 | } | 453 | } |
| 453 | } else { | 454 | } else { |
| 454 | ctx->interlace = false; | 455 | ctx->interlace = false; |
| @@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) | |||
| 469 | vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); | 470 | vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); |
| 470 | 471 | ||
| 471 | /* setting size of input image */ | 472 | /* setting size of input image */ |
| 472 | vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) | | 473 | vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) | |
| 473 | VP_IMG_VSIZE(win_data->fb_height)); | 474 | VP_IMG_VSIZE(win_data->fb_height)); |
| 474 | /* chroma height has to be reduced by 2 to avoid chroma distortions */ | 475 | /* chroma height has to be reduced by 2 to avoid chroma distortions */ |
| 475 | vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) | | 476 | vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) | |
| 476 | VP_IMG_VSIZE(win_data->fb_height / 2)); | 477 | VP_IMG_VSIZE(win_data->fb_height / 2)); |
| 477 | 478 | ||
| 478 | vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width); | 479 | vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width); |
| @@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) | |||
| 559 | /* converting dma address base and source offset */ | 560 | /* converting dma address base and source offset */ |
| 560 | dma_addr = win_data->dma_addr | 561 | dma_addr = win_data->dma_addr |
| 561 | + (win_data->fb_x * win_data->bpp >> 3) | 562 | + (win_data->fb_x * win_data->bpp >> 3) |
| 562 | + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3); | 563 | + (win_data->fb_y * win_data->fb_pitch); |
| 563 | src_x_offset = 0; | 564 | src_x_offset = 0; |
| 564 | src_y_offset = 0; | 565 | src_y_offset = 0; |
| 565 | 566 | ||
| @@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) | |||
| 576 | MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); | 577 | MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); |
| 577 | 578 | ||
| 578 | /* setup geometry */ | 579 | /* setup geometry */ |
| 579 | mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width); | 580 | mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), |
| 581 | win_data->fb_pitch / (win_data->bpp >> 3)); | ||
| 580 | 582 | ||
| 581 | /* setup display size */ | 583 | /* setup display size */ |
| 582 | if (ctx->mxr_ver == MXR_VER_128_0_0_184 && | 584 | if (ctx->mxr_ver == MXR_VER_128_0_0_184 && |
| @@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc, | |||
| 961 | win_data->fb_y = plane->fb_y; | 963 | win_data->fb_y = plane->fb_y; |
| 962 | win_data->fb_width = plane->fb_width; | 964 | win_data->fb_width = plane->fb_width; |
| 963 | win_data->fb_height = plane->fb_height; | 965 | win_data->fb_height = plane->fb_height; |
| 966 | win_data->fb_pitch = plane->pitch; | ||
| 964 | win_data->src_width = plane->src_width; | 967 | win_data->src_width = plane->src_width; |
| 965 | win_data->src_height = plane->src_height; | 968 | win_data->src_height = plane->src_height; |
| 966 | 969 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5b205863b659..27ea6bdebce7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | |||
| 2737 | 2737 | ||
| 2738 | WARN_ON(i915_verify_lists(ring->dev)); | 2738 | WARN_ON(i915_verify_lists(ring->dev)); |
| 2739 | 2739 | ||
| 2740 | /* Move any buffers on the active list that are no longer referenced | 2740 | /* Retire requests first as we use it above for the early return. |
| 2741 | * by the ringbuffer to the flushing/inactive lists as appropriate, | 2741 | * If we retire requests last, we may use a later seqno and so clear |
| 2742 | * before we free the context associated with the requests. | 2742 | * the requests lists without clearing the active list, leading to |
| 2743 | * confusion. | ||
| 2743 | */ | 2744 | */ |
| 2744 | while (!list_empty(&ring->active_list)) { | ||
| 2745 | struct drm_i915_gem_object *obj; | ||
| 2746 | |||
| 2747 | obj = list_first_entry(&ring->active_list, | ||
| 2748 | struct drm_i915_gem_object, | ||
| 2749 | ring_list); | ||
| 2750 | |||
| 2751 | if (!i915_gem_request_completed(obj->last_read_req, true)) | ||
| 2752 | break; | ||
| 2753 | |||
| 2754 | i915_gem_object_move_to_inactive(obj); | ||
| 2755 | } | ||
| 2756 | |||
| 2757 | |||
| 2758 | while (!list_empty(&ring->request_list)) { | 2745 | while (!list_empty(&ring->request_list)) { |
| 2759 | struct drm_i915_gem_request *request; | 2746 | struct drm_i915_gem_request *request; |
| 2760 | struct intel_ringbuffer *ringbuf; | 2747 | struct intel_ringbuffer *ringbuf; |
| @@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | |||
| 2789 | i915_gem_free_request(request); | 2776 | i915_gem_free_request(request); |
| 2790 | } | 2777 | } |
| 2791 | 2778 | ||
| 2779 | /* Move any buffers on the active list that are no longer referenced | ||
| 2780 | * by the ringbuffer to the flushing/inactive lists as appropriate, | ||
| 2781 | * before we free the context associated with the requests. | ||
| 2782 | */ | ||
| 2783 | while (!list_empty(&ring->active_list)) { | ||
| 2784 | struct drm_i915_gem_object *obj; | ||
| 2785 | |||
| 2786 | obj = list_first_entry(&ring->active_list, | ||
| 2787 | struct drm_i915_gem_object, | ||
| 2788 | ring_list); | ||
| 2789 | |||
| 2790 | if (!i915_gem_request_completed(obj->last_read_req, true)) | ||
| 2791 | break; | ||
| 2792 | |||
| 2793 | i915_gem_object_move_to_inactive(obj); | ||
| 2794 | } | ||
| 2795 | |||
| 2792 | if (unlikely(ring->trace_irq_req && | 2796 | if (unlikely(ring->trace_irq_req && |
| 2793 | i915_gem_request_completed(ring->trace_irq_req, true))) { | 2797 | i915_gem_request_completed(ring->trace_irq_req, true))) { |
| 2794 | ring->irq_put(ring); | 2798 | ring->irq_put(ring); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b773368fc62c..38a742532c4f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -1487,7 +1487,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1487 | goto err; | 1487 | goto err; |
| 1488 | } | 1488 | } |
| 1489 | 1489 | ||
| 1490 | if (i915_needs_cmd_parser(ring)) { | 1490 | if (i915_needs_cmd_parser(ring) && args->batch_len) { |
| 1491 | batch_obj = i915_gem_execbuffer_parse(ring, | 1491 | batch_obj = i915_gem_execbuffer_parse(ring, |
| 1492 | &shadow_exec_entry, | 1492 | &shadow_exec_entry, |
| 1493 | eb, | 1493 | eb, |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6d22128d97b1..f75173c20f47 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2438,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, | |||
| 2438 | if (!intel_crtc->base.primary->fb) | 2438 | if (!intel_crtc->base.primary->fb) |
| 2439 | return; | 2439 | return; |
| 2440 | 2440 | ||
| 2441 | if (intel_alloc_plane_obj(intel_crtc, plane_config)) | 2441 | if (intel_alloc_plane_obj(intel_crtc, plane_config)) { |
| 2442 | struct drm_plane *primary = intel_crtc->base.primary; | ||
| 2443 | |||
| 2444 | primary->state->crtc = &intel_crtc->base; | ||
| 2445 | primary->crtc = &intel_crtc->base; | ||
| 2446 | update_state_fb(primary); | ||
| 2447 | |||
| 2442 | return; | 2448 | return; |
| 2449 | } | ||
| 2443 | 2450 | ||
| 2444 | kfree(intel_crtc->base.primary->fb); | 2451 | kfree(intel_crtc->base.primary->fb); |
| 2445 | intel_crtc->base.primary->fb = NULL; | 2452 | intel_crtc->base.primary->fb = NULL; |
| @@ -2462,11 +2469,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, | |||
| 2462 | continue; | 2469 | continue; |
| 2463 | 2470 | ||
| 2464 | if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { | 2471 | if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { |
| 2472 | struct drm_plane *primary = intel_crtc->base.primary; | ||
| 2473 | |||
| 2465 | if (obj->tiling_mode != I915_TILING_NONE) | 2474 | if (obj->tiling_mode != I915_TILING_NONE) |
| 2466 | dev_priv->preserve_bios_swizzle = true; | 2475 | dev_priv->preserve_bios_swizzle = true; |
| 2467 | 2476 | ||
| 2468 | drm_framebuffer_reference(c->primary->fb); | 2477 | drm_framebuffer_reference(c->primary->fb); |
| 2469 | intel_crtc->base.primary->fb = c->primary->fb; | 2478 | primary->fb = c->primary->fb; |
| 2479 | primary->state->crtc = &intel_crtc->base; | ||
| 2480 | primary->crtc = &intel_crtc->base; | ||
| 2470 | obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); | 2481 | obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); |
| 2471 | break; | 2482 | break; |
| 2472 | } | 2483 | } |
| @@ -6663,7 +6674,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
| 6663 | plane_config->size); | 6674 | plane_config->size); |
| 6664 | 6675 | ||
| 6665 | crtc->base.primary->fb = fb; | 6676 | crtc->base.primary->fb = fb; |
| 6666 | update_state_fb(crtc->base.primary); | ||
| 6667 | } | 6677 | } |
| 6668 | 6678 | ||
| 6669 | static void chv_crtc_clock_get(struct intel_crtc *crtc, | 6679 | static void chv_crtc_clock_get(struct intel_crtc *crtc, |
| @@ -7704,7 +7714,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, | |||
| 7704 | plane_config->size); | 7714 | plane_config->size); |
| 7705 | 7715 | ||
| 7706 | crtc->base.primary->fb = fb; | 7716 | crtc->base.primary->fb = fb; |
| 7707 | update_state_fb(crtc->base.primary); | ||
| 7708 | return; | 7717 | return; |
| 7709 | 7718 | ||
| 7710 | error: | 7719 | error: |
| @@ -7798,7 +7807,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, | |||
| 7798 | plane_config->size); | 7807 | plane_config->size); |
| 7799 | 7808 | ||
| 7800 | crtc->base.primary->fb = fb; | 7809 | crtc->base.primary->fb = fb; |
| 7801 | update_state_fb(crtc->base.primary); | ||
| 7802 | } | 7810 | } |
| 7803 | 7811 | ||
| 7804 | static bool ironlake_get_pipe_config(struct intel_crtc *crtc, | 7812 | static bool ironlake_get_pipe_config(struct intel_crtc *crtc, |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0a52c44ad03d..9c5451c97942 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
| @@ -1322,7 +1322,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | |||
| 1322 | drm_modeset_lock_all(dev); | 1322 | drm_modeset_lock_all(dev); |
| 1323 | 1323 | ||
| 1324 | plane = drm_plane_find(dev, set->plane_id); | 1324 | plane = drm_plane_find(dev, set->plane_id); |
| 1325 | if (!plane) { | 1325 | if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { |
| 1326 | ret = -ENOENT; | 1326 | ret = -ENOENT; |
| 1327 | goto out_unlock; | 1327 | goto out_unlock; |
| 1328 | } | 1328 | } |
| @@ -1349,7 +1349,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, | |||
| 1349 | drm_modeset_lock_all(dev); | 1349 | drm_modeset_lock_all(dev); |
| 1350 | 1350 | ||
| 1351 | plane = drm_plane_find(dev, get->plane_id); | 1351 | plane = drm_plane_find(dev, get->plane_id); |
| 1352 | if (!plane) { | 1352 | if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { |
| 1353 | ret = -ENOENT; | 1353 | ret = -ENOENT; |
| 1354 | goto out_unlock; | 1354 | goto out_unlock; |
| 1355 | } | 1355 | } |
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index c648e1996dab..243a36c93b8f 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h | |||
| @@ -2129,6 +2129,7 @@ | |||
| 2129 | #define VCE_UENC_REG_CLOCK_GATING 0x207c0 | 2129 | #define VCE_UENC_REG_CLOCK_GATING 0x207c0 |
| 2130 | #define VCE_SYS_INT_EN 0x21300 | 2130 | #define VCE_SYS_INT_EN 0x21300 |
| 2131 | # define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) | 2131 | # define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) |
| 2132 | #define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c | ||
| 2132 | #define VCE_LMI_CTRL2 0x21474 | 2133 | #define VCE_LMI_CTRL2 0x21474 |
| 2133 | #define VCE_LMI_CTRL 0x21498 | 2134 | #define VCE_LMI_CTRL 0x21498 |
| 2134 | #define VCE_LMI_VM_CTRL 0x214a0 | 2135 | #define VCE_LMI_VM_CTRL 0x214a0 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5587603b4a89..33d5a4f4eebd 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -1565,6 +1565,7 @@ struct radeon_dpm { | |||
| 1565 | int new_active_crtc_count; | 1565 | int new_active_crtc_count; |
| 1566 | u32 current_active_crtcs; | 1566 | u32 current_active_crtcs; |
| 1567 | int current_active_crtc_count; | 1567 | int current_active_crtc_count; |
| 1568 | bool single_display; | ||
| 1568 | struct radeon_dpm_dynamic_state dyn_state; | 1569 | struct radeon_dpm_dynamic_state dyn_state; |
| 1569 | struct radeon_dpm_fan fan; | 1570 | struct radeon_dpm_fan fan; |
| 1570 | u32 tdp_limit; | 1571 | u32 tdp_limit; |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 63ccb8fa799c..d27e4ccb848c 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
| @@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) | |||
| 76 | 76 | ||
| 77 | static bool radeon_read_bios(struct radeon_device *rdev) | 77 | static bool radeon_read_bios(struct radeon_device *rdev) |
| 78 | { | 78 | { |
| 79 | uint8_t __iomem *bios; | 79 | uint8_t __iomem *bios, val1, val2; |
| 80 | size_t size; | 80 | size_t size; |
| 81 | 81 | ||
| 82 | rdev->bios = NULL; | 82 | rdev->bios = NULL; |
| @@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev) | |||
| 86 | return false; | 86 | return false; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | 89 | val1 = readb(&bios[0]); |
| 90 | val2 = readb(&bios[1]); | ||
| 91 | |||
| 92 | if (size == 0 || val1 != 0x55 || val2 != 0xaa) { | ||
| 90 | pci_unmap_rom(rdev->pdev, bios); | 93 | pci_unmap_rom(rdev->pdev, bios); |
| 91 | return false; | 94 | return false; |
| 92 | } | 95 | } |
| 93 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); | 96 | rdev->bios = kzalloc(size, GFP_KERNEL); |
| 94 | if (rdev->bios == NULL) { | 97 | if (rdev->bios == NULL) { |
| 95 | pci_unmap_rom(rdev->pdev, bios); | 98 | pci_unmap_rom(rdev->pdev, bios); |
| 96 | return false; | 99 | return false; |
| 97 | } | 100 | } |
| 101 | memcpy_fromio(rdev->bios, bios, size); | ||
| 98 | pci_unmap_rom(rdev->pdev, bios); | 102 | pci_unmap_rom(rdev->pdev, bios); |
| 99 | return true; | 103 | return true; |
| 100 | } | 104 | } |
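The radeon_bios change stops dereferencing the ioremapped ROM directly: the signature bytes go through readb() and the image is copied out with memcpy_fromio() into a plain kzalloc() buffer (kmemdup cannot be used on __iomem memory). A hedged kernel-style sketch of the accessor pattern, not the driver's exact code:

    #include <linux/io.h>
    #include <linux/slab.h>

    /* Copy an option ROM out of an ioremapped window; 'rom' must only be
     * touched through the io accessors, never plain loads or memcpy. */
    static void *copy_option_rom(const u8 __iomem *rom, size_t size)
    {
        void *buf;

        if (size < 2 || readb(rom) != 0x55 || readb(rom + 1) != 0xaa)
            return NULL;                    /* missing 0x55AA ROM signature */

        buf = kzalloc(size, GFP_KERNEL);
        if (buf)
            memcpy_fromio(buf, rom, size);  /* io-safe copy into normal memory */
        return buf;
    }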
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index a69bd441dd2d..572b4dbec186 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c | |||
| @@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
| 122 | it = interval_tree_iter_first(&rmn->objects, start, end); | 122 | it = interval_tree_iter_first(&rmn->objects, start, end); |
| 123 | while (it) { | 123 | while (it) { |
| 124 | struct radeon_bo *bo; | 124 | struct radeon_bo *bo; |
| 125 | struct fence *fence; | ||
| 126 | int r; | 125 | int r; |
| 127 | 126 | ||
| 128 | bo = container_of(it, struct radeon_bo, mn_it); | 127 | bo = container_of(it, struct radeon_bo, mn_it); |
| @@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
| 134 | continue; | 133 | continue; |
| 135 | } | 134 | } |
| 136 | 135 | ||
| 137 | fence = reservation_object_get_excl(bo->tbo.resv); | 136 | r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, |
| 138 | if (fence) { | 137 | false, MAX_SCHEDULE_TIMEOUT); |
| 139 | r = radeon_fence_wait((struct radeon_fence *)fence, false); | 138 | if (r) |
| 140 | if (r) | 139 | DRM_ERROR("(%d) failed to wait for user bo\n", r); |
| 141 | DRM_ERROR("(%d) failed to wait for user bo\n", r); | ||
| 142 | } | ||
| 143 | 140 | ||
| 144 | radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); | 141 | radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); |
| 145 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 142 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
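With the radeon_mn change the MMU notifier waits on the whole reservation object rather than only the exclusive fence: reservation_object_wait_timeout_rcu() with wait_all=true covers shared fences too, and it returns the remaining timeout on success, 0 on timeout, or a negative error. A hedged sketch of the call (error reporting is illustrative):

    long r;

    /* Wait for all shared and exclusive fences on the BO; with
     * MAX_SCHEDULE_TIMEOUT and intr=false the wait only ends once
     * everything has signalled. */
    r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
                                            MAX_SCHEDULE_TIMEOUT);
    if (r <= 0)
        pr_err("wait for user bo failed (%ld)\n", r);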
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 33cf4108386d..c1ba83a8dd8c 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work) | |||
| 837 | radeon_pm_compute_clocks(rdev); | 837 | radeon_pm_compute_clocks(rdev); |
| 838 | } | 838 | } |
| 839 | 839 | ||
| 840 | static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | 840 | static bool radeon_dpm_single_display(struct radeon_device *rdev) |
| 841 | enum radeon_pm_state_type dpm_state) | ||
| 842 | { | 841 | { |
| 843 | int i; | ||
| 844 | struct radeon_ps *ps; | ||
| 845 | u32 ui_class; | ||
| 846 | bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? | 842 | bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? |
| 847 | true : false; | 843 | true : false; |
| 848 | 844 | ||
| @@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | |||
| 858 | if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) | 854 | if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) |
| 859 | single_display = false; | 855 | single_display = false; |
| 860 | 856 | ||
| 857 | return single_display; | ||
| 858 | } | ||
| 859 | |||
| 860 | static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | ||
| 861 | enum radeon_pm_state_type dpm_state) | ||
| 862 | { | ||
| 863 | int i; | ||
| 864 | struct radeon_ps *ps; | ||
| 865 | u32 ui_class; | ||
| 866 | bool single_display = radeon_dpm_single_display(rdev); | ||
| 867 | |||
| 861 | /* certain older asics have a separate 3D performance state, | 868 | /* certain older asics have a separate 3D performance state, |

| 862 | * so try that first if the user selected performance | 869 | * so try that first if the user selected performance |
| 863 | */ | 870 | */ |
| @@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) | |||
| 983 | struct radeon_ps *ps; | 990 | struct radeon_ps *ps; |
| 984 | enum radeon_pm_state_type dpm_state; | 991 | enum radeon_pm_state_type dpm_state; |
| 985 | int ret; | 992 | int ret; |
| 993 | bool single_display = radeon_dpm_single_display(rdev); | ||
| 986 | 994 | ||
| 987 | /* if dpm init failed */ | 995 | /* if dpm init failed */ |
| 988 | if (!rdev->pm.dpm_enabled) | 996 | if (!rdev->pm.dpm_enabled) |
| @@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) | |||
| 1007 | /* vce just modifies an existing state so force a change */ | 1015 | /* vce just modifies an existing state so force a change */ |
| 1008 | if (ps->vce_active != rdev->pm.dpm.vce_active) | 1016 | if (ps->vce_active != rdev->pm.dpm.vce_active) |
| 1009 | goto force; | 1017 | goto force; |
| 1018 | /* user has made a display change (such as timing) */ | ||
| 1019 | if (rdev->pm.dpm.single_display != single_display) | ||
| 1020 | goto force; | ||
| 1010 | if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { | 1021 | if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { |
| 1011 | /* for pre-BTC and APUs if the num crtcs changed but state is the same, | 1022 | /* for pre-BTC and APUs if the num crtcs changed but state is the same, |
| 1012 | * all we need to do is update the display configuration. | 1023 | * all we need to do is update the display configuration. |
| @@ -1069,6 +1080,7 @@ force: | |||
| 1069 | 1080 | ||
| 1070 | rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; | 1081 | rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; |
| 1071 | rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; | 1082 | rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; |
| 1083 | rdev->pm.dpm.single_display = single_display; | ||
| 1072 | 1084 | ||
| 1073 | /* wait for the rings to drain */ | 1085 | /* wait for the rings to drain */ |
| 1074 | for (i = 0; i < RADEON_NUM_RINGS; i++) { | 1086 | for (i = 0; i < RADEON_NUM_RINGS; i++) { |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 2456f69efd23..8c7872339c2a 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) | |||
| 495 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); | 495 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
| 496 | seq_printf(m, "%u dwords in ring\n", count); | 496 | seq_printf(m, "%u dwords in ring\n", count); |
| 497 | 497 | ||
| 498 | if (!ring->ready) | 498 | if (!ring->ring) |
| 499 | return 0; | 499 | return 0; |
| 500 | 500 | ||
| 501 | /* print 8 dw before current rptr as often it's the last executed | 501 | /* print 8 dw before current rptr as often it's the last executed |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index d02aa1d0f588..b292aca0f342 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
| @@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) | |||
| 598 | enum dma_data_direction direction = write ? | 598 | enum dma_data_direction direction = write ? |
| 599 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | 599 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
| 600 | 600 | ||
| 601 | /* double check that we don't free the table twice */ | ||
| 602 | if (!ttm->sg->sgl) | ||
| 603 | return; | ||
| 604 | |||
| 601 | /* free the sg table and pages again */ | 605 | /* free the sg table and pages again */ |
| 602 | dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); | 606 | dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); |
| 603 | 607 | ||
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c index 1ac7bb825a1b..fbbe78fbd087 100644 --- a/drivers/gpu/drm/radeon/vce_v2_0.c +++ b/drivers/gpu/drm/radeon/vce_v2_0.c | |||
| @@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev) | |||
| 156 | WREG32(VCE_LMI_SWAP_CNTL1, 0); | 156 | WREG32(VCE_LMI_SWAP_CNTL1, 0); |
| 157 | WREG32(VCE_LMI_VM_CTRL, 0); | 157 | WREG32(VCE_LMI_VM_CTRL, 0); |
| 158 | 158 | ||
| 159 | WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8); | ||
| 160 | |||
| 161 | addr &= 0xff; | ||
| 159 | size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size); | 162 | size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size); |
| 160 | WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); | 163 | WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); |
| 161 | WREG32(VCE_VCPU_CACHE_SIZE0, size); | 164 | WREG32(VCE_VCPU_CACHE_SIZE0, size); |
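The vce_v2_0 hunk splits the firmware address between the new 40-bit BAR register (the address in 256-byte units, i.e. shifted right by 8) and the existing per-segment cache offsets, which then only need the remaining low byte plus the accumulated sizes. A standalone sketch of the split with a made-up address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x1234567890ULL;        /* assumed 40-bit GPU address */

        /* High bits are programmed in 256-byte units into the 40-bit BAR;
         * only the low byte stays for the 32-bit offset registers. */
        uint32_t bar    = (uint32_t)(addr >> 8);
        uint32_t offset = (uint32_t)(addr & 0xff);

        printf("BAR=0x%x offset=0x%x\n", bar, offset);
        return 0;
    }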
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 1096da327130..75c6d2103e07 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c | |||
| @@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p) | |||
| 659 | 659 | ||
| 660 | mutex_lock(&data->mutex); | 660 | mutex_lock(&data->mutex); |
| 661 | 661 | ||
| 662 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 662 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
| 663 | indio_dev->masklength) { | 663 | indio_dev->masklength) { |
| 664 | ret = bma180_get_data_reg(data, bit); | 664 | ret = bma180_get_data_reg(data, bit); |
| 665 | if (ret < 0) { | 665 | if (ret < 0) { |
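This and several later IIO hunks switch the trigger handlers from indio_dev->buffer->scan_mask to indio_dev->active_scan_mask, the mask the core computed when the buffer was enabled and which only contains channels actually being captured. A hedged sketch of the corrected scan loop (the surrounding buffer handling is illustrative, not the driver's exact code):

    int bit, ret, i = 0;

    /* Walk only the channels the IIO core enabled for this capture run. */
    for_each_set_bit(bit, indio_dev->active_scan_mask,
                     indio_dev->masklength) {
        ret = bma180_get_data_reg(data, bit);
        if (ret < 0)
            goto err;
        buf[i++] = ret;
    }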
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c index 4026122a7592..73e87739d219 100644 --- a/drivers/iio/accel/bmc150-accel.c +++ b/drivers/iio/accel/bmc150-accel.c | |||
| @@ -204,14 +204,14 @@ static const struct { | |||
| 204 | int val; | 204 | int val; |
| 205 | int val2; | 205 | int val2; |
| 206 | u8 bw_bits; | 206 | u8 bw_bits; |
| 207 | } bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08}, | 207 | } bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08}, |
| 208 | {15, 630000, 0x09}, | 208 | {31, 260000, 0x09}, |
| 209 | {31, 250000, 0x0A}, | 209 | {62, 500000, 0x0A}, |
| 210 | {62, 500000, 0x0B}, | 210 | {125, 0, 0x0B}, |
| 211 | {125, 0, 0x0C}, | 211 | {250, 0, 0x0C}, |
| 212 | {250, 0, 0x0D}, | 212 | {500, 0, 0x0D}, |
| 213 | {500, 0, 0x0E}, | 213 | {1000, 0, 0x0E}, |
| 214 | {1000, 0, 0x0F} }; | 214 | {2000, 0, 0x0F} }; |
| 215 | 215 | ||
| 216 | static const struct { | 216 | static const struct { |
| 217 | int bw_bits; | 217 | int bw_bits; |
| @@ -1049,7 +1049,7 @@ static int bmc150_accel_fifo_flush(struct iio_dev *indio_dev, unsigned samples) | |||
| 1049 | } | 1049 | } |
| 1050 | 1050 | ||
| 1051 | static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( | 1051 | static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( |
| 1052 | "7.810000 15.630000 31.250000 62.500000 125 250 500 1000"); | 1052 | "15.620000 31.260000 62.50000 125 250 500 1000 2000"); |
| 1053 | 1053 | ||
| 1054 | static struct attribute *bmc150_accel_attributes[] = { | 1054 | static struct attribute *bmc150_accel_attributes[] = { |
| 1055 | &iio_const_attr_sampling_frequency_available.dev_attr.attr, | 1055 | &iio_const_attr_sampling_frequency_available.dev_attr.attr, |
| @@ -1209,7 +1209,7 @@ static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p) | |||
| 1209 | int bit, ret, i = 0; | 1209 | int bit, ret, i = 0; |
| 1210 | 1210 | ||
| 1211 | mutex_lock(&data->mutex); | 1211 | mutex_lock(&data->mutex); |
| 1212 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 1212 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
| 1213 | indio_dev->masklength) { | 1213 | indio_dev->masklength) { |
| 1214 | ret = i2c_smbus_read_word_data(data->client, | 1214 | ret = i2c_smbus_read_word_data(data->client, |
| 1215 | BMC150_ACCEL_AXIS_TO_REG(bit)); | 1215 | BMC150_ACCEL_AXIS_TO_REG(bit)); |
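Besides the active_scan_mask switch, the bmc150 hunk corrects the sample-frequency table (and the matching sysfs "available" string) to the bandwidth-derived output data rates. Each entry is IIO's integer+micro encoding, so {15, 620000} means 15.62 Hz. A standalone sketch printing the corrected table as user space would see it:

    #include <stdio.h>

    struct samp_freq { int val; int val2; unsigned char bw_bits; };

    int main(void)
    {
        /* val/val2 split a frequency into integer and microhertz parts;
         * bw_bits is the register value selecting that rate. */
        static const struct samp_freq table[] = {
            { 15, 620000, 0x08 }, { 31, 260000, 0x09 },
            { 62, 500000, 0x0A }, { 125, 0, 0x0B },
            { 250, 0, 0x0C },     { 500, 0, 0x0D },
            { 1000, 0, 0x0E },    { 2000, 0, 0x0F },
        };
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            printf("0x%02x -> %d.%06d Hz\n", table[i].bw_bits,
                   table[i].val, table[i].val2);
        return 0;
    }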
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index a98b5d212fb3..51da3692d561 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c | |||
| @@ -956,7 +956,7 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p) | |||
| 956 | 956 | ||
| 957 | mutex_lock(&data->mutex); | 957 | mutex_lock(&data->mutex); |
| 958 | 958 | ||
| 959 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 959 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
| 960 | indio_dev->masklength) { | 960 | indio_dev->masklength) { |
| 961 | ret = kxcjk1013_get_acc_reg(data, bit); | 961 | ret = kxcjk1013_get_acc_reg(data, bit); |
| 962 | if (ret < 0) { | 962 | if (ret < 0) { |
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 9eaa8d1e582d..f96074a15ac8 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig | |||
| @@ -137,7 +137,8 @@ config AXP288_ADC | |||
| 137 | 137 | ||
| 138 | config CC10001_ADC | 138 | config CC10001_ADC |
| 139 | tristate "Cosmic Circuits 10001 ADC driver" | 139 | tristate "Cosmic Circuits 10001 ADC driver" |
| 140 | depends on HAS_IOMEM || HAVE_CLK || REGULATOR | 140 | depends on HAVE_CLK || REGULATOR |
| 141 | depends on HAS_IOMEM | ||
| 141 | select IIO_BUFFER | 142 | select IIO_BUFFER |
| 142 | select IIO_TRIGGERED_BUFFER | 143 | select IIO_TRIGGERED_BUFFER |
| 143 | help | 144 | help |
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index ff61ae55dd3f..8a0eb4a04fb5 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c | |||
| @@ -544,7 +544,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) | |||
| 544 | { | 544 | { |
| 545 | struct iio_dev *idev = iio_trigger_get_drvdata(trig); | 545 | struct iio_dev *idev = iio_trigger_get_drvdata(trig); |
| 546 | struct at91_adc_state *st = iio_priv(idev); | 546 | struct at91_adc_state *st = iio_priv(idev); |
| 547 | struct iio_buffer *buffer = idev->buffer; | ||
| 548 | struct at91_adc_reg_desc *reg = st->registers; | 547 | struct at91_adc_reg_desc *reg = st->registers; |
| 549 | u32 status = at91_adc_readl(st, reg->trigger_register); | 548 | u32 status = at91_adc_readl(st, reg->trigger_register); |
| 550 | int value; | 549 | int value; |
| @@ -564,7 +563,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) | |||
| 564 | at91_adc_writel(st, reg->trigger_register, | 563 | at91_adc_writel(st, reg->trigger_register, |
| 565 | status | value); | 564 | status | value); |
| 566 | 565 | ||
| 567 | for_each_set_bit(bit, buffer->scan_mask, | 566 | for_each_set_bit(bit, idev->active_scan_mask, |
| 568 | st->num_channels) { | 567 | st->num_channels) { |
| 569 | struct iio_chan_spec const *chan = idev->channels + bit; | 568 | struct iio_chan_spec const *chan = idev->channels + bit; |
| 570 | at91_adc_writel(st, AT91_ADC_CHER, | 569 | at91_adc_writel(st, AT91_ADC_CHER, |
| @@ -579,7 +578,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) | |||
| 579 | at91_adc_writel(st, reg->trigger_register, | 578 | at91_adc_writel(st, reg->trigger_register, |
| 580 | status & ~value); | 579 | status & ~value); |
| 581 | 580 | ||
| 582 | for_each_set_bit(bit, buffer->scan_mask, | 581 | for_each_set_bit(bit, idev->active_scan_mask, |
| 583 | st->num_channels) { | 582 | st->num_channels) { |
| 584 | struct iio_chan_spec const *chan = idev->channels + bit; | 583 | struct iio_chan_spec const *chan = idev->channels + bit; |
| 585 | at91_adc_writel(st, AT91_ADC_CHDR, | 584 | at91_adc_writel(st, AT91_ADC_CHDR, |
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 2e5cc4409f78..a0e7161f040c 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c | |||
| @@ -188,12 +188,11 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev) | |||
| 188 | static int tiadc_buffer_postenable(struct iio_dev *indio_dev) | 188 | static int tiadc_buffer_postenable(struct iio_dev *indio_dev) |
| 189 | { | 189 | { |
| 190 | struct tiadc_device *adc_dev = iio_priv(indio_dev); | 190 | struct tiadc_device *adc_dev = iio_priv(indio_dev); |
| 191 | struct iio_buffer *buffer = indio_dev->buffer; | ||
| 192 | unsigned int enb = 0; | 191 | unsigned int enb = 0; |
| 193 | u8 bit; | 192 | u8 bit; |
| 194 | 193 | ||
| 195 | tiadc_step_config(indio_dev); | 194 | tiadc_step_config(indio_dev); |
| 196 | for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels) | 195 | for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) |
| 197 | enb |= (get_adc_step_bit(adc_dev, bit) << 1); | 196 | enb |= (get_adc_step_bit(adc_dev, bit) << 1); |
| 198 | adc_dev->buffer_en_ch_steps = enb; | 197 | adc_dev->buffer_en_ch_steps = enb; |
| 199 | 198 | ||
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 5b72d170fd36..56292ae4538d 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c | |||
| @@ -141,9 +141,13 @@ struct vf610_adc { | |||
| 141 | struct regulator *vref; | 141 | struct regulator *vref; |
| 142 | struct vf610_adc_feature adc_feature; | 142 | struct vf610_adc_feature adc_feature; |
| 143 | 143 | ||
| 144 | u32 sample_freq_avail[5]; | ||
| 145 | |||
| 144 | struct completion completion; | 146 | struct completion completion; |
| 145 | }; | 147 | }; |
| 146 | 148 | ||
| 149 | static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 }; | ||
| 150 | |||
| 147 | #define VF610_ADC_CHAN(_idx, _chan_type) { \ | 151 | #define VF610_ADC_CHAN(_idx, _chan_type) { \ |
| 148 | .type = (_chan_type), \ | 152 | .type = (_chan_type), \ |
| 149 | .indexed = 1, \ | 153 | .indexed = 1, \ |
| @@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = { | |||
| 180 | /* sentinel */ | 184 | /* sentinel */ |
| 181 | }; | 185 | }; |
| 182 | 186 | ||
| 183 | /* | 187 | static inline void vf610_adc_calculate_rates(struct vf610_adc *info) |
| 184 | * ADC sample frequency, unit is ADCK cycles. | 188 | { |
| 185 | * ADC clk source is ipg clock, which is the same as bus clock. | 189 | unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk); |
| 186 | * | 190 | int i; |
| 187 | * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder) | 191 | |
| 188 | * SFCAdder: fixed to 6 ADCK cycles | 192 | /* |
| 189 | * AverageNum: 1, 4, 8, 16, 32 samples for hardware average. | 193 | * Calculate ADC sample frequencies |
| 190 | * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode | 194 | * Sample time unit is ADCK cycles. ADCK clk source is ipg clock, |
| 191 | * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles | 195 | * which is the same as bus clock. |
| 192 | * | 196 | * |
| 193 | * By default, enable 12 bit resolution mode, clock source | 197 | * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder) |
| 194 | * set to ipg clock, So get below frequency group: | 198 | * SFCAdder: fixed to 6 ADCK cycles |
| 195 | */ | 199 | * AverageNum: 1, 4, 8, 16, 32 samples for hardware average. |
| 196 | static const u32 vf610_sample_freq_avail[5] = | 200 | * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode |
| 197 | {1941176, 559332, 286957, 145374, 73171}; | 201 | * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles |
| 202 | */ | ||
| 203 | adck_rate = ipg_rate / info->adc_feature.clk_div; | ||
| 204 | for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) | ||
| 205 | info->sample_freq_avail[i] = | ||
| 206 | adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3)); | ||
| 207 | } | ||
| 198 | 208 | ||
| 199 | static inline void vf610_adc_cfg_init(struct vf610_adc *info) | 209 | static inline void vf610_adc_cfg_init(struct vf610_adc *info) |
| 200 | { | 210 | { |
| 211 | struct vf610_adc_feature *adc_feature = &info->adc_feature; | ||
| 212 | |||
| 201 | /* set default Configuration for ADC controller */ | 213 | /* set default Configuration for ADC controller */ |
| 202 | info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET; | 214 | adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET; |
| 203 | info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET; | 215 | adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET; |
| 216 | |||
| 217 | adc_feature->calibration = true; | ||
| 218 | adc_feature->ovwren = true; | ||
| 219 | |||
| 220 | adc_feature->res_mode = 12; | ||
| 221 | adc_feature->sample_rate = 1; | ||
| 222 | adc_feature->lpm = true; | ||
| 204 | 223 | ||
| 205 | info->adc_feature.calibration = true; | 224 | /* Use a safe ADCK which is below 20MHz on all devices */
| 206 | info->adc_feature.ovwren = true; | 225 | adc_feature->clk_div = 8; |
| 207 | 226 | ||
| 208 | info->adc_feature.clk_div = 1; | 227 | vf610_adc_calculate_rates(info); |
| 209 | info->adc_feature.res_mode = 12; | ||
| 210 | info->adc_feature.sample_rate = 1; | ||
| 211 | info->adc_feature.lpm = true; | ||
| 212 | } | 228 | } |
| 213 | 229 | ||
| 214 | static void vf610_adc_cfg_post_set(struct vf610_adc *info) | 230 | static void vf610_adc_cfg_post_set(struct vf610_adc *info) |
| @@ -287,12 +303,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info) | |||
| 287 | 303 | ||
| 288 | cfg_data = readl(info->regs + VF610_REG_ADC_CFG); | 304 | cfg_data = readl(info->regs + VF610_REG_ADC_CFG); |
| 289 | 305 | ||
| 290 | /* low power configuration */ | ||
| 291 | cfg_data &= ~VF610_ADC_ADLPC_EN; | 306 | cfg_data &= ~VF610_ADC_ADLPC_EN; |
| 292 | if (adc_feature->lpm) | 307 | if (adc_feature->lpm) |
| 293 | cfg_data |= VF610_ADC_ADLPC_EN; | 308 | cfg_data |= VF610_ADC_ADLPC_EN; |
| 294 | 309 | ||
| 295 | /* disable high speed */ | ||
| 296 | cfg_data &= ~VF610_ADC_ADHSC_EN; | 310 | cfg_data &= ~VF610_ADC_ADHSC_EN; |
| 297 | 311 | ||
| 298 | writel(cfg_data, info->regs + VF610_REG_ADC_CFG); | 312 | writel(cfg_data, info->regs + VF610_REG_ADC_CFG); |
| @@ -432,10 +446,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id) | |||
| 432 | return IRQ_HANDLED; | 446 | return IRQ_HANDLED; |
| 433 | } | 447 | } |
| 434 | 448 | ||
| 435 | static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171"); | 449 | static ssize_t vf610_show_samp_freq_avail(struct device *dev, |
| 450 | struct device_attribute *attr, char *buf) | ||
| 451 | { | ||
| 452 | struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev)); | ||
| 453 | size_t len = 0; | ||
| 454 | int i; | ||
| 455 | |||
| 456 | for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++) | ||
| 457 | len += scnprintf(buf + len, PAGE_SIZE - len, | ||
| 458 | "%u ", info->sample_freq_avail[i]); | ||
| 459 | |||
| 460 | /* replace trailing space by newline */ | ||
| 461 | buf[len - 1] = '\n'; | ||
| 462 | |||
| 463 | return len; | ||
| 464 | } | ||
| 465 | |||
| 466 | static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail); | ||
| 436 | 467 | ||
| 437 | static struct attribute *vf610_attributes[] = { | 468 | static struct attribute *vf610_attributes[] = { |
| 438 | &iio_const_attr_sampling_frequency_available.dev_attr.attr, | 469 | &iio_dev_attr_sampling_frequency_available.dev_attr.attr, |
| 439 | NULL | 470 | NULL |
| 440 | }; | 471 | }; |
| 441 | 472 | ||
| @@ -499,7 +530,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev, | |||
| 499 | return IIO_VAL_FRACTIONAL_LOG2; | 530 | return IIO_VAL_FRACTIONAL_LOG2; |
| 500 | 531 | ||
| 501 | case IIO_CHAN_INFO_SAMP_FREQ: | 532 | case IIO_CHAN_INFO_SAMP_FREQ: |
| 502 | *val = vf610_sample_freq_avail[info->adc_feature.sample_rate]; | 533 | *val = info->sample_freq_avail[info->adc_feature.sample_rate]; |
| 503 | *val2 = 0; | 534 | *val2 = 0; |
| 504 | return IIO_VAL_INT; | 535 | return IIO_VAL_INT; |
| 505 | 536 | ||
| @@ -522,9 +553,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev, | |||
| 522 | switch (mask) { | 553 | switch (mask) { |
| 523 | case IIO_CHAN_INFO_SAMP_FREQ: | 554 | case IIO_CHAN_INFO_SAMP_FREQ: |
| 524 | for (i = 0; | 555 | for (i = 0; |
| 525 | i < ARRAY_SIZE(vf610_sample_freq_avail); | 556 | i < ARRAY_SIZE(info->sample_freq_avail); |
| 526 | i++) | 557 | i++) |
| 527 | if (val == vf610_sample_freq_avail[i]) { | 558 | if (val == info->sample_freq_avail[i]) { |
| 528 | info->adc_feature.sample_rate = i; | 559 | info->adc_feature.sample_rate = i; |
| 529 | vf610_adc_sample_set(info); | 560 | vf610_adc_sample_set(info); |
| 530 | return 0; | 561 | return 0; |
diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c index 56d68e1d0987..4415f55d26b6 100644 --- a/drivers/iio/gyro/bmg160.c +++ b/drivers/iio/gyro/bmg160.c | |||
| @@ -822,7 +822,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p) | |||
| 822 | int bit, ret, i = 0; | 822 | int bit, ret, i = 0; |
| 823 | 823 | ||
| 824 | mutex_lock(&data->mutex); | 824 | mutex_lock(&data->mutex); |
| 825 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 825 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
| 826 | indio_dev->masklength) { | 826 | indio_dev->masklength) { |
| 827 | ret = i2c_smbus_read_word_data(data->client, | 827 | ret = i2c_smbus_read_word_data(data->client, |
| 828 | BMG160_AXIS_TO_REG(bit)); | 828 | BMG160_AXIS_TO_REG(bit)); |
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index e0017c22bb9c..f53e9a803a0e 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c | |||
| @@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) | |||
| 60 | iio_trigger_set_drvdata(adis->trig, adis); | 60 | iio_trigger_set_drvdata(adis->trig, adis); |
| 61 | ret = iio_trigger_register(adis->trig); | 61 | ret = iio_trigger_register(adis->trig); |
| 62 | 62 | ||
| 63 | indio_dev->trig = adis->trig; | 63 | indio_dev->trig = iio_trigger_get(adis->trig); |
| 64 | if (ret) | 64 | if (ret) |
| 65 | goto error_free_irq; | 65 | goto error_free_irq; |
| 66 | 66 | ||
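The adis_trigger one-liner follows the IIO reference-counting rule: every stored pointer to a trigger owns a reference, and the core puts one reference when the device goes away. A sketch of the idiom:

    /* indio_dev->trig is a second pointer to the trigger, so take an extra
     * reference here; the IIO core drops it again on teardown. */
    indio_dev->trig = iio_trigger_get(adis->trig);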
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 5613f3ab9f96..17d4bb15be4d 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | |||
| @@ -410,42 +410,46 @@ error_read_raw: | |||
| 410 | } | 410 | } |
| 411 | } | 411 | } |
| 412 | 412 | ||
| 413 | static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr) | 413 | static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val) |
| 414 | { | 414 | { |
| 415 | int result; | 415 | int result, i; |
| 416 | u8 d; | 416 | u8 d; |
| 417 | 417 | ||
| 418 | if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM) | 418 | for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) { |
| 419 | return -EINVAL; | 419 | if (gyro_scale_6050[i] == val) { |
| 420 | if (fsr == st->chip_config.fsr) | 420 | d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); |
| 421 | return 0; | 421 | result = inv_mpu6050_write_reg(st, |
| 422 | st->reg->gyro_config, d); | ||
| 423 | if (result) | ||
| 424 | return result; | ||
| 422 | 425 | ||
| 423 | d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); | 426 | st->chip_config.fsr = i; |
| 424 | result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d); | 427 | return 0; |
| 425 | if (result) | 428 | } |
| 426 | return result; | 429 | } |
| 427 | st->chip_config.fsr = fsr; | ||
| 428 | 430 | ||
| 429 | return 0; | 431 | return -EINVAL; |
| 430 | } | 432 | } |
| 431 | 433 | ||
| 432 | static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs) | 434 | static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) |
| 433 | { | 435 | { |
| 434 | int result; | 436 | int result, i; |
| 435 | u8 d; | 437 | u8 d; |
| 436 | 438 | ||
| 437 | if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM) | 439 | for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) { |
| 438 | return -EINVAL; | 440 | if (accel_scale[i] == val) { |
| 439 | if (fs == st->chip_config.accl_fs) | 441 | d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); |
| 440 | return 0; | 442 | result = inv_mpu6050_write_reg(st, |
| 443 | st->reg->accl_config, d); | ||
| 444 | if (result) | ||
| 445 | return result; | ||
| 441 | 446 | ||
| 442 | d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); | 447 | st->chip_config.accl_fs = i; |
| 443 | result = inv_mpu6050_write_reg(st, st->reg->accl_config, d); | 448 | return 0; |
| 444 | if (result) | 449 | } |
| 445 | return result; | 450 | } |
| 446 | st->chip_config.accl_fs = fs; | ||
| 447 | 451 | ||
| 448 | return 0; | 452 | return -EINVAL; |
| 449 | } | 453 | } |
| 450 | 454 | ||
| 451 | static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, | 455 | static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, |
| @@ -471,10 +475,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, | |||
| 471 | case IIO_CHAN_INFO_SCALE: | 475 | case IIO_CHAN_INFO_SCALE: |
| 472 | switch (chan->type) { | 476 | switch (chan->type) { |
| 473 | case IIO_ANGL_VEL: | 477 | case IIO_ANGL_VEL: |
| 474 | result = inv_mpu6050_write_fsr(st, val); | 478 | result = inv_mpu6050_write_gyro_scale(st, val2); |
| 475 | break; | 479 | break; |
| 476 | case IIO_ACCEL: | 480 | case IIO_ACCEL: |
| 477 | result = inv_mpu6050_write_accel_fs(st, val); | 481 | result = inv_mpu6050_write_accel_scale(st, val2); |
| 478 | break; | 482 | break; |
| 479 | default: | 483 | default: |
| 480 | result = -EINVAL; | 484 | result = -EINVAL; |
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 0cd306a72a6e..ba27e277511f 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | |||
| @@ -24,6 +24,16 @@ | |||
| 24 | #include <linux/poll.h> | 24 | #include <linux/poll.h> |
| 25 | #include "inv_mpu_iio.h" | 25 | #include "inv_mpu_iio.h" |
| 26 | 26 | ||
| 27 | static void inv_clear_kfifo(struct inv_mpu6050_state *st) | ||
| 28 | { | ||
| 29 | unsigned long flags; | ||
| 30 | |||
| 31 | /* take the spin lock sem to avoid interrupt kick in */ | ||
| 32 | spin_lock_irqsave(&st->time_stamp_lock, flags); | ||
| 33 | kfifo_reset(&st->timestamps); | ||
| 34 | spin_unlock_irqrestore(&st->time_stamp_lock, flags); | ||
| 35 | } | ||
| 36 | |||
| 27 | int inv_reset_fifo(struct iio_dev *indio_dev) | 37 | int inv_reset_fifo(struct iio_dev *indio_dev) |
| 28 | { | 38 | { |
| 29 | int result; | 39 | int result; |
| @@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev) | |||
| 50 | INV_MPU6050_BIT_FIFO_RST); | 60 | INV_MPU6050_BIT_FIFO_RST); |
| 51 | if (result) | 61 | if (result) |
| 52 | goto reset_fifo_fail; | 62 | goto reset_fifo_fail; |
| 63 | |||
| 64 | /* clear timestamps fifo */ | ||
| 65 | inv_clear_kfifo(st); | ||
| 66 | |||
| 53 | /* enable interrupt */ | 67 | /* enable interrupt */ |
| 54 | if (st->chip_config.accl_fifo_enable || | 68 | if (st->chip_config.accl_fifo_enable || |
| 55 | st->chip_config.gyro_fifo_enable) { | 69 | st->chip_config.gyro_fifo_enable) { |
| @@ -83,16 +97,6 @@ reset_fifo_fail: | |||
| 83 | return result; | 97 | return result; |
| 84 | } | 98 | } |
| 85 | 99 | ||
| 86 | static void inv_clear_kfifo(struct inv_mpu6050_state *st) | ||
| 87 | { | ||
| 88 | unsigned long flags; | ||
| 89 | |||
| 90 | /* take the spin lock sem to avoid interrupt kick in */ | ||
| 91 | spin_lock_irqsave(&st->time_stamp_lock, flags); | ||
| 92 | kfifo_reset(&st->timestamps); | ||
| 93 | spin_unlock_irqrestore(&st->time_stamp_lock, flags); | ||
| 94 | } | ||
| 95 | |||
| 96 | /** | 100 | /** |
| 97 | * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt. | 101 | * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt. |
| 98 | */ | 102 | */ |
| @@ -184,7 +188,6 @@ end_session: | |||
| 184 | flush_fifo: | 188 | flush_fifo: |
| 185 | /* Flush HW and SW FIFOs. */ | 189 | /* Flush HW and SW FIFOs. */ |
| 186 | inv_reset_fifo(indio_dev); | 190 | inv_reset_fifo(indio_dev); |
| 187 | inv_clear_kfifo(st); | ||
| 188 | mutex_unlock(&indio_dev->mlock); | 191 | mutex_unlock(&indio_dev->mlock); |
| 189 | iio_trigger_notify_done(indio_dev->trig); | 192 | iio_trigger_notify_done(indio_dev->trig); |
| 190 | 193 | ||
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index 75ab70100015..462a010628cd 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c | |||
| @@ -1215,7 +1215,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p) | |||
| 1215 | base = KMX61_MAG_XOUT_L; | 1215 | base = KMX61_MAG_XOUT_L; |
| 1216 | 1216 | ||
| 1217 | mutex_lock(&data->lock); | 1217 | mutex_lock(&data->lock); |
| 1218 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 1218 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
| 1219 | indio_dev->masklength) { | 1219 | indio_dev->masklength) { |
| 1220 | ret = kmx61_read_measurement(data, base, bit); | 1220 | ret = kmx61_read_measurement(data, base, bit); |
| 1221 | if (ret < 0) { | 1221 | if (ret < 0) { |
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index aaba9d3d980e..4df97f650e44 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c | |||
| @@ -847,8 +847,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, | |||
| 847 | * @attr_list: List of IIO device attributes | 847 | * @attr_list: List of IIO device attributes |
| 848 | * | 848 | * |
| 849 | * This function frees the memory allocated for each of the IIO device | 849 | * This function frees the memory allocated for each of the IIO device |
| 850 | * attributes in the list. Note: if you want to reuse the list after calling | 850 | * attributes in the list. |
| 851 | * this function you have to reinitialize it using INIT_LIST_HEAD(). | ||
| 852 | */ | 851 | */ |
| 853 | void iio_free_chan_devattr_list(struct list_head *attr_list) | 852 | void iio_free_chan_devattr_list(struct list_head *attr_list) |
| 854 | { | 853 | { |
| @@ -856,6 +855,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list) | |||
| 856 | 855 | ||
| 857 | list_for_each_entry_safe(p, n, attr_list, l) { | 856 | list_for_each_entry_safe(p, n, attr_list, l) { |
| 858 | kfree(p->dev_attr.attr.name); | 857 | kfree(p->dev_attr.attr.name); |
| 858 | list_del(&p->l); | ||
| 859 | kfree(p); | 859 | kfree(p); |
| 860 | } | 860 | } |
| 861 | } | 861 | } |
| @@ -936,6 +936,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) | |||
| 936 | 936 | ||
| 937 | iio_free_chan_devattr_list(&indio_dev->channel_attr_list); | 937 | iio_free_chan_devattr_list(&indio_dev->channel_attr_list); |
| 938 | kfree(indio_dev->chan_attr_group.attrs); | 938 | kfree(indio_dev->chan_attr_group.attrs); |
| 939 | indio_dev->chan_attr_group.attrs = NULL; | ||
| 939 | } | 940 | } |
| 940 | 941 | ||
| 941 | static void iio_dev_release(struct device *device) | 942 | static void iio_dev_release(struct device *device) |
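Unlinking each attribute with list_del() before freeing it leaves the list head empty and reusable, so the "reinitialize with INIT_LIST_HEAD()" caveat in the kerneldoc can go away; the second hunk likewise NULLs chan_attr_group.attrs so a later unregister cannot free it twice. A hedged sketch of the unlink-then-free idiom:

    struct iio_dev_attr *p, *n;

    /* _safe iteration caches the next entry, so it is legal to unlink and
     * free the current one inside the loop body; the head stays valid. */
    list_for_each_entry_safe(p, n, attr_list, l) {
        kfree(p->dev_attr.attr.name);   /* name was allocated with the attr */
        list_del(&p->l);
        kfree(p);
    }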
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index a4b397048f71..a99692ba91bc 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c | |||
| @@ -500,6 +500,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) | |||
| 500 | error_free_setup_event_lines: | 500 | error_free_setup_event_lines: |
| 501 | iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); | 501 | iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); |
| 502 | kfree(indio_dev->event_interface); | 502 | kfree(indio_dev->event_interface); |
| 503 | indio_dev->event_interface = NULL; | ||
| 503 | return ret; | 504 | return ret; |
| 504 | } | 505 | } |
| 505 | 506 | ||
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index 0b4d79490b05..fa40f6d0ca39 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c | |||
| @@ -494,7 +494,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private) | |||
| 494 | 494 | ||
| 495 | mutex_lock(&data->mutex); | 495 | mutex_lock(&data->mutex); |
| 496 | 496 | ||
| 497 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 497 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
| 498 | indio_dev->masklength) { | 498 | indio_dev->masklength) { |
| 499 | ret = sx9500_read_proximity(data, &indio_dev->channels[bit], | 499 | ret = sx9500_read_proximity(data, &indio_dev->channels[bit], |
| 500 | &val); | 500 | &val); |
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index aec7a6aa2951..8c014b5dab4c 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
| @@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 99 | if (dmasync) | 99 | if (dmasync) |
| 100 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | 100 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); |
| 101 | 101 | ||
| 102 | /* | ||
| 103 | * If the combination of the addr and size requested for this memory | ||
| 104 | * region causes an integer overflow, return error. | ||
| 105 | */ | ||
| 106 | if ((PAGE_ALIGN(addr + size) <= size) || | ||
| 107 | (PAGE_ALIGN(addr + size) <= addr)) | ||
| 108 | return ERR_PTR(-EINVAL); | ||
| 109 | |||
| 102 | if (!can_do_mlock()) | 110 | if (!can_do_mlock()) |
| 103 | return ERR_PTR(-EPERM); | 111 | return ERR_PTR(-EPERM); |
| 104 | 112 | ||
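The ib_umem check rejects addr/size pairs whose page-aligned end wraps around the address space, which would otherwise let a huge "size" pass later range checks. A standalone sketch showing why both comparisons are needed:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Returns 1 if addr + size overflows once rounded up to a page boundary. */
    static int range_overflows(unsigned long addr, unsigned long size)
    {
        return PAGE_ALIGN(addr + size) <= size ||
               PAGE_ALIGN(addr + size) <= addr;
    }

    int main(void)
    {
        printf("%d\n", range_overflows(0x1000, 0x2000));           /* 0: fine  */
        printf("%d\n", range_overflows(-4096UL, 0x2000));          /* 1: wraps */
        printf("%d\n", range_overflows(0x1000, -(unsigned long)1));/* 1: wraps */
        return 0;
    }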
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 1bd15ebc01f2..27bcdbc950c9 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
| @@ -1154,10 +1154,28 @@ out: | |||
| 1154 | mutex_unlock(&alps_mutex); | 1154 | mutex_unlock(&alps_mutex); |
| 1155 | } | 1155 | } |
| 1156 | 1156 | ||
| 1157 | static void alps_report_bare_ps2_packet(struct input_dev *dev, | 1157 | static void alps_report_bare_ps2_packet(struct psmouse *psmouse, |
| 1158 | unsigned char packet[], | 1158 | unsigned char packet[], |
| 1159 | bool report_buttons) | 1159 | bool report_buttons) |
| 1160 | { | 1160 | { |
| 1161 | struct alps_data *priv = psmouse->private; | ||
| 1162 | struct input_dev *dev; | ||
| 1163 | |||
| 1164 | /* Figure out which device to use to report the bare packet */ | ||
| 1165 | if (priv->proto_version == ALPS_PROTO_V2 && | ||
| 1166 | (priv->flags & ALPS_DUALPOINT)) { | ||
| 1167 | /* On V2 devices the DualPoint Stick reports bare packets */ | ||
| 1168 | dev = priv->dev2; | ||
| 1169 | } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) { | ||
| 1170 | /* Register dev3 mouse if we received PS/2 packet first time */ | ||
| 1171 | if (!IS_ERR(priv->dev3)) | ||
| 1172 | psmouse_queue_work(psmouse, &priv->dev3_register_work, | ||
| 1173 | 0); | ||
| 1174 | return; | ||
| 1175 | } else { | ||
| 1176 | dev = priv->dev3; | ||
| 1177 | } | ||
| 1178 | |||
| 1161 | if (report_buttons) | 1179 | if (report_buttons) |
| 1162 | alps_report_buttons(dev, NULL, | 1180 | alps_report_buttons(dev, NULL, |
| 1163 | packet[0] & 1, packet[0] & 2, packet[0] & 4); | 1181 | packet[0] & 1, packet[0] & 2, packet[0] & 4); |
| @@ -1232,8 +1250,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse) | |||
| 1232 | * de-synchronization. | 1250 | * de-synchronization. |
| 1233 | */ | 1251 | */ |
| 1234 | 1252 | ||
| 1235 | alps_report_bare_ps2_packet(priv->dev2, | 1253 | alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3], |
| 1236 | &psmouse->packet[3], false); | 1254 | false); |
| 1237 | 1255 | ||
| 1238 | /* | 1256 | /* |
| 1239 | * Continue with the standard ALPS protocol handling, | 1257 | * Continue with the standard ALPS protocol handling, |
| @@ -1289,18 +1307,9 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) | |||
| 1289 | * properly we only do this if the device is fully synchronized. | 1307 | * properly we only do this if the device is fully synchronized. |
| 1290 | */ | 1308 | */ |
| 1291 | if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { | 1309 | if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { |
| 1292 | |||
| 1293 | /* Register dev3 mouse if we received PS/2 packet first time */ | ||
| 1294 | if (unlikely(!priv->dev3)) | ||
| 1295 | psmouse_queue_work(psmouse, | ||
| 1296 | &priv->dev3_register_work, 0); | ||
| 1297 | |||
| 1298 | if (psmouse->pktcnt == 3) { | 1310 | if (psmouse->pktcnt == 3) { |
| 1299 | /* Once dev3 mouse device is registered report data */ | 1311 | alps_report_bare_ps2_packet(psmouse, psmouse->packet, |
| 1300 | if (likely(!IS_ERR_OR_NULL(priv->dev3))) | 1312 | true); |
| 1301 | alps_report_bare_ps2_packet(priv->dev3, | ||
| 1302 | psmouse->packet, | ||
| 1303 | true); | ||
| 1304 | return PSMOUSE_FULL_PACKET; | 1313 | return PSMOUSE_FULL_PACKET; |
| 1305 | } | 1314 | } |
| 1306 | return PSMOUSE_GOOD_DATA; | 1315 | return PSMOUSE_GOOD_DATA; |
| @@ -2281,10 +2290,12 @@ static int alps_set_protocol(struct psmouse *psmouse, | |||
| 2281 | priv->set_abs_params = alps_set_abs_params_mt; | 2290 | priv->set_abs_params = alps_set_abs_params_mt; |
| 2282 | priv->nibble_commands = alps_v3_nibble_commands; | 2291 | priv->nibble_commands = alps_v3_nibble_commands; |
| 2283 | priv->addr_command = PSMOUSE_CMD_RESET_WRAP; | 2292 | priv->addr_command = PSMOUSE_CMD_RESET_WRAP; |
| 2284 | priv->x_max = 1360; | ||
| 2285 | priv->y_max = 660; | ||
| 2286 | priv->x_bits = 23; | 2293 | priv->x_bits = 23; |
| 2287 | priv->y_bits = 12; | 2294 | priv->y_bits = 12; |
| 2295 | |||
| 2296 | if (alps_dolphin_get_device_area(psmouse, priv)) | ||
| 2297 | return -EIO; | ||
| 2298 | |||
| 2288 | break; | 2299 | break; |
| 2289 | 2300 | ||
| 2290 | case ALPS_PROTO_V6: | 2301 | case ALPS_PROTO_V6: |
| @@ -2303,9 +2314,8 @@ static int alps_set_protocol(struct psmouse *psmouse, | |||
| 2303 | priv->set_abs_params = alps_set_abs_params_mt; | 2314 | priv->set_abs_params = alps_set_abs_params_mt; |
| 2304 | priv->nibble_commands = alps_v3_nibble_commands; | 2315 | priv->nibble_commands = alps_v3_nibble_commands; |
| 2305 | priv->addr_command = PSMOUSE_CMD_RESET_WRAP; | 2316 | priv->addr_command = PSMOUSE_CMD_RESET_WRAP; |
| 2306 | 2317 | priv->x_max = 0xfff; | |
| 2307 | if (alps_dolphin_get_device_area(psmouse, priv)) | 2318 | priv->y_max = 0x7ff; |
| 2308 | return -EIO; | ||
| 2309 | 2319 | ||
| 2310 | if (priv->fw_ver[1] != 0xba) | 2320 | if (priv->fw_ver[1] != 0xba) |
| 2311 | priv->flags |= ALPS_BUTTONPAD; | 2321 | priv->flags |= ALPS_BUTTONPAD; |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index dda605836546..3b06c8a360b6 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
| @@ -154,6 +154,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = { | |||
| 154 | }, | 154 | }, |
| 155 | { | 155 | { |
| 156 | (const char * const []){"LEN2006", NULL}, | 156 | (const char * const []){"LEN2006", NULL}, |
| 157 | {2691, 2691}, | ||
| 158 | 1024, 5045, 2457, 4832 | ||
| 159 | }, | ||
| 160 | { | ||
| 161 | (const char * const []){"LEN2006", NULL}, | ||
| 157 | {ANY_BOARD_ID, ANY_BOARD_ID}, | 162 | {ANY_BOARD_ID, ANY_BOARD_ID}, |
| 158 | 1264, 5675, 1171, 4688 | 163 | 1264, 5675, 1171, 4688 |
| 159 | }, | 164 | }, |
| @@ -189,7 +194,7 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
| 189 | "LEN2003", | 194 | "LEN2003", |
| 190 | "LEN2004", /* L440 */ | 195 | "LEN2004", /* L440 */ |
| 191 | "LEN2005", | 196 | "LEN2005", |
| 192 | "LEN2006", | 197 | "LEN2006", /* Edge E440/E540 */ |
| 193 | "LEN2007", | 198 | "LEN2007", |
| 194 | "LEN2008", | 199 | "LEN2008", |
| 195 | "LEN2009", | 200 | "LEN2009", |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index fc13dd56953e..a3adde6519f0 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
| @@ -1288,10 +1288,13 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | |||
| 1288 | return 0; | 1288 | return 0; |
| 1289 | 1289 | ||
| 1290 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); | 1290 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); |
| 1291 | if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS) | 1291 | if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && |
| 1292 | smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { | ||
| 1292 | ret = arm_smmu_iova_to_phys_hard(domain, iova); | 1293 | ret = arm_smmu_iova_to_phys_hard(domain, iova); |
| 1293 | else | 1294 | } else { |
| 1294 | ret = ops->iova_to_phys(ops, iova); | 1295 | ret = ops->iova_to_phys(ops, iova); |
| 1296 | } | ||
| 1297 | |||
| 1295 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); | 1298 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); |
| 1296 | 1299 | ||
| 1297 | return ret; | 1300 | return ret; |
| @@ -1556,7 +1559,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
| 1556 | return -ENODEV; | 1559 | return -ENODEV; |
| 1557 | } | 1560 | } |
| 1558 | 1561 | ||
| 1559 | if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) { | 1562 | if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) { |
| 1560 | smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; | 1563 | smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; |
| 1561 | dev_notice(smmu->dev, "\taddress translation ops\n"); | 1564 | dev_notice(smmu->dev, "\taddress translation ops\n"); |
| 1562 | } | 1565 | } |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ae4c1a854e57..2d1e05bdbb53 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -1742,9 +1742,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
| 1742 | 1742 | ||
| 1743 | static void domain_exit(struct dmar_domain *domain) | 1743 | static void domain_exit(struct dmar_domain *domain) |
| 1744 | { | 1744 | { |
| 1745 | struct dmar_drhd_unit *drhd; | ||
| 1746 | struct intel_iommu *iommu; | ||
| 1747 | struct page *freelist = NULL; | 1745 | struct page *freelist = NULL; |
| 1746 | int i; | ||
| 1748 | 1747 | ||
| 1749 | /* Domain 0 is reserved, so dont process it */ | 1748 | /* Domain 0 is reserved, so dont process it */ |
| 1750 | if (!domain) | 1749 | if (!domain) |
| @@ -1764,8 +1763,8 @@ static void domain_exit(struct dmar_domain *domain) | |||
| 1764 | 1763 | ||
| 1765 | /* clear attached or cached domains */ | 1764 | /* clear attached or cached domains */ |
| 1766 | rcu_read_lock(); | 1765 | rcu_read_lock(); |
| 1767 | for_each_active_iommu(iommu, drhd) | 1766 | for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) |
| 1768 | iommu_detach_domain(domain, iommu); | 1767 | iommu_detach_domain(domain, g_iommus[i]); |
| 1769 | rcu_read_unlock(); | 1768 | rcu_read_unlock(); |
| 1770 | 1769 | ||
| 1771 | dma_free_pagelist(freelist); | 1770 | dma_free_pagelist(freelist); |
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 10186cac7716..bc39bdf7b99b 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
| @@ -851,6 +851,7 @@ static int ipmmu_remove(struct platform_device *pdev) | |||
| 851 | 851 | ||
| 852 | static const struct of_device_id ipmmu_of_ids[] = { | 852 | static const struct of_device_id ipmmu_of_ids[] = { |
| 853 | { .compatible = "renesas,ipmmu-vmsa", }, | 853 | { .compatible = "renesas,ipmmu-vmsa", }, |
| 854 | { } | ||
| 854 | }; | 855 | }; |
| 855 | 856 | ||
| 856 | static struct platform_driver ipmmu_driver = { | 857 | static struct platform_driver ipmmu_driver = { |
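The ipmmu-vmsa fix adds the terminator that OF matching relies on: the match loop walks the table until it hits an entry with no compatible/type/name set, so without the zeroed sentinel it runs off the end of the array. A hedged sketch of the convention with a made-up compatible string:

    #include <linux/mod_devicetable.h>

    /* Hypothetical driver table: the all-zero entry at the end is what
     * stops the OF match scan. */
    static const struct of_device_id example_of_ids[] = {
        { .compatible = "vendor,example-device" },
        { /* sentinel */ }
    };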
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 596b0a9eee99..9687f8afebff 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -169,7 +169,7 @@ static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) | |||
| 169 | 169 | ||
| 170 | static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) | 170 | static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) |
| 171 | { | 171 | { |
| 172 | cmd->raw_cmd[0] &= ~(0xffffUL << 32); | 172 | cmd->raw_cmd[0] &= BIT_ULL(32) - 1; |
| 173 | cmd->raw_cmd[0] |= ((u64)devid) << 32; | 173 | cmd->raw_cmd[0] |= ((u64)devid) << 32; |
| 174 | } | 174 | } |
| 175 | 175 | ||
| @@ -802,6 +802,7 @@ static int its_alloc_tables(struct its_node *its) | |||
| 802 | int i; | 802 | int i; |
| 803 | int psz = SZ_64K; | 803 | int psz = SZ_64K; |
| 804 | u64 shr = GITS_BASER_InnerShareable; | 804 | u64 shr = GITS_BASER_InnerShareable; |
| 805 | u64 cache = GITS_BASER_WaWb; | ||
| 805 | 806 | ||
| 806 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | 807 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
| 807 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); | 808 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); |
| @@ -848,7 +849,7 @@ retry_baser: | |||
| 848 | val = (virt_to_phys(base) | | 849 | val = (virt_to_phys(base) | |
| 849 | (type << GITS_BASER_TYPE_SHIFT) | | 850 | (type << GITS_BASER_TYPE_SHIFT) | |
| 850 | ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | | 851 | ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | |
| 851 | GITS_BASER_WaWb | | 852 | cache | |
| 852 | shr | | 853 | shr | |
| 853 | GITS_BASER_VALID); | 854 | GITS_BASER_VALID); |
| 854 | 855 | ||
| @@ -874,9 +875,12 @@ retry_baser: | |||
| 874 | * Shareability didn't stick. Just use | 875 | * Shareability didn't stick. Just use |
| 875 | * whatever the read reported, which is likely | 876 | * whatever the read reported, which is likely |
| 876 | * to be the only thing this redistributor | 877 | * to be the only thing this redistributor |
| 877 | * supports. | 878 | * supports. If that's zero, make it |
| 879 | * non-cacheable as well. | ||
| 878 | */ | 880 | */ |
| 879 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; | 881 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; |
| 882 | if (!shr) | ||
| 883 | cache = GITS_BASER_nC; | ||
| 880 | goto retry_baser; | 884 | goto retry_baser; |
| 881 | } | 885 | } |
| 882 | 886 | ||
| @@ -980,16 +984,39 @@ static void its_cpu_init_lpis(void) | |||
| 980 | tmp = readq_relaxed(rbase + GICR_PROPBASER); | 984 | tmp = readq_relaxed(rbase + GICR_PROPBASER); |
| 981 | 985 | ||
| 982 | if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { | 986 | if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { |
| 987 | if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { | ||
| 988 | /* | ||
| 989 | * The HW reports non-shareable, we must | ||
| 990 | * remove the cacheability attributes as | ||
| 991 | * well. | ||
| 992 | */ | ||
| 993 | val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | | ||
| 994 | GICR_PROPBASER_CACHEABILITY_MASK); | ||
| 995 | val |= GICR_PROPBASER_nC; | ||
| 996 | writeq_relaxed(val, rbase + GICR_PROPBASER); | ||
| 997 | } | ||
| 983 | pr_info_once("GIC: using cache flushing for LPI property table\n"); | 998 | pr_info_once("GIC: using cache flushing for LPI property table\n"); |
| 984 | gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; | 999 | gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; |
| 985 | } | 1000 | } |
| 986 | 1001 | ||
| 987 | /* set PENDBASE */ | 1002 | /* set PENDBASE */ |
| 988 | val = (page_to_phys(pend_page) | | 1003 | val = (page_to_phys(pend_page) | |
| 989 | GICR_PROPBASER_InnerShareable | | 1004 | GICR_PENDBASER_InnerShareable | |
| 990 | GICR_PROPBASER_WaWb); | 1005 | GICR_PENDBASER_WaWb); |
| 991 | 1006 | ||
| 992 | writeq_relaxed(val, rbase + GICR_PENDBASER); | 1007 | writeq_relaxed(val, rbase + GICR_PENDBASER); |
| 1008 | tmp = readq_relaxed(rbase + GICR_PENDBASER); | ||
| 1009 | |||
| 1010 | if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { | ||
| 1011 | /* | ||
| 1012 | * The HW reports non-shareable, we must remove the | ||
| 1013 | * cacheability attributes as well. | ||
| 1014 | */ | ||
| 1015 | val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | | ||
| 1016 | GICR_PENDBASER_CACHEABILITY_MASK); | ||
| 1017 | val |= GICR_PENDBASER_nC; | ||
| 1018 | writeq_relaxed(val, rbase + GICR_PENDBASER); | ||
| 1019 | } | ||
| 993 | 1020 | ||
| 994 | /* Enable LPIs */ | 1021 | /* Enable LPIs */ |
| 995 | val = readl_relaxed(rbase + GICR_CTLR); | 1022 | val = readl_relaxed(rbase + GICR_CTLR); |
| @@ -1026,7 +1053,7 @@ static void its_cpu_init_collection(void) | |||
| 1026 | * This ITS wants a linear CPU number. | 1053 | * This ITS wants a linear CPU number. |
| 1027 | */ | 1054 | */ |
| 1028 | target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); | 1055 | target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); |
| 1029 | target = GICR_TYPER_CPU_NUMBER(target); | 1056 | target = GICR_TYPER_CPU_NUMBER(target) << 16; |
| 1030 | } | 1057 | } |
| 1031 | 1058 | ||
| 1032 | /* Perform collection mapping */ | 1059 | /* Perform collection mapping */ |
| @@ -1422,14 +1449,26 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
| 1422 | 1449 | ||
| 1423 | writeq_relaxed(baser, its->base + GITS_CBASER); | 1450 | writeq_relaxed(baser, its->base + GITS_CBASER); |
| 1424 | tmp = readq_relaxed(its->base + GITS_CBASER); | 1451 | tmp = readq_relaxed(its->base + GITS_CBASER); |
| 1425 | writeq_relaxed(0, its->base + GITS_CWRITER); | ||
| 1426 | writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); | ||
| 1427 | 1452 | ||
| 1428 | if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { | 1453 | if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { |
| 1454 | if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { | ||
| 1455 | /* | ||
| 1456 | * The HW reports non-shareable, we must | ||
| 1457 | * remove the cacheability attributes as | ||
| 1458 | * well. | ||
| 1459 | */ | ||
| 1460 | baser &= ~(GITS_CBASER_SHAREABILITY_MASK | | ||
| 1461 | GITS_CBASER_CACHEABILITY_MASK); | ||
| 1462 | baser |= GITS_CBASER_nC; | ||
| 1463 | writeq_relaxed(baser, its->base + GITS_CBASER); | ||
| 1464 | } | ||
| 1429 | pr_info("ITS: using cache flushing for cmd queue\n"); | 1465 | pr_info("ITS: using cache flushing for cmd queue\n"); |
| 1430 | its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; | 1466 | its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; |
| 1431 | } | 1467 | } |
| 1432 | 1468 | ||
| 1469 | writeq_relaxed(0, its->base + GITS_CWRITER); | ||
| 1470 | writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); | ||
| 1471 | |||
| 1433 | if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { | 1472 | if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { |
| 1434 | its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); | 1473 | its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); |
| 1435 | if (!its->domain) { | 1474 | if (!its->domain) { |
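All of the GITS_CBASER, GICR_PROPBASER and GICR_PENDBASER hunks above follow the same shape: write the desired attributes, read the register back, and if the shareability field did not stick, downgrade to non-cacheable and let the caller fall back to explicit cache flushing. A sketch of that probe-and-fallback pattern, with placeholder masks (the real ones come from the GICv3 register definitions):

#include <linux/io.h>
#include <linux/types.h>

static bool program_baser(void __iomem *reg, u64 want,
			  u64 share_mask, u64 cache_mask, u64 non_cacheable)
{
	u64 got;

	writeq_relaxed(want, reg);
	got = readq_relaxed(reg);

	if ((got ^ want) & share_mask) {
		if (!(got & share_mask)) {
			/* HW says non-shareable: drop cacheability too */
			want &= ~(share_mask | cache_mask);
			want |= non_cacheable;
			writeq_relaxed(want, reg);
		}
		return false;	/* caller should flush by hand */
	}

	return true;
}

A caller would typically record the false return in a flag, which is what the driver does with ITS_FLAGS_CMDQ_NEEDS_FLUSHING and RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING above.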
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig index ee035ec4526b..169172d2ba05 100644 --- a/drivers/lguest/Kconfig +++ b/drivers/lguest/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config LGUEST | 1 | config LGUEST |
| 2 | tristate "Linux hypervisor example code" | 2 | tristate "Linux hypervisor example code" |
| 3 | depends on X86_32 && EVENTFD && TTY | 3 | depends on X86_32 && EVENTFD && TTY && PCI_DIRECT |
| 4 | select HVC_DRIVER | 4 | select HVC_DRIVER |
| 5 | ---help--- | 5 | ---help--- |
| 6 | This is a very simple module which allows you to run | 6 | This is a very simple module which allows you to run |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9b641b38b857..8001fe9e3434 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) | |||
| 433 | 433 | ||
| 434 | dm_get(md); | 434 | dm_get(md); |
| 435 | atomic_inc(&md->open_count); | 435 | atomic_inc(&md->open_count); |
| 436 | |||
| 437 | out: | 436 | out: |
| 438 | spin_unlock(&_minor_lock); | 437 | spin_unlock(&_minor_lock); |
| 439 | 438 | ||
| @@ -442,16 +441,20 @@ out: | |||
| 442 | 441 | ||
| 443 | static void dm_blk_close(struct gendisk *disk, fmode_t mode) | 442 | static void dm_blk_close(struct gendisk *disk, fmode_t mode) |
| 444 | { | 443 | { |
| 445 | struct mapped_device *md = disk->private_data; | 444 | struct mapped_device *md; |
| 446 | 445 | ||
| 447 | spin_lock(&_minor_lock); | 446 | spin_lock(&_minor_lock); |
| 448 | 447 | ||
| 448 | md = disk->private_data; | ||
| 449 | if (WARN_ON(!md)) | ||
| 450 | goto out; | ||
| 451 | |||
| 449 | if (atomic_dec_and_test(&md->open_count) && | 452 | if (atomic_dec_and_test(&md->open_count) && |
| 450 | (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) | 453 | (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) |
| 451 | queue_work(deferred_remove_workqueue, &deferred_remove_work); | 454 | queue_work(deferred_remove_workqueue, &deferred_remove_work); |
| 452 | 455 | ||
| 453 | dm_put(md); | 456 | dm_put(md); |
| 454 | 457 | out: | |
| 455 | spin_unlock(&_minor_lock); | 458 | spin_unlock(&_minor_lock); |
| 456 | } | 459 | } |
| 457 | 460 | ||
| @@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md) | |||
| 2241 | int minor = MINOR(disk_devt(md->disk)); | 2244 | int minor = MINOR(disk_devt(md->disk)); |
| 2242 | 2245 | ||
| 2243 | unlock_fs(md); | 2246 | unlock_fs(md); |
| 2244 | bdput(md->bdev); | ||
| 2245 | destroy_workqueue(md->wq); | 2247 | destroy_workqueue(md->wq); |
| 2246 | 2248 | ||
| 2247 | if (md->kworker_task) | 2249 | if (md->kworker_task) |
| @@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md) | |||
| 2252 | mempool_destroy(md->rq_pool); | 2254 | mempool_destroy(md->rq_pool); |
| 2253 | if (md->bs) | 2255 | if (md->bs) |
| 2254 | bioset_free(md->bs); | 2256 | bioset_free(md->bs); |
| 2255 | blk_integrity_unregister(md->disk); | 2257 | |
| 2256 | del_gendisk(md->disk); | ||
| 2257 | cleanup_srcu_struct(&md->io_barrier); | 2258 | cleanup_srcu_struct(&md->io_barrier); |
| 2258 | free_table_devices(&md->table_devices); | 2259 | free_table_devices(&md->table_devices); |
| 2259 | free_minor(minor); | 2260 | dm_stats_cleanup(&md->stats); |
| 2260 | 2261 | ||
| 2261 | spin_lock(&_minor_lock); | 2262 | spin_lock(&_minor_lock); |
| 2262 | md->disk->private_data = NULL; | 2263 | md->disk->private_data = NULL; |
| 2263 | spin_unlock(&_minor_lock); | 2264 | spin_unlock(&_minor_lock); |
| 2264 | 2265 | if (blk_get_integrity(md->disk)) | |
| 2266 | blk_integrity_unregister(md->disk); | ||
| 2267 | del_gendisk(md->disk); | ||
| 2265 | put_disk(md->disk); | 2268 | put_disk(md->disk); |
| 2266 | blk_cleanup_queue(md->queue); | 2269 | blk_cleanup_queue(md->queue); |
| 2267 | dm_stats_cleanup(&md->stats); | 2270 | bdput(md->bdev); |
| 2271 | free_minor(minor); | ||
| 2272 | |||
| 2268 | module_put(THIS_MODULE); | 2273 | module_put(THIS_MODULE); |
| 2269 | kfree(md); | 2274 | kfree(md); |
| 2270 | } | 2275 | } |
| @@ -2642,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
| 2642 | 2647 | ||
| 2643 | might_sleep(); | 2648 | might_sleep(); |
| 2644 | 2649 | ||
| 2645 | spin_lock(&_minor_lock); | ||
| 2646 | map = dm_get_live_table(md, &srcu_idx); | 2650 | map = dm_get_live_table(md, &srcu_idx); |
| 2651 | |||
| 2652 | spin_lock(&_minor_lock); | ||
| 2647 | idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); | 2653 | idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); |
| 2648 | set_bit(DMF_FREEING, &md->flags); | 2654 | set_bit(DMF_FREEING, &md->flags); |
| 2649 | spin_unlock(&_minor_lock); | 2655 | spin_unlock(&_minor_lock); |
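The dm_blk_close() and free_dev() hunks above close a race: private_data is now cleared under _minor_lock before the gendisk is torn down, and the release path only trusts the pointer while holding that same lock. A sketch of the pattern with illustrative names:

#include <linux/genhd.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct example_device {
	atomic_t open_count;
};

static DEFINE_SPINLOCK(example_minor_lock);

static void example_release(struct gendisk *disk)
{
	struct example_device *ed;

	spin_lock(&example_minor_lock);
	ed = disk->private_data;
	if (!ed)		/* already disowned by the free path */
		goto out;

	atomic_dec(&ed->open_count);
	/* ... possibly queue deferred removal here ... */
out:
	spin_unlock(&example_minor_lock);
}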
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index f38ec424872e..5615522f8d62 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c | |||
| @@ -739,7 +739,7 @@ static int __init kempld_init(void) | |||
| 739 | for (id = kempld_dmi_table; | 739 | for (id = kempld_dmi_table; |
| 740 | id->matches[0].slot != DMI_NONE; id++) | 740 | id->matches[0].slot != DMI_NONE; id++) |
| 741 | if (strstr(id->ident, force_device_id)) | 741 | if (strstr(id->ident, force_device_id)) |
| 742 | if (id->callback && id->callback(id)) | 742 | if (id->callback && !id->callback(id)) |
| 743 | break; | 743 | break; |
| 744 | if (id->matches[0].slot == DMI_NONE) | 744 | if (id->matches[0].slot == DMI_NONE) |
| 745 | return -ENODEV; | 745 | return -ENODEV; |
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index ede50244f265..dbd907d7170e 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c | |||
| @@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register); | |||
| 196 | int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) | 196 | int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) |
| 197 | { | 197 | { |
| 198 | u16 value; | 198 | u16 value; |
| 199 | u8 *buf; | ||
| 200 | int ret; | ||
| 199 | 201 | ||
| 200 | if (!data) | 202 | if (!data) |
| 201 | return -EINVAL; | 203 | return -EINVAL; |
| 202 | *data = 0; | 204 | |
| 205 | buf = kzalloc(sizeof(u8), GFP_KERNEL); | ||
| 206 | if (!buf) | ||
| 207 | return -ENOMEM; | ||
| 203 | 208 | ||
| 204 | addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; | 209 | addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; |
| 205 | value = swab16(addr); | 210 | value = swab16(addr); |
| 206 | 211 | ||
| 207 | return usb_control_msg(ucr->pusb_dev, | 212 | ret = usb_control_msg(ucr->pusb_dev, |
| 208 | usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, | 213 | usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, |
| 209 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 214 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
| 210 | value, 0, data, 1, 100); | 215 | value, 0, buf, 1, 100); |
| 216 | *data = *buf; | ||
| 217 | |||
| 218 | kfree(buf); | ||
| 219 | return ret; | ||
| 211 | } | 220 | } |
| 212 | EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); | 221 | EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); |
| 213 | 222 | ||
| @@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status) | |||
| 288 | int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) | 297 | int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) |
| 289 | { | 298 | { |
| 290 | int ret; | 299 | int ret; |
| 300 | u16 *buf; | ||
| 291 | 301 | ||
| 292 | if (!status) | 302 | if (!status) |
| 293 | return -EINVAL; | 303 | return -EINVAL; |
| 294 | 304 | ||
| 295 | if (polling_pipe == 0) | 305 | if (polling_pipe == 0) { |
| 306 | buf = kzalloc(sizeof(u16), GFP_KERNEL); | ||
| 307 | if (!buf) | ||
| 308 | return -ENOMEM; | ||
| 309 | |||
| 296 | ret = usb_control_msg(ucr->pusb_dev, | 310 | ret = usb_control_msg(ucr->pusb_dev, |
| 297 | usb_rcvctrlpipe(ucr->pusb_dev, 0), | 311 | usb_rcvctrlpipe(ucr->pusb_dev, 0), |
| 298 | RTSX_USB_REQ_POLL, | 312 | RTSX_USB_REQ_POLL, |
| 299 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 313 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
| 300 | 0, 0, status, 2, 100); | 314 | 0, 0, buf, 2, 100); |
| 301 | else | 315 | *status = *buf; |
| 316 | |||
| 317 | kfree(buf); | ||
| 318 | } else { | ||
| 302 | ret = rtsx_usb_get_status_with_bulk(ucr, status); | 319 | ret = rtsx_usb_get_status_with_bulk(ucr, status); |
| 320 | } | ||
| 303 | 321 | ||
| 304 | /* usb_control_msg may return positive when success */ | 322 | /* usb_control_msg may return positive when success */ |
| 305 | if (ret < 0) | 323 | if (ret < 0) |
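Both rtsx_usb hunks above exist because usb_control_msg() needs a DMA-capable transfer buffer: pointing it at caller-provided (possibly on-stack) memory is not safe, so the reads are bounced through a small kmalloc'd buffer. A minimal sketch of that shape, with a placeholder request code:

#include <linux/slab.h>
#include <linux/usb.h>

static int read_one_byte(struct usb_device *udev, u16 value, u8 *out)
{
	u8 *buf;
	int ret;

	buf = kzalloc(1, GFP_KERNEL);	/* heap buffer the HCD can DMA from */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x01,	/* placeholder bRequest */
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, 0, buf, 1, 100);
	if (ret >= 0)
		*out = *buf;

	kfree(buf);
	return ret < 0 ? ret : 0;
}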
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b979c265fc51..089a4028859d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -3850,7 +3850,8 @@ static inline int bond_slave_override(struct bonding *bond, | |||
| 3850 | /* Find out if any slaves have the same mapping as this skb. */ | 3850 | /* Find out if any slaves have the same mapping as this skb. */ |
| 3851 | bond_for_each_slave_rcu(bond, slave, iter) { | 3851 | bond_for_each_slave_rcu(bond, slave, iter) { |
| 3852 | if (slave->queue_id == skb->queue_mapping) { | 3852 | if (slave->queue_id == skb->queue_mapping) { |
| 3853 | if (bond_slave_can_tx(slave)) { | 3853 | if (bond_slave_is_up(slave) && |
| 3854 | slave->link == BOND_LINK_UP) { | ||
| 3854 | bond_dev_queue_xmit(bond, skb, slave->dev); | 3855 | bond_dev_queue_xmit(bond, skb, slave->dev); |
| 3855 | return 0; | 3856 | return 0; |
| 3856 | } | 3857 | } |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 80c46ad4cee4..ad0a7e8c2c2b 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
| @@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) | |||
| 592 | rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? | 592 | rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? |
| 593 | CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; | 593 | CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; |
| 594 | new_state = max(tx_state, rx_state); | 594 | new_state = max(tx_state, rx_state); |
| 595 | } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) { | 595 | } else { |
| 596 | __flexcan_get_berr_counter(dev, &bec); | 596 | __flexcan_get_berr_counter(dev, &bec); |
| 597 | new_state = CAN_STATE_ERROR_PASSIVE; | 597 | new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ? |
| 598 | CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF; | ||
| 598 | rx_state = bec.rxerr >= bec.txerr ? new_state : 0; | 599 | rx_state = bec.rxerr >= bec.txerr ? new_state : 0; |
| 599 | tx_state = bec.rxerr <= bec.txerr ? new_state : 0; | 600 | tx_state = bec.rxerr <= bec.txerr ? new_state : 0; |
| 600 | } else { | ||
| 601 | new_state = CAN_STATE_BUS_OFF; | ||
| 602 | } | 601 | } |
| 603 | 602 | ||
| 604 | /* state hasn't changed */ | 603 | /* state hasn't changed */ |
| @@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev) | |||
| 1158 | const struct flexcan_devtype_data *devtype_data; | 1157 | const struct flexcan_devtype_data *devtype_data; |
| 1159 | struct net_device *dev; | 1158 | struct net_device *dev; |
| 1160 | struct flexcan_priv *priv; | 1159 | struct flexcan_priv *priv; |
| 1160 | struct regulator *reg_xceiver; | ||
| 1161 | struct resource *mem; | 1161 | struct resource *mem; |
| 1162 | struct clk *clk_ipg = NULL, *clk_per = NULL; | 1162 | struct clk *clk_ipg = NULL, *clk_per = NULL; |
| 1163 | void __iomem *base; | 1163 | void __iomem *base; |
| 1164 | int err, irq; | 1164 | int err, irq; |
| 1165 | u32 clock_freq = 0; | 1165 | u32 clock_freq = 0; |
| 1166 | 1166 | ||
| 1167 | reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); | ||
| 1168 | if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER) | ||
| 1169 | return -EPROBE_DEFER; | ||
| 1170 | else if (IS_ERR(reg_xceiver)) | ||
| 1171 | reg_xceiver = NULL; | ||
| 1172 | |||
| 1167 | if (pdev->dev.of_node) | 1173 | if (pdev->dev.of_node) |
| 1168 | of_property_read_u32(pdev->dev.of_node, | 1174 | of_property_read_u32(pdev->dev.of_node, |
| 1169 | "clock-frequency", &clock_freq); | 1175 | "clock-frequency", &clock_freq); |
| @@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev) | |||
| 1224 | priv->pdata = dev_get_platdata(&pdev->dev); | 1230 | priv->pdata = dev_get_platdata(&pdev->dev); |
| 1225 | priv->devtype_data = devtype_data; | 1231 | priv->devtype_data = devtype_data; |
| 1226 | 1232 | ||
| 1227 | priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); | 1233 | priv->reg_xceiver = reg_xceiver; |
| 1228 | if (IS_ERR(priv->reg_xceiver)) | ||
| 1229 | priv->reg_xceiver = NULL; | ||
| 1230 | 1234 | ||
| 1231 | netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); | 1235 | netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); |
| 1232 | 1236 | ||
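Moving the regulator lookup to the top of flexcan_probe() lets -EPROBE_DEFER propagate before any other resources are claimed, while every other lookup error is still treated as "no transceiver regulator". A sketch of that optional-resource pattern ("xceiver" is the supply name the driver uses; the wrapper itself is illustrative):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static struct regulator *get_optional_xceiver(struct device *dev)
{
	struct regulator *reg = devm_regulator_get(dev, "xceiver");

	if (PTR_ERR(reg) == -EPROBE_DEFER)
		return ERR_PTR(-EPROBE_DEFER);	/* provider not bound yet: retry probe later */
	if (IS_ERR(reg))
		return NULL;			/* carry on without a transceiver supply */
	return reg;
}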
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 009acc8641fc..8b4d3e6875eb 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c | |||
| @@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id * | |||
| 901 | } | 901 | } |
| 902 | 902 | ||
| 903 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 903 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
| 904 | if (!dev) | ||
| 905 | return -ENOMEM; | ||
| 904 | init_usb_anchor(&dev->rx_submitted); | 906 | init_usb_anchor(&dev->rx_submitted); |
| 905 | 907 | ||
| 906 | atomic_set(&dev->active_channels, 0); | 908 | atomic_set(&dev->active_channels, 0); |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index e97a08ce0b90..57611fd91229 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | #include <linux/can/dev.h> | 25 | #include <linux/can/dev.h> |
| 26 | #include <linux/can/error.h> | 26 | #include <linux/can/error.h> |
| 27 | 27 | ||
| 28 | #define MAX_TX_URBS 16 | ||
| 29 | #define MAX_RX_URBS 4 | 28 | #define MAX_RX_URBS 4 |
| 30 | #define START_TIMEOUT 1000 /* msecs */ | 29 | #define START_TIMEOUT 1000 /* msecs */ |
| 31 | #define STOP_TIMEOUT 1000 /* msecs */ | 30 | #define STOP_TIMEOUT 1000 /* msecs */ |
| @@ -443,6 +442,7 @@ struct kvaser_usb_error_summary { | |||
| 443 | }; | 442 | }; |
| 444 | }; | 443 | }; |
| 445 | 444 | ||
| 445 | /* Context for an outstanding, not yet ACKed, transmission */ | ||
| 446 | struct kvaser_usb_tx_urb_context { | 446 | struct kvaser_usb_tx_urb_context { |
| 447 | struct kvaser_usb_net_priv *priv; | 447 | struct kvaser_usb_net_priv *priv; |
| 448 | u32 echo_index; | 448 | u32 echo_index; |
| @@ -456,8 +456,13 @@ struct kvaser_usb { | |||
| 456 | struct usb_endpoint_descriptor *bulk_in, *bulk_out; | 456 | struct usb_endpoint_descriptor *bulk_in, *bulk_out; |
| 457 | struct usb_anchor rx_submitted; | 457 | struct usb_anchor rx_submitted; |
| 458 | 458 | ||
| 459 | /* @max_tx_urbs: Firmware-reported maximum number of outstanding, | ||
| 460 | * not yet ACKed, transmissions on this device. This value is | ||
| 461 | * also used as a sentinel for marking free tx contexts. | ||
| 462 | */ | ||
| 459 | u32 fw_version; | 463 | u32 fw_version; |
| 460 | unsigned int nchannels; | 464 | unsigned int nchannels; |
| 465 | unsigned int max_tx_urbs; | ||
| 461 | enum kvaser_usb_family family; | 466 | enum kvaser_usb_family family; |
| 462 | 467 | ||
| 463 | bool rxinitdone; | 468 | bool rxinitdone; |
| @@ -467,19 +472,18 @@ struct kvaser_usb { | |||
| 467 | 472 | ||
| 468 | struct kvaser_usb_net_priv { | 473 | struct kvaser_usb_net_priv { |
| 469 | struct can_priv can; | 474 | struct can_priv can; |
| 470 | 475 | struct can_berr_counter bec; | |
| 471 | spinlock_t tx_contexts_lock; | ||
| 472 | int active_tx_contexts; | ||
| 473 | struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; | ||
| 474 | |||
| 475 | struct usb_anchor tx_submitted; | ||
| 476 | struct completion start_comp, stop_comp; | ||
| 477 | 476 | ||
| 478 | struct kvaser_usb *dev; | 477 | struct kvaser_usb *dev; |
| 479 | struct net_device *netdev; | 478 | struct net_device *netdev; |
| 480 | int channel; | 479 | int channel; |
| 481 | 480 | ||
| 482 | struct can_berr_counter bec; | 481 | struct completion start_comp, stop_comp; |
| 482 | struct usb_anchor tx_submitted; | ||
| 483 | |||
| 484 | spinlock_t tx_contexts_lock; | ||
| 485 | int active_tx_contexts; | ||
| 486 | struct kvaser_usb_tx_urb_context tx_contexts[]; | ||
| 483 | }; | 487 | }; |
| 484 | 488 | ||
| 485 | static const struct usb_device_id kvaser_usb_table[] = { | 489 | static const struct usb_device_id kvaser_usb_table[] = { |
| @@ -592,8 +596,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, | |||
| 592 | * for further details. | 596 | * for further details. |
| 593 | */ | 597 | */ |
| 594 | if (tmp->len == 0) { | 598 | if (tmp->len == 0) { |
| 595 | pos = round_up(pos, | 599 | pos = round_up(pos, le16_to_cpu(dev->bulk_in-> |
| 596 | dev->bulk_in->wMaxPacketSize); | 600 | wMaxPacketSize)); |
| 597 | continue; | 601 | continue; |
| 598 | } | 602 | } |
| 599 | 603 | ||
| @@ -657,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev) | |||
| 657 | switch (dev->family) { | 661 | switch (dev->family) { |
| 658 | case KVASER_LEAF: | 662 | case KVASER_LEAF: |
| 659 | dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); | 663 | dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); |
| 664 | dev->max_tx_urbs = | ||
| 665 | le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx); | ||
| 660 | break; | 666 | break; |
| 661 | case KVASER_USBCAN: | 667 | case KVASER_USBCAN: |
| 662 | dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); | 668 | dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); |
| 669 | dev->max_tx_urbs = | ||
| 670 | le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx); | ||
| 663 | break; | 671 | break; |
| 664 | } | 672 | } |
| 665 | 673 | ||
| @@ -715,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, | |||
| 715 | 723 | ||
| 716 | stats = &priv->netdev->stats; | 724 | stats = &priv->netdev->stats; |
| 717 | 725 | ||
| 718 | context = &priv->tx_contexts[tid % MAX_TX_URBS]; | 726 | context = &priv->tx_contexts[tid % dev->max_tx_urbs]; |
| 719 | 727 | ||
| 720 | /* Sometimes the state change doesn't come after a bus-off event */ | 728 | /* Sometimes the state change doesn't come after a bus-off event */ |
| 721 | if (priv->can.restart_ms && | 729 | if (priv->can.restart_ms && |
| @@ -744,7 +752,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, | |||
| 744 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); | 752 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); |
| 745 | 753 | ||
| 746 | can_get_echo_skb(priv->netdev, context->echo_index); | 754 | can_get_echo_skb(priv->netdev, context->echo_index); |
| 747 | context->echo_index = MAX_TX_URBS; | 755 | context->echo_index = dev->max_tx_urbs; |
| 748 | --priv->active_tx_contexts; | 756 | --priv->active_tx_contexts; |
| 749 | netif_wake_queue(priv->netdev); | 757 | netif_wake_queue(priv->netdev); |
| 750 | 758 | ||
| @@ -1329,7 +1337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
| 1329 | * number of events in case of a heavy rx load on the bus. | 1337 | * number of events in case of a heavy rx load on the bus. |
| 1330 | */ | 1338 | */ |
| 1331 | if (msg->len == 0) { | 1339 | if (msg->len == 0) { |
| 1332 | pos = round_up(pos, dev->bulk_in->wMaxPacketSize); | 1340 | pos = round_up(pos, le16_to_cpu(dev->bulk_in-> |
| 1341 | wMaxPacketSize)); | ||
| 1333 | continue; | 1342 | continue; |
| 1334 | } | 1343 | } |
| 1335 | 1344 | ||
| @@ -1512,11 +1521,13 @@ error: | |||
| 1512 | 1521 | ||
| 1513 | static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) | 1522 | static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) |
| 1514 | { | 1523 | { |
| 1515 | int i; | 1524 | int i, max_tx_urbs; |
| 1525 | |||
| 1526 | max_tx_urbs = priv->dev->max_tx_urbs; | ||
| 1516 | 1527 | ||
| 1517 | priv->active_tx_contexts = 0; | 1528 | priv->active_tx_contexts = 0; |
| 1518 | for (i = 0; i < MAX_TX_URBS; i++) | 1529 | for (i = 0; i < max_tx_urbs; i++) |
| 1519 | priv->tx_contexts[i].echo_index = MAX_TX_URBS; | 1530 | priv->tx_contexts[i].echo_index = max_tx_urbs; |
| 1520 | } | 1531 | } |
| 1521 | 1532 | ||
| 1522 | /* This method might sleep. Do not call it in the atomic context | 1533 | /* This method might sleep. Do not call it in the atomic context |
| @@ -1702,14 +1713,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
| 1702 | *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; | 1713 | *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; |
| 1703 | 1714 | ||
| 1704 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); | 1715 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); |
| 1705 | for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { | 1716 | for (i = 0; i < dev->max_tx_urbs; i++) { |
| 1706 | if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { | 1717 | if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) { |
| 1707 | context = &priv->tx_contexts[i]; | 1718 | context = &priv->tx_contexts[i]; |
| 1708 | 1719 | ||
| 1709 | context->echo_index = i; | 1720 | context->echo_index = i; |
| 1710 | can_put_echo_skb(skb, netdev, context->echo_index); | 1721 | can_put_echo_skb(skb, netdev, context->echo_index); |
| 1711 | ++priv->active_tx_contexts; | 1722 | ++priv->active_tx_contexts; |
| 1712 | if (priv->active_tx_contexts >= MAX_TX_URBS) | 1723 | if (priv->active_tx_contexts >= dev->max_tx_urbs) |
| 1713 | netif_stop_queue(netdev); | 1724 | netif_stop_queue(netdev); |
| 1714 | 1725 | ||
| 1715 | break; | 1726 | break; |
| @@ -1743,7 +1754,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
| 1743 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); | 1754 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); |
| 1744 | 1755 | ||
| 1745 | can_free_echo_skb(netdev, context->echo_index); | 1756 | can_free_echo_skb(netdev, context->echo_index); |
| 1746 | context->echo_index = MAX_TX_URBS; | 1757 | context->echo_index = dev->max_tx_urbs; |
| 1747 | --priv->active_tx_contexts; | 1758 | --priv->active_tx_contexts; |
| 1748 | netif_wake_queue(netdev); | 1759 | netif_wake_queue(netdev); |
| 1749 | 1760 | ||
| @@ -1881,7 +1892,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf, | |||
| 1881 | if (err) | 1892 | if (err) |
| 1882 | return err; | 1893 | return err; |
| 1883 | 1894 | ||
| 1884 | netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); | 1895 | netdev = alloc_candev(sizeof(*priv) + |
| 1896 | dev->max_tx_urbs * sizeof(*priv->tx_contexts), | ||
| 1897 | dev->max_tx_urbs); | ||
| 1885 | if (!netdev) { | 1898 | if (!netdev) { |
| 1886 | dev_err(&intf->dev, "Cannot alloc candev\n"); | 1899 | dev_err(&intf->dev, "Cannot alloc candev\n"); |
| 1887 | return -ENOMEM; | 1900 | return -ENOMEM; |
| @@ -2009,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
| 2009 | return err; | 2022 | return err; |
| 2010 | } | 2023 | } |
| 2011 | 2024 | ||
| 2025 | dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", | ||
| 2026 | ((dev->fw_version >> 24) & 0xff), | ||
| 2027 | ((dev->fw_version >> 16) & 0xff), | ||
| 2028 | (dev->fw_version & 0xffff)); | ||
| 2029 | |||
| 2030 | dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); | ||
| 2031 | |||
| 2012 | err = kvaser_usb_get_card_info(dev); | 2032 | err = kvaser_usb_get_card_info(dev); |
| 2013 | if (err) { | 2033 | if (err) { |
| 2014 | dev_err(&intf->dev, | 2034 | dev_err(&intf->dev, |
| @@ -2016,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
| 2016 | return err; | 2036 | return err; |
| 2017 | } | 2037 | } |
| 2018 | 2038 | ||
| 2019 | dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", | ||
| 2020 | ((dev->fw_version >> 24) & 0xff), | ||
| 2021 | ((dev->fw_version >> 16) & 0xff), | ||
| 2022 | (dev->fw_version & 0xffff)); | ||
| 2023 | |||
| 2024 | for (i = 0; i < dev->nchannels; i++) { | 2039 | for (i = 0; i < dev->nchannels; i++) { |
| 2025 | err = kvaser_usb_init_one(intf, id, i); | 2040 | err = kvaser_usb_init_one(intf, id, i); |
| 2026 | if (err) { | 2041 | if (err) { |
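With the limit now reported by firmware, the tx context array becomes a flexible array member sized at allocation time, and the firmware-reported maximum doubles as the "free slot" sentinel for echo_index. A cut-down sketch of that allocation pattern:

#include <linux/can/dev.h>
#include <linux/netdevice.h>

struct example_tx_ctx {
	u32 echo_index;
};

struct example_priv {
	unsigned int max_tx_urbs;
	struct example_tx_ctx tx_contexts[];	/* sized at alloc time */
};

static struct net_device *example_alloc(unsigned int max_tx_urbs)
{
	struct net_device *netdev;
	struct example_priv *priv;
	unsigned int i;

	netdev = alloc_candev(sizeof(*priv) +
			      max_tx_urbs * sizeof(struct example_tx_ctx),
			      max_tx_urbs);
	if (!netdev)
		return NULL;

	priv = netdev_priv(netdev);
	priv->max_tx_urbs = max_tx_urbs;
	for (i = 0; i < max_tx_urbs; i++)
		priv->tx_contexts[i].echo_index = max_tx_urbs;	/* marks slot free */

	return netdev;
}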
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h index 1ba7c25002e1..e8fc4952c6b0 100644 --- a/drivers/net/can/usb/peak_usb/pcan_ucan.h +++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h | |||
| @@ -26,8 +26,8 @@ | |||
| 26 | #define PUCAN_CMD_FILTER_STD 0x008 | 26 | #define PUCAN_CMD_FILTER_STD 0x008 |
| 27 | #define PUCAN_CMD_TX_ABORT 0x009 | 27 | #define PUCAN_CMD_TX_ABORT 0x009 |
| 28 | #define PUCAN_CMD_WR_ERR_CNT 0x00a | 28 | #define PUCAN_CMD_WR_ERR_CNT 0x00a |
| 29 | #define PUCAN_CMD_RX_FRAME_ENABLE 0x00b | 29 | #define PUCAN_CMD_SET_EN_OPTION 0x00b |
| 30 | #define PUCAN_CMD_RX_FRAME_DISABLE 0x00c | 30 | #define PUCAN_CMD_CLR_DIS_OPTION 0x00c |
| 31 | #define PUCAN_CMD_END_OF_COLLECTION 0x3ff | 31 | #define PUCAN_CMD_END_OF_COLLECTION 0x3ff |
| 32 | 32 | ||
| 33 | /* uCAN received messages list */ | 33 | /* uCAN received messages list */ |
| @@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt { | |||
| 101 | u16 unused; | 101 | u16 unused; |
| 102 | }; | 102 | }; |
| 103 | 103 | ||
| 104 | /* uCAN RX_FRAME_ENABLE command fields */ | 104 | /* uCAN SET_EN/CLR_DIS _OPTION command fields */ |
| 105 | #define PUCAN_FLTEXT_ERROR 0x0001 | 105 | #define PUCAN_OPTION_ERROR 0x0001 |
| 106 | #define PUCAN_FLTEXT_BUSLOAD 0x0002 | 106 | #define PUCAN_OPTION_BUSLOAD 0x0002 |
| 107 | #define PUCAN_OPTION_CANDFDISO 0x0004 | ||
| 107 | 108 | ||
| 108 | struct __packed pucan_filter_ext { | 109 | struct __packed pucan_options { |
| 109 | __le16 opcode_channel; | 110 | __le16 opcode_channel; |
| 110 | 111 | ||
| 111 | __le16 ext_mask; | 112 | __le16 options; |
| 112 | u32 unused; | 113 | u32 unused; |
| 113 | }; | 114 | }; |
| 114 | 115 | ||
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 0bac0f14edc3..a9221ad9f1a0 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |||
| @@ -110,13 +110,13 @@ struct __packed pcan_ufd_led { | |||
| 110 | u8 unused[5]; | 110 | u8 unused[5]; |
| 111 | }; | 111 | }; |
| 112 | 112 | ||
| 113 | /* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */ | 113 | /* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */ |
| 114 | #define PCAN_UFD_FLTEXT_CALIBRATION 0x8000 | 114 | #define PCAN_UFD_FLTEXT_CALIBRATION 0x8000 |
| 115 | 115 | ||
| 116 | struct __packed pcan_ufd_filter_ext { | 116 | struct __packed pcan_ufd_options { |
| 117 | __le16 opcode_channel; | 117 | __le16 opcode_channel; |
| 118 | 118 | ||
| 119 | __le16 ext_mask; | 119 | __le16 ucan_mask; |
| 120 | u16 unused; | 120 | u16 unused; |
| 121 | __le16 usb_mask; | 121 | __le16 usb_mask; |
| 122 | }; | 122 | }; |
| @@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf) | |||
| 251 | /* moves the pointer forward */ | 251 | /* moves the pointer forward */ |
| 252 | pc += sizeof(struct pucan_wr_err_cnt); | 252 | pc += sizeof(struct pucan_wr_err_cnt); |
| 253 | 253 | ||
| 254 | /* add command to switch from ISO to non-ISO mode, if fw allows it */ | ||
| 255 | if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) { | ||
| 256 | struct pucan_options *puo = (struct pucan_options *)pc; | ||
| 257 | |||
| 258 | puo->opcode_channel = | ||
| 259 | (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ? | ||
| 260 | pucan_cmd_opcode_channel(dev, | ||
| 261 | PUCAN_CMD_CLR_DIS_OPTION) : | ||
| 262 | pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION); | ||
| 263 | |||
| 264 | puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO); | ||
| 265 | |||
| 266 | /* to be sure that no other extended bits will be taken into | ||
| 267 | * account | ||
| 268 | */ | ||
| 269 | puo->unused = 0; | ||
| 270 | |||
| 271 | /* moves the pointer forward */ | ||
| 272 | pc += sizeof(struct pucan_options); | ||
| 273 | } | ||
| 274 | |||
| 254 | /* next, go back to operational mode */ | 275 | /* next, go back to operational mode */ |
| 255 | cmd = (struct pucan_command *)pc; | 276 | cmd = (struct pucan_command *)pc; |
| 256 | cmd->opcode_channel = pucan_cmd_opcode_channel(dev, | 277 | cmd->opcode_channel = pucan_cmd_opcode_channel(dev, |
| @@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx, | |||
| 321 | return pcan_usb_fd_send_cmd(dev, cmd); | 342 | return pcan_usb_fd_send_cmd(dev, cmd); |
| 322 | } | 343 | } |
| 323 | 344 | ||
| 324 | /* set/unset notifications filter: | 345 | /* set/unset options |
| 325 | * | 346 | * |
| 326 | * onoff sets(1)/unset(0) notifications | 347 | * onoff set(1)/unset(0) options |
| 327 | * mask each bit defines a kind of notification to set/unset | 348 | * mask each bit defines a kind of options to set/unset |
| 328 | */ | 349 | */ |
| 329 | static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev, | 350 | static int pcan_usb_fd_set_options(struct peak_usb_device *dev, |
| 330 | bool onoff, u16 ext_mask, u16 usb_mask) | 351 | bool onoff, u16 ucan_mask, u16 usb_mask) |
| 331 | { | 352 | { |
| 332 | struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev); | 353 | struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev); |
| 333 | 354 | ||
| 334 | cmd->opcode_channel = pucan_cmd_opcode_channel(dev, | 355 | cmd->opcode_channel = pucan_cmd_opcode_channel(dev, |
| 335 | (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE : | 356 | (onoff) ? PUCAN_CMD_SET_EN_OPTION : |
| 336 | PUCAN_CMD_RX_FRAME_DISABLE); | 357 | PUCAN_CMD_CLR_DIS_OPTION); |
| 337 | 358 | ||
| 338 | cmd->ext_mask = cpu_to_le16(ext_mask); | 359 | cmd->ucan_mask = cpu_to_le16(ucan_mask); |
| 339 | cmd->usb_mask = cpu_to_le16(usb_mask); | 360 | cmd->usb_mask = cpu_to_le16(usb_mask); |
| 340 | 361 | ||
| 341 | /* send the command */ | 362 | /* send the command */ |
| @@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev) | |||
| 770 | &pcan_usb_pro_fd); | 791 | &pcan_usb_pro_fd); |
| 771 | 792 | ||
| 772 | /* enable USB calibration messages */ | 793 | /* enable USB calibration messages */ |
| 773 | err = pcan_usb_fd_set_filter_ext(dev, 1, | 794 | err = pcan_usb_fd_set_options(dev, 1, |
| 774 | PUCAN_FLTEXT_ERROR, | 795 | PUCAN_OPTION_ERROR, |
| 775 | PCAN_UFD_FLTEXT_CALIBRATION); | 796 | PCAN_UFD_FLTEXT_CALIBRATION); |
| 776 | } | 797 | } |
| 777 | 798 | ||
| 778 | pdev->usb_if->dev_opened_count++; | 799 | pdev->usb_if->dev_opened_count++; |
| @@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev) | |||
| 806 | 827 | ||
| 807 | /* turn off special msgs for that interface if no other dev opened */ | 828 | /* turn off special msgs for that interface if no other dev opened */ |
| 808 | if (pdev->usb_if->dev_opened_count == 1) | 829 | if (pdev->usb_if->dev_opened_count == 1) |
| 809 | pcan_usb_fd_set_filter_ext(dev, 0, | 830 | pcan_usb_fd_set_options(dev, 0, |
| 810 | PUCAN_FLTEXT_ERROR, | 831 | PUCAN_OPTION_ERROR, |
| 811 | PCAN_UFD_FLTEXT_CALIBRATION); | 832 | PCAN_UFD_FLTEXT_CALIBRATION); |
| 812 | pdev->usb_if->dev_opened_count--; | 833 | pdev->usb_if->dev_opened_count--; |
| 813 | 834 | ||
| 814 | return 0; | 835 | return 0; |
| @@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) | |||
| 860 | pdev->usb_if->fw_info.fw_version[2], | 881 | pdev->usb_if->fw_info.fw_version[2], |
| 861 | dev->adapter->ctrl_count); | 882 | dev->adapter->ctrl_count); |
| 862 | 883 | ||
| 863 | /* the currently supported hw is non-ISO */ | 884 | /* check for ability to switch between ISO/non-ISO modes */ |
| 864 | dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO; | 885 | if (pdev->usb_if->fw_info.fw_version[0] >= 2) { |
| 886 | /* firmware >= 2.x supports ISO/non-ISO switching */ | ||
| 887 | dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; | ||
| 888 | } else { | ||
| 889 | /* firmware < 2.x only supports fixed(!) non-ISO */ | ||
| 890 | dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO; | ||
| 891 | } | ||
| 865 | 892 | ||
| 866 | /* tell the hardware the can driver is running */ | 893 | /* tell the hardware the can driver is running */ |
| 867 | err = pcan_usb_fd_drv_loaded(dev, 1); | 894 | err = pcan_usb_fd_drv_loaded(dev, 1); |
| @@ -937,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev) | |||
| 937 | if (dev->ctrl_idx == 0) { | 964 | if (dev->ctrl_idx == 0) { |
| 938 | /* turn off calibration message if any device were opened */ | 965 | /* turn off calibration message if any device were opened */ |
| 939 | if (pdev->usb_if->dev_opened_count > 0) | 966 | if (pdev->usb_if->dev_opened_count > 0) |
| 940 | pcan_usb_fd_set_filter_ext(dev, 0, | 967 | pcan_usb_fd_set_options(dev, 0, |
| 941 | PUCAN_FLTEXT_ERROR, | 968 | PUCAN_OPTION_ERROR, |
| 942 | PCAN_UFD_FLTEXT_CALIBRATION); | 969 | PCAN_UFD_FLTEXT_CALIBRATION); |
| 943 | 970 | ||
| 944 | /* tell USB adapter that the driver is being unloaded */ | 971 | /* tell USB adapter that the driver is being unloaded */ |
| 945 | pcan_usb_fd_drv_loaded(dev, 0); | 972 | pcan_usb_fd_drv_loaded(dev, 0); |
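The firmware check above leans on the split between what user space may toggle and what is forced on: ctrlmode_supported advertises CAN_CTRLMODE_FD_NON_ISO as switchable on firmware >= 2.x, while older firmware simply has it set in ctrlmode. A short sketch of that split (fw_major is a stand-in for the driver's version field):

#include <linux/can/dev.h>

static void setup_fd_iso_mode(struct can_priv *can, u8 fw_major)
{
	if (fw_major >= 2)
		can->ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;	/* user-switchable */
	else
		can->ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;		/* fixed non-ISO only */
}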
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 11d6e6561df1..15a8190a6f75 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c | |||
| @@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
| 1543 | { | 1543 | { |
| 1544 | struct pcnet32_private *lp; | 1544 | struct pcnet32_private *lp; |
| 1545 | int i, media; | 1545 | int i, media; |
| 1546 | int fdx, mii, fset, dxsuflo; | 1546 | int fdx, mii, fset, dxsuflo, sram; |
| 1547 | int chip_version; | 1547 | int chip_version; |
| 1548 | char *chipname; | 1548 | char *chipname; |
| 1549 | struct net_device *dev; | 1549 | struct net_device *dev; |
| @@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
| 1580 | } | 1580 | } |
| 1581 | 1581 | ||
| 1582 | /* initialize variables */ | 1582 | /* initialize variables */ |
| 1583 | fdx = mii = fset = dxsuflo = 0; | 1583 | fdx = mii = fset = dxsuflo = sram = 0; |
| 1584 | chip_version = (chip_version >> 12) & 0xffff; | 1584 | chip_version = (chip_version >> 12) & 0xffff; |
| 1585 | 1585 | ||
| 1586 | switch (chip_version) { | 1586 | switch (chip_version) { |
| @@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
| 1613 | chipname = "PCnet/FAST III 79C973"; /* PCI */ | 1613 | chipname = "PCnet/FAST III 79C973"; /* PCI */ |
| 1614 | fdx = 1; | 1614 | fdx = 1; |
| 1615 | mii = 1; | 1615 | mii = 1; |
| 1616 | sram = 1; | ||
| 1616 | break; | 1617 | break; |
| 1617 | case 0x2626: | 1618 | case 0x2626: |
| 1618 | chipname = "PCnet/Home 79C978"; /* PCI */ | 1619 | chipname = "PCnet/Home 79C978"; /* PCI */ |
| @@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
| 1636 | chipname = "PCnet/FAST III 79C975"; /* PCI */ | 1637 | chipname = "PCnet/FAST III 79C975"; /* PCI */ |
| 1637 | fdx = 1; | 1638 | fdx = 1; |
| 1638 | mii = 1; | 1639 | mii = 1; |
| 1640 | sram = 1; | ||
| 1639 | break; | 1641 | break; |
| 1640 | case 0x2628: | 1642 | case 0x2628: |
| 1641 | chipname = "PCnet/PRO 79C976"; | 1643 | chipname = "PCnet/PRO 79C976"; |
| @@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
| 1664 | dxsuflo = 1; | 1666 | dxsuflo = 1; |
| 1665 | } | 1667 | } |
| 1666 | 1668 | ||
| 1669 | /* | ||
| 1670 | * The Am79C973/Am79C975 controllers come with 12K of SRAM | ||
| 1671 | * which we can use for the Tx/Rx buffers but most importantly, | ||
| 1672 | * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid | ||
| 1673 | * Tx fifo underflows. | ||
| 1674 | */ | ||
| 1675 | if (sram) { | ||
| 1676 | /* | ||
| 1677 | * The SRAM is being configured in two steps. First we | ||
| 1678 | * set the SRAM size in the BCR25:SRAM_SIZE bits. According | ||
| 1679 | * to the datasheet, each bit corresponds to a 512-byte | ||
| 1680 | * page so we can have at most 24 pages. The SRAM_SIZE | ||
| 1681 | * holds the value of the upper 8 bits of the 16-bit SRAM size. | ||
| 1682 | * The low 8-bits start at 0x00 and end at 0xff. So the | ||
| 1683 | * address range is from 0x0000 up to 0x17ff. Therefore, | ||
| 1684 | * the SRAM_SIZE is set to 0x17. The next step is to set | ||
| 1685 | * the BCR26:SRAM_BND midway through so the Tx and Rx | ||
| 1686 | * buffers can share the SRAM equally. | ||
| 1687 | */ | ||
| 1688 | a->write_bcr(ioaddr, 25, 0x17); | ||
| 1689 | a->write_bcr(ioaddr, 26, 0xc); | ||
| 1690 | /* And finally enable the NOUFLO bit */ | ||
| 1691 | a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11)); | ||
| 1692 | } | ||
| 1693 | |||
| 1667 | dev = alloc_etherdev(sizeof(*lp)); | 1694 | dev = alloc_etherdev(sizeof(*lp)); |
| 1668 | if (!dev) { | 1695 | if (!dev) { |
| 1669 | ret = -ENOMEM; | 1696 | ret = -ENOMEM; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 756053c028be..4085c4b31047 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
| @@ -1811,7 +1811,7 @@ struct bnx2x { | |||
| 1811 | int stats_state; | 1811 | int stats_state; |
| 1812 | 1812 | ||
| 1813 | /* used for synchronization of concurrent threads statistics handling */ | 1813 | /* used for synchronization of concurrent threads statistics handling */ |
| 1814 | spinlock_t stats_lock; | 1814 | struct mutex stats_lock; |
| 1815 | 1815 | ||
| 1816 | /* used by dmae command loader */ | 1816 | /* used by dmae command loader */ |
| 1817 | struct dmae_command stats_dmae; | 1817 | struct dmae_command stats_dmae; |
| @@ -1935,8 +1935,6 @@ struct bnx2x { | |||
| 1935 | 1935 | ||
| 1936 | int fp_array_size; | 1936 | int fp_array_size; |
| 1937 | u32 dump_preset_idx; | 1937 | u32 dump_preset_idx; |
| 1938 | bool stats_started; | ||
| 1939 | struct semaphore stats_sema; | ||
| 1940 | 1938 | ||
| 1941 | u8 phys_port_id[ETH_ALEN]; | 1939 | u8 phys_port_id[ETH_ALEN]; |
| 1942 | 1940 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 996e215fc324..1ec635f54994 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -129,8 +129,8 @@ struct bnx2x_mac_vals { | |||
| 129 | u32 xmac_val; | 129 | u32 xmac_val; |
| 130 | u32 emac_addr; | 130 | u32 emac_addr; |
| 131 | u32 emac_val; | 131 | u32 emac_val; |
| 132 | u32 umac_addr; | 132 | u32 umac_addr[2]; |
| 133 | u32 umac_val; | 133 | u32 umac_val[2]; |
| 134 | u32 bmac_addr; | 134 | u32 bmac_addr; |
| 135 | u32 bmac_val[2]; | 135 | u32 bmac_val[2]; |
| 136 | }; | 136 | }; |
| @@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp) | |||
| 7866 | return 0; | 7866 | return 0; |
| 7867 | } | 7867 | } |
| 7868 | 7868 | ||
| 7869 | /* previous driver DMAE transaction may have occurred when pre-boot stage ended | ||
| 7870 | * and boot began, or when kdump kernel was loaded. Either case would invalidate | ||
| 7871 | * the addresses of the transaction, resulting in was-error bit set in the pci | ||
| 7872 | * causing all hw-to-host pcie transactions to timeout. If this happened we want | ||
| 7873 | * to clear the interrupt which detected this from the pglueb and the was done | ||
| 7874 | * bit | ||
| 7875 | */ | ||
| 7876 | static void bnx2x_clean_pglue_errors(struct bnx2x *bp) | ||
| 7877 | { | ||
| 7878 | if (!CHIP_IS_E1x(bp)) | ||
| 7879 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, | ||
| 7880 | 1 << BP_ABS_FUNC(bp)); | ||
| 7881 | } | ||
| 7882 | |||
| 7869 | static int bnx2x_init_hw_func(struct bnx2x *bp) | 7883 | static int bnx2x_init_hw_func(struct bnx2x *bp) |
| 7870 | { | 7884 | { |
| 7871 | int port = BP_PORT(bp); | 7885 | int port = BP_PORT(bp); |
| @@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
| 7958 | 7972 | ||
| 7959 | bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); | 7973 | bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); |
| 7960 | 7974 | ||
| 7961 | if (!CHIP_IS_E1x(bp)) | 7975 | bnx2x_clean_pglue_errors(bp); |
| 7962 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); | ||
| 7963 | 7976 | ||
| 7964 | bnx2x_init_block(bp, BLOCK_ATC, init_phase); | 7977 | bnx2x_init_block(bp, BLOCK_ATC, init_phase); |
| 7965 | bnx2x_init_block(bp, BLOCK_DMAE, init_phase); | 7978 | bnx2x_init_block(bp, BLOCK_DMAE, init_phase); |
| @@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) | |||
| 10141 | return base + (BP_ABS_FUNC(bp)) * stride; | 10154 | return base + (BP_ABS_FUNC(bp)) * stride; |
| 10142 | } | 10155 | } |
| 10143 | 10156 | ||
| 10157 | static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp, | ||
| 10158 | u8 port, u32 reset_reg, | ||
| 10159 | struct bnx2x_mac_vals *vals) | ||
| 10160 | { | ||
| 10161 | u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; | ||
| 10162 | u32 base_addr; | ||
| 10163 | |||
| 10164 | if (!(mask & reset_reg)) | ||
| 10165 | return false; | ||
| 10166 | |||
| 10167 | BNX2X_DEV_INFO("Disable umac Rx %02x\n", port); | ||
| 10168 | base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; | ||
| 10169 | vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG; | ||
| 10170 | vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); | ||
| 10171 | REG_WR(bp, vals->umac_addr[port], 0); | ||
| 10172 | |||
| 10173 | return true; | ||
| 10174 | } | ||
| 10175 | |||
| 10144 | static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | 10176 | static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, |
| 10145 | struct bnx2x_mac_vals *vals) | 10177 | struct bnx2x_mac_vals *vals) |
| 10146 | { | 10178 | { |
| @@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
| 10149 | u8 port = BP_PORT(bp); | 10181 | u8 port = BP_PORT(bp); |
| 10150 | 10182 | ||
| 10151 | /* reset addresses as they also mark which values were changed */ | 10183 | /* reset addresses as they also mark which values were changed */ |
| 10152 | vals->bmac_addr = 0; | 10184 | memset(vals, 0, sizeof(*vals)); |
| 10153 | vals->umac_addr = 0; | ||
| 10154 | vals->xmac_addr = 0; | ||
| 10155 | vals->emac_addr = 0; | ||
| 10156 | 10185 | ||
| 10157 | reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); | 10186 | reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); |
| 10158 | 10187 | ||
| @@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
| 10201 | REG_WR(bp, vals->xmac_addr, 0); | 10230 | REG_WR(bp, vals->xmac_addr, 0); |
| 10202 | mac_stopped = true; | 10231 | mac_stopped = true; |
| 10203 | } | 10232 | } |
| 10204 | mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; | 10233 | |
| 10205 | if (mask & reset_reg) { | 10234 | mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0, |
| 10206 | BNX2X_DEV_INFO("Disable umac Rx\n"); | 10235 | reset_reg, vals); |
| 10207 | base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; | 10236 | mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1, |
| 10208 | vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; | 10237 | reset_reg, vals); |
| 10209 | vals->umac_val = REG_RD(bp, vals->umac_addr); | ||
| 10210 | REG_WR(bp, vals->umac_addr, 0); | ||
| 10211 | mac_stopped = true; | ||
| 10212 | } | ||
| 10213 | } | 10238 | } |
| 10214 | 10239 | ||
| 10215 | if (mac_stopped) | 10240 | if (mac_stopped) |
| @@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
| 10505 | /* Close the MAC Rx to prevent BRB from filling up */ | 10530 | /* Close the MAC Rx to prevent BRB from filling up */ |
| 10506 | bnx2x_prev_unload_close_mac(bp, &mac_vals); | 10531 | bnx2x_prev_unload_close_mac(bp, &mac_vals); |
| 10507 | 10532 | ||
| 10508 | /* close LLH filters towards the BRB */ | 10533 | /* close LLH filters for both ports towards the BRB */ |
| 10509 | bnx2x_set_rx_filter(&bp->link_params, 0); | 10534 | bnx2x_set_rx_filter(&bp->link_params, 0); |
| 10535 | bp->link_params.port ^= 1; | ||
| 10536 | bnx2x_set_rx_filter(&bp->link_params, 0); | ||
| 10537 | bp->link_params.port ^= 1; | ||
| 10510 | 10538 | ||
| 10511 | /* Check if the UNDI driver was previously loaded */ | 10539 | /* Check if the UNDI driver was previously loaded */ |
| 10512 | if (bnx2x_prev_is_after_undi(bp)) { | 10540 | if (bnx2x_prev_is_after_undi(bp)) { |
| @@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
| 10553 | 10581 | ||
| 10554 | if (mac_vals.xmac_addr) | 10582 | if (mac_vals.xmac_addr) |
| 10555 | REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); | 10583 | REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); |
| 10556 | if (mac_vals.umac_addr) | 10584 | if (mac_vals.umac_addr[0]) |
| 10557 | REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val); | 10585 | REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]); |
| 10586 | if (mac_vals.umac_addr[1]) | ||
| 10587 | REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]); | ||
| 10558 | if (mac_vals.emac_addr) | 10588 | if (mac_vals.emac_addr) |
| 10559 | REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); | 10589 | REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); |
| 10560 | if (mac_vals.bmac_addr) { | 10590 | if (mac_vals.bmac_addr) { |
| @@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
| 10571 | return bnx2x_prev_mcp_done(bp); | 10601 | return bnx2x_prev_mcp_done(bp); |
| 10572 | } | 10602 | } |
| 10573 | 10603 | ||
| 10574 | /* previous driver DMAE transaction may have occurred when pre-boot stage ended | ||
| 10575 | * and boot began, or when kdump kernel was loaded. Either case would invalidate | ||
| 10576 | * the addresses of the transaction, resulting in was-error bit set in the pci | ||
| 10577 | * causing all hw-to-host pcie transactions to timeout. If this happened we want | ||
| 10578 | * to clear the interrupt which detected this from the pglueb and the was done | ||
| 10579 | * bit | ||
| 10580 | */ | ||
| 10581 | static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp) | ||
| 10582 | { | ||
| 10583 | if (!CHIP_IS_E1x(bp)) { | ||
| 10584 | u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); | ||
| 10585 | if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { | ||
| 10586 | DP(BNX2X_MSG_SP, | ||
| 10587 | "'was error' bit was found to be set in pglueb upon startup. Clearing\n"); | ||
| 10588 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, | ||
| 10589 | 1 << BP_FUNC(bp)); | ||
| 10590 | } | ||
| 10591 | } | ||
| 10592 | } | ||
| 10593 | |||
| 10594 | static int bnx2x_prev_unload(struct bnx2x *bp) | 10604 | static int bnx2x_prev_unload(struct bnx2x *bp) |
| 10595 | { | 10605 | { |
| 10596 | int time_counter = 10; | 10606 | int time_counter = 10; |
| @@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) | |||
| 10600 | /* clear hw from errors which may have resulted from an interrupted | 10610 | /* clear hw from errors which may have resulted from an interrupted |
| 10601 | * dmae transaction. | 10611 | * dmae transaction. |
| 10602 | */ | 10612 | */ |
| 10603 | bnx2x_prev_interrupted_dmae(bp); | 10613 | bnx2x_clean_pglue_errors(bp); |
| 10604 | 10614 | ||
| 10605 | /* Release previously held locks */ | 10615 | /* Release previously held locks */ |
| 10606 | hw_lock_reg = (BP_FUNC(bp) <= 5) ? | 10616 | hw_lock_reg = (BP_FUNC(bp) <= 5) ? |
| @@ -12037,9 +12047,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
| 12037 | mutex_init(&bp->port.phy_mutex); | 12047 | mutex_init(&bp->port.phy_mutex); |
| 12038 | mutex_init(&bp->fw_mb_mutex); | 12048 | mutex_init(&bp->fw_mb_mutex); |
| 12039 | mutex_init(&bp->drv_info_mutex); | 12049 | mutex_init(&bp->drv_info_mutex); |
| 12050 | mutex_init(&bp->stats_lock); | ||
| 12040 | bp->drv_info_mng_owner = false; | 12051 | bp->drv_info_mng_owner = false; |
| 12041 | spin_lock_init(&bp->stats_lock); | ||
| 12042 | sema_init(&bp->stats_sema, 1); | ||
| 12043 | 12052 | ||
| 12044 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 12053 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
| 12045 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); | 12054 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); |
| @@ -13668,9 +13677,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
| 13668 | cancel_delayed_work_sync(&bp->sp_task); | 13677 | cancel_delayed_work_sync(&bp->sp_task); |
| 13669 | cancel_delayed_work_sync(&bp->period_task); | 13678 | cancel_delayed_work_sync(&bp->period_task); |
| 13670 | 13679 | ||
| 13671 | spin_lock_bh(&bp->stats_lock); | 13680 | mutex_lock(&bp->stats_lock); |
| 13672 | bp->stats_state = STATS_STATE_DISABLED; | 13681 | bp->stats_state = STATS_STATE_DISABLED; |
| 13673 | spin_unlock_bh(&bp->stats_lock); | 13682 | mutex_unlock(&bp->stats_lock); |
| 13674 | 13683 | ||
| 13675 | bnx2x_save_statistics(bp); | 13684 | bnx2x_save_statistics(bp); |
| 13676 | 13685 | ||
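The bnx2x hunks above collapse the old spinlock-plus-semaphore scheme into a single stats_lock mutex, apparently so the statistics state transitions can be serialized in process context where sleeping is allowed. A minimal sketch of the resulting locking shape, with illustrative names:

#include <linux/mutex.h>

struct stats_ctx {
	struct mutex lock;	/* mutex_init() once at setup time */
	int state;
};

static void stats_disable(struct stats_ctx *ctx)
{
	mutex_lock(&ctx->lock);		/* sleepable, unlike the old spin_lock_bh() */
	ctx->state = 0;			/* e.g. STATS_STATE_DISABLED */
	mutex_unlock(&ctx->lock);
}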
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index e5aca2de1871..cfe3c7695455 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
| @@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
| 2238 | 2238 | ||
| 2239 | cookie.vf = vf; | 2239 | cookie.vf = vf; |
| 2240 | cookie.state = VF_ACQUIRED; | 2240 | cookie.state = VF_ACQUIRED; |
| 2241 | bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); | 2241 | rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); |
| 2242 | if (rc) | ||
| 2243 | goto op_err; | ||
| 2242 | } | 2244 | } |
| 2243 | 2245 | ||
| 2244 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); | 2246 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index d1608297c773..800ab44a07ce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
| @@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp) | |||
| 123 | */ | 123 | */ |
| 124 | static void bnx2x_storm_stats_post(struct bnx2x *bp) | 124 | static void bnx2x_storm_stats_post(struct bnx2x *bp) |
| 125 | { | 125 | { |
| 126 | if (!bp->stats_pending) { | 126 | int rc; |
| 127 | int rc; | ||
| 128 | 127 | ||
| 129 | spin_lock_bh(&bp->stats_lock); | 128 | if (bp->stats_pending) |
| 130 | 129 | return; | |
| 131 | if (bp->stats_pending) { | ||
| 132 | spin_unlock_bh(&bp->stats_lock); | ||
| 133 | return; | ||
| 134 | } | ||
| 135 | |||
| 136 | bp->fw_stats_req->hdr.drv_stats_counter = | ||
| 137 | cpu_to_le16(bp->stats_counter++); | ||
| 138 | 130 | ||
| 139 | DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", | 131 | bp->fw_stats_req->hdr.drv_stats_counter = |
| 140 | le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); | 132 | cpu_to_le16(bp->stats_counter++); |
| 141 | 133 | ||
| 142 | /* adjust the ramrod to include VF queues statistics */ | 134 | DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", |
| 143 | bnx2x_iov_adjust_stats_req(bp); | 135 | le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); |
| 144 | bnx2x_dp_stats(bp); | ||
| 145 | 136 | ||
| 146 | /* send FW stats ramrod */ | 137 | /* adjust the ramrod to include VF queues statistics */ |
| 147 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, | 138 | bnx2x_iov_adjust_stats_req(bp); |
| 148 | U64_HI(bp->fw_stats_req_mapping), | 139 | bnx2x_dp_stats(bp); |
| 149 | U64_LO(bp->fw_stats_req_mapping), | ||
| 150 | NONE_CONNECTION_TYPE); | ||
| 151 | if (rc == 0) | ||
| 152 | bp->stats_pending = 1; | ||
| 153 | 140 | ||
| 154 | spin_unlock_bh(&bp->stats_lock); | 141 | /* send FW stats ramrod */ |
| 155 | } | 142 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, |
| 143 | U64_HI(bp->fw_stats_req_mapping), | ||
| 144 | U64_LO(bp->fw_stats_req_mapping), | ||
| 145 | NONE_CONNECTION_TYPE); | ||
| 146 | if (rc == 0) | ||
| 147 | bp->stats_pending = 1; | ||
| 156 | } | 148 | } |
| 157 | 149 | ||
| 158 | static void bnx2x_hw_stats_post(struct bnx2x *bp) | 150 | static void bnx2x_hw_stats_post(struct bnx2x *bp) |
| @@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp) | |||
| 221 | */ | 213 | */ |
| 222 | 214 | ||
| 223 | /* should be called under stats_sema */ | 215 | /* should be called under stats_sema */ |
| 224 | static void __bnx2x_stats_pmf_update(struct bnx2x *bp) | 216 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) |
| 225 | { | 217 | { |
| 226 | struct dmae_command *dmae; | 218 | struct dmae_command *dmae; |
| 227 | u32 opcode; | 219 | u32 opcode; |
| @@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) | |||
| 519 | } | 511 | } |
| 520 | 512 | ||
| 521 | /* should be called under stats_sema */ | 513 | /* should be called under stats_sema */ |
| 522 | static void __bnx2x_stats_start(struct bnx2x *bp) | 514 | static void bnx2x_stats_start(struct bnx2x *bp) |
| 523 | { | 515 | { |
| 524 | if (IS_PF(bp)) { | 516 | if (IS_PF(bp)) { |
| 525 | if (bp->port.pmf) | 517 | if (bp->port.pmf) |
| @@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp) | |||
| 531 | bnx2x_hw_stats_post(bp); | 523 | bnx2x_hw_stats_post(bp); |
| 532 | bnx2x_storm_stats_post(bp); | 524 | bnx2x_storm_stats_post(bp); |
| 533 | } | 525 | } |
| 534 | |||
| 535 | bp->stats_started = true; | ||
| 536 | } | ||
| 537 | |||
| 538 | static void bnx2x_stats_start(struct bnx2x *bp) | ||
| 539 | { | ||
| 540 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 541 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 542 | __bnx2x_stats_start(bp); | ||
| 543 | up(&bp->stats_sema); | ||
| 544 | } | 526 | } |
| 545 | 527 | ||
| 546 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) | 528 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) |
| 547 | { | 529 | { |
| 548 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 549 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 550 | bnx2x_stats_comp(bp); | 530 | bnx2x_stats_comp(bp); |
| 551 | __bnx2x_stats_pmf_update(bp); | 531 | bnx2x_stats_pmf_update(bp); |
| 552 | __bnx2x_stats_start(bp); | 532 | bnx2x_stats_start(bp); |
| 553 | up(&bp->stats_sema); | ||
| 554 | } | ||
| 555 | |||
| 556 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
| 557 | { | ||
| 558 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 559 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 560 | __bnx2x_stats_pmf_update(bp); | ||
| 561 | up(&bp->stats_sema); | ||
| 562 | } | 533 | } |
| 563 | 534 | ||
| 564 | static void bnx2x_stats_restart(struct bnx2x *bp) | 535 | static void bnx2x_stats_restart(struct bnx2x *bp) |
| @@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp) | |||
| 568 | */ | 539 | */ |
| 569 | if (IS_VF(bp)) | 540 | if (IS_VF(bp)) |
| 570 | return; | 541 | return; |
| 571 | if (down_timeout(&bp->stats_sema, HZ/10)) | 542 | |
| 572 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 573 | bnx2x_stats_comp(bp); | 543 | bnx2x_stats_comp(bp); |
| 574 | __bnx2x_stats_start(bp); | 544 | bnx2x_stats_start(bp); |
| 575 | up(&bp->stats_sema); | ||
| 576 | } | 545 | } |
| 577 | 546 | ||
| 578 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) | 547 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) |
| @@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
| 1246 | { | 1215 | { |
| 1247 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | 1216 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); |
| 1248 | 1217 | ||
| 1249 | /* we run update from timer context, so give up | 1218 | if (bnx2x_edebug_stats_stopped(bp)) |
| 1250 | * if somebody is in the middle of transition | ||
| 1251 | */ | ||
| 1252 | if (down_trylock(&bp->stats_sema)) | ||
| 1253 | return; | 1219 | return; |
| 1254 | 1220 | ||
| 1255 | if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) | ||
| 1256 | goto out; | ||
| 1257 | |||
| 1258 | if (IS_PF(bp)) { | 1221 | if (IS_PF(bp)) { |
| 1259 | if (*stats_comp != DMAE_COMP_VAL) | 1222 | if (*stats_comp != DMAE_COMP_VAL) |
| 1260 | goto out; | 1223 | return; |
| 1261 | 1224 | ||
| 1262 | if (bp->port.pmf) | 1225 | if (bp->port.pmf) |
| 1263 | bnx2x_hw_stats_update(bp); | 1226 | bnx2x_hw_stats_update(bp); |
| @@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
| 1267 | BNX2X_ERR("storm stats were not updated for 3 times\n"); | 1230 | BNX2X_ERR("storm stats were not updated for 3 times\n"); |
| 1268 | bnx2x_panic(); | 1231 | bnx2x_panic(); |
| 1269 | } | 1232 | } |
| 1270 | goto out; | 1233 | return; |
| 1271 | } | 1234 | } |
| 1272 | } else { | 1235 | } else { |
| 1273 | /* vf doesn't collect HW statistics, and doesn't get completions | 1236 | /* vf doesn't collect HW statistics, and doesn't get completions |
| @@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
| 1281 | 1244 | ||
| 1282 | /* vf is done */ | 1245 | /* vf is done */ |
| 1283 | if (IS_VF(bp)) | 1246 | if (IS_VF(bp)) |
| 1284 | goto out; | 1247 | return; |
| 1285 | 1248 | ||
| 1286 | if (netif_msg_timer(bp)) { | 1249 | if (netif_msg_timer(bp)) { |
| 1287 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 1250 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
| @@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
| 1292 | 1255 | ||
| 1293 | bnx2x_hw_stats_post(bp); | 1256 | bnx2x_hw_stats_post(bp); |
| 1294 | bnx2x_storm_stats_post(bp); | 1257 | bnx2x_storm_stats_post(bp); |
| 1295 | |||
| 1296 | out: | ||
| 1297 | up(&bp->stats_sema); | ||
| 1298 | } | 1258 | } |
| 1299 | 1259 | ||
| 1300 | static void bnx2x_port_stats_stop(struct bnx2x *bp) | 1260 | static void bnx2x_port_stats_stop(struct bnx2x *bp) |
| @@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp) | |||
| 1358 | 1318 | ||
| 1359 | static void bnx2x_stats_stop(struct bnx2x *bp) | 1319 | static void bnx2x_stats_stop(struct bnx2x *bp) |
| 1360 | { | 1320 | { |
| 1361 | int update = 0; | 1321 | bool update = false; |
| 1362 | |||
| 1363 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 1364 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 1365 | |||
| 1366 | bp->stats_started = false; | ||
| 1367 | 1322 | ||
| 1368 | bnx2x_stats_comp(bp); | 1323 | bnx2x_stats_comp(bp); |
| 1369 | 1324 | ||
| @@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
| 1381 | bnx2x_hw_stats_post(bp); | 1336 | bnx2x_hw_stats_post(bp); |
| 1382 | bnx2x_stats_comp(bp); | 1337 | bnx2x_stats_comp(bp); |
| 1383 | } | 1338 | } |
| 1384 | |||
| 1385 | up(&bp->stats_sema); | ||
| 1386 | } | 1339 | } |
| 1387 | 1340 | ||
| 1388 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) | 1341 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) |
| @@ -1410,18 +1363,28 @@ static const struct { | |||
| 1410 | 1363 | ||
| 1411 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 1364 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
| 1412 | { | 1365 | { |
| 1413 | enum bnx2x_stats_state state; | 1366 | enum bnx2x_stats_state state = bp->stats_state; |
| 1414 | void (*action)(struct bnx2x *bp); | 1367 | |
| 1415 | if (unlikely(bp->panic)) | 1368 | if (unlikely(bp->panic)) |
| 1416 | return; | 1369 | return; |
| 1417 | 1370 | ||
| 1418 | spin_lock_bh(&bp->stats_lock); | 1371 | /* Statistics updates run from timer context, and we don't want to stop |
| 1419 | state = bp->stats_state; | 1372 | * that context in case someone is in the middle of a transition. |
| 1373 | * For other events, wait a bit until lock is taken. | ||
| 1374 | */ | ||
| 1375 | if (!mutex_trylock(&bp->stats_lock)) { | ||
| 1376 | if (event == STATS_EVENT_UPDATE) | ||
| 1377 | return; | ||
| 1378 | |||
| 1379 | DP(BNX2X_MSG_STATS, | ||
| 1380 | "Unlikely stats' lock contention [event %d]\n", event); | ||
| 1381 | mutex_lock(&bp->stats_lock); | ||
| 1382 | } | ||
| 1383 | |||
| 1384 | bnx2x_stats_stm[state][event].action(bp); | ||
| 1420 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1385 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
| 1421 | action = bnx2x_stats_stm[state][event].action; | ||
| 1422 | spin_unlock_bh(&bp->stats_lock); | ||
| 1423 | 1386 | ||
| 1424 | action(bp); | 1387 | mutex_unlock(&bp->stats_lock); |
| 1425 | 1388 | ||
| 1426 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1389 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
| 1427 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1390 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
| @@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, | |||
| 1998 | } | 1961 | } |
| 1999 | } | 1962 | } |
| 2000 | 1963 | ||
| 2001 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | 1964 | int bnx2x_stats_safe_exec(struct bnx2x *bp, |
| 2002 | void (func_to_exec)(void *cookie), | 1965 | void (func_to_exec)(void *cookie), |
| 2003 | void *cookie){ | 1966 | void *cookie) |
| 2004 | if (down_timeout(&bp->stats_sema, HZ/10)) | 1967 | { |
| 2005 | BNX2X_ERR("Unable to acquire stats lock\n"); | 1968 | int cnt = 10, rc = 0; |
| 1969 | |||
| 1970 | /* Wait for statistics to end [while blocking further requests], | ||
| 1971 | * then run supplied function 'safely'. | ||
| 1972 | */ | ||
| 1973 | mutex_lock(&bp->stats_lock); | ||
| 1974 | |||
| 2006 | bnx2x_stats_comp(bp); | 1975 | bnx2x_stats_comp(bp); |
| 1976 | while (bp->stats_pending && cnt--) | ||
| 1977 | if (bnx2x_storm_stats_update(bp)) | ||
| 1978 | usleep_range(1000, 2000); | ||
| 1979 | if (bp->stats_pending) { | ||
| 1980 | BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n"); | ||
| 1981 | rc = -EBUSY; | ||
| 1982 | goto out; | ||
| 1983 | } | ||
| 1984 | |||
| 2007 | func_to_exec(cookie); | 1985 | func_to_exec(cookie); |
| 2008 | __bnx2x_stats_start(bp); | 1986 | |
| 2009 | up(&bp->stats_sema); | 1987 | out: |
| 1988 | /* No need to restart statistics - if they're enabled, the timer | ||
| 1989 | * will restart the statistics. | ||
| 1990 | */ | ||
| 1991 | mutex_unlock(&bp->stats_lock); | ||
| 1992 | |||
| 1993 | return rc; | ||
| 2010 | } | 1994 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 2beceaefdeea..965539a9dabe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | |||
| @@ -539,9 +539,9 @@ struct bnx2x; | |||
| 539 | void bnx2x_memset_stats(struct bnx2x *bp); | 539 | void bnx2x_memset_stats(struct bnx2x *bp); |
| 540 | void bnx2x_stats_init(struct bnx2x *bp); | 540 | void bnx2x_stats_init(struct bnx2x *bp); |
| 541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | 541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); |
| 542 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | 542 | int bnx2x_stats_safe_exec(struct bnx2x *bp, |
| 543 | void (func_to_exec)(void *cookie), | 543 | void (func_to_exec)(void *cookie), |
| 544 | void *cookie); | 544 | void *cookie); |
| 545 | 545 | ||
| 546 | /** | 546 | /** |
| 547 | * bnx2x_save_statistics - save statistics when unloading. | 547 | * bnx2x_save_statistics - save statistics when unloading. |
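Since bnx2x_stats_safe_exec() now returns an error code instead of blocking on a semaphore, callers are expected to check it, as the bnx2x_vf_close() hunk above does. A minimal caller sketch; the helper names below are hypothetical and only illustrate the calling convention:

	static void set_flag(void *cookie)
	{
		*(bool *)cookie = true;		/* runs with statistics quiesced */
	}

	static int example_quiesced_update(struct bnx2x *bp)
	{
		bool done = false;
		int rc;

		/* May fail with -EBUSY if pending FW statistics never complete */
		rc = bnx2x_stats_safe_exec(bp, set_flag, &done);
		if (rc)
			return rc;

		return done ? 0 : -EINVAL;
	}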
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 97842d03675b..c6ff4890d171 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
| @@ -376,8 +376,6 @@ enum { | |||
| 376 | enum { | 376 | enum { |
| 377 | INGQ_EXTRAS = 2, /* firmware event queue and */ | 377 | INGQ_EXTRAS = 2, /* firmware event queue and */ |
| 378 | /* forwarded interrupts */ | 378 | /* forwarded interrupts */ |
| 379 | MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2 | ||
| 380 | + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES, | ||
| 381 | MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES | 379 | MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES |
| 382 | + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, | 380 | + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, |
| 383 | }; | 381 | }; |
| @@ -616,11 +614,13 @@ struct sge { | |||
| 616 | unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */ | 614 | unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */ |
| 617 | 615 | ||
| 618 | unsigned int egr_start; | 616 | unsigned int egr_start; |
| 617 | unsigned int egr_sz; | ||
| 619 | unsigned int ingr_start; | 618 | unsigned int ingr_start; |
| 620 | void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ | 619 | unsigned int ingr_sz; |
| 621 | struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ | 620 | void **egr_map; /* qid->queue egress queue map */ |
| 622 | DECLARE_BITMAP(starving_fl, MAX_EGRQ); | 621 | struct sge_rspq **ingr_map; /* qid->queue ingress queue map */ |
| 623 | DECLARE_BITMAP(txq_maperr, MAX_EGRQ); | 622 | unsigned long *starving_fl; |
| 623 | unsigned long *txq_maperr; | ||
| 624 | struct timer_list rx_timer; /* refills starving FLs */ | 624 | struct timer_list rx_timer; /* refills starving FLs */ |
| 625 | struct timer_list tx_timer; /* checks Tx queues */ | 625 | struct timer_list tx_timer; /* checks Tx queues */ |
| 626 | }; | 626 | }; |
| @@ -1136,6 +1136,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, | |||
| 1136 | 1136 | ||
| 1137 | unsigned int qtimer_val(const struct adapter *adap, | 1137 | unsigned int qtimer_val(const struct adapter *adap, |
| 1138 | const struct sge_rspq *q); | 1138 | const struct sge_rspq *q); |
| 1139 | |||
| 1140 | int t4_init_devlog_params(struct adapter *adapter); | ||
| 1139 | int t4_init_sge_params(struct adapter *adapter); | 1141 | int t4_init_sge_params(struct adapter *adapter); |
| 1140 | int t4_init_tp_params(struct adapter *adap); | 1142 | int t4_init_tp_params(struct adapter *adap); |
| 1141 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel); | 1143 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 78854ceb0870..dcb047945290 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | |||
| @@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v) | |||
| 670 | "0.9375" }; | 670 | "0.9375" }; |
| 671 | 671 | ||
| 672 | int i; | 672 | int i; |
| 673 | u16 incr[NMTUS][NCCTRL_WIN]; | 673 | u16 (*incr)[NCCTRL_WIN]; |
| 674 | struct adapter *adap = seq->private; | 674 | struct adapter *adap = seq->private; |
| 675 | 675 | ||
| 676 | incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL); | ||
| 677 | if (!incr) | ||
| 678 | return -ENOMEM; | ||
| 679 | |||
| 676 | t4_read_cong_tbl(adap, incr); | 680 | t4_read_cong_tbl(adap, incr); |
| 677 | 681 | ||
| 678 | for (i = 0; i < NCCTRL_WIN; ++i) { | 682 | for (i = 0; i < NCCTRL_WIN; ++i) { |
| @@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v) | |||
| 685 | adap->params.a_wnd[i], | 689 | adap->params.a_wnd[i], |
| 686 | dec_fac[adap->params.b_wnd[i]]); | 690 | dec_fac[adap->params.b_wnd[i]]); |
| 687 | } | 691 | } |
| 692 | |||
| 693 | kfree(incr); | ||
| 688 | return 0; | 694 | return 0; |
| 689 | } | 695 | } |
| 690 | 696 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index a22cf932ca35..d92995138f7e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap) | |||
| 920 | { | 920 | { |
| 921 | int i; | 921 | int i; |
| 922 | 922 | ||
| 923 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | 923 | for (i = 0; i < adap->sge.ingr_sz; i++) { |
| 924 | struct sge_rspq *q = adap->sge.ingr_map[i]; | 924 | struct sge_rspq *q = adap->sge.ingr_map[i]; |
| 925 | 925 | ||
| 926 | if (q && q->handler) { | 926 | if (q && q->handler) { |
| @@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap) | |||
| 934 | } | 934 | } |
| 935 | } | 935 | } |
| 936 | 936 | ||
| 937 | /* Disable interrupt and napi handler */ | ||
| 938 | static void disable_interrupts(struct adapter *adap) | ||
| 939 | { | ||
| 940 | if (adap->flags & FULL_INIT_DONE) { | ||
| 941 | t4_intr_disable(adap); | ||
| 942 | if (adap->flags & USING_MSIX) { | ||
| 943 | free_msix_queue_irqs(adap); | ||
| 944 | free_irq(adap->msix_info[0].vec, adap); | ||
| 945 | } else { | ||
| 946 | free_irq(adap->pdev->irq, adap); | ||
| 947 | } | ||
| 948 | quiesce_rx(adap); | ||
| 949 | } | ||
| 950 | } | ||
| 951 | |||
| 937 | /* | 952 | /* |
| 938 | * Enable NAPI scheduling and interrupt generation for all Rx queues. | 953 | * Enable NAPI scheduling and interrupt generation for all Rx queues. |
| 939 | */ | 954 | */ |
| @@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap) | |||
| 941 | { | 956 | { |
| 942 | int i; | 957 | int i; |
| 943 | 958 | ||
| 944 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | 959 | for (i = 0; i < adap->sge.ingr_sz; i++) { |
| 945 | struct sge_rspq *q = adap->sge.ingr_map[i]; | 960 | struct sge_rspq *q = adap->sge.ingr_map[i]; |
| 946 | 961 | ||
| 947 | if (!q) | 962 | if (!q) |
| @@ -970,8 +985,8 @@ static int setup_sge_queues(struct adapter *adap) | |||
| 970 | int err, msi_idx, i, j; | 985 | int err, msi_idx, i, j; |
| 971 | struct sge *s = &adap->sge; | 986 | struct sge *s = &adap->sge; |
| 972 | 987 | ||
| 973 | bitmap_zero(s->starving_fl, MAX_EGRQ); | 988 | bitmap_zero(s->starving_fl, s->egr_sz); |
| 974 | bitmap_zero(s->txq_maperr, MAX_EGRQ); | 989 | bitmap_zero(s->txq_maperr, s->egr_sz); |
| 975 | 990 | ||
| 976 | if (adap->flags & USING_MSIX) | 991 | if (adap->flags & USING_MSIX) |
| 977 | msi_idx = 1; /* vector 0 is for non-queue interrupts */ | 992 | msi_idx = 1; /* vector 0 is for non-queue interrupts */ |
| @@ -983,6 +998,19 @@ static int setup_sge_queues(struct adapter *adap) | |||
| 983 | msi_idx = -((int)s->intrq.abs_id + 1); | 998 | msi_idx = -((int)s->intrq.abs_id + 1); |
| 984 | } | 999 | } |
| 985 | 1000 | ||
| 1001 | /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here, | ||
| 1002 | * don't forget to update the following which need to be | ||
| 1003 | * synchronized to and changes here. | ||
| 1004 | * | ||
| 1005 | * 1. The calculations of MAX_INGQ in cxgb4.h. | ||
| 1006 | * | ||
| 1007 | * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs | ||
| 1008 | * to accommodate any new/deleted Ingress Queues | ||
| 1009 | * which need MSI-X Vectors. | ||
| 1010 | * | ||
| 1011 | * 3. Update sge_qinfo_show() to include information on the | ||
| 1012 | * new/deleted queues. | ||
| 1013 | */ | ||
| 986 | err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], | 1014 | err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], |
| 987 | msi_idx, NULL, fwevtq_handler); | 1015 | msi_idx, NULL, fwevtq_handler); |
| 988 | if (err) { | 1016 | if (err) { |
| @@ -4244,19 +4272,12 @@ static int cxgb_up(struct adapter *adap) | |||
| 4244 | 4272 | ||
| 4245 | static void cxgb_down(struct adapter *adapter) | 4273 | static void cxgb_down(struct adapter *adapter) |
| 4246 | { | 4274 | { |
| 4247 | t4_intr_disable(adapter); | ||
| 4248 | cancel_work_sync(&adapter->tid_release_task); | 4275 | cancel_work_sync(&adapter->tid_release_task); |
| 4249 | cancel_work_sync(&adapter->db_full_task); | 4276 | cancel_work_sync(&adapter->db_full_task); |
| 4250 | cancel_work_sync(&adapter->db_drop_task); | 4277 | cancel_work_sync(&adapter->db_drop_task); |
| 4251 | adapter->tid_release_task_busy = false; | 4278 | adapter->tid_release_task_busy = false; |
| 4252 | adapter->tid_release_head = NULL; | 4279 | adapter->tid_release_head = NULL; |
| 4253 | 4280 | ||
| 4254 | if (adapter->flags & USING_MSIX) { | ||
| 4255 | free_msix_queue_irqs(adapter); | ||
| 4256 | free_irq(adapter->msix_info[0].vec, adapter); | ||
| 4257 | } else | ||
| 4258 | free_irq(adapter->pdev->irq, adapter); | ||
| 4259 | quiesce_rx(adapter); | ||
| 4260 | t4_sge_stop(adapter); | 4281 | t4_sge_stop(adapter); |
| 4261 | t4_free_sge_resources(adapter); | 4282 | t4_free_sge_resources(adapter); |
| 4262 | adapter->flags &= ~FULL_INIT_DONE; | 4283 | adapter->flags &= ~FULL_INIT_DONE; |
| @@ -4733,8 +4754,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) | |||
| 4733 | if (ret < 0) | 4754 | if (ret < 0) |
| 4734 | return ret; | 4755 | return ret; |
| 4735 | 4756 | ||
| 4736 | ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, | 4757 | ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64, |
| 4737 | 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); | 4758 | MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, |
| 4759 | FW_CMD_CAP_PF); | ||
| 4738 | if (ret < 0) | 4760 | if (ret < 0) |
| 4739 | return ret; | 4761 | return ret; |
| 4740 | 4762 | ||
| @@ -5088,10 +5110,15 @@ static int adap_init0(struct adapter *adap) | |||
| 5088 | enum dev_state state; | 5110 | enum dev_state state; |
| 5089 | u32 params[7], val[7]; | 5111 | u32 params[7], val[7]; |
| 5090 | struct fw_caps_config_cmd caps_cmd; | 5112 | struct fw_caps_config_cmd caps_cmd; |
| 5091 | struct fw_devlog_cmd devlog_cmd; | ||
| 5092 | u32 devlog_meminfo; | ||
| 5093 | int reset = 1; | 5113 | int reset = 1; |
| 5094 | 5114 | ||
| 5115 | /* Grab Firmware Device Log parameters as early as possible so we have | ||
| 5116 | * access to it for debugging, etc. | ||
| 5117 | */ | ||
| 5118 | ret = t4_init_devlog_params(adap); | ||
| 5119 | if (ret < 0) | ||
| 5120 | return ret; | ||
| 5121 | |||
| 5095 | /* Contact FW, advertising Master capability */ | 5122 | /* Contact FW, advertising Master capability */ |
| 5096 | ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); | 5123 | ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); |
| 5097 | if (ret < 0) { | 5124 | if (ret < 0) { |
| @@ -5169,30 +5196,6 @@ static int adap_init0(struct adapter *adap) | |||
| 5169 | if (ret < 0) | 5196 | if (ret < 0) |
| 5170 | goto bye; | 5197 | goto bye; |
| 5171 | 5198 | ||
| 5172 | /* Read firmware device log parameters. We really need to find a way | ||
| 5173 | * to get these parameters initialized with some default values (which | ||
| 5174 | * are likely to be correct) for the case where we either don't | ||
| 5175 | * attache to the firmware or it's crashed when we probe the adapter. | ||
| 5176 | * That way we'll still be able to perform early firmware startup | ||
| 5177 | * debugging ... If the request to get the Firmware's Device Log | ||
| 5178 | * parameters fails, we'll live so we don't make that a fatal error. | ||
| 5179 | */ | ||
| 5180 | memset(&devlog_cmd, 0, sizeof(devlog_cmd)); | ||
| 5181 | devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) | | ||
| 5182 | FW_CMD_REQUEST_F | FW_CMD_READ_F); | ||
| 5183 | devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd)); | ||
| 5184 | ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), | ||
| 5185 | &devlog_cmd); | ||
| 5186 | if (ret == 0) { | ||
| 5187 | devlog_meminfo = | ||
| 5188 | ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog); | ||
| 5189 | adap->params.devlog.memtype = | ||
| 5190 | FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo); | ||
| 5191 | adap->params.devlog.start = | ||
| 5192 | FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4; | ||
| 5193 | adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog); | ||
| 5194 | } | ||
| 5195 | |||
| 5196 | /* | 5199 | /* |
| 5197 | * Find out what ports are available to us. Note that we need to do | 5200 | * Find out what ports are available to us. Note that we need to do |
| 5198 | * this before calling adap_init0_no_config() since it needs nports | 5201 | * this before calling adap_init0_no_config() since it needs nports |
| @@ -5293,6 +5296,51 @@ static int adap_init0(struct adapter *adap) | |||
| 5293 | adap->tids.nftids = val[4] - val[3] + 1; | 5296 | adap->tids.nftids = val[4] - val[3] + 1; |
| 5294 | adap->sge.ingr_start = val[5]; | 5297 | adap->sge.ingr_start = val[5]; |
| 5295 | 5298 | ||
| 5299 | /* qids (ingress/egress) returned from firmware can be anywhere | ||
| 5300 | * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. | ||
| 5301 | * Hence driver needs to allocate memory for this range to | ||
| 5302 | * store the queue info. Get the highest IQFLINT/EQ index returned | ||
| 5303 | * in FW_EQ_*_CMD.alloc command. | ||
| 5304 | */ | ||
| 5305 | params[0] = FW_PARAM_PFVF(EQ_END); | ||
| 5306 | params[1] = FW_PARAM_PFVF(IQFLINT_END); | ||
| 5307 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); | ||
| 5308 | if (ret < 0) | ||
| 5309 | goto bye; | ||
| 5310 | adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; | ||
| 5311 | adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; | ||
| 5312 | |||
| 5313 | adap->sge.egr_map = kcalloc(adap->sge.egr_sz, | ||
| 5314 | sizeof(*adap->sge.egr_map), GFP_KERNEL); | ||
| 5315 | if (!adap->sge.egr_map) { | ||
| 5316 | ret = -ENOMEM; | ||
| 5317 | goto bye; | ||
| 5318 | } | ||
| 5319 | |||
| 5320 | adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, | ||
| 5321 | sizeof(*adap->sge.ingr_map), GFP_KERNEL); | ||
| 5322 | if (!adap->sge.ingr_map) { | ||
| 5323 | ret = -ENOMEM; | ||
| 5324 | goto bye; | ||
| 5325 | } | ||
| 5326 | |||
| 5327 | /* Allocate the memory for the various egress queue bitmaps | ||
| 5328 | * i.e. starving_fl and txq_maperr. | ||
| 5329 | */ | ||
| 5330 | adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), | ||
| 5331 | sizeof(long), GFP_KERNEL); | ||
| 5332 | if (!adap->sge.starving_fl) { | ||
| 5333 | ret = -ENOMEM; | ||
| 5334 | goto bye; | ||
| 5335 | } | ||
| 5336 | |||
| 5337 | adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), | ||
| 5338 | sizeof(long), GFP_KERNEL); | ||
| 5339 | if (!adap->sge.txq_maperr) { | ||
| 5340 | ret = -ENOMEM; | ||
| 5341 | goto bye; | ||
| 5342 | } | ||
| 5343 | |||
| 5296 | params[0] = FW_PARAM_PFVF(CLIP_START); | 5344 | params[0] = FW_PARAM_PFVF(CLIP_START); |
| 5297 | params[1] = FW_PARAM_PFVF(CLIP_END); | 5345 | params[1] = FW_PARAM_PFVF(CLIP_END); |
| 5298 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); | 5346 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); |
| @@ -5501,6 +5549,10 @@ static int adap_init0(struct adapter *adap) | |||
| 5501 | * happened to HW/FW, stop issuing commands. | 5549 | * happened to HW/FW, stop issuing commands. |
| 5502 | */ | 5550 | */ |
| 5503 | bye: | 5551 | bye: |
| 5552 | kfree(adap->sge.egr_map); | ||
| 5553 | kfree(adap->sge.ingr_map); | ||
| 5554 | kfree(adap->sge.starving_fl); | ||
| 5555 | kfree(adap->sge.txq_maperr); | ||
| 5504 | if (ret != -ETIMEDOUT && ret != -EIO) | 5556 | if (ret != -ETIMEDOUT && ret != -EIO) |
| 5505 | t4_fw_bye(adap, adap->mbox); | 5557 | t4_fw_bye(adap, adap->mbox); |
| 5506 | return ret; | 5558 | return ret; |
| @@ -5528,6 +5580,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, | |||
| 5528 | netif_carrier_off(dev); | 5580 | netif_carrier_off(dev); |
| 5529 | } | 5581 | } |
| 5530 | spin_unlock(&adap->stats_lock); | 5582 | spin_unlock(&adap->stats_lock); |
| 5583 | disable_interrupts(adap); | ||
| 5531 | if (adap->flags & FULL_INIT_DONE) | 5584 | if (adap->flags & FULL_INIT_DONE) |
| 5532 | cxgb_down(adap); | 5585 | cxgb_down(adap); |
| 5533 | rtnl_unlock(); | 5586 | rtnl_unlock(); |
| @@ -5912,6 +5965,10 @@ static void free_some_resources(struct adapter *adapter) | |||
| 5912 | 5965 | ||
| 5913 | t4_free_mem(adapter->l2t); | 5966 | t4_free_mem(adapter->l2t); |
| 5914 | t4_free_mem(adapter->tids.tid_tab); | 5967 | t4_free_mem(adapter->tids.tid_tab); |
| 5968 | kfree(adapter->sge.egr_map); | ||
| 5969 | kfree(adapter->sge.ingr_map); | ||
| 5970 | kfree(adapter->sge.starving_fl); | ||
| 5971 | kfree(adapter->sge.txq_maperr); | ||
| 5915 | disable_msi(adapter); | 5972 | disable_msi(adapter); |
| 5916 | 5973 | ||
| 5917 | for_each_port(adapter, i) | 5974 | for_each_port(adapter, i) |
| @@ -6237,6 +6294,8 @@ static void remove_one(struct pci_dev *pdev) | |||
| 6237 | if (is_offload(adapter)) | 6294 | if (is_offload(adapter)) |
| 6238 | detach_ulds(adapter); | 6295 | detach_ulds(adapter); |
| 6239 | 6296 | ||
| 6297 | disable_interrupts(adapter); | ||
| 6298 | |||
| 6240 | for_each_port(adapter, i) | 6299 | for_each_port(adapter, i) |
| 6241 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) | 6300 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
| 6242 | unregister_netdev(adapter->port[i]); | 6301 | unregister_netdev(adapter->port[i]); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b4b9f6048fe7..b688b32c21fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
| @@ -2171,7 +2171,7 @@ static void sge_rx_timer_cb(unsigned long data) | |||
| 2171 | struct adapter *adap = (struct adapter *)data; | 2171 | struct adapter *adap = (struct adapter *)data; |
| 2172 | struct sge *s = &adap->sge; | 2172 | struct sge *s = &adap->sge; |
| 2173 | 2173 | ||
| 2174 | for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) | 2174 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
| 2175 | for (m = s->starving_fl[i]; m; m &= m - 1) { | 2175 | for (m = s->starving_fl[i]; m; m &= m - 1) { |
| 2176 | struct sge_eth_rxq *rxq; | 2176 | struct sge_eth_rxq *rxq; |
| 2177 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; | 2177 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; |
| @@ -2259,7 +2259,7 @@ static void sge_tx_timer_cb(unsigned long data) | |||
| 2259 | struct adapter *adap = (struct adapter *)data; | 2259 | struct adapter *adap = (struct adapter *)data; |
| 2260 | struct sge *s = &adap->sge; | 2260 | struct sge *s = &adap->sge; |
| 2261 | 2261 | ||
| 2262 | for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) | 2262 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
| 2263 | for (m = s->txq_maperr[i]; m; m &= m - 1) { | 2263 | for (m = s->txq_maperr[i]; m; m &= m - 1) { |
| 2264 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; | 2264 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; |
| 2265 | struct sge_ofld_txq *txq = s->egr_map[id]; | 2265 | struct sge_ofld_txq *txq = s->egr_map[id]; |
| @@ -2741,7 +2741,8 @@ void t4_free_sge_resources(struct adapter *adap) | |||
| 2741 | free_rspq_fl(adap, &adap->sge.intrq, NULL); | 2741 | free_rspq_fl(adap, &adap->sge.intrq, NULL); |
| 2742 | 2742 | ||
| 2743 | /* clear the reverse egress queue map */ | 2743 | /* clear the reverse egress queue map */ |
| 2744 | memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); | 2744 | memset(adap->sge.egr_map, 0, |
| 2745 | adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); | ||
| 2745 | } | 2746 | } |
| 2746 | 2747 | ||
| 2747 | void t4_sge_start(struct adapter *adap) | 2748 | void t4_sge_start(struct adapter *adap) |
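The fixed DECLARE_BITMAP(..., MAX_EGRQ) arrays are now plain unsigned long pointers sized at probe time from the firmware-reported egr_sz, and the timer callbacks above walk BITS_TO_LONGS(egr_sz) words instead of ARRAY_SIZE(). A condensed sketch of that allocate-and-walk pattern, with hypothetical helper names:

	static unsigned long *alloc_queue_bitmap(unsigned int nbits)
	{
		/* Same shape as the starving_fl/txq_maperr allocations in adap_init0() */
		return kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL);
	}

	static void walk_queue_bitmap(const unsigned long *map, unsigned int nbits)
	{
		unsigned int i;
		unsigned long m;

		for (i = 0; i < BITS_TO_LONGS(nbits); i++)
			for (m = map[i]; m; m &= m - 1) {
				unsigned int id = __ffs(m) + i * BITS_PER_LONG;

				pr_debug("queue %u flagged\n", id);	/* act on the set bit */
			}
	}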
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 1abdfa123c6c..ee394dc68303 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
| @@ -4459,6 +4459,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, | |||
| 4459 | } | 4459 | } |
| 4460 | 4460 | ||
| 4461 | /** | 4461 | /** |
| 4462 | * t4_init_devlog_params - initialize adapter->params.devlog | ||
| 4463 | * @adap: the adapter | ||
| 4464 | * | ||
| 4465 | * Initialize various fields of the adapter's Firmware Device Log | ||
| 4466 | * Parameters structure. | ||
| 4467 | */ | ||
| 4468 | int t4_init_devlog_params(struct adapter *adap) | ||
| 4469 | { | ||
| 4470 | struct devlog_params *dparams = &adap->params.devlog; | ||
| 4471 | u32 pf_dparams; | ||
| 4472 | unsigned int devlog_meminfo; | ||
| 4473 | struct fw_devlog_cmd devlog_cmd; | ||
| 4474 | int ret; | ||
| 4475 | |||
| 4476 | /* If we're dealing with newer firmware, the Device Log Parameters | ||
| 4477 | * are stored in a designated register which allows us to access the | ||
| 4478 | * Device Log even if we can't talk to the firmware. | ||
| 4479 | */ | ||
| 4480 | pf_dparams = | ||
| 4481 | t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG)); | ||
| 4482 | if (pf_dparams) { | ||
| 4483 | unsigned int nentries, nentries128; | ||
| 4484 | |||
| 4485 | dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams); | ||
| 4486 | dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4; | ||
| 4487 | |||
| 4488 | nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams); | ||
| 4489 | nentries = (nentries128 + 1) * 128; | ||
| 4490 | dparams->size = nentries * sizeof(struct fw_devlog_e); | ||
| 4491 | |||
| 4492 | return 0; | ||
| 4493 | } | ||
| 4494 | |||
| 4495 | /* Otherwise, ask the firmware for its Device Log Parameters. | ||
| 4496 | */ | ||
| 4497 | memset(&devlog_cmd, 0, sizeof(devlog_cmd)); | ||
| 4498 | devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) | | ||
| 4499 | FW_CMD_REQUEST_F | FW_CMD_READ_F); | ||
| 4500 | devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd)); | ||
| 4501 | ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), | ||
| 4502 | &devlog_cmd); | ||
| 4503 | if (ret) | ||
| 4504 | return ret; | ||
| 4505 | |||
| 4506 | devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog); | ||
| 4507 | dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo); | ||
| 4508 | dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4; | ||
| 4509 | dparams->size = ntohl(devlog_cmd.memsize_devlog); | ||
| 4510 | |||
| 4511 | return 0; | ||
| 4512 | } | ||
| 4513 | |||
| 4514 | /** | ||
| 4462 | * t4_init_sge_params - initialize adap->params.sge | 4515 | * t4_init_sge_params - initialize adap->params.sge |
| 4463 | * @adapter: the adapter | 4516 | * @adapter: the adapter |
| 4464 | * | 4517 | * |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 231a725f6d5d..326674b19983 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
| @@ -63,6 +63,8 @@ | |||
| 63 | #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | 63 | #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) |
| 64 | #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | 64 | #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) |
| 65 | 65 | ||
| 66 | #define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | ||
| 67 | |||
| 66 | #define SGE_PF_KDOORBELL_A 0x0 | 68 | #define SGE_PF_KDOORBELL_A 0x0 |
| 67 | 69 | ||
| 68 | #define QID_S 15 | 70 | #define QID_S 15 |
| @@ -707,6 +709,7 @@ | |||
| 707 | #define PFNUM_V(x) ((x) << PFNUM_S) | 709 | #define PFNUM_V(x) ((x) << PFNUM_S) |
| 708 | 710 | ||
| 709 | #define PCIE_FW_A 0x30b8 | 711 | #define PCIE_FW_A 0x30b8 |
| 712 | #define PCIE_FW_PF_A 0x30bc | ||
| 710 | 713 | ||
| 711 | #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 | 714 | #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 |
| 712 | 715 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 9b353a88cbda..a4a19e0ec7f5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
| @@ -101,7 +101,7 @@ enum fw_wr_opcodes { | |||
| 101 | FW_RI_BIND_MW_WR = 0x18, | 101 | FW_RI_BIND_MW_WR = 0x18, |
| 102 | FW_RI_FR_NSMR_WR = 0x19, | 102 | FW_RI_FR_NSMR_WR = 0x19, |
| 103 | FW_RI_INV_LSTAG_WR = 0x1a, | 103 | FW_RI_INV_LSTAG_WR = 0x1a, |
| 104 | FW_LASTC2E_WR = 0x40 | 104 | FW_LASTC2E_WR = 0x70 |
| 105 | }; | 105 | }; |
| 106 | 106 | ||
| 107 | struct fw_wr_hdr { | 107 | struct fw_wr_hdr { |
| @@ -993,6 +993,7 @@ enum fw_memtype_cf { | |||
| 993 | FW_MEMTYPE_CF_EXTMEM = 0x2, | 993 | FW_MEMTYPE_CF_EXTMEM = 0x2, |
| 994 | FW_MEMTYPE_CF_FLASH = 0x4, | 994 | FW_MEMTYPE_CF_FLASH = 0x4, |
| 995 | FW_MEMTYPE_CF_INTERNAL = 0x5, | 995 | FW_MEMTYPE_CF_INTERNAL = 0x5, |
| 996 | FW_MEMTYPE_CF_EXTMEM1 = 0x6, | ||
| 996 | }; | 997 | }; |
| 997 | 998 | ||
| 998 | struct fw_caps_config_cmd { | 999 | struct fw_caps_config_cmd { |
| @@ -1035,6 +1036,7 @@ enum fw_params_mnem { | |||
| 1035 | FW_PARAMS_MNEM_PFVF = 2, /* function params */ | 1036 | FW_PARAMS_MNEM_PFVF = 2, /* function params */ |
| 1036 | FW_PARAMS_MNEM_REG = 3, /* limited register access */ | 1037 | FW_PARAMS_MNEM_REG = 3, /* limited register access */ |
| 1037 | FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ | 1038 | FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ |
| 1039 | FW_PARAMS_MNEM_CHNET = 5, /* chnet params */ | ||
| 1038 | FW_PARAMS_MNEM_LAST | 1040 | FW_PARAMS_MNEM_LAST |
| 1039 | }; | 1041 | }; |
| 1040 | 1042 | ||
| @@ -3102,7 +3104,8 @@ enum fw_devlog_facility { | |||
| 3102 | FW_DEVLOG_FACILITY_FCOE = 0x2E, | 3104 | FW_DEVLOG_FACILITY_FCOE = 0x2E, |
| 3103 | FW_DEVLOG_FACILITY_FOISCSI = 0x30, | 3105 | FW_DEVLOG_FACILITY_FOISCSI = 0x30, |
| 3104 | FW_DEVLOG_FACILITY_FOFCOE = 0x32, | 3106 | FW_DEVLOG_FACILITY_FOFCOE = 0x32, |
| 3105 | FW_DEVLOG_FACILITY_MAX = 0x32, | 3107 | FW_DEVLOG_FACILITY_CHNET = 0x34, |
| 3108 | FW_DEVLOG_FACILITY_MAX = 0x34, | ||
| 3106 | }; | 3109 | }; |
| 3107 | 3110 | ||
| 3108 | /* log message format */ | 3111 | /* log message format */ |
| @@ -3139,4 +3142,36 @@ struct fw_devlog_cmd { | |||
| 3139 | (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ | 3142 | (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ |
| 3140 | FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) | 3143 | FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) |
| 3141 | 3144 | ||
| 3145 | /* P C I E F W P F 7 R E G I S T E R */ | ||
| 3146 | |||
| 3147 | /* PF7 stores the Firmware Device Log parameters which allows Host Drivers to | ||
| 3148 | * access the "devlog" which needing to contact firmware. The encoding is | ||
| 3149 | * mostly the same as that returned by the DEVLOG command except for the size | ||
| 3150 | * which is encoded as the number of entries in multiples-1 of 128 here rather | ||
| 3151 | * than the memory size as is done in the DEVLOG command. Thus, 0 means 128 | ||
| 3152 | * and 15 means 2048. This of course in turn constrains the allowed values | ||
| 3153 | * for the devlog size ... | ||
| 3154 | */ | ||
| 3155 | #define PCIE_FW_PF_DEVLOG 7 | ||
| 3156 | |||
| 3157 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28 | ||
| 3158 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf | ||
| 3159 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \ | ||
| 3160 | ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S) | ||
| 3161 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \ | ||
| 3162 | (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \ | ||
| 3163 | PCIE_FW_PF_DEVLOG_NENTRIES128_M) | ||
| 3164 | |||
| 3165 | #define PCIE_FW_PF_DEVLOG_ADDR16_S 4 | ||
| 3166 | #define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff | ||
| 3167 | #define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S) | ||
| 3168 | #define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \ | ||
| 3169 | (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M) | ||
| 3170 | |||
| 3171 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0 | ||
| 3172 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf | ||
| 3173 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S) | ||
| 3174 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \ | ||
| 3175 | (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M) | ||
| 3176 | |||
| 3142 | #endif /* _T4FW_INTERFACE_H_ */ | 3177 | #endif /* _T4FW_INTERFACE_H_ */ |
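A worked example of the PCIE_FW_PF_DEVLOG encoding described above, using a made-up register value; this is how t4_init_devlog_params() interprets the word:

	u32 pf_dparams = 0xf0001232;	/* hypothetical register contents */

	/* NENTRIES128 = 0xf -> (15 + 1) * 128 = 2048 log entries */
	unsigned int nentries = (PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams) + 1) * 128;

	/* ADDR16 = 0x000123 -> byte address 0x1230 */
	unsigned int start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;

	/* MEMTYPE = 0x2 -> FW_MEMTYPE_CF_EXTMEM */
	unsigned int memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);

	/* devlog size in bytes */
	unsigned int size = nentries * sizeof(struct fw_devlog_e);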
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index e2bd3f747858..b9d1cbac0eee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | |||
| @@ -36,13 +36,13 @@ | |||
| 36 | #define __T4FW_VERSION_H__ | 36 | #define __T4FW_VERSION_H__ |
| 37 | 37 | ||
| 38 | #define T4FW_VERSION_MAJOR 0x01 | 38 | #define T4FW_VERSION_MAJOR 0x01 |
| 39 | #define T4FW_VERSION_MINOR 0x0C | 39 | #define T4FW_VERSION_MINOR 0x0D |
| 40 | #define T4FW_VERSION_MICRO 0x19 | 40 | #define T4FW_VERSION_MICRO 0x20 |
| 41 | #define T4FW_VERSION_BUILD 0x00 | 41 | #define T4FW_VERSION_BUILD 0x00 |
| 42 | 42 | ||
| 43 | #define T5FW_VERSION_MAJOR 0x01 | 43 | #define T5FW_VERSION_MAJOR 0x01 |
| 44 | #define T5FW_VERSION_MINOR 0x0C | 44 | #define T5FW_VERSION_MINOR 0x0D |
| 45 | #define T5FW_VERSION_MICRO 0x19 | 45 | #define T5FW_VERSION_MICRO 0x20 |
| 46 | #define T5FW_VERSION_BUILD 0x00 | 46 | #define T5FW_VERSION_BUILD 0x00 |
| 47 | 47 | ||
| 48 | #endif | 48 | #endif |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 0545f0de1c52..e0d711071afb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
| @@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, | |||
| 1004 | ? (tq->pidx - 1) | 1004 | ? (tq->pidx - 1) |
| 1005 | : (tq->size - 1)); | 1005 | : (tq->size - 1)); |
| 1006 | __be64 *src = (__be64 *)&tq->desc[index]; | 1006 | __be64 *src = (__be64 *)&tq->desc[index]; |
| 1007 | __be64 __iomem *dst = (__be64 *)(tq->bar2_addr + | 1007 | __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr + |
| 1008 | SGE_UDB_WCDOORBELL); | 1008 | SGE_UDB_WCDOORBELL); |
| 1009 | unsigned int count = EQ_UNIT / sizeof(__be64); | 1009 | unsigned int count = EQ_UNIT / sizeof(__be64); |
| 1010 | 1010 | ||
| @@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, | |||
| 1018 | * DMA. | 1018 | * DMA. |
| 1019 | */ | 1019 | */ |
| 1020 | while (count) { | 1020 | while (count) { |
| 1021 | writeq(*src, dst); | 1021 | /* the (__force u64) is because the compiler |
| 1022 | * doesn't understand the endian swizzling | ||
| 1023 | * going on | ||
| 1024 | */ | ||
| 1025 | writeq((__force u64)*src, dst); | ||
| 1022 | src++; | 1026 | src++; |
| 1023 | dst++; | 1027 | dst++; |
| 1024 | count--; | 1028 | count--; |
| @@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1252 | BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); | 1256 | BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); |
| 1253 | wr = (void *)&txq->q.desc[txq->q.pidx]; | 1257 | wr = (void *)&txq->q.desc[txq->q.pidx]; |
| 1254 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); | 1258 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); |
| 1255 | wr->r3[0] = cpu_to_be64(0); | 1259 | wr->r3[0] = cpu_to_be32(0); |
| 1256 | wr->r3[1] = cpu_to_be64(0); | 1260 | wr->r3[1] = cpu_to_be32(0); |
| 1257 | skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); | 1261 | skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); |
| 1258 | end = (u64 *)wr + flits; | 1262 | end = (u64 *)wr + flits; |
| 1259 | 1263 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 1b5506df35b1..280b4a215849 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
| @@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
| 210 | 210 | ||
| 211 | if (rpl) { | 211 | if (rpl) { |
| 212 | /* request bit in high-order BE word */ | 212 | /* request bit in high-order BE word */ |
| 213 | WARN_ON((be32_to_cpu(*(const u32 *)cmd) | 213 | WARN_ON((be32_to_cpu(*(const __be32 *)cmd) |
| 214 | & FW_CMD_REQUEST_F) == 0); | 214 | & FW_CMD_REQUEST_F) == 0); |
| 215 | get_mbox_rpl(adapter, rpl, size, mbox_data); | 215 | get_mbox_rpl(adapter, rpl, size, mbox_data); |
| 216 | WARN_ON((be32_to_cpu(*(u32 *)rpl) | 216 | WARN_ON((be32_to_cpu(*(__be32 *)rpl) |
| 217 | & FW_CMD_REQUEST_F) != 0); | 217 | & FW_CMD_REQUEST_F) != 0); |
| 218 | } | 218 | } |
| 219 | t4_write_reg(adapter, mbox_ctl, | 219 | t4_write_reg(adapter, mbox_ctl, |
| @@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter, | |||
| 484 | * o The BAR2 Queue ID. | 484 | * o The BAR2 Queue ID. |
| 485 | * o The BAR2 Queue ID Offset into the BAR2 page. | 485 | * o The BAR2 Queue ID Offset into the BAR2 page. |
| 486 | */ | 486 | */ |
| 487 | bar2_page_offset = ((qid >> qpp_shift) << page_shift); | 487 | bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); |
| 488 | bar2_qid = qid & qpp_mask; | 488 | bar2_qid = qid & qpp_mask; |
| 489 | bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; | 489 | bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; |
| 490 | 490 | ||
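The cast added in t4_bar2_sge_qregs() above avoids a 32-bit shift overflow: without it, (qid >> qpp_shift) << page_shift is computed in 32 bits and only then widened. A standalone illustration with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t qid = 0x300000;		/* hypothetical large queue id */
		unsigned int qpp_shift = 0, page_shift = 16;

		/* 32-bit shift: the high bits are lost before the assignment widens it */
		uint64_t truncated = (qid >> qpp_shift) << page_shift;

		/* widen first, then shift: full 64-bit result (0x3000000000) */
		uint64_t correct = (uint64_t)(qid >> qpp_shift) << page_shift;

		printf("truncated=%#llx correct=%#llx\n",
		       (unsigned long long)truncated, (unsigned long long)correct);
		return 0;
	}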
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 27de37aa90af..27b9fe99a9bd 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
| @@ -354,6 +354,7 @@ struct be_vf_cfg { | |||
| 354 | u16 vlan_tag; | 354 | u16 vlan_tag; |
| 355 | u32 tx_rate; | 355 | u32 tx_rate; |
| 356 | u32 plink_tracking; | 356 | u32 plink_tracking; |
| 357 | u32 privileges; | ||
| 357 | }; | 358 | }; |
| 358 | 359 | ||
| 359 | enum vf_state { | 360 | enum vf_state { |
| @@ -423,6 +424,7 @@ struct be_adapter { | |||
| 423 | 424 | ||
| 424 | u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ | 425 | u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ |
| 425 | u8 __iomem *db; /* Door Bell */ | 426 | u8 __iomem *db; /* Door Bell */ |
| 427 | u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */ | ||
| 426 | 428 | ||
| 427 | struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ | 429 | struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ |
| 428 | struct be_dma_mem mbox_mem; | 430 | struct be_dma_mem mbox_mem; |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 36916cfa70f9..7f05f309e935 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
| @@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, | |||
| 1902 | { | 1902 | { |
| 1903 | int num_eqs, i = 0; | 1903 | int num_eqs, i = 0; |
| 1904 | 1904 | ||
| 1905 | if (lancer_chip(adapter) && num > 8) { | 1905 | while (num) { |
| 1906 | while (num) { | 1906 | num_eqs = min(num, 8); |
| 1907 | num_eqs = min(num, 8); | 1907 | __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); |
| 1908 | __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); | 1908 | i += num_eqs; |
| 1909 | i += num_eqs; | 1909 | num -= num_eqs; |
| 1910 | num -= num_eqs; | ||
| 1911 | } | ||
| 1912 | } else { | ||
| 1913 | __be_cmd_modify_eqd(adapter, set_eqd, num); | ||
| 1914 | } | 1910 | } |
| 1915 | 1911 | ||
| 1916 | return 0; | 1912 | return 0; |
| @@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, | |||
| 1918 | 1914 | ||
| 1919 | /* Uses synchronous mcc */ | 1915 | /* Uses synchronous mcc */ |
| 1920 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | 1916 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
| 1921 | u32 num) | 1917 | u32 num, u32 domain) |
| 1922 | { | 1918 | { |
| 1923 | struct be_mcc_wrb *wrb; | 1919 | struct be_mcc_wrb *wrb; |
| 1924 | struct be_cmd_req_vlan_config *req; | 1920 | struct be_cmd_req_vlan_config *req; |
| @@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | |||
| 1936 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 1932 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
| 1937 | OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), | 1933 | OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), |
| 1938 | wrb, NULL); | 1934 | wrb, NULL); |
| 1935 | req->hdr.domain = domain; | ||
| 1939 | 1936 | ||
| 1940 | req->interface_id = if_id; | 1937 | req->interface_id = if_id; |
| 1941 | req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; | 1938 | req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; |
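be_cmd_modify_eqd() above now always splits the request into firmware commands of at most 8 EQ-delay entries, rather than doing so only for Lancer with more than 8. The chunking idiom, reduced to a hypothetical helper for illustration:

	static int modify_eqd_chunked(struct be_adapter *adapter,
				      struct be_set_eqd *set_eqd, int num)
	{
		int i = 0;

		while (num) {
			int num_eqs = min(num, 8);	/* at most 8 EQs per FW command */

			__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
			i += num_eqs;
			num -= num_eqs;
		}
		return 0;
	}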
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index db761e8e42a3..a7634a3f052a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
| @@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, | |||
| 2256 | int be_cmd_get_fw_ver(struct be_adapter *adapter); | 2256 | int be_cmd_get_fw_ver(struct be_adapter *adapter); |
| 2257 | int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); | 2257 | int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); |
| 2258 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | 2258 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
| 2259 | u32 num); | 2259 | u32 num, u32 domain); |
| 2260 | int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); | 2260 | int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); |
| 2261 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); | 2261 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); |
| 2262 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); | 2262 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0a816859aca5..e6b790f0d9dc 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter) | |||
| 1171 | for_each_set_bit(i, adapter->vids, VLAN_N_VID) | 1171 | for_each_set_bit(i, adapter->vids, VLAN_N_VID) |
| 1172 | vids[num++] = cpu_to_le16(i); | 1172 | vids[num++] = cpu_to_le16(i); |
| 1173 | 1173 | ||
| 1174 | status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); | 1174 | status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0); |
| 1175 | if (status) { | 1175 | if (status) { |
| 1176 | dev_err(dev, "Setting HW VLAN filtering failed\n"); | 1176 | dev_err(dev, "Setting HW VLAN filtering failed\n"); |
| 1177 | /* Set to VLAN promisc mode as setting VLAN filter failed */ | 1177 | /* Set to VLAN promisc mode as setting VLAN filter failed */ |
| @@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf, | |||
| 1380 | return 0; | 1380 | return 0; |
| 1381 | } | 1381 | } |
| 1382 | 1382 | ||
| 1383 | static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan) | ||
| 1384 | { | ||
| 1385 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | ||
| 1386 | u16 vids[BE_NUM_VLANS_SUPPORTED]; | ||
| 1387 | int vf_if_id = vf_cfg->if_handle; | ||
| 1388 | int status; | ||
| 1389 | |||
| 1390 | /* Enable Transparent VLAN Tagging */ | ||
| 1391 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0); | ||
| 1392 | if (status) | ||
| 1393 | return status; | ||
| 1394 | |||
| 1395 | /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */ | ||
| 1396 | vids[0] = 0; | ||
| 1397 | status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1); | ||
| 1398 | if (!status) | ||
| 1399 | dev_info(&adapter->pdev->dev, | ||
| 1400 | "Cleared guest VLANs on VF%d", vf); | ||
| 1401 | |||
| 1402 | /* After TVT is enabled, disallow VFs to program VLAN filters */ | ||
| 1403 | if (vf_cfg->privileges & BE_PRIV_FILTMGMT) { | ||
| 1404 | status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges & | ||
| 1405 | ~BE_PRIV_FILTMGMT, vf + 1); | ||
| 1406 | if (!status) | ||
| 1407 | vf_cfg->privileges &= ~BE_PRIV_FILTMGMT; | ||
| 1408 | } | ||
| 1409 | return 0; | ||
| 1410 | } | ||
| 1411 | |||
| 1412 | static int be_clear_vf_tvt(struct be_adapter *adapter, int vf) | ||
| 1413 | { | ||
| 1414 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | ||
| 1415 | struct device *dev = &adapter->pdev->dev; | ||
| 1416 | int status; | ||
| 1417 | |||
| 1418 | /* Reset Transparent VLAN Tagging. */ | ||
| 1419 | status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1, | ||
| 1420 | vf_cfg->if_handle, 0); | ||
| 1421 | if (status) | ||
| 1422 | return status; | ||
| 1423 | |||
| 1424 | /* Allow VFs to program VLAN filtering */ | ||
| 1425 | if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { | ||
| 1426 | status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges | | ||
| 1427 | BE_PRIV_FILTMGMT, vf + 1); | ||
| 1428 | if (!status) { | ||
| 1429 | vf_cfg->privileges |= BE_PRIV_FILTMGMT; | ||
| 1430 | dev_info(dev, "VF%d: FILTMGMT priv enabled", vf); | ||
| 1431 | } | ||
| 1432 | } | ||
| 1433 | |||
| 1434 | dev_info(dev, | ||
| 1435 | "Disable/re-enable i/f in VM to clear Transparent VLAN tag"); | ||
| 1436 | return 0; | ||
| 1437 | } | ||
| 1438 | |||
| 1383 | static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) | 1439 | static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) |
| 1384 | { | 1440 | { |
| 1385 | struct be_adapter *adapter = netdev_priv(netdev); | 1441 | struct be_adapter *adapter = netdev_priv(netdev); |
| 1386 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | 1442 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; |
| 1387 | int status = 0; | 1443 | int status; |
| 1388 | 1444 | ||
| 1389 | if (!sriov_enabled(adapter)) | 1445 | if (!sriov_enabled(adapter)) |
| 1390 | return -EPERM; | 1446 | return -EPERM; |
| @@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) | |||
| 1394 | 1450 | ||
| 1395 | if (vlan || qos) { | 1451 | if (vlan || qos) { |
| 1396 | vlan |= qos << VLAN_PRIO_SHIFT; | 1452 | vlan |= qos << VLAN_PRIO_SHIFT; |
| 1397 | if (vf_cfg->vlan_tag != vlan) | 1453 | status = be_set_vf_tvt(adapter, vf, vlan); |
| 1398 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | ||
| 1399 | vf_cfg->if_handle, 0); | ||
| 1400 | } else { | 1454 | } else { |
| 1401 | /* Reset Transparent Vlan Tagging. */ | 1455 | status = be_clear_vf_tvt(adapter, vf); |
| 1402 | status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, | ||
| 1403 | vf + 1, vf_cfg->if_handle, 0); | ||
| 1404 | } | 1456 | } |
| 1405 | 1457 | ||
| 1406 | if (status) { | 1458 | if (status) { |
| 1407 | dev_err(&adapter->pdev->dev, | 1459 | dev_err(&adapter->pdev->dev, |
| 1408 | "VLAN %d config on VF %d failed : %#x\n", vlan, | 1460 | "VLAN %d config on VF %d failed : %#x\n", vlan, vf, |
| 1409 | vf, status); | 1461 | status); |
| 1410 | return be_cmd_status(status); | 1462 | return be_cmd_status(status); |
| 1411 | } | 1463 | } |
| 1412 | 1464 | ||
| 1413 | vf_cfg->vlan_tag = vlan; | 1465 | vf_cfg->vlan_tag = vlan; |
| 1414 | |||
| 1415 | return 0; | 1466 | return 0; |
| 1416 | } | 1467 | } |
| 1417 | 1468 | ||
| @@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter) | |||
| 2772 | } | 2823 | } |
| 2773 | } | 2824 | } |
| 2774 | } else { | 2825 | } else { |
| 2775 | pci_read_config_dword(adapter->pdev, | 2826 | ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW); |
| 2776 | PCICFG_UE_STATUS_LOW, &ue_lo); | 2827 | ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH); |
| 2777 | pci_read_config_dword(adapter->pdev, | 2828 | ue_lo_mask = ioread32(adapter->pcicfg + |
| 2778 | PCICFG_UE_STATUS_HIGH, &ue_hi); | 2829 | PCICFG_UE_STATUS_LOW_MASK); |
| 2779 | pci_read_config_dword(adapter->pdev, | 2830 | ue_hi_mask = ioread32(adapter->pcicfg + |
| 2780 | PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); | 2831 | PCICFG_UE_STATUS_HI_MASK); |
| 2781 | pci_read_config_dword(adapter->pdev, | ||
| 2782 | PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); | ||
| 2783 | 2832 | ||
| 2784 | ue_lo = (ue_lo & ~ue_lo_mask); | 2833 | ue_lo = (ue_lo & ~ue_lo_mask); |
| 2785 | ue_hi = (ue_hi & ~ue_hi_mask); | 2834 | ue_hi = (ue_hi & ~ue_hi_mask); |
| @@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, | |||
| 3339 | u32 cap_flags, u32 vf) | 3388 | u32 cap_flags, u32 vf) |
| 3340 | { | 3389 | { |
| 3341 | u32 en_flags; | 3390 | u32 en_flags; |
| 3342 | int status; | ||
| 3343 | 3391 | ||
| 3344 | en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | | 3392 | en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | |
| 3345 | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | | 3393 | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | |
| @@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, | |||
| 3347 | 3395 | ||
| 3348 | en_flags &= cap_flags; | 3396 | en_flags &= cap_flags; |
| 3349 | 3397 | ||
| 3350 | status = be_cmd_if_create(adapter, cap_flags, en_flags, | 3398 | return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); |
| 3351 | if_handle, vf); | ||
| 3352 | |||
| 3353 | return status; | ||
| 3354 | } | 3399 | } |
| 3355 | 3400 | ||
| 3356 | static int be_vfs_if_create(struct be_adapter *adapter) | 3401 | static int be_vfs_if_create(struct be_adapter *adapter) |
| @@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter) | |||
| 3368 | if (!BE3_chip(adapter)) { | 3413 | if (!BE3_chip(adapter)) { |
| 3369 | status = be_cmd_get_profile_config(adapter, &res, | 3414 | status = be_cmd_get_profile_config(adapter, &res, |
| 3370 | vf + 1); | 3415 | vf + 1); |
| 3371 | if (!status) | 3416 | if (!status) { |
| 3372 | cap_flags = res.if_cap_flags; | 3417 | cap_flags = res.if_cap_flags; |
| 3418 | /* Prevent VFs from enabling VLAN promiscuous | ||
| 3419 | * mode | ||
| 3420 | */ | ||
| 3421 | cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; | ||
| 3422 | } | ||
| 3373 | } | 3423 | } |
| 3374 | 3424 | ||
| 3375 | status = be_if_create(adapter, &vf_cfg->if_handle, | 3425 | status = be_if_create(adapter, &vf_cfg->if_handle, |
| @@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter) | |||
| 3403 | struct device *dev = &adapter->pdev->dev; | 3453 | struct device *dev = &adapter->pdev->dev; |
| 3404 | struct be_vf_cfg *vf_cfg; | 3454 | struct be_vf_cfg *vf_cfg; |
| 3405 | int status, old_vfs, vf; | 3455 | int status, old_vfs, vf; |
| 3406 | u32 privileges; | ||
| 3407 | 3456 | ||
| 3408 | old_vfs = pci_num_vf(adapter->pdev); | 3457 | old_vfs = pci_num_vf(adapter->pdev); |
| 3409 | 3458 | ||
| @@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter) | |||
| 3433 | 3482 | ||
| 3434 | for_all_vfs(adapter, vf_cfg, vf) { | 3483 | for_all_vfs(adapter, vf_cfg, vf) { |
| 3435 | /* Allow VFs to program MAC/VLAN filters */ | 3484 | /* Allow VFs to program MAC/VLAN filters */ |
| 3436 | status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); | 3485 | status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges, |
| 3437 | if (!status && !(privileges & BE_PRIV_FILTMGMT)) { | 3486 | vf + 1); |
| 3487 | if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { | ||
| 3438 | status = be_cmd_set_fn_privileges(adapter, | 3488 | status = be_cmd_set_fn_privileges(adapter, |
| 3439 | privileges | | 3489 | vf_cfg->privileges | |
| 3440 | BE_PRIV_FILTMGMT, | 3490 | BE_PRIV_FILTMGMT, |
| 3441 | vf + 1); | 3491 | vf + 1); |
| 3442 | if (!status) | 3492 | if (!status) { |
| 3493 | vf_cfg->privileges |= BE_PRIV_FILTMGMT; | ||
| 3443 | dev_info(dev, "VF%d has FILTMGMT privilege\n", | 3494 | dev_info(dev, "VF%d has FILTMGMT privilege\n", |
| 3444 | vf); | 3495 | vf); |
| 3496 | } | ||
| 3445 | } | 3497 | } |
| 3446 | 3498 | ||
| 3447 | /* Allow full available bandwidth */ | 3499 | /* Allow full available bandwidth */ |
| @@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) | |||
| 4820 | 4872 | ||
| 4821 | static int be_map_pci_bars(struct be_adapter *adapter) | 4873 | static int be_map_pci_bars(struct be_adapter *adapter) |
| 4822 | { | 4874 | { |
| 4875 | struct pci_dev *pdev = adapter->pdev; | ||
| 4823 | u8 __iomem *addr; | 4876 | u8 __iomem *addr; |
| 4824 | 4877 | ||
| 4825 | if (BEx_chip(adapter) && be_physfn(adapter)) { | 4878 | if (BEx_chip(adapter) && be_physfn(adapter)) { |
| 4826 | adapter->csr = pci_iomap(adapter->pdev, 2, 0); | 4879 | adapter->csr = pci_iomap(pdev, 2, 0); |
| 4827 | if (!adapter->csr) | 4880 | if (!adapter->csr) |
| 4828 | return -ENOMEM; | 4881 | return -ENOMEM; |
| 4829 | } | 4882 | } |
| 4830 | 4883 | ||
| 4831 | addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); | 4884 | addr = pci_iomap(pdev, db_bar(adapter), 0); |
| 4832 | if (!addr) | 4885 | if (!addr) |
| 4833 | goto pci_map_err; | 4886 | goto pci_map_err; |
| 4834 | adapter->db = addr; | 4887 | adapter->db = addr; |
| 4835 | 4888 | ||
| 4889 | if (skyhawk_chip(adapter) || BEx_chip(adapter)) { | ||
| 4890 | if (be_physfn(adapter)) { | ||
| 4891 | /* PCICFG is the 2nd BAR in BE2 */ | ||
| 4892 | addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0); | ||
| 4893 | if (!addr) | ||
| 4894 | goto pci_map_err; | ||
| 4895 | adapter->pcicfg = addr; | ||
| 4896 | } else { | ||
| 4897 | adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; | ||
| 4898 | } | ||
| 4899 | } | ||
| 4900 | |||
| 4836 | be_roce_map_pci_bars(adapter); | 4901 | be_roce_map_pci_bars(adapter); |
| 4837 | return 0; | 4902 | return 0; |
| 4838 | 4903 | ||
| 4839 | pci_map_err: | 4904 | pci_map_err: |
| 4840 | dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); | 4905 | dev_err(&pdev->dev, "Error in mapping PCI BARs\n"); |
| 4841 | be_unmap_pci_bars(adapter); | 4906 | be_unmap_pci_bars(adapter); |
| 4842 | return -ENOMEM; | 4907 | return -ENOMEM; |
| 4843 | } | 4908 | } |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 78e1ce09b1ab..f6a3a7abd468 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -1954,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
| 1954 | struct fec_enet_private *fep = netdev_priv(ndev); | 1954 | struct fec_enet_private *fep = netdev_priv(ndev); |
| 1955 | struct device_node *node; | 1955 | struct device_node *node; |
| 1956 | int err = -ENXIO, i; | 1956 | int err = -ENXIO, i; |
| 1957 | u32 mii_speed, holdtime; | ||
| 1957 | 1958 | ||
| 1958 | /* | 1959 | /* |
| 1959 | * The i.MX28 dual fec interfaces are not equal. | 1960 | * The i.MX28 dual fec interfaces are not equal. |
| @@ -1991,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
| 1991 | * Reference Manual has an error on this, and gets fixed on i.MX6Q | 1992 | * Reference Manual has an error on this, and gets fixed on i.MX6Q |
| 1992 | * document. | 1993 | * document. |
| 1993 | */ | 1994 | */ |
| 1994 | fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); | 1995 | mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); |
| 1995 | if (fep->quirks & FEC_QUIRK_ENET_MAC) | 1996 | if (fep->quirks & FEC_QUIRK_ENET_MAC) |
| 1996 | fep->phy_speed--; | 1997 | mii_speed--; |
| 1997 | fep->phy_speed <<= 1; | 1998 | if (mii_speed > 63) { |
| 1999 | dev_err(&pdev->dev, | ||
| 2000 | "fec clock (%lu) to fast to get right mii speed\n", | ||
| 2001 | clk_get_rate(fep->clk_ipg)); | ||
| 2002 | err = -EINVAL; | ||
| 2003 | goto err_out; | ||
| 2004 | } | ||
| 2005 | |||
| 2006 | /* | ||
| 2007 | * The i.MX28 and i.MX6 types have another field in the MSCR (aka | ||
| 2008 | * MII_SPEED) register that defines the MDIO output hold time. Earlier | ||
| 2009 | * versions are RAZ there, so just ignore the difference and write the | ||
| 2010 | * register always. | ||
| 2011 | * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns. | ||
| 2012 | * HOLDTIME + 1 is the number of clk cycles the fec is holding the | ||
| 2013 | * output. | ||
| 2014 | * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). | ||
| 2015 | * Given that ceil(clkrate / 5000000) <= 64, the calculation for | ||
| 2016 | * holdtime cannot result in a value greater than 3. | ||
| 2017 | */ | ||
| 2018 | holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; | ||
| 2019 | |||
| 2020 | fep->phy_speed = mii_speed << 1 | holdtime << 8; | ||
| 2021 | |||
| 1998 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | 2022 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
| 1999 | 2023 | ||
| 2000 | fep->mii_bus = mdiobus_alloc(); | 2024 | fep->mii_bus = mdiobus_alloc(); |
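The MSCR arithmetic in the comment above is easier to check with concrete numbers. The stand-alone sketch below (not driver code; the 66 MHz ipg clock rate and the ENET_MAC quirk are assumed for illustration) reproduces the mii_speed and holdtime calculation in plain C:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk_rate = 66000000UL;	/* assumed i.MX ipg clock */
	unsigned int mii_speed, holdtime, mscr;

	/* MDC <= 2.5 MHz: ceil(66 MHz / 5 MHz) = 14, minus 1 assuming the
	 * ENET_MAC quirk; well below the 63 limit checked above.
	 */
	mii_speed = DIV_ROUND_UP(clk_rate, 5000000) - 1;

	/* ceil(66 MHz / 100 MHz) - 1 = 0, i.e. HOLDTIME+1 = 1 clock cycle
	 * (~15 ns), which satisfies the 10 ns minimum hold time.
	 */
	holdtime = DIV_ROUND_UP(clk_rate, 100000000) - 1;

	mscr = mii_speed << 1 | holdtime << 8;
	printf("mii_speed=%u holdtime=%u MSCR=0x%x\n", mii_speed, holdtime, mscr);
	return 0;
}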
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 357e8b576905..56b774d3a13d 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c | |||
| @@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev) | |||
| 3893 | ugeth->phy_interface = phy_interface; | 3893 | ugeth->phy_interface = phy_interface; |
| 3894 | ugeth->max_speed = max_speed; | 3894 | ugeth->max_speed = max_speed; |
| 3895 | 3895 | ||
| 3896 | /* Carrier starts down, phylib will bring it up */ | ||
| 3897 | netif_carrier_off(dev); | ||
| 3898 | |||
| 3896 | err = register_netdev(dev); | 3899 | err = register_netdev(dev); |
| 3897 | if (err) { | 3900 | if (err) { |
| 3898 | if (netif_msg_probe(ugeth)) | 3901 | if (netif_msg_probe(ugeth)) |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 96208f17bb53..2db653225a0e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
| @@ -2658,16 +2658,11 @@ static int mvneta_stop(struct net_device *dev) | |||
| 2658 | static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 2658 | static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
| 2659 | { | 2659 | { |
| 2660 | struct mvneta_port *pp = netdev_priv(dev); | 2660 | struct mvneta_port *pp = netdev_priv(dev); |
| 2661 | int ret; | ||
| 2662 | 2661 | ||
| 2663 | if (!pp->phy_dev) | 2662 | if (!pp->phy_dev) |
| 2664 | return -ENOTSUPP; | 2663 | return -ENOTSUPP; |
| 2665 | 2664 | ||
| 2666 | ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); | 2665 | return phy_mii_ioctl(pp->phy_dev, ifr, cmd); |
| 2667 | if (!ret) | ||
| 2668 | mvneta_adjust_link(dev); | ||
| 2669 | |||
| 2670 | return ret; | ||
| 2671 | } | 2666 | } |
| 2672 | 2667 | ||
| 2673 | /* Ethtool methods */ | 2668 | /* Ethtool methods */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index a681d7c0bb9f..546ca4226916 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
| @@ -724,7 +724,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |||
| 724 | * on the host, we deprecate the error message for this | 724 | * on the host, we deprecate the error message for this |
| 725 | * specific command/input_mod/opcode_mod/fw-status to be debug. | 725 | * specific command/input_mod/opcode_mod/fw-status to be debug. |
| 726 | */ | 726 | */ |
| 727 | if (op == MLX4_CMD_SET_PORT && in_modifier == 1 && | 727 | if (op == MLX4_CMD_SET_PORT && |
| 728 | (in_modifier == 1 || in_modifier == 2) && | ||
| 728 | op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE) | 729 | op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE) |
| 729 | mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", | 730 | mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", |
| 730 | op, context->fw_status); | 731 | op, context->fw_status); |
| @@ -1993,7 +1994,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, | |||
| 1993 | goto reset_slave; | 1994 | goto reset_slave; |
| 1994 | slave_state[slave].vhcr_dma = ((u64) param) << 48; | 1995 | slave_state[slave].vhcr_dma = ((u64) param) << 48; |
| 1995 | priv->mfunc.master.slave_state[slave].cookie = 0; | 1996 | priv->mfunc.master.slave_state[slave].cookie = 0; |
| 1996 | mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]); | ||
| 1997 | break; | 1997 | break; |
| 1998 | case MLX4_COMM_CMD_VHCR1: | 1998 | case MLX4_COMM_CMD_VHCR1: |
| 1999 | if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) | 1999 | if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) |
| @@ -2225,6 +2225,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) | |||
| 2225 | for (i = 0; i < dev->num_slaves; ++i) { | 2225 | for (i = 0; i < dev->num_slaves; ++i) { |
| 2226 | s_state = &priv->mfunc.master.slave_state[i]; | 2226 | s_state = &priv->mfunc.master.slave_state[i]; |
| 2227 | s_state->last_cmd = MLX4_COMM_CMD_RESET; | 2227 | s_state->last_cmd = MLX4_COMM_CMD_RESET; |
| 2228 | mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]); | ||
| 2228 | for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) | 2229 | for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) |
| 2229 | s_state->event_eq[j].eqn = -1; | 2230 | s_state->event_eq[j].eqn = -1; |
| 2230 | __raw_writel((__force u32) 0, | 2231 | __raw_writel((__force u32) 0, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index ebce5bb24df9..3485acf03014 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -2805,13 +2805,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
| 2805 | netif_carrier_off(dev); | 2805 | netif_carrier_off(dev); |
| 2806 | mlx4_en_set_default_moderation(priv); | 2806 | mlx4_en_set_default_moderation(priv); |
| 2807 | 2807 | ||
| 2808 | err = register_netdev(dev); | ||
| 2809 | if (err) { | ||
| 2810 | en_err(priv, "Netdev registration failed for port %d\n", port); | ||
| 2811 | goto out; | ||
| 2812 | } | ||
| 2813 | priv->registered = 1; | ||
| 2814 | |||
| 2815 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); | 2808 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); |
| 2816 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); | 2809 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); |
| 2817 | 2810 | ||
| @@ -2853,6 +2846,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
| 2853 | 2846 | ||
| 2854 | mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); | 2847 | mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); |
| 2855 | 2848 | ||
| 2849 | err = register_netdev(dev); | ||
| 2850 | if (err) { | ||
| 2851 | en_err(priv, "Netdev registration failed for port %d\n", port); | ||
| 2852 | goto out; | ||
| 2853 | } | ||
| 2854 | |||
| 2855 | priv->registered = 1; | ||
| 2856 | |||
| 2856 | return 0; | 2857 | return 0; |
| 2857 | 2858 | ||
| 2858 | out: | 2859 | out: |
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 264bc15c1ff2..6e70ffee8e87 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
| @@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work) | |||
| 153 | 153 | ||
| 154 | /* All active slaves need to receive the event */ | 154 | /* All active slaves need to receive the event */ |
| 155 | if (slave == ALL_SLAVES) { | 155 | if (slave == ALL_SLAVES) { |
| 156 | for (i = 0; i < dev->num_slaves; i++) { | 156 | for (i = 0; i <= dev->persist->num_vfs; i++) { |
| 157 | if (i != dev->caps.function && | 157 | if (mlx4_GEN_EQE(dev, i, eqe)) |
| 158 | master->slave_state[i].active) | 158 | mlx4_warn(dev, "Failed to generate event for slave %d\n", |
| 159 | if (mlx4_GEN_EQE(dev, i, eqe)) | 159 | i); |
| 160 | mlx4_warn(dev, "Failed to generate event for slave %d\n", | ||
| 161 | i); | ||
| 162 | } | 160 | } |
| 163 | } else { | 161 | } else { |
| 164 | if (mlx4_GEN_EQE(dev, slave, eqe)) | 162 | if (mlx4_GEN_EQE(dev, slave, eqe)) |
| @@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave, | |||
| 203 | struct mlx4_eqe *eqe) | 201 | struct mlx4_eqe *eqe) |
| 204 | { | 202 | { |
| 205 | struct mlx4_priv *priv = mlx4_priv(dev); | 203 | struct mlx4_priv *priv = mlx4_priv(dev); |
| 206 | struct mlx4_slave_state *s_slave = | ||
| 207 | &priv->mfunc.master.slave_state[slave]; | ||
| 208 | 204 | ||
| 209 | if (!s_slave->active) { | 205 | if (slave < 0 || slave > dev->persist->num_vfs || |
| 210 | /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/ | 206 | slave == dev->caps.function || |
| 207 | !priv->mfunc.master.slave_state[slave].active) | ||
| 211 | return; | 208 | return; |
| 212 | } | ||
| 213 | 209 | ||
| 214 | slave_event(dev, slave, eqe); | 210 | slave_event(dev, slave, eqe); |
| 215 | } | 211 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index d97ca88c55b5..6e413ac4e940 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -3095,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) | |||
| 3095 | if (!priv->mfunc.master.slave_state) | 3095 | if (!priv->mfunc.master.slave_state) |
| 3096 | return -EINVAL; | 3096 | return -EINVAL; |
| 3097 | 3097 | ||
| 3098 | /* check for slave valid, slave not PF, and slave active */ | ||
| 3099 | if (slave < 0 || slave > dev->persist->num_vfs || | ||
| 3100 | slave == dev->caps.function || | ||
| 3101 | !priv->mfunc.master.slave_state[slave].active) | ||
| 3102 | return 0; | ||
| 3103 | |||
| 3098 | event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; | 3104 | event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; |
| 3099 | 3105 | ||
| 3100 | /* Create the event only if the slave is registered */ | 3106 | /* Create the event only if the slave is registered */ |
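Both mlx4_slave_event() and mlx4_GEN_EQE() now apply the same slave-validity test. A helper capturing that predicate could look like the sketch below; it is illustrative only and is not part of the patch:

/* Illustrative helper, not in the patch: true when an event may be
 * generated for @slave (valid index, not the PF itself, and active).
 */
static bool mlx4_slave_can_recv_eqe(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return slave >= 0 && slave <= dev->persist->num_vfs &&
	       slave != dev->caps.function &&
	       priv->mfunc.master.slave_state[slave].active;
}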
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 9fb6948e14c6..5cecec282aba 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c | |||
| @@ -4468,10 +4468,16 @@ static int rocker_port_master_changed(struct net_device *dev) | |||
| 4468 | struct net_device *master = netdev_master_upper_dev_get(dev); | 4468 | struct net_device *master = netdev_master_upper_dev_get(dev); |
| 4469 | int err = 0; | 4469 | int err = 0; |
| 4470 | 4470 | ||
| 4471 | /* There are currently three cases handled here: | ||
| 4472 | * 1. Joining a bridge | ||
| 4473 | * 2. Leaving a previously joined bridge | ||
| 4474 | * 3. Other, e.g. being added to or removed from a bond or openvswitch, | ||
| 4475 | * in which case nothing is done | ||
| 4476 | */ | ||
| 4471 | if (master && master->rtnl_link_ops && | 4477 | if (master && master->rtnl_link_ops && |
| 4472 | !strcmp(master->rtnl_link_ops->kind, "bridge")) | 4478 | !strcmp(master->rtnl_link_ops->kind, "bridge")) |
| 4473 | err = rocker_port_bridge_join(rocker_port, master); | 4479 | err = rocker_port_bridge_join(rocker_port, master); |
| 4474 | else | 4480 | else if (rocker_port_is_bridged(rocker_port)) |
| 4475 | err = rocker_port_bridge_leave(rocker_port); | 4481 | err = rocker_port_bridge_leave(rocker_port); |
| 4476 | 4482 | ||
| 4477 | return err; | 4483 | return err; |
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 924ea98bd531..54549a6223dd 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h | |||
| @@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr); | |||
| 114 | rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); | 114 | rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); |
| 115 | int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev); | 115 | int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev); |
| 116 | void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); | 116 | void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); |
| 117 | bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6); | 117 | struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, |
| 118 | const void *iaddr, bool is_v6); | ||
| 119 | bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); | ||
| 118 | struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, | 120 | struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, |
| 119 | const void *iaddr, bool is_v6); | 121 | const void *iaddr, bool is_v6); |
| 120 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); | 122 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); |
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 2a175006028b..b7877a194cfe 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c | |||
| @@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) | |||
| 81 | hash = (addr->atype == IPVL_IPV6) ? | 81 | hash = (addr->atype == IPVL_IPV6) ? |
| 82 | ipvlan_get_v6_hash(&addr->ip6addr) : | 82 | ipvlan_get_v6_hash(&addr->ip6addr) : |
| 83 | ipvlan_get_v4_hash(&addr->ip4addr); | 83 | ipvlan_get_v4_hash(&addr->ip4addr); |
| 84 | hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); | 84 | if (hlist_unhashed(&addr->hlnode)) |
| 85 | hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); | ||
| 85 | } | 86 | } |
| 86 | 87 | ||
| 87 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) | 88 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) |
| 88 | { | 89 | { |
| 89 | hlist_del_rcu(&addr->hlnode); | 90 | hlist_del_init_rcu(&addr->hlnode); |
| 90 | if (sync) | 91 | if (sync) |
| 91 | synchronize_rcu(); | 92 | synchronize_rcu(); |
| 92 | } | 93 | } |
| 93 | 94 | ||
| 94 | bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) | 95 | struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, |
| 96 | const void *iaddr, bool is_v6) | ||
| 95 | { | 97 | { |
| 96 | struct ipvl_port *port = ipvlan->port; | ||
| 97 | struct ipvl_addr *addr; | 98 | struct ipvl_addr *addr; |
| 98 | 99 | ||
| 99 | list_for_each_entry(addr, &ipvlan->addrs, anode) { | 100 | list_for_each_entry(addr, &ipvlan->addrs, anode) { |
| @@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) | |||
| 101 | ipv6_addr_equal(&addr->ip6addr, iaddr)) || | 102 | ipv6_addr_equal(&addr->ip6addr, iaddr)) || |
| 102 | (!is_v6 && addr->atype == IPVL_IPV4 && | 103 | (!is_v6 && addr->atype == IPVL_IPV4 && |
| 103 | addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) | 104 | addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) |
| 104 | return true; | 105 | return addr; |
| 105 | } | 106 | } |
| 107 | return NULL; | ||
| 108 | } | ||
| 109 | |||
| 110 | bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6) | ||
| 111 | { | ||
| 112 | struct ipvl_dev *ipvlan; | ||
| 106 | 113 | ||
| 107 | if (ipvlan_ht_addr_lookup(port, iaddr, is_v6)) | 114 | ASSERT_RTNL(); |
| 108 | return true; | ||
| 109 | 115 | ||
| 116 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | ||
| 117 | if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) | ||
| 118 | return true; | ||
| 119 | } | ||
| 110 | return false; | 120 | return false; |
| 111 | } | 121 | } |
| 112 | 122 | ||
| @@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, | |||
| 192 | if (skb->protocol == htons(ETH_P_PAUSE)) | 202 | if (skb->protocol == htons(ETH_P_PAUSE)) |
| 193 | return; | 203 | return; |
| 194 | 204 | ||
| 195 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | 205 | rcu_read_lock(); |
| 206 | list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { | ||
| 196 | if (local && (ipvlan == in_dev)) | 207 | if (local && (ipvlan == in_dev)) |
| 197 | continue; | 208 | continue; |
| 198 | 209 | ||
| @@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, | |||
| 219 | mcast_acct: | 230 | mcast_acct: |
| 220 | ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); | 231 | ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); |
| 221 | } | 232 | } |
| 233 | rcu_read_unlock(); | ||
| 222 | 234 | ||
| 223 | /* Locally generated? ...Forward a copy to the main-device as | 235 | /* Locally generated? ...Forward a copy to the main-device as |
| 224 | * well. On the RX side we'll ignore it (wont give it to any | 236 | * well. On the RX side we'll ignore it (wont give it to any |
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 4f4099d5603d..4fa14208d799 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
| @@ -505,7 +505,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) | |||
| 505 | if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { | 505 | if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { |
| 506 | list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { | 506 | list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { |
| 507 | ipvlan_ht_addr_del(addr, !dev->dismantle); | 507 | ipvlan_ht_addr_del(addr, !dev->dismantle); |
| 508 | list_del_rcu(&addr->anode); | 508 | list_del(&addr->anode); |
| 509 | } | 509 | } |
| 510 | } | 510 | } |
| 511 | list_del_rcu(&ipvlan->pnode); | 511 | list_del_rcu(&ipvlan->pnode); |
| @@ -607,7 +607,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
| 607 | { | 607 | { |
| 608 | struct ipvl_addr *addr; | 608 | struct ipvl_addr *addr; |
| 609 | 609 | ||
| 610 | if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) { | 610 | if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) { |
| 611 | netif_err(ipvlan, ifup, ipvlan->dev, | 611 | netif_err(ipvlan, ifup, ipvlan->dev, |
| 612 | "Failed to add IPv6=%pI6c addr for %s intf\n", | 612 | "Failed to add IPv6=%pI6c addr for %s intf\n", |
| 613 | ip6_addr, ipvlan->dev->name); | 613 | ip6_addr, ipvlan->dev->name); |
| @@ -620,9 +620,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
| 620 | addr->master = ipvlan; | 620 | addr->master = ipvlan; |
| 621 | memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); | 621 | memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); |
| 622 | addr->atype = IPVL_IPV6; | 622 | addr->atype = IPVL_IPV6; |
| 623 | list_add_tail_rcu(&addr->anode, &ipvlan->addrs); | 623 | list_add_tail(&addr->anode, &ipvlan->addrs); |
| 624 | ipvlan->ipv6cnt++; | 624 | ipvlan->ipv6cnt++; |
| 625 | ipvlan_ht_addr_add(ipvlan, addr); | 625 | /* If the interface is not up, the address will be added to the hash |
| 626 | * list by ipvlan_open. | ||
| 627 | */ | ||
| 628 | if (netif_running(ipvlan->dev)) | ||
| 629 | ipvlan_ht_addr_add(ipvlan, addr); | ||
| 626 | 630 | ||
| 627 | return 0; | 631 | return 0; |
| 628 | } | 632 | } |
| @@ -631,12 +635,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
| 631 | { | 635 | { |
| 632 | struct ipvl_addr *addr; | 636 | struct ipvl_addr *addr; |
| 633 | 637 | ||
| 634 | addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true); | 638 | addr = ipvlan_find_addr(ipvlan, ip6_addr, true); |
| 635 | if (!addr) | 639 | if (!addr) |
| 636 | return; | 640 | return; |
| 637 | 641 | ||
| 638 | ipvlan_ht_addr_del(addr, true); | 642 | ipvlan_ht_addr_del(addr, true); |
| 639 | list_del_rcu(&addr->anode); | 643 | list_del(&addr->anode); |
| 640 | ipvlan->ipv6cnt--; | 644 | ipvlan->ipv6cnt--; |
| 641 | WARN_ON(ipvlan->ipv6cnt < 0); | 645 | WARN_ON(ipvlan->ipv6cnt < 0); |
| 642 | kfree_rcu(addr, rcu); | 646 | kfree_rcu(addr, rcu); |
| @@ -675,7 +679,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
| 675 | { | 679 | { |
| 676 | struct ipvl_addr *addr; | 680 | struct ipvl_addr *addr; |
| 677 | 681 | ||
| 678 | if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) { | 682 | if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) { |
| 679 | netif_err(ipvlan, ifup, ipvlan->dev, | 683 | netif_err(ipvlan, ifup, ipvlan->dev, |
| 680 | "Failed to add IPv4=%pI4 on %s intf.\n", | 684 | "Failed to add IPv4=%pI4 on %s intf.\n", |
| 681 | ip4_addr, ipvlan->dev->name); | 685 | ip4_addr, ipvlan->dev->name); |
| @@ -688,9 +692,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
| 688 | addr->master = ipvlan; | 692 | addr->master = ipvlan; |
| 689 | memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); | 693 | memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); |
| 690 | addr->atype = IPVL_IPV4; | 694 | addr->atype = IPVL_IPV4; |
| 691 | list_add_tail_rcu(&addr->anode, &ipvlan->addrs); | 695 | list_add_tail(&addr->anode, &ipvlan->addrs); |
| 692 | ipvlan->ipv4cnt++; | 696 | ipvlan->ipv4cnt++; |
| 693 | ipvlan_ht_addr_add(ipvlan, addr); | 697 | /* If the interface is not up, the address will be added to the hash |
| 698 | * list by ipvlan_open. | ||
| 699 | */ | ||
| 700 | if (netif_running(ipvlan->dev)) | ||
| 701 | ipvlan_ht_addr_add(ipvlan, addr); | ||
| 694 | ipvlan_set_broadcast_mac_filter(ipvlan, true); | 702 | ipvlan_set_broadcast_mac_filter(ipvlan, true); |
| 695 | 703 | ||
| 696 | return 0; | 704 | return 0; |
| @@ -700,12 +708,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
| 700 | { | 708 | { |
| 701 | struct ipvl_addr *addr; | 709 | struct ipvl_addr *addr; |
| 702 | 710 | ||
| 703 | addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false); | 711 | addr = ipvlan_find_addr(ipvlan, ip4_addr, false); |
| 704 | if (!addr) | 712 | if (!addr) |
| 705 | return; | 713 | return; |
| 706 | 714 | ||
| 707 | ipvlan_ht_addr_del(addr, true); | 715 | ipvlan_ht_addr_del(addr, true); |
| 708 | list_del_rcu(&addr->anode); | 716 | list_del(&addr->anode); |
| 709 | ipvlan->ipv4cnt--; | 717 | ipvlan->ipv4cnt--; |
| 710 | WARN_ON(ipvlan->ipv4cnt < 0); | 718 | WARN_ON(ipvlan->ipv4cnt < 0); |
| 711 | if (!ipvlan->ipv4cnt) | 719 | if (!ipvlan->ipv4cnt) |
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 5c55f11572ba..75d6f26729a3 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c | |||
| @@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
| 188 | memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); | 188 | memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); |
| 189 | skb_put(skb, sizeof(padbytes)); | 189 | skb_put(skb, sizeof(padbytes)); |
| 190 | } | 190 | } |
| 191 | |||
| 192 | usbnet_set_skb_tx_stats(skb, 1, 0); | ||
| 191 | return skb; | 193 | return skb; |
| 192 | } | 194 | } |
| 193 | 195 | ||
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 9311a08565be..4545e78840b0 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
| @@ -522,6 +522,7 @@ static const struct driver_info wwan_info = { | |||
| 522 | #define DELL_VENDOR_ID 0x413C | 522 | #define DELL_VENDOR_ID 0x413C |
| 523 | #define REALTEK_VENDOR_ID 0x0bda | 523 | #define REALTEK_VENDOR_ID 0x0bda |
| 524 | #define SAMSUNG_VENDOR_ID 0x04e8 | 524 | #define SAMSUNG_VENDOR_ID 0x04e8 |
| 525 | #define LENOVO_VENDOR_ID 0x17ef | ||
| 525 | 526 | ||
| 526 | static const struct usb_device_id products[] = { | 527 | static const struct usb_device_id products[] = { |
| 527 | /* BLACKLIST !! | 528 | /* BLACKLIST !! |
| @@ -702,6 +703,13 @@ static const struct usb_device_id products[] = { | |||
| 702 | .driver_info = 0, | 703 | .driver_info = 0, |
| 703 | }, | 704 | }, |
| 704 | 705 | ||
| 706 | /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ | ||
| 707 | { | ||
| 708 | USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM, | ||
| 709 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
| 710 | .driver_info = 0, | ||
| 711 | }, | ||
| 712 | |||
| 705 | /* WHITELIST!!! | 713 | /* WHITELIST!!! |
| 706 | * | 714 | * |
| 707 | * CDC Ether uses two interfaces, not necessarily consecutive. | 715 | * CDC Ether uses two interfaces, not necessarily consecutive. |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 80a844e0ae03..c3e4da9e79ca 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
| @@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) | |||
| 1172 | 1172 | ||
| 1173 | /* return skb */ | 1173 | /* return skb */ |
| 1174 | ctx->tx_curr_skb = NULL; | 1174 | ctx->tx_curr_skb = NULL; |
| 1175 | dev->net->stats.tx_packets += ctx->tx_curr_frame_num; | ||
| 1176 | 1175 | ||
| 1177 | /* keep private stats: framing overhead and number of NTBs */ | 1176 | /* keep private stats: framing overhead and number of NTBs */ |
| 1178 | ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; | 1177 | ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; |
| 1179 | ctx->tx_ntbs++; | 1178 | ctx->tx_ntbs++; |
| 1180 | 1179 | ||
| 1181 | /* usbnet has already counted all the framing overhead. | 1180 | /* usbnet will count all the framing overhead by default. |
| 1182 | * Adjust the stats so that the tx_bytes counter shows real | 1181 | * Adjust the stats so that the tx_bytes counter shows real |
| 1183 | * payload data instead. | 1182 | * payload data instead. |
| 1184 | */ | 1183 | */ |
| 1185 | dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; | 1184 | usbnet_set_skb_tx_stats(skb_out, n, |
| 1185 | ctx->tx_curr_frame_payload - skb_out->len); | ||
| 1186 | 1186 | ||
| 1187 | return skb_out; | 1187 | return skb_out; |
| 1188 | 1188 | ||
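The hunks above call a new usbnet_set_skb_tx_stats() helper whose definition is not shown in this diff. Judging from how entry->packets and entry->length are consumed in tx_complete() and usbnet_start_xmit() in the usbnet.c hunks further down, a plausible minimal form — an assumption, not the patch's actual definition — would stash the packet count and a byte adjustment in the skb control block:

/* Assumed sketch (not the actual definition, which presumably lives
 * alongside struct skb_data in the usbnet headers): record how many
 * logical packets this URB carries and a byte delta (negative when
 * framing overhead should be excluded from tx_bytes).
 */
static inline void usbnet_set_skb_tx_stats(struct sk_buff *skb,
					   unsigned long packets,
					   long bytes_delta)
{
	struct skb_data *entry = (struct skb_data *)skb->cb;	/* usbnet per-skb state */

	entry->packets = packets;
	entry->length = bytes_delta;
}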
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index fe48f4c51373..1762ad3910b2 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c | |||
| @@ -46,8 +46,7 @@ enum cx82310_status { | |||
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | #define CMD_PACKET_SIZE 64 | 48 | #define CMD_PACKET_SIZE 64 |
| 49 | /* first command after power on can take around 8 seconds */ | 49 | #define CMD_TIMEOUT 100 |
| 50 | #define CMD_TIMEOUT 15000 | ||
| 51 | #define CMD_REPLY_RETRY 5 | 50 | #define CMD_REPLY_RETRY 5 |
| 52 | 51 | ||
| 53 | #define CX82310_MTU 1514 | 52 | #define CX82310_MTU 1514 |
| @@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, | |||
| 78 | ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, | 77 | ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, |
| 79 | CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); | 78 | CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); |
| 80 | if (ret < 0) { | 79 | if (ret < 0) { |
| 81 | dev_err(&dev->udev->dev, "send command %#x: error %d\n", | 80 | if (cmd != CMD_GET_LINK_STATUS) |
| 82 | cmd, ret); | 81 | dev_err(&dev->udev->dev, "send command %#x: error %d\n", |
| 82 | cmd, ret); | ||
| 83 | goto end; | 83 | goto end; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| @@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, | |||
| 90 | buf, CMD_PACKET_SIZE, &actual_len, | 90 | buf, CMD_PACKET_SIZE, &actual_len, |
| 91 | CMD_TIMEOUT); | 91 | CMD_TIMEOUT); |
| 92 | if (ret < 0) { | 92 | if (ret < 0) { |
| 93 | dev_err(&dev->udev->dev, | 93 | if (cmd != CMD_GET_LINK_STATUS) |
| 94 | "reply receive error %d\n", ret); | 94 | dev_err(&dev->udev->dev, |
| 95 | "reply receive error %d\n", | ||
| 96 | ret); | ||
| 95 | goto end; | 97 | goto end; |
| 96 | } | 98 | } |
| 97 | if (actual_len > 0) | 99 | if (actual_len > 0) |
| @@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 134 | int ret; | 136 | int ret; |
| 135 | char buf[15]; | 137 | char buf[15]; |
| 136 | struct usb_device *udev = dev->udev; | 138 | struct usb_device *udev = dev->udev; |
| 139 | u8 link[3]; | ||
| 140 | int timeout = 50; | ||
| 137 | 141 | ||
| 138 | /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ | 142 | /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ |
| 139 | if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 | 143 | if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 |
| @@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 160 | if (!dev->partial_data) | 164 | if (!dev->partial_data) |
| 161 | return -ENOMEM; | 165 | return -ENOMEM; |
| 162 | 166 | ||
| 167 | /* wait for firmware to become ready (indicated by the link being up) */ | ||
| 168 | while (--timeout) { | ||
| 169 | ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0, | ||
| 170 | link, sizeof(link)); | ||
| 171 | /* the command can time out during boot - it's not an error */ | ||
| 172 | if (!ret && link[0] == 1 && link[2] == 1) | ||
| 173 | break; | ||
| 174 | msleep(500); | ||
| 175 | } | ||
| 176 | if (!timeout) { | ||
| 177 | dev_err(&udev->dev, "firmware not ready in time\n"); | ||
| 178 | return -ETIMEDOUT; | ||
| 179 | } | ||
| 180 | |||
| 163 | /* enable ethernet mode (?) */ | 181 | /* enable ethernet mode (?) */ |
| 164 | ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); | 182 | ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); |
| 165 | if (ret) { | 183 | if (ret) { |
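With CMD_TIMEOUT dropped to 100 ms, readiness is handled by the polling loop added above instead: 50 tries spaced 500 ms apart allow roughly 25 s for the firmware to boot, comfortably more than the "around 8 seconds" mentioned in the removed comment. A generic user-space rendering of the same retry pattern (illustrative only; the retry count and delay are taken from the hunk, the probe is a stand-in):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int attempts;

static bool probe_ready(void)
{
	return ++attempts >= 3;	/* pretend the device becomes ready on try 3 */
}

static int wait_until_ready(bool (*probe)(void), int retries, int delay_ms)
{
	while (retries--) {
		if (probe())
			return 0;
		usleep(delay_ms * 1000);
	}
	return -1;	/* give up: never became ready */
}

int main(void)
{
	printf("ready: %d\n", wait_until_ready(probe_ready, 50, 500));
	return 0;
}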
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 438fc6bcaef1..9f7c0ab3b349 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -492,6 +492,7 @@ enum rtl8152_flags { | |||
| 492 | /* Define these values to match your device */ | 492 | /* Define these values to match your device */ |
| 493 | #define VENDOR_ID_REALTEK 0x0bda | 493 | #define VENDOR_ID_REALTEK 0x0bda |
| 494 | #define VENDOR_ID_SAMSUNG 0x04e8 | 494 | #define VENDOR_ID_SAMSUNG 0x04e8 |
| 495 | #define VENDOR_ID_LENOVO 0x17ef | ||
| 495 | 496 | ||
| 496 | #define MCU_TYPE_PLA 0x0100 | 497 | #define MCU_TYPE_PLA 0x0100 |
| 497 | #define MCU_TYPE_USB 0x0000 | 498 | #define MCU_TYPE_USB 0x0000 |
| @@ -4037,6 +4038,7 @@ static struct usb_device_id rtl8152_table[] = { | |||
| 4037 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, | 4038 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, |
| 4038 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, | 4039 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, |
| 4039 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, | 4040 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, |
| 4041 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, | ||
| 4040 | {} | 4042 | {} |
| 4041 | }; | 4043 | }; |
| 4042 | 4044 | ||
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index b94a0fbb8b3b..953de13267df 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c | |||
| @@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
| 144 | skb_put(skb, sizeof(padbytes)); | 144 | skb_put(skb, sizeof(padbytes)); |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | usbnet_set_skb_tx_stats(skb, 1, 0); | ||
| 147 | return skb; | 148 | return skb; |
| 148 | } | 149 | } |
| 149 | 150 | ||
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 449835f4331e..777757ae1973 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
| @@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb) | |||
| 1188 | struct usbnet *dev = entry->dev; | 1188 | struct usbnet *dev = entry->dev; |
| 1189 | 1189 | ||
| 1190 | if (urb->status == 0) { | 1190 | if (urb->status == 0) { |
| 1191 | if (!(dev->driver_info->flags & FLAG_MULTI_PACKET)) | 1191 | dev->net->stats.tx_packets += entry->packets; |
| 1192 | dev->net->stats.tx_packets++; | ||
| 1193 | dev->net->stats.tx_bytes += entry->length; | 1192 | dev->net->stats.tx_bytes += entry->length; |
| 1194 | } else { | 1193 | } else { |
| 1195 | dev->net->stats.tx_errors++; | 1194 | dev->net->stats.tx_errors++; |
| @@ -1347,7 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
| 1347 | } else | 1346 | } else |
| 1348 | urb->transfer_flags |= URB_ZERO_PACKET; | 1347 | urb->transfer_flags |= URB_ZERO_PACKET; |
| 1349 | } | 1348 | } |
| 1350 | entry->length = urb->transfer_buffer_length = length; | 1349 | urb->transfer_buffer_length = length; |
| 1350 | |||
| 1351 | if (info->flags & FLAG_MULTI_PACKET) { | ||
| 1352 | /* Driver has set number of packets and a length delta. | ||
| 1353 | * Calculate the complete length and ensure that it's | ||
| 1354 | * positive. | ||
| 1355 | */ | ||
| 1356 | entry->length += length; | ||
| 1357 | if (WARN_ON_ONCE(entry->length <= 0)) | ||
| 1358 | entry->length = length; | ||
| 1359 | } else { | ||
| 1360 | usbnet_set_skb_tx_stats(skb, 1, length); | ||
| 1361 | } | ||
| 1351 | 1362 | ||
| 1352 | spin_lock_irqsave(&dev->txq.lock, flags); | 1363 | spin_lock_irqsave(&dev->txq.lock, flags); |
| 1353 | retval = usb_autopm_get_interface_async(dev->intf); | 1364 | retval = usb_autopm_get_interface_async(dev->intf); |
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index cb366adc820b..f50a6bc5d06e 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c | |||
| @@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif) | |||
| 219 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 219 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
| 220 | struct ath_vif *avp = (void *)vif->drv_priv; | 220 | struct ath_vif *avp = (void *)vif->drv_priv; |
| 221 | struct ath_buf *bf = avp->av_bcbuf; | 221 | struct ath_buf *bf = avp->av_bcbuf; |
| 222 | struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon; | ||
| 222 | 223 | ||
| 223 | ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n", | 224 | ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n", |
| 224 | avp->av_bslot); | 225 | avp->av_bslot); |
| 225 | 226 | ||
| 226 | tasklet_disable(&sc->bcon_tasklet); | 227 | tasklet_disable(&sc->bcon_tasklet); |
| 227 | 228 | ||
| 229 | cur_conf->enable_beacon &= ~BIT(avp->av_bslot); | ||
| 230 | |||
| 228 | if (bf && bf->bf_mpdu) { | 231 | if (bf && bf->bf_mpdu) { |
| 229 | struct sk_buff *skb = bf->bf_mpdu; | 232 | struct sk_buff *skb = bf->bf_mpdu; |
| 230 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | 233 | dma_unmap_single(sc->dev, bf->bf_buf_addr, |
| @@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, | |||
| 521 | } | 524 | } |
| 522 | 525 | ||
| 523 | if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { | 526 | if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { |
| 524 | if ((vif->type != NL80211_IFTYPE_AP) || | 527 | if (vif->type != NL80211_IFTYPE_AP) { |
| 525 | (sc->nbcnvifs > 1)) { | ||
| 526 | ath_dbg(common, CONFIG, | 528 | ath_dbg(common, CONFIG, |
| 527 | "An AP interface is already present !\n"); | 529 | "An AP interface is already present !\n"); |
| 528 | return false; | 530 | return false; |
| @@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif, | |||
| 616 | * enabling/disabling SWBA. | 618 | * enabling/disabling SWBA. |
| 617 | */ | 619 | */ |
| 618 | if (changed & BSS_CHANGED_BEACON_ENABLED) { | 620 | if (changed & BSS_CHANGED_BEACON_ENABLED) { |
| 619 | if (!bss_conf->enable_beacon && | 621 | bool enabled = cur_conf->enable_beacon; |
| 620 | (sc->nbcnvifs <= 1)) { | 622 | |
| 621 | cur_conf->enable_beacon = false; | 623 | if (!bss_conf->enable_beacon) { |
| 622 | } else if (bss_conf->enable_beacon) { | 624 | cur_conf->enable_beacon &= ~BIT(avp->av_bslot); |
| 623 | cur_conf->enable_beacon = true; | 625 | } else { |
| 624 | ath9k_cache_beacon_config(sc, ctx, bss_conf); | 626 | cur_conf->enable_beacon |= BIT(avp->av_bslot); |
| 627 | if (!enabled) | ||
| 628 | ath9k_cache_beacon_config(sc, ctx, bss_conf); | ||
| 625 | } | 629 | } |
| 626 | } | 630 | } |
| 627 | 631 | ||
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index 2b79a568e803..d23737342f4f 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h | |||
| @@ -54,7 +54,7 @@ struct ath_beacon_config { | |||
| 54 | u16 dtim_period; | 54 | u16 dtim_period; |
| 55 | u16 bmiss_timeout; | 55 | u16 bmiss_timeout; |
| 56 | u8 dtim_count; | 56 | u8 dtim_count; |
| 57 | bool enable_beacon; | 57 | u8 enable_beacon; |
| 58 | bool ibss_creator; | 58 | bool ibss_creator; |
| 59 | u32 nexttbtt; | 59 | u32 nexttbtt; |
| 60 | u32 intval; | 60 | u32 intval; |
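With enable_beacon widened from a bool to a u8 bitmask (one bit per beacon slot, as used in the beacon.c hunk above), beaconing stays enabled as long as any slot bit remains set. A stand-alone illustration of that bookkeeping (not driver code; the slot numbers are assumed):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1U << (n))

int main(void)
{
	uint8_t enable_beacon = 0;
	int slot0 = 0, slot1 = 1;	/* assumed av_bslot values */

	enable_beacon |= BIT(slot0);	/* first AP vif enables beaconing */
	enable_beacon |= BIT(slot1);	/* second AP vif keeps it enabled */
	enable_beacon &= ~BIT(slot0);	/* removing one vif clears only its bit */

	/* beaconing stays on as long as any slot bit is still set */
	printf("beacons %s\n", enable_beacon ? "enabled" : "disabled");
	return 0;
}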
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 60aa8d71e753..8529014e1a5e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
| @@ -424,7 +424,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah) | |||
| 424 | ah->power_mode = ATH9K_PM_UNDEFINED; | 424 | ah->power_mode = ATH9K_PM_UNDEFINED; |
| 425 | ah->htc_reset_init = true; | 425 | ah->htc_reset_init = true; |
| 426 | 426 | ||
| 427 | ah->tpc_enabled = true; | 427 | ah->tpc_enabled = false; |
| 428 | 428 | ||
| 429 | ah->ani_function = ATH9K_ANI_ALL; | 429 | ah->ani_function = ATH9K_ANI_ALL; |
| 430 | if (!AR_SREV_9300_20_OR_LATER(ah)) | 430 | if (!AR_SREV_9300_20_OR_LATER(ah)) |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c index defb7a44e0bc..7748a1ccf14f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c | |||
| @@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) | |||
| 126 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); | 126 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); |
| 127 | if (drvr->bus_if->wowl_supported) | 127 | if (drvr->bus_if->wowl_supported) |
| 128 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); | 128 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); |
| 129 | brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); | 129 | if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID) |
| 130 | brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); | ||
| 130 | 131 | ||
| 131 | /* set chip related quirks */ | 132 | /* set chip related quirks */ |
| 132 | switch (drvr->bus_if->chip) { | 133 | switch (drvr->bus_if->chip) { |
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index a6f22c32a279..3811878ab9cd 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h | |||
| @@ -708,7 +708,6 @@ struct iwl_priv { | |||
| 708 | unsigned long reload_jiffies; | 708 | unsigned long reload_jiffies; |
| 709 | int reload_count; | 709 | int reload_count; |
| 710 | bool ucode_loaded; | 710 | bool ucode_loaded; |
| 711 | bool init_ucode_run; /* Don't run init uCode again */ | ||
| 712 | 711 | ||
| 713 | u8 plcp_delta_threshold; | 712 | u8 plcp_delta_threshold; |
| 714 | 713 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 47e64e8b9517..cceb026e0793 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c | |||
| @@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
| 1114 | scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | | 1114 | scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | |
| 1115 | BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); | 1115 | BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); |
| 1116 | 1116 | ||
| 1117 | if (vif) | 1117 | if (drop) { |
| 1118 | scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); | 1118 | IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", |
| 1119 | 1119 | scd_queues); | |
| 1120 | IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues); | 1120 | if (iwlagn_txfifo_flush(priv, scd_queues)) { |
| 1121 | if (iwlagn_txfifo_flush(priv, scd_queues)) { | 1121 | IWL_ERR(priv, "flush request fail\n"); |
| 1122 | IWL_ERR(priv, "flush request fail\n"); | 1122 | goto done; |
| 1123 | goto done; | 1123 | } |
| 1124 | } | 1124 | } |
| 1125 | |||
| 1125 | IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); | 1126 | IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); |
| 1126 | iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); | 1127 | iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues); |
| 1127 | done: | 1128 | done: |
| 1128 | mutex_unlock(&priv->mutex); | 1129 | mutex_unlock(&priv->mutex); |
| 1129 | IWL_DEBUG_MAC80211(priv, "leave\n"); | 1130 | IWL_DEBUG_MAC80211(priv, "leave\n"); |
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 4dbef7e58c2e..5244e43bfafb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c | |||
| @@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv) | |||
| 418 | if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) | 418 | if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) |
| 419 | return 0; | 419 | return 0; |
| 420 | 420 | ||
| 421 | if (priv->init_ucode_run) | ||
| 422 | return 0; | ||
| 423 | |||
| 424 | iwl_init_notification_wait(&priv->notif_wait, &calib_wait, | 421 | iwl_init_notification_wait(&priv->notif_wait, &calib_wait, |
| 425 | calib_complete, ARRAY_SIZE(calib_complete), | 422 | calib_complete, ARRAY_SIZE(calib_complete), |
| 426 | iwlagn_wait_calib, priv); | 423 | iwlagn_wait_calib, priv); |
| @@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv) | |||
| 440 | */ | 437 | */ |
| 441 | ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, | 438 | ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, |
| 442 | UCODE_CALIB_TIMEOUT); | 439 | UCODE_CALIB_TIMEOUT); |
| 443 | if (!ret) | ||
| 444 | priv->init_ucode_run = true; | ||
| 445 | 440 | ||
| 446 | goto out; | 441 | goto out; |
| 447 | 442 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 996e7f16adf9..c7154ac42c8c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c | |||
| @@ -1257,6 +1257,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) | |||
| 1257 | op->name, err); | 1257 | op->name, err); |
| 1258 | #endif | 1258 | #endif |
| 1259 | } | 1259 | } |
| 1260 | kfree(pieces); | ||
| 1260 | return; | 1261 | return; |
| 1261 | 1262 | ||
| 1262 | try_again: | 1263 | try_again: |
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index efa9688a4cf1..078f24cf4af3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c | |||
| @@ -1278,6 +1278,9 @@ static void rs_mac80211_tx_status(void *mvm_r, | |||
| 1278 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 1278 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 1279 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1279 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
| 1280 | 1280 | ||
| 1281 | if (!iwl_mvm_sta_from_mac80211(sta)->vif) | ||
| 1282 | return; | ||
| 1283 | |||
| 1281 | if (!ieee80211_is_data(hdr->frame_control) || | 1284 | if (!ieee80211_is_data(hdr->frame_control) || |
| 1282 | info->flags & IEEE80211_TX_CTL_NO_ACK) | 1285 | info->flags & IEEE80211_TX_CTL_NO_ACK) |
| 1283 | return; | 1286 | return; |
| @@ -2511,6 +2514,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, | |||
| 2511 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 2514 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
| 2512 | struct iwl_lq_sta *lq_sta = mvm_sta; | 2515 | struct iwl_lq_sta *lq_sta = mvm_sta; |
| 2513 | 2516 | ||
| 2517 | if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) { | ||
| 2518 | /* if vif isn't initialized mvm doesn't know about | ||
| 2519 | * this station, so don't do anything with the it | ||
| 2520 | */ | ||
| 2521 | sta = NULL; | ||
| 2522 | mvm_sta = NULL; | ||
| 2523 | } | ||
| 2524 | |||
| 2514 | /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ | 2525 | /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ |
| 2515 | 2526 | ||
| 2516 | /* Treat uninitialized rate scaling data same as non-existing. */ | 2527 | /* Treat uninitialized rate scaling data same as non-existing. */ |
| @@ -2827,6 +2838,9 @@ static void rs_rate_update(void *mvm_r, | |||
| 2827 | (struct iwl_op_mode *)mvm_r; | 2838 | (struct iwl_op_mode *)mvm_r; |
| 2828 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 2839 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 2829 | 2840 | ||
| 2841 | if (!iwl_mvm_sta_from_mac80211(sta)->vif) | ||
| 2842 | return; | ||
| 2843 | |||
| 2830 | /* Stop any ongoing aggregations as rs starts off assuming no agg */ | 2844 | /* Stop any ongoing aggregations as rs starts off assuming no agg */ |
| 2831 | for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) | 2845 | for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) |
| 2832 | ieee80211_stop_tx_ba_session(sta, tid); | 2846 | ieee80211_stop_tx_ba_session(sta, tid); |
| @@ -3587,9 +3601,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, | |||
| 3587 | 3601 | ||
| 3588 | MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); | 3602 | MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); |
| 3589 | 3603 | ||
| 3590 | static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) | 3604 | static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir) |
| 3591 | { | 3605 | { |
| 3592 | struct iwl_lq_sta *lq_sta = mvm_sta; | 3606 | struct iwl_lq_sta *lq_sta = priv_sta; |
| 3607 | struct iwl_mvm_sta *mvmsta; | ||
| 3608 | |||
| 3609 | mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta); | ||
| 3610 | |||
| 3611 | if (!mvmsta->vif) | ||
| 3612 | return; | ||
| 3593 | 3613 | ||
| 3594 | debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, | 3614 | debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, |
| 3595 | lq_sta, &rs_sta_dbgfs_scale_table_ops); | 3615 | lq_sta, &rs_sta_dbgfs_scale_table_ops); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index f8d6f306dd76..4b81c0bf63b0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c | |||
| @@ -197,6 +197,8 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, | |||
| 197 | struct iwl_time_event_notif *notif) | 197 | struct iwl_time_event_notif *notif) |
| 198 | { | 198 | { |
| 199 | if (!le32_to_cpu(notif->status)) { | 199 | if (!le32_to_cpu(notif->status)) { |
| 200 | if (te_data->vif->type == NL80211_IFTYPE_STATION) | ||
| 201 | ieee80211_connection_loss(te_data->vif); | ||
| 200 | IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); | 202 | IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); |
| 201 | iwl_mvm_te_clear_data(mvm, te_data); | 203 | iwl_mvm_te_clear_data(mvm, te_data); |
| 202 | return; | 204 | return; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 07304e1fd64a..96a05406babf 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
| @@ -949,8 +949,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
| 949 | mvmsta = iwl_mvm_sta_from_mac80211(sta); | 949 | mvmsta = iwl_mvm_sta_from_mac80211(sta); |
| 950 | tid_data = &mvmsta->tid_data[tid]; | 950 | tid_data = &mvmsta->tid_data[tid]; |
| 951 | 951 | ||
| 952 | if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d", | 952 | if (tid_data->txq_id != scd_flow) { |
| 953 | tid_data->txq_id, tid, scd_flow)) { | 953 | IWL_ERR(mvm, |
| 954 | "invalid BA notification: Q %d, tid %d, flow %d\n", | ||
| 955 | tid_data->txq_id, tid, scd_flow); | ||
| 954 | rcu_read_unlock(); | 956 | rcu_read_unlock(); |
| 955 | return 0; | 957 | return 0; |
| 956 | } | 958 | } |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index dbd6bcf52205..686dd301cd53 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
| @@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
| 368 | /* 3165 Series */ | 368 | /* 3165 Series */ |
| 369 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, | 369 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, |
| 370 | {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, | 370 | {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, |
| 371 | {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, | ||
| 372 | {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, | ||
| 373 | {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, | 371 | {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, |
| 374 | {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, | 372 | {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, |
| 373 | {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, | ||
| 374 | {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, | ||
| 375 | {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, | ||
| 376 | {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, | ||
| 375 | 377 | ||
| 376 | /* 7265 Series */ | 378 | /* 7265 Series */ |
| 377 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 379 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index a62170ea0481..8c45cf44ce24 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c | |||
| @@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) | |||
| 1124 | /*This is for new trx flow*/ | 1124 | /*This is for new trx flow*/ |
| 1125 | struct rtl_tx_buffer_desc *pbuffer_desc = NULL; | 1125 | struct rtl_tx_buffer_desc *pbuffer_desc = NULL; |
| 1126 | u8 temp_one = 1; | 1126 | u8 temp_one = 1; |
| 1127 | u8 *entry; | ||
| 1127 | 1128 | ||
| 1128 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); | 1129 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); |
| 1129 | ring = &rtlpci->tx_ring[BEACON_QUEUE]; | 1130 | ring = &rtlpci->tx_ring[BEACON_QUEUE]; |
| 1130 | pskb = __skb_dequeue(&ring->queue); | 1131 | pskb = __skb_dequeue(&ring->queue); |
| 1131 | if (pskb) | 1132 | if (rtlpriv->use_new_trx_flow) |
| 1133 | entry = (u8 *)(&ring->buffer_desc[ring->idx]); | ||
| 1134 | else | ||
| 1135 | entry = (u8 *)(&ring->desc[ring->idx]); | ||
| 1136 | if (pskb) { | ||
| 1137 | pci_unmap_single(rtlpci->pdev, | ||
| 1138 | rtlpriv->cfg->ops->get_desc( | ||
| 1139 | (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), | ||
| 1140 | pskb->len, PCI_DMA_TODEVICE); | ||
| 1132 | kfree_skb(pskb); | 1141 | kfree_skb(pskb); |
| 1142 | } | ||
| 1133 | 1143 | ||
| 1134 | /*NB: the beacon data buffer must be 32-bit aligned. */ | 1144 | /*NB: the beacon data buffer must be 32-bit aligned. */ |
| 1135 | pskb = ieee80211_beacon_get(hw, mac->vif); | 1145 | pskb = ieee80211_beacon_get(hw, mac->vif); |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index e9b960f0ff32..720aaf6313d2 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -1008,8 +1008,7 @@ err: | |||
| 1008 | 1008 | ||
| 1009 | static int xennet_change_mtu(struct net_device *dev, int mtu) | 1009 | static int xennet_change_mtu(struct net_device *dev, int mtu) |
| 1010 | { | 1010 | { |
| 1011 | int max = xennet_can_sg(dev) ? | 1011 | int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; |
| 1012 | XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; | ||
| 1013 | 1012 | ||
| 1014 | if (mtu > max) | 1013 | if (mtu > max) |
| 1015 | return -EINVAL; | 1014 | return -EINVAL; |
| @@ -1279,8 +1278,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) | |||
| 1279 | netdev->ethtool_ops = &xennet_ethtool_ops; | 1278 | netdev->ethtool_ops = &xennet_ethtool_ops; |
| 1280 | SET_NETDEV_DEV(netdev, &dev->dev); | 1279 | SET_NETDEV_DEV(netdev, &dev->dev); |
| 1281 | 1280 | ||
| 1282 | netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); | ||
| 1283 | |||
| 1284 | np->netdev = netdev; | 1281 | np->netdev = netdev; |
| 1285 | 1282 | ||
| 1286 | netif_carrier_off(netdev); | 1283 | netif_carrier_off(netdev); |
diff --git a/drivers/of/address.c b/drivers/of/address.c index ad2906919d45..78a7dcbec7d8 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c | |||
| @@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np) | |||
| 450 | return NULL; | 450 | return NULL; |
| 451 | } | 451 | } |
| 452 | 452 | ||
| 453 | static int of_empty_ranges_quirk(void) | 453 | static int of_empty_ranges_quirk(struct device_node *np) |
| 454 | { | 454 | { |
| 455 | if (IS_ENABLED(CONFIG_PPC)) { | 455 | if (IS_ENABLED(CONFIG_PPC)) { |
| 456 | /* To save cycles, we cache the result */ | 456 | /* To save cycles, we cache the result for the global "Mac" setting */ |
| 457 | static int quirk_state = -1; | 457 | static int quirk_state = -1; |
| 458 | 458 | ||
| 459 | /* PA-SEMI sdc DT bug */ | ||
| 460 | if (of_device_is_compatible(np, "1682m-sdc")) | ||
| 461 | return true; | ||
| 462 | |||
| 463 | /* Make quirk cached */ | ||
| 459 | if (quirk_state < 0) | 464 | if (quirk_state < 0) |
| 460 | quirk_state = | 465 | quirk_state = |
| 461 | of_machine_is_compatible("Power Macintosh") || | 466 | of_machine_is_compatible("Power Macintosh") || |
| @@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, | |||
| 490 | * This code is only enabled on powerpc. --gcl | 495 | * This code is only enabled on powerpc. --gcl |
| 491 | */ | 496 | */ |
| 492 | ranges = of_get_property(parent, rprop, &rlen); | 497 | ranges = of_get_property(parent, rprop, &rlen); |
| 493 | if (ranges == NULL && !of_empty_ranges_quirk()) { | 498 | if (ranges == NULL && !of_empty_ranges_quirk(parent)) { |
| 494 | pr_debug("OF: no ranges; cannot translate\n"); | 499 | pr_debug("OF: no ranges; cannot translate\n"); |
| 495 | return 1; | 500 | return 1; |
| 496 | } | 501 | } |
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index 9205f433573c..18198316b6cf 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c | |||
| @@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev) | |||
| 1572 | if (!pmic) | 1572 | if (!pmic) |
| 1573 | return -ENOMEM; | 1573 | return -ENOMEM; |
| 1574 | 1574 | ||
| 1575 | if (of_device_is_compatible(node, "ti,tps659038-pmic")) | ||
| 1576 | palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr = | ||
| 1577 | TPS659038_REGEN2_CTRL; | ||
| 1578 | |||
| 1575 | pmic->dev = &pdev->dev; | 1579 | pmic->dev = &pdev->dev; |
| 1576 | pmic->palmas = palmas; | 1580 | pmic->palmas = palmas; |
| 1577 | palmas->pmic = pmic; | 1581 | palmas->pmic = pmic; |
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index e2436d140175..3a6fd3a8a2ec 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c | |||
| @@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev) | |||
| 413 | mrst->dev = NULL; | 413 | mrst->dev = NULL; |
| 414 | } | 414 | } |
| 415 | 415 | ||
| 416 | #ifdef CONFIG_PM | 416 | #ifdef CONFIG_PM_SLEEP |
| 417 | static int mrst_suspend(struct device *dev, pm_message_t mesg) | 417 | static int mrst_suspend(struct device *dev) |
| 418 | { | 418 | { |
| 419 | struct mrst_rtc *mrst = dev_get_drvdata(dev); | 419 | struct mrst_rtc *mrst = dev_get_drvdata(dev); |
| 420 | unsigned char tmp; | 420 | unsigned char tmp; |
| @@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg) | |||
| 453 | */ | 453 | */ |
| 454 | static inline int mrst_poweroff(struct device *dev) | 454 | static inline int mrst_poweroff(struct device *dev) |
| 455 | { | 455 | { |
| 456 | return mrst_suspend(dev, PMSG_HIBERNATE); | 456 | return mrst_suspend(dev); |
| 457 | } | 457 | } |
| 458 | 458 | ||
| 459 | static int mrst_resume(struct device *dev) | 459 | static int mrst_resume(struct device *dev) |
| @@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev) | |||
| 490 | return 0; | 490 | return 0; |
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume); | ||
| 494 | #define MRST_PM_OPS (&mrst_pm_ops) | ||
| 495 | |||
| 493 | #else | 496 | #else |
| 494 | #define mrst_suspend NULL | 497 | #define MRST_PM_OPS NULL |
| 495 | #define mrst_resume NULL | ||
| 496 | 498 | ||
| 497 | static inline int mrst_poweroff(struct device *dev) | 499 | static inline int mrst_poweroff(struct device *dev) |
| 498 | { | 500 | { |
| @@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = { | |||
| 529 | .remove = vrtc_mrst_platform_remove, | 531 | .remove = vrtc_mrst_platform_remove, |
| 530 | .shutdown = vrtc_mrst_platform_shutdown, | 532 | .shutdown = vrtc_mrst_platform_shutdown, |
| 531 | .driver = { | 533 | .driver = { |
| 532 | .name = (char *) driver_name, | 534 | .name = driver_name, |
| 533 | .suspend = mrst_suspend, | 535 | .pm = MRST_PM_OPS, |
| 534 | .resume = mrst_resume, | ||
| 535 | } | 536 | } |
| 536 | }; | 537 | }; |
| 537 | 538 | ||
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 9219953ee949..d9afc51af7d3 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = { | |||
| 6815 | }; | 6815 | }; |
| 6816 | 6816 | ||
| 6817 | static struct ata_port_info sata_port_info = { | 6817 | static struct ata_port_info sata_port_info = { |
| 6818 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, | 6818 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | |
| 6819 | ATA_FLAG_SAS_HOST, | ||
| 6819 | .pio_mask = ATA_PIO4_ONLY, | 6820 | .pio_mask = ATA_PIO4_ONLY, |
| 6820 | .mwdma_mask = ATA_MWDMA2, | 6821 | .mwdma_mask = ATA_MWDMA2, |
| 6821 | .udma_mask = ATA_UDMA6, | 6822 | .udma_mask = ATA_UDMA6, |
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 932d9cc98d2f..9c706d8c1441 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
| @@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = { | |||
| 547 | }; | 547 | }; |
| 548 | 548 | ||
| 549 | static struct ata_port_info sata_port_info = { | 549 | static struct ata_port_info sata_port_info = { |
| 550 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, | 550 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | |
| 551 | ATA_FLAG_SAS_HOST, | ||
| 551 | .pio_mask = ATA_PIO4, | 552 | .pio_mask = ATA_PIO4, |
| 552 | .mwdma_mask = ATA_MWDMA2, | 553 | .mwdma_mask = ATA_MWDMA2, |
| 553 | .udma_mask = ATA_UDMA6, | 554 | .udma_mask = ATA_UDMA6, |
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 3ce39d10fafb..4f8c798e0633 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
| @@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg) | |||
| 108 | { | 108 | { |
| 109 | struct dw_spi *dws = arg; | 109 | struct dw_spi *dws = arg; |
| 110 | 110 | ||
| 111 | if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY)) | 111 | clear_bit(TX_BUSY, &dws->dma_chan_busy); |
| 112 | if (test_bit(RX_BUSY, &dws->dma_chan_busy)) | ||
| 112 | return; | 113 | return; |
| 113 | dw_spi_xfer_done(dws); | 114 | dw_spi_xfer_done(dws); |
| 114 | } | 115 | } |
| @@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg) | |||
| 156 | { | 157 | { |
| 157 | struct dw_spi *dws = arg; | 158 | struct dw_spi *dws = arg; |
| 158 | 159 | ||
| 159 | if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY)) | 160 | clear_bit(RX_BUSY, &dws->dma_chan_busy); |
| 161 | if (test_bit(TX_BUSY, &dws->dma_chan_busy)) | ||
| 160 | return; | 162 | return; |
| 161 | dw_spi_xfer_done(dws); | 163 | dw_spi_xfer_done(dws); |
| 162 | } | 164 | } |
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index ff9cdbdb6672..2b2c359f5a50 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c | |||
| @@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
| 498 | struct resource *res; | 498 | struct resource *res; |
| 499 | struct device *dev; | 499 | struct device *dev; |
| 500 | void __iomem *base; | 500 | void __iomem *base; |
| 501 | u32 max_freq, iomode; | 501 | u32 max_freq, iomode, num_cs; |
| 502 | int ret, irq, size; | 502 | int ret, irq, size; |
| 503 | 503 | ||
| 504 | dev = &pdev->dev; | 504 | dev = &pdev->dev; |
| @@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
| 550 | } | 550 | } |
| 551 | 551 | ||
| 552 | /* use num-cs unless not present or out of range */ | 552 | /* use num-cs unless not present or out of range */ |
| 553 | if (of_property_read_u16(dev->of_node, "num-cs", | 553 | if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) || |
| 554 | &master->num_chipselect) || | 554 | num_cs > SPI_NUM_CHIPSELECTS) |
| 555 | (master->num_chipselect > SPI_NUM_CHIPSELECTS)) | ||
| 556 | master->num_chipselect = SPI_NUM_CHIPSELECTS; | 555 | master->num_chipselect = SPI_NUM_CHIPSELECTS; |
| 556 | else | ||
| 557 | master->num_chipselect = num_cs; | ||
| 557 | 558 | ||
| 558 | master->bus_num = pdev->id; | 559 | master->bus_num = pdev->id; |
| 559 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; | 560 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c64a3e59fce3..57a195041dc7 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
| @@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master) | |||
| 1105 | "failed to unprepare message: %d\n", ret); | 1105 | "failed to unprepare message: %d\n", ret); |
| 1106 | } | 1106 | } |
| 1107 | } | 1107 | } |
| 1108 | |||
| 1109 | trace_spi_message_done(mesg); | ||
| 1110 | |||
| 1108 | master->cur_msg_prepared = false; | 1111 | master->cur_msg_prepared = false; |
| 1109 | 1112 | ||
| 1110 | mesg->state = NULL; | 1113 | mesg->state = NULL; |
| 1111 | if (mesg->complete) | 1114 | if (mesg->complete) |
| 1112 | mesg->complete(mesg->context); | 1115 | mesg->complete(mesg->context); |
| 1113 | |||
| 1114 | trace_spi_message_done(mesg); | ||
| 1115 | } | 1116 | } |
| 1116 | EXPORT_SYMBOL_GPL(spi_finalize_current_message); | 1117 | EXPORT_SYMBOL_GPL(spi_finalize_current_message); |
| 1117 | 1118 | ||
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig index 24183028bd71..6d5b38d69578 100644 --- a/drivers/staging/iio/Kconfig +++ b/drivers/staging/iio/Kconfig | |||
| @@ -38,6 +38,7 @@ config IIO_SIMPLE_DUMMY_EVENTS | |||
| 38 | config IIO_SIMPLE_DUMMY_BUFFER | 38 | config IIO_SIMPLE_DUMMY_BUFFER |
| 39 | bool "Buffered capture support" | 39 | bool "Buffered capture support" |
| 40 | select IIO_BUFFER | 40 | select IIO_BUFFER |
| 41 | select IIO_TRIGGER | ||
| 41 | select IIO_KFIFO_BUF | 42 | select IIO_KFIFO_BUF |
| 42 | help | 43 | help |
| 43 | Add buffered data capture to the simple dummy driver. | 44 | Add buffered data capture to the simple dummy driver. |
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index b1893f3f88f1..3ad1458bfeb0 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c | |||
| @@ -921,6 +921,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport) | |||
| 921 | writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE, | 921 | writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE, |
| 922 | sport->port.membase + UARTPFIFO); | 922 | sport->port.membase + UARTPFIFO); |
| 923 | 923 | ||
| 924 | /* explicitly clear RDRF */ | ||
| 925 | readb(sport->port.membase + UARTSR1); | ||
| 926 | |||
| 924 | /* flush Tx and Rx FIFO */ | 927 | /* flush Tx and Rx FIFO */ |
| 925 | writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, | 928 | writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, |
| 926 | sport->port.membase + UARTCFIFO); | 929 | sport->port.membase + UARTCFIFO); |
| @@ -1076,6 +1079,8 @@ static int lpuart_startup(struct uart_port *port) | |||
| 1076 | sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) & | 1079 | sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) & |
| 1077 | UARTPFIFO_FIFOSIZE_MASK) + 1); | 1080 | UARTPFIFO_FIFOSIZE_MASK) + 1); |
| 1078 | 1081 | ||
| 1082 | sport->port.fifosize = sport->txfifo_size; | ||
| 1083 | |||
| 1079 | sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & | 1084 | sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & |
| 1080 | UARTPFIFO_FIFOSIZE_MASK) + 1); | 1085 | UARTPFIFO_FIFOSIZE_MASK) + 1); |
| 1081 | 1086 | ||
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index af821a908720..cf08876922f1 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c | |||
| @@ -963,6 +963,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port) | |||
| 963 | free_irq(ourport->tx_irq, ourport); | 963 | free_irq(ourport->tx_irq, ourport); |
| 964 | tx_enabled(port) = 0; | 964 | tx_enabled(port) = 0; |
| 965 | ourport->tx_claimed = 0; | 965 | ourport->tx_claimed = 0; |
| 966 | ourport->tx_mode = 0; | ||
| 966 | } | 967 | } |
| 967 | 968 | ||
| 968 | if (ourport->rx_claimed) { | 969 | if (ourport->rx_claimed) { |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a7865c4b0498..0827d7c96527 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
| @@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, | |||
| 387 | status = PORT_PLC; | 387 | status = PORT_PLC; |
| 388 | port_change_bit = "link state"; | 388 | port_change_bit = "link state"; |
| 389 | break; | 389 | break; |
| 390 | case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: | ||
| 391 | status = PORT_CEC; | ||
| 392 | port_change_bit = "config error"; | ||
| 393 | break; | ||
| 390 | default: | 394 | default: |
| 391 | /* Should never happen */ | 395 | /* Should never happen */ |
| 392 | return; | 396 | return; |
| @@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
| 588 | status |= USB_PORT_STAT_C_LINK_STATE << 16; | 592 | status |= USB_PORT_STAT_C_LINK_STATE << 16; |
| 589 | if ((raw_port_status & PORT_WRC)) | 593 | if ((raw_port_status & PORT_WRC)) |
| 590 | status |= USB_PORT_STAT_C_BH_RESET << 16; | 594 | status |= USB_PORT_STAT_C_BH_RESET << 16; |
| 595 | if ((raw_port_status & PORT_CEC)) | ||
| 596 | status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; | ||
| 591 | } | 597 | } |
| 592 | 598 | ||
| 593 | if (hcd->speed != HCD_USB3) { | 599 | if (hcd->speed != HCD_USB3) { |
| @@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
| 1005 | case USB_PORT_FEAT_C_OVER_CURRENT: | 1011 | case USB_PORT_FEAT_C_OVER_CURRENT: |
| 1006 | case USB_PORT_FEAT_C_ENABLE: | 1012 | case USB_PORT_FEAT_C_ENABLE: |
| 1007 | case USB_PORT_FEAT_C_PORT_LINK_STATE: | 1013 | case USB_PORT_FEAT_C_PORT_LINK_STATE: |
| 1014 | case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: | ||
| 1008 | xhci_clear_port_change_bit(xhci, wValue, wIndex, | 1015 | xhci_clear_port_change_bit(xhci, wValue, wIndex, |
| 1009 | port_array[wIndex], temp); | 1016 | port_array[wIndex], temp); |
| 1010 | break; | 1017 | break; |
| @@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) | |||
| 1069 | */ | 1076 | */ |
| 1070 | status = bus_state->resuming_ports; | 1077 | status = bus_state->resuming_ports; |
| 1071 | 1078 | ||
| 1072 | mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; | 1079 | mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; |
| 1073 | 1080 | ||
| 1074 | spin_lock_irqsave(&xhci->lock, flags); | 1081 | spin_lock_irqsave(&xhci->lock, flags); |
| 1075 | /* For each port, did anything change? If so, set that bit in buf. */ | 1082 | /* For each port, did anything change? If so, set that bit in buf. */ |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index fd53c9ebd662..2af32e26fafc 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 115 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { | 115 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { |
| 116 | xhci->quirks |= XHCI_LPM_SUPPORT; | 116 | xhci->quirks |= XHCI_LPM_SUPPORT; |
| 117 | xhci->quirks |= XHCI_INTEL_HOST; | 117 | xhci->quirks |= XHCI_INTEL_HOST; |
| 118 | xhci->quirks |= XHCI_AVOID_BEI; | ||
| 118 | } | 119 | } |
| 119 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 120 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
| 120 | pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { | 121 | pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { |
| @@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 130 | * PPT chipsets. | 131 | * PPT chipsets. |
| 131 | */ | 132 | */ |
| 132 | xhci->quirks |= XHCI_SPURIOUS_REBOOT; | 133 | xhci->quirks |= XHCI_SPURIOUS_REBOOT; |
| 133 | xhci->quirks |= XHCI_AVOID_BEI; | ||
| 134 | } | 134 | } |
| 135 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 135 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
| 136 | pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { | 136 | pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { |
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c index f32c292cc868..3fc4fe770253 100644 --- a/drivers/usb/isp1760/isp1760-udc.c +++ b/drivers/usb/isp1760/isp1760-udc.c | |||
| @@ -1203,7 +1203,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
| 1203 | 1203 | ||
| 1204 | if (udc->driver) { | 1204 | if (udc->driver) { |
| 1205 | dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); | 1205 | dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); |
| 1206 | spin_unlock(&udc->lock); | 1206 | spin_unlock_irqrestore(&udc->lock, flags); |
| 1207 | return -EBUSY; | 1207 | return -EBUSY; |
| 1208 | } | 1208 | } |
| 1209 | 1209 | ||
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 3086dec0ef53..8eb68a31cab6 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
| 604 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 604 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 605 | { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), | 605 | { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), |
| 606 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 606 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 607 | { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, | ||
| 607 | /* | 608 | /* |
| 608 | * ELV devices: | 609 | * ELV devices: |
| 609 | */ | 610 | */ |
| @@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) | |||
| 1883 | { | 1884 | { |
| 1884 | struct usb_device *udev = serial->dev; | 1885 | struct usb_device *udev = serial->dev; |
| 1885 | 1886 | ||
| 1886 | if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || | 1887 | if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) |
| 1887 | (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) | 1888 | return ftdi_jtag_probe(serial); |
| 1889 | |||
| 1890 | if (udev->product && | ||
| 1891 | (!strcmp(udev->product, "BeagleBone/XDS100V2") || | ||
| 1892 | !strcmp(udev->product, "SNAP Connect E10"))) | ||
| 1888 | return ftdi_jtag_probe(serial); | 1893 | return ftdi_jtag_probe(serial); |
| 1889 | 1894 | ||
| 1890 | return 0; | 1895 | return 0; |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 56b1b55c4751..4e4f46f3c89c 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -561,6 +561,12 @@ | |||
| 561 | */ | 561 | */ |
| 562 | #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ | 562 | #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ |
| 563 | 563 | ||
| 564 | /* | ||
| 565 | * Synapse Wireless product ids (FTDI_VID) | ||
| 566 | * http://www.synapse-wireless.com | ||
| 567 | */ | ||
| 568 | #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */ | ||
| 569 | |||
| 564 | 570 | ||
| 565 | /********************************/ | 571 | /********************************/ |
| 566 | /** third-party VID/PID combos **/ | 572 | /** third-party VID/PID combos **/ |
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index dd97d8b572c3..4f7e072e4e00 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c | |||
| @@ -61,6 +61,7 @@ struct keyspan_pda_private { | |||
| 61 | /* For Xircom PGSDB9 and older Entrega version of the same device */ | 61 | /* For Xircom PGSDB9 and older Entrega version of the same device */ |
| 62 | #define XIRCOM_VENDOR_ID 0x085a | 62 | #define XIRCOM_VENDOR_ID 0x085a |
| 63 | #define XIRCOM_FAKE_ID 0x8027 | 63 | #define XIRCOM_FAKE_ID 0x8027 |
| 64 | #define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */ | ||
| 64 | #define ENTREGA_VENDOR_ID 0x1645 | 65 | #define ENTREGA_VENDOR_ID 0x1645 |
| 65 | #define ENTREGA_FAKE_ID 0x8093 | 66 | #define ENTREGA_FAKE_ID 0x8093 |
| 66 | 67 | ||
| @@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
| 70 | #endif | 71 | #endif |
| 71 | #ifdef XIRCOM | 72 | #ifdef XIRCOM |
| 72 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, | 73 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, |
| 74 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, | ||
| 73 | { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, | 75 | { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, |
| 74 | #endif | 76 | #endif |
| 75 | { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, | 77 | { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, |
| @@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = { | |||
| 93 | #ifdef XIRCOM | 95 | #ifdef XIRCOM |
| 94 | static const struct usb_device_id id_table_fake_xircom[] = { | 96 | static const struct usb_device_id id_table_fake_xircom[] = { |
| 95 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, | 97 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, |
| 98 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, | ||
| 96 | { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, | 99 | { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, |
| 97 | { } | 100 | { } |
| 98 | }; | 101 | }; |
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c index c8def68d9e4c..0deaa4f971f5 100644 --- a/drivers/watchdog/imgpdc_wdt.c +++ b/drivers/watchdog/imgpdc_wdt.c | |||
| @@ -42,10 +42,10 @@ | |||
| 42 | #define PDC_WDT_MIN_TIMEOUT 1 | 42 | #define PDC_WDT_MIN_TIMEOUT 1 |
| 43 | #define PDC_WDT_DEF_TIMEOUT 64 | 43 | #define PDC_WDT_DEF_TIMEOUT 64 |
| 44 | 44 | ||
| 45 | static int heartbeat; | 45 | static int heartbeat = PDC_WDT_DEF_TIMEOUT; |
| 46 | module_param(heartbeat, int, 0); | 46 | module_param(heartbeat, int, 0); |
| 47 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. " | 47 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds " |
| 48 | "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); | 48 | "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); |
| 49 | 49 | ||
| 50 | static bool nowayout = WATCHDOG_NOWAYOUT; | 50 | static bool nowayout = WATCHDOG_NOWAYOUT; |
| 51 | module_param(nowayout, bool, 0); | 51 | module_param(nowayout, bool, 0); |
| @@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev) | |||
| 191 | pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; | 191 | pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; |
| 192 | pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; | 192 | pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; |
| 193 | pdc_wdt->wdt_dev.parent = &pdev->dev; | 193 | pdc_wdt->wdt_dev.parent = &pdev->dev; |
| 194 | watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); | ||
| 194 | 195 | ||
| 195 | ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); | 196 | ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); |
| 196 | if (ret < 0) { | 197 | if (ret < 0) { |
| @@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev) | |||
| 232 | watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); | 233 | watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); |
| 233 | 234 | ||
| 234 | platform_set_drvdata(pdev, pdc_wdt); | 235 | platform_set_drvdata(pdev, pdc_wdt); |
| 235 | watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); | ||
| 236 | 236 | ||
| 237 | ret = watchdog_register_device(&pdc_wdt->wdt_dev); | 237 | ret = watchdog_register_device(&pdc_wdt->wdt_dev); |
| 238 | if (ret) | 238 | if (ret) |
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index a87f6df6e85f..938b987de551 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c | |||
| @@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev) | |||
| 133 | u32 reg; | 133 | u32 reg; |
| 134 | struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); | 134 | struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); |
| 135 | void __iomem *wdt_base = mtk_wdt->wdt_base; | 135 | void __iomem *wdt_base = mtk_wdt->wdt_base; |
| 136 | u32 ret; | 136 | int ret; |
| 137 | 137 | ||
| 138 | ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); | 138 | ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); |
| 139 | if (ret < 0) | 139 | if (ret < 0) |
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index b812462083fc..94d96809e686 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
| @@ -55,6 +55,23 @@ config XEN_BALLOON_MEMORY_HOTPLUG | |||
| 55 | 55 | ||
| 56 | In that case step 3 should be omitted. | 56 | In that case step 3 should be omitted. |
| 57 | 57 | ||
| 58 | config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT | ||
| 59 | int "Hotplugged memory limit (in GiB) for a PV guest" | ||
| 60 | default 512 if X86_64 | ||
| 61 | default 4 if X86_32 | ||
| 62 | range 0 64 if X86_32 | ||
| 63 | depends on XEN_HAVE_PVMMU | ||
| 64 | depends on XEN_BALLOON_MEMORY_HOTPLUG | ||
| 65 | help | ||
| 66 | Maximum amount of memory (in GiB) that a PV guest can be | ||
| 67 | expanded to when using memory hotplug. | ||
| 68 | |||
| 69 | A PV guest can have more memory than this limit if it is | ||
| 70 | started with a larger maximum. | ||
| 71 | |||
| 72 | This value is used to allocate enough space in internal | ||
| 73 | tables needed for physical memory administration. | ||
| 74 | |||
| 58 | config XEN_SCRUB_PAGES | 75 | config XEN_SCRUB_PAGES |
| 59 | bool "Scrub pages before returning them to system" | 76 | bool "Scrub pages before returning them to system" |
| 60 | depends on XEN_BALLOON | 77 | depends on XEN_BALLOON |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 0b52d92cb2e5..fd933695f232 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
| @@ -229,6 +229,29 @@ static enum bp_state reserve_additional_memory(long credit) | |||
| 229 | balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION); | 229 | balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION); |
| 230 | nid = memory_add_physaddr_to_nid(hotplug_start_paddr); | 230 | nid = memory_add_physaddr_to_nid(hotplug_start_paddr); |
| 231 | 231 | ||
| 232 | #ifdef CONFIG_XEN_HAVE_PVMMU | ||
| 233 | /* | ||
| 234 | * add_memory() will build page tables for the new memory so | ||
| 235 | * the p2m must contain invalid entries so the correct | ||
| 236 | * non-present PTEs will be written. | ||
| 237 | * | ||
| 238 | * If a failure occurs, the original (identity) p2m entries | ||
| 239 | * are not restored since this region is now known not to | ||
| 240 | * conflict with any devices. | ||
| 241 | */ | ||
| 242 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | ||
| 243 | unsigned long pfn, i; | ||
| 244 | |||
| 245 | pfn = PFN_DOWN(hotplug_start_paddr); | ||
| 246 | for (i = 0; i < balloon_hotplug; i++) { | ||
| 247 | if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { | ||
| 248 | pr_warn("set_phys_to_machine() failed, no memory added\n"); | ||
| 249 | return BP_ECANCELED; | ||
| 250 | } | ||
| 251 | } | ||
| 252 | } | ||
| 253 | #endif | ||
| 254 | |||
| 232 | rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); | 255 | rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); |
| 233 | 256 | ||
| 234 | if (rc) { | 257 | if (rc) { |
diff --git a/fs/affs/file.c b/fs/affs/file.c index d2468bf95669..a91795e01a7f 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c | |||
| @@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
| 699 | boff = tmp % bsize; | 699 | boff = tmp % bsize; |
| 700 | if (boff) { | 700 | if (boff) { |
| 701 | bh = affs_bread_ino(inode, bidx, 0); | 701 | bh = affs_bread_ino(inode, bidx, 0); |
| 702 | if (IS_ERR(bh)) | 702 | if (IS_ERR(bh)) { |
| 703 | return PTR_ERR(bh); | 703 | written = PTR_ERR(bh); |
| 704 | goto err_first_bh; | ||
| 705 | } | ||
| 704 | tmp = min(bsize - boff, to - from); | 706 | tmp = min(bsize - boff, to - from); |
| 705 | BUG_ON(boff + tmp > bsize || tmp > bsize); | 707 | BUG_ON(boff + tmp > bsize || tmp > bsize); |
| 706 | memcpy(AFFS_DATA(bh) + boff, data + from, tmp); | 708 | memcpy(AFFS_DATA(bh) + boff, data + from, tmp); |
| @@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
| 712 | bidx++; | 714 | bidx++; |
| 713 | } else if (bidx) { | 715 | } else if (bidx) { |
| 714 | bh = affs_bread_ino(inode, bidx - 1, 0); | 716 | bh = affs_bread_ino(inode, bidx - 1, 0); |
| 715 | if (IS_ERR(bh)) | 717 | if (IS_ERR(bh)) { |
| 716 | return PTR_ERR(bh); | 718 | written = PTR_ERR(bh); |
| 719 | goto err_first_bh; | ||
| 720 | } | ||
| 717 | } | 721 | } |
| 718 | while (from + bsize <= to) { | 722 | while (from + bsize <= to) { |
| 719 | prev_bh = bh; | 723 | prev_bh = bh; |
| 720 | bh = affs_getemptyblk_ino(inode, bidx); | 724 | bh = affs_getemptyblk_ino(inode, bidx); |
| 721 | if (IS_ERR(bh)) | 725 | if (IS_ERR(bh)) |
| 722 | goto out; | 726 | goto err_bh; |
| 723 | memcpy(AFFS_DATA(bh), data + from, bsize); | 727 | memcpy(AFFS_DATA(bh), data + from, bsize); |
| 724 | if (buffer_new(bh)) { | 728 | if (buffer_new(bh)) { |
| 725 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); | 729 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); |
| @@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
| 751 | prev_bh = bh; | 755 | prev_bh = bh; |
| 752 | bh = affs_bread_ino(inode, bidx, 1); | 756 | bh = affs_bread_ino(inode, bidx, 1); |
| 753 | if (IS_ERR(bh)) | 757 | if (IS_ERR(bh)) |
| 754 | goto out; | 758 | goto err_bh; |
| 755 | tmp = min(bsize, to - from); | 759 | tmp = min(bsize, to - from); |
| 756 | BUG_ON(tmp > bsize); | 760 | BUG_ON(tmp > bsize); |
| 757 | memcpy(AFFS_DATA(bh), data + from, tmp); | 761 | memcpy(AFFS_DATA(bh), data + from, tmp); |
| @@ -790,12 +794,13 @@ done: | |||
| 790 | if (tmp > inode->i_size) | 794 | if (tmp > inode->i_size) |
| 791 | inode->i_size = AFFS_I(inode)->mmu_private = tmp; | 795 | inode->i_size = AFFS_I(inode)->mmu_private = tmp; |
| 792 | 796 | ||
| 797 | err_first_bh: | ||
| 793 | unlock_page(page); | 798 | unlock_page(page); |
| 794 | page_cache_release(page); | 799 | page_cache_release(page); |
| 795 | 800 | ||
| 796 | return written; | 801 | return written; |
| 797 | 802 | ||
| 798 | out: | 803 | err_bh: |
| 799 | bh = prev_bh; | 804 | bh = prev_bh; |
| 800 | if (!written) | 805 | if (!written) |
| 801 | written = PTR_ERR(bh); | 806 | written = PTR_ERR(bh); |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 4ac7445e6ec7..aa0dc2573374 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
| @@ -1,6 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * fs/cifs/cifsencrypt.c | 2 | * fs/cifs/cifsencrypt.c |
| 3 | * | 3 | * |
| 4 | * Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP | ||
| 5 | * for more detailed information | ||
| 6 | * | ||
| 4 | * Copyright (C) International Business Machines Corp., 2005,2013 | 7 | * Copyright (C) International Business Machines Corp., 2005,2013 |
| 5 | * Author(s): Steve French (sfrench@us.ibm.com) | 8 | * Author(s): Steve French (sfrench@us.ibm.com) |
| 6 | * | 9 | * |
| @@ -515,7 +518,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, | |||
| 515 | __func__); | 518 | __func__); |
| 516 | return rc; | 519 | return rc; |
| 517 | } | 520 | } |
| 518 | } else if (ses->serverName) { | 521 | } else { |
| 522 | /* We use ses->serverName if no domain name available */ | ||
| 519 | len = strlen(ses->serverName); | 523 | len = strlen(ses->serverName); |
| 520 | 524 | ||
| 521 | server = kmalloc(2 + (len * 2), GFP_KERNEL); | 525 | server = kmalloc(2 + (len * 2), GFP_KERNEL); |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d3aa999ab785..480cf9c81d50 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -1599,6 +1599,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
| 1599 | pr_warn("CIFS: username too long\n"); | 1599 | pr_warn("CIFS: username too long\n"); |
| 1600 | goto cifs_parse_mount_err; | 1600 | goto cifs_parse_mount_err; |
| 1601 | } | 1601 | } |
| 1602 | |||
| 1603 | kfree(vol->username); | ||
| 1602 | vol->username = kstrdup(string, GFP_KERNEL); | 1604 | vol->username = kstrdup(string, GFP_KERNEL); |
| 1603 | if (!vol->username) | 1605 | if (!vol->username) |
| 1604 | goto cifs_parse_mount_err; | 1606 | goto cifs_parse_mount_err; |
| @@ -1700,6 +1702,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
| 1700 | goto cifs_parse_mount_err; | 1702 | goto cifs_parse_mount_err; |
| 1701 | } | 1703 | } |
| 1702 | 1704 | ||
| 1705 | kfree(vol->domainname); | ||
| 1703 | vol->domainname = kstrdup(string, GFP_KERNEL); | 1706 | vol->domainname = kstrdup(string, GFP_KERNEL); |
| 1704 | if (!vol->domainname) { | 1707 | if (!vol->domainname) { |
| 1705 | pr_warn("CIFS: no memory for domainname\n"); | 1708 | pr_warn("CIFS: no memory for domainname\n"); |
| @@ -1731,6 +1734,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
| 1731 | } | 1734 | } |
| 1732 | 1735 | ||
| 1733 | if (strncasecmp(string, "default", 7) != 0) { | 1736 | if (strncasecmp(string, "default", 7) != 0) { |
| 1737 | kfree(vol->iocharset); | ||
| 1734 | vol->iocharset = kstrdup(string, | 1738 | vol->iocharset = kstrdup(string, |
| 1735 | GFP_KERNEL); | 1739 | GFP_KERNEL); |
| 1736 | if (!vol->iocharset) { | 1740 | if (!vol->iocharset) { |
| @@ -2913,8 +2917,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) | |||
| 2913 | * calling name ends in null (byte 16) from old smb | 2917 | * calling name ends in null (byte 16) from old smb |
| 2914 | * convention. | 2918 | * convention. |
| 2915 | */ | 2919 | */ |
| 2916 | if (server->workstation_RFC1001_name && | 2920 | if (server->workstation_RFC1001_name[0] != 0) |
| 2917 | server->workstation_RFC1001_name[0] != 0) | ||
| 2918 | rfc1002mangle(ses_init_buf->trailer. | 2921 | rfc1002mangle(ses_init_buf->trailer. |
| 2919 | session_req.calling_name, | 2922 | session_req.calling_name, |
| 2920 | server->workstation_RFC1001_name, | 2923 | server->workstation_RFC1001_name, |
| @@ -3692,6 +3695,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, | |||
| 3692 | #endif /* CIFS_WEAK_PW_HASH */ | 3695 | #endif /* CIFS_WEAK_PW_HASH */ |
| 3693 | rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, | 3696 | rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, |
| 3694 | bcc_ptr, nls_codepage); | 3697 | bcc_ptr, nls_codepage); |
| 3698 | if (rc) { | ||
| 3699 | cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n", | ||
| 3700 | __func__, rc); | ||
| 3701 | cifs_buf_release(smb_buffer); | ||
| 3702 | return rc; | ||
| 3703 | } | ||
| 3695 | 3704 | ||
| 3696 | bcc_ptr += CIFS_AUTH_RESP_SIZE; | 3705 | bcc_ptr += CIFS_AUTH_RESP_SIZE; |
| 3697 | if (ses->capabilities & CAP_UNICODE) { | 3706 | if (ses->capabilities & CAP_UNICODE) { |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index a94b3e673182..ca30c391a894 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -1823,6 +1823,7 @@ refind_writable: | |||
| 1823 | cifsFileInfo_put(inv_file); | 1823 | cifsFileInfo_put(inv_file); |
| 1824 | spin_lock(&cifs_file_list_lock); | 1824 | spin_lock(&cifs_file_list_lock); |
| 1825 | ++refind; | 1825 | ++refind; |
| 1826 | inv_file = NULL; | ||
| 1826 | goto refind_writable; | 1827 | goto refind_writable; |
| 1827 | } | 1828 | } |
| 1828 | } | 1829 | } |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 2d4f37235ed0..3e126d7bb2ea 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
| @@ -771,6 +771,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, | |||
| 771 | cifs_buf_release(srchinf->ntwrk_buf_start); | 771 | cifs_buf_release(srchinf->ntwrk_buf_start); |
| 772 | } | 772 | } |
| 773 | kfree(srchinf); | 773 | kfree(srchinf); |
| 774 | if (rc) | ||
| 775 | goto cgii_exit; | ||
| 774 | } else | 776 | } else |
| 775 | goto cgii_exit; | 777 | goto cgii_exit; |
| 776 | 778 | ||
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 689f035915cf..22dfdf17d065 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c | |||
| @@ -322,7 +322,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) | |||
| 322 | 322 | ||
| 323 | /* return pointer to beginning of data area, ie offset from SMB start */ | 323 | /* return pointer to beginning of data area, ie offset from SMB start */ |
| 324 | if ((*off != 0) && (*len != 0)) | 324 | if ((*off != 0) && (*len != 0)) |
| 325 | return hdr->ProtocolId + *off; | 325 | return (char *)(&hdr->ProtocolId[0]) + *off; |
| 326 | else | 326 | else |
| 327 | return NULL; | 327 | return NULL; |
| 328 | } | 328 | } |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 96b5d40a2ece..eab05e1aa587 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
| @@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid, | |||
| 684 | 684 | ||
| 685 | /* No need to change MaxChunks since already set to 1 */ | 685 | /* No need to change MaxChunks since already set to 1 */ |
| 686 | chunk_sizes_updated = true; | 686 | chunk_sizes_updated = true; |
| 687 | } | 687 | } else |
| 688 | goto cchunk_out; | ||
| 688 | } | 689 | } |
| 689 | 690 | ||
| 690 | cchunk_out: | 691 | cchunk_out: |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 3417340bf89e..65cd7a84c8bc 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
| @@ -1218,7 +1218,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 1218 | struct smb2_ioctl_req *req; | 1218 | struct smb2_ioctl_req *req; |
| 1219 | struct smb2_ioctl_rsp *rsp; | 1219 | struct smb2_ioctl_rsp *rsp; |
| 1220 | struct TCP_Server_Info *server; | 1220 | struct TCP_Server_Info *server; |
| 1221 | struct cifs_ses *ses = tcon->ses; | 1221 | struct cifs_ses *ses; |
| 1222 | struct kvec iov[2]; | 1222 | struct kvec iov[2]; |
| 1223 | int resp_buftype; | 1223 | int resp_buftype; |
| 1224 | int num_iovecs; | 1224 | int num_iovecs; |
| @@ -1233,6 +1233,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 1233 | if (plen) | 1233 | if (plen) |
| 1234 | *plen = 0; | 1234 | *plen = 0; |
| 1235 | 1235 | ||
| 1236 | if (tcon) | ||
| 1237 | ses = tcon->ses; | ||
| 1238 | else | ||
| 1239 | return -EIO; | ||
| 1240 | |||
| 1236 | if (ses && (ses->server)) | 1241 | if (ses && (ses->server)) |
| 1237 | server = ses->server; | 1242 | server = ses->server; |
| 1238 | else | 1243 | else |
| @@ -1296,14 +1301,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 1296 | rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; | 1301 | rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; |
| 1297 | 1302 | ||
| 1298 | if ((rc != 0) && (rc != -EINVAL)) { | 1303 | if ((rc != 0) && (rc != -EINVAL)) { |
| 1299 | if (tcon) | 1304 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); |
| 1300 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); | ||
| 1301 | goto ioctl_exit; | 1305 | goto ioctl_exit; |
| 1302 | } else if (rc == -EINVAL) { | 1306 | } else if (rc == -EINVAL) { |
| 1303 | if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && | 1307 | if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && |
| 1304 | (opcode != FSCTL_SRV_COPYCHUNK)) { | 1308 | (opcode != FSCTL_SRV_COPYCHUNK)) { |
| 1305 | if (tcon) | 1309 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); |
| 1306 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); | ||
| 1307 | goto ioctl_exit; | 1310 | goto ioctl_exit; |
| 1308 | } | 1311 | } |
| 1309 | } | 1312 | } |
| @@ -1629,7 +1632,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 1629 | 1632 | ||
| 1630 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); | 1633 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); |
| 1631 | 1634 | ||
| 1632 | if ((rc != 0) && tcon) | 1635 | if (rc != 0) |
| 1633 | cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); | 1636 | cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); |
| 1634 | 1637 | ||
| 1635 | free_rsp_buf(resp_buftype, iov[0].iov_base); | 1638 | free_rsp_buf(resp_buftype, iov[0].iov_base); |
| @@ -2114,7 +2117,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2114 | struct kvec iov[2]; | 2117 | struct kvec iov[2]; |
| 2115 | int rc = 0; | 2118 | int rc = 0; |
| 2116 | int len; | 2119 | int len; |
| 2117 | int resp_buftype; | 2120 | int resp_buftype = CIFS_NO_BUFFER; |
| 2118 | unsigned char *bufptr; | 2121 | unsigned char *bufptr; |
| 2119 | struct TCP_Server_Info *server; | 2122 | struct TCP_Server_Info *server; |
| 2120 | struct cifs_ses *ses = tcon->ses; | 2123 | struct cifs_ses *ses = tcon->ses; |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index e907052eeadb..32a8bbd7a9ad 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -53,6 +53,18 @@ struct wb_writeback_work { | |||
| 53 | struct completion *done; /* set if the caller waits */ | 53 | struct completion *done; /* set if the caller waits */ |
| 54 | }; | 54 | }; |
| 55 | 55 | ||
| 56 | /* | ||
| 57 | * If an inode is constantly having its pages dirtied, but then the | ||
| 58 | * updates stop dirtytime_expire_interval seconds in the past, it's | ||
| 59 | * possible for the worst case time between when an inode has its | ||
| 60 | * timestamps updated and when they finally get written out to be two | ||
| 61 | * dirtytime_expire_intervals. We set the default to 12 hours (in | ||
| 62 | * seconds), which means most of the time inodes will have their | ||
| 63 | * timestamps written to disk after 12 hours, but in the worst case a | ||
| 64 | few inodes might not have their timestamps updated for 24 hours. | ||
| 65 | */ | ||
| 66 | unsigned int dirtytime_expire_interval = 12 * 60 * 60; | ||
| 67 | |||
| 56 | /** | 68 | /** |
| 57 | * writeback_in_progress - determine whether there is writeback in progress | 69 | * writeback_in_progress - determine whether there is writeback in progress |
| 58 | * @bdi: the device's backing_dev_info structure. | 70 | * @bdi: the device's backing_dev_info structure. |
| @@ -275,8 +287,8 @@ static int move_expired_inodes(struct list_head *delaying_queue, | |||
| 275 | 287 | ||
| 276 | if ((flags & EXPIRE_DIRTY_ATIME) == 0) | 288 | if ((flags & EXPIRE_DIRTY_ATIME) == 0) |
| 277 | older_than_this = work->older_than_this; | 289 | older_than_this = work->older_than_this; |
| 278 | else if ((work->reason == WB_REASON_SYNC) == 0) { | 290 | else if (!work->for_sync) { |
| 279 | expire_time = jiffies - (HZ * 86400); | 291 | expire_time = jiffies - (dirtytime_expire_interval * HZ); |
| 280 | older_than_this = &expire_time; | 292 | older_than_this = &expire_time; |
| 281 | } | 293 | } |
| 282 | while (!list_empty(delaying_queue)) { | 294 | while (!list_empty(delaying_queue)) { |
| @@ -458,6 +470,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, | |||
| 458 | */ | 470 | */ |
| 459 | redirty_tail(inode, wb); | 471 | redirty_tail(inode, wb); |
| 460 | } else if (inode->i_state & I_DIRTY_TIME) { | 472 | } else if (inode->i_state & I_DIRTY_TIME) { |
| 473 | inode->dirtied_when = jiffies; | ||
| 461 | list_move(&inode->i_wb_list, &wb->b_dirty_time); | 474 | list_move(&inode->i_wb_list, &wb->b_dirty_time); |
| 462 | } else { | 475 | } else { |
| 463 | /* The inode is clean. Remove from writeback lists. */ | 476 | /* The inode is clean. Remove from writeback lists. */ |
| @@ -505,12 +518,17 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 505 | spin_lock(&inode->i_lock); | 518 | spin_lock(&inode->i_lock); |
| 506 | 519 | ||
| 507 | dirty = inode->i_state & I_DIRTY; | 520 | dirty = inode->i_state & I_DIRTY; |
| 508 | if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) && | 521 | if (inode->i_state & I_DIRTY_TIME) { |
| 509 | (inode->i_state & I_DIRTY_TIME)) || | 522 | if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) || |
| 510 | (inode->i_state & I_DIRTY_TIME_EXPIRED)) { | 523 | unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) || |
| 511 | dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; | 524 | unlikely(time_after(jiffies, |
| 512 | trace_writeback_lazytime(inode); | 525 | (inode->dirtied_time_when + |
| 513 | } | 526 | dirtytime_expire_interval * HZ)))) { |
| 527 | dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; | ||
| 528 | trace_writeback_lazytime(inode); | ||
| 529 | } | ||
| 530 | } else | ||
| 531 | inode->i_state &= ~I_DIRTY_TIME_EXPIRED; | ||
| 514 | inode->i_state &= ~dirty; | 532 | inode->i_state &= ~dirty; |
| 515 | 533 | ||
| 516 | /* | 534 | /* |
| @@ -1131,6 +1149,56 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) | |||
| 1131 | rcu_read_unlock(); | 1149 | rcu_read_unlock(); |
| 1132 | } | 1150 | } |
| 1133 | 1151 | ||
| 1152 | /* | ||
| 1153 | * Wake up bdi's periodically to make sure dirtytime inodes gets | ||
| 1154 | * written back periodically. We deliberately do *not* check the | ||
| 1155 | * b_dirtytime list in wb_has_dirty_io(), since this would cause the | ||
| 1156 | * kernel to be constantly waking up once there are any dirtytime | ||
| 1157 | * inodes on the system. So instead we define a separate delayed work | ||
| 1158 | * function which gets called much more rarely. (By default, only | ||
| 1159 | * once every 12 hours.) | ||
| 1160 | * | ||
| 1161 | * If there is any other write activity going on in the file system, | ||
| 1162 | * this function won't be necessary. But if the only thing that has | ||
| 1163 | * happened on the file system is a dirtytime inode caused by an atime | ||
| 1164 | * update, we need this infrastructure below to make sure that inode | ||
| 1165 | * eventually gets pushed out to disk. | ||
| 1166 | */ | ||
| 1167 | static void wakeup_dirtytime_writeback(struct work_struct *w); | ||
| 1168 | static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback); | ||
| 1169 | |||
| 1170 | static void wakeup_dirtytime_writeback(struct work_struct *w) | ||
| 1171 | { | ||
| 1172 | struct backing_dev_info *bdi; | ||
| 1173 | |||
| 1174 | rcu_read_lock(); | ||
| 1175 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { | ||
| 1176 | if (list_empty(&bdi->wb.b_dirty_time)) | ||
| 1177 | continue; | ||
| 1178 | bdi_wakeup_thread(bdi); | ||
| 1179 | } | ||
| 1180 | rcu_read_unlock(); | ||
| 1181 | schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); | ||
| 1182 | } | ||
| 1183 | |||
| 1184 | static int __init start_dirtytime_writeback(void) | ||
| 1185 | { | ||
| 1186 | schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); | ||
| 1187 | return 0; | ||
| 1188 | } | ||
| 1189 | __initcall(start_dirtytime_writeback); | ||
| 1190 | |||
| 1191 | int dirtytime_interval_handler(struct ctl_table *table, int write, | ||
| 1192 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
| 1193 | { | ||
| 1194 | int ret; | ||
| 1195 | |||
| 1196 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | ||
| 1197 | if (ret == 0 && write) | ||
| 1198 | mod_delayed_work(system_wq, &dirtytime_work, 0); | ||
| 1199 | return ret; | ||
| 1200 | } | ||
| 1201 | |||
| 1134 | static noinline void block_dump___mark_inode_dirty(struct inode *inode) | 1202 | static noinline void block_dump___mark_inode_dirty(struct inode *inode) |
| 1135 | { | 1203 | { |
| 1136 | if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { | 1204 | if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { |
| @@ -1269,8 +1337,13 @@ void __mark_inode_dirty(struct inode *inode, int flags) | |||
| 1269 | } | 1337 | } |
| 1270 | 1338 | ||
| 1271 | inode->dirtied_when = jiffies; | 1339 | inode->dirtied_when = jiffies; |
| 1272 | list_move(&inode->i_wb_list, dirtytime ? | 1340 | if (dirtytime) |
| 1273 | &bdi->wb.b_dirty_time : &bdi->wb.b_dirty); | 1341 | inode->dirtied_time_when = jiffies; |
| 1342 | if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES)) | ||
| 1343 | list_move(&inode->i_wb_list, &bdi->wb.b_dirty); | ||
| 1344 | else | ||
| 1345 | list_move(&inode->i_wb_list, | ||
| 1346 | &bdi->wb.b_dirty_time); | ||
| 1274 | spin_unlock(&bdi->wb.list_lock); | 1347 | spin_unlock(&bdi->wb.list_lock); |
| 1275 | trace_writeback_dirty_inode_enqueue(inode); | 1348 | trace_writeback_dirty_inode_enqueue(inode); |
| 1276 | 1349 | ||
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c index 6e560d56094b..754fdf8c6356 100644 --- a/fs/hfsplus/brec.c +++ b/fs/hfsplus/brec.c | |||
| @@ -131,13 +131,16 @@ skip: | |||
| 131 | hfs_bnode_write(node, entry, data_off + key_len, entry_len); | 131 | hfs_bnode_write(node, entry, data_off + key_len, entry_len); |
| 132 | hfs_bnode_dump(node); | 132 | hfs_bnode_dump(node); |
| 133 | 133 | ||
| 134 | if (new_node) { | 134 | /* |
| 135 | /* update parent key if we inserted a key | 135 | * update parent key if we inserted a key |
| 136 | * at the start of the first node | 136 | * at the start of the node and it is not the new node |
| 137 | */ | 137 | */ |
| 138 | if (!rec && new_node != node) | 138 | if (!rec && new_node != node) { |
| 139 | hfs_brec_update_parent(fd); | 139 | hfs_bnode_read_key(node, fd->search_key, data_off + size); |
| 140 | hfs_brec_update_parent(fd); | ||
| 141 | } | ||
| 140 | 142 | ||
| 143 | if (new_node) { | ||
| 141 | hfs_bnode_put(fd->bnode); | 144 | hfs_bnode_put(fd->bnode); |
| 142 | if (!new_node->parent) { | 145 | if (!new_node->parent) { |
| 143 | hfs_btree_inc_height(tree); | 146 | hfs_btree_inc_height(tree); |
| @@ -168,9 +171,6 @@ skip: | |||
| 168 | goto again; | 171 | goto again; |
| 169 | } | 172 | } |
| 170 | 173 | ||
| 171 | if (!rec) | ||
| 172 | hfs_brec_update_parent(fd); | ||
| 173 | |||
| 174 | return 0; | 174 | return 0; |
| 175 | } | 175 | } |
| 176 | 176 | ||
| @@ -370,6 +370,8 @@ again: | |||
| 370 | if (IS_ERR(parent)) | 370 | if (IS_ERR(parent)) |
| 371 | return PTR_ERR(parent); | 371 | return PTR_ERR(parent); |
| 372 | __hfs_brec_find(parent, fd, hfs_find_rec_by_key); | 372 | __hfs_brec_find(parent, fd, hfs_find_rec_by_key); |
| 373 | if (fd->record < 0) | ||
| 374 | return -ENOENT; | ||
| 373 | hfs_bnode_dump(parent); | 375 | hfs_bnode_dump(parent); |
| 374 | rec = fd->record; | 376 | rec = fd->record; |
| 375 | 377 | ||
diff --git a/fs/locks.c b/fs/locks.c index 528fedfda15e..40bc384728c0 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
| @@ -1388,9 +1388,8 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker) | |||
| 1388 | int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) | 1388 | int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) |
| 1389 | { | 1389 | { |
| 1390 | int error = 0; | 1390 | int error = 0; |
| 1391 | struct file_lock *new_fl; | ||
| 1392 | struct file_lock_context *ctx = inode->i_flctx; | 1391 | struct file_lock_context *ctx = inode->i_flctx; |
| 1393 | struct file_lock *fl; | 1392 | struct file_lock *new_fl, *fl, *tmp; |
| 1394 | unsigned long break_time; | 1393 | unsigned long break_time; |
| 1395 | int want_write = (mode & O_ACCMODE) != O_RDONLY; | 1394 | int want_write = (mode & O_ACCMODE) != O_RDONLY; |
| 1396 | LIST_HEAD(dispose); | 1395 | LIST_HEAD(dispose); |
| @@ -1420,7 +1419,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) | |||
| 1420 | break_time++; /* so that 0 means no break time */ | 1419 | break_time++; /* so that 0 means no break time */ |
| 1421 | } | 1420 | } |
| 1422 | 1421 | ||
| 1423 | list_for_each_entry(fl, &ctx->flc_lease, fl_list) { | 1422 | list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { |
| 1424 | if (!leases_conflict(fl, new_fl)) | 1423 | if (!leases_conflict(fl, new_fl)) |
| 1425 | continue; | 1424 | continue; |
| 1426 | if (want_write) { | 1425 | if (want_write) { |
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index cdbc78c72542..03d647bf195d 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c | |||
| @@ -137,7 +137,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, | |||
| 137 | seg->offset = iomap.offset; | 137 | seg->offset = iomap.offset; |
| 138 | seg->length = iomap.length; | 138 | seg->length = iomap.length; |
| 139 | 139 | ||
| 140 | dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es); | 140 | dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es); |
| 141 | return 0; | 141 | return 0; |
| 142 | 142 | ||
| 143 | out_error: | 143 | out_error: |
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c index 9da89fddab33..9aa2796da90d 100644 --- a/fs/nfsd/blocklayoutxdr.c +++ b/fs/nfsd/blocklayoutxdr.c | |||
| @@ -122,19 +122,19 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, | |||
| 122 | 122 | ||
| 123 | p = xdr_decode_hyper(p, &bex.foff); | 123 | p = xdr_decode_hyper(p, &bex.foff); |
| 124 | if (bex.foff & (block_size - 1)) { | 124 | if (bex.foff & (block_size - 1)) { |
| 125 | dprintk("%s: unaligned offset %lld\n", | 125 | dprintk("%s: unaligned offset 0x%llx\n", |
| 126 | __func__, bex.foff); | 126 | __func__, bex.foff); |
| 127 | goto fail; | 127 | goto fail; |
| 128 | } | 128 | } |
| 129 | p = xdr_decode_hyper(p, &bex.len); | 129 | p = xdr_decode_hyper(p, &bex.len); |
| 130 | if (bex.len & (block_size - 1)) { | 130 | if (bex.len & (block_size - 1)) { |
| 131 | dprintk("%s: unaligned length %lld\n", | 131 | dprintk("%s: unaligned length 0x%llx\n", |
| 132 | __func__, bex.foff); | 132 | __func__, bex.foff); |
| 133 | goto fail; | 133 | goto fail; |
| 134 | } | 134 | } |
| 135 | p = xdr_decode_hyper(p, &bex.soff); | 135 | p = xdr_decode_hyper(p, &bex.soff); |
| 136 | if (bex.soff & (block_size - 1)) { | 136 | if (bex.soff & (block_size - 1)) { |
| 137 | dprintk("%s: unaligned disk offset %lld\n", | 137 | dprintk("%s: unaligned disk offset 0x%llx\n", |
| 138 | __func__, bex.soff); | 138 | __func__, bex.soff); |
| 139 | goto fail; | 139 | goto fail; |
| 140 | } | 140 | } |
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 1028a0629543..6904213a4363 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c | |||
| @@ -118,7 +118,7 @@ void nfsd4_setup_layout_type(struct svc_export *exp) | |||
| 118 | { | 118 | { |
| 119 | struct super_block *sb = exp->ex_path.mnt->mnt_sb; | 119 | struct super_block *sb = exp->ex_path.mnt->mnt_sb; |
| 120 | 120 | ||
| 121 | if (exp->ex_flags & NFSEXP_NOPNFS) | 121 | if (!(exp->ex_flags & NFSEXP_PNFS)) |
| 122 | return; | 122 | return; |
| 123 | 123 | ||
| 124 | if (sb->s_export_op->get_uuid && | 124 | if (sb->s_export_op->get_uuid && |
| @@ -440,15 +440,14 @@ nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg, | |||
| 440 | list_move_tail(&lp->lo_perstate, reaplist); | 440 | list_move_tail(&lp->lo_perstate, reaplist); |
| 441 | return; | 441 | return; |
| 442 | } | 442 | } |
| 443 | end = seg->offset; | 443 | lo->offset = layout_end(seg); |
| 444 | } else { | 444 | } else { |
| 445 | /* retain the whole layout segment on a split. */ | 445 | /* retain the whole layout segment on a split. */ |
| 446 | if (layout_end(seg) < end) { | 446 | if (layout_end(seg) < end) { |
| 447 | dprintk("%s: split not supported\n", __func__); | 447 | dprintk("%s: split not supported\n", __func__); |
| 448 | return; | 448 | return; |
| 449 | } | 449 | } |
| 450 | 450 | end = seg->offset; | |
| 451 | lo->offset = layout_end(seg); | ||
| 452 | } | 451 | } |
| 453 | 452 | ||
| 454 | layout_update_len(lo, end); | 453 | layout_update_len(lo, end); |
| @@ -513,6 +512,9 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp, | |||
| 513 | 512 | ||
| 514 | spin_lock(&clp->cl_lock); | 513 | spin_lock(&clp->cl_lock); |
| 515 | list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) { | 514 | list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) { |
| 515 | if (ls->ls_layout_type != lrp->lr_layout_type) | ||
| 516 | continue; | ||
| 517 | |||
| 516 | if (lrp->lr_return_type == RETURN_FSID && | 518 | if (lrp->lr_return_type == RETURN_FSID && |
| 517 | !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle, | 519 | !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle, |
| 518 | &cstate->current_fh.fh_handle)) | 520 | &cstate->current_fh.fh_handle)) |
| @@ -587,6 +589,8 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) | |||
| 587 | 589 | ||
| 588 | rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); | 590 | rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); |
| 589 | 591 | ||
| 592 | trace_layout_recall_fail(&ls->ls_stid.sc_stateid); | ||
| 593 | |||
| 590 | printk(KERN_WARNING | 594 | printk(KERN_WARNING |
| 591 | "nfsd: client %s failed to respond to layout recall. " | 595 | "nfsd: client %s failed to respond to layout recall. " |
| 592 | " Fencing..\n", addr_str); | 596 | " Fencing..\n", addr_str); |
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index d30bea8d0277..92b9d97aff4f 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
| @@ -1237,8 +1237,8 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp, | |||
| 1237 | nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp); | 1237 | nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp); |
| 1238 | 1238 | ||
| 1239 | gdp->gd_notify_types &= ops->notify_types; | 1239 | gdp->gd_notify_types &= ops->notify_types; |
| 1240 | exp_put(exp); | ||
| 1241 | out: | 1240 | out: |
| 1241 | exp_put(exp); | ||
| 1242 | return nfserr; | 1242 | return nfserr; |
| 1243 | } | 1243 | } |
| 1244 | 1244 | ||
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d2f2c37dc2db..8ba1d888f1e6 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -3221,7 +3221,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, | |||
| 3221 | } else | 3221 | } else |
| 3222 | nfs4_free_openowner(&oo->oo_owner); | 3222 | nfs4_free_openowner(&oo->oo_owner); |
| 3223 | spin_unlock(&clp->cl_lock); | 3223 | spin_unlock(&clp->cl_lock); |
| 3224 | return oo; | 3224 | return ret; |
| 3225 | } | 3225 | } |
| 3226 | 3226 | ||
| 3227 | static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { | 3227 | static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { |
| @@ -5062,7 +5062,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, | |||
| 5062 | } else | 5062 | } else |
| 5063 | nfs4_free_lockowner(&lo->lo_owner); | 5063 | nfs4_free_lockowner(&lo->lo_owner); |
| 5064 | spin_unlock(&clp->cl_lock); | 5064 | spin_unlock(&clp->cl_lock); |
| 5065 | return lo; | 5065 | return ret; |
| 5066 | } | 5066 | } |
| 5067 | 5067 | ||
| 5068 | static void | 5068 | static void |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index df5e66caf100..5fb7e78169a6 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
| @@ -1562,7 +1562,11 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp, | |||
| 1562 | p = xdr_decode_hyper(p, &lgp->lg_seg.offset); | 1562 | p = xdr_decode_hyper(p, &lgp->lg_seg.offset); |
| 1563 | p = xdr_decode_hyper(p, &lgp->lg_seg.length); | 1563 | p = xdr_decode_hyper(p, &lgp->lg_seg.length); |
| 1564 | p = xdr_decode_hyper(p, &lgp->lg_minlength); | 1564 | p = xdr_decode_hyper(p, &lgp->lg_minlength); |
| 1565 | nfsd4_decode_stateid(argp, &lgp->lg_sid); | 1565 | |
| 1566 | status = nfsd4_decode_stateid(argp, &lgp->lg_sid); | ||
| 1567 | if (status) | ||
| 1568 | return status; | ||
| 1569 | |||
| 1566 | READ_BUF(4); | 1570 | READ_BUF(4); |
| 1567 | lgp->lg_maxcount = be32_to_cpup(p++); | 1571 | lgp->lg_maxcount = be32_to_cpup(p++); |
| 1568 | 1572 | ||
| @@ -1580,7 +1584,11 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp, | |||
| 1580 | p = xdr_decode_hyper(p, &lcp->lc_seg.offset); | 1584 | p = xdr_decode_hyper(p, &lcp->lc_seg.offset); |
| 1581 | p = xdr_decode_hyper(p, &lcp->lc_seg.length); | 1585 | p = xdr_decode_hyper(p, &lcp->lc_seg.length); |
| 1582 | lcp->lc_reclaim = be32_to_cpup(p++); | 1586 | lcp->lc_reclaim = be32_to_cpup(p++); |
| 1583 | nfsd4_decode_stateid(argp, &lcp->lc_sid); | 1587 | |
| 1588 | status = nfsd4_decode_stateid(argp, &lcp->lc_sid); | ||
| 1589 | if (status) | ||
| 1590 | return status; | ||
| 1591 | |||
| 1584 | READ_BUF(4); | 1592 | READ_BUF(4); |
| 1585 | lcp->lc_newoffset = be32_to_cpup(p++); | 1593 | lcp->lc_newoffset = be32_to_cpup(p++); |
| 1586 | if (lcp->lc_newoffset) { | 1594 | if (lcp->lc_newoffset) { |
| @@ -1628,7 +1636,11 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp, | |||
| 1628 | READ_BUF(16); | 1636 | READ_BUF(16); |
| 1629 | p = xdr_decode_hyper(p, &lrp->lr_seg.offset); | 1637 | p = xdr_decode_hyper(p, &lrp->lr_seg.offset); |
| 1630 | p = xdr_decode_hyper(p, &lrp->lr_seg.length); | 1638 | p = xdr_decode_hyper(p, &lrp->lr_seg.length); |
| 1631 | nfsd4_decode_stateid(argp, &lrp->lr_sid); | 1639 | |
| 1640 | status = nfsd4_decode_stateid(argp, &lrp->lr_sid); | ||
| 1641 | if (status) | ||
| 1642 | return status; | ||
| 1643 | |||
| 1632 | READ_BUF(4); | 1644 | READ_BUF(4); |
| 1633 | lrp->lrf_body_len = be32_to_cpup(p++); | 1645 | lrp->lrf_body_len = be32_to_cpup(p++); |
| 1634 | if (lrp->lrf_body_len > 0) { | 1646 | if (lrp->lrf_body_len > 0) { |
| @@ -4123,7 +4135,7 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr, | |||
| 4123 | return nfserr_resource; | 4135 | return nfserr_resource; |
| 4124 | *p++ = cpu_to_be32(lrp->lrs_present); | 4136 | *p++ = cpu_to_be32(lrp->lrs_present); |
| 4125 | if (lrp->lrs_present) | 4137 | if (lrp->lrs_present) |
| 4126 | nfsd4_encode_stateid(xdr, &lrp->lr_sid); | 4138 | return nfsd4_encode_stateid(xdr, &lrp->lr_sid); |
| 4127 | return nfs_ok; | 4139 | return nfs_ok; |
| 4128 | } | 4140 | } |
| 4129 | #endif /* CONFIG_NFSD_PNFS */ | 4141 | #endif /* CONFIG_NFSD_PNFS */ |
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 83a9694ec485..46ec934f5dee 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c | |||
| @@ -165,13 +165,17 @@ int nfsd_reply_cache_init(void) | |||
| 165 | { | 165 | { |
| 166 | unsigned int hashsize; | 166 | unsigned int hashsize; |
| 167 | unsigned int i; | 167 | unsigned int i; |
| 168 | int status = 0; | ||
| 168 | 169 | ||
| 169 | max_drc_entries = nfsd_cache_size_limit(); | 170 | max_drc_entries = nfsd_cache_size_limit(); |
| 170 | atomic_set(&num_drc_entries, 0); | 171 | atomic_set(&num_drc_entries, 0); |
| 171 | hashsize = nfsd_hashsize(max_drc_entries); | 172 | hashsize = nfsd_hashsize(max_drc_entries); |
| 172 | maskbits = ilog2(hashsize); | 173 | maskbits = ilog2(hashsize); |
| 173 | 174 | ||
| 174 | register_shrinker(&nfsd_reply_cache_shrinker); | 175 | status = register_shrinker(&nfsd_reply_cache_shrinker); |
| 176 | if (status) | ||
| 177 | return status; | ||
| 178 | |||
| 175 | drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), | 179 | drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), |
| 176 | 0, 0, NULL); | 180 | 0, 0, NULL); |
| 177 | if (!drc_slab) | 181 | if (!drc_slab) |
diff --git a/include/linux/fs.h b/include/linux/fs.h index b4d71b5e1ff2..f4131e8ead74 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -604,6 +604,7 @@ struct inode { | |||
| 604 | struct mutex i_mutex; | 604 | struct mutex i_mutex; |
| 605 | 605 | ||
| 606 | unsigned long dirtied_when; /* jiffies of first dirtying */ | 606 | unsigned long dirtied_when; /* jiffies of first dirtying */ |
| 607 | unsigned long dirtied_time_when; | ||
| 607 | 608 | ||
| 608 | struct hlist_node i_hash; | 609 | struct hlist_node i_hash; |
| 609 | struct list_head i_wb_list; /* backing dev IO list */ | 610 | struct list_head i_wb_list; /* backing dev IO list */ |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 781974afff9f..ffbc034c8810 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
| @@ -126,8 +126,23 @@ | |||
| 126 | #define GICR_PROPBASER_WaWb (5U << 7) | 126 | #define GICR_PROPBASER_WaWb (5U << 7) |
| 127 | #define GICR_PROPBASER_RaWaWt (6U << 7) | 127 | #define GICR_PROPBASER_RaWaWt (6U << 7) |
| 128 | #define GICR_PROPBASER_RaWaWb (7U << 7) | 128 | #define GICR_PROPBASER_RaWaWb (7U << 7) |
| 129 | #define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7) | ||
| 129 | #define GICR_PROPBASER_IDBITS_MASK (0x1f) | 130 | #define GICR_PROPBASER_IDBITS_MASK (0x1f) |
| 130 | 131 | ||
| 132 | #define GICR_PENDBASER_NonShareable (0U << 10) | ||
| 133 | #define GICR_PENDBASER_InnerShareable (1U << 10) | ||
| 134 | #define GICR_PENDBASER_OuterShareable (2U << 10) | ||
| 135 | #define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10) | ||
| 136 | #define GICR_PENDBASER_nCnB (0U << 7) | ||
| 137 | #define GICR_PENDBASER_nC (1U << 7) | ||
| 138 | #define GICR_PENDBASER_RaWt (2U << 7) | ||
| 139 | #define GICR_PENDBASER_RaWb (3U << 7) | ||
| 140 | #define GICR_PENDBASER_WaWt (4U << 7) | ||
| 141 | #define GICR_PENDBASER_WaWb (5U << 7) | ||
| 142 | #define GICR_PENDBASER_RaWaWt (6U << 7) | ||
| 143 | #define GICR_PENDBASER_RaWaWb (7U << 7) | ||
| 144 | #define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7) | ||
| 145 | |||
| 131 | /* | 146 | /* |
| 132 | * Re-Distributor registers, offsets from SGI_base | 147 | * Re-Distributor registers, offsets from SGI_base |
| 133 | */ | 148 | */ |
| @@ -182,6 +197,7 @@ | |||
| 182 | #define GITS_CBASER_WaWb (5UL << 59) | 197 | #define GITS_CBASER_WaWb (5UL << 59) |
| 183 | #define GITS_CBASER_RaWaWt (6UL << 59) | 198 | #define GITS_CBASER_RaWaWt (6UL << 59) |
| 184 | #define GITS_CBASER_RaWaWb (7UL << 59) | 199 | #define GITS_CBASER_RaWaWb (7UL << 59) |
| 200 | #define GITS_CBASER_CACHEABILITY_MASK (7UL << 59) | ||
| 185 | #define GITS_CBASER_NonShareable (0UL << 10) | 201 | #define GITS_CBASER_NonShareable (0UL << 10) |
| 186 | #define GITS_CBASER_InnerShareable (1UL << 10) | 202 | #define GITS_CBASER_InnerShareable (1UL << 10) |
| 187 | #define GITS_CBASER_OuterShareable (2UL << 10) | 203 | #define GITS_CBASER_OuterShareable (2UL << 10) |
| @@ -198,6 +214,7 @@ | |||
| 198 | #define GITS_BASER_WaWb (5UL << 59) | 214 | #define GITS_BASER_WaWb (5UL << 59) |
| 199 | #define GITS_BASER_RaWaWt (6UL << 59) | 215 | #define GITS_BASER_RaWaWt (6UL << 59) |
| 200 | #define GITS_BASER_RaWaWb (7UL << 59) | 216 | #define GITS_BASER_RaWaWb (7UL << 59) |
| 217 | #define GITS_BASER_CACHEABILITY_MASK (7UL << 59) | ||
| 201 | #define GITS_BASER_TYPE_SHIFT (56) | 218 | #define GITS_BASER_TYPE_SHIFT (56) |
| 202 | #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) | 219 | #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) |
| 203 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) | 220 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) |
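The new *_CACHEABILITY_MASK definitions exist so callers can clear whatever cacheability attributes firmware programmed before forcing a known-good setting. A sketch of typical use; the function and variable names are illustrative, not the actual ITS driver code:

static void demo_fixup_propbaser(void __iomem *rbase)
{
	u64 val = readq_relaxed(rbase + GICR_PROPBASER);

	/* drop the programmed cacheability bits, then request WaWb */
	val &= ~GICR_PROPBASER_CACHEABILITY_MASK;
	val |= GICR_PROPBASER_WaWb;

	writeq_relaxed(val, rbase + GICR_PROPBASER);
}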
diff --git a/include/linux/lcm.h b/include/linux/lcm.h index 7bf01d779b45..1ce79a7f1daa 100644 --- a/include/linux/lcm.h +++ b/include/linux/lcm.h | |||
| @@ -4,5 +4,6 @@ | |||
| 4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
| 5 | 5 | ||
| 6 | unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; | 6 | unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; |
| 7 | unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__; | ||
| 7 | 8 | ||
| 8 | #endif /* _LCM_H */ | 9 | #endif /* _LCM_H */ |
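The added lcm_not_zero() declaration is aimed at callers for whom a zero result (either input being zero) is awkward, e.g. when stacking block-queue limits. A plausible, self-contained user-space sketch of the semantics, which may differ in detail from the kernel's lib/lcm.c:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
	if (a && b)
		return (a / gcd(a, b)) * b;
	return 0;
}

/* Like lcm(), but never returns 0: fall back to whichever input is set. */
static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
	unsigned long l = lcm(a, b);

	return l ? l : (a ? a : b);
}

int main(void)
{
	printf("%lu %lu\n", lcm_not_zero(4, 6), lcm_not_zero(0, 512));	/* 12 512 */
	return 0;
}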
diff --git a/include/linux/libata.h b/include/linux/libata.h index fc03efa64ffe..6b08cc106c21 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -232,6 +232,7 @@ enum { | |||
| 232 | * led */ | 232 | * led */ |
| 233 | ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ | 233 | ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ |
| 234 | ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ | 234 | ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ |
| 235 | ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */ | ||
| 235 | 236 | ||
| 236 | /* bits 24:31 of ap->flags are reserved for LLD specific flags */ | 237 | /* bits 24:31 of ap->flags are reserved for LLD specific flags */ |
| 237 | 238 | ||
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index fb0390a1a498..ee7b1ce7a6f8 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h | |||
| @@ -2999,6 +2999,9 @@ enum usb_irq_events { | |||
| 2999 | #define PALMAS_GPADC_TRIM15 0x0E | 2999 | #define PALMAS_GPADC_TRIM15 0x0E |
| 3000 | #define PALMAS_GPADC_TRIM16 0x0F | 3000 | #define PALMAS_GPADC_TRIM16 0x0F |
| 3001 | 3001 | ||
| 3002 | /* TPS659038 regen2_ctrl offset is different from palmas */ | ||

| 3003 | #define TPS659038_REGEN2_CTRL 0x12 | ||
| 3004 | |||
| 3002 | /* TPS65917 Interrupt registers */ | 3005 | /* TPS65917 Interrupt registers */ |
| 3003 | 3006 | ||
| 3004 | /* Registers for function INTERRUPT */ | 3007 | /* Registers for function INTERRUPT */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index dcf6ec27739b..278738873703 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -2185,6 +2185,12 @@ void netdev_freemem(struct net_device *dev); | |||
| 2185 | void synchronize_net(void); | 2185 | void synchronize_net(void); |
| 2186 | int init_dummy_netdev(struct net_device *dev); | 2186 | int init_dummy_netdev(struct net_device *dev); |
| 2187 | 2187 | ||
| 2188 | DECLARE_PER_CPU(int, xmit_recursion); | ||
| 2189 | static inline int dev_recursion_level(void) | ||
| 2190 | { | ||
| 2191 | return this_cpu_read(xmit_recursion); | ||
| 2192 | } | ||
| 2193 | |||
| 2188 | struct net_device *dev_get_by_index(struct net *net, int ifindex); | 2194 | struct net_device *dev_get_by_index(struct net *net, int ifindex); |
| 2189 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); | 2195 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); |
| 2190 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); | 2196 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); |
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index d4ad5b5a02bb..045f709cb89b 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
| @@ -316,7 +316,7 @@ struct regulator_desc { | |||
| 316 | * @driver_data: private regulator data | 316 | * @driver_data: private regulator data |
| 317 | * @of_node: OpenFirmware node to parse for device tree bindings (may be | 317 | * @of_node: OpenFirmware node to parse for device tree bindings (may be |
| 318 | * NULL). | 318 | * NULL). |
| 319 | * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is | 319 | * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is |
| 320 | * insufficient. | 320 | * insufficient. |
| 321 | * @ena_gpio_initialized: GPIO controlling regulator enable was properly | 321 | * @ena_gpio_initialized: GPIO controlling regulator enable was properly |
| 322 | * initialized, meaning that >= 0 is a valid gpio | 322 | * initialized, meaning that >= 0 is a valid gpio |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6d77432e14ff..a419b65770d6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1625,11 +1625,11 @@ struct task_struct { | |||
| 1625 | 1625 | ||
| 1626 | /* | 1626 | /* |
| 1627 | * numa_faults_locality tracks if faults recorded during the last | 1627 | * numa_faults_locality tracks if faults recorded during the last |
| 1628 | * scan window were remote/local. The task scan period is adapted | 1628 | * scan window were remote/local or failed to migrate. The task scan |
| 1629 | * based on the locality of the faults with different weights | 1629 | * period is adapted based on the locality of the faults with different |
| 1630 | * depending on whether they were shared or private faults | 1630 | * weights depending on whether they were shared or private faults |
| 1631 | */ | 1631 | */ |
| 1632 | unsigned long numa_faults_locality[2]; | 1632 | unsigned long numa_faults_locality[3]; |
| 1633 | 1633 | ||
| 1634 | unsigned long numa_pages_migrated; | 1634 | unsigned long numa_pages_migrated; |
| 1635 | #endif /* CONFIG_NUMA_BALANCING */ | 1635 | #endif /* CONFIG_NUMA_BALANCING */ |
| @@ -1719,6 +1719,7 @@ struct task_struct { | |||
| 1719 | #define TNF_NO_GROUP 0x02 | 1719 | #define TNF_NO_GROUP 0x02 |
| 1720 | #define TNF_SHARED 0x04 | 1720 | #define TNF_SHARED 0x04 |
| 1721 | #define TNF_FAULT_LOCAL 0x08 | 1721 | #define TNF_FAULT_LOCAL 0x08 |
| 1722 | #define TNF_MIGRATE_FAIL 0x10 | ||
| 1722 | 1723 | ||
| 1723 | #ifdef CONFIG_NUMA_BALANCING | 1724 | #ifdef CONFIG_NUMA_BALANCING |
| 1724 | extern void task_numa_fault(int last_node, int node, int pages, int flags); | 1725 | extern void task_numa_fault(int last_node, int node, int pages, int flags); |
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index c57d8ea0716c..59a7889e15db 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h | |||
| @@ -60,17 +60,17 @@ struct rpc_xprt; | |||
| 60 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 60 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 61 | void rpc_register_sysctl(void); | 61 | void rpc_register_sysctl(void); |
| 62 | void rpc_unregister_sysctl(void); | 62 | void rpc_unregister_sysctl(void); |
| 63 | int sunrpc_debugfs_init(void); | 63 | void sunrpc_debugfs_init(void); |
| 64 | void sunrpc_debugfs_exit(void); | 64 | void sunrpc_debugfs_exit(void); |
| 65 | int rpc_clnt_debugfs_register(struct rpc_clnt *); | 65 | void rpc_clnt_debugfs_register(struct rpc_clnt *); |
| 66 | void rpc_clnt_debugfs_unregister(struct rpc_clnt *); | 66 | void rpc_clnt_debugfs_unregister(struct rpc_clnt *); |
| 67 | int rpc_xprt_debugfs_register(struct rpc_xprt *); | 67 | void rpc_xprt_debugfs_register(struct rpc_xprt *); |
| 68 | void rpc_xprt_debugfs_unregister(struct rpc_xprt *); | 68 | void rpc_xprt_debugfs_unregister(struct rpc_xprt *); |
| 69 | #else | 69 | #else |
| 70 | static inline int | 70 | static inline void |
| 71 | sunrpc_debugfs_init(void) | 71 | sunrpc_debugfs_init(void) |
| 72 | { | 72 | { |
| 73 | return 0; | 73 | return; |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | static inline void | 76 | static inline void |
| @@ -79,10 +79,10 @@ sunrpc_debugfs_exit(void) | |||
| 79 | return; | 79 | return; |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | static inline int | 82 | static inline void |
| 83 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | 83 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) |
| 84 | { | 84 | { |
| 85 | return 0; | 85 | return; |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | static inline void | 88 | static inline void |
| @@ -91,10 +91,10 @@ rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) | |||
| 91 | return; | 91 | return; |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | static inline int | 94 | static inline void |
| 95 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) | 95 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) |
| 96 | { | 96 | { |
| 97 | return 0; | 97 | return; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | static inline void | 100 | static inline void |
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index d9a4905e01d0..6e0ce8c7b8cb 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
| @@ -227,9 +227,23 @@ struct skb_data { /* skb->cb is one of these */ | |||
| 227 | struct urb *urb; | 227 | struct urb *urb; |
| 228 | struct usbnet *dev; | 228 | struct usbnet *dev; |
| 229 | enum skb_state state; | 229 | enum skb_state state; |
| 230 | size_t length; | 230 | long length; |
| 231 | unsigned long packets; | ||
| 231 | }; | 232 | }; |
| 232 | 233 | ||
| 234 | /* Drivers that set FLAG_MULTI_PACKET must call this in their | ||
| 235 | * tx_fixup method before returning an skb. | ||
| 236 | */ | ||
| 237 | static inline void | ||
| 238 | usbnet_set_skb_tx_stats(struct sk_buff *skb, | ||
| 239 | unsigned long packets, long bytes_delta) | ||
| 240 | { | ||
| 241 | struct skb_data *entry = (struct skb_data *) skb->cb; | ||
| 242 | |||
| 243 | entry->packets = packets; | ||
| 244 | entry->length = bytes_delta; | ||
| 245 | } | ||
| 246 | |||
| 233 | extern int usbnet_open(struct net_device *net); | 247 | extern int usbnet_open(struct net_device *net); |
| 234 | extern int usbnet_stop(struct net_device *net); | 248 | extern int usbnet_stop(struct net_device *net); |
| 235 | extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, | 249 | extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, |
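Per the comment added above, FLAG_MULTI_PACKET drivers call usbnet_set_skb_tx_stats() from their tx_fixup method. The sketch below is hypothetical: the function name, the packet count and the way the byte delta is computed are assumptions for illustration, not how any particular driver does it:

static struct sk_buff *demo_tx_fixup(struct usbnet *dev,
				     struct sk_buff *skb, gfp_t flags)
{
	long payload = skb->len;	/* bytes handed in by the stack */
	unsigned long packets = 1;	/* frames aggregated into this skb */

	/* ... driver-specific framing: headers, padding, aggregation ... */

	/* record how many packets this skb carries and by how much the
	 * framing changed its length, so the core can account tx stats;
	 * the sign convention (payload minus wire length) is an assumption */
	usbnet_set_skb_tx_stats(skb, packets, payload - skb->len);
	return skb;
}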
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 00048339c23e..b2dd371ec0ca 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -130,6 +130,7 @@ extern int vm_dirty_ratio; | |||
| 130 | extern unsigned long vm_dirty_bytes; | 130 | extern unsigned long vm_dirty_bytes; |
| 131 | extern unsigned int dirty_writeback_interval; | 131 | extern unsigned int dirty_writeback_interval; |
| 132 | extern unsigned int dirty_expire_interval; | 132 | extern unsigned int dirty_expire_interval; |
| 133 | extern unsigned int dirtytime_expire_interval; | ||
| 133 | extern int vm_highmem_is_dirtyable; | 134 | extern int vm_highmem_is_dirtyable; |
| 134 | extern int block_dump; | 135 | extern int block_dump; |
| 135 | extern int laptop_mode; | 136 | extern int laptop_mode; |
| @@ -146,6 +147,8 @@ extern int dirty_ratio_handler(struct ctl_table *table, int write, | |||
| 146 | extern int dirty_bytes_handler(struct ctl_table *table, int write, | 147 | extern int dirty_bytes_handler(struct ctl_table *table, int write, |
| 147 | void __user *buffer, size_t *lenp, | 148 | void __user *buffer, size_t *lenp, |
| 148 | loff_t *ppos); | 149 | loff_t *ppos); |
| 150 | int dirtytime_interval_handler(struct ctl_table *table, int write, | ||
| 151 | void __user *buffer, size_t *lenp, loff_t *ppos); | ||
| 149 | 152 | ||
| 150 | struct ctl_table; | 153 | struct ctl_table; |
| 151 | int dirty_writeback_centisecs_handler(struct ctl_table *, int, | 154 | int dirty_writeback_centisecs_handler(struct ctl_table *, int, |
diff --git a/include/net/ip.h b/include/net/ip.h index 025c61c0dffb..6cc1eafb153a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
| @@ -453,22 +453,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk) | |||
| 453 | 453 | ||
| 454 | #endif | 454 | #endif |
| 455 | 455 | ||
| 456 | static inline int sk_mc_loop(struct sock *sk) | ||
| 457 | { | ||
| 458 | if (!sk) | ||
| 459 | return 1; | ||
| 460 | switch (sk->sk_family) { | ||
| 461 | case AF_INET: | ||
| 462 | return inet_sk(sk)->mc_loop; | ||
| 463 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 464 | case AF_INET6: | ||
| 465 | return inet6_sk(sk)->mc_loop; | ||
| 466 | #endif | ||
| 467 | } | ||
| 468 | WARN_ON(1); | ||
| 469 | return 1; | ||
| 470 | } | ||
| 471 | |||
| 472 | bool ip_call_ra_chain(struct sk_buff *skb); | 456 | bool ip_call_ra_chain(struct sk_buff *skb); |
| 473 | 457 | ||
| 474 | /* | 458 | /* |
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 1d09b46c1e48..eda131d179d9 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
| @@ -174,7 +174,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); | |||
| 174 | 174 | ||
| 175 | static inline int ip6_skb_dst_mtu(struct sk_buff *skb) | 175 | static inline int ip6_skb_dst_mtu(struct sk_buff *skb) |
| 176 | { | 176 | { |
| 177 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; | 177 | struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? |
| 178 | inet6_sk(skb->sk) : NULL; | ||
| 178 | 179 | ||
| 179 | return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? | 180 | return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? |
| 180 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); | 181 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); |
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 534e1f2ac4fc..57639fca223a 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h | |||
| @@ -79,6 +79,16 @@ void nf_log_packet(struct net *net, | |||
| 79 | const struct nf_loginfo *li, | 79 | const struct nf_loginfo *li, |
| 80 | const char *fmt, ...); | 80 | const char *fmt, ...); |
| 81 | 81 | ||
| 82 | __printf(8, 9) | ||
| 83 | void nf_log_trace(struct net *net, | ||
| 84 | u_int8_t pf, | ||
| 85 | unsigned int hooknum, | ||
| 86 | const struct sk_buff *skb, | ||
| 87 | const struct net_device *in, | ||
| 88 | const struct net_device *out, | ||
| 89 | const struct nf_loginfo *li, | ||
| 90 | const char *fmt, ...); | ||
| 91 | |||
| 82 | struct nf_log_buf; | 92 | struct nf_log_buf; |
| 83 | 93 | ||
| 84 | struct nf_log_buf *nf_log_buf_open(void); | 94 | struct nf_log_buf *nf_log_buf_open(void); |
diff --git a/include/net/sock.h b/include/net/sock.h index ab186b1d31ff..e4079c28e6b8 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -1762,6 +1762,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); | |||
| 1762 | 1762 | ||
| 1763 | struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); | 1763 | struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); |
| 1764 | 1764 | ||
| 1765 | bool sk_mc_loop(struct sock *sk); | ||
| 1766 | |||
| 1765 | static inline bool sk_can_gso(const struct sock *sk) | 1767 | static inline bool sk_can_gso(const struct sock *sk) |
| 1766 | { | 1768 | { |
| 1767 | return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); | 1769 | return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); |
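sk_mc_loop() moves from an inline in include/net/ip.h (removed earlier in this series) to an out-of-line function declared here. The out-of-line body is not part of these hunks; a plausible definition, mirroring the removed inline but returning bool, looks like:

bool sk_mc_loop(struct sock *sk)
{
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}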
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h index 23d561512f64..22317d2b52ab 100644 --- a/include/trace/events/regmap.h +++ b/include/trace/events/regmap.h | |||
| @@ -7,27 +7,26 @@ | |||
| 7 | #include <linux/ktime.h> | 7 | #include <linux/ktime.h> |
| 8 | #include <linux/tracepoint.h> | 8 | #include <linux/tracepoint.h> |
| 9 | 9 | ||
| 10 | struct device; | 10 | #include "../../../drivers/base/regmap/internal.h" |
| 11 | struct regmap; | ||
| 12 | 11 | ||
| 13 | /* | 12 | /* |
| 14 | * Log register events | 13 | * Log register events |
| 15 | */ | 14 | */ |
| 16 | DECLARE_EVENT_CLASS(regmap_reg, | 15 | DECLARE_EVENT_CLASS(regmap_reg, |
| 17 | 16 | ||
| 18 | TP_PROTO(struct device *dev, unsigned int reg, | 17 | TP_PROTO(struct regmap *map, unsigned int reg, |
| 19 | unsigned int val), | 18 | unsigned int val), |
| 20 | 19 | ||
| 21 | TP_ARGS(dev, reg, val), | 20 | TP_ARGS(map, reg, val), |
| 22 | 21 | ||
| 23 | TP_STRUCT__entry( | 22 | TP_STRUCT__entry( |
| 24 | __string( name, dev_name(dev) ) | 23 | __string( name, regmap_name(map) ) |
| 25 | __field( unsigned int, reg ) | 24 | __field( unsigned int, reg ) |
| 26 | __field( unsigned int, val ) | 25 | __field( unsigned int, val ) |
| 27 | ), | 26 | ), |
| 28 | 27 | ||
| 29 | TP_fast_assign( | 28 | TP_fast_assign( |
| 30 | __assign_str(name, dev_name(dev)); | 29 | __assign_str(name, regmap_name(map)); |
| 31 | __entry->reg = reg; | 30 | __entry->reg = reg; |
| 32 | __entry->val = val; | 31 | __entry->val = val; |
| 33 | ), | 32 | ), |
| @@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg, | |||
| 39 | 38 | ||
| 40 | DEFINE_EVENT(regmap_reg, regmap_reg_write, | 39 | DEFINE_EVENT(regmap_reg, regmap_reg_write, |
| 41 | 40 | ||
| 42 | TP_PROTO(struct device *dev, unsigned int reg, | 41 | TP_PROTO(struct regmap *map, unsigned int reg, |
| 43 | unsigned int val), | 42 | unsigned int val), |
| 44 | 43 | ||
| 45 | TP_ARGS(dev, reg, val) | 44 | TP_ARGS(map, reg, val) |
| 46 | 45 | ||
| 47 | ); | 46 | ); |
| 48 | 47 | ||
| 49 | DEFINE_EVENT(regmap_reg, regmap_reg_read, | 48 | DEFINE_EVENT(regmap_reg, regmap_reg_read, |
| 50 | 49 | ||
| 51 | TP_PROTO(struct device *dev, unsigned int reg, | 50 | TP_PROTO(struct regmap *map, unsigned int reg, |
| 52 | unsigned int val), | 51 | unsigned int val), |
| 53 | 52 | ||
| 54 | TP_ARGS(dev, reg, val) | 53 | TP_ARGS(map, reg, val) |
| 55 | 54 | ||
| 56 | ); | 55 | ); |
| 57 | 56 | ||
| 58 | DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, | 57 | DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, |
| 59 | 58 | ||
| 60 | TP_PROTO(struct device *dev, unsigned int reg, | 59 | TP_PROTO(struct regmap *map, unsigned int reg, |
| 61 | unsigned int val), | 60 | unsigned int val), |
| 62 | 61 | ||
| 63 | TP_ARGS(dev, reg, val) | 62 | TP_ARGS(map, reg, val) |
| 64 | 63 | ||
| 65 | ); | 64 | ); |
| 66 | 65 | ||
| 67 | DECLARE_EVENT_CLASS(regmap_block, | 66 | DECLARE_EVENT_CLASS(regmap_block, |
| 68 | 67 | ||
| 69 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 68 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
| 70 | 69 | ||
| 71 | TP_ARGS(dev, reg, count), | 70 | TP_ARGS(map, reg, count), |
| 72 | 71 | ||
| 73 | TP_STRUCT__entry( | 72 | TP_STRUCT__entry( |
| 74 | __string( name, dev_name(dev) ) | 73 | __string( name, regmap_name(map) ) |
| 75 | __field( unsigned int, reg ) | 74 | __field( unsigned int, reg ) |
| 76 | __field( int, count ) | 75 | __field( int, count ) |
| 77 | ), | 76 | ), |
| 78 | 77 | ||
| 79 | TP_fast_assign( | 78 | TP_fast_assign( |
| 80 | __assign_str(name, dev_name(dev)); | 79 | __assign_str(name, regmap_name(map)); |
| 81 | __entry->reg = reg; | 80 | __entry->reg = reg; |
| 82 | __entry->count = count; | 81 | __entry->count = count; |
| 83 | ), | 82 | ), |
| @@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block, | |||
| 89 | 88 | ||
| 90 | DEFINE_EVENT(regmap_block, regmap_hw_read_start, | 89 | DEFINE_EVENT(regmap_block, regmap_hw_read_start, |
| 91 | 90 | ||
| 92 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 91 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
| 93 | 92 | ||
| 94 | TP_ARGS(dev, reg, count) | 93 | TP_ARGS(map, reg, count) |
| 95 | ); | 94 | ); |
| 96 | 95 | ||
| 97 | DEFINE_EVENT(regmap_block, regmap_hw_read_done, | 96 | DEFINE_EVENT(regmap_block, regmap_hw_read_done, |
| 98 | 97 | ||
| 99 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 98 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
| 100 | 99 | ||
| 101 | TP_ARGS(dev, reg, count) | 100 | TP_ARGS(map, reg, count) |
| 102 | ); | 101 | ); |
| 103 | 102 | ||
| 104 | DEFINE_EVENT(regmap_block, regmap_hw_write_start, | 103 | DEFINE_EVENT(regmap_block, regmap_hw_write_start, |
| 105 | 104 | ||
| 106 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 105 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
| 107 | 106 | ||
| 108 | TP_ARGS(dev, reg, count) | 107 | TP_ARGS(map, reg, count) |
| 109 | ); | 108 | ); |
| 110 | 109 | ||
| 111 | DEFINE_EVENT(regmap_block, regmap_hw_write_done, | 110 | DEFINE_EVENT(regmap_block, regmap_hw_write_done, |
| 112 | 111 | ||
| 113 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 112 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
| 114 | 113 | ||
| 115 | TP_ARGS(dev, reg, count) | 114 | TP_ARGS(map, reg, count) |
| 116 | ); | 115 | ); |
| 117 | 116 | ||
| 118 | TRACE_EVENT(regcache_sync, | 117 | TRACE_EVENT(regcache_sync, |
| 119 | 118 | ||
| 120 | TP_PROTO(struct device *dev, const char *type, | 119 | TP_PROTO(struct regmap *map, const char *type, |
| 121 | const char *status), | 120 | const char *status), |
| 122 | 121 | ||
| 123 | TP_ARGS(dev, type, status), | 122 | TP_ARGS(map, type, status), |
| 124 | 123 | ||
| 125 | TP_STRUCT__entry( | 124 | TP_STRUCT__entry( |
| 126 | __string( name, dev_name(dev) ) | 125 | __string( name, regmap_name(map) ) |
| 127 | __string( status, status ) | 126 | __string( status, status ) |
| 128 | __string( type, type ) | 127 | __string( type, type ) |
| 129 | __field( int, type ) | 128 | __field( int, type ) |
| 130 | ), | 129 | ), |
| 131 | 130 | ||
| 132 | TP_fast_assign( | 131 | TP_fast_assign( |
| 133 | __assign_str(name, dev_name(dev)); | 132 | __assign_str(name, regmap_name(map)); |
| 134 | __assign_str(status, status); | 133 | __assign_str(status, status); |
| 135 | __assign_str(type, type); | 134 | __assign_str(type, type); |
| 136 | ), | 135 | ), |
| @@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync, | |||
| 141 | 140 | ||
| 142 | DECLARE_EVENT_CLASS(regmap_bool, | 141 | DECLARE_EVENT_CLASS(regmap_bool, |
| 143 | 142 | ||
| 144 | TP_PROTO(struct device *dev, bool flag), | 143 | TP_PROTO(struct regmap *map, bool flag), |
| 145 | 144 | ||
| 146 | TP_ARGS(dev, flag), | 145 | TP_ARGS(map, flag), |
| 147 | 146 | ||
| 148 | TP_STRUCT__entry( | 147 | TP_STRUCT__entry( |
| 149 | __string( name, dev_name(dev) ) | 148 | __string( name, regmap_name(map) ) |
| 150 | __field( int, flag ) | 149 | __field( int, flag ) |
| 151 | ), | 150 | ), |
| 152 | 151 | ||
| 153 | TP_fast_assign( | 152 | TP_fast_assign( |
| 154 | __assign_str(name, dev_name(dev)); | 153 | __assign_str(name, regmap_name(map)); |
| 155 | __entry->flag = flag; | 154 | __entry->flag = flag; |
| 156 | ), | 155 | ), |
| 157 | 156 | ||
| @@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool, | |||
| 161 | 160 | ||
| 162 | DEFINE_EVENT(regmap_bool, regmap_cache_only, | 161 | DEFINE_EVENT(regmap_bool, regmap_cache_only, |
| 163 | 162 | ||
| 164 | TP_PROTO(struct device *dev, bool flag), | 163 | TP_PROTO(struct regmap *map, bool flag), |
| 165 | 164 | ||
| 166 | TP_ARGS(dev, flag) | 165 | TP_ARGS(map, flag) |
| 167 | 166 | ||
| 168 | ); | 167 | ); |
| 169 | 168 | ||
| 170 | DEFINE_EVENT(regmap_bool, regmap_cache_bypass, | 169 | DEFINE_EVENT(regmap_bool, regmap_cache_bypass, |
| 171 | 170 | ||
| 172 | TP_PROTO(struct device *dev, bool flag), | 171 | TP_PROTO(struct regmap *map, bool flag), |
| 173 | 172 | ||
| 174 | TP_ARGS(dev, flag) | 173 | TP_ARGS(map, flag) |
| 175 | 174 | ||
| 176 | ); | 175 | ); |
| 177 | 176 | ||
| 178 | DECLARE_EVENT_CLASS(regmap_async, | 177 | DECLARE_EVENT_CLASS(regmap_async, |
| 179 | 178 | ||
| 180 | TP_PROTO(struct device *dev), | 179 | TP_PROTO(struct regmap *map), |
| 181 | 180 | ||
| 182 | TP_ARGS(dev), | 181 | TP_ARGS(map), |
| 183 | 182 | ||
| 184 | TP_STRUCT__entry( | 183 | TP_STRUCT__entry( |
| 185 | __string( name, dev_name(dev) ) | 184 | __string( name, regmap_name(map) ) |
| 186 | ), | 185 | ), |
| 187 | 186 | ||
| 188 | TP_fast_assign( | 187 | TP_fast_assign( |
| 189 | __assign_str(name, dev_name(dev)); | 188 | __assign_str(name, regmap_name(map)); |
| 190 | ), | 189 | ), |
| 191 | 190 | ||
| 192 | TP_printk("%s", __get_str(name)) | 191 | TP_printk("%s", __get_str(name)) |
| @@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async, | |||
| 194 | 193 | ||
| 195 | DEFINE_EVENT(regmap_block, regmap_async_write_start, | 194 | DEFINE_EVENT(regmap_block, regmap_async_write_start, |
| 196 | 195 | ||
| 197 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 196 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
| 198 | 197 | ||
| 199 | TP_ARGS(dev, reg, count) | 198 | TP_ARGS(map, reg, count) |
| 200 | ); | 199 | ); |
| 201 | 200 | ||
| 202 | DEFINE_EVENT(regmap_async, regmap_async_io_complete, | 201 | DEFINE_EVENT(regmap_async, regmap_async_io_complete, |
| 203 | 202 | ||
| 204 | TP_PROTO(struct device *dev), | 203 | TP_PROTO(struct regmap *map), |
| 205 | 204 | ||
| 206 | TP_ARGS(dev) | 205 | TP_ARGS(map) |
| 207 | 206 | ||
| 208 | ); | 207 | ); |
| 209 | 208 | ||
| 210 | DEFINE_EVENT(regmap_async, regmap_async_complete_start, | 209 | DEFINE_EVENT(regmap_async, regmap_async_complete_start, |
| 211 | 210 | ||
| 212 | TP_PROTO(struct device *dev), | 211 | TP_PROTO(struct regmap *map), |
| 213 | 212 | ||
| 214 | TP_ARGS(dev) | 213 | TP_ARGS(map) |
| 215 | 214 | ||
| 216 | ); | 215 | ); |
| 217 | 216 | ||
| 218 | DEFINE_EVENT(regmap_async, regmap_async_complete_done, | 217 | DEFINE_EVENT(regmap_async, regmap_async_complete_done, |
| 219 | 218 | ||
| 220 | TP_PROTO(struct device *dev), | 219 | TP_PROTO(struct regmap *map), |
| 221 | 220 | ||
| 222 | TP_ARGS(dev) | 221 | TP_ARGS(map) |
| 223 | 222 | ||
| 224 | ); | 223 | ); |
| 225 | 224 | ||
| 226 | TRACE_EVENT(regcache_drop_region, | 225 | TRACE_EVENT(regcache_drop_region, |
| 227 | 226 | ||
| 228 | TP_PROTO(struct device *dev, unsigned int from, | 227 | TP_PROTO(struct regmap *map, unsigned int from, |
| 229 | unsigned int to), | 228 | unsigned int to), |
| 230 | 229 | ||
| 231 | TP_ARGS(dev, from, to), | 230 | TP_ARGS(map, from, to), |
| 232 | 231 | ||
| 233 | TP_STRUCT__entry( | 232 | TP_STRUCT__entry( |
| 234 | __string( name, dev_name(dev) ) | 233 | __string( name, regmap_name(map) ) |
| 235 | __field( unsigned int, from ) | 234 | __field( unsigned int, from ) |
| 236 | __field( unsigned int, to ) | 235 | __field( unsigned int, to ) |
| 237 | ), | 236 | ), |
| 238 | 237 | ||
| 239 | TP_fast_assign( | 238 | TP_fast_assign( |
| 240 | __assign_str(name, dev_name(dev)); | 239 | __assign_str(name, regmap_name(map)); |
| 241 | __entry->from = from; | 240 | __entry->from = from; |
| 242 | __entry->to = to; | 241 | __entry->to = to; |
| 243 | ), | 242 | ), |
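Passing struct regmap rather than struct device to these tracepoints lets the trace name come from regmap_name(), which also covers maps with no backing device. A hedged sketch of what such a helper can look like; the real one lives in drivers/base/regmap/internal.h and may differ:

/* Sketch only: prefer the underlying device name, fall back to the
 * map's own name for device-less maps. */
static inline const char *regmap_name(const struct regmap *map)
{
	if (map->dev)
		return dev_name(map->dev);
	return map->name;
}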
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index b0a813079852..2f62ab2d7bf9 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h | |||
| @@ -973,7 +973,8 @@ struct input_keymap_entry { | |||
| 973 | */ | 973 | */ |
| 974 | #define MT_TOOL_FINGER 0 | 974 | #define MT_TOOL_FINGER 0 |
| 975 | #define MT_TOOL_PEN 1 | 975 | #define MT_TOOL_PEN 1 |
| 976 | #define MT_TOOL_MAX 1 | 976 | #define MT_TOOL_PALM 2 |
| 977 | #define MT_TOOL_MAX 2 | ||
| 977 | 978 | ||
| 978 | /* | 979 | /* |
| 979 | * Values describing the status of a force-feedback effect | 980 | * Values describing the status of a force-feedback effect |
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h index 4742f2cb42f2..d3bd6ffec041 100644 --- a/include/uapi/linux/nfsd/export.h +++ b/include/uapi/linux/nfsd/export.h | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | * exported filesystem. | 47 | * exported filesystem. |
| 48 | */ | 48 | */ |
| 49 | #define NFSEXP_V4ROOT 0x10000 | 49 | #define NFSEXP_V4ROOT 0x10000 |
| 50 | #define NFSEXP_NOPNFS 0x20000 | 50 | #define NFSEXP_PNFS 0x20000 |
| 51 | 51 | ||
| 52 | /* All flags that we claim to support. (Note we don't support NOACL.) */ | 52 | /* All flags that we claim to support. (Note we don't support NOACL.) */ |
| 53 | #define NFSEXP_ALLFLAGS 0x3FE7F | 53 | #define NFSEXP_ALLFLAGS 0x3FE7F |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 453ef61311d4..2fabc0627165 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry) | |||
| 4574 | { | 4574 | { |
| 4575 | struct perf_event *event = container_of(entry, | 4575 | struct perf_event *event = container_of(entry, |
| 4576 | struct perf_event, pending); | 4576 | struct perf_event, pending); |
| 4577 | int rctx; | ||
| 4578 | |||
| 4579 | rctx = perf_swevent_get_recursion_context(); | ||
| 4580 | /* | ||
| 4581 | * If we 'fail' here, that's OK, it means recursion is already disabled | ||
| 4582 | * and we won't recurse 'further'. | ||
| 4583 | */ | ||
| 4577 | 4584 | ||
| 4578 | if (event->pending_disable) { | 4585 | if (event->pending_disable) { |
| 4579 | event->pending_disable = 0; | 4586 | event->pending_disable = 0; |
| @@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry) | |||
| 4584 | event->pending_wakeup = 0; | 4591 | event->pending_wakeup = 0; |
| 4585 | perf_event_wakeup(event); | 4592 | perf_event_wakeup(event); |
| 4586 | } | 4593 | } |
| 4594 | |||
| 4595 | if (rctx >= 0) | ||
| 4596 | perf_swevent_put_recursion_context(rctx); | ||
| 4587 | } | 4597 | } |
| 4588 | 4598 | ||
| 4589 | /* | 4599 | /* |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 88d0d4420ad2..ba77ab5f64dd 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
| @@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class) | |||
| 633 | if (!new_class->name) | 633 | if (!new_class->name) |
| 634 | return 0; | 634 | return 0; |
| 635 | 635 | ||
| 636 | list_for_each_entry(class, &all_lock_classes, lock_entry) { | 636 | list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { |
| 637 | if (new_class->key - new_class->subclass == class->key) | 637 | if (new_class->key - new_class->subclass == class->key) |
| 638 | return class->name_version; | 638 | return class->name_version; |
| 639 | if (class->name && !strcmp(class->name, new_class->name)) | 639 | if (class->name && !strcmp(class->name, new_class->name)) |
| @@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
| 700 | hash_head = classhashentry(key); | 700 | hash_head = classhashentry(key); |
| 701 | 701 | ||
| 702 | /* | 702 | /* |
| 703 | * We can walk the hash lockfree, because the hash only | 703 | * We do an RCU walk of the hash, see lockdep_free_key_range(). |
| 704 | * grows, and we are careful when adding entries to the end: | ||
| 705 | */ | 704 | */ |
| 706 | list_for_each_entry(class, hash_head, hash_entry) { | 705 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 706 | return NULL; | ||
| 707 | |||
| 708 | list_for_each_entry_rcu(class, hash_head, hash_entry) { | ||
| 707 | if (class->key == key) { | 709 | if (class->key == key) { |
| 708 | /* | 710 | /* |
| 709 | * Huh! same key, different name? Did someone trample | 711 | * Huh! same key, different name? Did someone trample |
| @@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
| 728 | struct lockdep_subclass_key *key; | 730 | struct lockdep_subclass_key *key; |
| 729 | struct list_head *hash_head; | 731 | struct list_head *hash_head; |
| 730 | struct lock_class *class; | 732 | struct lock_class *class; |
| 731 | unsigned long flags; | 733 | |
| 734 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); | ||
| 732 | 735 | ||
| 733 | class = look_up_lock_class(lock, subclass); | 736 | class = look_up_lock_class(lock, subclass); |
| 734 | if (likely(class)) | 737 | if (likely(class)) |
| @@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
| 750 | key = lock->key->subkeys + subclass; | 753 | key = lock->key->subkeys + subclass; |
| 751 | hash_head = classhashentry(key); | 754 | hash_head = classhashentry(key); |
| 752 | 755 | ||
| 753 | raw_local_irq_save(flags); | ||
| 754 | if (!graph_lock()) { | 756 | if (!graph_lock()) { |
| 755 | raw_local_irq_restore(flags); | ||
| 756 | return NULL; | 757 | return NULL; |
| 757 | } | 758 | } |
| 758 | /* | 759 | /* |
| 759 | * We have to do the hash-walk again, to avoid races | 760 | * We have to do the hash-walk again, to avoid races |
| 760 | * with another CPU: | 761 | * with another CPU: |
| 761 | */ | 762 | */ |
| 762 | list_for_each_entry(class, hash_head, hash_entry) | 763 | list_for_each_entry_rcu(class, hash_head, hash_entry) { |
| 763 | if (class->key == key) | 764 | if (class->key == key) |
| 764 | goto out_unlock_set; | 765 | goto out_unlock_set; |
| 766 | } | ||
| 767 | |||
| 765 | /* | 768 | /* |
| 766 | * Allocate a new key from the static array, and add it to | 769 | * Allocate a new key from the static array, and add it to |
| 767 | * the hash: | 770 | * the hash: |
| 768 | */ | 771 | */ |
| 769 | if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { | 772 | if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { |
| 770 | if (!debug_locks_off_graph_unlock()) { | 773 | if (!debug_locks_off_graph_unlock()) { |
| 771 | raw_local_irq_restore(flags); | ||
| 772 | return NULL; | 774 | return NULL; |
| 773 | } | 775 | } |
| 774 | raw_local_irq_restore(flags); | ||
| 775 | 776 | ||
| 776 | print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); | 777 | print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); |
| 777 | dump_stack(); | 778 | dump_stack(); |
| @@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
| 798 | 799 | ||
| 799 | if (verbose(class)) { | 800 | if (verbose(class)) { |
| 800 | graph_unlock(); | 801 | graph_unlock(); |
| 801 | raw_local_irq_restore(flags); | ||
| 802 | 802 | ||
| 803 | printk("\nnew class %p: %s", class->key, class->name); | 803 | printk("\nnew class %p: %s", class->key, class->name); |
| 804 | if (class->name_version > 1) | 804 | if (class->name_version > 1) |
| @@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
| 806 | printk("\n"); | 806 | printk("\n"); |
| 807 | dump_stack(); | 807 | dump_stack(); |
| 808 | 808 | ||
| 809 | raw_local_irq_save(flags); | ||
| 810 | if (!graph_lock()) { | 809 | if (!graph_lock()) { |
| 811 | raw_local_irq_restore(flags); | ||
| 812 | return NULL; | 810 | return NULL; |
| 813 | } | 811 | } |
| 814 | } | 812 | } |
| 815 | out_unlock_set: | 813 | out_unlock_set: |
| 816 | graph_unlock(); | 814 | graph_unlock(); |
| 817 | raw_local_irq_restore(flags); | ||
| 818 | 815 | ||
| 819 | out_set_class_cache: | 816 | out_set_class_cache: |
| 820 | if (!subclass || force) | 817 | if (!subclass || force) |
| @@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | |||
| 870 | entry->distance = distance; | 867 | entry->distance = distance; |
| 871 | entry->trace = *trace; | 868 | entry->trace = *trace; |
| 872 | /* | 869 | /* |
| 873 | * Since we never remove from the dependency list, the list can | 870 | * Both allocation and removal are done under the graph lock; but |
| 874 | * be walked lockless by other CPUs, it's only allocation | 871 | * iteration is under RCU-sched; see look_up_lock_class() and |
| 875 | * that must be protected by the spinlock. But this also means | 872 | * lockdep_free_key_range(). |
| 876 | * we must make new entries visible only once writes to the | ||
| 877 | * entry become visible - hence the RCU op: | ||
| 878 | */ | 873 | */ |
| 879 | list_add_tail_rcu(&entry->entry, head); | 874 | list_add_tail_rcu(&entry->entry, head); |
| 880 | 875 | ||
| @@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry, | |||
| 1025 | else | 1020 | else |
| 1026 | head = &lock->class->locks_before; | 1021 | head = &lock->class->locks_before; |
| 1027 | 1022 | ||
| 1028 | list_for_each_entry(entry, head, entry) { | 1023 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); |
| 1024 | |||
| 1025 | list_for_each_entry_rcu(entry, head, entry) { | ||
| 1029 | if (!lock_accessed(entry)) { | 1026 | if (!lock_accessed(entry)) { |
| 1030 | unsigned int cq_depth; | 1027 | unsigned int cq_depth; |
| 1031 | mark_lock_accessed(entry, lock); | 1028 | mark_lock_accessed(entry, lock); |
| @@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
| 2022 | * We can walk it lock-free, because entries only get added | 2019 | * We can walk it lock-free, because entries only get added |
| 2023 | * to the hash: | 2020 | * to the hash: |
| 2024 | */ | 2021 | */ |
| 2025 | list_for_each_entry(chain, hash_head, entry) { | 2022 | list_for_each_entry_rcu(chain, hash_head, entry) { |
| 2026 | if (chain->chain_key == chain_key) { | 2023 | if (chain->chain_key == chain_key) { |
| 2027 | cache_hit: | 2024 | cache_hit: |
| 2028 | debug_atomic_inc(chain_lookup_hits); | 2025 | debug_atomic_inc(chain_lookup_hits); |
| @@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
| 2996 | if (unlikely(!debug_locks)) | 2993 | if (unlikely(!debug_locks)) |
| 2997 | return; | 2994 | return; |
| 2998 | 2995 | ||
| 2999 | if (subclass) | 2996 | if (subclass) { |
| 2997 | unsigned long flags; | ||
| 2998 | |||
| 2999 | if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) | ||
| 3000 | return; | ||
| 3001 | |||
| 3002 | raw_local_irq_save(flags); | ||
| 3003 | current->lockdep_recursion = 1; | ||
| 3000 | register_lock_class(lock, subclass, 1); | 3004 | register_lock_class(lock, subclass, 1); |
| 3005 | current->lockdep_recursion = 0; | ||
| 3006 | raw_local_irq_restore(flags); | ||
| 3007 | } | ||
| 3001 | } | 3008 | } |
| 3002 | EXPORT_SYMBOL_GPL(lockdep_init_map); | 3009 | EXPORT_SYMBOL_GPL(lockdep_init_map); |
| 3003 | 3010 | ||
| @@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size) | |||
| 3887 | return addr >= start && addr < start + size; | 3894 | return addr >= start && addr < start + size; |
| 3888 | } | 3895 | } |
| 3889 | 3896 | ||
| 3897 | /* | ||
| 3898 | * Used in module.c to remove lock classes from memory that is going to be | ||
| 3899 | * freed; and possibly re-used by other modules. | ||
| 3900 | * | ||
| 3901 | * We will have had one sync_sched() before getting here, so we're guaranteed | ||
| 3902 | * nobody will look up these exact classes -- they're properly dead but still | ||
| 3903 | * allocated. | ||
| 3904 | */ | ||
| 3890 | void lockdep_free_key_range(void *start, unsigned long size) | 3905 | void lockdep_free_key_range(void *start, unsigned long size) |
| 3891 | { | 3906 | { |
| 3892 | struct lock_class *class, *next; | 3907 | struct lock_class *class; |
| 3893 | struct list_head *head; | 3908 | struct list_head *head; |
| 3894 | unsigned long flags; | 3909 | unsigned long flags; |
| 3895 | int i; | 3910 | int i; |
| @@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size) | |||
| 3905 | head = classhash_table + i; | 3920 | head = classhash_table + i; |
| 3906 | if (list_empty(head)) | 3921 | if (list_empty(head)) |
| 3907 | continue; | 3922 | continue; |
| 3908 | list_for_each_entry_safe(class, next, head, hash_entry) { | 3923 | list_for_each_entry_rcu(class, head, hash_entry) { |
| 3909 | if (within(class->key, start, size)) | 3924 | if (within(class->key, start, size)) |
| 3910 | zap_class(class); | 3925 | zap_class(class); |
| 3911 | else if (within(class->name, start, size)) | 3926 | else if (within(class->name, start, size)) |
| @@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size) | |||
| 3916 | if (locked) | 3931 | if (locked) |
| 3917 | graph_unlock(); | 3932 | graph_unlock(); |
| 3918 | raw_local_irq_restore(flags); | 3933 | raw_local_irq_restore(flags); |
| 3934 | |||
| 3935 | /* | ||
| 3936 | * Wait for any possible iterators from look_up_lock_class() to pass | ||
| 3937 | * before continuing to free the memory they refer to. | ||
| 3938 | * | ||
| 3939 | * sync_sched() is sufficient because the read-side is IRQ-disabled. | ||
| 3940 | */ | ||
| 3941 | synchronize_sched(); | ||
| 3942 | |||
| 3943 | /* | ||
| 3944 | * XXX at this point we could return the resources to the pool; | ||
| 3945 | * instead we leak them. We would need to change to bitmap allocators | ||
| 3946 | * instead of the linear allocators we have now. | ||
| 3947 | */ | ||
| 3919 | } | 3948 | } |
| 3920 | 3949 | ||
| 3921 | void lockdep_reset_lock(struct lockdep_map *lock) | 3950 | void lockdep_reset_lock(struct lockdep_map *lock) |
| 3922 | { | 3951 | { |
| 3923 | struct lock_class *class, *next; | 3952 | struct lock_class *class; |
| 3924 | struct list_head *head; | 3953 | struct list_head *head; |
| 3925 | unsigned long flags; | 3954 | unsigned long flags; |
| 3926 | int i, j; | 3955 | int i, j; |
| @@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock) | |||
| 3948 | head = classhash_table + i; | 3977 | head = classhash_table + i; |
| 3949 | if (list_empty(head)) | 3978 | if (list_empty(head)) |
| 3950 | continue; | 3979 | continue; |
| 3951 | list_for_each_entry_safe(class, next, head, hash_entry) { | 3980 | list_for_each_entry_rcu(class, head, hash_entry) { |
| 3952 | int match = 0; | 3981 | int match = 0; |
| 3953 | 3982 | ||
| 3954 | for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) | 3983 | for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) |
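The lockdep hunks above convert the hash and dependency walks to list_for_each_entry_rcu() and rely on IRQ-disabled read sides plus synchronize_sched() in lockdep_free_key_range(), instead of excluding readers with the graph lock. A generic sketch of that reader/writer contract, using illustrative names rather than lockdep's own structures:

struct foo {
	struct list_head entry;
	int key;
};

static LIST_HEAD(foo_list);

/* Reader: runs with IRQs disabled rather than under rcu_read_lock();
 * that is still a sched-RCU read-side section, which is exactly what
 * the writer below waits for. */
static struct foo *foo_lookup(int key)
{
	struct foo *f;

	list_for_each_entry_rcu(f, &foo_list, entry)
		if (f->key == key)
			return f;
	return NULL;
}

/* Writer: unlink under the update-side lock, then wait for all sched-RCU
 * readers before the memory may be freed or reused. */
static void foo_zap(struct foo *f)
{
	list_del_rcu(&f->entry);
	synchronize_sched();
	kfree(f);
}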
diff --git a/kernel/module.c b/kernel/module.c index b3d634ed06c9..99fdf94efce8 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -1865,7 +1865,7 @@ static void free_module(struct module *mod) | |||
| 1865 | kfree(mod->args); | 1865 | kfree(mod->args); |
| 1866 | percpu_modfree(mod); | 1866 | percpu_modfree(mod); |
| 1867 | 1867 | ||
| 1868 | /* Free lock-classes: */ | 1868 | /* Free lock-classes; relies on the preceding sync_rcu(). */ |
| 1869 | lockdep_free_key_range(mod->module_core, mod->core_size); | 1869 | lockdep_free_key_range(mod->module_core, mod->core_size); |
| 1870 | 1870 | ||
| 1871 | /* Finally, free the core (containing the module structure) */ | 1871 | /* Finally, free the core (containing the module structure) */ |
| @@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3349 | module_bug_cleanup(mod); | 3349 | module_bug_cleanup(mod); |
| 3350 | mutex_unlock(&module_mutex); | 3350 | mutex_unlock(&module_mutex); |
| 3351 | 3351 | ||
| 3352 | /* Free lock-classes: */ | ||
| 3353 | lockdep_free_key_range(mod->module_core, mod->core_size); | ||
| 3354 | |||
| 3355 | /* we can't deallocate the module until we clear memory protection */ | 3352 | /* we can't deallocate the module until we clear memory protection */ |
| 3356 | unset_module_init_ro_nx(mod); | 3353 | unset_module_init_ro_nx(mod); |
| 3357 | unset_module_core_ro_nx(mod); | 3354 | unset_module_core_ro_nx(mod); |
| @@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3375 | synchronize_rcu(); | 3372 | synchronize_rcu(); |
| 3376 | mutex_unlock(&module_mutex); | 3373 | mutex_unlock(&module_mutex); |
| 3377 | free_module: | 3374 | free_module: |
| 3375 | /* Free lock-classes; relies on the preceding sync_rcu() */ | ||
| 3376 | lockdep_free_key_range(mod->module_core, mod->core_size); | ||
| 3377 | |||
| 3378 | module_deallocate(mod, info); | 3378 | module_deallocate(mod, info); |
| 3379 | free_copy: | 3379 | free_copy: |
| 3380 | free_copy(info); | 3380 | free_copy(info); |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f0f831e8a345..62671f53202a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
| 3034 | } else { | 3034 | } else { |
| 3035 | if (dl_prio(oldprio)) | 3035 | if (dl_prio(oldprio)) |
| 3036 | p->dl.dl_boosted = 0; | 3036 | p->dl.dl_boosted = 0; |
| 3037 | if (rt_prio(oldprio)) | ||
| 3038 | p->rt.timeout = 0; | ||
| 3037 | p->sched_class = &fair_sched_class; | 3039 | p->sched_class = &fair_sched_class; |
| 3038 | } | 3040 | } |
| 3039 | 3041 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7ce18f3c097a..bcfe32088b37 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -1609,9 +1609,11 @@ static void update_task_scan_period(struct task_struct *p, | |||
| 1609 | /* | 1609 | /* |
| 1610 | * If there were no record hinting faults then either the task is | 1610 | * If there were no record hinting faults then either the task is |
| 1611 | * completely idle or all activity is areas that are not of interest | 1611 | * completely idle or all activity is areas that are not of interest |
| 1612 | * to automatic numa balancing. Scan slower | 1612 | * to automatic numa balancing. Related to that, if there were failed |
| 1613 | migrations then it implies we are migrating too quickly or the local | ||
| 1614 | * node is overloaded. In either case, scan slower | ||
| 1613 | */ | 1615 | */ |
| 1614 | if (local + shared == 0) { | 1616 | if (local + shared == 0 || p->numa_faults_locality[2]) { |
| 1615 | p->numa_scan_period = min(p->numa_scan_period_max, | 1617 | p->numa_scan_period = min(p->numa_scan_period_max, |
| 1616 | p->numa_scan_period << 1); | 1618 | p->numa_scan_period << 1); |
| 1617 | 1619 | ||
| @@ -2080,6 +2082,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) | |||
| 2080 | 2082 | ||
| 2081 | if (migrated) | 2083 | if (migrated) |
| 2082 | p->numa_pages_migrated += pages; | 2084 | p->numa_pages_migrated += pages; |
| 2085 | if (flags & TNF_MIGRATE_FAIL) | ||
| 2086 | p->numa_faults_locality[2] += pages; | ||
| 2083 | 2087 | ||
| 2084 | p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; | 2088 | p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; |
| 2085 | p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; | 2089 | p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; |
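The two fair.c hunks above work together: task_numa_fault() now counts pages whose migration failed in numa_faults_locality[2], and update_task_scan_period() treats any such failures like the no-faults case and backs the scan period off. A minimal userspace sketch of that backoff arithmetic follows; the struct and field names are invented for illustration and are not the kernel's.

#include <stdio.h>

/* Illustrative stand-ins for the per-task fields used by the patch. */
struct numa_scan_state {
	unsigned int scan_period;	/* current scan period, in ms */
	unsigned int scan_period_max;	/* upper bound on the period */
	unsigned long faults_local;
	unsigned long faults_shared;
	unsigned long failed_migrations; /* what numa_faults_locality[2] counts */
};

static unsigned int next_scan_period(const struct numa_scan_state *s)
{
	/* Back off (double the period, capped at the maximum) when there is
	 * nothing useful to scan or when migrations keep failing; otherwise
	 * keep the current period.
	 */
	if (s->faults_local + s->faults_shared == 0 || s->failed_migrations) {
		unsigned int doubled = s->scan_period << 1;
		return doubled < s->scan_period_max ? doubled : s->scan_period_max;
	}
	return s->scan_period;
}

int main(void)
{
	struct numa_scan_state s = { 1000, 60000, 0, 0, 128 };

	printf("next period: %u ms\n", next_scan_period(&s));	/* 2000 */
	return 0;
}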
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 88ea2d6e0031..ce410bb9f2e1 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -1228,6 +1228,14 @@ static struct ctl_table vm_table[] = { | |||
| 1228 | .extra1 = &zero, | 1228 | .extra1 = &zero, |
| 1229 | }, | 1229 | }, |
| 1230 | { | 1230 | { |
| 1231 | .procname = "dirtytime_expire_seconds", | ||
| 1232 | .data = &dirtytime_expire_interval, | ||
| 1233 | .maxlen = sizeof(dirty_expire_interval), | ||
| 1234 | .mode = 0644, | ||
| 1235 | .proc_handler = dirtytime_interval_handler, | ||
| 1236 | .extra1 = &zero, | ||
| 1237 | }, | ||
| 1238 | { | ||
| 1231 | .procname = "nr_pdflush_threads", | 1239 | .procname = "nr_pdflush_threads", |
| 1232 | .mode = 0444 /* read-only */, | 1240 | .mode = 0444 /* read-only */, |
| 1233 | .proc_handler = pdflush_proc_obsolete, | 1241 | .proc_handler = pdflush_proc_obsolete, |
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index eb682d5c697c..6aac4beedbbe 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c | |||
| @@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode, | |||
| 49 | */ | 49 | */ |
| 50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | 50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) |
| 51 | { | 51 | { |
| 52 | int bc_moved; | ||
| 52 | /* | 53 | /* |
| 53 | * We try to cancel the timer first. If the callback is on | 54 | * We try to cancel the timer first. If the callback is on |
| 54 | * flight on some other cpu then we let it handle it. If we | 55 | * flight on some other cpu then we let it handle it. If we |
| @@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | |||
| 60 | * restart the timer because we are in the callback, but we | 61 | * restart the timer because we are in the callback, but we |
| 61 | * can set the expiry time and let the callback return | 62 | * can set the expiry time and let the callback return |
| 62 | * HRTIMER_RESTART. | 63 | * HRTIMER_RESTART. |
| 64 | * | ||
| 65 | * Since we are in the idle loop at this point and because | ||
| 66 | * hrtimer_{start/cancel} functions call into tracing, | ||
| 67 | * calls to these functions must be bound within RCU_NONIDLE. | ||
| 63 | */ | 68 | */ |
| 64 | if (hrtimer_try_to_cancel(&bctimer) >= 0) { | 69 | RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ? |
| 65 | hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); | 70 | !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) : |
| 71 | 0); | ||
| 72 | if (bc_moved) { | ||
| 66 | /* Bind the "device" to the cpu */ | 73 | /* Bind the "device" to the cpu */ |
| 67 | bc->bound_on = smp_processor_id(); | 74 | bc->bound_on = smp_processor_id(); |
| 68 | } else if (bc->bound_on == smp_processor_id()) { | 75 | } else if (bc->bound_on == smp_processor_id()) { |
| @@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b) | |||
| 12 | return 0; | 12 | return 0; |
| 13 | } | 13 | } |
| 14 | EXPORT_SYMBOL_GPL(lcm); | 14 | EXPORT_SYMBOL_GPL(lcm); |
| 15 | |||
| 16 | unsigned long lcm_not_zero(unsigned long a, unsigned long b) | ||
| 17 | { | ||
| 18 | unsigned long l = lcm(a, b); | ||
| 19 | |||
| 20 | if (l) | ||
| 21 | return l; | ||
| 22 | |||
| 23 | return (b ? : a); | ||
| 24 | } | ||
| 25 | EXPORT_SYMBOL_GPL(lcm_not_zero); | ||
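The hunk above (presumably lib/lcm.c, since it builds directly on lcm()) adds a helper that never returns 0 as long as one argument is usable, which is convenient when 0 means "no constraint" in a limit being combined. A small userspace model, with gcd/lcm written out because the kernel helpers are not available here:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
	if (a && b)
		return (a / gcd(a, b)) * b;
	return 0;	/* mirrors the kernel helper: lcm with 0 is 0 */
}

/* Same idea as the patch: never return 0 if either input is usable. */
static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
	unsigned long l = lcm(a, b);

	return l ? l : (b ? b : a);
}

int main(void)
{
	printf("%lu\n", lcm_not_zero(4, 6));	/* 12 */
	printf("%lu\n", lcm_not_zero(0, 6));	/* 6: falls back to b */
	printf("%lu\n", lcm_not_zero(4, 0));	/* 4: falls back to a */
	return 0;
}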
diff --git a/lib/nlattr.c b/lib/nlattr.c index 76a1b59523ab..f5907d23272d 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
| @@ -279,6 +279,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count) | |||
| 279 | int minlen = min_t(int, count, nla_len(src)); | 279 | int minlen = min_t(int, count, nla_len(src)); |
| 280 | 280 | ||
| 281 | memcpy(dest, nla_data(src), minlen); | 281 | memcpy(dest, nla_data(src), minlen); |
| 282 | if (count > minlen) | ||
| 283 | memset(dest + minlen, 0, count - minlen); | ||
| 282 | 284 | ||
| 283 | return minlen; | 285 | return minlen; |
| 284 | } | 286 | } |
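The nla_memcpy() change zero-fills the tail of the destination whenever the attribute payload is shorter than the caller's buffer, so callers can no longer observe stale bytes past the copied data. The pattern is generic; a hedged userspace sketch (buffer sizes and names are illustrative):

#include <stdio.h>
#include <string.h>

/* Copy at most src_len bytes and zero whatever is left of the buffer,
 * mirroring the nla_memcpy() change: dest never contains stale data.
 */
static int copy_and_pad(void *dest, size_t count, const void *src, size_t src_len)
{
	size_t minlen = count < src_len ? count : src_len;

	memcpy(dest, src, minlen);
	if (count > minlen)
		memset((char *)dest + minlen, 0, count - minlen);
	return (int)minlen;
}

int main(void)
{
	char buf[8];
	const char attr[] = { 0x11, 0x22, 0x33 };	/* 3-byte attribute */

	memset(buf, 0xff, sizeof(buf));			/* pretend-stale contents */
	copy_and_pad(buf, sizeof(buf), attr, sizeof(attr));

	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", (unsigned char)buf[i]);	/* 11 22 33 00 00 00 00 00 */
	printf("\n");
	return 0;
}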
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 626e93db28ba..6817b0350c71 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1260 | int target_nid, last_cpupid = -1; | 1260 | int target_nid, last_cpupid = -1; |
| 1261 | bool page_locked; | 1261 | bool page_locked; |
| 1262 | bool migrated = false; | 1262 | bool migrated = false; |
| 1263 | bool was_writable; | ||
| 1263 | int flags = 0; | 1264 | int flags = 0; |
| 1264 | 1265 | ||
| 1265 | /* A PROT_NONE fault should not end up here */ | 1266 | /* A PROT_NONE fault should not end up here */ |
| @@ -1291,17 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1291 | flags |= TNF_FAULT_LOCAL; | 1292 | flags |= TNF_FAULT_LOCAL; |
| 1292 | } | 1293 | } |
| 1293 | 1294 | ||
| 1294 | /* | 1295 | /* See similar comment in do_numa_page for explanation */ |
| 1295 | * Avoid grouping on DSO/COW pages in specific and RO pages | 1296 | if (!(vma->vm_flags & VM_WRITE)) |
| 1296 | * in general, RO pages shouldn't hurt as much anyway since | ||
| 1297 | * they can be in shared cache state. | ||
| 1298 | * | ||
| 1299 | * FIXME! This checks "pmd_dirty()" as an approximation of | ||
| 1300 | * "is this a read-only page", since checking "pmd_write()" | ||
| 1301 | * is even more broken. We haven't actually turned this into | ||
| 1302 | * a writable page, so pmd_write() will always be false. | ||
| 1303 | */ | ||
| 1304 | if (!pmd_dirty(pmd)) | ||
| 1305 | flags |= TNF_NO_GROUP; | 1297 | flags |= TNF_NO_GROUP; |
| 1306 | 1298 | ||
| 1307 | /* | 1299 | /* |
| @@ -1358,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1358 | if (migrated) { | 1350 | if (migrated) { |
| 1359 | flags |= TNF_MIGRATED; | 1351 | flags |= TNF_MIGRATED; |
| 1360 | page_nid = target_nid; | 1352 | page_nid = target_nid; |
| 1361 | } | 1353 | } else |
| 1354 | flags |= TNF_MIGRATE_FAIL; | ||
| 1362 | 1355 | ||
| 1363 | goto out; | 1356 | goto out; |
| 1364 | clear_pmdnuma: | 1357 | clear_pmdnuma: |
| 1365 | BUG_ON(!PageLocked(page)); | 1358 | BUG_ON(!PageLocked(page)); |
| 1359 | was_writable = pmd_write(pmd); | ||
| 1366 | pmd = pmd_modify(pmd, vma->vm_page_prot); | 1360 | pmd = pmd_modify(pmd, vma->vm_page_prot); |
| 1361 | pmd = pmd_mkyoung(pmd); | ||
| 1362 | if (was_writable) | ||
| 1363 | pmd = pmd_mkwrite(pmd); | ||
| 1367 | set_pmd_at(mm, haddr, pmdp, pmd); | 1364 | set_pmd_at(mm, haddr, pmdp, pmd); |
| 1368 | update_mmu_cache_pmd(vma, addr, pmdp); | 1365 | update_mmu_cache_pmd(vma, addr, pmdp); |
| 1369 | unlock_page(page); | 1366 | unlock_page(page); |
| @@ -1487,6 +1484,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 1487 | 1484 | ||
| 1488 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { | 1485 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { |
| 1489 | pmd_t entry; | 1486 | pmd_t entry; |
| 1487 | bool preserve_write = prot_numa && pmd_write(*pmd); | ||
| 1490 | ret = 1; | 1488 | ret = 1; |
| 1491 | 1489 | ||
| 1492 | /* | 1490 | /* |
| @@ -1502,9 +1500,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 1502 | if (!prot_numa || !pmd_protnone(*pmd)) { | 1500 | if (!prot_numa || !pmd_protnone(*pmd)) { |
| 1503 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); | 1501 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); |
| 1504 | entry = pmd_modify(entry, newprot); | 1502 | entry = pmd_modify(entry, newprot); |
| 1503 | if (preserve_write) | ||
| 1504 | entry = pmd_mkwrite(entry); | ||
| 1505 | ret = HPAGE_PMD_NR; | 1505 | ret = HPAGE_PMD_NR; |
| 1506 | set_pmd_at(mm, addr, pmd, entry); | 1506 | set_pmd_at(mm, addr, pmd, entry); |
| 1507 | BUG_ON(pmd_write(entry)); | 1507 | BUG_ON(!preserve_write && pmd_write(entry)); |
| 1508 | } | 1508 | } |
| 1509 | spin_unlock(ptl); | 1509 | spin_unlock(ptl); |
| 1510 | } | 1510 | } |
diff --git a/mm/memory.c b/mm/memory.c index 411144f977b1..97839f5c8c30 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3035 | int last_cpupid; | 3035 | int last_cpupid; |
| 3036 | int target_nid; | 3036 | int target_nid; |
| 3037 | bool migrated = false; | 3037 | bool migrated = false; |
| 3038 | bool was_writable = pte_write(pte); | ||
| 3038 | int flags = 0; | 3039 | int flags = 0; |
| 3039 | 3040 | ||
| 3040 | /* A PROT_NONE fault should not end up here */ | 3041 | /* A PROT_NONE fault should not end up here */ |
| @@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3059 | /* Make it present again */ | 3060 | /* Make it present again */ |
| 3060 | pte = pte_modify(pte, vma->vm_page_prot); | 3061 | pte = pte_modify(pte, vma->vm_page_prot); |
| 3061 | pte = pte_mkyoung(pte); | 3062 | pte = pte_mkyoung(pte); |
| 3063 | if (was_writable) | ||
| 3064 | pte = pte_mkwrite(pte); | ||
| 3062 | set_pte_at(mm, addr, ptep, pte); | 3065 | set_pte_at(mm, addr, ptep, pte); |
| 3063 | update_mmu_cache(vma, addr, ptep); | 3066 | update_mmu_cache(vma, addr, ptep); |
| 3064 | 3067 | ||
| @@ -3069,16 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3069 | } | 3072 | } |
| 3070 | 3073 | ||
| 3071 | /* | 3074 | /* |
| 3072 | * Avoid grouping on DSO/COW pages in specific and RO pages | 3075 | * Avoid grouping on RO pages in general. RO pages shouldn't hurt as |
| 3073 | * in general, RO pages shouldn't hurt as much anyway since | 3076 | * much anyway since they can be in shared cache state. This misses |
| 3074 | * they can be in shared cache state. | 3077 | * the case where a mapping is writable but the process never writes |
| 3075 | * | 3078 | * to it but pte_write gets cleared during protection updates and |
| 3076 | * FIXME! This checks "pmd_dirty()" as an approximation of | 3079 | * pte_dirty has unpredictable behaviour between PTE scan updates, |
| 3077 | * "is this a read-only page", since checking "pmd_write()" | 3080 | * background writeback, dirty balancing and application behaviour. |
| 3078 | * is even more broken. We haven't actually turned this into | ||
| 3079 | * a writable page, so pmd_write() will always be false. | ||
| 3080 | */ | 3081 | */ |
| 3081 | if (!pte_dirty(pte)) | 3082 | if (!(vma->vm_flags & VM_WRITE)) |
| 3082 | flags |= TNF_NO_GROUP; | 3083 | flags |= TNF_NO_GROUP; |
| 3083 | 3084 | ||
| 3084 | /* | 3085 | /* |
| @@ -3102,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3102 | if (migrated) { | 3103 | if (migrated) { |
| 3103 | page_nid = target_nid; | 3104 | page_nid = target_nid; |
| 3104 | flags |= TNF_MIGRATED; | 3105 | flags |= TNF_MIGRATED; |
| 3105 | } | 3106 | } else |
| 3107 | flags |= TNF_MIGRATE_FAIL; | ||
| 3106 | 3108 | ||
| 3107 | out: | 3109 | out: |
| 3108 | if (page_nid != -1) | 3110 | if (page_nid != -1) |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9fab10795bea..65842d688b7c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) | |||
| 1092 | return NULL; | 1092 | return NULL; |
| 1093 | 1093 | ||
| 1094 | arch_refresh_nodedata(nid, pgdat); | 1094 | arch_refresh_nodedata(nid, pgdat); |
| 1095 | } else { | ||
| 1096 | /* Reset the nr_zones and classzone_idx to 0 before reuse */ | ||
| 1097 | pgdat->nr_zones = 0; | ||
| 1098 | pgdat->classzone_idx = 0; | ||
| 1095 | } | 1099 | } |
| 1096 | 1100 | ||
| 1097 | /* we can use NODE_DATA(nid) from here */ | 1101 | /* we can use NODE_DATA(nid) from here */ |
| @@ -1977,15 +1981,6 @@ void try_offline_node(int nid) | |||
| 1977 | if (is_vmalloc_addr(zone->wait_table)) | 1981 | if (is_vmalloc_addr(zone->wait_table)) |
| 1978 | vfree(zone->wait_table); | 1982 | vfree(zone->wait_table); |
| 1979 | } | 1983 | } |
| 1980 | |||
| 1981 | /* | ||
| 1982 | * Since there is no way to guarentee the address of pgdat/zone is not | ||
| 1983 | * on stack of any kernel threads or used by other kernel objects | ||
| 1984 | * without reference counting or other symchronizing method, do not | ||
| 1985 | * reset node_data and free pgdat here. Just reset it to 0 and reuse | ||
| 1986 | * the memory when the node is online again. | ||
| 1987 | */ | ||
| 1988 | memset(pgdat, 0, sizeof(*pgdat)); | ||
| 1989 | } | 1984 | } |
| 1990 | EXPORT_SYMBOL(try_offline_node); | 1985 | EXPORT_SYMBOL(try_offline_node); |
| 1991 | 1986 | ||
| @@ -774,10 +774,8 @@ again: remove_next = 1 + (end > next->vm_end); | |||
| 774 | 774 | ||
| 775 | importer->anon_vma = exporter->anon_vma; | 775 | importer->anon_vma = exporter->anon_vma; |
| 776 | error = anon_vma_clone(importer, exporter); | 776 | error = anon_vma_clone(importer, exporter); |
| 777 | if (error) { | 777 | if (error) |
| 778 | importer->anon_vma = NULL; | ||
| 779 | return error; | 778 | return error; |
| 780 | } | ||
| 781 | } | 779 | } |
| 782 | } | 780 | } |
| 783 | 781 | ||
diff --git a/mm/mprotect.c b/mm/mprotect.c index 44727811bf4c..88584838e704 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
| @@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 75 | oldpte = *pte; | 75 | oldpte = *pte; |
| 76 | if (pte_present(oldpte)) { | 76 | if (pte_present(oldpte)) { |
| 77 | pte_t ptent; | 77 | pte_t ptent; |
| 78 | bool preserve_write = prot_numa && pte_write(oldpte); | ||
| 78 | 79 | ||
| 79 | /* | 80 | /* |
| 80 | * Avoid trapping faults against the zero or KSM | 81 | * Avoid trapping faults against the zero or KSM |
| @@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 94 | 95 | ||
| 95 | ptent = ptep_modify_prot_start(mm, addr, pte); | 96 | ptent = ptep_modify_prot_start(mm, addr, pte); |
| 96 | ptent = pte_modify(ptent, newprot); | 97 | ptent = pte_modify(ptent, newprot); |
| 98 | if (preserve_write) | ||
| 99 | ptent = pte_mkwrite(ptent); | ||
| 97 | 100 | ||
| 98 | /* Avoid taking write faults for known dirty pages */ | 101 | /* Avoid taking write faults for known dirty pages */ |
| 99 | if (dirty_accountable && pte_dirty(ptent) && | 102 | if (dirty_accountable && pte_dirty(ptent) && |
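The huge_memory.c, memory.c and mprotect.c hunks all apply the same idea: remember whether the entry was writable before the NUMA-hinting protection change and re-apply the write bit afterwards, so a later write does not need an extra fault just to restore writability. Reduced to plain bit manipulation (the flag values below are invented for the sketch, not real PTE bits):

#include <stdio.h>

#define PTE_PRESENT	0x1
#define PTE_WRITE	0x2
#define PTE_PROTNONE	0x4	/* stands in for the NUMA-hinting protection */

static unsigned int change_prot_numa(unsigned int pte)
{
	int preserve_write = pte & PTE_WRITE;

	/* Apply the hinting protection; this would normally clear write. */
	pte &= ~PTE_WRITE;
	pte |= PTE_PROTNONE;

	/* The patches' addition: keep the write bit if it was set before. */
	if (preserve_write)
		pte |= PTE_WRITE;
	return pte;
}

int main(void)
{
	unsigned int pte = PTE_PRESENT | PTE_WRITE;

	pte = change_prot_numa(pte);
	printf("write bit preserved: %s\n", (pte & PTE_WRITE) ? "yes" : "no");
	return 0;
}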
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 45e187b2d971..644bcb665773 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, | |||
| 857 | * bw * elapsed + write_bandwidth * (period - elapsed) | 857 | * bw * elapsed + write_bandwidth * (period - elapsed) |
| 858 | * write_bandwidth = --------------------------------------------------- | 858 | * write_bandwidth = --------------------------------------------------- |
| 859 | * period | 859 | * period |
| 860 | * | ||
| 861 | * @written may have decreased due to account_page_redirty(). | ||
| 862 | * Avoid underflowing @bw calculation. | ||
| 860 | */ | 863 | */ |
| 861 | bw = written - bdi->written_stamp; | 864 | bw = written - min(written, bdi->written_stamp); |
| 862 | bw *= HZ; | 865 | bw *= HZ; |
| 863 | if (unlikely(elapsed > period)) { | 866 | if (unlikely(elapsed > period)) { |
| 864 | do_div(bw, elapsed); | 867 | do_div(bw, elapsed); |
| @@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh, | |||
| 922 | unsigned long now) | 925 | unsigned long now) |
| 923 | { | 926 | { |
| 924 | static DEFINE_SPINLOCK(dirty_lock); | 927 | static DEFINE_SPINLOCK(dirty_lock); |
| 925 | static unsigned long update_time; | 928 | static unsigned long update_time = INITIAL_JIFFIES; |
| 926 | 929 | ||
| 927 | /* | 930 | /* |
| 928 | * check locklessly first to optimize away locking for the most time | 931 | * check locklessly first to optimize away locking for the most time |
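The bandwidth-estimation fix guards against @written having moved backwards (account_page_redirty() can make that happen), which would otherwise underflow the unsigned subtraction and produce an absurdly large bandwidth. A two-line demonstration of the failure mode and the clamped form:

#include <stdio.h>

int main(void)
{
	unsigned long written = 100, stamp = 120;	/* written went "backwards" */

	unsigned long naive = written - stamp;		/* wraps to a huge value */
	unsigned long clamped = written - (written < stamp ? written : stamp);

	printf("naive:   %lu\n", naive);	/* e.g. 18446744073709551596 on LP64 */
	printf("clamped: %lu\n", clamped);	/* 0 */
	return 0;
}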
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 72f5ac381ab3..755a42c76eb4 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
| @@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) | |||
| 103 | 103 | ||
| 104 | if (!is_migrate_isolate_page(buddy)) { | 104 | if (!is_migrate_isolate_page(buddy)) { |
| 105 | __isolate_free_page(page, order); | 105 | __isolate_free_page(page, order); |
| 106 | kernel_map_pages(page, (1 << order), 1); | ||
| 106 | set_page_refcounted(page); | 107 | set_page_refcounted(page); |
| 107 | isolated_page = page; | 108 | isolated_page = page; |
| 108 | } | 109 | } |
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 75c1f2878519..29f2f8b853ae 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
| @@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end, | |||
| 265 | vma = vma->vm_next; | 265 | vma = vma->vm_next; |
| 266 | 266 | ||
| 267 | err = walk_page_test(start, next, walk); | 267 | err = walk_page_test(start, next, walk); |
| 268 | if (err > 0) | 268 | if (err > 0) { |
| 269 | /* | ||
| 270 | * positive return values are purely for | ||
| 271 | * controlling the pagewalk, so should never | ||
| 272 | * be passed to the callers. | ||
| 273 | */ | ||
| 274 | err = 0; | ||
| 269 | continue; | 275 | continue; |
| 276 | } | ||
| 270 | if (err < 0) | 277 | if (err < 0) |
| 271 | break; | 278 | break; |
| 272 | } | 279 | } |
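In walk_page_range(), a positive return from walk_page_test() only means "skip this VMA"; the fix resets it so the value can never leak to callers as if it were a result. A generic sketch of that convention (callback and types are made up for illustration):

#include <stdio.h>

/* Convention: <0 = error, 0 = process, >0 = skip this item (internal only). */
static int test_item(int item)
{
	return (item % 2) ? 1 : 0;	/* skip odd items */
}

static int walk_items(const int *items, int n)
{
	int err = 0;

	for (int i = 0; i < n; i++) {
		err = test_item(items[i]);
		if (err > 0) {
			/* Positive values are purely flow control --
			 * never report them to the caller. */
			err = 0;
			continue;
		}
		if (err < 0)
			break;
		printf("processing %d\n", items[i]);
	}
	return err;	/* only 0 or a negative error escapes */
}

int main(void)
{
	int items[] = { 1, 2, 3, 4 };

	return walk_items(items, 4);
}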
| @@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) | |||
| 287 | return 0; | 287 | return 0; |
| 288 | 288 | ||
| 289 | enomem_failure: | 289 | enomem_failure: |
| 290 | /* | ||
| 291 | * dst->anon_vma is dropped here otherwise its degree can be incorrectly | ||
| 292 | * decremented in unlink_anon_vmas(). | ||
| 293 | * We can safely do this because callers of anon_vma_clone() don't care | ||
| 294 | * about dst->anon_vma if anon_vma_clone() failed. | ||
| 295 | */ | ||
| 296 | dst->anon_vma = NULL; | ||
| 290 | unlink_anon_vmas(dst); | 297 | unlink_anon_vmas(dst); |
| 291 | return -ENOMEM; | 298 | return -ENOMEM; |
| 292 | } | 299 | } |
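This hunk (apparently mm/rmap.c's anon_vma_clone()) clears dst->anon_vma on the ENOMEM path so that unlink_anon_vmas() and the caller's cleanup never act on a half-attached pointer. The shape of the fix is common to many error paths; a sketch with invented types:

#include <stdlib.h>

struct thing { int refs; };
struct holder { struct thing *t; };

/* On failure, undo the partial assignment before running shared cleanup,
 * so cleanup code cannot mis-account the half-attached object.
 */
static int attach(struct holder *dst, struct thing *src)
{
	int failed = 1;		/* pretend a later allocation failed */

	dst->t = src;
	if (failed) {
		/* The equivalent of "dst->anon_vma = NULL" in the patch:
		 * reset before any shared cleanup can see the pointer. */
		dst->t = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct thing t = { 1 };
	struct holder h = { NULL };

	(void)attach(&h, &t);
	return h.t == NULL ? EXIT_SUCCESS : EXIT_FAILURE;	/* expect success */
}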
| @@ -2449,7 +2449,8 @@ redo: | |||
| 2449 | do { | 2449 | do { |
| 2450 | tid = this_cpu_read(s->cpu_slab->tid); | 2450 | tid = this_cpu_read(s->cpu_slab->tid); |
| 2451 | c = raw_cpu_ptr(s->cpu_slab); | 2451 | c = raw_cpu_ptr(s->cpu_slab); |
| 2452 | } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); | 2452 | } while (IS_ENABLED(CONFIG_PREEMPT) && |
| 2453 | unlikely(tid != READ_ONCE(c->tid))); | ||
| 2453 | 2454 | ||
| 2454 | /* | 2455 | /* |
| 2455 | * Irqless object alloc/free algorithm used here depends on sequence | 2456 | * Irqless object alloc/free algorithm used here depends on sequence |
| @@ -2718,7 +2719,8 @@ redo: | |||
| 2718 | do { | 2719 | do { |
| 2719 | tid = this_cpu_read(s->cpu_slab->tid); | 2720 | tid = this_cpu_read(s->cpu_slab->tid); |
| 2720 | c = raw_cpu_ptr(s->cpu_slab); | 2721 | c = raw_cpu_ptr(s->cpu_slab); |
| 2721 | } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); | 2722 | } while (IS_ENABLED(CONFIG_PREEMPT) && |
| 2723 | unlikely(tid != READ_ONCE(c->tid))); | ||
| 2722 | 2724 | ||
| 2723 | /* Same with comment on barrier() in slab_alloc_node() */ | 2725 | /* Same with comment on barrier() in slab_alloc_node() */ |
| 2724 | barrier(); | 2726 | barrier(); |
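These loops (apparently mm/slub.c's alloc/free fast paths) re-check that the sampled tid still matches the cpu_slab they ended up on; wrapping the second read in READ_ONCE() keeps the compiler from reusing an earlier load of c->tid across iterations, which could otherwise turn the retry loop into an infinite loop. A userspace illustration of the READ_ONCE idiom itself, with the usual volatile-cast definition written out because kernel headers are not available here:

#include <stdio.h>

/* Minimal stand-in for the kernel's READ_ONCE(): force a single, fresh
 * load of the variable at exactly this point in the code.
 */
#define READ_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

static unsigned long tid;	/* imagine another CPU updates this */

static int sample_consistent(unsigned long snapshot)
{
	/* Without READ_ONCE the compiler may reuse an earlier load of 'tid',
	 * making the recheck meaningless; with it, the value is re-read here.
	 */
	return snapshot == READ_ONCE(tid);
}

int main(void)
{
	tid = 42;
	printf("consistent: %d\n", sample_consistent(42));	/* 1 */
	tid = 43;
	printf("consistent: %d\n", sample_consistent(42));	/* 0 */
	return 0;
}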
diff --git a/net/compat.c b/net/compat.c index 94d3d5e97883..f7bd286a8280 100644 --- a/net/compat.c +++ b/net/compat.c | |||
| @@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg, | |||
| 49 | __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || | 49 | __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || |
| 50 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) | 50 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) |
| 51 | return -EFAULT; | 51 | return -EFAULT; |
| 52 | |||
| 53 | if (!uaddr) | ||
| 54 | kmsg->msg_namelen = 0; | ||
| 55 | |||
| 56 | if (kmsg->msg_namelen < 0) | ||
| 57 | return -EINVAL; | ||
| 58 | |||
| 52 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | 59 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
| 53 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); | 60 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); |
| 54 | kmsg->msg_control = compat_ptr(tmp3); | 61 | kmsg->msg_control = compat_ptr(tmp3); |
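get_compat_msghdr() now zeroes msg_namelen when no address buffer was passed and rejects negative lengths before the existing clamp to sizeof(struct sockaddr_storage). Validating a user-controlled length before it is ever used is the whole point; a compact sketch with simplified types:

#include <stdio.h>

#define ADDR_STORAGE_MAX 128	/* plays the role of sizeof(struct sockaddr_storage) */

static int sanitize_namelen(const void *uaddr, int namelen, int *out)
{
	if (!uaddr)
		namelen = 0;		/* no address buffer: length is meaningless */
	if (namelen < 0)
		return -1;		/* -EINVAL in the kernel */
	if (namelen > ADDR_STORAGE_MAX)
		namelen = ADDR_STORAGE_MAX;
	*out = namelen;
	return 0;
}

int main(void)
{
	int len;

	printf("%d\n", sanitize_namelen(NULL, 500, &len));	/* 0, len forced to 0 */
	printf("%d\n", sanitize_namelen("x", -4, &len));	/* -1: rejected */
	return 0;
}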
diff --git a/net/core/dev.c b/net/core/dev.c index 962ee9d71964..45109b70664e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2848,7 +2848,9 @@ static void skb_update_prio(struct sk_buff *skb) | |||
| 2848 | #define skb_update_prio(skb) | 2848 | #define skb_update_prio(skb) |
| 2849 | #endif | 2849 | #endif |
| 2850 | 2850 | ||
| 2851 | static DEFINE_PER_CPU(int, xmit_recursion); | 2851 | DEFINE_PER_CPU(int, xmit_recursion); |
| 2852 | EXPORT_SYMBOL(xmit_recursion); | ||
| 2853 | |||
| 2852 | #define RECURSION_LIMIT 10 | 2854 | #define RECURSION_LIMIT 10 |
| 2853 | 2855 | ||
| 2854 | /** | 2856 | /** |
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 44706e81b2e0..e4fdc9dfb2c7 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
| @@ -175,9 +175,9 @@ void fib_rules_unregister(struct fib_rules_ops *ops) | |||
| 175 | 175 | ||
| 176 | spin_lock(&net->rules_mod_lock); | 176 | spin_lock(&net->rules_mod_lock); |
| 177 | list_del_rcu(&ops->list); | 177 | list_del_rcu(&ops->list); |
| 178 | fib_rules_cleanup_ops(ops); | ||
| 179 | spin_unlock(&net->rules_mod_lock); | 178 | spin_unlock(&net->rules_mod_lock); |
| 180 | 179 | ||
| 180 | fib_rules_cleanup_ops(ops); | ||
| 181 | call_rcu(&ops->rcu, fib_rules_put_rcu); | 181 | call_rcu(&ops->rcu, fib_rules_put_rcu); |
| 182 | } | 182 | } |
| 183 | EXPORT_SYMBOL_GPL(fib_rules_unregister); | 183 | EXPORT_SYMBOL_GPL(fib_rules_unregister); |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index cb5290b8c428..70d3450588b2 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
| @@ -198,8 +198,10 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc) | |||
| 198 | */ | 198 | */ |
| 199 | int peernet2id(struct net *net, struct net *peer) | 199 | int peernet2id(struct net *net, struct net *peer) |
| 200 | { | 200 | { |
| 201 | int id = __peernet2id(net, peer, true); | 201 | bool alloc = atomic_read(&peer->count) == 0 ? false : true; |
| 202 | int id; | ||
| 202 | 203 | ||
| 204 | id = __peernet2id(net, peer, alloc); | ||
| 203 | return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; | 205 | return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; |
| 204 | } | 206 | } |
| 205 | EXPORT_SYMBOL(peernet2id); | 207 | EXPORT_SYMBOL(peernet2id); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ee0608bb3bc0..7ebed55b5f7d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -1932,10 +1932,10 @@ static int rtnl_group_changelink(const struct sk_buff *skb, | |||
| 1932 | struct ifinfomsg *ifm, | 1932 | struct ifinfomsg *ifm, |
| 1933 | struct nlattr **tb) | 1933 | struct nlattr **tb) |
| 1934 | { | 1934 | { |
| 1935 | struct net_device *dev; | 1935 | struct net_device *dev, *aux; |
| 1936 | int err; | 1936 | int err; |
| 1937 | 1937 | ||
| 1938 | for_each_netdev(net, dev) { | 1938 | for_each_netdev_safe(net, dev, aux) { |
| 1939 | if (dev->group == group) { | 1939 | if (dev->group == group) { |
| 1940 | err = do_setlink(skb, dev, ifm, tb, NULL, 0); | 1940 | err = do_setlink(skb, dev, ifm, tb, NULL, 0); |
| 1941 | if (err < 0) | 1941 | if (err < 0) |
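rtnl_group_changelink() switches to the _safe iterator, presumably because do_setlink() can cause the device being visited to drop off the per-namespace list (for example when the link is moved to another namespace), so the next pointer must be cached before the body runs. The same concern applies to any intrusive list walk whose body may free the current node; a userspace sketch with a hand-rolled singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

int main(void)
{
	/* Build a small list: 1 -> 2 -> 3 */
	struct node *head = NULL;
	for (int id = 3; id >= 1; id--) {
		struct node *n = malloc(sizeof(*n));
		n->id = id;
		n->next = head;
		head = n;
	}

	/* "Safe" traversal: remember the next pointer before the body, so the
	 * body may unlink and free the current node (here: every even id)
	 * without the loop then touching freed memory.
	 */
	struct node **prev = &head;
	struct node *cur, *next;
	for (cur = head; cur; cur = next) {
		next = cur->next;
		if (cur->id % 2 == 0) {
			*prev = next;
			free(cur);
			continue;
		}
		prev = &cur->next;
		printf("kept %d\n", cur->id);
	}

	while (head) {		/* tidy up what is left */
		struct node *n = head;
		head = head->next;
		free(n);
	}
	return 0;
}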
diff --git a/net/core/sock.c b/net/core/sock.c index 78e89eb7eb70..71e3e5f1eaa0 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -653,6 +653,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) | |||
| 653 | sock_reset_flag(sk, bit); | 653 | sock_reset_flag(sk, bit); |
| 654 | } | 654 | } |
| 655 | 655 | ||
| 656 | bool sk_mc_loop(struct sock *sk) | ||
| 657 | { | ||
| 658 | if (dev_recursion_level()) | ||
| 659 | return false; | ||
| 660 | if (!sk) | ||
| 661 | return true; | ||
| 662 | switch (sk->sk_family) { | ||
| 663 | case AF_INET: | ||
| 664 | return inet_sk(sk)->mc_loop; | ||
| 665 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 666 | case AF_INET6: | ||
| 667 | return inet6_sk(sk)->mc_loop; | ||
| 668 | #endif | ||
| 669 | } | ||
| 670 | WARN_ON(1); | ||
| 671 | return true; | ||
| 672 | } | ||
| 673 | EXPORT_SYMBOL(sk_mc_loop); | ||
| 674 | |||
| 656 | /* | 675 | /* |
| 657 | * This is meant for all protocols to use and covers goings on | 676 | * This is meant for all protocols to use and covers goings on |
| 658 | * at the socket level. Everything here is generic. | 677 | * at the socket level. Everything here is generic. |
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index faf7cc3483fe..9d66a0f72f90 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c | |||
| @@ -248,7 +248,9 @@ void __init dn_fib_rules_init(void) | |||
| 248 | 248 | ||
| 249 | void __exit dn_fib_rules_cleanup(void) | 249 | void __exit dn_fib_rules_cleanup(void) |
| 250 | { | 250 | { |
| 251 | rtnl_lock(); | ||
| 251 | fib_rules_unregister(dn_fib_rules_ops); | 252 | fib_rules_unregister(dn_fib_rules_ops); |
| 253 | rtnl_unlock(); | ||
| 252 | rcu_barrier(); | 254 | rcu_barrier(); |
| 253 | } | 255 | } |
| 254 | 256 | ||
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 2173402d87e0..4dea2e0681d1 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
| @@ -501,12 +501,10 @@ static struct net_device *dev_to_net_device(struct device *dev) | |||
| 501 | #ifdef CONFIG_OF | 501 | #ifdef CONFIG_OF |
| 502 | static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, | 502 | static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, |
| 503 | struct dsa_chip_data *cd, | 503 | struct dsa_chip_data *cd, |
| 504 | int chip_index, | 504 | int chip_index, int port_index, |
| 505 | struct device_node *link) | 505 | struct device_node *link) |
| 506 | { | 506 | { |
| 507 | int ret; | ||
| 508 | const __be32 *reg; | 507 | const __be32 *reg; |
| 509 | int link_port_addr; | ||
| 510 | int link_sw_addr; | 508 | int link_sw_addr; |
| 511 | struct device_node *parent_sw; | 509 | struct device_node *parent_sw; |
| 512 | int len; | 510 | int len; |
| @@ -519,6 +517,10 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, | |||
| 519 | if (!reg || (len != sizeof(*reg) * 2)) | 517 | if (!reg || (len != sizeof(*reg) * 2)) |
| 520 | return -EINVAL; | 518 | return -EINVAL; |
| 521 | 519 | ||
| 520 | /* | ||
| 521 | * Get the destination switch number from the second field of its 'reg' | ||
| 522 | * property, i.e. for "reg = <0x19 1>" sw_addr is '1'. | ||
| 523 | */ | ||
| 522 | link_sw_addr = be32_to_cpup(reg + 1); | 524 | link_sw_addr = be32_to_cpup(reg + 1); |
| 523 | 525 | ||
| 524 | if (link_sw_addr >= pd->nr_chips) | 526 | if (link_sw_addr >= pd->nr_chips) |
| @@ -535,20 +537,9 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, | |||
| 535 | memset(cd->rtable, -1, pd->nr_chips * sizeof(s8)); | 537 | memset(cd->rtable, -1, pd->nr_chips * sizeof(s8)); |
| 536 | } | 538 | } |
| 537 | 539 | ||
| 538 | reg = of_get_property(link, "reg", NULL); | 540 | cd->rtable[link_sw_addr] = port_index; |
| 539 | if (!reg) { | ||
| 540 | ret = -EINVAL; | ||
| 541 | goto out; | ||
| 542 | } | ||
| 543 | |||
| 544 | link_port_addr = be32_to_cpup(reg); | ||
| 545 | |||
| 546 | cd->rtable[link_sw_addr] = link_port_addr; | ||
| 547 | 541 | ||
| 548 | return 0; | 542 | return 0; |
| 549 | out: | ||
| 550 | kfree(cd->rtable); | ||
| 551 | return ret; | ||
| 552 | } | 543 | } |
| 553 | 544 | ||
| 554 | static void dsa_of_free_platform_data(struct dsa_platform_data *pd) | 545 | static void dsa_of_free_platform_data(struct dsa_platform_data *pd) |
| @@ -658,7 +649,7 @@ static int dsa_of_probe(struct platform_device *pdev) | |||
| 658 | if (!strcmp(port_name, "dsa") && link && | 649 | if (!strcmp(port_name, "dsa") && link && |
| 659 | pd->nr_chips > 1) { | 650 | pd->nr_chips > 1) { |
| 660 | ret = dsa_of_setup_routing_table(pd, cd, | 651 | ret = dsa_of_setup_routing_table(pd, cd, |
| 661 | chip_index, link); | 652 | chip_index, port_index, link); |
| 662 | if (ret) | 653 | if (ret) |
| 663 | goto out_free_chip; | 654 | goto out_free_chip; |
| 664 | } | 655 | } |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 57be71dd6a9e..23b9b3e86f4c 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
| @@ -1111,11 +1111,10 @@ static void ip_fib_net_exit(struct net *net) | |||
| 1111 | { | 1111 | { |
| 1112 | unsigned int i; | 1112 | unsigned int i; |
| 1113 | 1113 | ||
| 1114 | rtnl_lock(); | ||
| 1114 | #ifdef CONFIG_IP_MULTIPLE_TABLES | 1115 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
| 1115 | fib4_rules_exit(net); | 1116 | fib4_rules_exit(net); |
| 1116 | #endif | 1117 | #endif |
| 1117 | |||
| 1118 | rtnl_lock(); | ||
| 1119 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) { | 1118 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) { |
| 1120 | struct fib_table *tb; | 1119 | struct fib_table *tb; |
| 1121 | struct hlist_head *head; | 1120 | struct hlist_head *head; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9d78427652d2..fe54eba6d00d 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
| @@ -268,7 +268,7 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
| 268 | return 0; | 268 | return 0; |
| 269 | 269 | ||
| 270 | err2: | 270 | err2: |
| 271 | kfree(mrt); | 271 | ipmr_free_table(mrt); |
| 272 | err1: | 272 | err1: |
| 273 | fib_rules_unregister(ops); | 273 | fib_rules_unregister(ops); |
| 274 | return err; | 274 | return err; |
| @@ -278,11 +278,13 @@ static void __net_exit ipmr_rules_exit(struct net *net) | |||
| 278 | { | 278 | { |
| 279 | struct mr_table *mrt, *next; | 279 | struct mr_table *mrt, *next; |
| 280 | 280 | ||
| 281 | rtnl_lock(); | ||
| 281 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { | 282 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { |
| 282 | list_del(&mrt->list); | 283 | list_del(&mrt->list); |
| 283 | ipmr_free_table(mrt); | 284 | ipmr_free_table(mrt); |
| 284 | } | 285 | } |
| 285 | fib_rules_unregister(net->ipv4.mr_rules_ops); | 286 | fib_rules_unregister(net->ipv4.mr_rules_ops); |
| 287 | rtnl_unlock(); | ||
| 286 | } | 288 | } |
| 287 | #else | 289 | #else |
| 288 | #define ipmr_for_each_table(mrt, net) \ | 290 | #define ipmr_for_each_table(mrt, net) \ |
| @@ -308,7 +310,10 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
| 308 | 310 | ||
| 309 | static void __net_exit ipmr_rules_exit(struct net *net) | 311 | static void __net_exit ipmr_rules_exit(struct net *net) |
| 310 | { | 312 | { |
| 313 | rtnl_lock(); | ||
| 311 | ipmr_free_table(net->ipv4.mrt); | 314 | ipmr_free_table(net->ipv4.mrt); |
| 315 | net->ipv4.mrt = NULL; | ||
| 316 | rtnl_unlock(); | ||
| 312 | } | 317 | } |
| 313 | #endif | 318 | #endif |
| 314 | 319 | ||
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 99e810f84671..cf5e82f39d3b 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
| @@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb, | |||
| 272 | &chainname, &comment, &rulenum) != 0) | 272 | &chainname, &comment, &rulenum) != 0) |
| 273 | break; | 273 | break; |
| 274 | 274 | ||
| 275 | nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo, | 275 | nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo, |
| 276 | "TRACE: %s:%s:%s:%u ", | 276 | "TRACE: %s:%s:%s:%u ", |
| 277 | tablename, chainname, comment, rulenum); | 277 | tablename, chainname, comment, rulenum); |
| 278 | } | 278 | } |
| 279 | #endif | 279 | #endif |
| 280 | 280 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index fb4cf8b8e121..f501ac048366 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -3105,10 +3105,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
| 3105 | if (!first_ackt.v64) | 3105 | if (!first_ackt.v64) |
| 3106 | first_ackt = last_ackt; | 3106 | first_ackt = last_ackt; |
| 3107 | 3107 | ||
| 3108 | if (!(sacked & TCPCB_SACKED_ACKED)) | 3108 | if (!(sacked & TCPCB_SACKED_ACKED)) { |
| 3109 | reord = min(pkts_acked, reord); | 3109 | reord = min(pkts_acked, reord); |
| 3110 | if (!after(scb->end_seq, tp->high_seq)) | 3110 | if (!after(scb->end_seq, tp->high_seq)) |
| 3111 | flag |= FLAG_ORIG_SACK_ACKED; | 3111 | flag |= FLAG_ORIG_SACK_ACKED; |
| 3112 | } | ||
| 3112 | } | 3113 | } |
| 3113 | 3114 | ||
| 3114 | if (sacked & TCPCB_SACKED_ACKED) | 3115 | if (sacked & TCPCB_SACKED_ACKED) |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5a2dfed4783b..f1756ee02207 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -1518,7 +1518,7 @@ void tcp_v4_early_demux(struct sk_buff *skb) | |||
| 1518 | skb->sk = sk; | 1518 | skb->sk = sk; |
| 1519 | skb->destructor = sock_edemux; | 1519 | skb->destructor = sock_edemux; |
| 1520 | if (sk->sk_state != TCP_TIME_WAIT) { | 1520 | if (sk->sk_state != TCP_TIME_WAIT) { |
| 1521 | struct dst_entry *dst = sk->sk_rx_dst; | 1521 | struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); |
| 1522 | 1522 | ||
| 1523 | if (dst) | 1523 | if (dst) |
| 1524 | dst = dst_check(dst, 0); | 1524 | dst = dst_check(dst, 0); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a2a796c5536b..1db253e36045 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -2773,15 +2773,11 @@ void tcp_send_fin(struct sock *sk) | |||
| 2773 | } else { | 2773 | } else { |
| 2774 | /* Socket is locked, keep trying until memory is available. */ | 2774 | /* Socket is locked, keep trying until memory is available. */ |
| 2775 | for (;;) { | 2775 | for (;;) { |
| 2776 | skb = alloc_skb_fclone(MAX_TCP_HEADER, | 2776 | skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); |
| 2777 | sk->sk_allocation); | ||
| 2778 | if (skb) | 2777 | if (skb) |
| 2779 | break; | 2778 | break; |
| 2780 | yield(); | 2779 | yield(); |
| 2781 | } | 2780 | } |
| 2782 | |||
| 2783 | /* Reserve space for headers and prepare control bits. */ | ||
| 2784 | skb_reserve(skb, MAX_TCP_HEADER); | ||
| 2785 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ | 2781 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ |
| 2786 | tcp_init_nondata_skb(skb, tp->write_seq, | 2782 | tcp_init_nondata_skb(skb, tp->write_seq, |
| 2787 | TCPHDR_ACK | TCPHDR_FIN); | 2783 | TCPHDR_ACK | TCPHDR_FIN); |
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index b4d5e1d97c1b..70bc6abc0639 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
| @@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
| 104 | goto again; | 104 | goto again; |
| 105 | flp6->saddr = saddr; | 105 | flp6->saddr = saddr; |
| 106 | } | 106 | } |
| 107 | err = rt->dst.error; | ||
| 107 | goto out; | 108 | goto out; |
| 108 | } | 109 | } |
| 109 | again: | 110 | again: |
| @@ -321,7 +322,9 @@ out_fib6_rules_ops: | |||
| 321 | 322 | ||
| 322 | static void __net_exit fib6_rules_net_exit(struct net *net) | 323 | static void __net_exit fib6_rules_net_exit(struct net *net) |
| 323 | { | 324 | { |
| 325 | rtnl_lock(); | ||
| 324 | fib_rules_unregister(net->ipv6.fib6_rules_ops); | 326 | fib_rules_unregister(net->ipv6.fib6_rules_ops); |
| 327 | rtnl_unlock(); | ||
| 325 | } | 328 | } |
| 326 | 329 | ||
| 327 | static struct pernet_operations fib6_rules_net_ops = { | 330 | static struct pernet_operations fib6_rules_net_ops = { |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 7e80b61b51ff..36cf0ab685a0 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -542,7 +542,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
| 542 | { | 542 | { |
| 543 | struct sk_buff *frag; | 543 | struct sk_buff *frag; |
| 544 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); | 544 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); |
| 545 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; | 545 | struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? |
| 546 | inet6_sk(skb->sk) : NULL; | ||
| 546 | struct ipv6hdr *tmp_hdr; | 547 | struct ipv6hdr *tmp_hdr; |
| 547 | struct frag_hdr *fh; | 548 | struct frag_hdr *fh; |
| 548 | unsigned int mtu, hlen, left, len; | 549 | unsigned int mtu, hlen, left, len; |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 34b682617f50..312e0ff47339 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
| @@ -252,7 +252,7 @@ static int __net_init ip6mr_rules_init(struct net *net) | |||
| 252 | return 0; | 252 | return 0; |
| 253 | 253 | ||
| 254 | err2: | 254 | err2: |
| 255 | kfree(mrt); | 255 | ip6mr_free_table(mrt); |
| 256 | err1: | 256 | err1: |
| 257 | fib_rules_unregister(ops); | 257 | fib_rules_unregister(ops); |
| 258 | return err; | 258 | return err; |
| @@ -267,8 +267,8 @@ static void __net_exit ip6mr_rules_exit(struct net *net) | |||
| 267 | list_del(&mrt->list); | 267 | list_del(&mrt->list); |
| 268 | ip6mr_free_table(mrt); | 268 | ip6mr_free_table(mrt); |
| 269 | } | 269 | } |
| 270 | rtnl_unlock(); | ||
| 271 | fib_rules_unregister(net->ipv6.mr6_rules_ops); | 270 | fib_rules_unregister(net->ipv6.mr6_rules_ops); |
| 271 | rtnl_unlock(); | ||
| 272 | } | 272 | } |
| 273 | #else | 273 | #else |
| 274 | #define ip6mr_for_each_table(mrt, net) \ | 274 | #define ip6mr_for_each_table(mrt, net) \ |
| @@ -336,7 +336,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) | |||
| 336 | 336 | ||
| 337 | static void ip6mr_free_table(struct mr6_table *mrt) | 337 | static void ip6mr_free_table(struct mr6_table *mrt) |
| 338 | { | 338 | { |
| 339 | del_timer(&mrt->ipmr_expire_timer); | 339 | del_timer_sync(&mrt->ipmr_expire_timer); |
| 340 | mroute_clean_tables(mrt); | 340 | mroute_clean_tables(mrt); |
| 341 | kfree(mrt); | 341 | kfree(mrt); |
| 342 | } | 342 | } |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 471ed24aabae..14ecdaf06bf7 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
| @@ -1218,7 +1218,14 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
| 1218 | if (rt) | 1218 | if (rt) |
| 1219 | rt6_set_expires(rt, jiffies + (HZ * lifetime)); | 1219 | rt6_set_expires(rt, jiffies + (HZ * lifetime)); |
| 1220 | if (ra_msg->icmph.icmp6_hop_limit) { | 1220 | if (ra_msg->icmph.icmp6_hop_limit) { |
| 1221 | in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; | 1221 | /* Only set hop_limit on the interface if it is higher than |
| 1222 | * the current hop_limit. | ||
| 1223 | */ | ||
| 1224 | if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) { | ||
| 1225 | in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; | ||
| 1226 | } else { | ||
| 1227 | ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n"); | ||
| 1228 | } | ||
| 1222 | if (rt) | 1229 | if (rt) |
| 1223 | dst_metric_set(&rt->dst, RTAX_HOPLIMIT, | 1230 | dst_metric_set(&rt->dst, RTAX_HOPLIMIT, |
| 1224 | ra_msg->icmph.icmp6_hop_limit); | 1231 | ra_msg->icmph.icmp6_hop_limit); |
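The ndisc change only lets a router advertisement raise the interface hop limit; a lower advertised value is ignored (and warned about) instead of being applied. Clamping untrusted input so it can only move a setting in the safe direction is the entire fix; as a one-liner:

#include <stdio.h>

static unsigned int apply_ra_hop_limit(unsigned int current_limit,
				       unsigned int advertised)
{
	/* Accept the advertisement only if it raises the limit; otherwise
	 * keep the existing value (the kernel also logs a warning here).
	 */
	return advertised > current_limit ? advertised : current_limit;
}

int main(void)
{
	printf("%u\n", apply_ra_hop_limit(64, 255));	/* 255: raised */
	printf("%u\n", apply_ra_hop_limit(64, 1));	/* 64: lowering ignored */
	return 0;
}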
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index e080fbbbc0e5..bb00c6f2a885 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
| @@ -298,9 +298,9 @@ static void trace_packet(const struct sk_buff *skb, | |||
| 298 | &chainname, &comment, &rulenum) != 0) | 298 | &chainname, &comment, &rulenum) != 0) |
| 299 | break; | 299 | break; |
| 300 | 300 | ||
| 301 | nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo, | 301 | nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, |
| 302 | "TRACE: %s:%s:%s:%u ", | 302 | "TRACE: %s:%s:%s:%u ", |
| 303 | tablename, chainname, comment, rulenum); | 303 | tablename, chainname, comment, rulenum); |
| 304 | } | 304 | } |
| 305 | #endif | 305 | #endif |
| 306 | 306 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5d46832c6f72..1f5e62229aaa 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -1411,6 +1411,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, | |||
| 1411 | TCP_SKB_CB(skb)->sacked = 0; | 1411 | TCP_SKB_CB(skb)->sacked = 0; |
| 1412 | } | 1412 | } |
| 1413 | 1413 | ||
| 1414 | static void tcp_v6_restore_cb(struct sk_buff *skb) | ||
| 1415 | { | ||
| 1416 | /* We need to move header back to the beginning if xfrm6_policy_check() | ||
| 1417 | * and tcp_v6_fill_cb() are going to be called again. | ||
| 1418 | */ | ||
| 1419 | memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, | ||
| 1420 | sizeof(struct inet6_skb_parm)); | ||
| 1421 | } | ||
| 1422 | |||
| 1414 | static int tcp_v6_rcv(struct sk_buff *skb) | 1423 | static int tcp_v6_rcv(struct sk_buff *skb) |
| 1415 | { | 1424 | { |
| 1416 | const struct tcphdr *th; | 1425 | const struct tcphdr *th; |
| @@ -1543,6 +1552,7 @@ do_time_wait: | |||
| 1543 | inet_twsk_deschedule(tw, &tcp_death_row); | 1552 | inet_twsk_deschedule(tw, &tcp_death_row); |
| 1544 | inet_twsk_put(tw); | 1553 | inet_twsk_put(tw); |
| 1545 | sk = sk2; | 1554 | sk = sk2; |
| 1555 | tcp_v6_restore_cb(skb); | ||
| 1546 | goto process; | 1556 | goto process; |
| 1547 | } | 1557 | } |
| 1548 | /* Fall through to ACK */ | 1558 | /* Fall through to ACK */ |
| @@ -1551,6 +1561,7 @@ do_time_wait: | |||
| 1551 | tcp_v6_timewait_ack(sk, skb); | 1561 | tcp_v6_timewait_ack(sk, skb); |
| 1552 | break; | 1562 | break; |
| 1553 | case TCP_TW_RST: | 1563 | case TCP_TW_RST: |
| 1564 | tcp_v6_restore_cb(skb); | ||
| 1554 | goto no_tcp_socket; | 1565 | goto no_tcp_socket; |
| 1555 | case TCP_TW_SUCCESS: | 1566 | case TCP_TW_SUCCESS: |
| 1556 | ; | 1567 | ; |
| @@ -1585,7 +1596,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb) | |||
| 1585 | skb->sk = sk; | 1596 | skb->sk = sk; |
| 1586 | skb->destructor = sock_edemux; | 1597 | skb->destructor = sock_edemux; |
| 1587 | if (sk->sk_state != TCP_TIME_WAIT) { | 1598 | if (sk->sk_state != TCP_TIME_WAIT) { |
| 1588 | struct dst_entry *dst = sk->sk_rx_dst; | 1599 | struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); |
| 1589 | 1600 | ||
| 1590 | if (dst) | 1601 | if (dst) |
| 1591 | dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); | 1602 | dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); |
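tcp_v6_restore_cb() copies the saved IPv6 control-block data back to the front of skb->cb before the packet is handed back for re-processing; source and destination both live in the same small cb[] area and can overlap, which is why memmove() rather than memcpy() is used. A small reminder of that distinction:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char cb[16] = "0123456789abcde";

	/* Move bytes 4..13 back to the start of the same buffer. The regions
	 * overlap, so memmove() is required; memcpy() would be undefined.
	 */
	memmove(cb, cb + 4, 10);
	printf("%.10s\n", cb);	/* "456789abcd" */
	return 0;
}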
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index ab889bb16b3c..be2c0ba82c85 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c | |||
| @@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, | |||
| 112 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); | 112 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); |
| 113 | fptr->nexthdr = nexthdr; | 113 | fptr->nexthdr = nexthdr; |
| 114 | fptr->reserved = 0; | 114 | fptr->reserved = 0; |
| 115 | if (skb_shinfo(skb)->ip6_frag_id) | 115 | if (!skb_shinfo(skb)->ip6_frag_id) |
| 116 | fptr->identification = skb_shinfo(skb)->ip6_frag_id; | 116 | ipv6_proxy_select_ident(skb); |
| 117 | else | 117 | fptr->identification = skb_shinfo(skb)->ip6_frag_id; |
| 118 | ipv6_select_ident(fptr, | ||
| 119 | (struct rt6_info *)skb_dst(skb)); | ||
| 120 | 118 | ||
| 121 | /* Fragment the skb. ipv6 header and the remaining fields of the | 119 | /* Fragment the skb. ipv6 header and the remaining fields of the |
| 122 | * fragment header are updated in ipv6_gso_segment() | 120 | * fragment header are updated in ipv6_gso_segment() |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 2e9953b2db84..53d931172088 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
| @@ -1114,10 +1114,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1114 | noblock, &err); | 1114 | noblock, &err); |
| 1115 | else | 1115 | else |
| 1116 | skb = sock_alloc_send_skb(sk, len, noblock, &err); | 1116 | skb = sock_alloc_send_skb(sk, len, noblock, &err); |
| 1117 | if (!skb) { | 1117 | if (!skb) |
| 1118 | err = -ENOMEM; | ||
| 1119 | goto out; | 1118 | goto out; |
| 1120 | } | ||
| 1121 | if (iucv->transport == AF_IUCV_TRANS_HIPER) | 1119 | if (iucv->transport == AF_IUCV_TRANS_HIPER) |
| 1122 | skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); | 1120 | skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); |
| 1123 | if (memcpy_from_msg(skb_put(skb, len), msg, len)) { | 1121 | if (memcpy_from_msg(skb_put(skb, len), msg, len)) { |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 895348e44c7d..a29a504492af 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -1871,6 +1871,7 @@ static int __init l2tp_init(void) | |||
| 1871 | l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0); | 1871 | l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0); |
| 1872 | if (!l2tp_wq) { | 1872 | if (!l2tp_wq) { |
| 1873 | pr_err("alloc_workqueue failed\n"); | 1873 | pr_err("alloc_workqueue failed\n"); |
| 1874 | unregister_pernet_device(&l2tp_net_ops); | ||
| 1874 | rc = -ENOMEM; | 1875 | rc = -ENOMEM; |
| 1875 | goto out; | 1876 | goto out; |
| 1876 | } | 1877 | } |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index a48bad468880..7702978a4c99 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
| @@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h) | |||
| 49 | container_of(h, struct tid_ampdu_rx, rcu_head); | 49 | container_of(h, struct tid_ampdu_rx, rcu_head); |
| 50 | int i; | 50 | int i; |
| 51 | 51 | ||
| 52 | del_timer_sync(&tid_rx->reorder_timer); | ||
| 53 | |||
| 54 | for (i = 0; i < tid_rx->buf_size; i++) | 52 | for (i = 0; i < tid_rx->buf_size; i++) |
| 55 | __skb_queue_purge(&tid_rx->reorder_buf[i]); | 53 | __skb_queue_purge(&tid_rx->reorder_buf[i]); |
| 56 | kfree(tid_rx->reorder_buf); | 54 | kfree(tid_rx->reorder_buf); |
| @@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | |||
| 93 | 91 | ||
| 94 | del_timer_sync(&tid_rx->session_timer); | 92 | del_timer_sync(&tid_rx->session_timer); |
| 95 | 93 | ||
| 94 | /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */ | ||
| 95 | spin_lock_bh(&tid_rx->reorder_lock); | ||
| 96 | tid_rx->removed = true; | ||
| 97 | spin_unlock_bh(&tid_rx->reorder_lock); | ||
| 98 | del_timer_sync(&tid_rx->reorder_timer); | ||
| 99 | |||
| 96 | call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); | 100 | call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); |
| 97 | } | 101 | } |
| 98 | 102 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 944bdc04e913..1eb730bf8752 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -873,9 +873,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, | |||
| 873 | 873 | ||
| 874 | set_release_timer: | 874 | set_release_timer: |
| 875 | 875 | ||
| 876 | mod_timer(&tid_agg_rx->reorder_timer, | 876 | if (!tid_agg_rx->removed) |
| 877 | tid_agg_rx->reorder_time[j] + 1 + | 877 | mod_timer(&tid_agg_rx->reorder_timer, |
| 878 | HT_RX_REORDER_BUF_TIMEOUT); | 878 | tid_agg_rx->reorder_time[j] + 1 + |
| 879 | HT_RX_REORDER_BUF_TIMEOUT); | ||
| 879 | } else { | 880 | } else { |
| 880 | del_timer(&tid_agg_rx->reorder_timer); | 881 | del_timer(&tid_agg_rx->reorder_timer); |
| 881 | } | 882 | } |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 925e68fe64c7..fb0fc1302a58 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
| @@ -175,6 +175,7 @@ struct tid_ampdu_tx { | |||
| 175 | * @reorder_lock: serializes access to reorder buffer, see below. | 175 | * @reorder_lock: serializes access to reorder buffer, see below. |
| 176 | * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and | 176 | * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and |
| 177 | * and ssn. | 177 | * and ssn. |
| 178 | * @removed: this session is removed (but might have been found due to RCU) | ||
| 178 | * | 179 | * |
| 179 | * This structure's lifetime is managed by RCU, assignments to | 180 | * This structure's lifetime is managed by RCU, assignments to |
| 180 | * the array holding it must hold the aggregation mutex. | 181 | * the array holding it must hold the aggregation mutex. |
| @@ -199,6 +200,7 @@ struct tid_ampdu_rx { | |||
| 199 | u16 timeout; | 200 | u16 timeout; |
| 200 | u8 dialog_token; | 201 | u8 dialog_token; |
| 201 | bool auto_seq; | 202 | bool auto_seq; |
| 203 | bool removed; | ||
| 202 | }; | 204 | }; |
| 203 | 205 | ||
| 204 | /** | 206 | /** |
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 0d8448f19dfe..675d12c69e32 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
| @@ -212,6 +212,30 @@ void nf_log_packet(struct net *net, | |||
| 212 | } | 212 | } |
| 213 | EXPORT_SYMBOL(nf_log_packet); | 213 | EXPORT_SYMBOL(nf_log_packet); |
| 214 | 214 | ||
| 215 | void nf_log_trace(struct net *net, | ||
| 216 | u_int8_t pf, | ||
| 217 | unsigned int hooknum, | ||
| 218 | const struct sk_buff *skb, | ||
| 219 | const struct net_device *in, | ||
| 220 | const struct net_device *out, | ||
| 221 | const struct nf_loginfo *loginfo, const char *fmt, ...) | ||
| 222 | { | ||
| 223 | va_list args; | ||
| 224 | char prefix[NF_LOG_PREFIXLEN]; | ||
| 225 | const struct nf_logger *logger; | ||
| 226 | |||
| 227 | rcu_read_lock(); | ||
| 228 | logger = rcu_dereference(net->nf.nf_loggers[pf]); | ||
| 229 | if (logger) { | ||
| 230 | va_start(args, fmt); | ||
| 231 | vsnprintf(prefix, sizeof(prefix), fmt, args); | ||
| 232 | va_end(args); | ||
| 233 | logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); | ||
| 234 | } | ||
| 235 | rcu_read_unlock(); | ||
| 236 | } | ||
| 237 | EXPORT_SYMBOL(nf_log_trace); | ||
| 238 | |||
| 215 | #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) | 239 | #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) |
| 216 | 240 | ||
| 217 | struct nf_log_buf { | 241 | struct nf_log_buf { |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 6ab777912237..ac1a9528dbf2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -1225,7 +1225,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
| 1225 | 1225 | ||
| 1226 | if (nla[NFTA_CHAIN_POLICY]) { | 1226 | if (nla[NFTA_CHAIN_POLICY]) { |
| 1227 | if ((chain != NULL && | 1227 | if ((chain != NULL && |
| 1228 | !(chain->flags & NFT_BASE_CHAIN)) || | 1228 | !(chain->flags & NFT_BASE_CHAIN))) |
| 1229 | return -EOPNOTSUPP; | ||
| 1230 | |||
| 1231 | if (chain == NULL && | ||
| 1229 | nla[NFTA_CHAIN_HOOK] == NULL) | 1232 | nla[NFTA_CHAIN_HOOK] == NULL) |
| 1230 | return -EOPNOTSUPP; | 1233 | return -EOPNOTSUPP; |
| 1231 | 1234 | ||
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 3b90eb2b2c55..2d298dccb6dd 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c | |||
| @@ -94,10 +94,10 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt, | |||
| 94 | { | 94 | { |
| 95 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); | 95 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); |
| 96 | 96 | ||
| 97 | nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, | 97 | nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, |
| 98 | pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", | 98 | pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", |
| 99 | chain->table->name, chain->name, comments[type], | 99 | chain->table->name, chain->name, comments[type], |
| 100 | rulenum); | 100 | rulenum); |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | unsigned int | 103 | unsigned int |
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index a5599fc51a6f..54330fb5efaf 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
| @@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, | |||
| 77 | if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) | 77 | if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) |
| 78 | return -EINVAL; | 78 | return -EINVAL; |
| 79 | 79 | ||
| 80 | /* Not all fields are initialized so first zero the tuple */ | ||
| 81 | memset(tuple, 0, sizeof(struct nf_conntrack_tuple)); | ||
| 82 | |||
| 80 | tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); | 83 | tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); |
| 81 | tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); | 84 | tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); |
| 82 | 85 | ||
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 213584cf04b3..65f3e2b6be44 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
| @@ -133,6 +133,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
| 133 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; | 133 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
| 134 | break; | 134 | break; |
| 135 | case AF_INET6: | 135 | case AF_INET6: |
| 136 | if (proto) | ||
| 137 | entry->e6.ipv6.flags |= IP6T_F_PROTO; | ||
| 138 | |||
| 136 | entry->e6.ipv6.proto = proto; | 139 | entry->e6.ipv6.proto = proto; |
| 137 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 140 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
| 138 | break; | 141 | break; |
| @@ -344,6 +347,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
| 344 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; | 347 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
| 345 | break; | 348 | break; |
| 346 | case AF_INET6: | 349 | case AF_INET6: |
| 350 | if (proto) | ||
| 351 | entry->e6.ipv6.flags |= IP6T_F_PROTO; | ||
| 352 | |||
| 347 | entry->e6.ipv6.proto = proto; | 353 | entry->e6.ipv6.proto = proto; |
| 348 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 354 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
| 349 | break; | 355 | break; |
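Note on the nft_compat.c hunks: ip6_tables only honours the ipv6.proto field when IP6T_F_PROTO is set in ipv6.flags (unlike ip_tables, which always compares ip.proto), so filling in the protocol without also setting the flag left protocol-dependent xt targets and matches effectively unconstrained under nft_compat. The two hunks mirror what ip6tables userspace sets when it builds an entry. A simplified illustration of the flag-gated comparison (the constants and struct are stand-ins, not the uapi definitions):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in values; the real flags live in the uapi ip6_tables header. */
    #define EX_F_PROTO   0x01
    #define EX_INV_PROTO 0x40

    struct ex_ip6_rule { uint16_t proto; uint8_t flags; uint8_t invflags; };

    /* Simplified: the protocol is only compared when the flag is set. */
    static bool proto_matches(const struct ex_ip6_rule *r, uint16_t pkt_proto)
    {
            if (!(r->flags & EX_F_PROTO))
                    return true;            /* proto field is ignored */
            return (r->proto == pkt_proto) ^ !!(r->invflags & EX_INV_PROTO);
    }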
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index c82df0a48fcd..37c15e674884 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c | |||
| @@ -153,6 +153,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, | |||
| 153 | iter->err = err; | 153 | iter->err = err; |
| 154 | goto out; | 154 | goto out; |
| 155 | } | 155 | } |
| 156 | |||
| 157 | continue; | ||
| 156 | } | 158 | } |
| 157 | 159 | ||
| 158 | if (iter->count < iter->skip) | 160 | if (iter->count < iter->skip) |
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index ef8a926752a9..50e1e5aaf4ce 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
| @@ -513,8 +513,8 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par) | |||
| 513 | { | 513 | { |
| 514 | const struct ip6t_ip6 *i = par->entryinfo; | 514 | const struct ip6t_ip6 *i = par->entryinfo; |
| 515 | 515 | ||
| 516 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) | 516 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && |
| 517 | && !(i->flags & IP6T_INV_PROTO)) | 517 | !(i->invflags & IP6T_INV_PROTO)) |
| 518 | return 0; | 518 | return 0; |
| 519 | 519 | ||
| 520 | pr_info("Can be used only in combination with " | 520 | pr_info("Can be used only in combination with " |
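Note on the xt_TPROXY.c hunk: besides moving the && to the end of the first line per kernel coding style, the real fix is the field being tested. The protocol-inversion bit is stored in invflags, not flags, so the old test never saw it and tproxy_tg6_check() would accept a rule written with an inverted protocol match ("! -p tcp") that the target cannot actually honour.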
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index ec2954ffc690..067a3fff1d2c 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
| @@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport) | |||
| 274 | ASSERT_OVSL(); | 274 | ASSERT_OVSL(); |
| 275 | 275 | ||
| 276 | hlist_del_rcu(&vport->hash_node); | 276 | hlist_del_rcu(&vport->hash_node); |
| 277 | |||
| 278 | vport->ops->destroy(vport); | ||
| 279 | |||
| 280 | module_put(vport->ops->owner); | 277 | module_put(vport->ops->owner); |
| 278 | vport->ops->destroy(vport); | ||
| 281 | } | 279 | } |
| 282 | 280 | ||
| 283 | /** | 281 | /** |
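Note on the openvswitch vport.c hunk: vport->ops->destroy() is what ultimately frees the vport (typically via an RCU callback), so dereferencing vport->ops->owner afterwards was a use-after-free. Dropping the module reference first means nothing touches the vport once its destructor has been called.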
diff --git a/net/socket.c b/net/socket.c index bbedbfcb42c2..245330ca0015 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -1702,6 +1702,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, | |||
| 1702 | 1702 | ||
| 1703 | if (len > INT_MAX) | 1703 | if (len > INT_MAX) |
| 1704 | len = INT_MAX; | 1704 | len = INT_MAX; |
| 1705 | if (unlikely(!access_ok(VERIFY_READ, buff, len))) | ||
| 1706 | return -EFAULT; | ||
| 1705 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1707 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
| 1706 | if (!sock) | 1708 | if (!sock) |
| 1707 | goto out; | 1709 | goto out; |
| @@ -1760,6 +1762,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, | |||
| 1760 | 1762 | ||
| 1761 | if (size > INT_MAX) | 1763 | if (size > INT_MAX) |
| 1762 | size = INT_MAX; | 1764 | size = INT_MAX; |
| 1765 | if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size))) | ||
| 1766 | return -EFAULT; | ||
| 1763 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1767 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
| 1764 | if (!sock) | 1768 | if (!sock) |
| 1765 | goto out; | 1769 | goto out; |
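Note on the net/socket.c hunks: sendto() and recvfrom() now reject an invalid user buffer range with -EFAULT before looking up the socket and building the single-segment iovec; presumably this is needed because the iov_iter constructed further down no longer range-checks the buffer itself, so without the early access_ok() a bogus pointer could reach the protocol code unchecked.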
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 612aa73bbc60..e6ce1517367f 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -303,9 +303,7 @@ static int rpc_client_register(struct rpc_clnt *clnt, | |||
| 303 | struct super_block *pipefs_sb; | 303 | struct super_block *pipefs_sb; |
| 304 | int err; | 304 | int err; |
| 305 | 305 | ||
| 306 | err = rpc_clnt_debugfs_register(clnt); | 306 | rpc_clnt_debugfs_register(clnt); |
| 307 | if (err) | ||
| 308 | return err; | ||
| 309 | 307 | ||
| 310 | pipefs_sb = rpc_get_sb_net(net); | 308 | pipefs_sb = rpc_get_sb_net(net); |
| 311 | if (pipefs_sb) { | 309 | if (pipefs_sb) { |
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index e811f390f9f6..82962f7e6e88 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c | |||
| @@ -129,48 +129,52 @@ static const struct file_operations tasks_fops = { | |||
| 129 | .release = tasks_release, | 129 | .release = tasks_release, |
| 130 | }; | 130 | }; |
| 131 | 131 | ||
| 132 | int | 132 | void |
| 133 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | 133 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) |
| 134 | { | 134 | { |
| 135 | int len, err; | 135 | int len; |
| 136 | char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */ | 136 | char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */ |
| 137 | struct rpc_xprt *xprt; | ||
| 137 | 138 | ||
| 138 | /* Already registered? */ | 139 | /* Already registered? */ |
| 139 | if (clnt->cl_debugfs) | 140 | if (clnt->cl_debugfs || !rpc_clnt_dir) |
| 140 | return 0; | 141 | return; |
| 141 | 142 | ||
| 142 | len = snprintf(name, sizeof(name), "%x", clnt->cl_clid); | 143 | len = snprintf(name, sizeof(name), "%x", clnt->cl_clid); |
| 143 | if (len >= sizeof(name)) | 144 | if (len >= sizeof(name)) |
| 144 | return -EINVAL; | 145 | return; |
| 145 | 146 | ||
| 146 | /* make the per-client dir */ | 147 | /* make the per-client dir */ |
| 147 | clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir); | 148 | clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir); |
| 148 | if (!clnt->cl_debugfs) | 149 | if (!clnt->cl_debugfs) |
| 149 | return -ENOMEM; | 150 | return; |
| 150 | 151 | ||
| 151 | /* make tasks file */ | 152 | /* make tasks file */ |
| 152 | err = -ENOMEM; | ||
| 153 | if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs, | 153 | if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs, |
| 154 | clnt, &tasks_fops)) | 154 | clnt, &tasks_fops)) |
| 155 | goto out_err; | 155 | goto out_err; |
| 156 | 156 | ||
| 157 | err = -EINVAL; | ||
| 158 | rcu_read_lock(); | 157 | rcu_read_lock(); |
| 158 | xprt = rcu_dereference(clnt->cl_xprt); | ||
| 159 | /* no "debugfs" dentry? Don't bother with the symlink. */ | ||
| 160 | if (!xprt->debugfs) { | ||
| 161 | rcu_read_unlock(); | ||
| 162 | return; | ||
| 163 | } | ||
| 159 | len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", | 164 | len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", |
| 160 | rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name); | 165 | xprt->debugfs->d_name.name); |
| 161 | rcu_read_unlock(); | 166 | rcu_read_unlock(); |
| 167 | |||
| 162 | if (len >= sizeof(name)) | 168 | if (len >= sizeof(name)) |
| 163 | goto out_err; | 169 | goto out_err; |
| 164 | 170 | ||
| 165 | err = -ENOMEM; | ||
| 166 | if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name)) | 171 | if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name)) |
| 167 | goto out_err; | 172 | goto out_err; |
| 168 | 173 | ||
| 169 | return 0; | 174 | return; |
| 170 | out_err: | 175 | out_err: |
| 171 | debugfs_remove_recursive(clnt->cl_debugfs); | 176 | debugfs_remove_recursive(clnt->cl_debugfs); |
| 172 | clnt->cl_debugfs = NULL; | 177 | clnt->cl_debugfs = NULL; |
| 173 | return err; | ||
| 174 | } | 178 | } |
| 175 | 179 | ||
| 176 | void | 180 | void |
| @@ -226,33 +230,33 @@ static const struct file_operations xprt_info_fops = { | |||
| 226 | .release = xprt_info_release, | 230 | .release = xprt_info_release, |
| 227 | }; | 231 | }; |
| 228 | 232 | ||
| 229 | int | 233 | void |
| 230 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) | 234 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) |
| 231 | { | 235 | { |
| 232 | int len, id; | 236 | int len, id; |
| 233 | static atomic_t cur_id; | 237 | static atomic_t cur_id; |
| 234 | char name[9]; /* 8 hex digits + NULL term */ | 238 | char name[9]; /* 8 hex digits + NULL term */ |
| 235 | 239 | ||
| 240 | if (!rpc_xprt_dir) | ||
| 241 | return; | ||
| 242 | |||
| 236 | id = (unsigned int)atomic_inc_return(&cur_id); | 243 | id = (unsigned int)atomic_inc_return(&cur_id); |
| 237 | 244 | ||
| 238 | len = snprintf(name, sizeof(name), "%x", id); | 245 | len = snprintf(name, sizeof(name), "%x", id); |
| 239 | if (len >= sizeof(name)) | 246 | if (len >= sizeof(name)) |
| 240 | return -EINVAL; | 247 | return; |
| 241 | 248 | ||
| 242 | /* make the per-client dir */ | 249 | /* make the per-client dir */ |
| 243 | xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir); | 250 | xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir); |
| 244 | if (!xprt->debugfs) | 251 | if (!xprt->debugfs) |
| 245 | return -ENOMEM; | 252 | return; |
| 246 | 253 | ||
| 247 | /* make tasks file */ | 254 | /* make tasks file */ |
| 248 | if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs, | 255 | if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs, |
| 249 | xprt, &xprt_info_fops)) { | 256 | xprt, &xprt_info_fops)) { |
| 250 | debugfs_remove_recursive(xprt->debugfs); | 257 | debugfs_remove_recursive(xprt->debugfs); |
| 251 | xprt->debugfs = NULL; | 258 | xprt->debugfs = NULL; |
| 252 | return -ENOMEM; | ||
| 253 | } | 259 | } |
| 254 | |||
| 255 | return 0; | ||
| 256 | } | 260 | } |
| 257 | 261 | ||
| 258 | void | 262 | void |
| @@ -266,14 +270,17 @@ void __exit | |||
| 266 | sunrpc_debugfs_exit(void) | 270 | sunrpc_debugfs_exit(void) |
| 267 | { | 271 | { |
| 268 | debugfs_remove_recursive(topdir); | 272 | debugfs_remove_recursive(topdir); |
| 273 | topdir = NULL; | ||
| 274 | rpc_clnt_dir = NULL; | ||
| 275 | rpc_xprt_dir = NULL; | ||
| 269 | } | 276 | } |
| 270 | 277 | ||
| 271 | int __init | 278 | void __init |
| 272 | sunrpc_debugfs_init(void) | 279 | sunrpc_debugfs_init(void) |
| 273 | { | 280 | { |
| 274 | topdir = debugfs_create_dir("sunrpc", NULL); | 281 | topdir = debugfs_create_dir("sunrpc", NULL); |
| 275 | if (!topdir) | 282 | if (!topdir) |
| 276 | goto out; | 283 | return; |
| 277 | 284 | ||
| 278 | rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); | 285 | rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); |
| 279 | if (!rpc_clnt_dir) | 286 | if (!rpc_clnt_dir) |
| @@ -283,10 +290,9 @@ sunrpc_debugfs_init(void) | |||
| 283 | if (!rpc_xprt_dir) | 290 | if (!rpc_xprt_dir) |
| 284 | goto out_remove; | 291 | goto out_remove; |
| 285 | 292 | ||
| 286 | return 0; | 293 | return; |
| 287 | out_remove: | 294 | out_remove: |
| 288 | debugfs_remove_recursive(topdir); | 295 | debugfs_remove_recursive(topdir); |
| 289 | topdir = NULL; | 296 | topdir = NULL; |
| 290 | out: | 297 | rpc_clnt_dir = NULL; |
| 291 | return -ENOMEM; | ||
| 292 | } | 298 | } |
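Note on the sunrpc/debugfs.c changes: debugfs registration becomes strictly best-effort. The helpers now return void, bail out quietly when the parent directories were never created (debugfs unavailable, or sunrpc_debugfs_init() having failed), skip the xprt symlink when the transport has no debugfs dentry, and undo any partial setup on error, so an RPC client or transport is never torn down just because its debugfs entries could not be made (the clnt.c hunk above and the sunrpc_syms.c and xprt.c hunks below drop the error handling accordingly). The shape of the pattern in miniature, as a hedged sketch rather than the actual SUNRPC code:

    #include <linux/debugfs.h>
    #include <linux/stat.h>

    /* Hedged sketch of best-effort debugfs registration (assumed names,
     * not the SUNRPC code): failures are absorbed, never propagated. */
    static struct dentry *example_parent;   /* may be NULL if init failed */

    static struct dentry *example_register(void *priv,
                                           const struct file_operations *fops)
    {
            struct dentry *dir;

            if (!example_parent)            /* no parent dir: nothing to do */
                    return NULL;

            dir = debugfs_create_dir("example", example_parent);
            if (!dir)
                    return NULL;

            if (!debugfs_create_file("info", S_IFREG | S_IRUSR, dir,
                                     priv, fops)) {
                    debugfs_remove_recursive(dir);  /* undo partial setup */
                    return NULL;
            }
            return dir;
    }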
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index e37fbed87956..ee5d3d253102 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
| @@ -98,10 +98,7 @@ init_sunrpc(void) | |||
| 98 | if (err) | 98 | if (err) |
| 99 | goto out4; | 99 | goto out4; |
| 100 | 100 | ||
| 101 | err = sunrpc_debugfs_init(); | 101 | sunrpc_debugfs_init(); |
| 102 | if (err) | ||
| 103 | goto out5; | ||
| 104 | |||
| 105 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 102 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 106 | rpc_register_sysctl(); | 103 | rpc_register_sysctl(); |
| 107 | #endif | 104 | #endif |
| @@ -109,8 +106,6 @@ init_sunrpc(void) | |||
| 109 | init_socket_xprt(); /* clnt sock transport */ | 106 | init_socket_xprt(); /* clnt sock transport */ |
| 110 | return 0; | 107 | return 0; |
| 111 | 108 | ||
| 112 | out5: | ||
| 113 | unregister_rpc_pipefs(); | ||
| 114 | out4: | 109 | out4: |
| 115 | unregister_pernet_subsys(&sunrpc_net_ops); | 110 | unregister_pernet_subsys(&sunrpc_net_ops); |
| 116 | out3: | 111 | out3: |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index e3015aede0d9..9949722d99ce 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -1331,7 +1331,6 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net) | |||
| 1331 | */ | 1331 | */ |
| 1332 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) | 1332 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) |
| 1333 | { | 1333 | { |
| 1334 | int err; | ||
| 1335 | struct rpc_xprt *xprt; | 1334 | struct rpc_xprt *xprt; |
| 1336 | struct xprt_class *t; | 1335 | struct xprt_class *t; |
| 1337 | 1336 | ||
| @@ -1372,11 +1371,7 @@ found: | |||
| 1372 | return ERR_PTR(-ENOMEM); | 1371 | return ERR_PTR(-ENOMEM); |
| 1373 | } | 1372 | } |
| 1374 | 1373 | ||
| 1375 | err = rpc_xprt_debugfs_register(xprt); | 1374 | rpc_xprt_debugfs_register(xprt); |
| 1376 | if (err) { | ||
| 1377 | xprt_destroy(xprt); | ||
| 1378 | return ERR_PTR(err); | ||
| 1379 | } | ||
| 1380 | 1375 | ||
| 1381 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 1376 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
| 1382 | xprt->max_reqs); | 1377 | xprt->max_reqs); |
diff --git a/net/tipc/core.c b/net/tipc/core.c index 935205e6bcfe..be1c9fa60b09 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
| @@ -152,11 +152,11 @@ out_netlink: | |||
| 152 | static void __exit tipc_exit(void) | 152 | static void __exit tipc_exit(void) |
| 153 | { | 153 | { |
| 154 | tipc_bearer_cleanup(); | 154 | tipc_bearer_cleanup(); |
| 155 | unregister_pernet_subsys(&tipc_net_ops); | ||
| 155 | tipc_netlink_stop(); | 156 | tipc_netlink_stop(); |
| 156 | tipc_netlink_compat_stop(); | 157 | tipc_netlink_compat_stop(); |
| 157 | tipc_socket_stop(); | 158 | tipc_socket_stop(); |
| 158 | tipc_unregister_sysctl(); | 159 | tipc_unregister_sysctl(); |
| 159 | unregister_pernet_subsys(&tipc_net_ops); | ||
| 160 | 160 | ||
| 161 | pr_info("Deactivated\n"); | 161 | pr_info("Deactivated\n"); |
| 162 | } | 162 | } |
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 1684bcc78b34..5fde34326dcf 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
| @@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, | |||
| 152 | goto out; | 152 | goto out; |
| 153 | 153 | ||
| 154 | /* No partial writes. */ | 154 | /* No partial writes. */ |
| 155 | length = EINVAL; | 155 | length = -EINVAL; |
| 156 | if (*ppos != 0) | 156 | if (*ppos != 0) |
| 157 | goto out; | 157 | goto out; |
| 158 | 158 | ||
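Note on the selinuxfs.c hunk: a ->write() handler that returns a positive value is reporting that many bytes as successfully written, so returning EINVAL (22) instead of -EINVAL made a rejected partial write to the enforce node look to userspace like a 22-byte success rather than an error.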
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4ca3d5d02436..a8a1e14272a1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = { | |||
| 1989 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1989 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
| 1990 | /* Sunrise Point */ | 1990 | /* Sunrise Point */ |
| 1991 | { PCI_DEVICE(0x8086, 0xa170), | 1991 | { PCI_DEVICE(0x8086, 0xa170), |
| 1992 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1992 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
| 1993 | /* Sunrise Point-LP */ | 1993 | /* Sunrise Point-LP */ |
| 1994 | { PCI_DEVICE(0x8086, 0x9d70), | 1994 | { PCI_DEVICE(0x8086, 0x9d70), |
| 1995 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | 1995 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 526398a4a442..74382137b9f5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on) | |||
| 396 | { | 396 | { |
| 397 | /* We currently only handle front, HP */ | 397 | /* We currently only handle front, HP */ |
| 398 | static hda_nid_t pins[] = { | 398 | static hda_nid_t pins[] = { |
| 399 | 0x0f, 0x10, 0x14, 0x15, 0 | 399 | 0x0f, 0x10, 0x14, 0x15, 0x17, 0 |
| 400 | }; | 400 | }; |
| 401 | hda_nid_t *p; | 401 | hda_nid_t *p; |
| 402 | for (p = pins; *p; p++) | 402 | for (p = pins; *p; p++) |
| @@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 5036 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), | 5036 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), |
| 5037 | SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), | 5037 | SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), |
| 5038 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5038 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
| 5039 | SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), | ||
| 5039 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5040 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
| 5040 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), | 5041 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), |
| 5041 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), | 5042 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 4e511221a0c1..0db571340edb 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
| @@ -22,6 +22,14 @@ TARGETS += vm | |||
| 22 | TARGETS_HOTPLUG = cpu-hotplug | 22 | TARGETS_HOTPLUG = cpu-hotplug |
| 23 | TARGETS_HOTPLUG += memory-hotplug | 23 | TARGETS_HOTPLUG += memory-hotplug |
| 24 | 24 | ||
| 25 | # Clear LDFLAGS and MAKEFLAGS if called from main | ||
| 26 | # Makefile to avoid test build failures when test | ||
| 27 | # Makefile doesn't have explicit build rules. | ||
| 28 | ifeq (1,$(MAKELEVEL)) | ||
| 29 | undefine LDFLAGS | ||
| 30 | override MAKEFLAGS = | ||
| 31 | endif | ||
| 32 | |||
| 25 | all: | 33 | all: |
| 26 | for TARGET in $(TARGETS); do \ | 34 | for TARGET in $(TARGETS); do \ |
| 27 | make -C $$TARGET; \ | 35 | make -C $$TARGET; \ |
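Note on the selftests Makefile hunk: MAKELEVEL is 0 for a make started from the command line and is incremented for every sub-make, so this guard only fires when the selftests are built as a sub-make of the kernel's top-level Makefile (MAKELEVEL = 1); running make directly inside tools/testing/selftests keeps whatever LDFLAGS and MAKEFLAGS the user supplied. The undefine directive requires GNU make 3.82 or newer.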
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a2214d9609bd..cc6a25d95fbf 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type) | |||
| 471 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); | 471 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); |
| 472 | 472 | ||
| 473 | r = -ENOMEM; | 473 | r = -ENOMEM; |
| 474 | kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); | 474 | kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots)); |
| 475 | if (!kvm->memslots) | 475 | if (!kvm->memslots) |
| 476 | goto out_err_no_srcu; | 476 | goto out_err_no_srcu; |
| 477 | 477 | ||
| @@ -522,7 +522,7 @@ out_err_no_srcu: | |||
| 522 | out_err_no_disable: | 522 | out_err_no_disable: |
| 523 | for (i = 0; i < KVM_NR_BUSES; i++) | 523 | for (i = 0; i < KVM_NR_BUSES; i++) |
| 524 | kfree(kvm->buses[i]); | 524 | kfree(kvm->buses[i]); |
| 525 | kfree(kvm->memslots); | 525 | kvfree(kvm->memslots); |
| 526 | kvm_arch_free_vm(kvm); | 526 | kvm_arch_free_vm(kvm); |
| 527 | return ERR_PTR(r); | 527 | return ERR_PTR(r); |
| 528 | } | 528 | } |
| @@ -578,7 +578,7 @@ static void kvm_free_physmem(struct kvm *kvm) | |||
| 578 | kvm_for_each_memslot(memslot, slots) | 578 | kvm_for_each_memslot(memslot, slots) |
| 579 | kvm_free_physmem_slot(kvm, memslot, NULL); | 579 | kvm_free_physmem_slot(kvm, memslot, NULL); |
| 580 | 580 | ||
| 581 | kfree(kvm->memslots); | 581 | kvfree(kvm->memslots); |
| 582 | } | 582 | } |
| 583 | 583 | ||
| 584 | static void kvm_destroy_devices(struct kvm *kvm) | 584 | static void kvm_destroy_devices(struct kvm *kvm) |
| @@ -871,10 +871,10 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 871 | goto out_free; | 871 | goto out_free; |
| 872 | } | 872 | } |
| 873 | 873 | ||
| 874 | slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), | 874 | slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); |
| 875 | GFP_KERNEL); | ||
| 876 | if (!slots) | 875 | if (!slots) |
| 877 | goto out_free; | 876 | goto out_free; |
| 877 | memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); | ||
| 878 | 878 | ||
| 879 | if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { | 879 | if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { |
| 880 | slot = id_to_memslot(slots, mem->slot); | 880 | slot = id_to_memslot(slots, mem->slot); |
| @@ -917,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 917 | kvm_arch_commit_memory_region(kvm, mem, &old, change); | 917 | kvm_arch_commit_memory_region(kvm, mem, &old, change); |
| 918 | 918 | ||
| 919 | kvm_free_physmem_slot(kvm, &old, &new); | 919 | kvm_free_physmem_slot(kvm, &old, &new); |
| 920 | kfree(old_memslots); | 920 | kvfree(old_memslots); |
| 921 | 921 | ||
| 922 | /* | 922 | /* |
| 923 | * IOMMU mapping: New slots need to be mapped. Old slots need to be | 923 | * IOMMU mapping: New slots need to be mapped. Old slots need to be |
| @@ -936,7 +936,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 936 | return 0; | 936 | return 0; |
| 937 | 937 | ||
| 938 | out_slots: | 938 | out_slots: |
| 939 | kfree(slots); | 939 | kvfree(slots); |
| 940 | out_free: | 940 | out_free: |
| 941 | kvm_free_physmem_slot(kvm, &new, &old); | 941 | kvm_free_physmem_slot(kvm, &new, &old); |
| 942 | out: | 942 | out: |
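Note on the kvm_main.c hunks: struct kvm_memslots is large enough to need a multi-page, physically contiguous allocation when kzalloc'd, which can fail on a fragmented host. Switching to kvm's kvzalloc-style helper lets the allocation fall back to vmalloc, and every matching free site is converted to kvfree(), which picks the correct release path for either allocator. A hedged sketch of what such a helper typically looks like (kvm_kvzalloc is kvm's own helper; this sketch is illustrative only):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>           /* kvfree() */

    /* Illustrative kvzalloc-style helper: use the slab allocator for small
     * requests and fall back to vmalloc once the size exceeds a page. */
    static void *example_kvzalloc(unsigned long size)
    {
            if (size > PAGE_SIZE)
                    return vzalloc(size);
            return kzalloc(size, GFP_KERNEL);
    }
    /* Memory from either path is released with kvfree(). */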
