174 files changed, 1557 insertions, 1040 deletions
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index a6a352c2771e..5992dceec7af 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -21,9 +21,9 @@ Required Properties:
   must appear in the same order as the output clocks.
 - #clock-cells: Must be 1
 - clock-output-names: The name of the clocks as free-form strings
-- renesas,indices: Indices of the gate clocks into the group (0 to 31)
+- renesas,clock-indices: Indices of the gate clocks into the group (0 to 31)

-The clocks, clock-output-names and renesas,indices properties contain one
+The clocks, clock-output-names and renesas,clock-indices properties contain one
 entry per gate clock. The MSTP groups are sparsely populated. Unimplemented
 gate clocks must not be declared.

diff --git a/Documentation/devicetree/bindings/net/opencores-ethoc.txt b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
new file mode 100644
index 000000000000..2dc127c30d9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
@@ -0,0 +1,22 @@
+* OpenCores MAC 10/100 Mbps
+
+Required properties:
+- compatible: Should be "opencores,ethoc".
+- reg: two memory regions (address and length),
+  first region is for the device registers and descriptor rings,
+  second is for the device packet memory.
+- interrupts: interrupt for the device.
+
+Optional properties:
+- clocks: phandle to refer to the clk used as per
+  Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Examples:
+
+	enet0: ethoc@fd030000 {
+		compatible = "opencores,ethoc";
+		reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
+		interrupts = <1>;
+		local-mac-address = [00 50 c2 13 6f 00];
+		clocks = <&osc>;
+	};
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index f3089d423515..0cbe6ec22d6f 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -554,12 +554,6 @@ solution for a couple of reasons:
   not specified in the struct can_frame and therefore it is only valid in
   CANFD_MTU sized CAN FD frames.

-As long as the payload length is <=8 the received CAN frames from CAN FD
-capable CAN devices can be received and read by legacy sockets too. When
-user-generated CAN FD frames have a payload length <=8 these can be send
-by legacy CAN network interfaces too. Sending CAN FD frames with payload
-length > 8 to a legacy CAN network interface returns an -EMSGSIZE error.
-
 Implementation hint for new CAN applications:

 To build a CAN FD aware application use struct canfd_frame as basic CAN
diff --git a/MAINTAINERS b/MAINTAINERS
index c6d0e93eff62..e1297ff255e1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -73,7 +73,8 @@ Descriptions of section entries:
 L: Mailing list that is relevant to this area
 W: Web-page with status/info
 Q: Patchwork web based patch tracking system site
-T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit.
+T: SCM tree type and location.
+   Type is one of: git, hg, quilt, stgit, topgit
 S: Status, one of the following:
    Supported: Someone is actually paid to look after this.
    Maintained: Someone actually looks after it.
@@ -1612,11 +1613,11 @@ S: Maintained
 F: drivers/net/wireless/atmel*

 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M: Bradley Grove <linuxdrivers@attotech.com>
 L: linux-scsi@vger.kernel.org
 W: http://www.attotech.com
 S: Supported
 F: drivers/scsi/esas2r

 AUDIT SUBSYSTEM
 M: Eric Paris <eparis@redhat.com>
@@ -2159,7 +2160,7 @@ F: Documentation/zh_CN/

 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
 M: Peter Chen <Peter.Chen@freescale.com>
-T: git://github.com/hzpeterchen/linux-usb.git
+T: git git://github.com/hzpeterchen/linux-usb.git
 L: linux-usb@vger.kernel.org
 S: Maintained
 F: drivers/usb/chipidea/
@@ -2179,9 +2180,9 @@ S: Supported
 F: drivers/net/ethernet/cisco/enic/

 CISCO VIC LOW LATENCY NIC DRIVER
 M: Upinder Malhi <umalhi@cisco.com>
 S: Supported
 F: drivers/infiniband/hw/usnic

 CIRRUS LOGIC EP93XX ETHERNET DRIVER
 M: Hartley Sweeten <hsweeten@visionengravers.com>
@@ -2378,20 +2379,20 @@ F: drivers/cpufreq/arm_big_little.c
 F: drivers/cpufreq/arm_big_little_dt.c

 CPUIDLE DRIVER - ARM BIG LITTLE
 M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 M: Daniel Lezcano <daniel.lezcano@linaro.org>
 L: linux-pm@vger.kernel.org
 L: linux-arm-kernel@lists.infradead.org
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 S: Maintained
 F: drivers/cpuidle/cpuidle-big_little.c

 CPUIDLE DRIVERS
 M: Rafael J. Wysocki <rjw@rjwysocki.net>
 M: Daniel Lezcano <daniel.lezcano@linaro.org>
 L: linux-pm@vger.kernel.org
 S: Maintained
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 F: drivers/cpuidle/*
 F: include/linux/cpuidle.h

@@ -2458,9 +2459,9 @@ S: Maintained
 F: sound/pci/cs5535audio/

 CW1200 WLAN driver
 M: Solomon Peachy <pizza@shaftnet.org>
 S: Maintained
 F: drivers/net/wireless/cw1200/

 CX18 VIDEO4LINUX DRIVER
 M: Andy Walls <awalls@md.metrocast.net>
@@ -3095,6 +3096,8 @@ F: fs/ecryptfs/

 EDAC-CORE
 M: Doug Thompson <dougthompson@xmission.com>
+M: Borislav Petkov <bp@alien8.de>
+M: Mauro Carvalho Chehab <m.chehab@samsung.com>
 L: linux-edac@vger.kernel.org
 W: bluesmoke.sourceforge.net
 S: Supported
@@ -4558,6 +4561,7 @@ F: Documentation/networking/ixgbevf.txt
 F: Documentation/networking/i40e.txt
 F: Documentation/networking/i40evf.txt
 F: drivers/net/ethernet/intel/
+F: drivers/net/ethernet/intel/*/

 INTEL-MID GPIO DRIVER
 M: David Cohen <david.a.cohen@linux.intel.com>
@@ -4914,7 +4918,7 @@ F: drivers/staging/ktap/
 KCONFIG
 M: "Yann E. MORIN" <yann.morin.1998@free.fr>
 L: linux-kbuild@vger.kernel.org
-T: git://gitorious.org/linux-kconfig/linux-kconfig
+T: git git://gitorious.org/linux-kconfig/linux-kconfig
 S: Maintained
 F: Documentation/kbuild/kconfig-language.txt
 F: scripts/kconfig/
@@ -5471,11 +5475,11 @@ S: Maintained
 F: drivers/media/tuners/m88ts2022*

 MA901 MASTERKIT USB FM RADIO DRIVER
 M: Alexey Klimov <klimov.linux@gmail.com>
 L: linux-media@vger.kernel.org
 T: git git://linuxtv.org/media_tree.git
 S: Maintained
 F: drivers/media/radio/radio-ma901.c

 MAC80211
 M: Johannes Berg <johannes@sipsolutions.net>
@@ -5636,7 +5640,7 @@ F: drivers/scsi/megaraid/

 MELLANOX ETHERNET DRIVER (mlx4_en)
 M: Amir Vadai <amirv@mellanox.com>
 L: netdev@vger.kernel.org
 S: Supported
 W: http://www.mellanox.com
 Q: http://patchwork.ozlabs.org/project/netdev/list/
@@ -5677,7 +5681,7 @@ F: include/linux/mtd/
 F: include/uapi/mtd/

 MEN A21 WATCHDOG DRIVER
 M: Johannes Thumshirn <johannes.thumshirn@men.de>
 L: linux-watchdog@vger.kernel.org
 S: Supported
 F: drivers/watchdog/mena21_wdt.c
@@ -5733,20 +5737,20 @@ L: linux-rdma@vger.kernel.org
 W: http://www.mellanox.com
 Q: http://patchwork.ozlabs.org/project/netdev/list/
 Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
+T: git git://openfabrics.org/~eli/connect-ib.git
 S: Supported
 F: drivers/net/ethernet/mellanox/mlx5/core/
 F: include/linux/mlx5/

 Mellanox MLX5 IB driver
 M: Eli Cohen <eli@mellanox.com>
 L: linux-rdma@vger.kernel.org
 W: http://www.mellanox.com
 Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
+T: git git://openfabrics.org/~eli/connect-ib.git
 S: Supported
 F: include/linux/mlx5/
 F: drivers/infiniband/hw/mlx5/

 MODULE SUPPORT
 M: Rusty Russell <rusty@rustcorp.com.au>
@@ -8700,17 +8704,17 @@ S: Maintained
 F: drivers/media/radio/radio-raremono.c

 THERMAL
 M: Zhang Rui <rui.zhang@intel.com>
 M: Eduardo Valentin <eduardo.valentin@ti.com>
 L: linux-pm@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
 Q: https://patchwork.kernel.org/project/linux-pm/list/
 S: Supported
 F: drivers/thermal/
 F: include/linux/thermal.h
 F: include/linux/cpu_cooling.h
 F: Documentation/devicetree/bindings/thermal/

 THINGM BLINK(1) USB RGB LED DRIVER
 M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
@@ -9812,7 +9816,7 @@ ZR36067 VIDEO FOR LINUX DRIVER
 L: mjpeg-users@lists.sourceforge.net
 L: linux-media@vger.kernel.org
 W: http://mjpeg.sourceforge.net/driver-zoran/
-T: Mercurial http://linuxtv.org/hg/v4l-dvb
+T: hg http://linuxtv.org/hg/v4l-dvb
 S: Odd Fixes
 F: drivers/media/pci/zoran/

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e25419817791..1eb3a250210a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1903,6 +1903,7 @@ config XEN
 	depends on ARM && AEABI && OF
 	depends on CPU_V7 && !CPU_V6
 	depends on !GENERIC_ATOMIC64
+	depends on MMU
 	select ARM_PSCI
 	select SWIOTLB_XEN
 	select ARCH_DMA_ADDR_T_64BIT
diff --git a/arch/arm/boot/dts/keystone-clocks.dtsi b/arch/arm/boot/dts/keystone-clocks.dtsi
index 2363593e1050..ef58d1c24313 100644
--- a/arch/arm/boot/dts/keystone-clocks.dtsi
+++ b/arch/arm/boot/dts/keystone-clocks.dtsi
@@ -612,7 +612,7 @@ clocks {
 		compatible = "ti,keystone,psc-clock";
 		clocks = <&chipclk13>;
 		clock-output-names = "vcp-3";
-		reg = <0x0235000a8 0xb00>, <0x02350060 0x400>;
+		reg = <0x023500a8 0xb00>, <0x02350060 0x400>;
 		reg-names = "control", "domain";
 		domain-id = <24>;
 	};
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 09c5a0f5f4d1..86648c083bb4 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -12,6 +12,7 @@
 #define _ASM_C6X_CACHE_H

 #include <linux/irqflags.h>
+#include <linux/init.h>

 /*
  * Cache line size
diff --git a/arch/sh/include/cpu-sh2/cpu/cache.h b/arch/sh/include/cpu-sh2/cpu/cache.h
index 673515bc4135..aa1b2b9088a7 100644
--- a/arch/sh/include/cpu-sh2/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2/cpu/cache.h
@@ -18,7 +18,7 @@
 #define SH_CACHE_ASSOC 8

 #if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CCR 0xffffffec
+#define SH_CCR 0xffffffec

 #define CCR_CACHE_CE 0x01 /* Cache enable */
 #define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */
diff --git a/arch/sh/include/cpu-sh2a/cpu/cache.h b/arch/sh/include/cpu-sh2a/cpu/cache.h
index defb0baa5a06..b27ce92cb600 100644
--- a/arch/sh/include/cpu-sh2a/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2a/cpu/cache.h
@@ -17,8 +17,8 @@
 #define SH_CACHE_COMBINED 4
 #define SH_CACHE_ASSOC 8

-#define CCR 0xfffc1000 /* CCR1 */
-#define CCR2 0xfffc1004
+#define SH_CCR 0xfffc1000 /* CCR1 */
+#define SH_CCR2 0xfffc1004

 /*
  * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
diff --git a/arch/sh/include/cpu-sh3/cpu/cache.h b/arch/sh/include/cpu-sh3/cpu/cache.h
index bee2d81c56bf..29700fd88c75 100644
--- a/arch/sh/include/cpu-sh3/cpu/cache.h
+++ b/arch/sh/include/cpu-sh3/cpu/cache.h
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED 4
 #define SH_CACHE_ASSOC 8

-#define CCR 0xffffffec /* Address of Cache Control Register */
+#define SH_CCR 0xffffffec /* Address of Cache Control Register */

 #define CCR_CACHE_CE 0x01 /* Cache Enable */
 #define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */
diff --git a/arch/sh/include/cpu-sh4/cpu/cache.h b/arch/sh/include/cpu-sh4/cpu/cache.h
index 7bfb9e8b069c..92c4cd119b66 100644
--- a/arch/sh/include/cpu-sh4/cpu/cache.h
+++ b/arch/sh/include/cpu-sh4/cpu/cache.h
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED 4
 #define SH_CACHE_ASSOC 8

-#define CCR 0xff00001c /* Address of Cache Control Register */
+#define SH_CCR 0xff00001c /* Address of Cache Control Register */
 #define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */
 #define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/
 #define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ecf83cd158dc..0d7360d549c1 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -112,7 +112,7 @@ static void cache_init(void)
 	unsigned long ccr, flags;

 	jump_to_uncached();
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);

 	/*
 	 * At this point we don't know whether the cache is enabled or not - a
@@ -189,7 +189,7 @@ static void cache_init(void)

 	l2_cache_init();

-	__raw_writel(flags, CCR);
+	__raw_writel(flags, SH_CCR);
 	back_to_cached();
 }
 #else
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 115725198038..777e50f33c00 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 	 */
 	jump_to_uncached();

-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
 		back_to_cached();

diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index defcf719f2e8..a74259f2f981 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size)
 	local_irq_save(flags);
 	jump_to_uncached();

-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	ccr |= CCR_CACHE_INVALIDATE;
-	__raw_writel(ccr, CCR);
+	__raw_writel(ccr, SH_CCR);

 	back_to_cached();
 	local_irq_restore(flags);
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 949e2d3138a0..ee87d081259b 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size)

 	/* If there are too many pages then just blow the cache */
 	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
-		__raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+		__raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+			     SH_CCR);
 	} else {
 		for (v = begin; v < end; v += L1_CACHE_BYTES)
 			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args)
 	/* I-Cache invalidate */
 	/* If there are too many pages then just blow the cache */
 	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-		__raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+		__raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+			     SH_CCR);
 	} else {
 		for (v = start; v < end; v += L1_CACHE_BYTES)
 			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 0e529285b28d..51d8f7f31d1d 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -133,9 +133,9 @@ static void flush_icache_all(void)
 	jump_to_uncached();

 	/* Flush I-cache */
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	ccr |= CCR_CACHE_ICI;
-	__raw_writel(ccr, CCR);
+	__raw_writel(ccr, SH_CCR);

 	/*
 	 * back_to_cached() will take care of the barrier for us, don't add
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
index c0adbee97b5f..24c58b7dc022 100644
--- a/arch/sh/mm/cache-shx3.c
+++ b/arch/sh/mm/cache-shx3.c
@@ -19,7 +19,7 @@ void __init shx3_cache_init(void)
 {
 	unsigned int ccr;

-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);

 	/*
 	 * If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@ void __init shx3_cache_init(void)
 	ccr |= CCR_CACHE_IBE;
 #endif

-	writel_uncached(ccr, CCR);
+	writel_uncached(ccr, SH_CCR);
 }
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 616966a96cba..097c2cdd117f 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -285,8 +285,8 @@ void __init cpu_cache_init(void)
 {
 	unsigned int cache_disabled = 0;

-#ifdef CCR
-	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#ifdef SH_CCR
+	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
 #endif

 	compute_alias(&boot_cpu_data.icache);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8184451b57c0..422b7d84f686 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -874,7 +874,7 @@ bio_pageinc(struct bio *bio)
 		/* Non-zero page count for non-head members of
 		 * compound pages is no longer allowed by the kernel.
 		 */
-		page = compound_trans_head(bv.bv_page);
+		page = compound_head(bv.bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -887,7 +887,7 @@ bio_pagedec(struct bio *bio)
 	struct bvec_iter iter;

 	bio_for_each_segment(bv, bio, iter) {
-		page = compound_trans_head(bv.bv_page);
+		page = compound_head(bv.bv_page);
 		atomic_dec(&page->_count);
 	}
 }
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 011e55d820b1..51c557cfd92b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -612,6 +612,8 @@ static ssize_t disksize_store(struct device *dev,

 	disksize = PAGE_ALIGN(disksize);
 	meta = zram_meta_alloc(disksize);
+	if (!meta)
+		return -ENOMEM;
 	down_write(&zram->init_lock);
 	if (zram->init_done) {
 		up_write(&zram->init_lock);
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index bd313f7816a8..c1af80bcdf20 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -242,7 +242,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,

 	irq = irq_of_parse_and_map(np, 0);
 	if (!irq)
-		return;
+		goto out_free_characteristics;

 	clk = at91_clk_register_master(pmc, irq, name, num_parents,
 				       parent_names, layout,
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 6a934a5296bd..05e04ce0f148 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -494,6 +494,9 @@ static const struct file_operations nomadik_src_clk_debugfs_ops = {

 static int __init nomadik_src_clk_init_debugfs(void)
 {
+	/* Vital for multiplatform */
+	if (!src_base)
+		return -ENODEV;
 	src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
 	src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
 	debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5517944495d8..c42e608af6bb 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2226,24 +2226,25 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
  */
 int __clk_get(struct clk *clk)
 {
-	if (clk && !try_module_get(clk->owner))
-		return 0;
+	if (clk) {
+		if (!try_module_get(clk->owner))
+			return 0;

-	kref_get(&clk->ref);
+		kref_get(&clk->ref);
+	}
 	return 1;
 }

 void __clk_put(struct clk *clk)
 {
-	if (WARN_ON_ONCE(IS_ERR(clk)))
+	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
 		return;

 	clk_prepare_lock();
 	kref_put(&clk->ref, __clk_release);
 	clk_prepare_unlock();

-	if (clk)
-		module_put(clk->owner);
+	module_put(clk->owner);
 }

 /*** clk rate change notifiers ***/
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index 17a598398a53..86f1e362eafb 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -179,6 +179,7 @@ static struct clk *clk_register_psc(struct device *dev,

 	init.name = name;
 	init.ops = &clk_psc_ops;
+	init.flags = 0;
 	init.parent_names = (parent_name ? &parent_name : NULL);
 	init.num_parents = (parent_name ? 1 : 0);

diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 81a202d12a7a..bef198a83863 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -141,13 +141,6 @@ static const struct coreclk_soc_desc a370_coreclks = {
 	.num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
 };

-static void __init a370_coreclk_init(struct device_node *np)
-{
-	mvebu_coreclk_setup(np, &a370_coreclks);
-}
-CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
-	       a370_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -168,9 +161,15 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
 	{ }
 };

-static void __init a370_clk_gating_init(struct device_node *np)
+static void __init a370_clk_init(struct device_node *np)
 {
-	mvebu_clk_gating_setup(np, a370_gating_desc);
+	struct device_node *cgnp =
+		of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");
+
+	mvebu_coreclk_setup(np, &a370_coreclks);
+
+	if (cgnp)
+		mvebu_clk_gating_setup(cgnp, a370_gating_desc);
 }
-CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock",
-	       a370_clk_gating_init);
+CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
+
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 9922c4475aa8..b3094315a3c0 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -158,13 +158,6 @@ static const struct coreclk_soc_desc axp_coreclks = {
 	.num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
 };

-static void __init axp_coreclk_init(struct device_node *np)
-{
-	mvebu_coreclk_setup(np, &axp_coreclks);
-}
-CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
-	       axp_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -202,9 +195,14 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
 	{ }
 };

-static void __init axp_clk_gating_init(struct device_node *np)
+static void __init axp_clk_init(struct device_node *np)
 {
-	mvebu_clk_gating_setup(np, axp_gating_desc);
+	struct device_node *cgnp =
+		of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock");
+
+	mvebu_coreclk_setup(np, &axp_coreclks);
+
+	if (cgnp)
+		mvebu_clk_gating_setup(cgnp, axp_gating_desc);
 }
-CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock",
-	       axp_clk_gating_init);
+CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 38aee1e3f242..b8c2424ac926 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -154,12 +154,6 @@ static const struct coreclk_soc_desc dove_coreclks = {
 	.num_ratios = ARRAY_SIZE(dove_coreclk_ratios),
 };

-static void __init dove_coreclk_init(struct device_node *np)
-{
-	mvebu_coreclk_setup(np, &dove_coreclks);
-}
-CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -186,9 +180,14 @@ static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = {
 	{ }
 };

-static void __init dove_clk_gating_init(struct device_node *np)
+static void __init dove_clk_init(struct device_node *np)
 {
-	mvebu_clk_gating_setup(np, dove_gating_desc);
+	struct device_node *cgnp =
+		of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock");
+
+	mvebu_coreclk_setup(np, &dove_coreclks);
+
+	if (cgnp)
+		mvebu_clk_gating_setup(cgnp, dove_gating_desc);
 }
-CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock",
-	       dove_clk_gating_init);
+CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 2636a55f29f9..ddb666a86500 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -193,13 +193,6 @@ static const struct coreclk_soc_desc kirkwood_coreclks = {
 	.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
 };

-static void __init kirkwood_coreclk_init(struct device_node *np)
-{
-	mvebu_coreclk_setup(np, &kirkwood_coreclks);
-}
-CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock",
-	       kirkwood_coreclk_init);
-
 static const struct coreclk_soc_desc mv88f6180_coreclks = {
 	.get_tclk_freq = kirkwood_get_tclk_freq,
 	.get_cpu_freq = mv88f6180_get_cpu_freq,
@@ -208,13 +201,6 @@ static const struct coreclk_soc_desc mv88f6180_coreclks = {
 	.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
 };

-static void __init mv88f6180_coreclk_init(struct device_node *np)
-{
-	mvebu_coreclk_setup(np, &mv88f6180_coreclks);
-}
-CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
-	       mv88f6180_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -239,9 +225,21 @@ static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = {
 	{ }
 };

-static void __init kirkwood_clk_gating_init(struct device_node *np)
+static void __init kirkwood_clk_init(struct device_node *np)
 {
-	mvebu_clk_gating_setup(np, kirkwood_gating_desc);
+	struct device_node *cgnp =
+		of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock");
+
+
+	if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock"))
+		mvebu_coreclk_setup(np, &mv88f6180_coreclks);
+	else
+		mvebu_coreclk_setup(np, &kirkwood_coreclks);
+
+	if (cgnp)
+		mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
 }
-CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock",
-	       kirkwood_clk_gating_init);
+CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
+	       kirkwood_clk_init);
+CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock",
+	       kirkwood_clk_init);
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index a59ec217a124..dd272a0d1446 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -186,7 +186,7 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
 			     const char *name)
 {
 	const struct clk_div_table *table = NULL;
-	const char *parent_name = "main";
+	const char *parent_name;
 	unsigned int shift;
 	unsigned int mult = 1;
 	unsigned int div = 1;
@@ -201,23 +201,31 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
 		 * the multiplier value.
 		 */
 		u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+		parent_name = "main";
 		mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
 	} else if (!strcmp(name, "pll1")) {
+		parent_name = "main";
 		mult = config->pll1_mult / 2;
 	} else if (!strcmp(name, "pll3")) {
+		parent_name = "main";
 		mult = config->pll3_mult;
 	} else if (!strcmp(name, "lb")) {
+		parent_name = "pll1_div2";
 		div = cpg_mode & BIT(18) ? 36 : 24;
 	} else if (!strcmp(name, "qspi")) {
+		parent_name = "pll1_div2";
 		div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
-		      ? 16 : 20;
+		      ? 8 : 10;
 	} else if (!strcmp(name, "sdh")) {
+		parent_name = "pll1_div2";
 		table = cpg_sdh_div_table;
 		shift = 8;
 	} else if (!strcmp(name, "sd0")) {
+		parent_name = "pll1_div2";
 		table = cpg_sd01_div_table;
 		shift = 4;
 	} else if (!strcmp(name, "sd1")) {
+		parent_name = "pll1_div2";
 		table = cpg_sd01_div_table;
 		shift = 0;
 	} else if (!strcmp(name, "z")) {
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 4d75b1f37e3a..290f9c1a3749 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -59,7 +59,7 @@ static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
 		return 0;

 	if (divider_ux1 > get_max_div(divider))
-		return -EINVAL;
+		return get_max_div(divider);

 	return divider_ux1;
 }
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index cf0c323f2c36..c39613c519af 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -180,9 +180,13 @@ enum clk_id {
 	tegra_clk_sbc6_8,
 	tegra_clk_sclk,
 	tegra_clk_sdmmc1,
+	tegra_clk_sdmmc1_8,
 	tegra_clk_sdmmc2,
+	tegra_clk_sdmmc2_8,
 	tegra_clk_sdmmc3,
+	tegra_clk_sdmmc3_8,
 	tegra_clk_sdmmc4,
+	tegra_clk_sdmmc4_8,
 	tegra_clk_se,
 	tegra_clk_soc_therm,
 	tegra_clk_sor0,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 5c35885f4a7c..1fa5c3f33b20 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -371,9 +371,7 @@ static const char *mux_pllp3_pllc_clkm[] = {
 static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
 	"pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
 };
-static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = {
-	[0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
-};
+#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL

 static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
 	"pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
@@ -465,6 +463,10 @@ static struct tegra_periph_init_data periph_clks[] = {
 	MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
 	MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
 	MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+	MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
+	MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
+	MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
+	MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
 	MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
 	MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
 	MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -492,7 +494,7 @@ static struct tegra_periph_init_data periph_clks[] = {
 	UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
 	UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
 	UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
-	UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte),
+	UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte),
 	XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
 	XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
 	XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 05dce4aa2c11..feb3201c85ce 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -120,7 +120,7 @@ void __init tegra_super_clk_gen4_init(void __iomem *clk_base,
 					ARRAY_SIZE(cclk_lp_parents),
 					CLK_SET_RATE_PARENT,
 					clk_base + CCLKLP_BURST_POLICY,
-					0, 4, 8, 9, NULL);
+					TEGRA_DIVIDER_2, 4, 8, 9, NULL);
 		*dt_clk = clk;
 	}

diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 90d9d25f2228..80431f0fb268 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -682,12 +682,12 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
 	[tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true },
 	[tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true },
 	[tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true },
-	[tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
+	[tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
 	[tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true },
 	[tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true },
 	[tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true },
-	[tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
-	[tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
+	[tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
+	[tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
 	[tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true },
 	[tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true },
 	[tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true },
@@ -723,7 +723,7 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
 	[tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true },
 	[tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true },
 	[tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true },
-	[tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
+	[tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
 	[tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true },
 	[tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true },
 	[tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index aff86b5bc745..166e02f16c8a 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -516,11 +516,11 @@ static struct div_nmp pllp_nmp = {
 };

 static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
-	{12000000, 216000000, 432, 12, 1, 8},
-	{13000000, 216000000, 432, 13, 1, 8},
-	{16800000, 216000000, 360, 14, 1, 8},
-	{19200000, 216000000, 360, 16, 1, 8},
-	{26000000, 216000000, 432, 26, 1, 8},
+	{12000000, 408000000, 408, 12, 0, 8},
+	{13000000, 408000000, 408, 13, 0, 8},
+	{16800000, 408000000, 340, 14, 0, 8},
+	{19200000, 408000000, 340, 16, 0, 8},
+	{26000000, 408000000, 408, 26, 0, 8},
 	{0, 0, 0, 0, 0, 0},
 };

@@ -570,6 +570,15 @@ static struct tegra_clk_pll_params pll_a_params = {
 	.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
 };

+static struct div_nmp plld_nmp = {
+	.divm_shift = 0,
+	.divm_width = 5,
+	.divn_shift = 8,
+	.divn_width = 11,
+	.divp_shift = 20,
+	.divp_width = 3,
+};
+
 static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
 	{12000000, 216000000, 864, 12, 4, 12},
 	{13000000, 216000000, 864, 13, 4, 12},
@@ -603,19 +612,18 @@ static struct tegra_clk_pll_params pll_d_params = {
 	.lock_mask = PLL_BASE_LOCK,
 	.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
 	.lock_delay = 1000,
-	.div_nmp = &pllp_nmp,
+	.div_nmp = &plld_nmp,
 	.freq_table = pll_d_freq_table,
 	.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
 		 TEGRA_PLL_USE_LOCK,
 };

 static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = {
-	{ 12000000, 148500000, 99, 1, 8},
-	{ 12000000, 594000000, 99, 1, 1},
-	{ 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */
-	{ 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */
-	{ 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */
-	{ 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */
+	{ 12000000, 594000000, 99, 1, 2},
+	{ 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */
+	{ 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */
+	{ 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */
+	{ 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */
 	{ 0, 0, 0, 0, 0, 0 },
 };

@@ -753,21 +761,19 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
 	[tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true },
 	[tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true },
 	[tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true },
-	[tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
+	[tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
 	[tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
 	[tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
 	[tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
-	[tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
-	[tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
+	[tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
+	[tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
 	[tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
 	[tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true },
-	[tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true },
 	[tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true },
 	[tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true },
-	[tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true },
 	[tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true },
 	[tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true },
-	[tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
+	[tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
 	[tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true },
 	[tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true },
 	[tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true },
@@ -794,7 +800,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
 	[tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true },
 	[tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true },
796 | [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true }, | 802 | [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true }, |
797 | [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, | 803 | [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, |
798 | [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true }, | 804 | [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true }, |
799 | [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true }, | 805 | [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true }, |
800 | [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true }, | 806 | [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true }, |
@@ -1286,9 +1292,9 @@ static void __init tegra124_pll_init(void __iomem *clk_base, | |||
1286 | clk_register_clkdev(clk, "pll_d2", NULL); | 1292 | clk_register_clkdev(clk, "pll_d2", NULL); |
1287 | clks[TEGRA124_CLK_PLL_D2] = clk; | 1293 | clks[TEGRA124_CLK_PLL_D2] = clk; |
1288 | 1294 | ||
1289 | /* PLLD2_OUT0 ?? */ | 1295 | /* PLLD2_OUT0 */ |
1290 | clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2", | 1296 | clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2", |
1291 | CLK_SET_RATE_PARENT, 1, 2); | 1297 | CLK_SET_RATE_PARENT, 1, 1); |
1292 | clk_register_clkdev(clk, "pll_d2_out0", NULL); | 1298 | clk_register_clkdev(clk, "pll_d2_out0", NULL); |
1293 | clks[TEGRA124_CLK_PLL_D2_OUT0] = clk; | 1299 | clks[TEGRA124_CLK_PLL_D2_OUT0] = clk; |
1294 | 1300 | ||
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index dbace152b2fa..dace2b1b5ae6 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c | |||
@@ -574,6 +574,8 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = { | |||
574 | [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true }, | 574 | [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true }, |
575 | [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true }, | 575 | [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true }, |
576 | [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true }, | 576 | [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true }, |
577 | [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true }, | ||
578 | [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true }, | ||
577 | }; | 579 | }; |
578 | 580 | ||
579 | static unsigned long tegra20_clk_measure_input_freq(void) | 581 | static unsigned long tegra20_clk_measure_input_freq(void) |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e81c5547e647..f9c12e92fdd6 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -53,8 +53,8 @@ | |||
53 | #include "user.h" | 53 | #include "user.h" |
54 | 54 | ||
55 | #define DRV_NAME MLX4_IB_DRV_NAME | 55 | #define DRV_NAME MLX4_IB_DRV_NAME |
56 | #define DRV_VERSION "1.0" | 56 | #define DRV_VERSION "2.2-1" |
57 | #define DRV_RELDATE "April 4, 2008" | 57 | #define DRV_RELDATE "Feb 2014" |
58 | 58 | ||
59 | #define MLX4_IB_FLOW_MAX_PRIO 0xFFF | 59 | #define MLX4_IB_FLOW_MAX_PRIO 0xFFF |
60 | #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF | 60 | #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index aa03e732b6a8..bf900579ac08 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -46,8 +46,8 @@ | |||
46 | #include "mlx5_ib.h" | 46 | #include "mlx5_ib.h" |
47 | 47 | ||
48 | #define DRIVER_NAME "mlx5_ib" | 48 | #define DRIVER_NAME "mlx5_ib" |
49 | #define DRIVER_VERSION "1.0" | 49 | #define DRIVER_VERSION "2.2-1" |
50 | #define DRIVER_RELDATE "June 2013" | 50 | #define DRIVER_RELDATE "Feb 2014" |
51 | 51 | ||
52 | MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); | 52 | MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); |
53 | MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); | 53 | MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 6d20fbde8d43..dcde56057fe1 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -181,7 +181,7 @@ static inline int __agg_has_partner(struct aggregator *agg) | |||
181 | */ | 181 | */ |
182 | static inline void __disable_port(struct port *port) | 182 | static inline void __disable_port(struct port *port) |
183 | { | 183 | { |
184 | bond_set_slave_inactive_flags(port->slave); | 184 | bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER); |
185 | } | 185 | } |
186 | 186 | ||
187 | /** | 187 | /** |
@@ -193,7 +193,7 @@ static inline void __enable_port(struct port *port) | |||
193 | struct slave *slave = port->slave; | 193 | struct slave *slave = port->slave; |
194 | 194 | ||
195 | if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) | 195 | if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) |
196 | bond_set_slave_active_flags(slave); | 196 | bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER); |
197 | } | 197 | } |
198 | 198 | ||
199 | /** | 199 | /** |
@@ -2062,6 +2062,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2062 | struct list_head *iter; | 2062 | struct list_head *iter; |
2063 | struct slave *slave; | 2063 | struct slave *slave; |
2064 | struct port *port; | 2064 | struct port *port; |
2065 | bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; | ||
2065 | 2066 | ||
2066 | read_lock(&bond->lock); | 2067 | read_lock(&bond->lock); |
2067 | rcu_read_lock(); | 2068 | rcu_read_lock(); |
@@ -2119,8 +2120,19 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2119 | } | 2120 | } |
2120 | 2121 | ||
2121 | re_arm: | 2122 | re_arm: |
2123 | bond_for_each_slave_rcu(bond, slave, iter) { | ||
2124 | if (slave->should_notify) { | ||
2125 | should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; | ||
2126 | break; | ||
2127 | } | ||
2128 | } | ||
2122 | rcu_read_unlock(); | 2129 | rcu_read_unlock(); |
2123 | read_unlock(&bond->lock); | 2130 | read_unlock(&bond->lock); |
2131 | |||
2132 | if (should_notify_rtnl && rtnl_trylock()) { | ||
2133 | bond_slave_state_notify(bond); | ||
2134 | rtnl_unlock(); | ||
2135 | } | ||
2124 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | 2136 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); |
2125 | } | 2137 | } |
2126 | 2138 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1c6104d3501d..e5628fc725c3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -829,21 +829,25 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | |||
829 | if (bond_is_lb(bond)) { | 829 | if (bond_is_lb(bond)) { |
830 | bond_alb_handle_active_change(bond, new_active); | 830 | bond_alb_handle_active_change(bond, new_active); |
831 | if (old_active) | 831 | if (old_active) |
832 | bond_set_slave_inactive_flags(old_active); | 832 | bond_set_slave_inactive_flags(old_active, |
833 | BOND_SLAVE_NOTIFY_NOW); | ||
833 | if (new_active) | 834 | if (new_active) |
834 | bond_set_slave_active_flags(new_active); | 835 | bond_set_slave_active_flags(new_active, |
836 | BOND_SLAVE_NOTIFY_NOW); | ||
835 | } else { | 837 | } else { |
836 | rcu_assign_pointer(bond->curr_active_slave, new_active); | 838 | rcu_assign_pointer(bond->curr_active_slave, new_active); |
837 | } | 839 | } |
838 | 840 | ||
839 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { | 841 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { |
840 | if (old_active) | 842 | if (old_active) |
841 | bond_set_slave_inactive_flags(old_active); | 843 | bond_set_slave_inactive_flags(old_active, |
844 | BOND_SLAVE_NOTIFY_NOW); | ||
842 | 845 | ||
843 | if (new_active) { | 846 | if (new_active) { |
844 | bool should_notify_peers = false; | 847 | bool should_notify_peers = false; |
845 | 848 | ||
846 | bond_set_slave_active_flags(new_active); | 849 | bond_set_slave_active_flags(new_active, |
850 | BOND_SLAVE_NOTIFY_NOW); | ||
847 | 851 | ||
848 | if (bond->params.fail_over_mac) | 852 | if (bond->params.fail_over_mac) |
849 | bond_do_fail_over_mac(bond, new_active, | 853 | bond_do_fail_over_mac(bond, new_active, |
@@ -1193,6 +1197,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1193 | return -EBUSY; | 1197 | return -EBUSY; |
1194 | } | 1198 | } |
1195 | 1199 | ||
1200 | if (bond_dev == slave_dev) { | ||
1201 | pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name); | ||
1202 | return -EPERM; | ||
1203 | } | ||
1204 | |||
1196 | /* vlan challenged mutual exclusion */ | 1205 | /* vlan challenged mutual exclusion */ |
1197 | /* no need to lock since we're protected by rtnl_lock */ | 1206 | /* no need to lock since we're protected by rtnl_lock */ |
1198 | if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { | 1207 | if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { |
@@ -1463,14 +1472,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1463 | 1472 | ||
1464 | switch (bond->params.mode) { | 1473 | switch (bond->params.mode) { |
1465 | case BOND_MODE_ACTIVEBACKUP: | 1474 | case BOND_MODE_ACTIVEBACKUP: |
1466 | bond_set_slave_inactive_flags(new_slave); | 1475 | bond_set_slave_inactive_flags(new_slave, |
1476 | BOND_SLAVE_NOTIFY_NOW); | ||
1467 | break; | 1477 | break; |
1468 | case BOND_MODE_8023AD: | 1478 | case BOND_MODE_8023AD: |
1469 | /* in 802.3ad mode, the internal mechanism | 1479 | /* in 802.3ad mode, the internal mechanism |
1470 | * will activate the slaves in the selected | 1480 | * will activate the slaves in the selected |
1471 | * aggregator | 1481 | * aggregator |
1472 | */ | 1482 | */ |
1473 | bond_set_slave_inactive_flags(new_slave); | 1483 | bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); |
1474 | /* if this is the first slave */ | 1484 | /* if this is the first slave */ |
1475 | if (!prev_slave) { | 1485 | if (!prev_slave) { |
1476 | SLAVE_AD_INFO(new_slave).id = 1; | 1486 | SLAVE_AD_INFO(new_slave).id = 1; |
@@ -1488,7 +1498,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1488 | case BOND_MODE_TLB: | 1498 | case BOND_MODE_TLB: |
1489 | case BOND_MODE_ALB: | 1499 | case BOND_MODE_ALB: |
1490 | bond_set_active_slave(new_slave); | 1500 | bond_set_active_slave(new_slave); |
1491 | bond_set_slave_inactive_flags(new_slave); | 1501 | bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); |
1492 | break; | 1502 | break; |
1493 | default: | 1503 | default: |
1494 | pr_debug("This slave is always active in trunk mode\n"); | 1504 | pr_debug("This slave is always active in trunk mode\n"); |
@@ -1654,9 +1664,6 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1654 | return -EINVAL; | 1664 | return -EINVAL; |
1655 | } | 1665 | } |
1656 | 1666 | ||
1657 | /* release the slave from its bond */ | ||
1658 | bond->slave_cnt--; | ||
1659 | |||
1660 | bond_sysfs_slave_del(slave); | 1667 | bond_sysfs_slave_del(slave); |
1661 | 1668 | ||
1662 | bond_upper_dev_unlink(bond_dev, slave_dev); | 1669 | bond_upper_dev_unlink(bond_dev, slave_dev); |
@@ -1738,6 +1745,7 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1738 | 1745 | ||
1739 | unblock_netpoll_tx(); | 1746 | unblock_netpoll_tx(); |
1740 | synchronize_rcu(); | 1747 | synchronize_rcu(); |
1748 | bond->slave_cnt--; | ||
1741 | 1749 | ||
1742 | if (!bond_has_slaves(bond)) { | 1750 | if (!bond_has_slaves(bond)) { |
1743 | call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); | 1751 | call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); |
@@ -2015,7 +2023,8 @@ static void bond_miimon_commit(struct bonding *bond) | |||
2015 | 2023 | ||
2016 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || | 2024 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || |
2017 | bond->params.mode == BOND_MODE_8023AD) | 2025 | bond->params.mode == BOND_MODE_8023AD) |
2018 | bond_set_slave_inactive_flags(slave); | 2026 | bond_set_slave_inactive_flags(slave, |
2027 | BOND_SLAVE_NOTIFY_NOW); | ||
2019 | 2028 | ||
2020 | pr_info("%s: link status definitely down for interface %s, disabling it\n", | 2029 | pr_info("%s: link status definitely down for interface %s, disabling it\n", |
2021 | bond->dev->name, slave->dev->name); | 2030 | bond->dev->name, slave->dev->name); |
@@ -2562,7 +2571,8 @@ static void bond_ab_arp_commit(struct bonding *bond) | |||
2562 | slave->link = BOND_LINK_UP; | 2571 | slave->link = BOND_LINK_UP; |
2563 | if (bond->current_arp_slave) { | 2572 | if (bond->current_arp_slave) { |
2564 | bond_set_slave_inactive_flags( | 2573 | bond_set_slave_inactive_flags( |
2565 | bond->current_arp_slave); | 2574 | bond->current_arp_slave, |
2575 | BOND_SLAVE_NOTIFY_NOW); | ||
2566 | bond->current_arp_slave = NULL; | 2576 | bond->current_arp_slave = NULL; |
2567 | } | 2577 | } |
2568 | 2578 | ||
@@ -2582,7 +2592,8 @@ static void bond_ab_arp_commit(struct bonding *bond) | |||
2582 | slave->link_failure_count++; | 2592 | slave->link_failure_count++; |
2583 | 2593 | ||
2584 | slave->link = BOND_LINK_DOWN; | 2594 | slave->link = BOND_LINK_DOWN; |
2585 | bond_set_slave_inactive_flags(slave); | 2595 | bond_set_slave_inactive_flags(slave, |
2596 | BOND_SLAVE_NOTIFY_NOW); | ||
2586 | 2597 | ||
2587 | pr_info("%s: link status definitely down for interface %s, disabling it\n", | 2598 | pr_info("%s: link status definitely down for interface %s, disabling it\n", |
2588 | bond->dev->name, slave->dev->name); | 2599 | bond->dev->name, slave->dev->name); |
@@ -2615,17 +2626,17 @@ do_failover: | |||
2615 | 2626 | ||
2616 | /* | 2627 | /* |
2617 | * Send ARP probes for active-backup mode ARP monitor. | 2628 | * Send ARP probes for active-backup mode ARP monitor. |
2629 | * | ||
2630 | * Called with rcu_read_lock held. | ||
2618 | */ | 2631 | */ |
2619 | static bool bond_ab_arp_probe(struct bonding *bond) | 2632 | static bool bond_ab_arp_probe(struct bonding *bond) |
2620 | { | 2633 | { |
2621 | struct slave *slave, *before = NULL, *new_slave = NULL, | 2634 | struct slave *slave, *before = NULL, *new_slave = NULL, |
2622 | *curr_arp_slave, *curr_active_slave; | 2635 | *curr_arp_slave = rcu_dereference(bond->current_arp_slave), |
2636 | *curr_active_slave = rcu_dereference(bond->curr_active_slave); | ||
2623 | struct list_head *iter; | 2637 | struct list_head *iter; |
2624 | bool found = false; | 2638 | bool found = false; |
2625 | 2639 | bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; | |
2626 | rcu_read_lock(); | ||
2627 | curr_arp_slave = rcu_dereference(bond->current_arp_slave); | ||
2628 | curr_active_slave = rcu_dereference(bond->curr_active_slave); | ||
2629 | 2640 | ||
2630 | if (curr_arp_slave && curr_active_slave) | 2641 | if (curr_arp_slave && curr_active_slave) |
2631 | pr_info("PROBE: c_arp %s && cas %s BAD\n", | 2642 | pr_info("PROBE: c_arp %s && cas %s BAD\n", |
@@ -2634,32 +2645,23 @@ static bool bond_ab_arp_probe(struct bonding *bond) | |||
2634 | 2645 | ||
2635 | if (curr_active_slave) { | 2646 | if (curr_active_slave) { |
2636 | bond_arp_send_all(bond, curr_active_slave); | 2647 | bond_arp_send_all(bond, curr_active_slave); |
2637 | rcu_read_unlock(); | 2648 | return should_notify_rtnl; |
2638 | return true; | ||
2639 | } | 2649 | } |
2640 | rcu_read_unlock(); | ||
2641 | 2650 | ||
2642 | /* if we don't have a curr_active_slave, search for the next available | 2651 | /* if we don't have a curr_active_slave, search for the next available |
2643 | * backup slave from the current_arp_slave and make it the candidate | 2652 | * backup slave from the current_arp_slave and make it the candidate |
2644 | * for becoming the curr_active_slave | 2653 | * for becoming the curr_active_slave |
2645 | */ | 2654 | */ |
2646 | 2655 | ||
2647 | if (!rtnl_trylock()) | ||
2648 | return false; | ||
2649 | /* curr_arp_slave might have gone away */ | ||
2650 | curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave); | ||
2651 | |||
2652 | if (!curr_arp_slave) { | 2656 | if (!curr_arp_slave) { |
2653 | curr_arp_slave = bond_first_slave(bond); | 2657 | curr_arp_slave = bond_first_slave_rcu(bond); |
2654 | if (!curr_arp_slave) { | 2658 | if (!curr_arp_slave) |
2655 | rtnl_unlock(); | 2659 | return should_notify_rtnl; |
2656 | return true; | ||
2657 | } | ||
2658 | } | 2660 | } |
2659 | 2661 | ||
2660 | bond_set_slave_inactive_flags(curr_arp_slave); | 2662 | bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); |
2661 | 2663 | ||
2662 | bond_for_each_slave(bond, slave, iter) { | 2664 | bond_for_each_slave_rcu(bond, slave, iter) { |
2663 | if (!found && !before && IS_UP(slave->dev)) | 2665 | if (!found && !before && IS_UP(slave->dev)) |
2664 | before = slave; | 2666 | before = slave; |
2665 | 2667 | ||
@@ -2677,7 +2679,8 @@ static bool bond_ab_arp_probe(struct bonding *bond) | |||
2677 | if (slave->link_failure_count < UINT_MAX) | 2679 | if (slave->link_failure_count < UINT_MAX) |
2678 | slave->link_failure_count++; | 2680 | slave->link_failure_count++; |
2679 | 2681 | ||
2680 | bond_set_slave_inactive_flags(slave); | 2682 | bond_set_slave_inactive_flags(slave, |
2683 | BOND_SLAVE_NOTIFY_LATER); | ||
2681 | 2684 | ||
2682 | pr_info("%s: backup interface %s is now down.\n", | 2685 | pr_info("%s: backup interface %s is now down.\n", |
2683 | bond->dev->name, slave->dev->name); | 2686 | bond->dev->name, slave->dev->name); |
@@ -2689,26 +2692,31 @@ static bool bond_ab_arp_probe(struct bonding *bond) | |||
2689 | if (!new_slave && before) | 2692 | if (!new_slave && before) |
2690 | new_slave = before; | 2693 | new_slave = before; |
2691 | 2694 | ||
2692 | if (!new_slave) { | 2695 | if (!new_slave) |
2693 | rtnl_unlock(); | 2696 | goto check_state; |
2694 | return true; | ||
2695 | } | ||
2696 | 2697 | ||
2697 | new_slave->link = BOND_LINK_BACK; | 2698 | new_slave->link = BOND_LINK_BACK; |
2698 | bond_set_slave_active_flags(new_slave); | 2699 | bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); |
2699 | bond_arp_send_all(bond, new_slave); | 2700 | bond_arp_send_all(bond, new_slave); |
2700 | new_slave->jiffies = jiffies; | 2701 | new_slave->jiffies = jiffies; |
2701 | rcu_assign_pointer(bond->current_arp_slave, new_slave); | 2702 | rcu_assign_pointer(bond->current_arp_slave, new_slave); |
2702 | rtnl_unlock(); | ||
2703 | 2703 | ||
2704 | return true; | 2704 | check_state: |
2705 | bond_for_each_slave_rcu(bond, slave, iter) { | ||
2706 | if (slave->should_notify) { | ||
2707 | should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; | ||
2708 | break; | ||
2709 | } | ||
2710 | } | ||
2711 | return should_notify_rtnl; | ||
2705 | } | 2712 | } |
2706 | 2713 | ||
2707 | static void bond_activebackup_arp_mon(struct work_struct *work) | 2714 | static void bond_activebackup_arp_mon(struct work_struct *work) |
2708 | { | 2715 | { |
2709 | struct bonding *bond = container_of(work, struct bonding, | 2716 | struct bonding *bond = container_of(work, struct bonding, |
2710 | arp_work.work); | 2717 | arp_work.work); |
2711 | bool should_notify_peers = false, should_commit = false; | 2718 | bool should_notify_peers = false; |
2719 | bool should_notify_rtnl = false; | ||
2712 | int delta_in_ticks; | 2720 | int delta_in_ticks; |
2713 | 2721 | ||
2714 | delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); | 2722 | delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); |
@@ -2717,11 +2725,12 @@ static void bond_activebackup_arp_mon(struct work_struct *work) | |||
2717 | goto re_arm; | 2725 | goto re_arm; |
2718 | 2726 | ||
2719 | rcu_read_lock(); | 2727 | rcu_read_lock(); |
2728 | |||
2720 | should_notify_peers = bond_should_notify_peers(bond); | 2729 | should_notify_peers = bond_should_notify_peers(bond); |
2721 | should_commit = bond_ab_arp_inspect(bond); | ||
2722 | rcu_read_unlock(); | ||
2723 | 2730 | ||
2724 | if (should_commit) { | 2731 | if (bond_ab_arp_inspect(bond)) { |
2732 | rcu_read_unlock(); | ||
2733 | |||
2725 | /* Race avoidance with bond_close flush of workqueue */ | 2734 | /* Race avoidance with bond_close flush of workqueue */ |
2726 | if (!rtnl_trylock()) { | 2735 | if (!rtnl_trylock()) { |
2727 | delta_in_ticks = 1; | 2736 | delta_in_ticks = 1; |
@@ -2730,23 +2739,28 @@ static void bond_activebackup_arp_mon(struct work_struct *work) | |||
2730 | } | 2739 | } |
2731 | 2740 | ||
2732 | bond_ab_arp_commit(bond); | 2741 | bond_ab_arp_commit(bond); |
2742 | |||
2733 | rtnl_unlock(); | 2743 | rtnl_unlock(); |
2744 | rcu_read_lock(); | ||
2734 | } | 2745 | } |
2735 | 2746 | ||
2736 | if (!bond_ab_arp_probe(bond)) { | 2747 | should_notify_rtnl = bond_ab_arp_probe(bond); |
2737 | /* rtnl locking failed, re-arm */ | 2748 | rcu_read_unlock(); |
2738 | delta_in_ticks = 1; | ||
2739 | should_notify_peers = false; | ||
2740 | } | ||
2741 | 2749 | ||
2742 | re_arm: | 2750 | re_arm: |
2743 | if (bond->params.arp_interval) | 2751 | if (bond->params.arp_interval) |
2744 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 2752 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
2745 | 2753 | ||
2746 | if (should_notify_peers) { | 2754 | if (should_notify_peers || should_notify_rtnl) { |
2747 | if (!rtnl_trylock()) | 2755 | if (!rtnl_trylock()) |
2748 | return; | 2756 | return; |
2749 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); | 2757 | |
2758 | if (should_notify_peers) | ||
2759 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, | ||
2760 | bond->dev); | ||
2761 | if (should_notify_rtnl) | ||
2762 | bond_slave_state_notify(bond); | ||
2763 | |||
2750 | rtnl_unlock(); | 2764 | rtnl_unlock(); |
2751 | } | 2765 | } |
2752 | } | 2766 | } |
@@ -3046,9 +3060,11 @@ static int bond_open(struct net_device *bond_dev) | |||
3046 | bond_for_each_slave(bond, slave, iter) { | 3060 | bond_for_each_slave(bond, slave, iter) { |
3047 | if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) | 3061 | if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) |
3048 | && (slave != bond->curr_active_slave)) { | 3062 | && (slave != bond->curr_active_slave)) { |
3049 | bond_set_slave_inactive_flags(slave); | 3063 | bond_set_slave_inactive_flags(slave, |
3064 | BOND_SLAVE_NOTIFY_NOW); | ||
3050 | } else { | 3065 | } else { |
3051 | bond_set_slave_active_flags(slave); | 3066 | bond_set_slave_active_flags(slave, |
3067 | BOND_SLAVE_NOTIFY_NOW); | ||
3052 | } | 3068 | } |
3053 | } | 3069 | } |
3054 | read_unlock(&bond->curr_slave_lock); | 3070 | read_unlock(&bond->curr_slave_lock); |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 86ccfb9f71cc..2b0fdec695f7 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -195,7 +195,8 @@ struct slave { | |||
195 | s8 new_link; | 195 | s8 new_link; |
196 | u8 backup:1, /* indicates backup slave. Value corresponds with | 196 | u8 backup:1, /* indicates backup slave. Value corresponds with |
197 | BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ | 197 | BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ |
198 | inactive:1; /* indicates inactive slave */ | 198 | inactive:1, /* indicates inactive slave */ |
199 | should_notify:1; /* indicates whether the state changed */ | ||
199 | u8 duplex; | 200 | u8 duplex; |
200 | u32 original_mtu; | 201 | u32 original_mtu; |
201 | u32 link_failure_count; | 202 | u32 link_failure_count; |
@@ -303,6 +304,24 @@ static inline void bond_set_backup_slave(struct slave *slave) | |||
303 | } | 304 | } |
304 | } | 305 | } |
305 | 306 | ||
307 | static inline void bond_set_slave_state(struct slave *slave, | ||
308 | int slave_state, bool notify) | ||
309 | { | ||
310 | if (slave->backup == slave_state) | ||
311 | return; | ||
312 | |||
313 | slave->backup = slave_state; | ||
314 | if (notify) { | ||
315 | rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL); | ||
316 | slave->should_notify = 0; | ||
317 | } else { | ||
318 | if (slave->should_notify) | ||
319 | slave->should_notify = 0; | ||
320 | else | ||
321 | slave->should_notify = 1; | ||
322 | } | ||
323 | } | ||
324 | |||
306 | static inline void bond_slave_state_change(struct bonding *bond) | 325 | static inline void bond_slave_state_change(struct bonding *bond) |
307 | { | 326 | { |
308 | struct list_head *iter; | 327 | struct list_head *iter; |
@@ -316,6 +335,19 @@ static inline void bond_slave_state_change(struct bonding *bond) | |||
316 | } | 335 | } |
317 | } | 336 | } |
318 | 337 | ||
338 | static inline void bond_slave_state_notify(struct bonding *bond) | ||
339 | { | ||
340 | struct list_head *iter; | ||
341 | struct slave *tmp; | ||
342 | |||
343 | bond_for_each_slave(bond, tmp, iter) { | ||
344 | if (tmp->should_notify) { | ||
345 | rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL); | ||
346 | tmp->should_notify = 0; | ||
347 | } | ||
348 | } | ||
349 | } | ||
350 | |||
319 | static inline int bond_slave_state(struct slave *slave) | 351 | static inline int bond_slave_state(struct slave *slave) |
320 | { | 352 | { |
321 | return slave->backup; | 353 | return slave->backup; |
@@ -343,6 +375,9 @@ static inline bool bond_is_active_slave(struct slave *slave) | |||
343 | #define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ | 375 | #define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ |
344 | BOND_ARP_VALIDATE_BACKUP) | 376 | BOND_ARP_VALIDATE_BACKUP) |
345 | 377 | ||
378 | #define BOND_SLAVE_NOTIFY_NOW true | ||
379 | #define BOND_SLAVE_NOTIFY_LATER false | ||
380 | |||
346 | static inline int slave_do_arp_validate(struct bonding *bond, | 381 | static inline int slave_do_arp_validate(struct bonding *bond, |
347 | struct slave *slave) | 382 | struct slave *slave) |
348 | { | 383 | { |
@@ -394,17 +429,19 @@ static inline void bond_netpoll_send_skb(const struct slave *slave, | |||
394 | } | 429 | } |
395 | #endif | 430 | #endif |
396 | 431 | ||
397 | static inline void bond_set_slave_inactive_flags(struct slave *slave) | 432 | static inline void bond_set_slave_inactive_flags(struct slave *slave, |
433 | bool notify) | ||
398 | { | 434 | { |
399 | if (!bond_is_lb(slave->bond)) | 435 | if (!bond_is_lb(slave->bond)) |
400 | bond_set_backup_slave(slave); | 436 | bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); |
401 | if (!slave->bond->params.all_slaves_active) | 437 | if (!slave->bond->params.all_slaves_active) |
402 | slave->inactive = 1; | 438 | slave->inactive = 1; |
403 | } | 439 | } |
404 | 440 | ||
405 | static inline void bond_set_slave_active_flags(struct slave *slave) | 441 | static inline void bond_set_slave_active_flags(struct slave *slave, |
442 | bool notify) | ||
406 | { | 443 | { |
407 | bond_set_active_slave(slave); | 444 | bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify); |
408 | slave->inactive = 0; | 445 | slave->inactive = 0; |
409 | } | 446 | } |
410 | 447 | ||
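The notify argument introduced above exists because bond_set_slave_state() ends in rtmsg_ifinfo(), which must run under RTNL and may sleep, while the 3ad and active-backup ARP monitors flip slave state under rcu_read_lock() and bond->lock. Passing BOND_SLAVE_NOTIFY_LATER only records the pending change in slave->should_notify; the netlink message is flushed later by bond_slave_state_notify() once RTNL can be taken. A minimal illustrative sketch of that calling pattern (the monitor function itself is made up; the helpers are the ones defined above):

	static void example_monitor(struct bonding *bond)
	{
		struct list_head *iter;
		struct slave *slave;
		bool notify = BOND_SLAVE_NOTIFY_LATER;

		rcu_read_lock();
		bond_for_each_slave_rcu(bond, slave, iter) {
			if (slave->link == BOND_LINK_DOWN)
				/* atomic context: defer the RTM_NEWLINK message */
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_LATER);
			if (slave->should_notify)
				notify = BOND_SLAVE_NOTIFY_NOW;
		}
		rcu_read_unlock();

		if (notify && rtnl_trylock()) {
			bond_slave_state_notify(bond);	/* send deferred RTM_NEWLINKs */
			rtnl_unlock();
		}
	}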
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 320bef2dba42..61376abdab39 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -144,6 +144,8 @@ | |||
144 | 144 | ||
145 | #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) | 145 | #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) |
146 | 146 | ||
147 | #define FLEXCAN_TIMEOUT_US (50) | ||
148 | |||
147 | /* | 149 | /* |
148 | * FLEXCAN hardware feature flags | 150 | * FLEXCAN hardware feature flags |
149 | * | 151 | * |
@@ -262,6 +264,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr) | |||
262 | } | 264 | } |
263 | #endif | 265 | #endif |
264 | 266 | ||
267 | static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) | ||
268 | { | ||
269 | if (!priv->reg_xceiver) | ||
270 | return 0; | ||
271 | |||
272 | return regulator_enable(priv->reg_xceiver); | ||
273 | } | ||
274 | |||
275 | static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) | ||
276 | { | ||
277 | if (!priv->reg_xceiver) | ||
278 | return 0; | ||
279 | |||
280 | return regulator_disable(priv->reg_xceiver); | ||
281 | } | ||
282 | |||
265 | static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, | 283 | static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, |
266 | u32 reg_esr) | 284 | u32 reg_esr) |
267 | { | 285 | { |
@@ -269,26 +287,95 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, | |||
269 | (reg_esr & FLEXCAN_ESR_ERR_BUS); | 287 | (reg_esr & FLEXCAN_ESR_ERR_BUS); |
270 | } | 288 | } |
271 | 289 | ||
272 | static inline void flexcan_chip_enable(struct flexcan_priv *priv) | 290 | static int flexcan_chip_enable(struct flexcan_priv *priv) |
273 | { | 291 | { |
274 | struct flexcan_regs __iomem *regs = priv->base; | 292 | struct flexcan_regs __iomem *regs = priv->base; |
293 | unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; | ||
275 | u32 reg; | 294 | u32 reg; |
276 | 295 | ||
277 | reg = flexcan_read(®s->mcr); | 296 | reg = flexcan_read(®s->mcr); |
278 | reg &= ~FLEXCAN_MCR_MDIS; | 297 | reg &= ~FLEXCAN_MCR_MDIS; |
279 | flexcan_write(reg, ®s->mcr); | 298 | flexcan_write(reg, ®s->mcr); |
280 | 299 | ||
281 | udelay(10); | 300 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) |
301 | usleep_range(10, 20); | ||
302 | |||
303 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) | ||
304 | return -ETIMEDOUT; | ||
305 | |||
306 | return 0; | ||
282 | } | 307 | } |
283 | 308 | ||
284 | static inline void flexcan_chip_disable(struct flexcan_priv *priv) | 309 | static int flexcan_chip_disable(struct flexcan_priv *priv) |
285 | { | 310 | { |
286 | struct flexcan_regs __iomem *regs = priv->base; | 311 | struct flexcan_regs __iomem *regs = priv->base; |
312 | unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; | ||
287 | u32 reg; | 313 | u32 reg; |
288 | 314 | ||
289 | reg = flexcan_read(®s->mcr); | 315 | reg = flexcan_read(®s->mcr); |
290 | reg |= FLEXCAN_MCR_MDIS; | 316 | reg |= FLEXCAN_MCR_MDIS; |
291 | flexcan_write(reg, ®s->mcr); | 317 | flexcan_write(reg, ®s->mcr); |
318 | |||
319 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) | ||
320 | usleep_range(10, 20); | ||
321 | |||
322 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) | ||
323 | return -ETIMEDOUT; | ||
324 | |||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | static int flexcan_chip_freeze(struct flexcan_priv *priv) | ||
329 | { | ||
330 | struct flexcan_regs __iomem *regs = priv->base; | ||
331 | unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; | ||
332 | u32 reg; | ||
333 | |||
334 | reg = flexcan_read(®s->mcr); | ||
335 | reg |= FLEXCAN_MCR_HALT; | ||
336 | flexcan_write(reg, ®s->mcr); | ||
337 | |||
338 | while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) | ||
339 | usleep_range(100, 200); | ||
340 | |||
341 | if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) | ||
342 | return -ETIMEDOUT; | ||
343 | |||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | static int flexcan_chip_unfreeze(struct flexcan_priv *priv) | ||
348 | { | ||
349 | struct flexcan_regs __iomem *regs = priv->base; | ||
350 | unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; | ||
351 | u32 reg; | ||
352 | |||
353 | reg = flexcan_read(®s->mcr); | ||
354 | reg &= ~FLEXCAN_MCR_HALT; | ||
355 | flexcan_write(reg, ®s->mcr); | ||
356 | |||
357 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) | ||
358 | usleep_range(10, 20); | ||
359 | |||
360 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK) | ||
361 | return -ETIMEDOUT; | ||
362 | |||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static int flexcan_chip_softreset(struct flexcan_priv *priv) | ||
367 | { | ||
368 | struct flexcan_regs __iomem *regs = priv->base; | ||
369 | unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; | ||
370 | |||
371 | flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); | ||
372 | while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST)) | ||
373 | usleep_range(10, 20); | ||
374 | |||
375 | if (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST) | ||
376 | return -ETIMEDOUT; | ||
377 | |||
378 | return 0; | ||
292 | } | 379 | } |
293 | 380 | ||
294 | static int flexcan_get_berr_counter(const struct net_device *dev, | 381 | static int flexcan_get_berr_counter(const struct net_device *dev, |
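All five helpers added above share one shape: write an MCR control bit, poll the corresponding acknowledge bit for at most FLEXCAN_TIMEOUT_US (the freeze helper scales its timeout with the configured bitrate), sleep between reads, and fail with -ETIMEDOUT if the hardware never acknowledges. Factored out purely for illustration (flexcan_poll_ack() is hypothetical; the driver open-codes the loop in each helper):

	static int flexcan_poll_ack(struct flexcan_regs __iomem *regs,
				    u32 ack_mask, bool want_set)
	{
		unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;

		while (timeout--) {
			bool set = flexcan_read(&regs->mcr) & ack_mask;

			if (set == want_set)
				return 0;
			usleep_range(10, 20);
		}
		return -ETIMEDOUT;
	}

	/* e.g. flexcan_chip_enable(): clear FLEXCAN_MCR_MDIS, then
	 *	return flexcan_poll_ack(regs, FLEXCAN_MCR_LPM_ACK, false); */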
@@ -709,19 +796,14 @@ static int flexcan_chip_start(struct net_device *dev) | |||
709 | u32 reg_mcr, reg_ctrl; | 796 | u32 reg_mcr, reg_ctrl; |
710 | 797 | ||
711 | /* enable module */ | 798 | /* enable module */ |
712 | flexcan_chip_enable(priv); | 799 | err = flexcan_chip_enable(priv); |
800 | if (err) | ||
801 | return err; | ||
713 | 802 | ||
714 | /* soft reset */ | 803 | /* soft reset */ |
715 | flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); | 804 | err = flexcan_chip_softreset(priv); |
716 | udelay(10); | 805 | if (err) |
717 | 806 | goto out_chip_disable; | |
718 | reg_mcr = flexcan_read(®s->mcr); | ||
719 | if (reg_mcr & FLEXCAN_MCR_SOFTRST) { | ||
720 | netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n", | ||
721 | reg_mcr); | ||
722 | err = -ENODEV; | ||
723 | goto out; | ||
724 | } | ||
725 | 807 | ||
726 | flexcan_set_bittiming(dev); | 808 | flexcan_set_bittiming(dev); |
727 | 809 | ||
@@ -788,16 +870,14 @@ static int flexcan_chip_start(struct net_device *dev) | |||
788 | if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES) | 870 | if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES) |
789 | flexcan_write(0x0, ®s->rxfgmask); | 871 | flexcan_write(0x0, ®s->rxfgmask); |
790 | 872 | ||
791 | if (priv->reg_xceiver) { | 873 | err = flexcan_transceiver_enable(priv); |
792 | err = regulator_enable(priv->reg_xceiver); | 874 | if (err) |
793 | if (err) | 875 | goto out_chip_disable; |
794 | goto out; | ||
795 | } | ||
796 | 876 | ||
797 | /* synchronize with the can bus */ | 877 | /* synchronize with the can bus */ |
798 | reg_mcr = flexcan_read(®s->mcr); | 878 | err = flexcan_chip_unfreeze(priv); |
799 | reg_mcr &= ~FLEXCAN_MCR_HALT; | 879 | if (err) |
800 | flexcan_write(reg_mcr, ®s->mcr); | 880 | goto out_transceiver_disable; |
801 | 881 | ||
802 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | 882 | priv->can.state = CAN_STATE_ERROR_ACTIVE; |
803 | 883 | ||
@@ -810,7 +890,9 @@ static int flexcan_chip_start(struct net_device *dev) | |||
810 | 890 | ||
811 | return 0; | 891 | return 0; |
812 | 892 | ||
813 | out: | 893 | out_transceiver_disable: |
894 | flexcan_transceiver_disable(priv); | ||
895 | out_chip_disable: | ||
814 | flexcan_chip_disable(priv); | 896 | flexcan_chip_disable(priv); |
815 | return err; | 897 | return err; |
816 | } | 898 | } |
@@ -825,18 +907,17 @@ static void flexcan_chip_stop(struct net_device *dev) | |||
825 | { | 907 | { |
826 | struct flexcan_priv *priv = netdev_priv(dev); | 908 | struct flexcan_priv *priv = netdev_priv(dev); |
827 | struct flexcan_regs __iomem *regs = priv->base; | 909 | struct flexcan_regs __iomem *regs = priv->base; |
828 | u32 reg; | 910 | |
911 | /* freeze + disable module */ | ||
912 | flexcan_chip_freeze(priv); | ||
913 | flexcan_chip_disable(priv); | ||
829 | 914 | ||
830 | /* Disable all interrupts */ | 915 | /* Disable all interrupts */ |
831 | flexcan_write(0, ®s->imask1); | 916 | flexcan_write(0, ®s->imask1); |
917 | flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, | ||
918 | ®s->ctrl); | ||
832 | 919 | ||
833 | /* Disable + halt module */ | 920 | flexcan_transceiver_disable(priv); |
834 | reg = flexcan_read(®s->mcr); | ||
835 | reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT; | ||
836 | flexcan_write(reg, ®s->mcr); | ||
837 | |||
838 | if (priv->reg_xceiver) | ||
839 | regulator_disable(priv->reg_xceiver); | ||
840 | priv->can.state = CAN_STATE_STOPPED; | 921 | priv->can.state = CAN_STATE_STOPPED; |
841 | 922 | ||
842 | return; | 923 | return; |
@@ -866,7 +947,7 @@ static int flexcan_open(struct net_device *dev) | |||
866 | /* start chip and queuing */ | 947 | /* start chip and queuing */ |
867 | err = flexcan_chip_start(dev); | 948 | err = flexcan_chip_start(dev); |
868 | if (err) | 949 | if (err) |
869 | goto out_close; | 950 | goto out_free_irq; |
870 | 951 | ||
871 | can_led_event(dev, CAN_LED_EVENT_OPEN); | 952 | can_led_event(dev, CAN_LED_EVENT_OPEN); |
872 | 953 | ||
@@ -875,6 +956,8 @@ static int flexcan_open(struct net_device *dev) | |||
875 | 956 | ||
876 | return 0; | 957 | return 0; |
877 | 958 | ||
959 | out_free_irq: | ||
960 | free_irq(dev->irq, dev); | ||
878 | out_close: | 961 | out_close: |
879 | close_candev(dev); | 962 | close_candev(dev); |
880 | out_disable_per: | 963 | out_disable_per: |
@@ -945,12 +1028,16 @@ static int register_flexcandev(struct net_device *dev) | |||
945 | goto out_disable_ipg; | 1028 | goto out_disable_ipg; |
946 | 1029 | ||
947 | /* select "bus clock", chip must be disabled */ | 1030 | /* select "bus clock", chip must be disabled */ |
948 | flexcan_chip_disable(priv); | 1031 | err = flexcan_chip_disable(priv); |
1032 | if (err) | ||
1033 | goto out_disable_per; | ||
949 | reg = flexcan_read(®s->ctrl); | 1034 | reg = flexcan_read(®s->ctrl); |
950 | reg |= FLEXCAN_CTRL_CLK_SRC; | 1035 | reg |= FLEXCAN_CTRL_CLK_SRC; |
951 | flexcan_write(reg, ®s->ctrl); | 1036 | flexcan_write(reg, ®s->ctrl); |
952 | 1037 | ||
953 | flexcan_chip_enable(priv); | 1038 | err = flexcan_chip_enable(priv); |
1039 | if (err) | ||
1040 | goto out_chip_disable; | ||
954 | 1041 | ||
955 | /* set freeze, halt and activate FIFO, restrict register access */ | 1042 | /* set freeze, halt and activate FIFO, restrict register access */ |
956 | reg = flexcan_read(®s->mcr); | 1043 | reg = flexcan_read(®s->mcr); |
@@ -967,14 +1054,15 @@ static int register_flexcandev(struct net_device *dev) | |||
967 | if (!(reg & FLEXCAN_MCR_FEN)) { | 1054 | if (!(reg & FLEXCAN_MCR_FEN)) { |
968 | netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); | 1055 | netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); |
969 | err = -ENODEV; | 1056 | err = -ENODEV; |
970 | goto out_disable_per; | 1057 | goto out_chip_disable; |
971 | } | 1058 | } |
972 | 1059 | ||
973 | err = register_candev(dev); | 1060 | err = register_candev(dev); |
974 | 1061 | ||
975 | out_disable_per: | ||
976 | /* disable core and turn off clocks */ | 1062 | /* disable core and turn off clocks */ |
1063 | out_chip_disable: | ||
977 | flexcan_chip_disable(priv); | 1064 | flexcan_chip_disable(priv); |
1065 | out_disable_per: | ||
978 | clk_disable_unprepare(priv->clk_per); | 1066 | clk_disable_unprepare(priv->clk_per); |
979 | out_disable_ipg: | 1067 | out_disable_ipg: |
980 | clk_disable_unprepare(priv->clk_ipg); | 1068 | clk_disable_unprepare(priv->clk_ipg); |
@@ -1104,9 +1192,10 @@ static int flexcan_probe(struct platform_device *pdev) | |||
1104 | static int flexcan_remove(struct platform_device *pdev) | 1192 | static int flexcan_remove(struct platform_device *pdev) |
1105 | { | 1193 | { |
1106 | struct net_device *dev = platform_get_drvdata(pdev); | 1194 | struct net_device *dev = platform_get_drvdata(pdev); |
1195 | struct flexcan_priv *priv = netdev_priv(dev); | ||
1107 | 1196 | ||
1108 | unregister_flexcandev(dev); | 1197 | unregister_flexcandev(dev); |
1109 | 1198 | netif_napi_del(&priv->napi); | |
1110 | free_candev(dev); | 1199 | free_candev(dev); |
1111 | 1200 | ||
1112 | return 0; | 1201 | return 0; |
@@ -1117,8 +1206,11 @@ static int flexcan_suspend(struct device *device) | |||
1117 | { | 1206 | { |
1118 | struct net_device *dev = dev_get_drvdata(device); | 1207 | struct net_device *dev = dev_get_drvdata(device); |
1119 | struct flexcan_priv *priv = netdev_priv(dev); | 1208 | struct flexcan_priv *priv = netdev_priv(dev); |
1209 | int err; | ||
1120 | 1210 | ||
1121 | flexcan_chip_disable(priv); | 1211 | err = flexcan_chip_disable(priv); |
1212 | if (err) | ||
1213 | return err; | ||
1122 | 1214 | ||
1123 | if (netif_running(dev)) { | 1215 | if (netif_running(dev)) { |
1124 | netif_stop_queue(dev); | 1216 | netif_stop_queue(dev); |
@@ -1139,9 +1231,7 @@ static int flexcan_resume(struct device *device) | |||
1139 | netif_device_attach(dev); | 1231 | netif_device_attach(dev); |
1140 | netif_start_queue(dev); | 1232 | netif_start_queue(dev); |
1141 | } | 1233 | } |
1142 | flexcan_chip_enable(priv); | 1234 | return flexcan_chip_enable(priv); |
1143 | |||
1144 | return 0; | ||
1145 | } | 1235 | } |
1146 | #endif /* CONFIG_PM_SLEEP */ | 1236 | #endif /* CONFIG_PM_SLEEP */ |
1147 | 1237 | ||
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 1f7b5aa114fa..8a7bf7dad898 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
@@ -1484,6 +1484,10 @@ static int b44_open(struct net_device *dev) | |||
1484 | add_timer(&bp->timer); | 1484 | add_timer(&bp->timer); |
1485 | 1485 | ||
1486 | b44_enable_ints(bp); | 1486 | b44_enable_ints(bp); |
1487 | |||
1488 | if (bp->flags & B44_FLAG_EXTERNAL_PHY) | ||
1489 | phy_start(bp->phydev); | ||
1490 | |||
1487 | netif_start_queue(dev); | 1491 | netif_start_queue(dev); |
1488 | out: | 1492 | out: |
1489 | return err; | 1493 | return err; |
@@ -1646,6 +1650,9 @@ static int b44_close(struct net_device *dev) | |||
1646 | 1650 | ||
1647 | netif_stop_queue(dev); | 1651 | netif_stop_queue(dev); |
1648 | 1652 | ||
1653 | if (bp->flags & B44_FLAG_EXTERNAL_PHY) | ||
1654 | phy_stop(bp->phydev); | ||
1655 | |||
1649 | napi_disable(&bp->napi); | 1656 | napi_disable(&bp->napi); |
1650 | 1657 | ||
1651 | del_timer_sync(&bp->timer); | 1658 | del_timer_sync(&bp->timer); |
@@ -2222,7 +2229,12 @@ static void b44_adjust_link(struct net_device *dev) | |||
2222 | } | 2229 | } |
2223 | 2230 | ||
2224 | if (status_changed) { | 2231 | if (status_changed) { |
2225 | b44_check_phy(bp); | 2232 | u32 val = br32(bp, B44_TX_CTRL); |
2233 | if (bp->flags & B44_FLAG_FULL_DUPLEX) | ||
2234 | val |= TX_CTRL_DUPLEX; | ||
2235 | else | ||
2236 | val &= ~TX_CTRL_DUPLEX; | ||
2237 | bw32(bp, B44_TX_CTRL, val); | ||
2226 | phy_print_status(phydev); | 2238 | phy_print_status(phydev); |
2227 | } | 2239 | } |
2228 | } | 2240 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 66c0df78c3ff..dbcff509dc3f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -3875,7 +3875,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3875 | xmit_type); | 3875 | xmit_type); |
3876 | } | 3876 | } |
3877 | 3877 | ||
3878 | /* Add the macs to the parsing BD this is a vf */ | 3878 | /* Add the macs to the parsing BD if this is a vf or if |
3879 | * Tx Switching is enabled. | ||
3880 | */ | ||
3879 | if (IS_VF(bp)) { | 3881 | if (IS_VF(bp)) { |
3880 | /* override GRE parameters in BD */ | 3882 | /* override GRE parameters in BD */ |
3881 | bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, | 3883 | bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, |
@@ -3887,6 +3889,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3887 | &pbd_e2->data.mac_addr.dst_mid, | 3889 | &pbd_e2->data.mac_addr.dst_mid, |
3888 | &pbd_e2->data.mac_addr.dst_lo, | 3890 | &pbd_e2->data.mac_addr.dst_lo, |
3889 | eth->h_dest); | 3891 | eth->h_dest); |
3892 | } else if (bp->flags & TX_SWITCHING) { | ||
3893 | bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, | ||
3894 | &pbd_e2->data.mac_addr.dst_mid, | ||
3895 | &pbd_e2->data.mac_addr.dst_lo, | ||
3896 | eth->h_dest); | ||
3890 | } | 3897 | } |
3891 | 3898 | ||
3892 | SET_FLAG(pbd_e2_parsing_data, | 3899 | SET_FLAG(pbd_e2_parsing_data, |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3167ed6593b0..3b6d0ba86c71 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -6843,8 +6843,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
6843 | 6843 | ||
6844 | work_mask |= opaque_key; | 6844 | work_mask |= opaque_key; |
6845 | 6845 | ||
6846 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 6846 | if (desc->err_vlan & RXD_ERR_MASK) { |
6847 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { | ||
6848 | drop_it: | 6847 | drop_it: |
6849 | tg3_recycle_rx(tnapi, tpr, opaque_key, | 6848 | tg3_recycle_rx(tnapi, tpr, opaque_key, |
6850 | desc_idx, *post_ptr); | 6849 | desc_idx, *post_ptr); |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index ef472385bce4..04321e5a356e 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
@@ -2608,7 +2608,11 @@ struct tg3_rx_buffer_desc { | |||
2608 | #define RXD_ERR_TOO_SMALL 0x00400000 | 2608 | #define RXD_ERR_TOO_SMALL 0x00400000 |
2609 | #define RXD_ERR_NO_RESOURCES 0x00800000 | 2609 | #define RXD_ERR_NO_RESOURCES 0x00800000 |
2610 | #define RXD_ERR_HUGE_FRAME 0x01000000 | 2610 | #define RXD_ERR_HUGE_FRAME 0x01000000 |
2611 | #define RXD_ERR_MASK 0xffff0000 | 2611 | |
2612 | #define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \ | ||
2613 | RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \ | ||
2614 | RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \ | ||
2615 | RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME) | ||
2612 | 2616 | ||
2613 | u32 reserved; | 2617 | u32 reserved; |
2614 | u32 opaque; | 2618 | u32 opaque; |
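The practical effect of the tg3 change is that RXD_ERR_MASK now names only the genuinely fatal receive errors, so the drop test in tg3_rx() no longer needs the removed special case for RXD_ERR_ODD_NIBBLE_RCVD_MII: a descriptor carrying only that indication simply does not match the mask (assuming, as the change intends, that the odd-nibble bit is not among the ORed definitions). Illustrative only:

	/* Sketch: drop decision with the narrowed mask. */
	static bool tg3_rx_desc_should_drop(const struct tg3_rx_buffer_desc *desc)
	{
		/* RXD_ERR_ODD_NIBBLE_RCVD_MII alone no longer triggers a drop. */
		return desc->err_vlan & RXD_ERR_MASK;
	}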
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index cf64f3d0b60d..4ad1187e82fb 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
@@ -707,7 +707,8 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) | |||
707 | else | 707 | else |
708 | skb_checksum_none_assert(skb); | 708 | skb_checksum_none_assert(skb); |
709 | 709 | ||
710 | if (flags & BNA_CQ_EF_VLAN) | 710 | if ((flags & BNA_CQ_EF_VLAN) && |
711 | (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) | ||
711 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); | 712 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); |
712 | 713 | ||
713 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) | 714 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
@@ -2094,7 +2095,9 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) | |||
2094 | rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; | 2095 | rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; |
2095 | } | 2096 | } |
2096 | 2097 | ||
2097 | rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; | 2098 | rx_config->vlan_strip_status = |
2099 | (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? | ||
2100 | BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; | ||
2098 | } | 2101 | } |
2099 | 2102 | ||
2100 | static void | 2103 | static void |
@@ -3245,11 +3248,6 @@ bnad_set_rx_mode(struct net_device *netdev) | |||
3245 | BNA_RXMODE_ALLMULTI; | 3248 | BNA_RXMODE_ALLMULTI; |
3246 | bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); | 3249 | bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); |
3247 | 3250 | ||
3248 | if (bnad->cfg_flags & BNAD_CF_PROMISC) | ||
3249 | bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); | ||
3250 | else | ||
3251 | bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); | ||
3252 | |||
3253 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 3251 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3254 | } | 3252 | } |
3255 | 3253 | ||
@@ -3374,6 +3372,27 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
3374 | return 0; | 3372 | return 0; |
3375 | } | 3373 | } |
3376 | 3374 | ||
3375 | static int bnad_set_features(struct net_device *dev, netdev_features_t features) | ||
3376 | { | ||
3377 | struct bnad *bnad = netdev_priv(dev); | ||
3378 | netdev_features_t changed = features ^ dev->features; | ||
3379 | |||
3380 | if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { | ||
3381 | unsigned long flags; | ||
3382 | |||
3383 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
3384 | |||
3385 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | ||
3386 | bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); | ||
3387 | else | ||
3388 | bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); | ||
3389 | |||
3390 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
3391 | } | ||
3392 | |||
3393 | return 0; | ||
3394 | } | ||
3395 | |||
3377 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3396 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3378 | static void | 3397 | static void |
3379 | bnad_netpoll(struct net_device *netdev) | 3398 | bnad_netpoll(struct net_device *netdev) |
@@ -3421,6 +3440,7 @@ static const struct net_device_ops bnad_netdev_ops = { | |||
3421 | .ndo_change_mtu = bnad_change_mtu, | 3440 | .ndo_change_mtu = bnad_change_mtu, |
3422 | .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, | 3441 | .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, |
3423 | .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, | 3442 | .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, |
3443 | .ndo_set_features = bnad_set_features, | ||
3424 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3444 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3425 | .ndo_poll_controller = bnad_netpoll | 3445 | .ndo_poll_controller = bnad_netpoll |
3426 | #endif | 3446 | #endif |
@@ -3433,14 +3453,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac) | |||
3433 | 3453 | ||
3434 | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | | 3454 | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | |
3435 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 3455 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
3436 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX; | 3456 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | |
3457 | NETIF_F_HW_VLAN_CTAG_RX; | ||
3437 | 3458 | ||
3438 | netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | | 3459 | netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | |
3439 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 3460 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
3440 | NETIF_F_TSO | NETIF_F_TSO6; | 3461 | NETIF_F_TSO | NETIF_F_TSO6; |
3441 | 3462 | ||
3442 | netdev->features |= netdev->hw_features | | 3463 | netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; |
3443 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; | ||
3444 | 3464 | ||
3445 | if (using_dac) | 3465 | if (using_dac) |
3446 | netdev->features |= NETIF_F_HIGHDMA; | 3466 | netdev->features |= NETIF_F_HIGHDMA; |
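With RX VLAN stripping now advertised in hw_features and wired through .ndo_set_features, the strip setting tracks the netdev feature flag at runtime (e.g. toggled from userspace with ethtool -K <dev> rxvlan on|off) instead of being forced on and then silently disabled in promiscuous mode. The delta check used above is the standard idiom; a pared-down sketch (illustrative only, mirroring bnad_set_features()):

	static int example_set_features(struct net_device *dev,
					netdev_features_t features)
	{
		/* Only the bits that actually changed need acting on. */
		netdev_features_t changed = features ^ dev->features;

		if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
			if (features & NETIF_F_HW_VLAN_CTAG_RX)
				; /* enable HW VLAN tag stripping */
			else
				; /* disable HW VLAN tag stripping */
		}
		return 0;
	}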
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 43ab35fea48d..34e2488767d9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -6179,6 +6179,7 @@ static struct pci_driver cxgb4_driver = { | |||
6179 | .id_table = cxgb4_pci_tbl, | 6179 | .id_table = cxgb4_pci_tbl, |
6180 | .probe = init_one, | 6180 | .probe = init_one, |
6181 | .remove = remove_one, | 6181 | .remove = remove_one, |
6182 | .shutdown = remove_one, | ||
6182 | .err_handler = &cxgb4_eeh, | 6183 | .err_handler = &cxgb4_eeh, |
6183 | }; | 6184 | }; |
6184 | 6185 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 8d09615da585..05529e273050 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -350,11 +350,13 @@ struct be_drv_stats { | |||
350 | u32 roce_drops_crc; | 350 | u32 roce_drops_crc; |
351 | }; | 351 | }; |
352 | 352 | ||
353 | /* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */ | ||
354 | #define BE_RESET_VLAN_TAG_ID 0xFFFF | ||
355 | |||
353 | struct be_vf_cfg { | 356 | struct be_vf_cfg { |
354 | unsigned char mac_addr[ETH_ALEN]; | 357 | unsigned char mac_addr[ETH_ALEN]; |
355 | int if_handle; | 358 | int if_handle; |
356 | int pmac_id; | 359 | int pmac_id; |
357 | u16 def_vid; | ||
358 | u16 vlan_tag; | 360 | u16 vlan_tag; |
359 | u32 tx_rate; | 361 | u32 tx_rate; |
360 | }; | 362 | }; |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 04ac9c6a0d39..36c80612e21a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -913,24 +913,14 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, | |||
913 | return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); | 913 | return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); |
914 | } | 914 | } |
915 | 915 | ||
916 | static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | 916 | static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, |
917 | struct sk_buff *skb, | 917 | struct sk_buff *skb, |
918 | bool *skip_hw_vlan) | 918 | bool *skip_hw_vlan) |
919 | { | 919 | { |
920 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; | 920 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; |
921 | unsigned int eth_hdr_len; | 921 | unsigned int eth_hdr_len; |
922 | struct iphdr *ip; | 922 | struct iphdr *ip; |
923 | 923 | ||
924 | /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less | ||
925 | * may cause a transmit stall on that port. So the work-around is to | ||
926 | * pad short packets (<= 32 bytes) to a 36-byte length. | ||
927 | */ | ||
928 | if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { | ||
929 | if (skb_padto(skb, 36)) | ||
930 | goto tx_drop; | ||
931 | skb->len = 36; | ||
932 | } | ||
933 | |||
934 | /* For padded packets, BE HW modifies tot_len field in IP header | 924 | /* For padded packets, BE HW modifies tot_len field in IP header |
935 | * incorrecly when VLAN tag is inserted by HW. | 925 | * incorrecly when VLAN tag is inserted by HW. |
936 | * For padded packets, Lancer computes incorrect checksum. | 926 | * For padded packets, Lancer computes incorrect checksum. |
@@ -959,7 +949,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | |||
959 | vlan_tx_tag_present(skb)) { | 949 | vlan_tx_tag_present(skb)) { |
960 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); | 950 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); |
961 | if (unlikely(!skb)) | 951 | if (unlikely(!skb)) |
962 | goto tx_drop; | 952 | goto err; |
963 | } | 953 | } |
964 | 954 | ||
965 | /* HW may lockup when VLAN HW tagging is requested on | 955 | /* HW may lockup when VLAN HW tagging is requested on |
@@ -981,15 +971,39 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | |||
981 | be_vlan_tag_tx_chk(adapter, skb)) { | 971 | be_vlan_tag_tx_chk(adapter, skb)) { |
982 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); | 972 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); |
983 | if (unlikely(!skb)) | 973 | if (unlikely(!skb)) |
984 | goto tx_drop; | 974 | goto err; |
985 | } | 975 | } |
986 | 976 | ||
987 | return skb; | 977 | return skb; |
988 | tx_drop: | 978 | tx_drop: |
989 | dev_kfree_skb_any(skb); | 979 | dev_kfree_skb_any(skb); |
980 | err: | ||
990 | return NULL; | 981 | return NULL; |
991 | } | 982 | } |
992 | 983 | ||
984 | static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | ||
985 | struct sk_buff *skb, | ||
986 | bool *skip_hw_vlan) | ||
987 | { | ||
988 | /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or | ||
989 | * less may cause a transmit stall on that port. So the work-around is | ||
990 | * to pad short packets (<= 32 bytes) to a 36-byte length. | ||
991 | */ | ||
992 | if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { | ||
993 | if (skb_padto(skb, 36)) | ||
994 | return NULL; | ||
995 | skb->len = 36; | ||
996 | } | ||
997 | |||
998 | if (BEx_chip(adapter) || lancer_chip(adapter)) { | ||
999 | skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); | ||
1000 | if (!skb) | ||
1001 | return NULL; | ||
1002 | } | ||
1003 | |||
1004 | return skb; | ||
1005 | } | ||
1006 | |||
993 | static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) | 1007 | static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) |
994 | { | 1008 | { |
995 | struct be_adapter *adapter = netdev_priv(netdev); | 1009 | struct be_adapter *adapter = netdev_priv(netdev); |
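The comment in the new be_xmit_workarounds() wrapper above explains the mechanism: Lancer/SH-R silicon can stall on frames of 32 bytes or less, so such frames are padded to 36 bytes before the other TX fixups run. A generic, hedged sketch of that runt-padding step; the helper name and threshold are illustrative, not part of the driver.

#include <linux/skbuff.h>

/* Pad a runt TX frame to min_len bytes.  skb_padto() zeroes the added
 * tail bytes but does not touch skb->len, and it frees the skb when it
 * fails, so a NULL return means the caller must not use the skb again.
 */
static struct sk_buff *example_pad_runt(struct sk_buff *skb, unsigned int min_len)
{
        if (skb->len >= min_len)
                return skb;

        if (skb_padto(skb, min_len))
                return NULL;            /* skb already freed */

        skb->len = min_len;             /* length the hardware will DMA */
        return skb;
}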
@@ -1157,6 +1171,14 @@ ret: | |||
1157 | return status; | 1171 | return status; |
1158 | } | 1172 | } |
1159 | 1173 | ||
1174 | static void be_clear_promisc(struct be_adapter *adapter) | ||
1175 | { | ||
1176 | adapter->promiscuous = false; | ||
1177 | adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; | ||
1178 | |||
1179 | be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); | ||
1180 | } | ||
1181 | |||
1160 | static void be_set_rx_mode(struct net_device *netdev) | 1182 | static void be_set_rx_mode(struct net_device *netdev) |
1161 | { | 1183 | { |
1162 | struct be_adapter *adapter = netdev_priv(netdev); | 1184 | struct be_adapter *adapter = netdev_priv(netdev); |
@@ -1170,9 +1192,7 @@ static void be_set_rx_mode(struct net_device *netdev) | |||
1170 | 1192 | ||
1171 | /* BE was previously in promiscuous mode; disable it */ | 1193 | /* BE was previously in promiscuous mode; disable it */ |
1172 | if (adapter->promiscuous) { | 1194 | if (adapter->promiscuous) { |
1173 | adapter->promiscuous = false; | 1195 | be_clear_promisc(adapter); |
1174 | be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); | ||
1175 | |||
1176 | if (adapter->vlans_added) | 1196 | if (adapter->vlans_added) |
1177 | be_vid_config(adapter); | 1197 | be_vid_config(adapter); |
1178 | } | 1198 | } |
@@ -1287,24 +1307,20 @@ static int be_set_vf_vlan(struct net_device *netdev, | |||
1287 | 1307 | ||
1288 | if (vlan || qos) { | 1308 | if (vlan || qos) { |
1289 | vlan |= qos << VLAN_PRIO_SHIFT; | 1309 | vlan |= qos << VLAN_PRIO_SHIFT; |
1290 | if (vf_cfg->vlan_tag != vlan) { | 1310 | if (vf_cfg->vlan_tag != vlan) |
1291 | /* If this is new value, program it. Else skip. */ | ||
1292 | vf_cfg->vlan_tag = vlan; | ||
1293 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | 1311 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, |
1294 | vf_cfg->if_handle, 0); | 1312 | vf_cfg->if_handle, 0); |
1295 | } | ||
1296 | } else { | 1313 | } else { |
1297 | /* Reset Transparent Vlan Tagging. */ | 1314 | /* Reset Transparent Vlan Tagging. */ |
1298 | vf_cfg->vlan_tag = 0; | 1315 | status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, |
1299 | vlan = vf_cfg->def_vid; | 1316 | vf + 1, vf_cfg->if_handle, 0); |
1300 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | ||
1301 | vf_cfg->if_handle, 0); | ||
1302 | } | 1317 | } |
1303 | 1318 | ||
1304 | 1319 | if (!status) | |
1305 | if (status) | 1320 | vf_cfg->vlan_tag = vlan; |
1321 | else | ||
1306 | dev_info(&adapter->pdev->dev, | 1322 | dev_info(&adapter->pdev->dev, |
1307 | "VLAN %d config on VF %d failed\n", vlan, vf); | 1323 | "VLAN %d config on VF %d failed\n", vlan, vf); |
1308 | return status; | 1324 | return status; |
1309 | } | 1325 | } |
1310 | 1326 | ||
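Together with the BE_RESET_VLAN_TAG_ID definition added in be.h earlier in this series, the hunk above follows a "program the firmware first, cache on success" rule: the 0xFFFF sentinel clears transparent tagging, and vf_cfg->vlan_tag is updated only when the command succeeded. A condensed sketch of that flow; struct example_vf, fw_set_vlan() and EXAMPLE_RESET_VLAN_ID are hypothetical stand-ins for the be2net internals.

#include <linux/if_vlan.h>
#include <linux/types.h>

#define EXAMPLE_RESET_VLAN_ID   0xFFFF  /* sentinel: clear transparent tagging */

struct example_vf {
        u16 vlan_tag;
};

int fw_set_vlan(struct example_vf *vf, u16 vlan);       /* hypothetical fw command */

static int example_set_vf_vlan(struct example_vf *vf, u16 vlan, u8 qos)
{
        int status;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                status = fw_set_vlan(vf, vlan);
        } else {
                vlan = 0;
                status = fw_set_vlan(vf, EXAMPLE_RESET_VLAN_ID);
        }

        if (!status)
                vf->vlan_tag = vlan;    /* cache only what the firmware accepted */

        return status;
}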
@@ -3013,11 +3029,11 @@ static int be_vf_setup_init(struct be_adapter *adapter) | |||
3013 | 3029 | ||
3014 | static int be_vf_setup(struct be_adapter *adapter) | 3030 | static int be_vf_setup(struct be_adapter *adapter) |
3015 | { | 3031 | { |
3032 | struct device *dev = &adapter->pdev->dev; | ||
3016 | struct be_vf_cfg *vf_cfg; | 3033 | struct be_vf_cfg *vf_cfg; |
3017 | u16 def_vlan, lnk_speed; | ||
3018 | int status, old_vfs, vf; | 3034 | int status, old_vfs, vf; |
3019 | struct device *dev = &adapter->pdev->dev; | ||
3020 | u32 privileges; | 3035 | u32 privileges; |
3036 | u16 lnk_speed; | ||
3021 | 3037 | ||
3022 | old_vfs = pci_num_vf(adapter->pdev); | 3038 | old_vfs = pci_num_vf(adapter->pdev); |
3023 | if (old_vfs) { | 3039 | if (old_vfs) { |
@@ -3084,12 +3100,6 @@ static int be_vf_setup(struct be_adapter *adapter) | |||
3084 | if (!status) | 3100 | if (!status) |
3085 | vf_cfg->tx_rate = lnk_speed; | 3101 | vf_cfg->tx_rate = lnk_speed; |
3086 | 3102 | ||
3087 | status = be_cmd_get_hsw_config(adapter, &def_vlan, | ||
3088 | vf + 1, vf_cfg->if_handle, NULL); | ||
3089 | if (status) | ||
3090 | goto err; | ||
3091 | vf_cfg->def_vid = def_vlan; | ||
3092 | |||
3093 | if (!old_vfs) | 3103 | if (!old_vfs) |
3094 | be_cmd_enable_vf(adapter, vf + 1); | 3104 | be_cmd_enable_vf(adapter, vf + 1); |
3095 | } | 3105 | } |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 903362a7b584..479a7cba45c0 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -389,12 +389,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
389 | netdev_err(ndev, "Tx DMA memory map failed\n"); | 389 | netdev_err(ndev, "Tx DMA memory map failed\n"); |
390 | return NETDEV_TX_OK; | 390 | return NETDEV_TX_OK; |
391 | } | 391 | } |
392 | /* Send it on its way. Tell FEC it's ready, interrupt when done, | ||
393 | * it's the last BD of the frame, and to put the CRC on the end. | ||
394 | */ | ||
395 | status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | ||
396 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); | ||
397 | bdp->cbd_sc = status; | ||
398 | 392 | ||
399 | if (fep->bufdesc_ex) { | 393 | if (fep->bufdesc_ex) { |
400 | 394 | ||
@@ -416,6 +410,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
416 | } | 410 | } |
417 | } | 411 | } |
418 | 412 | ||
413 | /* Send it on its way. Tell FEC it's ready, interrupt when done, | ||
414 | * it's the last BD of the frame, and to put the CRC on the end. | ||
415 | */ | ||
416 | status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | ||
417 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); | ||
418 | bdp->cbd_sc = status; | ||
419 | |||
419 | bdp_pre = fec_enet_get_prevdesc(bdp, fep); | 420 | bdp_pre = fec_enet_get_prevdesc(bdp, fep); |
420 | if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && | 421 | if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && |
421 | !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { | 422 | !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { |
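The FEC change above is an ordering fix: the descriptor used to be marked READY before its extended (timestamping) fields were filled in, so the DMA engine could pick up a half-written descriptor. The general rule is to write every field first and publish ownership last, behind a write barrier. A hedged sketch with hypothetical descriptor and flag names:

#include <linux/types.h>
#include <asm/barrier.h>

#define EXAMPLE_TX_READY        0x8000  /* hypothetical ownership bit */

struct example_desc {
        u32 buf_addr;
        u16 len;
        u16 status;
};

static void example_tx_publish(struct example_desc *bdp, u32 buf,
                               u16 len, u16 flags)
{
        bdp->buf_addr = buf;
        bdp->len      = len;

        wmb();          /* descriptor contents visible before ownership flips */

        bdp->status = flags | EXAMPLE_TX_READY;
}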
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 6b65f7795215..7aec6c833973 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -51,8 +51,8 @@ | |||
51 | 51 | ||
52 | #define DRV_NAME "mlx4_core" | 52 | #define DRV_NAME "mlx4_core" |
53 | #define PFX DRV_NAME ": " | 53 | #define PFX DRV_NAME ": " |
54 | #define DRV_VERSION "1.1" | 54 | #define DRV_VERSION "2.2-1" |
55 | #define DRV_RELDATE "Dec, 2011" | 55 | #define DRV_RELDATE "Feb, 2014" |
56 | 56 | ||
57 | #define MLX4_FS_UDP_UC_EN (1 << 1) | 57 | #define MLX4_FS_UDP_UC_EN (1 << 1) |
58 | #define MLX4_FS_TCP_UC_EN (1 << 2) | 58 | #define MLX4_FS_TCP_UC_EN (1 << 2) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 9ca223bc90fc..b57e8c87a34e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -57,8 +57,8 @@ | |||
57 | #include "en_port.h" | 57 | #include "en_port.h" |
58 | 58 | ||
59 | #define DRV_NAME "mlx4_en" | 59 | #define DRV_NAME "mlx4_en" |
60 | #define DRV_VERSION "2.0" | 60 | #define DRV_VERSION "2.2-1" |
61 | #define DRV_RELDATE "Dec 2011" | 61 | #define DRV_RELDATE "Feb 2014" |
62 | 62 | ||
63 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) | 63 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) |
64 | 64 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a064f06e0cb8..23b7e2d35a93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -46,8 +46,8 @@ | |||
46 | #include "mlx5_core.h" | 46 | #include "mlx5_core.h" |
47 | 47 | ||
48 | #define DRIVER_NAME "mlx5_core" | 48 | #define DRIVER_NAME "mlx5_core" |
49 | #define DRIVER_VERSION "1.0" | 49 | #define DRIVER_VERSION "2.2-1" |
50 | #define DRIVER_RELDATE "June 2013" | 50 | #define DRIVER_RELDATE "Feb 2014" |
51 | 51 | ||
52 | MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); | 52 | MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); |
53 | MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); | 53 | MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 4146664d4d6a..27c4f131863b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -340,6 +340,7 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter) | |||
340 | if (qlcnic_sriov_vf_check(adapter)) | 340 | if (qlcnic_sriov_vf_check(adapter)) |
341 | return -EINVAL; | 341 | return -EINVAL; |
342 | num_msix = 1; | 342 | num_msix = 1; |
343 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; | ||
343 | adapter->drv_tx_rings = QLCNIC_SINGLE_RING; | 344 | adapter->drv_tx_rings = QLCNIC_SINGLE_RING; |
344 | } | 345 | } |
345 | } | 346 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index 77f1bce432d2..7d4f54912bad 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | |||
@@ -807,7 +807,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, | |||
807 | !type->tc_param_valid) | 807 | !type->tc_param_valid) |
808 | return; | 808 | return; |
809 | 809 | ||
810 | if (tc < 0 || (tc > QLC_DCB_MAX_TC)) | 810 | if (tc < 0 || (tc >= QLC_DCB_MAX_TC)) |
811 | return; | 811 | return; |
812 | 812 | ||
813 | tc_cfg = &type->tc_cfg[tc]; | 813 | tc_cfg = &type->tc_cfg[tc]; |
@@ -843,7 +843,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, | |||
843 | !type->tc_param_valid) | 843 | !type->tc_param_valid) |
844 | return; | 844 | return; |
845 | 845 | ||
846 | if (pgid < 0 || pgid > QLC_DCB_MAX_PG) | 846 | if (pgid < 0 || pgid >= QLC_DCB_MAX_PG) |
847 | return; | 847 | return; |
848 | 848 | ||
849 | pgcfg = &type->pg_cfg[pgid]; | 849 | pgcfg = &type->pg_cfg[pgid]; |
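Both qlcnic_dcb hunks above fix the same off-by-one: a table with QLC_DCB_MAX_TC (or QLC_DCB_MAX_PG) entries has valid indices 0 .. MAX-1, so the reject test must be ">=", not ">". The shape of the corrected check, with hypothetical names:

#include <linux/types.h>

#define EXAMPLE_MAX_TC  8

struct example_tc_cfg {
        u8 prio;
};

static const struct example_tc_cfg *
example_get_tc(const struct example_tc_cfg tbl[EXAMPLE_MAX_TC], int tc)
{
        if (tc < 0 || tc >= EXAMPLE_MAX_TC)     /* tc == EXAMPLE_MAX_TC is out of bounds */
                return NULL;

        return &tbl[tc];
}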
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ba78c7481fa3..1222865cfb73 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -816,9 +816,10 @@ static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter) | |||
816 | 816 | ||
817 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { | 817 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { |
818 | qlcnic_disable_multi_tx(adapter); | 818 | qlcnic_disable_multi_tx(adapter); |
819 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; | ||
819 | 820 | ||
820 | err = qlcnic_enable_msi_legacy(adapter); | 821 | err = qlcnic_enable_msi_legacy(adapter); |
821 | if (!err) | 822 | if (err) |
822 | return err; | 823 | return err; |
823 | } | 824 | } |
824 | } | 825 | } |
@@ -3863,7 +3864,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, | |||
3863 | strcpy(buf, "Tx"); | 3864 | strcpy(buf, "Tx"); |
3864 | } | 3865 | } |
3865 | 3866 | ||
3866 | if (!qlcnic_use_msi_x && !qlcnic_use_msi) { | 3867 | if (!QLCNIC_IS_MSI_FAMILY(adapter)) { |
3867 | netdev_err(netdev, "No RSS/TSS support in INT-x mode\n"); | 3868 | netdev_err(netdev, "No RSS/TSS support in INT-x mode\n"); |
3868 | return -EINVAL; | 3869 | return -EINVAL; |
3869 | } | 3870 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 09acf15c3a56..e5277a632671 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | |||
@@ -13,8 +13,6 @@ | |||
13 | #define QLC_VF_MIN_TX_RATE 100 | 13 | #define QLC_VF_MIN_TX_RATE 100 |
14 | #define QLC_VF_MAX_TX_RATE 9999 | 14 | #define QLC_VF_MAX_TX_RATE 9999 |
15 | #define QLC_MAC_OPCODE_MASK 0x7 | 15 | #define QLC_MAC_OPCODE_MASK 0x7 |
16 | #define QLC_MAC_STAR_ADD 6 | ||
17 | #define QLC_MAC_STAR_DEL 7 | ||
18 | #define QLC_VF_FLOOD_BIT BIT_16 | 16 | #define QLC_VF_FLOOD_BIT BIT_16 |
19 | #define QLC_FLOOD_MODE 0x5 | 17 | #define QLC_FLOOD_MODE 0x5 |
20 | 18 | ||
@@ -1206,13 +1204,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter, | |||
1206 | struct qlcnic_vport *vp = vf->vp; | 1204 | struct qlcnic_vport *vp = vf->vp; |
1207 | u8 op, new_op; | 1205 | u8 op, new_op; |
1208 | 1206 | ||
1209 | if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) || | ||
1210 | ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) { | ||
1211 | netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n", | ||
1212 | vf->pci_func); | ||
1213 | return -EINVAL; | ||
1214 | } | ||
1215 | |||
1216 | if (!(cmd->req.arg[1] & BIT_8)) | 1207 | if (!(cmd->req.arg[1] & BIT_8)) |
1217 | return -EINVAL; | 1208 | return -EINVAL; |
1218 | 1209 | ||
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 91a67ae8f17b..e9779653cd4c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -7118,6 +7118,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7118 | } | 7118 | } |
7119 | 7119 | ||
7120 | mutex_init(&tp->wk.mutex); | 7120 | mutex_init(&tp->wk.mutex); |
7121 | u64_stats_init(&tp->rx_stats.syncp); | ||
7122 | u64_stats_init(&tp->tx_stats.syncp); | ||
7121 | 7123 | ||
7122 | /* Get MAC address */ | 7124 | /* Get MAC address */ |
7123 | for (i = 0; i < ETH_ALEN; i++) | 7125 | for (i = 0; i < ETH_ALEN; i++) |
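The r8169 change above adds the missing u64_stats_init() calls: the seqcount inside a u64_stats_sync must be initialised before the first writer or reader touches it. A hedged sketch of the full pattern that initialisation belongs to; the struct and function names are made up, only the u64_stats_* helpers are real.

#include <linux/u64_stats_sync.h>

struct example_stats {
        u64                     packets;
        u64                     bytes;
        struct u64_stats_sync   syncp;
};

static void example_stats_setup(struct example_stats *s)
{
        u64_stats_init(&s->syncp);              /* the step the fix adds */
}

static void example_stats_add(struct example_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}

static void example_stats_read(struct example_stats *s, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *packets = s->packets;
                *bytes   = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
}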
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index eb75fbd11a01..d7a36829649a 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c | |||
@@ -1668,6 +1668,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) | |||
1668 | struct efx_ptp_data *ptp = efx->ptp_data; | 1668 | struct efx_ptp_data *ptp = efx->ptp_data; |
1669 | int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); | 1669 | int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); |
1670 | 1670 | ||
1671 | if (!ptp) { | ||
1672 | if (net_ratelimit()) | ||
1673 | netif_warn(efx, drv, efx->net_dev, | ||
1674 | "Received PTP event but PTP not set up\n"); | ||
1675 | return; | ||
1676 | } | ||
1677 | |||
1671 | if (!ptp->enabled) | 1678 | if (!ptp->enabled) |
1672 | return; | 1679 | return; |
1673 | 1680 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a2e7d2c96e36..078ad0ec8593 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -1705,7 +1705,7 @@ static int stmmac_open(struct net_device *dev) | |||
1705 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); | 1705 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); |
1706 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); | 1706 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
1707 | 1707 | ||
1708 | alloc_dma_desc_resources(priv); | 1708 | ret = alloc_dma_desc_resources(priv); |
1709 | if (ret < 0) { | 1709 | if (ret < 0) { |
1710 | pr_err("%s: DMA descriptors allocation failed\n", __func__); | 1710 | pr_err("%s: DMA descriptors allocation failed\n", __func__); |
1711 | goto dma_desc_error; | 1711 | goto dma_desc_error; |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 651087b5c8da..ffd4d12acf6d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -1164,11 +1164,17 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) | |||
1164 | 1164 | ||
1165 | static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) | 1165 | static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) |
1166 | { | 1166 | { |
1167 | u32 slave_port; | ||
1168 | |||
1169 | slave_port = cpsw_get_slave_port(priv, slave->slave_num); | ||
1170 | |||
1167 | if (!slave->phy) | 1171 | if (!slave->phy) |
1168 | return; | 1172 | return; |
1169 | phy_stop(slave->phy); | 1173 | phy_stop(slave->phy); |
1170 | phy_disconnect(slave->phy); | 1174 | phy_disconnect(slave->phy); |
1171 | slave->phy = NULL; | 1175 | slave->phy = NULL; |
1176 | cpsw_ale_control_set(priv->ale, slave_port, | ||
1177 | ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); | ||
1172 | } | 1178 | } |
1173 | 1179 | ||
1174 | static int cpsw_ndo_open(struct net_device *ndev) | 1180 | static int cpsw_ndo_open(struct net_device *ndev) |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a5d21893670d..1831fb7cd017 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -506,6 +506,9 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) | |||
506 | static struct lock_class_key macvlan_netdev_xmit_lock_key; | 506 | static struct lock_class_key macvlan_netdev_xmit_lock_key; |
507 | static struct lock_class_key macvlan_netdev_addr_lock_key; | 507 | static struct lock_class_key macvlan_netdev_addr_lock_key; |
508 | 508 | ||
509 | #define ALWAYS_ON_FEATURES \ | ||
510 | (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX) | ||
511 | |||
509 | #define MACVLAN_FEATURES \ | 512 | #define MACVLAN_FEATURES \ |
510 | (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ | 513 | (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ |
511 | NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ | 514 | NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ |
@@ -539,7 +542,7 @@ static int macvlan_init(struct net_device *dev) | |||
539 | dev->state = (dev->state & ~MACVLAN_STATE_MASK) | | 542 | dev->state = (dev->state & ~MACVLAN_STATE_MASK) | |
540 | (lowerdev->state & MACVLAN_STATE_MASK); | 543 | (lowerdev->state & MACVLAN_STATE_MASK); |
541 | dev->features = lowerdev->features & MACVLAN_FEATURES; | 544 | dev->features = lowerdev->features & MACVLAN_FEATURES; |
542 | dev->features |= NETIF_F_LLTX; | 545 | dev->features |= ALWAYS_ON_FEATURES; |
543 | dev->gso_max_size = lowerdev->gso_max_size; | 546 | dev->gso_max_size = lowerdev->gso_max_size; |
544 | dev->iflink = lowerdev->ifindex; | 547 | dev->iflink = lowerdev->ifindex; |
545 | dev->hard_header_len = lowerdev->hard_header_len; | 548 | dev->hard_header_len = lowerdev->hard_header_len; |
@@ -699,7 +702,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, | |||
699 | features = netdev_increment_features(vlan->lowerdev->features, | 702 | features = netdev_increment_features(vlan->lowerdev->features, |
700 | features, | 703 | features, |
701 | mask); | 704 | mask); |
702 | features |= NETIF_F_LLTX; | 705 | features |= ALWAYS_ON_FEATURES; |
703 | 706 | ||
704 | return features; | 707 | return features; |
705 | } | 708 | } |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 82514e72b3d8..4b970f7624c0 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -916,6 +916,8 @@ int genphy_read_status(struct phy_device *phydev) | |||
916 | int err; | 916 | int err; |
917 | int lpa; | 917 | int lpa; |
918 | int lpagb = 0; | 918 | int lpagb = 0; |
919 | int common_adv; | ||
920 | int common_adv_gb = 0; | ||
919 | 921 | ||
920 | /* Update the link, but return if there was an error */ | 922 | /* Update the link, but return if there was an error */ |
921 | err = genphy_update_link(phydev); | 923 | err = genphy_update_link(phydev); |
@@ -937,7 +939,7 @@ int genphy_read_status(struct phy_device *phydev) | |||
937 | 939 | ||
938 | phydev->lp_advertising = | 940 | phydev->lp_advertising = |
939 | mii_stat1000_to_ethtool_lpa_t(lpagb); | 941 | mii_stat1000_to_ethtool_lpa_t(lpagb); |
940 | lpagb &= adv << 2; | 942 | common_adv_gb = lpagb & adv << 2; |
941 | } | 943 | } |
942 | 944 | ||
943 | lpa = phy_read(phydev, MII_LPA); | 945 | lpa = phy_read(phydev, MII_LPA); |
@@ -950,25 +952,25 @@ int genphy_read_status(struct phy_device *phydev) | |||
950 | if (adv < 0) | 952 | if (adv < 0) |
951 | return adv; | 953 | return adv; |
952 | 954 | ||
953 | lpa &= adv; | 955 | common_adv = lpa & adv; |
954 | 956 | ||
955 | phydev->speed = SPEED_10; | 957 | phydev->speed = SPEED_10; |
956 | phydev->duplex = DUPLEX_HALF; | 958 | phydev->duplex = DUPLEX_HALF; |
957 | phydev->pause = 0; | 959 | phydev->pause = 0; |
958 | phydev->asym_pause = 0; | 960 | phydev->asym_pause = 0; |
959 | 961 | ||
960 | if (lpagb & (LPA_1000FULL | LPA_1000HALF)) { | 962 | if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) { |
961 | phydev->speed = SPEED_1000; | 963 | phydev->speed = SPEED_1000; |
962 | 964 | ||
963 | if (lpagb & LPA_1000FULL) | 965 | if (common_adv_gb & LPA_1000FULL) |
964 | phydev->duplex = DUPLEX_FULL; | 966 | phydev->duplex = DUPLEX_FULL; |
965 | } else if (lpa & (LPA_100FULL | LPA_100HALF)) { | 967 | } else if (common_adv & (LPA_100FULL | LPA_100HALF)) { |
966 | phydev->speed = SPEED_100; | 968 | phydev->speed = SPEED_100; |
967 | 969 | ||
968 | if (lpa & LPA_100FULL) | 970 | if (common_adv & LPA_100FULL) |
969 | phydev->duplex = DUPLEX_FULL; | 971 | phydev->duplex = DUPLEX_FULL; |
970 | } else | 972 | } else |
971 | if (lpa & LPA_10FULL) | 973 | if (common_adv & LPA_10FULL) |
972 | phydev->duplex = DUPLEX_FULL; | 974 | phydev->duplex = DUPLEX_FULL; |
973 | 975 | ||
974 | if (phydev->duplex == DUPLEX_FULL) { | 976 | if (phydev->duplex == DUPLEX_FULL) { |
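The point of the common_adv/common_adv_gb variables introduced above is that the negotiated mode is the intersection of what both ends advertise, so speed and duplex must be resolved from lpa & adv rather than from the raw link-partner registers (which the old code also clobbered). A condensed, hedged sketch of that resolution order (1000, then 100, then 10); all names except the MII/ethtool constants are illustrative.

#include <linux/mii.h>
#include <linux/ethtool.h>

static void example_resolve_link(u32 adv, u32 lpa, u32 adv_gb, u32 lpa_gb,
                                 int *speed, int *duplex)
{
        u32 common    = lpa & adv;
        u32 common_gb = lpa_gb & (adv_gb << 2); /* align MII_CTRL1000 bits with MII_STAT1000 */

        *speed  = SPEED_10;
        *duplex = DUPLEX_HALF;

        if (common_gb & (LPA_1000FULL | LPA_1000HALF)) {
                *speed = SPEED_1000;
                if (common_gb & LPA_1000FULL)
                        *duplex = DUPLEX_FULL;
        } else if (common & (LPA_100FULL | LPA_100HALF)) {
                *speed = SPEED_100;
                if (common & LPA_100FULL)
                        *duplex = DUPLEX_FULL;
        } else if (common & LPA_10FULL) {
                *duplex = DUPLEX_FULL;
        }
}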
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8fe9cb7d0f72..26f8635b027d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1686,7 +1686,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
1686 | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | | 1686 | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | |
1687 | NETIF_F_HW_VLAN_STAG_TX; | 1687 | NETIF_F_HW_VLAN_STAG_TX; |
1688 | dev->features = dev->hw_features; | 1688 | dev->features = dev->hw_features; |
1689 | dev->vlan_features = dev->features; | 1689 | dev->vlan_features = dev->features & |
1690 | ~(NETIF_F_HW_VLAN_CTAG_TX | | ||
1691 | NETIF_F_HW_VLAN_STAG_TX); | ||
1690 | 1692 | ||
1691 | INIT_LIST_HEAD(&tun->disabled); | 1693 | INIT_LIST_HEAD(&tun->disabled); |
1692 | err = tun_attach(tun, file, false); | 1694 | err = tun_attach(tun, file, false); |
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 955df81a4358..d2e6fdb25e28 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c | |||
@@ -1395,6 +1395,19 @@ static const struct driver_info ax88178a_info = { | |||
1395 | .tx_fixup = ax88179_tx_fixup, | 1395 | .tx_fixup = ax88179_tx_fixup, |
1396 | }; | 1396 | }; |
1397 | 1397 | ||
1398 | static const struct driver_info dlink_dub1312_info = { | ||
1399 | .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", | ||
1400 | .bind = ax88179_bind, | ||
1401 | .unbind = ax88179_unbind, | ||
1402 | .status = ax88179_status, | ||
1403 | .link_reset = ax88179_link_reset, | ||
1404 | .reset = ax88179_reset, | ||
1405 | .stop = ax88179_stop, | ||
1406 | .flags = FLAG_ETHER | FLAG_FRAMING_AX, | ||
1407 | .rx_fixup = ax88179_rx_fixup, | ||
1408 | .tx_fixup = ax88179_tx_fixup, | ||
1409 | }; | ||
1410 | |||
1398 | static const struct driver_info sitecom_info = { | 1411 | static const struct driver_info sitecom_info = { |
1399 | .description = "Sitecom USB 3.0 to Gigabit Adapter", | 1412 | .description = "Sitecom USB 3.0 to Gigabit Adapter", |
1400 | .bind = ax88179_bind, | 1413 | .bind = ax88179_bind, |
@@ -1421,6 +1434,19 @@ static const struct driver_info samsung_info = { | |||
1421 | .tx_fixup = ax88179_tx_fixup, | 1434 | .tx_fixup = ax88179_tx_fixup, |
1422 | }; | 1435 | }; |
1423 | 1436 | ||
1437 | static const struct driver_info lenovo_info = { | ||
1438 | .description = "Lenovo OneLinkDock Gigabit LAN", | ||
1439 | .bind = ax88179_bind, | ||
1440 | .unbind = ax88179_unbind, | ||
1441 | .status = ax88179_status, | ||
1442 | .link_reset = ax88179_link_reset, | ||
1443 | .reset = ax88179_reset, | ||
1444 | .stop = ax88179_stop, | ||
1445 | .flags = FLAG_ETHER | FLAG_FRAMING_AX, | ||
1446 | .rx_fixup = ax88179_rx_fixup, | ||
1447 | .tx_fixup = ax88179_tx_fixup, | ||
1448 | }; | ||
1449 | |||
1424 | static const struct usb_device_id products[] = { | 1450 | static const struct usb_device_id products[] = { |
1425 | { | 1451 | { |
1426 | /* ASIX AX88179 10/100/1000 */ | 1452 | /* ASIX AX88179 10/100/1000 */ |
@@ -1431,6 +1457,10 @@ static const struct usb_device_id products[] = { | |||
1431 | USB_DEVICE(0x0b95, 0x178a), | 1457 | USB_DEVICE(0x0b95, 0x178a), |
1432 | .driver_info = (unsigned long)&ax88178a_info, | 1458 | .driver_info = (unsigned long)&ax88178a_info, |
1433 | }, { | 1459 | }, { |
1460 | /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ | ||
1461 | USB_DEVICE(0x2001, 0x4a00), | ||
1462 | .driver_info = (unsigned long)&dlink_dub1312_info, | ||
1463 | }, { | ||
1434 | /* Sitecom USB 3.0 to Gigabit Adapter */ | 1464 | /* Sitecom USB 3.0 to Gigabit Adapter */ |
1435 | USB_DEVICE(0x0df6, 0x0072), | 1465 | USB_DEVICE(0x0df6, 0x0072), |
1436 | .driver_info = (unsigned long)&sitecom_info, | 1466 | .driver_info = (unsigned long)&sitecom_info, |
@@ -1438,6 +1468,10 @@ static const struct usb_device_id products[] = { | |||
1438 | /* Samsung USB Ethernet Adapter */ | 1468 | /* Samsung USB Ethernet Adapter */ |
1439 | USB_DEVICE(0x04e8, 0xa100), | 1469 | USB_DEVICE(0x04e8, 0xa100), |
1440 | .driver_info = (unsigned long)&samsung_info, | 1470 | .driver_info = (unsigned long)&samsung_info, |
1471 | }, { | ||
1472 | /* Lenovo OneLinkDock Gigabit LAN */ | ||
1473 | USB_DEVICE(0x17ef, 0x304b), | ||
1474 | .driver_info = (unsigned long)&lenovo_info, | ||
1441 | }, | 1475 | }, |
1442 | { }, | 1476 | { }, |
1443 | }; | 1477 | }; |
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 2ec2041b62d4..5b374370f71c 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
@@ -285,7 +285,8 @@ static void veth_setup(struct net_device *dev) | |||
285 | dev->ethtool_ops = &veth_ethtool_ops; | 285 | dev->ethtool_ops = &veth_ethtool_ops; |
286 | dev->features |= NETIF_F_LLTX; | 286 | dev->features |= NETIF_F_LLTX; |
287 | dev->features |= VETH_FEATURES; | 287 | dev->features |= VETH_FEATURES; |
288 | dev->vlan_features = dev->features; | 288 | dev->vlan_features = dev->features & |
289 | ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); | ||
289 | dev->destructor = veth_dev_free; | 290 | dev->destructor = veth_dev_free; |
290 | 291 | ||
291 | dev->hw_features = VETH_FEATURES; | 292 | dev->hw_features = VETH_FEATURES; |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d75f8edf4fb3..5632a99cbbd2 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -1711,7 +1711,8 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1711 | /* If we can receive ANY GSO packets, we must allocate large ones. */ | 1711 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
1712 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || | 1712 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
1713 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || | 1713 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
1714 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) | 1714 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
1715 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) | ||
1715 | vi->big_packets = true; | 1716 | vi->big_packets = true; |
1716 | 1717 | ||
1717 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) | 1718 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index 1cc13569b17b..1b6b4d0cfa97 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h | |||
@@ -57,7 +57,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { | |||
57 | {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, | 57 | {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, |
58 | {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 58 | {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
59 | {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, | 59 | {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, |
60 | {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, | 60 | {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, |
61 | {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, | 61 | {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, |
62 | {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, | 62 | {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, |
63 | {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, | 63 | {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, |
@@ -96,7 +96,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { | |||
96 | {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, | 96 | {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, |
97 | {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 97 | {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
98 | {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, | 98 | {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, |
99 | {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, | 99 | {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa}, |
100 | {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, | 100 | {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, |
101 | }; | 101 | }; |
102 | 102 | ||
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 11eab9f01fd8..303ce27964c1 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -1534,7 +1534,7 @@ EXPORT_SYMBOL(ath9k_hw_check_nav); | |||
1534 | bool ath9k_hw_check_alive(struct ath_hw *ah) | 1534 | bool ath9k_hw_check_alive(struct ath_hw *ah) |
1535 | { | 1535 | { |
1536 | int count = 50; | 1536 | int count = 50; |
1537 | u32 reg; | 1537 | u32 reg, last_val; |
1538 | 1538 | ||
1539 | if (AR_SREV_9300(ah)) | 1539 | if (AR_SREV_9300(ah)) |
1540 | return !ath9k_hw_detect_mac_hang(ah); | 1540 | return !ath9k_hw_detect_mac_hang(ah); |
@@ -1542,9 +1542,13 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) | |||
1542 | if (AR_SREV_9285_12_OR_LATER(ah)) | 1542 | if (AR_SREV_9285_12_OR_LATER(ah)) |
1543 | return true; | 1543 | return true; |
1544 | 1544 | ||
1545 | last_val = REG_READ(ah, AR_OBS_BUS_1); | ||
1545 | do { | 1546 | do { |
1546 | reg = REG_READ(ah, AR_OBS_BUS_1); | 1547 | reg = REG_READ(ah, AR_OBS_BUS_1); |
1548 | if (reg != last_val) | ||
1549 | return true; | ||
1547 | 1550 | ||
1551 | last_val = reg; | ||
1548 | if ((reg & 0x7E7FFFEF) == 0x00702400) | 1552 | if ((reg & 0x7E7FFFEF) == 0x00702400) |
1549 | continue; | 1553 | continue; |
1550 | 1554 | ||
@@ -1556,6 +1560,8 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) | |||
1556 | default: | 1560 | default: |
1557 | return true; | 1561 | return true; |
1558 | } | 1562 | } |
1563 | |||
1564 | udelay(1); | ||
1559 | } while (count-- > 0); | 1565 | } while (count-- > 0); |
1560 | 1566 | ||
1561 | return false; | 1567 | return false; |
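The ath9k_hw_check_alive() change above turns the check into a liveness test: poll an observation register and treat any change between consecutive reads as proof the MAC state machine is still advancing; only a value frozen for the whole poll window counts as a hang. A sketch of that idea with hypothetical register name and read helper:

#include <linux/types.h>
#include <linux/delay.h>

#define EXAMPLE_OBS_REG 0x100           /* hypothetical observation register */

struct example_hw;
u32 example_reg_read(struct example_hw *ah, u32 reg);   /* hypothetical accessor */

static bool example_mac_alive(struct example_hw *ah)
{
        int count = 50;
        u32 last = example_reg_read(ah, EXAMPLE_OBS_REG);

        do {
                u32 reg = example_reg_read(ah, EXAMPLE_OBS_REG);

                if (reg != last)
                        return true;    /* observed state changed: MAC is running */
                last = reg;

                udelay(1);              /* let the state machine advance */
        } while (count-- > 0);

        return false;                   /* frozen for the whole window: hung */
}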
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index a0ebdd000fc2..82e340d3ec60 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -732,11 +732,18 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc, | |||
732 | return NULL; | 732 | return NULL; |
733 | 733 | ||
734 | /* | 734 | /* |
735 | * mark descriptor as zero-length and set the 'more' | 735 | * Re-check previous descriptor, in case it has been filled |
736 | * flag to ensure that both buffers get discarded | 736 | * in the mean time. |
737 | */ | 737 | */ |
738 | rs->rs_datalen = 0; | 738 | ret = ath9k_hw_rxprocdesc(ah, ds, rs); |
739 | rs->rs_more = true; | 739 | if (ret == -EINPROGRESS) { |
740 | /* | ||
741 | * mark descriptor as zero-length and set the 'more' | ||
742 | * flag to ensure that both buffers get discarded | ||
743 | */ | ||
744 | rs->rs_datalen = 0; | ||
745 | rs->rs_more = true; | ||
746 | } | ||
740 | } | 747 | } |
741 | 748 | ||
742 | list_del(&bf->list); | 749 | list_del(&bf->list); |
@@ -985,32 +992,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
985 | struct ath_common *common = ath9k_hw_common(ah); | 992 | struct ath_common *common = ath9k_hw_common(ah); |
986 | struct ieee80211_hdr *hdr; | 993 | struct ieee80211_hdr *hdr; |
987 | bool discard_current = sc->rx.discard_next; | 994 | bool discard_current = sc->rx.discard_next; |
988 | int ret = 0; | ||
989 | 995 | ||
990 | /* | 996 | /* |
991 | * Discard corrupt descriptors which are marked in | 997 | * Discard corrupt descriptors which are marked in |
992 | * ath_get_next_rx_buf(). | 998 | * ath_get_next_rx_buf(). |
993 | */ | 999 | */ |
994 | sc->rx.discard_next = rx_stats->rs_more; | ||
995 | if (discard_current) | 1000 | if (discard_current) |
996 | return -EINVAL; | 1001 | goto corrupt; |
1002 | |||
1003 | sc->rx.discard_next = false; | ||
997 | 1004 | ||
998 | /* | 1005 | /* |
999 | * Discard zero-length packets. | 1006 | * Discard zero-length packets. |
1000 | */ | 1007 | */ |
1001 | if (!rx_stats->rs_datalen) { | 1008 | if (!rx_stats->rs_datalen) { |
1002 | RX_STAT_INC(rx_len_err); | 1009 | RX_STAT_INC(rx_len_err); |
1003 | return -EINVAL; | 1010 | goto corrupt; |
1004 | } | 1011 | } |
1005 | 1012 | ||
1006 | /* | 1013 | /* |
1007 | * rs_status follows rs_datalen so if rs_datalen is too large | 1014 | * rs_status follows rs_datalen so if rs_datalen is too large |
1008 | * we can take a hint that hardware corrupted it, so ignore | 1015 | * we can take a hint that hardware corrupted it, so ignore |
1009 | * those frames. | 1016 | * those frames. |
1010 | */ | 1017 | */ |
1011 | if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { | 1018 | if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { |
1012 | RX_STAT_INC(rx_len_err); | 1019 | RX_STAT_INC(rx_len_err); |
1013 | return -EINVAL; | 1020 | goto corrupt; |
1014 | } | 1021 | } |
1015 | 1022 | ||
1016 | /* Only use status info from the last fragment */ | 1023 | /* Only use status info from the last fragment */ |
@@ -1024,10 +1031,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
1024 | * This is different from the other corrupt descriptor | 1031 | * This is different from the other corrupt descriptor |
1025 | * condition handled above. | 1032 | * condition handled above. |
1026 | */ | 1033 | */ |
1027 | if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) { | 1034 | if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) |
1028 | ret = -EINVAL; | 1035 | goto corrupt; |
1029 | goto exit; | ||
1030 | } | ||
1031 | 1036 | ||
1032 | hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); | 1037 | hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); |
1033 | 1038 | ||
@@ -1043,18 +1048,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
1043 | if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) | 1048 | if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) |
1044 | RX_STAT_INC(rx_spectral); | 1049 | RX_STAT_INC(rx_spectral); |
1045 | 1050 | ||
1046 | ret = -EINVAL; | 1051 | return -EINVAL; |
1047 | goto exit; | ||
1048 | } | 1052 | } |
1049 | 1053 | ||
1050 | /* | 1054 | /* |
1051 | * everything but the rate is checked here, the rate check is done | 1055 | * everything but the rate is checked here, the rate check is done |
1052 | * separately to avoid doing two lookups for a rate for each frame. | 1056 | * separately to avoid doing two lookups for a rate for each frame. |
1053 | */ | 1057 | */ |
1054 | if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) { | 1058 | if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) |
1055 | ret = -EINVAL; | 1059 | return -EINVAL; |
1056 | goto exit; | ||
1057 | } | ||
1058 | 1060 | ||
1059 | if (ath_is_mybeacon(common, hdr)) { | 1061 | if (ath_is_mybeacon(common, hdr)) { |
1060 | RX_STAT_INC(rx_beacons); | 1062 | RX_STAT_INC(rx_beacons); |
@@ -1064,15 +1066,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
1064 | /* | 1066 | /* |
1065 | * This shouldn't happen, but have a safety check anyway. | 1067 | * This shouldn't happen, but have a safety check anyway. |
1066 | */ | 1068 | */ |
1067 | if (WARN_ON(!ah->curchan)) { | 1069 | if (WARN_ON(!ah->curchan)) |
1068 | ret = -EINVAL; | 1070 | return -EINVAL; |
1069 | goto exit; | ||
1070 | } | ||
1071 | 1071 | ||
1072 | if (ath9k_process_rate(common, hw, rx_stats, rx_status)) { | 1072 | if (ath9k_process_rate(common, hw, rx_stats, rx_status)) |
1073 | ret =-EINVAL; | 1073 | return -EINVAL; |
1074 | goto exit; | ||
1075 | } | ||
1076 | 1074 | ||
1077 | ath9k_process_rssi(common, hw, rx_stats, rx_status); | 1075 | ath9k_process_rssi(common, hw, rx_stats, rx_status); |
1078 | 1076 | ||
@@ -1087,9 +1085,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
1087 | sc->rx.num_pkts++; | 1085 | sc->rx.num_pkts++; |
1088 | #endif | 1086 | #endif |
1089 | 1087 | ||
1090 | exit: | 1088 | return 0; |
1091 | sc->rx.discard_next = false; | 1089 | |
1092 | return ret; | 1090 | corrupt: |
1091 | sc->rx.discard_next = rx_stats->rs_more; | ||
1092 | return -EINVAL; | ||
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | static void ath9k_rx_skb_postprocess(struct ath_common *common, | 1095 | static void ath9k_rx_skb_postprocess(struct ath_common *common, |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 0a75e2f68c9d..f042a18c8495 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1444,14 +1444,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, | |||
1444 | for (tidno = 0, tid = &an->tid[tidno]; | 1444 | for (tidno = 0, tid = &an->tid[tidno]; |
1445 | tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { | 1445 | tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { |
1446 | 1446 | ||
1447 | if (!tid->sched) | ||
1448 | continue; | ||
1449 | |||
1450 | ac = tid->ac; | 1447 | ac = tid->ac; |
1451 | txq = ac->txq; | 1448 | txq = ac->txq; |
1452 | 1449 | ||
1453 | ath_txq_lock(sc, txq); | 1450 | ath_txq_lock(sc, txq); |
1454 | 1451 | ||
1452 | if (!tid->sched) { | ||
1453 | ath_txq_unlock(sc, txq); | ||
1454 | continue; | ||
1455 | } | ||
1456 | |||
1455 | buffered = ath_tid_has_buffered(tid); | 1457 | buffered = ath_tid_has_buffered(tid); |
1456 | 1458 | ||
1457 | tid->sched = false; | 1459 | tid->sched = false; |
@@ -2184,14 +2186,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2184 | txq->stopped = true; | 2186 | txq->stopped = true; |
2185 | } | 2187 | } |
2186 | 2188 | ||
2189 | if (txctl->an) | ||
2190 | tid = ath_get_skb_tid(sc, txctl->an, skb); | ||
2191 | |||
2187 | if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { | 2192 | if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { |
2188 | ath_txq_unlock(sc, txq); | 2193 | ath_txq_unlock(sc, txq); |
2189 | txq = sc->tx.uapsdq; | 2194 | txq = sc->tx.uapsdq; |
2190 | ath_txq_lock(sc, txq); | 2195 | ath_txq_lock(sc, txq); |
2191 | } else if (txctl->an && | 2196 | } else if (txctl->an && |
2192 | ieee80211_is_data_present(hdr->frame_control)) { | 2197 | ieee80211_is_data_present(hdr->frame_control)) { |
2193 | tid = ath_get_skb_tid(sc, txctl->an, skb); | ||
2194 | |||
2195 | WARN_ON(tid->ac->txq != txctl->txq); | 2198 | WARN_ON(tid->ac->txq != txctl->txq); |
2196 | 2199 | ||
2197 | if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) | 2200 | if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) |
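The xmit.c hunk above closes a race: tid->sched is protected by the TX-queue lock, so testing it before ath_txq_lock() could act on a stale value. The pattern is to take the lock first, re-check the flag, and bail out (dropping the lock) when the work is no longer needed. A generic sketch with hypothetical structures:

#include <linux/spinlock.h>

struct example_queue {
        spinlock_t lock;
};

struct example_tid {
        bool sched;
};

static void example_handle_if_scheduled(struct example_queue *q,
                                        struct example_tid *tid)
{
        spin_lock_bh(&q->lock);

        if (!tid->sched) {              /* checked under the lock, not before it */
                spin_unlock_bh(&q->lock);
                return;
        }

        tid->sched = false;
        /* ... operate on the tid while still holding the lock ... */

        spin_unlock_bh(&q->lock);
}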
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 3e991897d7ca..119ee6eaf1c3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | |||
@@ -457,7 +457,6 @@ struct brcmf_sdio { | |||
457 | 457 | ||
458 | u8 tx_hdrlen; /* sdio bus header length for tx packet */ | 458 | u8 tx_hdrlen; /* sdio bus header length for tx packet */ |
459 | bool txglom; /* host tx glomming enable flag */ | 459 | bool txglom; /* host tx glomming enable flag */ |
460 | struct sk_buff *txglom_sgpad; /* scatter-gather padding buffer */ | ||
461 | u16 head_align; /* buffer pointer alignment */ | 460 | u16 head_align; /* buffer pointer alignment */ |
462 | u16 sgentry_align; /* scatter-gather buffer alignment */ | 461 | u16 sgentry_align; /* scatter-gather buffer alignment */ |
463 | }; | 462 | }; |
@@ -1944,9 +1943,8 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus, | |||
1944 | if (lastfrm && chain_pad) | 1943 | if (lastfrm && chain_pad) |
1945 | tail_pad += blksize - chain_pad; | 1944 | tail_pad += blksize - chain_pad; |
1946 | if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) { | 1945 | if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) { |
1947 | pkt_pad = bus->txglom_sgpad; | 1946 | pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop + |
1948 | if (pkt_pad == NULL) | 1947 | bus->head_align); |
1949 | brcmu_pkt_buf_get_skb(tail_pad + tail_chop); | ||
1950 | if (pkt_pad == NULL) | 1948 | if (pkt_pad == NULL) |
1951 | return -ENOMEM; | 1949 | return -ENOMEM; |
1952 | ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); | 1950 | ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); |
@@ -1957,6 +1955,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus, | |||
1957 | tail_chop); | 1955 | tail_chop); |
1958 | *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop; | 1956 | *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop; |
1959 | skb_trim(pkt, pkt->len - tail_chop); | 1957 | skb_trim(pkt, pkt->len - tail_chop); |
1958 | skb_trim(pkt_pad, tail_pad + tail_chop); | ||
1960 | __skb_queue_after(pktq, pkt, pkt_pad); | 1959 | __skb_queue_after(pktq, pkt, pkt_pad); |
1961 | } else { | 1960 | } else { |
1962 | ntail = pkt->data_len + tail_pad - | 1961 | ntail = pkt->data_len + tail_pad - |
@@ -2011,7 +2010,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, | |||
2011 | return ret; | 2010 | return ret; |
2012 | head_pad = (u16)ret; | 2011 | head_pad = (u16)ret; |
2013 | if (head_pad) | 2012 | if (head_pad) |
2014 | memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen); | 2013 | memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad); |
2015 | 2014 | ||
2016 | total_len += pkt_next->len; | 2015 | total_len += pkt_next->len; |
2017 | 2016 | ||
@@ -3486,10 +3485,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev) | |||
3486 | bus->txglom = false; | 3485 | bus->txglom = false; |
3487 | value = 1; | 3486 | value = 1; |
3488 | pad_size = bus->sdiodev->func[2]->cur_blksize << 1; | 3487 | pad_size = bus->sdiodev->func[2]->cur_blksize << 1; |
3489 | bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size); | ||
3490 | if (!bus->txglom_sgpad) | ||
3491 | brcmf_err("allocating txglom padding skb failed, reduced performance\n"); | ||
3492 | |||
3493 | err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", | 3488 | err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", |
3494 | &value, sizeof(u32)); | 3489 | &value, sizeof(u32)); |
3495 | if (err < 0) { | 3490 | if (err < 0) { |
@@ -4053,7 +4048,6 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) | |||
4053 | brcmf_sdio_chip_detach(&bus->ci); | 4048 | brcmf_sdio_chip_detach(&bus->ci); |
4054 | } | 4049 | } |
4055 | 4050 | ||
4056 | brcmu_pkt_buf_free_skb(bus->txglom_sgpad); | ||
4057 | kfree(bus->rxbuf); | 4051 | kfree(bus->rxbuf); |
4058 | kfree(bus->hdrbuf); | 4052 | kfree(bus->hdrbuf); |
4059 | kfree(bus); | 4053 | kfree(bus); |
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index d36e252d2ccb..596525528f50 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c | |||
@@ -147,7 +147,7 @@ static void ap_free_sta(struct ap_data *ap, struct sta_info *sta) | |||
147 | 147 | ||
148 | if (!sta->ap && sta->u.sta.challenge) | 148 | if (!sta->ap && sta->u.sta.challenge) |
149 | kfree(sta->u.sta.challenge); | 149 | kfree(sta->u.sta.challenge); |
150 | del_timer(&sta->timer); | 150 | del_timer_sync(&sta->timer); |
151 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ | 151 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ |
152 | 152 | ||
153 | kfree(sta); | 153 | kfree(sta); |
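The hostap one-liner above matters because del_timer() only cancels a timer that has not fired yet, while del_timer_sync() additionally waits for a handler already running on another CPU; without that, kfree(sta) can race with the handler and free memory it still dereferences. A minimal sketch of the safe teardown order (struct and field names hypothetical):

#include <linux/timer.h>
#include <linux/slab.h>

struct example_sta {
        struct timer_list timer;
        /* ... per-station state the timer handler touches ... */
};

static void example_free_sta(struct example_sta *sta)
{
        del_timer_sync(&sta->timer);    /* waits for a running handler too */
        kfree(sta);                     /* only now is the memory safe to free */
}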
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index c0d070c5df5e..9cdd91cdf661 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c | |||
@@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id, | |||
590 | sizeof(priv->tid_data[sta_id][tid])); | 590 | sizeof(priv->tid_data[sta_id][tid])); |
591 | 591 | ||
592 | priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; | 592 | priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; |
593 | priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; | ||
593 | 594 | ||
594 | priv->num_stations--; | 595 | priv->num_stations--; |
595 | 596 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index a6839dfcb82d..398dd096674c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c | |||
@@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, | |||
1291 | struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; | 1291 | struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; |
1292 | struct iwl_ht_agg *agg; | 1292 | struct iwl_ht_agg *agg; |
1293 | struct sk_buff_head reclaimed_skbs; | 1293 | struct sk_buff_head reclaimed_skbs; |
1294 | struct ieee80211_tx_info *info; | ||
1295 | struct ieee80211_hdr *hdr; | ||
1296 | struct sk_buff *skb; | 1294 | struct sk_buff *skb; |
1297 | int sta_id; | 1295 | int sta_id; |
1298 | int tid; | 1296 | int tid; |
@@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, | |||
1379 | freed = 0; | 1377 | freed = 0; |
1380 | 1378 | ||
1381 | skb_queue_walk(&reclaimed_skbs, skb) { | 1379 | skb_queue_walk(&reclaimed_skbs, skb) { |
1382 | hdr = (struct ieee80211_hdr *)skb->data; | 1380 | struct ieee80211_hdr *hdr = (void *)skb->data; |
1381 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1383 | 1382 | ||
1384 | if (ieee80211_is_data_qos(hdr->frame_control)) | 1383 | if (ieee80211_is_data_qos(hdr->frame_control)) |
1385 | freed++; | 1384 | freed++; |
1386 | else | 1385 | else |
1387 | WARN_ON_ONCE(1); | 1386 | WARN_ON_ONCE(1); |
1388 | 1387 | ||
1389 | info = IEEE80211_SKB_CB(skb); | ||
1390 | iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); | 1388 | iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); |
1391 | 1389 | ||
1390 | memset(&info->status, 0, sizeof(info->status)); | ||
1391 | /* Packet was transmitted successfully, failures come as single | ||
1392 | * frames because before failing a frame the firmware transmits | ||
1393 | * it without aggregation at least once. | ||
1394 | */ | ||
1395 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
1396 | |||
1392 | if (freed == 1) { | 1397 | if (freed == 1) { |
1393 | /* this is the first skb we deliver in this batch */ | 1398 | /* this is the first skb we deliver in this batch */ |
1394 | /* put the rate scaling data there */ | 1399 | /* put the rate scaling data there */ |
1395 | info = IEEE80211_SKB_CB(skb); | 1400 | info = IEEE80211_SKB_CB(skb); |
1396 | memset(&info->status, 0, sizeof(info->status)); | 1401 | memset(&info->status, 0, sizeof(info->status)); |
1397 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
1398 | info->flags |= IEEE80211_TX_STAT_AMPDU; | 1402 | info->flags |= IEEE80211_TX_STAT_AMPDU; |
1399 | info->status.ampdu_ack_len = ba_resp->txed_2_done; | 1403 | info->status.ampdu_ack_len = ba_resp->txed_2_done; |
1400 | info->status.ampdu_len = ba_resp->txed; | 1404 | info->status.ampdu_len = ba_resp->txed; |
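The comment added above carries the reasoning: any frame handed back under a compressed block-ack was transmitted successfully, because the firmware retries failures as single non-aggregated frames before reporting them, so every reclaimed skb can be stamped with IEEE80211_TX_STAT_ACK rather than only the first one. A small sketch of walking the reclaimed queue that way; only the mac80211 and skb helpers are real symbols.

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/mac80211.h>

static void example_ack_reclaimed(struct sk_buff_head *reclaimed)
{
        struct sk_buff *skb;

        skb_queue_walk(reclaimed, skb) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

                memset(&info->status, 0, sizeof(info->status));
                /* reclaimed under a BA: the frame made it to the peer */
                info->flags |= IEEE80211_TX_STAT_ACK;
        }
}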
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index e4ead86f06d6..2b0ba1fc3c82 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h | |||
@@ -152,7 +152,7 @@ enum iwl_power_scheme { | |||
152 | IWL_POWER_SCHEME_LP | 152 | IWL_POWER_SCHEME_LP |
153 | }; | 153 | }; |
154 | 154 | ||
155 | #define IWL_CONN_MAX_LISTEN_INTERVAL 70 | 155 | #define IWL_CONN_MAX_LISTEN_INTERVAL 10 |
156 | #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ | 156 | #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ |
157 | IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ | 157 | IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ |
158 | IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ | 158 | IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ |
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4df12fa9d336..76ee486039d7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -822,16 +822,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
822 | struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; | 822 | struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; |
823 | struct sk_buff_head reclaimed_skbs; | 823 | struct sk_buff_head reclaimed_skbs; |
824 | struct iwl_mvm_tid_data *tid_data; | 824 | struct iwl_mvm_tid_data *tid_data; |
825 | struct ieee80211_tx_info *info; | ||
826 | struct ieee80211_sta *sta; | 825 | struct ieee80211_sta *sta; |
827 | struct iwl_mvm_sta *mvmsta; | 826 | struct iwl_mvm_sta *mvmsta; |
828 | struct ieee80211_hdr *hdr; | ||
829 | struct sk_buff *skb; | 827 | struct sk_buff *skb; |
830 | int sta_id, tid, freed; | 828 | int sta_id, tid, freed; |
831 | |||
832 | /* "flow" corresponds to Tx queue */ | 829 | /* "flow" corresponds to Tx queue */ |
833 | u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); | 830 | u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); |
834 | |||
835 | /* "ssn" is start of block-ack Tx window, corresponds to index | 831 | /* "ssn" is start of block-ack Tx window, corresponds to index |
836 | * (in Tx queue's circular buffer) of first TFD/frame in window */ | 832 | * (in Tx queue's circular buffer) of first TFD/frame in window */ |
837 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); | 833 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); |
@@ -888,22 +884,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
888 | freed = 0; | 884 | freed = 0; |
889 | 885 | ||
890 | skb_queue_walk(&reclaimed_skbs, skb) { | 886 | skb_queue_walk(&reclaimed_skbs, skb) { |
891 | hdr = (struct ieee80211_hdr *)skb->data; | 887 | struct ieee80211_hdr *hdr = (void *)skb->data; |
888 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
892 | 889 | ||
893 | if (ieee80211_is_data_qos(hdr->frame_control)) | 890 | if (ieee80211_is_data_qos(hdr->frame_control)) |
894 | freed++; | 891 | freed++; |
895 | else | 892 | else |
896 | WARN_ON_ONCE(1); | 893 | WARN_ON_ONCE(1); |
897 | 894 | ||
898 | info = IEEE80211_SKB_CB(skb); | ||
899 | iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); | 895 | iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); |
900 | 896 | ||
897 | memset(&info->status, 0, sizeof(info->status)); | ||
898 | /* Packet was transmitted successfully, failures come as single | ||
899 | * frames because before failing a frame the firmware transmits | ||
900 | * it without aggregation at least once. | ||
901 | */ | ||
902 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
903 | |||
901 | if (freed == 1) { | 904 | if (freed == 1) { |
902 | /* this is the first skb we deliver in this batch */ | 905 | /* this is the first skb we deliver in this batch */ |
903 | /* put the rate scaling data there */ | 906 | /* put the rate scaling data there */ |
904 | info = IEEE80211_SKB_CB(skb); | ||
905 | memset(&info->status, 0, sizeof(info->status)); | ||
906 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
907 | info->flags |= IEEE80211_TX_STAT_AMPDU; | 907 | info->flags |= IEEE80211_TX_STAT_AMPDU; |
908 | info->status.ampdu_ack_len = ba_notif->txed_2_done; | 908 | info->status.ampdu_ack_len = ba_notif->txed_2_done; |
909 | info->status.ampdu_len = ba_notif->txed; | 909 | info->status.ampdu_len = ba_notif->txed; |
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 32f75007a825..cb6d189bc3e6 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c | |||
@@ -621,7 +621,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, | |||
621 | id = *pos++; | 621 | id = *pos++; |
622 | elen = *pos++; | 622 | elen = *pos++; |
623 | left -= 2; | 623 | left -= 2; |
624 | if (elen > left || elen == 0) { | 624 | if (elen > left) { |
625 | lbs_deb_scan("scan response: invalid IE fmt\n"); | 625 | lbs_deb_scan("scan response: invalid IE fmt\n"); |
626 | goto done; | 626 | goto done; |
627 | } | 627 | } |
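The libertas fix above stops treating elen == 0 as malformed; a zero-length information element is valid 802.11 TLV encoding, and only an element that overruns the remaining buffer should abort the walk. A generic id/length/value walk under that rule (illustrative only):

#include <linux/types.h>
#include <linux/printk.h>

static void example_walk_ies(const u8 *pos, int left)
{
        while (left >= 2) {
                u8 id   = pos[0];
                u8 elen = pos[1];

                pos  += 2;
                left -= 2;

                if (elen > left)        /* element overruns the buffer: stop */
                        break;

                pr_debug("ie id %u len %u\n", id, elen);
                /* an elen of 0 is legal and simply carries no payload */

                pos  += elen;
                left -= elen;
        }
}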
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 03688aa14e8a..7fe7b53fb17a 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c | |||
@@ -1211,6 +1211,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) | |||
1211 | rd_index = card->rxbd_rdptr & reg->rx_mask; | 1211 | rd_index = card->rxbd_rdptr & reg->rx_mask; |
1212 | skb_data = card->rx_buf_list[rd_index]; | 1212 | skb_data = card->rx_buf_list[rd_index]; |
1213 | 1213 | ||
1214 | /* If skb allocation was failed earlier for Rx packet, | ||
1215 | * rx_buf_list[rd_index] would have been left with a NULL. | ||
1216 | */ | ||
1217 | if (!skb_data) | ||
1218 | return -ENOMEM; | ||
1219 | |||
1214 | MWIFIEX_SKB_PACB(skb_data, &buf_pa); | 1220 | MWIFIEX_SKB_PACB(skb_data, &buf_pa); |
1215 | pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE, | 1221 | pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE, |
1216 | PCI_DMA_FROMDEVICE); | 1222 | PCI_DMA_FROMDEVICE); |
@@ -1525,6 +1531,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) | |||
1525 | if (adapter->ps_state == PS_STATE_SLEEP_CFM) { | 1531 | if (adapter->ps_state == PS_STATE_SLEEP_CFM) { |
1526 | mwifiex_process_sleep_confirm_resp(adapter, skb->data, | 1532 | mwifiex_process_sleep_confirm_resp(adapter, skb->data, |
1527 | skb->len); | 1533 | skb->len); |
1534 | mwifiex_pcie_enable_host_int(adapter); | ||
1535 | if (mwifiex_write_reg(adapter, | ||
1536 | PCIE_CPU_INT_EVENT, | ||
1537 | CPU_INTR_SLEEP_CFM_DONE)) { | ||
1538 | dev_warn(adapter->dev, | ||
1539 | "Write register failed\n"); | ||
1540 | return -1; | ||
1541 | } | ||
1528 | while (reg->sleep_cookie && (count++ < 10) && | 1542 | while (reg->sleep_cookie && (count++ < 10) && |
1529 | mwifiex_pcie_ok_to_access_hw(adapter)) | 1543 | mwifiex_pcie_ok_to_access_hw(adapter)) |
1530 | usleep_range(50, 60); | 1544 | usleep_range(50, 60); |
@@ -1993,23 +2007,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) | |||
1993 | adapter->int_status |= pcie_ireg; | 2007 | adapter->int_status |= pcie_ireg; |
1994 | spin_unlock_irqrestore(&adapter->int_lock, flags); | 2008 | spin_unlock_irqrestore(&adapter->int_lock, flags); |
1995 | 2009 | ||
1996 | if (pcie_ireg & HOST_INTR_CMD_DONE) { | 2010 | if (!adapter->pps_uapsd_mode && |
1997 | if ((adapter->ps_state == PS_STATE_SLEEP_CFM) || | 2011 | adapter->ps_state == PS_STATE_SLEEP && |
1998 | (adapter->ps_state == PS_STATE_SLEEP)) { | 2012 | mwifiex_pcie_ok_to_access_hw(adapter)) { |
1999 | mwifiex_pcie_enable_host_int(adapter); | ||
2000 | if (mwifiex_write_reg(adapter, | ||
2001 | PCIE_CPU_INT_EVENT, | ||
2002 | CPU_INTR_SLEEP_CFM_DONE) | ||
2003 | ) { | ||
2004 | dev_warn(adapter->dev, | ||
2005 | "Write register failed\n"); | ||
2006 | return; | ||
2007 | |||
2008 | } | ||
2009 | } | ||
2010 | } else if (!adapter->pps_uapsd_mode && | ||
2011 | adapter->ps_state == PS_STATE_SLEEP && | ||
2012 | mwifiex_pcie_ok_to_access_hw(adapter)) { | ||
2013 | /* Potentially for PCIe we could get other | 2013 | /* Potentially for PCIe we could get other |
2014 | * interrupts like shared. Don't change power | 2014 | * interrupts like shared. Don't change power |
2015 | * state until cookie is set */ | 2015 | * state until cookie is set */ |
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index e8ebbd4bc3cd..208748804a55 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c | |||
@@ -22,8 +22,6 @@ | |||
22 | 22 | ||
23 | #define USB_VERSION "1.0" | 23 | #define USB_VERSION "1.0" |
24 | 24 | ||
25 | static const char usbdriver_name[] = "usb8xxx"; | ||
26 | |||
27 | static struct mwifiex_if_ops usb_ops; | 25 | static struct mwifiex_if_ops usb_ops; |
28 | static struct semaphore add_remove_card_sem; | 26 | static struct semaphore add_remove_card_sem; |
29 | static struct usb_card_rec *usb_card; | 27 | static struct usb_card_rec *usb_card; |
@@ -527,13 +525,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf) | |||
527 | MWIFIEX_BSS_ROLE_ANY), | 525 | MWIFIEX_BSS_ROLE_ANY), |
528 | MWIFIEX_ASYNC_CMD); | 526 | MWIFIEX_ASYNC_CMD); |
529 | 527 | ||
530 | #ifdef CONFIG_PM | ||
531 | /* Resume handler may be called due to remote wakeup, | ||
532 | * force to exit suspend anyway | ||
533 | */ | ||
534 | usb_disable_autosuspend(card->udev); | ||
535 | #endif /* CONFIG_PM */ | ||
536 | |||
537 | return 0; | 528 | return 0; |
538 | } | 529 | } |
539 | 530 | ||
@@ -567,13 +558,12 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) | |||
567 | } | 558 | } |
568 | 559 | ||
569 | static struct usb_driver mwifiex_usb_driver = { | 560 | static struct usb_driver mwifiex_usb_driver = { |
570 | .name = usbdriver_name, | 561 | .name = "mwifiex_usb", |
571 | .probe = mwifiex_usb_probe, | 562 | .probe = mwifiex_usb_probe, |
572 | .disconnect = mwifiex_usb_disconnect, | 563 | .disconnect = mwifiex_usb_disconnect, |
573 | .id_table = mwifiex_usb_table, | 564 | .id_table = mwifiex_usb_table, |
574 | .suspend = mwifiex_usb_suspend, | 565 | .suspend = mwifiex_usb_suspend, |
575 | .resume = mwifiex_usb_resume, | 566 | .resume = mwifiex_usb_resume, |
576 | .supports_autosuspend = 1, | ||
577 | }; | 567 | }; |
578 | 568 | ||
579 | static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) | 569 | static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) |
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 13eaeed03898..981cf6e7c73b 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c | |||
@@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) | |||
559 | mwifiex_wmm_delete_all_ralist(priv); | 559 | mwifiex_wmm_delete_all_ralist(priv); |
560 | memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); | 560 | memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); |
561 | 561 | ||
562 | if (priv->adapter->if_ops.clean_pcie_ring) | 562 | if (priv->adapter->if_ops.clean_pcie_ring && |
563 | !priv->adapter->surprise_removed) | ||
563 | priv->adapter->if_ops.clean_pcie_ring(priv->adapter); | 564 | priv->adapter->if_ops.clean_pcie_ring(priv->adapter); |
564 | spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); | 565 | spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
565 | } | 566 | } |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index f9daa9e183f2..e30d80033cbc 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -907,6 +907,7 @@ static int handle_incoming_queue(struct net_device *dev, | |||
907 | 907 | ||
908 | /* Ethernet work: Delayed to here as it peeks the header. */ | 908 | /* Ethernet work: Delayed to here as it peeks the header. */ |
909 | skb->protocol = eth_type_trans(skb, dev); | 909 | skb->protocol = eth_type_trans(skb, dev); |
910 | skb_reset_network_header(skb); | ||
910 | 911 | ||
911 | if (checksum_setup(dev, skb)) { | 912 | if (checksum_setup(dev, skb)) { |
912 | kfree_skb(skb); | 913 | kfree_skb(skb); |
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index b4b0d83f9ef6..7061ac0ad428 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h | |||
@@ -678,6 +678,7 @@ struct tsi721_bdma_chan { | |||
678 | struct list_head free_list; | 678 | struct list_head free_list; |
679 | dma_cookie_t completed_cookie; | 679 | dma_cookie_t completed_cookie; |
680 | struct tasklet_struct tasklet; | 680 | struct tasklet_struct tasklet; |
681 | bool active; | ||
681 | }; | 682 | }; |
682 | 683 | ||
683 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | 684 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ |
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index 502663f5f7c6..91245f5dbe81 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c | |||
@@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan) | |||
206 | { | 206 | { |
207 | /* Disable BDMA channel interrupts */ | 207 | /* Disable BDMA channel interrupts */ |
208 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); | 208 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); |
209 | 209 | if (bdma_chan->active) | |
210 | tasklet_schedule(&bdma_chan->tasklet); | 210 | tasklet_schedule(&bdma_chan->tasklet); |
211 | } | 211 | } |
212 | 212 | ||
213 | #ifdef CONFIG_PCI_MSI | 213 | #ifdef CONFIG_PCI_MSI |
@@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) | |||
562 | } | 562 | } |
563 | #endif /* CONFIG_PCI_MSI */ | 563 | #endif /* CONFIG_PCI_MSI */ |
564 | 564 | ||
565 | tasklet_enable(&bdma_chan->tasklet); | 565 | bdma_chan->active = true; |
566 | tsi721_bdma_interrupt_enable(bdma_chan, 1); | 566 | tsi721_bdma_interrupt_enable(bdma_chan, 1); |
567 | 567 | ||
568 | return bdma_chan->bd_num - 1; | 568 | return bdma_chan->bd_num - 1; |
@@ -576,9 +576,7 @@ err_out: | |||
576 | static void tsi721_free_chan_resources(struct dma_chan *dchan) | 576 | static void tsi721_free_chan_resources(struct dma_chan *dchan) |
577 | { | 577 | { |
578 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | 578 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); |
579 | #ifdef CONFIG_PCI_MSI | ||
580 | struct tsi721_device *priv = to_tsi721(dchan->device); | 579 | struct tsi721_device *priv = to_tsi721(dchan->device); |
581 | #endif | ||
582 | LIST_HEAD(list); | 580 | LIST_HEAD(list); |
583 | 581 | ||
584 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); | 582 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); |
@@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan) | |||
589 | BUG_ON(!list_empty(&bdma_chan->active_list)); | 587 | BUG_ON(!list_empty(&bdma_chan->active_list)); |
590 | BUG_ON(!list_empty(&bdma_chan->queue)); | 588 | BUG_ON(!list_empty(&bdma_chan->queue)); |
591 | 589 | ||
592 | tasklet_disable(&bdma_chan->tasklet); | 590 | tsi721_bdma_interrupt_enable(bdma_chan, 0); |
591 | bdma_chan->active = false; | ||
592 | |||
593 | #ifdef CONFIG_PCI_MSI | ||
594 | if (priv->flags & TSI721_USING_MSIX) { | ||
595 | synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + | ||
596 | bdma_chan->id].vector); | ||
597 | synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + | ||
598 | bdma_chan->id].vector); | ||
599 | } else | ||
600 | #endif | ||
601 | synchronize_irq(priv->pdev->irq); | ||
602 | |||
603 | tasklet_kill(&bdma_chan->tasklet); | ||
593 | 604 | ||
594 | spin_lock_bh(&bdma_chan->lock); | 605 | spin_lock_bh(&bdma_chan->lock); |
595 | list_splice_init(&bdma_chan->free_list, &list); | 606 | list_splice_init(&bdma_chan->free_list, &list); |
596 | spin_unlock_bh(&bdma_chan->lock); | 607 | spin_unlock_bh(&bdma_chan->lock); |
597 | 608 | ||
598 | tsi721_bdma_interrupt_enable(bdma_chan, 0); | ||
599 | |||
600 | #ifdef CONFIG_PCI_MSI | 609 | #ifdef CONFIG_PCI_MSI |
601 | if (priv->flags & TSI721_USING_MSIX) { | 610 | if (priv->flags & TSI721_USING_MSIX) { |
602 | free_irq(priv->msix[TSI721_VECT_DMA0_DONE + | 611 | free_irq(priv->msix[TSI721_VECT_DMA0_DONE + |
@@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv) | |||
790 | bdma_chan->dchan.cookie = 1; | 799 | bdma_chan->dchan.cookie = 1; |
791 | bdma_chan->dchan.chan_id = i; | 800 | bdma_chan->dchan.chan_id = i; |
792 | bdma_chan->id = i; | 801 | bdma_chan->id = i; |
802 | bdma_chan->active = false; | ||
793 | 803 | ||
794 | spin_lock_init(&bdma_chan->lock); | 804 | spin_lock_init(&bdma_chan->lock); |
795 | 805 | ||
@@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv) | |||
799 | 809 | ||
800 | tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, | 810 | tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, |
801 | (unsigned long)bdma_chan); | 811 | (unsigned long)bdma_chan); |
802 | tasklet_disable(&bdma_chan->tasklet); | ||
803 | list_add_tail(&bdma_chan->dchan.device_node, | 812 | list_add_tail(&bdma_chan->dchan.device_node, |
804 | &mport->dma.channels); | 813 | &mport->dma.channels); |
805 | } | 814 | } |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d1ac4caaf1b0..afca1bc24f26 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -953,6 +953,8 @@ static int machine_constraints_current(struct regulator_dev *rdev, | |||
953 | return 0; | 953 | return 0; |
954 | } | 954 | } |
955 | 955 | ||
956 | static int _regulator_do_enable(struct regulator_dev *rdev); | ||
957 | |||
956 | /** | 958 | /** |
957 | * set_machine_constraints - sets regulator constraints | 959 | * set_machine_constraints - sets regulator constraints |
958 | * @rdev: regulator source | 960 | * @rdev: regulator source |
@@ -1013,10 +1015,9 @@ static int set_machine_constraints(struct regulator_dev *rdev, | |||
1013 | /* If the constraints say the regulator should be on at this point | 1015 | /* If the constraints say the regulator should be on at this point |
1014 | * and we have control then make sure it is enabled. | 1016 | * and we have control then make sure it is enabled. |
1015 | */ | 1017 | */ |
1016 | if ((rdev->constraints->always_on || rdev->constraints->boot_on) && | 1018 | if (rdev->constraints->always_on || rdev->constraints->boot_on) { |
1017 | ops->enable) { | 1019 | ret = _regulator_do_enable(rdev); |
1018 | ret = ops->enable(rdev); | 1020 | if (ret < 0 && ret != -EINVAL) { |
1019 | if (ret < 0) { | ||
1020 | rdev_err(rdev, "failed to enable\n"); | 1021 | rdev_err(rdev, "failed to enable\n"); |
1021 | goto out; | 1022 | goto out; |
1022 | } | 1023 | } |
@@ -1907,8 +1908,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev) | |||
1907 | 1908 | ||
1908 | trace_regulator_disable_complete(rdev_get_name(rdev)); | 1909 | trace_regulator_disable_complete(rdev_get_name(rdev)); |
1909 | 1910 | ||
1910 | _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, | ||
1911 | NULL); | ||
1912 | return 0; | 1911 | return 0; |
1913 | } | 1912 | } |
1914 | 1913 | ||
@@ -1932,6 +1931,8 @@ static int _regulator_disable(struct regulator_dev *rdev) | |||
1932 | rdev_err(rdev, "failed to disable\n"); | 1931 | rdev_err(rdev, "failed to disable\n"); |
1933 | return ret; | 1932 | return ret; |
1934 | } | 1933 | } |
1934 | _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, | ||
1935 | NULL); | ||
1935 | } | 1936 | } |
1936 | 1937 | ||
1937 | rdev->use_count = 0; | 1938 | rdev->use_count = 0; |
@@ -1984,20 +1985,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev) | |||
1984 | { | 1985 | { |
1985 | int ret = 0; | 1986 | int ret = 0; |
1986 | 1987 | ||
1987 | /* force disable */ | 1988 | ret = _regulator_do_disable(rdev); |
1988 | if (rdev->desc->ops->disable) { | 1989 | if (ret < 0) { |
1989 | /* ah well, who wants to live forever... */ | 1990 | rdev_err(rdev, "failed to force disable\n"); |
1990 | ret = rdev->desc->ops->disable(rdev); | 1991 | return ret; |
1991 | if (ret < 0) { | ||
1992 | rdev_err(rdev, "failed to force disable\n"); | ||
1993 | return ret; | ||
1994 | } | ||
1995 | /* notify other consumers that power has been forced off */ | ||
1996 | _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | | ||
1997 | REGULATOR_EVENT_DISABLE, NULL); | ||
1998 | } | 1992 | } |
1999 | 1993 | ||
2000 | return ret; | 1994 | _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | |
1995 | REGULATOR_EVENT_DISABLE, NULL); | ||
1996 | |||
1997 | return 0; | ||
2001 | } | 1998 | } |
2002 | 1999 | ||
2003 | /** | 2000 | /** |
@@ -3630,23 +3627,18 @@ int regulator_suspend_finish(void) | |||
3630 | 3627 | ||
3631 | mutex_lock(®ulator_list_mutex); | 3628 | mutex_lock(®ulator_list_mutex); |
3632 | list_for_each_entry(rdev, ®ulator_list, list) { | 3629 | list_for_each_entry(rdev, ®ulator_list, list) { |
3633 | struct regulator_ops *ops = rdev->desc->ops; | ||
3634 | |||
3635 | mutex_lock(&rdev->mutex); | 3630 | mutex_lock(&rdev->mutex); |
3636 | if ((rdev->use_count > 0 || rdev->constraints->always_on) && | 3631 | if (rdev->use_count > 0 || rdev->constraints->always_on) { |
3637 | ops->enable) { | 3632 | error = _regulator_do_enable(rdev); |
3638 | error = ops->enable(rdev); | ||
3639 | if (error) | 3633 | if (error) |
3640 | ret = error; | 3634 | ret = error; |
3641 | } else { | 3635 | } else { |
3642 | if (!have_full_constraints()) | 3636 | if (!have_full_constraints()) |
3643 | goto unlock; | 3637 | goto unlock; |
3644 | if (!ops->disable) | ||
3645 | goto unlock; | ||
3646 | if (!_regulator_is_enabled(rdev)) | 3638 | if (!_regulator_is_enabled(rdev)) |
3647 | goto unlock; | 3639 | goto unlock; |
3648 | 3640 | ||
3649 | error = ops->disable(rdev); | 3641 | error = _regulator_do_disable(rdev); |
3650 | if (error) | 3642 | if (error) |
3651 | ret = error; | 3643 | ret = error; |
3652 | } | 3644 | } |
@@ -3820,7 +3812,7 @@ static int __init regulator_init_complete(void) | |||
3820 | ops = rdev->desc->ops; | 3812 | ops = rdev->desc->ops; |
3821 | c = rdev->constraints; | 3813 | c = rdev->constraints; |
3822 | 3814 | ||
3823 | if (!ops->disable || (c && c->always_on)) | 3815 | if (c && c->always_on) |
3824 | continue; | 3816 | continue; |
3825 | 3817 | ||
3826 | mutex_lock(&rdev->mutex); | 3818 | mutex_lock(&rdev->mutex); |
@@ -3841,7 +3833,7 @@ static int __init regulator_init_complete(void) | |||
3841 | /* We log since this may kill the system if it | 3833 | /* We log since this may kill the system if it |
3842 | * goes wrong. */ | 3834 | * goes wrong. */ |
3843 | rdev_info(rdev, "disabling\n"); | 3835 | rdev_info(rdev, "disabling\n"); |
3844 | ret = ops->disable(rdev); | 3836 | ret = _regulator_do_disable(rdev); |
3845 | if (ret != 0) | 3837 | if (ret != 0) |
3846 | rdev_err(rdev, "couldn't disable: %d\n", ret); | 3838 | rdev_err(rdev, "couldn't disable: %d\n", ret); |
3847 | } else { | 3839 | } else { |
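
Note on the regulator core hunks above: the internal paths now go through _regulator_do_enable()/_regulator_do_disable() instead of calling ops->enable/ops->disable directly, and -EINVAL from the enable helper is tolerated when applying boot-on constraints, since it only means the regulator has no software enable control. A rough standalone model of that fallback is sketched below with invented names; it is an illustration of the shape of the logic, not the regulator framework's implementation.

    #include <stdio.h>
    #include <errno.h>

    /* Hypothetical regulator with optional backend hooks. */
    struct reg {
        int (*enable)(struct reg *);  /* may be NULL */
        int has_ena_gpio;
        int gpio_state;
    };

    /* Central helper: prefer the enable GPIO, then the op, else -EINVAL. */
    static int do_enable(struct reg *r)
    {
        if (r->has_ena_gpio) {
            r->gpio_state = 1;
            return 0;
        }
        if (r->enable)
            return r->enable(r);
        return -EINVAL;
    }

    static void apply_boot_on(struct reg *r)
    {
        int ret = do_enable(r);

        /* -EINVAL just means "nothing to control", not a failure. */
        if (ret < 0 && ret != -EINVAL)
            fprintf(stderr, "failed to enable\n");
    }

    int main(void)
    {
        struct reg fixed = { .enable = NULL, .has_ena_gpio = 0 };
        struct reg gpio  = { .enable = NULL, .has_ena_gpio = 1 };

        apply_boot_on(&fixed);  /* tolerated silently */
        apply_boot_on(&gpio);   /* flips the GPIO */
        printf("gpio regulator state: %d\n", gpio.gpio_state);
        return 0;
    }
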
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 7afd373b9595..c4cde9c08f1f 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -580,10 +580,12 @@ static int s3c_rtc_suspend(struct device *dev) | |||
580 | 580 | ||
581 | clk_enable(rtc_clk); | 581 | clk_enable(rtc_clk); |
582 | /* save TICNT for anyone using periodic interrupts */ | 582 | /* save TICNT for anyone using periodic interrupts */ |
583 | ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); | ||
584 | if (s3c_rtc_cpu_type == TYPE_S3C64XX) { | 583 | if (s3c_rtc_cpu_type == TYPE_S3C64XX) { |
585 | ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON); | 584 | ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON); |
586 | ticnt_en_save &= S3C64XX_RTCCON_TICEN; | 585 | ticnt_en_save &= S3C64XX_RTCCON_TICEN; |
586 | ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT); | ||
587 | } else { | ||
588 | ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); | ||
587 | } | 589 | } |
588 | s3c_rtc_enable(pdev, 0); | 590 | s3c_rtc_enable(pdev, 0); |
589 | 591 | ||
@@ -605,10 +607,15 @@ static int s3c_rtc_resume(struct device *dev) | |||
605 | 607 | ||
606 | clk_enable(rtc_clk); | 608 | clk_enable(rtc_clk); |
607 | s3c_rtc_enable(pdev, 1); | 609 | s3c_rtc_enable(pdev, 1); |
608 | writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); | 610 | if (s3c_rtc_cpu_type == TYPE_S3C64XX) { |
609 | if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) { | 611 | writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT); |
610 | tmp = readw(s3c_rtc_base + S3C2410_RTCCON); | 612 | if (ticnt_en_save) { |
611 | writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); | 613 | tmp = readw(s3c_rtc_base + S3C2410_RTCCON); |
614 | writew(tmp | ticnt_en_save, | ||
615 | s3c_rtc_base + S3C2410_RTCCON); | ||
616 | } | ||
617 | } else { | ||
618 | writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); | ||
612 | } | 619 | } |
613 | 620 | ||
614 | if (device_may_wakeup(dev) && wake_en) { | 621 | if (device_may_wakeup(dev) && wake_en) { |
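
Note on the rtc-s3c hunks above: the suspend/resume fix appears to account for the tick counter being wider than one byte on the S3C64XX-type RTC, so saving it with readb()/writeb() would truncate it; that path now uses the 32-bit accessors while older parts keep the byte accessors. A tiny standalone illustration of the truncation, assuming a 32-bit tick-count register on the newer parts:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ticnt = 0x00008012;    /* example tick count > 255 */

        uint8_t  saved8  = (uint8_t)ticnt;  /* what a byte save keeps */
        uint32_t saved32 = ticnt;           /* what a 32-bit save keeps */

        printf("byte save restores 0x%02x, word save restores 0x%08x\n",
               saved8, saved32);
        return 0;
    }
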
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c3a83df07894..795ed61a5496 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -1660,7 +1660,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) | |||
1660 | QDIO_FLAG_CLEANUP_USING_CLEAR); | 1660 | QDIO_FLAG_CLEANUP_USING_CLEAR); |
1661 | if (rc) | 1661 | if (rc) |
1662 | QETH_CARD_TEXT_(card, 3, "1err%d", rc); | 1662 | QETH_CARD_TEXT_(card, 3, "1err%d", rc); |
1663 | qdio_free(CARD_DDEV(card)); | ||
1664 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); | 1663 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); |
1665 | break; | 1664 | break; |
1666 | case QETH_QDIO_CLEANING: | 1665 | case QETH_QDIO_CLEANING: |
@@ -2605,6 +2604,7 @@ static int qeth_mpc_initialize(struct qeth_card *card) | |||
2605 | return 0; | 2604 | return 0; |
2606 | out_qdio: | 2605 | out_qdio: |
2607 | qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); | 2606 | qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); |
2607 | qdio_free(CARD_DDEV(card)); | ||
2608 | return rc; | 2608 | return rc; |
2609 | } | 2609 | } |
2610 | 2610 | ||
@@ -4906,9 +4906,11 @@ retry: | |||
4906 | if (retries < 3) | 4906 | if (retries < 3) |
4907 | QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", | 4907 | QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", |
4908 | dev_name(&card->gdev->dev)); | 4908 | dev_name(&card->gdev->dev)); |
4909 | rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); | ||
4909 | ccw_device_set_offline(CARD_DDEV(card)); | 4910 | ccw_device_set_offline(CARD_DDEV(card)); |
4910 | ccw_device_set_offline(CARD_WDEV(card)); | 4911 | ccw_device_set_offline(CARD_WDEV(card)); |
4911 | ccw_device_set_offline(CARD_RDEV(card)); | 4912 | ccw_device_set_offline(CARD_RDEV(card)); |
4913 | qdio_free(CARD_DDEV(card)); | ||
4912 | rc = ccw_device_set_online(CARD_RDEV(card)); | 4914 | rc = ccw_device_set_online(CARD_RDEV(card)); |
4913 | if (rc) | 4915 | if (rc) |
4914 | goto retriable; | 4916 | goto retriable; |
@@ -4918,7 +4920,6 @@ retry: | |||
4918 | rc = ccw_device_set_online(CARD_DDEV(card)); | 4920 | rc = ccw_device_set_online(CARD_DDEV(card)); |
4919 | if (rc) | 4921 | if (rc) |
4920 | goto retriable; | 4922 | goto retriable; |
4921 | rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); | ||
4922 | retriable: | 4923 | retriable: |
4923 | if (rc == -ERESTARTSYS) { | 4924 | if (rc == -ERESTARTSYS) { |
4924 | QETH_DBF_TEXT(SETUP, 2, "break1"); | 4925 | QETH_DBF_TEXT(SETUP, 2, "break1"); |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 0710550093ce..908d82529ee9 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -1091,6 +1091,7 @@ out_remove: | |||
1091 | ccw_device_set_offline(CARD_DDEV(card)); | 1091 | ccw_device_set_offline(CARD_DDEV(card)); |
1092 | ccw_device_set_offline(CARD_WDEV(card)); | 1092 | ccw_device_set_offline(CARD_WDEV(card)); |
1093 | ccw_device_set_offline(CARD_RDEV(card)); | 1093 | ccw_device_set_offline(CARD_RDEV(card)); |
1094 | qdio_free(CARD_DDEV(card)); | ||
1094 | if (recover_flag == CARD_STATE_RECOVER) | 1095 | if (recover_flag == CARD_STATE_RECOVER) |
1095 | card->state = CARD_STATE_RECOVER; | 1096 | card->state = CARD_STATE_RECOVER; |
1096 | else | 1097 | else |
@@ -1132,6 +1133,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, | |||
1132 | rc = (rc2) ? rc2 : rc3; | 1133 | rc = (rc2) ? rc2 : rc3; |
1133 | if (rc) | 1134 | if (rc) |
1134 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1135 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1136 | qdio_free(CARD_DDEV(card)); | ||
1135 | if (recover_flag == CARD_STATE_UP) | 1137 | if (recover_flag == CARD_STATE_UP) |
1136 | card->state = CARD_STATE_RECOVER; | 1138 | card->state = CARD_STATE_RECOVER; |
1137 | /* let user_space know that device is offline */ | 1139 | /* let user_space know that device is offline */ |
@@ -1194,6 +1196,7 @@ static void qeth_l2_shutdown(struct ccwgroup_device *gdev) | |||
1194 | qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); | 1196 | qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); |
1195 | qeth_qdio_clear_card(card, 0); | 1197 | qeth_qdio_clear_card(card, 0); |
1196 | qeth_clear_qdio_buffers(card); | 1198 | qeth_clear_qdio_buffers(card); |
1199 | qdio_free(CARD_DDEV(card)); | ||
1197 | } | 1200 | } |
1198 | 1201 | ||
1199 | static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) | 1202 | static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 0f430424c3b8..3524d34ff694 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -3447,6 +3447,7 @@ out_remove: | |||
3447 | ccw_device_set_offline(CARD_DDEV(card)); | 3447 | ccw_device_set_offline(CARD_DDEV(card)); |
3448 | ccw_device_set_offline(CARD_WDEV(card)); | 3448 | ccw_device_set_offline(CARD_WDEV(card)); |
3449 | ccw_device_set_offline(CARD_RDEV(card)); | 3449 | ccw_device_set_offline(CARD_RDEV(card)); |
3450 | qdio_free(CARD_DDEV(card)); | ||
3450 | if (recover_flag == CARD_STATE_RECOVER) | 3451 | if (recover_flag == CARD_STATE_RECOVER) |
3451 | card->state = CARD_STATE_RECOVER; | 3452 | card->state = CARD_STATE_RECOVER; |
3452 | else | 3453 | else |
@@ -3493,6 +3494,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, | |||
3493 | rc = (rc2) ? rc2 : rc3; | 3494 | rc = (rc2) ? rc2 : rc3; |
3494 | if (rc) | 3495 | if (rc) |
3495 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 3496 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3497 | qdio_free(CARD_DDEV(card)); | ||
3496 | if (recover_flag == CARD_STATE_UP) | 3498 | if (recover_flag == CARD_STATE_UP) |
3497 | card->state = CARD_STATE_RECOVER; | 3499 | card->state = CARD_STATE_RECOVER; |
3498 | /* let user_space know that device is offline */ | 3500 | /* let user_space know that device is offline */ |
@@ -3545,6 +3547,7 @@ static void qeth_l3_shutdown(struct ccwgroup_device *gdev) | |||
3545 | qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); | 3547 | qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); |
3546 | qeth_qdio_clear_card(card, 0); | 3548 | qeth_qdio_clear_card(card, 0); |
3547 | qeth_clear_qdio_buffers(card); | 3549 | qeth_clear_qdio_buffers(card); |
3550 | qdio_free(CARD_DDEV(card)); | ||
3548 | } | 3551 | } |
3549 | 3552 | ||
3550 | static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) | 3553 | static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 4fb7a8f83c8a..54af4e933695 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn) | |||
186 | if (pfn_valid(pfn)) { | 186 | if (pfn_valid(pfn)) { |
187 | bool reserved; | 187 | bool reserved; |
188 | struct page *tail = pfn_to_page(pfn); | 188 | struct page *tail = pfn_to_page(pfn); |
189 | struct page *head = compound_trans_head(tail); | 189 | struct page *head = compound_head(tail); |
190 | reserved = !!(PageReserved(head)); | 190 | reserved = !!(PageReserved(head)); |
191 | if (head != tail) { | 191 | if (head != tail) { |
192 | /* | 192 | /* |
193 | * "head" is not a dangling pointer | 193 | * "head" is not a dangling pointer |
194 | * (compound_trans_head takes care of that) | 194 | * (compound_head takes care of that) |
195 | * but the hugepage may have been split | 195 | * but the hugepage may have been split |
196 | * from under us (and we may not hold a | 196 | * from under us (and we may not hold a |
197 | * reference count on the head page so it can | 197 | * reference count on the head page so it can |
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c index 968eab5bc1f5..68537e8b7a09 100644 --- a/fs/hfsplus/options.c +++ b/fs/hfsplus/options.c | |||
@@ -75,7 +75,7 @@ int hfsplus_parse_options_remount(char *input, int *force) | |||
75 | int token; | 75 | int token; |
76 | 76 | ||
77 | if (!input) | 77 | if (!input) |
78 | return 0; | 78 | return 1; |
79 | 79 | ||
80 | while ((p = strsep(&input, ",")) != NULL) { | 80 | while ((p = strsep(&input, ",")) != NULL) { |
81 | if (!*p) | 81 | if (!*p) |
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index aaa50611ec66..d7b5108789e2 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c | |||
@@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot) | |||
717 | */ | 717 | */ |
718 | if (status < 0) | 718 | if (status < 0) |
719 | mlog_errno(status); | 719 | mlog_errno(status); |
720 | /* | ||
721 | * Clear dq_off so that we search for the structure in quota file next | ||
722 | * time we acquire it. The structure might be deleted and reallocated | ||
723 | * elsewhere by another node while our dquot structure is on freelist. | ||
724 | */ | ||
725 | dquot->dq_off = 0; | ||
720 | clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); | 726 | clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); |
721 | out_trans: | 727 | out_trans: |
722 | ocfs2_commit_trans(osb, handle); | 728 | ocfs2_commit_trans(osb, handle); |
@@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot) | |||
756 | status = ocfs2_lock_global_qf(info, 1); | 762 | status = ocfs2_lock_global_qf(info, 1); |
757 | if (status < 0) | 763 | if (status < 0) |
758 | goto out; | 764 | goto out; |
759 | if (!test_bit(DQ_READ_B, &dquot->dq_flags)) { | 765 | status = ocfs2_qinfo_lock(info, 0); |
760 | status = ocfs2_qinfo_lock(info, 0); | 766 | if (status < 0) |
761 | if (status < 0) | 767 | goto out_dq; |
762 | goto out_dq; | 768 | /* |
763 | status = qtree_read_dquot(&info->dqi_gi, dquot); | 769 | * We always want to read dquot structure from disk because we don't |
764 | ocfs2_qinfo_unlock(info, 0); | 770 | * know what happened with it while it was on freelist. |
765 | if (status < 0) | 771 | */ |
766 | goto out_dq; | 772 | status = qtree_read_dquot(&info->dqi_gi, dquot); |
767 | } | 773 | ocfs2_qinfo_unlock(info, 0); |
768 | set_bit(DQ_READ_B, &dquot->dq_flags); | 774 | if (status < 0) |
775 | goto out_dq; | ||
769 | 776 | ||
770 | OCFS2_DQUOT(dquot)->dq_use_count++; | 777 | OCFS2_DQUOT(dquot)->dq_use_count++; |
771 | OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace; | 778 | OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace; |
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 2e4344be3b96..2001862bf2b1 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c | |||
@@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot) | |||
1303 | ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); | 1303 | ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); |
1304 | 1304 | ||
1305 | out: | 1305 | out: |
1306 | /* Clear the read bit so that next time someone uses this | ||
1307 | * dquot he reads fresh info from disk and allocates local | ||
1308 | * dquot structure */ | ||
1309 | clear_bit(DQ_READ_B, &dquot->dq_flags); | ||
1310 | return status; | 1306 | return status; |
1311 | } | 1307 | } |
1312 | 1308 | ||
diff --git a/fs/proc/page.c b/fs/proc/page.c index 02174a610315..e647c55275d9 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
@@ -121,9 +121,8 @@ u64 stable_page_flags(struct page *page) | |||
121 | * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon | 121 | * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon |
122 | * to make sure a given page is a thp, not a non-huge compound page. | 122 | * to make sure a given page is a thp, not a non-huge compound page. |
123 | */ | 123 | */ |
124 | else if (PageTransCompound(page) && | 124 | else if (PageTransCompound(page) && (PageLRU(compound_head(page)) || |
125 | (PageLRU(compound_trans_head(page)) || | 125 | PageAnon(compound_head(page)))) |
126 | PageAnon(compound_trans_head(page)))) | ||
127 | u |= 1 << KPF_THP; | 126 | u |= 1 << KPF_THP; |
128 | 127 | ||
129 | /* | 128 | /* |
diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h index a1116a3b54ef..8c1603b10665 100644 --- a/include/dt-bindings/clock/tegra124-car.h +++ b/include/dt-bindings/clock/tegra124-car.h | |||
@@ -36,10 +36,10 @@ | |||
36 | #define TEGRA124_CLK_PWM 17 | 36 | #define TEGRA124_CLK_PWM 17 |
37 | #define TEGRA124_CLK_I2S2 18 | 37 | #define TEGRA124_CLK_I2S2 18 |
38 | /* 20 (register bit affects vi and vi_sensor) */ | 38 | /* 20 (register bit affects vi and vi_sensor) */ |
39 | #define TEGRA124_CLK_GR_2D 21 | 39 | /* 21 */ |
40 | #define TEGRA124_CLK_USBD 22 | 40 | #define TEGRA124_CLK_USBD 22 |
41 | #define TEGRA124_CLK_ISP 23 | 41 | #define TEGRA124_CLK_ISP 23 |
42 | #define TEGRA124_CLK_GR_3D 24 | 42 | /* 26 */ |
43 | /* 25 */ | 43 | /* 25 */ |
44 | #define TEGRA124_CLK_DISP2 26 | 44 | #define TEGRA124_CLK_DISP2 26 |
45 | #define TEGRA124_CLK_DISP1 27 | 45 | #define TEGRA124_CLK_DISP1 27 |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index db512014e061..b826239bdce0 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -157,46 +157,6 @@ static inline int hpage_nr_pages(struct page *page) | |||
157 | return HPAGE_PMD_NR; | 157 | return HPAGE_PMD_NR; |
158 | return 1; | 158 | return 1; |
159 | } | 159 | } |
160 | /* | ||
161 | * compound_trans_head() should be used instead of compound_head(), | ||
162 | * whenever the "page" passed as parameter could be the tail of a | ||
163 | * transparent hugepage that could be undergoing a | ||
164 | * __split_huge_page_refcount(). The page structure layout often | ||
165 | * changes across releases and it makes extensive use of unions. So if | ||
166 | * the page structure layout will change in a way that | ||
167 | * page->first_page gets clobbered by __split_huge_page_refcount, the | ||
168 | * implementation making use of smp_rmb() will be required. | ||
169 | * | ||
170 | * Currently we define compound_trans_head as compound_head, because | ||
171 | * page->private is in the same union with page->first_page, and | ||
172 | * page->private isn't clobbered. However this also means we're | ||
173 | * currently leaving dirt into the page->private field of anonymous | ||
174 | * pages resulting from a THP split, instead of setting page->private | ||
175 | * to zero like for every other page that has PG_private not set. But | ||
176 | * anonymous pages don't use page->private so this is not a problem. | ||
177 | */ | ||
178 | #if 0 | ||
179 | /* This will be needed if page->private will be clobbered in split_huge_page */ | ||
180 | static inline struct page *compound_trans_head(struct page *page) | ||
181 | { | ||
182 | if (PageTail(page)) { | ||
183 | struct page *head; | ||
184 | head = page->first_page; | ||
185 | smp_rmb(); | ||
186 | /* | ||
187 | * head may be a dangling pointer. | ||
188 | * __split_huge_page_refcount clears PageTail before | ||
189 | * overwriting first_page, so if PageTail is still | ||
190 | * there it means the head pointer isn't dangling. | ||
191 | */ | ||
192 | if (PageTail(page)) | ||
193 | return head; | ||
194 | } | ||
195 | return page; | ||
196 | } | ||
197 | #else | ||
198 | #define compound_trans_head(page) compound_head(page) | ||
199 | #endif | ||
200 | 160 | ||
201 | extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | 161 | extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, |
202 | unsigned long addr, pmd_t pmd, pmd_t *pmdp); | 162 | unsigned long addr, pmd_t pmd, pmd_t *pmdp); |
@@ -226,7 +186,6 @@ static inline int split_huge_page(struct page *page) | |||
226 | do { } while (0) | 186 | do { } while (0) |
227 | #define split_huge_page_pmd_mm(__mm, __address, __pmd) \ | 187 | #define split_huge_page_pmd_mm(__mm, __address, __pmd) \ |
228 | do { } while (0) | 188 | do { } while (0) |
229 | #define compound_trans_head(page) compound_head(page) | ||
230 | static inline int hugepage_madvise(struct vm_area_struct *vma, | 189 | static inline int hugepage_madvise(struct vm_area_struct *vma, |
231 | unsigned long *vm_flags, int advice) | 190 | unsigned long *vm_flags, int advice) |
232 | { | 191 | { |
diff --git a/include/linux/mm.h b/include/linux/mm.h index f28f46eade6a..c1b7414c7bef 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -175,7 +175,7 @@ extern unsigned int kobjsize(const void *objp); | |||
175 | * Special vmas that are non-mergable, non-mlock()able. | 175 | * Special vmas that are non-mergable, non-mlock()able. |
176 | * Note: mm/huge_memory.c VM_NO_THP depends on this definition. | 176 | * Note: mm/huge_memory.c VM_NO_THP depends on this definition. |
177 | */ | 177 | */ |
178 | #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP) | 178 | #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) |
179 | 179 | ||
180 | /* | 180 | /* |
181 | * mapping from the currently active vm_flags protection bits (the | 181 | * mapping from the currently active vm_flags protection bits (the |
@@ -399,8 +399,18 @@ static inline void compound_unlock_irqrestore(struct page *page, | |||
399 | 399 | ||
400 | static inline struct page *compound_head(struct page *page) | 400 | static inline struct page *compound_head(struct page *page) |
401 | { | 401 | { |
402 | if (unlikely(PageTail(page))) | 402 | if (unlikely(PageTail(page))) { |
403 | return page->first_page; | 403 | struct page *head = page->first_page; |
404 | |||
405 | /* | ||
406 | * page->first_page may be a dangling pointer to an old | ||
407 | * compound page, so recheck that it is still a tail | ||
408 | * page before returning. | ||
409 | */ | ||
410 | smp_rmb(); | ||
411 | if (likely(PageTail(page))) | ||
412 | return head; | ||
413 | } | ||
404 | return page; | 414 | return page; |
405 | } | 415 | } |
406 | 416 | ||
@@ -757,7 +767,7 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) | |||
757 | #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS | 767 | #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS |
758 | static inline int page_cpupid_xchg_last(struct page *page, int cpupid) | 768 | static inline int page_cpupid_xchg_last(struct page *page, int cpupid) |
759 | { | 769 | { |
760 | return xchg(&page->_last_cpupid, cpupid); | 770 | return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); |
761 | } | 771 | } |
762 | 772 | ||
763 | static inline int page_cpupid_last(struct page *page) | 773 | static inline int page_cpupid_last(struct page *page) |
@@ -766,7 +776,7 @@ static inline int page_cpupid_last(struct page *page) | |||
766 | } | 776 | } |
767 | static inline void page_cpupid_reset_last(struct page *page) | 777 | static inline void page_cpupid_reset_last(struct page *page) |
768 | { | 778 | { |
769 | page->_last_cpupid = -1; | 779 | page->_last_cpupid = -1 & LAST_CPUPID_MASK; |
770 | } | 780 | } |
771 | #else | 781 | #else |
772 | static inline int page_cpupid_last(struct page *page) | 782 | static inline int page_cpupid_last(struct page *page) |
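
Note on the mm.h cpupid hunks above: both the xchg path and the reset path now mask the stored value with LAST_CPUPID_MASK, so the out-of-line int field behaves like the narrow bitfield used when the cpupid lives in page flags, and a reset value of -1 reads back as the same "unset" pattern in either representation. A standalone sketch of the masking, with the field width chosen arbitrarily for the example:

    #include <stdio.h>

    #define LAST_CPUPID_SHIFT 12                        /* assumed width for the sketch */
    #define LAST_CPUPID_MASK  ((1 << LAST_CPUPID_SHIFT) - 1)

    int main(void)
    {
        int stored_unmasked = -1;                       /* 0xffffffff */
        int stored_masked   = -1 & LAST_CPUPID_MASK;    /* 0x00000fff */
        int bitfield_value  = LAST_CPUPID_MASK;         /* what a 12-bit field reads back */

        printf("unmasked matches bitfield? %d  masked matches bitfield? %d\n",
               stored_unmasked == bitfield_value, stored_masked == bitfield_value);
        return 0;
    }
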
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3ebbbe7b6d05..5e1e6f2d98c2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2725,7 +2725,7 @@ static inline void nf_reset(struct sk_buff *skb) | |||
2725 | 2725 | ||
2726 | static inline void nf_reset_trace(struct sk_buff *skb) | 2726 | static inline void nf_reset_trace(struct sk_buff *skb) |
2727 | { | 2727 | { |
2728 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) | 2728 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) |
2729 | skb->nf_trace = 0; | 2729 | skb->nf_trace = 0; |
2730 | #endif | 2730 | #endif |
2731 | } | 2731 | } |
@@ -2742,6 +2742,9 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) | |||
2742 | dst->nf_bridge = src->nf_bridge; | 2742 | dst->nf_bridge = src->nf_bridge; |
2743 | nf_bridge_get(src->nf_bridge); | 2743 | nf_bridge_get(src->nf_bridge); |
2744 | #endif | 2744 | #endif |
2745 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) | ||
2746 | dst->nf_trace = src->nf_trace; | ||
2747 | #endif | ||
2745 | } | 2748 | } |
2746 | 2749 | ||
2747 | static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) | 2750 | static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) |
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 48ed75c21260..e77c10405d51 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h | |||
@@ -129,6 +129,7 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], | |||
129 | int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], | 129 | int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], |
130 | struct ip_tunnel_parm *p); | 130 | struct ip_tunnel_parm *p); |
131 | void ip_tunnel_setup(struct net_device *dev, int net_id); | 131 | void ip_tunnel_setup(struct net_device *dev, int net_id); |
132 | void ip_tunnel_dst_reset_all(struct ip_tunnel *t); | ||
132 | 133 | ||
133 | /* Extract dsfield from inner protocol */ | 134 | /* Extract dsfield from inner protocol */ |
134 | static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, | 135 | static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 56fc366da6d5..8c4dd63134d4 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -1303,7 +1303,8 @@ struct tcp_fastopen_request { | |||
1303 | /* Fast Open cookie. Size 0 means a cookie request */ | 1303 | /* Fast Open cookie. Size 0 means a cookie request */ |
1304 | struct tcp_fastopen_cookie cookie; | 1304 | struct tcp_fastopen_cookie cookie; |
1305 | struct msghdr *data; /* data in MSG_FASTOPEN */ | 1305 | struct msghdr *data; /* data in MSG_FASTOPEN */ |
1306 | u16 copied; /* queued in tcp_connect() */ | 1306 | size_t size; |
1307 | int copied; /* queued in tcp_connect() */ | ||
1307 | }; | 1308 | }; |
1308 | void tcp_free_fastopen_req(struct tcp_sock *tp); | 1309 | void tcp_free_fastopen_req(struct tcp_sock *tp); |
1309 | 1310 | ||
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index afa5730fb3bd..fb5654a8ca3c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -1648,6 +1648,11 @@ static inline int xfrm_aevent_is_on(struct net *net) | |||
1648 | } | 1648 | } |
1649 | #endif | 1649 | #endif |
1650 | 1650 | ||
1651 | static inline int aead_len(struct xfrm_algo_aead *alg) | ||
1652 | { | ||
1653 | return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); | ||
1654 | } | ||
1655 | |||
1651 | static inline int xfrm_alg_len(const struct xfrm_algo *alg) | 1656 | static inline int xfrm_alg_len(const struct xfrm_algo *alg) |
1652 | { | 1657 | { |
1653 | return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); | 1658 | return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); |
@@ -1686,6 +1691,12 @@ static inline int xfrm_replay_clone(struct xfrm_state *x, | |||
1686 | return 0; | 1691 | return 0; |
1687 | } | 1692 | } |
1688 | 1693 | ||
1694 | static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig) | ||
1695 | { | ||
1696 | return kmemdup(orig, aead_len(orig), GFP_KERNEL); | ||
1697 | } | ||
1698 | |||
1699 | |||
1689 | static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) | 1700 | static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) |
1690 | { | 1701 | { |
1691 | return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL); | 1702 | return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL); |
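
Note on the xfrm.h hunks above: aead_len() mirrors xfrm_alg_len(): the variable part of the algorithm blob is the key, whose length is carried in bits, so the byte count is rounded up with (bits + 7) / 8 before being added to the struct size, and xfrm_algo_aead_clone() duplicates exactly that many bytes. A quick standalone check of the rounding, using a simplified stand-in for the fixed header rather than the real struct layout:

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-in for the fixed part of an AEAD algorithm blob. */
    struct aead_hdr { char name[64]; unsigned int key_len_bits; unsigned int icv_len; };

    static size_t aead_blob_len(unsigned int key_len_bits)
    {
        return sizeof(struct aead_hdr) + (key_len_bits + 7) / 8;
    }

    int main(void)
    {
        /* 128-bit and 136-bit keys need 16 and 17 key bytes respectively. */
        printf("%zu %zu\n",
               aead_blob_len(128) - sizeof(struct aead_hdr),
               aead_blob_len(136) - sizeof(struct aead_hdr));
        return 0;
    }
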
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 5b8838b56d1c..5b9bb42b2d47 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c | |||
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx) | |||
70 | 70 | ||
71 | static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) | 71 | static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) |
72 | { | 72 | { |
73 | WARN_ON(!cpu_present(idx) || idx == IDX_INVALID); | 73 | WARN_ON(idx == IDX_INVALID || !cpu_present(idx)); |
74 | 74 | ||
75 | if (dl_time_before(new_dl, cp->elements[idx].dl)) { | 75 | if (dl_time_before(new_dl, cp->elements[idx].dl)) { |
76 | cp->elements[idx].dl = new_dl; | 76 | cp->elements[idx].dl = new_dl; |
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, | |||
117 | } | 117 | } |
118 | 118 | ||
119 | out: | 119 | out: |
120 | WARN_ON(!cpu_present(best_cpu) && best_cpu != -1); | 120 | WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); |
121 | 121 | ||
122 | return best_cpu; | 122 | return best_cpu; |
123 | } | 123 | } |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 15cbc17fbf84..6e79b3faa4cd 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq) | |||
135 | static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | 135 | static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) |
136 | { | 136 | { |
137 | struct task_struct *p = dl_task_of(dl_se); | 137 | struct task_struct *p = dl_task_of(dl_se); |
138 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | ||
139 | 138 | ||
140 | if (p->nr_cpus_allowed > 1) | 139 | if (p->nr_cpus_allowed > 1) |
141 | dl_rq->dl_nr_migratory++; | 140 | dl_rq->dl_nr_migratory++; |
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |||
146 | static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | 145 | static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) |
147 | { | 146 | { |
148 | struct task_struct *p = dl_task_of(dl_se); | 147 | struct task_struct *p = dl_task_of(dl_se); |
149 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | ||
150 | 148 | ||
151 | if (p->nr_cpus_allowed > 1) | 149 | if (p->nr_cpus_allowed > 1) |
152 | dl_rq->dl_nr_migratory--; | 150 | dl_rq->dl_nr_migratory--; |
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) | |||
564 | return 1; | 562 | return 1; |
565 | } | 563 | } |
566 | 564 | ||
565 | extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); | ||
566 | |||
567 | /* | 567 | /* |
568 | * Update the current task's runtime statistics (provided it is still | 568 | * Update the current task's runtime statistics (provided it is still |
569 | * a -deadline task and has not been removed from the dl_rq). | 569 | * a -deadline task and has not been removed from the dl_rq). |
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq) | |||
627 | struct rt_rq *rt_rq = &rq->rt; | 627 | struct rt_rq *rt_rq = &rq->rt; |
628 | 628 | ||
629 | raw_spin_lock(&rt_rq->rt_runtime_lock); | 629 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
630 | rt_rq->rt_time += delta_exec; | ||
631 | /* | 630 | /* |
632 | * We'll let actual RT tasks worry about the overflow here, we | 631 | * We'll let actual RT tasks worry about the overflow here, we |
633 | * have our own CBS to keep us inline -- see above. | 632 | * have our own CBS to keep us inline; only account when RT |
633 | * bandwidth is relevant. | ||
634 | */ | 634 | */ |
635 | if (sched_rt_bandwidth_account(rt_rq)) | ||
636 | rt_rq->rt_time += delta_exec; | ||
635 | raw_spin_unlock(&rt_rq->rt_runtime_lock); | 637 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
636 | } | 638 | } |
637 | } | 639 | } |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 78157099b167..9b4c4f320130 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -7001,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p) | |||
7001 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 7001 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
7002 | 7002 | ||
7003 | /* | 7003 | /* |
7004 | * Ensure the task's vruntime is normalized, so that when its | 7004 | * Ensure the task's vruntime is normalized, so that when it's |
7005 | * switched back to the fair class the enqueue_entity(.flags=0) will | 7005 | * switched back to the fair class the enqueue_entity(.flags=0) will |
7006 | * do the right thing. | 7006 | * do the right thing. |
7007 | * | 7007 | * |
7008 | * If it was on_rq, then the dequeue_entity(.flags=0) will already | 7008 | * If it's on_rq, then the dequeue_entity(.flags=0) will already |
7009 | * have normalized the vruntime, if it was !on_rq, then only when | 7009 | * have normalized the vruntime, if it's !on_rq, then only when |
7010 | * the task is sleeping will it still have non-normalized vruntime. | 7010 | * the task is sleeping will it still have non-normalized vruntime. |
7011 | */ | 7011 | */ |
7012 | if (!se->on_rq && p->state != TASK_RUNNING) { | 7012 | if (!p->on_rq && p->state != TASK_RUNNING) { |
7013 | /* | 7013 | /* |
7014 | * Fix up our vruntime so that the current sleep doesn't | 7014 | * Fix up our vruntime so that the current sleep doesn't |
7015 | * cause 'unlimited' sleep bonus. | 7015 | * cause 'unlimited' sleep bonus. |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index a2740b775b45..1999021042c7 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) | |||
538 | 538 | ||
539 | #endif /* CONFIG_RT_GROUP_SCHED */ | 539 | #endif /* CONFIG_RT_GROUP_SCHED */ |
540 | 540 | ||
541 | bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) | ||
542 | { | ||
543 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | ||
544 | |||
545 | return (hrtimer_active(&rt_b->rt_period_timer) || | ||
546 | rt_rq->rt_time < rt_b->rt_runtime); | ||
547 | } | ||
548 | |||
541 | #ifdef CONFIG_SMP | 549 | #ifdef CONFIG_SMP |
542 | /* | 550 | /* |
543 | * We ran out of runtime, see if we can borrow some from our neighbours. | 551 | * We ran out of runtime, see if we can borrow some from our neighbours. |
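
Note on the deadline/rt scheduler hunks above: sched_rt_bandwidth_account() gates the deadline class's charging of rt_time, so time is only added while the RT period timer is running or the accumulated rt_time is still below rt_runtime; otherwise rt_time could grow without bound and later throttle RT tasks. A boolean model of the condition, detached from the scheduler structures and using placeholder numbers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the accounting gate: charge only while it can matter. */
    static bool should_account(bool period_timer_active,
                               unsigned long long rt_time,
                               unsigned long long rt_runtime)
    {
        return period_timer_active || rt_time < rt_runtime;
    }

    int main(void)
    {
        /* Timer stopped and budget already consumed: stop charging. */
        printf("%d\n", should_account(false, 950000ULL, 950000ULL));
        /* Timer running: keep charging so the next period sees the usage. */
        printf("%d\n", should_account(true, 950000ULL, 950000ULL));
        return 0;
    }
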
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 2defd1308b04..98f2d7e91a91 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -424,111 +424,134 @@ void debug_dma_dump_mappings(struct device *dev) | |||
424 | EXPORT_SYMBOL(debug_dma_dump_mappings); | 424 | EXPORT_SYMBOL(debug_dma_dump_mappings); |
425 | 425 | ||
426 | /* | 426 | /* |
427 | * For each page mapped (initial page in the case of | 427 | * For each mapping (initial cacheline in the case of |
428 | * dma_alloc_coherent/dma_map_{single|page}, or each page in a | 428 | * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a |
429 | * scatterlist) insert into this tree using the pfn as the key. At | 429 | * scatterlist, or the cacheline specified in dma_map_single) insert |
430 | * into this tree using the cacheline as the key. At | ||
430 | * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If | 431 | * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If |
431 | * the pfn already exists at insertion time add a tag as a reference | 432 | * the entry already exists at insertion time add a tag as a reference |
432 | * count for the overlapping mappings. For now, the overlap tracking | 433 | * count for the overlapping mappings. For now, the overlap tracking |
433 | * just ensures that 'unmaps' balance 'maps' before marking the pfn | 434 | * just ensures that 'unmaps' balance 'maps' before marking the |
434 | * idle, but we should also be flagging overlaps as an API violation. | 435 | * cacheline idle, but we should also be flagging overlaps as an API |
436 | * violation. | ||
435 | * | 437 | * |
436 | * Memory usage is mostly constrained by the maximum number of available | 438 | * Memory usage is mostly constrained by the maximum number of available |
437 | * dma-debug entries in that we need a free dma_debug_entry before | 439 | * dma-debug entries in that we need a free dma_debug_entry before |
438 | * inserting into the tree. In the case of dma_map_{single|page} and | 440 | * inserting into the tree. In the case of dma_map_page and |
439 | * dma_alloc_coherent there is only one dma_debug_entry and one pfn to | 441 | * dma_alloc_coherent there is only one dma_debug_entry and one |
440 | * track per event. dma_map_sg(), on the other hand, | 442 | * dma_active_cacheline entry to track per event. dma_map_sg(), on the |
441 | * consumes a single dma_debug_entry, but inserts 'nents' entries into | 443 | * other hand, consumes a single dma_debug_entry, but inserts 'nents' |
442 | * the tree. | 444 | * entries into the tree. |
443 | * | 445 | * |
444 | * At any time debug_dma_assert_idle() can be called to trigger a | 446 | * At any time debug_dma_assert_idle() can be called to trigger a |
445 | * warning if the given page is in the active set. | 447 | * warning if any cachelines in the given page are in the active set. |
446 | */ | 448 | */ |
447 | static RADIX_TREE(dma_active_pfn, GFP_NOWAIT); | 449 | static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); |
448 | static DEFINE_SPINLOCK(radix_lock); | 450 | static DEFINE_SPINLOCK(radix_lock); |
449 | #define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) | 451 | #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) |
452 | #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) | ||
453 | #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT) | ||
450 | 454 | ||
451 | static int active_pfn_read_overlap(unsigned long pfn) | 455 | static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) |
456 | { | ||
457 | return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + | ||
458 | (entry->offset >> L1_CACHE_SHIFT); | ||
459 | } | ||
460 | |||
461 | static int active_cacheline_read_overlap(phys_addr_t cln) | ||
452 | { | 462 | { |
453 | int overlap = 0, i; | 463 | int overlap = 0, i; |
454 | 464 | ||
455 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) | 465 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) |
456 | if (radix_tree_tag_get(&dma_active_pfn, pfn, i)) | 466 | if (radix_tree_tag_get(&dma_active_cacheline, cln, i)) |
457 | overlap |= 1 << i; | 467 | overlap |= 1 << i; |
458 | return overlap; | 468 | return overlap; |
459 | } | 469 | } |
460 | 470 | ||
461 | static int active_pfn_set_overlap(unsigned long pfn, int overlap) | 471 | static int active_cacheline_set_overlap(phys_addr_t cln, int overlap) |
462 | { | 472 | { |
463 | int i; | 473 | int i; |
464 | 474 | ||
465 | if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0) | 475 | if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0) |
466 | return overlap; | 476 | return overlap; |
467 | 477 | ||
468 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) | 478 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) |
469 | if (overlap & 1 << i) | 479 | if (overlap & 1 << i) |
470 | radix_tree_tag_set(&dma_active_pfn, pfn, i); | 480 | radix_tree_tag_set(&dma_active_cacheline, cln, i); |
471 | else | 481 | else |
472 | radix_tree_tag_clear(&dma_active_pfn, pfn, i); | 482 | radix_tree_tag_clear(&dma_active_cacheline, cln, i); |
473 | 483 | ||
474 | return overlap; | 484 | return overlap; |
475 | } | 485 | } |
476 | 486 | ||
477 | static void active_pfn_inc_overlap(unsigned long pfn) | 487 | static void active_cacheline_inc_overlap(phys_addr_t cln) |
478 | { | 488 | { |
479 | int overlap = active_pfn_read_overlap(pfn); | 489 | int overlap = active_cacheline_read_overlap(cln); |
480 | 490 | ||
481 | overlap = active_pfn_set_overlap(pfn, ++overlap); | 491 | overlap = active_cacheline_set_overlap(cln, ++overlap); |
482 | 492 | ||
483 | /* If we overflowed the overlap counter then we're potentially | 493 | /* If we overflowed the overlap counter then we're potentially |
484 | * leaking dma-mappings. Otherwise, if maps and unmaps are | 494 | * leaking dma-mappings. Otherwise, if maps and unmaps are |
485 | * balanced then this overflow may cause false negatives in | 495 | * balanced then this overflow may cause false negatives in |
486 | * debug_dma_assert_idle() as the pfn may be marked idle | 496 | * debug_dma_assert_idle() as the cacheline may be marked idle |
487 | * prematurely. | 497 | * prematurely. |
488 | */ | 498 | */ |
489 | WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP, | 499 | WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, |
490 | "DMA-API: exceeded %d overlapping mappings of pfn %lx\n", | 500 | "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n", |
491 | ACTIVE_PFN_MAX_OVERLAP, pfn); | 501 | ACTIVE_CACHELINE_MAX_OVERLAP, &cln); |
492 | } | 502 | } |
493 | 503 | ||
494 | static int active_pfn_dec_overlap(unsigned long pfn) | 504 | static int active_cacheline_dec_overlap(phys_addr_t cln) |
495 | { | 505 | { |
496 | int overlap = active_pfn_read_overlap(pfn); | 506 | int overlap = active_cacheline_read_overlap(cln); |
497 | 507 | ||
498 | return active_pfn_set_overlap(pfn, --overlap); | 508 | return active_cacheline_set_overlap(cln, --overlap); |
499 | } | 509 | } |
500 | 510 | ||
501 | static int active_pfn_insert(struct dma_debug_entry *entry) | 511 | static int active_cacheline_insert(struct dma_debug_entry *entry) |
502 | { | 512 | { |
513 | phys_addr_t cln = to_cacheline_number(entry); | ||
503 | unsigned long flags; | 514 | unsigned long flags; |
504 | int rc; | 515 | int rc; |
505 | 516 | ||
517 | /* If the device is not writing memory then we don't have any | ||
518 | * concerns about the cpu consuming stale data. This mitigates | ||
519 | * legitimate usages of overlapping mappings. | ||
520 | */ | ||
521 | if (entry->direction == DMA_TO_DEVICE) | ||
522 | return 0; | ||
523 | |||
506 | spin_lock_irqsave(&radix_lock, flags); | 524 | spin_lock_irqsave(&radix_lock, flags); |
507 | rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry); | 525 | rc = radix_tree_insert(&dma_active_cacheline, cln, entry); |
508 | if (rc == -EEXIST) | 526 | if (rc == -EEXIST) |
509 | active_pfn_inc_overlap(entry->pfn); | 527 | active_cacheline_inc_overlap(cln); |
510 | spin_unlock_irqrestore(&radix_lock, flags); | 528 | spin_unlock_irqrestore(&radix_lock, flags); |
511 | 529 | ||
512 | return rc; | 530 | return rc; |
513 | } | 531 | } |
514 | 532 | ||
515 | static void active_pfn_remove(struct dma_debug_entry *entry) | 533 | static void active_cacheline_remove(struct dma_debug_entry *entry) |
516 | { | 534 | { |
535 | phys_addr_t cln = to_cacheline_number(entry); | ||
517 | unsigned long flags; | 536 | unsigned long flags; |
518 | 537 | ||
538 | /* ...mirror the insert case */ | ||
539 | if (entry->direction == DMA_TO_DEVICE) | ||
540 | return; | ||
541 | |||
519 | spin_lock_irqsave(&radix_lock, flags); | 542 | spin_lock_irqsave(&radix_lock, flags); |
520 | /* since we are counting overlaps the final put of the | 543 | /* since we are counting overlaps the final put of the |
521 | * entry->pfn will occur when the overlap count is 0. | 544 | * cacheline will occur when the overlap count is 0. |
522 | * active_pfn_dec_overlap() returns -1 in that case | 545 | * active_cacheline_dec_overlap() returns -1 in that case |
523 | */ | 546 | */ |
524 | if (active_pfn_dec_overlap(entry->pfn) < 0) | 547 | if (active_cacheline_dec_overlap(cln) < 0) |
525 | radix_tree_delete(&dma_active_pfn, entry->pfn); | 548 | radix_tree_delete(&dma_active_cacheline, cln); |
526 | spin_unlock_irqrestore(&radix_lock, flags); | 549 | spin_unlock_irqrestore(&radix_lock, flags); |
527 | } | 550 | } |
528 | 551 | ||
529 | /** | 552 | /** |
530 | * debug_dma_assert_idle() - assert that a page is not undergoing dma | 553 | * debug_dma_assert_idle() - assert that a page is not undergoing dma |
531 | * @page: page to lookup in the dma_active_pfn tree | 554 | * @page: page to lookup in the dma_active_cacheline tree |
532 | * | 555 | * |
533 | * Place a call to this routine in cases where the cpu touching the page | 556 | * Place a call to this routine in cases where the cpu touching the page |
534 | * before the dma completes (page is dma_unmapped) will lead to data | 557 | * before the dma completes (page is dma_unmapped) will lead to data |
@@ -536,22 +559,38 @@ static void active_pfn_remove(struct dma_debug_entry *entry) | |||
536 | */ | 559 | */ |
537 | void debug_dma_assert_idle(struct page *page) | 560 | void debug_dma_assert_idle(struct page *page) |
538 | { | 561 | { |
562 | static struct dma_debug_entry *ents[CACHELINES_PER_PAGE]; | ||
563 | struct dma_debug_entry *entry = NULL; | ||
564 | void **results = (void **) &ents; | ||
565 | unsigned int nents, i; | ||
539 | unsigned long flags; | 566 | unsigned long flags; |
540 | struct dma_debug_entry *entry; | 567 | phys_addr_t cln; |
541 | 568 | ||
542 | if (!page) | 569 | if (!page) |
543 | return; | 570 | return; |
544 | 571 | ||
572 | cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT; | ||
545 | spin_lock_irqsave(&radix_lock, flags); | 573 | spin_lock_irqsave(&radix_lock, flags); |
546 | entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page)); | 574 | nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln, |
575 | CACHELINES_PER_PAGE); | ||
576 | for (i = 0; i < nents; i++) { | ||
577 | phys_addr_t ent_cln = to_cacheline_number(ents[i]); | ||
578 | |||
579 | if (ent_cln == cln) { | ||
580 | entry = ents[i]; | ||
581 | break; | ||
582 | } else if (ent_cln >= cln + CACHELINES_PER_PAGE) | ||
583 | break; | ||
584 | } | ||
547 | spin_unlock_irqrestore(&radix_lock, flags); | 585 | spin_unlock_irqrestore(&radix_lock, flags); |
548 | 586 | ||
549 | if (!entry) | 587 | if (!entry) |
550 | return; | 588 | return; |
551 | 589 | ||
590 | cln = to_cacheline_number(entry); | ||
552 | err_printk(entry->dev, entry, | 591 | err_printk(entry->dev, entry, |
553 | "DMA-API: cpu touching an active dma mapped page " | 592 | "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n", |
554 | "[pfn=0x%lx]\n", entry->pfn); | 593 | &cln); |
555 | } | 594 | } |
556 | 595 | ||
557 | /* | 596 | /* |
@@ -568,9 +607,9 @@ static void add_dma_entry(struct dma_debug_entry *entry) | |||
568 | hash_bucket_add(bucket, entry); | 607 | hash_bucket_add(bucket, entry); |
569 | put_hash_bucket(bucket, &flags); | 608 | put_hash_bucket(bucket, &flags); |
570 | 609 | ||
571 | rc = active_pfn_insert(entry); | 610 | rc = active_cacheline_insert(entry); |
572 | if (rc == -ENOMEM) { | 611 | if (rc == -ENOMEM) { |
573 | pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n"); | 612 | pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n"); |
574 | global_disable = true; | 613 | global_disable = true; |
575 | } | 614 | } |
576 | 615 | ||
@@ -631,7 +670,7 @@ static void dma_entry_free(struct dma_debug_entry *entry) | |||
631 | { | 670 | { |
632 | unsigned long flags; | 671 | unsigned long flags; |
633 | 672 | ||
634 | active_pfn_remove(entry); | 673 | active_cacheline_remove(entry); |
635 | 674 | ||
636 | /* | 675 | /* |
637 | * add to beginning of the list - this way the entries are | 676 | * add to beginning of the list - this way the entries are |
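
A rough, userspace-only sketch of the page-to-cacheline bookkeeping used above. The shift values are assumptions for 4 KiB pages and 64-byte lines (the kernel derives them from PAGE_SHIFT and the L1 cache shift); it only illustrates how one radix-tree key per cacheline replaces the old one-key-per-pfn scheme, and how debug_dma_assert_idle() can then scan all keys belonging to a single page.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT              12      /* assumed: 4 KiB pages */
#define CACHELINE_SHIFT          6      /* assumed: 64-byte lines */
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - CACHELINE_SHIFT)
#define CACHELINES_PER_PAGE     (1u << CACHELINE_PER_PAGE_SHIFT)

/* mirrors the idea behind to_cacheline_number(): scale the page frame
 * number to cacheline granularity and add the line within the page */
static uint64_t to_cacheline_number(uint64_t pfn, unsigned int offset)
{
        return (pfn << CACHELINE_PER_PAGE_SHIFT) +
               ((offset & ((1u << PAGE_SHIFT) - 1)) >> CACHELINE_SHIFT);
}

int main(void)
{
        uint64_t base = (uint64_t)0x1234 << CACHELINE_PER_PAGE_SHIFT;

        printf("key %llu, %u keys per page\n",
               (unsigned long long)to_cacheline_number(0x1234, 0x80),
               CACHELINES_PER_PAGE);
        /* debug_dma_assert_idle() scans [base, base + CACHELINES_PER_PAGE) */
        printf("page 0x1234 covers keys %llu..%llu\n",
               (unsigned long long)base,
               (unsigned long long)(base + CACHELINES_PER_PAGE - 1));
        return 0;
}
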
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 7811ed3b4e70..bd4a8dfdf0b8 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -1253,8 +1253,10 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | |||
1253 | 1253 | ||
1254 | node = indirect_to_ptr(node); | 1254 | node = indirect_to_ptr(node); |
1255 | max_index = radix_tree_maxindex(node->height); | 1255 | max_index = radix_tree_maxindex(node->height); |
1256 | if (cur_index > max_index) | 1256 | if (cur_index > max_index) { |
1257 | rcu_read_unlock(); | ||
1257 | break; | 1258 | break; |
1259 | } | ||
1258 | 1260 | ||
1259 | cur_index = __locate(node, item, cur_index, &found_index); | 1261 | cur_index = __locate(node, item, cur_index, &found_index); |
1260 | rcu_read_unlock(); | 1262 | rcu_read_unlock(); |
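
The radix-tree change above is a lock-balance fix: the early break used to leave the RCU read-side critical section held. A generic, non-kernel sketch of the pattern, with a pthread mutex standing in for rcu_read_lock()/rcu_read_unlock() — every path out of the loop body must drop the lock it took at the top.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long walk(unsigned long max_index)
{
        unsigned long cur = 0;

        for (;;) {
                pthread_mutex_lock(&lock);      /* ~ rcu_read_lock() */
                if (cur > max_index) {
                        /* the fix: release before breaking out */
                        pthread_mutex_unlock(&lock);
                        break;
                }
                /* ... inspect the shared structure under the lock ... */
                pthread_mutex_unlock(&lock);    /* ~ rcu_read_unlock() */
                cur++;
        }
        return cur;
}

int main(void)
{
        return walk(3) == 4 ? 0 : 1;
}
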
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4df39b1bde91..1546655a2d78 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1961,7 +1961,7 @@ out: | |||
1961 | return ret; | 1961 | return ret; |
1962 | } | 1962 | } |
1963 | 1963 | ||
1964 | #define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE) | 1964 | #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) |
1965 | 1965 | ||
1966 | int hugepage_madvise(struct vm_area_struct *vma, | 1966 | int hugepage_madvise(struct vm_area_struct *vma, |
1967 | unsigned long *vm_flags, int advice) | 1967 | unsigned long *vm_flags, int advice) |
@@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item) | |||
444 | static struct page *page_trans_compound_anon(struct page *page) | 444 | static struct page *page_trans_compound_anon(struct page *page) |
445 | { | 445 | { |
446 | if (PageTransCompound(page)) { | 446 | if (PageTransCompound(page)) { |
447 | struct page *head = compound_trans_head(page); | 447 | struct page *head = compound_head(page); |
448 | /* | 448 | /* |
449 | 	 * head may actually be split and freed from under | 449 | 	 * head may actually be split and freed from under |
450 | * us but it's ok here. | 450 | * us but it's ok here. |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ce7a8cc7b404..5b6b0039f725 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1127,8 +1127,8 @@ skip_node: | |||
1127 | * skipping css reference should be safe. | 1127 | * skipping css reference should be safe. |
1128 | */ | 1128 | */ |
1129 | if (next_css) { | 1129 | if (next_css) { |
1130 | if ((next_css->flags & CSS_ONLINE) && | 1130 | if ((next_css == &root->css) || |
1131 | (next_css == &root->css || css_tryget(next_css))) | 1131 | ((next_css->flags & CSS_ONLINE) && css_tryget(next_css))) |
1132 | return mem_cgroup_from_css(next_css); | 1132 | return mem_cgroup_from_css(next_css); |
1133 | 1133 | ||
1134 | prev_css = next_css; | 1134 | prev_css = next_css; |
@@ -6595,6 +6595,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) | |||
6595 | { | 6595 | { |
6596 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 6596 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); |
6597 | struct mem_cgroup_event *event, *tmp; | 6597 | struct mem_cgroup_event *event, *tmp; |
6598 | struct cgroup_subsys_state *iter; | ||
6598 | 6599 | ||
6599 | /* | 6600 | /* |
6600 | * Unregister events and notify userspace. | 6601 | * Unregister events and notify userspace. |
@@ -6611,7 +6612,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) | |||
6611 | kmem_cgroup_css_offline(memcg); | 6612 | kmem_cgroup_css_offline(memcg); |
6612 | 6613 | ||
6613 | mem_cgroup_invalidate_reclaim_iterators(memcg); | 6614 | mem_cgroup_invalidate_reclaim_iterators(memcg); |
6614 | mem_cgroup_reparent_charges(memcg); | 6615 | |
6616 | /* | ||
6617 | * This requires that offlining is serialized. Right now that is | ||
6618 | * guaranteed because css_killed_work_fn() holds the cgroup_mutex. | ||
6619 | */ | ||
6620 | css_for_each_descendant_post(iter, css) | ||
6621 | mem_cgroup_reparent_charges(mem_cgroup_from_css(iter)); | ||
6622 | |||
6615 | mem_cgroup_destroy_all_caches(memcg); | 6623 | mem_cgroup_destroy_all_caches(memcg); |
6616 | vmpressure_cleanup(&memcg->vmpressure); | 6624 | vmpressure_cleanup(&memcg->vmpressure); |
6617 | } | 6625 | } |
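
A hedged sketch of why the offline path above reparents descendants with a post-order walk: children are drained before their parent, and the origin group comes last, so no child is left holding charges after its parent has already been processed. The tree type and walker below are stand-ins, not the cgroup iterator API.

#include <stdio.h>

struct grp {
        const char *name;
        struct grp *child;      /* first child */
        struct grp *sibling;    /* next sibling */
};

/* children first, node last -- the same ordering that
 * css_for_each_descendant_post() provides */
static void reparent_charges(struct grp *g)
{
        struct grp *c;

        if (!g)
                return;
        for (c = g->child; c; c = c->sibling)
                reparent_charges(c);
        printf("reparent charges of %s\n", g->name);
}

int main(void)
{
        struct grp leaf = { "leaf", NULL, NULL };
        struct grp mid  = { "mid",  &leaf, NULL };
        struct grp root = { "root", &mid,  NULL };

        reparent_charges(&root);        /* prints leaf, mid, root */
        return 0;
}
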
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 2f2f34a4e77d..90002ea43638 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -1651,7 +1651,7 @@ int soft_offline_page(struct page *page, int flags) | |||
1651 | { | 1651 | { |
1652 | int ret; | 1652 | int ret; |
1653 | unsigned long pfn = page_to_pfn(page); | 1653 | unsigned long pfn = page_to_pfn(page); |
1654 | struct page *hpage = compound_trans_head(page); | 1654 | struct page *hpage = compound_head(page); |
1655 | 1655 | ||
1656 | if (PageHWPoison(page)) { | 1656 | if (PageHWPoison(page)) { |
1657 | pr_info("soft offline: %#lx page already poisoned\n", pfn); | 1657 | pr_info("soft offline: %#lx page already poisoned\n", pfn); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e3758a09a009..3bac76ae4b30 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order) | |||
369 | __SetPageHead(page); | 369 | __SetPageHead(page); |
370 | for (i = 1; i < nr_pages; i++) { | 370 | for (i = 1; i < nr_pages; i++) { |
371 | struct page *p = page + i; | 371 | struct page *p = page + i; |
372 | __SetPageTail(p); | ||
373 | set_page_count(p, 0); | 372 | set_page_count(p, 0); |
374 | p->first_page = page; | 373 | p->first_page = page; |
374 | /* Make sure p->first_page is always valid for PageTail() */ | ||
375 | smp_wmb(); | ||
376 | __SetPageTail(p); | ||
375 | } | 377 | } |
376 | } | 378 | } |
377 | 379 | ||
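
A minimal sketch of the publication ordering introduced above, using C11 release/acquire in place of smp_wmb(): the head pointer must be visible before the flag that tells readers it is safe to follow it. The struct and flag here are illustrative, not the real struct page layout.

#include <stdatomic.h>
#include <stddef.h>

struct fake_page {
        struct fake_page *first_page;
        atomic_bool tail;               /* stands in for PageTail() */
};

static struct fake_page head, tail;

/* writer: set first_page, then publish the tail flag
 * (release store ~ smp_wmb() + plain store) */
static void make_tail(struct fake_page *p, struct fake_page *h)
{
        p->first_page = h;
        atomic_store_explicit(&p->tail, 1, memory_order_release);
}

/* reader: if the flag is observed, first_page is guaranteed valid */
static struct fake_page *head_of(struct fake_page *p)
{
        if (atomic_load_explicit(&p->tail, memory_order_acquire))
                return p->first_page;
        return p;
}

int main(void)
{
        make_tail(&tail, &head);
        return head_of(&tail) == &head ? 0 : 1;
}
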
@@ -1236,6 +1238,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) | |||
1236 | } | 1238 | } |
1237 | local_irq_restore(flags); | 1239 | local_irq_restore(flags); |
1238 | } | 1240 | } |
1241 | static bool gfp_thisnode_allocation(gfp_t gfp_mask) | ||
1242 | { | ||
1243 | return (gfp_mask & GFP_THISNODE) == GFP_THISNODE; | ||
1244 | } | ||
1245 | #else | ||
1246 | static bool gfp_thisnode_allocation(gfp_t gfp_mask) | ||
1247 | { | ||
1248 | return false; | ||
1249 | } | ||
1239 | #endif | 1250 | #endif |
1240 | 1251 | ||
1241 | /* | 1252 | /* |
@@ -1572,7 +1583,13 @@ again: | |||
1572 | get_pageblock_migratetype(page)); | 1583 | get_pageblock_migratetype(page)); |
1573 | } | 1584 | } |
1574 | 1585 | ||
1575 | __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); | 1586 | /* |
1587 | * NOTE: GFP_THISNODE allocations do not partake in the kswapd | ||
1588 | * aging protocol, so they can't be fair. | ||
1589 | */ | ||
1590 | if (!gfp_thisnode_allocation(gfp_flags)) | ||
1591 | __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); | ||
1592 | |||
1576 | __count_zone_vm_events(PGALLOC, zone, 1 << order); | 1593 | __count_zone_vm_events(PGALLOC, zone, 1 << order); |
1577 | zone_statistics(preferred_zone, zone, gfp_flags); | 1594 | zone_statistics(preferred_zone, zone, gfp_flags); |
1578 | local_irq_restore(flags); | 1595 | local_irq_restore(flags); |
@@ -1944,8 +1961,12 @@ zonelist_scan: | |||
1944 | * ultimately fall back to remote zones that do not | 1961 | * ultimately fall back to remote zones that do not |
1945 | * partake in the fairness round-robin cycle of this | 1962 | * partake in the fairness round-robin cycle of this |
1946 | * zonelist. | 1963 | * zonelist. |
1964 | * | ||
1965 | * NOTE: GFP_THISNODE allocations do not partake in | ||
1966 | * the kswapd aging protocol, so they can't be fair. | ||
1947 | */ | 1967 | */ |
1948 | if (alloc_flags & ALLOC_WMARK_LOW) { | 1968 | if ((alloc_flags & ALLOC_WMARK_LOW) && |
1969 | !gfp_thisnode_allocation(gfp_mask)) { | ||
1949 | if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) | 1970 | if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) |
1950 | continue; | 1971 | continue; |
1951 | if (!zone_local(preferred_zone, zone)) | 1972 | if (!zone_local(preferred_zone, zone)) |
@@ -2501,8 +2522,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, | |||
2501 | * allowed per node queues are empty and that nodes are | 2522 | * allowed per node queues are empty and that nodes are |
2502 | * over allocated. | 2523 | * over allocated. |
2503 | */ | 2524 | */ |
2504 | if (IS_ENABLED(CONFIG_NUMA) && | 2525 | if (gfp_thisnode_allocation(gfp_mask)) |
2505 | (gfp_mask & GFP_THISNODE) == GFP_THISNODE) | ||
2506 | goto nopage; | 2526 | goto nopage; |
2507 | 2527 | ||
2508 | restart: | 2528 | restart: |
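
The gfp_thisnode_allocation() helper introduced above tests the whole GFP_THISNODE combination, not an individual bit. A hedged sketch with made-up flag values showing why (mask & combo) == combo behaves differently from a plain mask & combo test:

#include <stdbool.h>
#include <stdio.h>

/* illustrative values only; the real GFP bits differ */
#define F_THISNODE      0x01u
#define F_NOWARN        0x02u
#define F_NORETRY       0x04u
#define F_COMBO         (F_THISNODE | F_NOWARN | F_NORETRY)

static bool is_thisnode_allocation(unsigned int mask)
{
        /* all of the combo's bits must be set, not merely any one */
        return (mask & F_COMBO) == F_COMBO;
}

int main(void)
{
        printf("%d\n", is_thisnode_allocation(F_THISNODE));            /* 0 */
        printf("%d\n", is_thisnode_allocation(F_COMBO | 0x10u));       /* 1 */
        return 0;
}
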
@@ -98,7 +98,7 @@ static void put_compound_page(struct page *page) | |||
98 | } | 98 | } |
99 | 99 | ||
100 | /* __split_huge_page_refcount can run under us */ | 100 | /* __split_huge_page_refcount can run under us */ |
101 | page_head = compound_trans_head(page); | 101 | page_head = compound_head(page); |
102 | 102 | ||
103 | /* | 103 | /* |
104 | * THP can not break up slab pages so avoid taking | 104 | * THP can not break up slab pages so avoid taking |
@@ -253,7 +253,7 @@ bool __get_page_tail(struct page *page) | |||
253 | */ | 253 | */ |
254 | unsigned long flags; | 254 | unsigned long flags; |
255 | bool got; | 255 | bool got; |
256 | struct page *page_head = compound_trans_head(page); | 256 | struct page *page_head = compound_head(page); |
257 | 257 | ||
258 | /* Ref to put_compound_page() comment. */ | 258 | /* Ref to put_compound_page() comment. */ |
259 | if (!__compound_tail_refcounted(page_head)) { | 259 | if (!__compound_tail_refcounted(page_head)) { |
diff --git a/net/can/raw.c b/net/can/raw.c index 8be757cca2ec..081e81fd017f 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -121,13 +121,9 @@ static void raw_rcv(struct sk_buff *oskb, void *data) | |||
121 | if (!ro->recv_own_msgs && oskb->sk == sk) | 121 | if (!ro->recv_own_msgs && oskb->sk == sk) |
122 | return; | 122 | return; |
123 | 123 | ||
124 | /* do not pass frames with DLC > 8 to a legacy socket */ | 124 | /* do not pass non-CAN2.0 frames to a legacy socket */ |
125 | if (!ro->fd_frames) { | 125 | if (!ro->fd_frames && oskb->len != CAN_MTU) |
126 | struct canfd_frame *cfd = (struct canfd_frame *)oskb->data; | 126 | return; |
127 | |||
128 | if (unlikely(cfd->len > CAN_MAX_DLEN)) | ||
129 | return; | ||
130 | } | ||
131 | 127 | ||
132 | /* clone the given skb to be able to enqueue it into the rcv queue */ | 128 | /* clone the given skb to be able to enqueue it into the rcv queue */ |
133 | skb = skb_clone(oskb, GFP_ATOMIC); | 129 | skb = skb_clone(oskb, GFP_ATOMIC); |
@@ -738,9 +734,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
738 | struct msghdr *msg, size_t size, int flags) | 734 | struct msghdr *msg, size_t size, int flags) |
739 | { | 735 | { |
740 | struct sock *sk = sock->sk; | 736 | struct sock *sk = sock->sk; |
741 | struct raw_sock *ro = raw_sk(sk); | ||
742 | struct sk_buff *skb; | 737 | struct sk_buff *skb; |
743 | int rxmtu; | ||
744 | int err = 0; | 738 | int err = 0; |
745 | int noblock; | 739 | int noblock; |
746 | 740 | ||
@@ -751,20 +745,10 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
751 | if (!skb) | 745 | if (!skb) |
752 | return err; | 746 | return err; |
753 | 747 | ||
754 | /* | 748 | if (size < skb->len) |
755 | * when serving a legacy socket the DLC <= 8 is already checked inside | ||
756 | * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy | ||
757 | * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU | ||
758 | */ | ||
759 | if (!ro->fd_frames) | ||
760 | rxmtu = CAN_MTU; | ||
761 | else | ||
762 | rxmtu = skb->len; | ||
763 | |||
764 | if (size < rxmtu) | ||
765 | msg->msg_flags |= MSG_TRUNC; | 749 | msg->msg_flags |= MSG_TRUNC; |
766 | else | 750 | else |
767 | size = rxmtu; | 751 | size = skb->len; |
768 | 752 | ||
769 | err = memcpy_toiovec(msg->msg_iov, skb->data, size); | 753 | err = memcpy_toiovec(msg->msg_iov, skb->data, size); |
770 | if (err < 0) { | 754 | if (err < 0) { |
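
The receive path above now keys the legacy-socket filter off the skb length instead of the frame's DLC: anything that is not exactly CAN_MTU bytes (a classic CAN 2.0 frame) is dropped when the socket has not enabled CAN FD frames. A rough userspace-style sketch of that decision; the MTU values match linux/can.h but are restated here only for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CAN_MTU         16      /* sizeof(struct can_frame) */
#define CANFD_MTU       72      /* sizeof(struct canfd_frame) */

/* legacy sockets (fd_frames off) only ever see classic frames */
static bool deliver_to_socket(bool fd_frames, size_t skb_len)
{
        if (!fd_frames && skb_len != CAN_MTU)
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", deliver_to_socket(false, CAN_MTU));      /* 1 */
        printf("%d\n", deliver_to_socket(false, CANFD_MTU));    /* 0 */
        printf("%d\n", deliver_to_socket(true,  CANFD_MTU));    /* 1 */
        return 0;
}
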
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index b9e9e0d38672..e16129019c66 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -766,9 +766,6 @@ static void neigh_periodic_work(struct work_struct *work) | |||
766 | nht = rcu_dereference_protected(tbl->nht, | 766 | nht = rcu_dereference_protected(tbl->nht, |
767 | lockdep_is_held(&tbl->lock)); | 767 | lockdep_is_held(&tbl->lock)); |
768 | 768 | ||
769 | if (atomic_read(&tbl->entries) < tbl->gc_thresh1) | ||
770 | goto out; | ||
771 | |||
772 | /* | 769 | /* |
773 | * periodically recompute ReachableTime from random function | 770 | * periodically recompute ReachableTime from random function |
774 | */ | 771 | */ |
@@ -781,6 +778,9 @@ static void neigh_periodic_work(struct work_struct *work) | |||
781 | neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); | 778 | neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); |
782 | } | 779 | } |
783 | 780 | ||
781 | if (atomic_read(&tbl->entries) < tbl->gc_thresh1) | ||
782 | goto out; | ||
783 | |||
784 | for (i = 0 ; i < (1 << nht->hash_shift); i++) { | 784 | for (i = 0 ; i < (1 << nht->hash_shift); i++) { |
785 | np = &nht->hash_buckets[i]; | 785 | np = &nht->hash_buckets[i]; |
786 | 786 | ||
@@ -3046,7 +3046,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, | |||
3046 | if (!t) | 3046 | if (!t) |
3047 | goto err; | 3047 | goto err; |
3048 | 3048 | ||
3049 | for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) { | 3049 | for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) { |
3050 | t->neigh_vars[i].data += (long) p; | 3050 | t->neigh_vars[i].data += (long) p; |
3051 | t->neigh_vars[i].extra1 = dev; | 3051 | t->neigh_vars[i].extra1 = dev; |
3052 | t->neigh_vars[i].extra2 = p; | 3052 | t->neigh_vars[i].extra2 = p; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5976ef0846bd..5d6236d9fdce 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -707,9 +707,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
707 | new->mark = old->mark; | 707 | new->mark = old->mark; |
708 | new->skb_iif = old->skb_iif; | 708 | new->skb_iif = old->skb_iif; |
709 | __nf_copy(new, old); | 709 | __nf_copy(new, old); |
710 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) | ||
711 | new->nf_trace = old->nf_trace; | ||
712 | #endif | ||
713 | #ifdef CONFIG_NET_SCHED | 710 | #ifdef CONFIG_NET_SCHED |
714 | new->tc_index = old->tc_index; | 711 | new->tc_index = old->tc_index; |
715 | #ifdef CONFIG_NET_CLS_ACT | 712 | #ifdef CONFIG_NET_CLS_ACT |
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 327060c6c874..7ae0d7f6dbd0 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c | |||
@@ -297,7 +297,7 @@ static bool seq_nr_after(u16 a, u16 b) | |||
297 | 297 | ||
298 | void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx) | 298 | void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx) |
299 | { | 299 | { |
300 | if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) { | 300 | if ((dev_idx < 0) || (dev_idx >= HSR_MAX_SLAVE)) { |
301 | WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx); | 301 | WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx); |
302 | return; | 302 | return; |
303 | } | 303 | } |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ecd2c3f245ce..19ab78aca547 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1296,8 +1296,11 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
1296 | 1296 | ||
1297 | segs = ERR_PTR(-EPROTONOSUPPORT); | 1297 | segs = ERR_PTR(-EPROTONOSUPPORT); |
1298 | 1298 | ||
1299 | /* Note : following gso_segment() might change skb->encapsulation */ | 1299 | if (skb->encapsulation && |
1300 | udpfrag = !skb->encapsulation && proto == IPPROTO_UDP; | 1300 | skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) |
1301 | udpfrag = proto == IPPROTO_UDP && encap; | ||
1302 | else | ||
1303 | udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; | ||
1301 | 1304 | ||
1302 | ops = rcu_dereference(inet_offloads[proto]); | 1305 | ops = rcu_dereference(inet_offloads[proto]); |
1303 | if (likely(ops && ops->callbacks.gso_segment)) | 1306 | if (likely(ops && ops->callbacks.gso_segment)) |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 8971780aec7c..73c6b63bba74 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -422,9 +422,6 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
422 | to->tc_index = from->tc_index; | 422 | to->tc_index = from->tc_index; |
423 | #endif | 423 | #endif |
424 | nf_copy(to, from); | 424 | nf_copy(to, from); |
425 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) | ||
426 | to->nf_trace = from->nf_trace; | ||
427 | #endif | ||
428 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) | 425 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) |
429 | to->ipvs_property = from->ipvs_property; | 426 | to->ipvs_property = from->ipvs_property; |
430 | #endif | 427 | #endif |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 50228be5c17b..78a89e61925d 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -93,13 +93,14 @@ static void tunnel_dst_reset(struct ip_tunnel *t) | |||
93 | tunnel_dst_set(t, NULL); | 93 | tunnel_dst_set(t, NULL); |
94 | } | 94 | } |
95 | 95 | ||
96 | static void tunnel_dst_reset_all(struct ip_tunnel *t) | 96 | void ip_tunnel_dst_reset_all(struct ip_tunnel *t) |
97 | { | 97 | { |
98 | int i; | 98 | int i; |
99 | 99 | ||
100 | for_each_possible_cpu(i) | 100 | for_each_possible_cpu(i) |
101 | __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); | 101 | __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); |
102 | } | 102 | } |
103 | EXPORT_SYMBOL(ip_tunnel_dst_reset_all); | ||
103 | 104 | ||
104 | static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie) | 105 | static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie) |
105 | { | 106 | { |
@@ -119,52 +120,6 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie) | |||
119 | return (struct rtable *)dst; | 120 | return (struct rtable *)dst; |
120 | } | 121 | } |
121 | 122 | ||
122 | /* Often modified stats are per cpu, other are shared (netdev->stats) */ | ||
123 | struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, | ||
124 | struct rtnl_link_stats64 *tot) | ||
125 | { | ||
126 | int i; | ||
127 | |||
128 | for_each_possible_cpu(i) { | ||
129 | const struct pcpu_sw_netstats *tstats = | ||
130 | per_cpu_ptr(dev->tstats, i); | ||
131 | u64 rx_packets, rx_bytes, tx_packets, tx_bytes; | ||
132 | unsigned int start; | ||
133 | |||
134 | do { | ||
135 | start = u64_stats_fetch_begin_bh(&tstats->syncp); | ||
136 | rx_packets = tstats->rx_packets; | ||
137 | tx_packets = tstats->tx_packets; | ||
138 | rx_bytes = tstats->rx_bytes; | ||
139 | tx_bytes = tstats->tx_bytes; | ||
140 | } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); | ||
141 | |||
142 | tot->rx_packets += rx_packets; | ||
143 | tot->tx_packets += tx_packets; | ||
144 | tot->rx_bytes += rx_bytes; | ||
145 | tot->tx_bytes += tx_bytes; | ||
146 | } | ||
147 | |||
148 | tot->multicast = dev->stats.multicast; | ||
149 | |||
150 | tot->rx_crc_errors = dev->stats.rx_crc_errors; | ||
151 | tot->rx_fifo_errors = dev->stats.rx_fifo_errors; | ||
152 | tot->rx_length_errors = dev->stats.rx_length_errors; | ||
153 | tot->rx_frame_errors = dev->stats.rx_frame_errors; | ||
154 | tot->rx_errors = dev->stats.rx_errors; | ||
155 | |||
156 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; | ||
157 | tot->tx_carrier_errors = dev->stats.tx_carrier_errors; | ||
158 | tot->tx_dropped = dev->stats.tx_dropped; | ||
159 | tot->tx_aborted_errors = dev->stats.tx_aborted_errors; | ||
160 | tot->tx_errors = dev->stats.tx_errors; | ||
161 | |||
162 | tot->collisions = dev->stats.collisions; | ||
163 | |||
164 | return tot; | ||
165 | } | ||
166 | EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64); | ||
167 | |||
168 | static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p, | 123 | static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p, |
169 | __be16 flags, __be32 key) | 124 | __be16 flags, __be32 key) |
170 | { | 125 | { |
@@ -759,7 +714,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn, | |||
759 | if (set_mtu) | 714 | if (set_mtu) |
760 | dev->mtu = mtu; | 715 | dev->mtu = mtu; |
761 | } | 716 | } |
762 | tunnel_dst_reset_all(t); | 717 | ip_tunnel_dst_reset_all(t); |
763 | netdev_state_change(dev); | 718 | netdev_state_change(dev); |
764 | } | 719 | } |
765 | 720 | ||
@@ -1088,7 +1043,7 @@ void ip_tunnel_uninit(struct net_device *dev) | |||
1088 | if (itn->fb_tunnel_dev != dev) | 1043 | if (itn->fb_tunnel_dev != dev) |
1089 | ip_tunnel_del(netdev_priv(dev)); | 1044 | ip_tunnel_del(netdev_priv(dev)); |
1090 | 1045 | ||
1091 | tunnel_dst_reset_all(tunnel); | 1046 | ip_tunnel_dst_reset_all(tunnel); |
1092 | } | 1047 | } |
1093 | EXPORT_SYMBOL_GPL(ip_tunnel_uninit); | 1048 | EXPORT_SYMBOL_GPL(ip_tunnel_uninit); |
1094 | 1049 | ||
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 6156f4ef5e91..6f847dd56dbc 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -108,7 +108,6 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) | |||
108 | nf_reset(skb); | 108 | nf_reset(skb); |
109 | secpath_reset(skb); | 109 | secpath_reset(skb); |
110 | skb_clear_hash_if_not_l4(skb); | 110 | skb_clear_hash_if_not_l4(skb); |
111 | skb_dst_drop(skb); | ||
112 | skb->vlan_tci = 0; | 111 | skb->vlan_tci = 0; |
113 | skb_set_queue_mapping(skb, 0); | 112 | skb_set_queue_mapping(skb, 0); |
114 | skb->pkt_type = PACKET_HOST; | 113 | skb->pkt_type = PACKET_HOST; |
@@ -148,3 +147,49 @@ error: | |||
148 | return ERR_PTR(err); | 147 | return ERR_PTR(err); |
149 | } | 148 | } |
150 | EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); | 149 | EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); |
150 | |||
151 | /* Often modified stats are per cpu, other are shared (netdev->stats) */ | ||
152 | struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, | ||
153 | struct rtnl_link_stats64 *tot) | ||
154 | { | ||
155 | int i; | ||
156 | |||
157 | for_each_possible_cpu(i) { | ||
158 | const struct pcpu_sw_netstats *tstats = | ||
159 | per_cpu_ptr(dev->tstats, i); | ||
160 | u64 rx_packets, rx_bytes, tx_packets, tx_bytes; | ||
161 | unsigned int start; | ||
162 | |||
163 | do { | ||
164 | start = u64_stats_fetch_begin_bh(&tstats->syncp); | ||
165 | rx_packets = tstats->rx_packets; | ||
166 | tx_packets = tstats->tx_packets; | ||
167 | rx_bytes = tstats->rx_bytes; | ||
168 | tx_bytes = tstats->tx_bytes; | ||
169 | } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); | ||
170 | |||
171 | tot->rx_packets += rx_packets; | ||
172 | tot->tx_packets += tx_packets; | ||
173 | tot->rx_bytes += rx_bytes; | ||
174 | tot->tx_bytes += tx_bytes; | ||
175 | } | ||
176 | |||
177 | tot->multicast = dev->stats.multicast; | ||
178 | |||
179 | tot->rx_crc_errors = dev->stats.rx_crc_errors; | ||
180 | tot->rx_fifo_errors = dev->stats.rx_fifo_errors; | ||
181 | tot->rx_length_errors = dev->stats.rx_length_errors; | ||
182 | tot->rx_frame_errors = dev->stats.rx_frame_errors; | ||
183 | tot->rx_errors = dev->stats.rx_errors; | ||
184 | |||
185 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; | ||
186 | tot->tx_carrier_errors = dev->stats.tx_carrier_errors; | ||
187 | tot->tx_dropped = dev->stats.tx_dropped; | ||
188 | tot->tx_aborted_errors = dev->stats.tx_aborted_errors; | ||
189 | tot->tx_errors = dev->stats.tx_errors; | ||
190 | |||
191 | tot->collisions = dev->stats.collisions; | ||
192 | |||
193 | return tot; | ||
194 | } | ||
195 | EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64); | ||
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index d551e31b416e..7c676671329d 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -1198,8 +1198,8 @@ static int snmp_translate(struct nf_conn *ct, | |||
1198 | map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); | 1198 | map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); |
1199 | } else { | 1199 | } else { |
1200 | /* DNAT replies */ | 1200 | /* DNAT replies */ |
1201 | map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); | 1201 | map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip); |
1202 | map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); | 1202 | map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip); |
1203 | } | 1203 | } |
1204 | 1204 | ||
1205 | if (map.from == map.to) | 1205 | if (map.from == map.to) |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9f3a2db9109e..97c8f5620c43 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1044,7 +1044,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp) | |||
1044 | } | 1044 | } |
1045 | } | 1045 | } |
1046 | 1046 | ||
1047 | static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size) | 1047 | static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, |
1048 | int *copied, size_t size) | ||
1048 | { | 1049 | { |
1049 | struct tcp_sock *tp = tcp_sk(sk); | 1050 | struct tcp_sock *tp = tcp_sk(sk); |
1050 | int err, flags; | 1051 | int err, flags; |
@@ -1059,11 +1060,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size) | |||
1059 | if (unlikely(tp->fastopen_req == NULL)) | 1060 | if (unlikely(tp->fastopen_req == NULL)) |
1060 | return -ENOBUFS; | 1061 | return -ENOBUFS; |
1061 | tp->fastopen_req->data = msg; | 1062 | tp->fastopen_req->data = msg; |
1063 | tp->fastopen_req->size = size; | ||
1062 | 1064 | ||
1063 | flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; | 1065 | flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; |
1064 | err = __inet_stream_connect(sk->sk_socket, msg->msg_name, | 1066 | err = __inet_stream_connect(sk->sk_socket, msg->msg_name, |
1065 | msg->msg_namelen, flags); | 1067 | msg->msg_namelen, flags); |
1066 | *size = tp->fastopen_req->copied; | 1068 | *copied = tp->fastopen_req->copied; |
1067 | tcp_free_fastopen_req(tp); | 1069 | tcp_free_fastopen_req(tp); |
1068 | return err; | 1070 | return err; |
1069 | } | 1071 | } |
@@ -1083,7 +1085,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1083 | 1085 | ||
1084 | flags = msg->msg_flags; | 1086 | flags = msg->msg_flags; |
1085 | if (flags & MSG_FASTOPEN) { | 1087 | if (flags & MSG_FASTOPEN) { |
1086 | err = tcp_sendmsg_fastopen(sk, msg, &copied_syn); | 1088 | err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); |
1087 | if (err == -EINPROGRESS && copied_syn > 0) | 1089 | if (err == -EINPROGRESS && copied_syn > 0) |
1088 | goto out; | 1090 | goto out; |
1089 | else if (err) | 1091 | else if (err) |
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index ad37bf18ae4b..2388275adb9b 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -290,8 +290,7 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) | |||
290 | left = tp->snd_cwnd - in_flight; | 290 | left = tp->snd_cwnd - in_flight; |
291 | if (sk_can_gso(sk) && | 291 | if (sk_can_gso(sk) && |
292 | left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && | 292 | left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && |
293 | left * tp->mss_cache < sk->sk_gso_max_size && | 293 | left < tp->xmit_size_goal_segs) |
294 | left < sk->sk_gso_max_segs) | ||
295 | return true; | 294 | return true; |
296 | return left <= tcp_max_tso_deferred_mss(tp); | 295 | return left <= tcp_max_tso_deferred_mss(tp); |
297 | } | 296 | } |
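
A hedged arithmetic sketch of the unit change above: the remaining window left is counted in segments, so it is now compared against the segment-based autosizing goal rather than a mix of byte and segment limits. The numbers below are made up.

#include <stdbool.h>
#include <stdio.h>

struct fake_tp {
        unsigned int snd_cwnd;                  /* segments */
        unsigned int xmit_size_goal_segs;       /* segments per autosized burst */
};

/* treat the flow as cwnd-limited while less than one full burst
 * worth of segments still fits into the congestion window */
static bool cwnd_limited_by_burst(const struct fake_tp *tp,
                                  unsigned int in_flight)
{
        unsigned int left = tp->snd_cwnd - in_flight;   /* segments */

        return left < tp->xmit_size_goal_segs;
}

int main(void)
{
        struct fake_tp tp = { .snd_cwnd = 40, .xmit_size_goal_segs = 10 };

        printf("%d\n", cwnd_limited_by_burst(&tp, 35)); /* 1: 5 segs left */
        printf("%d\n", cwnd_limited_by_burst(&tp, 10)); /* 0: 30 segs left */
        return 0;
}
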
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 227cba79fa6b..eeaac399420d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1945,8 +1945,9 @@ void tcp_enter_loss(struct sock *sk, int how) | |||
1945 | if (skb == tcp_send_head(sk)) | 1945 | if (skb == tcp_send_head(sk)) |
1946 | break; | 1946 | break; |
1947 | 1947 | ||
1948 | if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) | 1948 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) |
1949 | tp->undo_marker = 0; | 1949 | tp->undo_marker = 0; |
1950 | |||
1950 | TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; | 1951 | TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; |
1951 | if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { | 1952 | if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { |
1952 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; | 1953 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3be16727f058..f0eb4e337ec8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -864,8 +864,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
864 | 864 | ||
865 | if (unlikely(skb->fclone == SKB_FCLONE_ORIG && | 865 | if (unlikely(skb->fclone == SKB_FCLONE_ORIG && |
866 | fclone->fclone == SKB_FCLONE_CLONE)) | 866 | fclone->fclone == SKB_FCLONE_CLONE)) |
867 | NET_INC_STATS_BH(sock_net(sk), | 867 | NET_INC_STATS(sock_net(sk), |
868 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); | 868 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); |
869 | 869 | ||
870 | if (unlikely(skb_cloned(skb))) | 870 | if (unlikely(skb_cloned(skb))) |
871 | skb = pskb_copy(skb, gfp_mask); | 871 | skb = pskb_copy(skb, gfp_mask); |
@@ -2337,6 +2337,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
2337 | struct tcp_sock *tp = tcp_sk(sk); | 2337 | struct tcp_sock *tp = tcp_sk(sk); |
2338 | struct inet_connection_sock *icsk = inet_csk(sk); | 2338 | struct inet_connection_sock *icsk = inet_csk(sk); |
2339 | unsigned int cur_mss; | 2339 | unsigned int cur_mss; |
2340 | int err; | ||
2340 | 2341 | ||
2341 | 	/* Inconclusive MTU probe */ | 2342 | 	/* Inconclusive MTU probe */ |
2342 | if (icsk->icsk_mtup.probe_size) { | 2343 | if (icsk->icsk_mtup.probe_size) { |
@@ -2400,11 +2401,15 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
2400 | skb_headroom(skb) >= 0xFFFF)) { | 2401 | skb_headroom(skb) >= 0xFFFF)) { |
2401 | struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, | 2402 | struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, |
2402 | GFP_ATOMIC); | 2403 | GFP_ATOMIC); |
2403 | return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : | 2404 | err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : |
2404 | -ENOBUFS; | 2405 | -ENOBUFS; |
2405 | } else { | 2406 | } else { |
2406 | return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); | 2407 | err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); |
2407 | } | 2408 | } |
2409 | |||
2410 | if (likely(!err)) | ||
2411 | TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; | ||
2412 | return err; | ||
2408 | } | 2413 | } |
2409 | 2414 | ||
2410 | int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | 2415 | int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) |
@@ -2908,7 +2913,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) | |||
2908 | space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - | 2913 | space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - |
2909 | MAX_TCP_OPTION_SPACE; | 2914 | MAX_TCP_OPTION_SPACE; |
2910 | 2915 | ||
2911 | syn_data = skb_copy_expand(syn, skb_headroom(syn), space, | 2916 | space = min_t(size_t, space, fo->size); |
2917 | |||
2918 | /* limit to order-0 allocations */ | ||
2919 | space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); | ||
2920 | |||
2921 | syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space, | ||
2912 | sk->sk_allocation); | 2922 | sk->sk_allocation); |
2913 | if (syn_data == NULL) | 2923 | if (syn_data == NULL) |
2914 | goto fallback; | 2924 | goto fallback; |
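
A hedged arithmetic sketch of the clamping added above: the payload carried on the SYN is limited by the MSS-derived budget, by how much Fast Open data is actually queued (fo->size), and by what an order-0 skb head can hold. All values below are placeholders.

#include <stddef.h>
#include <stdio.h>

static size_t min_sz(size_t a, size_t b)
{
        return a < b ? a : b;
}

#define MSS_BUDGET      1400u   /* placeholder: mss minus TCP option space */
#define ORDER0_HEAD_MAX 3000u   /* placeholder for SKB_MAX_HEAD(MAX_TCP_HEADER) */

int main(void)
{
        size_t queued = 10000;          /* fo->size: pending Fast Open payload */
        size_t space = MSS_BUDGET;

        space = min_sz(space, queued);          /* don't copy more than queued */
        space = min_sz(space, ORDER0_HEAD_MAX); /* keep it an order-0 alloc */

        printf("SYN carries %zu bytes of data\n", space);
        return 0;
}
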
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index d92e5586783e..438a73aa777c 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig | |||
@@ -138,6 +138,7 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION | |||
138 | config IPV6_VTI | 138 | config IPV6_VTI |
139 | tristate "Virtual (secure) IPv6: tunneling" | 139 | tristate "Virtual (secure) IPv6: tunneling" |
140 | select IPV6_TUNNEL | 140 | select IPV6_TUNNEL |
141 | select NET_IP_TUNNEL | ||
141 | depends on INET6_XFRM_MODE_TUNNEL | 142 | depends on INET6_XFRM_MODE_TUNNEL |
142 | ---help--- | 143 | ---help--- |
143 | Tunneling means encapsulating data of one protocol type within | 144 | Tunneling means encapsulating data of one protocol type within |
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index 140748debc4a..8af3eb57f438 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c | |||
@@ -212,7 +212,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, | |||
212 | found = (nexthdr == target); | 212 | found = (nexthdr == target); |
213 | 213 | ||
214 | if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { | 214 | if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { |
215 | if (target < 0) | 215 | if (target < 0 || found) |
216 | break; | 216 | break; |
217 | return -ENOENT; | 217 | return -ENOENT; |
218 | } | 218 | } |
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 1e8683b135bb..59f95affceb0 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
@@ -89,7 +89,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
89 | unsigned int unfrag_ip6hlen; | 89 | unsigned int unfrag_ip6hlen; |
90 | u8 *prevhdr; | 90 | u8 *prevhdr; |
91 | int offset = 0; | 91 | int offset = 0; |
92 | bool tunnel; | 92 | bool encap, udpfrag; |
93 | int nhoff; | 93 | int nhoff; |
94 | 94 | ||
95 | if (unlikely(skb_shinfo(skb)->gso_type & | 95 | if (unlikely(skb_shinfo(skb)->gso_type & |
@@ -110,8 +110,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
110 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | 110 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) |
111 | goto out; | 111 | goto out; |
112 | 112 | ||
113 | tunnel = SKB_GSO_CB(skb)->encap_level > 0; | 113 | encap = SKB_GSO_CB(skb)->encap_level > 0; |
114 | if (tunnel) | 114 | if (encap) |
115 | features = skb->dev->hw_enc_features & netif_skb_features(skb); | 115 | features = skb->dev->hw_enc_features & netif_skb_features(skb); |
116 | SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); | 116 | SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); |
117 | 117 | ||
@@ -121,6 +121,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
121 | 121 | ||
122 | proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | 122 | proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); |
123 | 123 | ||
124 | if (skb->encapsulation && | ||
125 | skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) | ||
126 | udpfrag = proto == IPPROTO_UDP && encap; | ||
127 | else | ||
128 | udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; | ||
129 | |||
124 | ops = rcu_dereference(inet6_offloads[proto]); | 130 | ops = rcu_dereference(inet6_offloads[proto]); |
125 | if (likely(ops && ops->callbacks.gso_segment)) { | 131 | if (likely(ops && ops->callbacks.gso_segment)) { |
126 | skb_reset_transport_header(skb); | 132 | skb_reset_transport_header(skb); |
@@ -133,13 +139,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
133 | for (skb = segs; skb; skb = skb->next) { | 139 | for (skb = segs; skb; skb = skb->next) { |
134 | ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); | 140 | ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); |
135 | ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); | 141 | ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); |
136 | if (tunnel) { | ||
137 | skb_reset_inner_headers(skb); | ||
138 | skb->encapsulation = 1; | ||
139 | } | ||
140 | skb->network_header = (u8 *)ipv6h - skb->head; | 142 | skb->network_header = (u8 *)ipv6h - skb->head; |
141 | 143 | ||
142 | if (!tunnel && proto == IPPROTO_UDP) { | 144 | if (udpfrag) { |
143 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | 145 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); |
144 | fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); | 146 | fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); |
145 | fptr->frag_off = htons(offset); | 147 | fptr->frag_off = htons(offset); |
@@ -148,6 +150,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
148 | offset += (ntohs(ipv6h->payload_len) - | 150 | offset += (ntohs(ipv6h->payload_len) - |
149 | sizeof(struct frag_hdr)); | 151 | sizeof(struct frag_hdr)); |
150 | } | 152 | } |
153 | if (encap) | ||
154 | skb_reset_inner_headers(skb); | ||
151 | } | 155 | } |
152 | 156 | ||
153 | out: | 157 | out: |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 070a2fae2375..16f91a2e7888 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -530,9 +530,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
530 | to->tc_index = from->tc_index; | 530 | to->tc_index = from->tc_index; |
531 | #endif | 531 | #endif |
532 | nf_copy(to, from); | 532 | nf_copy(to, from); |
533 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) | ||
534 | to->nf_trace = from->nf_trace; | ||
535 | #endif | ||
536 | skb_copy_secmark(to, from); | 533 | skb_copy_secmark(to, from); |
537 | } | 534 | } |
538 | 535 | ||
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index fb9beb78f00b..587bbdcb22b4 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
@@ -135,6 +135,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
135 | fl6.flowi6_proto = IPPROTO_ICMPV6; | 135 | fl6.flowi6_proto = IPPROTO_ICMPV6; |
136 | fl6.saddr = np->saddr; | 136 | fl6.saddr = np->saddr; |
137 | fl6.daddr = *daddr; | 137 | fl6.daddr = *daddr; |
138 | fl6.flowi6_mark = sk->sk_mark; | ||
138 | fl6.fl6_icmp_type = user_icmph.icmp6_type; | 139 | fl6.fl6_icmp_type = user_icmph.icmp6_type; |
139 | fl6.fl6_icmp_code = user_icmph.icmp6_code; | 140 | fl6.fl6_icmp_code = user_icmph.icmp6_code; |
140 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 141 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 3dfbcf1dcb1c..b4d74c86586c 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -475,6 +475,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev) | |||
475 | ipip6_tunnel_unlink(sitn, tunnel); | 475 | ipip6_tunnel_unlink(sitn, tunnel); |
476 | ipip6_tunnel_del_prl(tunnel, NULL); | 476 | ipip6_tunnel_del_prl(tunnel, NULL); |
477 | } | 477 | } |
478 | ip_tunnel_dst_reset_all(tunnel); | ||
478 | dev_put(dev); | 479 | dev_put(dev); |
479 | } | 480 | } |
480 | 481 | ||
@@ -1082,6 +1083,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) | |||
1082 | t->parms.link = p->link; | 1083 | t->parms.link = p->link; |
1083 | ipip6_tunnel_bind_dev(t->dev); | 1084 | ipip6_tunnel_bind_dev(t->dev); |
1084 | } | 1085 | } |
1086 | ip_tunnel_dst_reset_all(t); | ||
1085 | netdev_state_change(t->dev); | 1087 | netdev_state_change(t->dev); |
1086 | } | 1088 | } |
1087 | 1089 | ||
@@ -1112,6 +1114,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t, | |||
1112 | t->ip6rd.relay_prefix = relay_prefix; | 1114 | t->ip6rd.relay_prefix = relay_prefix; |
1113 | t->ip6rd.prefixlen = ip6rd->prefixlen; | 1115 | t->ip6rd.prefixlen = ip6rd->prefixlen; |
1114 | t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen; | 1116 | t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen; |
1117 | ip_tunnel_dst_reset_all(t); | ||
1115 | netdev_state_change(t->dev); | 1118 | netdev_state_change(t->dev); |
1116 | return 0; | 1119 | return 0; |
1117 | } | 1120 | } |
@@ -1271,6 +1274,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1271 | err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL); | 1274 | err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL); |
1272 | break; | 1275 | break; |
1273 | } | 1276 | } |
1277 | ip_tunnel_dst_reset_all(t); | ||
1274 | netdev_state_change(dev); | 1278 | netdev_state_change(dev); |
1275 | break; | 1279 | break; |
1276 | 1280 | ||
@@ -1326,6 +1330,9 @@ static const struct net_device_ops ipip6_netdev_ops = { | |||
1326 | 1330 | ||
1327 | static void ipip6_dev_free(struct net_device *dev) | 1331 | static void ipip6_dev_free(struct net_device *dev) |
1328 | { | 1332 | { |
1333 | struct ip_tunnel *tunnel = netdev_priv(dev); | ||
1334 | |||
1335 | free_percpu(tunnel->dst_cache); | ||
1329 | free_percpu(dev->tstats); | 1336 | free_percpu(dev->tstats); |
1330 | free_netdev(dev); | 1337 | free_netdev(dev); |
1331 | } | 1338 | } |
@@ -1375,6 +1382,12 @@ static int ipip6_tunnel_init(struct net_device *dev) | |||
1375 | u64_stats_init(&ipip6_tunnel_stats->syncp); | 1382 | u64_stats_init(&ipip6_tunnel_stats->syncp); |
1376 | } | 1383 | } |
1377 | 1384 | ||
1385 | tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); | ||
1386 | if (!tunnel->dst_cache) { | ||
1387 | free_percpu(dev->tstats); | ||
1388 | return -ENOMEM; | ||
1389 | } | ||
1390 | |||
1378 | return 0; | 1391 | return 0; |
1379 | } | 1392 | } |
1380 | 1393 | ||
@@ -1405,6 +1418,12 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) | |||
1405 | u64_stats_init(&ipip6_fb_stats->syncp); | 1418 | u64_stats_init(&ipip6_fb_stats->syncp); |
1406 | } | 1419 | } |
1407 | 1420 | ||
1421 | tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); | ||
1422 | if (!tunnel->dst_cache) { | ||
1423 | free_percpu(dev->tstats); | ||
1424 | return -ENOMEM; | ||
1425 | } | ||
1426 | |||
1408 | dev_hold(dev); | 1427 | dev_hold(dev); |
1409 | rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); | 1428 | rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); |
1410 | return 0; | 1429 | return 0; |
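
Both sit init paths above follow the usual unwind ordering: if the second per-cpu allocation (dst_cache) fails, the one that already succeeded (tstats) is released before returning. A generic, non-kernel sketch of that pattern, with calloc standing in for alloc_percpu:

#include <stdlib.h>

struct fake_tunnel {
        void *tstats;           /* first per-CPU resource */
        void *dst_cache;        /* second per-CPU resource */
};

static int fake_tunnel_init(struct fake_tunnel *t)
{
        t->tstats = calloc(1, 64);
        if (!t->tstats)
                return -1;

        t->dst_cache = calloc(1, 64);
        if (!t->dst_cache) {
                free(t->tstats);        /* unwind what already succeeded */
                t->tstats = NULL;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct fake_tunnel t;

        if (fake_tunnel_init(&t) != 0)
                return 1;
        free(t.dst_cache);
        free(t.tstats);
        return 0;
}
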
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index e7359f9eaa8d..b261ee8b83fc 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c | |||
@@ -113,7 +113,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, | |||
113 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); | 113 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); |
114 | fptr->nexthdr = nexthdr; | 114 | fptr->nexthdr = nexthdr; |
115 | fptr->reserved = 0; | 115 | fptr->reserved = 0; |
116 | ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb)); | 116 | fptr->identification = skb_shinfo(skb)->ip6_frag_id; |
117 | 117 | ||
118 | /* Fragment the skb. ipv6 header and the remaining fields of the | 118 | /* Fragment the skb. ipv6 header and the remaining fields of the |
119 | * fragment header are updated in ipv6_gso_segment() | 119 | * fragment header are updated in ipv6_gso_segment() |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 3701930c6649..5e44e3179e02 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1692,14 +1692,8 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, | |||
1692 | void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue); | 1692 | void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue); |
1693 | void ieee80211_add_pending_skb(struct ieee80211_local *local, | 1693 | void ieee80211_add_pending_skb(struct ieee80211_local *local, |
1694 | struct sk_buff *skb); | 1694 | struct sk_buff *skb); |
1695 | void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, | 1695 | void ieee80211_add_pending_skbs(struct ieee80211_local *local, |
1696 | struct sk_buff_head *skbs, | 1696 | struct sk_buff_head *skbs); |
1697 | void (*fn)(void *data), void *data); | ||
1698 | static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local, | ||
1699 | struct sk_buff_head *skbs) | ||
1700 | { | ||
1701 | ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); | ||
1702 | } | ||
1703 | void ieee80211_flush_queues(struct ieee80211_local *local, | 1697 | void ieee80211_flush_queues(struct ieee80211_local *local, |
1704 | struct ieee80211_sub_if_data *sdata); | 1698 | struct ieee80211_sub_if_data *sdata); |
1705 | 1699 | ||
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index fc1d82465b3c..245dce969b31 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -222,6 +222,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
222 | switch (vht_oper->chan_width) { | 222 | switch (vht_oper->chan_width) { |
223 | case IEEE80211_VHT_CHANWIDTH_USE_HT: | 223 | case IEEE80211_VHT_CHANWIDTH_USE_HT: |
224 | vht_chandef.width = chandef->width; | 224 | vht_chandef.width = chandef->width; |
225 | vht_chandef.center_freq1 = chandef->center_freq1; | ||
225 | break; | 226 | break; |
226 | case IEEE80211_VHT_CHANWIDTH_80MHZ: | 227 | case IEEE80211_VHT_CHANWIDTH_80MHZ: |
227 | vht_chandef.width = NL80211_CHAN_WIDTH_80; | 228 | vht_chandef.width = NL80211_CHAN_WIDTH_80; |
@@ -271,6 +272,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
271 | ret = 0; | 272 | ret = 0; |
272 | 273 | ||
273 | out: | 274 | out: |
275 | /* | ||
276 | * When tracking the current AP, don't do any further checks if the | ||
277 | * new chandef is identical to the one we're currently using for the | ||
278 | 	 * connection. This keeps us from playing ping-pong with regulatory; | ||
279 | * without it the following can happen (for example): | ||
280 | * - connect to an AP with 80 MHz, world regdom allows 80 MHz | ||
281 | * - AP advertises regdom US | ||
282 | * - CRDA loads regdom US with 80 MHz prohibited (old database) | ||
283 | * - the code below detects an unsupported channel, downgrades, and | ||
284 | * we disconnect from the AP in the caller | ||
285 | * - disconnect causes CRDA to reload world regdomain and the game | ||
286 | * starts anew. | ||
287 | * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881) | ||
288 | * | ||
289 | * It seems possible that there are still scenarios with CSA or real | ||
290 | 	 * bandwidth changes where this could happen, but those cases are | ||
291 | * less common and wouldn't completely prevent using the AP. | ||
292 | */ | ||
293 | if (tracking && | ||
294 | cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) | ||
295 | return ret; | ||
296 | |||
274 | /* don't print the message below for VHT mismatch if VHT is disabled */ | 297 | /* don't print the message below for VHT mismatch if VHT is disabled */ |
275 | if (ret & IEEE80211_STA_DISABLE_VHT) | 298 | if (ret & IEEE80211_STA_DISABLE_VHT) |
276 | vht_chandef = *chandef; | 299 | vht_chandef = *chandef; |
@@ -3753,6 +3776,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, | |||
3753 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | 3776 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); |
3754 | if (WARN_ON(!chanctx_conf)) { | 3777 | if (WARN_ON(!chanctx_conf)) { |
3755 | rcu_read_unlock(); | 3778 | rcu_read_unlock(); |
3779 | sta_info_free(local, new_sta); | ||
3756 | return -EINVAL; | 3780 | return -EINVAL; |
3757 | } | 3781 | } |
3758 | rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); | 3782 | rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c24ca0d0f469..3e57f96c9666 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1128,6 +1128,13 @@ static void sta_ps_end(struct sta_info *sta) | |||
1128 | sta->sta.addr, sta->sta.aid); | 1128 | sta->sta.addr, sta->sta.aid); |
1129 | 1129 | ||
1130 | if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { | 1130 | if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { |
1131 | /* | ||
1132 | * Clear the flag only if the other one is still set | ||
1133 | * so that the TX path won't start TX'ing new frames | ||
1134 | * directly ... In the case that the driver flag isn't | ||
1135 | * set ieee80211_sta_ps_deliver_wakeup() will clear it. | ||
1136 | */ | ||
1137 | clear_sta_flag(sta, WLAN_STA_PS_STA); | ||
1131 | ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", | 1138 | ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", |
1132 | sta->sta.addr, sta->sta.aid); | 1139 | sta->sta.addr, sta->sta.aid); |
1133 | return; | 1140 | return; |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index decd30c1e290..a023b432143b 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -91,7 +91,7 @@ static int sta_info_hash_del(struct ieee80211_local *local, | |||
91 | return -ENOENT; | 91 | return -ENOENT; |
92 | } | 92 | } |
93 | 93 | ||
94 | static void cleanup_single_sta(struct sta_info *sta) | 94 | static void __cleanup_single_sta(struct sta_info *sta) |
95 | { | 95 | { |
96 | int ac, i; | 96 | int ac, i; |
97 | struct tid_ampdu_tx *tid_tx; | 97 | struct tid_ampdu_tx *tid_tx; |
@@ -99,7 +99,8 @@ static void cleanup_single_sta(struct sta_info *sta) | |||
99 | struct ieee80211_local *local = sdata->local; | 99 | struct ieee80211_local *local = sdata->local; |
100 | struct ps_data *ps; | 100 | struct ps_data *ps; |
101 | 101 | ||
102 | if (test_sta_flag(sta, WLAN_STA_PS_STA)) { | 102 | if (test_sta_flag(sta, WLAN_STA_PS_STA) || |
103 | test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { | ||
103 | if (sta->sdata->vif.type == NL80211_IFTYPE_AP || | 104 | if (sta->sdata->vif.type == NL80211_IFTYPE_AP || |
104 | sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 105 | sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
105 | ps = &sdata->bss->ps; | 106 | ps = &sdata->bss->ps; |
@@ -109,6 +110,7 @@ static void cleanup_single_sta(struct sta_info *sta) | |||
109 | return; | 110 | return; |
110 | 111 | ||
111 | clear_sta_flag(sta, WLAN_STA_PS_STA); | 112 | clear_sta_flag(sta, WLAN_STA_PS_STA); |
113 | clear_sta_flag(sta, WLAN_STA_PS_DRIVER); | ||
112 | 114 | ||
113 | atomic_dec(&ps->num_sta_ps); | 115 | atomic_dec(&ps->num_sta_ps); |
114 | sta_info_recalc_tim(sta); | 116 | sta_info_recalc_tim(sta); |
@@ -139,7 +141,14 @@ static void cleanup_single_sta(struct sta_info *sta) | |||
139 | ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); | 141 | ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); |
140 | kfree(tid_tx); | 142 | kfree(tid_tx); |
141 | } | 143 | } |
144 | } | ||
142 | 145 | ||
146 | static void cleanup_single_sta(struct sta_info *sta) | ||
147 | { | ||
148 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
149 | struct ieee80211_local *local = sdata->local; | ||
150 | |||
151 | __cleanup_single_sta(sta); | ||
143 | sta_info_free(local, sta); | 152 | sta_info_free(local, sta); |
144 | } | 153 | } |
145 | 154 | ||
@@ -330,6 +339,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
330 | rcu_read_unlock(); | 339 | rcu_read_unlock(); |
331 | 340 | ||
332 | spin_lock_init(&sta->lock); | 341 | spin_lock_init(&sta->lock); |
342 | spin_lock_init(&sta->ps_lock); | ||
333 | INIT_WORK(&sta->drv_unblock_wk, sta_unblock); | 343 | INIT_WORK(&sta->drv_unblock_wk, sta_unblock); |
334 | INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); | 344 | INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); |
335 | mutex_init(&sta->ampdu_mlme.mtx); | 345 | mutex_init(&sta->ampdu_mlme.mtx); |
@@ -487,21 +497,26 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) | |||
487 | goto out_err; | 497 | goto out_err; |
488 | } | 498 | } |
489 | 499 | ||
490 | /* notify driver */ | ||
491 | err = sta_info_insert_drv_state(local, sdata, sta); | ||
492 | if (err) | ||
493 | goto out_err; | ||
494 | |||
495 | local->num_sta++; | 500 | local->num_sta++; |
496 | local->sta_generation++; | 501 | local->sta_generation++; |
497 | smp_mb(); | 502 | smp_mb(); |
498 | 503 | ||
504 | /* simplify things and don't accept BA sessions yet */ | ||
505 | set_sta_flag(sta, WLAN_STA_BLOCK_BA); | ||
506 | |||
499 | /* make the station visible */ | 507 | /* make the station visible */ |
500 | sta_info_hash_add(local, sta); | 508 | sta_info_hash_add(local, sta); |
501 | 509 | ||
502 | list_add_rcu(&sta->list, &local->sta_list); | 510 | list_add_rcu(&sta->list, &local->sta_list); |
503 | 511 | ||
512 | /* notify driver */ | ||
513 | err = sta_info_insert_drv_state(local, sdata, sta); | ||
514 | if (err) | ||
515 | goto out_remove; | ||
516 | |||
504 | set_sta_flag(sta, WLAN_STA_INSERTED); | 517 | set_sta_flag(sta, WLAN_STA_INSERTED); |
518 | /* accept BA sessions now */ | ||
519 | clear_sta_flag(sta, WLAN_STA_BLOCK_BA); | ||
505 | 520 | ||
506 | ieee80211_recalc_min_chandef(sdata); | 521 | ieee80211_recalc_min_chandef(sdata); |
507 | ieee80211_sta_debugfs_add(sta); | 522 | ieee80211_sta_debugfs_add(sta); |
@@ -522,6 +537,12 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) | |||
522 | mesh_accept_plinks_update(sdata); | 537 | mesh_accept_plinks_update(sdata); |
523 | 538 | ||
524 | return 0; | 539 | return 0; |
540 | out_remove: | ||
541 | sta_info_hash_del(local, sta); | ||
542 | list_del_rcu(&sta->list); | ||
543 | local->num_sta--; | ||
544 | synchronize_net(); | ||
545 | __cleanup_single_sta(sta); | ||
525 | out_err: | 546 | out_err: |
526 | mutex_unlock(&local->sta_mtx); | 547 | mutex_unlock(&local->sta_mtx); |
527 | rcu_read_lock(); | 548 | rcu_read_lock(); |
@@ -1071,10 +1092,14 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, | |||
1071 | } | 1092 | } |
1072 | EXPORT_SYMBOL(ieee80211_find_sta); | 1093 | EXPORT_SYMBOL(ieee80211_find_sta); |
1073 | 1094 | ||
1074 | static void clear_sta_ps_flags(void *_sta) | 1095 | /* powersave support code */ |
1096 | void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | ||
1075 | { | 1097 | { |
1076 | struct sta_info *sta = _sta; | ||
1077 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 1098 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
1099 | struct ieee80211_local *local = sdata->local; | ||
1100 | struct sk_buff_head pending; | ||
1101 | int filtered = 0, buffered = 0, ac; | ||
1102 | unsigned long flags; | ||
1078 | struct ps_data *ps; | 1103 | struct ps_data *ps; |
1079 | 1104 | ||
1080 | if (sdata->vif.type == NL80211_IFTYPE_AP || | 1105 | if (sdata->vif.type == NL80211_IFTYPE_AP || |
@@ -1085,20 +1110,6 @@ static void clear_sta_ps_flags(void *_sta) | |||
1085 | else | 1110 | else |
1086 | return; | 1111 | return; |
1087 | 1112 | ||
1088 | clear_sta_flag(sta, WLAN_STA_PS_DRIVER); | ||
1089 | if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA)) | ||
1090 | atomic_dec(&ps->num_sta_ps); | ||
1091 | } | ||
1092 | |||
1093 | /* powersave support code */ | ||
1094 | void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | ||
1095 | { | ||
1096 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
1097 | struct ieee80211_local *local = sdata->local; | ||
1098 | struct sk_buff_head pending; | ||
1099 | int filtered = 0, buffered = 0, ac; | ||
1100 | unsigned long flags; | ||
1101 | |||
1102 | clear_sta_flag(sta, WLAN_STA_SP); | 1113 | clear_sta_flag(sta, WLAN_STA_SP); |
1103 | 1114 | ||
1104 | BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); | 1115 | BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); |
@@ -1109,6 +1120,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | |||
1109 | 1120 | ||
1110 | skb_queue_head_init(&pending); | 1121 | skb_queue_head_init(&pending); |
1111 | 1122 | ||
1123 | /* sync with ieee80211_tx_h_unicast_ps_buf */ | ||
1124 | spin_lock(&sta->ps_lock); | ||
1112 | /* Send all buffered frames to the station */ | 1125 | /* Send all buffered frames to the station */ |
1113 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { | 1126 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { |
1114 | int count = skb_queue_len(&pending), tmp; | 1127 | int count = skb_queue_len(&pending), tmp; |
@@ -1127,7 +1140,12 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | |||
1127 | buffered += tmp - count; | 1140 | buffered += tmp - count; |
1128 | } | 1141 | } |
1129 | 1142 | ||
1130 | ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta); | 1143 | ieee80211_add_pending_skbs(local, &pending); |
1144 | clear_sta_flag(sta, WLAN_STA_PS_DRIVER); | ||
1145 | clear_sta_flag(sta, WLAN_STA_PS_STA); | ||
1146 | spin_unlock(&sta->ps_lock); | ||
1147 | |||
1148 | atomic_dec(&ps->num_sta_ps); | ||
1131 | 1149 | ||
1132 | /* This station just woke up and isn't aware of our SMPS state */ | 1150 | /* This station just woke up and isn't aware of our SMPS state */ |
1133 | if (!ieee80211_smps_is_restrictive(sta->known_smps_mode, | 1151 | if (!ieee80211_smps_is_restrictive(sta->known_smps_mode, |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index d77ff7090630..d3a6d8208f2f 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -267,6 +267,7 @@ struct ieee80211_tx_latency_stat { | |||
267 | * @drv_unblock_wk: used for driver PS unblocking | 267 | * @drv_unblock_wk: used for driver PS unblocking |
268 | * @listen_interval: listen interval of this station, when we're acting as AP | 268 | * @listen_interval: listen interval of this station, when we're acting as AP |
269 | * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly | 269 | * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly |
270 | * @ps_lock: used for powersave (when mac80211 is the AP) related locking | ||
270 | * @ps_tx_buf: buffers (per AC) of frames to transmit to this station | 271 | * @ps_tx_buf: buffers (per AC) of frames to transmit to this station |
271 | * when it leaves power saving state or polls | 272 | * when it leaves power saving state or polls |
272 | * @tx_filtered: buffers (per AC) of frames we already tried to | 273 | * @tx_filtered: buffers (per AC) of frames we already tried to |
@@ -356,10 +357,8 @@ struct sta_info { | |||
356 | /* use the accessors defined below */ | 357 | /* use the accessors defined below */ |
357 | unsigned long _flags; | 358 | unsigned long _flags; |
358 | 359 | ||
359 | /* | 360 | /* STA powersave lock and frame queues */ |
360 | * STA powersave frame queues, no more than the internal | 361 | spinlock_t ps_lock; |
361 | * locking required. | ||
362 | */ | ||
363 | struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS]; | 362 | struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS]; |
364 | struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS]; | 363 | struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS]; |
365 | unsigned long driver_buffered_tids; | 364 | unsigned long driver_buffered_tids; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 97a02d3f7d87..4080c615636f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -478,6 +478,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
478 | sta->sta.addr, sta->sta.aid, ac); | 478 | sta->sta.addr, sta->sta.aid, ac); |
479 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) | 479 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) |
480 | purge_old_ps_buffers(tx->local); | 480 | purge_old_ps_buffers(tx->local); |
481 | |||
482 | /* sync with ieee80211_sta_ps_deliver_wakeup */ | ||
483 | spin_lock(&sta->ps_lock); | ||
484 | /* | ||
485 | * STA woke up the meantime and all the frames on ps_tx_buf have | ||
486 | * been queued to pending queue. No reordering can happen, go | ||
487 | * ahead and Tx the packet. | ||
488 | */ | ||
489 | if (!test_sta_flag(sta, WLAN_STA_PS_STA) && | ||
490 | !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { | ||
491 | spin_unlock(&sta->ps_lock); | ||
492 | return TX_CONTINUE; | ||
493 | } | ||
494 | |||
481 | if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { | 495 | if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { |
482 | struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); | 496 | struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); |
483 | ps_dbg(tx->sdata, | 497 | ps_dbg(tx->sdata, |
@@ -492,6 +506,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
492 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | 506 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; |
493 | info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; | 507 | info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; |
494 | skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb); | 508 | skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb); |
509 | spin_unlock(&sta->ps_lock); | ||
495 | 510 | ||
496 | if (!timer_pending(&local->sta_cleanup)) | 511 | if (!timer_pending(&local->sta_cleanup)) |
497 | mod_timer(&local->sta_cleanup, | 512 | mod_timer(&local->sta_cleanup, |
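
Editor's note: the sta_info.c, sta_info.h and tx.c hunks above close a race between the TX path buffering frames for a sleeping station and ieee80211_sta_ps_deliver_wakeup() flushing that buffer. Both sides now take the new sta->ps_lock, and the TX path re-checks the powersave flags under the lock, so a frame is either sent immediately or queued before the flush and can never be reordered around it. Below is a minimal userspace sketch of that pattern only; the mutex, flag and array are stand-ins (plain pthreads and C, not the mac80211 primitives).

    /* Sketch of the ps_lock pattern: the TX side re-checks the powersave
     * state under the lock before buffering, the wakeup side flushes the
     * buffer and clears the state under the same lock. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ps_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool sta_asleep = true;      /* stands in for WLAN_STA_PS_STA/PS_DRIVER */
    static int ps_buf[32];
    static int buffered;

    static void tx_frame(int frame)
    {
        pthread_mutex_lock(&ps_lock);
        if (!sta_asleep) {              /* station woke up meanwhile: send now */
            pthread_mutex_unlock(&ps_lock);
            printf("tx frame %d directly\n", frame);
            return;
        }
        ps_buf[buffered++] = frame;     /* still asleep: buffer under the lock */
        pthread_mutex_unlock(&ps_lock);
    }

    static void deliver_wakeup(void)
    {
        pthread_mutex_lock(&ps_lock);
        for (int i = 0; i < buffered; i++)  /* flush everything buffered so far */
            printf("tx frame %d from ps buffer\n", ps_buf[i]);
        buffered = 0;
        sta_asleep = false;             /* clear PS state under the same lock */
        pthread_mutex_unlock(&ps_lock);
    }

    int main(void)
    {
        tx_frame(1);        /* buffered */
        deliver_wakeup();   /* flushes frame 1, marks the station awake */
        tx_frame(2);        /* sent directly, cannot overtake frame 1 */
        return 0;
    }
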
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 676dc0967f37..b8700d417a9c 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -435,9 +435,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local, | |||
435 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 435 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
436 | } | 436 | } |
437 | 437 | ||
438 | void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, | 438 | void ieee80211_add_pending_skbs(struct ieee80211_local *local, |
439 | struct sk_buff_head *skbs, | 439 | struct sk_buff_head *skbs) |
440 | void (*fn)(void *data), void *data) | ||
441 | { | 440 | { |
442 | struct ieee80211_hw *hw = &local->hw; | 441 | struct ieee80211_hw *hw = &local->hw; |
443 | struct sk_buff *skb; | 442 | struct sk_buff *skb; |
@@ -461,9 +460,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, | |||
461 | __skb_queue_tail(&local->pending[queue], skb); | 460 | __skb_queue_tail(&local->pending[queue], skb); |
462 | } | 461 | } |
463 | 462 | ||
464 | if (fn) | ||
465 | fn(data); | ||
466 | |||
467 | for (i = 0; i < hw->queues; i++) | 463 | for (i = 0; i < hw->queues; i++) |
468 | __ieee80211_wake_queue(hw, i, | 464 | __ieee80211_wake_queue(hw, i, |
469 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD); | 465 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD); |
@@ -1741,6 +1737,26 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1741 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); | 1737 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); |
1742 | 1738 | ||
1743 | /* | 1739 | /* |
1740 | * Reconfigure sched scan if it was interrupted by FW restart or | ||
1741 | * suspend. | ||
1742 | */ | ||
1743 | mutex_lock(&local->mtx); | ||
1744 | sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, | ||
1745 | lockdep_is_held(&local->mtx)); | ||
1746 | if (sched_scan_sdata && local->sched_scan_req) | ||
1747 | /* | ||
1748 | * Sched scan stopped, but we don't want to report it. Instead, | ||
1749 | * we're trying to reschedule. | ||
1750 | */ | ||
1751 | if (__ieee80211_request_sched_scan_start(sched_scan_sdata, | ||
1752 | local->sched_scan_req)) | ||
1753 | sched_scan_stopped = true; | ||
1754 | mutex_unlock(&local->mtx); | ||
1755 | |||
1756 | if (sched_scan_stopped) | ||
1757 | cfg80211_sched_scan_stopped(local->hw.wiphy); | ||
1758 | |||
1759 | /* | ||
1744 | * If this is for hw restart things are still running. | 1760 | * If this is for hw restart things are still running. |
1745 | * We may want to change that later, however. | 1761 | * We may want to change that later, however. |
1746 | */ | 1762 | */ |
@@ -1768,26 +1784,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1768 | WARN_ON(1); | 1784 | WARN_ON(1); |
1769 | #endif | 1785 | #endif |
1770 | 1786 | ||
1771 | /* | ||
1772 | * Reconfigure sched scan if it was interrupted by FW restart or | ||
1773 | * suspend. | ||
1774 | */ | ||
1775 | mutex_lock(&local->mtx); | ||
1776 | sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, | ||
1777 | lockdep_is_held(&local->mtx)); | ||
1778 | if (sched_scan_sdata && local->sched_scan_req) | ||
1779 | /* | ||
1780 | * Sched scan stopped, but we don't want to report it. Instead, | ||
1781 | * we're trying to reschedule. | ||
1782 | */ | ||
1783 | if (__ieee80211_request_sched_scan_start(sched_scan_sdata, | ||
1784 | local->sched_scan_req)) | ||
1785 | sched_scan_stopped = true; | ||
1786 | mutex_unlock(&local->mtx); | ||
1787 | |||
1788 | if (sched_scan_stopped) | ||
1789 | cfg80211_sched_scan_stopped(local->hw.wiphy); | ||
1790 | |||
1791 | return 0; | 1787 | return 0; |
1792 | } | 1788 | } |
1793 | 1789 | ||
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 21211c60ca98..d51422c778de 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -154,6 +154,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, | |||
154 | return IEEE80211_AC_BE; | 154 | return IEEE80211_AC_BE; |
155 | } | 155 | } |
156 | 156 | ||
157 | if (skb->protocol == sdata->control_port_protocol) { | ||
158 | skb->priority = 7; | ||
159 | return ieee80211_downgrade_queue(sdata, skb); | ||
160 | } | ||
161 | |||
157 | /* use the data classifier to determine what 802.1d tag the | 162 | /* use the data classifier to determine what 802.1d tag the |
158 | * data frame has */ | 163 | * data frame has */ |
159 | rcu_read_lock(); | 164 | rcu_read_lock(); |
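
Editor's note: the wme.c hunk forces frames of the interface's control port protocol (typically EAPOL) to user priority 7 before the usual queue downgrade, so key-exchange frames are not starved behind bulk traffic. A rough sketch of the classification step follows; CONTROL_PORT_PROTO and the DSCP mapping are illustrative, not the mac80211 helpers.

    #include <stdint.h>
    #include <stdio.h>

    #define CONTROL_PORT_PROTO 0x888e   /* EAPOL ethertype, the usual control port */

    /* Illustrative classifier: control-port frames always get the top
     * 802.1d user priority (7); other traffic gets a DSCP-derived value. */
    static unsigned int select_priority(uint16_t proto, uint8_t dsfield)
    {
        if (proto == CONTROL_PORT_PROTO)
            return 7;
        return dsfield >> 5;            /* top three DS bits -> priority 0..7 */
    }

    int main(void)
    {
        printf("EAPOL       -> prio %u\n", select_priority(CONTROL_PORT_PROTO, 0));
        printf("best effort -> prio %u\n", select_priority(0x0800, 0x00));
        return 0;
    }
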
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index bb322d0beb48..b9f0e0374322 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -1310,27 +1310,22 @@ ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[]) | |||
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | static int | 1312 | static int |
1313 | ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[]) | 1313 | ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[]) |
1314 | { | 1314 | { |
1315 | #ifdef CONFIG_NF_NAT_NEEDED | 1315 | #ifdef CONFIG_NF_NAT_NEEDED |
1316 | int ret; | 1316 | int ret; |
1317 | 1317 | ||
1318 | if (cda[CTA_NAT_DST]) { | 1318 | ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST, |
1319 | ret = ctnetlink_parse_nat_setup(ct, | 1319 | cda[CTA_NAT_DST]); |
1320 | NF_NAT_MANIP_DST, | 1320 | if (ret < 0) |
1321 | cda[CTA_NAT_DST]); | 1321 | return ret; |
1322 | if (ret < 0) | 1322 | |
1323 | return ret; | 1323 | ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC, |
1324 | } | 1324 | cda[CTA_NAT_SRC]); |
1325 | if (cda[CTA_NAT_SRC]) { | 1325 | return ret; |
1326 | ret = ctnetlink_parse_nat_setup(ct, | ||
1327 | NF_NAT_MANIP_SRC, | ||
1328 | cda[CTA_NAT_SRC]); | ||
1329 | if (ret < 0) | ||
1330 | return ret; | ||
1331 | } | ||
1332 | return 0; | ||
1333 | #else | 1326 | #else |
1327 | if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC]) | ||
1328 | return 0; | ||
1334 | return -EOPNOTSUPP; | 1329 | return -EOPNOTSUPP; |
1335 | #endif | 1330 | #endif |
1336 | } | 1331 | } |
@@ -1659,11 +1654,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, | |||
1659 | goto err2; | 1654 | goto err2; |
1660 | } | 1655 | } |
1661 | 1656 | ||
1662 | if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { | 1657 | err = ctnetlink_setup_nat(ct, cda); |
1663 | err = ctnetlink_change_nat(ct, cda); | 1658 | if (err < 0) |
1664 | if (err < 0) | 1659 | goto err2; |
1665 | goto err2; | ||
1666 | } | ||
1667 | 1660 | ||
1668 | nf_ct_acct_ext_add(ct, GFP_ATOMIC); | 1661 | nf_ct_acct_ext_add(ct, GFP_ATOMIC); |
1669 | nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); | 1662 | nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); |
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index d3f5cd6dd962..52ca952b802c 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
@@ -432,15 +432,15 @@ nf_nat_setup_info(struct nf_conn *ct, | |||
432 | } | 432 | } |
433 | EXPORT_SYMBOL(nf_nat_setup_info); | 433 | EXPORT_SYMBOL(nf_nat_setup_info); |
434 | 434 | ||
435 | unsigned int | 435 | static unsigned int |
436 | nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) | 436 | __nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip) |
437 | { | 437 | { |
438 | /* Force range to this IP; let proto decide mapping for | 438 | /* Force range to this IP; let proto decide mapping for |
439 | * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). | 439 | * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). |
440 | * Use reply in case it's already been mangled (eg local packet). | 440 | * Use reply in case it's already been mangled (eg local packet). |
441 | */ | 441 | */ |
442 | union nf_inet_addr ip = | 442 | union nf_inet_addr ip = |
443 | (HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ? | 443 | (manip == NF_NAT_MANIP_SRC ? |
444 | ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 : | 444 | ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 : |
445 | ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3); | 445 | ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3); |
446 | struct nf_nat_range range = { | 446 | struct nf_nat_range range = { |
@@ -448,7 +448,13 @@ nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) | |||
448 | .min_addr = ip, | 448 | .min_addr = ip, |
449 | .max_addr = ip, | 449 | .max_addr = ip, |
450 | }; | 450 | }; |
451 | return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); | 451 | return nf_nat_setup_info(ct, &range, manip); |
452 | } | ||
453 | |||
454 | unsigned int | ||
455 | nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) | ||
456 | { | ||
457 | return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum)); | ||
452 | } | 458 | } |
453 | EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding); | 459 | EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding); |
454 | 460 | ||
@@ -702,9 +708,9 @@ static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = { | |||
702 | 708 | ||
703 | static int | 709 | static int |
704 | nfnetlink_parse_nat(const struct nlattr *nat, | 710 | nfnetlink_parse_nat(const struct nlattr *nat, |
705 | const struct nf_conn *ct, struct nf_nat_range *range) | 711 | const struct nf_conn *ct, struct nf_nat_range *range, |
712 | const struct nf_nat_l3proto *l3proto) | ||
706 | { | 713 | { |
707 | const struct nf_nat_l3proto *l3proto; | ||
708 | struct nlattr *tb[CTA_NAT_MAX+1]; | 714 | struct nlattr *tb[CTA_NAT_MAX+1]; |
709 | int err; | 715 | int err; |
710 | 716 | ||
@@ -714,38 +720,46 @@ nfnetlink_parse_nat(const struct nlattr *nat, | |||
714 | if (err < 0) | 720 | if (err < 0) |
715 | return err; | 721 | return err; |
716 | 722 | ||
717 | rcu_read_lock(); | ||
718 | l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct)); | ||
719 | if (l3proto == NULL) { | ||
720 | err = -EAGAIN; | ||
721 | goto out; | ||
722 | } | ||
723 | err = l3proto->nlattr_to_range(tb, range); | 723 | err = l3proto->nlattr_to_range(tb, range); |
724 | if (err < 0) | 724 | if (err < 0) |
725 | goto out; | 725 | return err; |
726 | 726 | ||
727 | if (!tb[CTA_NAT_PROTO]) | 727 | if (!tb[CTA_NAT_PROTO]) |
728 | goto out; | 728 | return 0; |
729 | 729 | ||
730 | err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); | 730 | return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); |
731 | out: | ||
732 | rcu_read_unlock(); | ||
733 | return err; | ||
734 | } | 731 | } |
735 | 732 | ||
733 | /* This function is called under rcu_read_lock() */ | ||
736 | static int | 734 | static int |
737 | nfnetlink_parse_nat_setup(struct nf_conn *ct, | 735 | nfnetlink_parse_nat_setup(struct nf_conn *ct, |
738 | enum nf_nat_manip_type manip, | 736 | enum nf_nat_manip_type manip, |
739 | const struct nlattr *attr) | 737 | const struct nlattr *attr) |
740 | { | 738 | { |
741 | struct nf_nat_range range; | 739 | struct nf_nat_range range; |
740 | const struct nf_nat_l3proto *l3proto; | ||
742 | int err; | 741 | int err; |
743 | 742 | ||
744 | err = nfnetlink_parse_nat(attr, ct, &range); | 743 | /* Should not happen, restricted to creating new conntracks |
744 | * via ctnetlink. | ||
745 | */ | ||
746 | if (WARN_ON_ONCE(nf_nat_initialized(ct, manip))) | ||
747 | return -EEXIST; | ||
748 | |||
749 | /* Make sure that L3 NAT is there by when we call nf_nat_setup_info to | ||
750 | * attach the null binding, otherwise this may oops. | ||
751 | */ | ||
752 | l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct)); | ||
753 | if (l3proto == NULL) | ||
754 | return -EAGAIN; | ||
755 | |||
756 | /* No NAT information has been passed, allocate the null-binding */ | ||
757 | if (attr == NULL) | ||
758 | return __nf_nat_alloc_null_binding(ct, manip); | ||
759 | |||
760 | err = nfnetlink_parse_nat(attr, ct, &range, l3proto); | ||
745 | if (err < 0) | 761 | if (err < 0) |
746 | return err; | 762 | return err; |
747 | if (nf_nat_initialized(ct, manip)) | ||
748 | return -EEXIST; | ||
749 | 763 | ||
750 | return nf_nat_setup_info(ct, &range, manip); | 764 | return nf_nat_setup_info(ct, &range, manip); |
751 | } | 765 | } |
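
Editor's note: taken together, the two netfilter hunks above make conntrack creation via ctnetlink always go through the NAT setup path: when no CTA_NAT_SRC/CTA_NAT_DST attribute is supplied, the new __nf_nat_alloc_null_binding() attaches a null binding for that manip type, and the L3 protocol is looked up once before anything is set up. A condensed sketch of that control flow with stand-in types; nothing below is the real netfilter API.

    #include <stddef.h>
    #include <stdio.h>

    enum manip_type { MANIP_SRC, MANIP_DST };

    struct nat_attr { int has_range; };        /* stand-in for a CTA_NAT_* nlattr */

    static int alloc_null_binding(enum manip_type manip)
    {
        /* keep the existing address, let the protocol pick the ports */
        printf("null binding (%s)\n", manip == MANIP_SRC ? "src" : "dst");
        return 0;
    }

    static int parse_nat_setup(const struct nat_attr *attr, enum manip_type manip)
    {
        if (attr == NULL)                       /* no NAT info given: null binding */
            return alloc_null_binding(manip);
        printf("real NAT setup (%s)\n", manip == MANIP_SRC ? "src" : "dst");
        return 0;
    }

    int main(void)
    {
        struct nat_attr dst = { .has_range = 1 };

        parse_nat_setup(&dst, MANIP_DST);       /* explicit DNAT range supplied */
        parse_nat_setup(NULL, MANIP_SRC);       /* no CTA_NAT_SRC: null binding */
        return 0;
    }
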
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index e8254ad2e5a9..425cf39af890 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c | |||
@@ -116,7 +116,7 @@ static void nft_meta_get_eval(const struct nft_expr *expr, | |||
116 | skb->sk->sk_socket->file->f_cred->fsgid); | 116 | skb->sk->sk_socket->file->f_cred->fsgid); |
117 | read_unlock_bh(&skb->sk->sk_callback_lock); | 117 | read_unlock_bh(&skb->sk->sk_callback_lock); |
118 | break; | 118 | break; |
119 | #ifdef CONFIG_NET_CLS_ROUTE | 119 | #ifdef CONFIG_IP_ROUTE_CLASSID |
120 | case NFT_META_RTCLASSID: { | 120 | case NFT_META_RTCLASSID: { |
121 | const struct dst_entry *dst = skb_dst(skb); | 121 | const struct dst_entry *dst = skb_dst(skb); |
122 | 122 | ||
@@ -199,7 +199,7 @@ static int nft_meta_init_validate_get(uint32_t key) | |||
199 | case NFT_META_OIFTYPE: | 199 | case NFT_META_OIFTYPE: |
200 | case NFT_META_SKUID: | 200 | case NFT_META_SKUID: |
201 | case NFT_META_SKGID: | 201 | case NFT_META_SKGID: |
202 | #ifdef CONFIG_NET_CLS_ROUTE | 202 | #ifdef CONFIG_IP_ROUTE_CLASSID |
203 | case NFT_META_RTCLASSID: | 203 | case NFT_META_RTCLASSID: |
204 | #endif | 204 | #endif |
205 | #ifdef CONFIG_NETWORK_SECMARK | 205 | #ifdef CONFIG_NETWORK_SECMARK |
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index a2aeb318678f..85daa84bfdfe 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c | |||
@@ -135,7 +135,8 @@ nft_payload_select_ops(const struct nft_ctx *ctx, | |||
135 | if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data)) | 135 | if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data)) |
136 | return ERR_PTR(-EINVAL); | 136 | return ERR_PTR(-EINVAL); |
137 | 137 | ||
138 | if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER) | 138 | if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) && |
139 | base != NFT_PAYLOAD_LL_HEADER) | ||
139 | return &nft_payload_fast_ops; | 140 | return &nft_payload_fast_ops; |
140 | else | 141 | else |
141 | return &nft_payload_ops; | 142 | return &nft_payload_ops; |
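
Editor's note: the nft_payload hunk tightens the fast-path check. IS_ALIGNED(offset, len) uses a bitmask that is only meaningful when len is a power of two, so a 3-byte load could previously slip into the single-access fast ops. A small sketch of the corrected predicate; use_fast_ops() is an illustrative name, not the nftables code.

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_power_of_2(unsigned int n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    static bool use_fast_ops(unsigned int offset, unsigned int len, bool ll_header)
    {
        /* The (len - 1) mask only expresses alignment for power-of-two
         * lengths, hence the extra is_power_of_2() test added by the patch. */
        return len <= 4 && is_power_of_2(len) &&
               (offset & (len - 1)) == 0 && !ll_header;
    }

    int main(void)
    {
        printf("len=4 off=8: %d\n", use_fast_ops(8, 4, false));  /* 1: fast path ok */
        printf("len=3 off=4: %d\n", use_fast_ops(4, 3, false));  /* 0: old check said 1 */
        return 0;
    }
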
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c index 8a310f239c93..b718a52a4654 100644 --- a/net/netfilter/nft_reject_inet.c +++ b/net/netfilter/nft_reject_inet.c | |||
@@ -21,9 +21,9 @@ static void nft_reject_inet_eval(const struct nft_expr *expr, | |||
21 | { | 21 | { |
22 | switch (pkt->ops->pf) { | 22 | switch (pkt->ops->pf) { |
23 | case NFPROTO_IPV4: | 23 | case NFPROTO_IPV4: |
24 | nft_reject_ipv4_eval(expr, data, pkt); | 24 | return nft_reject_ipv4_eval(expr, data, pkt); |
25 | case NFPROTO_IPV6: | 25 | case NFPROTO_IPV6: |
26 | nft_reject_ipv6_eval(expr, data, pkt); | 26 | return nft_reject_ipv6_eval(expr, data, pkt); |
27 | } | 27 | } |
28 | } | 28 | } |
29 | 29 | ||
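
Editor's note: the nft_reject_inet hunk is a missing-break fix. Without the added returns, an IPv4 packet fell through the switch and was also handed to the IPv6 reject routine. A minimal, generic illustration of that fallthrough (plain C, nothing netfilter-specific):

    #include <stdio.h>

    enum family { FAM_IPV4, FAM_IPV6 };

    static void reject_v4(void) { puts("reject as IPv4"); }
    static void reject_v6(void) { puts("reject as IPv6"); }

    static void reject_buggy(enum family f)
    {
        switch (f) {
        case FAM_IPV4:
            reject_v4();        /* no break/return: falls into the IPv6 case */
        case FAM_IPV6:
            reject_v6();
        }
    }

    static void reject_fixed(enum family f)
    {
        switch (f) {
        case FAM_IPV4:
            reject_v4();
            return;             /* the patch ends each case with a return */
        case FAM_IPV6:
            reject_v6();
            return;
        }
    }

    int main(void)
    {
        reject_buggy(FAM_IPV4); /* prints both lines */
        reject_fixed(FAM_IPV4); /* prints only the IPv4 line */
        return 0;
    }
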
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index fdf51353cf78..04748ab649c2 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1489,8 +1489,8 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, | |||
1489 | if (addr->sa_family != AF_NETLINK) | 1489 | if (addr->sa_family != AF_NETLINK) |
1490 | return -EINVAL; | 1490 | return -EINVAL; |
1491 | 1491 | ||
1492 | /* Only superuser is allowed to send multicasts */ | 1492 | if ((nladdr->nl_groups || nladdr->nl_pid) && |
1493 | if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND)) | 1493 | !netlink_capable(sock, NL_CFG_F_NONROOT_SEND)) |
1494 | return -EPERM; | 1494 | return -EPERM; |
1495 | 1495 | ||
1496 | if (!nlk->portid) | 1496 | if (!nlk->portid) |
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 46bda010bf11..56db888b1cd5 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c | |||
@@ -301,7 +301,7 @@ static int nci_open_device(struct nci_dev *ndev) | |||
301 | rc = __nci_request(ndev, nci_reset_req, 0, | 301 | rc = __nci_request(ndev, nci_reset_req, 0, |
302 | msecs_to_jiffies(NCI_RESET_TIMEOUT)); | 302 | msecs_to_jiffies(NCI_RESET_TIMEOUT)); |
303 | 303 | ||
304 | if (ndev->ops->setup(ndev)) | 304 | if (ndev->ops->setup) |
305 | ndev->ops->setup(ndev); | 305 | ndev->ops->setup(ndev); |
306 | 306 | ||
307 | if (!rc) { | 307 | if (!rc) { |
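
Editor's note: the NCI hunk fixes an inverted test. The old condition called ndev->ops->setup() inside the if, which oopses for drivers that provide no setup hook and runs the hook twice when it returns nonzero; the intent is simply to call the optional callback when it exists. A short sketch of guarding an optional callback (hypothetical ops struct, not the NFC API):

    #include <stddef.h>
    #include <stdio.h>

    struct dev_ops {
        int (*setup)(void);     /* optional hook: may be NULL */
    };

    static int my_setup(void) { puts("driver setup"); return 0; }

    static void open_device(const struct dev_ops *ops)
    {
        if (ops->setup)         /* test the pointer, do not call it in the test */
            ops->setup();
        puts("device opened");
    }

    int main(void)
    {
        struct dev_ops with    = { .setup = my_setup };
        struct dev_ops without = { .setup = NULL };

        open_device(&with);
        open_device(&without);  /* safe: the hook is simply skipped */
        return 0;
    }
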
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 1cb413fead89..4f505a006896 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -334,18 +334,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |||
334 | qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate, | 334 | qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate, |
335 | tb[TCA_TBF_PTAB])); | 335 | tb[TCA_TBF_PTAB])); |
336 | 336 | ||
337 | if (q->qdisc != &noop_qdisc) { | ||
338 | err = fifo_set_limit(q->qdisc, qopt->limit); | ||
339 | if (err) | ||
340 | goto done; | ||
341 | } else if (qopt->limit > 0) { | ||
342 | child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); | ||
343 | if (IS_ERR(child)) { | ||
344 | err = PTR_ERR(child); | ||
345 | goto done; | ||
346 | } | ||
347 | } | ||
348 | |||
349 | buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U); | 337 | buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U); |
350 | mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U); | 338 | mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U); |
351 | 339 | ||
@@ -390,6 +378,18 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |||
390 | goto done; | 378 | goto done; |
391 | } | 379 | } |
392 | 380 | ||
381 | if (q->qdisc != &noop_qdisc) { | ||
382 | err = fifo_set_limit(q->qdisc, qopt->limit); | ||
383 | if (err) | ||
384 | goto done; | ||
385 | } else if (qopt->limit > 0) { | ||
386 | child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); | ||
387 | if (IS_ERR(child)) { | ||
388 | err = PTR_ERR(child); | ||
389 | goto done; | ||
390 | } | ||
391 | } | ||
392 | |||
393 | sch_tree_lock(sch); | 393 | sch_tree_lock(sch); |
394 | if (child) { | 394 | if (child) { |
395 | qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); | 395 | qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index f558433537b8..ee13d28d39d1 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1239,78 +1239,107 @@ void sctp_assoc_update(struct sctp_association *asoc, | |||
1239 | } | 1239 | } |
1240 | 1240 | ||
1241 | /* Update the retran path for sending a retransmitted packet. | 1241 | /* Update the retran path for sending a retransmitted packet. |
1242 | * Round-robin through the active transports, else round-robin | 1242 | * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints: |
1243 | * through the inactive transports as this is the next best thing | 1243 | * |
1244 | * we can try. | 1244 | * When there is outbound data to send and the primary path |
1245 | * becomes inactive (e.g., due to failures), or where the | ||
1246 | * SCTP user explicitly requests to send data to an | ||
1247 | * inactive destination transport address, before reporting | ||
1248 | * an error to its ULP, the SCTP endpoint should try to send | ||
1249 | * the data to an alternate active destination transport | ||
1250 | * address if one exists. | ||
1251 | * | ||
1252 | * When retransmitting data that timed out, if the endpoint | ||
1253 | * is multihomed, it should consider each source-destination | ||
1254 | * address pair in its retransmission selection policy. | ||
1255 | * When retransmitting timed-out data, the endpoint should | ||
1256 | * attempt to pick the most divergent source-destination | ||
1257 | * pair from the original source-destination pair to which | ||
1258 | * the packet was transmitted. | ||
1259 | * | ||
1260 | * Note: Rules for picking the most divergent source-destination | ||
1261 | * pair are an implementation decision and are not specified | ||
1262 | * within this document. | ||
1263 | * | ||
1264 | * Our basic strategy is to round-robin transports in priorities | ||
1265 | * according to sctp_state_prio_map[] e.g., if no such | ||
1266 | * transport with state SCTP_ACTIVE exists, round-robin through | ||
1267 | * SCTP_UNKNOWN, etc. You get the picture. | ||
1245 | */ | 1268 | */ |
1246 | void sctp_assoc_update_retran_path(struct sctp_association *asoc) | 1269 | static const u8 sctp_trans_state_to_prio_map[] = { |
1270 | [SCTP_ACTIVE] = 3, /* best case */ | ||
1271 | [SCTP_UNKNOWN] = 2, | ||
1272 | [SCTP_PF] = 1, | ||
1273 | [SCTP_INACTIVE] = 0, /* worst case */ | ||
1274 | }; | ||
1275 | |||
1276 | static u8 sctp_trans_score(const struct sctp_transport *trans) | ||
1247 | { | 1277 | { |
1248 | struct sctp_transport *t, *next; | 1278 | return sctp_trans_state_to_prio_map[trans->state]; |
1249 | struct list_head *head = &asoc->peer.transport_addr_list; | 1279 | } |
1250 | struct list_head *pos; | ||
1251 | 1280 | ||
1252 | if (asoc->peer.transport_count == 1) | 1281 | static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, |
1253 | return; | 1282 | struct sctp_transport *best) |
1283 | { | ||
1284 | if (best == NULL) | ||
1285 | return curr; | ||
1254 | 1286 | ||
1255 | /* Find the next transport in a round-robin fashion. */ | 1287 | return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best; |
1256 | t = asoc->peer.retran_path; | 1288 | } |
1257 | pos = &t->transports; | ||
1258 | next = NULL; | ||
1259 | 1289 | ||
1260 | while (1) { | 1290 | void sctp_assoc_update_retran_path(struct sctp_association *asoc) |
1261 | /* Skip the head. */ | 1291 | { |
1262 | if (pos->next == head) | 1292 | struct sctp_transport *trans = asoc->peer.retran_path; |
1263 | pos = head->next; | 1293 | struct sctp_transport *trans_next = NULL; |
1264 | else | ||
1265 | pos = pos->next; | ||
1266 | 1294 | ||
1267 | t = list_entry(pos, struct sctp_transport, transports); | 1295 | /* We're done as we only have the one and only path. */ |
1296 | if (asoc->peer.transport_count == 1) | ||
1297 | return; | ||
1298 | /* If active_path and retran_path are the same and active, | ||
1299 | * then this is the only active path. Use it. | ||
1300 | */ | ||
1301 | if (asoc->peer.active_path == asoc->peer.retran_path && | ||
1302 | asoc->peer.active_path->state == SCTP_ACTIVE) | ||
1303 | return; | ||
1268 | 1304 | ||
1269 | /* We have exhausted the list, but didn't find any | 1305 | /* Iterate from retran_path's successor back to retran_path. */ |
1270 | * other active transports. If so, use the next | 1306 | for (trans = list_next_entry(trans, transports); 1; |
1271 | * transport. | 1307 | trans = list_next_entry(trans, transports)) { |
1272 | */ | 1308 | /* Manually skip the head element. */ |
1273 | if (t == asoc->peer.retran_path) { | 1309 | if (&trans->transports == &asoc->peer.transport_addr_list) |
1274 | t = next; | 1310 | continue; |
1311 | if (trans->state == SCTP_UNCONFIRMED) | ||
1312 | continue; | ||
1313 | trans_next = sctp_trans_elect_best(trans, trans_next); | ||
1314 | /* Active is good enough for immediate return. */ | ||
1315 | if (trans_next->state == SCTP_ACTIVE) | ||
1275 | break; | 1316 | break; |
1276 | } | 1317 | /* We've reached the end, time to update path. */ |
1277 | 1318 | if (trans == asoc->peer.retran_path) | |
1278 | /* Try to find an active transport. */ | ||
1279 | |||
1280 | if ((t->state == SCTP_ACTIVE) || | ||
1281 | (t->state == SCTP_UNKNOWN)) { | ||
1282 | break; | 1319 | break; |
1283 | } else { | ||
1284 | /* Keep track of the next transport in case | ||
1285 | * we don't find any active transport. | ||
1286 | */ | ||
1287 | if (t->state != SCTP_UNCONFIRMED && !next) | ||
1288 | next = t; | ||
1289 | } | ||
1290 | } | 1320 | } |
1291 | 1321 | ||
1292 | if (t) | 1322 | if (trans_next != NULL) |
1293 | asoc->peer.retran_path = t; | 1323 | asoc->peer.retran_path = trans_next; |
1294 | else | ||
1295 | t = asoc->peer.retran_path; | ||
1296 | 1324 | ||
1297 | pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc, | 1325 | pr_debug("%s: association:%p updated new path to addr:%pISpc\n", |
1298 | &t->ipaddr.sa); | 1326 | __func__, asoc, &asoc->peer.retran_path->ipaddr.sa); |
1299 | } | 1327 | } |
1300 | 1328 | ||
1301 | /* Choose the transport for sending retransmit packet. */ | 1329 | struct sctp_transport * |
1302 | struct sctp_transport *sctp_assoc_choose_alter_transport( | 1330 | sctp_assoc_choose_alter_transport(struct sctp_association *asoc, |
1303 | struct sctp_association *asoc, struct sctp_transport *last_sent_to) | 1331 | struct sctp_transport *last_sent_to) |
1304 | { | 1332 | { |
1305 | /* If this is the first time packet is sent, use the active path, | 1333 | /* If this is the first time packet is sent, use the active path, |
1306 | * else use the retran path. If the last packet was sent over the | 1334 | * else use the retran path. If the last packet was sent over the |
1307 | * retran path, update the retran path and use it. | 1335 | * retran path, update the retran path and use it. |
1308 | */ | 1336 | */ |
1309 | if (!last_sent_to) | 1337 | if (last_sent_to == NULL) { |
1310 | return asoc->peer.active_path; | 1338 | return asoc->peer.active_path; |
1311 | else { | 1339 | } else { |
1312 | if (last_sent_to == asoc->peer.retran_path) | 1340 | if (last_sent_to == asoc->peer.retran_path) |
1313 | sctp_assoc_update_retran_path(asoc); | 1341 | sctp_assoc_update_retran_path(asoc); |
1342 | |||
1314 | return asoc->peer.retran_path; | 1343 | return asoc->peer.retran_path; |
1315 | } | 1344 | } |
1316 | } | 1345 | } |
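
Editor's note: the rewritten sctp_assoc_update_retran_path() ranks transports by state (ACTIVE best, then UNKNOWN, PF, INACTIVE, per sctp_trans_state_to_prio_map), skips UNCONFIRMED entries, and round-robins from the current retran path, stopping early once an ACTIVE transport is found. A compact sketch of that election over a plain array; the kernel list walk and the UNCONFIRMED case are simplified away.

    #include <stddef.h>
    #include <stdio.h>

    enum trans_state { INACTIVE, PF, UNKNOWN, ACTIVE };   /* enum value == score */

    struct transport { const char *name; enum trans_state state; };

    static const struct transport *elect_best(const struct transport *t, size_t n,
                                              size_t cur)
    {
        const struct transport *best = NULL;

        /* walk the ring once, starting just after the current retran path */
        for (size_t i = 1; i <= n; i++) {
            const struct transport *cand = &t[(cur + i) % n];

            if (!best || cand->state > best->state)
                best = cand;
            if (best->state == ACTIVE)          /* active is good enough, stop */
                break;
        }
        return best;
    }

    int main(void)
    {
        const struct transport t[] = {
            { "a", INACTIVE }, { "b", PF }, { "c", ACTIVE }, { "d", UNKNOWN },
        };

        printf("new retran path: %s\n", elect_best(t, 4, 0)->name);  /* "c" */
        return 0;
    }
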
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index bd859154000e..5d6883ff00c3 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -495,11 +495,12 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, | |||
495 | } | 495 | } |
496 | 496 | ||
497 | /* If the transport error count is greater than the pf_retrans | 497 | /* If the transport error count is greater than the pf_retrans |
498 | * threshold, and less than pathmaxrtx, then mark this transport | 498 | * threshold, and less than pathmaxrtx, and if the current state |
499 | * as Partially Failed, ee SCTP Quick Failover Draft, secon 5.1, | 499 | * is not SCTP_UNCONFIRMED, then mark this transport as Partially |
500 | * point 1 | 500 | * Failed, see SCTP Quick Failover Draft, section 5.1 |
501 | */ | 501 | */ |
502 | if ((transport->state != SCTP_PF) && | 502 | if ((transport->state != SCTP_PF) && |
503 | (transport->state != SCTP_UNCONFIRMED) && | ||
503 | (asoc->pf_retrans < transport->pathmaxrxt) && | 504 | (asoc->pf_retrans < transport->pathmaxrxt) && |
504 | (transport->error_count > asoc->pf_retrans)) { | 505 | (transport->error_count > asoc->pf_retrans)) { |
505 | 506 | ||
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 591b44d3b7de..ae65b6b5973a 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -758,6 +758,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, | |||
758 | struct sctp_chunk auth; | 758 | struct sctp_chunk auth; |
759 | sctp_ierror_t ret; | 759 | sctp_ierror_t ret; |
760 | 760 | ||
761 | /* Make sure that we and the peer are AUTH capable */ | ||
762 | if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { | ||
763 | kfree_skb(chunk->auth_chunk); | ||
764 | sctp_association_free(new_asoc); | ||
765 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); | ||
766 | } | ||
767 | |||
761 | /* set-up our fake chunk so that we can process it */ | 768 | /* set-up our fake chunk so that we can process it */ |
762 | auth.skb = chunk->auth_chunk; | 769 | auth.skb = chunk->auth_chunk; |
763 | auth.asoc = chunk->asoc; | 770 | auth.asoc = chunk->asoc; |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index a38c89969c68..574b86193b15 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -610,8 +610,13 @@ static struct notifier_block notifier = { | |||
610 | 610 | ||
611 | int tipc_bearer_setup(void) | 611 | int tipc_bearer_setup(void) |
612 | { | 612 | { |
613 | int err; | ||
614 | |||
615 | err = register_netdevice_notifier(¬ifier); | ||
616 | if (err) | ||
617 | return err; | ||
613 | dev_add_pack(&tipc_packet_type); | 618 | dev_add_pack(&tipc_packet_type); |
614 | return register_netdevice_notifier(¬ifier); | 619 | return 0; |
615 | } | 620 | } |
616 | 621 | ||
617 | void tipc_bearer_cleanup(void) | 622 | void tipc_bearer_cleanup(void) |
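
Editor's note: tipc_bearer_setup() now registers the netdevice notifier first and bails out on failure before adding the packet handler, so an error leaves nothing half-installed. The shape of that change, reduced to a sketch with placeholder registration functions:

    #include <stdio.h>

    static int register_notifier(void) { return 0; }     /* placeholder registration */
    static void add_packet_handler(void) { puts("packet handler added"); }

    static int bearer_setup(void)
    {
        int err = register_notifier();
        if (err)
            return err;          /* fail before any other side effect */
        add_packet_handler();    /* only reached once the notifier is in place */
        return 0;
    }

    int main(void)
    {
        return bearer_setup();
    }
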
diff --git a/net/tipc/config.c b/net/tipc/config.c index c301a9a592d8..e74eef2e7490 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c | |||
@@ -181,7 +181,7 @@ static struct sk_buff *cfg_set_own_addr(void) | |||
181 | if (tipc_own_addr) | 181 | if (tipc_own_addr) |
182 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | 182 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED |
183 | " (cannot change node address once assigned)"); | 183 | " (cannot change node address once assigned)"); |
184 | tipc_core_start_net(addr); | 184 | tipc_net_start(addr); |
185 | return tipc_cfg_reply_none(); | 185 | return tipc_cfg_reply_none(); |
186 | } | 186 | } |
187 | 187 | ||
diff --git a/net/tipc/core.c b/net/tipc/core.c index f9e88d8b04ca..80c20647b3d2 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -77,37 +77,13 @@ struct sk_buff *tipc_buf_acquire(u32 size) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | /** | 79 | /** |
80 | * tipc_core_stop_net - shut down TIPC networking sub-systems | ||
81 | */ | ||
82 | static void tipc_core_stop_net(void) | ||
83 | { | ||
84 | tipc_net_stop(); | ||
85 | tipc_bearer_cleanup(); | ||
86 | } | ||
87 | |||
88 | /** | ||
89 | * start_net - start TIPC networking sub-systems | ||
90 | */ | ||
91 | int tipc_core_start_net(unsigned long addr) | ||
92 | { | ||
93 | int res; | ||
94 | |||
95 | tipc_net_start(addr); | ||
96 | res = tipc_bearer_setup(); | ||
97 | if (res < 0) | ||
98 | goto err; | ||
99 | return res; | ||
100 | |||
101 | err: | ||
102 | tipc_core_stop_net(); | ||
103 | return res; | ||
104 | } | ||
105 | |||
106 | /** | ||
107 | * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode | 80 | * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode |
108 | */ | 81 | */ |
109 | static void tipc_core_stop(void) | 82 | static void tipc_core_stop(void) |
110 | { | 83 | { |
84 | tipc_handler_stop(); | ||
85 | tipc_net_stop(); | ||
86 | tipc_bearer_cleanup(); | ||
111 | tipc_netlink_stop(); | 87 | tipc_netlink_stop(); |
112 | tipc_cfg_stop(); | 88 | tipc_cfg_stop(); |
113 | tipc_subscr_stop(); | 89 | tipc_subscr_stop(); |
@@ -122,30 +98,65 @@ static void tipc_core_stop(void) | |||
122 | */ | 98 | */ |
123 | static int tipc_core_start(void) | 99 | static int tipc_core_start(void) |
124 | { | 100 | { |
125 | int res; | 101 | int err; |
126 | 102 | ||
127 | get_random_bytes(&tipc_random, sizeof(tipc_random)); | 103 | get_random_bytes(&tipc_random, sizeof(tipc_random)); |
128 | 104 | ||
129 | res = tipc_handler_start(); | 105 | err = tipc_handler_start(); |
130 | if (!res) | 106 | if (err) |
131 | res = tipc_ref_table_init(tipc_max_ports, tipc_random); | 107 | goto out_handler; |
132 | if (!res) | 108 | |
133 | res = tipc_nametbl_init(); | 109 | err = tipc_ref_table_init(tipc_max_ports, tipc_random); |
134 | if (!res) | 110 | if (err) |
135 | res = tipc_netlink_start(); | 111 | goto out_reftbl; |
136 | if (!res) | 112 | |
137 | res = tipc_socket_init(); | 113 | err = tipc_nametbl_init(); |
138 | if (!res) | 114 | if (err) |
139 | res = tipc_register_sysctl(); | 115 | goto out_nametbl; |
140 | if (!res) | 116 | |
141 | res = tipc_subscr_start(); | 117 | err = tipc_netlink_start(); |
142 | if (!res) | 118 | if (err) |
143 | res = tipc_cfg_init(); | 119 | goto out_netlink; |
144 | if (res) { | 120 | |
145 | tipc_handler_stop(); | 121 | err = tipc_socket_init(); |
146 | tipc_core_stop(); | 122 | if (err) |
147 | } | 123 | goto out_socket; |
148 | return res; | 124 | |
125 | err = tipc_register_sysctl(); | ||
126 | if (err) | ||
127 | goto out_sysctl; | ||
128 | |||
129 | err = tipc_subscr_start(); | ||
130 | if (err) | ||
131 | goto out_subscr; | ||
132 | |||
133 | err = tipc_cfg_init(); | ||
134 | if (err) | ||
135 | goto out_cfg; | ||
136 | |||
137 | err = tipc_bearer_setup(); | ||
138 | if (err) | ||
139 | goto out_bearer; | ||
140 | |||
141 | return 0; | ||
142 | out_bearer: | ||
143 | tipc_cfg_stop(); | ||
144 | out_cfg: | ||
145 | tipc_subscr_stop(); | ||
146 | out_subscr: | ||
147 | tipc_unregister_sysctl(); | ||
148 | out_sysctl: | ||
149 | tipc_socket_stop(); | ||
150 | out_socket: | ||
151 | tipc_netlink_stop(); | ||
152 | out_netlink: | ||
153 | tipc_nametbl_stop(); | ||
154 | out_nametbl: | ||
155 | tipc_ref_table_stop(); | ||
156 | out_reftbl: | ||
157 | tipc_handler_stop(); | ||
158 | out_handler: | ||
159 | return err; | ||
149 | } | 160 | } |
150 | 161 | ||
151 | static int __init tipc_init(void) | 162 | static int __init tipc_init(void) |
@@ -174,8 +185,6 @@ static int __init tipc_init(void) | |||
174 | 185 | ||
175 | static void __exit tipc_exit(void) | 186 | static void __exit tipc_exit(void) |
176 | { | 187 | { |
177 | tipc_handler_stop(); | ||
178 | tipc_core_stop_net(); | ||
179 | tipc_core_stop(); | 188 | tipc_core_stop(); |
180 | pr_info("Deactivated\n"); | 189 | pr_info("Deactivated\n"); |
181 | } | 190 | } |
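
Editor's note: tipc_core_start() is reworked from a chain of "if (!res)" calls into the usual kernel goto-unwind style: every successfully started subsystem has a matching label, and a failure jumps to the label that tears down everything initialized so far, in reverse order. A self-contained sketch of the idiom with placeholder subsystems (not the TIPC ones):

    #include <stdio.h>

    static int init_a(void) { puts("init a"); return 0; }
    static int init_b(void) { puts("init b"); return 0; }
    static int init_c(void) { puts("init c"); return -1; }  /* pretend this fails */
    static void exit_a(void) { puts("exit a"); }
    static void exit_b(void) { puts("exit b"); }

    static int core_start(void)
    {
        int err;

        err = init_a();
        if (err)
            goto out_a;
        err = init_b();
        if (err)
            goto out_b;
        err = init_c();
        if (err)
            goto out_c;
        return 0;

    out_c:
        exit_b();       /* undo b, then fall through to undo a */
    out_b:
        exit_a();
    out_a:
        return err;
    }

    int main(void)
    {
        return core_start() ? 1 : 0;
    }
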
diff --git a/net/tipc/core.h b/net/tipc/core.h index 5569d96b4da3..4dfe137587bb 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -90,7 +90,6 @@ extern int tipc_random __read_mostly; | |||
90 | /* | 90 | /* |
91 | * Routines available to privileged subsystems | 91 | * Routines available to privileged subsystems |
92 | */ | 92 | */ |
93 | int tipc_core_start_net(unsigned long); | ||
94 | int tipc_handler_start(void); | 93 | int tipc_handler_start(void); |
95 | void tipc_handler_stop(void); | 94 | void tipc_handler_stop(void); |
96 | int tipc_netlink_start(void); | 95 | int tipc_netlink_start(void); |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 92a1533af4e0..48302be175ce 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -945,9 +945,6 @@ void tipc_nametbl_stop(void) | |||
945 | { | 945 | { |
946 | u32 i; | 946 | u32 i; |
947 | 947 | ||
948 | if (!table.types) | ||
949 | return; | ||
950 | |||
951 | /* Verify name table is empty, then release it */ | 948 | /* Verify name table is empty, then release it */ |
952 | write_lock_bh(&tipc_nametbl_lock); | 949 | write_lock_bh(&tipc_nametbl_lock); |
953 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { | 950 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { |
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 9f72a6376362..3aaf73de9e2d 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
@@ -83,8 +83,6 @@ static struct genl_ops tipc_genl_ops[] = { | |||
83 | }, | 83 | }, |
84 | }; | 84 | }; |
85 | 85 | ||
86 | static int tipc_genl_family_registered; | ||
87 | |||
88 | int tipc_netlink_start(void) | 86 | int tipc_netlink_start(void) |
89 | { | 87 | { |
90 | int res; | 88 | int res; |
@@ -94,16 +92,10 @@ int tipc_netlink_start(void) | |||
94 | pr_err("Failed to register netlink interface\n"); | 92 | pr_err("Failed to register netlink interface\n"); |
95 | return res; | 93 | return res; |
96 | } | 94 | } |
97 | |||
98 | tipc_genl_family_registered = 1; | ||
99 | return 0; | 95 | return 0; |
100 | } | 96 | } |
101 | 97 | ||
102 | void tipc_netlink_stop(void) | 98 | void tipc_netlink_stop(void) |
103 | { | 99 | { |
104 | if (!tipc_genl_family_registered) | ||
105 | return; | ||
106 | |||
107 | genl_unregister_family(&tipc_genl_family); | 100 | genl_unregister_family(&tipc_genl_family); |
108 | tipc_genl_family_registered = 0; | ||
109 | } | 101 | } |
diff --git a/net/tipc/ref.c b/net/tipc/ref.c index 2a2a938dc22c..de3d593e2fee 100644 --- a/net/tipc/ref.c +++ b/net/tipc/ref.c | |||
@@ -126,9 +126,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start) | |||
126 | */ | 126 | */ |
127 | void tipc_ref_table_stop(void) | 127 | void tipc_ref_table_stop(void) |
128 | { | 128 | { |
129 | if (!tipc_ref_table.entries) | ||
130 | return; | ||
131 | |||
132 | vfree(tipc_ref_table.entries); | 129 | vfree(tipc_ref_table.entries); |
133 | tipc_ref_table.entries = NULL; | 130 | tipc_ref_table.entries = NULL; |
134 | } | 131 | } |
diff --git a/net/tipc/server.c b/net/tipc/server.c index b635ca347a87..373979789a73 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c | |||
@@ -573,7 +573,6 @@ int tipc_server_start(struct tipc_server *s) | |||
573 | kmem_cache_destroy(s->rcvbuf_cache); | 573 | kmem_cache_destroy(s->rcvbuf_cache); |
574 | return ret; | 574 | return ret; |
575 | } | 575 | } |
576 | s->enabled = 1; | ||
577 | return ret; | 576 | return ret; |
578 | } | 577 | } |
579 | 578 | ||
@@ -583,10 +582,6 @@ void tipc_server_stop(struct tipc_server *s) | |||
583 | int total = 0; | 582 | int total = 0; |
584 | int id; | 583 | int id; |
585 | 584 | ||
586 | if (!s->enabled) | ||
587 | return; | ||
588 | |||
589 | s->enabled = 0; | ||
590 | spin_lock_bh(&s->idr_lock); | 585 | spin_lock_bh(&s->idr_lock); |
591 | for (id = 0; total < s->idr_in_use; id++) { | 586 | for (id = 0; total < s->idr_in_use; id++) { |
592 | con = idr_find(&s->conn_idr, id); | 587 | con = idr_find(&s->conn_idr, id); |
diff --git a/net/tipc/server.h b/net/tipc/server.h index 98b23f20bc0f..be817b0b547e 100644 --- a/net/tipc/server.h +++ b/net/tipc/server.h | |||
@@ -56,7 +56,6 @@ | |||
56 | * @name: server name | 56 | * @name: server name |
57 | * @imp: message importance | 57 | * @imp: message importance |
58 | * @type: socket type | 58 | * @type: socket type |
59 | * @enabled: identify whether server is launched or not | ||
60 | */ | 59 | */ |
61 | struct tipc_server { | 60 | struct tipc_server { |
62 | struct idr conn_idr; | 61 | struct idr conn_idr; |
@@ -74,7 +73,6 @@ struct tipc_server { | |||
74 | const char name[TIPC_SERVER_NAME_LEN]; | 73 | const char name[TIPC_SERVER_NAME_LEN]; |
75 | int imp; | 74 | int imp; |
76 | int type; | 75 | int type; |
77 | int enabled; | ||
78 | }; | 76 | }; |
79 | 77 | ||
80 | int tipc_conn_sendmsg(struct tipc_server *s, int conid, | 78 | int tipc_conn_sendmsg(struct tipc_server *s, int conid, |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index aab4948f0aff..a4cf274455aa 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -70,8 +70,6 @@ static const struct proto_ops msg_ops; | |||
70 | static struct proto tipc_proto; | 70 | static struct proto tipc_proto; |
71 | static struct proto tipc_proto_kern; | 71 | static struct proto tipc_proto_kern; |
72 | 72 | ||
73 | static int sockets_enabled; | ||
74 | |||
75 | /* | 73 | /* |
76 | * Revised TIPC socket locking policy: | 74 | * Revised TIPC socket locking policy: |
77 | * | 75 | * |
@@ -2027,8 +2025,6 @@ int tipc_socket_init(void) | |||
2027 | proto_unregister(&tipc_proto); | 2025 | proto_unregister(&tipc_proto); |
2028 | goto out; | 2026 | goto out; |
2029 | } | 2027 | } |
2030 | |||
2031 | sockets_enabled = 1; | ||
2032 | out: | 2028 | out: |
2033 | return res; | 2029 | return res; |
2034 | } | 2030 | } |
@@ -2038,10 +2034,6 @@ int tipc_socket_init(void) | |||
2038 | */ | 2034 | */ |
2039 | void tipc_socket_stop(void) | 2035 | void tipc_socket_stop(void) |
2040 | { | 2036 | { |
2041 | if (!sockets_enabled) | ||
2042 | return; | ||
2043 | |||
2044 | sockets_enabled = 0; | ||
2045 | sock_unregister(tipc_family_ops.family); | 2037 | sock_unregister(tipc_family_ops.family); |
2046 | proto_unregister(&tipc_proto); | 2038 | proto_unregister(&tipc_proto); |
2047 | } | 2039 | } |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 9b897fca7487..f0541370e68e 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -1700,7 +1700,7 @@ static void reg_process_hint(struct regulatory_request *reg_request) | |||
1700 | return; | 1700 | return; |
1701 | case NL80211_REGDOM_SET_BY_USER: | 1701 | case NL80211_REGDOM_SET_BY_USER: |
1702 | treatment = reg_process_hint_user(reg_request); | 1702 | treatment = reg_process_hint_user(reg_request); |
1703 | if (treatment == REG_REQ_OK || | 1703 | if (treatment == REG_REQ_IGNORE || |
1704 | treatment == REG_REQ_ALREADY_SET) | 1704 | treatment == REG_REQ_ALREADY_SET) |
1705 | return; | 1705 | return; |
1706 | schedule_delayed_work(®_timeout, msecs_to_jiffies(3142)); | 1706 | schedule_delayed_work(®_timeout, msecs_to_jiffies(3142)); |
@@ -2373,6 +2373,7 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd, | |||
2373 | int set_regdom(const struct ieee80211_regdomain *rd) | 2373 | int set_regdom(const struct ieee80211_regdomain *rd) |
2374 | { | 2374 | { |
2375 | struct regulatory_request *lr; | 2375 | struct regulatory_request *lr; |
2376 | bool user_reset = false; | ||
2376 | int r; | 2377 | int r; |
2377 | 2378 | ||
2378 | if (!reg_is_valid_request(rd->alpha2)) { | 2379 | if (!reg_is_valid_request(rd->alpha2)) { |
@@ -2389,6 +2390,7 @@ int set_regdom(const struct ieee80211_regdomain *rd) | |||
2389 | break; | 2390 | break; |
2390 | case NL80211_REGDOM_SET_BY_USER: | 2391 | case NL80211_REGDOM_SET_BY_USER: |
2391 | r = reg_set_rd_user(rd, lr); | 2392 | r = reg_set_rd_user(rd, lr); |
2393 | user_reset = true; | ||
2392 | break; | 2394 | break; |
2393 | case NL80211_REGDOM_SET_BY_DRIVER: | 2395 | case NL80211_REGDOM_SET_BY_DRIVER: |
2394 | r = reg_set_rd_driver(rd, lr); | 2396 | r = reg_set_rd_driver(rd, lr); |
@@ -2402,8 +2404,14 @@ int set_regdom(const struct ieee80211_regdomain *rd) | |||
2402 | } | 2404 | } |
2403 | 2405 | ||
2404 | if (r) { | 2406 | if (r) { |
2405 | if (r == -EALREADY) | 2407 | switch (r) { |
2408 | case -EALREADY: | ||
2406 | reg_set_request_processed(); | 2409 | reg_set_request_processed(); |
2410 | break; | ||
2411 | default: | ||
2412 | /* Back to world regulatory in case of errors */ | ||
2413 | restore_regulatory_settings(user_reset); | ||
2414 | } | ||
2407 | 2415 | ||
2408 | kfree(rd); | 2416 | kfree(rd); |
2409 | return r; | 2417 | return r; |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 4b98b25793c5..1d5c7bf29938 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1158,7 +1158,7 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, | |||
1158 | if (hlist_unhashed(&pol->bydst)) | 1158 | if (hlist_unhashed(&pol->bydst)) |
1159 | return NULL; | 1159 | return NULL; |
1160 | 1160 | ||
1161 | hlist_del(&pol->bydst); | 1161 | hlist_del_init(&pol->bydst); |
1162 | hlist_del(&pol->byidx); | 1162 | hlist_del(&pol->byidx); |
1163 | list_del(&pol->walk.all); | 1163 | list_del(&pol->walk.all); |
1164 | net->xfrm.policy_count[dir]--; | 1164 | net->xfrm.policy_count[dir]--; |
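
Editor's note: the xfrm_policy hunk unlinks the bydst node with hlist_del_init() instead of hlist_del(), so the node's pointers are reset and the hlist_unhashed() check at the top of __xfrm_policy_unlink() really reports "already unlinked" if the policy is unlinked a second time, instead of following stale pointers. A userspace analogue of the two delete variants; struct node merely mimics hlist_node.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next, **pprev; };

    static bool unhashed(const struct node *n) { return n->pprev == NULL; }

    static void del(struct node *n)        /* like hlist_del: leaves stale links */
    {
        *n->pprev = n->next;
        if (n->next)
            n->next->pprev = n->pprev;
    }

    static void del_init(struct node *n)   /* like hlist_del_init: safe to re-test */
    {
        del(n);
        n->next = NULL;
        n->pprev = NULL;
    }

    int main(void)
    {
        struct node *head = NULL;
        struct node a = { .next = NULL, .pprev = &head };

        head = &a;
        del_init(&a);
        /* a second unlink attempt can now be detected and skipped */
        printf("unhashed after del_init: %d\n", unhashed(&a));
        return 0;
    }
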
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index a26b7aa79475..40f1b3e92e78 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1159,6 +1159,11 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp) | |||
1159 | } | 1159 | } |
1160 | x->props.aalgo = orig->props.aalgo; | 1160 | x->props.aalgo = orig->props.aalgo; |
1161 | 1161 | ||
1162 | if (orig->aead) { | ||
1163 | x->aead = xfrm_algo_aead_clone(orig->aead); | ||
1164 | if (!x->aead) | ||
1165 | goto error; | ||
1166 | } | ||
1162 | if (orig->ealg) { | 1167 | if (orig->ealg) { |
1163 | x->ealg = xfrm_algo_clone(orig->ealg); | 1168 | x->ealg = xfrm_algo_clone(orig->ealg); |
1164 | if (!x->ealg) | 1169 | if (!x->ealg) |
@@ -1201,6 +1206,9 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp) | |||
1201 | x->props.flags = orig->props.flags; | 1206 | x->props.flags = orig->props.flags; |
1202 | x->props.extra_flags = orig->props.extra_flags; | 1207 | x->props.extra_flags = orig->props.extra_flags; |
1203 | 1208 | ||
1209 | x->tfcpad = orig->tfcpad; | ||
1210 | x->replay_maxdiff = orig->replay_maxdiff; | ||
1211 | x->replay_maxage = orig->replay_maxage; | ||
1204 | x->curlft.add_time = orig->curlft.add_time; | 1212 | x->curlft.add_time = orig->curlft.add_time; |
1205 | x->km.state = orig->km.state; | 1213 | x->km.state = orig->km.state; |
1206 | x->km.seq = orig->km.seq; | 1214 | x->km.seq = orig->km.seq; |
@@ -1215,11 +1223,12 @@ out: | |||
1215 | return NULL; | 1223 | return NULL; |
1216 | } | 1224 | } |
1217 | 1225 | ||
1218 | /* net->xfrm.xfrm_state_lock is held */ | ||
1219 | struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net) | 1226 | struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net) |
1220 | { | 1227 | { |
1221 | unsigned int h; | 1228 | unsigned int h; |
1222 | struct xfrm_state *x; | 1229 | struct xfrm_state *x = NULL; |
1230 | |||
1231 | spin_lock_bh(&net->xfrm.xfrm_state_lock); | ||
1223 | 1232 | ||
1224 | if (m->reqid) { | 1233 | if (m->reqid) { |
1225 | h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr, | 1234 | h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr, |
@@ -1236,7 +1245,7 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n | |||
1236 | m->old_family)) | 1245 | m->old_family)) |
1237 | continue; | 1246 | continue; |
1238 | xfrm_state_hold(x); | 1247 | xfrm_state_hold(x); |
1239 | return x; | 1248 | break; |
1240 | } | 1249 | } |
1241 | } else { | 1250 | } else { |
1242 | h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr, | 1251 | h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr, |
@@ -1251,11 +1260,13 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n | |||
1251 | m->old_family)) | 1260 | m->old_family)) |
1252 | continue; | 1261 | continue; |
1253 | xfrm_state_hold(x); | 1262 | xfrm_state_hold(x); |
1254 | return x; | 1263 | break; |
1255 | } | 1264 | } |
1256 | } | 1265 | } |
1257 | 1266 | ||
1258 | return NULL; | 1267 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1268 | |||
1269 | return x; | ||
1259 | } | 1270 | } |
1260 | EXPORT_SYMBOL(xfrm_migrate_state_find); | 1271 | EXPORT_SYMBOL(xfrm_migrate_state_find); |
1261 | 1272 | ||
@@ -1451,7 +1462,7 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, | |||
1451 | { | 1462 | { |
1452 | int err = 0; | 1463 | int err = 0; |
1453 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); | 1464 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); |
1454 | struct net *net = xs_net(*dst); | 1465 | struct net *net = xs_net(*src); |
1455 | 1466 | ||
1456 | if (!afinfo) | 1467 | if (!afinfo) |
1457 | return -EAFNOSUPPORT; | 1468 | return -EAFNOSUPPORT; |
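
Editor's note: xfrm_state_clone() previously skipped the AEAD algorithm (and a few tunables such as tfcpad and the replay thresholds), so a migrated state using an AEAD cipher lost its key material; the fix deep-copies orig->aead the same way ealg/calg already were. The general point — a clone must duplicate every pointer it owns — in a tiny sketch with a made-up struct in place of xfrm_state:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct algo { size_t key_len; unsigned char key[32]; };

    struct state {
        struct algo *aead;    /* owned pointer: must be duplicated on clone */
        int tfcpad;           /* plain members copy by assignment */
    };

    static struct algo *algo_clone(const struct algo *src)
    {
        struct algo *dst = malloc(sizeof(*dst));
        if (dst)
            memcpy(dst, src, sizeof(*dst));
        return dst;
    }

    static struct state *state_clone(const struct state *orig)
    {
        struct state *x = calloc(1, sizeof(*x));
        if (!x)
            return NULL;
        if (orig->aead) {
            x->aead = algo_clone(orig->aead);   /* the step the fix adds */
            if (!x->aead) {
                free(x);
                return NULL;
            }
        }
        x->tfcpad = orig->tfcpad;
        return x;
    }

    int main(void)
    {
        struct algo a = { .key_len = 16 };
        struct state orig = { .aead = &a, .tfcpad = 3 };
        struct state *copy = state_clone(&orig);

        printf("cloned key_len=%zu tfcpad=%d\n", copy->aead->key_len, copy->tfcpad);
        free(copy->aead);
        free(copy);
        return 0;
    }
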
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 1ae3ec7c18b0..c274179d60a2 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -32,11 +32,6 @@ | |||
32 | #include <linux/in6.h> | 32 | #include <linux/in6.h> |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | static inline int aead_len(struct xfrm_algo_aead *alg) | ||
36 | { | ||
37 | return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); | ||
38 | } | ||
39 | |||
40 | static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type) | 35 | static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type) |
41 | { | 36 | { |
42 | struct nlattr *rt = attrs[type]; | 37 | struct nlattr *rt = attrs[type]; |
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh index ef474098d9f1..17fa901418ae 100644 --- a/scripts/gen_initramfs_list.sh +++ b/scripts/gen_initramfs_list.sh | |||
@@ -257,7 +257,7 @@ case "$arg" in | |||
257 | && compr="lzop -9 -f" | 257 | && compr="lzop -9 -f" |
258 | echo "$output_file" | grep -q "\.lz4$" \ | 258 | echo "$output_file" | grep -q "\.lz4$" \ |
259 | && [ -x "`which lz4 2> /dev/null`" ] \ | 259 | && [ -x "`which lz4 2> /dev/null`" ] \ |
260 | && compr="lz4 -9 -f" | 260 | && compr="lz4 -l -9 -f" |
261 | echo "$output_file" | grep -q "\.cpio$" && compr="cat" | 261 | echo "$output_file" | grep -q "\.cpio$" && compr="cat" |
262 | shift | 262 | shift |
263 | ;; | 263 | ;; |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 10085de886fe..276e84b8a8e5 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
@@ -330,8 +330,7 @@ static void write_src(void) | |||
330 | printf("\tPTR\t_text + %#llx\n", | 330 | printf("\tPTR\t_text + %#llx\n", |
331 | table[i].addr - _text); | 331 | table[i].addr - _text); |
332 | else | 332 | else |
333 | printf("\tPTR\t_text - %#llx\n", | 333 | printf("\tPTR\t%#llx\n", table[i].addr); |
334 | _text - table[i].addr); | ||
335 | } else { | 334 | } else { |
336 | printf("\tPTR\t%#llx\n", table[i].addr); | 335 | printf("\tPTR\t%#llx\n", table[i].addr); |
337 | } | 336 | } |
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile index da8b7aa3d351..07b0b7542511 100644 --- a/tools/lib/lockdep/Makefile +++ b/tools/lib/lockdep/Makefile | |||
@@ -87,8 +87,8 @@ endif # BUILD_SRC | |||
87 | # We process the rest of the Makefile if this is the final invocation of make | 87 | # We process the rest of the Makefile if this is the final invocation of make |
88 | ifeq ($(skip-makefile),) | 88 | ifeq ($(skip-makefile),) |
89 | 89 | ||
90 | srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR)) | 90 | srctree := $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))) |
91 | objtree := $(CURDIR) | 91 | objtree := $(realpath $(CURDIR)) |
92 | src := $(srctree) | 92 | src := $(srctree) |
93 | obj := $(objtree) | 93 | obj := $(objtree) |
94 | 94 | ||
@@ -112,7 +112,7 @@ export Q VERBOSE | |||
112 | 112 | ||
113 | LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION) | 113 | LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION) |
114 | 114 | ||
115 | INCLUDES = -I. -I/usr/local/include -I./uinclude $(CONFIG_INCLUDES) | 115 | INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include $(CONFIG_INCLUDES) |
116 | 116 | ||
117 | # Set compile option CFLAGS if not set elsewhere | 117 | # Set compile option CFLAGS if not set elsewhere |
118 | CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g | 118 | CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g |
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c index f8465a811aa5..23bd69cb5ade 100644 --- a/tools/lib/lockdep/preload.c +++ b/tools/lib/lockdep/preload.c | |||
@@ -418,7 +418,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) | |||
418 | 418 | ||
419 | __attribute__((constructor)) static void init_preload(void) | 419 | __attribute__((constructor)) static void init_preload(void) |
420 | { | 420 | { |
421 | if (__init_state != done) | 421 | if (__init_state == done) |
422 | return; | 422 | return; |
423 | 423 | ||
424 | #ifndef __GLIBC__ | 424 | #ifndef __GLIBC__ |
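
Editor's note: the liblockdep preload fix is a reversed guard. "if (__init_state != done) return;" made the constructor bail out on its very first call, when the state is still not "done", so initialization never ran; returning only when setup has already completed restores the intended run-once behaviour. The idiom in isolation, with illustrative state names:

    #include <stdio.h>

    enum init_state { UNINITIALIZED, DONE };
    static enum init_state state = UNINITIALIZED;

    static void init_once(void)
    {
        if (state == DONE)      /* already initialized: nothing to do */
            return;
        puts("initializing");
        state = DONE;
    }

    int main(void)
    {
        init_once();    /* runs the setup */
        init_once();    /* second call is a no-op */
        return 0;
    }
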
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh index 5334ad9d39b7..5334ad9d39b7 100644..100755 --- a/tools/lib/lockdep/run_tests.sh +++ b/tools/lib/lockdep/run_tests.sh | |||
diff --git a/tools/lib/lockdep/uinclude/asm/hash.h b/tools/lib/lockdep/uinclude/asm/hash.h new file mode 100644 index 000000000000..d82b170bb216 --- /dev/null +++ b/tools/lib/lockdep/uinclude/asm/hash.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ASM_GENERIC_HASH_H | ||
2 | #define __ASM_GENERIC_HASH_H | ||
3 | |||
4 | /* Stub */ | ||
5 | |||
6 | #endif /* __ASM_GENERIC_HASH_H */ | ||
diff --git a/tools/lib/lockdep/uinclude/linux/rcu.h b/tools/lib/lockdep/uinclude/linux/rcu.h index 4c99fcb5da27..042ee8e463c9 100644 --- a/tools/lib/lockdep/uinclude/linux/rcu.h +++ b/tools/lib/lockdep/uinclude/linux/rcu.h | |||
@@ -13,4 +13,9 @@ static inline int rcu_is_cpu_idle(void) | |||
13 | return 1; | 13 | return 1; |
14 | } | 14 | } |
15 | 15 | ||
16 | static inline bool rcu_is_watching(void) | ||
17 | { | ||
18 | return false; | ||
19 | } | ||
20 | |||
16 | #endif | 21 | #endif |