 Documentation/devicetree/bindings/net/sti-dwmac.txt | 58
 Documentation/networking/3c505.txt | 45
 MAINTAINERS | 11
 arch/arm/boot/dts/imx6dl-hummingboard.dts | 10
 arch/arm/boot/dts/imx6qdl-cubox-i.dtsi | 10
 arch/arm/include/asm/cacheflush.h | 1
 arch/arm/include/asm/pgtable-3level.h | 15
 arch/arm/include/asm/spinlock.h | 15
 arch/arm/kernel/setup.c | 2
 arch/arm/mm/mm.h | 1
 arch/arm/mm/mmu.c | 7
 arch/arm/mm/proc-v6.S | 3
 arch/arm/mm/proc-v7.S | 2
 drivers/hid/hid-apple.c | 3
 drivers/hid/hid-core.c | 3
 drivers/hid/hid-hyperv.c | 11
 drivers/hid/hid-ids.h | 8
 drivers/hid/hid-input.c | 2
 drivers/hid/hid-microsoft.c | 4
 drivers/hid/hid-multitouch.c | 5
 drivers/hid/hid-sensor-hub.c | 3
 drivers/hid/i2c-hid/i2c-hid.c | 2
 drivers/hid/usbhid/hid-quirks.c | 1
 drivers/mmc/card/queue.c | 2
 drivers/net/Kconfig | 2
 drivers/net/bonding/bond_3ad.c | 6
 drivers/net/bonding/bond_3ad.h | 1
 drivers/net/bonding/bond_main.c | 9
 drivers/net/bonding/bond_options.c | 2
 drivers/net/can/usb/kvaser_usb.c | 2
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
 drivers/net/ethernet/dec/tulip/tulip_core.c | 1
 drivers/net/ethernet/freescale/fec_main.c | 4
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
 drivers/net/ethernet/lantiq_etop.c | 2
 drivers/net/ethernet/marvell/Kconfig | 6
 drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
 drivers/net/ethernet/stmicro/stmmac/Kconfig | 11
 drivers/net/ethernet/stmicro/stmmac/Makefile | 1
 drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 330
 drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3
 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 5
 drivers/net/ethernet/ti/cpsw.c | 11
 drivers/net/ethernet/tile/tilegx.c | 2
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 13
 drivers/net/hyperv/netvsc_drv.c | 53
 drivers/net/irda/irtty-sir.c | 1
 drivers/net/macvlan.c | 5
 drivers/net/phy/dp83640.c | 13
 drivers/net/team/team.c | 2
 drivers/net/tun.c | 2
 drivers/net/usb/Kconfig | 1
 drivers/net/usb/asix_devices.c | 3
 drivers/net/usb/ax88179_178a.c | 4
 drivers/net/usb/gl620a.c | 4
 drivers/net/usb/mcs7830.c | 5
 drivers/net/usb/net1080.c | 4
 drivers/net/usb/qmi_wwan.c | 9
 drivers/net/usb/rndis_host.c | 4
 drivers/net/usb/smsc75xx.c | 4
 drivers/net/usb/smsc95xx.c | 4
 drivers/net/usb/sr9800.c | 6
 drivers/net/usb/usbnet.c | 25
 drivers/net/wireless/ath/ath5k/phy.c | 2
 drivers/net/wireless/hostap/hostap_proc.c | 2
 drivers/net/wireless/iwlwifi/dvm/mac80211.c | 22
 drivers/net/wireless/iwlwifi/iwl-drv.c | 2
 drivers/net/wireless/iwlwifi/iwl-modparams.h | 11
 drivers/net/wireless/iwlwifi/mvm/mac80211.c | 22
 drivers/net/wireless/mwifiex/main.c | 2
 drivers/net/wireless/rtl818x/rtl8187/rtl8187.h | 10
 drivers/net/wireless/rtlwifi/ps.c | 2
 drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 18
 drivers/of/of_mdio.c | 22
 drivers/scsi/scsi_lib.c | 2
 drivers/staging/bcm/Bcmnet.c | 2
 drivers/staging/netlogic/xlr_net.c | 2
 drivers/staging/rtl8188eu/os_dep/os_intfs.c | 2
 drivers/vhost/net.c | 47
 fs/ceph/acl.c | 11
 fs/ceph/dir.c | 23
 fs/ceph/file.c | 1
 fs/ceph/super.c | 32
 fs/ceph/super.h | 7
 fs/ceph/xattr.c | 54
 fs/cifs/cifsacl.c | 33
 fs/cifs/cifsglob.h | 2
 fs/cifs/cifsproto.h | 6
 fs/cifs/dir.c | 2
 fs/cifs/file.c | 39
 fs/cifs/inode.c | 2
 fs/cifs/smb1ops.c | 1
 fs/cifs/smb2glob.h | 3
 fs/cifs/smb2ops.c | 14
 fs/cifs/smb2pdu.c | 4
 fs/ext4/ext4.h | 2
 fs/ext4/extents.c | 1
 fs/ext4/ioctl.c | 3
 fs/ext4/resize.c | 34
 fs/ext4/super.c | 20
 fs/fscache/object-list.c | 5
 fs/fscache/object.c | 3
 fs/jbd2/transaction.c | 6
 fs/jfs/acl.c | 2
 fs/reiserfs/do_balan.c | 895
 include/linux/ceph/ceph_fs.h | 5
 include/linux/netdevice.h | 36
 include/linux/skbuff.h | 17
 include/net/sctp/structs.h | 14
 net/batman-adv/bat_iv_ogm.c | 30
 net/batman-adv/hard-interface.c | 22
 net/batman-adv/originator.c | 36
 net/batman-adv/originator.h | 4
 net/batman-adv/routing.c | 4
 net/batman-adv/send.c | 9
 net/batman-adv/translation-table.c | 23
 net/bluetooth/hidp/core.c | 16
 net/bluetooth/hidp/hidp.h | 4
 net/core/dev.c | 22
 net/core/flow_dissector.c | 20
 net/core/rtnetlink.c | 19
 net/dccp/ccids/lib/tfrc.c | 2
 net/dccp/ccids/lib/tfrc.h | 1
 net/ipv4/ip_forward.c | 71
 net/ipv4/ipconfig.c | 2
 net/ipv4/route.c | 13
 net/ipv6/addrconf.c | 2
 net/ipv6/ip6_output.c | 17
 net/mac80211/iface.c | 6
 net/packet/af_packet.c | 26
 net/sched/sch_pie.c | 21
 net/sctp/associola.c | 82
 net/sctp/sm_statefuns.c | 2
 net/sctp/socket.c | 47
 net/sctp/sysctl.c | 18
 net/sctp/ulpevent.c | 8
 net/tipc/core.h | 1
 net/tipc/link.c | 7
 140 files changed, 1590 insertions(+), 1194 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt
new file mode 100644
index 000000000000..3dd3d0bf112f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt
@@ -0,0 +1,58 @@
1STMicroelectronics SoC DWMAC glue layer controller
2
3The device node has the following properties.
4
5Required properties:
6 - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or
7 "st,stid127-dwmac".
8 - reg : Offset and size of the glue configuration register map within the
9 system configuration regmap pointed to by the st,syscon property.
10
11 - reg-names : Should be "sti-ethconf".
12
13 - st,syscon : Should be a phandle to the system configuration node which
14 encompasses these glue registers.
15
16 - st,tx-retime-src: On STi parts, for gigabit speeds the 125MHz clock can be
17 wired up from different sources: one via the TXCLK pin, the other via the
18 CLK_125 pin. This wiring is entirely board dependent, and the retiming glue
19 logic must be configured accordingly. Possible values for this property:
20
21 "txclk" - if the 125MHz clock is wired up via the txclk line.
22 "clk_125" - if the 125MHz clock is wired up via the clk_125 line.
23
24 This property is only valid for gigabit setups (GMII, RGMII), and it is
25 unused for non-gigabit (MII and RMII) setups. Also note that the internal
26 clockgen cannot generate a stable 125MHz clock.
27
28 - st,ext-phyclk: This boolean property indicates whether the MAC or the PHY
29 generates the TX/RX clock. It is only valid in the RMII case, where the clock
30 can be generated by either the MAC or the PHY.
31
32 - clock-names: Should be "sti-ethclk".
33 - clocks: Should point to the ethernet clockgen which can generate phyclk.
34
35
36Example:
37
38ethernet0: dwmac@fe810000 {
39 device_type = "network";
40 compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
41 reg = <0xfe810000 0x8000>, <0x8bc 0x4>;
42 reg-names = "stmmaceth", "sti-ethconf";
43 interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
44 interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
45 phy-mode = "mii";
46
47 st,syscon = <&syscfg_rear>;
48
49 snps,pbl = <32>;
50 snps,mixed-burst;
51
52 resets = <&softreset STIH416_ETH0_SOFTRESET>;
53 reset-names = "stmmaceth";
54 pinctrl-0 = <&pinctrl_mii0>;
55 pinctrl-names = "default";
56 clocks = <&CLK_S_GMAC0_PHY>;
57 clock-names = "stmmaceth";
58};
diff --git a/Documentation/networking/3c505.txt b/Documentation/networking/3c505.txt
deleted file mode 100644
index 72f38b13101d..000000000000
--- a/Documentation/networking/3c505.txt
+++ /dev/null
@@ -1,45 +0,0 @@
1The 3Com Etherlink Plus (3c505) driver.
2
3This driver now uses DMA. There is currently no support for PIO operation.
4The default DMA channel is 6; this is _not_ autoprobed, so you must
5make sure you configure it correctly. If loading the driver as a
6module, you can do this with "modprobe 3c505 dma=n". If the driver is
7linked statically into the kernel, you must either use an "ether="
8statement on the command line, or change the definition of ELP_DMA in 3c505.h.
9
10The driver will warn you if it has to fall back on the compiled in
11default DMA channel.
12
13If no base address is given at boot time, the driver will autoprobe
14ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver
15will try to probe for it.
16
17The driver can be used as a loadable module.
18
19Theoretically, one instance of the driver can now run multiple cards,
20in the standard way (when loading a module, say "modprobe 3c505
21io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested
22this, though.
23
24The driver may now support revision 2 hardware; the dependency on
25being able to read the host control register has been removed. This
26is also untested, since I don't have a suitable card.
27
28Known problems:
29 I still see "DMA upload timed out" messages from time to time. These
30seem to be fairly non-fatal though.
31 The card is old and slow.
32
33To do:
34 Improve probe/setup code
35 Test multicast and promiscuous operation
36
37Authors:
38 The driver is mainly written by Craig Southeren, email
39 <craigs@ineluki.apana.org.au>.
40 Parts of the driver (adapting the driver to 1.1.4+ kernels,
41 IRQ/address detection, some changes) and this README by
42 Juha Laiho <jlaiho@ichaos.nullnet.fi>.
43 DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk>
44 Multicard support, Software configurable DMA, etc., by
45 Christopher Collins <ccollins@pcug.org.au>
diff --git a/MAINTAINERS b/MAINTAINERS
index fb08dcececf1..0dba50b405d6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3324,6 +3324,17 @@ S: Maintained
3324F: include/linux/netfilter_bridge/ 3324F: include/linux/netfilter_bridge/
3325F: net/bridge/ 3325F: net/bridge/
3326 3326
3327ETHERNET PHY LIBRARY
3328M: Florian Fainelli <f.fainelli@gmail.com>
3329L: netdev@vger.kernel.org
3330S: Maintained
3331F: include/linux/phy.h
3332F: include/linux/phy_fixed.h
3333F: drivers/net/phy/
3334F: Documentation/networking/phy.txt
3335F: drivers/of/of_mdio.c
3336F: drivers/of/of_net.c
3337
3327EXT2 FILE SYSTEM 3338EXT2 FILE SYSTEM
3328M: Jan Kara <jack@suse.cz> 3339M: Jan Kara <jack@suse.cz>
3329L: linux-ext4@vger.kernel.org 3340L: linux-ext4@vger.kernel.org
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
index fd8fc7cd53f3..5bfae54fb780 100644
--- a/arch/arm/boot/dts/imx6dl-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -52,12 +52,6 @@
52 }; 52 };
53 }; 53 };
54 54
55 codec: spdif-transmitter {
56 compatible = "linux,spdif-dit";
57 pinctrl-names = "default";
58 pinctrl-0 = <&pinctrl_hummingboard_spdif>;
59 };
60
61 sound-spdif { 55 sound-spdif {
62 compatible = "fsl,imx-audio-spdif"; 56 compatible = "fsl,imx-audio-spdif";
63 model = "imx-spdif"; 57 model = "imx-spdif";
@@ -111,7 +105,7 @@
111 }; 105 };
112 106
113 pinctrl_hummingboard_spdif: hummingboard-spdif { 107 pinctrl_hummingboard_spdif: hummingboard-spdif {
114 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; 108 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
115 }; 109 };
116 110
117 pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus { 111 pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
@@ -142,6 +136,8 @@
142}; 136};
143 137
144&spdif { 138&spdif {
139 pinctrl-names = "default";
140 pinctrl-0 = <&pinctrl_hummingboard_spdif>;
145 status = "okay"; 141 status = "okay";
146}; 142};
147 143
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 64daa3b311f6..c2a24888a276 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -46,12 +46,6 @@
46 }; 46 };
47 }; 47 };
48 48
49 codec: spdif-transmitter {
50 compatible = "linux,spdif-dit";
51 pinctrl-names = "default";
52 pinctrl-0 = <&pinctrl_cubox_i_spdif>;
53 };
54
55 sound-spdif { 49 sound-spdif {
56 compatible = "fsl,imx-audio-spdif"; 50 compatible = "fsl,imx-audio-spdif";
57 model = "imx-spdif"; 51 model = "imx-spdif";
@@ -89,7 +83,7 @@
89 }; 83 };
90 84
91 pinctrl_cubox_i_spdif: cubox-i-spdif { 85 pinctrl_cubox_i_spdif: cubox-i-spdif {
92 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; 86 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
93 }; 87 };
94 88
95 pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { 89 pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
@@ -121,6 +115,8 @@
121}; 115};
122 116
123&spdif { 117&spdif {
118 pinctrl-names = "default";
119 pinctrl-0 = <&pinctrl_cubox_i_spdif>;
124 status = "okay"; 120 status = "okay";
125}; 121};
126 122
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e9a49fe0284e..8b8b61685a34 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
212static inline void __flush_icache_all(void) 212static inline void __flush_icache_all(void)
213{ 213{
214 __flush_icache_preferred(); 214 __flush_icache_preferred();
215 dsb();
215} 216}
216 217
217/* 218/*
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 03243f7eeddf..85c60adc8b60 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,13 +120,16 @@
120/* 120/*
121 * 2nd stage PTE definitions for LPAE. 121 * 2nd stage PTE definitions for LPAE.
122 */ 122 */
123#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ 123#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
124#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ 124#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
125#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ 125#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
126#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ 126#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
127#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ 127#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
128 128
129#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ 129#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
130#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
131
132#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
130 133
131/* 134/*
132 * Hyp-mode PL2 PTE definitions for LPAE. 135 * Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c6072aa45..ac4bfae26702 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,18 +37,9 @@
37 37
38static inline void dsb_sev(void) 38static inline void dsb_sev(void)
39{ 39{
40#if __LINUX_ARM_ARCH__ >= 7 40
41 __asm__ __volatile__ ( 41 dsb(ishst);
42 "dsb ishst\n" 42 __asm__(SEV);
43 SEV
44 );
45#else
46 __asm__ __volatile__ (
47 "mcr p15, 0, %0, c7, c10, 4\n"
48 SEV
49 : : "r" (0)
50 );
51#endif
52} 43}
53 44
54/* 45/*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b0df9761de6d..1e8b030dbefd 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
731 kernel_data.end = virt_to_phys(_end - 1); 731 kernel_data.end = virt_to_phys(_end - 1);
732 732
733 for_each_memblock(memory, region) { 733 for_each_memblock(memory, region) {
734 res = memblock_virt_alloc_low(sizeof(*res), 0); 734 res = memblock_virt_alloc(sizeof(*res), 0);
735 res->name = "System RAM"; 735 res->name = "System RAM";
736 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); 736 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
737 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; 737 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a982d15a88..7ea641b7aa7d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
38 38
39struct mem_type { 39struct mem_type {
40 pteval_t prot_pte; 40 pteval_t prot_pte;
41 pteval_t prot_pte_s2;
41 pmdval_t prot_l1; 42 pmdval_t prot_l1;
42 pmdval_t prot_sect; 43 pmdval_t prot_sect;
43 unsigned int domain; 44 unsigned int domain;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f08c133cc25..a623cb3ad012 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -232,12 +232,16 @@ __setup("noalign", noalign_setup);
232#endif /* ifdef CONFIG_CPU_CP15 / else */ 232#endif /* ifdef CONFIG_CPU_CP15 / else */
233 233
234#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN 234#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
235#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
235#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE 236#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
236 237
237static struct mem_type mem_types[] = { 238static struct mem_type mem_types[] = {
238 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ 239 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
239 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | 240 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
240 L_PTE_SHARED, 241 L_PTE_SHARED,
242 .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
243 s2_policy(L_PTE_S2_MT_DEV_SHARED) |
244 L_PTE_SHARED,
241 .prot_l1 = PMD_TYPE_TABLE, 245 .prot_l1 = PMD_TYPE_TABLE,
242 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, 246 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
243 .domain = DOMAIN_IO, 247 .domain = DOMAIN_IO,
@@ -508,7 +512,8 @@ static void __init build_mem_type_table(void)
508 cp = &cache_policies[cachepolicy]; 512 cp = &cache_policies[cachepolicy];
509 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; 513 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
510 s2_pgprot = cp->pte_s2; 514 s2_pgprot = cp->pte_s2;
511 hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; 515 hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
516 s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
512 517
513 /* 518 /*
514 * ARMv6 and above have extended page tables. 519 * ARMv6 and above have extended page tables.
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 45dc29f85d56..32b3558321c4 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -208,7 +208,6 @@ __v6_setup:
208 mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache 208 mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
209 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 209 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
210 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache 210 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
211 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
212#ifdef CONFIG_MMU 211#ifdef CONFIG_MMU
213 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs 212 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
214 mcr p15, 0, r0, c2, c0, 2 @ TTB control register 213 mcr p15, 0, r0, c2, c0, 2 @ TTB control register
@@ -218,6 +217,8 @@ __v6_setup:
218 ALT_UP(orr r8, r8, #TTB_FLAGS_UP) 217 ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
219 mcr p15, 0, r8, c2, c0, 1 @ load TTB1 218 mcr p15, 0, r8, c2, c0, 1 @ load TTB1
220#endif /* CONFIG_MMU */ 219#endif /* CONFIG_MMU */
220 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
221 @ complete invalidations
221 adr r5, v6_crval 222 adr r5, v6_crval
222 ldmia r5, {r5, r6} 223 ldmia r5, {r5, r6}
223 ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables 224 ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index bd1781979a39..74f6033e76dd 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -351,7 +351,6 @@ __v7_setup:
351 351
3524: mov r10, #0 3524: mov r10, #0
353 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 353 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
354 dsb
355#ifdef CONFIG_MMU 354#ifdef CONFIG_MMU
356 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs 355 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
357 v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup 356 v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
@@ -360,6 +359,7 @@ __v7_setup:
360 mcr p15, 0, r5, c10, c2, 0 @ write PRRR 359 mcr p15, 0, r5, c10, c2, 0 @ write PRRR
361 mcr p15, 0, r6, c10, c2, 1 @ write NMRR 360 mcr p15, 0, r6, c10, c2, 1 @ write NMRR
362#endif 361#endif
362 dsb @ Complete invalidations
363#ifndef CONFIG_ARM_THUMBEE 363#ifndef CONFIG_ARM_THUMBEE
364 mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE 364 mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
365 and r0, r0, #(0xf << 12) @ ThumbEE enabled field 365 and r0, r0, #(0xf << 12) @ ThumbEE enabled field
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 497558127bb3..f822fd2a1ada 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -469,6 +469,9 @@ static const struct hid_device_id apple_devices[] = {
469 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 469 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
470 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), 470 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
471 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 471 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
472 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
473 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS),
474 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
472 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), 475 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
473 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 476 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
474 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), 477 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3bfac3accd22..cc32a6f96c64 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1679,6 +1679,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1679 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1679 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
1680 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, 1680 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
1681 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, 1681 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
1682 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
1682 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1683 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1683 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1684 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1684 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, 1685 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
@@ -1779,6 +1780,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
1779 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, 1780 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
1780 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, 1781 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
1781 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, 1782 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
1783 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) },
1784 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) },
1782 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 1785 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
1783 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 1786 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
1784 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, 1787 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 8fae6d1414cc..c24908f14934 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -157,6 +157,7 @@ struct mousevsc_dev {
157 u32 report_desc_size; 157 u32 report_desc_size;
158 struct hv_input_dev_info hid_dev_info; 158 struct hv_input_dev_info hid_dev_info;
159 struct hid_device *hid_device; 159 struct hid_device *hid_device;
160 u8 input_buf[HID_MAX_BUFFER_SIZE];
160}; 161};
161 162
162 163
@@ -256,6 +257,7 @@ static void mousevsc_on_receive(struct hv_device *device,
256 struct synthhid_msg *hid_msg; 257 struct synthhid_msg *hid_msg;
257 struct mousevsc_dev *input_dev = hv_get_drvdata(device); 258 struct mousevsc_dev *input_dev = hv_get_drvdata(device);
258 struct synthhid_input_report *input_report; 259 struct synthhid_input_report *input_report;
260 size_t len;
259 261
260 pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet + 262 pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet +
261 (packet->offset8 << 3)); 263 (packet->offset8 << 3));
@@ -300,9 +302,12 @@ static void mousevsc_on_receive(struct hv_device *device,
300 (struct synthhid_input_report *)pipe_msg->data; 302 (struct synthhid_input_report *)pipe_msg->data;
301 if (!input_dev->init_complete) 303 if (!input_dev->init_complete)
302 break; 304 break;
303 hid_input_report(input_dev->hid_device, 305
304 HID_INPUT_REPORT, input_report->buffer, 306 len = min(input_report->header.size,
305 input_report->header.size, 1); 307 (u32)sizeof(input_dev->input_buf));
308 memcpy(input_dev->input_buf, input_report->buffer, len);
309 hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
310 input_dev->input_buf, len, 1);
306 break; 311 break;
307 default: 312 default:
308 pr_err("unsupported hid msg type - type %d len %d", 313 pr_err("unsupported hid msg type - type %d len %d",
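The hid-hyperv hunk above bounds the device-supplied report length before copying it into the new fixed-size input_buf, so a bogus header.size can no longer overrun the buffer. A minimal sketch of that clamp-then-copy pattern, written as standalone C with illustrative names (not the driver's actual symbols):

#include <stddef.h>
#include <string.h>

#define INPUT_BUF_SIZE 64	/* stand-in for HID_MAX_BUFFER_SIZE */

/* Copy at most INPUT_BUF_SIZE bytes of a device-reported payload. */
static size_t copy_report(unsigned char *dst, const unsigned char *src,
			  size_t reported_len)
{
	size_t len = reported_len < INPUT_BUF_SIZE ? reported_len
						   : INPUT_BUF_SIZE;

	memcpy(dst, src, len);
	return len;	/* pass the clamped length downstream, not reported_len */
}

The key point mirrored in the hunk is that the clamped length, not the device-reported one, is what gets handed to hid_input_report().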
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a5248f2cc07..22f28d6b33a8 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -135,6 +135,7 @@
135#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b 135#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
136#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 136#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
137#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 137#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
138#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
138#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 139#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
139#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 140#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
140#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 141#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -240,6 +241,7 @@
240 241
241#define USB_VENDOR_ID_CYGNAL 0x10c4 242#define USB_VENDOR_ID_CYGNAL 0x10c4
242#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a 243#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
244#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
243 245
244#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244 246#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
245 247
@@ -451,6 +453,9 @@
451#define USB_VENDOR_ID_INTEL_1 0x8087 453#define USB_VENDOR_ID_INTEL_1 0x8087
452#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa 454#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa
453 455
456#define USB_VENDOR_ID_STM_0 0x0483
457#define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1
458
454#define USB_VENDOR_ID_ION 0x15e4 459#define USB_VENDOR_ID_ION 0x15e4
455#define USB_DEVICE_ID_ICADE 0x0132 460#define USB_DEVICE_ID_ICADE 0x0132
456 461
@@ -619,6 +624,8 @@
619#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 624#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
620#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 625#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
621#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c 626#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
627#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
628#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
622 629
623#define USB_VENDOR_ID_MOJO 0x8282 630#define USB_VENDOR_ID_MOJO 0x8282
624#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 631#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -644,6 +651,7 @@
644 651
645#define USB_VENDOR_ID_NEXIO 0x1870 652#define USB_VENDOR_ID_NEXIO 0x1870
646#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d 653#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
654#define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110
647 655
648#define USB_VENDOR_ID_NEXTWINDOW 0x1926 656#define USB_VENDOR_ID_NEXTWINDOW 0x1926
649#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 657#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d50e7313b171..a713e6211419 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1178,7 +1178,7 @@ static void hidinput_led_worker(struct work_struct *work)
1178 1178
1179 /* fall back to generic raw-output-report */ 1179 /* fall back to generic raw-output-report */
1180 len = ((report->size - 1) >> 3) + 1 + (report->id > 0); 1180 len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
1181 buf = kmalloc(len, GFP_KERNEL); 1181 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1182 if (!buf) 1182 if (!buf)
1183 return; 1183 return;
1184 1184
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6ef6eed3091..404a3a8a82f1 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -208,6 +208,10 @@ static const struct hid_device_id ms_devices[] = {
208 .driver_data = MS_NOGET }, 208 .driver_data = MS_NOGET },
209 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), 209 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
210 .driver_data = MS_DUPLICATE_USAGES }, 210 .driver_data = MS_DUPLICATE_USAGES },
211 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2),
212 .driver_data = 0 },
213 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2),
214 .driver_data = 0 },
211 215
212 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), 216 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
213 .driver_data = MS_PRESENTER }, 217 .driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f134d73beca1..221d503f1c24 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1166,6 +1166,11 @@ static const struct hid_device_id mt_devices[] = {
1166 MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, 1166 MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
1167 USB_DEVICE_ID_MULTITOUCH_3200) }, 1167 USB_DEVICE_ID_MULTITOUCH_3200) },
1168 1168
1169 /* FocalTech Panels */
1170 { .driver_data = MT_CLS_SERIAL,
1171 MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL,
1172 USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) },
1173
1169 /* GeneralTouch panel */ 1174 /* GeneralTouch panel */
1170 { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, 1175 { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
1171 MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 1176 MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 46f4480035bc..9c22e14c57f0 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -665,6 +665,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
665 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1, 665 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
666 USB_DEVICE_ID_INTEL_HID_SENSOR), 666 USB_DEVICE_ID_INTEL_HID_SENSOR),
667 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 667 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
668 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
669 USB_DEVICE_ID_STM_HID_SENSOR),
670 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
668 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, 671 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
669 HID_ANY_ID) }, 672 HID_ANY_ID) },
670 { } 673 { }
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d1f81f52481a..42eebd14de1f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -582,7 +582,7 @@ static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep,
582 int ret; 582 int ret;
583 int len = i2c_hid_get_report_length(rep) - 2; 583 int len = i2c_hid_get_report_length(rep) - 2;
584 584
585 buf = kzalloc(len, GFP_KERNEL); 585 buf = hid_alloc_report_buf(rep, GFP_KERNEL);
586 if (!buf) 586 if (!buf)
587 return; 587 return;
588 588
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 175ec0afb70c..dbd83878ff99 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, 76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
77 { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
77 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 78 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
78 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, 80 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 357bbc54fe4b..3e049c13429c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
197 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; 197 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
198 198
199 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) 199 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
200 limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; 200 limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
201 201
202 mq->card = card; 202 mq->card = card;
203 mq->queue = blk_init_queue(mmc_request_fn, lock); 203 mq->queue = blk_init_queue(mmc_request_fn, lock);
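The mmc/card/queue.c change above is a width fix: dma_max_pfn() returns an unsigned long, so on a 32-bit build the left shift by PAGE_SHIFT overflows before the result is widened into the 64-bit limit; casting to u64 first keeps the high bits. A small standalone C illustration (not kernel code; the pfn value is made up):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t pfn = 0x00123456;	/* example pfn that needs more than 32 bits once shifted */
	uint64_t truncated = pfn << PAGE_SHIFT;		/* shift done in 32 bits, high bits lost */
	uint64_t widened = (uint64_t)pfn << PAGE_SHIFT;	/* cast first, then shift: no loss */

	printf("truncated = 0x%llx\n", (unsigned long long)truncated);
	printf("widened   = 0x%llx\n", (unsigned long long)widened);
	return 0;
}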
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f342278539d5..494b888a6568 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -139,7 +139,7 @@ config MACVTAP
139 This adds a specialized tap character device driver that is based 139 This adds a specialized tap character device driver that is based
140 on the MAC-VLAN network interface, called macvtap. A macvtap device 140 on the MAC-VLAN network interface, called macvtap. A macvtap device
141 can be added in the same way as a macvlan device, using 'type 141 can be added in the same way as a macvlan device, using 'type
142 macvlan', and then be accessed through the tap user space interface. 142 macvtap', and then be accessed through the tap user space interface.
143 143
144 To compile this driver as a module, choose M here: the module 144 To compile this driver as a module, choose M here: the module
145 will be called macvtap. 145 will be called macvtap.
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index cce1f1bf90b4..6d20fbde8d43 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1796,8 +1796,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
1796 BOND_AD_INFO(bond).agg_select_timer = timeout; 1796 BOND_AD_INFO(bond).agg_select_timer = timeout;
1797} 1797}
1798 1798
1799static u16 aggregator_identifier;
1800
1801/** 1799/**
1802 * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures 1800 * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
1803 * @bond: bonding struct to work on 1801 * @bond: bonding struct to work on
@@ -1811,7 +1809,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
1811 if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), 1809 if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
1812 bond->dev->dev_addr)) { 1810 bond->dev->dev_addr)) {
1813 1811
1814 aggregator_identifier = 0; 1812 BOND_AD_INFO(bond).aggregator_identifier = 0;
1815 1813
1816 BOND_AD_INFO(bond).system.sys_priority = 0xFFFF; 1814 BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
1817 BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); 1815 BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
@@ -1880,7 +1878,7 @@ void bond_3ad_bind_slave(struct slave *slave)
1880 ad_initialize_agg(aggregator); 1878 ad_initialize_agg(aggregator);
1881 1879
1882 aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr); 1880 aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
1883 aggregator->aggregator_identifier = (++aggregator_identifier); 1881 aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
1884 aggregator->slave = slave; 1882 aggregator->slave = slave;
1885 aggregator->is_active = 0; 1883 aggregator->is_active = 0;
1886 aggregator->num_of_ports = 0; 1884 aggregator->num_of_ports = 0;
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 13dc9d3c5e34..f4dd9592ac62 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -253,6 +253,7 @@ struct ad_system {
253struct ad_bond_info { 253struct ad_bond_info {
254 struct ad_system system; /* 802.3ad system structure */ 254 struct ad_system system; /* 802.3ad system structure */
255 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes 255 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
256 u16 aggregator_identifier;
256}; 257};
257 258
258struct ad_slave_info { 259struct ad_slave_info {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 71ba18efa15b..1c6104d3501d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1543,9 +1543,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1543 bond_set_carrier(bond); 1543 bond_set_carrier(bond);
1544 1544
1545 if (USES_PRIMARY(bond->params.mode)) { 1545 if (USES_PRIMARY(bond->params.mode)) {
1546 block_netpoll_tx();
1546 write_lock_bh(&bond->curr_slave_lock); 1547 write_lock_bh(&bond->curr_slave_lock);
1547 bond_select_active_slave(bond); 1548 bond_select_active_slave(bond);
1548 write_unlock_bh(&bond->curr_slave_lock); 1549 write_unlock_bh(&bond->curr_slave_lock);
1550 unblock_netpoll_tx();
1549 } 1551 }
1550 1552
1551 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", 1553 pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1571,10 +1573,12 @@ err_detach:
1571 if (bond->primary_slave == new_slave) 1573 if (bond->primary_slave == new_slave)
1572 bond->primary_slave = NULL; 1574 bond->primary_slave = NULL;
1573 if (bond->curr_active_slave == new_slave) { 1575 if (bond->curr_active_slave == new_slave) {
1576 block_netpoll_tx();
1574 write_lock_bh(&bond->curr_slave_lock); 1577 write_lock_bh(&bond->curr_slave_lock);
1575 bond_change_active_slave(bond, NULL); 1578 bond_change_active_slave(bond, NULL);
1576 bond_select_active_slave(bond); 1579 bond_select_active_slave(bond);
1577 write_unlock_bh(&bond->curr_slave_lock); 1580 write_unlock_bh(&bond->curr_slave_lock);
1581 unblock_netpoll_tx();
1578 } 1582 }
1579 slave_disable_netpoll(new_slave); 1583 slave_disable_netpoll(new_slave);
1580 1584
@@ -2864,9 +2868,12 @@ static int bond_slave_netdev_event(unsigned long event,
2864 pr_info("%s: Primary slave changed to %s, reselecting active slave.\n", 2868 pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
2865 bond->dev->name, bond->primary_slave ? slave_dev->name : 2869 bond->dev->name, bond->primary_slave ? slave_dev->name :
2866 "none"); 2870 "none");
2871
2872 block_netpoll_tx();
2867 write_lock_bh(&bond->curr_slave_lock); 2873 write_lock_bh(&bond->curr_slave_lock);
2868 bond_select_active_slave(bond); 2874 bond_select_active_slave(bond);
2869 write_unlock_bh(&bond->curr_slave_lock); 2875 write_unlock_bh(&bond->curr_slave_lock);
2876 unblock_netpoll_tx();
2870 break; 2877 break;
2871 case NETDEV_FEAT_CHANGE: 2878 case NETDEV_FEAT_CHANGE:
2872 bond_compute_features(bond); 2879 bond_compute_features(bond);
@@ -3700,7 +3707,7 @@ static inline int bond_slave_override(struct bonding *bond,
3700 3707
3701 3708
3702static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, 3709static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
3703 void *accel_priv) 3710 void *accel_priv, select_queue_fallback_t fallback)
3704{ 3711{
3705 /* 3712 /*
3706 * This helper function exists to help dev_pick_tx get the correct 3713 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 11cb943222d5..c37878432717 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -14,7 +14,7 @@
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/if.h> 15#include <linux/if.h>
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17#include <linux/rwlock.h> 17#include <linux/spinlock.h>
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <linux/ctype.h> 19#include <linux/ctype.h>
20#include <linux/inet.h> 20#include <linux/inet.h>
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 6c859bba8b65..e77d11049747 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -473,6 +473,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
473 return err; 473 return err;
474 474
475 dev->nchannels = msg.u.cardinfo.nchannels; 475 dev->nchannels = msg.u.cardinfo.nchannels;
476 if (dev->nchannels > MAX_NET_DEVICES)
477 return -EINVAL;
476 478
477 return 0; 479 return 0;
478} 480}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9d7419e0390b..66c0df78c3ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1873,7 +1873,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1873} 1873}
1874 1874
1875u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, 1875u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1876 void *accel_priv) 1876 void *accel_priv, select_queue_fallback_t fallback)
1877{ 1877{
1878 struct bnx2x *bp = netdev_priv(dev); 1878 struct bnx2x *bp = netdev_priv(dev);
1879 1879
@@ -1895,7 +1895,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1895 } 1895 }
1896 1896
1897 /* select a non-FCoE queue */ 1897 /* select a non-FCoE queue */
1898 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); 1898 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1899} 1899}
1900 1900
1901void bnx2x_set_num_queues(struct bnx2x *bp) 1901void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index bfc58d488bb5..a89a40f88c25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -496,7 +496,7 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
496 496
497/* select_queue callback */ 497/* select_queue callback */
498u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, 498u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
499 void *accel_priv); 499 void *accel_priv, select_queue_fallback_t fallback);
500 500
501static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 501static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
502 struct bnx2x_fastpath *fp, 502 struct bnx2x_fastpath *fp,
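Several drivers in this patch set (bonding, bnx2x, ixgbe, lantiq_etop, mlx4_en) are converted to the same ndo_select_queue signature change visible in the two bnx2x hunks above: the callback now takes a select_queue_fallback_t and calls it instead of __netdev_pick_tx() directly. A minimal, hedged sketch of a converted callback; the function name and the modulo against real_num_tx_queues are illustrative only and not taken from any of these drivers:

/* Sketch only -- assumes the usual <linux/netdevice.h> declarations. */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	/* Let the core pick its default queue, then clamp it to the
	 * range this hypothetical driver actually uses.
	 */
	return fallback(dev, skb) % dev->real_num_tx_queues;
}

The bnx2x and mlx4_en hunks follow exactly this shape, only with their own queue ranges in place of real_num_tx_queues.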
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index add05f14b38b..1642de78aac8 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev)
1939 pci_iounmap(pdev, tp->base_addr); 1939 pci_iounmap(pdev, tp->base_addr);
1940 free_netdev (dev); 1940 free_netdev (dev);
1941 pci_release_regions (pdev); 1941 pci_release_regions (pdev);
1942 pci_disable_device(pdev);
1942 1943
1943 /* pci_power_off (pdev, -1); */ 1944 /* pci_power_off (pdev, -1); */
1944} 1945}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4782b42401b..903362a7b584 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1778,8 +1778,6 @@ fec_enet_open(struct net_device *ndev)
1778 struct fec_enet_private *fep = netdev_priv(ndev); 1778 struct fec_enet_private *fep = netdev_priv(ndev);
1779 int ret; 1779 int ret;
1780 1780
1781 napi_enable(&fep->napi);
1782
1783 /* I should reset the ring buffers here, but I don't yet know 1781 /* I should reset the ring buffers here, but I don't yet know
1784 * a simple way to do that. 1782 * a simple way to do that.
1785 */ 1783 */
@@ -1794,6 +1792,8 @@ fec_enet_open(struct net_device *ndev)
1794 fec_enet_free_buffers(ndev); 1792 fec_enet_free_buffers(ndev);
1795 return ret; 1793 return ret;
1796 } 1794 }
1795
1796 napi_enable(&fep->napi);
1797 phy_start(fep->phy_dev); 1797 phy_start(fep->phy_dev);
1798 netif_start_queue(ndev); 1798 netif_start_queue(ndev);
1799 fep->opened = 1; 1799 fep->opened = 1;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d4ada72dfd0..18076c4178b4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6881,7 +6881,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6881} 6881}
6882 6882
6883static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, 6883static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6884 void *accel_priv) 6884 void *accel_priv, select_queue_fallback_t fallback)
6885{ 6885{
6886 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; 6886 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
6887#ifdef IXGBE_FCOE 6887#ifdef IXGBE_FCOE
@@ -6907,7 +6907,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6907 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 6907 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
6908 break; 6908 break;
6909 default: 6909 default:
6910 return __netdev_pick_tx(dev, skb); 6910 return fallback(dev, skb);
6911 } 6911 }
6912 6912
6913 f = &adapter->ring_feature[RING_F_FCOE]; 6913 f = &adapter->ring_feature[RING_F_FCOE];
@@ -6920,7 +6920,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6920 6920
6921 return txq + f->offset; 6921 return txq + f->offset;
6922#else 6922#else
6923 return __netdev_pick_tx(dev, skb); 6923 return fallback(dev, skb);
6924#endif 6924#endif
6925} 6925}
6926 6926
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 8f9266c64c75..fd4b6aecf6ee 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,7 @@ ltq_etop_set_multicast_list(struct net_device *dev)
619 619
620static u16 620static u16
621ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, 621ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
622 void *accel_priv) 622 void *accel_priv, select_queue_fallback_t fallback)
623{ 623{
624 /* we are currently only using the first queue */ 624 /* we are currently only using the first queue */
625 return 0; 625 return 0;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 6300fd27f2db..68e6a6613e9a 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -43,12 +43,12 @@ config MVMDIO
43 This driver is used by the MV643XX_ETH and MVNETA drivers. 43 This driver is used by the MV643XX_ETH and MVNETA drivers.
44 44
45config MVNETA 45config MVNETA
46 tristate "Marvell Armada 370/XP network interface support" 46 tristate "Marvell Armada 370/38x/XP network interface support"
47 depends on MACH_ARMADA_370_XP 47 depends on PLAT_ORION
48 select MVMDIO 48 select MVMDIO
49 ---help--- 49 ---help---
50 This driver supports the network interface units in the 50 This driver supports the network interface units in the
51 Marvell ARMADA XP and ARMADA 370 SoC family. 51 Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
52 52
53 Note that this driver is distinct from the mv643xx_eth 53 Note that this driver is distinct from the mv643xx_eth
54 driver, which should be used for the older Marvell SoCs 54 driver, which should be used for the older Marvell SoCs
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8e8a7eb43a2c..13457032d15f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -629,7 +629,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
629} 629}
630 630
631u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, 631u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
632 void *accel_priv) 632 void *accel_priv, select_queue_fallback_t fallback)
633{ 633{
634 struct mlx4_en_priv *priv = netdev_priv(dev); 634 struct mlx4_en_priv *priv = netdev_priv(dev);
635 u16 rings_p_up = priv->num_tx_rings_p_up; 635 u16 rings_p_up = priv->num_tx_rings_p_up;
@@ -641,7 +641,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
641 if (vlan_tx_tag_present(skb)) 641 if (vlan_tx_tag_present(skb))
642 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; 642 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
643 643
644 return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; 644 return fallback(dev, skb) % rings_p_up + up * rings_p_up;
645} 645}
646 646
647static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) 647static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3af04c3f42ea..9ca223bc90fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -723,7 +723,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
723 723
724void mlx4_en_tx_irq(struct mlx4_cq *mcq); 724void mlx4_en_tx_irq(struct mlx4_cq *mcq);
725u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, 725u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
726 void *accel_priv); 726 void *accel_priv, select_queue_fallback_t fallback);
727netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 727netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
728 728
729int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 729int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index e2f202e3932f..f2d7c702c77f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -37,6 +37,17 @@ config DWMAC_SUNXI
37 stmmac device driver. This driver is used for A20/A31 37 stmmac device driver. This driver is used for A20/A31
38 GMAC ethernet controller. 38 GMAC ethernet controller.
39 39
40config DWMAC_STI
41 bool "STi GMAC support"
42 depends on STMMAC_PLATFORM && ARCH_STI
43 default y
44 ---help---
45 Support for the Ethernet controller on STi SoCs.
46
47 This selects the STi SoC glue layer support for the stmmac
48 device driver. This driver is used for the GMAC ethernet
49 controller on STi series SoCs.
50
40config STMMAC_PCI 51config STMMAC_PCI
41 bool "STMMAC PCI bus support" 52 bool "STMMAC PCI bus support"
42 depends on STMMAC_ETH && PCI 53 depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ecadecea79b2..dcef28775dad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o 2stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
3stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o 3stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
4stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o 4stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
5stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
5stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ 6stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
6 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 7 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
7 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ 8 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
new file mode 100644
index 000000000000..552bbc17863c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -0,0 +1,330 @@
1/**
2 * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
3 *
4 * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
5 * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/platform_device.h>
17#include <linux/stmmac.h>
18#include <linux/phy.h>
19#include <linux/mfd/syscon.h>
20#include <linux/regmap.h>
21#include <linux/clk.h>
22#include <linux/of.h>
23#include <linux/of_net.h>
24
25/**
26 * STi GMAC glue logic.
27 * --------------------
28 *
29 * _
30 * | \
31 * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK
32 * phyclk | |___________________________________________
33 * | | | (phyclk-in)
34 * --------|1 / |
35 * int-clk |_ / |
36 * | _
37 * | | \
38 * |_______|1 \ ETH_SEL_TX_RETIME_CLK
39 * | |___________________________
40 * | | (tx-retime-clk)
41 * _______|0 /
42 * | |_ /
43 * _ |
44 * | \ |
45 * --------|0 \ |
46 * clk_125 | |__|
47 * | | ETH_SEL_TXCLK_NOT_CLK125
48 * --------|1 /
49 * txclk |_ /
50 *
51 *
 52 * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII, where either the
 53 * PHY or the MAC can generate the 50MHz clock.
 54 * This bit is configured by the "st,ext-phyclk" property.
55 *
 56 * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125MHz
 57 * clock comes either from the clk-125 pin or from the txclk pin. This choice
 58 * is entirely driven by the board wiring. This bit is configured by the
 59 * "st,tx-retime-src" property.
60 *
 61 * The TXCLK configuration differs between PHY interface modes, and in
 62 * modes such as RGMII it also changes with the link speed.
 63 *
 64 * The table below summarizes the clock requirements and clock sources
 65 * for the supported PHY interface modes at each link speed.
 66 * _________________________________________________
 67 *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link    |
 68 * -------------------------------------------------
 69 *| MII      | n/a            | 25MHz            |
 70 *|          |                | txclk            |
 71 * -------------------------------------------------
 72 *| GMII     | 125MHz         | 25MHz            |
 73 *|          | clk-125/txclk  | txclk            |
 74 * -------------------------------------------------
 75 *| RGMII    | 125MHz         | 25MHz            |
 76 *|          | clk-125/txclk  | clkgen           |
 77 * -------------------------------------------------
 78 *| RMII     | n/a            | 25MHz            |
 79 *|          |                | clkgen/phyclk-in |
 80 * -------------------------------------------------
81 *
 82 * The TX lines are always retimed with a clock, which can vary with the
 83 * board configuration. The table below lists the corresponding bits in
 84 * the Ethernet configuration register for each retime clock source.
85 *
 86 *-----------------------------------------------------------
 87 * src    | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125 |
 88 *-----------------------------------------------------------
 89 * txclk  |     0     |        n/a         |        1       |
 90 *-----------------------------------------------------------
 91 * ck_125 |     0     |        n/a         |        0       |
 92 *-----------------------------------------------------------
 93 * phyclk |     1     |         0          |       n/a      |
 94 *-----------------------------------------------------------
 95 * clkgen |     1     |         1          |       n/a      |
 96 *-----------------------------------------------------------
97 */
98
99 /* Register definition */
100
101 /* 3 bits [8:6]
102 * [6:6] ETH_SEL_TXCLK_NOT_CLK125
103 * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK
104 * [8:8] ETH_SEL_TX_RETIME_CLK
105 *
106 */
107
108#define TX_RETIME_SRC_MASK GENMASK(8, 6)
109#define ETH_SEL_TX_RETIME_CLK BIT(8)
110#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
111#define ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
112
113#define ENMII_MASK GENMASK(5, 5)
114#define ENMII BIT(5)
115
116/**
117 * 3 bits [4:2]
118 * 000-GMII/MII
119 * 001-RGMII
120 * 010-SGMII
121 * 100-RMII
122*/
123#define MII_PHY_SEL_MASK GENMASK(4, 2)
124#define ETH_PHY_SEL_RMII BIT(4)
125#define ETH_PHY_SEL_SGMII BIT(3)
126#define ETH_PHY_SEL_RGMII BIT(2)
127#define ETH_PHY_SEL_GMII 0x0
128#define ETH_PHY_SEL_MII 0x0
129
130#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
131 iface == PHY_INTERFACE_MODE_RGMII_ID || \
132 iface == PHY_INTERFACE_MODE_RGMII_RXID || \
133 iface == PHY_INTERFACE_MODE_RGMII_TXID)
134
135#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
136 iface == PHY_INTERFACE_MODE_GMII)
137
138struct sti_dwmac {
139 int interface;
140 bool ext_phyclk;
141 bool is_tx_retime_src_clk_125;
142 struct clk *clk;
143 int reg;
144 struct device *dev;
145 struct regmap *regmap;
146};
147
148static u32 phy_intf_sels[] = {
149 [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
150 [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
151 [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
152 [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
153 [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
154 [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
155};
156
157enum {
158 TX_RETIME_SRC_NA = 0,
159 TX_RETIME_SRC_TXCLK = 1,
160 TX_RETIME_SRC_CLK_125,
161 TX_RETIME_SRC_PHYCLK,
162 TX_RETIME_SRC_CLKGEN,
163};
164
165static const char *const tx_retime_srcs[] = {
166 [TX_RETIME_SRC_NA] = "",
167 [TX_RETIME_SRC_TXCLK] = "txclk",
168 [TX_RETIME_SRC_CLK_125] = "clk_125",
169 [TX_RETIME_SRC_PHYCLK] = "phyclk",
170 [TX_RETIME_SRC_CLKGEN] = "clkgen",
171};
172
173static u32 tx_retime_val[] = {
174 [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125,
175 [TX_RETIME_SRC_CLK_125] = 0x0,
176 [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK,
177 [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK |
178 ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
179};
180
181static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd)
182{
183 u32 src = 0, freq = 0;
184
185 if (spd == SPEED_100) {
186 if (dwmac->interface == PHY_INTERFACE_MODE_MII ||
187 dwmac->interface == PHY_INTERFACE_MODE_GMII) {
188 src = TX_RETIME_SRC_TXCLK;
189 } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
190 if (dwmac->ext_phyclk) {
191 src = TX_RETIME_SRC_PHYCLK;
192 } else {
193 src = TX_RETIME_SRC_CLKGEN;
194 freq = 50000000;
195 }
196
197 } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
198 src = TX_RETIME_SRC_CLKGEN;
199 freq = 25000000;
200 }
201
202 if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk)
203 clk_set_rate(dwmac->clk, freq);
204
205 } else if (spd == SPEED_1000) {
206 if (dwmac->is_tx_retime_src_clk_125)
207 src = TX_RETIME_SRC_CLK_125;
208 else
209 src = TX_RETIME_SRC_TXCLK;
210 }
211
212 regmap_update_bits(dwmac->regmap, dwmac->reg,
213 TX_RETIME_SRC_MASK, tx_retime_val[src]);
214}
215
216static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
217{
218 struct sti_dwmac *dwmac = priv;
219
220 if (dwmac->clk)
221 clk_disable_unprepare(dwmac->clk);
222}
223
224static void sti_fix_mac_speed(void *priv, unsigned int spd)
225{
226 struct sti_dwmac *dwmac = priv;
227
228 setup_retime_src(dwmac, spd);
229
230 return;
231}
232
233static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
234 struct platform_device *pdev)
235{
236 struct resource *res;
237 struct device *dev = &pdev->dev;
238 struct device_node *np = dev->of_node;
239 struct regmap *regmap;
240 int err;
241
242 if (!np)
243 return -EINVAL;
244
245 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
246 if (!res)
247 return -ENODATA;
248
249 regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
250 if (IS_ERR(regmap))
251 return PTR_ERR(regmap);
252
253 dwmac->dev = dev;
254 dwmac->interface = of_get_phy_mode(np);
255 dwmac->regmap = regmap;
256 dwmac->reg = res->start;
257 dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
258 dwmac->is_tx_retime_src_clk_125 = false;
259
260 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
261 const char *rs;
262
263 err = of_property_read_string(np, "st,tx-retime-src", &rs);
264 if (err < 0) {
265 dev_err(dev, "st,tx-retime-src not specified\n");
266 return err;
267 }
268
269 if (!strcasecmp(rs, "clk_125"))
270 dwmac->is_tx_retime_src_clk_125 = true;
271 }
272
273 dwmac->clk = devm_clk_get(dev, "sti-ethclk");
274
275 if (IS_ERR(dwmac->clk))
276 dwmac->clk = NULL;
277
278 return 0;
279}
280
281static int sti_dwmac_init(struct platform_device *pdev, void *priv)
282{
283 struct sti_dwmac *dwmac = priv;
284 struct regmap *regmap = dwmac->regmap;
285 int iface = dwmac->interface;
286 u32 reg = dwmac->reg;
287 u32 val, spd;
288
289 if (dwmac->clk)
290 clk_prepare_enable(dwmac->clk);
291
292 regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
293
294 val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
295 regmap_update_bits(regmap, reg, ENMII_MASK, val);
296
297 if (IS_PHY_IF_MODE_GBIT(iface))
298 spd = SPEED_1000;
299 else
300 spd = SPEED_100;
301
302 setup_retime_src(dwmac, spd);
303
304 return 0;
305}
306
307static void *sti_dwmac_setup(struct platform_device *pdev)
308{
309 struct sti_dwmac *dwmac;
310 int ret;
311
312 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
313 if (!dwmac)
314 return ERR_PTR(-ENOMEM);
315
316 ret = sti_dwmac_parse_data(dwmac, pdev);
317 if (ret) {
318 dev_err(&pdev->dev, "Unable to parse OF data\n");
319 return ERR_PTR(ret);
320 }
321
322 return dwmac;
323}
324
325const struct stmmac_of_data sti_gmac_data = {
326 .fix_mac_speed = sti_fix_mac_speed,
327 .setup = sti_dwmac_setup,
328 .init = sti_dwmac_init,
329 .exit = sti_dwmac_exit,
330};
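
The glue layer above plugs into the stmmac core only through the callbacks it exports in struct stmmac_of_data. Below is a minimal sketch of the expected call order; the wrapper function, its arguments and the header guesses are illustrative, only stmmac_of_data and its callbacks come from the patch itself.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/phy.h>          /* for SPEED_* */
#include <linux/stmmac.h>       /* assumed to declare struct stmmac_of_data */

/* Hypothetical driver of the glue callbacks, shown only to make the
 * lifecycle explicit: setup() at probe, init() before the MAC is started,
 * fix_mac_speed() on every link-speed change, exit() on remove/suspend.
 */
static int example_bring_up_glue(struct platform_device *pdev,
                                 const struct stmmac_of_data *glue)
{
        void *priv;
        int ret;

        priv = glue->setup(pdev);       /* parse DT, look up syscon and clock */
        if (IS_ERR(priv))
                return PTR_ERR(priv);

        ret = glue->init(pdev, priv);   /* program PHY-mode and retime muxes  */
        if (ret)
                return ret;

        /* later, whenever the PHY reports a new speed: */
        glue->fix_mac_speed(priv, SPEED_1000);

        /* and on the way down: */
        glue->exit(pdev, priv);
        return 0;
}
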
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index d9af26ed58ee..f9e60d7918c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -133,6 +133,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
133#ifdef CONFIG_DWMAC_SUNXI 133#ifdef CONFIG_DWMAC_SUNXI
134extern const struct stmmac_of_data sun7i_gmac_data; 134extern const struct stmmac_of_data sun7i_gmac_data;
135#endif 135#endif
136#ifdef CONFIG_DWMAC_STI
137extern const struct stmmac_of_data sti_gmac_data;
138#endif
136extern struct platform_driver stmmac_pltfr_driver; 139extern struct platform_driver stmmac_pltfr_driver;
137static inline int stmmac_register_platform(void) 140static inline int stmmac_register_platform(void)
138{ 141{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5884a7d2063b..c61bc72b8e90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,11 @@ static const struct of_device_id stmmac_dt_ids[] = {
33#ifdef CONFIG_DWMAC_SUNXI 33#ifdef CONFIG_DWMAC_SUNXI
34 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, 34 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
35#endif 35#endif
36#ifdef CONFIG_DWMAC_STI
37 { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
38 { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
39 { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data},
40#endif
36 /* SoC specific glue layers should come before generic bindings */ 41 /* SoC specific glue layers should come before generic bindings */
37 { .compatible = "st,spear600-gmac"}, 42 { .compatible = "st,spear600-gmac"},
38 { .compatible = "snps,dwmac-3.610"}, 43 { .compatible = "snps,dwmac-3.610"},
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1d860ce914ed..651087b5c8da 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -554,7 +554,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
554 * common for both the interface as the interface shares 554 * common for both the interface as the interface shares
555 * the same hardware resource. 555 * the same hardware resource.
556 */ 556 */
557 for (i = 0; i <= priv->data.slaves; i++) 557 for (i = 0; i < priv->data.slaves; i++)
558 if (priv->slaves[i].ndev->flags & IFF_PROMISC) 558 if (priv->slaves[i].ndev->flags & IFF_PROMISC)
559 flag = true; 559 flag = true;
560 560
@@ -578,7 +578,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
578 unsigned long timeout = jiffies + HZ; 578 unsigned long timeout = jiffies + HZ;
579 579
580 /* Disable Learn for all ports */ 580 /* Disable Learn for all ports */
581 for (i = 0; i <= priv->data.slaves; i++) { 581 for (i = 0; i < priv->data.slaves; i++) {
582 cpsw_ale_control_set(ale, i, 582 cpsw_ale_control_set(ale, i,
583 ALE_PORT_NOLEARN, 1); 583 ALE_PORT_NOLEARN, 1);
584 cpsw_ale_control_set(ale, i, 584 cpsw_ale_control_set(ale, i,
@@ -606,7 +606,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
606 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); 606 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
607 607
608 /* Enable Learn for all ports */ 608 /* Enable Learn for all ports */
609 for (i = 0; i <= priv->data.slaves; i++) { 609 for (i = 0; i < priv->data.slaves; i++) {
610 cpsw_ale_control_set(ale, i, 610 cpsw_ale_control_set(ale, i,
611 ALE_PORT_NOLEARN, 0); 611 ALE_PORT_NOLEARN, 0);
612 cpsw_ale_control_set(ale, i, 612 cpsw_ale_control_set(ale, i,
@@ -1896,6 +1896,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1896 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 1896 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
1897 1897
1898 slave_data->phy_if = of_get_phy_mode(slave_node); 1898 slave_data->phy_if = of_get_phy_mode(slave_node);
1899 if (slave_data->phy_if < 0) {
1900 pr_err("Missing or malformed slave[%d] phy-mode property\n",
1901 i);
1902 return slave_data->phy_if;
1903 }
1899 1904
1900 if (data->dual_emac) { 1905 if (data->dual_emac) {
1901 if (of_property_read_u32(slave_node, "dual_emac_res_vlan", 1906 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 023237a65720..17503da9f7a5 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2071,7 +2071,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
2071 2071
2072/* Return subqueue id on this core (one per core). */ 2072/* Return subqueue id on this core (one per core). */
2073static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, 2073static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
2074 void *accel_priv) 2074 void *accel_priv, select_queue_fallback_t fallback)
2075{ 2075{
2076 return smp_processor_id(); 2076 return smp_processor_id();
2077} 2077}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 1ec65feebb9e..4bfdf8c7ada0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -26,6 +26,7 @@
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/of_mdio.h> 27#include <linux/of_mdio.h>
28#include <linux/of_platform.h> 28#include <linux/of_platform.h>
29#include <linux/of_irq.h>
29#include <linux/of_address.h> 30#include <linux/of_address.h>
30#include <linux/skbuff.h> 31#include <linux/skbuff.h>
31#include <linux/spinlock.h> 32#include <linux/spinlock.h>
@@ -600,7 +601,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
600 size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 601 size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
601 packets++; 602 packets++;
602 603
603 lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM; 604 ++lp->tx_bd_ci;
605 lp->tx_bd_ci %= TX_BD_NUM;
604 cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; 606 cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
605 status = cur_p->status; 607 status = cur_p->status;
606 } 608 }
@@ -686,7 +688,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
686 skb_headlen(skb), DMA_TO_DEVICE); 688 skb_headlen(skb), DMA_TO_DEVICE);
687 689
688 for (ii = 0; ii < num_frag; ii++) { 690 for (ii = 0; ii < num_frag; ii++) {
689 lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 691 ++lp->tx_bd_tail;
692 lp->tx_bd_tail %= TX_BD_NUM;
690 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 693 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
691 frag = &skb_shinfo(skb)->frags[ii]; 694 frag = &skb_shinfo(skb)->frags[ii];
692 cur_p->phys = dma_map_single(ndev->dev.parent, 695 cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -702,7 +705,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
702 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; 705 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
703 /* Start the transfer */ 706 /* Start the transfer */
704 axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); 707 axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
705 lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 708 ++lp->tx_bd_tail;
709 lp->tx_bd_tail %= TX_BD_NUM;
706 710
707 return NETDEV_TX_OK; 711 return NETDEV_TX_OK;
708} 712}
@@ -774,7 +778,8 @@ static void axienet_recv(struct net_device *ndev)
774 cur_p->status = 0; 778 cur_p->status = 0;
775 cur_p->sw_id_offset = (u32) new_skb; 779 cur_p->sw_id_offset = (u32) new_skb;
776 780
777 lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM; 781 ++lp->rx_bd_ci;
782 lp->rx_bd_ci %= RX_BD_NUM;
778 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 783 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
779 } 784 }
780 785
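
The axienet hunks above replace statements of the form "lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM". That form modifies the same object twice with no intervening sequence point, which is undefined behaviour in C and is what newer compilers flag with -Wsequence-point; splitting the increment from the wrap removes the ambiguity. A standalone illustration with a made-up ring index:

#define RING_SIZE 64    /* illustrative; stands in for TX_BD_NUM/RX_BD_NUM */

static unsigned int advance(unsigned int idx)
{
        /* idx = ++idx % RING_SIZE;  <-- two unsequenced writes to idx,
         *                               undefined behaviour
         */
        ++idx;                  /* well-defined: increment first ... */
        idx %= RING_SIZE;       /* ... then wrap around the ring     */
        return idx;
}
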
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7756118c2f0a..7141a1937360 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -88,8 +88,12 @@ static int netvsc_open(struct net_device *net)
88{ 88{
89 struct net_device_context *net_device_ctx = netdev_priv(net); 89 struct net_device_context *net_device_ctx = netdev_priv(net);
90 struct hv_device *device_obj = net_device_ctx->device_ctx; 90 struct hv_device *device_obj = net_device_ctx->device_ctx;
91 struct netvsc_device *nvdev;
92 struct rndis_device *rdev;
91 int ret = 0; 93 int ret = 0;
92 94
95 netif_carrier_off(net);
96
93 /* Open up the device */ 97 /* Open up the device */
94 ret = rndis_filter_open(device_obj); 98 ret = rndis_filter_open(device_obj);
95 if (ret != 0) { 99 if (ret != 0) {
@@ -99,6 +103,11 @@ static int netvsc_open(struct net_device *net)
99 103
100 netif_start_queue(net); 104 netif_start_queue(net);
101 105
106 nvdev = hv_get_drvdata(device_obj);
107 rdev = nvdev->extension;
108 if (!rdev->link_state)
109 netif_carrier_on(net);
110
102 return ret; 111 return ret;
103} 112}
104 113
@@ -229,23 +238,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
229 struct net_device *net; 238 struct net_device *net;
230 struct net_device_context *ndev_ctx; 239 struct net_device_context *ndev_ctx;
231 struct netvsc_device *net_device; 240 struct netvsc_device *net_device;
241 struct rndis_device *rdev;
232 242
233 net_device = hv_get_drvdata(device_obj); 243 net_device = hv_get_drvdata(device_obj);
244 rdev = net_device->extension;
245
246 rdev->link_state = status != 1;
247
234 net = net_device->ndev; 248 net = net_device->ndev;
235 249
236 if (!net) { 250 if (!net || net->reg_state != NETREG_REGISTERED)
237 netdev_err(net, "got link status but net device "
238 "not initialized yet\n");
239 return; 251 return;
240 }
241 252
253 ndev_ctx = netdev_priv(net);
242 if (status == 1) { 254 if (status == 1) {
243 netif_carrier_on(net);
244 ndev_ctx = netdev_priv(net);
245 schedule_delayed_work(&ndev_ctx->dwork, 0); 255 schedule_delayed_work(&ndev_ctx->dwork, 0);
246 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); 256 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
247 } else { 257 } else {
248 netif_carrier_off(net); 258 schedule_delayed_work(&ndev_ctx->dwork, 0);
249 } 259 }
250} 260}
251 261
@@ -388,17 +398,35 @@ static const struct net_device_ops device_ops = {
388 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add 398 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
389 * another netif_notify_peers() into a delayed work, otherwise GARP packet 399 * another netif_notify_peers() into a delayed work, otherwise GARP packet
390 * will not be sent after quick migration, and cause network disconnection. 400 * will not be sent after quick migration, and cause network disconnection.
401 * Also, we update the carrier status here.
391 */ 402 */
392static void netvsc_send_garp(struct work_struct *w) 403static void netvsc_link_change(struct work_struct *w)
393{ 404{
394 struct net_device_context *ndev_ctx; 405 struct net_device_context *ndev_ctx;
395 struct net_device *net; 406 struct net_device *net;
396 struct netvsc_device *net_device; 407 struct netvsc_device *net_device;
408 struct rndis_device *rdev;
409 bool notify;
410
411 rtnl_lock();
397 412
398 ndev_ctx = container_of(w, struct net_device_context, dwork.work); 413 ndev_ctx = container_of(w, struct net_device_context, dwork.work);
399 net_device = hv_get_drvdata(ndev_ctx->device_ctx); 414 net_device = hv_get_drvdata(ndev_ctx->device_ctx);
415 rdev = net_device->extension;
400 net = net_device->ndev; 416 net = net_device->ndev;
401 netdev_notify_peers(net); 417
418 if (rdev->link_state) {
419 netif_carrier_off(net);
420 notify = false;
421 } else {
422 netif_carrier_on(net);
423 notify = true;
424 }
425
426 rtnl_unlock();
427
428 if (notify)
429 netdev_notify_peers(net);
402} 430}
403 431
404 432
@@ -414,13 +442,10 @@ static int netvsc_probe(struct hv_device *dev,
414 if (!net) 442 if (!net)
415 return -ENOMEM; 443 return -ENOMEM;
416 444
417 /* Set initial state */
418 netif_carrier_off(net);
419
420 net_device_ctx = netdev_priv(net); 445 net_device_ctx = netdev_priv(net);
421 net_device_ctx->device_ctx = dev; 446 net_device_ctx->device_ctx = dev;
422 hv_set_drvdata(dev, net); 447 hv_set_drvdata(dev, net);
423 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); 448 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
424 INIT_WORK(&net_device_ctx->work, do_set_multicast); 449 INIT_WORK(&net_device_ctx->work, do_set_multicast);
425 450
426 net->netdev_ops = &device_ops; 451 net->netdev_ops = &device_ops;
@@ -443,8 +468,6 @@ static int netvsc_probe(struct hv_device *dev,
443 } 468 }
444 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 469 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
445 470
446 netif_carrier_on(net);
447
448 ret = register_netdev(net); 471 ret = register_netdev(net);
449 if (ret != 0) { 472 if (ret != 0) {
450 pr_err("Unable to register netdev.\n"); 473 pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 177441afeb96..24b6dddd7f2f 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -522,7 +522,6 @@ static void irtty_close(struct tty_struct *tty)
522 sirdev_put_instance(priv->dev); 522 sirdev_put_instance(priv->dev);
523 523
524 /* Stop tty */ 524 /* Stop tty */
525 irtty_stop_receiver(tty, TRUE);
526 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 525 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
527 if (tty->ops->stop) 526 if (tty->ops->stop)
528 tty->ops->stop(tty); 527 tty->ops->stop(tty);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8433de4509c7..a5d21893670d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -879,14 +879,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
879 dev->priv_flags |= IFF_MACVLAN; 879 dev->priv_flags |= IFF_MACVLAN;
880 err = netdev_upper_dev_link(lowerdev, dev); 880 err = netdev_upper_dev_link(lowerdev, dev);
881 if (err) 881 if (err)
882 goto destroy_port; 882 goto unregister_netdev;
883
884 883
885 list_add_tail_rcu(&vlan->list, &port->vlans); 884 list_add_tail_rcu(&vlan->list, &port->vlans);
886 netif_stacked_transfer_operstate(lowerdev, dev); 885 netif_stacked_transfer_operstate(lowerdev, dev);
887 886
888 return 0; 887 return 0;
889 888
889unregister_netdev:
890 unregister_netdevice(dev);
890destroy_port: 891destroy_port:
891 port->count -= 1; 892 port->count -= 1;
892 if (!port->count) 893 if (!port->count)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 9414fa272160..98e7cbf720a5 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1006,11 +1006,6 @@ static int dp83640_probe(struct phy_device *phydev)
1006 } else 1006 } else
1007 list_add_tail(&dp83640->list, &clock->phylist); 1007 list_add_tail(&dp83640->list, &clock->phylist);
1008 1008
1009 if (clock->chosen && !list_empty(&clock->phylist))
1010 recalibrate(clock);
1011 else
1012 enable_broadcast(dp83640->phydev, clock->page, 1);
1013
1014 dp83640_clock_put(clock); 1009 dp83640_clock_put(clock);
1015 return 0; 1010 return 0;
1016 1011
@@ -1063,6 +1058,14 @@ static void dp83640_remove(struct phy_device *phydev)
1063 1058
1064static int dp83640_config_init(struct phy_device *phydev) 1059static int dp83640_config_init(struct phy_device *phydev)
1065{ 1060{
1061 struct dp83640_private *dp83640 = phydev->priv;
1062 struct dp83640_clock *clock = dp83640->clock;
1063
1064 if (clock->chosen && !list_empty(&clock->phylist))
1065 recalibrate(clock);
1066 else
1067 enable_broadcast(phydev, clock->page, 1);
1068
1066 enable_status_frames(phydev, true); 1069 enable_status_frames(phydev, true);
1067 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); 1070 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
1068 return 0; 1071 return 0;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 28407426fd6f..c8624a8235ab 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1648,7 +1648,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1648} 1648}
1649 1649
1650static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, 1650static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1651 void *accel_priv) 1651 void *accel_priv, select_queue_fallback_t fallback)
1652{ 1652{
1653 /* 1653 /*
1654 * This helper function exists to help dev_pick_tx get the correct 1654 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 44c4db8450f0..8fe9cb7d0f72 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -366,7 +366,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
366 * hope the rxq no. may help here. 366 * hope the rxq no. may help here.
367 */ 367 */
368static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, 368static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
369 void *accel_priv) 369 void *accel_priv, select_queue_fallback_t fallback)
370{ 370{
371 struct tun_struct *tun = netdev_priv(dev); 371 struct tun_struct *tun = netdev_priv(dev);
372 struct tun_flow_entry *e; 372 struct tun_flow_entry *e;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 409499fdb157..7e7269fd3707 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -296,7 +296,6 @@ config USB_NET_SR9800
296 tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices" 296 tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
297 depends on USB_USBNET 297 depends on USB_USBNET
298 select CRC32 298 select CRC32
299 default y
300 ---help--- 299 ---help---
301 Say Y if you want to use one of the following 100Mbps USB Ethernet 300 Say Y if you want to use one of the following 100Mbps USB Ethernet
302 device based on the CoreChip-sz SR9800 chip. 301 device based on the CoreChip-sz SR9800 chip.
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 9765a7d4766d..5d194093f3e1 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -917,7 +917,8 @@ static const struct driver_info ax88178_info = {
917 .status = asix_status, 917 .status = asix_status,
918 .link_reset = ax88178_link_reset, 918 .link_reset = ax88178_link_reset,
919 .reset = ax88178_reset, 919 .reset = ax88178_reset,
920 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR, 920 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
921 FLAG_MULTI_PACKET,
921 .rx_fixup = asix_rx_fixup_common, 922 .rx_fixup = asix_rx_fixup_common,
922 .tx_fixup = asix_tx_fixup, 923 .tx_fixup = asix_tx_fixup,
923}; 924};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d6f64dad05bc..955df81a4358 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1118,6 +1118,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1118 u16 hdr_off; 1118 u16 hdr_off;
1119 u32 *pkt_hdr; 1119 u32 *pkt_hdr;
1120 1120
1121 /* This check is no longer done by usbnet */
1122 if (skb->len < dev->net->hard_header_len)
1123 return 0;
1124
1121 skb_trim(skb, skb->len - 4); 1125 skb_trim(skb, skb->len - 4);
1122 memcpy(&rx_hdr, skb_tail_pointer(skb), 4); 1126 memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
1123 le32_to_cpus(&rx_hdr); 1127 le32_to_cpus(&rx_hdr);
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index e4a8a93fbaf7..1cc24e6f23e2 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -84,6 +84,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
84 u32 size; 84 u32 size;
85 u32 count; 85 u32 count;
86 86
87 /* This check is no longer done by usbnet */
88 if (skb->len < dev->net->hard_header_len)
89 return 0;
90
87 header = (struct gl_header *) skb->data; 91 header = (struct gl_header *) skb->data;
88 92
89 // get the packet count of the received skb 93 // get the packet count of the received skb
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a305a7b2dae6..82d844a8ebd0 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -526,8 +526,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
526{ 526{
527 u8 status; 527 u8 status;
528 528
529 if (skb->len == 0) { 529 /* This check is no longer done by usbnet */
530 dev_err(&dev->udev->dev, "unexpected empty rx frame\n"); 530 if (skb->len < dev->net->hard_header_len) {
531 dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
531 return 0; 532 return 0;
532 } 533 }
533 534
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 0a85d9227775..4cbdb1307f3e 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -364,6 +364,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
364 struct nc_trailer *trailer; 364 struct nc_trailer *trailer;
365 u16 hdr_len, packet_len; 365 u16 hdr_len, packet_len;
366 366
367 /* This check is no longer done by usbnet */
368 if (skb->len < dev->net->hard_header_len)
369 return 0;
370
367 if (!(skb->len & 0x01)) { 371 if (!(skb->len & 0x01)) {
368 netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n", 372 netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
369 skb->len, dev->net->hard_header_len, dev->hard_mtu, 373 skb->len, dev->net->hard_header_len, dev->hard_mtu,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index ff5c87128ffe..313cb6cd4848 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
80{ 80{
81 __be16 proto; 81 __be16 proto;
82 82
83 /* usbnet rx_complete guarantees that skb->len is at least 83 /* This check is no longer done by usbnet */
84 * hard_header_len, so we can inspect the dest address without 84 if (skb->len < dev->net->hard_header_len)
85 * checking skb->len 85 return 0;
86 */ 86
87 switch (skb->data[0] & 0xf0) { 87 switch (skb->data[0] & 0xf0) {
88 case 0x40: 88 case 0x40:
89 proto = htons(ETH_P_IP); 89 proto = htons(ETH_P_IP);
@@ -732,6 +732,7 @@ static const struct usb_device_id products[] = {
732 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 732 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
733 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 733 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
734 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 734 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
735 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
735 736
736 /* 4. Gobi 1000 devices */ 737 /* 4. Gobi 1000 devices */
737 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 738 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a48bc0f20c1a..524a47a28120 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -492,6 +492,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
492 */ 492 */
493int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 493int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
494{ 494{
495 /* This check is no longer done by usbnet */
496 if (skb->len < dev->net->hard_header_len)
497 return 0;
498
495 /* peripheral may have batched packets to us... */ 499 /* peripheral may have batched packets to us... */
496 while (likely(skb->len)) { 500 while (likely(skb->len)) {
497 struct rndis_data_hdr *hdr = (void *)skb->data; 501 struct rndis_data_hdr *hdr = (void *)skb->data;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f17b9e02dd34..d9e7892262fa 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2106,6 +2106,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
2106 2106
2107static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 2107static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2108{ 2108{
2109 /* This check is no longer done by usbnet */
2110 if (skb->len < dev->net->hard_header_len)
2111 return 0;
2112
2109 while (skb->len > 0) { 2113 while (skb->len > 0) {
2110 u32 rx_cmd_a, rx_cmd_b, align_count, size; 2114 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2111 struct sk_buff *ax_skb; 2115 struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 8dd54a0f7b29..424db65e4396 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1723,6 +1723,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1723 1723
1724static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 1724static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1725{ 1725{
1726 /* This check is no longer done by usbnet */
1727 if (skb->len < dev->net->hard_header_len)
1728 return 0;
1729
1726 while (skb->len > 0) { 1730 while (skb->len > 0) {
1727 u32 header, align_count; 1731 u32 header, align_count;
1728 struct sk_buff *ax_skb; 1732 struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 4175eb9fdeca..b94a0fbb8b3b 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -63,6 +63,10 @@ static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
63{ 63{
64 int offset = 0; 64 int offset = 0;
65 65
66 /* This check is no longer done by usbnet */
67 if (skb->len < dev->net->hard_header_len)
68 return 0;
69
66 while (offset + sizeof(u32) < skb->len) { 70 while (offset + sizeof(u32) < skb->len) {
67 struct sk_buff *sr_skb; 71 struct sk_buff *sr_skb;
68 u16 size; 72 u16 size;
@@ -823,7 +827,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
823 dev->rx_urb_size = 827 dev->rx_urb_size =
824 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size; 828 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
825 } 829 }
826 netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__, 830 netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__,
827 dev->rx_urb_size); 831 dev->rx_urb_size);
828 return 0; 832 return 0;
829 833
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 4671da755e7b..dd10d5817d2a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -542,17 +542,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
542 } 542 }
543 // else network stack removes extra byte if we forced a short packet 543 // else network stack removes extra byte if we forced a short packet
544 544
545 if (skb->len) { 545 /* all data was already cloned from skb inside the driver */
546 /* all data was already cloned from skb inside the driver */ 546 if (dev->driver_info->flags & FLAG_MULTI_PACKET)
547 if (dev->driver_info->flags & FLAG_MULTI_PACKET) 547 goto done;
548 dev_kfree_skb_any(skb); 548
549 else 549 if (skb->len < ETH_HLEN) {
550 usbnet_skb_return(dev, skb); 550 dev->net->stats.rx_errors++;
551 dev->net->stats.rx_length_errors++;
552 netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
553 } else {
554 usbnet_skb_return(dev, skb);
551 return; 555 return;
552 } 556 }
553 557
554 netif_dbg(dev, rx_err, dev->net, "drop\n");
555 dev->net->stats.rx_errors++;
556done: 558done:
557 skb_queue_tail(&dev->done, skb); 559 skb_queue_tail(&dev->done, skb);
558} 560}
@@ -574,13 +576,6 @@ static void rx_complete (struct urb *urb)
574 switch (urb_status) { 576 switch (urb_status) {
575 /* success */ 577 /* success */
576 case 0: 578 case 0:
577 if (skb->len < dev->net->hard_header_len) {
578 state = rx_cleanup;
579 dev->net->stats.rx_errors++;
580 dev->net->stats.rx_length_errors++;
581 netif_dbg(dev, rx_err, dev->net,
582 "rx length %d\n", skb->len);
583 }
584 break; 579 break;
585 580
586 /* stalls need manual reset. this is rare ... except that 581 /* stalls need manual reset. this is rare ... except that
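
With the length check removed from usbnet's rx_complete(), a minidriver whose rx_fixup() reads its own framing header must now reject runt buffers itself, which is exactly what the one-line guards added to the drivers above do. A hedged sketch of the minimal guard for a hypothetical minidriver:

#include <linux/skbuff.h>
#include <linux/usb/usbnet.h>

/* Hypothetical rx_fixup(); only the leading length guard is the point. */
static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
        /* usbnet no longer guarantees a minimum length, so check before
         * dereferencing any header bytes.
         */
        if (skb->len < dev->net->hard_header_len)
                return 0;       /* 0 tells usbnet to drop the skb as an error */

        /* ... parse the device-specific framing here ... */

        return 1;               /* non-zero: frame is good, keep processing   */
}
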
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index d6bc7cb61bfb..1a2973b7acf2 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -110,7 +110,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
110 ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20)); 110 ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
111 111
112 if (ah->ah_version == AR5K_AR5210) { 112 if (ah->ah_version == AR5K_AR5210) {
113 srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf; 113 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
114 ret = (u16)ath5k_hw_bitswap(srev, 4) + 1; 114 ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
115 } else { 115 } else {
116 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff; 116 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index aa7ad3a7a69b..4e5c0f8c9496 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -496,7 +496,7 @@ void hostap_init_proc(local_info_t *local)
496 496
497void hostap_remove_proc(local_info_t *local) 497void hostap_remove_proc(local_info_t *local)
498{ 498{
499 remove_proc_subtree(local->ddev->name, hostap_proc); 499 proc_remove(local->proc);
500} 500}
501 501
502 502
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index c24d1d3d55f6..73086c1629ca 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -696,6 +696,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
696 return ret; 696 return ret;
697} 697}
698 698
699static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
700{
701 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
702 return false;
703 return true;
704}
705
706static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
707{
708 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
709 return false;
710 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
711 return true;
712
713 /* disabled by default */
714 return false;
715}
716
699static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 717static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
700 struct ieee80211_vif *vif, 718 struct ieee80211_vif *vif,
701 enum ieee80211_ampdu_mlme_action action, 719 enum ieee80211_ampdu_mlme_action action,
@@ -717,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
717 735
718 switch (action) { 736 switch (action) {
719 case IEEE80211_AMPDU_RX_START: 737 case IEEE80211_AMPDU_RX_START:
720 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 738 if (!iwl_enable_rx_ampdu(priv->cfg))
721 break; 739 break;
722 IWL_DEBUG_HT(priv, "start Rx\n"); 740 IWL_DEBUG_HT(priv, "start Rx\n");
723 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); 741 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -729,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
729 case IEEE80211_AMPDU_TX_START: 747 case IEEE80211_AMPDU_TX_START:
730 if (!priv->trans->ops->txq_enable) 748 if (!priv->trans->ops->txq_enable)
731 break; 749 break;
732 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 750 if (!iwl_enable_tx_ampdu(priv->cfg))
733 break; 751 break;
734 IWL_DEBUG_HT(priv, "start Tx\n"); 752 IWL_DEBUG_HT(priv, "start Tx\n");
735 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); 753 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index c3728163be46..75103554cd63 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1286,7 +1286,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
1286MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); 1286MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
1287module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); 1287module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
1288MODULE_PARM_DESC(11n_disable, 1288MODULE_PARM_DESC(11n_disable,
1289 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); 1289 "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8: enable agg TX");
1290module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, 1290module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
1291 int, S_IRUGO); 1291 int, S_IRUGO);
1292MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); 1292MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 0a84ade7edac..b29075c3da8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -79,9 +79,12 @@ enum iwl_power_level {
79 IWL_POWER_NUM 79 IWL_POWER_NUM
80}; 80};
81 81
82#define IWL_DISABLE_HT_ALL BIT(0) 82enum iwl_disable_11n {
83#define IWL_DISABLE_HT_TXAGG BIT(1) 83 IWL_DISABLE_HT_ALL = BIT(0),
84#define IWL_DISABLE_HT_RXAGG BIT(2) 84 IWL_DISABLE_HT_TXAGG = BIT(1),
85 IWL_DISABLE_HT_RXAGG = BIT(2),
86 IWL_ENABLE_HT_TXAGG = BIT(3),
87};
85 88
86/** 89/**
87 * struct iwl_mod_params 90 * struct iwl_mod_params
@@ -90,7 +93,7 @@ enum iwl_power_level {
90 * 93 *
91 * @sw_crypto: using hardware encryption, default = 0 94 * @sw_crypto: using hardware encryption, default = 0
92 * @disable_11n: disable 11n capabilities, default = 0, 95 * @disable_11n: disable 11n capabilities, default = 0,
93 * use IWL_DISABLE_HT_* constants 96 * use IWL_[DIS,EN]ABLE_HT_* constants
94 * @amsdu_size_8K: enable 8K amsdu size, default = 0 97 * @amsdu_size_8K: enable 8K amsdu size, default = 0
95 * @restart_fw: restart firmware, default = 1 98 * @restart_fw: restart firmware, default = 1
96 * @wd_disable: enable stuck queue check, default = 0 99 * @wd_disable: enable stuck queue check, default = 0
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 6bf9766e5982..c35b8661b395 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -328,6 +328,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
328 ieee80211_free_txskb(hw, skb); 328 ieee80211_free_txskb(hw, skb);
329} 329}
330 330
331static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
332{
333 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
334 return false;
335 return true;
336}
337
338static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
339{
340 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
341 return false;
342 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
343 return true;
344
345 /* enabled by default */
346 return true;
347}
348
331static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, 349static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
332 struct ieee80211_vif *vif, 350 struct ieee80211_vif *vif,
333 enum ieee80211_ampdu_mlme_action action, 351 enum ieee80211_ampdu_mlme_action action,
@@ -347,7 +365,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
347 365
348 switch (action) { 366 switch (action) {
349 case IEEE80211_AMPDU_RX_START: 367 case IEEE80211_AMPDU_RX_START:
350 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) { 368 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
351 ret = -EINVAL; 369 ret = -EINVAL;
352 break; 370 break;
353 } 371 }
@@ -357,7 +375,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
357 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); 375 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
358 break; 376 break;
359 case IEEE80211_AMPDU_TX_START: 377 case IEEE80211_AMPDU_TX_START:
360 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) { 378 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
361 ret = -EINVAL; 379 ret = -EINVAL;
362 break; 380 break;
363 } 381 }
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 4d79761b9c87..9d3d2758ec35 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -748,7 +748,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
748 748
749static u16 749static u16
750mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, 750mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
751 void *accel_priv) 751 void *accel_priv, select_queue_fallback_t fallback)
752{ 752{
753 skb->priority = cfg80211_classify8021d(skb, NULL); 753 skb->priority = cfg80211_classify8021d(skb, NULL);
754 return mwifiex_1d_to_wmm_queue[skb->priority]; 754 return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 56aee067f324..a6ad79f61bf9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -15,6 +15,8 @@
15#ifndef RTL8187_H 15#ifndef RTL8187_H
16#define RTL8187_H 16#define RTL8187_H
17 17
18#include <linux/cache.h>
19
18#include "rtl818x.h" 20#include "rtl818x.h"
19#include "leds.h" 21#include "leds.h"
20 22
@@ -139,7 +141,10 @@ struct rtl8187_priv {
139 u8 aifsn[4]; 141 u8 aifsn[4];
140 u8 rfkill_mask; 142 u8 rfkill_mask;
141 struct { 143 struct {
142 __le64 buf; 144 union {
145 __le64 buf;
146 u8 dummy1[L1_CACHE_BYTES];
147 } ____cacheline_aligned;
143 struct sk_buff_head queue; 148 struct sk_buff_head queue;
144 } b_tx_status; /* This queue is used by both -b and non-b devices */ 149 } b_tx_status; /* This queue is used by both -b and non-b devices */
145 struct mutex io_mutex; 150 struct mutex io_mutex;
@@ -147,7 +152,8 @@ struct rtl8187_priv {
147 u8 bits8; 152 u8 bits8;
148 __le16 bits16; 153 __le16 bits16;
149 __le32 bits32; 154 __le32 bits32;
150 } *io_dmabuf; 155 u8 dummy2[L1_CACHE_BYTES];
156 } *io_dmabuf ____cacheline_aligned;
151 bool rfkill_off; 157 bool rfkill_off;
152 u16 seqno; 158 u16 seqno;
153}; 159};
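
The rtl8187 change pads each buffer that is handed to the USB core for DMA out to a full cache line and cache-aligns it, so that cache maintenance on the buffer can never touch neighbouring CPU-only fields. A sketch of the same idiom with made-up member names:

#include <linux/cache.h>
#include <linux/types.h>

struct example_dev {
        u32 cpu_only_state;               /* never seen by the device      */
        union {
                __le64 dma_buf;           /* passed to the USB/DMA layer   */
                u8 pad[L1_CACHE_BYTES];   /* pad out to a whole cache line */
        } ____cacheline_aligned;          /* and start on a line boundary  */
};
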
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index deedae3c5449..d1c0191a195b 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
48 48
49 /*<2> Enable Adapter */ 49 /*<2> Enable Adapter */
50 if (rtlpriv->cfg->ops->hw_init(hw)) 50 if (rtlpriv->cfg->ops->hw_init(hw))
51 return 1; 51 return false;
52 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); 52 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
53 53
54 /*<3> Enable Interrupt */ 54 /*<3> Enable Interrupt */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index a82b30a1996c..2eb0b38384dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
937 bool is92c; 937 bool is92c;
938 int err; 938 int err;
939 u8 tmp_u1b; 939 u8 tmp_u1b;
940 unsigned long flags;
940 941
941 rtlpci->being_init_adapter = true; 942 rtlpci->being_init_adapter = true;
943
944 /* Since this function can take a very long time (up to 350 ms)
945 * and can be called with irqs disabled, reenable the irqs
946 * to let the other devices continue being serviced.
947 *
 948 * It is safe to do so because our own interrupts will only be enabled
949 * in a subsequent step.
950 */
951 local_save_flags(flags);
952 local_irq_enable();
953
942 rtlpriv->intf_ops->disable_aspm(hw); 954 rtlpriv->intf_ops->disable_aspm(hw);
943 rtstatus = _rtl92ce_init_mac(hw); 955 rtstatus = _rtl92ce_init_mac(hw);
944 if (!rtstatus) { 956 if (!rtstatus) {
945 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); 957 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
946 err = 1; 958 err = 1;
947 return err; 959 goto exit;
948 } 960 }
949 961
950 err = rtl92c_download_fw(hw); 962 err = rtl92c_download_fw(hw);
@@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
952 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 964 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
953 "Failed to download FW. Init HW without FW now..\n"); 965 "Failed to download FW. Init HW without FW now..\n");
954 err = 1; 966 err = 1;
955 return err; 967 goto exit;
956 } 968 }
957 969
958 rtlhal->last_hmeboxnum = 0; 970 rtlhal->last_hmeboxnum = 0;
@@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
1032 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); 1044 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
1033 } 1045 }
1034 rtl92c_dm_init(hw); 1046 rtl92c_dm_init(hw);
1047exit:
1048 local_irq_restore(flags);
1035 rtlpci->being_init_adapter = false; 1049 rtlpci->being_init_adapter = false;
1036 return err; 1050 return err;
1037} 1051}
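
The comment added to rtl92ce_hw_init() explains the motivation; the bracket itself is a small reusable pattern: record the caller's IRQ state, force interrupts on around the slow section, then restore the recorded state on every exit path. A sketch with a hypothetical helper standing in for the slow hardware init:

#include <linux/irqflags.h>

static int do_slow_hw_init(void)
{
        return 0;       /* stands in for the real ~350 ms initialisation */
}

static int example_slow_init(void)
{
        unsigned long flags;
        int err;

        local_save_flags(flags);        /* remember the caller's IRQ state */
        local_irq_enable();             /* let other devices be serviced   */

        err = do_slow_hw_init();

        local_irq_restore(flags);       /* hand back exactly what we got   */
        return err;
}

local_save_flags() followed by local_irq_enable() is used rather than local_irq_save() because the point is to enable interrupts even when the caller had them disabled, while still returning with the caller's original IRQ state.
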
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 875b7b6f0d2a..5b3c24f3cde5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -24,7 +24,11 @@ MODULE_LICENSE("GPL");
24 24
25static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed) 25static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
26{ 26{
27 phydev->supported |= PHY_DEFAULT_FEATURES; 27 /* The default values for phydev->supported are provided by the PHY
 28 * driver "features" member; we want to reset to sane defaults first
29 * before supporting higher speeds.
30 */
31 phydev->supported &= PHY_DEFAULT_FEATURES;
28 32
29 switch (max_speed) { 33 switch (max_speed) {
30 default: 34 default:
@@ -44,7 +48,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
44{ 48{
45 struct phy_device *phy; 49 struct phy_device *phy;
46 bool is_c45; 50 bool is_c45;
47 int rc, prev_irq; 51 int rc;
48 u32 max_speed = 0; 52 u32 max_speed = 0;
49 53
50 is_c45 = of_device_is_compatible(child, 54 is_c45 = of_device_is_compatible(child,
@@ -54,12 +58,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
54 if (!phy || IS_ERR(phy)) 58 if (!phy || IS_ERR(phy))
55 return 1; 59 return 1;
56 60
57 if (mdio->irq) { 61 rc = irq_of_parse_and_map(child, 0);
58 prev_irq = mdio->irq[addr]; 62 if (rc > 0) {
59 mdio->irq[addr] = 63 phy->irq = rc;
60 irq_of_parse_and_map(child, 0); 64 if (mdio->irq)
61 if (!mdio->irq[addr]) 65 mdio->irq[addr] = rc;
62 mdio->irq[addr] = prev_irq; 66 } else {
67 if (mdio->irq)
68 phy->irq = mdio->irq[addr];
63 } 69 }
64 70
65 /* Associate the OF node with the device structure so it 71 /* Associate the OF node with the device structure so it
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d5f050..62ec84b42e31 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1684 1684
1685 host_dev = scsi_get_device(shost); 1685 host_dev = scsi_get_device(shost);
1686 if (host_dev && host_dev->dma_mask) 1686 if (host_dev && host_dev->dma_mask)
1687 bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT; 1687 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
1688 1688
1689 return bounce_limit; 1689 return bounce_limit;
1690} 1690}
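
The scsi_lib change adds a (u64) cast because a shift is evaluated in the type of its left operand: on a 32-bit kernel the PFN returned by dma_max_pfn() is only 32 bits wide, so shifting it by PAGE_SHIFT wraps before the result is widened to u64. A small self-contained illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t max_pfn = 0x00140000;               /* a PFN above the 4 GiB mark */
        uint64_t wrong = max_pfn << 12;              /* shifted as 32 bits: wraps  */
        uint64_t right = (uint64_t)max_pfn << 12;    /* widened first: correct     */

        printf("wrong=%#llx right=%#llx\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}
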
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 8dfdd2732bdc..95a2358267ba 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -40,7 +40,7 @@ static INT bcm_close(struct net_device *dev)
40} 40}
41 41
42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, 42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
43 void *accel_priv) 43 void *accel_priv, select_queue_fallback_t fallback)
44{ 44{
45 return ClassifyPacket(netdev_priv(dev), skb); 45 return ClassifyPacket(netdev_priv(dev), skb);
46} 46}
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index d8ea25486a33..31b269a5fff7 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -307,7 +307,7 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
307} 307}
308 308
309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, 309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
310 void *accel_priv) 310 void *accel_priv, select_queue_fallback_t fallback)
311{ 311{
312 return (u16)smp_processor_id(); 312 return (u16)smp_processor_id();
313} 313}
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 68f98fa114d2..7c9ee58f47bb 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -653,7 +653,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
653} 653}
654 654
655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, 655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
656 void *accel_priv) 656 void *accel_priv, select_queue_fallback_t fallback)
657{ 657{
658 struct adapter *padapter = rtw_netdev_priv(dev); 658 struct adapter *padapter = rtw_netdev_priv(dev);
659 struct mlme_priv *pmlmepriv = &padapter->mlmepriv; 659 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9a68409580d5..a0fa5de210cf 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,12 @@ enum {
70}; 70};
71 71
72struct vhost_net_ubuf_ref { 72struct vhost_net_ubuf_ref {
73 struct kref kref; 73 /* refcount follows semantics similar to kref:
74 * 0: object is released
75 * 1: no outstanding ubufs
76 * >1: outstanding ubufs
77 */
78 atomic_t refcount;
74 wait_queue_head_t wait; 79 wait_queue_head_t wait;
75 struct vhost_virtqueue *vq; 80 struct vhost_virtqueue *vq;
76}; 81};
@@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq)
116 vhost_net_zcopy_mask |= 0x1 << vq; 121 vhost_net_zcopy_mask |= 0x1 << vq;
117} 122}
118 123
119static void vhost_net_zerocopy_done_signal(struct kref *kref)
120{
121 struct vhost_net_ubuf_ref *ubufs;
122
123 ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
124 wake_up(&ubufs->wait);
125}
126
127static struct vhost_net_ubuf_ref * 124static struct vhost_net_ubuf_ref *
128vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) 125vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
129{ 126{
@@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
134 ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL); 131 ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
135 if (!ubufs) 132 if (!ubufs)
136 return ERR_PTR(-ENOMEM); 133 return ERR_PTR(-ENOMEM);
137 kref_init(&ubufs->kref); 134 atomic_set(&ubufs->refcount, 1);
138 init_waitqueue_head(&ubufs->wait); 135 init_waitqueue_head(&ubufs->wait);
139 ubufs->vq = vq; 136 ubufs->vq = vq;
140 return ubufs; 137 return ubufs;
141} 138}
142 139
143static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) 140static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
144{ 141{
145 kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); 142 int r = atomic_sub_return(1, &ubufs->refcount);
143 if (unlikely(!r))
144 wake_up(&ubufs->wait);
145 return r;
146} 146}
147 147
148static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) 148static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
149{ 149{
150 kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); 150 vhost_net_ubuf_put(ubufs);
151 wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount)); 151 wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
152} 152}
153 153
154static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) 154static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -306,23 +306,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
306{ 306{
307 struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; 307 struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
308 struct vhost_virtqueue *vq = ubufs->vq; 308 struct vhost_virtqueue *vq = ubufs->vq;
309 int cnt = atomic_read(&ubufs->kref.refcount); 309 int cnt;
310
311 rcu_read_lock_bh();
310 312
311 /* set len to mark this desc buffers done DMA */ 313 /* set len to mark this desc buffers done DMA */
312 vq->heads[ubuf->desc].len = success ? 314 vq->heads[ubuf->desc].len = success ?
313 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; 315 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
314 vhost_net_ubuf_put(ubufs); 316 cnt = vhost_net_ubuf_put(ubufs);
315 317
316 /* 318 /*
317 * Trigger polling thread if guest stopped submitting new buffers: 319 * Trigger polling thread if guest stopped submitting new buffers:
318 * in this case, the refcount after decrement will eventually reach 1 320 * in this case, the refcount after decrement will eventually reach 1.
319 * so here it is 2.
320 * We also trigger polling periodically after each 16 packets 321 * We also trigger polling periodically after each 16 packets
321 * (the value 16 here is more or less arbitrary, it's tuned to trigger 322 * (the value 16 here is more or less arbitrary, it's tuned to trigger
322 * less than 10% of times). 323 * less than 10% of times).
323 */ 324 */
324 if (cnt <= 2 || !(cnt % 16)) 325 if (cnt <= 1 || !(cnt % 16))
325 vhost_poll_queue(&vq->poll); 326 vhost_poll_queue(&vq->poll);
327
328 rcu_read_unlock_bh();
326} 329}
327 330
328/* Expects to be always run from workqueue - which acts as 331/* Expects to be always run from workqueue - which acts as
@@ -420,7 +423,7 @@ static void handle_tx(struct vhost_net *net)
420 msg.msg_control = ubuf; 423 msg.msg_control = ubuf;
421 msg.msg_controllen = sizeof(ubuf); 424 msg.msg_controllen = sizeof(ubuf);
422 ubufs = nvq->ubufs; 425 ubufs = nvq->ubufs;
423 kref_get(&ubufs->kref); 426 atomic_inc(&ubufs->refcount);
424 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; 427 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
425 } else { 428 } else {
426 msg.msg_control = NULL; 429 msg.msg_control = NULL;
@@ -780,7 +783,7 @@ static void vhost_net_flush(struct vhost_net *n)
780 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); 783 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
781 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); 784 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
782 n->tx_flush = false; 785 n->tx_flush = false;
783 kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref); 786 atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
784 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); 787 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
785 } 788 }
786} 789}
@@ -800,6 +803,8 @@ static int vhost_net_release(struct inode *inode, struct file *f)
800 fput(tx_sock->file); 803 fput(tx_sock->file);
801 if (rx_sock) 804 if (rx_sock)
802 fput(rx_sock->file); 805 fput(rx_sock->file);
806 /* Make sure no callbacks are outstanding */
807 synchronize_rcu_bh();
803 /* We do an extra flush before freeing memory, 808 /* We do an extra flush before freeing memory,
804 * since jobs can re-queue themselves. */ 809 * since jobs can re-queue themselves. */
805 vhost_net_flush(n); 810 vhost_net_flush(n);
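
A note on the vhost/net.c hunks above: struct kref is replaced with a bare atomic counter whose value doubles as state (0 = released, 1 = no outstanding ubufs, >1 = ubufs in flight), and the put path itself wakes the waiter once the count reaches zero. The following is only a rough userspace C11/pthreads analogue of that protocol, offered as a sketch; the names, the mutex and the condition variable are stand-ins for the kernel's atomic_t and wait_queue_head_t, not the vhost API.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

struct ubuf_ref {
	atomic_int refcount;            /* starts at 1: "no outstanding ubufs" */
	pthread_mutex_t lock;
	pthread_cond_t wait;
};

static void ubuf_ref_init(struct ubuf_ref *u)
{
	atomic_init(&u->refcount, 1);
	pthread_mutex_init(&u->lock, NULL);
	pthread_cond_init(&u->wait, NULL);
}

static int ubuf_ref_put(struct ubuf_ref *u)
{
	int r = atomic_fetch_sub(&u->refcount, 1) - 1;

	if (r == 0) {                   /* last reference: wake the flusher */
		pthread_mutex_lock(&u->lock);
		pthread_cond_broadcast(&u->wait);
		pthread_mutex_unlock(&u->lock);
	}
	return r;                       /* callers use this like the new cnt */
}

static void ubuf_ref_put_and_wait(struct ubuf_ref *u)
{
	ubuf_ref_put(u);                /* drop the base reference... */
	pthread_mutex_lock(&u->lock);
	while (atomic_load(&u->refcount) != 0)
		pthread_cond_wait(&u->wait, &u->lock);
	pthread_mutex_unlock(&u->lock);
}

int main(void)
{
	struct ubuf_ref u;

	ubuf_ref_init(&u);
	atomic_fetch_add(&u.refcount, 1);   /* one buffer goes in flight */
	ubuf_ref_put(&u);                   /* its completion callback */
	ubuf_ref_put_and_wait(&u);          /* teardown: returns once count is 0 */
	printf("refcount=%d\n", atomic_load(&u.refcount));
	return 0;
}

This is also why vhost_net_flush() can simply re-arm the counter with atomic_set(..., 1) afterwards: once put_and_wait() returns, nothing else holds a reference.
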
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 4c2d452c4bfc..21887d63dad5 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -54,11 +54,6 @@ static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode,
54 return acl; 54 return acl;
55} 55}
56 56
57void ceph_forget_all_cached_acls(struct inode *inode)
58{
59 forget_all_cached_acls(inode);
60}
61
62struct posix_acl *ceph_get_acl(struct inode *inode, int type) 57struct posix_acl *ceph_get_acl(struct inode *inode, int type)
63{ 58{
64 int size; 59 int size;
@@ -160,11 +155,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
160 goto out_dput; 155 goto out_dput;
161 } 156 }
162 157
163 if (value) 158 ret = __ceph_setxattr(dentry, name, value, size, 0);
164 ret = __ceph_setxattr(dentry, name, value, size, 0);
165 else
166 ret = __ceph_removexattr(dentry, name);
167
168 if (ret) { 159 if (ret) {
169 if (new_mode != old_mode) { 160 if (new_mode != old_mode) {
170 newattrs.ia_mode = old_mode; 161 newattrs.ia_mode = old_mode;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 6da4df84ba30..45eda6d7a40c 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -100,6 +100,14 @@ static unsigned fpos_off(loff_t p)
100 return p & 0xffffffff; 100 return p & 0xffffffff;
101} 101}
102 102
103static int fpos_cmp(loff_t l, loff_t r)
104{
105 int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
106 if (v)
107 return v;
108 return (int)(fpos_off(l) - fpos_off(r));
109}
110
103/* 111/*
104 * When possible, we try to satisfy a readdir by peeking at the 112 * When possible, we try to satisfy a readdir by peeking at the
105 * dcache. We make this work by carefully ordering dentries on 113 * dcache. We make this work by carefully ordering dentries on
@@ -156,7 +164,7 @@ more:
156 if (!d_unhashed(dentry) && dentry->d_inode && 164 if (!d_unhashed(dentry) && dentry->d_inode &&
157 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && 165 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
158 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && 166 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
159 ctx->pos <= di->offset) 167 fpos_cmp(ctx->pos, di->offset) <= 0)
160 break; 168 break;
161 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, 169 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
162 dentry->d_name.len, dentry->d_name.name, di->offset, 170 dentry->d_name.len, dentry->d_name.name, di->offset,
@@ -695,9 +703,8 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
695 ceph_mdsc_put_request(req); 703 ceph_mdsc_put_request(req);
696 704
697 if (!err) 705 if (!err)
698 err = ceph_init_acl(dentry, dentry->d_inode, dir); 706 ceph_init_acl(dentry, dentry->d_inode, dir);
699 707 else
700 if (err)
701 d_drop(dentry); 708 d_drop(dentry);
702 return err; 709 return err;
703} 710}
@@ -735,7 +742,9 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
735 if (!err && !req->r_reply_info.head->is_dentry) 742 if (!err && !req->r_reply_info.head->is_dentry)
736 err = ceph_handle_notrace_create(dir, dentry); 743 err = ceph_handle_notrace_create(dir, dentry);
737 ceph_mdsc_put_request(req); 744 ceph_mdsc_put_request(req);
738 if (err) 745 if (!err)
746 ceph_init_acl(dentry, dentry->d_inode, dir);
747 else
739 d_drop(dentry); 748 d_drop(dentry);
740 return err; 749 return err;
741} 750}
@@ -776,7 +785,9 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
776 err = ceph_handle_notrace_create(dir, dentry); 785 err = ceph_handle_notrace_create(dir, dentry);
777 ceph_mdsc_put_request(req); 786 ceph_mdsc_put_request(req);
778out: 787out:
779 if (err < 0) 788 if (!err)
789 ceph_init_acl(dentry, dentry->d_inode, dir);
790 else
780 d_drop(dentry); 791 d_drop(dentry);
781 return err; 792 return err;
782} 793}
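
The dir.c hunk above replaces the raw "ctx->pos <= di->offset" test with fpos_cmp(), which compares the fragment half of the 64-bit readdir position before the offset half. A minimal sketch of that two-part compare follows; the packing helpers mirror fpos_frag()/fpos_off() from the patch, but the plain unsigned comparison stands in for ceph_frag_compare(), which is more involved in the real code.

#include <stdint.h>
#include <stdio.h>

static uint32_t fpos_frag(int64_t p) { return (uint32_t)(p >> 32); }
static uint32_t fpos_off(int64_t p)  { return (uint32_t)(p & 0xffffffff); }

static int fpos_cmp(int64_t l, int64_t r)
{
	if (fpos_frag(l) != fpos_frag(r))           /* compare fragments first */
		return fpos_frag(l) < fpos_frag(r) ? -1 : 1;
	return (int)(fpos_off(l) - fpos_off(r));    /* then offsets within one */
}

int main(void)
{
	int64_t a = ((int64_t)1 << 32) | 10;        /* frag 1, offset 10 */
	int64_t b = ((int64_t)2 << 32) | 5;         /* frag 2, offset 5  */

	printf("%d\n", fpos_cmp(a, b));             /* negative: a sorts first */
	return 0;
}
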
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index dfd2ce3419f8..09c7afe32e49 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -286,6 +286,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
286 } else { 286 } else {
287 dout("atomic_open finish_open on dn %p\n", dn); 287 dout("atomic_open finish_open on dn %p\n", dn);
288 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 288 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
289 ceph_init_acl(dentry, dentry->d_inode, dir);
289 *opened |= FILE_CREATED; 290 *opened |= FILE_CREATED;
290 } 291 }
291 err = finish_open(file, dentry, ceph_open, opened); 292 err = finish_open(file, dentry, ceph_open, opened);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2df963f1cf5a..10a4ccbf38da 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -144,7 +144,11 @@ enum {
144 Opt_ino32, 144 Opt_ino32,
145 Opt_noino32, 145 Opt_noino32,
146 Opt_fscache, 146 Opt_fscache,
147 Opt_nofscache 147 Opt_nofscache,
148#ifdef CONFIG_CEPH_FS_POSIX_ACL
149 Opt_acl,
150#endif
151 Opt_noacl
148}; 152};
149 153
150static match_table_t fsopt_tokens = { 154static match_table_t fsopt_tokens = {
@@ -172,6 +176,10 @@ static match_table_t fsopt_tokens = {
172 {Opt_noino32, "noino32"}, 176 {Opt_noino32, "noino32"},
173 {Opt_fscache, "fsc"}, 177 {Opt_fscache, "fsc"},
174 {Opt_nofscache, "nofsc"}, 178 {Opt_nofscache, "nofsc"},
179#ifdef CONFIG_CEPH_FS_POSIX_ACL
180 {Opt_acl, "acl"},
181#endif
182 {Opt_noacl, "noacl"},
175 {-1, NULL} 183 {-1, NULL}
176}; 184};
177 185
@@ -271,6 +279,14 @@ static int parse_fsopt_token(char *c, void *private)
271 case Opt_nofscache: 279 case Opt_nofscache:
272 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; 280 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
273 break; 281 break;
282#ifdef CONFIG_CEPH_FS_POSIX_ACL
283 case Opt_acl:
284 fsopt->sb_flags |= MS_POSIXACL;
285 break;
286#endif
287 case Opt_noacl:
288 fsopt->sb_flags &= ~MS_POSIXACL;
289 break;
274 default: 290 default:
275 BUG_ON(token); 291 BUG_ON(token);
276 } 292 }
@@ -438,6 +454,13 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
438 else 454 else
439 seq_puts(m, ",nofsc"); 455 seq_puts(m, ",nofsc");
440 456
457#ifdef CONFIG_CEPH_FS_POSIX_ACL
458 if (fsopt->sb_flags & MS_POSIXACL)
459 seq_puts(m, ",acl");
460 else
461 seq_puts(m, ",noacl");
462#endif
463
441 if (fsopt->wsize) 464 if (fsopt->wsize)
442 seq_printf(m, ",wsize=%d", fsopt->wsize); 465 seq_printf(m, ",wsize=%d", fsopt->wsize);
443 if (fsopt->rsize != CEPH_RSIZE_DEFAULT) 466 if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
@@ -819,9 +842,6 @@ static int ceph_set_super(struct super_block *s, void *data)
819 842
820 s->s_flags = fsc->mount_options->sb_flags; 843 s->s_flags = fsc->mount_options->sb_flags;
821 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ 844 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
822#ifdef CONFIG_CEPH_FS_POSIX_ACL
823 s->s_flags |= MS_POSIXACL;
824#endif
825 845
826 s->s_xattr = ceph_xattr_handlers; 846 s->s_xattr = ceph_xattr_handlers;
827 s->s_fs_info = fsc; 847 s->s_fs_info = fsc;
@@ -911,6 +931,10 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
911 struct ceph_options *opt = NULL; 931 struct ceph_options *opt = NULL;
912 932
913 dout("ceph_mount\n"); 933 dout("ceph_mount\n");
934
935#ifdef CONFIG_CEPH_FS_POSIX_ACL
936 flags |= MS_POSIXACL;
937#endif
914 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path); 938 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
915 if (err < 0) { 939 if (err < 0) {
916 res = ERR_PTR(err); 940 res = ERR_PTR(err);
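
Taken together, the super.c hunks above move POSIX ACL control into the mount options: ceph_mount() enables MS_POSIXACL by default when CONFIG_CEPH_FS_POSIX_ACL is built in, the "acl"/"noacl" tokens flip that bit during option parsing, and ceph_show_options() reports whichever state is active. The sketch below shows only that flag-toggling logic under an invented MY_POSIXACL bit, with plain string compares standing in for the kernel's match_table_t parser.

#include <stdio.h>
#include <string.h>

#define MY_POSIXACL (1u << 0)          /* stand-in for MS_POSIXACL */

static void parse_opt(const char *tok, unsigned int *sb_flags)
{
	if (strcmp(tok, "acl") == 0)
		*sb_flags |= MY_POSIXACL;
	else if (strcmp(tok, "noacl") == 0)
		*sb_flags &= ~MY_POSIXACL;
}

int main(void)
{
	unsigned int sb_flags = MY_POSIXACL;   /* default on, as in ceph_mount() */

	parse_opt("noacl", &sb_flags);
	printf("%s\n", (sb_flags & MY_POSIXACL) ? ",acl" : ",noacl");
	return 0;
}
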
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 19793b56d0a7..d8801a95b685 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -13,6 +13,7 @@
13#include <linux/wait.h> 13#include <linux/wait.h>
14#include <linux/writeback.h> 14#include <linux/writeback.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/posix_acl.h>
16 17
17#include <linux/ceph/libceph.h> 18#include <linux/ceph/libceph.h>
18 19
@@ -743,7 +744,11 @@ extern const struct xattr_handler *ceph_xattr_handlers[];
743struct posix_acl *ceph_get_acl(struct inode *, int); 744struct posix_acl *ceph_get_acl(struct inode *, int);
744int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type); 745int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type);
745int ceph_init_acl(struct dentry *, struct inode *, struct inode *); 746int ceph_init_acl(struct dentry *, struct inode *, struct inode *);
746void ceph_forget_all_cached_acls(struct inode *inode); 747
748static inline void ceph_forget_all_cached_acls(struct inode *inode)
749{
750 forget_all_cached_acls(inode);
751}
747 752
748#else 753#else
749 754
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 898b6565ad3e..a55ec37378c6 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -12,6 +12,9 @@
12#define XATTR_CEPH_PREFIX "ceph." 12#define XATTR_CEPH_PREFIX "ceph."
13#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1) 13#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
14 14
15static int __remove_xattr(struct ceph_inode_info *ci,
16 struct ceph_inode_xattr *xattr);
17
15/* 18/*
16 * List of handlers for synthetic system.* attributes. Other 19 * List of handlers for synthetic system.* attributes. Other
17 * attributes are handled directly. 20 * attributes are handled directly.
@@ -319,8 +322,7 @@ static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
319static int __set_xattr(struct ceph_inode_info *ci, 322static int __set_xattr(struct ceph_inode_info *ci,
320 const char *name, int name_len, 323 const char *name, int name_len,
321 const char *val, int val_len, 324 const char *val, int val_len,
322 int dirty, 325 int flags, int update_xattr,
323 int should_free_name, int should_free_val,
324 struct ceph_inode_xattr **newxattr) 326 struct ceph_inode_xattr **newxattr)
325{ 327{
326 struct rb_node **p; 328 struct rb_node **p;
@@ -349,12 +351,31 @@ static int __set_xattr(struct ceph_inode_info *ci,
349 xattr = NULL; 351 xattr = NULL;
350 } 352 }
351 353
354 if (update_xattr) {
355 int err = 0;
356 if (xattr && (flags & XATTR_CREATE))
357 err = -EEXIST;
358 else if (!xattr && (flags & XATTR_REPLACE))
359 err = -ENODATA;
360 if (err) {
361 kfree(name);
362 kfree(val);
363 return err;
364 }
365 if (update_xattr < 0) {
366 if (xattr)
367 __remove_xattr(ci, xattr);
368 kfree(name);
369 return 0;
370 }
371 }
372
352 if (!xattr) { 373 if (!xattr) {
353 new = 1; 374 new = 1;
354 xattr = *newxattr; 375 xattr = *newxattr;
355 xattr->name = name; 376 xattr->name = name;
356 xattr->name_len = name_len; 377 xattr->name_len = name_len;
357 xattr->should_free_name = should_free_name; 378 xattr->should_free_name = update_xattr;
358 379
359 ci->i_xattrs.count++; 380 ci->i_xattrs.count++;
360 dout("__set_xattr count=%d\n", ci->i_xattrs.count); 381 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
@@ -364,7 +385,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
364 if (xattr->should_free_val) 385 if (xattr->should_free_val)
365 kfree((void *)xattr->val); 386 kfree((void *)xattr->val);
366 387
367 if (should_free_name) { 388 if (update_xattr) {
368 kfree((void *)name); 389 kfree((void *)name);
369 name = xattr->name; 390 name = xattr->name;
370 } 391 }
@@ -379,8 +400,8 @@ static int __set_xattr(struct ceph_inode_info *ci,
379 xattr->val = ""; 400 xattr->val = "";
380 401
381 xattr->val_len = val_len; 402 xattr->val_len = val_len;
382 xattr->dirty = dirty; 403 xattr->dirty = update_xattr;
383 xattr->should_free_val = (val && should_free_val); 404 xattr->should_free_val = (val && update_xattr);
384 405
385 if (new) { 406 if (new) {
386 rb_link_node(&xattr->node, parent, p); 407 rb_link_node(&xattr->node, parent, p);
@@ -442,7 +463,7 @@ static int __remove_xattr(struct ceph_inode_info *ci,
442 struct ceph_inode_xattr *xattr) 463 struct ceph_inode_xattr *xattr)
443{ 464{
444 if (!xattr) 465 if (!xattr)
445 return -EOPNOTSUPP; 466 return -ENODATA;
446 467
447 rb_erase(&xattr->node, &ci->i_xattrs.index); 468 rb_erase(&xattr->node, &ci->i_xattrs.index);
448 469
@@ -588,7 +609,7 @@ start:
588 p += len; 609 p += len;
589 610
590 err = __set_xattr(ci, name, namelen, val, len, 611 err = __set_xattr(ci, name, namelen, val, len,
591 0, 0, 0, &xattrs[numattr]); 612 0, 0, &xattrs[numattr]);
592 613
593 if (err < 0) 614 if (err < 0)
594 goto bad; 615 goto bad;
@@ -850,6 +871,9 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
850 871
851 dout("setxattr value=%.*s\n", (int)size, value); 872 dout("setxattr value=%.*s\n", (int)size, value);
852 873
874 if (!value)
875 flags |= CEPH_XATTR_REMOVE;
876
853 /* do request */ 877 /* do request */
854 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR, 878 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
855 USE_AUTH_MDS); 879 USE_AUTH_MDS);
@@ -892,7 +916,7 @@ int __ceph_setxattr(struct dentry *dentry, const char *name,
892 struct ceph_inode_info *ci = ceph_inode(inode); 916 struct ceph_inode_info *ci = ceph_inode(inode);
893 int issued; 917 int issued;
894 int err; 918 int err;
895 int dirty; 919 int dirty = 0;
896 int name_len = strlen(name); 920 int name_len = strlen(name);
897 int val_len = size; 921 int val_len = size;
898 char *newname = NULL; 922 char *newname = NULL;
@@ -953,12 +977,14 @@ retry:
953 goto retry; 977 goto retry;
954 } 978 }
955 979
956 err = __set_xattr(ci, newname, name_len, newval, 980 err = __set_xattr(ci, newname, name_len, newval, val_len,
957 val_len, 1, 1, 1, &xattr); 981 flags, value ? 1 : -1, &xattr);
958 982
959 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 983 if (!err) {
960 ci->i_xattrs.dirty = true; 984 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
961 inode->i_ctime = CURRENT_TIME; 985 ci->i_xattrs.dirty = true;
986 inode->i_ctime = CURRENT_TIME;
987 }
962 988
963 spin_unlock(&ci->i_ceph_lock); 989 spin_unlock(&ci->i_ceph_lock);
964 if (dirty) 990 if (dirty)
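
The xattr.c changes above teach __set_xattr() about setxattr semantics: XATTR_CREATE must fail with -EEXIST when the attribute already exists, XATTR_REPLACE must fail with -ENODATA when it does not, and a negative update_xattr (used when the caller passes a NULL value, which now also sets CEPH_XATTR_REMOVE for the MDS request) removes the cached entry. A decision-only sketch of those checks, with the rbtree bookkeeping elided and ENODATA assumed to be available as on Linux:

#include <errno.h>
#include <stdio.h>

#define XATTR_CREATE  0x1
#define XATTR_REPLACE 0x2

/* Returns <0 on error, 0 for "remove / nothing to keep", 1 for "store it". */
static int check_xattr_update(int exists, int flags, int update)
{
	if (exists && (flags & XATTR_CREATE))
		return -EEXIST;
	if (!exists && (flags & XATTR_REPLACE))
		return -ENODATA;
	if (update < 0)          /* removal request: erase if present */
		return 0;
	return 1;                /* create or overwrite */
}

int main(void)
{
	printf("%d\n", check_xattr_update(1, XATTR_CREATE, 1));   /* -EEXIST  */
	printf("%d\n", check_xattr_update(0, XATTR_REPLACE, 1));  /* -ENODATA */
	printf("%d\n", check_xattr_update(1, 0, -1));             /* 0: remove */
	return 0;
}
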
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index c819b0bd491a..7ff866dbb89e 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -865,8 +865,8 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
865 return rc; 865 return rc;
866} 866}
867 867
868static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, 868struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
869 __u16 fid, u32 *pacllen) 869 const struct cifs_fid *cifsfid, u32 *pacllen)
870{ 870{
871 struct cifs_ntsd *pntsd = NULL; 871 struct cifs_ntsd *pntsd = NULL;
872 unsigned int xid; 872 unsigned int xid;
@@ -877,7 +877,8 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
877 return ERR_CAST(tlink); 877 return ERR_CAST(tlink);
878 878
879 xid = get_xid(); 879 xid = get_xid();
880 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen); 880 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
881 pacllen);
881 free_xid(xid); 882 free_xid(xid);
882 883
883 cifs_put_tlink(tlink); 884 cifs_put_tlink(tlink);
@@ -946,7 +947,7 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
946 if (!open_file) 947 if (!open_file)
947 return get_cifs_acl_by_path(cifs_sb, path, pacllen); 948 return get_cifs_acl_by_path(cifs_sb, path, pacllen);
948 949
949 pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen); 950 pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
950 cifsFileInfo_put(open_file); 951 cifsFileInfo_put(open_file);
951 return pntsd; 952 return pntsd;
952} 953}
@@ -1006,19 +1007,31 @@ out:
1006/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ 1007/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
1007int 1008int
1008cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, 1009cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1009 struct inode *inode, const char *path, const __u16 *pfid) 1010 struct inode *inode, const char *path,
1011 const struct cifs_fid *pfid)
1010{ 1012{
1011 struct cifs_ntsd *pntsd = NULL; 1013 struct cifs_ntsd *pntsd = NULL;
1012 u32 acllen = 0; 1014 u32 acllen = 0;
1013 int rc = 0; 1015 int rc = 0;
1016 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1017 struct cifs_tcon *tcon;
1014 1018
1015 cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); 1019 cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
1016 1020
1017 if (pfid) 1021 if (IS_ERR(tlink))
1018 pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen); 1022 return PTR_ERR(tlink);
1019 else 1023 tcon = tlink_tcon(tlink);
1020 pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
1021 1024
1025 if (pfid && (tcon->ses->server->ops->get_acl_by_fid))
1026 pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid,
1027 &acllen);
1028 else if (tcon->ses->server->ops->get_acl)
1029 pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
1030 &acllen);
1031 else {
1032 cifs_put_tlink(tlink);
1033 return -EOPNOTSUPP;
1034 }
1022 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ 1035 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
1023 if (IS_ERR(pntsd)) { 1036 if (IS_ERR(pntsd)) {
1024 rc = PTR_ERR(pntsd); 1037 rc = PTR_ERR(pntsd);
@@ -1030,6 +1043,8 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1030 cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc); 1043 cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc);
1031 } 1044 }
1032 1045
1046 cifs_put_tlink(tlink);
1047
1033 return rc; 1048 return rc;
1034} 1049}
1035 1050
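
The cifsacl.c rework above stops calling get_cifs_acl_by_fid() directly from cifs_acl_to_fattr() and instead dispatches through the per-dialect server->ops table: get_acl_by_fid when an open handle is available, get_acl otherwise, and -EOPNOTSUPP when neither is wired up. A rough sketch of that dispatch pattern follows; the structures are stand-ins, not the real CIFS types.

#include <errno.h>
#include <stdio.h>

struct acl { int len; };
struct fid { int netfid; };

struct server_ops {
	struct acl *(*get_acl_by_fid)(const struct fid *fid);
	struct acl *(*get_acl)(const char *path);
};

static struct acl *fetch_acl(const struct server_ops *ops,
			     const struct fid *fid, const char *path, int *rc)
{
	*rc = 0;
	if (fid && ops->get_acl_by_fid)
		return ops->get_acl_by_fid(fid);    /* fastest: use the handle */
	if (ops->get_acl)
		return ops->get_acl(path);          /* fall back to the path */
	*rc = -EOPNOTSUPP;                          /* dialect has no ACL support */
	return NULL;
}

static struct acl *demo_get_acl(const char *path)
{
	static struct acl a = { 42 };

	(void)path;
	return &a;
}

int main(void)
{
	struct server_ops ops = { .get_acl_by_fid = NULL, .get_acl = demo_get_acl };
	int rc;
	struct acl *a = fetch_acl(&ops, NULL, "/file", &rc);

	printf("rc=%d len=%d\n", rc, a ? a->len : -1);
	return 0;
}
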
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 86dc28c7aa5c..cf32f0393369 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -398,6 +398,8 @@ struct smb_version_operations {
398 const struct nls_table *, int); 398 const struct nls_table *, int);
399 struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *, 399 struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
400 const char *, u32 *); 400 const char *, u32 *);
401 struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *,
402 const struct cifs_fid *, u32 *);
401 int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, 403 int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
402 int); 404 int);
403}; 405};
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index d00e09dfc452..acc4ee8ed075 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -151,7 +151,7 @@ extern struct inode *cifs_iget(struct super_block *sb,
151 151
152extern int cifs_get_inode_info(struct inode **inode, const char *full_path, 152extern int cifs_get_inode_info(struct inode **inode, const char *full_path,
153 FILE_ALL_INFO *data, struct super_block *sb, 153 FILE_ALL_INFO *data, struct super_block *sb,
154 int xid, const __u16 *fid); 154 int xid, const struct cifs_fid *fid);
155extern int cifs_get_inode_info_unix(struct inode **pinode, 155extern int cifs_get_inode_info_unix(struct inode **pinode,
156 const unsigned char *search_path, 156 const unsigned char *search_path,
157 struct super_block *sb, unsigned int xid); 157 struct super_block *sb, unsigned int xid);
@@ -162,11 +162,13 @@ extern int cifs_rename_pending_delete(const char *full_path,
162 const unsigned int xid); 162 const unsigned int xid);
163extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, 163extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
164 struct cifs_fattr *fattr, struct inode *inode, 164 struct cifs_fattr *fattr, struct inode *inode,
165 const char *path, const __u16 *pfid); 165 const char *path, const struct cifs_fid *pfid);
166extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64, 166extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
167 kuid_t, kgid_t); 167 kuid_t, kgid_t);
168extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, 168extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
169 const char *, u32 *); 169 const char *, u32 *);
170extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
171 const struct cifs_fid *, u32 *);
170extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, 172extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
171 const char *, int); 173 const char *, int);
172 174
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d3a6796caa5a..3db0c5fd9a11 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -378,7 +378,7 @@ cifs_create_get_file_info:
378 xid); 378 xid);
379 else { 379 else {
380 rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, 380 rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb,
381 xid, &fid->netfid); 381 xid, fid);
382 if (newinode) { 382 if (newinode) {
383 if (server->ops->set_lease_key) 383 if (server->ops->set_lease_key)
384 server->ops->set_lease_key(newinode, fid); 384 server->ops->set_lease_key(newinode, fid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 755584684f6c..53c15074bb36 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -244,7 +244,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
244 xid); 244 xid);
245 else 245 else
246 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, 246 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
247 xid, &fid->netfid); 247 xid, fid);
248 248
249out: 249out:
250 kfree(buf); 250 kfree(buf);
@@ -2389,7 +2389,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2389 unsigned long nr_segs, loff_t *poffset) 2389 unsigned long nr_segs, loff_t *poffset)
2390{ 2390{
2391 unsigned long nr_pages, i; 2391 unsigned long nr_pages, i;
2392 size_t copied, len, cur_len; 2392 size_t bytes, copied, len, cur_len;
2393 ssize_t total_written = 0; 2393 ssize_t total_written = 0;
2394 loff_t offset; 2394 loff_t offset;
2395 struct iov_iter it; 2395 struct iov_iter it;
@@ -2444,14 +2444,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2444 2444
2445 save_len = cur_len; 2445 save_len = cur_len;
2446 for (i = 0; i < nr_pages; i++) { 2446 for (i = 0; i < nr_pages; i++) {
2447 copied = min_t(const size_t, cur_len, PAGE_SIZE); 2447 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2448 copied = iov_iter_copy_from_user(wdata->pages[i], &it, 2448 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2449 0, copied); 2449 0, bytes);
2450 cur_len -= copied; 2450 cur_len -= copied;
2451 iov_iter_advance(&it, copied); 2451 iov_iter_advance(&it, copied);
2452 /*
2453 * If we didn't copy as much as we expected, then that
2454 * may mean we trod into an unmapped area. Stop copying
2455 * at that point. On the next pass through the big
2456 * loop, we'll likely end up getting a zero-length
2457 * write and bailing out of it.
2458 */
2459 if (copied < bytes)
2460 break;
2452 } 2461 }
2453 cur_len = save_len - cur_len; 2462 cur_len = save_len - cur_len;
2454 2463
2464 /*
2465 * If we have no data to send, then that probably means that
2466 * the copy above failed altogether. That's most likely because
2467 * the address in the iovec was bogus. Set the rc to -EFAULT,
2468 * free anything we allocated and bail out.
2469 */
2470 if (!cur_len) {
2471 for (i = 0; i < nr_pages; i++)
2472 put_page(wdata->pages[i]);
2473 kfree(wdata);
2474 rc = -EFAULT;
2475 break;
2476 }
2477
2478 /*
2479 * i + 1 now represents the number of pages we actually used in
2480 * the copy phase above. Bring nr_pages down to that, and free
2481 * any pages that we didn't use.
2482 */
2483 for ( ; nr_pages > i + 1; nr_pages--)
2484 put_page(wdata->pages[nr_pages - 1]);
2485
2455 wdata->sync_mode = WB_SYNC_ALL; 2486 wdata->sync_mode = WB_SYNC_ALL;
2456 wdata->nr_pages = nr_pages; 2487 wdata->nr_pages = nr_pages;
2457 wdata->offset = (__u64)offset; 2488 wdata->offset = (__u64)offset;
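
The cifs_iovec_write() hunk above handles short copies from userspace: copy up to a page at a time, stop at the first partial copy, trim the page array to what was actually filled, and treat "nothing copied at all" as -EFAULT. The sketch below reproduces only that bookkeeping in plain C; memcpy plays the role of iov_iter_copy_from_user(), and the page trimming/put_page() side is reduced to returning how many bytes survive.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Pretend copier: copies what it can and reports how much that was. */
static size_t copy_chunk(char *dst, const char *src, size_t want, size_t avail)
{
	size_t n = want < avail ? want : avail;

	memcpy(dst, src, n);
	return n;
}

static long fill_pages(char pages[][PAGE_SIZE], size_t nr_pages,
		       const char *src, size_t len)
{
	size_t cur = len, used = 0, i;

	for (i = 0; i < nr_pages && cur; i++) {
		size_t want = cur < PAGE_SIZE ? cur : PAGE_SIZE;
		size_t copied = copy_chunk(pages[i], src + used, want, len - used);

		cur -= copied;
		used += copied;
		if (copied < want)      /* hit an "unmapped" area: stop here */
			break;
	}
	if (used == 0)
		return -EFAULT;         /* the copy failed altogether */
	return (long)used;              /* only this much gets submitted */
}

int main(void)
{
	static char pages[4][PAGE_SIZE];
	static char src[3 * PAGE_SIZE];

	printf("%ld\n", fill_pages(pages, 4, src, sizeof(src)));
	return 0;
}
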
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index be58b8fcdb3c..aadc2b68678b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -677,7 +677,7 @@ cgfi_exit:
677int 677int
678cifs_get_inode_info(struct inode **inode, const char *full_path, 678cifs_get_inode_info(struct inode **inode, const char *full_path,
679 FILE_ALL_INFO *data, struct super_block *sb, int xid, 679 FILE_ALL_INFO *data, struct super_block *sb, int xid,
680 const __u16 *fid) 680 const struct cifs_fid *fid)
681{ 681{
682 bool validinum = false; 682 bool validinum = false;
683 __u16 srchflgs; 683 __u16 srchflgs;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index bfd66d84831e..526fb89f9230 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1073,6 +1073,7 @@ struct smb_version_operations smb1_operations = {
1073#endif /* CIFS_XATTR */ 1073#endif /* CIFS_XATTR */
1074#ifdef CONFIG_CIFS_ACL 1074#ifdef CONFIG_CIFS_ACL
1075 .get_acl = get_cifs_acl, 1075 .get_acl = get_cifs_acl,
1076 .get_acl_by_fid = get_cifs_acl_by_fid,
1076 .set_acl = set_cifs_acl, 1077 .set_acl = set_cifs_acl,
1077#endif /* CIFS_ACL */ 1078#endif /* CIFS_ACL */
1078}; 1079};
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index c38350851b08..bc0bb9c34f72 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -57,4 +57,7 @@
57#define SMB2_CMACAES_SIZE (16) 57#define SMB2_CMACAES_SIZE (16)
58#define SMB3_SIGNKEY_SIZE (16) 58#define SMB3_SIGNKEY_SIZE (16)
59 59
60/* Maximum buffer size value we can send with 1 credit */
61#define SMB2_MAX_BUFFER_SIZE 65536
62
60#endif /* _SMB2_GLOB_H */ 63#endif /* _SMB2_GLOB_H */
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 757da3e54d3d..192f51a12cf1 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -182,11 +182,8 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
182 /* start with specified wsize, or default */ 182 /* start with specified wsize, or default */
183 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; 183 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
184 wsize = min_t(unsigned int, wsize, server->max_write); 184 wsize = min_t(unsigned int, wsize, server->max_write);
185 /* 185 /* set it to the maximum buffer size value we can send with 1 credit */
186 * limit write size to 2 ** 16, because we don't support multicredit 186 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
187 * requests now.
188 */
189 wsize = min_t(unsigned int, wsize, 2 << 15);
190 187
191 return wsize; 188 return wsize;
192} 189}
@@ -200,11 +197,8 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
200 /* start with specified rsize, or default */ 197 /* start with specified rsize, or default */
201 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; 198 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
202 rsize = min_t(unsigned int, rsize, server->max_read); 199 rsize = min_t(unsigned int, rsize, server->max_read);
203 /* 200 /* set it to the maximum buffer size value we can send with 1 credit */
204 * limit write size to 2 ** 16, because we don't support multicredit 201 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
205 * requests now.
206 */
207 rsize = min_t(unsigned int, rsize, 2 << 15);
208 202
209 return rsize; 203 return rsize;
210} 204}
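
Both smb2ops.c hunks above replace the open-coded "2 << 15" with the new SMB2_MAX_BUFFER_SIZE constant; since 2 << 15 is also 65536, wsize/rsize behaviour is unchanged and the functional part of the series is the matching cap applied to server->maxBuf in SMB2_negotiate(). A tiny sketch of the clamp, with an invented default in place of CIFS_DEFAULT_IOSIZE:

#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE 65536u    /* largest buffer sendable with 1 credit */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int negotiate_wsize(unsigned int requested,
				    unsigned int server_max_write)
{
	unsigned int wsize = requested ? requested : 1048576; /* stand-in default */

	wsize = min_u(wsize, server_max_write);
	return min_u(wsize, SMB2_MAX_BUFFER_SIZE);
}

int main(void)
{
	printf("%u\n", negotiate_wsize(0, 8u * 1024 * 1024));  /* -> 65536 */
	return 0;
}
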
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index a3f7a9c3cc69..860344701067 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -413,7 +413,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
413 413
414 /* SMB2 only has an extended negflavor */ 414 /* SMB2 only has an extended negflavor */
415 server->negflavor = CIFS_NEGFLAVOR_EXTENDED; 415 server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
416 server->maxBuf = le32_to_cpu(rsp->MaxTransactSize); 416 /* set it to the maximum buffer size value we can send with 1 credit */
417 server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
418 SMB2_MAX_BUFFER_SIZE);
417 server->max_read = le32_to_cpu(rsp->MaxReadSize); 419 server->max_read = le32_to_cpu(rsp->MaxReadSize);
418 server->max_write = le32_to_cpu(rsp->MaxWriteSize); 420 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
419 /* BB Do we need to validate the SecurityMode? */ 421 /* BB Do we need to validate the SecurityMode? */
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ece55565b9cd..d3a534fdc5ff 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -771,6 +771,8 @@ do { \
771 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ 771 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
772 (einode)->xtime.tv_sec = \ 772 (einode)->xtime.tv_sec = \
773 (signed)le32_to_cpu((raw_inode)->xtime); \ 773 (signed)le32_to_cpu((raw_inode)->xtime); \
774 else \
775 (einode)->xtime.tv_sec = 0; \
774 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ 776 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
775 ext4_decode_extra_time(&(einode)->xtime, \ 777 ext4_decode_extra_time(&(einode)->xtime, \
776 raw_inode->xtime ## _extra); \ 778 raw_inode->xtime ## _extra); \
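
The ext4.h hunk above adds the missing else branch to the timestamp-decode macro: when the on-disk inode is too small to carry an extended timestamp, the in-memory value is now zeroed instead of being left with stale contents. The sketch below makes the same point with a plain function; the field names and the offset are illustrative, not the real EXT4_FITS_IN_INODE() layout.

#include <stdint.h>
#include <stdio.h>

struct raw_inode { uint16_t extra_isize; int32_t crtime; }; /* crtime is optional */
struct mem_inode { long crtime_sec; };

#define FITS_IN_INODE(raw, field_end) ((raw)->extra_isize >= (field_end))

static void decode_crtime(const struct raw_inode *raw, struct mem_inode *mi)
{
	if (FITS_IN_INODE(raw, 8))      /* pretend crtime ends at offset 8 */
		mi->crtime_sec = raw->crtime;
	else
		mi->crtime_sec = 0;     /* the branch the patch adds */
}

int main(void)
{
	struct raw_inode small = { .extra_isize = 0, .crtime = 123 };
	struct mem_inode mi = { .crtime_sec = -1 };

	decode_crtime(&small, &mi);
	printf("%ld\n", mi.crtime_sec); /* 0, not leftover garbage */
	return 0;
}
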
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 10cff4736b11..74bc2d549c58 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3906,6 +3906,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3906 } else 3906 } else
3907 err = ret; 3907 err = ret;
3908 map->m_flags |= EXT4_MAP_MAPPED; 3908 map->m_flags |= EXT4_MAP_MAPPED;
3909 map->m_pblk = newblock;
3909 if (allocated > map->m_len) 3910 if (allocated > map->m_len)
3910 allocated = map->m_len; 3911 allocated = map->m_len;
3911 map->m_len = allocated; 3912 map->m_len = allocated;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 6bea80614d77..a2a837f00407 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -140,7 +140,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
140 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2); 140 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
141 if (IS_ERR(handle)) { 141 if (IS_ERR(handle)) {
142 err = -EINVAL; 142 err = -EINVAL;
143 goto swap_boot_out; 143 goto journal_err_out;
144 } 144 }
145 145
146 /* Protect extent tree against block allocations via delalloc */ 146 /* Protect extent tree against block allocations via delalloc */
@@ -198,6 +198,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
198 198
199 ext4_double_up_write_data_sem(inode, inode_bl); 199 ext4_double_up_write_data_sem(inode, inode_bl);
200 200
201journal_err_out:
201 ext4_inode_resume_unlocked_dio(inode); 202 ext4_inode_resume_unlocked_dio(inode);
202 ext4_inode_resume_unlocked_dio(inode_bl); 203 ext4_inode_resume_unlocked_dio(inode_bl);
203 204
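
The ioctl.c fix above is purely about unwind order: direct I/O is already blocked on both inodes by the time the journal handle is started, so a failure there must jump to a label that still runs the resume calls. A minimal sketch of that goto-based unwinding, with counters standing in for the ext4 inode state:

#include <errno.h>
#include <stdio.h>

static int dio_stopped;

static void stop_dio(void)   { dio_stopped++; }
static void resume_dio(void) { dio_stopped--; }
static int  start_journal(int fail) { return fail ? -1 : 0; }

static long swap_boot_loader(int fail_journal)
{
	long err = 0;

	stop_dio();                    /* pause direct I/O first */
	if (start_journal(fail_journal) < 0) {
		err = -EINVAL;
		goto journal_err_out;  /* previously jumped past resume_dio() */
	}
	/* ... the actual swap work would go here ... */
journal_err_out:
	resume_dio();
	return err;
}

int main(void)
{
	swap_boot_loader(1);
	printf("dio_stopped=%d\n", dio_stopped);   /* 0: balanced on failure too */
	return 0;
}
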
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c5adbb318a90..f3b84cd9de56 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -243,6 +243,7 @@ static int ext4_alloc_group_tables(struct super_block *sb,
243 ext4_group_t group; 243 ext4_group_t group;
244 ext4_group_t last_group; 244 ext4_group_t last_group;
245 unsigned overhead; 245 unsigned overhead;
246 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
246 247
247 BUG_ON(flex_gd->count == 0 || group_data == NULL); 248 BUG_ON(flex_gd->count == 0 || group_data == NULL);
248 249
@@ -266,7 +267,7 @@ next_group:
266 src_group++; 267 src_group++;
267 for (; src_group <= last_group; src_group++) { 268 for (; src_group <= last_group; src_group++) {
268 overhead = ext4_group_overhead_blocks(sb, src_group); 269 overhead = ext4_group_overhead_blocks(sb, src_group);
269 if (overhead != 0) 270 if (overhead == 0)
270 last_blk += group_data[src_group - group].blocks_count; 271 last_blk += group_data[src_group - group].blocks_count;
271 else 272 else
272 break; 273 break;
@@ -280,8 +281,7 @@ next_group:
280 group = ext4_get_group_number(sb, start_blk - 1); 281 group = ext4_get_group_number(sb, start_blk - 1);
281 group -= group_data[0].group; 282 group -= group_data[0].group;
282 group_data[group].free_blocks_count--; 283 group_data[group].free_blocks_count--;
283 if (flexbg_size > 1) 284 flex_gd->bg_flags[group] &= uninit_mask;
284 flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
285 } 285 }
286 286
287 /* Allocate inode bitmaps */ 287 /* Allocate inode bitmaps */
@@ -292,22 +292,30 @@ next_group:
292 group = ext4_get_group_number(sb, start_blk - 1); 292 group = ext4_get_group_number(sb, start_blk - 1);
293 group -= group_data[0].group; 293 group -= group_data[0].group;
294 group_data[group].free_blocks_count--; 294 group_data[group].free_blocks_count--;
295 if (flexbg_size > 1) 295 flex_gd->bg_flags[group] &= uninit_mask;
296 flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
297 } 296 }
298 297
299 /* Allocate inode tables */ 298 /* Allocate inode tables */
300 for (; it_index < flex_gd->count; it_index++) { 299 for (; it_index < flex_gd->count; it_index++) {
301 if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk) 300 unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
301 ext4_fsblk_t next_group_start;
302
303 if (start_blk + itb > last_blk)
302 goto next_group; 304 goto next_group;
303 group_data[it_index].inode_table = start_blk; 305 group_data[it_index].inode_table = start_blk;
304 group = ext4_get_group_number(sb, start_blk - 1); 306 group = ext4_get_group_number(sb, start_blk);
307 next_group_start = ext4_group_first_block_no(sb, group + 1);
305 group -= group_data[0].group; 308 group -= group_data[0].group;
306 group_data[group].free_blocks_count -=
307 EXT4_SB(sb)->s_itb_per_group;
308 if (flexbg_size > 1)
309 flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
310 309
310 if (start_blk + itb > next_group_start) {
311 flex_gd->bg_flags[group + 1] &= uninit_mask;
312 overhead = start_blk + itb - next_group_start;
313 group_data[group + 1].free_blocks_count -= overhead;
314 itb -= overhead;
315 }
316
317 group_data[group].free_blocks_count -= itb;
318 flex_gd->bg_flags[group] &= uninit_mask;
311 start_blk += EXT4_SB(sb)->s_itb_per_group; 319 start_blk += EXT4_SB(sb)->s_itb_per_group;
312 } 320 }
313 321
@@ -401,7 +409,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
401 start = ext4_group_first_block_no(sb, group); 409 start = ext4_group_first_block_no(sb, group);
402 group -= flex_gd->groups[0].group; 410 group -= flex_gd->groups[0].group;
403 411
404 count2 = sb->s_blocksize * 8 - (block - start); 412 count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
405 if (count2 > count) 413 if (count2 > count)
406 count2 = count; 414 count2 = count;
407 415
@@ -620,7 +628,7 @@ handle_ib:
620 if (err) 628 if (err)
621 goto out; 629 goto out;
622 count = group_table_count[j]; 630 count = group_table_count[j];
623 start = group_data[i].block_bitmap; 631 start = (&group_data[i].block_bitmap)[j];
624 block = start; 632 block = start;
625 } 633 }
626 634
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1f7784de05b6..710fed2377d4 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3695,16 +3695,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3695 for (i = 0; i < 4; i++) 3695 for (i = 0; i < 4; i++)
3696 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 3696 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
3697 sbi->s_def_hash_version = es->s_def_hash_version; 3697 sbi->s_def_hash_version = es->s_def_hash_version;
3698 i = le32_to_cpu(es->s_flags); 3698 if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
3699 if (i & EXT2_FLAGS_UNSIGNED_HASH) 3699 i = le32_to_cpu(es->s_flags);
3700 sbi->s_hash_unsigned = 3; 3700 if (i & EXT2_FLAGS_UNSIGNED_HASH)
3701 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 3701 sbi->s_hash_unsigned = 3;
3702 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
3702#ifdef __CHAR_UNSIGNED__ 3703#ifdef __CHAR_UNSIGNED__
3703 es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 3704 if (!(sb->s_flags & MS_RDONLY))
3704 sbi->s_hash_unsigned = 3; 3705 es->s_flags |=
3706 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
3707 sbi->s_hash_unsigned = 3;
3705#else 3708#else
3706 es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 3709 if (!(sb->s_flags & MS_RDONLY))
3710 es->s_flags |=
3711 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
3707#endif 3712#endif
3713 }
3708 } 3714 }
3709 3715
3710 /* Handle clustersize */ 3716 /* Handle clustersize */
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index e1959efad64f..b5ebc2d7d80d 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -50,6 +50,8 @@ void fscache_objlist_add(struct fscache_object *obj)
50 struct fscache_object *xobj; 50 struct fscache_object *xobj;
51 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL; 51 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
52 52
53 ASSERT(RB_EMPTY_NODE(&obj->objlist_link));
54
53 write_lock(&fscache_object_list_lock); 55 write_lock(&fscache_object_list_lock);
54 56
55 while (*p) { 57 while (*p) {
@@ -75,6 +77,9 @@ void fscache_objlist_add(struct fscache_object *obj)
75 */ 77 */
76void fscache_objlist_remove(struct fscache_object *obj) 78void fscache_objlist_remove(struct fscache_object *obj)
77{ 79{
80 if (RB_EMPTY_NODE(&obj->objlist_link))
81 return;
82
78 write_lock(&fscache_object_list_lock); 83 write_lock(&fscache_object_list_lock);
79 84
80 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list)); 85 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 53d35c504240..d3b4539f1651 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -314,6 +314,9 @@ void fscache_object_init(struct fscache_object *object,
314 object->cache = cache; 314 object->cache = cache;
315 object->cookie = cookie; 315 object->cookie = cookie;
316 object->parent = NULL; 316 object->parent = NULL;
317#ifdef CONFIG_FSCACHE_OBJECT_LIST
318 RB_CLEAR_NODE(&object->objlist_link);
319#endif
317 320
318 object->oob_event_mask = 0; 321 object->oob_event_mask = 0;
319 for (t = object->oob_table; t->events; t++) 322 for (t = object->oob_table; t->events; t++)
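
The two fscache hunks above work as a pair: object init clears the objlist rbtree node, the add path asserts it is still clear, and the remove path becomes a no-op for objects that were never put on the list. The trick is the self-parent sentinel that RB_CLEAR_NODE()/RB_EMPTY_NODE() rely on; the sketch below imitates it with a trivial stand-in structure rather than the kernel rbtree helpers.

#include <assert.h>
#include <stdio.h>

struct node { struct node *parent; };

static void node_clear(struct node *n)       { n->parent = n; }  /* RB_CLEAR_NODE */
static int  node_empty(const struct node *n) { return n->parent == n; }

static void list_add(struct node *n, struct node *parent)
{
	assert(node_empty(n));          /* must not already be linked */
	n->parent = parent;
}

static void list_remove(struct node *n)
{
	if (node_empty(n))              /* never added: nothing to erase */
		return;
	node_clear(n);
}

int main(void)
{
	struct node root = { &root };
	struct node obj;

	node_clear(&obj);               /* done in fscache_object_init() above */
	list_remove(&obj);              /* safe no-op */
	list_add(&obj, &root);
	list_remove(&obj);
	printf("empty=%d\n", node_empty(&obj));
	return 0;
}
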
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 8360674c85bc..60bb365f54a5 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -514,11 +514,13 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
514 * similarly constrained call sites 514 * similarly constrained call sites
515 */ 515 */
516 ret = start_this_handle(journal, handle, GFP_NOFS); 516 ret = start_this_handle(journal, handle, GFP_NOFS);
517 if (ret < 0) 517 if (ret < 0) {
518 jbd2_journal_free_reserved(handle); 518 jbd2_journal_free_reserved(handle);
519 return ret;
520 }
519 handle->h_type = type; 521 handle->h_type = type;
520 handle->h_line_no = line_no; 522 handle->h_line_no = line_no;
521 return ret; 523 return 0;
522} 524}
523EXPORT_SYMBOL(jbd2_journal_start_reserved); 525EXPORT_SYMBOL(jbd2_journal_start_reserved);
524 526
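
The jbd2 change above makes the failure path of jbd2_journal_start_reserved() return immediately after freeing the reserved handle, so the h_type/h_line_no assignments can no longer touch a freed handle, and the success path explicitly returns 0. A loose userspace sketch of that shape, using malloc/free in place of the jbd2 handle cache:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct handle { int type; int line_no; };

static int start_handle(int fail)
{
	return fail ? -ENOMEM : 0;
}

static int journal_start_reserved(struct handle *h, int type, int line_no, int fail)
{
	int ret = start_handle(fail);

	if (ret < 0) {
		free(h);          /* jbd2_journal_free_reserved() in the patch */
		return ret;       /* previously fell through and wrote into *h */
	}
	h->type = type;
	h->line_no = line_no;
	return 0;
}

int main(void)
{
	struct handle *h = malloc(sizeof(*h));
	int ret;

	if (!h)
		return 1;
	ret = journal_start_reserved(h, 1, __LINE__, 0);
	if (ret == 0)
		free(h);          /* on failure the callee already freed it */
	printf("%d\n", ret);
	return 0;
}
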
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index e973b85d6afd..5a8ea16eedbc 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -86,6 +86,8 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
86 rc = posix_acl_equiv_mode(acl, &inode->i_mode); 86 rc = posix_acl_equiv_mode(acl, &inode->i_mode);
87 if (rc < 0) 87 if (rc < 0)
88 return rc; 88 return rc;
89 inode->i_ctime = CURRENT_TIME;
90 mark_inode_dirty(inode);
89 if (rc == 0) 91 if (rc == 0)
90 acl = NULL; 92 acl = NULL;
91 break; 93 break;
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 2b7882b508db..9a3c68cf6026 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -324,23 +324,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
324 switch (flag) { 324 switch (flag) {
325 case M_INSERT: /* insert item into L[0] */ 325 case M_INSERT: /* insert item into L[0] */
326 326
327 if (item_pos == tb->lnum[0] - 1 327 if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
328 && tb->lbytes != -1) {
329 /* part of new item falls into L[0] */ 328 /* part of new item falls into L[0] */
330 int new_item_len; 329 int new_item_len;
331 int version; 330 int version;
332 331
333 ret_val = 332 ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
334 leaf_shift_left(tb, tb->lnum[0] - 1,
335 -1);
336 333
337 /* Calculate item length to insert to S[0] */ 334 /* Calculate item length to insert to S[0] */
338 new_item_len = 335 new_item_len = ih_item_len(ih) - tb->lbytes;
339 ih_item_len(ih) - tb->lbytes;
340 /* Calculate and check item length to insert to L[0] */ 336 /* Calculate and check item length to insert to L[0] */
341 put_ih_item_len(ih, 337 put_ih_item_len(ih, ih_item_len(ih) - new_item_len);
342 ih_item_len(ih) -
343 new_item_len);
344 338
345 RFALSE(ih_item_len(ih) <= 0, 339 RFALSE(ih_item_len(ih) <= 0,
346 "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d", 340 "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
@@ -349,30 +343,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
349 /* Insert new item into L[0] */ 343 /* Insert new item into L[0] */
350 buffer_info_init_left(tb, &bi); 344 buffer_info_init_left(tb, &bi);
351 leaf_insert_into_buf(&bi, 345 leaf_insert_into_buf(&bi,
352 n + item_pos - 346 n + item_pos - ret_val, ih, body,
353 ret_val, ih, body, 347 zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num);
354 zeros_num >
355 ih_item_len(ih) ?
356 ih_item_len(ih) :
357 zeros_num);
358 348
359 version = ih_version(ih); 349 version = ih_version(ih);
360 350
361 /* Calculate key component, item length and body to insert into S[0] */ 351 /* Calculate key component, item length and body to insert into S[0] */
362 set_le_ih_k_offset(ih, 352 set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
363 le_ih_k_offset(ih) + 353 (tb-> lbytes << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
364 (tb->
365 lbytes <<
366 (is_indirect_le_ih
367 (ih) ? tb->tb_sb->
368 s_blocksize_bits -
369 UNFM_P_SHIFT :
370 0)));
371 354
372 put_ih_item_len(ih, new_item_len); 355 put_ih_item_len(ih, new_item_len);
373 if (tb->lbytes > zeros_num) { 356 if (tb->lbytes > zeros_num) {
374 body += 357 body += (tb->lbytes - zeros_num);
375 (tb->lbytes - zeros_num);
376 zeros_num = 0; 358 zeros_num = 0;
377 } else 359 } else
378 zeros_num -= tb->lbytes; 360 zeros_num -= tb->lbytes;
@@ -383,15 +365,10 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
383 } else { 365 } else {
384 /* new item in whole falls into L[0] */ 366 /* new item in whole falls into L[0] */
385 /* Shift lnum[0]-1 items to L[0] */ 367 /* Shift lnum[0]-1 items to L[0] */
386 ret_val = 368 ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);
387 leaf_shift_left(tb, tb->lnum[0] - 1,
388 tb->lbytes);
389 /* Insert new item into L[0] */ 369 /* Insert new item into L[0] */
390 buffer_info_init_left(tb, &bi); 370 buffer_info_init_left(tb, &bi);
391 leaf_insert_into_buf(&bi, 371 leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num);
392 n + item_pos -
393 ret_val, ih, body,
394 zeros_num);
395 tb->insert_size[0] = 0; 372 tb->insert_size[0] = 0;
396 zeros_num = 0; 373 zeros_num = 0;
397 } 374 }
@@ -399,264 +376,117 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
399 376
400 case M_PASTE: /* append item in L[0] */ 377 case M_PASTE: /* append item in L[0] */
401 378
402 if (item_pos == tb->lnum[0] - 1 379 if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
403 && tb->lbytes != -1) {
404 /* we must shift the part of the appended item */ 380 /* we must shift the part of the appended item */
405 if (is_direntry_le_ih 381 if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) {
406 (B_N_PITEM_HEAD(tbS0, item_pos))) {
407 382
408 RFALSE(zeros_num, 383 RFALSE(zeros_num,
409 "PAP-12090: invalid parameter in case of a directory"); 384 "PAP-12090: invalid parameter in case of a directory");
410 /* directory item */ 385 /* directory item */
411 if (tb->lbytes > pos_in_item) { 386 if (tb->lbytes > pos_in_item) {
412 /* new directory entry falls into L[0] */ 387 /* new directory entry falls into L[0] */
413 struct item_head 388 struct item_head *pasted;
414 *pasted; 389 int l_pos_in_item = pos_in_item;
415 int l_pos_in_item =
416 pos_in_item;
417 390
418 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */ 391 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
419 ret_val = 392 ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1);
420 leaf_shift_left(tb, 393 if (ret_val && !item_pos) {
421 tb-> 394 pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
422 lnum 395 l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1);
423 [0],
424 tb->
425 lbytes
426 -
427 1);
428 if (ret_val
429 && !item_pos) {
430 pasted =
431 B_N_PITEM_HEAD
432 (tb->L[0],
433 B_NR_ITEMS
434 (tb->
435 L[0]) -
436 1);
437 l_pos_in_item +=
438 I_ENTRY_COUNT
439 (pasted) -
440 (tb->
441 lbytes -
442 1);
443 } 396 }
444 397
445 /* Append given directory entry to directory item */ 398 /* Append given directory entry to directory item */
446 buffer_info_init_left(tb, &bi); 399 buffer_info_init_left(tb, &bi);
447 leaf_paste_in_buffer 400 leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num);
448 (&bi,
449 n + item_pos -
450 ret_val,
451 l_pos_in_item,
452 tb->insert_size[0],
453 body, zeros_num);
454 401
455 /* previous string prepared space for pasting new entry, following string pastes this entry */ 402 /* previous string prepared space for pasting new entry, following string pastes this entry */
456 403
457 /* when we have merge directory item, pos_in_item has been changed too */ 404 /* when we have merge directory item, pos_in_item has been changed too */
458 405
459 /* paste new directory entry. 1 is entry number */ 406 /* paste new directory entry. 1 is entry number */
460 leaf_paste_entries(&bi, 407 leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item,
461 n + 408 1, (struct reiserfs_de_head *) body,
462 item_pos 409 body + DEH_SIZE, tb->insert_size[0]);
463 -
464 ret_val,
465 l_pos_in_item,
466 1,
467 (struct
468 reiserfs_de_head
469 *)
470 body,
471 body
472 +
473 DEH_SIZE,
474 tb->
475 insert_size
476 [0]
477 );
478 tb->insert_size[0] = 0; 410 tb->insert_size[0] = 0;
479 } else { 411 } else {
480 /* new directory item doesn't fall into L[0] */ 412 /* new directory item doesn't fall into L[0] */
481 /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */ 413 /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
482 leaf_shift_left(tb, 414 leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
483 tb->
484 lnum[0],
485 tb->
486 lbytes);
487 } 415 }
488 /* Calculate new position to append in item body */ 416 /* Calculate new position to append in item body */
489 pos_in_item -= tb->lbytes; 417 pos_in_item -= tb->lbytes;
490 } else { 418 } else {
491 /* regular object */ 419 /* regular object */
492 RFALSE(tb->lbytes <= 0, 420 RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes);
493 "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", 421 RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
494 tb->lbytes);
495 RFALSE(pos_in_item !=
496 ih_item_len
497 (B_N_PITEM_HEAD
498 (tbS0, item_pos)),
499 "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d", 422 "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
500 ih_item_len 423 ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),pos_in_item);
501 (B_N_PITEM_HEAD
502 (tbS0, item_pos)),
503 pos_in_item);
504 424
505 if (tb->lbytes >= pos_in_item) { 425 if (tb->lbytes >= pos_in_item) {
506 /* appended item will be in L[0] in whole */ 426 /* appended item will be in L[0] in whole */
507 int l_n; 427 int l_n;
508 428
509 /* this bytes number must be appended to the last item of L[h] */ 429 /* this bytes number must be appended to the last item of L[h] */
510 l_n = 430 l_n = tb->lbytes - pos_in_item;
511 tb->lbytes -
512 pos_in_item;
513 431
514 /* Calculate new insert_size[0] */ 432 /* Calculate new insert_size[0] */
515 tb->insert_size[0] -= 433 tb->insert_size[0] -= l_n;
516 l_n;
517 434
518 RFALSE(tb-> 435 RFALSE(tb->insert_size[0] <= 0,
519 insert_size[0] <=
520 0,
521 "PAP-12105: there is nothing to paste into L[0]. insert_size=%d", 436 "PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
522 tb-> 437 tb->insert_size[0]);
523 insert_size[0]); 438 ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len
524 ret_val = 439 (B_N_PITEM_HEAD(tbS0, item_pos)));
525 leaf_shift_left(tb,
526 tb->
527 lnum
528 [0],
529 ih_item_len
530 (B_N_PITEM_HEAD
531 (tbS0,
532 item_pos)));
533 /* Append to body of item in L[0] */ 440 /* Append to body of item in L[0] */
534 buffer_info_init_left(tb, &bi); 441 buffer_info_init_left(tb, &bi);
535 leaf_paste_in_buffer 442 leaf_paste_in_buffer
536 (&bi, 443 (&bi, n + item_pos - ret_val, ih_item_len
537 n + item_pos - 444 (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)),
538 ret_val, 445 l_n, body,
539 ih_item_len 446 zeros_num > l_n ? l_n : zeros_num);
540 (B_N_PITEM_HEAD
541 (tb->L[0],
542 n + item_pos -
543 ret_val)), l_n,
544 body,
545 zeros_num >
546 l_n ? l_n :
547 zeros_num);
548 /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */ 447 /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */
549 { 448 {
550 int version; 449 int version;
551 int temp_l = 450 int temp_l = l_n;
552 l_n; 451
553 452 RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)),
554 RFALSE
555 (ih_item_len
556 (B_N_PITEM_HEAD
557 (tbS0,
558 0)),
559 "PAP-12106: item length must be 0"); 453 "PAP-12106: item length must be 0");
560 RFALSE 454 RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY
561 (comp_short_le_keys 455 (tb->L[0], n + item_pos - ret_val)),
562 (B_N_PKEY
563 (tbS0, 0),
564 B_N_PKEY
565 (tb->L[0],
566 n +
567 item_pos
568 -
569 ret_val)),
570 "PAP-12107: items must be of the same file"); 456 "PAP-12107: items must be of the same file");
571 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) { 457 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) {
572 temp_l = 458 temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT);
573 l_n
574 <<
575 (tb->
576 tb_sb->
577 s_blocksize_bits
578 -
579 UNFM_P_SHIFT);
580 } 459 }
581 /* update key of first item in S0 */ 460 /* update key of first item in S0 */
582 version = 461 version = ih_version(B_N_PITEM_HEAD(tbS0, 0));
583 ih_version 462 set_le_key_k_offset(version, B_N_PKEY(tbS0, 0),
584 (B_N_PITEM_HEAD 463 le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l);
585 (tbS0, 0));
586 set_le_key_k_offset
587 (version,
588 B_N_PKEY
589 (tbS0, 0),
590 le_key_k_offset
591 (version,
592 B_N_PKEY
593 (tbS0,
594 0)) +
595 temp_l);
596 /* update left delimiting key */ 464 /* update left delimiting key */
597 set_le_key_k_offset 465 set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
598 (version, 466 le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l);
599 B_N_PDELIM_KEY
600 (tb->
601 CFL[0],
602 tb->
603 lkey[0]),
604 le_key_k_offset
605 (version,
606 B_N_PDELIM_KEY
607 (tb->
608 CFL[0],
609 tb->
610 lkey[0]))
611 + temp_l);
612 } 467 }
613 468
614 /* Calculate new body, position in item and insert_size[0] */ 469 /* Calculate new body, position in item and insert_size[0] */
615 if (l_n > zeros_num) { 470 if (l_n > zeros_num) {
616 body += 471 body += (l_n - zeros_num);
617 (l_n -
618 zeros_num);
619 zeros_num = 0; 472 zeros_num = 0;
620 } else 473 } else
621 zeros_num -= 474 zeros_num -= l_n;
622 l_n;
623 pos_in_item = 0; 475 pos_in_item = 0;
624 476
625 RFALSE 477 RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1))
626 (comp_short_le_keys 478 || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)
627 (B_N_PKEY(tbS0, 0), 479 || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size),
628 B_N_PKEY(tb->L[0],
629 B_NR_ITEMS
630 (tb->
631 L[0]) -
632 1))
633 ||
634 !op_is_left_mergeable
635 (B_N_PKEY(tbS0, 0),
636 tbS0->b_size)
637 ||
638 !op_is_left_mergeable
639 (B_N_PDELIM_KEY
640 (tb->CFL[0],
641 tb->lkey[0]),
642 tbS0->b_size),
643 "PAP-12120: item must be merge-able with left neighboring item"); 480 "PAP-12120: item must be merge-able with left neighboring item");
644 } else { /* only part of the appended item will be in L[0] */ 481 } else { /* only part of the appended item will be in L[0] */
645 482
646 /* Calculate position in item for append in S[0] */ 483 /* Calculate position in item for append in S[0] */
647 pos_in_item -= 484 pos_in_item -= tb->lbytes;
648 tb->lbytes;
649 485
650 RFALSE(pos_in_item <= 0, 486 RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item);
651 "PAP-12125: no place for paste. pos_in_item=%d",
652 pos_in_item);
653 487
654 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 488 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
655 leaf_shift_left(tb, 489 leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
656 tb->
657 lnum[0],
658 tb->
659 lbytes);
660 } 490 }
661 } 491 }
662 } else { /* appended item will be in L[0] in whole */ 492 } else { /* appended item will be in L[0] in whole */
@@ -665,52 +495,30 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
665 495
666 if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */ 496 if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */
667 /* then increment pos_in_item by the size of the last item in L[0] */ 497 /* then increment pos_in_item by the size of the last item in L[0] */
668 pasted = 498 pasted = B_N_PITEM_HEAD(tb->L[0], n - 1);
669 B_N_PITEM_HEAD(tb->L[0],
670 n - 1);
671 if (is_direntry_le_ih(pasted)) 499 if (is_direntry_le_ih(pasted))
672 pos_in_item += 500 pos_in_item += ih_entry_count(pasted);
673 ih_entry_count
674 (pasted);
675 else 501 else
676 pos_in_item += 502 pos_in_item += ih_item_len(pasted);
677 ih_item_len(pasted);
678 } 503 }
679 504
680 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 505 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
681 ret_val = 506 ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
682 leaf_shift_left(tb, tb->lnum[0],
683 tb->lbytes);
684 /* Append to body of item in L[0] */ 507 /* Append to body of item in L[0] */
685 buffer_info_init_left(tb, &bi); 508 buffer_info_init_left(tb, &bi);
686 leaf_paste_in_buffer(&bi, 509 leaf_paste_in_buffer(&bi, n + item_pos - ret_val,
687 n + item_pos -
688 ret_val,
689 pos_in_item, 510 pos_in_item,
690 tb->insert_size[0], 511 tb->insert_size[0],
691 body, zeros_num); 512 body, zeros_num);
692 513
693 /* if appended item is directory, paste entry */ 514 /* if appended item is directory, paste entry */
694 pasted = 515 pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val);
695 B_N_PITEM_HEAD(tb->L[0],
696 n + item_pos -
697 ret_val);
698 if (is_direntry_le_ih(pasted)) 516 if (is_direntry_le_ih(pasted))
699 leaf_paste_entries(&bi, 517 leaf_paste_entries(&bi, n + item_pos - ret_val,
700 n + 518 pos_in_item, 1,
701 item_pos - 519 (struct reiserfs_de_head *) body,
702 ret_val, 520 body + DEH_SIZE,
703 pos_in_item, 521 tb->insert_size[0]);
704 1,
705 (struct
706 reiserfs_de_head
707 *)body,
708 body +
709 DEH_SIZE,
710 tb->
711 insert_size
712 [0]
713 );
714 /* if appended item is indirect item, put unformatted node into un list */ 522 /* if appended item is indirect item, put unformatted node into un list */
715 if (is_indirect_le_ih(pasted)) 523 if (is_indirect_le_ih(pasted))
716 set_ih_free_space(pasted, 0); 524 set_ih_free_space(pasted, 0);
@@ -722,13 +530,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
722 reiserfs_panic(tb->tb_sb, "PAP-12130", 530 reiserfs_panic(tb->tb_sb, "PAP-12130",
723 "lnum > 0: unexpected mode: " 531 "lnum > 0: unexpected mode: "
724 " %s(%d)", 532 " %s(%d)",
725 (flag == 533 (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
726 M_DELETE) ? "DELETE" : ((flag ==
727 M_CUT)
728 ? "CUT"
729 :
730 "UNKNOWN"),
731 flag);
732 } 534 }
733 } else { 535 } else {
734 /* new item doesn't fall into L[0] */ 536 /* new item doesn't fall into L[0] */
@@ -748,14 +550,12 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
748 case M_INSERT: /* insert item */ 550 case M_INSERT: /* insert item */
749 if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */ 551 if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */
750 if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */ 552 if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */
751 loff_t old_key_comp, old_len, 553 loff_t old_key_comp, old_len, r_zeros_number;
752 r_zeros_number;
753 const char *r_body; 554 const char *r_body;
754 int version; 555 int version;
755 loff_t offset; 556 loff_t offset;
756 557
757 leaf_shift_right(tb, tb->rnum[0] - 1, 558 leaf_shift_right(tb, tb->rnum[0] - 1, -1);
758 -1);
759 559
760 version = ih_version(ih); 560 version = ih_version(ih);
761 /* Remember key component and item length */ 561 /* Remember key component and item length */
@@ -763,29 +563,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
763 old_len = ih_item_len(ih); 563 old_len = ih_item_len(ih);
764 564
765 /* Calculate key component and item length to insert into R[0] */ 565 /* Calculate key component and item length to insert into R[0] */
766 offset = 566 offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0));
767 le_ih_k_offset(ih) +
768 ((old_len -
769 tb->
770 rbytes) << (is_indirect_le_ih(ih)
771 ? tb->tb_sb->
772 s_blocksize_bits -
773 UNFM_P_SHIFT : 0));
774 set_le_ih_k_offset(ih, offset); 567 set_le_ih_k_offset(ih, offset);
775 put_ih_item_len(ih, tb->rbytes); 568 put_ih_item_len(ih, tb->rbytes);
776 /* Insert part of the item into R[0] */ 569 /* Insert part of the item into R[0] */
777 buffer_info_init_right(tb, &bi); 570 buffer_info_init_right(tb, &bi);
778 if ((old_len - tb->rbytes) > zeros_num) { 571 if ((old_len - tb->rbytes) > zeros_num) {
779 r_zeros_number = 0; 572 r_zeros_number = 0;
780 r_body = 573 r_body = body + (old_len - tb->rbytes) - zeros_num;
781 body + (old_len -
782 tb->rbytes) -
783 zeros_num;
784 } else { 574 } else {
785 r_body = body; 575 r_body = body;
786 r_zeros_number = 576 r_zeros_number = zeros_num - (old_len - tb->rbytes);
787 zeros_num - (old_len -
788 tb->rbytes);
789 zeros_num -= r_zeros_number; 577 zeros_num -= r_zeros_number;
790 } 578 }
791 579
@@ -798,25 +586,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
798 586
799 /* Calculate key component and item length to insert into S[0] */ 587 /* Calculate key component and item length to insert into S[0] */
800 set_le_ih_k_offset(ih, old_key_comp); 588 set_le_ih_k_offset(ih, old_key_comp);
801 put_ih_item_len(ih, 589 put_ih_item_len(ih, old_len - tb->rbytes);
802 old_len - tb->rbytes);
803 590
804 tb->insert_size[0] -= tb->rbytes; 591 tb->insert_size[0] -= tb->rbytes;
805 592
806 } else { /* whole new item falls into R[0] */ 593 } else { /* whole new item falls into R[0] */
807 594
808 /* Shift rnum[0]-1 items to R[0] */ 595 /* Shift rnum[0]-1 items to R[0] */
809 ret_val = 596 ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
810 leaf_shift_right(tb,
811 tb->rnum[0] - 1,
812 tb->rbytes);
813 /* Insert new item into R[0] */ 597 /* Insert new item into R[0] */
814 buffer_info_init_right(tb, &bi); 598 buffer_info_init_right(tb, &bi);
815 leaf_insert_into_buf(&bi, 599 leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1,
816 item_pos - n + 600 ih, body, zeros_num);
817 tb->rnum[0] - 1,
818 ih, body,
819 zeros_num);
820 601
821 if (item_pos - n + tb->rnum[0] - 1 == 0) { 602 if (item_pos - n + tb->rnum[0] - 1 == 0) {
822 replace_key(tb, tb->CFR[0], 603 replace_key(tb, tb->CFR[0],
@@ -841,200 +622,97 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
841 622
842 RFALSE(zeros_num, 623 RFALSE(zeros_num,
843 "PAP-12145: invalid parameter in case of a directory"); 624 "PAP-12145: invalid parameter in case of a directory");
844 entry_count = 625 entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD
845 I_ENTRY_COUNT(B_N_PITEM_HEAD 626 (tbS0, item_pos));
846 (tbS0,
847 item_pos));
848 if (entry_count - tb->rbytes < 627 if (entry_count - tb->rbytes <
849 pos_in_item) 628 pos_in_item)
850 /* new directory entry falls into R[0] */ 629 /* new directory entry falls into R[0] */
851 { 630 {
852 int paste_entry_position; 631 int paste_entry_position;
853 632
854 RFALSE(tb->rbytes - 1 >= 633 RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0],
855 entry_count
856 || !tb->
857 insert_size[0],
858 "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d", 634 "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d",
859 tb->rbytes, 635 tb->rbytes, entry_count);
860 entry_count);
861 /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */ 636 /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
862 leaf_shift_right(tb, 637 leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);
863 tb->
864 rnum
865 [0],
866 tb->
867 rbytes
868 - 1);
869 /* Paste given directory entry to directory item */ 638 /* Paste given directory entry to directory item */
870 paste_entry_position = 639 paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1;
871 pos_in_item -
872 entry_count +
873 tb->rbytes - 1;
874 buffer_info_init_right(tb, &bi); 640 buffer_info_init_right(tb, &bi);
875 leaf_paste_in_buffer 641 leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num);
876 (&bi, 0,
877 paste_entry_position,
878 tb->insert_size[0],
879 body, zeros_num);
880 /* paste entry */ 642 /* paste entry */
881 leaf_paste_entries(&bi, 643 leaf_paste_entries(&bi, 0, paste_entry_position, 1,
882 0, 644 (struct reiserfs_de_head *) body,
883 paste_entry_position, 645 body + DEH_SIZE, tb->insert_size[0]);
884 1, 646
885 (struct 647 if (paste_entry_position == 0) {
886 reiserfs_de_head
887 *)
888 body,
889 body
890 +
891 DEH_SIZE,
892 tb->
893 insert_size
894 [0]
895 );
896
897 if (paste_entry_position
898 == 0) {
899 /* change delimiting keys */ 648 /* change delimiting keys */
900 replace_key(tb, 649 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0);
901 tb->
902 CFR
903 [0],
904 tb->
905 rkey
906 [0],
907 tb->
908 R
909 [0],
910 0);
911 } 650 }
912 651
913 tb->insert_size[0] = 0; 652 tb->insert_size[0] = 0;
914 pos_in_item++; 653 pos_in_item++;
915 } else { /* new directory entry doesn't fall into R[0] */ 654 } else { /* new directory entry doesn't fall into R[0] */
916 655
917 leaf_shift_right(tb, 656 leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
918 tb->
919 rnum
920 [0],
921 tb->
922 rbytes);
923 } 657 }
924 } else { /* regular object */ 658 } else { /* regular object */
925 659
926 int n_shift, n_rem, 660 int n_shift, n_rem, r_zeros_number;
927 r_zeros_number;
928 const char *r_body; 661 const char *r_body;
929 662
930 /* Calculate number of bytes which must be shifted from appended item */ 663 /* Calculate number of bytes which must be shifted from appended item */
931 if ((n_shift = 664 if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0)
932 tb->rbytes -
933 tb->insert_size[0]) < 0)
934 n_shift = 0; 665 n_shift = 0;
935 666
936 RFALSE(pos_in_item != 667 RFALSE(pos_in_item != ih_item_len
937 ih_item_len 668 (B_N_PITEM_HEAD(tbS0, item_pos)),
938 (B_N_PITEM_HEAD
939 (tbS0, item_pos)),
940 "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d", 669 "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
941 pos_in_item, 670 pos_in_item, ih_item_len
942 ih_item_len 671 (B_N_PITEM_HEAD(tbS0, item_pos)));
943 (B_N_PITEM_HEAD 672
944 (tbS0, item_pos))); 673 leaf_shift_right(tb, tb->rnum[0], n_shift);
945
946 leaf_shift_right(tb,
947 tb->rnum[0],
948 n_shift);
949 /* Calculate number of bytes which must remain in body after appending to R[0] */ 674 /* Calculate number of bytes which must remain in body after appending to R[0] */
950 if ((n_rem = 675 if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0)
951 tb->insert_size[0] -
952 tb->rbytes) < 0)
953 n_rem = 0; 676 n_rem = 0;
954 677
955 { 678 {
956 int version; 679 int version;
957 unsigned long temp_rem = 680 unsigned long temp_rem = n_rem;
958 n_rem; 681
959 682 version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0));
960 version = 683 if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) {
961 ih_version 684 temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT);
962 (B_N_PITEM_HEAD
963 (tb->R[0], 0));
964 if (is_indirect_le_key
965 (version,
966 B_N_PKEY(tb->R[0],
967 0))) {
968 temp_rem =
969 n_rem <<
970 (tb->tb_sb->
971 s_blocksize_bits
972 -
973 UNFM_P_SHIFT);
974 } 685 }
975 set_le_key_k_offset 686 set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0),
976 (version, 687 le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem);
977 B_N_PKEY(tb->R[0], 688 set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]),
978 0), 689 le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem);
979 le_key_k_offset
980 (version,
981 B_N_PKEY(tb->R[0],
982 0)) +
983 temp_rem);
984 set_le_key_k_offset
985 (version,
986 B_N_PDELIM_KEY(tb->
987 CFR
988 [0],
989 tb->
990 rkey
991 [0]),
992 le_key_k_offset
993 (version,
994 B_N_PDELIM_KEY
995 (tb->CFR[0],
996 tb->rkey[0])) +
997 temp_rem);
998 } 690 }
999/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem; 691/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
1000 k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/ 692 k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/
1001 do_balance_mark_internal_dirty 693 do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);
1002 (tb, tb->CFR[0], 0);
1003 694
1004 /* Append part of body into R[0] */ 695 /* Append part of body into R[0] */
1005 buffer_info_init_right(tb, &bi); 696 buffer_info_init_right(tb, &bi);
1006 if (n_rem > zeros_num) { 697 if (n_rem > zeros_num) {
1007 r_zeros_number = 0; 698 r_zeros_number = 0;
1008 r_body = 699 r_body = body + n_rem - zeros_num;
1009 body + n_rem -
1010 zeros_num;
1011 } else { 700 } else {
1012 r_body = body; 701 r_body = body;
1013 r_zeros_number = 702 r_zeros_number = zeros_num - n_rem;
1014 zeros_num - n_rem; 703 zeros_num -= r_zeros_number;
1015 zeros_num -=
1016 r_zeros_number;
1017 } 704 }
1018 705
1019 leaf_paste_in_buffer(&bi, 0, 706 leaf_paste_in_buffer(&bi, 0, n_shift,
1020 n_shift, 707 tb->insert_size[0] - n_rem,
1021 tb-> 708 r_body, r_zeros_number);
1022 insert_size 709
1023 [0] - 710 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) {
1024 n_rem,
1025 r_body,
1026 r_zeros_number);
1027
1028 if (is_indirect_le_ih
1029 (B_N_PITEM_HEAD
1030 (tb->R[0], 0))) {
1031#if 0 711#if 0
1032 RFALSE(n_rem, 712 RFALSE(n_rem,
1033 "PAP-12160: paste more than one unformatted node pointer"); 713 "PAP-12160: paste more than one unformatted node pointer");
1034#endif 714#endif
1035 set_ih_free_space 715 set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0);
1036 (B_N_PITEM_HEAD
1037 (tb->R[0], 0), 0);
1038 } 716 }
1039 tb->insert_size[0] = n_rem; 717 tb->insert_size[0] = n_rem;
1040 if (!n_rem) 718 if (!n_rem)
@@ -1044,58 +722,28 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1044 722
1045 struct item_head *pasted; 723 struct item_head *pasted;
1046 724
1047 ret_val = 725 ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
1048 leaf_shift_right(tb, tb->rnum[0],
1049 tb->rbytes);
1050 /* append item in R[0] */ 726 /* append item in R[0] */
1051 if (pos_in_item >= 0) { 727 if (pos_in_item >= 0) {
1052 buffer_info_init_right(tb, &bi); 728 buffer_info_init_right(tb, &bi);
1053 leaf_paste_in_buffer(&bi, 729 leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item,
1054 item_pos - 730 tb->insert_size[0], body, zeros_num);
1055 n +
1056 tb->
1057 rnum[0],
1058 pos_in_item,
1059 tb->
1060 insert_size
1061 [0], body,
1062 zeros_num);
1063 } 731 }
1064 732
1065 /* paste new entry, if item is directory item */ 733 /* paste new entry, if item is directory item */
1066 pasted = 734 pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]);
1067 B_N_PITEM_HEAD(tb->R[0], 735 if (is_direntry_le_ih(pasted) && pos_in_item >= 0) {
1068 item_pos - n + 736 leaf_paste_entries(&bi, item_pos - n + tb->rnum[0],
1069 tb->rnum[0]); 737 pos_in_item, 1,
1070 if (is_direntry_le_ih(pasted) 738 (struct reiserfs_de_head *) body,
1071 && pos_in_item >= 0) { 739 body + DEH_SIZE, tb->insert_size[0]);
1072 leaf_paste_entries(&bi,
1073 item_pos -
1074 n +
1075 tb->rnum[0],
1076 pos_in_item,
1077 1,
1078 (struct
1079 reiserfs_de_head
1080 *)body,
1081 body +
1082 DEH_SIZE,
1083 tb->
1084 insert_size
1085 [0]
1086 );
1087 if (!pos_in_item) { 740 if (!pos_in_item) {
1088 741
1089 RFALSE(item_pos - n + 742 RFALSE(item_pos - n + tb->rnum[0],
1090 tb->rnum[0],
1091 "PAP-12165: directory item must be first item of node when pasting is in 0th position"); 743 "PAP-12165: directory item must be first item of node when pasting is in 0th position");
1092 744
1093 /* update delimiting keys */ 745 /* update delimiting keys */
1094 replace_key(tb, 746 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
1095 tb->CFR[0],
1096 tb->rkey[0],
1097 tb->R[0],
1098 0);
1099 } 747 }
1100 } 748 }
1101 749
@@ -1111,22 +759,16 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1111 default: /* cases d and t */ 759 default: /* cases d and t */
1112 reiserfs_panic(tb->tb_sb, "PAP-12175", 760 reiserfs_panic(tb->tb_sb, "PAP-12175",
1113 "rnum > 0: unexpected mode: %s(%d)", 761 "rnum > 0: unexpected mode: %s(%d)",
1114 (flag == 762 (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
1115 M_DELETE) ? "DELETE" : ((flag ==
1116 M_CUT) ? "CUT"
1117 : "UNKNOWN"),
1118 flag);
1119 } 763 }
1120 764
1121 } 765 }
1122 766
1123 /* tb->rnum[0] > 0 */ 767 /* tb->rnum[0] > 0 */
1124 RFALSE(tb->blknum[0] > 3, 768 RFALSE(tb->blknum[0] > 3,
1125 "PAP-12180: blknum can not be %d. It must be <= 3", 769 "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
1126 tb->blknum[0]);
1127 RFALSE(tb->blknum[0] < 0, 770 RFALSE(tb->blknum[0] < 0,
1128 "PAP-12185: blknum can not be %d. It must be >= 0", 771 "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);
1129 tb->blknum[0]);
1130 772
1131 /* if while adding to a node we discover that it is possible to split 773 /* if while adding to a node we discover that it is possible to split
1132 it in two, and merge the left part into the left neighbor and the 774 it in two, and merge the left part into the left neighbor and the
@@ -1177,8 +819,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1177 819
1178 if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */ 820 if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */
1179 if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */ 821 if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */
1180 int old_key_comp, old_len, 822 int old_key_comp, old_len, r_zeros_number;
1181 r_zeros_number;
1182 const char *r_body; 823 const char *r_body;
1183 int version; 824 int version;
1184 825
@@ -1192,15 +833,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1192 old_len = ih_item_len(ih); 833 old_len = ih_item_len(ih);
1193 834
1194 /* Calculate key component and item length to insert into S_new[i] */ 835 /* Calculate key component and item length to insert into S_new[i] */
1195 set_le_ih_k_offset(ih, 836 set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
1196 le_ih_k_offset(ih) + 837 ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
1197 ((old_len -
1198 sbytes[i]) <<
1199 (is_indirect_le_ih
1200 (ih) ? tb->tb_sb->
1201 s_blocksize_bits -
1202 UNFM_P_SHIFT :
1203 0)));
1204 838
1205 put_ih_item_len(ih, sbytes[i]); 839 put_ih_item_len(ih, sbytes[i]);
1206 840
@@ -1209,39 +843,29 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1209 843
1210 if ((old_len - sbytes[i]) > zeros_num) { 844 if ((old_len - sbytes[i]) > zeros_num) {
1211 r_zeros_number = 0; 845 r_zeros_number = 0;
1212 r_body = 846 r_body = body + (old_len - sbytes[i]) - zeros_num;
1213 body + (old_len -
1214 sbytes[i]) -
1215 zeros_num;
1216 } else { 847 } else {
1217 r_body = body; 848 r_body = body;
1218 r_zeros_number = 849 r_zeros_number = zeros_num - (old_len - sbytes[i]);
1219 zeros_num - (old_len -
1220 sbytes[i]);
1221 zeros_num -= r_zeros_number; 850 zeros_num -= r_zeros_number;
1222 } 851 }
1223 852
1224 leaf_insert_into_buf(&bi, 0, ih, r_body, 853 leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number);
1225 r_zeros_number);
1226 854
1227 /* Calculate key component and item length to insert into S[i] */ 855 /* Calculate key component and item length to insert into S[i] */
1228 set_le_ih_k_offset(ih, old_key_comp); 856 set_le_ih_k_offset(ih, old_key_comp);
1229 put_ih_item_len(ih, 857 put_ih_item_len(ih, old_len - sbytes[i]);
1230 old_len - sbytes[i]);
1231 tb->insert_size[0] -= sbytes[i]; 858 tb->insert_size[0] -= sbytes[i];
1232 } else { /* whole new item falls into S_new[i] */ 859 } else { /* whole new item falls into S_new[i] */
1233 860
1234 /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */ 861 /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
1235 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 862 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
1236 snum[i] - 1, sbytes[i], 863 snum[i] - 1, sbytes[i], S_new[i]);
1237 S_new[i]);
1238 864
1239 /* Insert new item into S_new[i] */ 865 /* Insert new item into S_new[i] */
1240 buffer_info_init_bh(tb, &bi, S_new[i]); 866 buffer_info_init_bh(tb, &bi, S_new[i]);
1241 leaf_insert_into_buf(&bi, 867 leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1,
1242 item_pos - n + 868 ih, body, zeros_num);
1243 snum[i] - 1, ih,
1244 body, zeros_num);
1245 869
1246 zeros_num = tb->insert_size[0] = 0; 870 zeros_num = tb->insert_size[0] = 0;
1247 } 871 }
@@ -1268,150 +892,73 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1268 892
1269 int entry_count; 893 int entry_count;
1270 894
1271 entry_count = 895 entry_count = ih_entry_count(aux_ih);
1272 ih_entry_count(aux_ih);
1273 896
1274 if (entry_count - sbytes[i] < 897 if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) {
1275 pos_in_item
1276 && pos_in_item <=
1277 entry_count) {
1278 /* new directory entry falls into S_new[i] */ 898 /* new directory entry falls into S_new[i] */
1279 899
1280 RFALSE(!tb-> 900 RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0");
1281 insert_size[0], 901 RFALSE(sbytes[i] - 1 >= entry_count,
1282 "PAP-12215: insert_size is already 0");
1283 RFALSE(sbytes[i] - 1 >=
1284 entry_count,
1285 "PAP-12220: there are no so much entries (%d), only %d", 902 "PAP-12220: there are no so much entries (%d), only %d",
1286 sbytes[i] - 1, 903 sbytes[i] - 1, entry_count);
1287 entry_count);
1288 904
1289 /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */ 905 /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */
1290 leaf_move_items 906 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]);
1291 (LEAF_FROM_S_TO_SNEW,
1292 tb, snum[i],
1293 sbytes[i] - 1,
1294 S_new[i]);
1295 /* Paste given directory entry to directory item */ 907 /* Paste given directory entry to directory item */
1296 buffer_info_init_bh(tb, &bi, S_new[i]); 908 buffer_info_init_bh(tb, &bi, S_new[i]);
1297 leaf_paste_in_buffer 909 leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1,
1298 (&bi, 0, 910 tb->insert_size[0], body, zeros_num);
1299 pos_in_item -
1300 entry_count +
1301 sbytes[i] - 1,
1302 tb->insert_size[0],
1303 body, zeros_num);
1304 /* paste new directory entry */ 911 /* paste new directory entry */
1305 leaf_paste_entries(&bi, 912 leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1,
1306 0, 913 (struct reiserfs_de_head *) body,
1307 pos_in_item 914 body + DEH_SIZE, tb->insert_size[0]);
1308 -
1309 entry_count
1310 +
1311 sbytes
1312 [i] -
1313 1, 1,
1314 (struct
1315 reiserfs_de_head
1316 *)
1317 body,
1318 body
1319 +
1320 DEH_SIZE,
1321 tb->
1322 insert_size
1323 [0]
1324 );
1325 tb->insert_size[0] = 0; 915 tb->insert_size[0] = 0;
1326 pos_in_item++; 916 pos_in_item++;
1327 } else { /* new directory entry doesn't fall into S_new[i] */ 917 } else { /* new directory entry doesn't fall into S_new[i] */
1328 leaf_move_items 918 leaf_move_items(LEAF_FROM_S_TO_SNEW,tb, snum[i], sbytes[i], S_new[i]);
1329 (LEAF_FROM_S_TO_SNEW,
1330 tb, snum[i],
1331 sbytes[i],
1332 S_new[i]);
1333 } 919 }
1334 } else { /* regular object */ 920 } else { /* regular object */
1335 921
1336 int n_shift, n_rem, 922 int n_shift, n_rem, r_zeros_number;
1337 r_zeros_number;
1338 const char *r_body; 923 const char *r_body;
1339 924
1340 RFALSE(pos_in_item != 925 RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0,
1341 ih_item_len
1342 (B_N_PITEM_HEAD
1343 (tbS0, item_pos))
1344 || tb->insert_size[0] <=
1345 0,
1346 "PAP-12225: item too short or insert_size <= 0"); 926 "PAP-12225: item too short or insert_size <= 0");
1347 927
1348 /* Calculate number of bytes which must be shifted from appended item */ 928 /* Calculate number of bytes which must be shifted from appended item */
1349 n_shift = 929 n_shift = sbytes[i] - tb->insert_size[0];
1350 sbytes[i] -
1351 tb->insert_size[0];
1352 if (n_shift < 0) 930 if (n_shift < 0)
1353 n_shift = 0; 931 n_shift = 0;
1354 leaf_move_items 932 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]);
1355 (LEAF_FROM_S_TO_SNEW, tb,
1356 snum[i], n_shift,
1357 S_new[i]);
1358 933
1359 /* Calculate number of bytes which must remain in body after append to S_new[i] */ 934 /* Calculate number of bytes which must remain in body after append to S_new[i] */
1360 n_rem = 935 n_rem = tb->insert_size[0] - sbytes[i];
1361 tb->insert_size[0] -
1362 sbytes[i];
1363 if (n_rem < 0) 936 if (n_rem < 0)
1364 n_rem = 0; 937 n_rem = 0;
1365 /* Append part of body into S_new[0] */ 938 /* Append part of body into S_new[0] */
1366 buffer_info_init_bh(tb, &bi, S_new[i]); 939 buffer_info_init_bh(tb, &bi, S_new[i]);
1367 if (n_rem > zeros_num) { 940 if (n_rem > zeros_num) {
1368 r_zeros_number = 0; 941 r_zeros_number = 0;
1369 r_body = 942 r_body = body + n_rem - zeros_num;
1370 body + n_rem -
1371 zeros_num;
1372 } else { 943 } else {
1373 r_body = body; 944 r_body = body;
1374 r_zeros_number = 945 r_zeros_number = zeros_num - n_rem;
1375 zeros_num - n_rem; 946 zeros_num -= r_zeros_number;
1376 zeros_num -=
1377 r_zeros_number;
1378 } 947 }
1379 948
1380 leaf_paste_in_buffer(&bi, 0, 949 leaf_paste_in_buffer(&bi, 0, n_shift,
1381 n_shift, 950 tb->insert_size[0] - n_rem,
1382 tb-> 951 r_body, r_zeros_number);
1383 insert_size
1384 [0] -
1385 n_rem,
1386 r_body,
1387 r_zeros_number);
1388 { 952 {
1389 struct item_head *tmp; 953 struct item_head *tmp;
1390 954
1391 tmp = 955 tmp = B_N_PITEM_HEAD(S_new[i], 0);
1392 B_N_PITEM_HEAD(S_new
1393 [i],
1394 0);
1395 if (is_indirect_le_ih 956 if (is_indirect_le_ih
1396 (tmp)) { 957 (tmp)) {
1397 set_ih_free_space 958 set_ih_free_space(tmp, 0);
1398 (tmp, 0); 959 set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT)));
1399 set_le_ih_k_offset
1400 (tmp,
1401 le_ih_k_offset
1402 (tmp) +
1403 (n_rem <<
1404 (tb->
1405 tb_sb->
1406 s_blocksize_bits
1407 -
1408 UNFM_P_SHIFT)));
1409 } else { 960 } else {
1410 set_le_ih_k_offset 961 set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem);
1411 (tmp,
1412 le_ih_k_offset
1413 (tmp) +
1414 n_rem);
1415 } 962 }
1416 } 963 }
1417 964
@@ -1426,8 +973,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1426 struct item_head *pasted; 973 struct item_head *pasted;
1427 974
1428#ifdef CONFIG_REISERFS_CHECK 975#ifdef CONFIG_REISERFS_CHECK
1429 struct item_head *ih_check = 976 struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos);
1430 B_N_PITEM_HEAD(tbS0, item_pos);
1431 977
1432 if (!is_direntry_le_ih(ih_check) 978 if (!is_direntry_le_ih(ih_check)
1433 && (pos_in_item != ih_item_len(ih_check) 979 && (pos_in_item != ih_item_len(ih_check)
@@ -1439,8 +985,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1439 "to ih_item_len"); 985 "to ih_item_len");
1440#endif /* CONFIG_REISERFS_CHECK */ 986#endif /* CONFIG_REISERFS_CHECK */
1441 987
1442 leaf_mi = 988 leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW,
1443 leaf_move_items(LEAF_FROM_S_TO_SNEW,
1444 tb, snum[i], 989 tb, snum[i],
1445 sbytes[i], 990 sbytes[i],
1446 S_new[i]); 991 S_new[i]);
@@ -1452,30 +997,19 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1452 /* paste into item */ 997 /* paste into item */
1453 buffer_info_init_bh(tb, &bi, S_new[i]); 998 buffer_info_init_bh(tb, &bi, S_new[i]);
1454 leaf_paste_in_buffer(&bi, 999 leaf_paste_in_buffer(&bi,
1455 item_pos - n + 1000 item_pos - n + snum[i],
1456 snum[i],
1457 pos_in_item, 1001 pos_in_item,
1458 tb->insert_size[0], 1002 tb->insert_size[0],
1459 body, zeros_num); 1003 body, zeros_num);
1460 1004
1461 pasted = 1005 pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]);
1462 B_N_PITEM_HEAD(S_new[i],
1463 item_pos - n +
1464 snum[i]);
1465 if (is_direntry_le_ih(pasted)) { 1006 if (is_direntry_le_ih(pasted)) {
1466 leaf_paste_entries(&bi, 1007 leaf_paste_entries(&bi,
1467 item_pos - 1008 item_pos - n + snum[i],
1468 n + snum[i], 1009 pos_in_item, 1,
1469 pos_in_item, 1010 (struct reiserfs_de_head *)body,
1470 1, 1011 body + DEH_SIZE,
1471 (struct 1012 tb->insert_size[0]
1472 reiserfs_de_head
1473 *)body,
1474 body +
1475 DEH_SIZE,
1476 tb->
1477 insert_size
1478 [0]
1479 ); 1013 );
1480 } 1014 }
1481 1015
@@ -1495,11 +1029,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1495 default: /* cases d and t */ 1029 default: /* cases d and t */
1496 reiserfs_panic(tb->tb_sb, "PAP-12245", 1030 reiserfs_panic(tb->tb_sb, "PAP-12245",
1497 "blknum > 2: unexpected mode: %s(%d)", 1031 "blknum > 2: unexpected mode: %s(%d)",
1498 (flag == 1032 (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
1499 M_DELETE) ? "DELETE" : ((flag ==
1500 M_CUT) ? "CUT"
1501 : "UNKNOWN"),
1502 flag);
1503 } 1033 }
1504 1034
1505 memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE); 1035 memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE);
@@ -1524,9 +1054,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1524 /* If we insert the first key change the delimiting key */ 1054 /* If we insert the first key change the delimiting key */
1525 if (item_pos == 0) { 1055 if (item_pos == 0) {
1526 if (tb->CFL[0]) /* can be 0 in reiserfsck */ 1056 if (tb->CFL[0]) /* can be 0 in reiserfsck */
1527 replace_key(tb, tb->CFL[0], tb->lkey[0], 1057 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
1528 tbS0, 0);
1529
1530 } 1058 }
1531 break; 1059 break;
1532 1060
@@ -1536,53 +1064,27 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1536 pasted = B_N_PITEM_HEAD(tbS0, item_pos); 1064 pasted = B_N_PITEM_HEAD(tbS0, item_pos);
1537 /* when directory, may be new entry already pasted */ 1065 /* when directory, may be new entry already pasted */
1538 if (is_direntry_le_ih(pasted)) { 1066 if (is_direntry_le_ih(pasted)) {
1539 if (pos_in_item >= 0 && 1067 if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) {
1540 pos_in_item <=
1541 ih_entry_count(pasted)) {
1542 1068
1543 RFALSE(!tb->insert_size[0], 1069 RFALSE(!tb->insert_size[0],
1544 "PAP-12260: insert_size is 0 already"); 1070 "PAP-12260: insert_size is 0 already");
1545 1071
1546 /* prepare space */ 1072 /* prepare space */
1547 buffer_info_init_tbS0(tb, &bi); 1073 buffer_info_init_tbS0(tb, &bi);
1548 leaf_paste_in_buffer(&bi, 1074 leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
1549 item_pos, 1075 tb->insert_size[0], body,
1550 pos_in_item,
1551 tb->
1552 insert_size
1553 [0], body,
1554 zeros_num); 1076 zeros_num);
1555 1077
1556 /* paste entry */ 1078 /* paste entry */
1557 leaf_paste_entries(&bi, 1079 leaf_paste_entries(&bi, item_pos, pos_in_item, 1,
1558 item_pos, 1080 (struct reiserfs_de_head *)body,
1559 pos_in_item, 1081 body + DEH_SIZE,
1560 1, 1082 tb->insert_size[0]);
1561 (struct
1562 reiserfs_de_head
1563 *)body,
1564 body +
1565 DEH_SIZE,
1566 tb->
1567 insert_size
1568 [0]
1569 );
1570 if (!item_pos && !pos_in_item) { 1083 if (!item_pos && !pos_in_item) {
1571 RFALSE(!tb->CFL[0] 1084 RFALSE(!tb->CFL[0] || !tb->L[0],
1572 || !tb->L[0],
1573 "PAP-12270: CFL[0]/L[0] must be specified"); 1085 "PAP-12270: CFL[0]/L[0] must be specified");
1574 if (tb->CFL[0]) { 1086 if (tb->CFL[0])
1575 replace_key(tb, 1087 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
1576 tb->
1577 CFL
1578 [0],
1579 tb->
1580 lkey
1581 [0],
1582 tbS0,
1583 0);
1584
1585 }
1586 } 1088 }
1587 tb->insert_size[0] = 0; 1089 tb->insert_size[0] = 0;
1588 } 1090 }
@@ -1593,13 +1095,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1593 "PAP-12275: insert size must not be %d", 1095 "PAP-12275: insert size must not be %d",
1594 tb->insert_size[0]); 1096 tb->insert_size[0]);
1595 buffer_info_init_tbS0(tb, &bi); 1097 buffer_info_init_tbS0(tb, &bi);
1596 leaf_paste_in_buffer(&bi, 1098 leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
1597 item_pos, 1099 tb->insert_size[0], body, zeros_num);
1598 pos_in_item,
1599 tb->
1600 insert_size
1601 [0], body,
1602 zeros_num);
1603 1100
1604 if (is_indirect_le_ih(pasted)) { 1101 if (is_indirect_le_ih(pasted)) {
1605#if 0 1102#if 0
@@ -1611,8 +1108,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1611 tb-> 1108 tb->
1612 insert_size[0]); 1109 insert_size[0]);
1613#endif 1110#endif
1614 set_ih_free_space 1111 set_ih_free_space(pasted, 0);
1615 (pasted, 0);
1616 } 1112 }
1617 tb->insert_size[0] = 0; 1113 tb->insert_size[0] = 0;
1618 } 1114 }
@@ -1620,8 +1116,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1620 else { 1116 else {
1621 if (tb->insert_size[0]) { 1117 if (tb->insert_size[0]) {
1622 print_cur_tb("12285"); 1118 print_cur_tb("12285");
1623 reiserfs_panic(tb-> 1119 reiserfs_panic(tb->tb_sb,
1624 tb_sb,
1625 "PAP-12285", 1120 "PAP-12285",
1626 "insert_size " 1121 "insert_size "
1627 "must be 0 " 1122 "must be 0 "
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 2623cffc73a1..25bfb0eff772 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -373,8 +373,9 @@ extern const char *ceph_mds_op_name(int op);
373/* 373/*
374 * Ceph setxattr request flags. 374 * Ceph setxattr request flags.
375 */ 375 */
376#define CEPH_XATTR_CREATE 1 376#define CEPH_XATTR_CREATE (1 << 0)
377#define CEPH_XATTR_REPLACE 2 377#define CEPH_XATTR_REPLACE (1 << 1)
378#define CEPH_XATTR_REMOVE (1 << 31)
378 379
379union ceph_mds_request_args { 380union ceph_mds_request_args {
380 struct { 381 struct {
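The setxattr flags are now spelled as explicit bit positions, with the top bit reserved for removal. A tiny user-space illustration of treating them as a bitmask follows; it is an assumption about how callers combine them, not code from the Ceph tree (1u is used only to avoid a signed shift in the sketch).

#include <stdio.h>

/* same bit layout as the header above */
#define CEPH_XATTR_CREATE  (1u << 0)
#define CEPH_XATTR_REPLACE (1u << 1)
#define CEPH_XATTR_REMOVE  (1u << 31)

int main(void)
{
        unsigned int flags = CEPH_XATTR_REPLACE;

        /* each flag is an independent bit, so they can be tested
         * individually and combined without ambiguity */
        if (flags & CEPH_XATTR_REMOVE)
                printf("remove xattr\n");
        else if (flags & CEPH_XATTR_CREATE)
                printf("create-only setxattr\n");
        else
                printf("flags=%#x\n", flags);
        return 0;
}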
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 440a02ee6f92..e8eeebd49a98 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -752,6 +752,9 @@ struct netdev_phys_port_id {
752 unsigned char id_len; 752 unsigned char id_len;
753}; 753};
754 754
755typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
756 struct sk_buff *skb);
757
755/* 758/*
756 * This structure defines the management hooks for network devices. 759 * This structure defines the management hooks for network devices.
757 * The following hooks can be defined; unless noted otherwise, they are 760 * The following hooks can be defined; unless noted otherwise, they are
@@ -783,7 +786,7 @@ struct netdev_phys_port_id {
783 * Required can not be NULL. 786 * Required can not be NULL.
784 * 787 *
785 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 788 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
786 * void *accel_priv); 789 * void *accel_priv, select_queue_fallback_t fallback);
787 * Called to decide which queue to when device supports multiple 790 * Called to decide which queue to when device supports multiple
788 * transmit queues. 791 * transmit queues.
789 * 792 *
@@ -1005,7 +1008,8 @@ struct net_device_ops {
1005 struct net_device *dev); 1008 struct net_device *dev);
1006 u16 (*ndo_select_queue)(struct net_device *dev, 1009 u16 (*ndo_select_queue)(struct net_device *dev,
1007 struct sk_buff *skb, 1010 struct sk_buff *skb,
1008 void *accel_priv); 1011 void *accel_priv,
1012 select_queue_fallback_t fallback);
1009 void (*ndo_change_rx_flags)(struct net_device *dev, 1013 void (*ndo_change_rx_flags)(struct net_device *dev,
1010 int flags); 1014 int flags);
1011 void (*ndo_set_rx_mode)(struct net_device *dev); 1015 void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1551,7 +1555,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
1551struct netdev_queue *netdev_pick_tx(struct net_device *dev, 1555struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1552 struct sk_buff *skb, 1556 struct sk_buff *skb,
1553 void *accel_priv); 1557 void *accel_priv);
1554u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
1555 1558
1556/* 1559/*
1557 * Net namespace inlines 1560 * Net namespace inlines
@@ -2276,6 +2279,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
2276} 2279}
2277 2280
2278/** 2281/**
2282 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
2283 * @dev: network device
2284 * @queue_index: given tx queue index
2285 *
2286 * Returns 0 if given tx queue index >= number of device tx queues,
2287 * otherwise returns the originally passed tx queue index.
2288 */
2289static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
2290{
2291 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2292 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2293 dev->name, queue_index,
2294 dev->real_num_tx_queues);
2295 return 0;
2296 }
2297
2298 return queue_index;
2299}
2300
2301/**
2279 * netif_running - test if up 2302 * netif_running - test if up
2280 * @dev: network device 2303 * @dev: network device
2281 * 2304 *
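Together with the select_queue_fallback_t typedef added above, netdev_cap_txqueue() lets a driver sanity-check whatever queue index it computed before returning it. A hypothetical driver hook using the new signature is sketched below; foo_select_queue and its priority mapping are made up, only the types and helpers come from this header.

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                            void *accel_priv,
                            select_queue_fallback_t fallback)
{
        /* made-up private mapping: steer by the low bits of skb->priority */
        u16 queue = (u16)(skb->priority & 0x7);

        if (queue)
                /* clamp anything that exceeds the real queue count */
                return netdev_cap_txqueue(dev, queue);

        /* no private preference: let the core pick, as before this change */
        return fallback(dev, skb);
}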
@@ -3068,7 +3091,12 @@ void netdev_change_features(struct net_device *dev);
3068void netif_stacked_transfer_operstate(const struct net_device *rootdev, 3091void netif_stacked_transfer_operstate(const struct net_device *rootdev,
3069 struct net_device *dev); 3092 struct net_device *dev);
3070 3093
3071netdev_features_t netif_skb_features(struct sk_buff *skb); 3094netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
3095 const struct net_device *dev);
3096static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
3097{
3098 return netif_skb_dev_features(skb, skb->dev);
3099}
3072 3100
3073static inline bool net_gso_ok(netdev_features_t features, int gso_type) 3101static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3074{ 3102{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f589c9af8cbf..3ebbbe7b6d05 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2916,5 +2916,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
2916{ 2916{
2917 return !skb->head_frag || skb_cloned(skb); 2917 return !skb->head_frag || skb_cloned(skb);
2918} 2918}
2919
2920/**
2921 * skb_gso_network_seglen - Return length of individual segments of a gso packet
2922 *
2923 * @skb: GSO skb
2924 *
2925 * skb_gso_network_seglen is used to determine the real size of the
2926 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
2927 *
2928 * The MAC/L2 header is not accounted for.
2929 */
2930static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
2931{
2932 unsigned int hdr_len = skb_transport_header(skb) -
2933 skb_network_header(skb);
2934 return hdr_len + skb_gso_transport_seglen(skb);
2935}
2919#endif /* __KERNEL__ */ 2936#endif /* __KERNEL__ */
2920#endif /* _LINUX_SKBUFF_H */ 2937#endif /* _LINUX_SKBUFF_H */
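A sketch of the kind of check skb_gso_network_seglen() enables: comparing the per-segment size of a GSO skb against a path MTU instead of the aggregate skb->len. The function name and call site are assumptions for illustration, not part of this patch.

/* assumes <linux/skbuff.h>; hypothetical helper, not from this patch */
static bool foo_skb_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
        if (!skb_is_gso(skb))
                return skb->len > mtu;

        /* each segment is re-built with its own L3/L4 headers, so the
         * comparison has to be made per segment, not on skb->len */
        return skb_gso_network_seglen(skb) > mtu;
}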
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index d992ca3145fe..6ee76c804893 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1653,17 +1653,6 @@ struct sctp_association {
1653 /* This is the last advertised value of rwnd over a SACK chunk. */ 1653 /* This is the last advertised value of rwnd over a SACK chunk. */
1654 __u32 a_rwnd; 1654 __u32 a_rwnd;
1655 1655
1656 /* Number of bytes by which the rwnd has slopped. The rwnd is allowed
1657 * to slop over a maximum of the association's frag_point.
1658 */
1659 __u32 rwnd_over;
1660
1661 /* Keeps treack of rwnd pressure. This happens when we have
1662 * a window, but not recevie buffer (i.e small packets). This one
1663 * is releases slowly (1 PMTU at a time ).
1664 */
1665 __u32 rwnd_press;
1666
1667 /* This is the sndbuf size in use for the association. 1656 /* This is the sndbuf size in use for the association.
1668 * This corresponds to the sndbuf size for the association, 1657 * This corresponds to the sndbuf size for the association,
1669 * as specified in the sk->sndbuf. 1658 * as specified in the sk->sndbuf.
@@ -1892,8 +1881,7 @@ void sctp_assoc_update(struct sctp_association *old,
1892__u32 sctp_association_get_next_tsn(struct sctp_association *); 1881__u32 sctp_association_get_next_tsn(struct sctp_association *);
1893 1882
1894void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); 1883void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
1895void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); 1884void sctp_assoc_rwnd_update(struct sctp_association *, bool);
1896void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
1897void sctp_assoc_set_primary(struct sctp_association *, 1885void sctp_assoc_set_primary(struct sctp_association *,
1898 struct sctp_transport *); 1886 struct sctp_transport *);
1899void sctp_assoc_del_nonprimary_peers(struct sctp_association *, 1887void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 512159bf607f..8323bced8e5b 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -241,19 +241,19 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
241 size = bat_priv->num_ifaces * sizeof(uint8_t); 241 size = bat_priv->num_ifaces * sizeof(uint8_t);
242 orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC); 242 orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
243 if (!orig_node->bat_iv.bcast_own_sum) 243 if (!orig_node->bat_iv.bcast_own_sum)
244 goto free_bcast_own; 244 goto free_orig_node;
245 245
246 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, 246 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
247 batadv_choose_orig, orig_node, 247 batadv_choose_orig, orig_node,
248 &orig_node->hash_entry); 248 &orig_node->hash_entry);
249 if (hash_added != 0) 249 if (hash_added != 0)
250 goto free_bcast_own; 250 goto free_orig_node;
251 251
252 return orig_node; 252 return orig_node;
253 253
254free_bcast_own:
255 kfree(orig_node->bat_iv.bcast_own);
256free_orig_node: 254free_orig_node:
255 /* free twice, as batadv_orig_node_new sets refcount to 2 */
256 batadv_orig_node_free_ref(orig_node);
257 batadv_orig_node_free_ref(orig_node); 257 batadv_orig_node_free_ref(orig_node);
258 258
259 return NULL; 259 return NULL;
@@ -266,7 +266,7 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
266 struct batadv_orig_node *orig_neigh) 266 struct batadv_orig_node *orig_neigh)
267{ 267{
268 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 268 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
269 struct batadv_neigh_node *neigh_node; 269 struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
270 270
271 neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node); 271 neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
272 if (!neigh_node) 272 if (!neigh_node)
@@ -281,14 +281,24 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
281 neigh_node->orig_node = orig_neigh; 281 neigh_node->orig_node = orig_neigh;
282 neigh_node->if_incoming = hard_iface; 282 neigh_node->if_incoming = hard_iface;
283 283
284 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
285 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
286 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
287
288 spin_lock_bh(&orig_node->neigh_list_lock); 284 spin_lock_bh(&orig_node->neigh_list_lock);
289 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); 285 tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
286 neigh_addr);
287 if (!tmp_neigh_node) {
288 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
289 } else {
290 kfree(neigh_node);
291 batadv_hardif_free_ref(hard_iface);
292 neigh_node = tmp_neigh_node;
293 }
290 spin_unlock_bh(&orig_node->neigh_list_lock); 294 spin_unlock_bh(&orig_node->neigh_list_lock);
291 295
296 if (!tmp_neigh_node)
297 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
298 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
299 neigh_addr, orig_node->orig,
300 hard_iface->net_dev->name);
301
292out: 302out:
293 return neigh_node; 303 return neigh_node;
294} 304}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 3d417d3641c6..b851cc580853 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -241,7 +241,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
241{ 241{
242 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 242 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
243 const struct batadv_hard_iface *hard_iface; 243 const struct batadv_hard_iface *hard_iface;
244 int min_mtu = ETH_DATA_LEN; 244 int min_mtu = INT_MAX;
245 245
246 rcu_read_lock(); 246 rcu_read_lock();
247 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { 247 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -256,8 +256,6 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
256 } 256 }
257 rcu_read_unlock(); 257 rcu_read_unlock();
258 258
259 atomic_set(&bat_priv->packet_size_max, min_mtu);
260
261 if (atomic_read(&bat_priv->fragmentation) == 0) 259 if (atomic_read(&bat_priv->fragmentation) == 0)
262 goto out; 260 goto out;
263 261
@@ -268,13 +266,21 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
268 min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE); 266 min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
269 min_mtu -= sizeof(struct batadv_frag_packet); 267 min_mtu -= sizeof(struct batadv_frag_packet);
270 min_mtu *= BATADV_FRAG_MAX_FRAGMENTS; 268 min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
271 atomic_set(&bat_priv->packet_size_max, min_mtu);
272
273 /* with fragmentation enabled we can fragment external packets easily */
274 min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
275 269
276out: 270out:
277 return min_mtu - batadv_max_header_len(); 271 /* report to the other components the maximum amount of bytes that
272 * batman-adv can send over the wire (without considering the payload
273 * overhead). For example, this value is used by TT to compute the
 274 * maximum local table size
275 */
276 atomic_set(&bat_priv->packet_size_max, min_mtu);
277
278 /* the real soft-interface MTU is computed by removing the payload
279 * overhead from the maximum amount of bytes that was just computed.
280 *
281 * However batman-adv does not support MTUs bigger than ETH_DATA_LEN
282 */
283 return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
278} 284}
279 285
280/* adjusts the MTU if a new interface with a smaller MTU appeared. */ 286/* adjusts the MTU if a new interface with a smaller MTU appeared. */
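The reordered batadv_hardif_min_mtu() now distinguishes two values: packet_size_max, the largest payload batman-adv can move (by fragmenting, if enabled), and the returned soft-interface MTU, which subtracts the header overhead and is capped at ETH_DATA_LEN. A rough user-space rendering of that arithmetic follows; the FRAG_* and header constants are placeholders, not the kernel's BATADV_* values.

#include <stdio.h>

#define ETH_DATA_LEN       1500
/* placeholder values, not the kernel's BATADV_* constants */
#define FRAG_MAX_FRAG_SIZE 1400
#define FRAG_MAX_FRAGMENTS 16
#define FRAG_PACKET_HDR    20
#define MAX_HEADER_LEN     32

int main(void)
{
        int min_mtu = 1500;     /* smallest hard-interface MTU found */
        int packet_size_max, soft_mtu;

        /* fragmentation enabled: largest payload movable by fragmenting */
        if (min_mtu > FRAG_MAX_FRAG_SIZE)
                min_mtu = FRAG_MAX_FRAG_SIZE;
        min_mtu -= FRAG_PACKET_HDR;
        min_mtu *= FRAG_MAX_FRAGMENTS;

        /* reported to other components (e.g. TT table sizing) */
        packet_size_max = min_mtu;

        /* soft-interface MTU: strip header overhead, never above 1500 */
        soft_mtu = min_mtu - MAX_HEADER_LEN;
        if (soft_mtu > ETH_DATA_LEN)
                soft_mtu = ETH_DATA_LEN;

        printf("packet_size_max=%d soft_mtu=%d\n", packet_size_max, soft_mtu);
        return 0;
}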
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6df12a2e3605..853941629dc1 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -458,6 +458,42 @@ out:
458} 458}
459 459
460/** 460/**
461 * batadv_neigh_node_get - retrieve a neighbour from the list
462 * @orig_node: originator which the neighbour belongs to
463 * @hard_iface: the interface where this neighbour is connected to
464 * @addr: the address of the neighbour
465 *
466 * Looks for and possibly returns a neighbour belonging to this originator list
467 * which is connected through the provided hard interface.
468 * Returns NULL if the neighbour is not found.
469 */
470struct batadv_neigh_node *
471batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
472 const struct batadv_hard_iface *hard_iface,
473 const uint8_t *addr)
474{
475 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
476
477 rcu_read_lock();
478 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
479 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
480 continue;
481
482 if (tmp_neigh_node->if_incoming != hard_iface)
483 continue;
484
485 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
486 continue;
487
488 res = tmp_neigh_node;
489 break;
490 }
491 rcu_read_unlock();
492
493 return res;
494}
495
496/**
461 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object 497 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
462 * @rcu: rcu pointer of the orig_ifinfo object 498 * @rcu: rcu pointer of the orig_ifinfo object
463 */ 499 */
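The new lookup is what lets batadv_iv_ogm_neigh_new() in the hunk further up avoid adding a duplicate neighbour: allocate first, then check under the list lock and discard the new object if a matching entry already exists. A condensed restatement of that pattern, using only functions shown in this diff (field initialisation and error paths trimmed):

static struct batadv_neigh_node *
foo_neigh_get_or_add(struct batadv_orig_node *orig_node,
                     struct batadv_hard_iface *hard_iface,
                     const uint8_t *neigh_addr)
{
        struct batadv_neigh_node *neigh_node, *tmp_neigh_node;

        neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
        if (!neigh_node)
                return NULL;

        spin_lock_bh(&orig_node->neigh_list_lock);
        tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
                                               neigh_addr);
        if (!tmp_neigh_node) {
                /* first neighbour with this address on this interface */
                hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
        } else {
                /* duplicate: drop the fresh allocation and reuse the
                 * existing entry, whose reference batadv_neigh_node_get
                 * already took for us */
                kfree(neigh_node);
                batadv_hardif_free_ref(hard_iface);
                neigh_node = tmp_neigh_node;
        }
        spin_unlock_bh(&orig_node->neigh_list_lock);

        return neigh_node;
}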
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 37be290f63f6..db3a9ed734cb 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -29,6 +29,10 @@ void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
29struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, 29struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
30 const uint8_t *addr); 30 const uint8_t *addr);
31struct batadv_neigh_node * 31struct batadv_neigh_node *
32batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
33 const struct batadv_hard_iface *hard_iface,
34 const uint8_t *addr);
35struct batadv_neigh_node *
32batadv_neigh_node_new(struct batadv_hard_iface *hard_iface, 36batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
33 const uint8_t *neigh_addr, 37 const uint8_t *neigh_addr,
34 struct batadv_orig_node *orig_node); 38 struct batadv_orig_node *orig_node);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 1ed9f7c9ecea..a953d5b196a3 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -688,7 +688,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
688 int is_old_ttvn; 688 int is_old_ttvn;
689 689
690 /* check if there is enough data before accessing it */ 690 /* check if there is enough data before accessing it */
691 if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0) 691 if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
692 return 0; 692 return 0;
693 693
694 /* create a copy of the skb (in case of for re-routing) to modify it. */ 694 /* create a copy of the skb (in case of for re-routing) to modify it. */
@@ -918,6 +918,8 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
918 918
919 if (ret != NET_RX_SUCCESS) 919 if (ret != NET_RX_SUCCESS)
920 ret = batadv_route_unicast_packet(skb, recv_if); 920 ret = batadv_route_unicast_packet(skb, recv_if);
921 else
922 consume_skb(skb);
921 923
922 return ret; 924 return ret;
923} 925}
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 579f5f00a385..843febd1e519 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -254,9 +254,9 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
254 struct batadv_orig_node *orig_node, 254 struct batadv_orig_node *orig_node,
255 unsigned short vid) 255 unsigned short vid)
256{ 256{
257 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 257 struct ethhdr *ethhdr;
258 struct batadv_unicast_packet *unicast_packet; 258 struct batadv_unicast_packet *unicast_packet;
259 int ret = NET_XMIT_DROP; 259 int ret = NET_XMIT_DROP, hdr_size;
260 260
261 if (!orig_node) 261 if (!orig_node)
262 goto out; 262 goto out;
@@ -265,12 +265,16 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
265 case BATADV_UNICAST: 265 case BATADV_UNICAST:
266 if (!batadv_send_skb_prepare_unicast(skb, orig_node)) 266 if (!batadv_send_skb_prepare_unicast(skb, orig_node))
267 goto out; 267 goto out;
268
269 hdr_size = sizeof(*unicast_packet);
268 break; 270 break;
269 case BATADV_UNICAST_4ADDR: 271 case BATADV_UNICAST_4ADDR:
270 if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, 272 if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
271 orig_node, 273 orig_node,
272 packet_subtype)) 274 packet_subtype))
273 goto out; 275 goto out;
276
277 hdr_size = sizeof(struct batadv_unicast_4addr_packet);
274 break; 278 break;
275 default: 279 default:
276 /* this function supports UNICAST and UNICAST_4ADDR only. It 280 /* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -279,6 +283,7 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
279 goto out; 283 goto out;
280 } 284 }
281 285
286 ethhdr = (struct ethhdr *)(skb->data + hdr_size);
282 unicast_packet = (struct batadv_unicast_packet *)skb->data; 287 unicast_packet = (struct batadv_unicast_packet *)skb->data;
283 288
284 /* inform the destination node that we are still missing a correct route 289 /* inform the destination node that we are still missing a correct route
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b6071f675a3e..959dde721c46 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1975,6 +1975,7 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1975 struct hlist_head *head; 1975 struct hlist_head *head;
1976 uint32_t i, crc_tmp, crc = 0; 1976 uint32_t i, crc_tmp, crc = 0;
1977 uint8_t flags; 1977 uint8_t flags;
1978 __be16 tmp_vid;
1978 1979
1979 for (i = 0; i < hash->size; i++) { 1980 for (i = 0; i < hash->size; i++) {
1980 head = &hash->table[i]; 1981 head = &hash->table[i];
@@ -2011,8 +2012,11 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
2011 orig_node)) 2012 orig_node))
2012 continue; 2013 continue;
2013 2014
2014 crc_tmp = crc32c(0, &tt_common->vid, 2015 /* use network order to read the VID: this ensures that
2015 sizeof(tt_common->vid)); 2016 * every node reads the bytes in the same order.
2017 */
2018 tmp_vid = htons(tt_common->vid);
2019 crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
2016 2020
2017 /* compute the CRC on flags that have to be kept in sync 2021 /* compute the CRC on flags that have to be kept in sync
2018 * among nodes 2022 * among nodes
@@ -2046,6 +2050,7 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
2046 struct hlist_head *head; 2050 struct hlist_head *head;
2047 uint32_t i, crc_tmp, crc = 0; 2051 uint32_t i, crc_tmp, crc = 0;
2048 uint8_t flags; 2052 uint8_t flags;
2053 __be16 tmp_vid;
2049 2054
2050 for (i = 0; i < hash->size; i++) { 2055 for (i = 0; i < hash->size; i++) {
2051 head = &hash->table[i]; 2056 head = &hash->table[i];
@@ -2064,8 +2069,11 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
2064 if (tt_common->flags & BATADV_TT_CLIENT_NEW) 2069 if (tt_common->flags & BATADV_TT_CLIENT_NEW)
2065 continue; 2070 continue;
2066 2071
2067 crc_tmp = crc32c(0, &tt_common->vid, 2072 /* use network order to read the VID: this ensures that
2068 sizeof(tt_common->vid)); 2073 * every node reads the bytes in the same order.
2074 */
2075 tmp_vid = htons(tt_common->vid);
2076 crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
2069 2077
2070 /* compute the CRC on flags that have to be kept in sync 2078 /* compute the CRC on flags that have to be kept in sync
2071 * among nodes 2079 * among nodes
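The reason for converting the VID with htons() before feeding it to crc32c() is purely byte order: the CRC runs over raw bytes, so hashing a host-order value would give big- and little-endian nodes different checksums for the same VID. A small user-space illustration, with a toy checksum standing in for the kernel's crc32c():

#include <stdio.h>
#include <arpa/inet.h>

/* toy byte-wise checksum standing in for crc32c() */
static unsigned int toy_checksum(const unsigned char *p, size_t len)
{
        unsigned int sum = 0;

        while (len--)
                sum = sum * 31 + *p++;
        return sum;
}

int main(void)
{
        unsigned short vid = 0x0123;
        unsigned short tmp_vid = htons(vid);    /* same bytes on every host */

        /* hashing &vid directly would depend on the host's endianness;
         * hashing &tmp_vid gives every node the identical byte sequence */
        printf("%#x\n", toy_checksum((const unsigned char *)&tmp_vid,
                                     sizeof(tmp_vid)));
        return 0;
}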
@@ -2262,6 +2270,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
2262{ 2270{
2263 struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp; 2271 struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
2264 struct batadv_orig_node_vlan *vlan; 2272 struct batadv_orig_node_vlan *vlan;
2273 uint32_t crc;
2265 int i; 2274 int i;
2266 2275
2267 /* check if each received CRC matches the locally stored one */ 2276 /* check if each received CRC matches the locally stored one */
@@ -2281,7 +2290,10 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
2281 if (!vlan) 2290 if (!vlan)
2282 return false; 2291 return false;
2283 2292
2284 if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc)) 2293 crc = vlan->tt.crc;
2294 batadv_orig_node_vlan_free_ref(vlan);
2295
2296 if (crc != ntohl(tt_vlan_tmp->crc))
2285 return false; 2297 return false;
2286 } 2298 }
2287 2299
@@ -3218,7 +3230,6 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
3218 3230
3219 spin_lock_bh(&orig_node->tt_lock); 3231 spin_lock_bh(&orig_node->tt_lock);
3220 3232
3221 tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
3222 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes, 3233 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
3223 ttvn, tt_change); 3234 ttvn, tt_change);
3224 3235
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 292e619db896..d9fb93451442 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -430,6 +430,16 @@ static void hidp_del_timer(struct hidp_session *session)
430 del_timer(&session->timer); 430 del_timer(&session->timer);
431} 431}
432 432
433static void hidp_process_report(struct hidp_session *session,
434 int type, const u8 *data, int len, int intr)
435{
436 if (len > HID_MAX_BUFFER_SIZE)
437 len = HID_MAX_BUFFER_SIZE;
438
439 memcpy(session->input_buf, data, len);
440 hid_input_report(session->hid, type, session->input_buf, len, intr);
441}
442
433static void hidp_process_handshake(struct hidp_session *session, 443static void hidp_process_handshake(struct hidp_session *session,
434 unsigned char param) 444 unsigned char param)
435{ 445{
@@ -502,7 +512,8 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
502 hidp_input_report(session, skb); 512 hidp_input_report(session, skb);
503 513
504 if (session->hid) 514 if (session->hid)
505 hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); 515 hidp_process_report(session, HID_INPUT_REPORT,
516 skb->data, skb->len, 0);
506 break; 517 break;
507 518
508 case HIDP_DATA_RTYPE_OTHER: 519 case HIDP_DATA_RTYPE_OTHER:
@@ -584,7 +595,8 @@ static void hidp_recv_intr_frame(struct hidp_session *session,
584 hidp_input_report(session, skb); 595 hidp_input_report(session, skb);
585 596
586 if (session->hid) { 597 if (session->hid) {
587 hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); 598 hidp_process_report(session, HID_INPUT_REPORT,
599 skb->data, skb->len, 1);
588 BT_DBG("report len %d", skb->len); 600 BT_DBG("report len %d", skb->len);
589 } 601 }
590 } else { 602 } else {
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index ab5241400cf7..8798492a6e99 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -24,6 +24,7 @@
24#define __HIDP_H 24#define __HIDP_H
25 25
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/hid.h>
27#include <linux/kref.h> 28#include <linux/kref.h>
28#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
29#include <net/bluetooth/l2cap.h> 30#include <net/bluetooth/l2cap.h>
@@ -179,6 +180,9 @@ struct hidp_session {
179 180
180 /* Used in hidp_output_raw_report() */ 181 /* Used in hidp_output_raw_report() */
181 int output_report_success; /* boolean */ 182 int output_report_success; /* boolean */
183
184 /* temporary input buffer */
185 u8 input_buf[HID_MAX_BUFFER_SIZE];
182}; 186};
183 187
184/* HIDP init defines */ 188/* HIDP init defines */
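
The HIDP change above stops handing skb->data of arbitrary length to hid_input_report() and instead bounces every report through a fixed per-session buffer, clamping the length first. A hedged sketch of the clamp-and-copy pattern; the buffer size and helper name here are illustrative, not the kernel's:

	#include <string.h>
	#include <stdint.h>

	#define MAX_BUFFER_SIZE 4096	/* stand-in for HID_MAX_BUFFER_SIZE */

	struct session {
		uint8_t input_buf[MAX_BUFFER_SIZE];	/* session-owned scratch buffer */
	};

	/*
	 * Copy at most MAX_BUFFER_SIZE bytes into the session buffer and
	 * return the (possibly truncated) length a consumer may safely parse.
	 */
	static int process_report(struct session *s, const uint8_t *data, int len)
	{
		if (len > MAX_BUFFER_SIZE)
			len = MAX_BUFFER_SIZE;

		memcpy(s->input_buf, data, len);
		return len;
	}

The important property is that the length handed to the parser can never exceed the buffer it now points into, regardless of what the remote Bluetooth device sent.
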
diff --git a/net/core/dev.c b/net/core/dev.c
index 4ad1b78c9c77..b1b0c8d4d7df 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2420,7 +2420,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
2420 * 2. No high memory really exists on this machine. 2420 * 2. No high memory really exists on this machine.
2421 */ 2421 */
2422 2422
2423static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 2423static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
2424{ 2424{
2425#ifdef CONFIG_HIGHMEM 2425#ifdef CONFIG_HIGHMEM
2426 int i; 2426 int i;
@@ -2495,34 +2495,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2495} 2495}
2496 2496
2497static netdev_features_t harmonize_features(struct sk_buff *skb, 2497static netdev_features_t harmonize_features(struct sk_buff *skb,
2498 netdev_features_t features) 2498 const struct net_device *dev,
2499 netdev_features_t features)
2499{ 2500{
2500 if (skb->ip_summed != CHECKSUM_NONE && 2501 if (skb->ip_summed != CHECKSUM_NONE &&
2501 !can_checksum_protocol(features, skb_network_protocol(skb))) { 2502 !can_checksum_protocol(features, skb_network_protocol(skb))) {
2502 features &= ~NETIF_F_ALL_CSUM; 2503 features &= ~NETIF_F_ALL_CSUM;
2503 } else if (illegal_highdma(skb->dev, skb)) { 2504 } else if (illegal_highdma(dev, skb)) {
2504 features &= ~NETIF_F_SG; 2505 features &= ~NETIF_F_SG;
2505 } 2506 }
2506 2507
2507 return features; 2508 return features;
2508} 2509}
2509 2510
2510netdev_features_t netif_skb_features(struct sk_buff *skb) 2511netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
2512 const struct net_device *dev)
2511{ 2513{
2512 __be16 protocol = skb->protocol; 2514 __be16 protocol = skb->protocol;
2513 netdev_features_t features = skb->dev->features; 2515 netdev_features_t features = dev->features;
2514 2516
2515 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) 2517 if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
2516 features &= ~NETIF_F_GSO_MASK; 2518 features &= ~NETIF_F_GSO_MASK;
2517 2519
2518 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2520 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
2519 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2521 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2520 protocol = veh->h_vlan_encapsulated_proto; 2522 protocol = veh->h_vlan_encapsulated_proto;
2521 } else if (!vlan_tx_tag_present(skb)) { 2523 } else if (!vlan_tx_tag_present(skb)) {
2522 return harmonize_features(skb, features); 2524 return harmonize_features(skb, dev, features);
2523 } 2525 }
2524 2526
2525 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2527 features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2526 NETIF_F_HW_VLAN_STAG_TX); 2528 NETIF_F_HW_VLAN_STAG_TX);
2527 2529
2528 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) 2530 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2530,9 +2532,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
2530 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 2532 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2531 NETIF_F_HW_VLAN_STAG_TX; 2533 NETIF_F_HW_VLAN_STAG_TX;
2532 2534
2533 return harmonize_features(skb, features); 2535 return harmonize_features(skb, dev, features);
2534} 2536}
2535EXPORT_SYMBOL(netif_skb_features); 2537EXPORT_SYMBOL(netif_skb_dev_features);
2536 2538
2537int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2539int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2538 struct netdev_queue *txq) 2540 struct netdev_queue *txq)
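
netif_skb_features() used to bake in skb->dev; exporting netif_skb_dev_features() lets a caller evaluate offload capabilities against some other device, which the IPv4 forwarding change further below needs in order to segment a GSO skb with the egress device's features. A toy sketch of that "evaluate against an explicit device" shape, with made-up feature bits and structures rather than the real netdev_features_t machinery:

	#include <stdint.h>

	typedef uint64_t features_t;

	#define F_SG	(1ULL << 0)
	#define F_GSO	(1ULL << 1)

	struct toy_device {
		features_t features;
		int gso_max_segs;
	};

	struct toy_pkt {
		struct toy_device *dev;		/* ingress device */
		int gso_segs;
	};

	/*
	 * Evaluate a packet against an explicit device instead of pkt->dev,
	 * e.g. the egress device on a forwarding path.
	 */
	static features_t pkt_dev_features(const struct toy_pkt *pkt,
					   const struct toy_device *dev)
	{
		features_t features = dev->features;

		if (pkt->gso_segs > dev->gso_max_segs)
			features &= ~F_GSO;

		return features;
	}
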
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 87577d447554..e29e810663d7 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -323,17 +323,6 @@ u32 __skb_get_poff(const struct sk_buff *skb)
323 return poff; 323 return poff;
324} 324}
325 325
326static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
327{
328 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
329 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
330 dev->name, queue_index,
331 dev->real_num_tx_queues);
332 return 0;
333 }
334 return queue_index;
335}
336
337static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) 326static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
338{ 327{
339#ifdef CONFIG_XPS 328#ifdef CONFIG_XPS
@@ -372,7 +361,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
372#endif 361#endif
373} 362}
374 363
375u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) 364static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
376{ 365{
377 struct sock *sk = skb->sk; 366 struct sock *sk = skb->sk;
378 int queue_index = sk_tx_queue_get(sk); 367 int queue_index = sk_tx_queue_get(sk);
@@ -392,7 +381,6 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
392 381
393 return queue_index; 382 return queue_index;
394} 383}
395EXPORT_SYMBOL(__netdev_pick_tx);
396 384
397struct netdev_queue *netdev_pick_tx(struct net_device *dev, 385struct netdev_queue *netdev_pick_tx(struct net_device *dev,
398 struct sk_buff *skb, 386 struct sk_buff *skb,
@@ -403,13 +391,13 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
403 if (dev->real_num_tx_queues != 1) { 391 if (dev->real_num_tx_queues != 1) {
404 const struct net_device_ops *ops = dev->netdev_ops; 392 const struct net_device_ops *ops = dev->netdev_ops;
405 if (ops->ndo_select_queue) 393 if (ops->ndo_select_queue)
406 queue_index = ops->ndo_select_queue(dev, skb, 394 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
407 accel_priv); 395 __netdev_pick_tx);
408 else 396 else
409 queue_index = __netdev_pick_tx(dev, skb); 397 queue_index = __netdev_pick_tx(dev, skb);
410 398
411 if (!accel_priv) 399 if (!accel_priv)
412 queue_index = dev_cap_txqueue(dev, queue_index); 400 queue_index = netdev_cap_txqueue(dev, queue_index);
413 } 401 }
414 402
415 skb_set_queue_mapping(skb, queue_index); 403 skb_set_queue_mapping(skb, queue_index);
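
With this change, ndo_select_queue() receives the old __netdev_pick_tx() as a fallback argument instead of the core exporting it, and any driver-chosen index is clamped by netdev_cap_txqueue(). A small sketch of the clamp, which is the part that is easy to get wrong; the function and parameter names here are illustrative:

	#include <stdio.h>

	/* Clamp a driver-chosen queue index to the device's real queue count. */
	static unsigned int cap_txqueue(unsigned int real_num_tx_queues,
					unsigned int queue_index)
	{
		if (queue_index >= real_num_tx_queues) {
			fprintf(stderr, "queue %u out of range (real: %u), using 0\n",
				queue_index, real_num_tx_queues);
			return 0;
		}
		return queue_index;
	}

Passing the fallback as a parameter is what lets af_packet, further down in this series, reuse the same driver hook without needing __netdev_pick_tx() to stay exported.
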
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 048dc8d183aa..1a0dac2ef9ad 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1963,16 +1963,21 @@ replay:
1963 1963
1964 dev->ifindex = ifm->ifi_index; 1964 dev->ifindex = ifm->ifi_index;
1965 1965
1966 if (ops->newlink) 1966 if (ops->newlink) {
1967 err = ops->newlink(net, dev, tb, data); 1967 err = ops->newlink(net, dev, tb, data);
1968 else 1968 /* Drivers should call free_netdev() in ->destructor
1969 * and unregister it on failure so that device could be
1970 * finally freed in rtnl_unlock.
1971 */
1972 if (err < 0)
1973 goto out;
1974 } else {
1969 err = register_netdevice(dev); 1975 err = register_netdevice(dev);
1970 1976 if (err < 0) {
1971 if (err < 0) { 1977 free_netdev(dev);
1972 free_netdev(dev); 1978 goto out;
1973 goto out; 1979 }
1974 } 1980 }
1975
1976 err = rtnl_configure_link(dev, ifm); 1981 err = rtnl_configure_link(dev, ifm);
1977 if (err < 0) 1982 if (err < 0)
1978 unregister_netdevice(dev); 1983 unregister_netdevice(dev);
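
The rtnl_newlink() change stops calling free_netdev() when ->newlink() fails, because drivers whose ->destructor is free_netdev() already free the device once it is unregistered, and a second free would be a double free. A minimal sketch of the asymmetric error handling, using toy names rather than the real rtnetlink structures:

	#include <stdlib.h>

	struct toy_dev { int dummy; };

	struct toy_ops {
		int (*newlink)(struct toy_dev *dev);	/* may be NULL */
	};

	static int register_dev(struct toy_dev *dev) { (void)dev; return 0; }

	static int create_link(const struct toy_ops *ops, struct toy_dev *dev)
	{
		int err;

		if (ops->newlink) {
			err = ops->newlink(dev);
			/* On failure the driver's own teardown frees dev. */
			if (err < 0)
				return err;
		} else {
			err = register_dev(dev);
			if (err < 0) {
				free(dev);	/* nobody else will free it */
				return err;
			}
		}
		return 0;
	}
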
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index c073b81a1f3e..62b5828acde0 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -8,7 +8,7 @@
8#include "tfrc.h" 8#include "tfrc.h"
9 9
10#ifdef CONFIG_IP_DCCP_TFRC_DEBUG 10#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
11static bool tfrc_debug; 11bool tfrc_debug;
12module_param(tfrc_debug, bool, 0644); 12module_param(tfrc_debug, bool, 0644);
13MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); 13MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
14#endif 14#endif
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index a3d8f7c76ae0..40ee7d62b652 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -21,6 +21,7 @@
21#include "packet_history.h" 21#include "packet_history.h"
22 22
23#ifdef CONFIG_IP_DCCP_TFRC_DEBUG 23#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
24extern bool tfrc_debug;
24#define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a) 25#define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a)
25#else 26#else
26#define tfrc_pr_debug(format, a...) 27#define tfrc_pr_debug(format, a...)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e9f1217a8afd..f3869c186d97 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,6 +39,71 @@
39#include <net/route.h> 39#include <net/route.h>
40#include <net/xfrm.h> 40#include <net/xfrm.h>
41 41
42static bool ip_may_fragment(const struct sk_buff *skb)
43{
44 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
45 !skb->local_df;
46}
47
48static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
49{
50 if (skb->len <= mtu || skb->local_df)
51 return false;
52
53 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
54 return false;
55
56 return true;
57}
58
59static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
60{
61 unsigned int mtu;
62
63 if (skb->local_df || !skb_is_gso(skb))
64 return false;
65
66 mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
67
68 /* if seglen > mtu, do software segmentation for IP fragmentation on
69 * output. DF bit cannot be set since ip_forward would have sent
70 * icmp error.
71 */
72 return skb_gso_network_seglen(skb) > mtu;
73}
74
75/* called if GSO skb needs to be fragmented on forward */
76static int ip_forward_finish_gso(struct sk_buff *skb)
77{
78 struct dst_entry *dst = skb_dst(skb);
79 netdev_features_t features;
80 struct sk_buff *segs;
81 int ret = 0;
82
83 features = netif_skb_dev_features(skb, dst->dev);
84 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
85 if (IS_ERR(segs)) {
86 kfree_skb(skb);
87 return -ENOMEM;
88 }
89
90 consume_skb(skb);
91
92 do {
93 struct sk_buff *nskb = segs->next;
94 int err;
95
96 segs->next = NULL;
97 err = dst_output(segs);
98
99 if (err && ret == 0)
100 ret = err;
101 segs = nskb;
102 } while (segs);
103
104 return ret;
105}
106
42static int ip_forward_finish(struct sk_buff *skb) 107static int ip_forward_finish(struct sk_buff *skb)
43{ 108{
44 struct ip_options *opt = &(IPCB(skb)->opt); 109 struct ip_options *opt = &(IPCB(skb)->opt);
@@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
49 if (unlikely(opt->optlen)) 114 if (unlikely(opt->optlen))
50 ip_forward_options(skb); 115 ip_forward_options(skb);
51 116
117 if (ip_gso_exceeds_dst_mtu(skb))
118 return ip_forward_finish_gso(skb);
119
52 return dst_output(skb); 120 return dst_output(skb);
53} 121}
54 122
@@ -91,8 +159,7 @@ int ip_forward(struct sk_buff *skb)
91 159
92 IPCB(skb)->flags |= IPSKB_FORWARDED; 160 IPCB(skb)->flags |= IPSKB_FORWARDED;
93 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); 161 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
94 if (unlikely(skb->len > mtu && !skb_is_gso(skb) && 162 if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
95 (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
96 IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); 163 IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
97 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 164 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
98 htonl(mtu)); 165 htonl(mtu));
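
The net effect of ip_may_fragment()/ip_exceeds_mtu() plus ip_forward_finish_gso() is that a GSO skb whose individual segments fit the egress MTU is no longer rejected with ICMP FRAG_NEEDED just because its aggregate length exceeds the MTU; it is software-segmented at output instead. A hedged sketch of the decision, with simplified stand-ins for the skb and GSO accessors:

	#include <stdbool.h>

	struct toy_skb {
		unsigned int len;		/* aggregate length */
		unsigned int gso_seglen;	/* network-layer length of one segment, 0 if not GSO */
		bool local_df;			/* local fragmentation allowed */
	};

	/* Would forwarding this packet over a link with 'mtu' need an ICMP error? */
	static bool exceeds_mtu(const struct toy_skb *skb, unsigned int mtu)
	{
		if (skb->len <= mtu || skb->local_df)
			return false;

		/* GSO: judge by per-segment size, not the aggregate. */
		if (skb->gso_seglen && skb->gso_seglen <= mtu)
			return false;

		return true;
	}

For example, a 64 KB GSO aggregate carrying 1448-byte segments passes a 1500-byte MTU check and is simply resegmented on output, while a single 1600-byte DF packet still triggers ICMP_FRAG_NEEDED.
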
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index efa1138fa523..b3e86ea7b71b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -273,7 +273,7 @@ static int __init ic_open_devs(void)
273 273
274 msleep(1); 274 msleep(1);
275 275
276 if time_before(jiffies, next_msg) 276 if (time_before(jiffies, next_msg))
277 continue; 277 continue;
278 278
279 elapsed = jiffies_to_msecs(jiffies - start); 279 elapsed = jiffies_to_msecs(jiffies - start);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 25071b48921c..4c011ec69ed4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1597,6 +1597,7 @@ static int __mkroute_input(struct sk_buff *skb,
1597 rth->rt_gateway = 0; 1597 rth->rt_gateway = 0;
1598 rth->rt_uses_gateway = 0; 1598 rth->rt_uses_gateway = 0;
1599 INIT_LIST_HEAD(&rth->rt_uncached); 1599 INIT_LIST_HEAD(&rth->rt_uncached);
1600 RT_CACHE_STAT_INC(in_slow_tot);
1600 1601
1601 rth->dst.input = ip_forward; 1602 rth->dst.input = ip_forward;
1602 rth->dst.output = ip_output; 1603 rth->dst.output = ip_output;
@@ -1695,10 +1696,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1695 fl4.daddr = daddr; 1696 fl4.daddr = daddr;
1696 fl4.saddr = saddr; 1697 fl4.saddr = saddr;
1697 err = fib_lookup(net, &fl4, &res); 1698 err = fib_lookup(net, &fl4, &res);
1698 if (err != 0) 1699 if (err != 0) {
1700 if (!IN_DEV_FORWARD(in_dev))
1701 err = -EHOSTUNREACH;
1699 goto no_route; 1702 goto no_route;
1700 1703 }
1701 RT_CACHE_STAT_INC(in_slow_tot);
1702 1704
1703 if (res.type == RTN_BROADCAST) 1705 if (res.type == RTN_BROADCAST)
1704 goto brd_input; 1706 goto brd_input;
@@ -1712,8 +1714,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1712 goto local_input; 1714 goto local_input;
1713 } 1715 }
1714 1716
1715 if (!IN_DEV_FORWARD(in_dev)) 1717 if (!IN_DEV_FORWARD(in_dev)) {
1718 err = -EHOSTUNREACH;
1716 goto no_route; 1719 goto no_route;
1720 }
1717 if (res.type != RTN_UNICAST) 1721 if (res.type != RTN_UNICAST)
1718 goto martian_destination; 1722 goto martian_destination;
1719 1723
@@ -1768,6 +1772,7 @@ local_input:
1768 rth->rt_gateway = 0; 1772 rth->rt_gateway = 0;
1769 rth->rt_uses_gateway = 0; 1773 rth->rt_uses_gateway = 0;
1770 INIT_LIST_HEAD(&rth->rt_uncached); 1774 INIT_LIST_HEAD(&rth->rt_uncached);
1775 RT_CACHE_STAT_INC(in_slow_tot);
1771 if (res.type == RTN_UNREACHABLE) { 1776 if (res.type == RTN_UNREACHABLE) {
1772 rth->dst.input= ip_error; 1777 rth->dst.input= ip_error;
1773 rth->dst.error= -err; 1778 rth->dst.error= -err;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ad235690684c..fdbfeca36d63 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2783,6 +2783,8 @@ static void addrconf_gre_config(struct net_device *dev)
2783 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); 2783 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
2784 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) 2784 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
2785 addrconf_add_linklocal(idev, &addr); 2785 addrconf_add_linklocal(idev, &addr);
2786 else
2787 addrconf_prefix_route(&addr, 64, dev, 0, 0);
2786} 2788}
2787#endif 2789#endif
2788 2790
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ef02b26ccf81..070a2fae2375 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -342,6 +342,20 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
342 return mtu; 342 return mtu;
343} 343}
344 344
345static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
346{
347 if (skb->len <= mtu || skb->local_df)
348 return false;
349
350 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
351 return true;
352
353 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
354 return false;
355
356 return true;
357}
358
345int ip6_forward(struct sk_buff *skb) 359int ip6_forward(struct sk_buff *skb)
346{ 360{
347 struct dst_entry *dst = skb_dst(skb); 361 struct dst_entry *dst = skb_dst(skb);
@@ -466,8 +480,7 @@ int ip6_forward(struct sk_buff *skb)
466 if (mtu < IPV6_MIN_MTU) 480 if (mtu < IPV6_MIN_MTU)
467 mtu = IPV6_MIN_MTU; 481 mtu = IPV6_MIN_MTU;
468 482
469 if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) || 483 if (ip6_pkt_too_big(skb, mtu)) {
470 (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
471 /* Again, force OUTPUT device used as source address */ 484 /* Again, force OUTPUT device used as source address */
472 skb->dev = dst->dev; 485 skb->dev = dst->dev;
473 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 486 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d6d1f1df9119..ce1c44370610 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1057,7 +1057,8 @@ static void ieee80211_uninit(struct net_device *dev)
1057 1057
1058static u16 ieee80211_netdev_select_queue(struct net_device *dev, 1058static u16 ieee80211_netdev_select_queue(struct net_device *dev,
1059 struct sk_buff *skb, 1059 struct sk_buff *skb,
1060 void *accel_priv) 1060 void *accel_priv,
1061 select_queue_fallback_t fallback)
1061{ 1062{
1062 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); 1063 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
1063} 1064}
@@ -1075,7 +1076,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
1075 1076
1076static u16 ieee80211_monitor_select_queue(struct net_device *dev, 1077static u16 ieee80211_monitor_select_queue(struct net_device *dev,
1077 struct sk_buff *skb, 1078 struct sk_buff *skb,
1078 void *accel_priv) 1079 void *accel_priv,
1080 select_queue_fallback_t fallback)
1079{ 1081{
1080 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1082 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1081 struct ieee80211_local *local = sdata->local; 1083 struct ieee80211_local *local = sdata->local;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6a2bb37506c5..48a6a93db296 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -308,11 +308,27 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
308 return po->xmit == packet_direct_xmit; 308 return po->xmit == packet_direct_xmit;
309} 309}
310 310
311static u16 packet_pick_tx_queue(struct net_device *dev) 311static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
312{ 312{
313 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; 313 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
314} 314}
315 315
316static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
317{
318 const struct net_device_ops *ops = dev->netdev_ops;
319 u16 queue_index;
320
321 if (ops->ndo_select_queue) {
322 queue_index = ops->ndo_select_queue(dev, skb, NULL,
323 __packet_pick_tx_queue);
324 queue_index = netdev_cap_txqueue(dev, queue_index);
325 } else {
326 queue_index = __packet_pick_tx_queue(dev, skb);
327 }
328
329 skb_set_queue_mapping(skb, queue_index);
330}
331
316/* register_prot_hook must be invoked with the po->bind_lock held, 332/* register_prot_hook must be invoked with the po->bind_lock held,
317 * or from a context in which asynchronous accesses to the packet 333 * or from a context in which asynchronous accesses to the packet
318 * socket is not possible (packet_create()). 334 * socket is not possible (packet_create()).
@@ -2285,7 +2301,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2285 } 2301 }
2286 } 2302 }
2287 2303
2288 skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); 2304 packet_pick_tx_queue(dev, skb);
2305
2289 skb->destructor = tpacket_destruct_skb; 2306 skb->destructor = tpacket_destruct_skb;
2290 __packet_set_status(po, ph, TP_STATUS_SENDING); 2307 __packet_set_status(po, ph, TP_STATUS_SENDING);
2291 packet_inc_pending(&po->tx_ring); 2308 packet_inc_pending(&po->tx_ring);
@@ -2499,7 +2516,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2499 skb->dev = dev; 2516 skb->dev = dev;
2500 skb->priority = sk->sk_priority; 2517 skb->priority = sk->sk_priority;
2501 skb->mark = sk->sk_mark; 2518 skb->mark = sk->sk_mark;
2502 skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); 2519
2520 packet_pick_tx_queue(dev, skb);
2503 2521
2504 if (po->has_vnet_hdr) { 2522 if (po->has_vnet_hdr) {
2505 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 2523 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
@@ -3786,7 +3804,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3786 */ 3804 */
3787 if (!tx_ring) 3805 if (!tx_ring)
3788 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); 3806 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3789 break; 3807 break;
3790 default: 3808 default:
3791 break; 3809 break;
3792 } 3810 }
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index a255d0200a59..fefeeb73f15f 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -15,6 +15,11 @@
15 * 15 *
16 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no> 16 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
17 * University of Oslo, Norway. 17 * University of Oslo, Norway.
18 *
19 * References:
20 * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
21 * IEEE Conference on High Performance Switching and Routing 2013 :
22 * "PIE: A Lightweight Control Scheme to Address the Bufferbloat Problem"
18 */ 23 */
19 24
20#include <linux/module.h> 25#include <linux/module.h>
@@ -36,7 +41,7 @@ struct pie_params {
36 psched_time_t target; /* user specified target delay in pschedtime */ 41 psched_time_t target; /* user specified target delay in pschedtime */
37 u32 tupdate; /* timer frequency (in jiffies) */ 42 u32 tupdate; /* timer frequency (in jiffies) */
38 u32 limit; /* number of packets that can be enqueued */ 43 u32 limit; /* number of packets that can be enqueued */
39 u32 alpha; /* alpha and beta are between -4 and 4 */ 44 u32 alpha; /* alpha and beta are between 0 and 32 */
40 u32 beta; /* and are used for shift relative to 1 */ 45 u32 beta; /* and are used for shift relative to 1 */
41 bool ecn; /* true if ecn is enabled */ 46 bool ecn; /* true if ecn is enabled */
42 bool bytemode; /* to scale drop early prob based on pkt size */ 47 bool bytemode; /* to scale drop early prob based on pkt size */
@@ -326,10 +331,16 @@ static void calculate_probability(struct Qdisc *sch)
326 if (qdelay == 0 && qlen != 0) 331 if (qdelay == 0 && qlen != 0)
327 update_prob = false; 332 update_prob = false;
328 333
329 /* Add ranges for alpha and beta, more aggressive for high dropping 334 /* In the algorithm, alpha and beta are between 0 and 2 with typical
330 * mode and gentle steps for light dropping mode 335 * value for alpha as 0.125. In this implementation, we use values 0-32
331 * In light dropping mode, take gentle steps; in medium dropping mode, 336 * passed from user space to represent this. Also, alpha and beta have
332 * take medium steps; in high dropping mode, take big steps. 337 * unit of HZ and need to be scaled before they can be used to update
338 * probability. alpha/beta are updated locally below by 1) scaling them
339 * appropriately 2) scaling down by 16 to come to 0-2 range.
340 * Please see paper for details.
341 *
342 * We scale alpha and beta differently depending on whether we are in
343 * light, medium or high dropping mode.
333 */ 344 */
334 if (q->vars.prob < MAX_PROB / 100) { 345 if (q->vars.prob < MAX_PROB / 100) {
335 alpha = 346 alpha =
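
To make the new comment concrete: user space passes alpha and beta in the 0-32 range, and scaling down by 16 maps that onto the algorithm's 0-2 range, so a user value of alpha=2 corresponds to the paper's typical 0.125. A tiny illustration of that mapping (plain floating point here, purely for readability; the qdisc itself works in fixed point):

	#include <stdio.h>

	int main(void)
	{
		unsigned int user_alpha = 2;	/* 0..32, as passed from user space */
		unsigned int user_beta  = 20;

		/* Dividing by 16 maps 0..32 onto the algorithm's 0..2 range. */
		printf("alpha = %.3f\n", user_alpha / 16.0);	/* 0.125 */
		printf("beta  = %.3f\n", user_beta  / 16.0);	/* 1.250 */
		return 0;
	}
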
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5ae609200674..f558433537b8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1367,44 +1367,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1367 return false; 1367 return false;
1368} 1368}
1369 1369
1370/* Increase asoc's rwnd by len and send any window update SACK if needed. */ 1370/* Update asoc's rwnd for the approximated state in the buffer,
1371void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) 1371 * and check whether SACK needs to be sent.
1372 */
1373void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
1372{ 1374{
1375 int rx_count;
1373 struct sctp_chunk *sack; 1376 struct sctp_chunk *sack;
1374 struct timer_list *timer; 1377 struct timer_list *timer;
1375 1378
1376 if (asoc->rwnd_over) { 1379 if (asoc->ep->rcvbuf_policy)
1377 if (asoc->rwnd_over >= len) { 1380 rx_count = atomic_read(&asoc->rmem_alloc);
1378 asoc->rwnd_over -= len; 1381 else
1379 } else { 1382 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1380 asoc->rwnd += (len - asoc->rwnd_over);
1381 asoc->rwnd_over = 0;
1382 }
1383 } else {
1384 asoc->rwnd += len;
1385 }
1386 1383
1387 /* If we had window pressure, start recovering it 1384 if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
1388 * once our rwnd had reached the accumulated pressure 1385 asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
1389 * threshold. The idea is to recover slowly, but up 1386 else
1390 * to the initial advertised window. 1387 asoc->rwnd = 0;
1391 */
1392 if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1393 int change = min(asoc->pathmtu, asoc->rwnd_press);
1394 asoc->rwnd += change;
1395 asoc->rwnd_press -= change;
1396 }
1397 1388
1398 pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", 1389 pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
1399 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, 1390 __func__, asoc, asoc->rwnd, rx_count,
1400 asoc->a_rwnd); 1391 asoc->base.sk->sk_rcvbuf);
1401 1392
1402 /* Send a window update SACK if the rwnd has increased by at least the 1393 /* Send a window update SACK if the rwnd has increased by at least the
1403 * minimum of the association's PMTU and half of the receive buffer. 1394 * minimum of the association's PMTU and half of the receive buffer.
1404 * The algorithm used is similar to the one described in 1395 * The algorithm used is similar to the one described in
1405 * Section 4.2.3.3 of RFC 1122. 1396 * Section 4.2.3.3 of RFC 1122.
1406 */ 1397 */
1407 if (sctp_peer_needs_update(asoc)) { 1398 if (update_peer && sctp_peer_needs_update(asoc)) {
1408 asoc->a_rwnd = asoc->rwnd; 1399 asoc->a_rwnd = asoc->rwnd;
1409 1400
1410 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " 1401 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
@@ -1426,45 +1417,6 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1426 } 1417 }
1427} 1418}
1428 1419
1429/* Decrease asoc's rwnd by len. */
1430void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1431{
1432 int rx_count;
1433 int over = 0;
1434
1435 if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1436 pr_debug("%s: association:%p has asoc->rwnd:%u, "
1437 "asoc->rwnd_over:%u!\n", __func__, asoc,
1438 asoc->rwnd, asoc->rwnd_over);
1439
1440 if (asoc->ep->rcvbuf_policy)
1441 rx_count = atomic_read(&asoc->rmem_alloc);
1442 else
1443 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1444
1445 /* If we've reached or overflowed our receive buffer, announce
1446 * a 0 rwnd if rwnd would still be positive. Store the
1447 * the potential pressure overflow so that the window can be restored
1448 * back to original value.
1449 */
1450 if (rx_count >= asoc->base.sk->sk_rcvbuf)
1451 over = 1;
1452
1453 if (asoc->rwnd >= len) {
1454 asoc->rwnd -= len;
1455 if (over) {
1456 asoc->rwnd_press += asoc->rwnd;
1457 asoc->rwnd = 0;
1458 }
1459 } else {
1460 asoc->rwnd_over = len - asoc->rwnd;
1461 asoc->rwnd = 0;
1462 }
1463
1464 pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1465 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1466 asoc->rwnd_press);
1467}
1468 1420
1469/* Build the bind address list for the association based on info from the 1421/* Build the bind address list for the association based on info from the
1470 * local endpoint and the remote peer. 1422 * local endpoint and the remote peer.
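
Instead of incrementally adding and subtracting message lengths (the removed sctp_assoc_rwnd_increase/_decrease pair), the association's advertised window is now recomputed from the actual receive-buffer occupancy: half of whatever room remains, or zero. A standalone sketch of that computation, with the socket fields reduced to plain integers:

	#include <stdio.h>

	/* Advertise half of the remaining receive-buffer room, never negative. */
	static unsigned int recompute_rwnd(int sk_rcvbuf, int rx_count)
	{
		if (sk_rcvbuf - rx_count > 0)
			return (unsigned int)(sk_rcvbuf - rx_count) >> 1;
		return 0;
	}

	int main(void)
	{
		/* e.g. a 212992-byte socket buffer with 50000 bytes queued */
		printf("rwnd = %u\n", recompute_rwnd(212992, 50000));	/* 81496 */
		return 0;
	}
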
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 483dcd71b3c5..591b44d3b7de 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -6176,7 +6176,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6176 * PMTU. In cases, such as loopback, this might be a rather 6176 * PMTU. In cases, such as loopback, this might be a rather
6177 * large spill over. 6177 * large spill over.
6178 */ 6178 */
6179 if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || 6179 if ((!chunk->data_accepted) && (!asoc->rwnd ||
6180 (datalen > asoc->rwnd + asoc->frag_point))) { 6180 (datalen > asoc->rwnd + asoc->frag_point))) {
6181 6181
6182 /* If this is the next TSN, consider reneging to make 6182 /* If this is the next TSN, consider reneging to make
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e91d6e5df63..981aaf8b6ace 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -64,6 +64,7 @@
64#include <linux/crypto.h> 64#include <linux/crypto.h>
65#include <linux/slab.h> 65#include <linux/slab.h>
66#include <linux/file.h> 66#include <linux/file.h>
67#include <linux/compat.h>
67 68
68#include <net/ip.h> 69#include <net/ip.h>
69#include <net/icmp.h> 70#include <net/icmp.h>
@@ -1368,11 +1369,19 @@ static int sctp_setsockopt_connectx(struct sock *sk,
1368/* 1369/*
1369 * New (hopefully final) interface for the API. 1370 * New (hopefully final) interface for the API.
1370 * We use the sctp_getaddrs_old structure so that use-space library 1371 * We use the sctp_getaddrs_old structure so that use-space library
1371 * can avoid any unnecessary allocations. The only defferent part 1372 * can avoid any unnecessary allocations. The only different part
1372 * is that we store the actual length of the address buffer into the 1373 * is that we store the actual length of the address buffer into the
1373 * addrs_num structure member. That way we can re-use the existing 1374 * addrs_num structure member. That way we can re-use the existing
1374 * code. 1375 * code.
1375 */ 1376 */
1377#ifdef CONFIG_COMPAT
1378struct compat_sctp_getaddrs_old {
1379 sctp_assoc_t assoc_id;
1380 s32 addr_num;
1381 compat_uptr_t addrs; /* struct sockaddr * */
1382};
1383#endif
1384
1376static int sctp_getsockopt_connectx3(struct sock *sk, int len, 1385static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1377 char __user *optval, 1386 char __user *optval,
1378 int __user *optlen) 1387 int __user *optlen)
@@ -1381,16 +1390,30 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1381 sctp_assoc_t assoc_id = 0; 1390 sctp_assoc_t assoc_id = 0;
1382 int err = 0; 1391 int err = 0;
1383 1392
1384 if (len < sizeof(param)) 1393#ifdef CONFIG_COMPAT
1385 return -EINVAL; 1394 if (is_compat_task()) {
1395 struct compat_sctp_getaddrs_old param32;
1386 1396
1387 if (copy_from_user(&param, optval, sizeof(param))) 1397 if (len < sizeof(param32))
1388 return -EFAULT; 1398 return -EINVAL;
1399 if (copy_from_user(&param32, optval, sizeof(param32)))
1400 return -EFAULT;
1389 1401
1390 err = __sctp_setsockopt_connectx(sk, 1402 param.assoc_id = param32.assoc_id;
1391 (struct sockaddr __user *)param.addrs, 1403 param.addr_num = param32.addr_num;
1392 param.addr_num, &assoc_id); 1404 param.addrs = compat_ptr(param32.addrs);
1405 } else
1406#endif
1407 {
1408 if (len < sizeof(param))
1409 return -EINVAL;
1410 if (copy_from_user(&param, optval, sizeof(param)))
1411 return -EFAULT;
1412 }
1393 1413
1414 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
1415 param.addrs, param.addr_num,
1416 &assoc_id);
1394 if (err == 0 || err == -EINPROGRESS) { 1417 if (err == 0 || err == -EINPROGRESS) {
1395 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1418 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
1396 return -EFAULT; 1419 return -EFAULT;
@@ -2092,12 +2115,6 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
2092 sctp_skb_pull(skb, copied); 2115 sctp_skb_pull(skb, copied);
2093 skb_queue_head(&sk->sk_receive_queue, skb); 2116 skb_queue_head(&sk->sk_receive_queue, skb);
2094 2117
2095 /* When only partial message is copied to the user, increase
2096 * rwnd by that amount. If all the data in the skb is read,
2097 * rwnd is updated when the event is freed.
2098 */
2099 if (!sctp_ulpevent_is_notification(event))
2100 sctp_assoc_rwnd_increase(event->asoc, copied);
2101 goto out; 2118 goto out;
2102 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2119 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
2103 (event->msg_flags & MSG_EOR)) 2120 (event->msg_flags & MSG_EOR))
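
The connectx3 fix reads a 32-bit layout of sctp_getaddrs_old when the caller is a compat task and then widens the embedded pointer with compat_ptr(). A userspace-flavoured sketch of the same widening pattern; the struct layouts and helper name are illustrative, not the kernel's:

	#include <stdint.h>

	struct getaddrs64 {
		int32_t assoc_id;
		int32_t addr_num;
		void   *addrs;
	};

	struct getaddrs32 {			/* what a 32-bit caller laid out */
		int32_t  assoc_id;
		int32_t  addr_num;
		uint32_t addrs;			/* 32-bit user pointer */
	};

	/* Widen the 32-bit request into the native 64-bit representation. */
	static void widen_request(struct getaddrs64 *out, const struct getaddrs32 *in)
	{
		out->assoc_id = in->assoc_id;
		out->addr_num = in->addr_num;
		out->addrs    = (void *)(uintptr_t)in->addrs;	/* like compat_ptr() */
	}
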
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7135e617ab0f..35c8923b5554 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -151,6 +151,7 @@ static struct ctl_table sctp_net_table[] = {
151 }, 151 },
152 { 152 {
153 .procname = "cookie_hmac_alg", 153 .procname = "cookie_hmac_alg",
154 .data = &init_net.sctp.sctp_hmac_alg,
154 .maxlen = 8, 155 .maxlen = 8,
155 .mode = 0644, 156 .mode = 0644,
156 .proc_handler = proc_sctp_do_hmac_alg, 157 .proc_handler = proc_sctp_do_hmac_alg,
@@ -401,15 +402,18 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
401 402
402int sctp_sysctl_net_register(struct net *net) 403int sctp_sysctl_net_register(struct net *net)
403{ 404{
404 struct ctl_table *table; 405 struct ctl_table *table = sctp_net_table;
405 int i; 406
407 if (!net_eq(net, &init_net)) {
408 int i;
406 409
407 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); 410 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
408 if (!table) 411 if (!table)
409 return -ENOMEM; 412 return -ENOMEM;
410 413
411 for (i = 0; table[i].data; i++) 414 for (i = 0; table[i].data; i++)
412 table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; 415 table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
416 }
413 417
414 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); 418 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
415 return 0; 419 return 0;
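
The sysctl fix only duplicates and rebases the table for non-init namespaces; the rebasing itself is plain pointer arithmetic, shifting each .data pointer from init_net's sctp block to the new namespace's. A small sketch of that rebasing trick with toy structures (undefined-behaviour pedantry aside, this is exactly the offset arithmetic the kernel line performs):

	#include <stdio.h>

	struct sctp_cfg { int rto_max; char hmac_alg[8]; };
	struct netns    { struct sctp_cfg sctp; };

	static struct netns init_net;

	int main(void)
	{
		struct netns other;
		/* A "table entry" that initially points into init_net. */
		void *data = &init_net.sctp.rto_max;

		/* Rebase: same offset within .sctp, but inside 'other'. */
		data = (char *)data + ((char *)&other.sctp - (char *)&init_net.sctp);

		printf("%s\n", data == &other.sctp.rto_max ? "rebased" : "bug");
		return 0;
	}
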
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..8d198ae03606 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
989 skb = sctp_event2skb(event); 989 skb = sctp_event2skb(event);
990 /* Set the owner and charge rwnd for bytes received. */ 990 /* Set the owner and charge rwnd for bytes received. */
991 sctp_ulpevent_set_owner(event, asoc); 991 sctp_ulpevent_set_owner(event, asoc);
992 sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); 992 sctp_assoc_rwnd_update(asoc, false);
993 993
994 if (!skb->data_len) 994 if (!skb->data_len)
995 return; 995 return;
@@ -1011,6 +1011,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
1011{ 1011{
1012 struct sk_buff *skb, *frag; 1012 struct sk_buff *skb, *frag;
1013 unsigned int len; 1013 unsigned int len;
1014 struct sctp_association *asoc;
1014 1015
1015 /* Current stack structures assume that the rcv buffer is 1016 /* Current stack structures assume that the rcv buffer is
1016 * per socket. For UDP style sockets this is not true as 1017 * per socket. For UDP style sockets this is not true as
@@ -1035,8 +1036,11 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
1035 } 1036 }
1036 1037
1037done: 1038done:
1038 sctp_assoc_rwnd_increase(event->asoc, len); 1039 asoc = event->asoc;
1040 sctp_association_hold(asoc);
1039 sctp_ulpevent_release_owner(event); 1041 sctp_ulpevent_release_owner(event);
1042 sctp_assoc_rwnd_update(asoc, true);
1043 sctp_association_put(asoc);
1040} 1044}
1041 1045
1042static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) 1046static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
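
Because sctp_ulpevent_release_owner() may drop the last reference keeping the association alive, the rwnd update is now bracketed by an explicit hold/put on a saved pointer. A generic sketch of that "pin before releasing the borrowing object" pattern, with refcounting reduced to a plain counter:

	#include <stdio.h>

	struct assoc { int refcnt; };

	static void assoc_hold(struct assoc *a) { a->refcnt++; }

	static void assoc_put(struct assoc *a)
	{
		if (--a->refcnt == 0)
			printf("association freed\n");
	}

	struct event { struct assoc *asoc; };

	static void release_owner(struct event *ev)  { assoc_put(ev->asoc); }
	static void rwnd_update(struct assoc *a)     { (void)a; /* touches *a */ }

	static void release_data(struct event *ev)
	{
		struct assoc *asoc = ev->asoc;

		assoc_hold(asoc);	/* pin: release_owner() may drop the last ref */
		release_owner(ev);
		rwnd_update(asoc);	/* still safe to dereference */
		assoc_put(asoc);
	}
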
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1ff477b0450d..5569d96b4da3 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -192,6 +192,7 @@ static inline void k_term_timer(struct timer_list *timer)
192 192
193struct tipc_skb_cb { 193struct tipc_skb_cb {
194 void *handle; 194 void *handle;
195 bool deferred;
195}; 196};
196 197
197#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 198#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/link.c b/net/tipc/link.c
index d4b5de41b682..da6018beb6eb 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1391,6 +1391,12 @@ static int link_recv_buf_validate(struct sk_buff *buf)
1391 u32 hdr_size; 1391 u32 hdr_size;
1392 u32 min_hdr_size; 1392 u32 min_hdr_size;
1393 1393
1394 /* If this packet comes from the defer queue, the skb has already
1395 * been validated
1396 */
1397 if (unlikely(TIPC_SKB_CB(buf)->deferred))
1398 return 1;
1399
1394 if (unlikely(buf->len < MIN_H_SIZE)) 1400 if (unlikely(buf->len < MIN_H_SIZE))
1395 return 0; 1401 return 0;
1396 1402
@@ -1703,6 +1709,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1703 &l_ptr->newest_deferred_in, buf)) { 1709 &l_ptr->newest_deferred_in, buf)) {
1704 l_ptr->deferred_inqueue_sz++; 1710 l_ptr->deferred_inqueue_sz++;
1705 l_ptr->stats.deferred_recv++; 1711 l_ptr->stats.deferred_recv++;
1712 TIPC_SKB_CB(buf)->deferred = true;
1706 if ((l_ptr->deferred_inqueue_sz % 16) == 1) 1713 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1707 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1714 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1708 } else 1715 } else
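
The TIPC fix tags a buffer when it goes onto the deferred queue so that validation is not repeated when the buffer is pulled back off; the flag lives in the per-skb control block. A minimal sketch of that validate-once pattern, with illustrative names and a simplified size check standing in for the real header validation:

	#include <stdbool.h>

	struct toy_buf {
		unsigned int len;
		bool deferred;		/* already validated before being deferred */
	};

	#define MIN_H_SIZE 24

	static bool buf_validate(struct toy_buf *buf)
	{
		if (buf->deferred)	/* validated once already; don't redo it */
			return true;

		return buf->len >= MIN_H_SIZE;
	}

	static void defer_buf(struct toy_buf *buf)
	{
		buf->deferred = true;	/* mark before queueing on the defer list */
	}
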