author	Linus Torvalds <torvalds@linux-foundation.org>	2012-04-06 13:37:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-04-06 13:37:38 -0400
commit	23f347ef63aa36b5a001b6791f657cd0e2a04de3 (patch)
tree	ce06ebdccd16b99265b3e74f8e9b7bd1e29cf465
parent	314489bd4c7780fde6a069783d5128f6cef52919 (diff)
parent	110c43304db6f06490961529536c362d9ac5732f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:

 1) Fix inaccuracies in network driver interface documentation, from
    Ben Hutchings.

 2) Fix handling of negative offsets in BPF JITs, from Jan Seiffert.

 3) Compile warning, locking, and refcounting fixes in netfilter's
    xt_CT, from Pablo Neira Ayuso.

 4) phonet sendmsg needs to validate user length just like any other
    datagram protocol, fix from Sasha Levin.

 5) Ipv6 multicast code uses wrong loop index, from RongQing Li.

 6) Link handling and firmware fixes in bnx2x driver from Yaniv Rosner
    and Yuval Mintz.

 7) mlx4 erroneously allocates 4 pages at a time, regardless of page
    size, fix from Thadeu Lima de Souza Cascardo.

 8) SCTP socket option wasn't extended in a backwards compatible way,
    fix from Thomas Graf.

 9) Add missing address change event emissions to bonding, from Shlomo
    Pongratz.

10) /proc/net/dev regressed because it uses a private offset to track
    where we are in the hash table, but this doesn't track the offset
    pullback that the seq_file code does, resulting in some entries
    being missed in large dumps.  Fix from Eric Dumazet.

11) do_tcp_sendpage() unloads the send queue way too fast, because it
    invokes tcp_push() when it shouldn't.  Let the natural sequence
    generated by the splice paths, and the associated MSG_MORE
    settings, guide the tcp_push() calls.  Otherwise what goes out of
    TCP is spaghetti and doesn't batch effectively into GSO/TSO
    clusters.  From Eric Dumazet.

12) Once we put a SKB into either the netlink receiver's queue or a
    socket error queue, it can be consumed and freed up, therefore we
    cannot touch it after queueing it like that.  Fixes from Eric
    Dumazet (see the sketch after the commit list below).

13) PPP has this annoying behavior in that for every transmit call it
    immediately stops the TX queue, then calls down into the next
    layer to transmit the PPP frame.  But if that next layer can take
    it immediately, it just un-stops the TX queue right before
    returning from the transmit method.

    Besides being useless work, it makes several facilities unusable,
    in particular things like the equalizers.  Well behaved devices
    should only stop the TX queue when they really are full, and in
    PPP's case when it gets backlogged to the downstream device.

    David Woodhouse therefore fixed PPP to not stop the TX queue until
    its downstream can't take data any more.

14) IFF_UNICAST_FLT got accidentally lost in some recent stmmac driver
    changes, re-add.  From Marc Kleine-Budde.

15) Fix link flaps in ixgbe, from Eric W. Multanen.

16) Descriptor writeback fixes in e1000e from Matthew Vick.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (47 commits)
  net: fix a race in sock_queue_err_skb()
  netlink: fix races after skb queueing
  doc, net: Update ndo_start_xmit return type and values
  doc, net: Remove instruction to set net_device::trans_start
  doc, net: Update netdev operation names
  doc, net: Update documentation of synchronisation for TX multiqueue
  doc, net: Remove obsolete reference to dev->poll
  ethtool: Remove exception to the requirement of holding RTNL lock
  MAINTAINERS: update for Marvell Ethernet drivers
  bonding: properly unset current_arp_slave on slave link up
  phonet: Check input from user before allocating
  tcp: tcp_sendpages() should call tcp_push() once
  ipv6: fix array index in ip6_mc_add_src()
  mlx4: allocate just enough pages instead of always 4 pages
  stmmac: re-add IFF_UNICAST_FLT for dwmac1000
  bnx2x: Clear MDC/MDIO warning message
  bnx2x: Fix BCM57711+BCM84823 link issue
  bnx2x: Clear BCM84833 LED after fan failure
  bnx2x: Fix BCM84833 PHY FW version presentation
  bnx2x: Fix link issue for BCM8727 boards.
  ...
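Item 12 above is the classic queue-then-touch race: once sock_queue_err_skb()
(or netlink's queueing) has made the skb visible to a reader, that reader may
consume and free it at any moment, so the producer must read anything it still
needs before queueing.  A minimal sketch of the broken and corrected shapes
(the reporting helpers here are hypothetical; only sock_queue_err_skb() and
the skb fields are real kernel identifiers):

	#include <linux/skbuff.h>
	#include <net/sock.h>

	static void report_queued_buggy(struct sock *sk, struct sk_buff *skb)
	{
		sock_queue_err_skb(sk, skb);
		/* BUG: the error-queue reader may have freed skb already */
		pr_debug("queued %u bytes\n", skb->len);
	}

	static void report_queued_fixed(struct sock *sk, struct sk_buff *skb)
	{
		unsigned int len = skb->len;	/* snapshot before queueing */

		sock_queue_err_skb(sk, skb);	/* ownership transfers here */
		pr_debug("queued %u bytes\n", len);
	}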
-rw-r--r--  Documentation/networking/driver.txt | 31
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 11
-rw-r--r--  Documentation/networking/netdevices.txt | 25
-rw-r--r--  MAINTAINERS | 19
-rw-r--r--  arch/x86/net/bpf_jit.S | 122
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 41
-rw-r--r--  drivers/net/bonding/bond_main.c | 60
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 110
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 147
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 164
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r--  drivers/net/phy/icplus.c | 3
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 14
-rw-r--r--  fs/splice.c | 5
-rw-r--r--  include/linux/ethtool.h | 3
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/netfilter/xt_set.h | 4
-rw-r--r--  include/linux/socket.h | 2
-rw-r--r--  include/net/netfilter/xt_log.h | 2
-rw-r--r--  net/core/dev.c | 58
-rw-r--r--  net/core/dev_addr_lists.c | 3
-rw-r--r--  net/core/filter.c | 9
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv6/mcast.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 1
-rw-r--r--  net/netfilter/xt_CT.c | 28
-rw-r--r--  net/netlink/af_netlink.c | 24
-rw-r--r--  net/phonet/pep.c | 3
-rw-r--r--  net/sctp/socket.c | 5
-rw-r--r--  net/socket.c | 6
38 files changed, 602 insertions, 376 deletions
diff --git a/Documentation/networking/driver.txt b/Documentation/networking/driver.txt
index 03283daa64fe..da59e2884130 100644
--- a/Documentation/networking/driver.txt
+++ b/Documentation/networking/driver.txt
@@ -2,16 +2,16 @@ Document about softnet driver issues
 
 Transmit path guidelines:
 
-1) The hard_start_xmit method must never return '1' under any
-   normal circumstances.  It is considered a hard error unless
+1) The ndo_start_xmit method must not return NETDEV_TX_BUSY under
+   any normal circumstances.  It is considered a hard error unless
    there is no way your device can tell ahead of time when it's
    transmit function will become busy.
 
    Instead it must maintain the queue properly.  For example,
    for a driver implementing scatter-gather this means:
 
-	static int drv_hard_start_xmit(struct sk_buff *skb,
-				       struct net_device *dev)
+	static netdev_tx_t drv_hard_start_xmit(struct sk_buff *skb,
+					       struct net_device *dev)
 	{
 		struct drv *dp = netdev_priv(dev);
 
@@ -23,7 +23,7 @@ Transmit path guidelines:
 			unlock_tx(dp);
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 			       dev->name);
-			return 1;
+			return NETDEV_TX_BUSY;
 		}
 
 		... queue packet to card ...
@@ -35,6 +35,7 @@ Transmit path guidelines:
 		...
 		unlock_tx(dp);
 		...
+		return NETDEV_TX_OK;
 	}
 
    And then at the end of your TX reclamation event handling:
@@ -58,15 +59,12 @@ Transmit path guidelines:
 	    TX_BUFFS_AVAIL(dp) > 0)
 		netif_wake_queue(dp->dev);
 
-2) Do not forget to update netdev->trans_start to jiffies after
-   each new tx packet is given to the hardware.
-
-3) A hard_start_xmit method must not modify the shared parts of a
+2) An ndo_start_xmit method must not modify the shared parts of a
    cloned SKB.
 
-4) Do not forget that once you return 0 from your hard_start_xmit
-   method, it is your driver's responsibility to free up the SKB
-   and in some finite amount of time.
+3) Do not forget that once you return NETDEV_TX_OK from your
+   ndo_start_xmit method, it is your driver's responsibility to free
+   up the SKB and in some finite amount of time.
 
    For example, this means that it is not allowed for your TX
    mitigation scheme to let TX packets "hang out" in the TX
@@ -74,8 +72,9 @@ Transmit path guidelines:
    This error can deadlock sockets waiting for send buffer room
    to be freed up.
 
-   If you return 1 from the hard_start_xmit method, you must not keep
-   any reference to that SKB and you must not attempt to free it up.
+   If you return NETDEV_TX_BUSY from the ndo_start_xmit method, you
+   must not keep any reference to that SKB and you must not attempt
+   to free it up.
 
 Probing guidelines:
 
@@ -85,10 +84,10 @@ Probing guidelines:
 
 Close/stop guidelines:
 
-1) After the dev->stop routine has been called, the hardware must
+1) After the ndo_stop routine has been called, the hardware must
    not receive or transmit any data.  All in flight packets must
    be aborted.  If necessary, poll or wait for completion of
    any reset commands.
 
-2) The dev->stop routine will be called by unregister_netdevice
+2) The ndo_stop routine will be called by unregister_netdevice
    if device is still UP.
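The reclamation rule in guideline 3) pairs with the queue handling shown in
the drv_hard_start_xmit example above.  A sketch of the matching
TX-completion side, reusing this document's placeholder names (drv, lock_tx,
TX_BUFFS_AVAIL) plus hypothetical ring helpers:

	static void drv_tx_reclaim(struct drv *dp)
	{
		lock_tx(dp);
		while (tx_ring_has_completed(dp)) {	/* hypothetical helper */
			struct sk_buff *skb = tx_ring_pop_completed(dp);

			dev_kfree_skb(skb);	/* free in finite time, per 3) */
		}
		/* wake only once enough room exists, as the guideline shows */
		if (netif_queue_stopped(dp->dev) &&
		    TX_BUFFS_AVAIL(dp) > 0)
			netif_wake_queue(dp->dev);
		unlock_tx(dp);
	}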
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ad3e80e17b4f..bd80ba5847d2 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -604,15 +604,8 @@ IP Variables:
 ip_local_port_range - 2 INTEGERS
 	Defines the local port range that is used by TCP and UDP to
 	choose the local port. The first number is the first, the
-	second the last local port number. Default value depends on
-	amount of memory available on the system:
-	> 128Mb 32768-61000
-	< 128Mb 1024-4999 or even less.
-	This number defines number of active connections, which this
-	system can issue simultaneously to systems not supporting
-	TCP extensions (timestamps). With tcp_tw_recycle enabled
-	(i.e. by default) range 1024-4999 is enough to issue up to
-	2000 connections per second to systems supporting timestamps.
+	second the last local port number. The default values are
+	32768 and 61000 respectively.
 
 ip_local_reserved_ports - list of comma separated ranges
 	Specify the ports which are reserved for known third-party
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index 89358341682a..c7ecc7080494 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -47,26 +47,25 @@ packets is preferred.
 
 struct net_device synchronization rules
 =======================================
-dev->open:
+ndo_open:
 	Synchronization: rtnl_lock() semaphore.
 	Context: process
 
-dev->stop:
+ndo_stop:
 	Synchronization: rtnl_lock() semaphore.
 	Context: process
-	Note1: netif_running() is guaranteed false
-	Note2: dev->poll() is guaranteed to be stopped
+	Note: netif_running() is guaranteed false
 
-dev->do_ioctl:
+ndo_do_ioctl:
 	Synchronization: rtnl_lock() semaphore.
 	Context: process
 
-dev->get_stats:
+ndo_get_stats:
 	Synchronization: dev_base_lock rwlock.
 	Context: nominally process, but don't sleep inside an rwlock
 
-dev->hard_start_xmit:
-	Synchronization: netif_tx_lock spinlock.
+ndo_start_xmit:
+	Synchronization: __netif_tx_lock spinlock.
 
 	When the driver sets NETIF_F_LLTX in dev->features this will be
 	called without holding netif_tx_lock. In this case the driver
@@ -87,20 +86,20 @@ dev->hard_start_xmit:
 	  o NETDEV_TX_LOCKED Locking failed, please retry quickly.
 	    Only valid when NETIF_F_LLTX is set.
 
-dev->tx_timeout:
-	Synchronization: netif_tx_lock spinlock.
+ndo_tx_timeout:
+	Synchronization: netif_tx_lock spinlock; all TX queues frozen.
 	Context: BHs disabled
 	Notes: netif_queue_stopped() is guaranteed true
 
-dev->set_rx_mode:
-	Synchronization: netif_tx_lock spinlock.
+ndo_set_rx_mode:
+	Synchronization: netif_addr_lock spinlock.
 	Context: BHs disabled
 
 struct napi_struct synchronization rules
 ========================================
 napi->poll:
 	Synchronization: NAPI_STATE_SCHED bit in napi->state.  Device
-	driver's dev->close method will invoke napi_disable() on
+	driver's ndo_stop method will invoke napi_disable() on
 	all NAPI instances which will do a sleeping poll on the
 	NAPI_STATE_SCHED napi->state bit, waiting for all pending
 	NAPI activity to cease.
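A minimal sketch of how the ndo_stop/napi->poll contract above typically
looks in a driver (my_priv and the hardware helpers are illustrative, not a
real driver):

	static int my_stop(struct net_device *dev)	/* rtnl_lock() held */
	{
		struct my_priv *p = netdev_priv(dev);

		netif_stop_queue(dev);
		napi_disable(&p->napi);	/* sleeps until NAPI_STATE_SCHED clears */
		/* no poll is running or schedulable past this point */
		my_hw_reset(p);		/* hypothetical: abort in-flight packets */
		return 0;
	}

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int done = my_rx_process(napi, budget);	/* hypothetical */

		if (done < budget)
			napi_complete(napi);	/* clears NAPI_STATE_SCHED */
		return done;
	}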
diff --git a/MAINTAINERS b/MAINTAINERS
index 3e25ba82e0fa..6d05ae236036 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4309,6 +4309,13 @@ W: http://www.kernel.org/doc/man-pages
 L:	linux-man@vger.kernel.org
 S:	Maintained
 
+MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
+M:	Mirko Lindner <mlindner@marvell.com>
+M:	Stephen Hemminger <shemminger@vyatta.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/marvell/sk*
+
 MARVELL LIBERTAS WIRELESS DRIVER
 M:	Dan Williams <dcbw@redhat.com>
 L:	libertas-dev@lists.infradead.org
@@ -4339,12 +4346,6 @@ M: Nicolas Pitre <nico@fluxnic.net>
 S:	Odd Fixes
 F:	drivers/mmc/host/mvsdio.*
 
-MARVELL YUKON / SYSKONNECT DRIVER
-M:	Mirko Lindner <mlindner@syskonnect.de>
-M:	Ralph Roesler <rroesler@syskonnect.de>
-W:	http://www.syskonnect.com
-S:	Supported
-
 MATROX FRAMEBUFFER DRIVER
 L:	linux-fbdev@vger.kernel.org
 S:	Orphan
@@ -6116,12 +6117,6 @@ W: http://www.winischhofer.at/linuxsisusbvga.shtml
 S:	Maintained
 F:	drivers/usb/misc/sisusbvga/
 
-SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
-M:	Stephen Hemminger <shemminger@vyatta.com>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/marvell/sk*
-
 SLAB ALLOCATOR
 M:	Christoph Lameter <cl@linux-foundation.org>
 M:	Pekka Enberg <penberg@kernel.org>
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 66870223f8c5..877b9a1b2152 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -18,17 +18,17 @@
  * r9d : hlen = skb->len - skb->data_len
  */
 #define SKBDATA	%r8
-
-sk_load_word_ind:
-	.globl	sk_load_word_ind
-
-	add	%ebx,%esi	/* offset += X */
-#	test	%esi,%esi	/* if (offset < 0) goto bpf_error; */
-	js	bpf_error
+#define SKF_MAX_NEG_OFF    $(-0x200000) /* SKF_LL_OFF from filter.h */
 
 sk_load_word:
 	.globl	sk_load_word
 
+	test	%esi,%esi
+	js	bpf_slow_path_word_neg
+
+sk_load_word_positive_offset:
+	.globl	sk_load_word_positive_offset
+
 	mov	%r9d,%eax		# hlen
 	sub	%esi,%eax		# hlen - offset
 	cmp	$3,%eax
@@ -37,16 +37,15 @@ sk_load_word:
 	bswap	%eax			/* ntohl() */
 	ret
 
-
-sk_load_half_ind:
-	.globl	sk_load_half_ind
-
-	add	%ebx,%esi	/* offset += X */
-	js	bpf_error
-
 sk_load_half:
 	.globl	sk_load_half
 
+	test	%esi,%esi
+	js	bpf_slow_path_half_neg
+
+sk_load_half_positive_offset:
+	.globl	sk_load_half_positive_offset
+
 	mov	%r9d,%eax
 	sub	%esi,%eax		# hlen - offset
 	cmp	$1,%eax
@@ -55,14 +54,15 @@ sk_load_half:
 	rol	$8,%ax			# ntohs()
 	ret
 
-sk_load_byte_ind:
-	.globl	sk_load_byte_ind
-	add	%ebx,%esi	/* offset += X */
-	js	bpf_error
-
 sk_load_byte:
 	.globl	sk_load_byte
 
+	test	%esi,%esi
+	js	bpf_slow_path_byte_neg
+
+sk_load_byte_positive_offset:
+	.globl	sk_load_byte_positive_offset
+
 	cmp	%esi,%r9d	/* if (offset >= hlen) goto bpf_slow_path_byte */
 	jle	bpf_slow_path_byte
 	movzbl	(SKBDATA,%rsi),%eax
@@ -73,25 +73,21 @@ sk_load_byte:
  *
  * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
  * Must preserve A accumulator (%eax)
- * Inputs : %esi is the offset value, already known positive
+ * Inputs : %esi is the offset value
  */
-ENTRY(sk_load_byte_msh)
-	CFI_STARTPROC
+sk_load_byte_msh:
+	.globl	sk_load_byte_msh
+	test	%esi,%esi
+	js	bpf_slow_path_byte_msh_neg
+
+sk_load_byte_msh_positive_offset:
+	.globl	sk_load_byte_msh_positive_offset
 	cmp	%esi,%r9d      /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
 	jle	bpf_slow_path_byte_msh
 	movzbl	(SKBDATA,%rsi),%ebx
 	and	$15,%bl
 	shl	$2,%bl
 	ret
-	CFI_ENDPROC
-ENDPROC(sk_load_byte_msh)
-
-bpf_error:
-# force a return 0 from jit handler
-	xor	%eax,%eax
-	mov	-8(%rbp),%rbx
-	leaveq
-	ret
 
 /* rsi contains offset and can be scratched */
 #define bpf_slow_path_common(LEN)		\
@@ -138,3 +134,67 @@ bpf_slow_path_byte_msh:
 	shl	$2,%al
 	xchg	%eax,%ebx
 	ret
+
+#define sk_negative_common(SIZE)				\
+	push	%rdi;	/* save skb */				\
+	push	%r9;						\
+	push	SKBDATA;					\
+/* rsi already has offset */					\
+	mov	$SIZE,%ecx;	/* size */			\
+	call	bpf_internal_load_pointer_neg_helper;		\
+	test	%rax,%rax;					\
+	pop	SKBDATA;					\
+	pop	%r9;						\
+	pop	%rdi;						\
+	jz	bpf_error
+
+
+bpf_slow_path_word_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi	/* test range */
+	jl	bpf_error	/* offset lower -> error */
+sk_load_word_negative_offset:
+	.globl	sk_load_word_negative_offset
+	sk_negative_common(4)
+	mov	(%rax), %eax
+	bswap	%eax
+	ret
+
+bpf_slow_path_half_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi
+	jl	bpf_error
+sk_load_half_negative_offset:
+	.globl	sk_load_half_negative_offset
+	sk_negative_common(2)
+	mov	(%rax),%ax
+	rol	$8,%ax
+	movzwl	%ax,%eax
+	ret
+
+bpf_slow_path_byte_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi
+	jl	bpf_error
+sk_load_byte_negative_offset:
+	.globl	sk_load_byte_negative_offset
+	sk_negative_common(1)
+	movzbl	(%rax), %eax
+	ret
+
+bpf_slow_path_byte_msh_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi
+	jl	bpf_error
+sk_load_byte_msh_negative_offset:
+	.globl	sk_load_byte_msh_negative_offset
+	xchg	%eax,%ebx /* dont lose A , X is about to be scratched */
+	sk_negative_common(1)
+	movzbl	(%rax),%eax
+	and	$15,%al
+	shl	$2,%al
+	xchg	%eax,%ebx
+	ret
+
+bpf_error:
+# force a return 0 from jit handler
+	xor	%eax,%eax
+	mov	-8(%rbp),%rbx
+	leaveq
+	ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5a5b6e4dd738..0597f95b6da6 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -30,7 +30,10 @@ int bpf_jit_enable __read_mostly;
  * assembly code in arch/x86/net/bpf_jit.S
  */
 extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
-extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
+extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
+extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
+extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
+extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
 
 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
@@ -117,6 +120,8 @@ static inline void bpf_flush_icache(void *start, void *end)
 	set_fs(old_fs);
 }
 
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 void bpf_jit_compile(struct sk_filter *fp)
 {
@@ -473,44 +478,46 @@ void bpf_jit_compile(struct sk_filter *fp)
 #endif
 			break;
 		case BPF_S_LD_W_ABS:
-			func = sk_load_word;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 common_load:		seen |= SEEN_DATAREF;
-			if ((int)K < 0) {
-				/* Abort the JIT because __load_pointer() is needed. */
-				goto out;
-			}
 			t_offset = func - (image + addrs[i]);
 			EMIT1_off32(0xbe, K); /* mov imm32,%esi */
 			EMIT1_off32(0xe8, t_offset); /* call */
 			break;
 		case BPF_S_LD_H_ABS:
-			func = sk_load_half;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
 		case BPF_S_LD_B_ABS:
-			func = sk_load_byte;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 			goto common_load;
 		case BPF_S_LDX_B_MSH:
-			if ((int)K < 0) {
-				/* Abort the JIT because __load_pointer() is needed. */
-				goto out;
-			}
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			seen |= SEEN_DATAREF | SEEN_XREG;
-			t_offset = sk_load_byte_msh - (image + addrs[i]);
+			t_offset = func - (image + addrs[i]);
 			EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
 			EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
 			break;
 		case BPF_S_LD_W_IND:
-			func = sk_load_word_ind;
+			func = sk_load_word;
 common_load_ind:	seen |= SEEN_DATAREF | SEEN_XREG;
 			t_offset = func - (image + addrs[i]);
-			EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
+			if (K) {
+				if (is_imm8(K)) {
+					EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
+				} else {
+					EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
+					EMIT(K, 4);
+				}
+			} else {
+				EMIT2(0x89,0xde); /* mov %ebx,%esi */
+			}
 			EMIT1_off32(0xe8, t_offset);	/* call sk_load_xxx_ind */
 			break;
 		case BPF_S_LD_H_IND:
-			func = sk_load_half_ind;
+			func = sk_load_half;
 			goto common_load_ind;
 		case BPF_S_LD_B_IND:
-			func = sk_load_byte_ind;
+			func = sk_load_byte;
 			goto common_load_ind;
 		case BPF_S_JMP_JA:
 			t_offset = addrs[i + K] - addrs[i];
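The CHOOSE_LOAD_FUNC() macro added above picks the assembly entry point at
JIT-compile time from the sign of the constant offset K; expanded by hand for
the word case it reads (illustrative only):

	func = (int)K < 0
		? ((int)K >= SKF_LL_OFF ? sk_load_word_negative_offset
					: sk_load_word)
		: sk_load_word_positive_offset;

A known-positive K skips the runtime sign test entirely, a negative K inside
the ancillary range goes straight to the bpf_internal_load_pointer_neg_helper()
based path, and anything below SKF_LL_OFF falls through sk_load_word's sign
test into the negative slow path, whose range check then lands in bpf_error.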
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 941b4e189adf..62d2409bb293 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2034,6 +2034,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	write_unlock_bh(&bond->lock);
 	unblock_netpoll_tx();
 
+	if (bond->slave_cnt == 0)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
+
 	bond_compute_features(bond);
 	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
 	    (old_features & NETIF_F_VLAN_CHALLENGED))
@@ -3007,7 +3010,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
 					     trans_start + delta_in_ticks)) ||
 			    bond->curr_active_slave != slave) {
 				slave->link = BOND_LINK_UP;
-				bond->current_arp_slave = NULL;
+				if (bond->current_arp_slave) {
+					bond_set_slave_inactive_flags(
+						bond->current_arp_slave);
+					bond->current_arp_slave = NULL;
+				}
 
 				pr_info("%s: link status definitely up for interface %s.\n",
 					bond->dev->name, slave->dev->name);
@@ -3701,17 +3708,52 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
 	read_unlock(&bond->lock);
 }
 
-static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms)
+static int bond_neigh_init(struct neighbour *n)
 {
-	struct bonding *bond = netdev_priv(dev);
+	struct bonding *bond = netdev_priv(n->dev);
 	struct slave *slave = bond->first_slave;
+	const struct net_device_ops *slave_ops;
+	struct neigh_parms parms;
+	int ret;
+
+	if (!slave)
+		return 0;
+
+	slave_ops = slave->dev->netdev_ops;
+
+	if (!slave_ops->ndo_neigh_setup)
+		return 0;
+
+	parms.neigh_setup = NULL;
+	parms.neigh_cleanup = NULL;
+	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
+	if (ret)
+		return ret;
+
+	/*
+	 * Assign slave's neigh_cleanup to neighbour in case cleanup is called
+	 * after the last slave has been detached.  Assumes that all slaves
+	 * utilize the same neigh_cleanup (true at this writing as only user
+	 * is ipoib).
+	 */
+	n->parms->neigh_cleanup = parms.neigh_cleanup;
+
+	if (!parms.neigh_setup)
+		return 0;
+
+	return parms.neigh_setup(n);
+}
+
+/*
+ * The bonding ndo_neigh_setup is called at init time before any
+ * slave exists. So we must declare proxy setup function which will
+ * be used at run time to resolve the actual slave neigh param setup.
+ */
+static int bond_neigh_setup(struct net_device *dev,
+			    struct neigh_parms *parms)
+{
+	parms->neigh_setup = bond_neigh_init;
 
-	if (slave) {
-		const struct net_device_ops *slave_ops
-			= slave->dev->netdev_ops;
-		if (slave_ops->ndo_neigh_setup)
-			return slave_ops->ndo_neigh_setup(slave->dev, parms);
-	}
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 44556b719e81..4b054812713a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1874,7 +1874,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		 * bnx2x_periodic_task().
 		 */
 		smp_mb();
-		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 	} else
 		bp->port.pmf = 0;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index cd6dfa9eaa3a..b9b263323436 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -25,31 +25,31 @@
 	(IRO[149].base + ((funcId) * IRO[149].m1))
 #define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[315].base + ((pfId) * IRO[315].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
 	(IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[317].base + ((pfId) * IRO[317].m1))
 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
-	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-	(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
+	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
 #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
-	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+	(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
-	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[314].base + ((pfId) * IRO[314].m1))
+	(IRO[315].base + ((pfId) * IRO[315].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[306].base + ((pfId) * IRO[306].m1))
+	(IRO[307].base + ((pfId) * IRO[307].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[305].base + ((pfId) * IRO[305].m1))
+	(IRO[306].base + ((pfId) * IRO[306].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[304].base + ((pfId) * IRO[304].m1))
+	(IRO[305].base + ((pfId) * IRO[305].m1))
 #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
 	(IRO[151].base + ((funcId) * IRO[151].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -96,37 +96,37 @@
 #define TSTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[103].base + ((funcId) * IRO[103].m1))
 #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[271].base + ((pfId) * IRO[271].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
 	(IRO[272].base + ((pfId) * IRO[272].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
 	(IRO[273].base + ((pfId) * IRO[273].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
 	(IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+	(IRO[275].base + ((pfId) * IRO[275].m1))
 #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[270].base + ((pfId) * IRO[270].m1))
+	(IRO[271].base + ((pfId) * IRO[271].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[269].base + ((pfId) * IRO[269].m1))
+	(IRO[270].base + ((pfId) * IRO[270].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[268].base + ((pfId) * IRO[268].m1))
+	(IRO[269].base + ((pfId) * IRO[269].m1))
 #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-	(IRO[267].base + ((pfId) * IRO[267].m1))
+	(IRO[268].base + ((pfId) * IRO[268].m1))
 #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
-	(IRO[276].base + ((pfId) * IRO[276].m1))
+	(IRO[277].base + ((pfId) * IRO[277].m1))
 #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[263].base + ((pfId) * IRO[263].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
 	(IRO[264].base + ((pfId) * IRO[264].m1))
-#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
 	(IRO[265].base + ((pfId) * IRO[265].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
 	(IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[267].base + ((pfId) * IRO[267].m1))
 #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
 	(IRO[202].base + ((pfId) * IRO[202].m1))
 #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
 	(IRO[105].base + ((funcId) * IRO[105].m1))
 #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
-	(IRO[216].base + ((pfId) * IRO[216].m1))
+	(IRO[217].base + ((pfId) * IRO[217].m1))
 #define TSTORM_VF_TO_PF_OFFSET(funcId) \
 	(IRO[104].base + ((funcId) * IRO[104].m1))
 #define USTORM_AGG_DATA_OFFSET (IRO[206].base)
@@ -140,29 +140,29 @@
 #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
 	(IRO[183].base + ((portId) * IRO[183].m1))
 #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
-	(IRO[317].base + ((pfId) * IRO[317].m1))
+	(IRO[318].base + ((pfId) * IRO[318].m1))
 #define USTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[178].base + ((funcId) * IRO[178].m1))
 #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[281].base + ((pfId) * IRO[281].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
 	(IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[283].base + ((pfId) * IRO[283].m1))
 #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[286].base + ((pfId) * IRO[286].m1))
+	(IRO[287].base + ((pfId) * IRO[287].m1))
 #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
-	(IRO[283].base + ((pfId) * IRO[283].m1))
+	(IRO[284].base + ((pfId) * IRO[284].m1))
 #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[279].base + ((pfId) * IRO[279].m1))
+	(IRO[280].base + ((pfId) * IRO[280].m1))
 #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[278].base + ((pfId) * IRO[278].m1))
+	(IRO[279].base + ((pfId) * IRO[279].m1))
 #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[277].base + ((pfId) * IRO[277].m1))
+	(IRO[278].base + ((pfId) * IRO[278].m1))
 #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[280].base + ((pfId) * IRO[280].m1))
+	(IRO[281].base + ((pfId) * IRO[281].m1))
 #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
-	(IRO[284].base + ((pfId) * IRO[284].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
 	(IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[286].base + ((pfId) * IRO[286].m1))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
 	(IRO[182].base + ((pfId) * IRO[182].m1))
 #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -188,39 +188,39 @@
 #define XSTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[47].base + ((funcId) * IRO[47].m1))
 #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[294].base + ((pfId) * IRO[294].m1))
+	(IRO[295].base + ((pfId) * IRO[295].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
-	(IRO[297].base + ((pfId) * IRO[297].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
 	(IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
 	(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
 	(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
 	(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
 	(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
 	(IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+	(IRO[304].base + ((pfId) * IRO[304].m1))
 #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[293].base + ((pfId) * IRO[293].m1))
+	(IRO[294].base + ((pfId) * IRO[294].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[292].base + ((pfId) * IRO[292].m1))
+	(IRO[293].base + ((pfId) * IRO[293].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[291].base + ((pfId) * IRO[291].m1))
+	(IRO[292].base + ((pfId) * IRO[292].m1))
 #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[296].base + ((pfId) * IRO[296].m1))
+	(IRO[297].base + ((pfId) * IRO[297].m1))
 #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
-	(IRO[295].base + ((pfId) * IRO[295].m1))
+	(IRO[296].base + ((pfId) * IRO[296].m1))
 #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
-	(IRO[290].base + ((pfId) * IRO[290].m1))
+	(IRO[291].base + ((pfId) * IRO[291].m1))
 #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[289].base + ((pfId) * IRO[289].m1))
+	(IRO[290].base + ((pfId) * IRO[290].m1))
 #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
-	(IRO[288].base + ((pfId) * IRO[288].m1))
+	(IRO[289].base + ((pfId) * IRO[289].m1))
 #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
-	(IRO[287].base + ((pfId) * IRO[287].m1))
+	(IRO[288].base + ((pfId) * IRO[288].m1))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
 	(IRO[44].base + ((pfId) * IRO[44].m1))
 #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index efa557b76ac7..ad95324dc042 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1371,7 +1371,14 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
 		pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
 			XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
 			XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
-			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
+			XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+		/* Write pause and PFC registers */
+		REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+		pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+
 	}
 
 	/* Write pause and PFC registers */
@@ -3648,6 +3655,33 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
 		bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
 		bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
+	} else if (CHIP_IS_E3(bp) &&
+		SINGLE_MEDIA_DIRECT(params)) {
+		u8 lane = bnx2x_get_warpcore_lane(phy, params);
+		u16 gp_status, gp_mask;
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
+				&gp_status);
+		gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
+			   MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
+			lane;
+		if ((gp_status & gp_mask) == gp_mask) {
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+		} else {
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+			ld_pause = ((ld_pause &
+				     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+				    << 3);
+			lp_pause = ((lp_pause &
+				     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+				    << 3);
+		}
 	} else {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_AN_DEVAD,
@@ -3698,7 +3732,23 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	u16 val16 = 0, lane, bam37 = 0;
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
-
+	/* Set to default registers that may be overriden by 10G force */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_CONTROL, 0x7415);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
 	/* Disable Autoneg: re-enable it after adv is done. */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
@@ -3944,13 +3994,13 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
 
 	} else {
 		misc1_val |= 0x9;
-		tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
-			   (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
-			   (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+		tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+			   (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+			   (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
 		tx_driver_val =
-		      ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		      ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
-		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+		       (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
 	}
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4368,7 +4418,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
 	switch (serdes_net_if) {
 	case PORT_HW_CFG_NET_SERDES_IF_KR:
 		/* Enable KR Auto Neg */
-		if (params->loopback_mode == LOOPBACK_NONE)
+		if (params->loopback_mode != LOOPBACK_EXT)
 			bnx2x_warpcore_enable_AN_KR(phy, params, vars);
 		else {
 			DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
@@ -6166,12 +6216,14 @@ int bnx2x_set_led(struct link_params *params,
 
 		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 		if (params->phy[EXT_PHY1].type ==
 		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
-			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp & 0xfff1);
-		else {
-			EMAC_WR(bp, EMAC_REG_EMAC_LED,
-				(tmp | EMAC_LED_OVERRIDE));
-		}
+			tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
+				 EMAC_LED_100MB_OVERRIDE |
+				 EMAC_LED_10MB_OVERRIDE);
+		else
+			tmp |= EMAC_LED_OVERRIDE;
+
+		EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp);
 		break;
 
 	case LED_MODE_OPER:
@@ -6226,10 +6278,15 @@ int bnx2x_set_led(struct link_params *params,
 			       hw_led_mode);
 		} else if ((params->phy[EXT_PHY1].type ==
 			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
-			   (mode != LED_MODE_OPER)) {
+			   (mode == LED_MODE_ON)) {
 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
 			tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | 0x3);
+			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp |
+				EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE);
+			/* Break here; otherwise, it'll disable the
+			 * intended override.
+			 */
+			break;
 		} else
 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
 			       hw_led_mode);
@@ -6244,13 +6301,9 @@ int bnx2x_set_led(struct link_params *params,
 			       LED_BLINK_RATE_VAL_E1X_E2);
 			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
 			       port*4, 1);
-			if ((params->phy[EXT_PHY1].type !=
-			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
-			    (mode != LED_MODE_OPER)) {
-				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-				EMAC_WR(bp, EMAC_REG_EMAC_LED,
-					(tmp & (~EMAC_LED_OVERRIDE)));
-			}
+			tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+			EMAC_WR(bp, EMAC_REG_EMAC_LED,
+				(tmp & (~EMAC_LED_OVERRIDE)));
 
 			if (CHIP_IS_E1(bp) &&
 			    ((speed == SPEED_2500) ||
@@ -6843,6 +6896,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 			 SINGLE_MEDIA_DIRECT(params)) &&
 			(phy_vars[active_external_phy].fault_detected == 0));
 
+	/* Update the PFC configuration in case it was changed */
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		vars->link_status |= LINK_STATUS_PFC_ENABLED;
+	else
+		vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
 	if (vars->link_up)
 		rc = bnx2x_update_link_up(params, vars, link_10g_plus);
 	else
@@ -8030,7 +8089,9 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
 	netdev_err(bp->dev,  "Warning: Unqualified SFP+ module detected,"
 			      " Port %d from %s part number %s\n",
 			 params->port, vendor_name, vendor_pn);
-	phy->flags |= FLAGS_SFP_NOT_APPROVED;
+	if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG)
+		phy->flags |= FLAGS_SFP_NOT_APPROVED;
 	return -EINVAL;
 }
 
@@ -9090,6 +9151,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
 		tmp2 &= 0xFFEF;
 		bnx2x_cl45_write(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+				&tmp2);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+				 (tmp2 & 0x7fff));
 	}
 
 	return 0;
@@ -9270,12 +9337,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 				 ((1<<5) | (1<<2)));
 	}
-	DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
-	bnx2x_8727_specific_func(phy, params, ENABLE_TX);
-	/* If transmitter is disabled, ignore false link up indication */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
-	if (val1 & (1<<15)) {
+
+	if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+		DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n");
+		bnx2x_sfp_set_transmitter(params, phy, 1);
+	} else {
 		DP(NETIF_MSG_LINK, "Tx is disabled\n");
 		return 0;
 	}
@@ -9369,8 +9435,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
-		bnx2x_save_spirom_version(bp, port,
-				((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+		bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
 				phy->ver_addr);
 	} else {
 		/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
@@ -9793,6 +9858,15 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
 			   other_shmem_base_addr));
 
 	u32 shmem_base_path[2];
+
+	/* Work around for 84833 LED failure inside RESET status */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+		MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+		MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+		MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
+		MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
+
 	shmem_base_path[0] = params->shmem_base;
 	shmem_base_path[1] = other_shmem_base_addr;
 
@@ -10103,7 +10177,7 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
 	u8 port;
 	u16 val16;
 
-	if (!(CHIP_IS_E1(bp)))
+	if (!(CHIP_IS_E1x(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
@@ -10130,7 +10204,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 	u16 val;
 	u8 port;
 
-	if (!(CHIP_IS_E1(bp)))
+	if (!(CHIP_IS_E1x(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
@@ -12049,6 +12123,9 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12049 12123
12050 bnx2x_emac_init(params, vars); 12124 bnx2x_emac_init(params, vars);
12051 12125
12126 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
12127 vars->link_status |= LINK_STATUS_PFC_ENABLED;
12128
12052 if (params->num_phys == 0) { 12129 if (params->num_phys == 0) {
12053 DP(NETIF_MSG_LINK, "No phy found for initialization !!\n"); 12130 DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
12054 return -EINVAL; 12131 return -EINVAL;
@@ -12128,10 +12205,10 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12128 * Hold it as vars low 12205 * Hold it as vars low
12129 */ 12206 */
12130 /* clear link led */ 12207 /* clear link led */
12208 bnx2x_set_mdio_clk(bp, params->chip_id, port);
12131 bnx2x_set_led(params, vars, LED_MODE_OFF, 0); 12209 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
12132 12210
12133 if (reset_ext_phy) { 12211 if (reset_ext_phy) {
12134 bnx2x_set_mdio_clk(bp, params->chip_id, port);
12135 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 12212 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
12136 phy_index++) { 12213 phy_index++) {
12137 if (params->phy[phy_index].link_reset) { 12214 if (params->phy[phy_index].link_reset) {
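
[annotation] Two of the bnx2x hunks above tie the 8727's TX laser to the SFP verification result: read_status() no longer trusts a PHY register read-back (bit 15 of PHY_IDENTIFIER) but checks the FLAGS_SFP_NOT_APPROVED software flag directly. A minimal user-space sketch of that control flow — the flag's bit position and the helper are illustrative stand-ins, not the driver's real API:

#include <stdbool.h>
#include <stdio.h>

#define FLAGS_SFP_NOT_APPROVED (1 << 0)   /* bit position is hypothetical */

struct phy { unsigned int flags; };

/* Returns true when link processing may continue; false means the
 * transmitter stays off and a false link-up indication is ignored. */
static bool maybe_enable_tx_laser(const struct phy *phy)
{
	if (phy->flags & FLAGS_SFP_NOT_APPROVED) {
		printf("Tx is disabled\n");
		return false;
	}
	printf("Enabling 8727 TX laser\n");
	/* bnx2x_sfp_set_transmitter(params, phy, 1) goes here in the driver */
	return true;
}

int main(void)
{
	struct phy approved = { .flags = 0 };
	struct phy rejected = { .flags = FLAGS_SFP_NOT_APPROVED };

	maybe_enable_tx_laser(&approved);
	maybe_enable_tx_laser(&rejected);
	return 0;
}
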
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index ab0a250f95fa..c25803b9c0ca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5354,6 +5354,7 @@
5354#define XMAC_CTRL_REG_TX_EN (0x1<<0) 5354#define XMAC_CTRL_REG_TX_EN (0x1<<0)
5355#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18) 5355#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
5356#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17) 5356#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
5357#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON (0x1<<1)
5357#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0) 5358#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0)
5358#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3) 5359#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3)
5359#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4) 5360#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4)
@@ -6820,10 +6821,13 @@ The other bits are reserved and should be zero*/
6820 6821
6821#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020 6822#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
6822#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 6823#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
6824#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
6823#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1 6825#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
6824#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 6826#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
6825#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6 6827#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
6826#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 6828#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
6829#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0
6830#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008
6827#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5 6831#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
6828#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7 6832#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
6829#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8 6833#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
@@ -6943,6 +6947,10 @@ The other bits are reserved and should be zero*/
6943#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2 6947#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
6944#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3 6948#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
6945#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4 6949#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
6950#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
6951#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
6952#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
6953#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
6946#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE 6954#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
6947#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0 6955#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
6948#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2 6956#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 86cdd4793992..b83897f76ee3 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -161,6 +161,12 @@ struct e1000_info;
161/* Time to wait before putting the device into D3 if there's no link (in ms). */ 161/* Time to wait before putting the device into D3 if there's no link (in ms). */
162#define LINK_TIMEOUT 100 162#define LINK_TIMEOUT 100
163 163
164/*
165 * Count for polling __E1000_RESET condition every 10-20msec.
166 * Experimentation has shown the reset can take approximately 210msec.
167 */
168#define E1000_CHECK_RESET_COUNT 25
169
164#define DEFAULT_RDTR 0 170#define DEFAULT_RDTR 0
165#define DEFAULT_RADV 8 171#define DEFAULT_RADV 8
166#define BURST_RDTR 0x20 172#define BURST_RDTR 0x20
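
[annotation] E1000_CHECK_RESET_COUNT bounds the new polling loops added below in netdev.c: 25 polls of 10-20 ms each wait roughly 250-500 ms, safely above the ~210 ms a reset was measured to take. A user-space sketch of the same bounded wait, where the flag stands in for test_bit(__E1000_RESETTING, &adapter->state):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define E1000_CHECK_RESET_COUNT 25

static volatile bool resetting;   /* cleared by the reset path when done */

static void wait_for_reset_to_finish(void)
{
	int count = E1000_CHECK_RESET_COUNT;

	/* 25 polls of at least 10 ms each: give up after ~250 ms or more,
	 * comfortably above the ~210 ms a reset can take. */
	while (resetting && count--)
		usleep(10000);    /* the driver uses usleep_range(10000, 20000) */
}

int main(void)
{
	resetting = false;        /* pretend no reset is in flight */
	wait_for_reset_to_finish();
	puts("safe to tear down");
	return 0;
}
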
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2c38a65ade87..19ab2154802c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1059,6 +1059,13 @@ static void e1000_print_hw_hang(struct work_struct *work)
1059 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1059 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1060 /* execute the writes immediately */ 1060 /* execute the writes immediately */
1061 e1e_flush(); 1061 e1e_flush();
1062 /*
1063 * Due to rare timing issues, write to TIDV again to ensure
1064 * the write is successful
1065 */
1066 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1067 /* execute the writes immediately */
1068 e1e_flush();
1062 adapter->tx_hang_recheck = true; 1069 adapter->tx_hang_recheck = true;
1063 return; 1070 return;
1064 } 1071 }
@@ -3616,6 +3623,16 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3616 3623
3617 /* execute the writes immediately */ 3624 /* execute the writes immediately */
3618 e1e_flush(); 3625 e1e_flush();
3626
3627 /*
3628 * due to rare timing issues, write to TIDV/RDTR again to ensure the
3629 * write is successful
3630 */
3631 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3632 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3633
3634 /* execute the writes immediately */
3635 e1e_flush();
3619} 3636}
3620 3637
3621static void e1000e_update_stats(struct e1000_adapter *adapter); 3638static void e1000e_update_stats(struct e1000_adapter *adapter);
@@ -3968,6 +3985,10 @@ static int e1000_close(struct net_device *netdev)
3968{ 3985{
3969 struct e1000_adapter *adapter = netdev_priv(netdev); 3986 struct e1000_adapter *adapter = netdev_priv(netdev);
3970 struct pci_dev *pdev = adapter->pdev; 3987 struct pci_dev *pdev = adapter->pdev;
3988 int count = E1000_CHECK_RESET_COUNT;
3989
3990 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
3991 usleep_range(10000, 20000);
3971 3992
3972 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 3993 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3973 3994
@@ -5472,6 +5493,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5472 netif_device_detach(netdev); 5493 netif_device_detach(netdev);
5473 5494
5474 if (netif_running(netdev)) { 5495 if (netif_running(netdev)) {
5496 int count = E1000_CHECK_RESET_COUNT;
5497
5498 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5499 usleep_range(10000, 20000);
5500
5475 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 5501 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5476 e1000e_down(adapter); 5502 e1000e_down(adapter);
5477 e1000_free_irq(adapter); 5503 e1000_free_irq(adapter);
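
[annotation] The repeated TIDV/RDTR writes above work around occasional loss of the first posted write. A hedged sketch of the write-twice-and-flush pattern against a fake register file rather than real MMIO; ew32()/e1e_flush() are simplified stand-ins for the driver macros, and the register index and FPD bit are hypothetical:

#include <stdint.h>

static volatile uint32_t regs[256];

#define TIDV           0x10        /* stand-in register index */
#define E1000_TIDV_FPD (1u << 31)  /* "flush partial descriptors", stand-in bit */

static void ew32(unsigned int reg, uint32_t val) { regs[reg] = val; }
static void e1e_flush(void) { (void)regs[0]; /* read-back posts the writes */ }

static void force_descriptor_writeback(uint32_t tx_int_delay)
{
	ew32(TIDV, tx_int_delay | E1000_TIDV_FPD);
	e1e_flush();
	/* Rare timing issue: the first write can be lost, so issue it a
	 * second time and flush again to ensure it lands. */
	ew32(TIDV, tx_int_delay | E1000_TIDV_FPD);
	e1e_flush();
}

int main(void)
{
	force_descriptor_writeback(8);   /* DEFAULT_RADV-style delay value */
	return 0;
}
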
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index dde65f951400..652e4b09546d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -44,62 +44,94 @@
44#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ 44#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
45#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ 45#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
46 46
47int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, 47int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
48 struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) 48 struct ixgbe_dcb_config *dcfg, int tc_max)
49{ 49{
50 struct tc_configuration *src_tc_cfg = NULL; 50 struct tc_configuration *src = NULL;
51 struct tc_configuration *dst_tc_cfg = NULL; 51 struct tc_configuration *dst = NULL;
52 int i; 52 int i, j;
53 int tx = DCB_TX_CONFIG;
54 int rx = DCB_RX_CONFIG;
55 int changes = 0;
53 56
54 if (!src_dcb_cfg || !dst_dcb_cfg) 57 if (!scfg || !dcfg)
55 return -EINVAL; 58 return changes;
56 59
57 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { 60 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
58 src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; 61 src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
59 dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; 62 dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0];
60 63
61 dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = 64 if (dst->path[tx].prio_type != src->path[tx].prio_type) {
62 src_tc_cfg->path[DCB_TX_CONFIG].prio_type; 65 dst->path[tx].prio_type = src->path[tx].prio_type;
66 changes |= BIT_PG_TX;
67 }
63 68
64 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = 69 if (dst->path[tx].bwg_id != src->path[tx].bwg_id) {
65 src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; 70 dst->path[tx].bwg_id = src->path[tx].bwg_id;
71 changes |= BIT_PG_TX;
72 }
66 73
67 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = 74 if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) {
68 src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; 75 dst->path[tx].bwg_percent = src->path[tx].bwg_percent;
76 changes |= BIT_PG_TX;
77 }
69 78
70 dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = 79 if (dst->path[tx].up_to_tc_bitmap !=
71 src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; 80 src->path[tx].up_to_tc_bitmap) {
81 dst->path[tx].up_to_tc_bitmap =
82 src->path[tx].up_to_tc_bitmap;
83 changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG);
84 }
72 85
73 dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = 86 if (dst->path[rx].prio_type != src->path[rx].prio_type) {
74 src_tc_cfg->path[DCB_RX_CONFIG].prio_type; 87 dst->path[rx].prio_type = src->path[rx].prio_type;
88 changes |= BIT_PG_RX;
89 }
75 90
76 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = 91 if (dst->path[rx].bwg_id != src->path[rx].bwg_id) {
77 src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; 92 dst->path[rx].bwg_id = src->path[rx].bwg_id;
93 changes |= BIT_PG_RX;
94 }
78 95
79 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = 96 if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) {
80 src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; 97 dst->path[rx].bwg_percent = src->path[rx].bwg_percent;
98 changes |= BIT_PG_RX;
99 }
81 100
82 dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = 101 if (dst->path[rx].up_to_tc_bitmap !=
83 src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; 102 src->path[rx].up_to_tc_bitmap) {
103 dst->path[rx].up_to_tc_bitmap =
104 src->path[rx].up_to_tc_bitmap;
105 changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG);
106 }
84 } 107 }
85 108
86 for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { 109 for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
87 dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG] 110 j = i - DCB_PG_ATTR_BW_ID_0;
88 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage 111 if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) {
89 [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; 112 dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j];
90 dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG] 113 changes |= BIT_PG_TX;
91 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage 114 }
92 [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; 115 if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) {
116 dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j];
117 changes |= BIT_PG_RX;
118 }
93 } 119 }
94 120
95 for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { 121 for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
96 dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc = 122 j = i - DCB_PFC_UP_ATTR_0;
97 src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; 123 if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) {
124 dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc;
125 changes |= BIT_PFC;
126 }
98 } 127 }
99 128
100 dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable; 129 if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) {
130 dcfg->pfc_mode_enable = scfg->pfc_mode_enable;
131 changes |= BIT_PFC;
132 }
101 133
102 return 0; 134 return changes;
103} 135}
104 136
105static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) 137static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
@@ -179,20 +211,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
179 if (up_map != DCB_ATTR_VALUE_UNDEFINED) 211 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
180 adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = 212 adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
181 up_map; 213 up_map;
182
183 if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
184 adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
185 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
186 adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
187 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
188 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
189 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
190 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
191 adapter->dcb_set_bitmap |= BIT_PG_TX;
192
193 if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
194 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
195 adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
196} 214}
197 215
198static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 216static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -201,10 +219,6 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
201 struct ixgbe_adapter *adapter = netdev_priv(netdev); 219 struct ixgbe_adapter *adapter = netdev_priv(netdev);
202 220
203 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; 221 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
204
205 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
206 adapter->dcb_cfg.bw_percentage[0][bwg_id])
207 adapter->dcb_set_bitmap |= BIT_PG_TX;
208} 222}
209 223
210static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, 224static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -223,20 +237,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
223 if (up_map != DCB_ATTR_VALUE_UNDEFINED) 237 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
224 adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = 238 adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
225 up_map; 239 up_map;
226
227 if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
228 adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
229 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
230 adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
231 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
232 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
233 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
234 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
235 adapter->dcb_set_bitmap |= BIT_PG_RX;
236
237 if (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
238 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)
239 adapter->dcb_set_bitmap |= BIT_PFC;
240} 240}
241 241
242static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 242static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -245,10 +245,6 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
245 struct ixgbe_adapter *adapter = netdev_priv(netdev); 245 struct ixgbe_adapter *adapter = netdev_priv(netdev);
246 246
247 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; 247 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
248
249 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
250 adapter->dcb_cfg.bw_percentage[1][bwg_id])
251 adapter->dcb_set_bitmap |= BIT_PG_RX;
252} 248}
253 249
254static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, 250static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -298,10 +294,8 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
298 294
299 adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; 295 adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
300 if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != 296 if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
301 adapter->dcb_cfg.tc_config[priority].dcb_pfc) { 297 adapter->dcb_cfg.tc_config[priority].dcb_pfc)
302 adapter->dcb_set_bitmap |= BIT_PFC;
303 adapter->temp_dcb_cfg.pfc_mode_enable = true; 298 adapter->temp_dcb_cfg.pfc_mode_enable = true;
304 }
305} 299}
306 300
307static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, 301static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -336,7 +330,8 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
336static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) 330static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
337{ 331{
338 struct ixgbe_adapter *adapter = netdev_priv(netdev); 332 struct ixgbe_adapter *adapter = netdev_priv(netdev);
339 int ret, i; 333 int ret = DCB_NO_HW_CHG;
334 int i;
340#ifdef IXGBE_FCOE 335#ifdef IXGBE_FCOE
341 struct dcb_app app = { 336 struct dcb_app app = {
342 .selector = DCB_APP_IDTYPE_ETHTYPE, 337 .selector = DCB_APP_IDTYPE_ETHTYPE,
@@ -355,12 +350,13 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
355 350
356 /* Fail command if not in CEE mode */ 351 /* Fail command if not in CEE mode */
357 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 352 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
358 return 1; 353 return ret;
359 354
360 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, 355 adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
361 MAX_TRAFFIC_CLASS); 356 &adapter->dcb_cfg,
362 if (ret) 357 MAX_TRAFFIC_CLASS);
363 return DCB_NO_HW_CHG; 358 if (!adapter->dcb_set_bitmap)
359 return ret;
364 360
365 if (adapter->dcb_cfg.pfc_mode_enable) { 361 if (adapter->dcb_cfg.pfc_mode_enable) {
366 switch (adapter->hw.mac.type) { 362 switch (adapter->hw.mac.type) {
@@ -420,6 +416,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
420 416
421 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 417 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
422 netdev_set_prio_tc_map(netdev, i, prio_tc[i]); 418 netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
419
420 ret = DCB_HW_CHG_RST;
423 } 421 }
424 422
425 if (adapter->dcb_set_bitmap & BIT_PFC) { 423 if (adapter->dcb_set_bitmap & BIT_PFC) {
@@ -430,7 +428,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
430 DCB_TX_CONFIG, prio_tc); 428 DCB_TX_CONFIG, prio_tc);
431 ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en); 429 ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
432 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc); 430 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
433 ret = DCB_HW_CHG; 431 if (ret != DCB_HW_CHG_RST)
432 ret = DCB_HW_CHG;
434 } 433 }
435 434
436 if (adapter->dcb_cfg.pfc_mode_enable) 435 if (adapter->dcb_cfg.pfc_mode_enable)
@@ -531,9 +530,6 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
531 struct ixgbe_adapter *adapter = netdev_priv(netdev); 530 struct ixgbe_adapter *adapter = netdev_priv(netdev);
532 531
533 adapter->temp_dcb_cfg.pfc_mode_enable = state; 532 adapter->temp_dcb_cfg.pfc_mode_enable = state;
534 if (adapter->temp_dcb_cfg.pfc_mode_enable !=
535 adapter->dcb_cfg.pfc_mode_enable)
536 adapter->dcb_set_bitmap |= BIT_PFC;
537} 533}
538 534
539/** 535/**
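
[annotation] The ixgbe refactor above moves change detection out of the individual dcbnl setters: ixgbe_copy_dcb_cfg() now copies field by field and returns a bitmask of what actually differed, so set_all() can bail out with DCB_NO_HW_CHG when nothing changed instead of flapping the link. A compact sketch of that copy-and-report-changes pattern; the struct and bit values are simplified stand-ins for the driver's:

#include <stdio.h>

#define BIT_PG_TX 0x01
#define BIT_PFC   0x04

struct cfg { int tx_bw; int pfc_enable; };

/* Copy src into dst field by field, OR-ing a change bit into the
 * return value only for fields whose value really changed. */
static int copy_cfg(const struct cfg *src, struct cfg *dst)
{
	int changes = 0;

	if (dst->tx_bw != src->tx_bw) {
		dst->tx_bw = src->tx_bw;
		changes |= BIT_PG_TX;
	}
	if (dst->pfc_enable != src->pfc_enable) {
		dst->pfc_enable = src->pfc_enable;
		changes |= BIT_PFC;
	}
	return changes;
}

int main(void)
{
	struct cfg hw   = { .tx_bw = 100, .pfc_enable = 0 };
	struct cfg want = { .tx_bw = 100, .pfc_enable = 1 };
	int changes = copy_cfg(&want, &hw);

	if (!changes)
		puts("no hardware change needed");  /* DCB_NO_HW_CHG path */
	else
		printf("apply change bitmap 0x%x\n", changes);
	return 0;
}
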
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b806d9b4defb..c9b504e2dfc3 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2469,6 +2469,17 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2469 return err; 2469 return err;
2470} 2470}
2471 2471
2472static inline bool needs_copy(const struct rx_ring_info *re,
2473 unsigned length)
2474{
2475#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2476 /* Some architectures need the IP header to be aligned */
2477 if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
2478 return true;
2479#endif
2480 return length < copybreak;
2481}
2482
2472/* For small just reuse existing skb for next receive */ 2483/* For small just reuse existing skb for next receive */
2473static struct sk_buff *receive_copy(struct sky2_port *sky2, 2484static struct sk_buff *receive_copy(struct sky2_port *sky2,
2474 const struct rx_ring_info *re, 2485 const struct rx_ring_info *re,
@@ -2599,7 +2610,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2599 goto error; 2610 goto error;
2600 2611
2601okay: 2612okay:
2602 if (length < copybreak) 2613 if (needs_copy(re, length))
2603 skb = receive_copy(sky2, re, length); 2614 skb = receive_copy(sky2, re, length);
2604 else 2615 else
2605 skb = receive_new(sky2, re, length); 2616 skb = receive_new(sky2, re, length);
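
[annotation] The sky2 change routes receive buffers through the copy path not only when they are short, but also when the IP header would land misaligned on architectures without efficient unaligned access. A user-space sketch of the new test; ETH_HLEN and IS_ALIGNED are re-derived here and the copybreak value is a stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN 14
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static unsigned int copybreak = 128;

static bool needs_copy(uint64_t data_addr, unsigned int length)
{
#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* The 14-byte Ethernet header leaves the IP header off a 4-byte
	 * boundary unless the buffer start compensates. */
	if (!IS_ALIGNED(data_addr + ETH_HLEN, sizeof(uint32_t)))
		return true;
#endif
	return length < copybreak;
}

int main(void)
{
	printf("%d\n", needs_copy(0x1000, 1500)); /* 0x100e: misaligned -> copy */
	printf("%d\n", needs_copy(0x1002, 1500)); /* 0x1010: aligned, large -> no copy */
	return 0;
}
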
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9e2b911a1230..d69fee41f24a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -83,8 +83,9 @@
83 83
84#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) 84#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
85 85
86#define MLX4_EN_ALLOC_ORDER 2 86/* Use the maximum between 16384 and a single page */
87#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER) 87#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
88#define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE)
88 89
89#define MLX4_EN_MAX_LRO_DESCRIPTORS 32 90#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
90 91
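
[annotation] The new mlx4 definitions request 16 KB rounded up to a whole page, so the allocation stays order-2 on 4 KB pages but collapses to a single page once PAGE_SIZE reaches 16 KB, instead of always grabbing four pages. A user-space demo of the arithmetic, with PAGE_ALIGN and get_order re-derived for illustration:

#include <stdio.h>

static unsigned long page_align(unsigned long sz, unsigned long page)
{
	return (sz + page - 1) & ~(page - 1);
}

/* Smallest order such that (page << order) >= size. */
static int get_order(unsigned long size, unsigned long page)
{
	int order = 0;

	while ((page << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long pages[] = { 4096, 16384, 65536 };

	for (int i = 0; i < 3; i++) {
		unsigned long sz = page_align(16384, pages[i]);
		printf("PAGE_SIZE %6lu: alloc %6lu bytes, order %d\n",
		       pages[i], sz, get_order(sz, pages[i]));
	}
	return 0;   /* order 2 on 4K pages, order 0 on 16K/64K pages */
}
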
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7b23554f80b6..f54509377efa 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5810,7 +5810,10 @@ static void __rtl8169_resume(struct net_device *dev)
5810 5810
5811 rtl_pll_power_up(tp); 5811 rtl_pll_power_up(tp);
5812 5812
5813 rtl_lock_work(tp);
5814 napi_enable(&tp->napi);
5813 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); 5815 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
5816 rtl_unlock_work(tp);
5814 5817
5815 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); 5818 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
5816} 5819}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e85ffbd54830..48d56da62f08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1737,10 +1737,12 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1737 struct mac_device_info *mac; 1737 struct mac_device_info *mac;
1738 1738
1739 /* Identify the MAC HW device */ 1739 /* Identify the MAC HW device */
1740 if (priv->plat->has_gmac) 1740 if (priv->plat->has_gmac) {
1741 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1741 mac = dwmac1000_setup(priv->ioaddr); 1742 mac = dwmac1000_setup(priv->ioaddr);
1742 else 1743 } else {
1743 mac = dwmac100_setup(priv->ioaddr); 1744 mac = dwmac100_setup(priv->ioaddr);
1745 }
1744 if (!mac) 1746 if (!mac)
1745 return -ENOMEM; 1747 return -ENOMEM;
1746 1748
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 0856e1b7a849..f08c85acf761 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -162,7 +162,8 @@ static int ip101a_g_config_init(struct phy_device *phydev)
162 /* Enable Auto Power Saving mode */ 162 /* Enable Auto Power Saving mode */
163 c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS); 163 c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
164 c |= IP101A_G_APS_ON; 164 c |= IP101A_G_APS_ON;
165 return c; 165
166 return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
166} 167}
167 168
168static int ip175c_read_status(struct phy_device *phydev) 169static int ip175c_read_status(struct phy_device *phydev)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 159da2905fe9..33f8c51968b6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -235,7 +235,7 @@ struct ppp_net {
235/* Prototypes. */ 235/* Prototypes. */
236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
237 struct file *file, unsigned int cmd, unsigned long arg); 237 struct file *file, unsigned int cmd, unsigned long arg);
238static void ppp_xmit_process(struct ppp *ppp); 238static int ppp_xmit_process(struct ppp *ppp);
239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
240static void ppp_push(struct ppp *ppp); 240static void ppp_push(struct ppp *ppp);
241static void ppp_channel_push(struct channel *pch); 241static void ppp_channel_push(struct channel *pch);
@@ -968,9 +968,9 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
968 proto = npindex_to_proto[npi]; 968 proto = npindex_to_proto[npi];
969 put_unaligned_be16(proto, pp); 969 put_unaligned_be16(proto, pp);
970 970
971 netif_stop_queue(dev);
972 skb_queue_tail(&ppp->file.xq, skb); 971 skb_queue_tail(&ppp->file.xq, skb);
973 ppp_xmit_process(ppp); 972 if (!ppp_xmit_process(ppp))
973 netif_stop_queue(dev);
974 return NETDEV_TX_OK; 974 return NETDEV_TX_OK;
975 975
976 outf: 976 outf:
@@ -1048,10 +1048,11 @@ static void ppp_setup(struct net_device *dev)
1048 * Called to do any work queued up on the transmit side 1048 * Called to do any work queued up on the transmit side
1049 * that can now be done. 1049 * that can now be done.
1050 */ 1050 */
1051static void 1051static int
1052ppp_xmit_process(struct ppp *ppp) 1052ppp_xmit_process(struct ppp *ppp)
1053{ 1053{
1054 struct sk_buff *skb; 1054 struct sk_buff *skb;
1055 int ret = 0;
1055 1056
1056 ppp_xmit_lock(ppp); 1057 ppp_xmit_lock(ppp);
1057 if (!ppp->closing) { 1058 if (!ppp->closing) {
@@ -1061,10 +1062,13 @@ ppp_xmit_process(struct ppp *ppp)
1061 ppp_send_frame(ppp, skb); 1062 ppp_send_frame(ppp, skb);
1062 /* If there's no work left to do, tell the core net 1063 /* If there's no work left to do, tell the core net
1063 code that we can accept some more. */ 1064 code that we can accept some more. */
1064 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) 1065 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
1065 netif_wake_queue(ppp->dev); 1066 netif_wake_queue(ppp->dev);
1067 ret = 1;
1068 }
1066 } 1069 }
1067 ppp_xmit_unlock(ppp); 1070 ppp_xmit_unlock(ppp);
1071 return ret;
1068} 1072}
1069 1073
1070static inline struct sk_buff * 1074static inline struct sk_buff *
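
[annotation] With the PPP change, ppp_xmit_process() reports whether everything queued reached the downstream channel, and ppp_start_xmit() stops the device queue only on a genuine backlog rather than toggling it on every packet. A sketch of that contract, where the device, queue and downstream credit are simplified stand-ins for netif_stop_queue()/netif_wake_queue() and the channel:

#include <stdbool.h>
#include <stdio.h>

struct dev { bool stopped; };

static int downstream_credit = 2;     /* frames the channel can take now */

/* Returns true when everything queued was pushed downstream, i.e. the
 * device queue may stay awake (the new ppp_xmit_process() contract). */
static bool xmit_process(int *backlog)
{
	while (*backlog > 0 && downstream_credit > 0) {
		(*backlog)--;
		downstream_credit--;
	}
	return *backlog == 0;
}

static void start_xmit(struct dev *dev, int *backlog)
{
	(*backlog)++;                  /* skb_queue_tail() */
	if (!xmit_process(backlog))
		dev->stopped = true;   /* netif_stop_queue(), only now */
}

int main(void)
{
	struct dev dev = { .stopped = false };
	int backlog = 0;

	for (int i = 0; i < 3 && !dev.stopped; i++)
		start_xmit(&dev, &backlog);
	printf("stopped=%d backlog=%d\n", dev.stopped, backlog);
	return 0;
}
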
diff --git a/fs/splice.c b/fs/splice.c
index 5f883de7ef3a..f8476841eb04 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -30,6 +30,7 @@
30#include <linux/uio.h> 30#include <linux/uio.h>
31#include <linux/security.h> 31#include <linux/security.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/socket.h>
33 34
34/* 35/*
35 * Attempt to steal a page from a pipe buffer. This should perhaps go into 36 * Attempt to steal a page from a pipe buffer. This should perhaps go into
@@ -690,7 +691,9 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
690 if (!likely(file->f_op && file->f_op->sendpage)) 691 if (!likely(file->f_op && file->f_op->sendpage))
691 return -EINVAL; 692 return -EINVAL;
692 693
693 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; 694 more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
695 if (sd->len < sd->total_len)
696 more |= MSG_SENDPAGE_NOTLAST;
694 return file->f_op->sendpage(file, buf->page, buf->offset, 697 return file->f_op->sendpage(file, buf->page, buf->offset,
695 sd->len, &pos, more); 698 sd->len, &pos, more);
696} 699}
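
[annotation] pipe_to_sendpage() now builds a proper flags word: MSG_MORE only when the caller asked via SPLICE_F_MORE, plus the new internal MSG_SENDPAGE_NOTLAST whenever further pages of the same splice are coming, which lets the tcp.c hunk further down defer tcp_push() to the final page. A sketch of the computation, using the flag values defined in this series and SPLICE_F_MORE per the splice UAPI:

#include <stdio.h>

#define MSG_MORE             0x8000
#define MSG_SENDPAGE_NOTLAST 0x20000
#define SPLICE_F_MORE        0x04

static int sendpage_flags(int splice_flags, unsigned int len,
			  unsigned int total_len)
{
	int more = (splice_flags & SPLICE_F_MORE) ? MSG_MORE : 0;

	if (len < total_len)        /* more pages of this splice follow */
		more |= MSG_SENDPAGE_NOTLAST;
	return more;
}

int main(void)
{
	/* middle page of a splice: NOTLAST set even without SPLICE_F_MORE */
	printf("0x%x\n", sendpage_flags(0, 4096, 65536));
	/* final page, caller promised more data later: plain MSG_MORE */
	printf("0x%x\n", sendpage_flags(SPLICE_F_MORE, 4096, 4096));
	return 0;
}
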
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e1d9e0ede309..f5647b59a90e 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -896,8 +896,7 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
896 * 896 *
897 * All operations are optional (i.e. the function pointer may be set 897 * All operations are optional (i.e. the function pointer may be set
898 * to %NULL) and callers must take this into account. Callers must 898 * to %NULL) and callers must take this into account. Callers must
899 * hold the RTNL, except that for @get_drvinfo the caller may or may 899 * hold the RTNL lock.
900 * not hold the RTNL.
901 * 900 *
902 * See the structures used by these operations for further documentation. 901 * See the structures used by these operations for further documentation.
903 * 902 *
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1f77540bdc95..5cbaa20f1659 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2604,8 +2604,6 @@ extern void net_disable_timestamp(void);
2604extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); 2604extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
2605extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); 2605extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2606extern void dev_seq_stop(struct seq_file *seq, void *v); 2606extern void dev_seq_stop(struct seq_file *seq, void *v);
2607extern int dev_seq_open_ops(struct inode *inode, struct file *file,
2608 const struct seq_operations *ops);
2609#endif 2607#endif
2610 2608
2611extern int netdev_class_create_file(struct class_attribute *class_attr); 2609extern int netdev_class_create_file(struct class_attribute *class_attr);
diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
index c0405ac92870..e3a9978f259f 100644
--- a/include/linux/netfilter/xt_set.h
+++ b/include/linux/netfilter/xt_set.h
@@ -58,8 +58,8 @@ struct xt_set_info_target_v1 {
58struct xt_set_info_target_v2 { 58struct xt_set_info_target_v2 {
59 struct xt_set_info add_set; 59 struct xt_set_info add_set;
60 struct xt_set_info del_set; 60 struct xt_set_info del_set;
61 u32 flags; 61 __u32 flags;
62 u32 timeout; 62 __u32 timeout;
63}; 63};
64 64
65#endif /*_XT_SET_H*/ 65#endif /*_XT_SET_H*/
diff --git a/include/linux/socket.h b/include/linux/socket.h
index da2d3e2543f3..b84bbd48b874 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -265,7 +265,7 @@ struct ucred {
265#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ 265#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
266#define MSG_MORE 0x8000 /* Sender will send more */ 266#define MSG_MORE 0x8000 /* Sender will send more */
267#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ 267#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
268 268#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
269#define MSG_EOF MSG_FIN 269#define MSG_EOF MSG_FIN
270 270
271#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file 271#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file
diff --git a/include/net/netfilter/xt_log.h b/include/net/netfilter/xt_log.h
index 7e1544e8f70d..9d9756cca013 100644
--- a/include/net/netfilter/xt_log.h
+++ b/include/net/netfilter/xt_log.h
@@ -47,7 +47,7 @@ static void sb_close(struct sbuff *m)
47 if (likely(m != &emergency)) 47 if (likely(m != &emergency))
48 kfree(m); 48 kfree(m);
49 else { 49 else {
50 xchg(&emergency_ptr, m); 50 emergency_ptr = m;
51 local_bh_enable(); 51 local_bh_enable();
52 } 52 }
53} 53}
diff --git a/net/core/dev.c b/net/core/dev.c
index 6c7dc9d78e10..c25d453b2803 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4028,54 +4028,41 @@ static int dev_ifconf(struct net *net, char __user *arg)
4028 4028
4029#ifdef CONFIG_PROC_FS 4029#ifdef CONFIG_PROC_FS
4030 4030
4031#define BUCKET_SPACE (32 - NETDEV_HASHBITS) 4031#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4032
4033struct dev_iter_state {
4034 struct seq_net_private p;
4035 unsigned int pos; /* bucket << BUCKET_SPACE + offset */
4036};
4037 4032
4038#define get_bucket(x) ((x) >> BUCKET_SPACE) 4033#define get_bucket(x) ((x) >> BUCKET_SPACE)
4039#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1)) 4034#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4040#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) 4035#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4041 4036
4042static inline struct net_device *dev_from_same_bucket(struct seq_file *seq) 4037static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4043{ 4038{
4044 struct dev_iter_state *state = seq->private;
4045 struct net *net = seq_file_net(seq); 4039 struct net *net = seq_file_net(seq);
4046 struct net_device *dev; 4040 struct net_device *dev;
4047 struct hlist_node *p; 4041 struct hlist_node *p;
4048 struct hlist_head *h; 4042 struct hlist_head *h;
4049 unsigned int count, bucket, offset; 4043 unsigned int count = 0, offset = get_offset(*pos);
4050 4044
4051 bucket = get_bucket(state->pos); 4045 h = &net->dev_name_head[get_bucket(*pos)];
4052 offset = get_offset(state->pos);
4053 h = &net->dev_name_head[bucket];
4054 count = 0;
4055 hlist_for_each_entry_rcu(dev, p, h, name_hlist) { 4046 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4056 if (count++ == offset) { 4047 if (++count == offset)
4057 state->pos = set_bucket_offset(bucket, count);
4058 return dev; 4048 return dev;
4059 }
4060 } 4049 }
4061 4050
4062 return NULL; 4051 return NULL;
4063} 4052}
4064 4053
4065static inline struct net_device *dev_from_new_bucket(struct seq_file *seq) 4054static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4066{ 4055{
4067 struct dev_iter_state *state = seq->private;
4068 struct net_device *dev; 4056 struct net_device *dev;
4069 unsigned int bucket; 4057 unsigned int bucket;
4070 4058
4071 bucket = get_bucket(state->pos);
4072 do { 4059 do {
4073 dev = dev_from_same_bucket(seq); 4060 dev = dev_from_same_bucket(seq, pos);
4074 if (dev) 4061 if (dev)
4075 return dev; 4062 return dev;
4076 4063
4077 bucket++; 4064 bucket = get_bucket(*pos) + 1;
4078 state->pos = set_bucket_offset(bucket, 0); 4065 *pos = set_bucket_offset(bucket, 1);
4079 } while (bucket < NETDEV_HASHENTRIES); 4066 } while (bucket < NETDEV_HASHENTRIES);
4080 4067
4081 return NULL; 4068 return NULL;
@@ -4088,33 +4075,20 @@ static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
4088void *dev_seq_start(struct seq_file *seq, loff_t *pos) 4075void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4089 __acquires(RCU) 4076 __acquires(RCU)
4090{ 4077{
4091 struct dev_iter_state *state = seq->private;
4092
4093 rcu_read_lock(); 4078 rcu_read_lock();
4094 if (!*pos) 4079 if (!*pos)
4095 return SEQ_START_TOKEN; 4080 return SEQ_START_TOKEN;
4096 4081
4097 /* check for end of the hash */ 4082 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4098 if (state->pos == 0 && *pos > 1)
4099 return NULL; 4083 return NULL;
4100 4084
4101 return dev_from_new_bucket(seq); 4085 return dev_from_bucket(seq, pos);
4102} 4086}
4103 4087
4104void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4088void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4105{ 4089{
4106 struct net_device *dev;
4107
4108 ++*pos; 4090 ++*pos;
4109 4091 return dev_from_bucket(seq, pos);
4110 if (v == SEQ_START_TOKEN)
4111 return dev_from_new_bucket(seq);
4112
4113 dev = dev_from_same_bucket(seq);
4114 if (dev)
4115 return dev;
4116
4117 return dev_from_new_bucket(seq);
4118} 4092}
4119 4093
4120void dev_seq_stop(struct seq_file *seq, void *v) 4094void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4213,13 +4187,7 @@ static const struct seq_operations dev_seq_ops = {
4213static int dev_seq_open(struct inode *inode, struct file *file) 4187static int dev_seq_open(struct inode *inode, struct file *file)
4214{ 4188{
4215 return seq_open_net(inode, file, &dev_seq_ops, 4189 return seq_open_net(inode, file, &dev_seq_ops,
4216 sizeof(struct dev_iter_state)); 4190 sizeof(struct seq_net_private));
4217}
4218
4219int dev_seq_open_ops(struct inode *inode, struct file *file,
4220 const struct seq_operations *ops)
4221{
4222 return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
4223} 4191}
4224 4192
4225static const struct file_operations dev_seq_fops = { 4193static const struct file_operations dev_seq_fops = {
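
[annotation] The /proc/net/dev fix drops the private iterator state and encodes the hash bucket plus a 1-based in-bucket offset directly into the seq_file position, so the offset pullback done by the seq_file core can no longer desynchronize from a privately cached position; reserving one extra bit (the new -1 in BUCKET_SPACE) keeps pos 0 free for SEQ_START_TOKEN. A user-space demo of the encoding, with NETDEV_HASHBITS re-declared here for the demo (8 in kernels of this vintage):

#include <assert.h>
#include <stdio.h>

#define NETDEV_HASHBITS 8
#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

int main(void)
{
	unsigned long pos = set_bucket_offset(5, 3); /* bucket 5, 3rd entry */

	assert(get_bucket(pos) == 5);
	assert(get_offset(pos) == 3);
	/* Offsets are 1-based so pos 0 stays reserved for SEQ_START_TOKEN. */
	printf("pos=%lu bucket=%lu offset=%lu\n",
	       pos, get_bucket(pos), get_offset(pos));
	return 0;
}
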
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 29c07fef9228..626698f0db8b 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -696,7 +696,8 @@ static const struct seq_operations dev_mc_seq_ops = {
696 696
697static int dev_mc_seq_open(struct inode *inode, struct file *file) 697static int dev_mc_seq_open(struct inode *inode, struct file *file)
698{ 698{
699 return dev_seq_open_ops(inode, file, &dev_mc_seq_ops); 699 return seq_open_net(inode, file, &dev_mc_seq_ops,
700 sizeof(struct seq_net_private));
700} 701}
701 702
702static const struct file_operations dev_mc_seq_fops = { 703static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/filter.c b/net/core/filter.c
index cf4989ac503b..6f755cca4520 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,8 +39,11 @@
39#include <linux/reciprocal_div.h> 39#include <linux/reciprocal_div.h>
40#include <linux/ratelimit.h> 40#include <linux/ratelimit.h>
41 41
42/* No hurry in this branch */ 42/* No hurry in this branch
43static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) 43 *
44 * Exported for the bpf jit load helper.
45 */
46void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
44{ 47{
45 u8 *ptr = NULL; 48 u8 *ptr = NULL;
46 49
@@ -59,7 +62,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
59{ 62{
60 if (k >= 0) 63 if (k >= 0)
61 return skb_header_pointer(skb, k, size, buffer); 64 return skb_header_pointer(skb, k, size, buffer);
62 return __load_pointer(skb, k, size); 65 return bpf_internal_load_pointer_neg_helper(skb, k, size);
63} 66}
64 67
65/** 68/**
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f223cdc75da6..baf8d281152c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3161,6 +3161,8 @@ static void sock_rmem_free(struct sk_buff *skb)
3161 */ 3161 */
3162int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3162int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3163{ 3163{
3164 int len = skb->len;
3165
3164 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3166 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3165 (unsigned)sk->sk_rcvbuf) 3167 (unsigned)sk->sk_rcvbuf)
3166 return -ENOMEM; 3168 return -ENOMEM;
@@ -3175,7 +3177,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3175 3177
3176 skb_queue_tail(&sk->sk_error_queue, skb); 3178 skb_queue_tail(&sk->sk_error_queue, skb);
3177 if (!sock_flag(sk, SOCK_DEAD)) 3179 if (!sock_flag(sk, SOCK_DEAD))
3178 sk->sk_data_ready(sk, skb->len); 3180 sk->sk_data_ready(sk, len);
3179 return 0; 3181 return 0;
3180} 3182}
3181EXPORT_SYMBOL(sock_queue_err_skb); 3183EXPORT_SYMBOL(sock_queue_err_skb);
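
[annotation] Capturing skb->len before skb_queue_tail() matters because the moment the skb is queued, another context may dequeue and free it; the netlink hunks below centralize the same rule in __netlink_sendskb(). A deliberately tiny sketch of the hazard, where the types are stand-ins and the queue models a consumer freeing the buffer immediately:

#include <stdio.h>
#include <stdlib.h>

struct skb { unsigned int len; };

static void queue_tail(struct skb *skb)
{
	/* From this point on another context may consume and free skb. */
	free(skb);
}

static void queue_err_skb(struct skb *skb)
{
	unsigned int len = skb->len;   /* snapshot before queueing */

	queue_tail(skb);
	/* Reading skb->len here would be a use-after-free; len is safe. */
	printf("data_ready(%u)\n", len);
}

int main(void)
{
	struct skb *skb = malloc(sizeof(*skb));

	skb->len = 100;
	queue_err_skb(skb);
	return 0;
}
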
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cfd7edda0a8e..5d54ed30e821 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -860,7 +860,7 @@ wait_for_memory:
860 } 860 }
861 861
862out: 862out:
863 if (copied) 863 if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
864 tcp_push(sk, flags, mss_now, tp->nonagle); 864 tcp_push(sk, flags, mss_now, tp->nonagle);
865 return copied; 865 return copied;
866 866
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 16c33e308121..b2869cab2092 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2044,7 +2044,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2044 if (!delta) 2044 if (!delta)
2045 pmc->mca_sfcount[sfmode]--; 2045 pmc->mca_sfcount[sfmode]--;
2046 for (j=0; j<i; j++) 2046 for (j=0; j<i; j++)
2047 (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]); 2047 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2048 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) { 2048 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2049 struct ip6_sf_list *psf; 2049 struct ip6_sf_list *psf;
2050 2050
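
[annotation] The one-character ipv6 fix above matters more than it looks: the rollback loop after a failed add must index the source list with its own counter j; using the outer i deleted the same failed entry repeatedly while leaking the entries actually added. A sketch of the corrected pattern, with add_one()/del_one() as illustrative stand-ins for ip6_mc_add1_src()/ip6_mc_del1_src():

#include <stdio.h>

static int add_one(int v)  { if (v < 0) return -1; printf("add %d\n", v); return 0; }
static void del_one(int v) { printf("del %d\n", v); }

static int add_all(const int *items, int n)
{
	int i, j;

	for (i = 0; i < n; i++)
		if (add_one(items[i]) < 0)
			break;
	if (i < n) {                        /* roll back what succeeded */
		for (j = 0; j < i; j++)
			del_one(items[j]);  /* was items[i] before the fix */
		return -1;
	}
	return 0;
}

int main(void)
{
	int items[] = { 1, 2, -1, 4 };      /* third add fails */

	return add_all(items, 4) ? 1 : 0;   /* adds 1,2 then deletes 1,2 */
}
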
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cbdb754dbb10..3cc4487ac349 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -735,6 +735,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
735 735
736#ifdef CONFIG_NF_CONNTRACK_ZONES 736#ifdef CONFIG_NF_CONNTRACK_ZONES
737out_free: 737out_free:
738 atomic_dec(&net->ct.count);
738 kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 739 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
739 return ERR_PTR(-ENOMEM); 740 return ERR_PTR(-ENOMEM);
740#endif 741#endif
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0c8e43810ce3..59530e93fa58 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -150,6 +150,17 @@ err1:
150 return ret; 150 return ret;
151} 151}
152 152
153#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
154static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
155{
156 typeof(nf_ct_timeout_put_hook) timeout_put;
157
158 timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
159 if (timeout_put)
160 timeout_put(timeout);
161}
162#endif
163
153static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par) 164static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
154{ 165{
155 struct xt_ct_target_info_v1 *info = par->targinfo; 166 struct xt_ct_target_info_v1 *info = par->targinfo;
@@ -158,7 +169,9 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
158 struct nf_conn *ct; 169 struct nf_conn *ct;
159 int ret = 0; 170 int ret = 0;
160 u8 proto; 171 u8 proto;
161 172#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
173 struct ctnl_timeout *timeout;
174#endif
162 if (info->flags & ~XT_CT_NOTRACK) 175 if (info->flags & ~XT_CT_NOTRACK)
163 return -EINVAL; 176 return -EINVAL;
164 177
@@ -216,7 +229,6 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
216#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 229#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
217 if (info->timeout) { 230 if (info->timeout) {
218 typeof(nf_ct_timeout_find_get_hook) timeout_find_get; 231 typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
219 struct ctnl_timeout *timeout;
220 struct nf_conn_timeout *timeout_ext; 232 struct nf_conn_timeout *timeout_ext;
221 233
222 rcu_read_lock(); 234 rcu_read_lock();
@@ -245,7 +257,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
245 pr_info("Timeout policy `%s' can only be " 257 pr_info("Timeout policy `%s' can only be "
246 "used by L3 protocol number %d\n", 258 "used by L3 protocol number %d\n",
247 info->timeout, timeout->l3num); 259 info->timeout, timeout->l3num);
248 goto err4; 260 goto err5;
249 } 261 }
250 /* Make sure the timeout policy matches any existing 262 /* Make sure the timeout policy matches any existing
251 * protocol tracker, otherwise default to generic. 263 * protocol tracker, otherwise default to generic.
@@ -258,13 +270,13 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
258 "used by L4 protocol number %d\n", 270 "used by L4 protocol number %d\n",
259 info->timeout, 271 info->timeout,
260 timeout->l4proto->l4proto); 272 timeout->l4proto->l4proto);
261 goto err4; 273 goto err5;
262 } 274 }
263 timeout_ext = nf_ct_timeout_ext_add(ct, timeout, 275 timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
264 GFP_KERNEL); 276 GFP_ATOMIC);
265 if (timeout_ext == NULL) { 277 if (timeout_ext == NULL) {
266 ret = -ENOMEM; 278 ret = -ENOMEM;
267 goto err4; 279 goto err5;
268 } 280 }
269 } else { 281 } else {
270 ret = -ENOENT; 282 ret = -ENOENT;
@@ -281,8 +293,12 @@ out:
281 info->ct = ct; 293 info->ct = ct;
282 return 0; 294 return 0;
283 295
296#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
297err5:
298 __xt_ct_tg_timeout_put(timeout);
284err4: 299err4:
285 rcu_read_unlock(); 300 rcu_read_unlock();
301#endif
286err3: 302err3:
287 nf_conntrack_free(ct); 303 nf_conntrack_free(ct);
288err2: 304err2:
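
[annotation] The xt_CT changes hoist the timeout lookup out of the inner block so the new err5 label can drop its reference before the RCU unlock at err4, and switch the extension allocation to GFP_ATOMIC since it happens under rcu_read_lock(). A sketch of that goto-ladder unwinding, where each failure jumps to the label releasing exactly what was acquired so far; the steps and names are illustrative, not the module's:

#include <stdio.h>

static int setup(int fail_step)
{
	printf("alloc conntrack\n");              /* err3 undoes this */
	if (fail_step == 1)
		goto err3;

	printf("rcu_read_lock + find timeout\n"); /* err4 undoes this */
	if (fail_step == 2)
		goto err4;

	printf("attach timeout ext (GFP_ATOMIC, still under RCU)\n");
	if (fail_step == 3)
		goto err5;                        /* must also put the timeout */
	return 0;

err5:
	printf("put timeout\n");                  /* __xt_ct_tg_timeout_put() */
err4:
	printf("rcu_read_unlock\n");
err3:
	printf("free conntrack\n");
	return -1;
}

int main(void)
{
	return setup(3) ? 1 : 0;
}
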
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 32bb75324e76..faa48f70b7c9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -829,12 +829,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
829 return 0; 829 return 0;
830} 830}
831 831
832int netlink_sendskb(struct sock *sk, struct sk_buff *skb) 832static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
833{ 833{
834 int len = skb->len; 834 int len = skb->len;
835 835
836 skb_queue_tail(&sk->sk_receive_queue, skb); 836 skb_queue_tail(&sk->sk_receive_queue, skb);
837 sk->sk_data_ready(sk, len); 837 sk->sk_data_ready(sk, len);
838 return len;
839}
840
841int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
842{
843 int len = __netlink_sendskb(sk, skb);
844
838 sock_put(sk); 845 sock_put(sk);
839 return len; 846 return len;
840} 847}
@@ -957,8 +964,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
957 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 964 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
958 !test_bit(0, &nlk->state)) { 965 !test_bit(0, &nlk->state)) {
959 skb_set_owner_r(skb, sk); 966 skb_set_owner_r(skb, sk);
960 skb_queue_tail(&sk->sk_receive_queue, skb); 967 __netlink_sendskb(sk, skb);
961 sk->sk_data_ready(sk, skb->len);
962 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); 968 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
963 } 969 }
964 return -1; 970 return -1;
@@ -1698,10 +1704,8 @@ static int netlink_dump(struct sock *sk)
1698 1704
1699 if (sk_filter(sk, skb)) 1705 if (sk_filter(sk, skb))
1700 kfree_skb(skb); 1706 kfree_skb(skb);
1701 else { 1707 else
1702 skb_queue_tail(&sk->sk_receive_queue, skb); 1708 __netlink_sendskb(sk, skb);
1703 sk->sk_data_ready(sk, skb->len);
1704 }
1705 return 0; 1709 return 0;
1706 } 1710 }
1707 1711
@@ -1715,10 +1719,8 @@ static int netlink_dump(struct sock *sk)
1715 1719
1716 if (sk_filter(sk, skb)) 1720 if (sk_filter(sk, skb))
1717 kfree_skb(skb); 1721 kfree_skb(skb);
1718 else { 1722 else
1719 skb_queue_tail(&sk->sk_receive_queue, skb); 1723 __netlink_sendskb(sk, skb);
1720 sk->sk_data_ready(sk, skb->len);
1721 }
1722 1724
1723 if (cb->done) 1725 if (cb->done)
1724 cb->done(cb); 1726 cb->done(cb);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9f60008740e3..9726fe684ab8 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1130,6 +1130,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
1130 int flags = msg->msg_flags; 1130 int flags = msg->msg_flags;
1131 int err, done; 1131 int err, done;
1132 1132
1133 if (len > USHRT_MAX)
1134 return -EMSGSIZE;
1135
1133 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL| 1136 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
1134 MSG_CMSG_COMPAT)) || 1137 MSG_CMSG_COMPAT)) ||
1135 !(msg->msg_flags & MSG_EOR)) 1138 !(msg->msg_flags & MSG_EOR))
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 06b42b7f5a02..92ba71dfe080 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4133,9 +4133,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4133static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4133static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4134 int __user *optlen) 4134 int __user *optlen)
4135{ 4135{
4136 if (len < sizeof(struct sctp_event_subscribe)) 4136 if (len <= 0)
4137 return -EINVAL; 4137 return -EINVAL;
4138 len = sizeof(struct sctp_event_subscribe); 4138 if (len > sizeof(struct sctp_event_subscribe))
4139 len = sizeof(struct sctp_event_subscribe);
4139 if (put_user(len, optlen)) 4140 if (put_user(len, optlen))
4140 return -EFAULT; 4141 return -EFAULT;
4141 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4142 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
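
[annotation] The SCTP hunk restores backwards compatibility for binaries built against an older, smaller sctp_event_subscribe: any positive length up to the current size is honoured with a short copy instead of failing with -EINVAL. A user-space sketch of that clamping; the struct and sizes are illustrative:

#include <stdio.h>
#include <string.h>

struct event_subscribe { char fields[12]; };   /* grew across ABI versions */

static int getsockopt_events(void *optval, int len, int *optlen)
{
	struct event_subscribe sub = { "events" };

	if (len <= 0)
		return -1;                 /* -EINVAL */
	if (len > (int)sizeof(sub))
		len = sizeof(sub);         /* never copy past our struct */
	memcpy(optval, &sub, len);         /* short copy serves old callers */
	*optlen = len;
	return 0;
}

int main(void)
{
	char old_abi_buf[8];               /* caller compiled before the growth */
	int optlen = sizeof(old_abi_buf);

	if (getsockopt_events(old_abi_buf, optlen, &optlen) == 0)
		printf("copied %d bytes\n", optlen);
	return 0;
}
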
diff --git a/net/socket.c b/net/socket.c
index 484cc6953fc6..851edcd6b098 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -811,9 +811,9 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
811 811
812 sock = file->private_data; 812 sock = file->private_data;
813 813
814 flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; 814 flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
815 if (more) 815 /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
816 flags |= MSG_MORE; 816 flags |= more;
817 817
818 return kernel_sendpage(sock, page, offset, size, flags); 818 return kernel_sendpage(sock, page, offset, size, flags);
819} 819}