-rw-r--r--   drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c  |  9
-rw-r--r--   drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h |  2
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/Kconfig   | 10
-rw-r--r--   include/linux/netdevice.h                         | 26
-rw-r--r--   net/ipv4/tcp_input.c                              | 11
-rw-r--r--   net/netlink/af_netlink.c                          | 30
6 files changed, 67 insertions(+), 21 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 664568420c9b..d60a2ea3da19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6501,13 +6501,10 @@ static int bnx2x_link_initialize(struct link_params *params,
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
 		if (vars->line_speed == SPEED_AUTO_NEG &&
 		    (CHIP_IS_E1x(bp) ||
-		     CHIP_IS_E2(bp))) {
+		     CHIP_IS_E2(bp)))
 			bnx2x_set_parallel_detection(phy, params);
-			if (params->phy[INT_PHY].config_init)
-				params->phy[INT_PHY].config_init(phy,
-								 params,
-								 vars);
-		}
+		if (params->phy[INT_PHY].config_init)
+			params->phy[INT_PHY].config_init(phy, params, vars);
 	}
 
 	/* Init external phy*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 2a8c1dc65d9c..059f0d460af2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -816,6 +816,8 @@ static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp
 static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
 static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
 					u8 vf_qid, bool set) {return 0; }
+static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+					struct bnx2x_config_rss_params *params) {return 0; }
 static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
 static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
 static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 21962828925a..157fe8df2c3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -6,13 +6,3 @@ config MLX5_CORE
 	tristate
 	depends on PCI && X86
 	default n
-
-config MLX5_DEBUG
-	bool "Verbose debugging output" if (MLX5_CORE && EXPERT)
-	depends on MLX5_CORE
-	default y
-	---help---
-	  This option causes debugging code to be compiled into the
-	  mlx5_core driver.  The output can be turned on via the
-	  debug_mask module parameter (which can also be set after
-	  the driver is loaded through sysfs).
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 8ed4ae943053..041b42a305f6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2101,6 +2101,15 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 #endif
 }
 
+/**
+ *	netdev_sent_queue - report the number of bytes queued to hardware
+ *	@dev: network device
+ *	@bytes: number of bytes queued to the hardware device queue
+ *
+ *	Report the number of bytes queued for sending/completion to the network
+ *	device hardware queue. @bytes should be a good approximation and should
+ *	exactly match netdev_completed_queue() @bytes
+ */
 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
 {
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
@@ -2130,6 +2139,16 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 #endif
 }
 
+/**
+ *	netdev_completed_queue - report bytes and packets completed by device
+ *	@dev: network device
+ *	@pkts: actual number of packets sent over the medium
+ *	@bytes: actual number of bytes sent over the medium
+ *
+ *	Report the number of bytes and packets transmitted by the network device
+ *	hardware queue over the physical medium, @bytes must exactly match the
+ *	@bytes amount passed to netdev_sent_queue()
+ */
 static inline void netdev_completed_queue(struct net_device *dev,
 					  unsigned int pkts, unsigned int bytes)
 {
@@ -2144,6 +2163,13 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
 #endif
 }
 
+/**
+ *	netdev_reset_queue - reset the packets and bytes count of a network device
+ *	@dev_queue: network device
+ *
+ *	Reset the bytes and packet count of a network device and clear the
+ *	software flow control OFF bit for this network device
+ */
 static inline void netdev_reset_queue(struct net_device *dev_queue)
 {
 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
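
For context, the three wrappers documented above are the single-queue entry points to the byte queue limits (BQL) accounting done by netdev_tx_sent_queue()/netdev_tx_completed_queue(). Below is a minimal sketch of how a driver is expected to pair them; it is not part of this patch, and the my_*() functions are hypothetical placeholders for a driver's transmit, TX-completion and bring-up paths.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... post skb to the hardware TX ring here ... */
	netdev_sent_queue(dev, skb->len);	/* bytes handed to hardware */
	return NETDEV_TX_OK;
}

static void my_tx_clean(struct net_device *dev,
			unsigned int pkts, unsigned int bytes)
{
	/* bytes must add up exactly to what was passed to netdev_sent_queue() */
	netdev_completed_queue(dev, pkts, bytes);
}

static int my_up(struct net_device *dev)
{
	/* clear the byte/packet counters before (re)starting the queue */
	netdev_reset_queue(dev);
	return 0;
}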
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1969e16d936d..25a89eaa669d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3162,16 +3162,14 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 
 	/* If reordering is high then always grow cwnd whenever data is
 	 * delivered regardless of its ordering. Otherwise stay conservative
-	 * and only grow cwnd on in-order delivery in Open state, and retain
-	 * cwnd in Disordered state (RFC5681). A stretched ACK with
+	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
 	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
 		return flag & FLAG_FORWARD_PROGRESS;
 
-	return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
-		flag & FLAG_DATA_ACKED;
+	return flag & FLAG_DATA_ACKED;
 }
 
 /* Check that window update is acceptable.
@@ -4141,6 +4139,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
 			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 		} else {
+			tcp_grow_window(sk, skb);
 			kfree_skb_partial(skb, fragstolen);
 			skb = NULL;
 		}
@@ -4216,8 +4215,10 @@ add_sack:
 	if (tcp_is_sack(tp))
 		tcp_sack_new_ofo_skb(sk, seq, end_seq);
 end:
-	if (skb)
+	if (skb) {
+		tcp_grow_window(sk, skb);
 		skb_set_owner_r(skb, sk);
+	}
 }
 
 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a17dda1bbee0..8df7f64c6db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -168,16 +168,43 @@ int netlink_remove_tap(struct netlink_tap *nt)
 }
 EXPORT_SYMBOL_GPL(netlink_remove_tap);
 
+static bool netlink_filter_tap(const struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	bool pass = false;
+
+	/* We take the more conservative approach and
+	 * whitelist socket protocols that may pass.
+	 */
+	switch (sk->sk_protocol) {
+	case NETLINK_ROUTE:
+	case NETLINK_USERSOCK:
+	case NETLINK_SOCK_DIAG:
+	case NETLINK_NFLOG:
+	case NETLINK_XFRM:
+	case NETLINK_FIB_LOOKUP:
+	case NETLINK_NETFILTER:
+	case NETLINK_GENERIC:
+		pass = true;
+		break;
+	}
+
+	return pass;
+}
+
 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 				     struct net_device *dev)
 {
 	struct sk_buff *nskb;
+	struct sock *sk = skb->sk;
 	int ret = -ENOMEM;
 
 	dev_hold(dev);
 	nskb = skb_clone(skb, GFP_ATOMIC);
 	if (nskb) {
 		nskb->dev = dev;
+		nskb->protocol = htons((u16) sk->sk_protocol);
+
 		ret = dev_queue_xmit(nskb);
 		if (unlikely(ret > 0))
 			ret = net_xmit_errno(ret);
@@ -192,6 +219,9 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
 	int ret;
 	struct netlink_tap *tmp;
 
+	if (!netlink_filter_tap(skb))
+		return;
+
 	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 		if (unlikely(ret))
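
For context, the tap delivery path patched above only runs once a tap has been registered through netlink_add_tap()/netlink_remove_tap(), exported just before the first hunk. The sketch below is a minimal illustration of that registration and is not part of this patch: my_tap_attach()/my_tap_detach() and my_tap_dev are hypothetical, with my_tap_dev standing in for an already-registered netlink-type monitoring device in the style of nlmon.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static struct netlink_tap my_tap = {
	.module = THIS_MODULE,
};

static int my_tap_attach(struct net_device *my_tap_dev)
{
	/* the clones built in __netlink_deliver_tap_skb() will be queued
	 * to this device; netlink_add_tap() expects ARPHRD_NETLINK
	 */
	my_tap.dev = my_tap_dev;
	return netlink_add_tap(&my_tap);
}

static void my_tap_detach(void)
{
	netlink_remove_tap(&my_tap);	/* also drops the module reference */
}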