author     Linus Torvalds <torvalds@linux-foundation.org>   2015-04-17 16:31:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-04-17 16:31:08 -0400
commit     388f997620cb57372c494a194e9698b28cc179b8 (patch)
tree       31f2b7f01793f1711794193450f9047f78ee5370 /net
parent     e2fdae7e7c5a690b10b2d2891ec819e554dc033d (diff)
parent     e3122b7fae7b4e3d1d49fa84f6515bcbe6cbc6fc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix verifier memory corruption and other bugs in BPF layer, from
    Alexei Starovoitov.

 2) Add a conservative fix for doing BPF properly in the BPF classifier
    of the packet scheduler on ingress. Also from Alexei.

 3) The SKB scrubber should not clear out the packet MARK and security
    label, from Herbert Xu.

 4) Fix oops on rmmod in stmmac driver, from Bryan O'Donoghue.

 5) Pause handling is not correct in the stmmac driver because it
    doesn't take into consideration the RX and TX fifo sizes. From
    Vince Bridgers.

 6) Failure path missing unlock in FOU driver, from Wang Cong.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  net: dsa: use DEVICE_ATTR_RW to declare temp1_max
  netns: remove BUG_ONs from net_generic()
  IB/ipoib: Fix ndo_get_iflink
  sfc: Fix memcpy() with const destination compiler warning.
  altera tse: Fix network-delays and -retransmissions after high throughput.
  net: remove unused 'dev' argument from netif_needs_gso()
  act_mirred: Fix bogus header when redirecting from VLAN
  inet_diag: fix access to tcp cc information
  tcp: tcp_get_info() should fetch socket fields once
  net: dsa: mv88e6xxx: Add missing initialization in mv88e6xxx_set_port_state()
  skbuff: Do not scrub skb mark within the same name space
  Revert "net: Reset secmark when scrubbing packet"
  bpf: fix two bugs in verification logic when accessing 'ctx' pointer
  bpf: fix bpf helpers to use skb->mac_header relative offsets
  stmmac: Configure Flow Control to work correctly based on rxfifo size
  stmmac: Enable unicast pause frame detect in GMAC Register 6
  stmmac: Read tx-fifo-depth and rx-fifo-depth from the devicetree
  stmmac: Add defines and documentation for enabling flow control
  stmmac: Add properties for transmit and receive fifo sizes
  stmmac: fix oops on rmmod after assigning ip addr
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c             2
-rw-r--r--  net/core/filter.c         41
-rw-r--r--  net/core/net_namespace.c   1
-rw-r--r--  net/core/skbuff.c         10
-rw-r--r--  net/dsa/dsa.c              6
-rw-r--r--  net/ipv4/fou.c             3
-rw-r--r--  net/ipv4/inet_diag.c      28
-rw-r--r--  net/ipv4/tcp.c            10
-rw-r--r--  net/ipv4/tcp_dctcp.c       5
-rw-r--r--  net/ipv4/tcp_illinois.c    6
-rw-r--r--  net/ipv4/tcp_vegas.c       5
-rw-r--r--  net/ipv4/tcp_vegas.h       2
-rw-r--r--  net/ipv4/tcp_westwood.c    6
-rw-r--r--  net/sched/act_bpf.c        3
-rw-r--r--  net/sched/act_mirred.c     2
-rw-r--r--  net/sched/cls_bpf.c        3
16 files changed, 91 insertions, 42 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index af4a1b0adc10..1796cef55ab5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2713,7 +2713,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	if (unlikely(!skb))
 		goto out_null;
 
-	if (netif_needs_gso(dev, skb, features)) {
+	if (netif_needs_gso(skb, features)) {
 		struct sk_buff *segs;
 
 		segs = skb_gso_segment(skb, features);
diff --git a/net/core/filter.c b/net/core/filter.c
index b669e75d2b36..bf831a85c315 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1175,12 +1175,27 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
 	return 0;
 }
 
+/**
+ * bpf_skb_clone_not_writable - is the header of a clone not writable
+ * @skb: buffer to check
+ * @len: length up to which to write, can be negative
+ *
+ * Returns true if modifying the header part of the cloned buffer
+ * does require the data to be copied. I.e. this version works with
+ * negative lengths needed for eBPF case!
+ */
+static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len)
+{
+	return skb_header_cloned(skb) ||
+	       (int) skb_headroom(skb) + len > skb->hdr_len;
+}
+
 #define BPF_RECOMPUTE_CSUM(flags)	((flags) & 1)
 
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	unsigned int offset = (unsigned int) r2;
+	int offset = (int) r2;
 	void *from = (void *) (long) r3;
 	unsigned int len = (unsigned int) r4;
 	char buf[16];
@@ -1194,10 +1209,12 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 	 *
 	 * so check for invalid 'offset' and too large 'len'
 	 */
-	if (unlikely(offset > 0xffff || len > sizeof(buf)))
+	if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
 		return -EFAULT;
 
-	if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len))
+	offset -= skb->data - skb_mac_header(skb);
+	if (unlikely(skb_cloned(skb) &&
+		     bpf_skb_clone_unwritable(skb, offset + len)))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, len, buf);
@@ -1232,15 +1249,18 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = {
 #define BPF_HEADER_FIELD_SIZE(flags)	((flags) & 0x0f)
 #define BPF_IS_PSEUDO_HEADER(flags)	((flags) & 0x10)
 
-static u64 bpf_l3_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
+static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	int offset = (int) r2;
 	__sum16 sum, *ptr;
 
-	if (unlikely(offset > 0xffff))
+	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
 
-	if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
+	offset -= skb->data - skb_mac_header(skb);
+	if (unlikely(skb_cloned(skb) &&
+		     bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1276,16 +1296,19 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_l4_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
+static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+	int offset = (int) r2;
 	__sum16 sum, *ptr;
 
-	if (unlikely(offset > 0xffff))
+	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
 
-	if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
+	offset -= skb->data - skb_mac_header(skb);
+	if (unlikely(skb_cloned(skb) &&
+		     bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
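The three helpers above now accept offsets that are relative to the MAC header; the added "offset -= skb->data - skb_mac_header(skb)" line converts them back to skb->data-relative offsets (possibly negative) before the clone-writability check and the skb_header_pointer() lookup. A small stand-alone sketch of that translation, using purely illustrative pointer values rather than a real skb:

#include <stdio.h>

/* Illustrative values only, not kernel code: assume the MAC header starts
 * 14 bytes before skb->data (i.e. data points at the network header). */
int main(void)
{
	unsigned char pkt[64];
	unsigned char *mac_header = pkt;	/* stand-in for skb_mac_header(skb) */
	unsigned char *data = pkt + 14;		/* stand-in for skb->data */

	int offset = 23;	/* offset passed by the eBPF program, MAC-relative */

	/* Same rewrite as in the helpers above. */
	offset -= (int)(data - mac_header);

	/* offset is now relative to skb->data and may legitimately be
	 * negative when the program touches bytes inside the MAC header. */
	printf("data-relative offset = %d\n", offset);	/* prints 9 */
	return 0;
}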
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index a3abb719221f..78fc04ad36fc 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -16,7 +16,6 @@
 #include <linux/export.h>
 #include <linux/user_namespace.h>
 #include <linux/net_namespace.h>
-#include <linux/rtnetlink.h>
 #include <net/sock.h>
 #include <net/netlink.h>
 #include <net/net_namespace.h>
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3b6e5830256e..d1967dab9cc6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4124,19 +4124,21 @@ EXPORT_SYMBOL(skb_try_coalesce);
  */
 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 {
-	if (xnet)
-		skb_orphan(skb);
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->skb_iif = 0;
 	skb->ignore_df = 0;
 	skb_dst_drop(skb);
-	skb->mark = 0;
 	skb_sender_cpu_clear(skb);
-	skb_init_secmark(skb);
 	secpath_reset(skb);
 	nf_reset(skb);
 	nf_reset_trace(skb);
+
+	if (!xnet)
+		return;
+
+	skb_orphan(skb);
+	skb->mark = 0;
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
 
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 5eaadabe23a1..079a224471e7 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -124,7 +124,7 @@ static ssize_t temp1_max_store(struct device *dev,
 
 	return count;
 }
-static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store);
+static DEVICE_ATTR_RW(temp1_max);
 
 static ssize_t temp1_max_alarm_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
@@ -159,8 +159,8 @@ static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
 	if (index == 1) {
 		if (!drv->get_temp_limit)
 			mode = 0;
-		else if (drv->set_temp_limit)
-			mode |= S_IWUSR;
+		else if (!drv->set_temp_limit)
+			mode &= ~S_IWUSR;
 	} else if (index == 2 && !drv->get_temp_alarm) {
 		mode = 0;
 	}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index af150b43b214..34968cd5c146 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -711,11 +711,10 @@ static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 				    cb->nlh->nlmsg_seq, NLM_F_MULTI,
 				    skb, FOU_CMD_GET);
 		if (ret)
-			goto done;
+			break;
 	}
 	mutex_unlock(&fn->fou_lock);
 
-done:
 	cb->args[0] = idx;
 	return skb->len;
 }
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 70e8b3c308ec..bb77ebdae3b3 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -111,6 +111,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 			  const struct nlmsghdr *unlh)
 {
 	const struct inet_sock *inet = inet_sk(sk);
+	const struct tcp_congestion_ops *ca_ops;
 	const struct inet_diag_handler *handler;
 	int ext = req->idiag_ext;
 	struct inet_diag_msg *r;
@@ -208,16 +209,31 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 		info = nla_data(attr);
 	}
 
-	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
-		if (nla_put_string(skb, INET_DIAG_CONG,
-				   icsk->icsk_ca_ops->name) < 0)
+	if (ext & (1 << (INET_DIAG_CONG - 1))) {
+		int err = 0;
+
+		rcu_read_lock();
+		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
+		if (ca_ops)
+			err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
+		rcu_read_unlock();
+		if (err < 0)
 			goto errout;
+	}
 
 	handler->idiag_get_info(sk, r, info);
 
-	if (sk->sk_state < TCP_TIME_WAIT &&
-	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
-		icsk->icsk_ca_ops->get_info(sk, ext, skb);
+	if (sk->sk_state < TCP_TIME_WAIT) {
+		int err = 0;
+
+		rcu_read_lock();
+		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
+		if (ca_ops && ca_ops->get_info)
+			err = ca_ops->get_info(sk, ext, skb);
+		rcu_read_unlock();
+		if (err < 0)
+			goto errout;
+	}
 
 out:
 	nlmsg_end(skb, nlh);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 18e3a12eb1b2..59c8a027721b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2595,6 +2595,7 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now = tcp_time_stamp;
+	u32 rate;
 
 	memset(info, 0, sizeof(*info));
 
@@ -2655,10 +2656,11 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
 
 	info->tcpi_total_retrans = tp->total_retrans;
 
-	info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
-					sk->sk_pacing_rate : ~0ULL;
-	info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
-					sk->sk_max_pacing_rate : ~0ULL;
+	rate = READ_ONCE(sk->sk_pacing_rate);
+	info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+
+	rate = READ_ONCE(sk->sk_max_pacing_rate);
+	info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
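The tcp_get_info() change above follows the fetch-once idiom: a field that writers may update concurrently is read a single time through READ_ONCE() into a local, and both the sentinel test and the reported value use that one snapshot. A minimal user-space sketch of the idiom; the READ_ONCE macro and the structure here are simplified stand-ins, not the kernel definitions:

#include <stdint.h>

/* Simplified stand-in for the kernel's READ_ONCE(): the volatile access
 * keeps the compiler from re-loading the field between its two uses. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct pacing_state {
	uint32_t sk_pacing_rate;	/* may be updated concurrently */
};

/* Report the pacing rate, widening the "unlimited" sentinel ~0U to ~0ULL. */
uint64_t report_pacing_rate(const struct pacing_state *sk)
{
	uint32_t rate = READ_ONCE(sk->sk_pacing_rate);	/* single snapshot */

	/* The comparison and the returned value see the same snapshot, so a
	 * concurrent writer cannot make them disagree. */
	return rate != ~0U ? rate : ~0ULL;
}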
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index b504371af742..4376016f7fa5 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -277,7 +277,7 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
 	}
 }
 
-static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+static int dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 {
 	const struct dctcp *ca = inet_csk_ca(sk);
 
@@ -297,8 +297,9 @@ static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 			info.dctcp_ab_tot = ca->acked_bytes_total;
 		}
 
-		nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info);
+		return nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info);
 	}
+	return 0;
 }
 
 static struct tcp_congestion_ops dctcp __read_mostly = {
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 1d5a30a90adf..67476f085e48 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -300,8 +300,7 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
 }
 
 /* Extract info for Tcp socket info provided via netlink. */
-static void tcp_illinois_info(struct sock *sk, u32 ext,
-			      struct sk_buff *skb)
+static int tcp_illinois_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 {
 	const struct illinois *ca = inet_csk_ca(sk);
 
@@ -318,8 +317,9 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
 			do_div(t, info.tcpv_rttcnt);
 			info.tcpv_rtt = t;
 		}
-		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+		return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
 	}
+	return 0;
 }
 
 static struct tcp_congestion_ops tcp_illinois __read_mostly = {
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a6afde666ab1..c71a1b8f7bde 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -286,7 +286,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 }
 
 /* Extract info for Tcp socket info provided via netlink. */
-void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 {
 	const struct vegas *ca = inet_csk_ca(sk);
 	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
@@ -297,8 +297,9 @@ void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 			.tcpv_minrtt = ca->minRTT,
 		};
 
-		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+		return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
 	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
 
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 0531b99d8637..e8a6b33cc61d 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -19,6 +19,6 @@ void tcp_vegas_init(struct sock *sk);
 void tcp_vegas_state(struct sock *sk, u8 ca_state);
 void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
 void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
-void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
+int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
 
 #endif /* __TCP_VEGAS_H */
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index bb63fba47d47..b3c57cceb990 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -256,8 +256,7 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 }
 
 /* Extract info for Tcp socket info provided via netlink. */
-static void tcp_westwood_info(struct sock *sk, u32 ext,
-			      struct sk_buff *skb)
+static int tcp_westwood_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 {
 	const struct westwood *ca = inet_csk_ca(sk);
 
@@ -268,8 +267,9 @@ static void tcp_westwood_info(struct sock *sk, u32 ext,
 			.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
 		};
 
-		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+		return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
 	}
+	return 0;
 }
 
 static struct tcp_congestion_ops tcp_westwood __read_mostly = {
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 4d2cede17468..dc6a2d324bd8 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -38,6 +38,9 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
 	struct tcf_bpf *prog = act->priv;
 	int action, filter_res;
 
+	if (unlikely(!skb_mac_header_was_set(skb)))
+		return TC_ACT_UNSPEC;
+
 	spin_lock(&prog->tcf_lock);
 
 	prog->tcf_tm.lastuse = jiffies;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 5953517ec059..3f63ceac8e01 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -157,7 +157,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
 	if (!(at & AT_EGRESS)) {
 		if (m->tcfm_ok_push)
-			skb_push(skb2, skb2->dev->hard_header_len);
+			skb_push(skb2, skb->mac_len);
 	}
 
 	/* mirror is always swallowed */
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 5c4171c5d2bd..91bd9c19471d 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -66,6 +66,9 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	struct cls_bpf_prog *prog;
 	int ret = -1;
 
+	if (unlikely(!skb_mac_header_was_set(skb)))
+		return -1;
+
 	/* Needed here for accessing maps. */
 	rcu_read_lock();
 	list_for_each_entry_rcu(prog, &head->plist, link) {