about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2016-04-05 16:29:38 -0400
committerDavid S. Miller <davem@davemloft.net>2016-04-05 16:29:38 -0400
commit66f87790361c2c33dd7948a618da0cb632eed20d (patch)
tree4a4d64af861781920b2b4ef6d0e7b4423b787292
parente43d15c8d3c8680fbf142360e5958f2ddd437047 (diff)
parent627d2d6b550094d88f9e518e15967e7bf906ebbf (diff)
Merge branch 'udp-peek'
Willem de Bruijn says: ==================== udp: support SO_PEEK_OFF Support peeking at a non-zero offset for UDP sockets. Match the existing behavior on Unix datagram sockets. 1/3 makes the sk_peek_offset functions safe to use outside locks 2/3 removes udp headers before enqueue, to simplify offset arithmetic 3/3 introduces SO_PEEK_OFF support, with Unix socket peek semantics. Changes v1->v2 - squash patches 3 and 4 ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/net/sock.h27
-rw-r--r--include/net/udp.h9
-rw-r--r--net/core/datagram.c9
-rw-r--r--net/core/sock.c28
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/udp.c38
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/udp.c30
9 files changed, 97 insertions(+), 53 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 15d0df943466..007381270ff8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2949,7 +2949,12 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
2949 struct iov_iter *from, int len); 2949 struct iov_iter *from, int len);
2950int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); 2950int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
2951void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 2951void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2952void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); 2952void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
2953static inline void skb_free_datagram_locked(struct sock *sk,
2954 struct sk_buff *skb)
2955{
2956 __skb_free_datagram_locked(sk, skb, 0);
2957}
2953int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); 2958int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
2954int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); 2959int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
2955int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); 2960int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
diff --git a/include/net/sock.h b/include/net/sock.h
index 310c4367ea83..1decb7a22261 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -457,28 +457,32 @@ struct sock {
457#define SK_CAN_REUSE 1 457#define SK_CAN_REUSE 1
458#define SK_FORCE_REUSE 2 458#define SK_FORCE_REUSE 2
459 459
460int sk_set_peek_off(struct sock *sk, int val);
461
460static inline int sk_peek_offset(struct sock *sk, int flags) 462static inline int sk_peek_offset(struct sock *sk, int flags)
461{ 463{
462 if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) 464 if (unlikely(flags & MSG_PEEK)) {
463 return sk->sk_peek_off; 465 s32 off = READ_ONCE(sk->sk_peek_off);
464 else 466 if (off >= 0)
465 return 0; 467 return off;
468 }
469
470 return 0;
466} 471}
467 472
468static inline void sk_peek_offset_bwd(struct sock *sk, int val) 473static inline void sk_peek_offset_bwd(struct sock *sk, int val)
469{ 474{
470 if (sk->sk_peek_off >= 0) { 475 s32 off = READ_ONCE(sk->sk_peek_off);
471 if (sk->sk_peek_off >= val) 476
472 sk->sk_peek_off -= val; 477 if (unlikely(off >= 0)) {
473 else 478 off = max_t(s32, off - val, 0);
474 sk->sk_peek_off = 0; 479 WRITE_ONCE(sk->sk_peek_off, off);
475 } 480 }
476} 481}
477 482
478static inline void sk_peek_offset_fwd(struct sock *sk, int val) 483static inline void sk_peek_offset_fwd(struct sock *sk, int val)
479{ 484{
480 if (sk->sk_peek_off >= 0) 485 sk_peek_offset_bwd(sk, -val);
481 sk->sk_peek_off += val;
482} 486}
483 487
484/* 488/*
@@ -1862,6 +1866,7 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
1862 1866
1863void sk_stop_timer(struct sock *sk, struct timer_list *timer); 1867void sk_stop_timer(struct sock *sk, struct timer_list *timer);
1864 1868
1869int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
1865int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 1870int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
1866 1871
1867int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); 1872int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/udp.h b/include/net/udp.h
index d870ec1611c4..a0b0da97164c 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -158,6 +158,15 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr,
158void udp_set_csum(bool nocheck, struct sk_buff *skb, 158void udp_set_csum(bool nocheck, struct sk_buff *skb,
159 __be32 saddr, __be32 daddr, int len); 159 __be32 saddr, __be32 daddr, int len);
160 160
161static inline void udp_csum_pull_header(struct sk_buff *skb)
162{
163 if (skb->ip_summed == CHECKSUM_NONE)
164 skb->csum = csum_partial(udp_hdr(skb), sizeof(struct udphdr),
165 skb->csum);
166 skb_pull_rcsum(skb, sizeof(struct udphdr));
167 UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
168}
169
161struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, 170struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
162 struct udphdr *uh); 171 struct udphdr *uh);
163int udp_gro_complete(struct sk_buff *skb, int nhoff); 172int udp_gro_complete(struct sk_buff *skb, int nhoff);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index fa9dc6450b08..b7de71f8d5d3 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -301,16 +301,19 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
301} 301}
302EXPORT_SYMBOL(skb_free_datagram); 302EXPORT_SYMBOL(skb_free_datagram);
303 303
304void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) 304void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
305{ 305{
306 bool slow; 306 bool slow;
307 307
308 if (likely(atomic_read(&skb->users) == 1)) 308 if (likely(atomic_read(&skb->users) == 1))
309 smp_rmb(); 309 smp_rmb();
310 else if (likely(!atomic_dec_and_test(&skb->users))) 310 else if (likely(!atomic_dec_and_test(&skb->users))) {
311 sk_peek_offset_bwd(sk, len);
311 return; 312 return;
313 }
312 314
313 slow = lock_sock_fast(sk); 315 slow = lock_sock_fast(sk);
316 sk_peek_offset_bwd(sk, len);
314 skb_orphan(skb); 317 skb_orphan(skb);
315 sk_mem_reclaim_partial(sk); 318 sk_mem_reclaim_partial(sk);
316 unlock_sock_fast(sk, slow); 319 unlock_sock_fast(sk, slow);
@@ -318,7 +321,7 @@ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
318 /* skb is now orphaned, can be freed outside of locked section */ 321 /* skb is now orphaned, can be freed outside of locked section */
319 __kfree_skb(skb); 322 __kfree_skb(skb);
320} 323}
321EXPORT_SYMBOL(skb_free_datagram_locked); 324EXPORT_SYMBOL(__skb_free_datagram_locked);
322 325
323/** 326/**
324 * skb_kill_datagram - Free a datagram skbuff forcibly 327 * skb_kill_datagram - Free a datagram skbuff forcibly
diff --git a/net/core/sock.c b/net/core/sock.c
index 2f517ea56786..2ce76e82857f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -402,9 +402,8 @@ static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
402} 402}
403 403
404 404
405int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 405int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
406{ 406{
407 int err;
408 unsigned long flags; 407 unsigned long flags;
409 struct sk_buff_head *list = &sk->sk_receive_queue; 408 struct sk_buff_head *list = &sk->sk_receive_queue;
410 409
@@ -414,10 +413,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
414 return -ENOMEM; 413 return -ENOMEM;
415 } 414 }
416 415
417 err = sk_filter(sk, skb);
418 if (err)
419 return err;
420
421 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { 416 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
422 atomic_inc(&sk->sk_drops); 417 atomic_inc(&sk->sk_drops);
423 return -ENOBUFS; 418 return -ENOBUFS;
@@ -440,6 +435,18 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
440 sk->sk_data_ready(sk); 435 sk->sk_data_ready(sk);
441 return 0; 436 return 0;
442} 437}
438EXPORT_SYMBOL(__sock_queue_rcv_skb);
439
440int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
441{
442 int err;
443
444 err = sk_filter(sk, skb);
445 if (err)
446 return err;
447
448 return __sock_queue_rcv_skb(sk, skb);
449}
443EXPORT_SYMBOL(sock_queue_rcv_skb); 450EXPORT_SYMBOL(sock_queue_rcv_skb);
444 451
445int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) 452int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
@@ -2180,6 +2187,15 @@ void __sk_mem_reclaim(struct sock *sk, int amount)
2180} 2187}
2181EXPORT_SYMBOL(__sk_mem_reclaim); 2188EXPORT_SYMBOL(__sk_mem_reclaim);
2182 2189
2190int sk_set_peek_off(struct sock *sk, int val)
2191{
2192 if (val < 0)
2193 return -EINVAL;
2194
2195 sk->sk_peek_off = val;
2196 return 0;
2197}
2198EXPORT_SYMBOL_GPL(sk_set_peek_off);
2183 2199
2184/* 2200/*
2185 * Set of default routines for initialising struct proto_ops when 2201 * Set of default routines for initialising struct proto_ops when
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9e481992dbae..a38b9910af60 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -948,6 +948,7 @@ const struct proto_ops inet_dgram_ops = {
948 .recvmsg = inet_recvmsg, 948 .recvmsg = inet_recvmsg,
949 .mmap = sock_no_mmap, 949 .mmap = sock_no_mmap,
950 .sendpage = inet_sendpage, 950 .sendpage = inet_sendpage,
951 .set_peek_off = sk_set_peek_off,
951#ifdef CONFIG_COMPAT 952#ifdef CONFIG_COMPAT
952 .compat_setsockopt = compat_sock_common_setsockopt, 953 .compat_setsockopt = compat_sock_common_setsockopt,
953 .compat_getsockopt = compat_sock_common_getsockopt, 954 .compat_getsockopt = compat_sock_common_getsockopt,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 355bdb221057..d80312ddbb8a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1294,7 +1294,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
1294 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); 1294 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
1295 struct sk_buff *skb; 1295 struct sk_buff *skb;
1296 unsigned int ulen, copied; 1296 unsigned int ulen, copied;
1297 int peeked, off = 0; 1297 int peeked, peeking, off;
1298 int err; 1298 int err;
1299 int is_udplite = IS_UDPLITE(sk); 1299 int is_udplite = IS_UDPLITE(sk);
1300 bool checksum_valid = false; 1300 bool checksum_valid = false;
@@ -1304,15 +1304,16 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
1304 return ip_recv_error(sk, msg, len, addr_len); 1304 return ip_recv_error(sk, msg, len, addr_len);
1305 1305
1306try_again: 1306try_again:
1307 peeking = off = sk_peek_offset(sk, flags);
1307 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 1308 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
1308 &peeked, &off, &err); 1309 &peeked, &off, &err);
1309 if (!skb) 1310 if (!skb)
1310 goto out; 1311 return err;
1311 1312
1312 ulen = skb->len - sizeof(struct udphdr); 1313 ulen = skb->len;
1313 copied = len; 1314 copied = len;
1314 if (copied > ulen) 1315 if (copied > ulen - off)
1315 copied = ulen; 1316 copied = ulen - off;
1316 else if (copied < ulen) 1317 else if (copied < ulen)
1317 msg->msg_flags |= MSG_TRUNC; 1318 msg->msg_flags |= MSG_TRUNC;
1318 1319
@@ -1322,18 +1323,16 @@ try_again:
1322 * coverage checksum (UDP-Lite), do it before the copy. 1323 * coverage checksum (UDP-Lite), do it before the copy.
1323 */ 1324 */
1324 1325
1325 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { 1326 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) {
1326 checksum_valid = !udp_lib_checksum_complete(skb); 1327 checksum_valid = !udp_lib_checksum_complete(skb);
1327 if (!checksum_valid) 1328 if (!checksum_valid)
1328 goto csum_copy_err; 1329 goto csum_copy_err;
1329 } 1330 }
1330 1331
1331 if (checksum_valid || skb_csum_unnecessary(skb)) 1332 if (checksum_valid || skb_csum_unnecessary(skb))
1332 err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), 1333 err = skb_copy_datagram_msg(skb, off, msg, copied);
1333 msg, copied);
1334 else { 1334 else {
1335 err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), 1335 err = skb_copy_and_csum_datagram_msg(skb, off, msg);
1336 msg);
1337 1336
1338 if (err == -EINVAL) 1337 if (err == -EINVAL)
1339 goto csum_copy_err; 1338 goto csum_copy_err;
@@ -1346,7 +1345,8 @@ try_again:
1346 UDP_INC_STATS_USER(sock_net(sk), 1345 UDP_INC_STATS_USER(sock_net(sk),
1347 UDP_MIB_INERRORS, is_udplite); 1346 UDP_MIB_INERRORS, is_udplite);
1348 } 1347 }
1349 goto out_free; 1348 skb_free_datagram_locked(sk, skb);
1349 return err;
1350 } 1350 }
1351 1351
1352 if (!peeked) 1352 if (!peeked)
@@ -1370,9 +1370,7 @@ try_again:
1370 if (flags & MSG_TRUNC) 1370 if (flags & MSG_TRUNC)
1371 err = ulen; 1371 err = ulen;
1372 1372
1373out_free: 1373 __skb_free_datagram_locked(sk, skb, peeking ? -err : err);
1374 skb_free_datagram_locked(sk, skb);
1375out:
1376 return err; 1374 return err;
1377 1375
1378csum_copy_err: 1376csum_copy_err:
@@ -1500,7 +1498,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1500 sk_incoming_cpu_update(sk); 1498 sk_incoming_cpu_update(sk);
1501 } 1499 }
1502 1500
1503 rc = sock_queue_rcv_skb(sk, skb); 1501 rc = __sock_queue_rcv_skb(sk, skb);
1504 if (rc < 0) { 1502 if (rc < 0) {
1505 int is_udplite = IS_UDPLITE(sk); 1503 int is_udplite = IS_UDPLITE(sk);
1506 1504
@@ -1616,10 +1614,14 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1616 } 1614 }
1617 } 1615 }
1618 1616
1619 if (rcu_access_pointer(sk->sk_filter) && 1617 if (rcu_access_pointer(sk->sk_filter)) {
1620 udp_lib_checksum_complete(skb)) 1618 if (udp_lib_checksum_complete(skb))
1621 goto csum_error; 1619 goto csum_error;
1620 if (sk_filter(sk, skb))
1621 goto drop;
1622 }
1622 1623
1624 udp_csum_pull_header(skb);
1623 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { 1625 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
1624 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, 1626 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1625 is_udplite); 1627 is_udplite);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b11c37cfd67c..2b78aad0d52f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -561,6 +561,7 @@ const struct proto_ops inet6_dgram_ops = {
561 .recvmsg = inet_recvmsg, /* ok */ 561 .recvmsg = inet_recvmsg, /* ok */
562 .mmap = sock_no_mmap, 562 .mmap = sock_no_mmap,
563 .sendpage = sock_no_sendpage, 563 .sendpage = sock_no_sendpage,
564 .set_peek_off = sk_set_peek_off,
564#ifdef CONFIG_COMPAT 565#ifdef CONFIG_COMPAT
565 .compat_setsockopt = compat_sock_common_setsockopt, 566 .compat_setsockopt = compat_sock_common_setsockopt,
566 .compat_getsockopt = compat_sock_common_getsockopt, 567 .compat_getsockopt = compat_sock_common_getsockopt,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 78a7dfd12707..87bd7aff88b4 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -357,7 +357,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
357 struct inet_sock *inet = inet_sk(sk); 357 struct inet_sock *inet = inet_sk(sk);
358 struct sk_buff *skb; 358 struct sk_buff *skb;
359 unsigned int ulen, copied; 359 unsigned int ulen, copied;
360 int peeked, off = 0; 360 int peeked, peeking, off;
361 int err; 361 int err;
362 int is_udplite = IS_UDPLITE(sk); 362 int is_udplite = IS_UDPLITE(sk);
363 bool checksum_valid = false; 363 bool checksum_valid = false;
@@ -371,15 +371,16 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
371 return ipv6_recv_rxpmtu(sk, msg, len, addr_len); 371 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
372 372
373try_again: 373try_again:
374 peeking = off = sk_peek_offset(sk, flags);
374 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 375 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
375 &peeked, &off, &err); 376 &peeked, &off, &err);
376 if (!skb) 377 if (!skb)
377 goto out; 378 return err;
378 379
379 ulen = skb->len - sizeof(struct udphdr); 380 ulen = skb->len;
380 copied = len; 381 copied = len;
381 if (copied > ulen) 382 if (copied > ulen - off)
382 copied = ulen; 383 copied = ulen - off;
383 else if (copied < ulen) 384 else if (copied < ulen)
384 msg->msg_flags |= MSG_TRUNC; 385 msg->msg_flags |= MSG_TRUNC;
385 386
@@ -391,17 +392,16 @@ try_again:
391 * coverage checksum (UDP-Lite), do it before the copy. 392 * coverage checksum (UDP-Lite), do it before the copy.
392 */ 393 */
393 394
394 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { 395 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) {
395 checksum_valid = !udp_lib_checksum_complete(skb); 396 checksum_valid = !udp_lib_checksum_complete(skb);
396 if (!checksum_valid) 397 if (!checksum_valid)
397 goto csum_copy_err; 398 goto csum_copy_err;
398 } 399 }
399 400
400 if (checksum_valid || skb_csum_unnecessary(skb)) 401 if (checksum_valid || skb_csum_unnecessary(skb))
401 err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), 402 err = skb_copy_datagram_msg(skb, off, msg, copied);
402 msg, copied);
403 else { 403 else {
404 err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg); 404 err = skb_copy_and_csum_datagram_msg(skb, off, msg);
405 if (err == -EINVAL) 405 if (err == -EINVAL)
406 goto csum_copy_err; 406 goto csum_copy_err;
407 } 407 }
@@ -418,7 +418,8 @@ try_again:
418 UDP_MIB_INERRORS, 418 UDP_MIB_INERRORS,
419 is_udplite); 419 is_udplite);
420 } 420 }
421 goto out_free; 421 skb_free_datagram_locked(sk, skb);
422 return err;
422 } 423 }
423 if (!peeked) { 424 if (!peeked) {
424 if (is_udp4) 425 if (is_udp4)
@@ -466,9 +467,7 @@ try_again:
466 if (flags & MSG_TRUNC) 467 if (flags & MSG_TRUNC)
467 err = ulen; 468 err = ulen;
468 469
469out_free: 470 __skb_free_datagram_locked(sk, skb, peeking ? -err : err);
470 skb_free_datagram_locked(sk, skb);
471out:
472 return err; 471 return err;
473 472
474csum_copy_err: 473csum_copy_err:
@@ -554,7 +553,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
554 sk_incoming_cpu_update(sk); 553 sk_incoming_cpu_update(sk);
555 } 554 }
556 555
557 rc = sock_queue_rcv_skb(sk, skb); 556 rc = __sock_queue_rcv_skb(sk, skb);
558 if (rc < 0) { 557 if (rc < 0) {
559 int is_udplite = IS_UDPLITE(sk); 558 int is_udplite = IS_UDPLITE(sk);
560 559
@@ -648,8 +647,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
648 if (rcu_access_pointer(sk->sk_filter)) { 647 if (rcu_access_pointer(sk->sk_filter)) {
649 if (udp_lib_checksum_complete(skb)) 648 if (udp_lib_checksum_complete(skb))
650 goto csum_error; 649 goto csum_error;
650 if (sk_filter(sk, skb))
651 goto drop;
651 } 652 }
652 653
654 udp_csum_pull_header(skb);
653 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { 655 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
654 UDP6_INC_STATS_BH(sock_net(sk), 656 UDP6_INC_STATS_BH(sock_net(sk),
655 UDP_MIB_RCVBUFERRORS, is_udplite); 657 UDP_MIB_RCVBUFERRORS, is_udplite);