Diffstat (limited to 'net/packet/af_packet.c')
-rw-r--r--	net/packet/af_packet.c	33
1 file changed, 33 insertions(+), 0 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d7ecca0a0c07..d398a9bf6903 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -524,6 +524,31 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 }
 
 /*
+ * If we've lost frames since the last time we queued one to the
+ * sk_receive_queue, we need to record it here.
+ * This must be called under the protection of the socket lock
+ * to prevent racing with other softirqs and user space
+ */
+static inline void record_packet_gap(struct sk_buff *skb,
+				     struct packet_sock *po)
+{
+	/*
+	 * We overload the mark field here, since we're about
+	 * to enqueue to a receive queue and nobody else will
+	 * use this field at this point
+	 */
+	skb->mark = po->stats.tp_gap;
+	po->stats.tp_gap = 0;
+	return;
+
+}
+
+static inline __u32 check_packet_gap(struct sk_buff *skb)
+{
+	return skb->mark;
+}
+
+/*
    This function makes lazy skb cloning in hope that most of packets
    are discarded by BPF.
 
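For reference, the tp_gap counter and the PACKET_GAPDATA cmsg type used in the hunks below come from include/linux/if_packet.h, which is outside this diffstat (it is limited to net/packet/af_packet.c). A minimal sketch of what the header half of the series presumably adds; the exact lines and the cmsg value are an assumption, since they are not shown in this diff:

#define PACKET_GAPDATA	5	/* assumed cmsg type value; not shown in this diff */

struct tpacket_stats {
	__u32	tp_packets;
	__u32	tp_drops;
	__u32	tp_gap;		/* assumed: frames dropped since the last successful enqueue */
};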
@@ -626,6 +651,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_packets++;
+	record_packet_gap(skb, po);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
 	sk->sk_data_ready(sk, skb->len);
@@ -634,6 +660,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 drop_n_acct:
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_drops++;
+	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
@@ -811,6 +838,7 @@ drop:
 
 ring_is_full:
 	po->stats.tp_drops++;
+	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 	sk->sk_data_ready(sk, 0);
@@ -1418,6 +1446,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb;
 	int copied, err;
 	struct sockaddr_ll *sll;
+	__u32 gap;
 
 	err = -EINVAL;
 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1496,6 +1525,10 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
 	}
 
+	gap = check_packet_gap(skb);
+	if (gap)
+		put_cmsg(msg, SOL_PACKET, PACKET_GAPDATA, sizeof(__u32), &gap);
+
 	/*
 	 * Free or return the buffer as appropriate. Again this
 	 * hides all the races and re-entrancy issues from us.
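A userspace consumer would see the accumulated drop count as ancillary data on the first frame received after a gap; the kernel only attaches the cmsg when the count is non-zero. A minimal sketch of reading it with recvmsg(2) and the CMSG macros, assuming the PACKET_GAPDATA define from the header half of this series (the program needs CAP_NET_RAW to open the packet socket):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <arpa/inet.h>
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifndef PACKET_GAPDATA
#define PACKET_GAPDATA 5	/* assumption: matches the if_packet.h define */
#endif

int main(void)
{
	char pkt[2048];
	char cbuf[CMSG_SPACE(sizeof(__u32))];
	struct iovec iov = { .iov_base = pkt, .iov_len = sizeof(pkt) };
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return 1;

	for (;;) {
		struct msghdr msg = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_control = cbuf,
			.msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cmsg;
		ssize_t len = recvmsg(fd, &msg, 0);

		if (len < 0)
			break;
		/* Scan ancillary data; PACKET_GAPDATA is only present after drops. */
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_PACKET &&
			    cmsg->cmsg_type == PACKET_GAPDATA) {
				__u32 gap;

				memcpy(&gap, CMSG_DATA(cmsg), sizeof(gap));
				printf("lost %u frame(s) before this one\n", gap);
			}
		}
	}
	return 0;
}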