author	David S. Miller <davem@davemloft.net>	2009-10-12 06:00:31 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-12 06:00:31 -0400
commit	d5e63bded6e819ca77ee1a1d97c783a31f6caf30 (patch)
tree	bc8d38eb84b48476748e80e19cbfed102fc41953 /net
parent	91b2a3f9bb0fa8d64b365a10b0624b0341e1a338 (diff)
Revert "af_packet: add interframe drop cmsg (v6)"
This reverts commit 977750076d98c7ff6cbda51858bb5a5894a9d9ab.

Neil is reimplementing this generically, outside of AF_PACKET.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/packet/af_packet.c	33
1 files changed, 0 insertions, 33 deletions
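For context, the feature being reverted reported per-socket frame drops to userspace as a PACKET_GAPDATA control message on SOL_PACKET sockets. The sketch below is a minimal, illustrative userspace reader for that cmsg; it is not part of this commit. The PACKET_GAPDATA constant (and the fallback value 10) is taken as an assumption from the reverted patch, and once this revert is applied the kernel no longer emits the cmsg, so the handler below simply never fires.

/*
 * Hedged illustration only: reads the PACKET_GAPDATA cmsg that the
 * reverted patch delivered alongside each received frame.  Requires
 * CAP_NET_RAW (typically root) to open an AF_PACKET socket.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

#ifndef PACKET_GAPDATA
#define PACKET_GAPDATA 10	/* value used by the reverted patch (assumption) */
#endif

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	char pkt[2048];
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	for (;;) {
		struct iovec iov = { .iov_base = pkt, .iov_len = sizeof(pkt) };
		struct msghdr msg = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_control = cbuf,
			.msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cmsg;

		if (recvmsg(fd, &msg, 0) < 0)
			break;

		/* Walk the ancillary data looking for the (now removed) gap cmsg. */
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_PACKET &&
			    cmsg->cmsg_type == PACKET_GAPDATA) {
				uint32_t gap;

				memcpy(&gap, CMSG_DATA(cmsg), sizeof(gap));
				printf("%u frames dropped since last packet\n", gap);
			}
		}
	}
	return 0;
}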
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 70073a0dea5d..f87ed4803c11 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -525,31 +525,6 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 }
 
 /*
- * If we've lost frames since the last time we queued one to the
- * sk_receive_queue, we need to record it here.
- * This must be called under the protection of the socket lock
- * to prevent racing with other softirqs and user space
- */
-static inline void record_packet_gap(struct sk_buff *skb,
-					struct packet_sock *po)
-{
-	/*
-	 * We overload the mark field here, since we're about
-	 * to enqueue to a receive queue and no body else will
-	 * use this field at this point
-	 */
-	skb->mark = po->stats.tp_gap;
-	po->stats.tp_gap = 0;
-	return;
-
-}
-
-static inline __u32 check_packet_gap(struct sk_buff *skb)
-{
-	return skb->mark;
-}
-
-/*
    This function makes lazy skb cloning in hope that most of packets
    are discarded by BPF.
 
@@ -652,7 +627,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_packets++;
-	record_packet_gap(skb, po);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
 	sk->sk_data_ready(sk, skb->len);
@@ -661,7 +635,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 drop_n_acct:
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
@@ -839,7 +812,6 @@ drop:
 
 ring_is_full:
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 	sk->sk_data_ready(sk, 0);
@@ -1449,7 +1421,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb;
 	int copied, err;
 	struct sockaddr_ll *sll;
-	__u32 gap;
 
 	err = -EINVAL;
 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1528,10 +1499,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
 	}
 
-	gap = check_packet_gap(skb);
-	if (gap)
-		put_cmsg(msg, SOL_PACKET, PACKET_GAPDATA, sizeof(__u32), &gap);
-
 	/*
 	 *	Free or return the buffer as appropriate. Again this
 	 *	hides all the races and re-entrancy issues from us.