author	Eric Dumazet <eric.dumazet@gmail.com>	2010-04-28 17:35:48 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-28 17:35:48 -0400
commit	4b0b72f7dd617b13abd1b04c947e15873e011a24 (patch)
tree	16fc7bc990fa47cccb62bdb34cb23bd3c26b7a50 /net/ipv4
parent	cfc1fbb079b265bf69d4ceba590a2e2c1a1cde33 (diff)
net: speedup udp receive path
Since commit 95766fff ([UDP]: Add memory accounting.),
each received packet needs one extra lock_sock()/release_sock() pair.
This added latency because of possible backlog handling. Then later,
ticket spinlocks added yet another latency source in case of DDoS.
This patch introduces lock_sock_bh() and unlock_sock_bh()
synchronization primitives, avoiding one atomic operation and backlog
processing.
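The primitives themselves land in include/net/sock.h, outside the
net/ipv4 view shown below. A minimal sketch of what they reduce to,
assuming the usual struct sock layout with its sk_lock.slock member:

	/*
	 * Sketch: take the socket's backlog spinlock directly, with
	 * bottom halves disabled, avoiding the extra atomic operation
	 * and the backlog run that release_sock() may trigger.
	 */
	static inline void lock_sock_bh(struct sock *sk)
	{
		spin_lock_bh(&sk->sk_lock.slock);
	}

	static inline void unlock_sock_bh(struct sock *sk)
	{
		spin_unlock_bh(&sk->sk_lock.slock);
	}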
skb_free_datagram_locked() uses them instead of the full-blown
lock_sock()/release_sock() pair. The skb is orphaned inside the locked
section for proper socket memory reclaim, and finally freed outside of it.
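For reference, here is roughly how skb_free_datagram_locked() in
net/core/datagram.c ends up after this change (that file is also
outside the net/ipv4 diffstat below); treat it as a sketch
reconstructed from the description above:

	void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
	{
		lock_sock_bh(sk);
		/* Orphan the skb while the spinlock is held so the
		 * socket's receive memory accounting is updated
		 * before reclaim. */
		skb_orphan(skb);
		sk_mem_reclaim_partial(sk);
		unlock_sock_bh(sk);

		/* skb is now orphaned: free it outside the locked section */
		__kfree_skb(skb);
	}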
The UDP receive path now takes the socket spinlock only once.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/udp.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 63eb56b2d873..1f86965ba7d7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1062,10 +1062,10 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_unlock_bh(&rcvq->lock);
 
 	if (!skb_queue_empty(&list_kill)) {
-		lock_sock(sk);
+		lock_sock_bh(sk);
 		__skb_queue_purge(&list_kill);
 		sk_mem_reclaim_partial(sk);
-		release_sock(sk);
+		unlock_sock_bh(sk);
 	}
 	return res;
 }
@@ -1196,10 +1196,10 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock(sk);
+	lock_sock_bh(sk);
 	if (!skb_kill_datagram(sk, skb, flags))
 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	release_sock(sk);
+	unlock_sock_bh(sk);
 
 	if (noblock)
 		return -EAGAIN;
@@ -1624,9 +1624,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock(sk);
+	lock_sock_bh(sk);
 	udp_flush_pending_frames(sk);
-	release_sock(sk);
+	unlock_sock_bh(sk);
 }
 
 /*