author     Eric Dumazet <eric.dumazet@gmail.com>     2010-06-01 02:44:05 -0400
committer  David S. Miller <davem@davemloft.net>     2010-06-01 02:44:05 -0400
commit     b1faf5666438090a4dc4fceac8502edc7788b7e3 (patch)
tree       f90808dea27cc38aff6feed1782e2a5666fa6ee9
parent     bc284f94f84c3d76e49c6f3df9028c503f9589d9 (diff)
net: sock_queue_err_skb() dont mess with sk_forward_alloc
Correct sk_forward_alloc handling for the error queue would require either a
backlog of frames that the softirq handler could not deliver because the
socket is owned by a user thread, or an extension of backlog processing to
handle both normal and error packets.

Another possibility is to not memory-charge the error queue at all; that is
what this patch implements.

Note: this reverts commit 29030374 (net: fix sk_forward_alloc corruptions),
since we no longer need to lock the socket.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/sock.h  | 15
-rw-r--r--  net/core/skbuff.c   | 30
-rw-r--r--  net/ipv4/udp.c      |  6
-rw-r--r--  net/ipv6/udp.c      |  6
4 files changed, 33 insertions(+), 24 deletions(-)
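For context (not part of the patch): packets placed on sk_error_queue by sock_queue_err_skb() are consumed from userspace with recvmsg() and the MSG_ERRQUEUE flag. Below is a minimal sketch of such a consumer, assuming a UDP socket fd on which IP_RECVERR has already been enabled; the helper name drain_errqueue() is purely illustrative.

/*
 * Illustrative userspace consumer (not part of this patch): errors queued
 * by sock_queue_err_skb() sit on sk_error_queue until userspace reads them
 * with recvmsg() and the MSG_ERRQUEUE flag.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

static void drain_errqueue(int fd)
{
	char data[512], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	/* MSG_ERRQUEUE reads from sk_error_queue, not the data queue */
	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0) {
		if (errno != EAGAIN)
			perror("recvmsg(MSG_ERRQUEUE)");
		return;
	}

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cmsg);

			printf("errqueue: errno=%d origin=%d\n",
			       ee->ee_errno, ee->ee_origin);
		}
	}
}

The error queue only fills if it has been requested first, e.g. with
int on = 1; setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
which is the ip_icmp_error()/ipv6_icmp_error() path touched in the UDP hunks below.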
diff --git a/include/net/sock.h b/include/net/sock.h
index ca241ea14875..731150d52799 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1524,20 +1524,7 @@ extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
 
 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
-static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
-{
-	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
-		return -ENOMEM;
-	skb_set_owner_r(skb, sk);
-	skb_queue_tail(&sk->sk_error_queue, skb);
-	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
-	return 0;
-}
+extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 
 /*
  * Recover an error report and clear atomically
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4e7ac09c281a..9f07e749d7b1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf)
+		return -ENOMEM;
+
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rmem_free;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+	skb_queue_tail(&sk->sk_error_queue, skb);
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb->len);
+	return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
 		struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2997,9 +3025,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
 
-	bh_lock_sock(sk);
 	err = sock_queue_err_skb(sk, skb);
-	bh_unlock_sock(sk);
 
 	if (err)
 		kfree_skb(skb);
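The skb_tstamp_tx() caller above feeds the same queue: TX timestamps are reported as error-queue entries with SO_EE_ORIGIN_TIMESTAMPING, and with this patch they are queued without taking the socket lock. A small sketch of how userspace requests them, separate from the patch and using only the standard SO_TIMESTAMPING socket option; enable_tx_timestamps() is an illustrative name.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Request software TX timestamps; they are delivered on sk_error_queue
 * via skb_tstamp_tx() -> sock_queue_err_skb() and read with MSG_ERRQUEUE. */
static int enable_tx_timestamps(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0) {
		perror("setsockopt(SO_TIMESTAMPING)");
		return -1;
	}
	return 0;
}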
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 50678f9a2763..eec4ff456e33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,11 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 	if (!inet->recverr) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 			goto out;
-	} else {
-		bh_lock_sock(sk);
+	} else
 		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-		bh_unlock_sock(sk);
-	}
+
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3048f906c042..87be58673b55 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -466,11 +466,9 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
 		goto out;
 
-	if (np->recverr) {
-		bh_lock_sock(sk);
+	if (np->recverr)
 		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
-		bh_unlock_sock(sk);
-	}
+
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out: