author		Eric Dumazet <edumazet@google.com>	2014-08-22 23:30:12 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-23 00:08:50 -0400
commit		884cf705c7e60bc6ade7ddafcbe943af4dc84604 (patch)
tree		6eae5bde7592b9e7adf32eeff183571cb60086f8 /net/rxrpc
parent		d2de875c6d4cbec8a99c880160181a3ed5b9992e (diff)
net: remove dead code after sk_data_ready change
As a followup to commit 676d23690fb ("net: Fix use after free by removing
length arg from sk_data_ready callbacks"), we can remove some useless code
in sock_queue_rcv_skb() and rxrpc_queue_rcv_skb().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
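For context, a minimal sketch (not the exact ar-input.c code) of why skb_len
became dead: before 676d23690fb, sk_data_ready() took the number of bytes
queued, so the length had to be cached before the skb was put on the receive
queue, where another thread could free it at any time; now the callback takes
only the socket.

	/* before 676d23690fb: length cached before queueing, then passed on */
	skb_len = skb->len;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);

	/* after: sk_data_ready() takes only the socket, so skb_len is unused */
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);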
Diffstat (limited to 'net/rxrpc')
-rw-r--r--	net/rxrpc/ar-input.c	9
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 63b21e580de9..481f89f93789 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -45,7 +45,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
 	struct rxrpc_skb_priv *sp;
 	struct rxrpc_sock *rx = call->socket;
 	struct sock *sk;
-	int skb_len, ret;
+	int ret;
 
 	_enter(",,%d,%d", force, terminal);
 
@@ -101,13 +101,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
 		rx->interceptor(sk, call->user_call_ID, skb);
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 	} else {
-
-		/* Cache the SKB length before we tack it onto the
-		 * receive queue. Once it is added it no longer
-		 * belongs to us and may be freed by other threads of
-		 * control pulling packets from the queue */
-		skb_len = skb->len;
-
 		_net("post skb %p", skb);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		spin_unlock_bh(&sk->sk_receive_queue.lock);