author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-10-07 17:18:42 -0400
committer  David S. Miller <davem@davemloft.net>    2008-10-07 17:18:42 -0400
commit     c57943a1c96214ee68f3890bb6772841ffbfd606 (patch)
tree       bfe79b29240d442c8ea104a89c2e827032b2824e
parent     b339a47c370ec669f789c5989f54eec1d78574bb (diff)
net: wrap sk->sk_backlog_rcv()
Wrap calling sk->sk_backlog_rcv() in a function. This will allow extending
the generic sk_backlog_rcv behaviour.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/sock.h    5
-rw-r--r--  include/net/tcp.h     2
-rw-r--r--  net/core/sock.c       4
-rw-r--r--  net/ipv4/tcp.c        2
-rw-r--r--  net/ipv4/tcp_timer.c  2
5 files changed, 10 insertions(+), 5 deletions(-)
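For context, below is a small stand-alone C sketch of the pattern this patch introduces: call sites go through an inline wrapper instead of invoking the sk->sk_backlog_rcv function pointer directly, so generic behaviour can later be added in a single place. The struct layout, skb type and demo handler are simplified stand-ins for illustration only, not the real kernel definitions.

/*
 * Minimal user-space sketch of the wrapping pattern.  Callers stop
 * dereferencing the sk_backlog_rcv function pointer themselves and go
 * through a static inline wrapper instead; any generic pre/post work can
 * later be added inside the wrapper without touching every call site.
 */
#include <stdio.h>

struct sk_buff { int len; };            /* simplified stand-in */

struct sock {
	/* protocol-specific backlog receive handler */
	int (*sk_backlog_rcv)(struct sock *sk, struct sk_buff *skb);
};

/* The wrapper: today it only forwards the call. */
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}

/* Hypothetical protocol handler, standing in for e.g. a TCP receive path. */
static int demo_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	printf("processing backlogged skb of len %d\n", skb->len);
	return 0;
}

int main(void)
{
	struct sock sk = { .sk_backlog_rcv = demo_backlog_rcv };
	struct sk_buff skb = { .len = 1500 };

	/* Call sites use the wrapper, mirroring the converted callers
	 * in tcp.h, sock.c, tcp.c and tcp_timer.c below. */
	return sk_backlog_rcv(&sk, &skb);
}

Because the wrapper is static inline and only forwards the call, the converted call sites generate the same code until extra behaviour is actually added.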
diff --git a/include/net/sock.h b/include/net/sock.h
index 18f96708f3a6..ada50c04d09f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -482,6 +482,11 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	return sk->sk_backlog_rcv(sk, skb);
+}
+
 #define sk_wait_event(__sk, __timeo, __condition)	\
 	({	int __rc;				\
 		release_sock(__sk);			\
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f6cc34143154..438014d57610 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -896,7 +896,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 			BUG_ON(sock_owned_by_user(sk));
 
 			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-				sk->sk_backlog_rcv(sk, skb1);
+				sk_backlog_rcv(sk, skb1);
 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
 			}
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 2d358dd8a03e..5e2a3132a8c9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -327,7 +327,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		 */
 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 
-		rc = sk->sk_backlog_rcv(sk, skb);
+		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
 	} else
@@ -1374,7 +1374,7 @@ static void __release_sock(struct sock *sk)
 		struct sk_buff *next = skb->next;
 
 		skb->next = NULL;
-		sk->sk_backlog_rcv(sk, skb);
+		sk_backlog_rcv(sk, skb);
 
 		/*
 		 * We are in process context here with softirqs
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7d81a1ee5507..7d3fe571d15f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1161,7 +1161,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	 * necessary */
 	local_bh_disable();
 	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
-		sk->sk_backlog_rcv(sk, skb);
+		sk_backlog_rcv(sk, skb);
 	local_bh_enable();
 
 	/* Clear memory counter. */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 5ab6ba19c3ce..6b6dff1164b9 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -201,7 +201,7 @@ static void tcp_delack_timer(unsigned long data)
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
 			while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
-				sk->sk_backlog_rcv(sk, skb);
+				sk_backlog_rcv(sk, skb);
 
 			tp->ucopy.memory = 0;
 		}