author		Zhu Yi <yi.zhu@intel.com>	2010-03-04 13:01:47 -0500
committer	David S. Miller <davem@davemloft.net>	2010-03-05 16:34:03 -0500
commit		a3a858ff18a72a8d388e31ab0d98f7e944841a62 (patch)
tree		e51a59ce280f0aa818ac5e1d989bc8a137b7cd47
parent		2499849ee8f513e795b9f2c19a42d6356e4943a4 (diff)
net: backlog functions rename
sk_add_backlog -> __sk_add_backlog
sk_add_backlog_limited -> sk_add_backlog

Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/sock.h       | 6
-rw-r--r--	net/core/sock.c          | 2
-rw-r--r--	net/dccp/minisocks.c     | 2
-rw-r--r--	net/ipv4/tcp_ipv4.c      | 2
-rw-r--r--	net/ipv4/tcp_minisocks.c | 2
-rw-r--r--	net/ipv4/udp.c           | 2
-rw-r--r--	net/ipv6/tcp_ipv6.c      | 2
-rw-r--r--	net/ipv6/udp.c           | 4
-rw-r--r--	net/llc/llc_c_ac.c       | 2
-rw-r--r--	net/llc/llc_conn.c       | 2
-rw-r--r--	net/sctp/input.c         | 4
-rw-r--r--	net/tipc/socket.c        | 2
-rw-r--r--	net/x25/x25_dev.c        | 2

13 files changed, 17 insertions(+), 17 deletions(-)
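The rename keeps the two-variant split intact: __sk_add_backlog() is the raw, unchecked add used on out-of-band paths, while the new sk_add_backlog() (formerly sk_add_backlog_limited) tests the queue against max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1) and returns -ENOBUFS on overflow, so callers holding the per-socket spinlock must check the return value and drop the skb themselves. Below is a minimal sketch of the resulting caller pattern, modeled on the sk_receive_skb() hunk further down; my_proto_rcv() and its drop handling are hypothetical and not part of this patch:

/* Hypothetical receive path illustrating the renamed API; not in the patch. */
static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);	/* the per-socket spinlock sk_add_backlog() requires */
	if (!sock_owned_by_user(sk)) {
		/* Socket not held by a process context: process directly. */
		rc = sk_backlog_rcv(sk, skb);
	} else if (sk_add_backlog(sk, skb)) {
		/* Backlog over its limit: the limited variant (now named
		 * sk_add_backlog) returned -ENOBUFS, so drop the skb here.
		 */
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}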
diff --git a/include/net/sock.h b/include/net/sock.h
index 2516d76f043c..170353dd9570 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -592,7 +592,7 @@ static inline int sk_stream_memory_free(struct sock *sk)
 }
 
 /* OOB backlog add */
-static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (!sk->sk_backlog.tail) {
 		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
@@ -604,12 +604,12 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 }
 
 /* The per-socket spinlock must be held here. */
-static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+static inline int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
 		return -ENOBUFS;
 
-	sk_add_backlog(sk, skb);
+	__sk_add_backlog(sk, skb);
 	sk->sk_backlog.len += skb->truesize;
 	return 0;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 6e22dc973d23..61a65a2e0455 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog_limited(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index af226a063141..0d508c359fa9 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
 		 * in main socket hash table and lock on listening
 		 * socket does not protect us more.
 		 */
-		sk_add_backlog(child, skb);
+		__sk_add_backlog(child, skb);
 	}
 
 	bh_unlock_sock(child);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4baf1943b1bd..1915f7dc30e6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1682,7 +1682,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (sk_add_backlog_limited(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		goto discard_and_relse;
 	}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f206ee5dda80..4199bc6915c5 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 		 * in main socket hash table and lock on listening
 		 * socket does not protect us more.
 		 */
-		sk_add_backlog(child, skb);
+		__sk_add_backlog(child, skb);
 	}
 
 	bh_unlock_sock(child);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e7eb47f338d4..7af756d0f931 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1371,7 +1371,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog_limited(sk, skb)) {
+	else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c4ea9d5cbfaa..2c378b1bd5cf 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1740,7 +1740,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (sk_add_backlog_limited(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		goto discard_and_relse;
 	}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 64804912b093..3c0c9c755c92 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -583,7 +583,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog_limited(sk, skb1)) {
+			else if (sk_add_backlog(sk, skb1)) {
 				kfree_skb(skb1);
 				bh_unlock_sock(sk);
 				goto drop;
@@ -758,7 +758,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog_limited(sk, skb)) {
+	else if (sk_add_backlog(sk, skb)) {
 		atomic_inc(&sk->sk_drops);
 		bh_unlock_sock(sk);
 		sock_put(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 019c780512e8..86d6985b9d49 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
 			llc_conn_state_process(sk, skb);
 		else {
 			llc_set_backlog_type(skb, LLC_EVENT);
-			sk_add_backlog(sk, skb);
+			__sk_add_backlog(sk, skb);
 		}
 	}
 }
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c0539ffdb272..a12144da7974 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -827,7 +827,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
 	else {
 		dprintk("%s: adding to backlog...\n", __func__);
 		llc_set_backlog_type(skb, LLC_PACKET);
-		if (sk_add_backlog_limited(sk, skb))
+		if (sk_add_backlog(sk, skb))
 			goto drop_unlock;
 	}
 out:
diff --git a/net/sctp/input.c b/net/sctp/input.c
index cbc063665e6b..3d74b264ea22 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -341,7 +341,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	sctp_bh_lock_sock(sk);
 
 	if (sock_owned_by_user(sk)) {
-		if (sk_add_backlog_limited(sk, skb))
+		if (sk_add_backlog(sk, skb))
 			sctp_chunk_free(chunk);
 		else
 			backloged = 1;
@@ -375,7 +375,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	struct sctp_ep_common *rcvr = chunk->rcvr;
 	int ret;
 
-	ret = sk_add_backlog_limited(sk, skb);
+	ret = sk_add_backlog(sk, skb);
 	if (!ret) {
 		/* Hold the assoc/ep while hanging on the backlog queue.
 		 * This way, we know structures we need will not disappear
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 22bfbc33a8ac..4b235fc1c70f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1322,7 +1322,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog_limited(sk, buf))
+		if (sk_add_backlog(sk, buf))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index a9da0dc26f4f..52e304212241 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
 		if (!sock_owned_by_user(sk)) {
 			queued = x25_process_rx_frame(sk, skb);
 		} else {
-			queued = !sk_add_backlog_limited(sk, skb);
+			queued = !sk_add_backlog(sk, skb);
 		}
 		bh_unlock_sock(sk);
 		sock_put(sk);