about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorZhu Yi <yi.zhu@intel.com>2010-03-04 13:01:47 -0500
committerGreg Kroah-Hartman <gregkh@suse.de>2010-04-01 19:02:04 -0400
commita12a9a26ff389892df27e2ff4cbf03e5b2ed0d64 (patch)
tree365926473d8d9879f3b146e23668afee89b9b47d /net
parent51c5db4ddd330e96c9221392b1cf361ce1daa88c (diff)
net: backlog functions rename
[ Upstream commit a3a858ff18a72a8d388e31ab0d98f7e944841a62 ]

sk_add_backlog -> __sk_add_backlog
sk_add_backlog_limited -> sk_add_backlog

Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'net')
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dccp/minisocks.c2
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/llc/llc_c_ac.c2
-rw-r--r--net/llc/llc_conn.c2
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/x25/x25_dev.c2
12 files changed, 14 insertions, 14 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index 385d26210818..5779f315919f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
340 rc = sk_backlog_rcv(sk, skb); 340 rc = sk_backlog_rcv(sk, skb);
341 341
342 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 342 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
343 } else if (sk_add_backlog_limited(sk, skb)) { 343 } else if (sk_add_backlog(sk, skb)) {
344 bh_unlock_sock(sk); 344 bh_unlock_sock(sk);
345 atomic_inc(&sk->sk_drops); 345 atomic_inc(&sk->sk_drops);
346 goto discard_and_relse; 346 goto discard_and_relse;
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index af226a063141..0d508c359fa9 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
254 * in main socket hash table and lock on listening 254 * in main socket hash table and lock on listening
255 * socket does not protect us more. 255 * socket does not protect us more.
256 */ 256 */
257 sk_add_backlog(child, skb); 257 __sk_add_backlog(child, skb);
258 } 258 }
259 259
260 bh_unlock_sock(child); 260 bh_unlock_sock(child);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index febfe1586e19..de935e391443 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1677,7 +1677,7 @@ process:
1677 if (!tcp_prequeue(sk, skb)) 1677 if (!tcp_prequeue(sk, skb))
1678 ret = tcp_v4_do_rcv(sk, skb); 1678 ret = tcp_v4_do_rcv(sk, skb);
1679 } 1679 }
1680 } else if (sk_add_backlog_limited(sk, skb)) { 1680 } else if (sk_add_backlog(sk, skb)) {
1681 bh_unlock_sock(sk); 1681 bh_unlock_sock(sk);
1682 goto discard_and_relse; 1682 goto discard_and_relse;
1683 } 1683 }
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f206ee5dda80..4199bc6915c5 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
728 * in main socket hash table and lock on listening 728 * in main socket hash table and lock on listening
729 * socket does not protect us more. 729 * socket does not protect us more.
730 */ 730 */
731 sk_add_backlog(child, skb); 731 __sk_add_backlog(child, skb);
732 } 732 }
733 733
734 bh_unlock_sock(child); 734 bh_unlock_sock(child);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7bb45686f3c5..112c61135f92 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1372,7 +1372,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1372 bh_lock_sock(sk); 1372 bh_lock_sock(sk);
1373 if (!sock_owned_by_user(sk)) 1373 if (!sock_owned_by_user(sk))
1374 rc = __udp_queue_rcv_skb(sk, skb); 1374 rc = __udp_queue_rcv_skb(sk, skb);
1375 else if (sk_add_backlog_limited(sk, skb)) { 1375 else if (sk_add_backlog(sk, skb)) {
1376 bh_unlock_sock(sk); 1376 bh_unlock_sock(sk);
1377 goto drop; 1377 goto drop;
1378 } 1378 }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d89f405528c2..548a06e66b4e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1732,7 +1732,7 @@ process:
1732 if (!tcp_prequeue(sk, skb)) 1732 if (!tcp_prequeue(sk, skb))
1733 ret = tcp_v6_do_rcv(sk, skb); 1733 ret = tcp_v6_do_rcv(sk, skb);
1734 } 1734 }
1735 } else if (sk_add_backlog_limited(sk, skb)) { 1735 } else if (sk_add_backlog(sk, skb)) {
1736 bh_unlock_sock(sk); 1736 bh_unlock_sock(sk);
1737 goto discard_and_relse; 1737 goto discard_and_relse;
1738 } 1738 }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bf88ce073d26..d9714d20705d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -584,7 +584,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
584 bh_lock_sock(sk); 584 bh_lock_sock(sk);
585 if (!sock_owned_by_user(sk)) 585 if (!sock_owned_by_user(sk))
586 udpv6_queue_rcv_skb(sk, skb1); 586 udpv6_queue_rcv_skb(sk, skb1);
587 else if (sk_add_backlog_limited(sk, skb1)) { 587 else if (sk_add_backlog(sk, skb1)) {
588 kfree_skb(skb1); 588 kfree_skb(skb1);
589 bh_unlock_sock(sk); 589 bh_unlock_sock(sk);
590 goto drop; 590 goto drop;
@@ -760,7 +760,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
760 bh_lock_sock(sk); 760 bh_lock_sock(sk);
761 if (!sock_owned_by_user(sk)) 761 if (!sock_owned_by_user(sk))
762 udpv6_queue_rcv_skb(sk, skb); 762 udpv6_queue_rcv_skb(sk, skb);
763 else if (sk_add_backlog_limited(sk, skb)) { 763 else if (sk_add_backlog(sk, skb)) {
764 atomic_inc(&sk->sk_drops); 764 atomic_inc(&sk->sk_drops);
765 bh_unlock_sock(sk); 765 bh_unlock_sock(sk);
766 sock_put(sk); 766 sock_put(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 019c780512e8..86d6985b9d49 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
1437 llc_conn_state_process(sk, skb); 1437 llc_conn_state_process(sk, skb);
1438 else { 1438 else {
1439 llc_set_backlog_type(skb, LLC_EVENT); 1439 llc_set_backlog_type(skb, LLC_EVENT);
1440 sk_add_backlog(sk, skb); 1440 __sk_add_backlog(sk, skb);
1441 } 1441 }
1442 } 1442 }
1443} 1443}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8f97546726b3..c61ca885794d 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -756,7 +756,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
756 else { 756 else {
757 dprintk("%s: adding to backlog...\n", __func__); 757 dprintk("%s: adding to backlog...\n", __func__);
758 llc_set_backlog_type(skb, LLC_PACKET); 758 llc_set_backlog_type(skb, LLC_PACKET);
759 if (sk_add_backlog_limited(sk, skb)) 759 if (sk_add_backlog(sk, skb))
760 goto drop_unlock; 760 goto drop_unlock;
761 } 761 }
762out: 762out:
diff --git a/net/sctp/input.c b/net/sctp/input.c
index cbc063665e6b..3d74b264ea22 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -341,7 +341,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
341 sctp_bh_lock_sock(sk); 341 sctp_bh_lock_sock(sk);
342 342
343 if (sock_owned_by_user(sk)) { 343 if (sock_owned_by_user(sk)) {
344 if (sk_add_backlog_limited(sk, skb)) 344 if (sk_add_backlog(sk, skb))
345 sctp_chunk_free(chunk); 345 sctp_chunk_free(chunk);
346 else 346 else
347 backloged = 1; 347 backloged = 1;
@@ -375,7 +375,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
375 struct sctp_ep_common *rcvr = chunk->rcvr; 375 struct sctp_ep_common *rcvr = chunk->rcvr;
376 int ret; 376 int ret;
377 377
378 ret = sk_add_backlog_limited(sk, skb); 378 ret = sk_add_backlog(sk, skb);
379 if (!ret) { 379 if (!ret) {
380 /* Hold the assoc/ep while hanging on the backlog queue. 380 /* Hold the assoc/ep while hanging on the backlog queue.
381 * This way, we know structures we need will not disappear 381 * This way, we know structures we need will not disappear
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 22bfbc33a8ac..4b235fc1c70f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1322,7 +1322,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1322 if (!sock_owned_by_user(sk)) { 1322 if (!sock_owned_by_user(sk)) {
1323 res = filter_rcv(sk, buf); 1323 res = filter_rcv(sk, buf);
1324 } else { 1324 } else {
1325 if (sk_add_backlog_limited(sk, buf)) 1325 if (sk_add_backlog(sk, buf))
1326 res = TIPC_ERR_OVERLOAD; 1326 res = TIPC_ERR_OVERLOAD;
1327 else 1327 else
1328 res = TIPC_OK; 1328 res = TIPC_OK;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index a9da0dc26f4f..52e304212241 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
53 if (!sock_owned_by_user(sk)) { 53 if (!sock_owned_by_user(sk)) {
54 queued = x25_process_rx_frame(sk, skb); 54 queued = x25_process_rx_frame(sk, skb);
55 } else { 55 } else {
56 queued = !sk_add_backlog_limited(sk, skb); 56 queued = !sk_add_backlog(sk, skb);
57 } 57 }
58 bh_unlock_sock(sk); 58 bh_unlock_sock(sk);
59 sock_put(sk); 59 sock_put(sk);