diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-04-20 09:03:51 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-04-20 19:37:13 -0400 |
commit | aa395145165cb06a0d0885221bbe0ce4a564391d (patch) | |
tree | 118b0403621f10db8dc3dbf12079f9af5b19e05d /net/core/sock.c | |
parent | ab9304717f7624c41927f442e6b6d418b2d8b3e4 (diff) |
net: sk_sleep() helper
Define a new function to return the waitqueue of a "struct sock".
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
return sk->sk_sleep;
}
Replace all read occurrences of sk_sleep with a call to this function.
Needed for a future RCU conversion. sk_sleep won't be a field directly
available.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/sock.c')
-rw-r--r-- | net/core/sock.c | 16 |
1 files changed, 8 insertions, 8 deletions
diff --git a/net/core/sock.c b/net/core/sock.c index 7effa1e689df..58ebd146ce5a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1395,7 +1395,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1395 | if (signal_pending(current)) | 1395 | if (signal_pending(current)) |
1396 | break; | 1396 | break; |
1397 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 1397 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
1398 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1398 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1399 | if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) | 1399 | if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) |
1400 | break; | 1400 | break; |
1401 | if (sk->sk_shutdown & SEND_SHUTDOWN) | 1401 | if (sk->sk_shutdown & SEND_SHUTDOWN) |
@@ -1404,7 +1404,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1404 | break; | 1404 | break; |
1405 | timeo = schedule_timeout(timeo); | 1405 | timeo = schedule_timeout(timeo); |
1406 | } | 1406 | } |
1407 | finish_wait(sk->sk_sleep, &wait); | 1407 | finish_wait(sk_sleep(sk), &wait); |
1408 | return timeo; | 1408 | return timeo; |
1409 | } | 1409 | } |
1410 | 1410 | ||
@@ -1570,11 +1570,11 @@ int sk_wait_data(struct sock *sk, long *timeo) | |||
1570 | int rc; | 1570 | int rc; |
1571 | DEFINE_WAIT(wait); | 1571 | DEFINE_WAIT(wait); |
1572 | 1572 | ||
1573 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1573 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1574 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1574 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1575 | rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); | 1575 | rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); |
1576 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1576 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1577 | finish_wait(sk->sk_sleep, &wait); | 1577 | finish_wait(sk_sleep(sk), &wait); |
1578 | return rc; | 1578 | return rc; |
1579 | } | 1579 | } |
1580 | EXPORT_SYMBOL(sk_wait_data); | 1580 | EXPORT_SYMBOL(sk_wait_data); |
@@ -1798,7 +1798,7 @@ static void sock_def_wakeup(struct sock *sk) | |||
1798 | { | 1798 | { |
1799 | read_lock(&sk->sk_callback_lock); | 1799 | read_lock(&sk->sk_callback_lock); |
1800 | if (sk_has_sleeper(sk)) | 1800 | if (sk_has_sleeper(sk)) |
1801 | wake_up_interruptible_all(sk->sk_sleep); | 1801 | wake_up_interruptible_all(sk_sleep(sk)); |
1802 | read_unlock(&sk->sk_callback_lock); | 1802 | read_unlock(&sk->sk_callback_lock); |
1803 | } | 1803 | } |
1804 | 1804 | ||
@@ -1806,7 +1806,7 @@ static void sock_def_error_report(struct sock *sk) | |||
1806 | { | 1806 | { |
1807 | read_lock(&sk->sk_callback_lock); | 1807 | read_lock(&sk->sk_callback_lock); |
1808 | if (sk_has_sleeper(sk)) | 1808 | if (sk_has_sleeper(sk)) |
1809 | wake_up_interruptible_poll(sk->sk_sleep, POLLERR); | 1809 | wake_up_interruptible_poll(sk_sleep(sk), POLLERR); |
1810 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); | 1810 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); |
1811 | read_unlock(&sk->sk_callback_lock); | 1811 | read_unlock(&sk->sk_callback_lock); |
1812 | } | 1812 | } |
@@ -1815,7 +1815,7 @@ static void sock_def_readable(struct sock *sk, int len) | |||
1815 | { | 1815 | { |
1816 | read_lock(&sk->sk_callback_lock); | 1816 | read_lock(&sk->sk_callback_lock); |
1817 | if (sk_has_sleeper(sk)) | 1817 | if (sk_has_sleeper(sk)) |
1818 | wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN | | 1818 | wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN | |
1819 | POLLRDNORM | POLLRDBAND); | 1819 | POLLRDNORM | POLLRDBAND); |
1820 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | 1820 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); |
1821 | read_unlock(&sk->sk_callback_lock); | 1821 | read_unlock(&sk->sk_callback_lock); |
@@ -1830,7 +1830,7 @@ static void sock_def_write_space(struct sock *sk) | |||
1830 | */ | 1830 | */ |
1831 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { | 1831 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { |
1832 | if (sk_has_sleeper(sk)) | 1832 | if (sk_has_sleeper(sk)) |
1833 | wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | | 1833 | wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT | |
1834 | POLLWRNORM | POLLWRBAND); | 1834 | POLLWRNORM | POLLWRBAND); |
1835 | 1835 | ||
1836 | /* Should agree with poll, otherwise some programs break */ | 1836 | /* Should agree with poll, otherwise some programs break */ |