diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-04-20 09:03:51 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-04-20 19:37:13 -0400 |
commit | aa395145165cb06a0d0885221bbe0ce4a564391d (patch) | |
tree | 118b0403621f10db8dc3dbf12079f9af5b19e05d /net/core | |
parent | ab9304717f7624c41927f442e6b6d418b2d8b3e4 (diff) |
net: sk_sleep() helper
Define a new function to return the waitqueue of a "struct sock".
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
return sk->sk_sleep;
}
Change all read occurrences of sk_sleep to a call to this function.
Needed for a future RCU conversion; sk_sleep won't be a field directly
available.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/datagram.c | 6 | ||||
-rw-r--r-- | net/core/sock.c | 16 | ||||
-rw-r--r-- | net/core/stream.c | 16 |
3 files changed, 19 insertions, 19 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c index 2dccd4ee591b..5574a5ddf908 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -86,7 +86,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) | |||
86 | int error; | 86 | int error; |
87 | DEFINE_WAIT_FUNC(wait, receiver_wake_function); | 87 | DEFINE_WAIT_FUNC(wait, receiver_wake_function); |
88 | 88 | ||
89 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 89 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
90 | 90 | ||
91 | /* Socket errors? */ | 91 | /* Socket errors? */ |
92 | error = sock_error(sk); | 92 | error = sock_error(sk); |
@@ -115,7 +115,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) | |||
115 | error = 0; | 115 | error = 0; |
116 | *timeo_p = schedule_timeout(*timeo_p); | 116 | *timeo_p = schedule_timeout(*timeo_p); |
117 | out: | 117 | out: |
118 | finish_wait(sk->sk_sleep, &wait); | 118 | finish_wait(sk_sleep(sk), &wait); |
119 | return error; | 119 | return error; |
120 | interrupted: | 120 | interrupted: |
121 | error = sock_intr_errno(*timeo_p); | 121 | error = sock_intr_errno(*timeo_p); |
@@ -726,7 +726,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock, | |||
726 | struct sock *sk = sock->sk; | 726 | struct sock *sk = sock->sk; |
727 | unsigned int mask; | 727 | unsigned int mask; |
728 | 728 | ||
729 | sock_poll_wait(file, sk->sk_sleep, wait); | 729 | sock_poll_wait(file, sk_sleep(sk), wait); |
730 | mask = 0; | 730 | mask = 0; |
731 | 731 | ||
732 | /* exceptional events? */ | 732 | /* exceptional events? */ |
diff --git a/net/core/sock.c b/net/core/sock.c index 7effa1e689df..58ebd146ce5a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1395,7 +1395,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1395 | if (signal_pending(current)) | 1395 | if (signal_pending(current)) |
1396 | break; | 1396 | break; |
1397 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 1397 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
1398 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1398 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1399 | if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) | 1399 | if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) |
1400 | break; | 1400 | break; |
1401 | if (sk->sk_shutdown & SEND_SHUTDOWN) | 1401 | if (sk->sk_shutdown & SEND_SHUTDOWN) |
@@ -1404,7 +1404,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1404 | break; | 1404 | break; |
1405 | timeo = schedule_timeout(timeo); | 1405 | timeo = schedule_timeout(timeo); |
1406 | } | 1406 | } |
1407 | finish_wait(sk->sk_sleep, &wait); | 1407 | finish_wait(sk_sleep(sk), &wait); |
1408 | return timeo; | 1408 | return timeo; |
1409 | } | 1409 | } |
1410 | 1410 | ||
@@ -1570,11 +1570,11 @@ int sk_wait_data(struct sock *sk, long *timeo) | |||
1570 | int rc; | 1570 | int rc; |
1571 | DEFINE_WAIT(wait); | 1571 | DEFINE_WAIT(wait); |
1572 | 1572 | ||
1573 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1573 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1574 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1574 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1575 | rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); | 1575 | rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); |
1576 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1576 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1577 | finish_wait(sk->sk_sleep, &wait); | 1577 | finish_wait(sk_sleep(sk), &wait); |
1578 | return rc; | 1578 | return rc; |
1579 | } | 1579 | } |
1580 | EXPORT_SYMBOL(sk_wait_data); | 1580 | EXPORT_SYMBOL(sk_wait_data); |
@@ -1798,7 +1798,7 @@ static void sock_def_wakeup(struct sock *sk) | |||
1798 | { | 1798 | { |
1799 | read_lock(&sk->sk_callback_lock); | 1799 | read_lock(&sk->sk_callback_lock); |
1800 | if (sk_has_sleeper(sk)) | 1800 | if (sk_has_sleeper(sk)) |
1801 | wake_up_interruptible_all(sk->sk_sleep); | 1801 | wake_up_interruptible_all(sk_sleep(sk)); |
1802 | read_unlock(&sk->sk_callback_lock); | 1802 | read_unlock(&sk->sk_callback_lock); |
1803 | } | 1803 | } |
1804 | 1804 | ||
@@ -1806,7 +1806,7 @@ static void sock_def_error_report(struct sock *sk) | |||
1806 | { | 1806 | { |
1807 | read_lock(&sk->sk_callback_lock); | 1807 | read_lock(&sk->sk_callback_lock); |
1808 | if (sk_has_sleeper(sk)) | 1808 | if (sk_has_sleeper(sk)) |
1809 | wake_up_interruptible_poll(sk->sk_sleep, POLLERR); | 1809 | wake_up_interruptible_poll(sk_sleep(sk), POLLERR); |
1810 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); | 1810 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); |
1811 | read_unlock(&sk->sk_callback_lock); | 1811 | read_unlock(&sk->sk_callback_lock); |
1812 | } | 1812 | } |
@@ -1815,7 +1815,7 @@ static void sock_def_readable(struct sock *sk, int len) | |||
1815 | { | 1815 | { |
1816 | read_lock(&sk->sk_callback_lock); | 1816 | read_lock(&sk->sk_callback_lock); |
1817 | if (sk_has_sleeper(sk)) | 1817 | if (sk_has_sleeper(sk)) |
1818 | wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN | | 1818 | wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN | |
1819 | POLLRDNORM | POLLRDBAND); | 1819 | POLLRDNORM | POLLRDBAND); |
1820 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | 1820 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); |
1821 | read_unlock(&sk->sk_callback_lock); | 1821 | read_unlock(&sk->sk_callback_lock); |
@@ -1830,7 +1830,7 @@ static void sock_def_write_space(struct sock *sk) | |||
1830 | */ | 1830 | */ |
1831 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { | 1831 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { |
1832 | if (sk_has_sleeper(sk)) | 1832 | if (sk_has_sleeper(sk)) |
1833 | wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | | 1833 | wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT | |
1834 | POLLWRNORM | POLLWRBAND); | 1834 | POLLWRNORM | POLLWRBAND); |
1835 | 1835 | ||
1836 | /* Should agree with poll, otherwise some programs break */ | 1836 | /* Should agree with poll, otherwise some programs break */ |
diff --git a/net/core/stream.c b/net/core/stream.c index a37debfeb1b2..7b3c3f30b107 100644 --- a/net/core/stream.c +++ b/net/core/stream.c | |||
@@ -32,8 +32,8 @@ void sk_stream_write_space(struct sock *sk) | |||
32 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { | 32 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { |
33 | clear_bit(SOCK_NOSPACE, &sock->flags); | 33 | clear_bit(SOCK_NOSPACE, &sock->flags); |
34 | 34 | ||
35 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 35 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
36 | wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | | 36 | wake_up_interruptible_poll(sk_sleep(sk), POLLOUT | |
37 | POLLWRNORM | POLLWRBAND); | 37 | POLLWRNORM | POLLWRBAND); |
38 | if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) | 38 | if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) |
39 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); | 39 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); |
@@ -66,13 +66,13 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p) | |||
66 | if (signal_pending(tsk)) | 66 | if (signal_pending(tsk)) |
67 | return sock_intr_errno(*timeo_p); | 67 | return sock_intr_errno(*timeo_p); |
68 | 68 | ||
69 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 69 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
70 | sk->sk_write_pending++; | 70 | sk->sk_write_pending++; |
71 | done = sk_wait_event(sk, timeo_p, | 71 | done = sk_wait_event(sk, timeo_p, |
72 | !sk->sk_err && | 72 | !sk->sk_err && |
73 | !((1 << sk->sk_state) & | 73 | !((1 << sk->sk_state) & |
74 | ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); | 74 | ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); |
75 | finish_wait(sk->sk_sleep, &wait); | 75 | finish_wait(sk_sleep(sk), &wait); |
76 | sk->sk_write_pending--; | 76 | sk->sk_write_pending--; |
77 | } while (!done); | 77 | } while (!done); |
78 | return 0; | 78 | return 0; |
@@ -96,13 +96,13 @@ void sk_stream_wait_close(struct sock *sk, long timeout) | |||
96 | DEFINE_WAIT(wait); | 96 | DEFINE_WAIT(wait); |
97 | 97 | ||
98 | do { | 98 | do { |
99 | prepare_to_wait(sk->sk_sleep, &wait, | 99 | prepare_to_wait(sk_sleep(sk), &wait, |
100 | TASK_INTERRUPTIBLE); | 100 | TASK_INTERRUPTIBLE); |
101 | if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) | 101 | if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) |
102 | break; | 102 | break; |
103 | } while (!signal_pending(current) && timeout); | 103 | } while (!signal_pending(current) && timeout); |
104 | 104 | ||
105 | finish_wait(sk->sk_sleep, &wait); | 105 | finish_wait(sk_sleep(sk), &wait); |
106 | } | 106 | } |
107 | } | 107 | } |
108 | 108 | ||
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
126 | while (1) { | 126 | while (1) { |
127 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 127 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); |
128 | 128 | ||
129 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 129 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
130 | 130 | ||
131 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | 131 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
132 | goto do_error; | 132 | goto do_error; |
@@ -157,7 +157,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
157 | *timeo_p = current_timeo; | 157 | *timeo_p = current_timeo; |
158 | } | 158 | } |
159 | out: | 159 | out: |
160 | finish_wait(sk->sk_sleep, &wait); | 160 | finish_wait(sk_sleep(sk), &wait); |
161 | return err; | 161 | return err; |
162 | 162 | ||
163 | do_error: | 163 | do_error: |