Diffstat (limited to 'net/dccp')
-rw-r--r--	net/dccp/input.c	1
-rw-r--r--	net/dccp/proto.c	59
2 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 02af05ec23a2..ef29cef1dafe 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -34,6 +34,7 @@ static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
 	dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
 	dccp_fin(sk, skb);
 	dccp_set_state(sk, DCCP_CLOSED);
+	sk_wake_async(sk, 1, POLL_HUP);
 }
 
 static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 2b6db18e607f..600dda51d995 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -140,6 +140,62 @@ int dccp_disconnect(struct sock *sk, int flags)
 	return err;
 }
 
+/*
+ * Wait for a DCCP event.
+ *
+ * Note that we don't need to lock the socket, as the upper poll layers
+ * take care of normal races (between the test and the event) and we don't
+ * go look at any of the socket buffers directly.
+ */
+static unsigned int dccp_poll(struct file *file, struct socket *sock,
+			      poll_table *wait)
+{
+	unsigned int mask;
+	struct sock *sk = sock->sk;
+
+	poll_wait(file, sk->sk_sleep, wait);
+	if (sk->sk_state == DCCP_LISTEN)
+		return inet_csk_listen_poll(sk);
+
+	/* Socket is not locked. We are protected from async events
+	   by poll logic and correct handling of state changes
+	   made by another threads is impossible in any case.
+	 */
+
+	mask = 0;
+	if (sk->sk_err)
+		mask = POLLERR;
+
+	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
+		mask |= POLLHUP;
+	if (sk->sk_shutdown & RCV_SHUTDOWN)
+		mask |= POLLIN | POLLRDNORM;
+
+	/* Connected? */
+	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+		if (atomic_read(&sk->sk_rmem_alloc) > 0)
+			mask |= POLLIN | POLLRDNORM;
+
+		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+				mask |= POLLOUT | POLLWRNORM;
+			} else { /* send SIGIO later */
+				set_bit(SOCK_ASYNC_NOSPACE,
+					&sk->sk_socket->flags);
+				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+
+				/* Race breaker. If space is freed after
+				 * wspace test but before the flags are set,
+				 * IO signal will be lost.
+				 */
+				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+					mask |= POLLOUT | POLLWRNORM;
+			}
+		}
+	}
+	return mask;
+}
+
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
 	dccp_pr_debug("entry\n");
@@ -478,7 +534,8 @@ static struct proto_ops inet_dccp_ops = {
 	.socketpair	= sock_no_socketpair,
 	.accept		= inet_accept,
 	.getname	= inet_getname,
-	.poll		= sock_no_poll,
+	/* FIXME: work on tcp_poll to rename it to inet_csk_poll */
+	.poll		= dccp_poll,
 	.ioctl		= inet_ioctl,
 	/* FIXME: work on inet_listen to rename it to sock_common_listen */
 	.listen		= inet_dccp_listen,
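
For context only, not part of the commit: a minimal userspace sketch of what replacing sock_no_poll with dccp_poll enables, namely that poll(2) on a DCCP socket now reports hangup/readable/writable state. The SOCK_DCCP and IPPROTO_DCCP fallback defines below are assumptions for libc headers that predate DCCP support.

/* Illustrative sketch; behavior notes are expectations based on the
 * dccp_poll() logic in the patch above, not guarantees. */
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP	6	/* assumed value for older headers */
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP	33	/* assumed value for older headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
	if (fd < 0) {
		perror("socket");	/* kernel built without DCCP */
		return 1;
	}

	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

	/* A freshly created socket is still in DCCP_CLOSED, so revents is
	 * expected to include POLLHUP here; after a successful handshake,
	 * dccp_poll() reports POLLOUT | POLLWRNORM while write space is
	 * available and POLLIN | POLLRDNORM once receive data is queued. */
	if (poll(&pfd, 1, 0) >= 0)
		printf("revents = 0x%x\n", pfd.revents);
	return 0;
}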