Diffstat (limited to 'net/sctp/socket.c')
 -rw-r--r--  net/sctp/socket.c | 41
 1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 44a1ab03a3f0..ca44917872d2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3720,9 +3720,6 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 
 	SCTP_DBG_OBJCNT_INC(sock);
 
-	/* Set socket backlog limit. */
-	sk->sk_backlog.limit = sysctl_sctp_rmem[1];
-
 	local_bh_disable();
 	percpu_counter_inc(&sctp_sockets_allocated);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
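Note: this hunk removes the only per-protocol seeding of sk->sk_backlog.limit, so backlog limiting presumably moved to a generic, receive-buffer-based check performed at sk_add_backlog() time. A minimal sketch of that assumed check follows; backlog_full() is a hypothetical name for illustration, not a kernel symbol:

	#include <net/sock.h>

	/* Hypothetical sketch: the softirq path is assumed to drop a packet
	 * once the queued backlog plus charged receive memory exceeds the
	 * socket's receive buffer, replacing the per-socket limit field. */
	static inline bool backlog_full(const struct sock *sk)
	{
		return sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc) >
		       (unsigned int)sk->sk_rcvbuf;
	}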
@@ -4387,7 +4384,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
 			    transports) {
 		memcpy(&temp, &from->ipaddr, sizeof(temp));
 		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
-		addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
+		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
 		if (space_left < addrlen)
 			return -ENOMEM;
 		if (copy_to_user(to, &temp, addrlen))
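Note: addr_v4map() can rewrite an IPv4 peer address into its IPv4-mapped IPv6 form on an AF_INET6 socket, so the number of bytes copied to user space must follow the family of the address now held in temp, not the socket's own family; the old code could size the copy for the wrong address form. A minimal sketch of the length rule, with sockaddr_len_for() as a hypothetical helper:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <stddef.h>

	/* Illustrative only: size the copy by the family of the address
	 * actually being returned, not by the socket's own family. */
	static size_t sockaddr_len_for(sa_family_t family)
	{
		return family == AF_INET6 ? sizeof(struct sockaddr_in6)
					  : sizeof(struct sockaddr_in);
	}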
@@ -5436,6 +5433,8 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 			rover++;
 			if ((rover < low) || (rover > high))
 				rover = low;
+			if (inet_is_reserved_local_port(rover))
+				continue;
 			index = sctp_phashfn(rover);
 			head = &sctp_port_hashtable[index];
 			sctp_spin_lock(&head->lock);
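Note: the two added lines make SCTP's ephemeral-port search skip administratively reserved ports, matching the inet port allocators. The predicate is assumed to test the bitmap behind the net.ipv4.ip_local_reserved_ports sysctl, roughly:

	#include <linux/bitops.h>

	extern unsigned long *sysctl_local_reserved_ports;

	/* Assumed shape of the predicate: membership of the candidate port
	 * in the bitmap configured via net.ipv4.ip_local_reserved_ports. */
	static inline int inet_is_reserved_local_port(int port)
	{
		return test_bit(port, sysctl_local_reserved_ports);
	}

With this in place, an administrator can fence off a port range for a userspace service and the allocator will rotate past it instead of binding into it.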
@@ -5482,7 +5481,6 @@ pp_found:
 		 */
 		int reuse = sk->sk_reuse;
 		struct sock *sk2;
-		struct hlist_node *node;
 
 		SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
 		if (pp->fastreuse && sk->sk_reuse &&
@@ -5703,7 +5701,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sctp_sock *sp = sctp_sk(sk);
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 
 	/* A TCP-style listening socket becomes readable when the accept queue
 	 * is not empty.
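Note: this and the remaining hunks convert every direct sk->sk_sleep dereference to the sk_sleep(sk) accessor. Given how the later hunks reach the wait queue through sk->sk_wq, the accessor's assumed shape is:

	#include <net/sock.h>

	/* Assumed shape of the accessor: the wait queue now lives inside
	 * the RCU-managed struct socket_wq instead of struct sock, and
	 * sk_sleep() hides that indirection from every call site. */
	static inline wait_queue_head_t *sk_sleep(struct sock *sk)
	{
		return &sk->sk_wq->wait;
	}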
@@ -5944,7 +5942,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
 	int error;
 	DEFINE_WAIT(wait);
 
-	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 	/* Socket errors? */
 	error = sock_error(sk);
@@ -5981,14 +5979,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
 	sctp_lock_sock(sk);
 
 ready:
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return 0;
 
 interrupted:
 	error = sock_intr_errno(*timeo_p);
 
 out:
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	*err = error;
 	return error;
 }
@@ -6062,14 +6060,14 @@ static void __sctp_write_space(struct sctp_association *asoc)
 		wake_up_interruptible(&asoc->wait);
 
 	if (sctp_writeable(sk)) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-			wake_up_interruptible(sk->sk_sleep);
+		if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+			wake_up_interruptible(sk_sleep(sk));
 
 		/* Note that we try to include the Async I/O support
 		 * here by modeling from the current TCP/UDP code.
 		 * We have not tested with it yet.
 		 */
-		if (sock->fasync_list &&
+		if (sock->wq->fasync_list &&
 		    !(sk->sk_shutdown & SEND_SHUTDOWN))
 			sock_wake_async(sock,
 					SOCK_WAKE_SPACE, POLL_OUT);
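Note: sock->fasync_list becomes sock->wq->fasync_list, so the fasync list evidently moved into the same object as the wait queue. Assumed layout, consistent with the wq->wait and wq->fasync_list uses elsewhere in this diff:

	#include <linux/fs.h>
	#include <linux/wait.h>
	#include <linux/rcupdate.h>

	/* Assumed definition: wait queue and fasync list travel together,
	 * and the object is reclaimed through an RCU grace period so that
	 * lockless wakers (see sctp_data_ready() below) stay safe. */
	struct socket_wq {
		wait_queue_head_t	wait;
		struct fasync_struct	*fasync_list;
		struct rcu_head		rcu;
	};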
@@ -6191,12 +6189,15 @@ do_nonblock:
 
 void sctp_data_ready(struct sock *sk, int len)
 {
-	read_lock_bh(&sk->sk_callback_lock);
-	if (sk_has_sleeper(sk))
-		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
-	read_unlock_bh(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 /* If socket sndbuf has changed, wake up all per association waiters. */
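Note: sctp_data_ready() trades the read side of sk->sk_callback_lock for an RCU read section: the waker dereferences sk->sk_wq under rcu_read_lock() and wakes only if wq_has_sleeper() reports a waiter. The helper is assumed to pair a full barrier with the sleeper's own barrier so the lockless check cannot miss a wakeup:

	#include <linux/wait.h>

	/* Assumed shape of wq_has_sleeper(): smp_mb() pairs with the full
	 * barrier implied by set_current_state() on the sleeping side, so
	 * either the sleeper observes the new data or the waker observes
	 * the sleeper on the queue; the lost-wakeup window is closed. */
	static inline int wq_has_sleeper(struct socket_wq *wq)
	{
		smp_mb();
		return wq && waitqueue_active(&wq->wait);
	}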
@@ -6307,7 +6308,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
 
 
 	for (;;) {
-		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
 					  TASK_INTERRUPTIBLE);
 
 		if (list_empty(&ep->asocs)) {
@@ -6333,7 +6334,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
 		break;
 	}
 
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 
 	return err;
 }
@@ -6343,7 +6344,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
 	DEFINE_WAIT(wait);
 
 	do {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		if (list_empty(&sctp_sk(sk)->ep->asocs))
 			break;
 		sctp_release_sock(sk);
@@ -6351,7 +6352,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
 		sctp_lock_sock(sk);
 	} while (!signal_pending(current) && timeout);
 
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 }
 
 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)