Diffstat (limited to 'net/tipc/socket.c')
 -rw-r--r--  net/tipc/socket.c  76
 1 files changed, 35 insertions, 41 deletions
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 2b02a3a80313..29d94d53198d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2,7 +2,7 @@
  * net/tipc/socket.c: TIPC socket API
  *
  * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -58,6 +58,9 @@ struct tipc_sock {
 #define tipc_sk(sk) ((struct tipc_sock *)(sk))
 #define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))
 
+#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
+                        (sock->state == SS_DISCONNECTING))
+
 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
 static void wakeupdispatch(struct tipc_port *tport);
@@ -241,7 +244,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
                         tipc_set_portunreliable(tp_ptr->ref, 1);
         }
 
-        atomic_inc(&tipc_user_count);
         return 0;
 }
 
@@ -290,7 +292,7 @@ static int release(struct socket *sock)
                 if (buf == NULL)
                         break;
                 atomic_dec(&tipc_queue_size);
-                if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
+                if (TIPC_SKB_CB(buf)->handle != 0)
                         buf_discard(buf);
                 else {
                         if ((sock->state == SS_CONNECTING) ||
@@ -321,7 +323,6 @@ static int release(struct socket *sock)
         sock_put(sk);
         sock->sk = NULL;
 
-        atomic_dec(&tipc_user_count);
         return res;
 }
 
@@ -495,6 +496,8 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
         if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
                 return -EACCES;
 
+        if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
+                return -EMSGSIZE;
         if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
                 return -EFAULT;
         if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
@@ -911,15 +914,13 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
         struct tipc_port *tport = tipc_sk_port(sk);
         struct sk_buff *buf;
         struct tipc_msg *msg;
+        long timeout;
         unsigned int sz;
         u32 err;
         int res;
 
         /* Catch invalid receive requests */
 
-        if (m->msg_iovlen != 1)
-                return -EOPNOTSUPP;     /* Don't do multiple iovec entries yet */
-
         if (unlikely(!buf_len))
                 return -EINVAL;
 
@@ -930,6 +931,7 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
                 goto exit;
         }
 
+        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 restart:
 
         /* Look for a message in receive queue; wait if necessary */
@@ -939,17 +941,15 @@ restart:
                         res = -ENOTCONN;
                         goto exit;
                 }
-                if (flags & MSG_DONTWAIT) {
-                        res = -EWOULDBLOCK;
+                if (timeout <= 0L) {
+                        res = timeout ? timeout : -EWOULDBLOCK;
                         goto exit;
                 }
                 release_sock(sk);
-                res = wait_event_interruptible(*sk_sleep(sk),
-                        (!skb_queue_empty(&sk->sk_receive_queue) ||
-                         (sock->state == SS_DISCONNECTING)));
+                timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
+                                                           tipc_rx_ready(sock),
+                                                           timeout);
                 lock_sock(sk);
-                if (res)
-                        goto exit;
         }
 
         /* Look at first message in receive queue */
@@ -991,11 +991,10 @@ restart:
                         sz = buf_len;
                         m->msg_flags |= MSG_TRUNC;
                 }
-                if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
-                                          sz))) {
-                        res = -EFAULT;
+                res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
+                                              m->msg_iov, sz);
+                if (res)
                         goto exit;
-                }
                 res = sz;
         } else {
                 if ((sock->state == SS_READY) ||
@@ -1038,19 +1037,15 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
         struct tipc_port *tport = tipc_sk_port(sk);
         struct sk_buff *buf;
         struct tipc_msg *msg;
+        long timeout;
         unsigned int sz;
         int sz_to_copy, target, needed;
         int sz_copied = 0;
-        char __user *crs = m->msg_iov->iov_base;
-        unsigned char *buf_crs;
         u32 err;
         int res = 0;
 
         /* Catch invalid receive attempts */
 
-        if (m->msg_iovlen != 1)
-                return -EOPNOTSUPP;     /* Don't do multiple iovec entries yet */
-
         if (unlikely(!buf_len))
                 return -EINVAL;
 
@@ -1063,7 +1058,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
         }
 
         target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
-
+        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 restart:
 
         /* Look for a message in receive queue; wait if necessary */
@@ -1073,17 +1068,15 @@ restart:
                         res = -ENOTCONN;
                         goto exit;
                 }
-                if (flags & MSG_DONTWAIT) {
-                        res = -EWOULDBLOCK;
+                if (timeout <= 0L) {
+                        res = timeout ? timeout : -EWOULDBLOCK;
                         goto exit;
                 }
                 release_sock(sk);
-                res = wait_event_interruptible(*sk_sleep(sk),
-                        (!skb_queue_empty(&sk->sk_receive_queue) ||
-                         (sock->state == SS_DISCONNECTING)));
+                timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
+                                                           tipc_rx_ready(sock),
+                                                           timeout);
                 lock_sock(sk);
-                if (res)
-                        goto exit;
         }
 
         /* Look at first message in receive queue */
@@ -1112,24 +1105,25 @@ restart:
         /* Capture message data (if valid) & compute return value (always) */
 
         if (!err) {
-                buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
-                sz = (unsigned char *)msg + msg_size(msg) - buf_crs;
+                u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
 
+                sz -= offset;
                 needed = (buf_len - sz_copied);
                 sz_to_copy = (sz <= needed) ? sz : needed;
-                if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
-                        res = -EFAULT;
+
+                res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
+                                              m->msg_iov, sz_to_copy);
+                if (res)
                         goto exit;
-                }
+
                 sz_copied += sz_to_copy;
 
                 if (sz_to_copy < sz) {
                         if (!(flags & MSG_PEEK))
-                                TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
+                                TIPC_SKB_CB(buf)->handle =
+                                        (void *)(unsigned long)(offset + sz_to_copy);
                         goto exit;
                 }
-
-                crs += sz_to_copy;
         } else {
                 if (sz_copied != 0)
                         goto exit;      /* can't add error msg to valid data */
@@ -1256,7 +1250,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 
         /* Enqueue message (finally!) */
 
-        TIPC_SKB_CB(buf)->handle = msg_data(msg);
+        TIPC_SKB_CB(buf)->handle = 0;
         atomic_inc(&tipc_queue_size);
         __skb_queue_tail(&sk->sk_receive_queue, buf);
 
@@ -1608,7 +1602,7 @@ restart:
                 buf = __skb_dequeue(&sk->sk_receive_queue);
                 if (buf) {
                         atomic_dec(&tipc_queue_size);
-                        if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
+                        if (TIPC_SKB_CB(buf)->handle != 0) {
                                 buf_discard(buf);
                                 goto restart;
                         }
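
Taken together, the receive-path hunks above drop the old single-iovec restriction (payload is now copied with skb_copy_datagram_iovec()) and wait on the socket's receive timeout instead of only honouring MSG_DONTWAIT. As a purely illustrative userspace sketch, not part of this patch, the fragment below shows what that enables on the caller side: a scatter read into two iovec entries, bounded by SO_RCVTIMEO, which is the value sock_rcvtimeo() hands to the new wait loop. The helper name recv_scattered, the descriptor sd, the buffer sizes, and the 2-second timeout are assumptions made up for the example.

/*
 * Illustrative sketch only (not part of the patch).  Assumes "sd" is an
 * already-created, connected AF_TIPC socket descriptor.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t recv_scattered(int sd, void *hdr, size_t hdr_len,
                              void *body, size_t body_len)
{
        struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
        struct iovec iov[2];
        struct msghdr m;

        /*
         * This receive timeout is what sock_rcvtimeo() returns inside
         * recv_msg()/recv_stream(); without it the wait is unbounded
         * unless MSG_DONTWAIT is passed.
         */
        if (setsockopt(sd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
                return -1;

        iov[0].iov_base = hdr;
        iov[0].iov_len  = hdr_len;
        iov[1].iov_base = body;
        iov[1].iov_len  = body_len;

        memset(&m, 0, sizeof(m));
        m.msg_iov    = iov;
        m.msg_iovlen = 2;       /* rejected with -EOPNOTSUPP before this change */

        return recvmsg(sd, &m, 0);
}

If no data arrives before the timeout expires, the patched wait loop falls out with -EWOULDBLOCK, so the recvmsg() call above returns -1 with errno set to EAGAIN/EWOULDBLOCK; a pending signal is reported through the usual -ERESTARTSYS path.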
