aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorSjur Braendeland <sjur.brandeland@stericsson.com>2010-05-20 22:16:12 -0400
committerDavid S. Miller <davem@davemloft.net>2010-05-24 02:57:43 -0400
commitdcda138d2f27e32bd0d6250cc42839b0d70bb4b8 (patch)
tree679ff3b9ef79f7fbb5821bff2d050fd0638d2366 /net
parenta9a8f1070d8733b37418b3a2d58df4e771b61f88 (diff)
caif: Bugfix - use MSG_TRUNC in receive
Fixed handling when the skb doesn't fit in the user buffer: instead of returning -EMSGSIZE, the buffer is truncated (just as unix seqpacket does). Signed-off-by: Sjur Braendeland <sjur.brandeland@stericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/caif/caif_socket.c47
1 files changed, 18 insertions, 29 deletions
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 691a5710974..3d0e09584fa 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -292,53 +292,42 @@ static void caif_check_flow_release(struct sock *sk)
292 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); 292 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
293 } 293 }
294} 294}
295
295/* 296/*
296 * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer 297 * Copied from unix_dgram_recvmsg, but removed credit checks,
297 * has sufficient size. 298 * changed locking, address handling and added MSG_TRUNC.
298 */ 299 */
299
300static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, 300static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
301 struct msghdr *m, size_t buf_len, int flags) 301 struct msghdr *m, size_t len, int flags)
302 302
303{ 303{
304 struct sock *sk = sock->sk; 304 struct sock *sk = sock->sk;
305 struct sk_buff *skb; 305 struct sk_buff *skb;
306 int ret = 0; 306 int ret;
307 int len; 307 int copylen;
308 308
309 if (unlikely(!buf_len)) 309 ret = -EOPNOTSUPP;
310 return -EINVAL; 310 if (m->msg_flags&MSG_OOB)
311 goto read_error;
311 312
312 skb = skb_recv_datagram(sk, flags, 0 , &ret); 313 skb = skb_recv_datagram(sk, flags, 0 , &ret);
313 if (!skb) 314 if (!skb)
314 goto read_error; 315 goto read_error;
315 316 copylen = skb->len;
316 len = skb->len; 317 if (len < copylen) {
317 318 m->msg_flags |= MSG_TRUNC;
318 if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) { 319 copylen = len;
319 len = buf_len;
320 /*
321 * Push skb back on receive queue if buffer too small.
322 * This has a built-in race where multi-threaded receive
323 * may get packet in wrong order, but multiple read does
324 * not really guarantee ordered delivery anyway.
325 * Let's optimize for speed without taking locks.
326 */
327
328 skb_queue_head(&sk->sk_receive_queue, skb);
329 ret = -EMSGSIZE;
330 goto read_error;
331 } 320 }
332 321
333 ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); 322 ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
334 if (ret) 323 if (ret)
335 goto read_error; 324 goto out_free;
336 325
326 ret = (flags & MSG_TRUNC) ? skb->len : copylen;
327out_free:
337 skb_free_datagram(sk, skb); 328 skb_free_datagram(sk, skb);
338
339 caif_check_flow_release(sk); 329 caif_check_flow_release(sk);
340 330 return ret;
341 return len;
342 331
343read_error: 332read_error:
344 return ret; 333 return ret;