aboutsummaryrefslogtreecommitdiffstats
path: root/net/bluetooth
diff options
context:
space:
mode:
authorMat Martineau <mathewm@codeaurora.org>2011-07-22 17:53:59 -0400
committerGustavo F. Padovan <gustavo@padovan.org>2011-09-27 17:16:07 -0400
commit5b668eb3270f3f9c13ddf6e4fb57bf20c83dccff (patch)
treee50403979ce54f9e7c4aead7075c4fc266ee396a /net/bluetooth
parent449357200c5d73d80a9c42dee5dafed684b3cd17 (diff)
Bluetooth: Handle fragmented skbs in bt_sock_stream_recvmsg()
ERTM reassembly will be more efficient when skbs are linked together rather than copying every incoming data byte. The existing stream recv function assumes skbs are linear, so it needs to know how to handle fragments before reassembly is changed. bt_sock_recvmsg() already handles fragmented skbs.

Signed-off-by: Mat Martineau <mathewm@codeaurora.org>
Signed-off-by: Gustavo F. Padovan <padovan@profusion.mobi>
Diffstat (limited to 'net/bluetooth')
-rw-r--r--net/bluetooth/af_bluetooth.c30
1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 117e0d161780..062124cd89cf 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -349,7 +349,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
349 } 349 }
350 350
351 chunk = min_t(unsigned int, skb->len, size); 351 chunk = min_t(unsigned int, skb->len, size);
352 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { 352 if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
353 skb_queue_head(&sk->sk_receive_queue, skb); 353 skb_queue_head(&sk->sk_receive_queue, skb);
354 if (!copied) 354 if (!copied)
355 copied = -EFAULT; 355 copied = -EFAULT;
@@ -361,7 +361,33 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
361 sock_recv_ts_and_drops(msg, sk, skb); 361 sock_recv_ts_and_drops(msg, sk, skb);
362 362
363 if (!(flags & MSG_PEEK)) { 363 if (!(flags & MSG_PEEK)) {
364 skb_pull(skb, chunk); 364 int skb_len = skb_headlen(skb);
365
366 if (chunk <= skb_len) {
367 __skb_pull(skb, chunk);
368 } else {
369 struct sk_buff *frag;
370
371 __skb_pull(skb, skb_len);
372 chunk -= skb_len;
373
374 skb_walk_frags(skb, frag) {
375 if (chunk <= frag->len) {
376 /* Pulling partial data */
377 skb->len -= chunk;
378 skb->data_len -= chunk;
379 __skb_pull(frag, chunk);
380 break;
381 } else if (frag->len) {
382 /* Pulling all frag data */
383 chunk -= frag->len;
384 skb->len -= frag->len;
385 skb->data_len -= frag->len;
386 __skb_pull(frag, frag->len);
387 }
388 }
389 }
390
365 if (skb->len) { 391 if (skb->len) {
366 skb_queue_head(&sk->sk_receive_queue, skb); 392 skb_queue_head(&sk->sk_receive_queue, skb);
367 break; 393 break;