aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp.c
diff options
context:
space:
mode:
authorChris Leech <christopher.leech@intel.com>2006-05-23 21:05:53 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2006-06-18 00:25:56 -0400
commit1a2449a87bb7606113b1aa1a9d3c3e78ef189a1c (patch)
tree86e833a8694f201de138697646e7e8469e9c8ef6 /net/ipv4/tcp.c
parent9593782585e0cf70babe787a8463d492a68b1744 (diff)
[I/OAT]: TCP recv offload to I/OAT
Locks down user pages and sets up for DMA in tcp_recvmsg, then calls dma_async_try_early_copy in tcp_v4_do_rcv.

Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r-- net/ipv4/tcp.c | 103
1 file changed, 91 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4e067d25a63c..ff6ccda9ff46 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -263,7 +263,7 @@
263#include <net/tcp.h> 263#include <net/tcp.h>
264#include <net/xfrm.h> 264#include <net/xfrm.h>
265#include <net/ip.h> 265#include <net/ip.h>
266 266#include <net/netdma.h>
267 267
268#include <asm/uaccess.h> 268#include <asm/uaccess.h>
269#include <asm/ioctls.h> 269#include <asm/ioctls.h>
@@ -1110,6 +1110,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1110 int target; /* Read at least this many bytes */ 1110 int target; /* Read at least this many bytes */
1111 long timeo; 1111 long timeo;
1112 struct task_struct *user_recv = NULL; 1112 struct task_struct *user_recv = NULL;
1113 int copied_early = 0;
1113 1114
1114 lock_sock(sk); 1115 lock_sock(sk);
1115 1116
@@ -1133,6 +1134,17 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1133 1134
1134 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1135 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1135 1136
1137#ifdef CONFIG_NET_DMA
1138 tp->ucopy.dma_chan = NULL;
1139 preempt_disable();
1140 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1141 !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) {
1142 preempt_enable_no_resched();
1143 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
1144 } else
1145 preempt_enable_no_resched();
1146#endif
1147
1136 do { 1148 do {
1137 struct sk_buff *skb; 1149 struct sk_buff *skb;
1138 u32 offset; 1150 u32 offset;
@@ -1274,6 +1286,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1274 } else 1286 } else
1275 sk_wait_data(sk, &timeo); 1287 sk_wait_data(sk, &timeo);
1276 1288
1289#ifdef CONFIG_NET_DMA
1290 tp->ucopy.wakeup = 0;
1291#endif
1292
1277 if (user_recv) { 1293 if (user_recv) {
1278 int chunk; 1294 int chunk;
1279 1295
@@ -1329,13 +1345,39 @@ do_prequeue:
1329 } 1345 }
1330 1346
1331 if (!(flags & MSG_TRUNC)) { 1347 if (!(flags & MSG_TRUNC)) {
1332 err = skb_copy_datagram_iovec(skb, offset, 1348#ifdef CONFIG_NET_DMA
1333 msg->msg_iov, used); 1349 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1334 if (err) { 1350 tp->ucopy.dma_chan = get_softnet_dma();
1335 /* Exception. Bailout! */ 1351
1336 if (!copied) 1352 if (tp->ucopy.dma_chan) {
1337 copied = -EFAULT; 1353 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1338 break; 1354 tp->ucopy.dma_chan, skb, offset,
1355 msg->msg_iov, used,
1356 tp->ucopy.pinned_list);
1357
1358 if (tp->ucopy.dma_cookie < 0) {
1359
1360 printk(KERN_ALERT "dma_cookie < 0\n");
1361
1362 /* Exception. Bailout! */
1363 if (!copied)
1364 copied = -EFAULT;
1365 break;
1366 }
1367 if ((offset + used) == skb->len)
1368 copied_early = 1;
1369
1370 } else
1371#endif
1372 {
1373 err = skb_copy_datagram_iovec(skb, offset,
1374 msg->msg_iov, used);
1375 if (err) {
1376 /* Exception. Bailout! */
1377 if (!copied)
1378 copied = -EFAULT;
1379 break;
1380 }
1339 } 1381 }
1340 } 1382 }
1341 1383
@@ -1355,15 +1397,19 @@ skip_copy:
1355 1397
1356 if (skb->h.th->fin) 1398 if (skb->h.th->fin)
1357 goto found_fin_ok; 1399 goto found_fin_ok;
1358 if (!(flags & MSG_PEEK)) 1400 if (!(flags & MSG_PEEK)) {
1359 sk_eat_skb(sk, skb, 0); 1401 sk_eat_skb(sk, skb, copied_early);
1402 copied_early = 0;
1403 }
1360 continue; 1404 continue;
1361 1405
1362 found_fin_ok: 1406 found_fin_ok:
1363 /* Process the FIN. */ 1407 /* Process the FIN. */
1364 ++*seq; 1408 ++*seq;
1365 if (!(flags & MSG_PEEK)) 1409 if (!(flags & MSG_PEEK)) {
1366 sk_eat_skb(sk, skb, 0); 1410 sk_eat_skb(sk, skb, copied_early);
1411 copied_early = 0;
1412 }
1367 break; 1413 break;
1368 } while (len > 0); 1414 } while (len > 0);
1369 1415
@@ -1386,6 +1432,36 @@ skip_copy:
1386 tp->ucopy.len = 0; 1432 tp->ucopy.len = 0;
1387 } 1433 }
1388 1434
1435#ifdef CONFIG_NET_DMA
1436 if (tp->ucopy.dma_chan) {
1437 struct sk_buff *skb;
1438 dma_cookie_t done, used;
1439
1440 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1441
1442 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1443 tp->ucopy.dma_cookie, &done,
1444 &used) == DMA_IN_PROGRESS) {
1445 /* do partial cleanup of sk_async_wait_queue */
1446 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1447 (dma_async_is_complete(skb->dma_cookie, done,
1448 used) == DMA_SUCCESS)) {
1449 __skb_dequeue(&sk->sk_async_wait_queue);
1450 kfree_skb(skb);
1451 }
1452 }
1453
1454 /* Safe to free early-copied skbs now */
1455 __skb_queue_purge(&sk->sk_async_wait_queue);
1456 dma_chan_put(tp->ucopy.dma_chan);
1457 tp->ucopy.dma_chan = NULL;
1458 }
1459 if (tp->ucopy.pinned_list) {
1460 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1461 tp->ucopy.pinned_list = NULL;
1462 }
1463#endif
1464
1389 /* According to UNIX98, msg_name/msg_namelen are ignored 1465 /* According to UNIX98, msg_name/msg_namelen are ignored
1390 * on connected socket. I was just happy when found this 8) --ANK 1466 * on connected socket. I was just happy when found this 8) --ANK
1391 */ 1467 */
@@ -1658,6 +1734,9 @@ int tcp_disconnect(struct sock *sk, int flags)
1658 __skb_queue_purge(&sk->sk_receive_queue); 1734 __skb_queue_purge(&sk->sk_receive_queue);
1659 sk_stream_writequeue_purge(sk); 1735 sk_stream_writequeue_purge(sk);
1660 __skb_queue_purge(&tp->out_of_order_queue); 1736 __skb_queue_purge(&tp->out_of_order_queue);
1737#ifdef CONFIG_NET_DMA
1738 __skb_queue_purge(&sk->sk_async_wait_queue);
1739#endif
1661 1740
1662 inet->dport = 0; 1741 inet->dport = 0;
1663 1742