author		Chris Leech <christopher.leech@intel.com>	2007-03-08 12:57:36 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2007-07-11 19:10:53 -0400
commit		2b1244a43be97f504494b557a7f7a65fe0d00dba (patch)
tree		d3be36597917dbbae664fc6eb97dbe876c5e44e3 /net/ipv4/tcp.c
parent		72d0b7a81d60f5e64ee7197bc190b9b3265f99dd (diff)
I/OAT: Only offload copies for TCP when there will be a context switch
The performance wins come from having the DMA copy engine do the copies in parallel with the context switch. If there is enough data ready on the socket at recv time, just use a regular copy.

Signed-off-by: Chris Leech <christopher.leech@intel.com>
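For illustration, the test this patch adds boils down to the standalone sketch below. The helper name and parameters are hypothetical; the real check is open-coded in tcp_recvmsg(), where 'target' comes from sock_rcvlowat(), and it additionally consults MSG_PEEK, sysctl_tcp_low_latency, and whether a net_dma channel is available.

	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical standalone form of the offload decision added by
	 * this patch. 'available' is the payload already queued ahead of
	 * the reader, 'target' the wakeup threshold, 'copybreak' the
	 * sysctl_tcp_dma_copybreak cutoff. */
	int should_offload_copy(uint32_t available, uint32_t target,
				size_t len, size_t copybreak)
	{
		/* Enough data is already queued: recvmsg() will return
		 * without sleeping, so a synchronous copy beats the DMA
		 * setup cost. */
		if (available >= target)
			return 0;
		/* Short requests are also cheaper to copy synchronously. */
		if (len <= copybreak)
			return 0;
		/* The caller will likely block: start the DMA copy so it
		 * overlaps with the context switch. */
		return 1;
	}

The key design point is the first test: offloading only pays off when the copy can run while the task sleeps, so a receive queue that can already satisfy the wakeup target disables the offload.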
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	| 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 450f44bb2c8e..0eb7cf1002c1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1116,6 +1116,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	long timeo;
 	struct task_struct *user_recv = NULL;
 	int copied_early = 0;
+	int available = 0;
+	struct sk_buff *skb;
 
 	lock_sock(sk);
 
@@ -1142,7 +1144,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 #ifdef CONFIG_NET_DMA
 	tp->ucopy.dma_chan = NULL;
 	preempt_disable();
-	if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+	skb = skb_peek_tail(&sk->sk_receive_queue);
+	if (skb)
+		available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
+	if ((available < target) &&
+	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
 		preempt_enable_no_resched();
 		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1151,7 +1157,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 #endif
 
 	do {
-		struct sk_buff *skb;
 		u32 offset;
 
 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
@@ -1439,7 +1444,6 @@ skip_copy:
 
 #ifdef CONFIG_NET_DMA
 	if (tp->ucopy.dma_chan) {
-		struct sk_buff *skb;
 		dma_cookie_t done, used;
 
 		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
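A note on the 'available' arithmetic above: TCP_SKB_CB(skb)->seq is the sequence number of the tail skb's first byte, so seq + skb->len is one past the last byte queued, and subtracting the reader's position *seq yields the bytes ready to copy. A minimal sketch with made-up numbers (standalone illustration; the local variables stand in for the kernel fields):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t copied_seq = 1000;	/* reader position, *seq in tcp_recvmsg()   */
		uint32_t tail_seq   = 1400;	/* TCP_SKB_CB(skb)->seq of the tail skb     */
		uint32_t tail_len   = 600;	/* skb->len, payload bytes in the tail skb  */

		/* Unsigned 32-bit wraparound keeps this correct across
		 * sequence-number rollover, just as in the kernel. */
		uint32_t available = tail_seq + tail_len - copied_seq;

		printf("%u bytes queued ahead of the reader\n", available);	/* 1000 */
		return 0;
	}

With available = 1000 below a larger target, the new (available < target) condition lets the DMA path engage; once the queue can satisfy the wakeup target on its own, the code falls back to a regular copy.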