Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c  67
1 file changed, 46 insertions(+), 21 deletions(-)

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5901010fad55..0f8caf64caa3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -265,6 +265,7 @@
 #include <linux/err.h>
 #include <linux/crypto.h>
 #include <linux/time.h>
+#include <linux/slab.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -429,7 +430,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
         if (tp->urg_seq == tp->copied_seq &&
             !sock_flag(sk, SOCK_URGINLINE) &&
             tp->urg_data)
-                target--;
+                target++;
 
         /* Potential race condition. If read of tp below will
          * escape above sk->sk_state, we can be illegally awaken
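
The tcp_poll() hunk above corrects the low-water-mark adjustment for pending urgent data. When the urgent byte is the next byte in sequence (tp->urg_seq == tp->copied_seq) and SOCK_URGINLINE is off, reads will skip that byte, so one extra queued byte is needed before the SO_RCVLOWAT target can actually be satisfied; the old target-- instead lowered the bar and could report the socket readable when only the unreadable out-of-band byte was queued. A minimal userspace model of the accounting (readable() and its parameters are illustrative, not kernel API):

#include <assert.h>
#include <stdio.h>

/*
 * Toy model of tcp_poll()'s low-water-mark test. rcv_nxt/copied_seq play
 * the roles of tp->rcv_nxt/tp->copied_seq; urg_pending models
 * "tp->urg_seq == tp->copied_seq && !SOCK_URGINLINE && tp->urg_data".
 */
static int readable(unsigned int rcv_nxt, unsigned int copied_seq,
                    int urg_pending, int rcvlowat)
{
        int target = rcvlowat;          /* sock_rcvlowat(sk, 0, INT_MAX) */

        if (urg_pending)
                target++;               /* the OOB byte will be skipped */

        return (int)(rcv_nxt - copied_seq) >= target;
}

int main(void)
{
        /* Only the OOB byte is queued: nothing the app can read yet. */
        assert(!readable(101, 100, 1, 1));
        /* OOB byte plus one in-band byte: now genuinely readable. */
        assert(readable(102, 100, 1, 1));
        /* No urgent data: one queued byte meets SO_RCVLOWAT = 1. */
        assert(readable(101, 100, 0, 1));
        puts("ok");
        return 0;
}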
@@ -1254,6 +1255,39 @@ static void tcp_prequeue_process(struct sock *sk)
         tp->ucopy.memory = 0;
 }
 
+#ifdef CONFIG_NET_DMA
+static void tcp_service_net_dma(struct sock *sk, bool wait)
+{
+        dma_cookie_t done, used;
+        dma_cookie_t last_issued;
+        struct tcp_sock *tp = tcp_sk(sk);
+
+        if (!tp->ucopy.dma_chan)
+                return;
+
+        last_issued = tp->ucopy.dma_cookie;
+        dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
+        do {
+                if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+                                              last_issued, &done,
+                                              &used) == DMA_SUCCESS) {
+                        /* Safe to free early-copied skbs now */
+                        __skb_queue_purge(&sk->sk_async_wait_queue);
+                        break;
+                } else {
+                        struct sk_buff *skb;
+                        while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
+                               (dma_async_is_complete(skb->dma_cookie, done,
+                                                      used) == DMA_SUCCESS)) {
+                                __skb_dequeue(&sk->sk_async_wait_queue);
+                                kfree_skb(skb);
+                        }
+                }
+        } while (wait);
+}
+#endif
+
 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
         struct sk_buff *skb;
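
The new tcp_service_net_dma() helper centralizes the drain logic that tcp_recvmsg() previously open-coded (removed in the last hunk below): if the engine reports every copy up to the last issued cookie complete, the whole sk_async_wait_queue is purged at once; otherwise only the skbs whose cookies fall inside the completed window are freed, and with wait == true the loop repeats until the engine catches up. A self-contained userspace sketch of that control shape, assuming monotonically increasing cookies (everything prefixed fake_ is a stand-in, not dmaengine API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef int cookie_t;

struct fake_buf {                       /* stand-in for an early-copied skb */
        cookie_t cookie;
        struct fake_buf *next;
};

static cookie_t fake_done;              /* highest cookie the "engine" finished */

/* Mirrors the structure of tcp_service_net_dma(): purge everything on full
 * completion, otherwise do partial cleanup; loop only when wait is set. */
static void service_queue(struct fake_buf **q, cookie_t last_issued, bool wait)
{
        do {
                if (fake_done >= last_issued) {
                        while (*q) {    /* __skb_queue_purge() analogue */
                                struct fake_buf *b = *q;
                                *q = b->next;
                                free(b);
                        }
                        break;
                }
                /* partial cleanup: free only buffers whose copy completed */
                while (*q && (*q)->cookie <= fake_done) {
                        struct fake_buf *b = *q;
                        *q = b->next;
                        free(b);
                }
                if (wait)
                        fake_done++;    /* simulate engine progress */
        } while (wait);
}

int main(void)
{
        struct fake_buf *q = NULL, **tail = &q;

        for (cookie_t c = 1; c <= 4; c++) {     /* queue cookies 1..4 */
                struct fake_buf *b = malloc(sizeof(*b));
                b->cookie = c;
                b->next = NULL;
                *tail = b;
                tail = &b->next;
        }
        fake_done = 2;
        service_queue(&q, 4, false);    /* non-blocking: frees 1 and 2 */
        printf("head after polling pass: %d\n", q ? q->cookie : -1);
        service_queue(&q, 4, true);     /* blocking: drains everything */
        printf("queue empty: %s\n", q ? "no" : "yes");
        return 0;
}

The bool flag lets the same function serve both the opportunistic pass inside the receive loop and the mandatory drain before returning to userspace.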
@@ -1335,6 +1369,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                 sk_eat_skb(sk, skb, 0);
                 if (!desc->count)
                         break;
+                tp->copied_seq = seq;
         }
         tp->copied_seq = seq;
 
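
The tcp_read_sock() hunk advances tp->copied_seq each time an skb is eaten and the loop continues, rather than only once after the loop (the assignment after the loop still covers the break path). Presumably this keeps the sequence bookkeeping in step with the receive queue at every point where the recv_actor can run, which matters for actors that may drop the socket lock mid-read (e.g. the splice path); the hunk itself does not state the motivation.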
@@ -1546,6 +1581,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                         /* __ Set realtime policy in scheduler __ */
                 }
 
+#ifdef CONFIG_NET_DMA
+                if (tp->ucopy.dma_chan)
+                        dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+#endif
                 if (copied >= target) {
                         /* Do not sleep, just process backlog. */
                         release_sock(sk);
@@ -1554,6 +1593,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                         sk_wait_data(sk, &timeo);
 
 #ifdef CONFIG_NET_DMA
+                tcp_service_net_dma(sk, false);  /* Don't block */
                 tp->ucopy.wakeup = 0;
 #endif
 
@@ -1633,6 +1673,9 @@ do_prequeue:
                                                 copied = -EFAULT;
                                         break;
                                 }
+
+                                dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
                                 if ((offset + used) == skb->len)
                                         copied_early = 1;
 
@@ -1702,27 +1745,9 @@ skip_copy: | |||
1702 | } | 1745 | } |
1703 | 1746 | ||
1704 | #ifdef CONFIG_NET_DMA | 1747 | #ifdef CONFIG_NET_DMA |
1705 | if (tp->ucopy.dma_chan) { | 1748 | tcp_service_net_dma(sk, true); /* Wait for queue to drain */ |
1706 | dma_cookie_t done, used; | 1749 | tp->ucopy.dma_chan = NULL; |
1707 | |||
1708 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1709 | |||
1710 | while (dma_async_memcpy_complete(tp->ucopy.dma_chan, | ||
1711 | tp->ucopy.dma_cookie, &done, | ||
1712 | &used) == DMA_IN_PROGRESS) { | ||
1713 | /* do partial cleanup of sk_async_wait_queue */ | ||
1714 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && | ||
1715 | (dma_async_is_complete(skb->dma_cookie, done, | ||
1716 | used) == DMA_SUCCESS)) { | ||
1717 | __skb_dequeue(&sk->sk_async_wait_queue); | ||
1718 | kfree_skb(skb); | ||
1719 | } | ||
1720 | } | ||
1721 | 1750 | ||
1722 | /* Safe to free early-copied skbs now */ | ||
1723 | __skb_queue_purge(&sk->sk_async_wait_queue); | ||
1724 | tp->ucopy.dma_chan = NULL; | ||
1725 | } | ||
1726 | if (tp->ucopy.pinned_list) { | 1751 | if (tp->ucopy.pinned_list) { |
1727 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); | 1752 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); |
1728 | tp->ucopy.pinned_list = NULL; | 1753 | tp->ucopy.pinned_list = NULL; |
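
Both the helper added above and the open-coded loop it replaces gate the per-skb cleanup on dma_async_is_complete(), which treats the 32-bit cookie space as a circular window between the last completed and the last issued cookie, so the test stays correct across s32 wraparound. A userspace transcription of that predicate with a couple of sanity checks (based on dmaengine.h of roughly this vintage, where DMA_SUCCESS was later renamed DMA_COMPLETE; verify against your tree before relying on it):

#include <assert.h>
#include <stdio.h>

typedef int dma_cookie_t;               /* s32 in the kernel */
enum dma_status { DMA_SUCCESS, DMA_IN_PROGRESS };

/* Circular-window completion test: a cookie is done if it does NOT lie in
 * the half-open in-flight range (last_complete, last_used]. */
static enum dma_status dma_async_is_complete(dma_cookie_t cookie,
                                             dma_cookie_t last_complete,
                                             dma_cookie_t last_used)
{
        if (last_complete <= last_used) {
                if ((cookie <= last_complete) || (cookie > last_used))
                        return DMA_SUCCESS;
        } else {                        /* counter has wrapped */
                if ((cookie <= last_complete) && (cookie > last_used))
                        return DMA_SUCCESS;
        }
        return DMA_IN_PROGRESS;
}

int main(void)
{
        /* No wrap: completed up to 10, issued up to 15. */
        assert(dma_async_is_complete(9, 10, 15) == DMA_SUCCESS);
        assert(dma_async_is_complete(12, 10, 15) == DMA_IN_PROGRESS);
        /* Wrapped: completed up to 2147483600, issued wrapped to 5. */
        assert(dma_async_is_complete(2147483500, 2147483600, 5) == DMA_SUCCESS);
        assert(dma_async_is_complete(3, 2147483600, 5) == DMA_IN_PROGRESS);
        puts("ok");
        return 0;
}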