Diffstat (limited to 'net/ipv4/tcp.c')

 net/ipv4/tcp.c | 65 ++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 44 insertions(+), 21 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5901010fad55..6afb6d8662b2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -429,7 +429,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		if (tp->urg_seq == tp->copied_seq &&
 		    !sock_flag(sk, SOCK_URGINLINE) &&
 		    tp->urg_data)
-			target--;
+			target++;
 
 		/* Potential race condition. If read of tp below will
 		 * escape above sk->sk_state, we can be illegally awaken
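The hunk above fixes the poll threshold arithmetic. When an urgent byte sits at tp->copied_seq and SO_OOBINLINE is off, the receive path skips that byte rather than copying it, so one more queued byte is needed before `target` readable bytes exist; the old `target--` lowered the threshold and could signal POLLIN before a read of `target` bytes could be satisfied. A minimal sketch of the corrected rule (illustrative helper, not kernel code):

/* Illustrative only: how the low-water mark should shift when an
 * out-of-band byte sits at the read pointer and is not read inline.
 * The urgent byte is skipped by the copy loop, so it cannot count
 * toward `target`. */
static unsigned int effective_poll_target(unsigned int target,
					  int urg_byte_at_read_ptr,
					  int oob_inline)
{
	if (urg_byte_at_read_ptr && !oob_inline)
		return target + 1;	/* matches the new target++ */
	return target;
}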
@@ -1254,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk)
 	tp->ucopy.memory = 0;
 }
 
+#ifdef CONFIG_NET_DMA
+static void tcp_service_net_dma(struct sock *sk, bool wait)
+{
+	dma_cookie_t done, used;
+	dma_cookie_t last_issued;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!tp->ucopy.dma_chan)
+		return;
+
+	last_issued = tp->ucopy.dma_cookie;
+	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
+	do {
+		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+					      last_issued, &done,
+					      &used) == DMA_SUCCESS) {
+			/* Safe to free early-copied skbs now */
+			__skb_queue_purge(&sk->sk_async_wait_queue);
+			break;
+		} else {
+			struct sk_buff *skb;
+			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
+			       (dma_async_is_complete(skb->dma_cookie, done,
+						      used) == DMA_SUCCESS)) {
+				__skb_dequeue(&sk->sk_async_wait_queue);
+				kfree_skb(skb);
+			}
+		}
+	} while (wait);
+}
+#endif
+
 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
 	struct sk_buff *skb;
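tcp_service_net_dma() is the heart of the patch: it kicks the channel, then either reaps only those early-copied skbs whose cookies have already completed (wait == false, called from the receive loop) or loops until the last issued cookie reports DMA_SUCCESS and purges the whole queue (wait == true, called on exit from tcp_recvmsg()). Completion is decided per cookie; the sketch below is a simplified userspace restatement of the dma_async_is_complete() window test from this era's include/linux/dmaengine.h:

/* Simplified restatement of dma_async_is_complete(): cookies increase
 * monotonically and wrap; a transfer is complete when its cookie lies
 * outside the (last_complete, last_used] window of in-flight work. */
typedef int dma_cookie_t;
enum dma_status { DMA_SUCCESS, DMA_IN_PROGRESS };

static enum dma_status cookie_status(dma_cookie_t cookie,
				     dma_cookie_t last_complete,
				     dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		/* No wraparound between completed and issued cookies. */
		if (cookie <= last_complete || cookie > last_used)
			return DMA_SUCCESS;
	} else {
		/* The cookie counter wrapped past the completed mark. */
		if (cookie <= last_complete && cookie > last_used)
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}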
@@ -1546,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Set realtime policy in scheduler __ */
 		}
 
+#ifdef CONFIG_NET_DMA
+		if (tp->ucopy.dma_chan)
+			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+#endif
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
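This hunk, like the per-skb kick in the do_prequeue hunk further down, exists because the async memcpy API is two-phase: dma_async_memcpy_*() only queues a descriptor and returns a cookie, and nothing moves until dma_async_memcpy_issue_pending() starts the channel. Before this patch the kick happened only at tcp_recvmsg() exit, so copies piled up un-issued; issuing before the sleep decision lets queued copies make progress while the task waits for more data. A sketch of the pattern (dmaengine API of this kernel generation; error handling elided):

#include <linux/dmaengine.h>

/* Sketch of the two-phase submit/kick pattern; chan, dst, src and len
 * are assumed to be provided by the caller. */
static dma_cookie_t queue_and_kick(struct dma_chan *chan,
				   void *dst, void *src, size_t len)
{
	/* Queue the copy; the hardware does not start yet. */
	dma_cookie_t cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);

	if (cookie >= 0)
		dma_async_memcpy_issue_pending(chan);	/* now it runs */
	return cookie;
}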
@@ -1554,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			sk_wait_data(sk, &timeo);
 
 #ifdef CONFIG_NET_DMA
+		tcp_service_net_dma(sk, false);  /* Don't block */
 		tp->ucopy.wakeup = 0;
 #endif
 
@@ -1633,6 +1671,9 @@ do_prequeue:
 					copied = -EFAULT;
 				break;
 			}
+
+			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
 			if ((offset + used) == skb->len)
 				copied_early = 1;
 
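In this early-copy path, an skb whose payload has been handed to the DMA engine must stay alive until its cookie completes, so (outside the shown context) it is parked on sk->sk_async_wait_queue tagged with its cookie, and tcp_service_net_dma() later walks that queue. A sketch of the parking step, with field and queue names assumed to match this era's net core under CONFIG_NET_DMA:

#include <linux/dmaengine.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: remember which DMA op covers this skb, then park it until
 * tcp_service_net_dma() sees that cookie complete and frees it. */
static void park_early_copied_skb(struct sock *sk, struct sk_buff *skb,
				  dma_cookie_t cookie)
{
	skb->dma_cookie = cookie;	/* checked via dma_async_is_complete() */
	__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}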
@@ -1702,27 +1743,9 @@ skip_copy:
 	}
 
 #ifdef CONFIG_NET_DMA
-	if (tp->ucopy.dma_chan) {
-		dma_cookie_t done, used;
-
-		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
-
-		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
-						 tp->ucopy.dma_cookie, &done,
-						 &used) == DMA_IN_PROGRESS) {
-			/* do partial cleanup of sk_async_wait_queue */
-			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-			       (dma_async_is_complete(skb->dma_cookie, done,
-						      used) == DMA_SUCCESS)) {
-				__skb_dequeue(&sk->sk_async_wait_queue);
-				kfree_skb(skb);
-			}
-		}
+	tcp_service_net_dma(sk, true);	/* Wait for queue to drain */
+	tp->ucopy.dma_chan = NULL;
 
-		/* Safe to free early-copied skbs now */
-		__skb_queue_purge(&sk->sk_async_wait_queue);
-		tp->ucopy.dma_chan = NULL;
-	}
 	if (tp->ucopy.pinned_list) {
 		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
 		tp->ucopy.pinned_list = NULL;
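Net effect at the exit path: the open-coded DMA_IN_PROGRESS spin loop is replaced by tcp_service_net_dma(sk, true), which blocks until the last issued cookie completes and then purges sk_async_wait_queue, while the new wait == false call inside the receive loop reaps completed skbs periodically instead of letting them accumulate for the whole syscall. A compilable userspace restatement of that control flow, with all kernel calls stubbed:

#include <stdbool.h>
#include <stdio.h>

/* Stub 'completion': pretend the last issued cookie finishes after a
 * few polls, the way a busy channel eventually drains. */
static bool last_cookie_complete(void)
{
	static int polls;
	return ++polls > 3;
}

static void reap_completed_skbs(void) { puts("partial cleanup"); }
static void purge_wait_queue(void)    { puts("purge all, done"); }

/* Mirrors tcp_service_net_dma(): one pass when wait is false,
 * loop to completion when wait is true. */
static void service_dma(bool wait)
{
	do {
		if (last_cookie_complete()) {
			purge_wait_queue();
			break;
		}
		reap_completed_skbs();
	} while (wait);
}

int main(void)
{
	service_dma(false);	/* receive loop: never blocks */
	service_dma(true);	/* recvmsg exit: drains the queue */
	return 0;
}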