Diffstat (limited to 'net'):

 net/core/dev.c       | 40 ----------------------------------------
 net/ipv4/tcp.c       |  4 ++--
 net/ipv4/tcp_input.c |  2 +-
 net/ipv4/tcp_ipv4.c  |  2 +-
 net/ipv6/tcp_ipv6.c  |  2 +-
 5 files changed, 5 insertions(+), 45 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index e40b0d57f8ff..bbb07dbe1740 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4828,44 +4828,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
 #ifdef CONFIG_NET_DMA
 /**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes. The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
 /**
  * netdev_dma_event - event callback for the net_dma_client
  * @client: should always be net_dma_client
  * @chan: DMA channel for the event
@@ -4894,7 +4856,6 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 			ack = DMA_ACK;
 			net_dma->channels[pos] = chan;
 			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
 		}
 		break;
 	case DMA_RESOURCE_REMOVED:
@@ -4909,7 +4870,6 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 			ack = DMA_ACK;
 			cpu_clear(pos, net_dma->channel_mask);
 			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
 		}
 		break;
 	default:
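
[Note] The deleted net_dma_rebalance() spread the online CPUs across the registered DMA channels: each channel got num_online_cpus() / channels CPUs, and the first num_online_cpus() % channels channels got one extra. Below is a minimal userspace C sketch of that distribution arithmetic, with made-up CPU and channel counts standing in for the kernel's cpumask state; it is illustrative only, not kernel code.

#include <stdio.h>

/* Sketch of the deleted rebalance arithmetic (illustrative only):
 * hand out ncpus CPUs across nchans channels, giving the first
 * (ncpus % nchans) channels one extra CPU each. */
static void rebalance(unsigned int ncpus, unsigned int nchans)
{
	unsigned int cpu = 0, i, n;

	if (nchans == 0)
		return;	/* the kernel version instead cleared every per-CPU pointer */

	for (i = 0; i < nchans; i++) {
		n = ncpus / nchans + (i < ncpus % nchans ? 1 : 0);
		while (n--)
			printf("cpu%u -> chan%u\n", cpu++, i);
	}
}

int main(void)
{
	rebalance(8, 3);	/* 8 CPUs over 3 channels: shares of 3, 3, 2 */
	return 0;
}
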
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 75e0e0a2d8db..9b275abc8eb9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1317,7 +1317,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    __get_cpu_var(softnet_data).net_dma) {
+		    dma_find_channel(DMA_MEMCPY)) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1527,7 +1527,7 @@ do_prequeue:
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = get_softnet_dma();
+				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
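
[Note] With the per-CPU softnet_data.net_dma pointer gone, each use site now asks the dmaengine core directly via dma_find_channel(DMA_MEMCPY), so no rebalance bookkeeping is needed when channels come and go. The guard in the first hunk above decides when the copy is worth offloading; here is a minimal C sketch of that predicate, with the sysctls and the channel lookup replaced by plain parameters (hypothetical helper name, illustration only, not kernel API).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch of the receive-path guard (hypothetical helper): offload only when
 * the read would otherwise block (available < target), the copy is big
 * enough to amortize DMA setup (len > copybreak), the data is really
 * consumed (no MSG_PEEK), low-latency mode is off, and a memcpy-capable
 * DMA channel was found. */
static bool use_dma_copy(size_t available, size_t target, size_t len,
			 size_t copybreak, bool msg_peek, bool low_latency,
			 bool have_memcpy_chan)
{
	return available < target && len > copybreak &&
	       !msg_peek && !low_latency && have_memcpy_chan;
}

int main(void)
{
	/* 4 KiB pending, 64 KiB wanted, 128 KiB read, 4 KiB copybreak */
	printf("%d\n", use_dma_copy(4096, 65536, 131072, 4096,
				    false, false, true));	/* prints 1 */
	return 0;
}
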
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe8893..a6961d75c7ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = get_softnet_dma();
+		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa9331e..19d7b429a262 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337a8310..71cd70951d7d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1640,7 +1640,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else