author		Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:15 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:15 -0500
commit		f67b45999205164958de4ec0658d51fa4bee066d (patch)
tree		2fc3c2e833fdfea2f029dca05d5401ba9ae63229
parent		2ba05622b8b143b0c95968ba59bddfbd6d2f2559 (diff)
net_dma: convert to dma_find_channel
Use the general-purpose channel allocation provided by dmaengine.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
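For context (not part of the patch), here is a minimal sketch of the conversion applied at each call site: the per-CPU net_dma pointer that net_dma kept in softnet_data, and rebalanced by hand as channels came and went, is replaced by dmaengine's general-purpose lookup, dma_find_channel(DMA_MEMCPY), which returns a memcpy-capable channel for the current CPU from dmaengine's own per-CPU channel table. The helper names old_lookup()/new_lookup() are invented for the illustration.

/*
 * Illustration only -- not part of the patch. It contrasts the
 * get_softnet_dma() helper removed from include/net/netdma.h with the
 * dma_find_channel() call the patch substitutes at each call site.
 */
#include <linux/dmaengine.h>
#include <linux/netdevice.h>

/* Before: net_dma kept its own per-CPU channel pointer in softnet_data. */
static inline struct dma_chan *old_lookup(void)
{
	struct dma_chan *chan;

	rcu_read_lock();
	chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
	rcu_read_unlock();

	return chan;
}

/*
 * After: ask dmaengine for a DMA_MEMCPY-capable channel; dmaengine
 * maintains the per-CPU channel table, so softnet_data.net_dma and
 * net_dma_rebalance() can be deleted.
 */
static inline struct dma_chan *new_lookup(void)
{
	return dma_find_channel(DMA_MEMCPY);
}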
-rw-r--r--	include/linux/netdevice.h	 3
-rw-r--r--	include/net/netdma.h		11
-rw-r--r--	net/core/dev.c			40
-rw-r--r--	net/ipv4/tcp.c			 4
-rw-r--r--	net/ipv4/tcp_input.c		 2
-rw-r--r--	net/ipv4/tcp_ipv4.c		 2
-rw-r--r--	net/ipv6/tcp_ipv6.c		 2
7 files changed, 5 insertions, 59 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 41e1224651cf..bac2c458d9b8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1113,9 +1113,6 @@ struct softnet_data
 	struct sk_buff		*completion_queue;
 
 	struct napi_struct	backlog;
-#ifdef CONFIG_NET_DMA
-	struct dma_chan		*net_dma;
-#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
diff --git a/include/net/netdma.h b/include/net/netdma.h
index cbe2737f4a61..8ba8ce284eeb 100644
--- a/include/net/netdma.h
+++ b/include/net/netdma.h
@@ -24,17 +24,6 @@
 #include <linux/dmaengine.h>
 #include <linux/skbuff.h>
 
-static inline struct dma_chan *get_softnet_dma(void)
-{
-	struct dma_chan *chan;
-
-	rcu_read_lock();
-	chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
-	rcu_read_unlock();
-
-	return chan;
-}
-
 int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
 		struct sk_buff *skb, int offset, struct iovec *to,
 		size_t len, struct dma_pinned_list *pinned_list);
diff --git a/net/core/dev.c b/net/core/dev.c
index e40b0d57f8ff..bbb07dbe1740 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4828,44 +4828,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
 #ifdef CONFIG_NET_DMA
 /**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes.  The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
-/**
  * netdev_dma_event - event callback for the net_dma_client
  * @client: should always be net_dma_client
  * @chan: DMA channel for the event
@@ -4894,7 +4856,6 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 			ack = DMA_ACK;
 			net_dma->channels[pos] = chan;
 			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
 		}
 		break;
 	case DMA_RESOURCE_REMOVED:
@@ -4909,7 +4870,6 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 			ack = DMA_ACK;
 			cpu_clear(pos, net_dma->channel_mask);
 			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
 		}
 		break;
 	default:
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 75e0e0a2d8db..9b275abc8eb9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1317,7 +1317,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    __get_cpu_var(softnet_data).net_dma) {
+		    dma_find_channel(DMA_MEMCPY)) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1527,7 +1527,7 @@ do_prequeue:
 			if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 				if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-					tp->ucopy.dma_chan = get_softnet_dma();
+					tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 				if (tp->ucopy.dma_chan) {
 					tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe8893..a6961d75c7ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = get_softnet_dma();
+		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa9331e..19d7b429a262 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337a8310..71cd70951d7d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1640,7 +1640,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else