author		Linus Torvalds <torvalds@linux-foundation.org>	2009-01-09 14:52:14 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-09 14:52:14 -0500
commit		d9e8a3a5b8298a3c814ed37ac5756e6f67b6be41 (patch)
tree		ffa1cf8b367b3f32155f6336d7b86b781a368019 /net
parent		2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f (diff)
parent		b9bdcbba010c2e49c8f837ea7a49fe006b636f41 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (22 commits)
ioat: fix self test for multi-channel case
dmaengine: bump initcall level to arch_initcall
dmaengine: advertise all channels on a device to dma_filter_fn
dmaengine: use idr for registering dma device numbers
dmaengine: add a release for dma class devices and dependent infrastructure
ioat: do not perform removal actions at shutdown
iop-adma: enable module removal
iop-adma: kill debug BUG_ON
iop-adma: let devm do its job, don't duplicate free
dmaengine: kill enum dma_state_client
dmaengine: remove 'bigref' infrastructure
dmaengine: kill struct dma_client and supporting infrastructure
dmaengine: replace dma_async_client_register with dmaengine_get
atmel-mci: convert to dma_request_channel and down-level dma_slave
dmatest: convert to dma_request_channel
dmaengine: introduce dma_request_channel and private channels
net_dma: convert to dma_find_channel
dmaengine: provide a common 'issue_pending_all' implementation
dmaengine: centralize channel allocation, introduce dma_find_channel
dmaengine: up-level reference counting to the module level
...
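The theme of the series above is the removal of the dma_client registration/callback model in favor of module-level reference counting plus on-demand channel lookup. As a rough illustration only -- not code from this merge, and with a hypothetical example_memcpy() wrapper and polling loop -- a consumer of the new interface looks something like this:

/* Hypothetical consumer sketch of the post-merge dmaengine API:
 * dmaengine_get()/dmaengine_put() replace dma_async_client_register(),
 * and dma_find_channel() replaces per-client channel bookkeeping.
 */
#include <linux/dmaengine.h>
#include <linux/string.h>

static void example_memcpy(void *dest, void *src, size_t len)
{
	struct dma_chan *chan;
	dma_cookie_t cookie;

	dmaengine_get();	/* module-level reference, typically taken once at init */

	/* Lockless per-cpu lookup; only valid while the reference is held. */
	chan = dma_find_channel(DMA_MEMCPY);
	if (chan) {
		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
		dma_issue_pending_all();	/* kick every channel, no mask walk */
		while (dma_async_memcpy_complete(chan, cookie, NULL, NULL) ==
		       DMA_IN_PROGRESS)
			cpu_relax();
	} else {
		memcpy(dest, src, len);		/* no channel: fall back to the CPU */
	}

	dmaengine_put();
}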
Diffstat (limited to 'net')
-rw-r--r--	net/core/dev.c       | 149
-rw-r--r--	net/ipv4/tcp.c       |   5
-rw-r--r--	net/ipv4/tcp_input.c |   2
-rw-r--r--	net/ipv4/tcp_ipv4.c  |   2
-rw-r--r--	net/ipv6/tcp_ipv6.c  |   2
5 files changed, 9 insertions, 151 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index bab8bcedd62e..5f736f1ceeae 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-	struct dma_client client;
-	spinlock_t lock;
-	cpumask_t channel_mask;
-	struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state);
-
-static struct net_dma net_dma = {
-	.client = {
-		.event_callback = netdev_dma_event,
-	},
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -2754,14 +2735,7 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 
 	return;
@@ -4952,122 +4926,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes. The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state)
-{
-	int i, found = 0, pos = -1;
-	struct net_dma *net_dma =
-		container_of(client, struct net_dma, client);
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	spin_lock(&net_dma->lock);
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				break;
-			} else if (net_dma->channels[i] == NULL && pos < 0)
-				pos = i;
-
-		if (!found && pos >= 0) {
-			ack = DMA_ACK;
-			net_dma->channels[pos] = chan;
-			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	case DMA_RESOURCE_REMOVED:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				pos = i;
-				break;
-			}
-
-		if (found) {
-			ack = DMA_ACK;
-			cpu_clear(pos, net_dma->channel_mask);
-			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	default:
-		break;
-	}
-	spin_unlock(&net_dma->lock);
-
-	return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-								GFP_KERNEL);
-	if (unlikely(!net_dma.channels)) {
-		printk(KERN_NOTICE
-				"netdev_dma: no memory for net_dma.channels\n");
-		return -ENOMEM;
-	}
-	spin_lock_init(&net_dma.lock);
-	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-	dma_async_client_register(&net_dma.client);
-	dma_async_client_chan_request(&net_dma.client);
-	return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
-
 
 /**
  * netdev_increment_features - increment feature set by one
@@ -5287,14 +5145,15 @@ static int __init net_dev_init(void)
 	if (register_pernet_device(&default_device_ops))
 		goto out;
 
-	netdev_dma_register();
-
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
 	hotcpu_notifier(dev_cpu_callback, 0);
 	dst_init();
 	dev_mcast_init();
+#ifdef CONFIG_NET_DMA
+	dmaengine_get();
+#endif
 	rc = 0;
 out:
 	return rc;
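The eight-line channel_mask walk removed from net_rx_action() above collapses into the single dma_issue_pending_all() call. A simplified sketch of what that common implementation does -- an approximation of the dmaengine core added by this series (see "dmaengine: provide a common 'issue_pending_all' implementation"), not a verbatim copy:

/* Approximate body of dma_issue_pending_all(): walk every registered
 * DMA device under RCU and kick each in-use channel, so clients no
 * longer need their own channel mask.  Details may differ from the
 * real dmaengine core.
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	rcu_read_unlock();
}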
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bd6ff907d9e4..ce572f9dff02 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1313,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    __get_cpu_var(softnet_data).net_dma) {
+		    dma_find_channel(DMA_MEMCPY)) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1523,7 +1523,7 @@ do_prequeue:
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = get_softnet_dma();
+				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1628,7 +1628,6 @@ skip_copy:
 
 		/* Safe to free early-copied skbs now */
 		__skb_queue_purge(&sk->sk_async_wait_queue);
-		dma_chan_put(tp->ucopy.dma_chan);
 		tp->ucopy.dma_chan = NULL;
 	}
 	if (tp->ucopy.pinned_list) {
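Note that the dma_chan_put() removed here has no replacement: dma_find_channel() hands back the per-cpu channel without taking a per-call reference, relying instead on the module-level dmaengine_get() taken in net_dev_init(). Roughly -- an approximation of the helper's shape, not the verbatim source:

/* Approximate shape of dma_find_channel(): a lockless read of a
 * per-cpu channel table, safe while a dmaengine reference is held,
 * which is why no per-channel get/put appears in the TCP paths.
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return per_cpu_ptr(channel_table[tx_type], smp_processor_id())->chan;
}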
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe8893..a6961d75c7ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = get_softnet_dma();
+		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa9331e..19d7b429a262 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1297306d729c..e5b85d45bee8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1675,7 +1675,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else