author     Linus Torvalds <torvalds@linux-foundation.org>  2009-01-09 14:52:14 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-09 14:52:14 -0500
commit     d9e8a3a5b8298a3c814ed37ac5756e6f67b6be41 (patch)
tree       ffa1cf8b367b3f32155f6336d7b86b781a368019 /net/core
parent     2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f (diff)
parent     b9bdcbba010c2e49c8f837ea7a49fe006b636f41 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (22 commits)
ioat: fix self test for multi-channel case
dmaengine: bump initcall level to arch_initcall
dmaengine: advertise all channels on a device to dma_filter_fn
dmaengine: use idr for registering dma device numbers
dmaengine: add a release for dma class devices and dependent infrastructure
ioat: do not perform removal actions at shutdown
iop-adma: enable module removal
iop-adma: kill debug BUG_ON
iop-adma: let devm do its job, don't duplicate free
dmaengine: kill enum dma_state_client
dmaengine: remove 'bigref' infrastructure
dmaengine: kill struct dma_client and supporting infrastructure
dmaengine: replace dma_async_client_register with dmaengine_get
atmel-mci: convert to dma_request_channel and down-level dma_slave
dmatest: convert to dma_request_channel
dmaengine: introduce dma_request_channel and private channels
net_dma: convert to dma_find_channel
dmaengine: provide a common 'issue_pending_all' implementation
dmaengine: centralize channel allocation, introduce dma_find_channel
dmaengine: up-level reference counting to the module level
...
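Taken together, these commits replace the callback-driven struct dma_client model with module-level reference counting: a subsystem pins the dmaengine with dmaengine_get(), looks up a shared channel on demand with dma_find_channel(), and drops the pin with dmaengine_put(). A minimal sketch of that consumer pattern, matching the net/core changes in the diff below (the my_consumer_* names are placeholders; error handling is elided):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmaengine.h>

static int __init my_consumer_init(void)
{
	/* Pin the dmaengine: public channels stay allocated while at
	 * least one module-level reference is outstanding. */
	dmaengine_get();
	return 0;
}
module_init(my_consumer_init);

static void my_consumer_do_copy(void)
{
	/* Per-operation lookup; NULL if no public memcpy-capable
	 * channel exists. */
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	if (chan) {
		/* ... submit async memcpy descriptors on chan ... */
		dma_async_memcpy_issue_pending(chan);
	}
	/* else: fall back to a synchronous copy */
}

static void __exit my_consumer_exit(void)
{
	/* Balance the reference taken at init. */
	dmaengine_put();
}
module_exit(my_consumer_exit);

Because dma_find_channel() can return NULL, callers keep a synchronous fallback path, as the NET_DMA code does.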
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c  149
1 file changed, 4 insertions(+), 145 deletions(-)
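For exclusive use (the dmatest and atmel-mci conversions above), the series instead adds dma_request_channel(); net/core does not use it, but for contrast, a sketch of that half of the new API (the my_grab_private_chan name is a placeholder; the NULL filter function and parameter accept any matching channel):

#include <linux/dmaengine.h>

static struct dma_chan *my_grab_private_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Exclusive allocation: the channel is marked DMA_PRIVATE and
	 * hidden from dma_find_channel() users until the owner calls
	 * dma_release_channel().  Returns NULL if nothing matches. */
	return dma_request_channel(mask, NULL, NULL);
}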
diff --git a/net/core/dev.c b/net/core/dev.c
index bab8bcedd62e..5f736f1ceeae 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-	struct dma_client client;
-	spinlock_t lock;
-	cpumask_t channel_mask;
-	struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state);
-
-static struct net_dma net_dma = {
-	.client = {
-		.event_callback = netdev_dma_event,
-	},
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
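Nothing replaces these declarations in dev.c: the per-client channel array and event callback move into the dmaengine core, which maintains a per-CPU channel table that dma_find_channel() reads. Roughly, per the "centralize channel allocation" commit in this merge (a sketch of the core's internals, not a verbatim copy):

/* One entry per (capability, cpu), maintained by the dmaengine core
 * as channels come and go -- this supersedes net_dma_rebalance(). */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	struct dma_chan *chan;
	int cpu;

	cpu = get_cpu();
	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
	put_cpu();

	return chan;
}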
@@ -2754,14 +2735,7 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 
 	return;
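The open-coded walk over net_dma's channel mask collapses into one call: dma_issue_pending_all() (from "provide a common 'issue_pending_all' implementation" above) flushes pending descriptors on every public channel in the system. Approximately, with list names and locking as assumptions about the dmaengine core of this era:

/* Hedged sketch of dma_issue_pending_all(): kick every channel of
 * every publicly visible DMA device. */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		/* Channels claimed for exclusive use are skipped. */
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				chan->device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}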
@@ -4952,122 +4926,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes.  The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state)
-{
-	int i, found = 0, pos = -1;
-	struct net_dma *net_dma =
-		container_of(client, struct net_dma, client);
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	spin_lock(&net_dma->lock);
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				break;
-			} else if (net_dma->channels[i] == NULL && pos < 0)
-				pos = i;
-
-		if (!found && pos >= 0) {
-			ack = DMA_ACK;
-			net_dma->channels[pos] = chan;
-			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	case DMA_RESOURCE_REMOVED:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				pos = i;
-				break;
-			}
-
-		if (found) {
-			ack = DMA_ACK;
-			cpu_clear(pos, net_dma->channel_mask);
-			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	default:
-		break;
-	}
-	spin_unlock(&net_dma->lock);
-
-	return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-		GFP_KERNEL);
-	if (unlikely(!net_dma.channels)) {
-		printk(KERN_NOTICE
-		       "netdev_dma: no memory for net_dma.channels\n");
-		return -ENOMEM;
-	}
-	spin_lock_init(&net_dma.lock);
-	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-	dma_async_client_register(&net_dma.client);
-	dma_async_client_chan_request(&net_dma.client);
-	return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
 
 /**
  * netdev_increment_features - increment feature set by one
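With the client, event callback, and rebalance logic gone, code that consumed the per-CPU softnet_data.net_dma pointer switches to on-demand lookup ("net_dma: convert to dma_find_channel" above). A sketch of the call-site shape in the TCP receive path, reconstructed from that commit rather than quoted from this diff:

/* Before: a channel cached per CPU by the rebalance code deleted above.
 *	tp->ucopy.dma_chan = get_softnet_dma();
 *
 * After: ask the dmaengine core, which keeps its own per-CPU table. */
tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);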
@@ -5287,14 +5145,15 @@ static int __init net_dev_init(void)
 	if (register_pernet_device(&default_device_ops))
 		goto out;
 
-	netdev_dma_register();
-
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
 	hotcpu_notifier(dev_cpu_callback, 0);
 	dst_init();
 	dev_mcast_init();
+#ifdef CONFIG_NET_DMA
+	dmaengine_get();
+#endif
 	rc = 0;
 out:
 	return rc;