path: root/net/core
author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-13 13:52:27 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-13 13:52:27 -0400
commit    e030dbf91a87da7e8be3be3ca781558695bea683 (patch)
tree      4ff2e01621a888be4098ca48c404775e56a55a0d /net/core
parent    12a22960549979c10a95cc97f8ec63b461c55692 (diff)
parent    3039f0735a280b54c7364fbfe6a9287f7f0b510a (diff)
Merge branch 'ioat-md-accel-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop
* 'ioat-md-accel-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop: (28 commits)
  ioatdma: add the unisys "i/oat" pci vendor/device id
  ARM: Add drivers/dma to arch/arm/Kconfig
  iop3xx: surface the iop3xx DMA and AAU units to the iop-adma driver
  iop13xx: surface the iop13xx adma units to the iop-adma driver
  dmaengine: driver for the iop32x, iop33x, and iop13xx raid engines
  md: remove raid5 compute_block and compute_parity5
  md: handle_stripe5 - request io processing in raid5_run_ops
  md: handle_stripe5 - add request/completion logic for async expand ops
  md: handle_stripe5 - add request/completion logic for async read ops
  md: handle_stripe5 - add request/completion logic for async check ops
  md: handle_stripe5 - add request/completion logic for async compute ops
  md: handle_stripe5 - add request/completion logic for async write ops
  md: common infrastructure for running operations with raid5_run_ops
  md: raid5_run_ops - run stripe operations outside sh->lock
  raid5: replace custom debug PRINTKs with standard pr_debug
  raid5: refactor handle_stripe5 and handle_stripe6 (v3)
  async_tx: add the async_tx api
  xor: make 'xor_blocks' a library routine for use with async_tx
  dmaengine: make clients responsible for managing channels
  dmaengine: refactor dmaengine around dma_async_tx_descriptor
  ...
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c | 112
1 file changed, 78 insertions(+), 34 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4221dcda88d7..96443055324e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -151,9 +151,22 @@ static struct list_head ptype_base[16] __read_mostly; /* 16 way hashed list */
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
 #ifdef CONFIG_NET_DMA
-static struct dma_client *net_dma_client;
-static unsigned int net_dma_count;
-static spinlock_t net_dma_event_lock;
+struct net_dma {
+	struct dma_client client;
+	spinlock_t lock;
+	cpumask_t channel_mask;
+	struct dma_chan *channels[NR_CPUS];
+};
+
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+	enum dma_state state);
+
+static struct net_dma net_dma = {
+	.client = {
+		.event_callback = netdev_dma_event,
+	},
+};
 #endif
 
 /*
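
A note on the hunk above: the three file-scope globals become one struct net_dma that embeds its struct dma_client, so the event callback can recover the enclosing state via container_of() instead of reaching for globals. A minimal userspace sketch of that embed-and-recover pattern (the types and names here are simplified stand-ins, not the kernel's):

	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative stand-in for the kernel's struct dma_client. */
	struct dma_client {
		void (*event_callback)(struct dma_client *);
	};

	struct net_dma_like {
		struct dma_client client;	/* embedded, as in struct net_dma */
		int channel_count;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static void on_event(struct dma_client *client)
	{
		/* Recover the outer object from the embedded member. */
		struct net_dma_like *nd =
			container_of(client, struct net_dma_like, client);
		printf("channels: %d\n", nd->channel_count);
	}

	int main(void)
	{
		struct net_dma_like nd = {
			.client = { .event_callback = on_event },
			.channel_count = 2,
		};
		nd.client.event_callback(&nd.client);
		return 0;
	}
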
@@ -2022,12 +2035,13 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (net_dma_client) {
-		struct dma_chan *chan;
-		rcu_read_lock();
-		list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
-			dma_async_memcpy_issue_pending(chan);
-		rcu_read_unlock();
+	if (!cpus_empty(net_dma.channel_mask)) {
+		int chan_idx;
+		for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+			struct dma_chan *chan = net_dma.channels[chan_idx];
+			if (chan)
+				dma_async_memcpy_issue_pending(chan);
+		}
 	}
 #endif
 	return;
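
The hunk above swaps an RCU-protected list walk for a read of a fixed table: the softirq fast path now only tests bits in channel_mask and indexes channels[], which is why the rcu_read_lock()/rcu_read_unlock() pair can go away (the table itself is updated under net_dma.lock at event time). A standalone sketch of that bitmask-indexed walk (the mask width, slot contents, and names are illustrative, not kernel API):

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_SLOTS 64

	/* Model of the channel table: one slot per bit in the mask. */
	static const char *channels[MAX_SLOTS] = { [0] = "chan-A", [3] = "chan-B" };
	static uint64_t channel_mask = (1ULL << 0) | (1ULL << 3);

	int main(void)
	{
		/* Walk only the set bits, like for_each_cpu_mask(). */
		for (int idx = 0; idx < MAX_SLOTS; idx++) {
			if (!(channel_mask & (1ULL << idx)))
				continue;
			if (channels[idx])	/* same guard as the kernel loop */
				printf("issue pending on %s\n", channels[idx]);
		}
		return 0;
	}
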
@@ -3775,12 +3789,13 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  * This is called when the number of channels allocated to the net_dma_client
  * changes. The net_dma_client tries to have one DMA channel per CPU.
  */
-static void net_dma_rebalance(void)
+
+static void net_dma_rebalance(struct net_dma *net_dma)
 {
-	unsigned int cpu, i, n;
+	unsigned int cpu, i, n, chan_idx;
 	struct dma_chan *chan;
 
-	if (net_dma_count == 0) {
+	if (cpus_empty(net_dma->channel_mask)) {
 		for_each_online_cpu(cpu)
 			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
 		return;
@@ -3789,10 +3804,12 @@ static void net_dma_rebalance(void)
 	i = 0;
 	cpu = first_cpu(cpu_online_map);
 
-	rcu_read_lock();
-	list_for_each_entry(chan, &net_dma_client->channels, client_node) {
-		n = ((num_online_cpus() / net_dma_count)
-			+ (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
+	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+		chan = net_dma->channels[chan_idx];
+
+		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
+		   + (i < (num_online_cpus() %
+			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
 
 		while(n) {
 			per_cpu(softnet_data, cpu).net_dma = chan;
@@ -3801,7 +3818,6 @@ static void net_dma_rebalance(void)
 		}
 		i++;
 	}
-	rcu_read_unlock();
 }
 
 /**
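
On the arithmetic in net_dma_rebalance() above: each channel serves num_online_cpus() / cpus_weight(channel_mask) CPUs, and the first num_online_cpus() % cpus_weight(channel_mask) channels take one extra, so every online CPU is assigned exactly once. A standalone sketch of the share computation (the CPU and channel counts are made up for illustration):

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative numbers: 6 online CPUs served by 4 DMA channels. */
		unsigned int cpus = 6, chans = 4;

		for (unsigned int i = 0; i < chans; i++) {
			/* Same formula as the kernel loop: base share plus one
			 * extra CPU for the first (cpus % chans) channels. */
			unsigned int n = cpus / chans + (i < cpus % chans ? 1 : 0);
			printf("channel %u serves %u cpu(s)\n", i, n);
		}
		/* Prints 2, 2, 1, 1 -- summing to all 6 CPUs. */
		return 0;
	}
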
@@ -3810,23 +3826,53 @@ static void net_dma_rebalance(void)
  * @chan: DMA channel for the event
  * @event: event type
  */
-static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_event event)
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+	enum dma_state state)
 {
-	spin_lock(&net_dma_event_lock);
-	switch (event) {
-	case DMA_RESOURCE_ADDED:
-		net_dma_count++;
-		net_dma_rebalance();
+	int i, found = 0, pos = -1;
+	struct net_dma *net_dma =
+		container_of(client, struct net_dma, client);
+	enum dma_state_client ack = DMA_DUP; /* default: take no action */
+
+	spin_lock(&net_dma->lock);
+	switch (state) {
+	case DMA_RESOURCE_AVAILABLE:
+		for (i = 0; i < NR_CPUS; i++)
+			if (net_dma->channels[i] == chan) {
+				found = 1;
+				break;
+			} else if (net_dma->channels[i] == NULL && pos < 0)
+				pos = i;
+
+		if (!found && pos >= 0) {
+			ack = DMA_ACK;
+			net_dma->channels[pos] = chan;
+			cpu_set(pos, net_dma->channel_mask);
+			net_dma_rebalance(net_dma);
+		}
 		break;
 	case DMA_RESOURCE_REMOVED:
-		net_dma_count--;
-		net_dma_rebalance();
+		for (i = 0; i < NR_CPUS; i++)
+			if (net_dma->channels[i] == chan) {
+				found = 1;
+				pos = i;
+				break;
+			}
+
+		if (found) {
+			ack = DMA_ACK;
+			cpu_clear(pos, net_dma->channel_mask);
+			net_dma->channels[i] = NULL;
+			net_dma_rebalance(net_dma);
+		}
 		break;
 	default:
 		break;
 	}
-	spin_unlock(&net_dma_event_lock);
+	spin_unlock(&net_dma->lock);
+
+	return ack;
 }
 
 /**
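
The rewritten callback above is where a client now accepts or declines channels under the new management scheme: returning DMA_ACK claims the channel, while the default DMA_DUP leaves it alone; availability scans channels[] for a free slot, and removal finds and clears the matching slot. A compact userspace model of that claim logic (the enum values, zero-means-free slot encoding, and names are illustrative):

	#include <stdio.h>

	#define SLOTS 4
	enum ack { ACK_DUP, ACK_ACK };	/* stand-ins for DMA_DUP / DMA_ACK */
	static int slots[SLOTS];	/* 0 = free, else a channel id */

	static enum ack claim(int chan)
	{
		int pos = -1;
		for (int i = 0; i < SLOTS; i++) {
			if (slots[i] == chan)
				return ACK_DUP;	/* already tracked: no action */
			if (slots[i] == 0 && pos < 0)
				pos = i;	/* remember first free slot */
		}
		if (pos < 0)
			return ACK_DUP;		/* table full: decline */
		slots[pos] = chan;		/* claim the channel */
		return ACK_ACK;
	}

	int main(void)
	{
		/* Claiming twice yields ACK then DUP: prints "1 0 1". */
		printf("%d %d %d\n", claim(7), claim(7), claim(9));
		return 0;
	}
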
@@ -3834,12 +3880,10 @@ static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
  */
 static int __init netdev_dma_register(void)
 {
-	spin_lock_init(&net_dma_event_lock);
-	net_dma_client = dma_async_client_register(netdev_dma_event);
-	if (net_dma_client == NULL)
-		return -ENOMEM;
-
-	dma_async_client_chan_request(net_dma_client, num_online_cpus());
+	spin_lock_init(&net_dma.lock);
+	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
+	dma_async_client_register(&net_dma.client);
+	dma_async_client_chan_request(&net_dma.client);
 	return 0;
 }
 