path: root/net/core/dev.c
Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 112 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 78 insertions(+), 34 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index ee051bb398a0..835202fb34c4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -151,9 +151,22 @@ static struct list_head ptype_base[16] __read_mostly; /* 16 way hashed list */
 static struct list_head ptype_all __read_mostly;        /* Taps */
 
 #ifdef CONFIG_NET_DMA
-static struct dma_client *net_dma_client;
-static unsigned int net_dma_count;
-static spinlock_t net_dma_event_lock;
+struct net_dma {
+        struct dma_client client;
+        spinlock_t lock;
+        cpumask_t channel_mask;
+        struct dma_chan *channels[NR_CPUS];
+};
+
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+        enum dma_state state);
+
+static struct net_dma net_dma = {
+        .client = {
+                .event_callback = netdev_dma_event,
+        },
+};
 #endif
 
 /*
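[Editor's note] The hunk above trades three loose globals for a single `struct net_dma` that embeds its `struct dma_client`; the event callback can then recover the enclosing state with `container_of()` instead of reaching for a global pointer. A minimal userspace sketch of that embed-and-recover pattern (`struct toy_client`, `struct net_dma_like`, and `on_event` are illustrative names, not kernel types):

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's container_of() macro */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_client {                     /* models struct dma_client */
        void (*event_callback)(struct toy_client *c);
};

struct net_dma_like {                   /* models struct net_dma */
        struct toy_client client;       /* embedded, not pointed-to */
        int nchannels;
};

static void on_event(struct toy_client *c)
{
        /* recover the enclosing object from the embedded member */
        struct net_dma_like *nd = container_of(c, struct net_dma_like, client);
        printf("nchannels = %d\n", nd->nchannels);
}

int main(void)
{
        struct net_dma_like nd = {
                .client = { .event_callback = on_event },
                .nchannels = 4,
        };
        nd.client.event_callback(&nd.client);   /* prints "nchannels = 4" */
        return 0;
}

The same trick lets many clients share one callback signature while each keeps its own private state.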
@@ -2015,12 +2028,13 @@ out:
         * There may not be any more sk_buffs coming right now, so push
         * any pending DMA copies to hardware
         */
-        if (net_dma_client) {
-                struct dma_chan *chan;
-                rcu_read_lock();
-                list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
-                        dma_async_memcpy_issue_pending(chan);
-                rcu_read_unlock();
+        if (!cpus_empty(net_dma.channel_mask)) {
+                int chan_idx;
+                for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+                        struct dma_chan *chan = net_dma.channels[chan_idx];
+                        if (chan)
+                                dma_async_memcpy_issue_pending(chan);
+                }
         }
 #endif
         return;
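[Editor's note] The rewritten flush loop drops the RCU-protected channel list in favor of a cpumask read without a lock plus a per-slot NULL check. A rough userspace model of that walk, with a 64-bit word standing in for `cpumask_t` (the names below are invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* bits 0, 1 and 3 set: channels live in those slots */
        uint64_t channel_mask = 0x0b;
        const char *channels[64] = { "chan0", "chan1", NULL, "chan3" };

        /* models for_each_cpu_mask(): visit only the set bits */
        for (int idx = 0; idx < 64; idx++) {
                if (!(channel_mask & (1ULL << idx)))
                        continue;
                if (channels[idx])      /* same guard as the kernel loop */
                        printf("issue pending on %s\n", channels[idx]);
        }
        return 0;
}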
@@ -3563,12 +3577,13 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  * This is called when the number of channels allocated to the net_dma_client
  * changes.  The net_dma_client tries to have one DMA channel per CPU.
  */
-static void net_dma_rebalance(void)
+
+static void net_dma_rebalance(struct net_dma *net_dma)
 {
-        unsigned int cpu, i, n;
+        unsigned int cpu, i, n, chan_idx;
         struct dma_chan *chan;
 
-        if (net_dma_count == 0) {
+        if (cpus_empty(net_dma->channel_mask)) {
                 for_each_online_cpu(cpu)
                         rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
                 return;
@@ -3577,10 +3592,12 @@ static void net_dma_rebalance(void)
         i = 0;
         cpu = first_cpu(cpu_online_map);
 
-        rcu_read_lock();
-        list_for_each_entry(chan, &net_dma_client->channels, client_node) {
-                n = ((num_online_cpus() / net_dma_count)
-                        + (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
+        for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+                chan = net_dma->channels[chan_idx];
+
+                n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
+                   + (i < (num_online_cpus() %
+                        cpus_weight(net_dma->channel_mask)) ? 1 : 0));
 
                 while(n) {
                         per_cpu(softnet_data, cpu).net_dma = chan;
@@ -3589,7 +3606,6 @@ static void net_dma_rebalance(void)
                 }
                 i++;
         }
-        rcu_read_unlock();
 }
 
 /**
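[Editor's note] The `n = ...` expression above spreads `num_online_cpus()` CPUs over `cpus_weight(channel_mask)` channels so that per-channel counts differ by at most one: the first `cpus % chans` channels each absorb one extra CPU. A standalone check of that arithmetic with toy sizes (8 CPUs, 3 channels):

#include <stdio.h>

int main(void)
{
        unsigned int num_cpus = 8, num_chans = 3;       /* example sizes */

        for (unsigned int i = 0; i < num_chans; i++) {
                /* same formula as net_dma_rebalance() */
                unsigned int n = num_cpus / num_chans
                        + (i < num_cpus % num_chans ? 1 : 0);
                printf("channel %u serves %u CPUs\n", i, n);
        }
        return 0;       /* prints 3, 3, 2 -- totals 8 */
}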
@@ -3598,23 +3614,53 @@ static void net_dma_rebalance(void)
  * @chan: DMA channel for the event
  * @event: event type
  */
-static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-        enum dma_event event)
-{
-        spin_lock(&net_dma_event_lock);
-        switch (event) {
-        case DMA_RESOURCE_ADDED:
-                net_dma_count++;
-                net_dma_rebalance();
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+        enum dma_state state)
+{
+        int i, found = 0, pos = -1;
+        struct net_dma *net_dma =
+                container_of(client, struct net_dma, client);
+        enum dma_state_client ack = DMA_DUP; /* default: take no action */
+
+        spin_lock(&net_dma->lock);
+        switch (state) {
+        case DMA_RESOURCE_AVAILABLE:
+                for (i = 0; i < NR_CPUS; i++)
+                        if (net_dma->channels[i] == chan) {
+                                found = 1;
+                                break;
+                        } else if (net_dma->channels[i] == NULL && pos < 0)
+                                pos = i;
+
+                if (!found && pos >= 0) {
+                        ack = DMA_ACK;
+                        net_dma->channels[pos] = chan;
+                        cpu_set(pos, net_dma->channel_mask);
+                        net_dma_rebalance(net_dma);
+                }
                 break;
         case DMA_RESOURCE_REMOVED:
-                net_dma_count--;
-                net_dma_rebalance();
+                for (i = 0; i < NR_CPUS; i++)
+                        if (net_dma->channels[i] == chan) {
+                                found = 1;
+                                pos = i;
+                                break;
+                        }
+
+                if (found) {
+                        ack = DMA_ACK;
+                        cpu_clear(pos, net_dma->channel_mask);
+                        net_dma->channels[i] = NULL;
+                        net_dma_rebalance(net_dma);
+                }
                 break;
         default:
                 break;
         }
-        spin_unlock(&net_dma_event_lock);
+        spin_unlock(&net_dma->lock);
+
+        return ack;
 }
 
 /**
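[Editor's note] The DMA_RESOURCE_AVAILABLE arm above is a find-or-claim scan: if the channel is already tracked the client answers DMA_DUP (take no action, per the comment in the hunk); otherwise it records the channel in the first free slot and answers DMA_ACK before rebalancing. A compact userspace model of just that decision (`enum ack` and `add_channel` are illustrative stand-ins, not dmaengine API):

#include <stdio.h>

#define NSLOTS 8

enum ack { ACK_DUP, ACK_ACK };  /* stand-ins for DMA_DUP / DMA_ACK */

static enum ack add_channel(void *slots[NSLOTS], void *chan)
{
        int found = 0, pos = -1;

        /* one pass: look for chan, remembering the first free slot */
        for (int i = 0; i < NSLOTS; i++)
                if (slots[i] == chan) {
                        found = 1;
                        break;
                } else if (slots[i] == NULL && pos < 0)
                        pos = i;

        if (!found && pos >= 0) {
                slots[pos] = chan;      /* claim the channel */
                return ACK_ACK;
        }
        return ACK_DUP;                 /* duplicate or no free slot */
}

int main(void)
{
        void *slots[NSLOTS] = { 0 };
        int a, b;

        printf("%d\n", add_channel(slots, &a)); /* 1: claimed */
        printf("%d\n", add_channel(slots, &a)); /* 0: already tracked */
        printf("%d\n", add_channel(slots, &b)); /* 1: claimed */
        return 0;
}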
@@ -3622,12 +3668,10 @@ static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
  */
 static int __init netdev_dma_register(void)
 {
-        spin_lock_init(&net_dma_event_lock);
-        net_dma_client = dma_async_client_register(netdev_dma_event);
-        if (net_dma_client == NULL)
-                return -ENOMEM;
-
-        dma_async_client_chan_request(net_dma_client, num_online_cpus());
+        spin_lock_init(&net_dma.lock);
+        dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
+        dma_async_client_register(&net_dma.client);
+        dma_async_client_chan_request(&net_dma.client);
         return 0;
 }
 
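[Editor's note] Registration now declares a DMA_MEMCPY capability, registers the embedded client, and asks the core for channels; grants arrive asynchronously through `netdev_dma_event()` rather than as a return value from registration, which is why the old -ENOMEM error path disappears. A toy model of that callback-driven handshake (everything below is invented for illustration; it mimics the shape of the flow, not the real dmaengine signatures):

#include <stdio.h>

enum cap { CAP_MEMCPY = 1 };

struct toy_dma_client {
        unsigned int cap_mask;
        void (*event_callback)(struct toy_dma_client *c, int chan_id);
};

static void got_channel(struct toy_dma_client *c, int chan_id)
{
        (void)c;
        printf("granted channel %d\n", chan_id);        /* async grant */
}

/* models the core: hands channels to a matching, registered client */
static void core_grant_channels(struct toy_dma_client *c, int nchans)
{
        if (c->cap_mask & CAP_MEMCPY)
                for (int i = 0; i < nchans; i++)
                        c->event_callback(c, i);
}

int main(void)
{
        struct toy_dma_client client = { .event_callback = got_channel };

        client.cap_mask |= CAP_MEMCPY;   /* dma_cap_set() analogue */
        core_grant_channels(&client, 2); /* register + chan_request analogue */
        return 0;
}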