author Chris Leech <christopher.leech@intel.com> 2006-06-18 00:24:58 -0400
committer David S. Miller <davem@davemloft.net> 2006-06-18 00:24:58 -0400
commit db21733488f84a596faaad0d05430b3f51804692
tree a2c1f6d39ce27d2e86b395f2bf536c1ab7396411 /net/core
parent 57c651f74cd8383df10a648e677902849de1bc0b
[I/OAT]: Setup the networking subsystem as a DMA client

Attempts to allocate per-CPU DMA channels.

Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
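As a side note (not part of this patch): once these per-CPU channels are populated, a consumer looks them up under RCU and queues copies through the 2.6.17-era async memcpy API. A minimal sketch, assuming the softnet_data.net_dma field added by the companion I/OAT patches; net_dma_copy_example() and its buffers are hypothetical:

    #include <linux/dmaengine.h>
    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>
    #include <linux/string.h>

    /* Hypothetical helper: queue an async copy on this CPU's DMA channel,
     * falling back to a plain memcpy when no channel has been assigned.
     */
    static void net_dma_copy_example(void *dst, void *src, size_t len)
    {
            struct dma_chan *chan;

            rcu_read_lock();
            chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
            if (chan) {
                    /* posts a descriptor; nothing runs until issue_pending */
                    dma_async_memcpy_buf_to_buf(chan, dst, src, len);
                    dma_async_memcpy_issue_pending(chan);
            } else {
                    memcpy(dst, src, len);
            }
            rcu_read_unlock();
    }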
Diffstat (limited to 'net/core')
 net/core/dev.c | 104
 1 file changed, 104 insertions(+), 0 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4fba549caf29..6bfa78c66c25 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -115,6 +115,7 @@
 #include <net/iw_handler.h>
 #include <asm/current.h>
 #include <linux/audit.h>
+#include <linux/dmaengine.h>
 
 /*
  * The list of packet types we will receive (as opposed to discard)
@@ -148,6 +149,12 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[16];	/* 16 way hashed list */
 static struct list_head ptype_all;		/* Taps */
 
+#ifdef CONFIG_NET_DMA
+static struct dma_client *net_dma_client;
+static unsigned int net_dma_count;
+static spinlock_t net_dma_event_lock;
+#endif
+
 /*
  * The @dev_base list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -1846,6 +1853,19 @@ static void net_rx_action(struct softirq_action *h)
 		}
 	}
 out:
+#ifdef CONFIG_NET_DMA
+	/*
+	 * There may not be any more sk_buffs coming right now, so push
+	 * any pending DMA copies to hardware
+	 */
+	if (net_dma_client) {
+		struct dma_chan *chan;
+		rcu_read_lock();
+		list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
+			dma_async_memcpy_issue_pending(chan);
+		rcu_read_unlock();
+	}
+#endif
 	local_irq_enable();
 	return;
 
@@ -3300,6 +3320,88 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
+#ifdef CONFIG_NET_DMA
+/**
+ * net_dma_rebalance - redistribute DMA channels among the online CPUs
+ * This is called when the number of channels allocated to the net_dma_client
+ * changes.  The net_dma_client tries to have one DMA channel per CPU.
+ */
+static void net_dma_rebalance(void)
+{
+	unsigned int cpu, i, n;
+	struct dma_chan *chan;
+
+	lock_cpu_hotplug();
+
+	if (net_dma_count == 0) {
+		for_each_online_cpu(cpu)
+			rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL);
+		unlock_cpu_hotplug();
+		return;
+	}
+
+	i = 0;
+	cpu = first_cpu(cpu_online_map);
+
+	rcu_read_lock();
+	list_for_each_entry(chan, &net_dma_client->channels, client_node) {
+		n = ((num_online_cpus() / net_dma_count)
+		   + (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
+
+		while (n) {
+			per_cpu(softnet_data.net_dma, cpu) = chan;
+			cpu = next_cpu(cpu, cpu_online_map);
+			n--;
+		}
+		i++;
+	}
+	rcu_read_unlock();
+
+	unlock_cpu_hotplug();
+}
+
+/**
+ * netdev_dma_event - event callback for the net_dma_client
+ * @client: should always be net_dma_client
+ * @chan: DMA channel that was added or removed
+ * @event: the resource event type (DMA_RESOURCE_ADDED or DMA_RESOURCE_REMOVED)
+ */
+static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+	enum dma_event event)
+{
+	spin_lock(&net_dma_event_lock);
+	switch (event) {
+	case DMA_RESOURCE_ADDED:
+		net_dma_count++;
+		net_dma_rebalance();
+		break;
+	case DMA_RESOURCE_REMOVED:
+		net_dma_count--;
+		net_dma_rebalance();
+		break;
+	default:
+		break;
+	}
+	spin_unlock(&net_dma_event_lock);
+}
+
+/**
+ * netdev_dma_register - register the networking subsystem as a DMA client
+ */
+static int __init netdev_dma_register(void)
+{
+	spin_lock_init(&net_dma_event_lock);
+	net_dma_client = dma_async_client_register(netdev_dma_event);
+	if (net_dma_client == NULL)
+		return -ENOMEM;
+
+	dma_async_client_chan_request(net_dma_client, num_online_cpus());
+	return 0;
+}
+
+#else
+static int __init netdev_dma_register(void) { return -ENODEV; }
+#endif /* CONFIG_NET_DMA */
 
 /*
  * Initialize the DEV module. At boot time this walks the device list and
@@ -3353,6 +3455,8 @@ static int __init net_dev_init(void)
 		atomic_set(&queue->backlog_dev.refcnt, 1);
 	}
 
+	netdev_dma_register();
+
 	dev_boot_phase = 0;
 
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
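For reference, the CPU-to-channel spread computed in net_dma_rebalance() above can be checked with a standalone userspace sketch (illustration only; the counts are made-up example values). Each of the chans channels serves cpus / chans CPUs, and the first cpus % chans channels take one extra, so every online CPU ends up assigned a channel:

    #include <stdio.h>

    int main(void)
    {
            unsigned int cpus = 8, chans = 3;       /* example sizes */
            unsigned int cpu = 0, i;

            for (i = 0; i < chans; i++) {
                    /* same arithmetic as net_dma_rebalance() */
                    unsigned int n = cpus / chans + (i < cpus % chans ? 1 : 0);
                    while (n--)
                            printf("cpu %u -> chan %u\n", cpu++, i);
            }
            return 0;       /* 8 CPUs over 3 channels: splits 3/3/2 */
    }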