author     Dan Williams <dan.j.williams@intel.com>   2009-01-06 13:38:17 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2009-01-06 13:38:17 -0500
commit     aa1e6f1a385eb2b04171ec841f3b760091e4a8ee (patch)
tree       1401e7f1e867e5d4a769b648605e0317d25d5ccb
parent     209b84a88fe81341b4d8d465acc4a67cb7c3feb3 (diff)
dmaengine: kill struct dma_client and supporting infrastructure
All users have been converted to either the general-purpose allocator,
dma_find_channel, or dma_request_channel.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
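For reference, a sketch of how a former dma_client user converts to the new
interface. This is illustrative only — the example_* functions below are
hypothetical and do not appear in this patch. Exclusive users claim a channel
with dma_request_channel() and return it with dma_release_channel();
opportunistic users bracket lookups with dmaengine_get()/dmaengine_put() and
fetch a channel with dma_find_channel().

    /* Hypothetical conversion sketch -- not part of this patch. */
    #include <linux/dmaengine.h>

    /* Option 1: exclusively claim a channel satisfying a capability
     * mask.  A NULL filter function accepts any matching channel.
     */
    static struct dma_chan *example_grab_memcpy_chan(void)
    {
    	dma_cap_mask_t mask;

    	dma_cap_zero(mask);
    	dma_cap_set(DMA_MEMCPY, mask);
    	return dma_request_channel(mask, NULL, NULL);
    }

    /* Option 2: opportunistic use of the general-purpose allocator.
     * dmaengine_get() pins the channel tables; dma_find_channel()
     * returns this cpu's channel for the given operation type, or
     * NULL if none is available.
     */
    static void example_opportunistic_copy(void *dest, void *src, size_t len)
    {
    	struct dma_chan *chan;

    	dmaengine_get();
    	chan = dma_find_channel(DMA_MEMCPY);
    	if (chan)
    		dma_async_memcpy_buf_to_buf(chan, dest, src, len);
    	dmaengine_put();
    }

Note that dma_find_channel() takes no reference of its own, which is why it is
only safe between dmaengine_get() and dmaengine_put(); a channel obtained from
dma_request_channel() is privately owned until dma_release_channel().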
-rw-r--r--  drivers/dma/dmaengine.c       74
-rw-r--r--  drivers/dma/dw_dmac.c          3
-rw-r--r--  drivers/dma/fsldma.c           3
-rw-r--r--  drivers/dma/ioat_dma.c         5
-rw-r--r--  drivers/dma/iop-adma.c         7
-rw-r--r--  drivers/dma/mv_xor.c           7
-rw-r--r--  drivers/mmc/host/atmel-mci.c   1
-rw-r--r--  include/linux/dmaengine.h     50
-rw-r--r--  net/core/dev.c                99
9 files changed, 17 insertions, 232 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3f1849b7f5ef..9fc91f973a9a 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -31,15 +31,12 @@
  *
  * LOCKING:
  *
- * The subsystem keeps two global lists, dma_device_list and dma_client_list.
- * Both of these are protected by a mutex, dma_list_mutex.
+ * The subsystem keeps a global list of dma_device structs it is protected by a
+ * mutex, dma_list_mutex.
  *
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client is responsible for keeping track of the channels it uses. See
- * the definition of dma_event_callback in dmaengine.h.
- *
  * Each device has a kref, which is initialized to 1 when the device is
  * registered. A kref_get is done for each device registered. When the
  * device is released, the corresponding kref_put is done in the release
@@ -74,7 +71,6 @@
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
-static LIST_HEAD(dma_client_list);
 static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
@@ -189,7 +185,7 @@ static int dma_chan_get(struct dma_chan *chan)
 
 	/* allocate upon first client reference */
 	if (chan->client_count == 1 && err == 0) {
-		int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);
+		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
 
 		if (desc_cnt < 0) {
 			err = desc_cnt;
@@ -218,40 +214,6 @@ static void dma_chan_put(struct dma_chan *chan)
 	chan->device->device_free_chan_resources(chan);
 }
 
-/**
- * dma_client_chan_alloc - try to allocate channels to a client
- * @client: &dma_client
- *
- * Called with dma_list_mutex held.
- */
-static void dma_client_chan_alloc(struct dma_client *client)
-{
-	struct dma_device *device;
-	struct dma_chan *chan;
-	enum dma_state_client ack;
-
-	/* Find a channel */
-	list_for_each_entry(device, &dma_device_list, global_node) {
-		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
-			continue;
-		if (!dma_device_satisfies_mask(device, client->cap_mask))
-			continue;
-
-		list_for_each_entry(chan, &device->channels, device_node) {
-			if (!chan->client_count)
-				continue;
-			ack = client->event_callback(client, chan,
-						     DMA_RESOURCE_AVAILABLE);
-
-			/* we are done once this client rejects
-			 * an available resource
-			 */
-			if (ack == DMA_NAK)
-				return;
-		}
-	}
-}
-
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
 	enum dma_status status;
@@ -585,21 +547,6 @@ void dma_release_channel(struct dma_chan *chan)
 EXPORT_SYMBOL_GPL(dma_release_channel);
 
 /**
- * dma_chans_notify_available - broadcast available channels to the clients
- */
-static void dma_clients_notify_available(void)
-{
-	struct dma_client *client;
-
-	mutex_lock(&dma_list_mutex);
-
-	list_for_each_entry(client, &dma_client_list, global_node)
-		dma_client_chan_alloc(client);
-
-	mutex_unlock(&dma_list_mutex);
-}
-
-/**
  * dmaengine_get - register interest in dma_channels
  */
 void dmaengine_get(void)
@@ -660,19 +607,6 @@ void dmaengine_put(void)
 EXPORT_SYMBOL(dmaengine_put);
 
 /**
- * dma_async_client_chan_request - send all available channels to the
- * client that satisfy the capability mask
- * @client - requester
- */
-void dma_async_client_chan_request(struct dma_client *client)
-{
-	mutex_lock(&dma_list_mutex);
-	dma_client_chan_alloc(client);
-	mutex_unlock(&dma_list_mutex);
-}
-EXPORT_SYMBOL(dma_async_client_chan_request);
-
-/**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
  */
@@ -765,8 +699,6 @@ int dma_async_device_register(struct dma_device *device)
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
-	dma_clients_notify_available();
-
 	return 0;
 
 err_out:
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index dbd50804e5d2..a29dda8f801b 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -758,8 +758,7 @@ static void dwc_issue_pending(struct dma_chan *chan)
 	spin_unlock_bh(&dwc->lock);
 }
 
-static int dwc_alloc_chan_resources(struct dma_chan *chan,
-		struct dma_client *client)
+static int dwc_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b95dcce447e..46e0128929a0 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,8 +366,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
  *
  * Return - The number of descriptors allocated.
  */
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
-					struct dma_client *client)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
 
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 6607fdd00b1c..e42e1aea0f18 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -734,8 +734,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
  * @chan: the channel to be filled out
  */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
-					 struct dma_client *client)
+static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *desc;
@@ -1381,7 +1380,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
 		dev_err(&device->pdev->dev,
 			"selftest cannot allocate chan resource\n");
 		err = -ENODEV;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index be9ea9f88805..c74ac9eb009a 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -470,8 +470,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
  * greater than 2x the number slots needed to satisfy a device->max_xor
  * request.
  * */
-static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
-					 struct dma_client *client)
+static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 {
 	char *hw_desc;
 	int idx;
@@ -865,7 +864,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -963,7 +962,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 3f46df3390c7..fbaa2f6225e2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -606,8 +606,7 @@ submit_done:
 }
 
 /* returns the number of allocated descriptors */
-static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
-				       struct dma_client *client)
+static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 {
 	char *hw_desc;
 	int idx;
@@ -957,7 +956,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -1052,7 +1051,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 7a34118507db..4b567a0408e1 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -55,7 +55,6 @@ enum atmel_mci_state {
 
 struct atmel_mci_dma {
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-	struct dma_client		client;
 	struct dma_chan			*chan;
 	struct dma_async_tx_descriptor	*data_desc;
 #endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 37d95db156d3..db050e97d2b4 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -29,20 +29,6 @@
 #include <linux/dma-mapping.h>
 
 /**
- * enum dma_state - resource PNP/power management state
- * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
- * @DMA_RESOURCE_RESUME: DMA device returning to full power
- * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
- * @DMA_RESOURCE_REMOVED: DMA device removed from the system
- */
-enum dma_state {
-	DMA_RESOURCE_SUSPEND,
-	DMA_RESOURCE_RESUME,
-	DMA_RESOURCE_AVAILABLE,
-	DMA_RESOURCE_REMOVED,
-};
-
-/**
  * enum dma_state_client - state of the channel in the client
  * @DMA_ACK: client would like to use, or was using this channel
  * @DMA_DUP: client has already seen this channel, or is not using this channel
@@ -170,23 +156,6 @@ struct dma_chan {
 
 void dma_chan_cleanup(struct kref *kref);
 
-/*
- * typedef dma_event_callback - function pointer to a DMA event callback
- * For each channel added to the system this routine is called for each client.
- * If the client would like to use the channel it returns '1' to signal (ack)
- * the dmaengine core to take out a reference on the channel and its
- * corresponding device. A client must not 'ack' an available channel more
- * than once. When a channel is removed all clients are notified. If a client
- * is using the channel it must 'ack' the removal. A client must not 'ack' a
- * removed channel more than once.
- * @client - 'this' pointer for the client context
- * @chan - channel to be acted upon
- * @state - available or removed
- */
-struct dma_client;
-typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
-		struct dma_chan *chan, enum dma_state state);
-
 /**
  * typedef dma_filter_fn - callback filter for dma_request_channel
  * @chan: channel to be reviewed
@@ -199,21 +168,6 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
  */
 typedef enum dma_state_client (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
-/**
- * struct dma_client - info on the entity making use of DMA services
- * @event_callback: func ptr to call when something happens
- * @cap_mask: only return channels that satisfy the requested capabilities
- *	a value of zero corresponds to any capability
- * @slave: data for preparing slave transfer. Must be non-NULL iff the
- *	DMA_SLAVE capability is requested.
- * @global_node: list_head for global dma_client_list
- */
-struct dma_client {
-	dma_event_callback	event_callback;
-	dma_cap_mask_t		cap_mask;
-	struct list_head	global_node;
-};
-
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
@@ -285,8 +239,7 @@ struct dma_device {
 	int dev_id;
 	struct device *dev;
 
-	int (*device_alloc_chan_resources)(struct dma_chan *chan,
-			struct dma_client *client);
+	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -320,7 +273,6 @@ struct dma_device {
 
 void dmaengine_get(void);
 void dmaengine_put(void);
-void dma_async_client_chan_request(struct dma_client *client);
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
diff --git a/net/core/dev.c b/net/core/dev.c
index 7596fc9403c8..ac55d84d6255 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -167,25 +167,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-	struct dma_client client;
-	spinlock_t lock;
-	cpumask_t channel_mask;
-	struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state);
-
-static struct net_dma net_dma = {
-	.client = {
-		.event_callback = netdev_dma_event,
-	},
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -4826,81 +4807,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 		return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state)
-{
-	int i, found = 0, pos = -1;
-	struct net_dma *net_dma =
-		container_of(client, struct net_dma, client);
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	spin_lock(&net_dma->lock);
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				break;
-			} else if (net_dma->channels[i] == NULL && pos < 0)
-				pos = i;
-
-		if (!found && pos >= 0) {
-			ack = DMA_ACK;
-			net_dma->channels[pos] = chan;
-			cpu_set(pos, net_dma->channel_mask);
-		}
-		break;
-	case DMA_RESOURCE_REMOVED:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				pos = i;
-				break;
-			}
-
-		if (found) {
-			ack = DMA_ACK;
-			cpu_clear(pos, net_dma->channel_mask);
-			net_dma->channels[i] = NULL;
-		}
-		break;
-	default:
-		break;
-	}
-	spin_unlock(&net_dma->lock);
-
-	return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-		GFP_KERNEL);
-	if (unlikely(!net_dma.channels)) {
-		printk(KERN_NOTICE
-			"netdev_dma: no memory for net_dma.channels\n");
-		return -ENOMEM;
-	}
-	spin_lock_init(&net_dma.lock);
-	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-	dmaengine_get();
-	return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
 
 /**
  * netdev_increment_features - increment feature set by one
@@ -5120,14 +5026,15 @@ static int __init net_dev_init(void)
 	if (register_pernet_device(&default_device_ops))
 		goto out;
 
-	netdev_dma_register();
-
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
 	hotcpu_notifier(dev_cpu_callback, 0);
 	dst_init();
 	dev_mcast_init();
+	#ifdef CONFIG_NET_DMA
+	dmaengine_get();
+	#endif
 	rc = 0;
 out:
 	return rc;