-rw-r--r--  drivers/dma/dmaengine.c    | 217
-rw-r--r--  drivers/dma/ioatdma.c      |   1
-rw-r--r--  drivers/dma/ioatdma.h      |   3
-rw-r--r--  include/linux/dmaengine.h  |  58
-rw-r--r--  net/core/dev.c             | 112
5 files changed, 224 insertions(+), 167 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 404cc7b6e705..82489923af09 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -37,11 +37,11 @@
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client has a channels list, it's only modified under the client->lock
- * and in an RCU callback, so it's safe to read under rcu_read_lock().
+ * Each client is responsible for keeping track of the channels it uses.  See
+ * the definition of dma_event_callback in dmaengine.h.
  *
  * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_put is done for each class_device registered. When the
+ * registered. A kref_get is done for each class_device registered. When the
  * class_device is released, the coresponding kref_put is done in the release
  * method. Every time one of the device's channels is allocated to a client,
  * a kref_get occurs.  When the channel is freed, the coresponding kref_put
@@ -51,10 +51,12 @@
  * references to finish.
  *
  * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t.  A single reference is set when on an
- * ADDED event, and removed with a REMOVE event.  Net DMA client takes an
- * extra reference per outstanding transaction.  The relase function does a
- * kref_put on the device. -ChrisL
+ * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
+ * signals that it wants to use a channel, and dma_chan_put is called when
+ * a channel is removed or a client using it is unregesitered.  A client can
+ * take extra references per outstanding transaction, as is the case with
+ * the NET DMA client.  The release function does a kref_put on the device.
+ * -ChrisL, DanW
  */
 
 #include <linux/init.h>
@@ -102,8 +104,19 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
 static ssize_t show_in_use(struct class_device *cd, char *buf)
 {
 	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	int in_use = 0;
+
+	if (unlikely(chan->slow_ref) &&
+		atomic_read(&chan->refcount.refcount) > 1)
+		in_use = 1;
+	else {
+		if (local_read(&(per_cpu_ptr(chan->local,
+			get_cpu())->refcount)) > 0)
+			in_use = 1;
+		put_cpu();
+	}
 
-	return sprintf(buf, "%d\n", (chan->client ? 1 : 0));
+	return sprintf(buf, "%d\n", in_use);
 }
 
 static struct class_device_attribute dma_class_attrs[] = {
@@ -129,42 +142,53 @@ static struct class dma_devclass = {
 
 /* --- client and device registration --- */
 
+#define dma_chan_satisfies_mask(chan, mask) \
+	__dma_chan_satisfies_mask((chan), &(mask))
+static int
+__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
+{
+	dma_cap_mask_t has;
+
+	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
+		DMA_TX_TYPE_END);
+	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
+}
+
 /**
- * dma_client_chan_alloc - try to allocate a channel to a client
+ * dma_client_chan_alloc - try to allocate channels to a client
  * @client: &dma_client
  *
  * Called with dma_list_mutex held.
  */
-static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
+static void dma_client_chan_alloc(struct dma_client *client)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	unsigned long flags;
 	int desc;	/* allocated descriptor count */
+	enum dma_state_client ack;
 
-	/* Find a channel, any DMA engine will do */
-	list_for_each_entry(device, &dma_device_list, global_node) {
+	/* Find a channel */
+	list_for_each_entry(device, &dma_device_list, global_node)
 		list_for_each_entry(chan, &device->channels, device_node) {
-			if (chan->client)
+			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
 				continue;
 
 			desc = chan->device->device_alloc_chan_resources(chan);
 			if (desc >= 0) {
-				kref_get(&device->refcount);
-				kref_init(&chan->refcount);
-				chan->slow_ref = 0;
-				INIT_RCU_HEAD(&chan->rcu);
-				chan->client = client;
-				spin_lock_irqsave(&client->lock, flags);
-				list_add_tail_rcu(&chan->client_node,
-						&client->channels);
-				spin_unlock_irqrestore(&client->lock, flags);
-				return chan;
+				ack = client->event_callback(client,
+						chan,
+						DMA_RESOURCE_AVAILABLE);
+
+				/* we are done once this client rejects
+				 * an available resource
+				 */
+				if (ack == DMA_ACK) {
+					dma_chan_get(chan);
+					kref_get(&device->refcount);
+				} else if (ack == DMA_NAK)
+					return;
 			}
 		}
-	}
-
-	return NULL;
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -193,7 +217,6 @@ void dma_chan_cleanup(struct kref *kref)
 {
 	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
 	chan->device->device_free_chan_resources(chan);
-	chan->client = NULL;
 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
 }
 EXPORT_SYMBOL(dma_chan_cleanup);
@@ -209,7 +232,7 @@ static void dma_chan_free_rcu(struct rcu_head *rcu)
 	kref_put(&chan->refcount, dma_chan_cleanup);
 }
 
-static void dma_client_chan_free(struct dma_chan *chan)
+static void dma_chan_release(struct dma_chan *chan)
 {
 	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
 	chan->slow_ref = 1;
@@ -217,70 +240,57 @@ static void dma_client_chan_free(struct dma_chan *chan)
 }
 
 /**
- * dma_chans_rebalance - reallocate channels to clients
- *
- * When the number of DMA channel in the system changes,
- * channels need to be rebalanced among clients.
+ * dma_chans_notify_available - broadcast available channels to the clients
  */
-static void dma_chans_rebalance(void)
+static void dma_clients_notify_available(void)
 {
 	struct dma_client *client;
-	struct dma_chan *chan;
-	unsigned long flags;
 
 	mutex_lock(&dma_list_mutex);
 
-	list_for_each_entry(client, &dma_client_list, global_node) {
-		while (client->chans_desired > client->chan_count) {
-			chan = dma_client_chan_alloc(client);
-			if (!chan)
-				break;
-			client->chan_count++;
-			client->event_callback(client,
-					chan,
-					DMA_RESOURCE_ADDED);
-		}
-		while (client->chans_desired < client->chan_count) {
-			spin_lock_irqsave(&client->lock, flags);
-			chan = list_entry(client->channels.next,
-					struct dma_chan,
-					client_node);
-			list_del_rcu(&chan->client_node);
-			spin_unlock_irqrestore(&client->lock, flags);
-			client->chan_count--;
-			client->event_callback(client,
-					chan,
-					DMA_RESOURCE_REMOVED);
-			dma_client_chan_free(chan);
-		}
-	}
+	list_for_each_entry(client, &dma_client_list, global_node)
+		dma_client_chan_alloc(client);
 
 	mutex_unlock(&dma_list_mutex);
 }
 
 /**
- * dma_async_client_register - allocate and register a &dma_client
- * @event_callback: callback for notification of channel addition/removal
+ * dma_chans_notify_available - tell the clients that a channel is going away
+ * @chan: channel on its way out
  */
-struct dma_client *dma_async_client_register(dma_event_callback event_callback)
+static void dma_clients_notify_removed(struct dma_chan *chan)
 {
 	struct dma_client *client;
+	enum dma_state_client ack;
 
-	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (!client)
-		return NULL;
+	mutex_lock(&dma_list_mutex);
+
+	list_for_each_entry(client, &dma_client_list, global_node) {
+		ack = client->event_callback(client, chan,
+				DMA_RESOURCE_REMOVED);
+
+		/* client was holding resources for this channel so
+		 * free it
+		 */
+		if (ack == DMA_ACK) {
+			dma_chan_put(chan);
+			kref_put(&chan->device->refcount,
+				dma_async_device_cleanup);
+		}
+	}
 
-	INIT_LIST_HEAD(&client->channels);
-	spin_lock_init(&client->lock);
-	client->chans_desired = 0;
-	client->chan_count = 0;
-	client->event_callback = event_callback;
+	mutex_unlock(&dma_list_mutex);
+}
 
+/**
+ * dma_async_client_register - register a &dma_client
+ * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ */
+void dma_async_client_register(struct dma_client *client)
+{
 	mutex_lock(&dma_list_mutex);
 	list_add_tail(&client->global_node, &dma_client_list);
 	mutex_unlock(&dma_list_mutex);
-
-	return client;
 }
 EXPORT_SYMBOL(dma_async_client_register);
 
@@ -292,40 +302,42 @@ EXPORT_SYMBOL(dma_async_client_register);
  */
 void dma_async_client_unregister(struct dma_client *client)
 {
+	struct dma_device *device;
 	struct dma_chan *chan;
+	enum dma_state_client ack;
 
 	if (!client)
 		return;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(chan, &client->channels, client_node)
-		dma_client_chan_free(chan);
-	rcu_read_unlock();
-
 	mutex_lock(&dma_list_mutex);
+	/* free all channels the client is holding */
+	list_for_each_entry(device, &dma_device_list, global_node)
+		list_for_each_entry(chan, &device->channels, device_node) {
+			ack = client->event_callback(client, chan,
+				DMA_RESOURCE_REMOVED);
+
+			if (ack == DMA_ACK) {
+				dma_chan_put(chan);
+				kref_put(&chan->device->refcount,
+					dma_async_device_cleanup);
+			}
+		}
+
 	list_del(&client->global_node);
 	mutex_unlock(&dma_list_mutex);
-
-	kfree(client);
-	dma_chans_rebalance();
 }
 EXPORT_SYMBOL(dma_async_client_unregister);
 
 /**
- * dma_async_client_chan_request - request DMA channels
- * @client: &dma_client
- * @number: count of DMA channels requested
- *
- * Clients call dma_async_client_chan_request() to specify how many
- * DMA channels they need, 0 to free all currently allocated.
- * The resulting allocations/frees are indicated to the client via the
- * event callback.
+ * dma_async_client_chan_request - send all available channels to the
+ * client that satisfy the capability mask
+ * @client - requester
  */
-void dma_async_client_chan_request(struct dma_client *client,
-		unsigned int number)
+void dma_async_client_chan_request(struct dma_client *client)
 {
-	client->chans_desired = number;
-	dma_chans_rebalance();
+	mutex_lock(&dma_list_mutex);
+	dma_client_chan_alloc(client);
+	mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL(dma_async_client_chan_request);
 
@@ -386,13 +398,16 @@ int dma_async_device_register(struct dma_device *device)
 		}
 
 		kref_get(&device->refcount);
+		kref_init(&chan->refcount);
+		chan->slow_ref = 0;
+		INIT_RCU_HEAD(&chan->rcu);
 	}
 
 	mutex_lock(&dma_list_mutex);
 	list_add_tail(&device->global_node, &dma_device_list);
 	mutex_unlock(&dma_list_mutex);
 
-	dma_chans_rebalance();
+	dma_clients_notify_available();
 
 	return 0;
 
@@ -428,26 +443,16 @@ static void dma_async_device_cleanup(struct kref *kref)
 void dma_async_device_unregister(struct dma_device *device)
 {
 	struct dma_chan *chan;
-	unsigned long flags;
 
 	mutex_lock(&dma_list_mutex);
 	list_del(&device->global_node);
 	mutex_unlock(&dma_list_mutex);
 
 	list_for_each_entry(chan, &device->channels, device_node) {
-		if (chan->client) {
-			spin_lock_irqsave(&chan->client->lock, flags);
-			list_del(&chan->client_node);
-			chan->client->chan_count--;
-			spin_unlock_irqrestore(&chan->client->lock, flags);
-			chan->client->event_callback(chan->client,
-					chan,
-					DMA_RESOURCE_REMOVED);
-			dma_client_chan_free(chan);
-		}
+		dma_clients_notify_removed(chan);
 		class_device_unregister(&chan->class_dev);
+		dma_chan_release(chan);
 	}
-	dma_chans_rebalance();
 
 	kref_put(&device->refcount, dma_async_device_cleanup);
 	wait_for_completion(&device->done);
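
For orientation, a minimal client sketch written against the reworked interface above (illustrative only; the my_* names are hypothetical, the dmaengine symbols are the ones introduced or kept by this patch). It mirrors the pattern the NET DMA client adopts later in the series: embed a struct dma_client, declare capabilities, ack the channels you keep.

/* Illustrative sketch, not part of the patch.  The core takes the
 * channel/device references when this callback returns DMA_ACK for
 * DMA_RESOURCE_AVAILABLE, and drops them again when the client acks
 * a DMA_RESOURCE_REMOVED event.
 */
#include <linux/dmaengine.h>

static struct dma_chan *my_chan;	/* the single channel this client holds */

static enum dma_state_client
my_event(struct dma_client *client, struct dma_chan *chan,
	 enum dma_state state)
{
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		if (my_chan)
			return DMA_DUP;	/* already holding a channel */
		my_chan = chan;
		return DMA_ACK;		/* core grabs chan + device refs */
	case DMA_RESOURCE_REMOVED:
		if (chan != my_chan)
			return DMA_DUP;	/* not ours, take no action */
		my_chan = NULL;
		return DMA_ACK;		/* core drops the refs held for us */
	default:
		return DMA_DUP;
	}
}

static struct dma_client my_client = {
	.event_callback = my_event,
};

static int __init my_client_init(void)
{
	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
	dma_async_client_register(&my_client);
	dma_async_client_chan_request(&my_client);
	return 0;
}
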
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 171044930282..81810b3042f1 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -72,7 +72,6 @@ static int enumerate_dma_channels(struct ioat_device *device)
 		INIT_LIST_HEAD(&ioat_chan->used_desc);
 		/* This should be made common somewhere in dmaengine.c */
 		ioat_chan->common.device = &device->common;
-		ioat_chan->common.client = NULL;
 		list_add_tail(&ioat_chan->common.device_node,
 				&device->common.channels);
 	}
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index d3f69bb15fa0..d3726478031a 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -30,9 +30,6 @@
 
 #define IOAT_LOW_COMPLETION_MASK	0xffffffc0
 
-extern struct list_head dma_device_list;
-extern struct list_head dma_client_list;
-
 /**
  * struct ioat_device - internal representation of a IOAT device
  * @pdev: PCI-Express device
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 3de1cf71031a..a3b6035b6c86 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -29,20 +29,32 @@
 #include <linux/dma-mapping.h>
 
 /**
- * enum dma_event - resource PNP/power managment events
+ * enum dma_state - resource PNP/power managment state
  * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
  * @DMA_RESOURCE_RESUME: DMA device returning to full power
- * @DMA_RESOURCE_ADDED: DMA device added to the system
+ * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
  * @DMA_RESOURCE_REMOVED: DMA device removed from the system
  */
-enum dma_event {
+enum dma_state {
 	DMA_RESOURCE_SUSPEND,
 	DMA_RESOURCE_RESUME,
-	DMA_RESOURCE_ADDED,
+	DMA_RESOURCE_AVAILABLE,
 	DMA_RESOURCE_REMOVED,
 };
 
 /**
+ * enum dma_state_client - state of the channel in the client
+ * @DMA_ACK: client would like to use, or was using this channel
+ * @DMA_DUP: client has already seen this channel, or is not using this channel
+ * @DMA_NAK: client does not want to see any more channels
+ */
+enum dma_state_client {
+	DMA_ACK,
+	DMA_DUP,
+	DMA_NAK,
+};
+
+/**
  * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
@@ -104,7 +116,6 @@ struct dma_chan_percpu {
 
 /**
  * struct dma_chan - devices supply DMA channels, clients use them
- * @client: ptr to the client user of this chan, will be %NULL when unused
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
@@ -112,12 +123,10 @@ struct dma_chan_percpu {
  * @refcount: kref, used in "bigref" slow-mode
  * @slow_ref: indicates that the DMA channel is free
  * @rcu: the DMA channel's RCU head
- * @client_node: used to add this to the client chan list
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  */
 struct dma_chan {
-	struct dma_client *client;
 	struct dma_device *device;
 	dma_cookie_t cookie;
 
@@ -129,11 +138,11 @@ struct dma_chan {
 	int slow_ref;
 	struct rcu_head rcu;
 
-	struct list_head client_node;
 	struct list_head device_node;
 	struct dma_chan_percpu *local;
 };
 
+
 void dma_chan_cleanup(struct kref *kref);
 
 static inline void dma_chan_get(struct dma_chan *chan)
@@ -158,26 +167,31 @@ static inline void dma_chan_put(struct dma_chan *chan)
 
 /*
  * typedef dma_event_callback - function pointer to a DMA event callback
+ * For each channel added to the system this routine is called for each client.
+ * If the client would like to use the channel it returns '1' to signal (ack)
+ * the dmaengine core to take out a reference on the channel and its
+ * corresponding device. A client must not 'ack' an available channel more
+ * than once. When a channel is removed all clients are notified. If a client
+ * is using the channel it must 'ack' the removal. A client must not 'ack' a
+ * removed channel more than once.
+ * @client - 'this' pointer for the client context
+ * @chan - channel to be acted upon
+ * @state - available or removed
  */
-typedef void (*dma_event_callback) (struct dma_client *client,
-		struct dma_chan *chan, enum dma_event event);
+struct dma_client;
+typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
+		struct dma_chan *chan, enum dma_state state);
 
 /**
  * struct dma_client - info on the entity making use of DMA services
  * @event_callback: func ptr to call when something happens
- * @chan_count: number of chans allocated
- * @chans_desired: number of chans requested. Can be +/- chan_count
- * @lock: protects access to the channels list
- * @channels: the list of DMA channels allocated
+ * @cap_mask: only return channels that satisfy the requested capabilities
+ *  a value of zero corresponds to any capability
  * @global_node: list_head for global dma_client_list
  */
 struct dma_client {
 	dma_event_callback	event_callback;
-	unsigned int		chan_count;
-	unsigned int		chans_desired;
-
-	spinlock_t		lock;
-	struct list_head	channels;
+	dma_cap_mask_t		cap_mask;
 	struct list_head	global_node;
 };
 
@@ -285,10 +299,9 @@ struct dma_device {
 
 /* --- public DMA engine API --- */
 
-struct dma_client *dma_async_client_register(dma_event_callback event_callback);
+void dma_async_client_register(struct dma_client *client);
 void dma_async_client_unregister(struct dma_client *client);
-void dma_async_client_chan_request(struct dma_client *client,
-		unsigned int number);
+void dma_async_client_chan_request(struct dma_client *client);
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -299,7 +312,6 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 	struct dma_chan *chan);
 
-
 static inline void
 async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
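
The @refcount/@slow_ref/@local fields kept in struct dma_chan above implement the "bigref" described at the top of dmaengine.c: cheap per-cpu counting while the channel is live, falling back to the kref once dma_chan_release() flips slow_ref. A sketch of how the dma_chan_get()/dma_chan_put() pair referenced in these hunks behaves, reconstructed here for illustration only (the authoritative bodies live in this header, just above the dma_event_callback typedef):

/* Reconstructed for illustration, not part of the patch.  Fast path:
 * bump a per-cpu counter; slow path (after dma_chan_release() has set
 * slow_ref and folded the per-cpu counts into the kref): use the kref
 * so the final put can reach dma_chan_cleanup().
 */
static inline void dma_chan_get(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_get(&chan->refcount);
	else {
		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

static inline void dma_chan_put(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_put(&chan->refcount, dma_chan_cleanup);
	else {
		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}
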
diff --git a/net/core/dev.c b/net/core/dev.c
index ee051bb398a0..835202fb34c4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -151,9 +151,22 @@ static struct list_head ptype_base[16] __read_mostly; /* 16 way hashed list */
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
 #ifdef CONFIG_NET_DMA
-static struct dma_client *net_dma_client;
-static unsigned int net_dma_count;
-static spinlock_t net_dma_event_lock;
+struct net_dma {
+	struct dma_client client;
+	spinlock_t lock;
+	cpumask_t channel_mask;
+	struct dma_chan *channels[NR_CPUS];
+};
+
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+	enum dma_state state);
+
+static struct net_dma net_dma = {
+	.client = {
+		.event_callback = netdev_dma_event,
+	},
+};
 #endif
 
 /*
@@ -2015,12 +2028,13 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (net_dma_client) {
-		struct dma_chan *chan;
-		rcu_read_lock();
-		list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
-			dma_async_memcpy_issue_pending(chan);
-		rcu_read_unlock();
+	if (!cpus_empty(net_dma.channel_mask)) {
+		int chan_idx;
+		for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+			struct dma_chan *chan = net_dma.channels[chan_idx];
+			if (chan)
+				dma_async_memcpy_issue_pending(chan);
+		}
 	}
 #endif
 	return;
@@ -3563,12 +3577,13 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  * This is called when the number of channels allocated to the net_dma_client
  * changes.  The net_dma_client tries to have one DMA channel per CPU.
  */
-static void net_dma_rebalance(void)
+
+static void net_dma_rebalance(struct net_dma *net_dma)
 {
-	unsigned int cpu, i, n;
+	unsigned int cpu, i, n, chan_idx;
 	struct dma_chan *chan;
 
-	if (net_dma_count == 0) {
+	if (cpus_empty(net_dma->channel_mask)) {
 		for_each_online_cpu(cpu)
 			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
 		return;
@@ -3577,10 +3592,12 @@ static void net_dma_rebalance(void)
 	i = 0;
 	cpu = first_cpu(cpu_online_map);
 
-	rcu_read_lock();
-	list_for_each_entry(chan, &net_dma_client->channels, client_node) {
-		n = ((num_online_cpus() / net_dma_count)
-			+ (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
+	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+		chan = net_dma->channels[chan_idx];
+
+		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
+			+ (i < (num_online_cpus() %
+			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
 
 		while(n) {
 			per_cpu(softnet_data, cpu).net_dma = chan;
@@ -3589,7 +3606,6 @@ static void net_dma_rebalance(void)
 		}
 		i++;
 	}
-	rcu_read_unlock();
 }
 
 /**
@@ -3598,23 +3614,53 @@ static void net_dma_rebalance(void)
  * @chan: DMA channel for the event
  * @event: event type
  */
-static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_event event)
-{
-	spin_lock(&net_dma_event_lock);
-	switch (event) {
-	case DMA_RESOURCE_ADDED:
-		net_dma_count++;
-		net_dma_rebalance();
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+	enum dma_state state)
+{
+	int i, found = 0, pos = -1;
+	struct net_dma *net_dma =
+		container_of(client, struct net_dma, client);
+	enum dma_state_client ack = DMA_DUP; /* default: take no action */
+
+	spin_lock(&net_dma->lock);
+	switch (state) {
+	case DMA_RESOURCE_AVAILABLE:
+		for (i = 0; i < NR_CPUS; i++)
+			if (net_dma->channels[i] == chan) {
+				found = 1;
+				break;
+			} else if (net_dma->channels[i] == NULL && pos < 0)
+				pos = i;
+
+		if (!found && pos >= 0) {
+			ack = DMA_ACK;
+			net_dma->channels[pos] = chan;
+			cpu_set(pos, net_dma->channel_mask);
+			net_dma_rebalance(net_dma);
+		}
 		break;
 	case DMA_RESOURCE_REMOVED:
-		net_dma_count--;
-		net_dma_rebalance();
+		for (i = 0; i < NR_CPUS; i++)
+			if (net_dma->channels[i] == chan) {
+				found = 1;
+				pos = i;
+				break;
+			}
+
+		if (found) {
+			ack = DMA_ACK;
+			cpu_clear(pos, net_dma->channel_mask);
+			net_dma->channels[i] = NULL;
+			net_dma_rebalance(net_dma);
+		}
 		break;
 	default:
 		break;
 	}
-	spin_unlock(&net_dma_event_lock);
+	spin_unlock(&net_dma->lock);
+
+	return ack;
 }
 
 /**
@@ -3622,12 +3668,10 @@ static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
  */
 static int __init netdev_dma_register(void)
 {
-	spin_lock_init(&net_dma_event_lock);
-	net_dma_client = dma_async_client_register(netdev_dma_event);
-	if (net_dma_client == NULL)
-		return -ENOMEM;
-
-	dma_async_client_chan_request(net_dma_client, num_online_cpus());
+	spin_lock_init(&net_dma.lock);
+	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
+	dma_async_client_register(&net_dma.client);
+	dma_async_client_chan_request(&net_dma.client);
 	return 0;
 }
 
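
The n computation in net_dma_rebalance() above spreads the online CPUs as evenly as possible over the channels the client holds: each channel gets num_online_cpus()/num_channels CPUs, and the first num_online_cpus()%num_channels channels get one extra. A standalone sketch of the arithmetic (illustrative only, not part of the patch):

/* Illustrative only: with 6 online CPUs and 4 channels the split is
 * 6/4 + (i < 6%4 ? 1 : 0) CPUs for channel i, i.e. 2, 2, 1, 1.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cpus = 6, chans = 4, i;

	for (i = 0; i < chans; i++)
		printf("channel %u serves %u cpu(s)\n", i,
		       cpus / chans + (i < cpus % chans ? 1 : 0));
	return 0;
}
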