aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/rt2x00/rt2x00queue.c
diff options
context:
space:
mode:
authorGertjan van Wingerde <gwingerde@kpnplanet.nl>2008-06-16 13:56:31 -0400
committerJohn W. Linville <linville@tuxdriver.com>2008-06-26 16:49:16 -0400
commitc4da004857056e6ee034c4110ccdcba659077b7e (patch)
tree641f8d9ddab7b8b6ba41fefc57a517abce15e8e6 /drivers/net/wireless/rt2x00/rt2x00queue.c
parent30caa6e3d586442f7c3ad081260ee1b22bb123de (diff)
rt2x00: Replace statically allocated DMA buffers with mapped skb's.
The current PCI drivers require a lot of pre-allocated DMA buffers. Reduce this by using dynamically mapped skb's (using pci_map_single) instead of the pre-allocated DMA buffers that are allocated at device start-up time. At the same time move common RX path code into rt2x00lib from rt2x00pci and rt2x00usb, as the RX paths are now almost the same. Signed-off-by: Gertjan van Wingerde <gwingerde@kpnplanet.nl> Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c85
1 files changed, 72 insertions, 13 deletions
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 278f1a1ac926..29d2b9128533 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -25,21 +25,24 @@
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/dma-mapping.h>
28 29
29#include "rt2x00.h" 30#include "rt2x00.h"
30#include "rt2x00lib.h" 31#include "rt2x00lib.h"
31 32
32struct sk_buff *rt2x00queue_alloc_skb(struct data_queue *queue) 33struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
34 struct queue_entry *entry)
33{ 35{
34 struct sk_buff *skb;
35 unsigned int frame_size; 36 unsigned int frame_size;
36 unsigned int reserved_size; 37 unsigned int reserved_size;
38 struct sk_buff *skb;
39 struct skb_frame_desc *skbdesc;
37 40
38 /* 41 /*
39 * The frame size includes descriptor size, because the 42 * The frame size includes descriptor size, because the
40 * hardware directly receive the frame into the skbuffer. 43 * hardware directly receive the frame into the skbuffer.
41 */ 44 */
42 frame_size = queue->data_size + queue->desc_size; 45 frame_size = entry->queue->data_size + entry->queue->desc_size;
43 46
44 /* 47 /*
45 * Reserve a few bytes extra headroom to allow drivers some moving 48 * Reserve a few bytes extra headroom to allow drivers some moving
@@ -57,12 +60,67 @@ struct sk_buff *rt2x00queue_alloc_skb(struct data_queue *queue)
57 skb_reserve(skb, reserved_size); 60 skb_reserve(skb, reserved_size);
58 skb_put(skb, frame_size); 61 skb_put(skb, frame_size);
59 62
63 /*
64 * Populate skbdesc.
65 */
66 skbdesc = get_skb_frame_desc(skb);
67 memset(skbdesc, 0, sizeof(*skbdesc));
68 skbdesc->entry = entry;
69
70 if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
71 skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
72 skb->data,
73 skb->len,
74 DMA_FROM_DEVICE);
75 skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
76 }
77
60 return skb; 78 return skb;
61} 79}
62EXPORT_SYMBOL_GPL(rt2x00queue_alloc_skb); 80EXPORT_SYMBOL_GPL(rt2x00queue_alloc_rxskb);
63 81
64void rt2x00queue_free_skb(struct sk_buff *skb) 82void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
65{ 83{
84 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
85
86 skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
87 DMA_TO_DEVICE);
88 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
89}
90EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
91
92void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
93{
94 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
95
96 if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
97 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
98 DMA_FROM_DEVICE);
99 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
100 }
101
102 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
103 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
104 DMA_TO_DEVICE);
105 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
106 }
107}
108EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
109
110void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
111{
112 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
113
114 if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
115 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
116 DMA_FROM_DEVICE);
117 }
118
119 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
120 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
121 DMA_TO_DEVICE);
122 }
123
66 dev_kfree_skb_any(skb); 124 dev_kfree_skb_any(skb);
67} 125}
68EXPORT_SYMBOL_GPL(rt2x00queue_free_skb); 126EXPORT_SYMBOL_GPL(rt2x00queue_free_skb);
@@ -421,7 +479,8 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
421 return 0; 479 return 0;
422} 480}
423 481
424static void rt2x00queue_free_skbs(struct data_queue *queue) 482static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
483 struct data_queue *queue)
425{ 484{
426 unsigned int i; 485 unsigned int i;
427 486
@@ -430,27 +489,27 @@ static void rt2x00queue_free_skbs(struct data_queue *queue)
430 489
431 for (i = 0; i < queue->limit; i++) { 490 for (i = 0; i < queue->limit; i++) {
432 if (queue->entries[i].skb) 491 if (queue->entries[i].skb)
433 rt2x00queue_free_skb(queue->entries[i].skb); 492 rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
434 } 493 }
435} 494}
436 495
437static int rt2x00queue_alloc_skbs(struct data_queue *queue) 496static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
497 struct data_queue *queue)
438{ 498{
439 unsigned int i; 499 unsigned int i;
440 struct sk_buff *skb; 500 struct sk_buff *skb;
441 501
442 for (i = 0; i < queue->limit; i++) { 502 for (i = 0; i < queue->limit; i++) {
443 skb = rt2x00queue_alloc_skb(queue); 503 skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
444 if (!skb) 504 if (!skb)
445 goto exit; 505 goto exit;
446
447 queue->entries[i].skb = skb; 506 queue->entries[i].skb = skb;
448 } 507 }
449 508
450 return 0; 509 return 0;
451 510
452exit: 511exit:
453 rt2x00queue_free_skbs(queue); 512 rt2x00queue_free_skbs(rt2x00dev, queue);
454 513
455 return -ENOMEM; 514 return -ENOMEM;
456} 515}
@@ -481,7 +540,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
481 goto exit; 540 goto exit;
482 } 541 }
483 542
484 status = rt2x00queue_alloc_skbs(rt2x00dev->rx); 543 status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
485 if (status) 544 if (status)
486 goto exit; 545 goto exit;
487 546
@@ -499,7 +558,7 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
499{ 558{
500 struct data_queue *queue; 559 struct data_queue *queue;
501 560
502 rt2x00queue_free_skbs(rt2x00dev->rx); 561 rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);
503 562
504 queue_for_each(rt2x00dev, queue) { 563 queue_for_each(rt2x00dev, queue) {
505 kfree(queue->entries); 564 kfree(queue->entries);