author	Gertjan van Wingerde <gwingerde@kpnplanet.nl>	2008-06-16 13:56:31 -0400
committer	John W. Linville <linville@tuxdriver.com>	2008-06-26 16:49:16 -0400
commit	c4da004857056e6ee034c4110ccdcba659077b7e (patch)
tree	641f8d9ddab7b8b6ba41fefc57a517abce15e8e6	/drivers/net/wireless/rt2x00/rt2x00pci.c
parent	30caa6e3d586442f7c3ad081260ee1b22bb123de (diff)
rt2x00: Replace statically allocated DMA buffers with mapped skb's.
The current PCI drivers require a lot of pre-allocated DMA buffers. Reduce this
by using dynamically mapped skb's (using pci_map_single) instead of the
pre-allocated DMA buffers that are allocated at device start-up time.

At the same time move common RX path code into rt2x00lib from rt2x00pci and
rt2x00usb, as the RX paths are now almost the same.

Signed-off-by: Gertjan van Wingerde <gwingerde@kpnplanet.nl>
Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
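As a rough illustration of the approach (not the actual rt2x00queue code): instead of copying every frame into coherent buffers allocated at start-up, the driver now maps each skb for streaming DMA on demand. The sketch below uses the generic dma_map_single()/dma_unmap_single() calls (the generic form of the pci_map_single() wrapper named above) and a hypothetical example_frame_desc standing in for the driver's own skb_frame_desc state.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical per-frame state; the real driver keeps the DMA handle elsewhere. */
struct example_frame_desc {
	dma_addr_t skb_dma;
};

/* Map the frame payload so the device can DMA it directly from the skb. */
static int example_map_txskb(struct device *dev, struct sk_buff *skb,
			     struct example_frame_desc *desc)
{
	desc->skb_dma = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc->skb_dma))
		return -ENOMEM;
	return 0;
}

/* Release the mapping once TX completion has been handled. */
static void example_unmap_txskb(struct device *dev, struct sk_buff *skb,
				struct example_frame_desc *desc)
{
	dma_unmap_single(dev, desc->skb_dma, skb->len, DMA_TO_DEVICE);
}

Mapping on demand replaces the per-queue data buffers and the per-frame memcpy with a map in the TX path and an unmap in the TX-done path, which is what the hunks below do via rt2x00queue_map_txskb() and rt2x00queue_unmap_skb().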
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00pci.c')
-rw-r--r--	drivers/net/wireless/rt2x00/rt2x00pci.c	125
1 file changed, 33 insertions, 92 deletions
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index e7e3a459b66a..f9d0d76f8706 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -65,7 +65,7 @@ int rt2x00pci_write_tx_data(struct queue_entry *entry)
 	skbdesc->desc_len = entry->queue->desc_size;
 	skbdesc->entry = entry;
 
-	memcpy(entry_priv->data, entry->skb->data, entry->skb->len);
+	rt2x00queue_map_txskb(entry->queue->rt2x00dev, entry->skb);
 
 	return 0;
 }
@@ -74,59 +74,12 @@ EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
 /*
  * TX/RX data handlers.
  */
-static void rt2x00pci_rxdone_entry(struct rt2x00_dev *rt2x00dev,
-				   struct queue_entry *entry)
-{
-	struct sk_buff *skb;
-	struct skb_frame_desc *skbdesc;
-	struct rxdone_entry_desc rxdesc;
-	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
-
-	/*
-	 * Allocate a new sk_buffer. If no new buffer available, drop the
-	 * received frame and reuse the existing buffer.
-	 */
-	skb = rt2x00queue_alloc_skb(entry->queue);
-	if (!skb)
-		return;
-
-	/*
-	 * Extract the RXD details.
-	 */
-	memset(&rxdesc, 0, sizeof(rxdesc));
-	rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
-
-	/*
-	 * Copy the received data to the entries' skb.
-	 */
-	memcpy(entry->skb->data, entry_priv->data, rxdesc.size);
-	skb_trim(entry->skb, rxdesc.size);
-
-	/*
-	 * Fill in skb descriptor
-	 */
-	skbdesc = get_skb_frame_desc(entry->skb);
-	memset(skbdesc, 0, sizeof(*skbdesc));
-	skbdesc->desc = entry_priv->desc;
-	skbdesc->desc_len = entry->queue->desc_size;
-	skbdesc->entry = entry;
-
-	/*
-	 * Send the frame to rt2x00lib for further processing.
-	 */
-	rt2x00lib_rxdone(entry, &rxdesc);
-
-	/*
-	 * Replace the entries' skb with the newly allocated one.
-	 */
-	entry->skb = skb;
-}
-
 void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
 {
 	struct data_queue *queue = rt2x00dev->rx;
 	struct queue_entry *entry;
 	struct queue_entry_priv_pci *entry_priv;
+	struct skb_frame_desc *skbdesc;
 	u32 word;
 
 	while (1) {
@@ -137,12 +90,22 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
 		if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
 			break;
 
-		rt2x00pci_rxdone_entry(rt2x00dev, entry);
+		/*
+		 * Fill in desc fields of the skb descriptor
+		 */
+		skbdesc = get_skb_frame_desc(entry->skb);
+		skbdesc->desc = entry_priv->desc;
+		skbdesc->desc_len = entry->queue->desc_size;
+
+		/*
+		 * Send the frame to rt2x00lib for further processing.
+		 */
+		rt2x00lib_rxdone(rt2x00dev, entry);
 
-		if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
-			rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
-			rt2x00_desc_write(entry_priv->desc, 0, word);
-		}
+		/*
+		 * Reset the RXD for this entry.
+		 */
+		rt2x00dev->ops->lib->init_rxentry(rt2x00dev, entry);
 
 		rt2x00queue_index_inc(queue, Q_INDEX);
 	}
@@ -156,6 +119,11 @@ void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
 	enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
 	u32 word;
 
+	/*
+	 * Unmap the skb.
+	 */
+	rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
+
 	rt2x00lib_txdone(entry, txdesc);
 
 	/*
@@ -185,33 +153,6 @@ EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
 /*
  * Device initialization handlers.
  */
-#define desc_size(__queue) \
-({ \
-	((__queue)->limit * (__queue)->desc_size);\
-})
-
-#define data_size(__queue) \
-({ \
-	((__queue)->limit * (__queue)->data_size);\
-})
-
-#define dma_size(__queue) \
-({ \
-	data_size(__queue) + desc_size(__queue);\
-})
-
-#define desc_offset(__queue, __base, __i) \
-({ \
-	(__base) + data_size(__queue) + \
-	    ((__i) * (__queue)->desc_size); \
-})
-
-#define data_offset(__queue, __base, __i) \
-({ \
-	(__base) + \
-	    ((__i) * (__queue)->data_size); \
-})
-
 static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
 				     struct data_queue *queue)
 {
@@ -223,22 +164,21 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * Allocate DMA memory for descriptor and buffer.
 	 */
-	addr = dma_alloc_coherent(rt2x00dev->dev, dma_size(queue), &dma,
-				  GFP_KERNEL | GFP_DMA);
+	addr = dma_alloc_coherent(rt2x00dev->dev,
+				  queue->limit * queue->desc_size,
+				  &dma, GFP_KERNEL | GFP_DMA);
 	if (!addr)
 		return -ENOMEM;
 
-	memset(addr, 0, dma_size(queue));
+	memset(addr, 0, queue->limit * queue->desc_size);
 
 	/*
 	 * Initialize all queue entries to contain valid addresses.
 	 */
 	for (i = 0; i < queue->limit; i++) {
 		entry_priv = queue->entries[i].priv_data;
-		entry_priv->desc = desc_offset(queue, addr, i);
-		entry_priv->desc_dma = desc_offset(queue, dma, i);
-		entry_priv->data = data_offset(queue, addr, i);
-		entry_priv->data_dma = data_offset(queue, dma, i);
+		entry_priv->desc = addr + i * queue->desc_size;
+		entry_priv->desc_dma = dma + i * queue->desc_size;
 	}
 
 	return 0;
@@ -250,10 +190,11 @@ static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
 	struct queue_entry_priv_pci *entry_priv =
 	    queue->entries[0].priv_data;
 
-	if (entry_priv->data)
-		dma_free_coherent(rt2x00dev->dev, dma_size(queue),
-				  entry_priv->data, entry_priv->data_dma);
-	entry_priv->data = NULL;
+	if (entry_priv->desc)
+		dma_free_coherent(rt2x00dev->dev,
+				  queue->limit * queue->desc_size,
+				  entry_priv->desc, entry_priv->desc_dma);
+	entry_priv->desc = NULL;
 }
 
 int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)