Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00usb.c')
 drivers/net/wireless/rt2x00/rt2x00usb.c | 290 +++++++++++++++++--------------
 1 file changed, 163 insertions(+), 127 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 47cd0a5bf17c..fc606448908e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -40,8 +40,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
 			     void *buffer, const u16 buffer_length,
 			     const int timeout)
 {
-	struct usb_device *usb_dev =
-	    interface_to_usbdev(rt2x00dev_usb(rt2x00dev));
+	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
 	int status;
 	unsigned int i;
 	unsigned int pipe =
@@ -128,15 +127,15 @@ EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
  */
 static void rt2x00usb_interrupt_txdone(struct urb *urb)
 {
-	struct data_entry *entry = (struct data_entry *)urb->context;
-	struct data_ring *ring = entry->ring;
-	struct rt2x00_dev *rt2x00dev = ring->rt2x00dev;
+	struct queue_entry *entry = (struct queue_entry *)urb->context;
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
+	struct txdone_entry_desc txdesc;
 	__le32 *txd = (__le32 *)entry->skb->data;
 	u32 word;
-	int tx_status;
 
 	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
-	    !__test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags))
+	    !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 		return;
 
 	rt2x00_desc_read(txd, 0, &word);
@@ -144,45 +143,46 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
 	/*
 	 * Remove the descriptor data from the buffer.
 	 */
-	skb_pull(entry->skb, ring->desc_size);
+	skb_pull(entry->skb, entry->queue->desc_size);
 
 	/*
 	 * Obtain the status about this packet.
 	 */
-	tx_status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY;
+	txdesc.status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY;
+	txdesc.retry = 0;
+	txdesc.control = &priv_tx->control;
 
-	rt2x00lib_txdone(entry, tx_status, 0);
+	rt2x00lib_txdone(entry, &txdesc);
 
 	/*
 	 * Make this entry available for reuse.
 	 */
 	entry->flags = 0;
-	rt2x00_ring_index_done_inc(entry->ring);
+	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
 
 	/*
-	 * If the data ring was full before the txdone handler
+	 * If the data queue was full before the txdone handler
 	 * we must make sure the packet queue in the mac80211 stack
 	 * is reenabled when the txdone handler has finished.
 	 */
-	if (!rt2x00_ring_full(ring))
-		ieee80211_wake_queue(rt2x00dev->hw,
-				     entry->tx_status.control.queue);
+	if (!rt2x00queue_full(entry->queue))
+		ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
 }
 
 int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
-			    struct data_ring *ring, struct sk_buff *skb,
+			    struct data_queue *queue, struct sk_buff *skb,
 			    struct ieee80211_tx_control *control)
 {
-	struct usb_device *usb_dev =
-	    interface_to_usbdev(rt2x00dev_usb(rt2x00dev));
-	struct data_entry *entry = rt2x00_get_data_entry(ring);
-	struct skb_desc *desc;
+	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
+	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
+	struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
+	struct skb_frame_desc *skbdesc;
 	u32 length;
 
-	if (rt2x00_ring_full(ring))
+	if (rt2x00queue_full(queue))
 		return -EINVAL;
 
-	if (test_bit(ENTRY_OWNER_NIC, &entry->flags)) {
+	if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
 		ERROR(rt2x00dev,
 		      "Arrived at non-free entry in the non-full queue %d.\n"
 		      "Please file bug report to %s.\n",
@@ -193,19 +193,19 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * Add the descriptor in front of the skb.
 	 */
-	skb_push(skb, ring->desc_size);
-	memset(skb->data, 0, ring->desc_size);
+	skb_push(skb, queue->desc_size);
+	memset(skb->data, 0, queue->desc_size);
 
 	/*
 	 * Fill in skb descriptor
 	 */
-	desc = get_skb_desc(skb);
-	desc->desc_len = ring->desc_size;
-	desc->data_len = skb->len - ring->desc_size;
-	desc->desc = skb->data;
-	desc->data = skb->data + ring->desc_size;
-	desc->ring = ring;
-	desc->entry = entry;
+	skbdesc = get_skb_frame_desc(skb);
+	memset(skbdesc, 0, sizeof(*skbdesc));
+	skbdesc->data = skb->data + queue->desc_size;
+	skbdesc->data_len = queue->data_size;
+	skbdesc->desc = skb->data;
+	skbdesc->desc_len = queue->desc_size;
+	skbdesc->entry = entry;
 
 	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
 
@@ -219,12 +219,12 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * Initialize URB and send the frame to the device.
 	 */
-	__set_bit(ENTRY_OWNER_NIC, &entry->flags);
-	usb_fill_bulk_urb(entry->priv, usb_dev, usb_sndbulkpipe(usb_dev, 1),
+	__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+	usb_fill_bulk_urb(priv_tx->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
 			  skb->data, length, rt2x00usb_interrupt_txdone, entry);
-	usb_submit_urb(entry->priv, GFP_ATOMIC);
+	usb_submit_urb(priv_tx->urb, GFP_ATOMIC);
 
-	rt2x00_ring_index_inc(ring);
+	rt2x00queue_index_inc(queue, Q_INDEX);
 
 	return 0;
 }
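
The hunks above replace rt2x00lib_txdone()'s loose status/retry arguments with a single txdone_entry_desc, so all TX completion data travels together. Below is a minimal userspace sketch of that pattern; the types are trimmed stand-ins for illustration, not the real kernel definitions.

#include <stdio.h>

/* Stand-ins for the kernel types; only the fields used here. */
enum txdone_status { TX_SUCCESS, TX_FAIL_RETRY };

struct tx_control { int queue; };

struct txdone_entry_desc {
	enum txdone_status status;
	int retry;
	struct tx_control *control;
};

/* All completion data now arrives through one descriptor. */
static void txdone(const struct txdone_entry_desc *txdesc)
{
	printf("status=%d retry=%d queue=%d\n",
	       txdesc->status, txdesc->retry, txdesc->control->queue);
}

int main(void)
{
	struct tx_control control = { .queue = 0 };
	struct txdone_entry_desc txdesc = {
		.status = TX_SUCCESS,
		.retry = 0,
		.control = &control,
	};

	txdone(&txdesc);
	return 0;
}

A descriptor keeps the rt2x00lib_txdone() signature stable when further completion fields are added later.
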
@@ -233,20 +233,41 @@ EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
 /*
  * RX data handlers.
  */
+static struct sk_buff* rt2x00usb_alloc_rxskb(struct data_queue *queue)
+{
+	struct sk_buff *skb;
+	unsigned int frame_size;
+
+	/*
+	 * As alignment we use 2 and not NET_IP_ALIGN because we need
+	 * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN
+	 * can be 0 on some hardware). We use these 2 bytes for frame
+	 * alignment later, we assume that the chance that
+	 * header_size % 4 == 2 is bigger then header_size % 2 == 0
+	 * and thus optimize alignment by reserving the 2 bytes in
+	 * advance.
+	 */
+	frame_size = queue->data_size + queue->desc_size;
+	skb = dev_alloc_skb(frame_size + 2);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, 2);
+	skb_put(skb, frame_size);
+
+	return skb;
+}
+
 static void rt2x00usb_interrupt_rxdone(struct urb *urb)
 {
-	struct data_entry *entry = (struct data_entry *)urb->context;
-	struct data_ring *ring = entry->ring;
-	struct rt2x00_dev *rt2x00dev = ring->rt2x00dev;
+	struct queue_entry *entry = (struct queue_entry *)urb->context;
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct sk_buff *skb;
-	struct ieee80211_hdr *hdr;
-	struct skb_desc *skbdesc;
-	struct rxdata_entry_desc desc;
-	int header_size;
-	int frame_size;
+	struct skb_frame_desc *skbdesc;
+	struct rxdone_entry_desc rxdesc;
 
 	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
-	    !test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags))
+	    !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 		return;
 
 	/*
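
The new rt2x00usb_alloc_rxskb() above reserves 2 bytes of headroom so that, when the 802.11 header length is congruent to 2 mod 4, the payload behind the header lands on a 4-byte boundary. A standalone sketch of that arithmetic follows; the sample header lengths are illustrative, not taken from the diff.

#include <stdio.h>

int main(void)
{
	/* Illustrative 802.11 header lengths: 24 (basic data),
	 * 26 (QoS data), 30 (4-address), 32 (4-address QoS). */
	const unsigned int hdr_len[] = { 24, 26, 30, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(hdr_len) / sizeof(hdr_len[0]); i++) {
		/* The frame starts at offset 2 (the reserved bytes),
		 * so the payload begins at 2 + header length. */
		unsigned int payload_off = 2 + hdr_len[i];

		printf("header %2u -> payload offset %2u, 4-byte aligned: %s\n",
		       hdr_len[i], payload_off,
		       payload_off % 4 == 0 ? "yes" : "no");
	}
	return 0;
}

When the header length is instead a multiple of 4, the frame must be shifted down by those same 2 bytes, which is what the memmove() removed from the old rxdone handler (next hunk) did.
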
@@ -254,67 +275,32 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
 	 * to be actually valid, or if the urb is signaling
 	 * a problem.
 	 */
-	if (urb->actual_length < entry->ring->desc_size || urb->status)
+	if (urb->actual_length < entry->queue->desc_size || urb->status)
 		goto skip_entry;
 
 	/*
 	 * Fill in skb descriptor
 	 */
-	skbdesc = get_skb_desc(entry->skb);
-	skbdesc->ring = ring;
+	skbdesc = get_skb_frame_desc(entry->skb);
+	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->entry = entry;
 
-	memset(&desc, 0, sizeof(desc));
-	rt2x00dev->ops->lib->fill_rxdone(entry, &desc);
+	memset(&rxdesc, 0, sizeof(rxdesc));
+	rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
 
 	/*
 	 * Allocate a new sk buffer to replace the current one.
 	 * If allocation fails, we should drop the current frame
 	 * so we can recycle the existing sk buffer for the new frame.
-	 * As alignment we use 2 and not NET_IP_ALIGN because we need
-	 * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN
-	 * can be 0 on some hardware). We use these 2 bytes for frame
-	 * alignment later, we assume that the chance that
-	 * header_size % 4 == 2 is bigger then header_size % 2 == 0
-	 * and thus optimize alignment by reserving the 2 bytes in
-	 * advance.
 	 */
-	frame_size = entry->ring->data_size + entry->ring->desc_size;
-	skb = dev_alloc_skb(frame_size + 2);
+	skb = rt2x00usb_alloc_rxskb(entry->queue);
 	if (!skb)
 		goto skip_entry;
 
-	skb_reserve(skb, 2);
-	skb_put(skb, frame_size);
-
-	/*
-	 * The data behind the ieee80211 header must be
-	 * aligned on a 4 byte boundary.
-	 */
-	hdr = (struct ieee80211_hdr *)entry->skb->data;
-	header_size =
-	    ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
-
-	if (header_size % 4 == 0) {
-		skb_push(entry->skb, 2);
-		memmove(entry->skb->data, entry->skb->data + 2, skb->len - 2);
-	}
-
-	/*
-	 * Trim the entire buffer down to only contain the valid frame data
-	 * excluding the device descriptor. The position of the descriptor
-	 * varies. This means that we should check where the descriptor is
-	 * and decide if we need to pull the data pointer to exclude the
-	 * device descriptor.
-	 */
-	if (skbdesc->data > skbdesc->desc)
-		skb_pull(entry->skb, skbdesc->desc_len);
-	skb_trim(entry->skb, desc.size);
-
 	/*
 	 * Send the frame to rt2x00lib for further processing.
 	 */
-	rt2x00lib_rxdone(entry, entry->skb, &desc);
+	rt2x00lib_rxdone(entry, &rxdesc);
 
 	/*
 	 * Replace current entry's skb with the newly allocated one,
@@ -325,12 +311,12 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
 	urb->transfer_buffer_length = entry->skb->len;
 
 skip_entry:
-	if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) {
-		__set_bit(ENTRY_OWNER_NIC, &entry->flags);
+	if (test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) {
+		__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 		usb_submit_urb(urb, GFP_ATOMIC);
 	}
 
-	rt2x00_ring_index_inc(ring);
+	rt2x00queue_index_inc(entry->queue, Q_INDEX);
 }
 
 /*
@@ -338,18 +324,27 @@ skip_entry:
  */
 void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
 {
-	struct data_ring *ring;
+	struct queue_entry_priv_usb_rx *priv_rx;
+	struct queue_entry_priv_usb_tx *priv_tx;
+	struct data_queue *queue;
 	unsigned int i;
 
 	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000,
 				    REGISTER_TIMEOUT);
 
 	/*
-	 * Cancel all rings.
+	 * Cancel all queues.
 	 */
-	ring_for_each(rt2x00dev, ring) {
-		for (i = 0; i < ring->stats.limit; i++)
-			usb_kill_urb(ring->entry[i].priv);
+	for (i = 0; i < rt2x00dev->rx->limit; i++) {
+		priv_rx = rt2x00dev->rx->entries[i].priv_data;
+		usb_kill_urb(priv_rx->urb);
+	}
+
+	txall_queue_for_each(rt2x00dev, queue) {
+		for (i = 0; i < queue->limit; i++) {
+			priv_tx = queue->entries[i].priv_data;
+			usb_kill_urb(priv_tx->urb);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
@@ -358,64 +353,108 @@ EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
  * Device initialization handlers.
  */
 void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
-			    struct data_entry *entry)
+			    struct queue_entry *entry)
 {
-	struct usb_device *usb_dev =
-	    interface_to_usbdev(rt2x00dev_usb(rt2x00dev));
+	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
+	struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data;
 
-	usb_fill_bulk_urb(entry->priv, usb_dev,
+	usb_fill_bulk_urb(priv_rx->urb, usb_dev,
 			  usb_rcvbulkpipe(usb_dev, 1),
 			  entry->skb->data, entry->skb->len,
 			  rt2x00usb_interrupt_rxdone, entry);
 
-	__set_bit(ENTRY_OWNER_NIC, &entry->flags);
-	usb_submit_urb(entry->priv, GFP_ATOMIC);
+	__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+	usb_submit_urb(priv_rx->urb, GFP_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
 
 void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
-			    struct data_entry *entry)
+			    struct queue_entry *entry)
 {
 	entry->flags = 0;
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
 
 static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
-			       struct data_ring *ring)
+			       struct data_queue *queue)
 {
+	struct queue_entry_priv_usb_rx *priv_rx;
+	struct queue_entry_priv_usb_tx *priv_tx;
+	struct queue_entry_priv_usb_bcn *priv_bcn;
+	struct urb *urb;
+	unsigned int guardian =
+	    test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
 	unsigned int i;
 
 	/*
 	 * Allocate the URB's
 	 */
-	for (i = 0; i < ring->stats.limit; i++) {
-		ring->entry[i].priv = usb_alloc_urb(0, GFP_KERNEL);
-		if (!ring->entry[i].priv)
+	for (i = 0; i < queue->limit; i++) {
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb)
 			return -ENOMEM;
+
+		if (queue->qid == QID_RX) {
+			priv_rx = queue->entries[i].priv_data;
+			priv_rx->urb = urb;
+		} else if (queue->qid == QID_MGMT && guardian) {
+			priv_bcn = queue->entries[i].priv_data;
+			priv_bcn->urb = urb;
+
+			urb = usb_alloc_urb(0, GFP_KERNEL);
+			if (!urb)
+				return -ENOMEM;
+
+			priv_bcn->guardian_urb = urb;
+		} else {
+			priv_tx = queue->entries[i].priv_data;
+			priv_tx->urb = urb;
+		}
 	}
 
 	return 0;
 }
 
 static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
-			       struct data_ring *ring)
+			       struct data_queue *queue)
 {
+	struct queue_entry_priv_usb_rx *priv_rx;
+	struct queue_entry_priv_usb_tx *priv_tx;
+	struct queue_entry_priv_usb_bcn *priv_bcn;
+	struct urb *urb;
+	unsigned int guardian =
+	    test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
 	unsigned int i;
 
-	if (!ring->entry)
+	if (!queue->entries)
 		return;
 
-	for (i = 0; i < ring->stats.limit; i++) {
-		usb_kill_urb(ring->entry[i].priv);
-		usb_free_urb(ring->entry[i].priv);
-		if (ring->entry[i].skb)
-			kfree_skb(ring->entry[i].skb);
+	for (i = 0; i < queue->limit; i++) {
+		if (queue->qid == QID_RX) {
+			priv_rx = queue->entries[i].priv_data;
+			urb = priv_rx->urb;
+		} else if (queue->qid == QID_MGMT && guardian) {
+			priv_bcn = queue->entries[i].priv_data;
+
+			usb_kill_urb(priv_bcn->guardian_urb);
+			usb_free_urb(priv_bcn->guardian_urb);
+
+			urb = priv_bcn->urb;
+		} else {
+			priv_tx = queue->entries[i].priv_data;
+			urb = priv_tx->urb;
+		}
+
+		usb_kill_urb(urb);
+		usb_free_urb(urb);
+		if (queue->entries[i].skb)
+			kfree_skb(queue->entries[i].skb);
 	}
 }
 
 int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
 {
-	struct data_ring *ring;
+	struct data_queue *queue;
 	struct sk_buff *skb;
 	unsigned int entry_size;
 	unsigned int i;
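
In the hunk above, rt2x00usb_alloc_urb() and rt2x00usb_free_urb() interpret each entry's opaque priv_data area differently depending on queue->qid. A minimal sketch of that dispatch pattern follows; the QID names and the urb/guardian_urb fields mirror the diff, while everything else (QID_AC_BE, the int placeholders, the sizing helper) is an illustrative stand-in.

#include <stdio.h>
#include <stdlib.h>

enum data_queue_qid { QID_RX, QID_MGMT, QID_AC_BE };

/* Each entry carries an opaque blob; its real type depends on the qid. */
struct queue_entry { void *priv_data; };

struct queue_entry_priv_usb_rx  { int urb; };
struct queue_entry_priv_usb_tx  { int urb; };
struct queue_entry_priv_usb_bcn { int urb; int guardian_urb; };

static size_t priv_size(enum data_queue_qid qid, int guardian)
{
	if (qid == QID_RX)
		return sizeof(struct queue_entry_priv_usb_rx);
	if (qid == QID_MGMT && guardian)
		return sizeof(struct queue_entry_priv_usb_bcn);
	return sizeof(struct queue_entry_priv_usb_tx);
}

int main(void)
{
	struct queue_entry entry;

	entry.priv_data = calloc(1, priv_size(QID_MGMT, 1));
	if (!entry.priv_data)
		return 1;

	/* Only the beacon queue gets the extra guardian URB slot. */
	((struct queue_entry_priv_usb_bcn *)entry.priv_data)->guardian_urb = 1;

	printf("beacon priv size: %zu bytes\n", priv_size(QID_MGMT, 1));
	free(entry.priv_data);
	return 0;
}

Every allocation and teardown site must agree on the qid-to-type mapping, which is why both functions in the hunk repeat the same three-way branch.
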
@@ -424,25 +463,22 @@ int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
 	/*
 	 * Allocate DMA
 	 */
-	ring_for_each(rt2x00dev, ring) {
-		status = rt2x00usb_alloc_urb(rt2x00dev, ring);
+	queue_for_each(rt2x00dev, queue) {
+		status = rt2x00usb_alloc_urb(rt2x00dev, queue);
 		if (status)
 			goto exit;
 	}
 
 	/*
-	 * For the RX ring, skb's should be allocated.
+	 * For the RX queue, skb's should be allocated.
 	 */
 	entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size;
-	for (i = 0; i < rt2x00dev->rx->stats.limit; i++) {
-		skb = dev_alloc_skb(NET_IP_ALIGN + entry_size);
+	for (i = 0; i < rt2x00dev->rx->limit; i++) {
+		skb = rt2x00usb_alloc_rxskb(rt2x00dev->rx);
 		if (!skb)
 			goto exit;
 
-		skb_reserve(skb, NET_IP_ALIGN);
-		skb_put(skb, entry_size);
-
-		rt2x00dev->rx->entry[i].skb = skb;
+		rt2x00dev->rx->entries[i].skb = skb;
 	}
 
 	return 0;
@@ -456,10 +492,10 @@ EXPORT_SYMBOL_GPL(rt2x00usb_initialize);
 
 void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
 {
-	struct data_ring *ring;
+	struct data_queue *queue;
 
-	ring_for_each(rt2x00dev, ring)
-		rt2x00usb_free_urb(rt2x00dev, ring);
+	queue_for_each(rt2x00dev, queue)
+		rt2x00usb_free_urb(rt2x00dev, queue);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);
@@ -627,9 +663,9 @@ EXPORT_SYMBOL_GPL(rt2x00usb_resume);
 #endif /* CONFIG_PM */
 
 /*
- * rt2x00pci module information.
+ * rt2x00usb module information.
  */
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION("rt2x00 library");
+MODULE_DESCRIPTION("rt2x00 usb library");
 MODULE_LICENSE("GPL");