Diffstat (limited to 'drivers/net/wimax/i2400m/rx.c')
-rw-r--r-- | drivers/net/wimax/i2400m/rx.c | 677 |
1 file changed, 652 insertions, 25 deletions
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index cd525066d4b7..02419bfd64b5 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -39,7 +39,7 @@ | |||
39 | * - Use skb_clone(), break up processing in chunks | 39 | * - Use skb_clone(), break up processing in chunks |
40 | * - Split transport/device specific | 40 | * - Split transport/device specific |
41 | * - Make buffer size dynamic to exert less memory pressure | 41 | * - Make buffer size dynamic to exert less memory pressure |
42 | * | 42 | * - RX reorder support |
43 | * | 43 | * |
44 | * This handles the RX path. | 44 | * This handles the RX path. |
45 | * | 45 | * |
@@ -77,14 +77,42 @@ | |||
77 | * In firmware >= 1.4, RX packets have an extended header (16 | 77 | * In firmware >= 1.4, RX packets have an extended header (16 |
78 | * bytes). This header conveys information for management of host | 78 | * bytes). This header conveys information for management of host |
79 | * reordering of packets (the device offloads storage of the packets | 79 | * reordering of packets (the device offloads storage of the packets |
80 | * for reordering to the host). | 80 | * for reordering to the host). Read below for more information. |
81 | * | ||
82 | * Currently this information is not used as the current code doesn't | ||
83 | * enable host reordering. | ||
84 | * | 81 | * |
85 | * The header is used as dummy space to emulate an ethernet header and | 82 | * The header is used as dummy space to emulate an ethernet header and |
86 | * thus be able to act as an ethernet device without having to reallocate. | 83 | * thus be able to act as an ethernet device without having to reallocate. |
87 | * | 84 | * |
85 | * DATA RX REORDERING | ||
86 | * | ||
87 | * Starting in firmware v1.4, the device can tag the packets it | ||
88 | * delivers with special reordering information; this allows it to | ||
89 | * manage packets more effectively when some frames are lost in the | ||
90 | * radio traffic. | ||
91 | * | ||
92 | * Thus, for RX packets that come out of order, the device gives the | ||
93 | * driver enough information to queue them properly and then, at | ||
94 | * some point, the signal to deliver all (or part) of the queued | ||
95 | * packets to the networking stack. There are 16 such queues. | ||
96 | * | ||
97 | * This only happens when a packet comes in with the "need reorder" | ||
98 | * flag set in the RX header. When that bit is set, any of the | ||
99 | * following operations may be indicated: | ||
100 | * | ||
101 | * - reset queue: send all queued packets to the OS | ||
102 | * | ||
103 | * - queue: queue a packet | ||
104 | * | ||
105 | * - update ws: update the queue's window start and deliver queued | ||
106 | * packets that meet the criteria | ||
107 | * | ||
108 | * - queue & update ws: queue a packet, update the window start and | ||
109 | * deliver queued packets that meet the criteria | ||
110 | * | ||
111 | * (delivery criterion: the packet's [normalized] sequence number is | ||
112 | * lower than the new [normalized] window start). | ||
113 | * | ||
114 | * See the i2400m_roq_*() functions for details. | ||
115 | * | ||
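A quick worked example of that criterion (a sketch in the spirit of the comment above, not text from the driver):

     * Example: a queue has ws = 2040 and holds skbs with sn = 2042,
     * 2045 and 3. An "update ws" to sn = 5 yields a new normalized
     * window start of (5 - 2040) % 2048 = 13 (after adding 2048 to
     * the negative remainder). The queued packets normalize to nsn 2,
     * 5 and 11; all three are below 13, so all three are delivered to
     * the stack and the queue's ws becomes 5.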
88 | * ROADMAP | 116 | * ROADMAP |
89 | * | 117 | * |
90 | * i2400m_rx | 118 | * i2400m_rx |
@@ -94,6 +122,17 @@ | |||
94 | * i2400m_net_rx | 122 | * i2400m_net_rx |
95 | * i2400m_rx_edata | 123 | * i2400m_rx_edata |
96 | * i2400m_net_erx | 124 | * i2400m_net_erx |
125 | * i2400m_roq_reset | ||
126 | * i2400m_net_erx | ||
127 | * i2400m_roq_queue | ||
128 | * __i2400m_roq_queue | ||
129 | * i2400m_roq_update_ws | ||
130 | * __i2400m_roq_update_ws | ||
131 | * i2400m_net_erx | ||
132 | * i2400m_roq_queue_update_ws | ||
133 | * __i2400m_roq_queue | ||
134 | * __i2400m_roq_update_ws | ||
135 | * i2400m_net_erx | ||
97 | * i2400m_rx_ctl | 136 | * i2400m_rx_ctl |
98 | * i2400m_msg_size_check | 137 | * i2400m_msg_size_check |
99 | * i2400m_report_hook_work [in a workqueue] | 138 | * i2400m_report_hook_work [in a workqueue] |
@@ -330,6 +369,469 @@ error_check: | |||
330 | return; | 369 | return; |
331 | } | 370 | } |
332 | 371 | ||
372 | |||
373 | /* | ||
374 | * Reorder queue data stored on skb->cb while the skb is queued in the | ||
375 | * reorder queues. | ||
376 | */ | ||
377 | struct i2400m_roq_data { | ||
378 | unsigned sn; /* Sequence number for the skb */ | ||
379 | enum i2400m_cs cs; /* packet type for the skb */ | ||
380 | }; | ||
381 | |||
382 | |||
383 | /* | ||
384 | * ReOrder Queue | ||
385 | * | ||
386 | * @ws: Window Start; sequence number where the current window start | ||
387 | * is for this queue | ||
388 | * @queue: the skb queue itself | ||
389 | * @log: circular ring buffer used to log information about the | ||
390 | * reorder process in this queue that can be displayed in case of | ||
391 | * error to help diagnose it. | ||
392 | * | ||
393 | * This is the head for a list of skbs. The skb->cb member of each | ||
394 | * skb queued here contains a 'struct i2400m_roq_data' where we | ||
395 | * store the sequence number (sn) and the cs (packet type) coming | ||
396 | * from the device's RX payload header. | ||
397 | */ | ||
398 | struct i2400m_roq | ||
399 | { | ||
400 | unsigned ws; | ||
401 | struct sk_buff_head queue; | ||
402 | struct i2400m_roq_log *log; | ||
403 | }; | ||
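The skb->cb stashing described in the comment above is the standard kernel idiom; a minimal sketch of how this file uses it (roq_data_stash() is a hypothetical helper name, and the BUILD_BUG_ON mirrors the guard used later in __i2400m_roq_queue()):

    /* sketch: stash per-packet reorder state in the skb's scratch area;
     * skb->cb is owned by whoever currently holds the skb, so it is
     * free for the driver to use while the skb sits in the queue */
    static void roq_data_stash(struct sk_buff *skb, unsigned sn,
                               enum i2400m_cs cs)
    {
            struct i2400m_roq_data *roq_data;

            BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
            roq_data = (struct i2400m_roq_data *) &skb->cb;
            roq_data->sn = sn;      /* sequence number from the RX header */
            roq_data->cs = cs;      /* packet type from the RX header */
    }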
404 | |||
405 | |||
406 | static | ||
407 | void __i2400m_roq_init(struct i2400m_roq *roq) | ||
408 | { | ||
409 | roq->ws = 0; | ||
410 | skb_queue_head_init(&roq->queue); | ||
411 | } | ||
412 | |||
413 | |||
414 | static | ||
415 | unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq) | ||
416 | { | ||
417 | return ((unsigned long) roq - (unsigned long) i2400m->rx_roq) | ||
418 | / sizeof(*roq); | ||
419 | } | ||
420 | |||
421 | |||
422 | /* | ||
423 | * Normalize a sequence number based on the queue's window start | ||
424 | * | ||
425 | * nsn = (sn - ws) % 2048 | ||
426 | * | ||
427 | * Note that if @sn < @roq->ws the subtraction goes negative, and | ||
428 | * in C the remainder keeps the dividend's sign, so we normalize by | ||
429 | * adding 2048 to bring the result back into [0, 2048). | ||
430 | */ | ||
431 | static | ||
432 | unsigned __i2400m_roq_nsn(struct i2400m_roq *roq, unsigned sn) | ||
433 | { | ||
434 | int r; | ||
435 | r = ((int) sn - (int) roq->ws) % 2048; | ||
436 | if (r < 0) | ||
437 | r += 2048; | ||
438 | return r; | ||
439 | } | ||
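A detached sketch of this arithmetic with a few sample values (not driver code; it just repeats the computation above):

    /* same computation as __i2400m_roq_nsn(), detached from the driver */
    static unsigned roq_nsn(unsigned ws, unsigned sn)
    {
            int r = ((int) sn - (int) ws) % 2048;
            if (r < 0)
                    r += 2048;
            return r;
    }

    /* roq_nsn(0, 10)   == 10  -- plain distance from the window start
     * roq_nsn(2040, 3) == 11  -- sn wrapped past the 2048 boundary
     * roq_nsn(5, 5)    == 0   -- sn equal to ws normalizes to zero */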
440 | |||
441 | |||
442 | /* | ||
443 | * Circular buffer to keep the last N reorder operations | ||
444 | * | ||
445 | * In case something fails, we dump them to try to come up with | ||
446 | * what happened. | ||
447 | */ | ||
448 | enum { | ||
449 | I2400M_ROQ_LOG_LENGTH = 32, | ||
450 | }; | ||
451 | |||
452 | struct i2400m_roq_log { | ||
453 | struct i2400m_roq_log_entry { | ||
454 | enum i2400m_ro_type type; | ||
455 | unsigned ws, count, sn, nsn, new_ws; | ||
456 | } entry[I2400M_ROQ_LOG_LENGTH]; | ||
457 | unsigned in, out; | ||
458 | }; | ||
459 | |||
460 | |||
461 | /* Print a log entry */ | ||
462 | static | ||
463 | void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index, | ||
464 | unsigned e_index, | ||
465 | struct i2400m_roq_log_entry *e) | ||
466 | { | ||
467 | struct device *dev = i2400m_dev(i2400m); | ||
468 | |||
469 | switch (e->type) { | ||
470 | case I2400M_RO_TYPE_RESET: | ||
471 | dev_err(dev, "q#%d reset ws %u cnt %u sn %u/%u" | ||
472 | " - new nws %u\n", | ||
473 | index, e->ws, e->count, e->sn, e->nsn, e->new_ws); | ||
474 | break; | ||
475 | case I2400M_RO_TYPE_PACKET: | ||
476 | dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n", | ||
477 | index, e->ws, e->count, e->sn, e->nsn); | ||
478 | break; | ||
479 | case I2400M_RO_TYPE_WS: | ||
480 | dev_err(dev, "q#%d update_ws ws %u cnt %u sn %u/%u" | ||
481 | " - new nws %u\n", | ||
482 | index, e->ws, e->count, e->sn, e->nsn, e->new_ws); | ||
483 | break; | ||
484 | case I2400M_RO_TYPE_PACKET_WS: | ||
485 | dev_err(dev, "q#%d queue_update_ws ws %u cnt %u sn %u/%u" | ||
486 | " - new nws %u\n", | ||
487 | index, e->ws, e->count, e->sn, e->nsn, e->new_ws); | ||
488 | break; | ||
489 | default: | ||
490 | dev_err(dev, "q#%d BUG? entry %u - unknown type %u\n", | ||
491 | index, e_index, e->type); | ||
492 | break; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | |||
497 | static | ||
498 | void i2400m_roq_log_add(struct i2400m *i2400m, | ||
499 | struct i2400m_roq *roq, enum i2400m_ro_type type, | ||
500 | unsigned ws, unsigned count, unsigned sn, | ||
501 | unsigned nsn, unsigned new_ws) | ||
502 | { | ||
503 | struct i2400m_roq_log_entry *e; | ||
504 | unsigned cnt_idx; | ||
505 | int index = __i2400m_roq_index(i2400m, roq); | ||
506 | |||
507 | /* if we run out of space, we drop the oldest entry */ | ||
508 | if (roq->log->in - roq->log->out == I2400M_ROQ_LOG_LENGTH) | ||
509 | roq->log->out++; | ||
510 | cnt_idx = roq->log->in++ % I2400M_ROQ_LOG_LENGTH; | ||
511 | e = &roq->log->entry[cnt_idx]; | ||
512 | |||
513 | e->type = type; | ||
514 | e->ws = ws; | ||
515 | e->count = count; | ||
516 | e->sn = sn; | ||
517 | e->nsn = nsn; | ||
518 | e->new_ws = new_ws; | ||
519 | |||
520 | if (d_test(1)) | ||
521 | i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e); | ||
522 | } | ||
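The in and out counters only ever grow, and only their difference matters; a detached sketch of the slot arithmetic (log_slot() is a hypothetical name):

    /* sketch of the log ring's slot arithmetic, not driver code */
    enum { LOG_LEN = 32 };          /* mirrors I2400M_ROQ_LOG_LENGTH */

    static unsigned log_slot(unsigned *in, unsigned *out)
    {
            if (*in - *out == LOG_LEN)      /* full: drop the oldest */
                    (*out)++;
            return (*in)++ % LOG_LEN;       /* slot for the new entry */
    }

Unsigned wraparound keeps *in - *out exact because the distance never exceeds LOG_LEN, and the live entries are always out .. in - 1, each mapped through % LOG_LEN -- exactly the range i2400m_roq_log_dump() walks.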
523 | |||
524 | |||
525 | /* Dump all the entries in the FIFO and reinitialize it */ | ||
526 | static | ||
527 | void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq) | ||
528 | { | ||
529 | unsigned cnt, cnt_idx; | ||
530 | struct i2400m_roq_log_entry *e; | ||
531 | int index = __i2400m_roq_index(i2400m, roq); | ||
532 | |||
533 | BUG_ON(roq->log->out > roq->log->in); | ||
534 | for (cnt = roq->log->out; cnt < roq->log->in; cnt++) { | ||
535 | cnt_idx = cnt % I2400M_ROQ_LOG_LENGTH; | ||
536 | e = &roq->log->entry[cnt_idx]; | ||
537 | i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e); | ||
538 | memset(e, 0, sizeof(*e)); | ||
539 | } | ||
540 | roq->log->in = roq->log->out = 0; | ||
541 | } | ||
542 | |||
543 | |||
544 | /* | ||
545 | * Backbone for the queuing of an skb (by normalized sequence number) | ||
546 | * | ||
547 | * @i2400m: device descriptor | ||
548 | * @roq: reorder queue where to add | ||
549 | * @skb: the skb to add | ||
550 | * @sn: the sequence number of the skb | ||
551 | * @nsn: the normalized sequence number of the skb (pre-computed by the | ||
552 | * caller from the @sn and @roq->ws). | ||
553 | * | ||
554 | * We try first a couple of quick cases: | ||
555 | * | ||
556 | * - the queue is empty | ||
557 | * - the skb would be appended to the queue | ||
558 | * | ||
559 | * These will be the most common operations. | ||
560 | * | ||
561 | * If these fail, then we have to do a sorted insertion in the queue, | ||
562 | * which is the slowest path. | ||
563 | * | ||
564 | * We don't have to take a reference to the skb, as we are going to own it. | ||
565 | */ | ||
566 | static | ||
567 | void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq, | ||
568 | struct sk_buff *skb, unsigned sn, unsigned nsn) | ||
569 | { | ||
570 | struct device *dev = i2400m_dev(i2400m); | ||
571 | struct sk_buff *skb_itr; | ||
572 | struct i2400m_roq_data *roq_data_itr, *roq_data; | ||
573 | unsigned nsn_itr; | ||
574 | |||
575 | d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n", | ||
576 | i2400m, roq, skb, sn, nsn); | ||
577 | |||
578 | roq_data = (struct i2400m_roq_data *) &skb->cb; | ||
579 | BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb)); | ||
580 | roq_data->sn = sn; | ||
581 | d_printf(3, dev, "ERX: roq %p [ws %u] nsn %d sn %u\n", | ||
582 | roq, roq->ws, nsn, roq_data->sn); | ||
583 | |||
584 | /* Queues will be empty in not-so-bad environments, so try | ||
585 | * that first */ | ||
586 | if (skb_queue_empty(&roq->queue)) { | ||
587 | d_printf(2, dev, "ERX: roq %p - first one\n", roq); | ||
588 | __skb_queue_head(&roq->queue, skb); | ||
589 | goto out; | ||
590 | } | ||
591 | /* Now try append, as most of the operations will be that */ | ||
592 | skb_itr = skb_peek_tail(&roq->queue); | ||
593 | roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb; | ||
594 | nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn); | ||
595 | /* NSN bounds assumed correct (checked when it was queued) */ | ||
596 | if (nsn >= nsn_itr) { | ||
597 | d_printf(2, dev, "ERX: roq %p - appended after %p (nsn %d sn %u)\n", | ||
598 | roq, skb_itr, nsn_itr, roq_data_itr->sn); | ||
599 | __skb_queue_tail(&roq->queue, skb); | ||
600 | goto out; | ||
601 | } | ||
602 | /* None of the fast path options worked. Iterate to find the | ||
603 | * right spot to insert the packet; we know the queue is | ||
604 | * not empty, so we are not the first ones; we also know we | ||
605 | * are not going to be the last ones. The list is sorted, so | ||
606 | * we have to insert before the first entry with an nsn_itr | ||
607 | * greater than our nsn. */ | ||
608 | skb_queue_walk(&roq->queue, skb_itr) { | ||
609 | roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb; | ||
610 | nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn); | ||
611 | /* NSN bounds assumed correct (checked when it was queued) */ | ||
612 | if (nsn_itr > nsn) { | ||
613 | d_printf(2, dev, "ERX: roq %p - queued before %p " | ||
614 | "(nsn %d sn %u)\n", roq, skb_itr, nsn_itr, | ||
615 | roq_data_itr->sn); | ||
616 | __skb_queue_before(&roq->queue, skb_itr, skb); | ||
617 | goto out; | ||
618 | } | ||
619 | } | ||
620 | /* If we get here, that is VERY bad -- print info to help | ||
621 | * diagnose and crash it */ | ||
622 | dev_err(dev, "SW BUG? failed to insert packet\n"); | ||
623 | dev_err(dev, "ERX: roq %p [ws %u] skb %p nsn %d sn %u\n", | ||
624 | roq, roq->ws, skb, nsn, roq_data->sn); | ||
625 | skb_queue_walk(&roq->queue, skb_itr) { | ||
626 | roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb; | ||
627 | nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn); | ||
628 | /* NSN bounds assumed correct (checked when it was queued) */ | ||
629 | dev_err(dev, "ERX: roq %p skb_itr %p nsn %d sn %u\n", | ||
630 | roq, skb_itr, nsn_itr, roq_data_itr->sn); | ||
631 | } | ||
632 | BUG(); | ||
633 | out: | ||
634 | d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n", | ||
635 | i2400m, roq, skb, sn, nsn); | ||
636 | return; | ||
637 | } | ||
638 | |||
639 | |||
640 | /* | ||
641 | * Backbone for the update window start operation | ||
642 | * | ||
643 | * @i2400m: device descriptor | ||
644 | * @roq: Reorder queue | ||
645 | * @sn: New sequence number | ||
646 | * | ||
647 | * Updates the window start of a queue; when doing so, it must deliver | ||
648 | * to the networking stack all the queued skb's whose normalized | ||
649 | * sequence number is lower than the new normalized window start. | ||
650 | */ | ||
651 | static | ||
652 | unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq, | ||
653 | unsigned sn) | ||
654 | { | ||
655 | struct device *dev = i2400m_dev(i2400m); | ||
656 | struct sk_buff *skb_itr, *tmp_itr; | ||
657 | struct i2400m_roq_data *roq_data_itr; | ||
658 | unsigned new_nws, nsn_itr; | ||
659 | |||
660 | new_nws = __i2400m_roq_nsn(roq, sn); | ||
661 | if (unlikely(new_nws >= 1024) && d_test(1)) { | ||
662 | dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n", | ||
663 | new_nws, sn, roq->ws); | ||
664 | WARN_ON(1); | ||
665 | i2400m_roq_log_dump(i2400m, roq); | ||
666 | } | ||
667 | skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) { | ||
668 | roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb; | ||
669 | nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn); | ||
670 | /* NSN bounds assumed correct (checked when it was queued) */ | ||
671 | if (nsn_itr < new_nws) { | ||
672 | d_printf(2, dev, "ERX: roq %p - release skb %p " | ||
673 | "(nsn %u/%u new nws %u)\n", | ||
674 | roq, skb_itr, nsn_itr, roq_data_itr->sn, | ||
675 | new_nws); | ||
676 | __skb_unlink(skb_itr, &roq->queue); | ||
677 | i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs); | ||
678 | } | ||
679 | else | ||
680 | break; /* the rest have nsn_itr >= new_nws */ | ||
681 | } | ||
682 | roq->ws = sn; | ||
683 | return new_nws; | ||
684 | } | ||
685 | |||
686 | |||
687 | /* | ||
688 | * Reset a queue | ||
689 | * | ||
690 | * @i2400m: device descriptor | ||
691 | * @roq: reorder queue to reset | ||
692 | * | ||
693 | * Deliver all the packets and reset the window-start to zero. Name is | ||
694 | * kind of misleading. | ||
695 | */ | ||
696 | static | ||
697 | void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq) | ||
698 | { | ||
699 | struct device *dev = i2400m_dev(i2400m); | ||
700 | struct sk_buff *skb_itr, *tmp_itr; | ||
701 | struct i2400m_roq_data *roq_data_itr; | ||
702 | |||
703 | d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq); | ||
704 | i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET, | ||
705 | roq->ws, skb_queue_len(&roq->queue), | ||
706 | ~0, ~0, 0); | ||
707 | skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) { | ||
708 | roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb; | ||
709 | d_printf(2, dev, "ERX: roq %p - release skb %p (sn %u)\n", | ||
710 | roq, skb_itr, roq_data_itr->sn); | ||
711 | __skb_unlink(skb_itr, &roq->queue); | ||
712 | i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs); | ||
713 | } | ||
714 | roq->ws = 0; | ||
715 | d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq); | ||
716 | return; | ||
717 | } | ||
718 | |||
719 | |||
720 | /* | ||
721 | * Queue a packet | ||
722 | * | ||
723 | * @i2400m: device descriptor | ||
724 | * @roq: reorder queue where to add | ||
725 | * @skb: skb containing the packet data | ||
726 | * @lbn: Last block number of the packet in @skb (used as the | ||
727 | * sequence number for reordering) | ||
728 | * | ||
729 | * The hardware is asking the driver to queue a packet for later | ||
730 | * delivery to the networking stack. | ||
731 | */ | ||
732 | static | ||
733 | void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq, | ||
734 | struct sk_buff *skb, unsigned lbn) | ||
735 | { | ||
736 | struct device *dev = i2400m_dev(i2400m); | ||
737 | unsigned nsn, len; | ||
738 | |||
739 | d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u)\n", | ||
740 | i2400m, roq, skb, lbn); | ||
741 | len = skb_queue_len(&roq->queue); | ||
742 | nsn = __i2400m_roq_nsn(roq, lbn); | ||
743 | if (unlikely(nsn >= 1024)) { | ||
744 | dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n", | ||
745 | nsn, lbn, roq->ws); | ||
746 | i2400m_roq_log_dump(i2400m, roq); | ||
747 | i2400m->bus_reset(i2400m, I2400M_RT_WARM); | ||
748 | } else { | ||
749 | __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn); | ||
750 | i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET, | ||
751 | roq->ws, len, lbn, nsn, ~0); | ||
752 | } | ||
753 | d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n", | ||
754 | i2400m, roq, skb, lbn); | ||
755 | return; | ||
756 | } | ||
757 | |||
758 | |||
759 | /* | ||
760 | * Update the window start in a reorder queue and deliver all skbs | ||
761 | * with a lower (normalized) sequence number | ||
762 | * | ||
763 | * @i2400m: device descriptor | ||
764 | * @roq: Reorder queue | ||
765 | * @sn: New sequence number | ||
766 | */ | ||
767 | static | ||
768 | void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq, | ||
769 | unsigned sn) | ||
770 | { | ||
771 | struct device *dev = i2400m_dev(i2400m); | ||
772 | unsigned old_ws, nsn, len; | ||
773 | |||
774 | d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn); | ||
775 | old_ws = roq->ws; | ||
776 | len = skb_queue_len(&roq->queue); | ||
777 | nsn = __i2400m_roq_update_ws(i2400m, roq, sn); | ||
778 | i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS, | ||
779 | old_ws, len, sn, nsn, roq->ws); | ||
780 | d_fnend(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn); | ||
781 | return; | ||
782 | } | ||
783 | |||
784 | |||
785 | /* | ||
786 | * Queue a packet and update the window start | ||
787 | * | ||
788 | * @i2400m: device descriptor | ||
789 | * @roq: reorder queue where to add | ||
790 | * @skb: skb containing the packet data | ||
791 | * @sn: Last block number of the packet in @skb (used as the | ||
792 | * sequence number for reordering) | ||
793 | * | ||
794 | * Note that unlike i2400m_roq_update_ws(), which sets the new window | ||
795 | * start to @sn, in here we'll set it to @sn + 1. | ||
796 | */ | ||
797 | static | ||
798 | void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq, | ||
799 | struct sk_buff *skb, unsigned sn) | ||
800 | { | ||
801 | struct device *dev = i2400m_dev(i2400m); | ||
802 | unsigned nsn, old_ws, len; | ||
803 | |||
804 | d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n", | ||
805 | i2400m, roq, skb, sn); | ||
806 | len = skb_queue_len(&roq->queue); | ||
807 | nsn = __i2400m_roq_nsn(roq, sn); | ||
808 | old_ws = roq->ws; | ||
809 | if (unlikely(nsn >= 1024)) { | ||
810 | dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n", | ||
811 | nsn, sn, roq->ws); | ||
812 | i2400m_roq_log_dump(i2400m, roq); | ||
813 | i2400m->bus_reset(i2400m, I2400M_RT_WARM); | ||
814 | } else { | ||
815 | /* if the queue is empty, don't bother as we'd queue | ||
816 | * it and immediately unqueue it -- just deliver it */ | ||
817 | if (len == 0) { | ||
818 | struct i2400m_roq_data *roq_data; | ||
819 | roq_data = (struct i2400m_roq_data *) &skb->cb; | ||
820 | i2400m_net_erx(i2400m, skb, roq_data->cs); | ||
821 | } | ||
822 | else { | ||
823 | __i2400m_roq_queue(i2400m, roq, skb, sn, nsn); | ||
824 | __i2400m_roq_update_ws(i2400m, roq, sn + 1); | ||
825 | } | ||
826 | i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS, | ||
827 | old_ws, len, sn, nsn, roq->ws); | ||
828 | } | ||
829 | d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n", | ||
830 | i2400m, roq, skb, sn); | ||
831 | return; | ||
832 | } | ||
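A short worked example of the @sn + 1 rule (a sketch, using the normalization above): with ws = 3 and one skb queued with sn = 4 (nsn = 1), a "queue & update ws" packet with sn = 5 gets queued (nsn = 2) and the window start moves to 6; both queued packets then normalize below nsn(6) = 3, so both are delivered and the queue drains.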
833 | |||
834 | |||
333 | /* | 835 | /* |
334 | * Receive and send up an extended data packet | 836 | * Receive and send up an extended data packet |
335 | * | 837 | * |
@@ -347,6 +849,28 @@ error_check: | |||
347 | * having to copy packets around. | 849 | * having to copy packets around. |
348 | * | 850 | * |
349 | * This function handles said path. | 851 | * This function handles said path. |
852 | * | ||
853 | * | ||
854 | * Receive and send up an extended data packet, reordering it as needed | ||
855 | * | ||
856 | * @i2400m: device descriptor | ||
857 | * @skb_rx: skb that contains the extended data packet | ||
858 | * @single_last: 1 if the payload is the only one or the last one of | ||
859 | * the skb. | ||
860 | * @payload: pointer to the packet's data (the extended data payload | ||
861 | * header followed by the packet data). | ||
862 | * @size: size of the payload | ||
863 | * | ||
864 | * Pass over to the networking stack a data packet that might have | ||
865 | * reordering requirements. | ||
866 | * | ||
867 | * This needs to decide if the skb in which the packet is | ||
868 | * contained can be reused or if it needs to be cloned. Then the skb | ||
869 | * has to be trimmed at the edges so that it begins at the space for | ||
870 | * the eth header, and then passed to i2400m_net_erx() for the stack. | ||
871 | * | ||
872 | * Assumes the caller has verified the sanity of the payload (size, | ||
873 | * etc) already. | ||
350 | */ | 874 | */ |
351 | static | 875 | static |
352 | void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx, | 876 | void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx, |
@@ -357,53 +881,86 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx, | |||
357 | struct net_device *net_dev = i2400m->wimax_dev.net_dev; | 881 | struct net_device *net_dev = i2400m->wimax_dev.net_dev; |
358 | struct sk_buff *skb; | 882 | struct sk_buff *skb; |
359 | enum i2400m_cs cs; | 883 | enum i2400m_cs cs; |
360 | unsigned reorder_needed; | 884 | u32 reorder; |
885 | unsigned ro_needed, ro_type, ro_cin, ro_sn; | ||
886 | struct i2400m_roq *roq; | ||
887 | struct i2400m_roq_data *roq_data; | ||
361 | 888 | ||
362 | d_fnstart(4, dev, "(i2400m %p skb_rx %p single %u payload %p " | 889 | BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr)); |
890 | |||
891 | d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p " | ||
363 | "size %zu)\n", i2400m, skb_rx, single_last, payload, size); | 892 | "size %zu)\n", i2400m, skb_rx, single_last, payload, size); |
364 | if (size < sizeof(*hdr)) { | 893 | if (size < sizeof(*hdr)) { |
365 | dev_err(dev, "ERX: HW BUG? message with short header (%zu " | 894 | dev_err(dev, "ERX: HW BUG? message with short header (%zu " |
366 | "vs %zu bytes expected)\n", size, sizeof(*hdr)); | 895 | "vs %zu bytes expected)\n", size, sizeof(*hdr)); |
367 | goto error; | 896 | goto error; |
368 | } | 897 | } |
369 | reorder_needed = le32_to_cpu(hdr->reorder & I2400M_REORDER_NEEDED); | 898 | |
370 | cs = hdr->cs; | ||
371 | if (reorder_needed) { | ||
372 | dev_err(dev, "ERX: HW BUG? reorder needed, it was disabled\n"); | ||
373 | goto error; | ||
374 | } | ||
375 | /* ok, so now decide if we want to clone or reuse the skb, | ||
376 | * pull and trim it so the beginning is the space for the eth | ||
377 | * header and pass it to i2400m_net_erx() for the stack */ | ||
378 | if (single_last) { | 899 | if (single_last) { |
379 | skb = skb_get(skb_rx); | 900 | skb = skb_get(skb_rx); |
380 | d_printf(3, dev, "ERX: reusing single payload skb %p\n", skb); | 901 | d_printf(3, dev, "ERX: skb %p reusing\n", skb); |
381 | } else { | 902 | } else { |
382 | skb = skb_clone(skb_rx, GFP_KERNEL); | 903 | skb = skb_clone(skb_rx, GFP_KERNEL); |
383 | d_printf(3, dev, "ERX: cloning %p\n", skb); | ||
384 | if (skb == NULL) { | 904 | if (skb == NULL) { |
385 | dev_err(dev, "ERX: no memory to clone skb\n"); | 905 | dev_err(dev, "ERX: no memory to clone skb\n"); |
386 | net_dev->stats.rx_dropped++; | 906 | net_dev->stats.rx_dropped++; |
387 | goto error_skb_clone; | 907 | goto error_skb_clone; |
388 | } | 908 | } |
909 | d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx); | ||
389 | } | 910 | } |
390 | /* now we have to pull and trim so that the skb points to the | 911 | /* now we have to pull and trim so that the skb points to the |
391 | * beginning of the IP packet; the netdev part will add the | 912 | * beginning of the IP packet; the netdev part will add the |
392 | * ethernet header as needed. */ | 913 | * ethernet header as needed - we know there is enough space |
393 | BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr)); | 914 | * because of the BUILD_BUG_ON() at the top of this function. */ |
394 | skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data); | 915 | skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data); |
395 | skb_trim(skb, (void *) skb_end_pointer(skb) - payload + sizeof(*hdr)); | 916 | skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr)); |
396 | i2400m_net_erx(i2400m, skb, cs); | 917 | |
918 | reorder = le32_to_cpu(hdr->reorder); | ||
919 | ro_needed = reorder & I2400M_RO_NEEDED; | ||
920 | cs = hdr->cs; | ||
921 | if (ro_needed) { | ||
922 | ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE; | ||
923 | ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN; | ||
924 | ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; | ||
925 | |||
926 | roq = &i2400m->rx_roq[ro_cin]; | ||
927 | roq_data = (struct i2400m_roq_data *) &skb->cb; | ||
928 | roq_data->sn = ro_sn; | ||
929 | roq_data->cs = cs; | ||
930 | d_printf(2, dev, "ERX: reorder needed: " | ||
931 | "type %u cin %u [ws %u] sn %u/%u len %zuB\n", | ||
932 | ro_type, ro_cin, roq->ws, ro_sn, | ||
933 | __i2400m_roq_nsn(roq, ro_sn), size); | ||
934 | d_dump(2, dev, payload, size); | ||
935 | switch (ro_type) { | ||
936 | case I2400M_RO_TYPE_RESET: | ||
937 | i2400m_roq_reset(i2400m, roq); | ||
938 | kfree_skb(skb); /* no data here */ | ||
939 | break; | ||
940 | case I2400M_RO_TYPE_PACKET: | ||
941 | i2400m_roq_queue(i2400m, roq, skb, ro_sn); | ||
942 | break; | ||
943 | case I2400M_RO_TYPE_WS: | ||
944 | i2400m_roq_update_ws(i2400m, roq, ro_sn); | ||
945 | kfree_skb(skb); /* no data here */ | ||
946 | break; | ||
947 | case I2400M_RO_TYPE_PACKET_WS: | ||
948 | i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn); | ||
949 | break; | ||
950 | default: | ||
951 | dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type); | ||
952 | } | ||
953 | } | ||
954 | else | ||
955 | i2400m_net_erx(i2400m, skb, cs); | ||
397 | error_skb_clone: | 956 | error_skb_clone: |
398 | error: | 957 | error: |
399 | d_fnend(4, dev, "(i2400m %p skb_rx %p single %u payload %p " | 958 | d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p " |
400 | "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size); | 959 | "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size); |
401 | return; | 960 | return; |
402 | } | 961 | } |
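For reference, a sketch of how the reorder word decomposes under the masks and shifts used above. The concrete values shown are assumptions consistent with the 16 queues and the 2048-value sequence space described at the top of the file; the authoritative definitions are the I2400M_RO_* constants in i2400m.h:

    /* assumed layout of hdr->reorder; verify against I2400M_RO_* */
    enum {
            RO_NEEDED     = 0x01,   /* bit 0: reordering required  */
            RO_TYPE       = 0x03,   /* 2-bit operation type...     */
            RO_TYPE_SHIFT = 1,      /* ...at bits 1-2              */
            RO_CIN        = 0x0f,   /* 4-bit queue index...        */
            RO_CIN_SHIFT  = 4,      /* ...at bits 4-7 (16 queues)  */
            RO_SN         = 0x7ff,  /* 11-bit sequence number...   */
            RO_SN_SHIFT   = 8,      /* ...at bits 8-18 (mod 2048)  */
    };

    static void ro_decode(u32 reorder, unsigned *type,
                          unsigned *cin, unsigned *sn)
    {
            *type = (reorder >> RO_TYPE_SHIFT) & RO_TYPE;
            *cin  = (reorder >> RO_CIN_SHIFT) & RO_CIN;
            *sn   = (reorder >> RO_SN_SHIFT) & RO_SN;
    }

With reorder = 0x521, for instance, this yields needed = 1, type = 0 (reset), cin = 2 and sn = 5 under the assumed layout; i2400m_rx_edata() above performs the same extraction inline and then dispatches on the type.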
403 | 962 | ||
404 | 963 | ||
405 | |||
406 | |||
407 | /* | 964 | /* |
408 | * Act on a received payload | 965 | * Act on a received payload |
409 | * | 966 | * |
@@ -632,3 +1189,73 @@ error_msg_hdr_check: | |||
632 | return result; | 1189 | return result; |
633 | } | 1190 | } |
634 | EXPORT_SYMBOL_GPL(i2400m_rx); | 1191 | EXPORT_SYMBOL_GPL(i2400m_rx); |
1192 | |||
1193 | |||
1194 | /* | ||
1195 | * Initialize the RX queue and infrastructure | ||
1196 | * | ||
1197 | * This sets up all the RX reordering infrastructure, which will not | ||
1198 | * be used if reordering is not enabled or if the firmware does not | ||
1199 | * support it. The device is told to do reordering in | ||
1200 | * i2400m_dev_initialize(), where it also looks at the value of the | ||
1201 | * i2400m->rx_reorder switch before taking a decision. | ||
1202 | * | ||
1203 | * Note we allocate the roq queues in one chunk and their logging | ||
1204 | * support in another one, and then we set up the pointers from | ||
1205 | * the former to the latter. | ||
1206 | */ | ||
1207 | int i2400m_rx_setup(struct i2400m *i2400m) | ||
1208 | { | ||
1209 | int result = 0; | ||
1210 | struct device *dev = i2400m_dev(i2400m); | ||
1211 | |||
1212 | i2400m->rx_reorder = i2400m_rx_reorder_disabled ? 0 : 1; | ||
1213 | if (i2400m->rx_reorder) { | ||
1214 | unsigned itr; | ||
1215 | size_t size; | ||
1216 | struct i2400m_roq_log *rd; | ||
1217 | |||
1218 | result = -ENOMEM; | ||
1219 | |||
1220 | size = sizeof(i2400m->rx_roq[0]) * (I2400M_RO_CIN + 1); | ||
1221 | i2400m->rx_roq = kzalloc(size, GFP_KERNEL); | ||
1222 | if (i2400m->rx_roq == NULL) { | ||
1223 | dev_err(dev, "RX: cannot allocate %zu bytes for " | ||
1224 | "reorder queues\n", size); | ||
1225 | goto error_roq_alloc; | ||
1226 | } | ||
1227 | |||
1228 | size = sizeof(*i2400m->rx_roq[0].log) * (I2400M_RO_CIN + 1); | ||
1229 | rd = kzalloc(size, GFP_KERNEL); | ||
1230 | if (rd == NULL) { | ||
1231 | dev_err(dev, "RX: cannot allocate %zu bytes for " | ||
1232 | "reorder queues log areas\n", size); | ||
1233 | result = -ENOMEM; | ||
1234 | goto error_roq_log_alloc; | ||
1235 | } | ||
1236 | |||
1237 | for (itr = 0; itr < I2400M_RO_CIN + 1; itr++) { | ||
1238 | __i2400m_roq_init(&i2400m->rx_roq[itr]); | ||
1239 | i2400m->rx_roq[itr].log = &rd[itr]; | ||
1240 | } | ||
1241 | } | ||
1242 | return 0; | ||
1243 | |||
1244 | error_roq_log_alloc: | ||
1245 | kfree(i2400m->rx_roq); | ||
1246 | error_roq_alloc: | ||
1247 | return result; | ||
1248 | } | ||
1249 | |||
1250 | |||
1251 | /* Tear down the RX queue and infrastructure */ | ||
1252 | void i2400m_rx_release(struct i2400m *i2400m) | ||
1253 | { | ||
1254 | if (i2400m->rx_reorder) { | ||
1255 | unsigned itr; | ||
1256 | for (itr = 0; itr < I2400M_RO_CIN + 1; itr++) | ||
1257 | __skb_queue_purge(&i2400m->rx_roq[itr].queue); | ||
1258 | kfree(i2400m->rx_roq[0].log); | ||
1259 | kfree(i2400m->rx_roq); | ||
1260 | } | ||
1261 | } | ||