path: root/net/mac80211/rx.c
author		Christian Lamparter <chunkeey@googlemail.com>	2010-12-30 11:25:29 -0500
committer	John W. Linville <linville@tuxdriver.com>	2011-01-04 14:46:13 -0500
commit		24a8fdad35835e8d71f7c4b978a246fafed2e7b4 (patch)
tree		3ee2b5710b802dca6fe43cbedf461c54e0f2be93 /net/mac80211/rx.c
parent		1186488b4a4d4871e40cb1604ba3ede3d4b7cc90 (diff)
mac80211: serialize rx path workers
This patch addresses the issue of serialization between the main rx path and the various reorder release timers. <http://www.spinics.net/lists/linux-wireless/msg57214.html>

It converts the previously local "frames" queue into a global rx queue [rx_skb_queue]. This way, everyone (be it the main rx path or some reorder release timeout) can add frames to it.

Only one active rx handler worker [ieee80211_rx_handlers] is needed. All other threads which have lost the race on "running_rx_handler" can now simply return, knowing that the thread which got the "edge" will also take care of their workload.

Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
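The scheme described above boils down to a shared queue whose own lock also guards a "running" flag, so only the thread that wins the race drains the queue while later arrivals just enqueue and return. The following is a minimal, hypothetical user-space sketch of that pattern using pthreads instead of the kernel's skb queue and spinlock; names such as work_queue and queue_run are illustrative only, and the actual mac80211 code is in the diff below.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct work_item {
	struct work_item *next;
};

struct work_queue {
	pthread_mutex_t lock;		/* also protects "running" */
	struct work_item *head, *tail;
	bool running;
};

/* Producers (main path or timers) append under the queue lock. */
static void queue_add(struct work_queue *q, struct work_item *w)
{
	pthread_mutex_lock(&q->lock);
	w->next = NULL;
	if (q->tail)
		q->tail->next = w;
	else
		q->head = w;
	q->tail = w;
	pthread_mutex_unlock(&q->lock);
}

static void process_item(struct work_item *w)
{
	(void)w;	/* stand-in for the per-frame rx handlers */
}

/*
 * Every producer calls this after queueing. Only one caller at a time
 * actually runs the loop, but it also consumes the work queued by the
 * callers that bailed out early.
 */
static void queue_run(struct work_queue *q)
{
	struct work_item *w;

	pthread_mutex_lock(&q->lock);
	if (q->running)
		goto unlock;	/* someone else already has the "edge" */
	q->running = true;

	while ((w = q->head) != NULL) {
		q->head = w->next;
		if (!q->head)
			q->tail = NULL;
		/* drop the lock while handling the item, as the rx path
		 * drops rx_skb_queue.lock around the rx handlers */
		pthread_mutex_unlock(&q->lock);
		process_item(w);
		pthread_mutex_lock(&q->lock);
	}

	/* clear the flag while the queue is known to be empty */
	q->running = false;
unlock:
	pthread_mutex_unlock(&q->lock);
}
```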
Diffstat (limited to 'net/mac80211/rx.c')
-rw-r--r--	net/mac80211/rx.c	80
1 files changed, 37 insertions, 43 deletions
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 260b48bac42e..12cbb4df81aa 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -533,9 +533,9 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
 
 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
 					    struct tid_ampdu_rx *tid_agg_rx,
-					    int index,
-					    struct sk_buff_head *frames)
+					    int index)
 {
+	struct ieee80211_local *local = hw_to_local(hw);
 	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
 	struct ieee80211_rx_status *status;
 
@@ -549,7 +549,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
 	tid_agg_rx->reorder_buf[index] = NULL;
 	status = IEEE80211_SKB_RXCB(skb);
 	status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
-	__skb_queue_tail(frames, skb);
+	skb_queue_tail(&local->rx_skb_queue, skb);
 
 no_frame:
 	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -557,8 +557,7 @@ no_frame:
 
 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
 					     struct tid_ampdu_rx *tid_agg_rx,
-					     u16 head_seq_num,
-					     struct sk_buff_head *frames)
+					     u16 head_seq_num)
 {
 	int index;
 
@@ -567,7 +566,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
 	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 							tid_agg_rx->buf_size;
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
 	}
 }
 
@@ -583,8 +582,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
 
 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
-					  struct tid_ampdu_rx *tid_agg_rx,
-					  struct sk_buff_head *frames)
+					  struct tid_ampdu_rx *tid_agg_rx)
 {
 	int index, j;
 
@@ -615,8 +613,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 			wiphy_debug(hw->wiphy,
 				    "release an RX reorder frame due to timeout on earlier frames\n");
 #endif
-			ieee80211_release_reorder_frame(hw, tid_agg_rx,
-							j, frames);
+			ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
 
 			/*
 			 * Increment the head seq# also for the skipped slots.
@@ -626,7 +623,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 			skipped = 0;
 		}
 	} else while (tid_agg_rx->reorder_buf[index]) {
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 							tid_agg_rx->buf_size;
 	}
@@ -682,8 +679,7 @@ set_release_timer:
  */
 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 					     struct tid_ampdu_rx *tid_agg_rx,
-					     struct sk_buff *skb,
-					     struct sk_buff_head *frames)
+					     struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -710,8 +706,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
 		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
 		/* release stored frames up to new head to stack */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
-						 frames);
+		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
 	}
 
 	/* Now the new frame is always in the range of the reordering buffer */
@@ -739,7 +734,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	tid_agg_rx->reorder_buf[index] = skb;
 	tid_agg_rx->reorder_time[index] = jiffies;
 	tid_agg_rx->stored_mpdu_num++;
-	ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
+	ieee80211_sta_reorder_release(hw, tid_agg_rx);
 
  out:
 	spin_unlock(&tid_agg_rx->reorder_lock);
@@ -750,8 +745,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
  * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
  * true if the MPDU was buffered, false if it should be processed.
  */
-static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
-				       struct sk_buff_head *frames)
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
 {
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_local *local = rx->local;
@@ -806,11 +800,11 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
 	 * sure that we cannot get to it any more before doing
 	 * anything with it.
 	 */
-	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
 		return;
 
  dont_reorder:
-	__skb_queue_tail(frames, skb);
+	skb_queue_tail(&local->rx_skb_queue, skb);
 }
 
 static ieee80211_rx_result debug_noinline
@@ -1931,7 +1925,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 {
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_hw *hw = &local->hw;
@@ -1971,8 +1965,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 
 		spin_lock(&tid_agg_rx->reorder_lock);
 		/* release stored frames up to start of BAR */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
-						 frames);
+		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
 		spin_unlock(&tid_agg_rx->reorder_lock);
 
 		kfree_skb(skb);
@@ -2489,8 +2482,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
 	}
 }
 
-static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
-				  struct sk_buff_head *frames)
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 {
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 	struct sk_buff *skb;
@@ -2502,7 +2494,15 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
 			goto rxh_next;  \
 	} while (0);
 
-	while ((skb = __skb_dequeue(frames))) {
+	spin_lock(&rx->local->rx_skb_queue.lock);
+	if (rx->local->running_rx_handler)
+		goto unlock;
+
+	rx->local->running_rx_handler = true;
+
+	while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
+		spin_unlock(&rx->local->rx_skb_queue.lock);
+
 		/*
 		 * all the other fields are valid across frames
 		 * that belong to an aMPDU since they are on the
@@ -2525,12 +2525,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
 		CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
 		CALL_RXH(ieee80211_rx_h_data)
-
-		/* special treatment -- needs the queue */
-		res = ieee80211_rx_h_ctrl(rx, frames);
-		if (res != RX_CONTINUE)
-			goto rxh_next;
-
+		CALL_RXH(ieee80211_rx_h_ctrl);
 		CALL_RXH(ieee80211_rx_h_mgmt_check)
 		CALL_RXH(ieee80211_rx_h_action)
 		CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2539,18 +2534,20 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
 
  rxh_next:
 		ieee80211_rx_handlers_result(rx, res);
-
+		spin_lock(&rx->local->rx_skb_queue.lock);
 #undef CALL_RXH
 	}
+
+	rx->local->running_rx_handler = false;
+
+ unlock:
+	spin_unlock(&rx->local->rx_skb_queue.lock);
 }
 
 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 {
-	struct sk_buff_head reorder_release;
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 
-	__skb_queue_head_init(&reorder_release);
-
 #define CALL_RXH(rxh)			\
 	do {				\
 		res = rxh(rx);		\
@@ -2561,9 +2558,9 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 	CALL_RXH(ieee80211_rx_h_passive_scan)
 	CALL_RXH(ieee80211_rx_h_check)
 
-	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
+	ieee80211_rx_reorder_ampdu(rx);
 
-	ieee80211_rx_handlers(rx, &reorder_release);
+	ieee80211_rx_handlers(rx);
 	return;
 
  rxh_next:
@@ -2578,7 +2575,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
  */
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 {
-	struct sk_buff_head frames;
 	struct ieee80211_rx_data rx = {
 		.sta = sta,
 		.sdata = sta->sdata,
@@ -2591,13 +2587,11 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 	if (!tid_agg_rx)
 		return;
 
-	__skb_queue_head_init(&frames);
-
 	spin_lock(&tid_agg_rx->reorder_lock);
-	ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
+	ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
 	spin_unlock(&tid_agg_rx->reorder_lock);
 
-	ieee80211_rx_handlers(&rx, &frames);
+	ieee80211_rx_handlers(&rx);
 }
 
 /* main receive path */