author     Christian Lamparter <chunkeey@googlemail.com>   2010-08-04 19:36:04 -0400
committer  John W. Linville <linville@tuxdriver.com>       2010-08-16 15:26:39 -0400
commit     aa0c86364f925c6f12195072562b18c5609ff082 (patch)
tree       d32f3bff00334ecc49d74a0ecd4d36c3e7bdd78f /net/mac80211
parent     6044474ed86484b899d46e040ade3872e83fc9f7 (diff)
mac80211: put rx handlers into separate functions
This patch takes the reorder logic out of the RX path and moves
it into separate routines, making the release of expired frames
accessible.
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net/mac80211')
-rw-r--r--  net/mac80211/rx.c  214
1 file changed, 128 insertions(+), 86 deletions(-)
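
To see the logic that moves into ieee80211_sta_reorder_release() in isolation: frames are released in sequence order until the next hole in the circular buffer; if the head slot is a hole while later frames have waited past HT_RX_REORDER_BUF_TIMEOUT, the timed-out frames are released and the head sequence number is advanced across the skipped slots. Below is a minimal, self-contained C model of that algorithm, assuming plain integers in place of jiffies and boolean slots in place of queued sk_buffs; struct reorder_model and its fields are illustrative stand-ins, not mac80211 types.

	#include <stdbool.h>
	#include <stdio.h>

	#define BUF_SIZE	8	/* reorder window, like tid_agg_rx->buf_size */
	#define SEQ_MASK	0xfff	/* 802.11 sequence numbers are 12 bits wide */
	#define TIMEOUT		10	/* stands in for HT_RX_REORDER_BUF_TIMEOUT */

	struct reorder_model {
		bool filled[BUF_SIZE];	/* reorder_buf[i] != NULL in the kernel */
		int time[BUF_SIZE];	/* arrival time, like reorder_time[i] */
		int head_seq_num;	/* next sequence number owed to the stack */
		int ssn;		/* starting sequence number of the session */
		int stored;		/* like stored_mpdu_num */
	};

	static int seq_sub(int a, int b)
	{
		return (a - b) & SEQ_MASK;
	}

	/*
	 * Hand one frame up and advance the head by one, playing the
	 * role of ieee80211_release_reorder_frame().
	 */
	static void release_slot(struct reorder_model *m, int index)
	{
		printf("release seq %d (slot %d)\n", m->head_seq_num, index);
		m->filled[index] = false;
		m->stored--;
		m->head_seq_num = (m->head_seq_num + 1) & SEQ_MASK;
	}

	/* mirrors the shape of ieee80211_sta_reorder_release() */
	static void reorder_release(struct reorder_model *m, int now)
	{
		int index = seq_sub(m->head_seq_num, m->ssn) % BUF_SIZE;

		if (!m->filled[index] && m->stored > 1) {
			/*
			 * The head slot is a hole: scan forward and release
			 * any frame that has waited longer than TIMEOUT.
			 */
			int j, skipped = 1;

			for (j = (index + 1) % BUF_SIZE; j != index;
			     j = (j + 1) % BUF_SIZE) {
				if (!m->filled[j]) {
					skipped++;
					continue;
				}
				if (now <= m->time[j] + TIMEOUT)
					break;
				/* advance the head across the skipped holes... */
				m->head_seq_num =
					(m->head_seq_num + skipped) & SEQ_MASK;
				skipped = 0;
				/* ...then release the frame itself (+1 more) */
				release_slot(m, j);
			}
		} else while (m->filled[index]) {
			/* common case: release in order until the next hole */
			release_slot(m, index);
			index = seq_sub(m->head_seq_num, m->ssn) % BUF_SIZE;
		}
	}

	int main(void)
	{
		struct reorder_model m = { .head_seq_num = 100, .ssn = 100 };
		int seqs[] = { 100, 102, 103 };	/* frame 101 never arrives */

		for (unsigned i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++) {
			int slot = seq_sub(seqs[i], m.ssn) % BUF_SIZE;

			m.filled[slot] = true;
			m.time[slot] = 0;	/* all three arrive at t = 0 */
			m.stored++;
		}

		reorder_release(&m, 1);		/* t = 1: releases 100, stops at the hole */
		reorder_release(&m, 50);	/* t = 50: 102 and 103 have expired */
		return 0;
	}

Running the model releases frame 100 in order, stalls on the missing frame 101, and on the later call releases 102 and 103 past the hole once they have timed out, advancing the head past the slot 101 never filled.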
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 225e8ee682e3..8301b4a980a2 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -583,6 +583,57 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
  */
 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
 
+static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
+					  struct tid_ampdu_rx *tid_agg_rx,
+					  struct sk_buff_head *frames)
+{
+	int index;
+
+	/* release the buffer until next missing frame */
+	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+						tid_agg_rx->buf_size;
+	if (!tid_agg_rx->reorder_buf[index] &&
+	    tid_agg_rx->stored_mpdu_num > 1) {
+		/*
+		 * No buffers ready to be released, but check whether any
+		 * frames in the reorder buffer have timed out.
+		 */
+		int j;
+		int skipped = 1;
+		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
+		     j = (j + 1) % tid_agg_rx->buf_size) {
+			if (!tid_agg_rx->reorder_buf[j]) {
+				skipped++;
+				continue;
+			}
+			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
+					HT_RX_REORDER_BUF_TIMEOUT))
+				break;
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+			if (net_ratelimit())
+				printk(KERN_DEBUG "%s: release an RX reorder "
+				       "frame due to timeout on earlier "
+				       "frames\n",
+				       wiphy_name(hw->wiphy));
+#endif
+			ieee80211_release_reorder_frame(hw, tid_agg_rx,
+							j, frames);
+
+			/*
+			 * Increment the head seq# also for the skipped slots.
+			 */
+			tid_agg_rx->head_seq_num =
+				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
+			skipped = 0;
+		}
+	} else while (tid_agg_rx->reorder_buf[index]) {
+		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+							tid_agg_rx->buf_size;
+	}
+}
+
 /*
  * As this function belongs to the RX path it must be under
  * rcu_read_lock protection. It returns false if the frame
@@ -643,49 +694,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	tid_agg_rx->reorder_buf[index] = skb;
 	tid_agg_rx->reorder_time[index] = jiffies;
 	tid_agg_rx->stored_mpdu_num++;
-	/* release the buffer until next missing frame */
-	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-						tid_agg_rx->buf_size;
-	if (!tid_agg_rx->reorder_buf[index] &&
-	    tid_agg_rx->stored_mpdu_num > 1) {
-		/*
-		 * No buffers ready to be released, but check whether any
-		 * frames in the reorder buffer have timed out.
-		 */
-		int j;
-		int skipped = 1;
-		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
-		     j = (j + 1) % tid_agg_rx->buf_size) {
-			if (!tid_agg_rx->reorder_buf[j]) {
-				skipped++;
-				continue;
-			}
-			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
-					HT_RX_REORDER_BUF_TIMEOUT))
-				break;
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-			if (net_ratelimit())
-				printk(KERN_DEBUG "%s: release an RX reorder "
-				       "frame due to timeout on earlier "
-				       "frames\n",
-				       wiphy_name(hw->wiphy));
-#endif
-			ieee80211_release_reorder_frame(hw, tid_agg_rx,
-							j, frames);
-
-			/*
-			 * Increment the head seq# also for the skipped slots.
-			 */
-			tid_agg_rx->head_seq_num =
-				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
-			skipped = 0;
-		}
-	} else while (tid_agg_rx->reorder_buf[index]) {
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
-		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-							tid_agg_rx->buf_size;
-	}
+	ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
 
 	return true;
 }
@@ -2267,19 +2276,46 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
 	dev_kfree_skb(skb);
 }
 
-
-static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
-					 struct ieee80211_rx_data *rx,
-					 struct sk_buff *skb,
-					 struct ieee80211_rate *rate)
+static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
+					 ieee80211_rx_result res)
 {
-	struct sk_buff_head reorder_release;
-	ieee80211_rx_result res = RX_DROP_MONITOR;
+	switch (res) {
+	case RX_DROP_MONITOR:
+		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
+		if (rx->sta)
+			rx->sta->rx_dropped++;
+		/* fall through */
+	case RX_CONTINUE: {
+		struct ieee80211_rate *rate = NULL;
+		struct ieee80211_supported_band *sband;
+		struct ieee80211_rx_status *status;
+
+		status = IEEE80211_SKB_RXCB((rx->skb));
+
+		sband = rx->local->hw.wiphy->bands[status->band];
+		if (!(status->flag & RX_FLAG_HT))
+			rate = &sband->bitrates[status->rate_idx];
 
-	__skb_queue_head_init(&reorder_release);
+		ieee80211_rx_cooked_monitor(rx, rate);
+		break;
+	}
+	case RX_DROP_UNUSABLE:
+		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
+		if (rx->sta)
+			rx->sta->rx_dropped++;
+		dev_kfree_skb(rx->skb);
+		break;
+	case RX_QUEUED:
+		I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
+		break;
+	}
+}
 
-	rx->skb = skb;
-	rx->sdata = sdata;
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
+				  struct sk_buff_head *frames)
+{
+	ieee80211_rx_result res = RX_DROP_MONITOR;
+	struct sk_buff *skb;
 
 #define CALL_RXH(rxh)			\
 	do {				\
@@ -2288,17 +2324,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
 			goto rxh_next;	\
 	} while (0);
 
-	/*
-	 * NB: the rxh_next label works even if we jump
-	 * to it from here because then the list will
-	 * be empty, which is a trivial check
-	 */
-	CALL_RXH(ieee80211_rx_h_passive_scan)
-	CALL_RXH(ieee80211_rx_h_check)
-
-	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
-
-	while ((skb = __skb_dequeue(&reorder_release))) {
+	while ((skb = __skb_dequeue(frames))) {
 		/*
 		 * all the other fields are valid across frames
 		 * that belong to an aMPDU since they are on the
@@ -2316,42 +2342,58 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
 		CALL_RXH(ieee80211_rx_h_remove_qos_control)
 		CALL_RXH(ieee80211_rx_h_amsdu)
 #ifdef CONFIG_MAC80211_MESH
-		if (ieee80211_vif_is_mesh(&sdata->vif))
+		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
 			CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
 		CALL_RXH(ieee80211_rx_h_data)
 
 		/* special treatment -- needs the queue */
-		res = ieee80211_rx_h_ctrl(rx, &reorder_release);
+		res = ieee80211_rx_h_ctrl(rx, frames);
 		if (res != RX_CONTINUE)
 			goto rxh_next;
 
 		CALL_RXH(ieee80211_rx_h_action)
 		CALL_RXH(ieee80211_rx_h_mgmt)
 
+ rxh_next:
+		ieee80211_rx_handlers_result(rx, res);
+
 #undef CALL_RXH
+	}
+}
+
+static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
+					 struct ieee80211_rx_data *rx,
+					 struct sk_buff *skb,
+					 struct ieee80211_rate *rate)
+{
+	struct sk_buff_head reorder_release;
+	ieee80211_rx_result res = RX_DROP_MONITOR;
+
+	__skb_queue_head_init(&reorder_release);
+
+	rx->skb = skb;
+	rx->sdata = sdata;
+
+#define CALL_RXH(rxh)			\
+	do {				\
+		res = rxh(rx);		\
+		if (res != RX_CONTINUE)	\
+			goto rxh_next;	\
+	} while (0);
+
+	CALL_RXH(ieee80211_rx_h_passive_scan)
+	CALL_RXH(ieee80211_rx_h_check)
+
+	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
+
+	ieee80211_rx_handlers(rx, &reorder_release);
+	return;
 
  rxh_next:
-		switch (res) {
-		case RX_DROP_MONITOR:
-			I802_DEBUG_INC(sdata->local->rx_handlers_drop);
-			if (rx->sta)
-				rx->sta->rx_dropped++;
-			/* fall through */
-		case RX_CONTINUE:
-			ieee80211_rx_cooked_monitor(rx, rate);
-			break;
-		case RX_DROP_UNUSABLE:
-			I802_DEBUG_INC(sdata->local->rx_handlers_drop);
-			if (rx->sta)
-				rx->sta->rx_dropped++;
-			dev_kfree_skb(rx->skb);
-			break;
-		case RX_QUEUED:
-			I802_DEBUG_INC(sdata->local->rx_handlers_queued);
-			break;
-		}
-	}
+	ieee80211_rx_handlers_result(rx, res);
+
+#undef CALL_RXH
 }
 
 /* main receive path */
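
Taken together, the refactoring splits one function into three: ieee80211_invoke_rx_handlers() keeps the per-packet preamble and the reorder step, ieee80211_rx_handlers() runs the handler chain once per released frame, and ieee80211_rx_handlers_result() is the shared tail that disposes of each frame according to its result code. A rough, self-contained C sketch of that control-flow shape follows; the frame struct, the two handlers, and the reduced result values are invented for illustration and are not the kernel's.

	#include <stdio.h>

	/* a reduced stand-in for ieee80211_rx_result */
	enum rx_res { RX_CONTINUE, RX_DROP, RX_QUEUED };

	struct frame { int seq; int bad; };

	/* two stand-in handlers; the real chain is the CALL_RXH() list in rx.c */
	static enum rx_res handler_check(struct frame *f)
	{
		return f->bad ? RX_DROP : RX_CONTINUE;
	}

	static enum rx_res handler_data(struct frame *f)
	{
		printf("frame %d delivered\n", f->seq);
		return RX_QUEUED;
	}

	/* shared tail, like ieee80211_rx_handlers_result() */
	static void handlers_result(struct frame *f, enum rx_res res)
	{
		switch (res) {
		case RX_DROP:
			printf("frame %d dropped\n", f->seq);
			break;
		case RX_QUEUED:
		case RX_CONTINUE:
			break;
		}
	}

	/* like ieee80211_rx_handlers(): run the chain once per queued frame */
	static void run_handlers(struct frame *frames, int n)
	{
		static enum rx_res (*const chain[])(struct frame *) = {
			handler_check, handler_data,
		};

		for (int i = 0; i < n; i++) {
			enum rx_res res = RX_CONTINUE;

			for (unsigned h = 0; h < sizeof(chain) / sizeof(chain[0]); h++) {
				res = chain[h](&frames[i]);
				if (res != RX_CONTINUE)
					break;	/* plays the role of goto rxh_next */
			}
			handlers_result(&frames[i], res);
		}
	}

	int main(void)
	{
		struct frame q[] = { { 1, 0 }, { 2, 1 }, { 3, 0 } };

		run_handlers(q, 3);
		return 0;
	}

The point of the split is that the dequeue-and-run loop no longer cares where its frames came from, which is what lets the expired-frame release feed frames into the same machinery independently of the immediate RX path.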